repo_name
string
path
string
copies
string
size
string
content
string
license
string
vic-nation/android_kernel_gogh
drivers/usb/gadget/ci13xxx_udc.c
23
82862
/* * ci13xxx_udc.c - MIPS USB IP core family device controller * * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved. * * Author: David Lopo * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* * Description: MIPS USB IP core family device controller * Currently it only supports IP part number CI13412 * * This driver is composed of several blocks: * - HW: hardware interface * - DBG: debug facilities (optional) * - UTIL: utilities * - ISR: interrupts handling * - ENDPT: endpoint operations (Gadget API) * - GADGET: gadget operations (Gadget API) * - BUS: bus glue code, bus abstraction layer * * Compile Options * - CONFIG_USB_GADGET_DEBUG_FILES: enable debug facilities * - STALL_IN: non-empty bulk-in pipes cannot be halted * if defined mass storage compliance succeeds but with warnings * => case 4: Hi > Dn * => case 5: Hi > Di * => case 8: Hi <> Do * if undefined usbtest 13 fails * - TRACE: enable function tracing (depends on DEBUG) * * Main Features * - Chapter 9 & Mass Storage Compliance with Gadget File Storage * - Chapter 9 Compliance with Gadget Zero (STALL_IN undefined) * - Normal & LPM support * * USBTEST Report * - OK: 0-12, 13 (STALL_IN defined) & 14 * - Not Supported: 15 & 16 (ISO) * * TODO List * - OTG * - Isochronous & Interrupt Traffic * - Handle requests which spawns into several TDs * - GET_STATUS(device) - always reports 0 * - Gadget API (majority of optional features) * - Suspend & Remote Wakeup */ #include <linux/delay.h> #include <linux/device.h> #include <linux/dmapool.h> #include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/usb/otg.h> #ifdef 
CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE #include <linux/usb/composite.h> #endif #include "ci13xxx_udc.h" /****************************************************************************** * DEFINE *****************************************************************************/ /* ctrl register bank access */ static DEFINE_SPINLOCK(udc_lock); /* control endpoint description */ static const struct usb_endpoint_descriptor ctrl_endpt_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_CONTROL, .wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX), }; static const struct usb_endpoint_descriptor ctrl_endpt_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_CONTROL, .wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX), }; /* UDC descriptor */ static struct ci13xxx *_udc; /* Interrupt statistics */ #define ISR_MASK 0x1F static struct { u32 test; u32 ui; u32 uei; u32 pci; u32 uri; u32 sli; u32 none; struct { u32 cnt; u32 buf[ISR_MASK+1]; u32 idx; } hndl; } isr_statistics; /** * ffs_nr: find first (least significant) bit set * @x: the word to search * * This function returns bit number (instead of position) */ static int ffs_nr(u32 x) { int n = ffs(x); return n ? n-1 : 32; } /****************************************************************************** * HW block *****************************************************************************/ /* register bank descriptor */ static struct { unsigned lpm; /* is LPM? */ void __iomem *abs; /* bus map offset */ void __iomem *cap; /* bus map offset + CAP offset + CAP data */ size_t size; /* bank size */ } hw_bank; /* MSM specific */ #define ABS_AHBBURST (0x0090UL) #define ABS_AHBMODE (0x0098UL) /* UDC register map */ #define ABS_CAPLENGTH (0x100UL) #define ABS_HCCPARAMS (0x108UL) #define ABS_DCCPARAMS (0x124UL) #define ABS_TESTMODE (hw_bank.lpm ? 
0x0FCUL : 0x138UL) /* offset to CAPLENTGH (addr + data) */ #define CAP_USBCMD (0x000UL) #define CAP_USBSTS (0x004UL) #define CAP_USBINTR (0x008UL) #define CAP_DEVICEADDR (0x014UL) #define CAP_ENDPTLISTADDR (0x018UL) #define CAP_PORTSC (0x044UL) #define CAP_DEVLC (0x084UL) #define CAP_ENDPTPIPEID (0x0BCUL) #define CAP_USBMODE (hw_bank.lpm ? 0x0C8UL : 0x068UL) #define CAP_ENDPTSETUPSTAT (hw_bank.lpm ? 0x0D8UL : 0x06CUL) #define CAP_ENDPTPRIME (hw_bank.lpm ? 0x0DCUL : 0x070UL) #define CAP_ENDPTFLUSH (hw_bank.lpm ? 0x0E0UL : 0x074UL) #define CAP_ENDPTSTAT (hw_bank.lpm ? 0x0E4UL : 0x078UL) #define CAP_ENDPTCOMPLETE (hw_bank.lpm ? 0x0E8UL : 0x07CUL) #define CAP_ENDPTCTRL (hw_bank.lpm ? 0x0ECUL : 0x080UL) #define CAP_LAST (hw_bank.lpm ? 0x12CUL : 0x0C0UL) /* maximum number of enpoints: valid only after hw_device_reset() */ static unsigned hw_ep_max; /** * hw_ep_bit: calculates the bit number * @num: endpoint number * @dir: endpoint direction * * This function returns bit number */ static inline int hw_ep_bit(int num, int dir) { return num + (dir ? 
16 : 0); } /** * hw_aread: reads from register bitfield * @addr: address relative to bus map * @mask: bitfield mask * * This function returns register bitfield data */ static u32 hw_aread(u32 addr, u32 mask) { return ioread32(addr + hw_bank.abs) & mask; } /** * hw_awrite: writes to register bitfield * @addr: address relative to bus map * @mask: bitfield mask * @data: new data */ static void hw_awrite(u32 addr, u32 mask, u32 data) { iowrite32(hw_aread(addr, ~mask) | (data & mask), addr + hw_bank.abs); } /** * hw_cread: reads from register bitfield * @addr: address relative to CAP offset plus content * @mask: bitfield mask * * This function returns register bitfield data */ static u32 hw_cread(u32 addr, u32 mask) { return ioread32(addr + hw_bank.cap) & mask; } /** * hw_cwrite: writes to register bitfield * @addr: address relative to CAP offset plus content * @mask: bitfield mask * @data: new data */ static void hw_cwrite(u32 addr, u32 mask, u32 data) { iowrite32(hw_cread(addr, ~mask) | (data & mask), addr + hw_bank.cap); } /** * hw_ctest_and_clear: tests & clears register bitfield * @addr: address relative to CAP offset plus content * @mask: bitfield mask * * This function returns register bitfield data */ static u32 hw_ctest_and_clear(u32 addr, u32 mask) { u32 reg = hw_cread(addr, mask); iowrite32(reg, addr + hw_bank.cap); return reg; } /** * hw_ctest_and_write: tests & writes register bitfield * @addr: address relative to CAP offset plus content * @mask: bitfield mask * @data: new data * * This function returns register bitfield data */ static u32 hw_ctest_and_write(u32 addr, u32 mask, u32 data) { u32 reg = hw_cread(addr, ~0); iowrite32((reg & ~mask) | (data & mask), addr + hw_bank.cap); return (reg & mask) >> ffs_nr(mask); } static int hw_device_init(void __iomem *base) { u32 reg; /* bank is a module variable */ hw_bank.abs = base; hw_bank.cap = hw_bank.abs; hw_bank.cap += ABS_CAPLENGTH; hw_bank.cap += ioread8(hw_bank.cap); reg = hw_aread(ABS_HCCPARAMS, 
HCCPARAMS_LEN) >> ffs_nr(HCCPARAMS_LEN); hw_bank.lpm = reg; hw_bank.size = hw_bank.cap - hw_bank.abs; hw_bank.size += CAP_LAST; hw_bank.size /= sizeof(u32); reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN); hw_ep_max = reg * 2; /* cache hw ENDPT_MAX */ if (hw_ep_max == 0 || hw_ep_max > ENDPT_MAX) return -ENODEV; /* setup lock mode ? */ /* ENDPTSETUPSTAT is '0' by default */ /* HCSPARAMS.bf.ppc SHOULD BE zero for device */ return 0; } /** * hw_device_reset: resets chip (execute without interruption) * @base: register base address * * This function returns an error code */ static int hw_device_reset(struct ci13xxx *udc) { printk(KERN_INFO "usb:: %s\n", __func__); /* should flush & stop before reset */ hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0); hw_cwrite(CAP_USBCMD, USBCMD_RS, 0); hw_cwrite(CAP_USBCMD, USBCMD_RST, USBCMD_RST); while (hw_cread(CAP_USBCMD, USBCMD_RST)) udelay(10); /* not RTOS friendly */ if (udc->udc_driver->notify_event) udc->udc_driver->notify_event(udc, CI13XXX_CONTROLLER_RESET_EVENT); if (udc->udc_driver->flags & CI13XXX_DISABLE_STREAMING) hw_cwrite(CAP_USBMODE, USBMODE_SDIS, USBMODE_SDIS); /* USBMODE should be configured step by step */ hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE); hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE); hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM); /* HW >= 2.3 */ /* * ITC (Interrupt Threshold Control) field is to set the maximum * rate at which the device controller will issue interrupts. * The maximum interrupt interval measured in micro frames. * Valid values are 0, 1, 2, 4, 8, 16, 32, 64. The default value is * 8 micro frames. If CPU can handle interrupts at faster rate, ITC * can be set to lesser value to gain performance. 
*/ if (udc->udc_driver->flags & CI13XXX_ZERO_ITC) hw_cwrite(CAP_USBCMD, USBCMD_ITC_MASK, USBCMD_ITC(0)); if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) { pr_err("cannot enter in device mode"); pr_err("lpm = %i", hw_bank.lpm); return -ENODEV; } return 0; } /** * hw_device_state: enables/disables interrupts & starts/stops device (execute * without interruption) * @dma: 0 => disable, !0 => enable and set dma engine * * This function returns an error code */ static int hw_device_state(u32 dma) { printk(KERN_INFO "usb:: %s dma: %x\n", __func__, dma); if (dma) { hw_cwrite(CAP_ENDPTLISTADDR, ~0, dma); /* interrupt, error, port change, reset, sleep/suspend */ hw_cwrite(CAP_USBINTR, ~0, USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI); hw_cwrite(CAP_USBCMD, USBCMD_RS, USBCMD_RS); printk(KERN_INFO "usb:: %s hw_read(CAP_ENDPTLISTADDR, ~0): %x\n", __func__, hw_cread(CAP_ENDPTLISTADDR, ~0)); } else { hw_cwrite(CAP_USBCMD, USBCMD_RS, 0); hw_cwrite(CAP_USBINTR, ~0, 0); } printk(KERN_INFO "usb:: %s hw_read(CAP_USBINTR, ~0): %x\n", __func__, hw_cread(CAP_USBINTR, ~0)); printk(KERN_INFO "usb:: %s hw_read(CAP_USBCMD, USBCMD_RS): %x\n", __func__, hw_cread(CAP_USBCMD, USBCMD_RS)); return 0; } /** * hw_ep_flush: flush endpoint fifo (execute without interruption) * @num: endpoint number * @dir: endpoint direction * * This function returns an error code */ static int hw_ep_flush(int num, int dir) { int n = hw_ep_bit(num, dir); do { /* flush any pending transfer */ hw_cwrite(CAP_ENDPTFLUSH, BIT(n), BIT(n)); while (hw_cread(CAP_ENDPTFLUSH, BIT(n))) cpu_relax(); } while (hw_cread(CAP_ENDPTSTAT, BIT(n))); return 0; } /** * hw_ep_disable: disables endpoint (execute without interruption) * @num: endpoint number * @dir: endpoint direction * * This function returns an error code */ static int hw_ep_disable(int num, int dir) { hw_ep_flush(num, dir); hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32), dir ? 
ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0); return 0; } /** * hw_ep_enable: enables endpoint (execute without interruption) * @num: endpoint number * @dir: endpoint direction * @type: endpoint type * * This function returns an error code */ static int hw_ep_enable(int num, int dir, int type) { u32 mask, data; if (dir) { mask = ENDPTCTRL_TXT; /* type */ data = type << ffs_nr(mask); mask |= ENDPTCTRL_TXS; /* unstall */ mask |= ENDPTCTRL_TXR; /* reset data toggle */ data |= ENDPTCTRL_TXR; mask |= ENDPTCTRL_TXE; /* enable */ data |= ENDPTCTRL_TXE; } else { mask = ENDPTCTRL_RXT; /* type */ data = type << ffs_nr(mask); mask |= ENDPTCTRL_RXS; /* unstall */ mask |= ENDPTCTRL_RXR; /* reset data toggle */ data |= ENDPTCTRL_RXR; mask |= ENDPTCTRL_RXE; /* enable */ data |= ENDPTCTRL_RXE; } hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32), mask, data); /* make sure endpoint is enabled before returning */ mb(); return 0; } /** * hw_ep_get_halt: return endpoint halt status * @num: endpoint number * @dir: endpoint direction * * This function returns 1 if endpoint halted */ static int hw_ep_get_halt(int num, int dir) { u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS; return hw_cread(CAP_ENDPTCTRL + num * sizeof(u32), mask) ? 
1 : 0; } /** * hw_test_and_clear_setup_status: test & clear setup status (execute without * interruption) * @n: bit number (endpoint) * * This function returns setup status */ static int hw_test_and_clear_setup_status(int n) { return hw_ctest_and_clear(CAP_ENDPTSETUPSTAT, BIT(n)); } /** * hw_ep_prime: primes endpoint (execute without interruption) * @num: endpoint number * @dir: endpoint direction * @is_ctrl: true if control endpoint * * This function returns an error code */ static int hw_ep_prime(int num, int dir, int is_ctrl) { int n = hw_ep_bit(num, dir); if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num))) return -EAGAIN; hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n)); while (hw_cread(CAP_ENDPTPRIME, BIT(n))) cpu_relax(); if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num))) return -EAGAIN; /* status shoult be tested according with manual but it doesn't work */ return 0; } /** * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute * without interruption) * @num: endpoint number * @dir: endpoint direction * @value: true => stall, false => unstall * * This function returns an error code */ static int hw_ep_set_halt(int num, int dir, int value) { u32 addr, mask_xs, mask_xr; if (value != 0 && value != 1) return -EINVAL; do { if (hw_cread(CAP_ENDPTSETUPSTAT, BIT(num))) return 0; addr = CAP_ENDPTCTRL + num * sizeof(u32); mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS; mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR; /* data toggle - reserved for EP0 but it's in ESS */ hw_cwrite(addr, mask_xs|mask_xr, value ? 
mask_xs : mask_xr); } while (value != hw_ep_get_halt(num, dir)); return 0; } /** * hw_intr_clear: disables interrupt & clears interrupt status (execute without * interruption) * @n: interrupt bit * * This function returns an error code */ static int hw_intr_clear(int n) { if (n >= REG_BITS) return -EINVAL; hw_cwrite(CAP_USBINTR, BIT(n), 0); hw_cwrite(CAP_USBSTS, BIT(n), BIT(n)); return 0; } /** * hw_intr_force: enables interrupt & forces interrupt status (execute without * interruption) * @n: interrupt bit * * This function returns an error code */ static int hw_intr_force(int n) { if (n >= REG_BITS) return -EINVAL; hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, TESTMODE_FORCE); hw_cwrite(CAP_USBINTR, BIT(n), BIT(n)); hw_cwrite(CAP_USBSTS, BIT(n), BIT(n)); hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, 0); return 0; } /** * hw_is_port_high_speed: test if port is high speed * * This function returns true if high speed port */ static int hw_port_is_high_speed(void) { return hw_bank.lpm ? hw_cread(CAP_DEVLC, DEVLC_PSPD) : hw_cread(CAP_PORTSC, PORTSC_HSP); } /** * hw_port_test_get: reads port test mode value * * This function returns port test mode value */ static u8 hw_port_test_get(void) { return hw_cread(CAP_PORTSC, PORTSC_PTC) >> ffs_nr(PORTSC_PTC); } /** * hw_port_test_set: writes port test mode (execute without interruption) * @mode: new value * * This function returns an error code */ static int hw_port_test_set(u8 mode) { const u8 TEST_MODE_MAX = 7; if (mode > TEST_MODE_MAX) return -EINVAL; hw_cwrite(CAP_PORTSC, PORTSC_PTC, mode << ffs_nr(PORTSC_PTC)); return 0; } /** * hw_read_intr_enable: returns interrupt enable register * * This function returns register data */ static u32 hw_read_intr_enable(void) { return hw_cread(CAP_USBINTR, ~0); } /** * hw_read_intr_status: returns interrupt status register * * This function returns register data */ static u32 hw_read_intr_status(void) { return hw_cread(CAP_USBSTS, ~0); } /** * hw_register_read: reads all device registers (execute 
without interruption) * @buf: destination buffer * @size: buffer size * * This function returns number of registers read */ static size_t hw_register_read(u32 *buf, size_t size) { unsigned i; if (size > hw_bank.size) size = hw_bank.size; for (i = 0; i < size; i++) buf[i] = hw_aread(i * sizeof(u32), ~0); return size; } /** * hw_register_write: writes to register * @addr: register address * @data: register value * * This function returns an error code */ static int hw_register_write(u16 addr, u32 data) { /* align */ addr /= sizeof(u32); if (addr >= hw_bank.size) return -EINVAL; /* align */ addr *= sizeof(u32); hw_awrite(addr, ~0, data); return 0; } /** * hw_test_and_clear_complete: test & clear complete status (execute without * interruption) * @n: bit number (endpoint) * * This function returns complete status */ static int hw_test_and_clear_complete(int n) { return hw_ctest_and_clear(CAP_ENDPTCOMPLETE, BIT(n)); } /** * hw_test_and_clear_intr_active: test & clear active interrupts (execute * without interruption) * * This function returns active interrutps */ static u32 hw_test_and_clear_intr_active(void) { u32 reg = hw_read_intr_status() & hw_read_intr_enable(); hw_cwrite(CAP_USBSTS, ~0, reg); return reg; } /** * hw_test_and_clear_setup_guard: test & clear setup guard (execute without * interruption) * * This function returns guard value */ static int hw_test_and_clear_setup_guard(void) { return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, 0); } /** * hw_test_and_set_setup_guard: test & set setup guard (execute without * interruption) * * This function returns guard value */ static int hw_test_and_set_setup_guard(void) { return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, USBCMD_SUTW); } /** * hw_usb_set_address: configures USB address (execute without interruption) * @value: new USB address * * This function returns an error code */ static int hw_usb_set_address(u8 value) { /* advance */ hw_cwrite(CAP_DEVICEADDR, DEVICEADDR_USBADR | DEVICEADDR_USBADRA, value << 
ffs_nr(DEVICEADDR_USBADR) | DEVICEADDR_USBADRA); return 0; } /** * hw_usb_reset: restart device after a bus reset (execute without * interruption) * * This function returns an error code */ static int hw_usb_reset(void) { hw_usb_set_address(0); /* ESS flushes only at end?!? */ hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0); /* flush all EPs */ /* clear setup token semaphores */ hw_cwrite(CAP_ENDPTSETUPSTAT, 0, 0); /* writes its content */ /* clear complete status */ hw_cwrite(CAP_ENDPTCOMPLETE, 0, 0); /* writes its content */ /* wait until all bits cleared */ while (hw_cread(CAP_ENDPTPRIME, ~0)) udelay(10); /* not RTOS friendly */ /* reset all endpoints ? */ /* reset internal status and wait for further instructions no need to verify the port reset status (ESS does it) */ return 0; } /****************************************************************************** * DBG block *****************************************************************************/ /** * show_device: prints information about device capabilities and status * * Check "device.h" for details */ static ssize_t show_device(struct device *dev, struct device_attribute *attr, char *buf) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); struct usb_gadget *gadget = &udc->gadget; int n = 0; dbg_trace("[%s] %p\n", __func__, buf); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); return 0; } n += scnprintf(buf + n, PAGE_SIZE - n, "speed = %d\n", gadget->speed); n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed = %d\n", gadget->is_dualspeed); n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg = %d\n", gadget->is_otg); n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral = %d\n", gadget->is_a_peripheral); n += scnprintf(buf + n, PAGE_SIZE - n, "b_hnp_enable = %d\n", gadget->b_hnp_enable); n += scnprintf(buf + n, PAGE_SIZE - n, "a_hnp_support = %d\n", gadget->a_hnp_support); n += scnprintf(buf + n, PAGE_SIZE - n, "a_alt_hnp_support = %d\n", gadget->a_alt_hnp_support); n += 
scnprintf(buf + n, PAGE_SIZE - n, "name = %s\n", (gadget->name ? gadget->name : "")); return n; } static DEVICE_ATTR(device, S_IRUSR, show_device, NULL); /** * show_driver: prints information about attached gadget (if any) * * Check "device.h" for details */ static ssize_t show_driver(struct device *dev, struct device_attribute *attr, char *buf) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); struct usb_gadget_driver *driver = udc->driver; int n = 0; dbg_trace("[%s] %p\n", __func__, buf); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); return 0; } if (driver == NULL) return scnprintf(buf, PAGE_SIZE, "There is no gadget attached!\n"); n += scnprintf(buf + n, PAGE_SIZE - n, "function = %s\n", (driver->function ? driver->function : "")); n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n", driver->speed); return n; } static DEVICE_ATTR(driver, S_IRUSR, show_driver, NULL); /* Maximum event message length */ #define DBG_DATA_MSG 64UL /* Maximum event messages */ #define DBG_DATA_MAX 128UL /* Event buffer descriptor */ static struct { char (buf[DBG_DATA_MAX])[DBG_DATA_MSG]; /* buffer */ unsigned idx; /* index */ unsigned tty; /* print to console? 
*/ rwlock_t lck; /* lock */ } dbg_data = { .idx = 0, .tty = 0, .lck = __RW_LOCK_UNLOCKED(lck) }; /** * dbg_dec: decrements debug event index * @idx: buffer index */ static void dbg_dec(unsigned *idx) { *idx = (*idx - 1) & (DBG_DATA_MAX-1); } /** * dbg_inc: increments debug event index * @idx: buffer index */ static void dbg_inc(unsigned *idx) { *idx = (*idx + 1) & (DBG_DATA_MAX-1); } /** * dbg_print: prints the common part of the event * @addr: endpoint address * @name: event name * @status: status * @extra: extra information */ static void dbg_print(u8 addr, const char *name, int status, const char *extra) { struct timeval tval; unsigned int stamp; unsigned long flags; write_lock_irqsave(&dbg_data.lck, flags); do_gettimeofday(&tval); stamp = tval.tv_sec & 0xFFFF; /* 2^32 = 4294967296. Limit to 4096s */ stamp = stamp * 1000000 + tval.tv_usec; scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG, "%04X\t?%02X %-7.7s %4i ?t%s\n", stamp, addr, name, status, extra); dbg_inc(&dbg_data.idx); write_unlock_irqrestore(&dbg_data.lck, flags); if (dbg_data.tty != 0) pr_notice("%04X\t?%02X %-7.7s %4i ?t%s\n", stamp, addr, name, status, extra); } /** * dbg_done: prints a DONE event * @addr: endpoint address * @td: transfer descriptor * @status: status */ static void dbg_done(u8 addr, const u32 token, int status) { char msg[DBG_DATA_MSG]; scnprintf(msg, sizeof(msg), "%d %02X", (int)(token & TD_TOTAL_BYTES) >> ffs_nr(TD_TOTAL_BYTES), (int)(token & TD_STATUS) >> ffs_nr(TD_STATUS)); dbg_print(addr, "DONE", status, msg); } /** * dbg_event: prints a generic event * @addr: endpoint address * @name: event name * @status: status */ static void dbg_event(u8 addr, const char *name, int status) { if (name != NULL) dbg_print(addr, name, status, ""); } /* * dbg_queue: prints a QUEUE event * @addr: endpoint address * @req: USB request * @status: status */ static void dbg_queue(u8 addr, const struct usb_request *req, int status) { char msg[DBG_DATA_MSG]; if (req != NULL) { scnprintf(msg, 
sizeof(msg), "%d %d", !req->no_interrupt, req->length); dbg_print(addr, "QUEUE", status, msg); } } /** * dbg_setup: prints a SETUP event * @addr: endpoint address * @req: setup request */ static void dbg_setup(u8 addr, const struct usb_ctrlrequest *req) { char msg[DBG_DATA_MSG]; if (req != NULL) { scnprintf(msg, sizeof(msg), "%02X %02X %04X %04X %d", req->bRequestType, req->bRequest, le16_to_cpu(req->wValue), le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength)); dbg_print(addr, "SETUP", 0, msg); } } /** * show_events: displays the event buffer * * Check "device.h" for details */ static ssize_t show_events(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long flags; unsigned i, j, n = 0; dbg_trace("[%s] %p\n", __func__, buf); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); return 0; } read_lock_irqsave(&dbg_data.lck, flags); i = dbg_data.idx; for (dbg_dec(&i); i != dbg_data.idx; dbg_dec(&i)) { n += strlen(dbg_data.buf[i]); if (n >= PAGE_SIZE) { n -= strlen(dbg_data.buf[i]); break; } } for (j = 0, dbg_inc(&i); j < n; dbg_inc(&i)) j += scnprintf(buf + j, PAGE_SIZE - j, "%s", dbg_data.buf[i]); read_unlock_irqrestore(&dbg_data.lck, flags); return n; } /** * store_events: configure if events are going to be also printed to console * * Check "device.h" for details */ static ssize_t store_events(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned tty; dbg_trace("[%s] %p, %d\n", __func__, buf, count); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); goto done; } if (sscanf(buf, "%u", &tty) != 1 || tty > 1) { dev_err(dev, "<1|0>: enable|disable console log\n"); goto done; } dbg_data.tty = tty; dev_info(dev, "tty = %u", dbg_data.tty); done: return count; } static DEVICE_ATTR(events, S_IRUSR | S_IWUSR, show_events, store_events); /** * show_inters: interrupt status, enable status and historic * * Check "device.h" for details */ static ssize_t 
show_inters(struct device *dev, struct device_attribute *attr, char *buf) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); unsigned long flags; u32 intr; unsigned i, j, n = 0; dbg_trace("[%s] %p\n", __func__, buf); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); return 0; } spin_lock_irqsave(udc->lock, flags); n += scnprintf(buf + n, PAGE_SIZE - n, "status = %08x\n", hw_read_intr_status()); n += scnprintf(buf + n, PAGE_SIZE - n, "enable = %08x\n", hw_read_intr_enable()); n += scnprintf(buf + n, PAGE_SIZE - n, "*test = %d\n", isr_statistics.test); n += scnprintf(buf + n, PAGE_SIZE - n, "?ui = %d\n", isr_statistics.ui); n += scnprintf(buf + n, PAGE_SIZE - n, "?uei = %d\n", isr_statistics.uei); n += scnprintf(buf + n, PAGE_SIZE - n, "?pci = %d\n", isr_statistics.pci); n += scnprintf(buf + n, PAGE_SIZE - n, "?uri = %d\n", isr_statistics.uri); n += scnprintf(buf + n, PAGE_SIZE - n, "?sli = %d\n", isr_statistics.sli); n += scnprintf(buf + n, PAGE_SIZE - n, "*none = %d\n", isr_statistics.none); n += scnprintf(buf + n, PAGE_SIZE - n, "*hndl = %d\n", isr_statistics.hndl.cnt); for (i = isr_statistics.hndl.idx, j = 0; j <= ISR_MASK; j++, i++) { i &= ISR_MASK; intr = isr_statistics.hndl.buf[i]; if (USBi_UI & intr) n += scnprintf(buf + n, PAGE_SIZE - n, "ui "); intr &= ~USBi_UI; if (USBi_UEI & intr) n += scnprintf(buf + n, PAGE_SIZE - n, "uei "); intr &= ~USBi_UEI; if (USBi_PCI & intr) n += scnprintf(buf + n, PAGE_SIZE - n, "pci "); intr &= ~USBi_PCI; if (USBi_URI & intr) n += scnprintf(buf + n, PAGE_SIZE - n, "uri "); intr &= ~USBi_URI; if (USBi_SLI & intr) n += scnprintf(buf + n, PAGE_SIZE - n, "sli "); intr &= ~USBi_SLI; if (intr) n += scnprintf(buf + n, PAGE_SIZE - n, "??? 
"); if (isr_statistics.hndl.buf[i]) n += scnprintf(buf + n, PAGE_SIZE - n, "\n"); } spin_unlock_irqrestore(udc->lock, flags); return n; } /** * store_inters: enable & force or disable an individual interrutps * (to be used for test purposes only) * * Check "device.h" for details */ static ssize_t store_inters(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); unsigned long flags; unsigned en, bit; dbg_trace("[%s] %p, %d\n", __func__, buf, count); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); goto done; } if (sscanf(buf, "%u %u", &en, &bit) != 2 || en > 1) { dev_err(dev, "<1|0> <bit>: enable|disable interrupt"); goto done; } spin_lock_irqsave(udc->lock, flags); if (en) { if (hw_intr_force(bit)) dev_err(dev, "invalid bit number\n"); else isr_statistics.test++; } else { if (hw_intr_clear(bit)) dev_err(dev, "invalid bit number\n"); } spin_unlock_irqrestore(udc->lock, flags); done: return count; } static DEVICE_ATTR(inters, S_IRUSR | S_IWUSR, show_inters, store_inters); /** * show_port_test: reads port test mode * * Check "device.h" for details */ static ssize_t show_port_test(struct device *dev, struct device_attribute *attr, char *buf) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); unsigned long flags; unsigned mode; dbg_trace("[%s] %p\n", __func__, buf); if (attr == NULL || buf == NULL) { dev_err(dev, "[%s] EINVAL\n", __func__); return 0; } spin_lock_irqsave(udc->lock, flags); mode = hw_port_test_get(); spin_unlock_irqrestore(udc->lock, flags); return scnprintf(buf, PAGE_SIZE, "mode = %u\n", mode); } /** * store_port_test: writes port test mode * * Check "device.h" for details */ static ssize_t store_port_test(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); unsigned long flags; unsigned mode; dbg_trace("[%s] 
%p, %d\n", __func__, buf, count);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		goto done;
	}

	if (sscanf(buf, "%u", &mode) != 1) {
		dev_err(dev, "<mode>: set port test mode");
		goto done;
	}

	/* hw_port_test_set() validates the mode against the controller */
	spin_lock_irqsave(udc->lock, flags);
	if (hw_port_test_set(mode))
		dev_err(dev, "invalid mode\n");
	spin_unlock_irqrestore(udc->lock, flags);

 done:
	return count;
}
static DEVICE_ATTR(port_test, S_IRUSR | S_IWUSR,
		   show_port_test, store_port_test);

/**
 * show_qheads: DMA contents of all queue heads
 *
 * Dumps, for every endpoint pair (RX half / TX half of ci13xxx_ep[]),
 * the queue-head DMA address and its raw 32-bit words.
 *
 * Check "device.h" for details
 */
static ssize_t show_qheads(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	unsigned long flags;
	unsigned i, j, n = 0;

	dbg_trace("[%s] %p\n", __func__, buf);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		return 0;
	}

	/* lock out the ISR while reading the live queue heads */
	spin_lock_irqsave(udc->lock, flags);
	for (i = 0; i < hw_ep_max/2; i++) {
		/* RX endpoints occupy the first half of the array,
		 * TX endpoints the second half */
		struct ci13xxx_ep *mEpRx = &udc->ci13xxx_ep[i];
		struct ci13xxx_ep *mEpTx = &udc->ci13xxx_ep[i + hw_ep_max/2];
		n += scnprintf(buf + n, PAGE_SIZE - n,
			       "EP=%02i: RX=%08X TX=%08X\n",
			       i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
		for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
			n += scnprintf(buf + n, PAGE_SIZE - n,
				       " %04X: %08X %08X\n", j,
				       *((u32 *)mEpRx->qh.ptr + j),
				       *((u32 *)mEpTx->qh.ptr + j));
		}
	}
	spin_unlock_irqrestore(udc->lock, flags);

	return n;
}
static DEVICE_ATTR(qheads, S_IRUSR, show_qheads, NULL);

/**
 * show_registers: dumps all registers
 *
 * Check "device.h" for details
 */
static ssize_t show_registers(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	unsigned long flags;
	u32 *dump;
	unsigned i, k, n = 0;

	dbg_trace("[%s] %p\n", __func__, buf);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		return 0;
	}

	/* 512 u32 registers == 2048 bytes, matching the kmalloc below */
	dump = kmalloc(2048, GFP_KERNEL);
	if (dump == NULL)
		return -ENOMEM;

	spin_lock_irqsave(udc->lock, flags);
	k = hw_register_read(dump, 512);
	spin_unlock_irqrestore(udc->lock, flags);

	for (i = 0; i < k; i++) {
		n += scnprintf(buf + n, PAGE_SIZE - n,
			       "reg[0x%04X] = 0x%08X\n",
			       i * (unsigned)sizeof(u32), dump[i]);
	}
	kfree(dump);

	return n;
}

/**
 * store_registers: writes value to register address
 *
 * Input format: "<addr> <data>" parsed with %li (hex/dec accepted).
 *
 * Check "device.h" for details
 */
static ssize_t store_registers(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	unsigned long addr, data, flags;

	dbg_trace("[%s] %p, %d\n", __func__, buf, count);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		goto done;
	}

	if (sscanf(buf, "%li %li", &addr, &data) != 2) {
		dev_err(dev, "<addr> <data>: write data to register address");
		goto done;
	}

	spin_lock_irqsave(udc->lock, flags);
	if (hw_register_write(addr, data))
		dev_err(dev, "invalid address range\n");
	spin_unlock_irqrestore(udc->lock, flags);

 done:
	return count;
}
static DEVICE_ATTR(registers, S_IRUSR | S_IWUSR,
		   show_registers, store_registers);

/**
 * show_requests: DMA contents of all requests currently queued (all endpts)
 *
 * Check "device.h" for details
 */
static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	unsigned long flags;
	struct list_head   *ptr = NULL;
	struct ci13xxx_req *req = NULL;
	unsigned i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);

	dbg_trace("[%s] %p\n", __func__, buf);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		return 0;
	}

	spin_lock_irqsave(udc->lock, flags);
	for (i = 0; i < hw_ep_max; i++)
		list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue) {
			req = list_entry(ptr, struct ci13xxx_req, queue);

			/* NOTE(review): "i % hw_ep_max/2" parses as
			 * (i % hw_ep_max) / 2; the EP number printed for odd
			 * indices looks halved — probably meant
			 * i % (hw_ep_max/2).  Debug output only; verify. */
			n += scnprintf(buf + n, PAGE_SIZE - n,
				       "EP=%02i: TD=%08X %s\n",
				       i % hw_ep_max/2, (u32)req->dma,
				       ((i < hw_ep_max/2) ? "RX" : "TX"));

			for (j = 0; j < qSize; j++)
				n += scnprintf(buf + n, PAGE_SIZE - n,
					       " %04X: %08X\n", j,
					       *((u32 *)req->ptr + j));
		}
	spin_unlock_irqrestore(udc->lock, flags);

	return n;
}
static DEVICE_ATTR(requests, S_IRUSR, show_requests, NULL);

/* EP# and Direction */
/* Debug hook: manually re-prime an endpoint.  Writes the oldest queued
 * request's dTD into the queue head and sets the PRIME bit, then spins
 * until the hardware clears it. */
static ssize_t prime_ept(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	struct ci13xxx_ep *mEp;
	unsigned int ep_num, dir;
	int n;
	struct ci13xxx_req *mReq = NULL;

	if (sscanf(buf, "%u %u", &ep_num, &dir) != 2) {
		dev_err(dev, "<ep_num> <dir>: prime the ep");
		goto done;
	}

	if (dir)
		mEp = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
	else
		mEp = &udc->ci13xxx_ep[ep_num];

	n = hw_ep_bit(mEp->num, mEp->dir);
	mReq = list_entry(mEp->qh.queue.next, struct ci13xxx_req, queue);
	mEp->qh.ptr->td.next = mReq->dma;
	mEp->qh.ptr->td.token &= ~TD_STATUS;

	/* make the QH update visible to the controller before priming */
	wmb();

	hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));
	while (hw_cread(CAP_ENDPTPRIME, BIT(n)))
		cpu_relax();

	pr_info("%s: prime:%08x stat:%08x ep#%d dir:%s\n", __func__,
		hw_cread(CAP_ENDPTPRIME, ~0),
		hw_cread(CAP_ENDPTSTAT, ~0),
		mEp->num, mEp->dir ? "IN" : "OUT");
done:
	return count;
}
static DEVICE_ATTR(prime, S_IWUSR, NULL, prime_ept);

/* EP# and Direction */
/* Debug hook: print QH state, dTD-update failure counters and every
 * queued dTD for the selected endpoint. */
static ssize_t print_dtds(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	struct ci13xxx_ep *mEp;
	unsigned int ep_num, dir;
	int n;
	struct list_head   *ptr = NULL;
	struct ci13xxx_req *req = NULL;

	if (sscanf(buf, "%u %u", &ep_num, &dir) != 2) {
		dev_err(dev, "<ep_num> <dir>: to print dtds");
		goto done;
	}

	if (dir)
		mEp = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
	else
		mEp = &udc->ci13xxx_ep[ep_num];

	n = hw_ep_bit(mEp->num, mEp->dir);
	pr_info("%s: prime:%08x stat:%08x ep#%d dir:%s"
		"dTD_update_fail_count: %lu"
		"mEp->dTD_update_fail_count: %lu\n", __func__,
		hw_cread(CAP_ENDPTPRIME, ~0),
		hw_cread(CAP_ENDPTSTAT, ~0),
		mEp->num, mEp->dir ? "IN" : "OUT",
		udc->dTD_update_fail_count,
		mEp->dTD_update_fail_count);

	pr_info("QH: cap:%08x cur:%08x next:%08x token:%08x\n",
		mEp->qh.ptr->cap, mEp->qh.ptr->curr,
		mEp->qh.ptr->td.next, mEp->qh.ptr->td.token);

	list_for_each(ptr, &mEp->qh.queue) {
		req = list_entry(ptr, struct ci13xxx_req, queue);
		pr_info("\treq:%08x next:%08x token:%08x page0:%08x status:%d\n",
			req->dma, req->ptr->next, req->ptr->token,
			req->ptr->page[0], req->req.status);
	}
done:
	return count;
}
static DEVICE_ATTR(dtds, S_IWUSR, NULL, print_dtds);

/* Gadget-API remote wakeup: only legal when the host enabled the feature
 * and the port is actually suspended; signals resume via PORTSC_FPR. */
static int ci13xxx_wakeup(struct usb_gadget *_gadget)
{
	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
	unsigned long flags;
	int ret = 0;

	trace();

	spin_lock_irqsave(udc->lock, flags);
	if (!udc->remote_wakeup) {
		ret = -EOPNOTSUPP;
		dbg_trace("remote wakeup feature is not enabled\n");
		goto out;
	}
	if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
		ret = -EINVAL;
		dbg_trace("port is not suspended\n");
		goto out;
	}
	/* force port resume */
	hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
out:
	spin_unlock_irqrestore(udc->lock, flags);
	return ret;
}

static ssize_t usb_remote_wakeup(struct device *dev,
		struct device_attribute *attr, const char
*buf, size_t count) { struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev); ci13xxx_wakeup(&udc->gadget); return count; } static DEVICE_ATTR(wakeup, S_IWUSR, 0, usb_remote_wakeup); /** * dbg_create_files: initializes the attribute interface * @dev: device * * This function returns an error code */ __maybe_unused static int dbg_create_files(struct device *dev) { int retval = 0; if (dev == NULL) return -EINVAL; retval = device_create_file(dev, &dev_attr_device); if (retval) goto done; retval = device_create_file(dev, &dev_attr_driver); if (retval) goto rm_device; retval = device_create_file(dev, &dev_attr_events); if (retval) goto rm_driver; retval = device_create_file(dev, &dev_attr_inters); if (retval) goto rm_events; retval = device_create_file(dev, &dev_attr_port_test); if (retval) goto rm_inters; retval = device_create_file(dev, &dev_attr_qheads); if (retval) goto rm_port_test; retval = device_create_file(dev, &dev_attr_registers); if (retval) goto rm_qheads; retval = device_create_file(dev, &dev_attr_requests); if (retval) goto rm_registers; retval = device_create_file(dev, &dev_attr_wakeup); if (retval) goto rm_remote_wakeup; retval = device_create_file(dev, &dev_attr_prime); if (retval) goto rm_prime; retval = device_create_file(dev, &dev_attr_dtds); if (retval) goto rm_dtds; return 0; rm_dtds: device_remove_file(dev, &dev_attr_dtds); rm_prime: device_remove_file(dev, &dev_attr_prime); rm_remote_wakeup: device_remove_file(dev, &dev_attr_wakeup); rm_registers: device_remove_file(dev, &dev_attr_registers); rm_qheads: device_remove_file(dev, &dev_attr_qheads); rm_port_test: device_remove_file(dev, &dev_attr_port_test); rm_inters: device_remove_file(dev, &dev_attr_inters); rm_events: device_remove_file(dev, &dev_attr_events); rm_driver: device_remove_file(dev, &dev_attr_driver); rm_device: device_remove_file(dev, &dev_attr_device); done: return retval; } /** * dbg_remove_files: destroys the attribute interface * @dev: device * * This function 
returns an error code */ __maybe_unused static int dbg_remove_files(struct device *dev) { if (dev == NULL) return -EINVAL; device_remove_file(dev, &dev_attr_requests); device_remove_file(dev, &dev_attr_registers); device_remove_file(dev, &dev_attr_qheads); device_remove_file(dev, &dev_attr_port_test); device_remove_file(dev, &dev_attr_inters); device_remove_file(dev, &dev_attr_events); device_remove_file(dev, &dev_attr_driver); device_remove_file(dev, &dev_attr_device); device_remove_file(dev, &dev_attr_wakeup); return 0; } /****************************************************************************** * UTIL block *****************************************************************************/ /** * _usb_addr: calculates endpoint address from direction & number * @ep: endpoint */ static inline u8 _usb_addr(struct ci13xxx_ep *ep) { return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num; } /** * _hardware_queue: configures a request at hardware level * @gadget: gadget * @mEp: endpoint * * This function returns an error code */ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) { unsigned i; int ret = 0; unsigned length = mReq->req.length; trace("%p, %p", mEp, mReq); /* don't queue twice */ if (mReq->req.status == -EALREADY) return -EALREADY; mReq->req.status = -EALREADY; if (length && !mReq->req.dma) { mReq->req.dma = \ dma_map_single(mEp->device, mReq->req.buf, length, mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE); if (mReq->req.dma == 0) return -ENOMEM; mReq->map = 1; } if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) { mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC, &mReq->zdma); if (mReq->zptr == NULL) { if (mReq->map) { dma_unmap_single(mEp->device, mReq->req.dma, length, mEp->dir ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE);
				mReq->req.dma = 0;
				mReq->map = 0;
			}
			return -ENOMEM;
		}
		memset(mReq->zptr, 0, sizeof(*mReq->zptr));
		mReq->zptr->next   = TD_TERMINATE;
		mReq->zptr->token  = TD_STATUS_ACTIVE;
		if (!mReq->req.no_interrupt)
			mReq->zptr->token |= TD_IOC;
	}
	/*
	 * TD configuration
	 * TODO - handle requests which spawns into several TDs
	 */
	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
	mReq->ptr->token  = length << ffs_nr(TD_TOTAL_BYTES);
	mReq->ptr->token &= TD_TOTAL_BYTES;
	mReq->ptr->token |= TD_STATUS_ACTIVE;
	if (mReq->zptr) {
		/* chain to the zero-length terminating dTD */
		mReq->ptr->next = mReq->zdma;
	} else {
		mReq->ptr->next = TD_TERMINATE;
		if (!mReq->req.no_interrupt)
			mReq->ptr->token |= TD_IOC;
	}

	/* MSM Specific: updating the request as required for
	 * SPS mode. Enable MSM proprietary DMA engine according
	 * to the UDC private data in the request.
	 */
	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
		if (mReq->req.udc_priv & MSM_SPS_MODE) {
			mReq->ptr->token = TD_STATUS_ACTIVE;
			if (mReq->req.udc_priv & MSM_TBE)
				mReq->ptr->next = TD_TERMINATE;
			else
				mReq->ptr->next = MSM_ETD_TYPE | mReq->dma;
			if (!mReq->req.no_interrupt)
				mReq->ptr->token |= MSM_ETD_IOC;
		}
	}

	/* fill the five 4K page pointers of the dTD */
	mReq->ptr->page[0] = mReq->req.dma;
	for (i = 1; i < 5; i++)
		mReq->ptr->page[i] =
			(mReq->req.dma + i * CI13XXX_PAGE_SIZE) &
			~TD_RESERVED_MASK;

	if (!list_empty(&mEp->qh.queue)) {
		struct ci13xxx_req *mReqPrev;
		int n = hw_ep_bit(mEp->num, mEp->dir);
		int tmp_stat;

		/* link the new dTD behind the last queued one */
		mReqPrev = list_entry(mEp->qh.queue.prev,
				      struct ci13xxx_req, queue);
		if (mReqPrev->zptr)
			mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
		else
			mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
		wmb();
		if (hw_cread(CAP_ENDPTPRIME, BIT(n)))
			goto done;
		/* ATDTW tripwire: detect whether the endpoint was still
		 * primed while we appended, re-reading status until the
		 * tripwire survives */
		do {
			hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
			tmp_stat = hw_cread(CAP_ENDPTSTAT, BIT(n));
		} while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW));
		hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, 0);
		if (tmp_stat)
			goto done;
	}

	/*  QH configuration */
	if (!list_empty(&mEp->qh.queue)) {
		/* an older request is still active: point the QH at it */
		struct ci13xxx_req *mReq =
			list_entry(mEp->qh.queue.next,
				   struct ci13xxx_req, queue);
		if (TD_STATUS_ACTIVE & mReq->ptr->token) {
			mEp->qh.ptr->td.next   = mReq->dma;
			mEp->qh.ptr->td.token &= ~TD_STATUS;
			goto prime;
		}
	}

	mEp->qh.ptr->td.next = mReq->dma;    /* TERMINATE = 0 */

	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
		if (mReq->req.udc_priv & MSM_SPS_MODE) {
			mEp->qh.ptr->td.next |= MSM_ETD_TYPE;
			i = hw_cread(CAP_ENDPTPIPEID +
				     mEp->num * sizeof(u32), ~0);
			/* Read current value of this EPs pipe id */
			i = (mEp->dir == TX) ?
				((i >> MSM_TX_PIPE_ID_OFS) &
				 MSM_PIPE_ID_MASK) :
				(i & MSM_PIPE_ID_MASK);
			/* If requested pipe id is different from current,
			   then write it */
			if (i != (mReq->req.udc_priv & MSM_PIPE_ID_MASK)) {
				if (mEp->dir == TX)
					hw_cwrite(
						CAP_ENDPTPIPEID +
							mEp->num * sizeof(u32),
						MSM_PIPE_ID_MASK <<
							MSM_TX_PIPE_ID_OFS,
						(mReq->req.udc_priv &
						 MSM_PIPE_ID_MASK)
							<< MSM_TX_PIPE_ID_OFS);
				else
					hw_cwrite(
						CAP_ENDPTPIPEID +
							mEp->num * sizeof(u32),
						MSM_PIPE_ID_MASK,
						mReq->req.udc_priv &
							MSM_PIPE_ID_MASK);
			}
		}
	}

	mEp->qh.ptr->td.token &= ~TD_STATUS;   /* clear status */
	mEp->qh.ptr->cap |= QH_ZLT;

prime:
	wmb();   /* synchronize before ep prime */

	ret = hw_ep_prime(mEp->num, mEp->dir,
			  mEp->type == USB_ENDPOINT_XFER_CONTROL);
done:
	return ret;
}

/**
 * _hardware_dequeue: handles a request at hardware level
 * @gadget: gadget
 * @mEp: endpoint
 *
 * Returns the number of bytes actually transferred, or a negative error
 * (-EBUSY while the dTD is still active).
 */
static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
{
	trace("%p, %p", mEp, mReq);

	if (mReq->req.status != -EALREADY)
		return -EINVAL;

	/* clean speculative fetches on req->ptr->token */
	mb();

	if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
		return -EBUSY;

	/* MSM SPS+TBE requests never complete through this path */
	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID)
		if ((mReq->req.udc_priv & MSM_SPS_MODE) &&
		    (mReq->req.udc_priv & MSM_TBE))
			return -EBUSY;
	if (mReq->zptr) {
		if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
			return -EBUSY;
		dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
		mReq->zptr = NULL;
	}

	mReq->req.status = 0;

	if (mReq->map) {
		dma_unmap_single(mEp->device, mReq->req.dma,
				 mReq->req.length,
				 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		mReq->req.dma = 0;
		mReq->map = 0;
	}

	/* translate the hardware status bits into a generic error */
	mReq->req.status = mReq->ptr->token & TD_STATUS;
	if ((TD_STATUS_HALTED & mReq->req.status) != 0)
		mReq->req.status = -1;
	else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
		mReq->req.status = -1;
	else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
		mReq->req.status = -1;

	/* token holds the REMAINING byte count */
	mReq->req.actual   = mReq->ptr->token & TD_TOTAL_BYTES;
	mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
	mReq->req.actual   = mReq->req.length - mReq->req.actual;
	mReq->req.actual   = mReq->req.status ? 0 : mReq->req.actual;

	return mReq->req.actual;
}

/**
 * _ep_nuke: dequeues all endpoint requests
 * @mEp: endpoint
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int _ep_nuke(struct ci13xxx_ep *mEp)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	struct ci13xxx_ep *mEpTemp = mEp;
	unsigned val;

	trace("%p", mEp);

	if (mEp == NULL)
		return -EINVAL;

	hw_ep_flush(mEp->num, mEp->dir);

	while (!list_empty(&mEp->qh.queue)) {

		/* pop oldest request */
		struct ci13xxx_req *mReq =
			list_entry(mEp->qh.queue.next,
				   struct ci13xxx_req, queue);
		list_del_init(&mReq->queue);

		/* MSM Specific: Clear end point proprietary register */
		if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) ==
		    MSM_VENDOR_ID) {
			if (mReq->req.udc_priv & MSM_SPS_MODE) {
				val = hw_cread(CAP_ENDPTPIPEID +
					       mEp->num * sizeof(u32),
					       ~0);
				if (val != MSM_EP_PIPE_ID_RESET_VAL)
					hw_cwrite(
						CAP_ENDPTPIPEID +
						 mEp->num * sizeof(u32),
						~0, MSM_EP_PIPE_ID_RESET_VAL);
			}
		}
		mReq->req.status = -ESHUTDOWN;

		if (mReq->map) {
			dma_unmap_single(mEp->device, mReq->req.dma,
					 mReq->req.length, mEp->dir ?
DMA_TO_DEVICE : DMA_FROM_DEVICE); mReq->req.dma = 0; mReq->map = 0; } if (mReq->req.complete != NULL) { spin_unlock(mEp->lock); if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) && mReq->req.length) mEpTemp = &_udc->ep0in; mReq->req.complete(&mEpTemp->ep, &mReq->req); spin_lock(mEp->lock); } } return 0; } /** * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts * @gadget: gadget * * This function returns an error code * Caller must hold lock */ static int _gadget_stop_activity(struct usb_gadget *gadget) { struct usb_ep *ep; struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget); unsigned long flags; trace("%p", gadget); if (gadget == NULL) return -EINVAL; spin_lock_irqsave(udc->lock, flags); udc->gadget.speed = USB_SPEED_UNKNOWN; udc->remote_wakeup = 0; udc->suspended = 0; udc->configured = 0; spin_unlock_irqrestore(udc->lock, flags); /* flush all endpoints */ gadget_for_each_ep(ep, gadget) { usb_ep_fifo_flush(ep); } usb_ep_fifo_flush(&udc->ep0out.ep); usb_ep_fifo_flush(&udc->ep0in.ep); udc->driver->disconnect(gadget); /* make sure to disable all endpoints */ gadget_for_each_ep(ep, gadget) { usb_ep_disable(ep); } if (udc->status != NULL) { usb_ep_free_request(&udc->ep0in.ep, udc->status); udc->status = NULL; } return 0; } /****************************************************************************** * ISR block *****************************************************************************/ /** * isr_reset_handler: USB reset interrupt handler * @udc: UDC device * * This function resets USB engine after a bus reset occurred */ static void isr_reset_handler(struct ci13xxx *udc) __releases(udc->lock) __acquires(udc->lock) { int retval; trace("%p", udc); printk(KERN_INFO "usb:: %s udc: %p\n", __func__, udc); if (udc == NULL) { err("EINVAL"); return; } dbg_event(0xFF, "BUS RST", 0); spin_unlock(udc->lock); /*stop charging upon reset */ if (udc->transceiver) otg_set_power(udc->transceiver, 0); retval = _gadget_stop_activity(&udc->gadget); 
if (retval) goto done; retval = hw_usb_reset(); if (retval) goto done; udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_ATOMIC); if (udc->status == NULL) retval = -ENOMEM; spin_lock(udc->lock); done: if (retval) err("error: %i", retval); } /** * isr_get_status_complete: get_status request complete function * @ep: endpoint * @req: request handled * * Caller must release lock */ static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req) { trace("%p, %p", ep, req); if (ep == NULL || req == NULL) { err("EINVAL"); return; } kfree(req->buf); usb_ep_free_request(ep, req); } /** * isr_get_status_response: get_status request response * @udc: udc struct * @setup: setup request packet * * This function returns an error code */ static int isr_get_status_response(struct ci13xxx *udc, struct usb_ctrlrequest *setup) __releases(mEp->lock) __acquires(mEp->lock) { struct ci13xxx_ep *mEp = &udc->ep0in; struct usb_request *req = NULL; gfp_t gfp_flags = GFP_ATOMIC; int dir, num, retval; trace("%p, %p", mEp, setup); if (mEp == NULL || setup == NULL) return -EINVAL; spin_unlock(mEp->lock); req = usb_ep_alloc_request(&mEp->ep, gfp_flags); spin_lock(mEp->lock); if (req == NULL) return -ENOMEM; req->complete = isr_get_status_complete; req->length = 2; req->buf = kzalloc(req->length, gfp_flags); if (req->buf == NULL) { retval = -ENOMEM; goto err_free_req; } if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) { /* Assume that device is bus powered for now. */ *((u16 *)req->buf) = _udc->remote_wakeup << 1; retval = 0; } else if ((setup->bRequestType & USB_RECIP_MASK) \ == USB_RECIP_ENDPOINT) { dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ? 
TX : RX; num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK; *((u16 *)req->buf) = hw_ep_get_halt(num, dir); } /* else do nothing; reserved for future use */ spin_unlock(mEp->lock); retval = usb_ep_queue(&mEp->ep, req, gfp_flags); spin_lock(mEp->lock); if (retval) goto err_free_buf; return 0; err_free_buf: kfree(req->buf); err_free_req: spin_unlock(mEp->lock); usb_ep_free_request(&mEp->ep, req); spin_lock(mEp->lock); return retval; } /** * isr_setup_status_complete: setup_status request complete function * @ep: endpoint * @req: request handled * * Caller must release lock. Put the port in test mode if test mode * feature is selected. */ static void isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req) { struct ci13xxx *udc = req->context; unsigned long flags; trace("%p, %p", ep, req); spin_lock_irqsave(udc->lock, flags); if (udc->test_mode) hw_port_test_set(udc->test_mode); spin_unlock_irqrestore(udc->lock, flags); } /** * isr_setup_status_phase: queues the status phase of a setup transation * @udc: udc struct * * This function returns an error code */ static int isr_setup_status_phase(struct ci13xxx *udc) __releases(mEp->lock) __acquires(mEp->lock) { int retval; struct ci13xxx_ep *mEp; trace("%p", udc); mEp = (udc->ep0_dir == TX) ? 
&udc->ep0out : &udc->ep0in;
	if (udc->status) {
		udc->status->context = udc;
		udc->status->complete = isr_setup_status_complete;
	} else
		return -EINVAL;

	spin_unlock(mEp->lock);
	retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
	spin_lock(mEp->lock);

	return retval;
}

/**
 * isr_tr_complete_low: transaction complete low level handler
 * @mEp: endpoint
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	struct ci13xxx_req *mReq, *mReqTemp;
	struct ci13xxx_ep *mEpTemp = mEp;
	int uninitialized_var(retval);
	int req_dequeue = 1;
	struct ci13xxx *udc = _udc;

	trace("%p", mEp);

	if (list_empty(&mEp->qh.queue))
		return 0;

	list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
				 queue) {
dequeue:
		retval = _hardware_dequeue(mEp, mReq);
		if (retval < 0) {
			/*
			 * FIXME: don't know exact delay
			 * required for HW to update dTD status
			 * bits. This is a temporary workaround till
			 * HW designers come back on this.
			 */
			/* retry once for RX endpoints after a short delay,
			 * counting the dTD-update failure */
			if (retval == -EBUSY && req_dequeue &&
			    mEp->dir == 0) {
				req_dequeue = 0;
				udc->dTD_update_fail_count++;
				mEp->dTD_update_fail_count++;
				udelay(10);
				goto dequeue;
			}
			break;
		}
		req_dequeue = 0;
		list_del_init(&mReq->queue);
		dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
		if (mReq->req.complete != NULL) {
			spin_unlock(mEp->lock);
			if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
			    mReq->req.length)
				mEpTemp = &_udc->ep0in;
			mReq->req.complete(&mEpTemp->ep, &mReq->req);
			if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
				mReq->req.complete = NULL;
			spin_lock(mEp->lock);
		}
	}

	/* -EBUSY just means the head request is still in flight */
	if (retval == -EBUSY)
		retval = 0;
	if (retval < 0)
		dbg_event(_usb_addr(mEp), "DONE", retval);

	return retval;
}

/**
 * isr_tr_complete_handler: transaction complete interrupt handler
 * @udc: UDC descriptor
 *
 * This function handles traffic events
 */
static void isr_tr_complete_handler(struct ci13xxx *udc)
__releases(udc->lock)
__acquires(udc->lock)
{
	unsigned i;
	u8 tmode = 0;

	trace("%p", udc);

	if (udc == NULL) {
		err("EINVAL");
		return;
	}

	for (i = 0; i < hw_ep_max; i++) {
		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
		int type, num, dir, err = -EINVAL;
		struct usb_ctrlrequest req;

		if (mEp->desc == NULL)
			continue;   /* not configured */

		if (hw_test_and_clear_complete(i)) {
			err = isr_tr_complete_low(mEp);
			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
				if (err > 0)   /* needs status phase */
					err = isr_setup_status_phase(udc);
				if (err < 0) {
					dbg_event(_usb_addr(mEp),
						  "ERROR", err);
					spin_unlock(udc->lock);
					if (usb_ep_set_halt(&mEp->ep))
						err("error: ep_set_halt");
					spin_lock(udc->lock);
				}
			}
		}

		if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
		    !hw_test_and_clear_setup_status(i))
			continue;

		if (i != 0) {
			warn("ctrl traffic received at endpoint");
			continue;
		}

		/*
		 * Flush data and handshake transactions of previous
		 * setup packet.
		 */
		_ep_nuke(&udc->ep0out);
		_ep_nuke(&udc->ep0in);

		/* read_setup_packet */
		do {
			hw_test_and_set_setup_guard();
			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
			/* Ensure buffer is read before acknowledging to h/w */
			mb();
		} while (!hw_test_and_clear_setup_guard());

		type = req.bRequestType;

		udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;

		dbg_setup(_usb_addr(mEp), &req);

		switch (req.bRequest) {
		case USB_REQ_CLEAR_FEATURE:
			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
			    le16_to_cpu(req.wValue) ==
			    USB_ENDPOINT_HALT) {
				if (req.wLength != 0)
					break;
				num = le16_to_cpu(req.wIndex);
				dir = num & USB_ENDPOINT_DIR_MASK;
				num &= USB_ENDPOINT_NUMBER_MASK;
				if (dir) /* TX */
					num += hw_ep_max/2;
				/* wedged endpoints ignore CLEAR_HALT */
				if (!udc->ci13xxx_ep[num].wedge) {
					spin_unlock(udc->lock);
					err = usb_ep_clear_halt(
						&udc->ci13xxx_ep[num].ep);
					spin_lock(udc->lock);
					if (err)
						break;
				}
				err = isr_setup_status_phase(udc);
			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
				   le16_to_cpu(req.wValue) ==
				   USB_DEVICE_REMOTE_WAKEUP) {
				if (req.wLength != 0)
					break;
				udc->remote_wakeup = 0;
				err = isr_setup_status_phase(udc);
			} else {
				goto delegate;
			}
			break;
		case USB_REQ_GET_STATUS:
			if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
			    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
			    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
				goto delegate;
			if (le16_to_cpu(req.wLength) != 2 ||
			    le16_to_cpu(req.wValue)  != 0)
				break;
			err = isr_get_status_response(udc, &req);
			break;
		case USB_REQ_SET_ADDRESS:
			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
				goto delegate;
			if (le16_to_cpu(req.wLength) != 0 ||
			    le16_to_cpu(req.wIndex)  != 0)
				break;
			err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
			if (err)
				break;
			err = isr_setup_status_phase(udc);
			break;
		case USB_REQ_SET_CONFIGURATION:
			/* remember configured state, then let the gadget
			 * driver handle the request itself */
			if (type == (USB_DIR_OUT|USB_TYPE_STANDARD))
				udc->configured = !!req.wValue;
			goto delegate;
		case USB_REQ_SET_FEATURE:
			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
			    le16_to_cpu(req.wValue) ==
			    USB_ENDPOINT_HALT) {
				if (req.wLength != 0)
					break;
				num = le16_to_cpu(req.wIndex);
				dir = num & USB_ENDPOINT_DIR_MASK;
				num &= USB_ENDPOINT_NUMBER_MASK;
				if (dir) /* TX */
					num += hw_ep_max/2;

				spin_unlock(udc->lock);
				err = usb_ep_set_halt(
					&udc->ci13xxx_ep[num].ep);
				spin_lock(udc->lock);
				if (!err)
					isr_setup_status_phase(udc);
			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
				if (req.wLength != 0)
					break;
				switch (le16_to_cpu(req.wValue)) {
				case USB_DEVICE_REMOTE_WAKEUP:
					udc->remote_wakeup = 1;
					err = isr_setup_status_phase(udc);
					break;
				case USB_DEVICE_TEST_MODE:
					tmode = le16_to_cpu(req.wIndex) >> 8;
					switch (tmode) {
					case TEST_J:
					case TEST_K:
					case TEST_SE0_NAK:
					case TEST_PACKET:
					case TEST_FORCE_EN:
						udc->test_mode = tmode;
						err = isr_setup_status_phase(
								udc);
						break;
					default:
						break;
					}
				/* NOTE(review): no break here — after
				 * TEST_MODE handling control FALLS THROUGH
				 * to "delegate", re-delivering the setup to
				 * the gadget driver.  Looks unintentional;
				 * confirm before changing. */
				default:
					goto delegate;
				}
			} else {
				goto delegate;
			}
			break;
		default:
delegate:
			if (req.wLength == 0)   /* no data phase */
				udc->ep0_dir = TX;

			spin_unlock(udc->lock);
			err = udc->driver->setup(&udc->gadget, &req);
			spin_lock(udc->lock);
			break;
		}

		if (err < 0) {
			dbg_event(_usb_addr(mEp), "ERROR", err);

			spin_unlock(udc->lock);
			if (usb_ep_set_halt(&mEp->ep))
				err("error: ep_set_halt");
			spin_lock(udc->lock);
		}
	}
}

/******************************************************************************
 * ENDPT block
 *****************************************************************************/
/**
 * ep_enable: configure endpoint, making it usable
 *
 * Check usb_ep_enable() at "usb_gadget.h" for details
 */
static int ep_enable(struct usb_ep *ep,
		     const struct usb_endpoint_descriptor *desc)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int retval = 0;
	unsigned long flags;
	unsigned mult = 0;

	trace("%p, %p", ep, desc);

	if (ep == NULL || desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	/* only internal SW should enable ctrl endpts */

	mEp->desc = desc;

	if (!list_empty(&mEp->qh.queue))
		warn("enabling a non-empty endpoint!");

	mEp->dir = usb_endpoint_dir_in(desc) ?
TX : RX;
	mEp->num  = usb_endpoint_num(desc);
	mEp->type = usb_endpoint_type(desc);

	mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize);

	dbg_event(_usb_addr(mEp), "ENABLE", 0);

	/* program the queue head capabilities for this transfer type */
	mEp->qh.ptr->cap = 0;

	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
		/* interrupt on setup for control endpoints */
		mEp->qh.ptr->cap |= QH_IOS;
	} else if (mEp->type == USB_ENDPOINT_XFER_ISOC) {
		mEp->qh.ptr->cap &= ~QH_MULT;
		mult = ((mEp->ep.maxpacket >> QH_MULT_SHIFT) + 1) & 0x03;
		mEp->qh.ptr->cap |= (mult << ffs_nr(QH_MULT));
	} else {
		mEp->qh.ptr->cap |= QH_ZLT;
	}

	mEp->qh.ptr->cap |=
		(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
	mEp->qh.ptr->td.next |= TD_TERMINATE;   /* needed? */

	/* complete all the updates to ept->head before enabling endpoint*/
	mb();

	/*
	 * Enable endpoints in the HW other than ep0 as ep0
	 * is always enabled
	 */
	if (mEp->num)
		retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);

	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}

/**
 * ep_disable: endpoint is no longer usable
 *
 * Check usb_ep_disable() at "usb_gadget.h" for details
 */
static int ep_disable(struct usb_ep *ep)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	trace("%p", ep);

	if (ep == NULL)
		return -EINVAL;
	else if (mEp->desc == NULL)
		return -EBUSY;

	spin_lock_irqsave(mEp->lock, flags);

	/* only internal SW should disable ctrl endpts */

	/* control endpoints are bidirectional: the do/while flips the
	 * direction once so BOTH halves get nuked and disabled */
	direction = mEp->dir;
	do {
		dbg_event(_usb_addr(mEp), "DISABLE", 0);

		retval |= _ep_nuke(mEp);
		retval |= hw_ep_disable(mEp->num, mEp->dir);

		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
			mEp->dir = (mEp->dir == TX) ? RX : TX;

	} while (mEp->dir != direction);

	mEp->desc = NULL;

	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}

/**
 * ep_alloc_request: allocate a request object to use with this endpoint
 *
 * Check usb_ep_alloc_request() at "usb_gadget.h" for details
 */
static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct ci13xxx_ep  *mEp  = container_of(ep, struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = NULL;

	trace("%p, %i", ep, gfp_flags);

	if (ep == NULL) {
		err("EINVAL");
		return NULL;
	}

	mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
	if (mReq != NULL) {
		INIT_LIST_HEAD(&mReq->queue);

		/* each request owns one dTD from the endpoint's pool */
		mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
					   &mReq->dma);
		if (mReq->ptr == NULL) {
			kfree(mReq);
			mReq = NULL;
		}
	}

	dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);

	return (mReq == NULL) ? NULL : &mReq->req;
}

/**
 * ep_free_request: frees a request object
 *
 * Check usb_ep_free_request() at "usb_gadget.h" for details
 */
static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	unsigned long flags;

	trace("%p, %p", ep, req);

	if (ep == NULL || req == NULL) {
		err("EINVAL");
		return;
	} else if (!list_empty(&mReq->queue)) {
		/* still queued on the endpoint */
		err("EBUSY");
		return;
	}

	spin_lock_irqsave(mEp->lock, flags);

	if (mReq->ptr)
		dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
	kfree(mReq);

	dbg_event(_usb_addr(mEp), "FREE", 0);

	spin_unlock_irqrestore(mEp->lock, flags);
}

/**
 * ep_queue: queues (submits) an I/O request to an endpoint
 *
 * Check usb_ep_queue()* at usb_gadget.h" for details
 */
static int ep_queue(struct usb_ep *ep, struct usb_request *req,
		    gfp_t __maybe_unused gfp_flags)
{
	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	int retval = 0;
	unsigned long flags;
	struct ci13xxx *udc = _udc;

	trace("%p, %p, %X", ep, req, gfp_flags);

	if (ep == NULL || req == NULL || mEp->desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);
	/* non-control traffic is refused until SET_CONFIGURATION */
	if (!udc->configured && mEp->type !=
	    USB_ENDPOINT_XFER_CONTROL) {
		spin_unlock_irqrestore(mEp->lock, flags);
		trace("usb is not configured"
			"ept #%d, ept name#%s\n",
			mEp->num, mEp->ep.name);
		return -ESHUTDOWN;
	}

	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
		/* redirect ep0 data-stage requests to the proper half */
		if (req->length)
			mEp = (_udc->ep0_dir == RX) ?
				&_udc->ep0out : &_udc->ep0in;
		if (!list_empty(&mEp->qh.queue)) {
			_ep_nuke(mEp);
			retval = -EOVERFLOW;
			warn("endpoint ctrl %X nuked", _usb_addr(mEp));
		}
	}

	/* first nuke then test link, e.g. previous status has not sent */
	if (!list_empty(&mReq->queue)) {
		retval = -EBUSY;
		err("request already in queue");
		goto done;
	}

	/* a single dTD addresses at most 4 pages */
	if (req->length > (4 * CI13XXX_PAGE_SIZE)) {
		req->length = (4 * CI13XXX_PAGE_SIZE);
		retval = -EMSGSIZE;
		warn("request length truncated");
	}

	dbg_queue(_usb_addr(mEp), req, retval);

	/* push request */
	mReq->req.status = -EINPROGRESS;
	mReq->req.actual = 0;

	retval = _hardware_enqueue(mEp, mReq);

	if (retval == -EALREADY) {
		dbg_event(_usb_addr(mEp), "QUEUE", retval);
		retval = 0;
	}
	if (!retval)
		list_add_tail(&mReq->queue, &mEp->qh.queue);

 done:
	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}

/**
 * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
 *
 * Check usb_ep_dequeue() at "usb_gadget.h" for details
 */
static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
	struct ci13xxx_ep *mEpTemp = mEp;
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	unsigned long flags;

	trace("%p, %p", ep, req);

	if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
	    mEp->desc == NULL || list_empty(&mReq->queue) ||
	    list_empty(&mEp->qh.queue))
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	dbg_event(_usb_addr(mEp), "DEQUEUE", 0);

	if ((mEp->type == USB_ENDPOINT_XFER_CONTROL)) {
		hw_ep_flush(_udc->ep0out.num, RX);
		hw_ep_flush(_udc->ep0in.num, TX);
	} else {
		hw_ep_flush(mEp->num, mEp->dir);
	}

	/* pop request */
	list_del_init(&mReq->queue);
	if (mReq->map) {
		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
				 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		mReq->req.dma = 0;
		mReq->map = 0;
	}
	req->status = -ECONNRESET;

	if (mReq->req.complete != NULL) {
		spin_unlock(mEp->lock);
		if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
		    mReq->req.length)
			mEpTemp = &_udc->ep0in;
		mReq->req.complete(&mEpTemp->ep, &mReq->req);
		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
			mReq->req.complete = NULL;
		spin_lock(mEp->lock);
	}

	spin_unlock_irqrestore(mEp->lock, flags);
	return 0;
}

/**
 * ep_set_halt: sets the endpoint halt feature
 *
 * Check usb_ep_set_halt() at "usb_gadget.h" for details
 */
static int ep_set_halt(struct usb_ep *ep, int value)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	trace("%p, %i", ep, value);

	if (ep == NULL || mEp->desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

#ifndef STALL_IN
	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
	    !list_empty(&mEp->qh.queue)) {
		spin_unlock_irqrestore(mEp->lock, flags);
		return -EAGAIN;
	}
#endif

	/* control endpoints: apply halt to both directions */
	direction = mEp->dir;
	do {
		dbg_event(_usb_addr(mEp), "HALT", value);
		retval |= hw_ep_set_halt(mEp->num, mEp->dir, value);

		if (!value)
			mEp->wedge = 0;

		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
			mEp->dir = (mEp->dir == TX) ?
RX : TX; } while (mEp->dir != direction); spin_unlock_irqrestore(mEp->lock, flags); return retval; } /** * ep_set_wedge: sets the halt feature and ignores clear requests * * Check usb_ep_set_wedge() at "usb_gadget.h" for details */ static int ep_set_wedge(struct usb_ep *ep) { struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); unsigned long flags; trace("%p", ep); if (ep == NULL || mEp->desc == NULL) return -EINVAL; spin_lock_irqsave(mEp->lock, flags); dbg_event(_usb_addr(mEp), "WEDGE", 0); mEp->wedge = 1; spin_unlock_irqrestore(mEp->lock, flags); return usb_ep_set_halt(ep); } /** * ep_fifo_flush: flushes contents of a fifo * * Check usb_ep_fifo_flush() at "usb_gadget.h" for details */ static void ep_fifo_flush(struct usb_ep *ep) { struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); unsigned long flags; trace("%p", ep); if (ep == NULL) { err("%02X: -EINVAL", _usb_addr(mEp)); return; } spin_lock_irqsave(mEp->lock, flags); dbg_event(_usb_addr(mEp), "FFLUSH", 0); hw_ep_flush(mEp->num, mEp->dir); spin_unlock_irqrestore(mEp->lock, flags); } /** * Endpoint-specific part of the API to the USB controller hardware * Check "usb_gadget.h" for details */ static const struct usb_ep_ops usb_ep_ops = { .enable = ep_enable, .disable = ep_disable, .alloc_request = ep_alloc_request, .free_request = ep_free_request, .queue = ep_queue, .dequeue = ep_dequeue, .set_halt = ep_set_halt, .set_wedge = ep_set_wedge, .fifo_flush = ep_fifo_flush, }; /****************************************************************************** * GADGET block *****************************************************************************/ static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active) { struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget); unsigned long flags; int gadget_ready = 0; #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE struct usb_composite_dev *cdev; cdev = get_gadget_data(_gadget); #endif printk(KERN_INFO "usb:: %s,%d\n", __func__, 
__LINE__); if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS)) return -EOPNOTSUPP; spin_lock_irqsave(udc->lock, flags); udc->vbus_active = is_active; if (udc->driver) gadget_ready = 1; printk(KERN_INFO "usb:: %s gadget_ready:%d, is_active:%d\n", __func__, gadget_ready, is_active); spin_unlock_irqrestore(udc->lock, flags); if (gadget_ready) { if (is_active) { #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE if (cdev != NULL) cdev->cable_connect = true; #endif pm_runtime_get_sync(&_gadget->dev); hw_device_reset(udc); printk(KERN_INFO "usb:: %s softconnect: %d\n", __func__, udc->softconnect); if (udc->softconnect) hw_device_state(udc->ep0out.qh.dma); } else { #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE if (cdev != NULL) cdev->cable_connect = false; #endif hw_device_state(0); _gadget_stop_activity(&udc->gadget); pm_runtime_put_sync(&_gadget->dev); } } return 0; } static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA) { struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget); if (udc->transceiver) return otg_set_power(udc->transceiver, mA); return -ENOTSUPP; } static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_active) { struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget); unsigned long flags; printk(KERN_INFO "usb:: %s is_active: %d\n", __func__, is_active); spin_lock_irqsave(udc->lock, flags); udc->softconnect = is_active; if (((udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) && !udc->vbus_active) || !udc->driver) { printk(KERN_INFO "usb:: %s udc->udc_driver->flags:%lx\n", __func__, udc->udc_driver->flags); printk(KERN_INFO "usb:: %s !udc->vbus_active:%x, !udc->driver:%x\n", __func__, !udc->vbus_active, !udc->driver); spin_unlock_irqrestore(udc->lock, flags); return 0; } spin_unlock_irqrestore(udc->lock, flags); if (is_active) hw_device_state(udc->ep0out.qh.dma); else hw_device_state(0); return 0; } /** * Device operations part of the API to the USB controller hardware, * which don't involve endpoints (or i/o) * 
Check "usb_gadget.h" for details */
static const struct usb_gadget_ops usb_gadget_ops = {
	.vbus_session	= ci13xxx_vbus_session,
	.wakeup		= ci13xxx_wakeup,
	.vbus_draw	= ci13xxx_vbus_draw,
	.pullup		= ci13xxx_pullup,
};

/**
 * usb_gadget_probe_driver: register a gadget driver
 * @driver: the driver being registered
 * @bind: the driver's bind callback
 *
 * Check usb_gadget_probe_driver() at <linux/usb/gadget.h> for details.
 * Interrupts are enabled here.
 */
int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *))
{
	struct ci13xxx *udc = _udc;
	unsigned long flags;
	int i, j;
	int retval = -ENOMEM;
	bool put = false;

	trace("%p", driver);

	if (driver             == NULL ||
	    bind               == NULL ||
	    driver->setup      == NULL ||
	    driver->disconnect == NULL ||
	    driver->suspend    == NULL ||
	    driver->resume     == NULL)
		return -EINVAL;
	else if (udc         == NULL)
		return -ENODEV;
	else if (udc->driver != NULL)
		return -EBUSY;

	/* alloc resources: one DMA pool each for queue heads and TDs */
	udc->qh_pool = dma_pool_create("ci13xxx_qh", &udc->gadget.dev,
				       sizeof(struct ci13xxx_qh),
				       64, CI13XXX_PAGE_SIZE);
	if (udc->qh_pool == NULL)
		return -ENOMEM;

	udc->td_pool = dma_pool_create("ci13xxx_td", &udc->gadget.dev,
				       sizeof(struct ci13xxx_td),
				       64, CI13XXX_PAGE_SIZE);
	if (udc->td_pool == NULL) {
		dma_pool_destroy(udc->qh_pool);
		udc->qh_pool = NULL;
		return -ENOMEM;
	}

	spin_lock_irqsave(udc->lock, flags);

	info("hw_ep_max = %d", hw_ep_max);

	udc->gadget.dev.driver = NULL;

	retval = 0;
	/* endpoints are laid out as hw_ep_max/2 RX then hw_ep_max/2 TX */
	for (i = 0; i < hw_ep_max/2; i++) {
		for (j = RX; j <= TX; j++) {
			int k = i + j * hw_ep_max/2;
			struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];

			scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
					(j == TX)  ? "in" : "out");

			mEp->lock         = udc->lock;
			mEp->device       = &udc->gadget.dev;
			mEp->td_pool      = udc->td_pool;

			mEp->ep.name      = mEp->name;
			mEp->ep.ops       = &usb_ep_ops;

			if (i == 0)
				mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
			else /* For ISO EP */
				mEp->ep.maxpacket = 512;

			INIT_LIST_HEAD(&mEp->qh.queue);
			/* dma_pool_alloc(GFP_KERNEL) may sleep: drop lock */
			spin_unlock_irqrestore(udc->lock, flags);
			mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
					&mEp->qh.dma);
			spin_lock_irqsave(udc->lock, flags);
			if (mEp->qh.ptr == NULL)
				retval = -ENOMEM;
			else
				memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));

			/* skip ep0 out and in endpoints  */
			if (i == 0)
				continue;

			list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
		}
	}
	if (retval)
		goto done;

	spin_unlock_irqrestore(udc->lock, flags);
	/*
	 * NOTE(review): these two early returns leave qh_pool/td_pool
	 * allocated on failure — possible leak; confirm against callers.
	 */
	retval = usb_ep_enable(&udc->ep0out.ep, &ctrl_endpt_out_desc);
	if (retval)
		return retval;

	retval = usb_ep_enable(&udc->ep0in.ep, &ctrl_endpt_in_desc);
	if (retval)
		return retval;
	spin_lock_irqsave(udc->lock, flags);

	udc->gadget.ep0 = &udc->ep0in.ep;
	/* bind gadget */
	driver->driver.bus     = NULL;
	udc->gadget.dev.driver = &driver->driver;
	udc->softconnect = 1;

	spin_unlock_irqrestore(udc->lock, flags);
	pm_runtime_get_sync(&udc->gadget.dev);
	retval = bind(&udc->gadget);                /* MAY SLEEP */
	spin_lock_irqsave(udc->lock, flags);

	if (retval) {
		udc->gadget.dev.driver = NULL;
		goto done;
	}

	udc->driver = driver;
	if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
		if (udc->vbus_active) {
			if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
				hw_device_reset(udc);
		} else {
			/* no VBUS yet: stay idle, drop the PM reference */
			put = true;
			goto done;
		}
	}

	if (!udc->softconnect) {
		put = true;
		goto done;
	}

	retval = hw_device_state(udc->ep0out.qh.dma);

done:
	spin_unlock_irqrestore(udc->lock, flags);
	if (retval || put)
		pm_runtime_put_sync(&udc->gadget.dev);
	return retval;
}
EXPORT_SYMBOL(usb_gadget_probe_driver);

/**
 * usb_gadget_unregister_driver: unregister a gadget driver
 *
 * Check usb_gadget_unregister_driver() at "usb_gadget.h" for details
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct ci13xxx *udc = _udc;
	unsigned long i, flags;

	trace("%p", driver);

	if (driver             == NULL ||
	    driver->unbind     == NULL ||
	    driver->setup      == NULL ||
	    driver->disconnect == NULL ||
	    driver->suspend    == NULL ||
	    driver->resume     == NULL ||
	    driver             != udc->driver)
		return -EINVAL;

	spin_lock_irqsave(udc->lock, flags);

	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
			udc->vbus_active) {
		hw_device_state(0);
		_gadget_stop_activity(&udc->gadget);
		pm_runtime_put(&udc->gadget.dev);
	}

	/* unbind gadget */
	spin_unlock_irqrestore(udc->lock, flags);
	driver->unbind(&udc->gadget);               /* MAY SLEEP */
	spin_lock_irqsave(udc->lock, flags);

	udc->gadget.dev.driver = NULL;

	/* free resources */
	for (i = 0; i < hw_ep_max; i++) {
		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];

		if (!list_empty(&mEp->ep.ep_list))
			list_del_init(&mEp->ep.ep_list);

		if (mEp->qh.ptr != NULL)
			dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
	}

	udc->gadget.ep0 = NULL;
	udc->driver = NULL;

	spin_unlock_irqrestore(udc->lock, flags);

	if (udc->td_pool != NULL) {
		dma_pool_destroy(udc->td_pool);
		udc->td_pool = NULL;
	}
	if (udc->qh_pool != NULL) {
		dma_pool_destroy(udc->qh_pool);
		udc->qh_pool = NULL;
	}

	return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);

/******************************************************************************
 * BUS block
 *****************************************************************************/
/**
 * udc_irq: global interrupt handler
 *
 * This function returns IRQ_HANDLED if the IRQ has been handled
 * It locks access to registers
 */
static irqreturn_t udc_irq(void)
{
	struct ci13xxx *udc = _udc;
	irqreturn_t retval;
	u32 intr;

	trace();

	if (udc == NULL) {
		err("ENODEV");
		return IRQ_HANDLED;
	}

	spin_lock(udc->lock);

	if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
		/* shared register block: ignore IRQs raised in host mode */
		if (hw_cread(CAP_USBMODE, USBMODE_CM) !=
				USBMODE_CM_DEVICE) {
			spin_unlock(udc->lock);
			return IRQ_NONE;
		}
	}
	intr = hw_test_and_clear_intr_active();
	if (intr) {
		/* record the raw interrupt status in the debug ring */
		isr_statistics.hndl.buf[isr_statistics.hndl.idx++] = intr;
		isr_statistics.hndl.idx &= ISR_MASK;
		isr_statistics.hndl.cnt++;
/* order defines priority - do NOT change it */ if (USBi_URI & intr) { isr_statistics.uri++; isr_reset_handler(udc); } if (USBi_PCI & intr) { isr_statistics.pci++; udc->gadget.speed = hw_port_is_high_speed() ? USB_SPEED_HIGH : USB_SPEED_FULL; if (udc->suspended) { spin_unlock(udc->lock); udc->driver->resume(&udc->gadget); spin_lock(udc->lock); udc->suspended = 0; } } if (USBi_UEI & intr) isr_statistics.uei++; if (USBi_UI & intr) { isr_statistics.ui++; isr_tr_complete_handler(udc); } if (USBi_SLI & intr) { if (udc->gadget.speed != USB_SPEED_UNKNOWN) { udc->suspended = 1; spin_unlock(udc->lock); udc->driver->suspend(&udc->gadget); spin_lock(udc->lock); } isr_statistics.sli++; } retval = IRQ_HANDLED; } else { isr_statistics.none++; retval = IRQ_NONE; } spin_unlock(udc->lock); return retval; } /** * udc_release: driver release function * @dev: device * * Currently does nothing */ static void udc_release(struct device *dev) { trace("%p", dev); if (dev == NULL) err("EINVAL"); } /** * udc_probe: parent probe must call this to initialize UDC * @dev: parent device * @regs: registers base address * @name: driver name * * This function returns an error code * No interrupts active, the IRQ has not been requested yet * Kernel assumes 32-bit DMA operations by default, no need to dma_set_mask */ static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev, void __iomem *regs) { struct ci13xxx *udc; int retval = 0; trace("%p, %p, %p", dev, regs, name); if (dev == NULL || regs == NULL || driver == NULL || driver->name == NULL) return -EINVAL; udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL); if (udc == NULL) return -ENOMEM; udc->lock = &udc_lock; udc->regs = regs; udc->udc_driver = driver; udc->gadget.ops = &usb_gadget_ops; udc->gadget.speed = USB_SPEED_UNKNOWN; udc->gadget.is_dualspeed = 1; udc->gadget.is_otg = 0; udc->gadget.name = driver->name; INIT_LIST_HEAD(&udc->gadget.ep_list); udc->gadget.ep0 = NULL; dev_set_name(&udc->gadget.dev, "gadget"); 
udc->gadget.dev.dma_mask = dev->dma_mask; udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask; udc->gadget.dev.parent = dev; udc->gadget.dev.release = udc_release; retval = hw_device_init(regs); if (retval < 0) goto free_udc; udc->transceiver = otg_get_transceiver(); if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) { if (udc->transceiver == NULL) { retval = -ENODEV; goto free_udc; } } if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) { retval = hw_device_reset(udc); if (retval) goto put_transceiver; } retval = device_register(&udc->gadget.dev); if (retval) { put_device(&udc->gadget.dev); goto put_transceiver; } #ifdef CONFIG_USB_GADGET_DEBUG_FILES retval = dbg_create_files(&udc->gadget.dev); #endif if (retval) goto unreg_device; if (udc->transceiver) { retval = otg_set_peripheral(udc->transceiver, &udc->gadget); if (retval) goto remove_dbg; } pm_runtime_no_callbacks(&udc->gadget.dev); pm_runtime_enable(&udc->gadget.dev); _udc = udc; return retval; err("error = %i", retval); remove_dbg: #ifdef CONFIG_USB_GADGET_DEBUG_FILES dbg_remove_files(&udc->gadget.dev); #endif unreg_device: device_unregister(&udc->gadget.dev); put_transceiver: if (udc->transceiver) otg_put_transceiver(udc->transceiver); free_udc: kfree(udc); _udc = NULL; return retval; } /** * udc_remove: parent remove must call this to remove UDC * * No interrupts active, the IRQ has been released */ static void udc_remove(void) { struct ci13xxx *udc = _udc; if (udc == NULL) { err("EINVAL"); return; } if (udc->transceiver) { otg_set_peripheral(udc->transceiver, &udc->gadget); otg_put_transceiver(udc->transceiver); } #ifdef CONFIG_USB_GADGET_DEBUG_FILES dbg_remove_files(&udc->gadget.dev); #endif device_unregister(&udc->gadget.dev); kfree(udc); _udc = NULL; }
gpl-2.0
dr4go/lg.p990.kernel
drivers/media/dvb/frontends/si21xx.c
535
21733
/* DVB compliant Linux driver for the DVB-S si2109/2110 demodulator
*
* Copyright (C) 2008 Igor M. Liplianin (liplianin@me.by)
*
*	This program is free software; you can redistribute it and/or modify
*	it under the terms of the GNU General Public License as published by
*	the Free Software Foundation; either version 2 of the License, or
*	(at your option) any later version.
*
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <asm/div64.h>

#include "dvb_frontend.h"
#include "si21xx.h"

/* si21xx demodulator register map (I2C register addresses) */
#define	REVISION_REG			0x00
#define	SYSTEM_MODE_REG			0x01
#define	TS_CTRL_REG_1			0x02
#define	TS_CTRL_REG_2			0x03
#define	PIN_CTRL_REG_1			0x04
#define	PIN_CTRL_REG_2			0x05
#define	LOCK_STATUS_REG_1		0x0f
#define	LOCK_STATUS_REG_2		0x10
#define	ACQ_STATUS_REG			0x11
#define	ACQ_CTRL_REG_1			0x13
#define	ACQ_CTRL_REG_2			0x14
#define	PLL_DIVISOR_REG			0x15
#define	COARSE_TUNE_REG			0x16
#define	FINE_TUNE_REG_L			0x17
#define	FINE_TUNE_REG_H			0x18

#define	ANALOG_AGC_POWER_LEVEL_REG	0x28
#define	CFO_ESTIMATOR_CTRL_REG_1	0x29
#define	CFO_ESTIMATOR_CTRL_REG_2	0x2a
#define	CFO_ESTIMATOR_CTRL_REG_3	0x2b

#define	SYM_RATE_ESTIMATE_REG_L		0x31
#define	SYM_RATE_ESTIMATE_REG_M		0x32
#define	SYM_RATE_ESTIMATE_REG_H		0x33

#define	CFO_ESTIMATOR_OFFSET_REG_L	0x36
#define	CFO_ESTIMATOR_OFFSET_REG_H	0x37
#define	CFO_ERROR_REG_L			0x38
#define	CFO_ERROR_REG_H			0x39
#define	SYM_RATE_ESTIMATOR_CTRL_REG	0x3a

#define	SYM_RATE_REG_L			0x3f
#define	SYM_RATE_REG_M			0x40
#define	SYM_RATE_REG_H			0x41
#define	SYM_RATE_ESTIMATOR_MAXIMUM_REG	0x42
#define	SYM_RATE_ESTIMATOR_MINIMUM_REG	0x43

#define	C_N_ESTIMATOR_CTRL_REG		0x7c
#define	C_N_ESTIMATOR_THRSHLD_REG	0x7d
#define	C_N_ESTIMATOR_LEVEL_REG_L	0x7e
#define	C_N_ESTIMATOR_LEVEL_REG_H	0x7f

#define	BLIND_SCAN_CTRL_REG		0x80

#define	LSA_CTRL_REG_1			0x8D
#define	SPCTRM_TILT_CORR_THRSHLD_REG	0x8f
#define	ONE_DB_BNDWDTH_THRSHLD_REG	0x90
#define	TWO_DB_BNDWDTH_THRSHLD_REG	0x91
#define	THREE_DB_BNDWDTH_THRSHLD_REG	0x92
#define	INBAND_POWER_THRSHLD_REG	0x93
#define	REF_NOISE_LVL_MRGN_THRSHLD_REG	0x94

/* Viterbi decoder */
#define	VIT_SRCH_CTRL_REG_1		0xa0
#define	VIT_SRCH_CTRL_REG_2		0xa1
#define	VIT_SRCH_CTRL_REG_3		0xa2
#define	VIT_SRCH_STATUS_REG		0xa3
#define	VITERBI_BER_COUNT_REG_L		0xab
#define	REED_SOLOMON_CTRL_REG		0xb0
#define	REED_SOLOMON_ERROR_COUNT_REG_L	0xb1
#define	PRBS_CTRL_REG			0xb5

/* LNB / DiSEqC control */
#define	LNB_CTRL_REG_1			0xc0
#define	LNB_CTRL_REG_2			0xc1
#define	LNB_CTRL_REG_3			0xc2
#define	LNB_CTRL_REG_4			0xc3
#define	LNB_CTRL_STATUS_REG		0xc4
#define	LNB_FIFO_REGS_0			0xc5
#define	LNB_FIFO_REGS_1			0xc6
#define	LNB_FIFO_REGS_2			0xc7
#define	LNB_FIFO_REGS_3			0xc8
#define	LNB_FIFO_REGS_4			0xc9
#define	LNB_FIFO_REGS_5			0xca
#define	LNB_SUPPLY_CTRL_REG_1		0xcb
#define	LNB_SUPPLY_CTRL_REG_2		0xcc
#define	LNB_SUPPLY_CTRL_REG_3		0xcd
#define	LNB_SUPPLY_CTRL_REG_4		0xce
#define	LNB_SUPPLY_STATUS_REG		0xcf

#define FALSE	0
#define TRUE	1
#define FAIL	-1
#define PASS	0

/* number of candidate ADC sampling rates tried by set_frontend() */
#define ALLOWABLE_FS_COUNT	10
#define STATUS_BER		0
#define STATUS_UCBLOCKS		1

static int debug;
#define dprintk(args...) \
\ do { \ if (debug) \ printk(KERN_DEBUG "si21xx: " args); \ } while (0) enum { ACTIVE_HIGH, ACTIVE_LOW }; enum { BYTE_WIDE, BIT_WIDE }; enum { CLK_GAPPED_MODE, CLK_CONTINUOUS_MODE }; enum { RISING_EDGE, FALLING_EDGE }; enum { MSB_FIRST, LSB_FIRST }; enum { SERIAL, PARALLEL }; struct si21xx_state { struct i2c_adapter *i2c; const struct si21xx_config *config; struct dvb_frontend frontend; u8 initialised:1; int errmode; int fs; /*Sampling rate of the ADC in MHz*/ }; /* register default initialization */ static u8 serit_sp1511lhb_inittab[] = { 0x01, 0x28, /* set i2c_inc_disable */ 0x20, 0x03, 0x27, 0x20, 0xe0, 0x45, 0xe1, 0x08, 0xfe, 0x01, 0x01, 0x28, 0x89, 0x09, 0x04, 0x80, 0x05, 0x01, 0x06, 0x00, 0x20, 0x03, 0x24, 0x88, 0x29, 0x09, 0x2a, 0x0f, 0x2c, 0x10, 0x2d, 0x19, 0x2e, 0x08, 0x2f, 0x10, 0x30, 0x19, 0x34, 0x20, 0x35, 0x03, 0x45, 0x02, 0x46, 0x45, 0x47, 0xd0, 0x48, 0x00, 0x49, 0x40, 0x4a, 0x03, 0x4c, 0xfd, 0x4f, 0x2e, 0x50, 0x2e, 0x51, 0x10, 0x52, 0x10, 0x56, 0x92, 0x59, 0x00, 0x5a, 0x2d, 0x5b, 0x33, 0x5c, 0x1f, 0x5f, 0x76, 0x62, 0xc0, 0x63, 0xc0, 0x64, 0xf3, 0x65, 0xf3, 0x79, 0x40, 0x6a, 0x40, 0x6b, 0x0a, 0x6c, 0x80, 0x6d, 0x27, 0x71, 0x06, 0x75, 0x60, 0x78, 0x00, 0x79, 0xb5, 0x7c, 0x05, 0x7d, 0x1a, 0x87, 0x55, 0x88, 0x72, 0x8f, 0x08, 0x90, 0xe0, 0x94, 0x40, 0xa0, 0x3f, 0xa1, 0xc0, 0xa4, 0xcc, 0xa5, 0x66, 0xa6, 0x66, 0xa7, 0x7b, 0xa8, 0x7b, 0xa9, 0x7b, 0xaa, 0x9a, 0xed, 0x04, 0xad, 0x00, 0xae, 0x03, 0xcc, 0xab, 0x01, 0x08, 0xff, 0xff }; /* low level read/writes */ static int si21_writeregs(struct si21xx_state *state, u8 reg1, u8 *data, int len) { int ret; u8 buf[60];/* = { reg1, data };*/ struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = len + 1 }; msg.buf[0] = reg1; memcpy(msg.buf + 1, data, len); ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) dprintk("%s: writereg error (reg1 == 0x%02x, data == 0x%02x, " "ret == %i)\n", __func__, reg1, data[0], ret); return (ret != 1) ? 
-EREMOTEIO : 0; } static int si21_writereg(struct si21xx_state *state, u8 reg, u8 data) { int ret; u8 buf[] = { reg, data }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 }; ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) dprintk("%s: writereg error (reg == 0x%02x, data == 0x%02x, " "ret == %i)\n", __func__, reg, data, ret); return (ret != 1) ? -EREMOTEIO : 0; } static int si21_write(struct dvb_frontend *fe, u8 *buf, int len) { struct si21xx_state *state = fe->demodulator_priv; if (len != 2) return -EINVAL; return si21_writereg(state, buf[0], buf[1]); } static u8 si21_readreg(struct si21xx_state *state, u8 reg) { int ret; u8 b0[] = { reg }; u8 b1[] = { 0 }; struct i2c_msg msg[] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) dprintk("%s: readreg error (reg == 0x%02x, ret == %i)\n", __func__, reg, ret); return b1[0]; } static int si21_readregs(struct si21xx_state *state, u8 reg1, u8 *b, u8 len) { int ret; struct i2c_msg msg[] = { { .addr = state->config->demod_address, .flags = 0, .buf = &reg1, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b, .len = len } }; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) dprintk("%s: readreg error (ret == %i)\n", __func__, ret); return ret == 2 ? 
0 : -1;
}

/* si21xx_wait_diseqc_idle: poll the DiSEqC busy bit until clear or timeout */
static int si21xx_wait_diseqc_idle(struct si21xx_state *state, int timeout)
{
	unsigned long start = jiffies;

	dprintk("%s\n", __func__);

	while ((si21_readreg(state, LNB_CTRL_REG_1) & 0x8) == 8) {
		if (jiffies - start > timeout) {
			dprintk("%s: timeout!!\n", __func__);
			return -ETIMEDOUT;
		}
		msleep(10);
	};

	return 0;
}

/*
 * si21xx_set_symbolrate: program the symbol-rate registers.
 * The loop computes srate * 2^23 / fs in base-100 chunks to avoid
 * 32-bit overflow before writing the 24-bit result.
 */
static int si21xx_set_symbolrate(struct dvb_frontend *fe, u32 srate)
{
	struct si21xx_state *state = fe->demodulator_priv;
	u32 sym_rate, data_rate;
	int i;
	u8 sym_rate_bytes[3];

	dprintk("%s : srate = %i\n", __func__ , srate);

	if ((srate < 1000000) || (srate > 45000000))
		return -EINVAL;

	data_rate = srate;
	sym_rate = 0;

	for (i = 0; i < 4; ++i) {
		sym_rate /= 100;
		sym_rate = sym_rate + ((data_rate % 100) * 0x800000) /
								state->fs;
		data_rate /= 100;
	}

	for (i = 0; i < 3; ++i)
		sym_rate_bytes[i] = (u8)((sym_rate >> (i * 8)) & 0xff);

	si21_writeregs(state, SYM_RATE_REG_L, sym_rate_bytes, 0x03);

	return 0;
}

/* si21xx_send_diseqc_msg: queue a DiSEqC master command into the LNB FIFO */
static int si21xx_send_diseqc_msg(struct dvb_frontend *fe,
					struct dvb_diseqc_master_cmd *m)
{
	struct si21xx_state *state = fe->demodulator_priv;
	u8 lnb_status;
	u8 LNB_CTRL_1;
	int status;

	dprintk("%s\n", __func__);

	status = PASS;
	LNB_CTRL_1 = 0;

	status |= si21_readregs(state, LNB_CTRL_STATUS_REG, &lnb_status, 0x01);
	status |= si21_readregs(state, LNB_CTRL_REG_1, &lnb_status, 0x01);

	/*fill the FIFO*/
	status |= si21_writeregs(state, LNB_FIFO_REGS_0, m->msg, m->msg_len);

	LNB_CTRL_1 = (lnb_status & 0x70);
	LNB_CTRL_1 |= m->msg_len;

	LNB_CTRL_1 |= 0x80;	/* begin LNB signaling */

	status |= si21_writeregs(state, LNB_CTRL_REG_1, &LNB_CTRL_1, 0x01);

	return status;
}

/* si21xx_send_diseqc_burst: send a mini-DiSEqC (tone burst) A/B command */
static int si21xx_send_diseqc_burst(struct dvb_frontend *fe,
						fe_sec_mini_cmd_t burst)
{
	struct si21xx_state *state = fe->demodulator_priv;
	u8 val;

	dprintk("%s\n", __func__);

	if (si21xx_wait_diseqc_idle(state, 100) < 0)
		return -ETIMEDOUT;

	val = (0x80 | si21_readreg(state, 0xc1));
	if (si21_writereg(state, LNB_CTRL_REG_1,
			burst == SEC_MINI_A ? (val & ~0x10) : (val | 0x10)))
		return -EREMOTEIO;

	if (si21xx_wait_diseqc_idle(state, 100) < 0)
		return -ETIMEDOUT;

	if (si21_writereg(state, LNB_CTRL_REG_1, val))
		return -EREMOTEIO;

	return 0;
}
/*  30.06.2008 */
/* si21xx_set_tone: switch the 22 kHz continuous tone on/off (bit 5) */
static int si21xx_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
{
	struct si21xx_state *state = fe->demodulator_priv;
	u8 val;

	dprintk("%s\n", __func__);
	val = (0x80 | si21_readreg(state, LNB_CTRL_REG_1));

	switch (tone) {
	case SEC_TONE_ON:
		return si21_writereg(state, LNB_CTRL_REG_1, val | 0x20);

	case SEC_TONE_OFF:
		return si21_writereg(state, LNB_CTRL_REG_1, (val & ~0x20));

	default:
		return -EINVAL;
	}
}

/* si21xx_set_voltage: select 13V/18V LNB supply (bit 6) */
static int si21xx_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t volt)
{
	struct si21xx_state *state = fe->demodulator_priv;
	u8 val;

	dprintk("%s: %s\n", __func__,
		volt == SEC_VOLTAGE_13 ? "SEC_VOLTAGE_13" :
		volt == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" : "??");

	val = (0x80 | si21_readreg(state, LNB_CTRL_REG_1));

	switch (volt) {
	case SEC_VOLTAGE_18:
		return si21_writereg(state, LNB_CTRL_REG_1, val | 0x40);
		break;
	case SEC_VOLTAGE_13:
		return si21_writereg(state, LNB_CTRL_REG_1, (val & ~0x40));
		break;
	default:
		return -EINVAL;
	};
}

/*
 * si21xx_init: load the board init table, select DVB QPSK system mode
 * and configure the transport-stream output interface.
 */
static int si21xx_init(struct dvb_frontend *fe)
{
	struct si21xx_state *state = fe->demodulator_priv;
	int i;
	int status = 0;
	u8 reg1;
	u8 val;
	u8 reg2[2];

	dprintk("%s\n", __func__);

	for (i = 0; ; i += 2) {
		reg1 = serit_sp1511lhb_inittab[i];
		val = serit_sp1511lhb_inittab[i+1];
		if (reg1 == 0xff && val == 0xff)
			break;
		si21_writeregs(state, reg1, &val, 1);
	}

	/*DVB QPSK SYSTEM MODE REG*/
	reg1 = 0x08;
	si21_writeregs(state, SYSTEM_MODE_REG, &reg1, 0x01);

	/*transport stream config*/
	/*
	mode = PARALLEL;
	sdata_form = LSB_FIRST;
	clk_edge = FALLING_EDGE;
	clk_mode = CLK_GAPPED_MODE;
	strt_len = BYTE_WIDE;
	sync_pol = ACTIVE_HIGH;
	val_pol = ACTIVE_HIGH;
	err_pol = ACTIVE_HIGH;
	sclk_rate = 0x00;
	parity = 0x00 ;
	data_delay = 0x00;
	clk_delay = 0x00;
	pclk_smooth = 0x00;
	*/
	reg2[0] = PARALLEL + (LSB_FIRST << 1)
		+ (FALLING_EDGE << 2) + (CLK_GAPPED_MODE << 3)
		+ (BYTE_WIDE << 4) + (ACTIVE_HIGH << 5)
		+ (ACTIVE_HIGH << 6) + (ACTIVE_HIGH << 7);

	reg2[1] = 0;
	/* sclk_rate + (parity << 2)
		+ (data_delay << 3) + (clk_delay << 4)
		+ (pclk_smooth << 5);
	*/
	status |= si21_writeregs(state, TS_CTRL_REG_1, reg2, 0x02);
	if (status != 0)
		dprintk(" %s : TS Set Error\n", __func__);

	return 0;
}

/*
 * si21_read_status: translate the two lock-status registers into
 * FE_HAS_* flags. The first register's bits are mirrored before use.
 */
static int si21_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
	struct si21xx_state *state = fe->demodulator_priv;
	u8 regs_read[2];
	u8 reg_read;
	u8 i;
	u8 lock;
	u8 signal = si21_readreg(state, ANALOG_AGC_POWER_LEVEL_REG);

	si21_readregs(state, LOCK_STATUS_REG_1, regs_read, 0x02);
	reg_read = 0;

	/* mirror bits 0..6 of the first status register */
	for (i = 0; i < 7; ++i)
		reg_read |= ((regs_read[0] >> i) & 0x01) << (6 - i);

	lock = ((reg_read & 0x7f) | (regs_read[1] & 0x80));

	dprintk("%s : FE_READ_STATUS : VSTATUS: 0x%02x\n", __func__, lock);
	*status = 0;

	if (signal > 10)
		*status |= FE_HAS_SIGNAL;

	if (lock & 0x2)
		*status |= FE_HAS_CARRIER;

	if (lock & 0x20)
		*status |= FE_HAS_VITERBI;

	if (lock & 0x40)
		*status |= FE_HAS_SYNC;

	if ((lock & 0x7b) == 0x7b)
		*status |= FE_HAS_LOCK;

	return 0;
}

/* si21_read_signal_strength: scale the AGC power level into 0..0xffff */
static int si21_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
	struct si21xx_state *state = fe->demodulator_priv;

	/*status = si21_readreg(state, ANALOG_AGC_POWER_LEVEL_REG,
						(u8*)agclevel, 0x01);*/

	u16 signal = (3 * si21_readreg(state, 0x27) *
					si21_readreg(state, 0x28));

	dprintk("%s : AGCPWR: 0x%02x%02x, signal=0x%04x\n", __func__,
		si21_readreg(state, 0x27),
		si21_readreg(state, 0x28), (int) signal);

	signal <<= 4;
	*strength = signal;

	return 0;
}

/* si21_read_ber: report the Viterbi bit-error counter (BER mode only) */
static int si21_read_ber(struct dvb_frontend *fe, u32 *ber)
{
	struct si21xx_state *state = fe->demodulator_priv;

	dprintk("%s\n", __func__);

	if (state->errmode != STATUS_BER)
		return 0;

	*ber = (si21_readreg(state, 0x1d) << 8) |
				si21_readreg(state, 0x1e);

	return 0;
}

/* si21_read_snr: derive SNR from the C/N estimator level registers */
static int si21_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	struct si21xx_state *state = fe->demodulator_priv;

	s32 xsnr = 0xffff - ((si21_readreg(state, 0x24) << 8) |
					si21_readreg(state,
0x25)); xsnr = 3 * (xsnr - 0xa100); *snr = (xsnr > 0xffff) ? 0xffff : (xsnr < 0) ? 0 : xsnr; dprintk("%s\n", __func__); return 0; } static int si21_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct si21xx_state *state = fe->demodulator_priv; dprintk("%s\n", __func__); if (state->errmode != STATUS_UCBLOCKS) *ucblocks = 0; else *ucblocks = (si21_readreg(state, 0x1d) << 8) | si21_readreg(state, 0x1e); return 0; } /* initiates a channel acquisition sequence using the specified symbol rate and code rate */ static int si21xx_setacquire(struct dvb_frontend *fe, int symbrate, fe_code_rate_t crate) { struct si21xx_state *state = fe->demodulator_priv; u8 coderates[] = { 0x0, 0x01, 0x02, 0x04, 0x00, 0x8, 0x10, 0x20, 0x00, 0x3f }; u8 coderate_ptr; int status; u8 start_acq = 0x80; u8 reg, regs[3]; dprintk("%s\n", __func__); status = PASS; coderate_ptr = coderates[crate]; si21xx_set_symbolrate(fe, symbrate); /* write code rates to use in the Viterbi search */ status |= si21_writeregs(state, VIT_SRCH_CTRL_REG_1, &coderate_ptr, 0x01); /* clear acq_start bit */ status |= si21_readregs(state, ACQ_CTRL_REG_2, &reg, 0x01); reg &= ~start_acq; status |= si21_writeregs(state, ACQ_CTRL_REG_2, &reg, 0x01); /* use new Carrier Frequency Offset Estimator (QuickLock) */ regs[0] = 0xCB; regs[1] = 0x40; regs[2] = 0xCB; status |= si21_writeregs(state, TWO_DB_BNDWDTH_THRSHLD_REG, &regs[0], 0x03); reg = 0x56; status |= si21_writeregs(state, LSA_CTRL_REG_1, &reg, 1); reg = 0x05; status |= si21_writeregs(state, BLIND_SCAN_CTRL_REG, &reg, 1); /* start automatic acq */ status |= si21_writeregs(state, ACQ_CTRL_REG_2, &start_acq, 0x01); return status; } static int si21xx_set_property(struct dvb_frontend *fe, struct dtv_property *p) { dprintk("%s(..)\n", __func__); return 0; } static int si21xx_get_property(struct dvb_frontend *fe, struct dtv_property *p) { dprintk("%s(..)\n", __func__); return 0; } static int si21xx_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *dfp) { 
struct si21xx_state *state = fe->demodulator_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; /* freq Channel carrier frequency in KHz (i.e. 1550000 KHz) datarate Channel symbol rate in Sps (i.e. 22500000 Sps)*/ /* in MHz */ unsigned char coarse_tune_freq; int fine_tune_freq; unsigned char sample_rate = 0; /* boolean */ unsigned int inband_interferer_ind; /* INTERMEDIATE VALUES */ int icoarse_tune_freq; /* MHz */ int ifine_tune_freq; /* MHz */ unsigned int band_high; unsigned int band_low; unsigned int x1; unsigned int x2; int i; unsigned int inband_interferer_div2[ALLOWABLE_FS_COUNT] = { FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE }; unsigned int inband_interferer_div4[ALLOWABLE_FS_COUNT] = { FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE }; int status; /* allowable sample rates for ADC in MHz */ int afs[ALLOWABLE_FS_COUNT] = { 200, 192, 193, 194, 195, 196, 204, 205, 206, 207 }; /* in MHz */ int if_limit_high; int if_limit_low; int lnb_lo; int lnb_uncertanity; int rf_freq; int data_rate; unsigned char regs[4]; dprintk("%s : FE_SET_FRONTEND\n", __func__); if (c->delivery_system != SYS_DVBS) { dprintk("%s: unsupported delivery system selected (%d)\n", __func__, c->delivery_system); return -EOPNOTSUPP; } for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) inband_interferer_div2[i] = inband_interferer_div4[i] = FALSE; if_limit_high = -700000; if_limit_low = -100000; /* in MHz */ lnb_lo = 0; lnb_uncertanity = 0; rf_freq = 10 * c->frequency ; data_rate = c->symbol_rate / 100; status = PASS; band_low = (rf_freq - lnb_lo) - ((lnb_uncertanity * 200) + (data_rate * 135)) / 200; band_high = (rf_freq - lnb_lo) + ((lnb_uncertanity * 200) + (data_rate * 135)) / 200; icoarse_tune_freq = 100000 * (((rf_freq - lnb_lo) - (if_limit_low + if_limit_high) / 2) / 100000); ifine_tune_freq = (rf_freq - lnb_lo) - icoarse_tune_freq ; for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) { x1 = ((rf_freq - lnb_lo) / (afs[i] * 2500)) * (afs[i] * 2500) 
+ afs[i] * 2500; x2 = ((rf_freq - lnb_lo) / (afs[i] * 2500)) * (afs[i] * 2500); if (((band_low < x1) && (x1 < band_high)) || ((band_low < x2) && (x2 < band_high))) inband_interferer_div4[i] = TRUE; } for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) { x1 = ((rf_freq - lnb_lo) / (afs[i] * 5000)) * (afs[i] * 5000) + afs[i] * 5000; x2 = ((rf_freq - lnb_lo) / (afs[i] * 5000)) * (afs[i] * 5000); if (((band_low < x1) && (x1 < band_high)) || ((band_low < x2) && (x2 < band_high))) inband_interferer_div2[i] = TRUE; } inband_interferer_ind = TRUE; for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) inband_interferer_ind &= inband_interferer_div2[i] | inband_interferer_div4[i]; if (inband_interferer_ind) { for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) { if (inband_interferer_div2[i] == FALSE) { sample_rate = (u8) afs[i]; break; } } } else { for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) { if ((inband_interferer_div2[i] | inband_interferer_div4[i]) == FALSE) { sample_rate = (u8) afs[i]; break; } } } if (sample_rate > 207 || sample_rate < 192) sample_rate = 200; fine_tune_freq = ((0x4000 * (ifine_tune_freq / 10)) / ((sample_rate) * 1000)); coarse_tune_freq = (u8)(icoarse_tune_freq / 100000); regs[0] = sample_rate; regs[1] = coarse_tune_freq; regs[2] = fine_tune_freq & 0xFF; regs[3] = fine_tune_freq >> 8 & 0xFF; status |= si21_writeregs(state, PLL_DIVISOR_REG, &regs[0], 0x04); state->fs = sample_rate;/*ADC MHz*/ si21xx_setacquire(fe, c->symbol_rate, c->fec_inner); return 0; } static int si21xx_sleep(struct dvb_frontend *fe) { struct si21xx_state *state = fe->demodulator_priv; u8 regdata; dprintk("%s\n", __func__); si21_readregs(state, SYSTEM_MODE_REG, &regdata, 0x01); regdata |= 1 << 6; si21_writeregs(state, SYSTEM_MODE_REG, &regdata, 0x01); state->initialised = 0; return 0; } static void si21xx_release(struct dvb_frontend *fe) { struct si21xx_state *state = fe->demodulator_priv; dprintk("%s\n", __func__); kfree(state); } static struct dvb_frontend_ops si21xx_ops = { .info = { .name = "SL SI21XX DVB-S", .type 
= FE_QPSK,
		.frequency_min		= 950000,
		.frequency_max		= 2150000,
		.frequency_stepsize	= 125,	 /* kHz for QPSK frontends */
		.frequency_tolerance	= 0,
		.symbol_rate_min	= 1000000,
		.symbol_rate_max	= 45000000,
		.symbol_rate_tolerance	= 500,	/* ppm */
		.caps = FE_CAN_FEC_1_2 |
			FE_CAN_FEC_2_3 |
			FE_CAN_FEC_3_4 |
			FE_CAN_FEC_5_6 |
			FE_CAN_FEC_7_8 |
			FE_CAN_QPSK |
			FE_CAN_FEC_AUTO
	},

	.release = si21xx_release,
	.init = si21xx_init,
	.sleep = si21xx_sleep,
	.write = si21_write,
	.read_status = si21_read_status,
	.read_ber = si21_read_ber,
	.read_signal_strength = si21_read_signal_strength,
	.read_snr = si21_read_snr,
	.read_ucblocks = si21_read_ucblocks,
	.diseqc_send_master_cmd = si21xx_send_diseqc_msg,
	.diseqc_send_burst = si21xx_send_diseqc_burst,
	.set_tone = si21xx_set_tone,
	.set_voltage = si21xx_set_voltage,

	.set_property = si21xx_set_property,
	.get_property = si21xx_get_property,
	.set_frontend = si21xx_set_frontend,
};

/*
 * si21xx_attach: allocate driver state, probe the chip over I2C by
 * reading the revision register, and hand back an initialized
 * dvb_frontend. Returns NULL if the chip is absent or unsupported.
 */
struct dvb_frontend *si21xx_attach(const struct si21xx_config *config,
						struct i2c_adapter *i2c)
{
	struct si21xx_state *state = NULL;
	int id;

	dprintk("%s\n", __func__);

	/* allocate memory for the internal state */
	state = kzalloc(sizeof(struct si21xx_state), GFP_KERNEL);
	if (state == NULL)
		goto error;

	/* setup the state */
	state->config = config;
	state->i2c = i2c;
	state->initialised = 0;
	state->errmode = STATUS_BER;

	/* check if the demod is there */
	id = si21_readreg(state, SYSTEM_MODE_REG);
	si21_writereg(state, SYSTEM_MODE_REG, id | 0x40); /* standby off */
	msleep(200);
	id = si21_readreg(state, 0x00);

	/* register 0x00 contains:
		0x34 for SI2107
		0x24 for SI2108
		0x14 for SI2109
		0x04 for SI2110
	*/
	/* only the si2109/si2110 are supported by this driver */
	if (id != 0x04 && id != 0x14)
		goto error;

	/* create dvb_frontend */
	memcpy(&state->frontend.ops, &si21xx_ops,
			sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;
	return &state->frontend;

error:
	kfree(state);
	return NULL;
}
EXPORT_SYMBOL(si21xx_attach);

module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("SL SI21XX DVB Demodulator driver");
MODULE_AUTHOR("Igor M. Liplianin");
MODULE_LICENSE("GPL");
gpl-2.0
SignFinder/FaceCore
dep/acelite/ace/Name_Proxy.cpp
535
5809
// $Id: Name_Proxy.cpp 91286 2010-08-05 09:04:31Z johnnyw $ #include "ace/Name_Proxy.h" #include "ace/Log_Msg.h" #include "ace/os_include/arpa/os_inet.h" ACE_BEGIN_VERSIONED_NAMESPACE_DECL void ACE_Name_Proxy::dump (void) const { #if defined (ACE_HAS_DUMP) ACE_TRACE ("ACE_Name_Proxy::dump"); ACE_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this)); this->connector_.dump (); this->peer_.dump (); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("reactor_ = %x"), this->reactor_)); ACE_DEBUG ((LM_DEBUG, ACE_END_DUMP)); #endif /* ACE_HAS_DUMP */ } // Default constructor. ACE_Name_Proxy::ACE_Name_Proxy (void) : reactor_ (0) { ACE_TRACE ("ACE_Name_Proxy::ACE_Name_Proxy"); } // Establish binding with the ACE_Name Server at remote_addr. int ACE_Name_Proxy::open (const ACE_INET_Addr &remote_addr, ACE_Synch_Options& options) { ACE_TRACE ("ACE_Name_Proxy::open"); ACE_Time_Value *timeout = 0; if (options[ACE_Synch_Options::USE_TIMEOUT]) timeout = const_cast<ACE_Time_Value *> (options.time_value ()); // Initiate the connection. return this->connector_.connect (this->peer_, remote_addr, timeout); } // Establish binding with the ACE_Name Server at remote_addr. ACE_Name_Proxy::ACE_Name_Proxy ( const ACE_INET_Addr &remote_addr, ACE_Synch_Options& options) : reactor_ (0) { ACE_TRACE ("ACE_Name_Proxy::ACE_Name_Proxy"); if (this->open (remote_addr, options) == -1 && options[ACE_Synch_Options::USE_TIMEOUT] && errno != EWOULDBLOCK) ACE_ERROR ((LM_ERROR, ACE_TEXT ("%p\n"), ACE_TEXT ("ACE_Name_Proxy::ACE_Name_Proxy"))); } // Obtain underlying handle. /* VIRTUAL */ ACE_HANDLE ACE_Name_Proxy::get_handle (void) const { ACE_TRACE ("ACE_Name_Proxy::get_handle"); return this->peer_.get_handle (); } int ACE_Name_Proxy::request_reply (ACE_Name_Request &request) { ACE_TRACE ("ACE_Name_Proxy::request_reply"); void *buffer; ssize_t length = request.encode (buffer); if (length == -1) ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("%p\n"), ACE_TEXT ("encode failed")), -1); // Transmit request via a blocking send. 
if (this->peer_.send_n (buffer, length) != length) ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("%p\n"), ACE_TEXT ("send_n failed")), -1); else { ACE_Name_Reply reply; // Receive reply via blocking read. if (this->peer_.recv_n (&reply, sizeof reply) == -1) ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("%p\n"), ACE_TEXT ("recv failed")), -1); else if (reply.decode () == -1) ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("%p\n"), ACE_TEXT ("decode failed")), -1); errno = int (reply.errnum ()); return reply.status (); } } int ACE_Name_Proxy::send_request (ACE_Name_Request &request) { ACE_TRACE ("ACE_Name_Proxy::send_request"); void *buffer; ssize_t length = request.encode (buffer); if (length == -1) ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("%p\n"), ACE_TEXT ("encode failed")), -1); // Transmit request via a blocking send. else if (this->peer_.send_n (buffer, length) != length) ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("%p\n"), ACE_TEXT ("send_n failed")), -1); return 0; } int ACE_Name_Proxy::recv_reply (ACE_Name_Request &reply) { ACE_TRACE ("ACE_Name_Proxy::recv_reply"); // Read the first 4 bytes to get the length of the message This // implementation assumes that the first 4 bytes are the length of // the message. ssize_t n = this->peer_.recv ((void *) &reply, sizeof (ACE_UINT32)); switch (n) { case -1: // FALLTHROUGH ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("****************** recv_reply returned -1\n"))); default: ACE_ERROR ((LM_ERROR, ACE_TEXT ("%p got %d bytes, expected %d bytes\n"), ACE_TEXT ("recv failed"), n, sizeof (ACE_UINT32))); // FALLTHROUGH case 0: // We've shutdown unexpectedly return -1; // NOTREACHED case sizeof (ACE_UINT32): { // Transform the length into host byte order. ssize_t length = ACE_NTOHL (reply.length ()); // Receive the rest of the request message. // @@ beware of blocking read!!!. n = this->peer_.recv ((void *) (((char *) &reply) + sizeof (ACE_UINT32)), length - sizeof (ACE_UINT32)); // Subtract off the size of the part we skipped over... 
if (n != ssize_t (length - sizeof (ACE_UINT32))) { ACE_ERROR ((LM_ERROR, ACE_TEXT ("%p expected %d, got %d\n"), ACE_TEXT ("invalid length"), length, n)); return -1; } // Decode the request into host byte order. if (reply.decode () == -1) { ACE_ERROR ((LM_ERROR, ACE_TEXT ("%p\n"), ACE_TEXT ("decode failed"))); return -1; } } } return 0; } // Close down the connection to the server. ACE_Name_Proxy::~ACE_Name_Proxy (void) { ACE_TRACE ("ACE_Name_Proxy::~ACE_Name_Proxy"); this->peer_.close (); } ACE_END_VERSIONED_NAMESPACE_DECL
gpl-2.0
goodwinos/linux-latest
arch/powerpc/platforms/83xx/misc.c
1303
3165
/* * misc setup functions for MPC83xx * * Maintainer: Kumar Gala <galak@kernel.crashing.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/of_platform.h> #include <linux/pci.h> #include <asm/io.h> #include <asm/hw_irq.h> #include <asm/ipic.h> #include <asm/qe_ic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc83xx.h" static __be32 __iomem *restart_reg_base; static int __init mpc83xx_restart_init(void) { /* map reset restart_reg_baseister space */ restart_reg_base = ioremap(get_immrbase() + 0x900, 0xff); return 0; } arch_initcall(mpc83xx_restart_init); void mpc83xx_restart(char *cmd) { #define RST_OFFSET 0x00000900 #define RST_PROT_REG 0x00000018 #define RST_CTRL_REG 0x0000001c local_irq_disable(); if (restart_reg_base) { /* enable software reset "RSTE" */ out_be32(restart_reg_base + (RST_PROT_REG >> 2), 0x52535445); /* set software hard reset */ out_be32(restart_reg_base + (RST_CTRL_REG >> 2), 0x2); } else { printk (KERN_EMERG "Error: Restart registers not mapped, spinning!\n"); } for (;;) ; } long __init mpc83xx_time_init(void) { #define SPCR_OFFSET 0x00000110 #define SPCR_TBEN 0x00400000 __be32 __iomem *spcr = ioremap(get_immrbase() + SPCR_OFFSET, 4); __be32 tmp; tmp = in_be32(spcr); out_be32(spcr, tmp | SPCR_TBEN); iounmap(spcr); return 0; } void __init mpc83xx_ipic_init_IRQ(void) { struct device_node *np; /* looking for fsl,pq2pro-pic which is asl compatible with fsl,ipic */ np = of_find_compatible_node(NULL, NULL, "fsl,ipic"); if (!np) np = of_find_node_by_type(NULL, "ipic"); if (!np) return; ipic_init(np, 0); of_node_put(np); /* Initialize the default interrupt mapping priorities, * in case the boot rom changed something on us. 
*/ ipic_set_default_priority(); } #ifdef CONFIG_QUICC_ENGINE void __init mpc83xx_qe_init_IRQ(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); if (!np) { np = of_find_node_by_type(NULL, "qeic"); if (!np) return; } qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic); of_node_put(np); } void __init mpc83xx_ipic_and_qe_init_IRQ(void) { mpc83xx_ipic_init_IRQ(); mpc83xx_qe_init_IRQ(); } #endif /* CONFIG_QUICC_ENGINE */ static const struct of_device_id of_bus_ids[] __initconst = { { .type = "soc", }, { .compatible = "soc", }, { .compatible = "simple-bus" }, { .compatible = "gianfar" }, { .compatible = "gpio-leds", }, { .type = "qe", }, { .compatible = "fsl,qe", }, {}, }; int __init mpc83xx_declare_of_platform_devices(void) { of_platform_bus_probe(NULL, of_bus_ids, NULL); return 0; } #ifdef CONFIG_PCI void __init mpc83xx_setup_pci(void) { struct device_node *np; for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") mpc83xx_add_bridge(np); for_each_compatible_node(np, "pci", "fsl,mpc8314-pcie") mpc83xx_add_bridge(np); } #endif
gpl-2.0
ea4862/boeffla_cm12.1
drivers/s390/char/vmcp.c
1559
5222
/* * Copyright IBM Corp. 2004,2010 * Interface implementation for communication with the z/VM control program * * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> * * z/VMs CP offers the possibility to issue commands via the diagnose code 8 * this driver implements a character device that issues these commands and * returns the answer of CP. * * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS */ #include <linux/fs.h> #include <linux/init.h> #include <linux/compat.h> #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <asm/compat.h> #include <asm/cpcmd.h> #include <asm/debug.h> #include <asm/uaccess.h> #include "vmcp.h" static debug_info_t *vmcp_debug; static int vmcp_open(struct inode *inode, struct file *file) { struct vmcp_session *session; if (!capable(CAP_SYS_ADMIN)) return -EPERM; session = kmalloc(sizeof(*session), GFP_KERNEL); if (!session) return -ENOMEM; session->bufsize = PAGE_SIZE; session->response = NULL; session->resp_size = 0; mutex_init(&session->mutex); file->private_data = session; return nonseekable_open(inode, file); } static int vmcp_release(struct inode *inode, struct file *file) { struct vmcp_session *session; session = file->private_data; file->private_data = NULL; free_pages((unsigned long)session->response, get_order(session->bufsize)); kfree(session); return 0; } static ssize_t vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos) { ssize_t ret; size_t size; struct vmcp_session *session; session = file->private_data; if (mutex_lock_interruptible(&session->mutex)) return -ERESTARTSYS; if (!session->response) { mutex_unlock(&session->mutex); return 0; } size = min_t(size_t, session->resp_size, session->bufsize); ret = simple_read_from_buffer(buff, count, ppos, session->response, size); mutex_unlock(&session->mutex); return ret; } static ssize_t vmcp_write(struct file *file, const char __user *buff, size_t count, loff_t *ppos) { char *cmd; struct 
vmcp_session *session; if (count > 240) return -EINVAL; cmd = kmalloc(count + 1, GFP_KERNEL); if (!cmd) return -ENOMEM; if (copy_from_user(cmd, buff, count)) { kfree(cmd); return -EFAULT; } cmd[count] = '\0'; session = file->private_data; if (mutex_lock_interruptible(&session->mutex)) { kfree(cmd); return -ERESTARTSYS; } if (!session->response) session->response = (char *)__get_free_pages(GFP_KERNEL | __GFP_REPEAT | GFP_DMA, get_order(session->bufsize)); if (!session->response) { mutex_unlock(&session->mutex); kfree(cmd); return -ENOMEM; } debug_text_event(vmcp_debug, 1, cmd); session->resp_size = cpcmd(cmd, session->response, session->bufsize, &session->resp_code); mutex_unlock(&session->mutex); kfree(cmd); *ppos = 0; /* reset the file pointer after a command */ return count; } /* * These ioctls are available, as the semantics of the diagnose 8 call * does not fit very well into a Linux call. Diagnose X'08' is described in * CP Programming Services SC24-6084-00 * * VMCP_GETCODE: gives the CP return code back to user space * VMCP_SETBUF: sets the response buffer for the next write call. diagnose 8 * expects adjacent pages in real storage and to make matters worse, we * dont know the size of the response. 
Therefore we default to PAGESIZE and * let userspace to change the response size, if userspace expects a bigger * response */ static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct vmcp_session *session; int __user *argp; int temp; session = file->private_data; if (is_compat_task()) argp = compat_ptr(arg); else argp = (int __user *)arg; if (mutex_lock_interruptible(&session->mutex)) return -ERESTARTSYS; switch (cmd) { case VMCP_GETCODE: temp = session->resp_code; mutex_unlock(&session->mutex); return put_user(temp, argp); case VMCP_SETBUF: free_pages((unsigned long)session->response, get_order(session->bufsize)); session->response=NULL; temp = get_user(session->bufsize, argp); if (get_order(session->bufsize) > 8) { session->bufsize = PAGE_SIZE; temp = -EINVAL; } mutex_unlock(&session->mutex); return temp; case VMCP_GETSIZE: temp = session->resp_size; mutex_unlock(&session->mutex); return put_user(temp, argp); default: mutex_unlock(&session->mutex); return -ENOIOCTLCMD; } } static const struct file_operations vmcp_fops = { .owner = THIS_MODULE, .open = vmcp_open, .release = vmcp_release, .read = vmcp_read, .write = vmcp_write, .unlocked_ioctl = vmcp_ioctl, .compat_ioctl = vmcp_ioctl, .llseek = no_llseek, }; static struct miscdevice vmcp_dev = { .name = "vmcp", .minor = MISC_DYNAMIC_MINOR, .fops = &vmcp_fops, }; static int __init vmcp_init(void) { int ret; if (!MACHINE_IS_VM) return 0; vmcp_debug = debug_register("vmcp", 1, 1, 240); if (!vmcp_debug) return -ENOMEM; ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view); if (ret) { debug_unregister(vmcp_debug); return ret; } ret = misc_register(&vmcp_dev); if (ret) debug_unregister(vmcp_debug); return ret; } device_initcall(vmcp_init);
gpl-2.0
hustard/h2fs
arch/mn10300/unit-asb2305/pci.c
2071
14078
/* ASB2305 PCI support * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * Derived from arch/i386/kernel/pci-pc.c * (c) 1999--2000 Martin Mares <mj@suse.cz> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/irq.h> #include <asm/io.h> #include <asm/irq.h> #include "pci-asb2305.h" unsigned int pci_probe = 1; int pcibios_last_bus = -1; struct pci_ops *pci_root_ops; /* * The accessible PCI window does not cover the entire CPU address space, but * there are devices we want to access outside of that window, so we need to * insert specific PCI bus resources instead of using the platform-level bus * resources directly for the PCI root bus. * * These are configured and inserted by pcibios_init(). 
*/ static struct resource pci_ioport_resource = { .name = "PCI IO", .start = 0xbe000000, .end = 0xbe03ffff, .flags = IORESOURCE_IO, }; static struct resource pci_iomem_resource = { .name = "PCI mem", .start = 0xb8000000, .end = 0xbbffffff, .flags = IORESOURCE_MEM, }; /* * Functions for accessing PCI configuration space */ #define CONFIG_CMD(bus, devfn, where) \ (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3)) #define MEM_PAGING_REG (*(volatile __u32 *) 0xBFFFFFF4) #define CONFIG_ADDRESS (*(volatile __u32 *) 0xBFFFFFF8) #define CONFIG_DATAL(X) (*(volatile __u32 *) 0xBFFFFFFC) #define CONFIG_DATAW(X) (*(volatile __u16 *) (0xBFFFFFFC + ((X) & 2))) #define CONFIG_DATAB(X) (*(volatile __u8 *) (0xBFFFFFFC + ((X) & 3))) #define BRIDGEREGB(X) (*(volatile __u8 *) (0xBE040000 + (X))) #define BRIDGEREGW(X) (*(volatile __u16 *) (0xBE040000 + (X))) #define BRIDGEREGL(X) (*(volatile __u32 *) (0xBE040000 + (X))) static inline int __query(const struct pci_bus *bus, unsigned int devfn) { #if 0 return bus->number == 0 && (devfn == PCI_DEVFN(0, 0)); return bus->number == 1; return bus->number == 0 && (devfn == PCI_DEVFN(2, 0) || devfn == PCI_DEVFN(3, 0)); #endif return 1; } /* * */ static int pci_ampci_read_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u32 *_value) { u32 rawval, value; if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { value = BRIDGEREGB(where); __pcbdebug("=> %02hx", &BRIDGEREGL(where), value); } else { CONFIG_ADDRESS = CONFIG_CMD(bus, devfn, where); rawval = CONFIG_ADDRESS; value = CONFIG_DATAB(where); if (__query(bus, devfn)) __pcidebug("=> %02hx", bus, devfn, where, value); } *_value = value; return PCIBIOS_SUCCESSFUL; } static int pci_ampci_read_config_word(struct pci_bus *bus, unsigned int devfn, int where, u32 *_value) { u32 rawval, value; if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { value = BRIDGEREGW(where); __pcbdebug("=> %04hx", &BRIDGEREGL(where), value); } else { CONFIG_ADDRESS = CONFIG_CMD(bus, devfn, where); 
rawval = CONFIG_ADDRESS; value = CONFIG_DATAW(where); if (__query(bus, devfn)) __pcidebug("=> %04hx", bus, devfn, where, value); } *_value = value; return PCIBIOS_SUCCESSFUL; } static int pci_ampci_read_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 *_value) { u32 rawval, value; if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { value = BRIDGEREGL(where); __pcbdebug("=> %08x", &BRIDGEREGL(where), value); } else { CONFIG_ADDRESS = CONFIG_CMD(bus, devfn, where); rawval = CONFIG_ADDRESS; value = CONFIG_DATAL(where); if (__query(bus, devfn)) __pcidebug("=> %08x", bus, devfn, where, value); } *_value = value; return PCIBIOS_SUCCESSFUL; } static int pci_ampci_write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 value) { u32 rawval; if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __pcbdebug("<= %02x", &BRIDGEREGB(where), value); BRIDGEREGB(where) = value; } else { if (bus->number == 0 && (devfn == PCI_DEVFN(2, 0) || devfn == PCI_DEVFN(3, 0)) ) __pcidebug("<= %02x", bus, devfn, where, value); CONFIG_ADDRESS = CONFIG_CMD(bus, devfn, where); rawval = CONFIG_ADDRESS; CONFIG_DATAB(where) = value; } return PCIBIOS_SUCCESSFUL; } static int pci_ampci_write_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 value) { u32 rawval; if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __pcbdebug("<= %04hx", &BRIDGEREGW(where), value); BRIDGEREGW(where) = value; } else { if (__query(bus, devfn)) __pcidebug("<= %04hx", bus, devfn, where, value); CONFIG_ADDRESS = CONFIG_CMD(bus, devfn, where); rawval = CONFIG_ADDRESS; CONFIG_DATAW(where) = value; } return PCIBIOS_SUCCESSFUL; } static int pci_ampci_write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 value) { u32 rawval; if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __pcbdebug("<= %08x", &BRIDGEREGL(where), value); BRIDGEREGL(where) = value; } else { if (__query(bus, devfn)) __pcidebug("<= %08x", bus, devfn, where, value); CONFIG_ADDRESS = 
CONFIG_CMD(bus, devfn, where); rawval = CONFIG_ADDRESS; CONFIG_DATAL(where) = value; } return PCIBIOS_SUCCESSFUL; } static int pci_ampci_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { switch (size) { case 1: return pci_ampci_read_config_byte(bus, devfn, where, val); case 2: return pci_ampci_read_config_word(bus, devfn, where, val); case 4: return pci_ampci_read_config_dword(bus, devfn, where, val); default: BUG(); return -EOPNOTSUPP; } } static int pci_ampci_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { switch (size) { case 1: return pci_ampci_write_config_byte(bus, devfn, where, val); case 2: return pci_ampci_write_config_word(bus, devfn, where, val); case 4: return pci_ampci_write_config_dword(bus, devfn, where, val); default: BUG(); return -EOPNOTSUPP; } } static struct pci_ops pci_direct_ampci = { pci_ampci_read_config, pci_ampci_write_config, }; /* * Before we decide to use direct hardware access mechanisms, we try to do some * trivial checks to ensure it at least _seems_ to be working -- we just test * whether bus 00 contains a host bridge (this is similar to checking * techniques used in XFree86, but ours should be more reliable since we * attempt to make use of direct access hints provided by the PCI BIOS). * * This should be close to trivial, but it isn't, because there are buggy * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID. */ static int __init pci_sanity_check(struct pci_ops *o) { struct pci_bus bus; /* Fake bus and device */ u32 x; bus.number = 0; if ((!o->read(&bus, 0, PCI_CLASS_DEVICE, 2, &x) && (x == PCI_CLASS_BRIDGE_HOST || x == PCI_CLASS_DISPLAY_VGA)) || (!o->read(&bus, 0, PCI_VENDOR_ID, 2, &x) && (x == PCI_VENDOR_ID_INTEL || x == PCI_VENDOR_ID_COMPAQ))) return 1; printk(KERN_ERR "PCI: Sanity check failed\n"); return 0; } static int __init pci_check_direct(void) { unsigned long flags; local_irq_save(flags); /* * Check if access works. 
*/ if (pci_sanity_check(&pci_direct_ampci)) { local_irq_restore(flags); printk(KERN_INFO "PCI: Using configuration ampci\n"); request_mem_region(0xBE040000, 256, "AMPCI bridge"); request_mem_region(0xBFFFFFF4, 12, "PCI ampci"); request_mem_region(0xBC000000, 32 * 1024 * 1024, "PCI SRAM"); return 0; } local_irq_restore(flags); return -ENODEV; } static int is_valid_resource(struct pci_dev *dev, int idx) { unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM; struct resource *devr = &dev->resource[idx], *busr; if (dev->bus) { pci_bus_for_each_resource(dev->bus, busr, i) { if (!busr || (busr->flags ^ devr->flags) & type_mask) continue; if (devr->start && devr->start >= busr->start && devr->end <= busr->end) return 1; } } return 0; } static void pcibios_fixup_device_resources(struct pci_dev *dev) { int limit, i; if (dev->bus->number != 0) return; limit = (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) ? PCI_BRIDGE_RESOURCES : PCI_NUM_RESOURCES; for (i = 0; i < limit; i++) { if (!dev->resource[i].flags) continue; if (is_valid_resource(dev, i)) pci_claim_resource(dev, i); } } /* * Called after each bus is probed, but before its children * are examined. */ void pcibios_fixup_bus(struct pci_bus *bus) { struct pci_dev *dev; if (bus->self) { pci_read_bridge_bases(bus); pcibios_fixup_device_resources(bus->self); } list_for_each_entry(dev, &bus->devices, bus_list) pcibios_fixup_device_resources(dev); } /* * Initialization. Try all known PCI access methods. Note that we support * using both PCI BIOS and direct access: in such cases, we use I/O ports * to access config space, but we still keep BIOS order of cards to be * compatible with 2.0.X. This should go away some day. 
*/ static int __init pcibios_init(void) { resource_size_t io_offset, mem_offset; LIST_HEAD(resources); ioport_resource.start = 0xA0000000; ioport_resource.end = 0xDFFFFFFF; iomem_resource.start = 0xA0000000; iomem_resource.end = 0xDFFFFFFF; if (insert_resource(&iomem_resource, &pci_iomem_resource) < 0) panic("Unable to insert PCI IOMEM resource\n"); if (insert_resource(&ioport_resource, &pci_ioport_resource) < 0) panic("Unable to insert PCI IOPORT resource\n"); if (!pci_probe) return 0; if (pci_check_direct() < 0) { printk(KERN_WARNING "PCI: No PCI bus detected\n"); return 0; } printk(KERN_INFO "PCI: Probing PCI hardware [mempage %08x]\n", MEM_PAGING_REG); io_offset = pci_ioport_resource.start - (pci_ioport_resource.start & 0x00ffffff); mem_offset = pci_iomem_resource.start - ((pci_iomem_resource.start & 0x03ffffff) | MEM_PAGING_REG); pci_add_resource_offset(&resources, &pci_ioport_resource, io_offset); pci_add_resource_offset(&resources, &pci_iomem_resource, mem_offset); pci_scan_root_bus(NULL, 0, &pci_direct_ampci, NULL, &resources); pcibios_irq_init(); pcibios_fixup_irqs(); pcibios_resource_survey(); return 0; } arch_initcall(pcibios_init); char *__init pcibios_setup(char *str) { if (!strcmp(str, "off")) { pci_probe = 0; return NULL; } else if (!strncmp(str, "lastbus=", 8)) { pcibios_last_bus = simple_strtol(str+8, NULL, 0); return NULL; } return str; } int pcibios_enable_device(struct pci_dev *dev, int mask) { int err; err = pci_enable_resources(dev, mask); if (err == 0) pcibios_enable_irq(dev); return err; } /* * disable the ethernet chipset */ static void __init unit_disable_pcnet(struct pci_bus *bus, struct pci_ops *o) { u32 x; bus->number = 0; o->read (bus, PCI_DEVFN(2, 0), PCI_VENDOR_ID, 4, &x); o->read (bus, PCI_DEVFN(2, 0), PCI_COMMAND, 2, &x); x |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_SERR | PCI_COMMAND_PARITY; o->write(bus, PCI_DEVFN(2, 0), PCI_COMMAND, 2, x); o->read (bus, PCI_DEVFN(2, 0), PCI_COMMAND, 2, &x); 
o->write(bus, PCI_DEVFN(2, 0), PCI_BASE_ADDRESS_0, 4, 0x00030001); o->read (bus, PCI_DEVFN(2, 0), PCI_BASE_ADDRESS_0, 4, &x); #define RDP (*(volatile u32 *) 0xBE030010) #define RAP (*(volatile u32 *) 0xBE030014) #define __set_RAP(X) do { RAP = (X); x = RAP; } while (0) #define __set_RDP(X) do { RDP = (X); x = RDP; } while (0) #define __get_RDP() ({ RDP & 0xffff; }) __set_RAP(0); __set_RDP(0x0004); /* CSR0 = STOP */ __set_RAP(88); /* check CSR88 indicates an Am79C973 */ BUG_ON(__get_RDP() != 0x5003); for (x = 0; x < 100; x++) asm volatile("nop"); __set_RDP(0x0004); /* CSR0 = STOP */ } /* * initialise the unit hardware */ asmlinkage void __init unit_pci_init(void) { struct pci_bus bus; /* Fake bus and device */ struct pci_ops *o = &pci_direct_ampci; u32 x; set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_PCI_IRQ_LEVEL)); memset(&bus, 0, sizeof(bus)); MEM_PAGING_REG = 0xE8000000; /* we need to set up the bridge _now_ or we won't be able to access the * PCI config registers */ BRIDGEREGW(PCI_COMMAND) |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_MEMORY | PCI_COMMAND_IO | PCI_COMMAND_MASTER; BRIDGEREGW(PCI_STATUS) = 0xF800; BRIDGEREGB(PCI_LATENCY_TIMER) = 0x10; BRIDGEREGL(PCI_BASE_ADDRESS_0) = 0x80000000; BRIDGEREGB(PCI_INTERRUPT_LINE) = 1; BRIDGEREGL(0x48) = 0x98000000; /* AMPCI base addr */ BRIDGEREGB(0x41) = 0x00; /* secondary bus * number */ BRIDGEREGB(0x42) = 0x01; /* subordinate bus * number */ BRIDGEREGB(0x44) = 0x01; BRIDGEREGL(0x50) = 0x00000001; BRIDGEREGL(0x58) = 0x00001002; BRIDGEREGL(0x5C) = 0x00000011; /* we also need to set up the PCI-PCI bridge */ bus.number = 0; /* IO: 0x00000000-0x00020000 */ o->read (&bus, PCI_DEVFN(3, 0), PCI_COMMAND, 2, &x); x |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_SERR | PCI_COMMAND_PARITY; o->write(&bus, PCI_DEVFN(3, 0), PCI_COMMAND, 2, x); o->read (&bus, PCI_DEVFN(3, 0), PCI_IO_BASE, 1, &x); o->read (&bus, PCI_DEVFN(3, 0), PCI_IO_BASE_UPPER16, 4, &x); o->read (&bus, PCI_DEVFN(3, 0), 
PCI_MEMORY_BASE, 4, &x); o->read (&bus, PCI_DEVFN(3, 0), PCI_PREF_MEMORY_BASE, 4, &x); o->write(&bus, PCI_DEVFN(3, 0), PCI_IO_BASE, 1, 0x01); o->read (&bus, PCI_DEVFN(3, 0), PCI_IO_BASE, 1, &x); o->write(&bus, PCI_DEVFN(3, 0), PCI_IO_BASE_UPPER16, 4, 0x00020000); o->read (&bus, PCI_DEVFN(3, 0), PCI_IO_BASE_UPPER16, 4, &x); o->write(&bus, PCI_DEVFN(3, 0), PCI_MEMORY_BASE, 4, 0xEBB0EA00); o->read (&bus, PCI_DEVFN(3, 0), PCI_MEMORY_BASE, 4, &x); o->write(&bus, PCI_DEVFN(3, 0), PCI_PREF_MEMORY_BASE, 4, 0xE9F0E800); o->read (&bus, PCI_DEVFN(3, 0), PCI_PREF_MEMORY_BASE, 4, &x); unit_disable_pcnet(&bus, o); }
gpl-2.0
goodhanrry/G9250_goodhanrry_kernel
drivers/net/wireless/brcm80211/brcmsmac/channel.c
2327
21373
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/types.h> #include <net/cfg80211.h> #include <net/mac80211.h> #include <net/regulatory.h> #include <defs.h> #include "pub.h" #include "phy/phy_hal.h" #include "main.h" #include "stf.h" #include "channel.h" #include "mac80211_if.h" #include "debug.h" /* QDB() macro takes a dB value and converts to a quarter dB value */ #define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR) #define LOCALE_MIMO_IDX_bn 0 #define LOCALE_MIMO_IDX_11n 0 /* max of BAND_5G_PWR_LVLS and 14 for 2.4 GHz */ #define BRCMS_MAXPWR_MIMO_TBL_SIZE 14 /* maxpwr mapping to 5GHz band channels: * maxpwr[0] - channels [34-48] * maxpwr[1] - channels [52-60] * maxpwr[2] - channels [62-64] * maxpwr[3] - channels [100-140] * maxpwr[4] - channels [149-165] */ #define BAND_5G_PWR_LVLS 5 /* 5 power levels for 5G */ #define LC(id) LOCALE_MIMO_IDX_ ## id #define LOCALES(mimo2, mimo5) \ {LC(mimo2), LC(mimo5)} /* macro to get 5 GHz channel group index for tx power */ #define CHANNEL_POWER_IDX_5G(c) (((c) < 52) ? 0 : \ (((c) < 62) ? 1 : \ (((c) < 100) ? 2 : \ (((c) < 149) ? 
3 : 4)))) #define BRCM_2GHZ_2412_2462 REG_RULE(2412-10, 2462+10, 40, 0, 19, 0) #define BRCM_2GHZ_2467_2472 REG_RULE(2467-10, 2472+10, 20, 0, 19, \ NL80211_RRF_PASSIVE_SCAN | \ NL80211_RRF_NO_IBSS) #define BRCM_5GHZ_5180_5240 REG_RULE(5180-10, 5240+10, 40, 0, 21, \ NL80211_RRF_PASSIVE_SCAN | \ NL80211_RRF_NO_IBSS) #define BRCM_5GHZ_5260_5320 REG_RULE(5260-10, 5320+10, 40, 0, 21, \ NL80211_RRF_PASSIVE_SCAN | \ NL80211_RRF_DFS | \ NL80211_RRF_NO_IBSS) #define BRCM_5GHZ_5500_5700 REG_RULE(5500-10, 5700+10, 40, 0, 21, \ NL80211_RRF_PASSIVE_SCAN | \ NL80211_RRF_DFS | \ NL80211_RRF_NO_IBSS) #define BRCM_5GHZ_5745_5825 REG_RULE(5745-10, 5825+10, 40, 0, 21, \ NL80211_RRF_PASSIVE_SCAN | \ NL80211_RRF_NO_IBSS) static const struct ieee80211_regdomain brcms_regdom_x2 = { .n_reg_rules = 6, .alpha2 = "X2", .reg_rules = { BRCM_2GHZ_2412_2462, BRCM_2GHZ_2467_2472, BRCM_5GHZ_5180_5240, BRCM_5GHZ_5260_5320, BRCM_5GHZ_5500_5700, BRCM_5GHZ_5745_5825, } }; /* locale per-channel tx power limits for MIMO frames * maxpwr arrays are index by channel for 2.4 GHz limits, and * by sub-band for 5 GHz limits using CHANNEL_POWER_IDX_5G(channel) */ struct locale_mimo_info { /* tx 20 MHz power limits, qdBm units */ s8 maxpwr20[BRCMS_MAXPWR_MIMO_TBL_SIZE]; /* tx 40 MHz power limits, qdBm units */ s8 maxpwr40[BRCMS_MAXPWR_MIMO_TBL_SIZE]; }; /* Country names and abbreviations with locale defined from ISO 3166 */ struct country_info { const u8 locale_mimo_2G; /* 2.4G mimo info */ const u8 locale_mimo_5G; /* 5G mimo info */ }; struct brcms_regd { struct country_info country; const struct ieee80211_regdomain *regdomain; }; struct brcms_cm_info { struct brcms_pub *pub; struct brcms_c_info *wlc; const struct brcms_regd *world_regd; }; /* * MIMO Locale Definitions - 2.4 GHz */ static const struct locale_mimo_info locale_bn = { {QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13)}, {0, 0, QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), 
QDB(13), QDB(13), QDB(13), 0, 0}, }; static const struct locale_mimo_info *g_mimo_2g_table[] = { &locale_bn }; /* * MIMO Locale Definitions - 5 GHz */ static const struct locale_mimo_info locale_11n = { { /* 12.5 dBm */ 50, 50, 50, QDB(15), QDB(15)}, {QDB(14), QDB(15), QDB(15), QDB(15), QDB(15)}, }; static const struct locale_mimo_info *g_mimo_5g_table[] = { &locale_11n }; static const struct brcms_regd cntry_locales[] = { /* Worldwide RoW 2, must always be at index 0 */ { .country = LOCALES(bn, 11n), .regdomain = &brcms_regdom_x2, }, }; static const struct locale_mimo_info *brcms_c_get_mimo_2g(u8 locale_idx) { if (locale_idx >= ARRAY_SIZE(g_mimo_2g_table)) return NULL; return g_mimo_2g_table[locale_idx]; } static const struct locale_mimo_info *brcms_c_get_mimo_5g(u8 locale_idx) { if (locale_idx >= ARRAY_SIZE(g_mimo_5g_table)) return NULL; return g_mimo_5g_table[locale_idx]; } /* * Indicates whether the country provided is valid to pass * to cfg80211 or not. * * returns true if valid; false if not. */ static bool brcms_c_country_valid(const char *ccode) { /* * only allow ascii alpha uppercase for the first 2 * chars. 
*/ if (!((0x80 & ccode[0]) == 0 && ccode[0] >= 0x41 && ccode[0] <= 0x5A && (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A)) return false; /* * do not match ISO 3166-1 user assigned country codes * that may be in the driver table */ if (!strcmp("AA", ccode) || /* AA */ !strcmp("ZZ", ccode) || /* ZZ */ ccode[0] == 'X' || /* XA - XZ */ (ccode[0] == 'Q' && /* QM - QZ */ (ccode[1] >= 'M' && ccode[1] <= 'Z'))) return false; if (!strcmp("NA", ccode)) return false; return true; } static const struct brcms_regd *brcms_world_regd(const char *regdom, int len) { const struct brcms_regd *regd = NULL; int i; for (i = 0; i < ARRAY_SIZE(cntry_locales); i++) { if (!strncmp(regdom, cntry_locales[i].regdomain->alpha2, len)) { regd = &cntry_locales[i]; break; } } return regd; } static const struct brcms_regd *brcms_default_world_regd(void) { return &cntry_locales[0]; } /* JP, J1 - J10 are Japan ccodes */ static bool brcms_c_japan_ccode(const char *ccode) { return (ccode[0] == 'J' && (ccode[1] == 'P' || (ccode[1] >= '1' && ccode[1] <= '9'))); } static void brcms_c_channel_min_txpower_limits_with_local_constraint( struct brcms_cm_info *wlc_cm, struct txpwr_limits *txpwr, u8 local_constraint_qdbm) { int j; /* CCK Rates */ for (j = 0; j < WL_TX_POWER_CCK_NUM; j++) txpwr->cck[j] = min(txpwr->cck[j], local_constraint_qdbm); /* 20 MHz Legacy OFDM SISO */ for (j = 0; j < WL_TX_POWER_OFDM_NUM; j++) txpwr->ofdm[j] = min(txpwr->ofdm[j], local_constraint_qdbm); /* 20 MHz Legacy OFDM CDD */ for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++) txpwr->ofdm_cdd[j] = min(txpwr->ofdm_cdd[j], local_constraint_qdbm); /* 40 MHz Legacy OFDM SISO */ for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++) txpwr->ofdm_40_siso[j] = min(txpwr->ofdm_40_siso[j], local_constraint_qdbm); /* 40 MHz Legacy OFDM CDD */ for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++) txpwr->ofdm_40_cdd[j] = min(txpwr->ofdm_40_cdd[j], local_constraint_qdbm); /* 20MHz MCS 0-7 SISO */ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) 
txpwr->mcs_20_siso[j] = min(txpwr->mcs_20_siso[j], local_constraint_qdbm); /* 20MHz MCS 0-7 CDD */ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) txpwr->mcs_20_cdd[j] = min(txpwr->mcs_20_cdd[j], local_constraint_qdbm); /* 20MHz MCS 0-7 STBC */ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) txpwr->mcs_20_stbc[j] = min(txpwr->mcs_20_stbc[j], local_constraint_qdbm); /* 20MHz MCS 8-15 MIMO */ for (j = 0; j < BRCMS_NUM_RATES_MCS_2_STREAM; j++) txpwr->mcs_20_mimo[j] = min(txpwr->mcs_20_mimo[j], local_constraint_qdbm); /* 40MHz MCS 0-7 SISO */ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) txpwr->mcs_40_siso[j] = min(txpwr->mcs_40_siso[j], local_constraint_qdbm); /* 40MHz MCS 0-7 CDD */ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) txpwr->mcs_40_cdd[j] = min(txpwr->mcs_40_cdd[j], local_constraint_qdbm); /* 40MHz MCS 0-7 STBC */ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) txpwr->mcs_40_stbc[j] = min(txpwr->mcs_40_stbc[j], local_constraint_qdbm); /* 40MHz MCS 8-15 MIMO */ for (j = 0; j < BRCMS_NUM_RATES_MCS_2_STREAM; j++) txpwr->mcs_40_mimo[j] = min(txpwr->mcs_40_mimo[j], local_constraint_qdbm); /* 40MHz MCS 32 */ txpwr->mcs32 = min(txpwr->mcs32, local_constraint_qdbm); } /* * set the driver's current country and regulatory information * using a country code as the source. Look up built in country * information found with the country code. 
*/ static void brcms_c_set_country(struct brcms_cm_info *wlc_cm, const struct brcms_regd *regd) { struct brcms_c_info *wlc = wlc_cm->wlc; if ((wlc->pub->_n_enab & SUPPORT_11N) != wlc->protection->nmode_user) brcms_c_set_nmode(wlc); brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]); brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]); brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false); return; } struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc) { struct brcms_cm_info *wlc_cm; struct brcms_pub *pub = wlc->pub; struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom; const char *ccode = sprom->alpha2; int ccode_len = sizeof(sprom->alpha2); wlc_cm = kzalloc(sizeof(struct brcms_cm_info), GFP_ATOMIC); if (wlc_cm == NULL) return NULL; wlc_cm->pub = pub; wlc_cm->wlc = wlc; wlc->cmi = wlc_cm; /* store the country code for passing up as a regulatory hint */ wlc_cm->world_regd = brcms_world_regd(ccode, ccode_len); if (brcms_c_country_valid(ccode)) strncpy(wlc->pub->srom_ccode, ccode, ccode_len); /* * If no custom world domain is found in the SROM, use the * default "X2" domain. 
*/ if (!wlc_cm->world_regd) { wlc_cm->world_regd = brcms_default_world_regd(); ccode = wlc_cm->world_regd->regdomain->alpha2; ccode_len = BRCM_CNTRY_BUF_SZ - 1; } /* save default country for exiting 11d regulatory mode */ strncpy(wlc->country_default, ccode, ccode_len); /* initialize autocountry_default to driver default */ strncpy(wlc->autocountry_default, ccode, ccode_len); brcms_c_set_country(wlc_cm, wlc_cm->world_regd); return wlc_cm; } void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm) { kfree(wlc_cm); } void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, u8 local_constraint_qdbm) { struct brcms_c_info *wlc = wlc_cm->wlc; struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.chandef.chan; struct txpwr_limits txpwr; brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr); brcms_c_channel_min_txpower_limits_with_local_constraint( wlc_cm, &txpwr, local_constraint_qdbm ); /* set or restore gmode as required by regulatory */ if (ch->flags & IEEE80211_CHAN_NO_OFDM) brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false); else brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false); brcms_b_set_chanspec(wlc->hw, chanspec, !!(ch->flags & IEEE80211_CHAN_PASSIVE_SCAN), &txpwr); } void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec, struct txpwr_limits *txpwr) { struct brcms_c_info *wlc = wlc_cm->wlc; struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.chandef.chan; uint i; uint chan; int maxpwr; int delta; const struct country_info *country; struct brcms_band *band; int conducted_max = BRCMS_TXPWR_MAX; const struct locale_mimo_info *li_mimo; int maxpwr20, maxpwr40; int maxpwr_idx; uint j; memset(txpwr, 0, sizeof(struct txpwr_limits)); if (WARN_ON(!ch)) return; country = &wlc_cm->world_regd->country; chan = CHSPEC_CHANNEL(chanspec); band = wlc->bandstate[chspec_bandunit(chanspec)]; li_mimo = (band->bandtype == BRCM_BAND_5G) ? 
brcms_c_get_mimo_5g(country->locale_mimo_5G) : brcms_c_get_mimo_2g(country->locale_mimo_2G); delta = band->antgain; if (band->bandtype == BRCM_BAND_2G) conducted_max = QDB(22); maxpwr = QDB(ch->max_power) - delta; maxpwr = max(maxpwr, 0); maxpwr = min(maxpwr, conducted_max); /* CCK txpwr limits for 2.4G band */ if (band->bandtype == BRCM_BAND_2G) { for (i = 0; i < BRCMS_NUM_RATES_CCK; i++) txpwr->cck[i] = (u8) maxpwr; } for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) { txpwr->ofdm[i] = (u8) maxpwr; /* * OFDM 40 MHz SISO has the same power as the corresponding * MCS0-7 rate unless overriden by the locale specific code. * We set this value to 0 as a flag (presumably 0 dBm isn't * a possibility) and then copy the MCS0-7 value to the 40 MHz * value if it wasn't explicitly set. */ txpwr->ofdm_40_siso[i] = 0; txpwr->ofdm_cdd[i] = (u8) maxpwr; txpwr->ofdm_40_cdd[i] = 0; } delta = 0; if (band->antgain > QDB(6)) delta = band->antgain - QDB(6); /* Excess over 6 dB */ if (band->bandtype == BRCM_BAND_2G) maxpwr_idx = (chan - 1); else maxpwr_idx = CHANNEL_POWER_IDX_5G(chan); maxpwr20 = li_mimo->maxpwr20[maxpwr_idx]; maxpwr40 = li_mimo->maxpwr40[maxpwr_idx]; maxpwr20 = maxpwr20 - delta; maxpwr20 = max(maxpwr20, 0); maxpwr40 = maxpwr40 - delta; maxpwr40 = max(maxpwr40, 0); /* Fill in the MCS 0-7 (SISO) rates */ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { /* * 20 MHz has the same power as the corresponding OFDM rate * unless overriden by the locale specific code. 
*/ txpwr->mcs_20_siso[i] = txpwr->ofdm[i]; txpwr->mcs_40_siso[i] = 0; } /* Fill in the MCS 0-7 CDD rates */ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { txpwr->mcs_20_cdd[i] = (u8) maxpwr20; txpwr->mcs_40_cdd[i] = (u8) maxpwr40; } /* * These locales have SISO expressed in the * table and override CDD later */ if (li_mimo == &locale_bn) { if (li_mimo == &locale_bn) { maxpwr20 = QDB(16); maxpwr40 = 0; if (chan >= 3 && chan <= 11) maxpwr40 = QDB(16); } for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { txpwr->mcs_20_siso[i] = (u8) maxpwr20; txpwr->mcs_40_siso[i] = (u8) maxpwr40; } } /* Fill in the MCS 0-7 STBC rates */ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { txpwr->mcs_20_stbc[i] = 0; txpwr->mcs_40_stbc[i] = 0; } /* Fill in the MCS 8-15 SDM rates */ for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++) { txpwr->mcs_20_mimo[i] = (u8) maxpwr20; txpwr->mcs_40_mimo[i] = (u8) maxpwr40; } /* Fill in MCS32 */ txpwr->mcs32 = (u8) maxpwr40; for (i = 0, j = 0; i < BRCMS_NUM_RATES_OFDM; i++, j++) { if (txpwr->ofdm_40_cdd[i] == 0) txpwr->ofdm_40_cdd[i] = txpwr->mcs_40_cdd[j]; if (i == 0) { i = i + 1; if (txpwr->ofdm_40_cdd[i] == 0) txpwr->ofdm_40_cdd[i] = txpwr->mcs_40_cdd[j]; } } /* * Copy the 40 MHZ MCS 0-7 CDD value to the 40 MHZ MCS 0-7 SISO * value if it wasn't provided explicitly. */ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { if (txpwr->mcs_40_siso[i] == 0) txpwr->mcs_40_siso[i] = txpwr->mcs_40_cdd[i]; } for (i = 0, j = 0; i < BRCMS_NUM_RATES_OFDM; i++, j++) { if (txpwr->ofdm_40_siso[i] == 0) txpwr->ofdm_40_siso[i] = txpwr->mcs_40_siso[j]; if (i == 0) { i = i + 1; if (txpwr->ofdm_40_siso[i] == 0) txpwr->ofdm_40_siso[i] = txpwr->mcs_40_siso[j]; } } /* * Copy the 20 and 40 MHz MCS0-7 CDD values to the corresponding * STBC values if they weren't provided explicitly. 
*/ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { if (txpwr->mcs_20_stbc[i] == 0) txpwr->mcs_20_stbc[i] = txpwr->mcs_20_cdd[i]; if (txpwr->mcs_40_stbc[i] == 0) txpwr->mcs_40_stbc[i] = txpwr->mcs_40_cdd[i]; } return; } /* * Verify the chanspec is using a legal set of parameters, i.e. that the * chanspec specified a band, bw, ctl_sb and channel and that the * combination could be legal given any set of circumstances. * RETURNS: true is the chanspec is malformed, false if it looks good. */ static bool brcms_c_chspec_malformed(u16 chanspec) { /* must be 2G or 5G band */ if (!CHSPEC_IS5G(chanspec) && !CHSPEC_IS2G(chanspec)) return true; /* must be 20 or 40 bandwidth */ if (!CHSPEC_IS40(chanspec) && !CHSPEC_IS20(chanspec)) return true; /* 20MHZ b/w must have no ctl sb, 40 must have a ctl sb */ if (CHSPEC_IS20(chanspec)) { if (!CHSPEC_SB_NONE(chanspec)) return true; } else if (!CHSPEC_SB_UPPER(chanspec) && !CHSPEC_SB_LOWER(chanspec)) { return true; } return false; } /* * Validate the chanspec for this locale, for 40MHZ we need to also * check that the sidebands are valid 20MZH channels in this locale * and they are also a legal HT combination */ static bool brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec) { struct brcms_c_info *wlc = wlc_cm->wlc; u8 channel = CHSPEC_CHANNEL(chspec); /* check the chanspec */ if (brcms_c_chspec_malformed(chspec)) { brcms_err(wlc->hw->d11core, "wl%d: malformed chanspec 0x%x\n", wlc->pub->unit, chspec); return false; } if (CHANNEL_BANDUNIT(wlc_cm->wlc, channel) != chspec_bandunit(chspec)) return false; return true; } bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec) { return brcms_c_valid_chanspec_ext(wlc_cm, chspec); } static bool brcms_is_radar_freq(u16 center_freq) { return center_freq >= 5260 && center_freq <= 5700; } static void brcms_reg_apply_radar_flags(struct wiphy *wiphy) { struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; int i; sband = 
wiphy->bands[IEEE80211_BAND_5GHZ]; if (!sband) return; for (i = 0; i < sband->n_channels; i++) { ch = &sband->channels[i]; if (!brcms_is_radar_freq(ch->center_freq)) continue; /* * All channels in this range should be passive and have * DFS enabled. */ if (!(ch->flags & IEEE80211_CHAN_DISABLED)) ch->flags |= IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_PASSIVE_SCAN; } } static void brcms_reg_apply_beaconing_flags(struct wiphy *wiphy, enum nl80211_reg_initiator initiator) { struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; const struct ieee80211_reg_rule *rule; int band, i; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { ch = &sband->channels[i]; if (ch->flags & (IEEE80211_CHAN_DISABLED | IEEE80211_CHAN_RADAR)) continue; if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) { rule = freq_reg_info(wiphy, ch->center_freq); if (IS_ERR(rule)) continue; if (!(rule->flags & NL80211_RRF_NO_IBSS)) ch->flags &= ~IEEE80211_CHAN_NO_IBSS; if (!(rule->flags & NL80211_RRF_PASSIVE_SCAN)) ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; } else if (ch->beacon_found) { ch->flags &= ~(IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_PASSIVE_SCAN); } } } } static void brcms_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct brcms_info *wl = hw->priv; struct brcms_c_info *wlc = wl->wlc; struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; int band, i; bool ch_found = false; brcms_reg_apply_radar_flags(wiphy); if (request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) brcms_reg_apply_beaconing_flags(wiphy, request->initiator); /* Disable radio if all channels disallowed by regulatory */ for (band = 0; !ch_found && band < IEEE80211_NUM_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; !ch_found && i < sband->n_channels; i++) { ch = 
&sband->channels[i]; if (!(ch->flags & IEEE80211_CHAN_DISABLED)) ch_found = true; } } if (ch_found) { mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE); } else { mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE); brcms_err(wlc->hw->d11core, "wl%d: %s: no valid channel for \"%s\"\n", wlc->pub->unit, __func__, request->alpha2); } if (wlc->pub->_nbands > 1 || wlc->band->bandtype == BRCM_BAND_2G) wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi, brcms_c_japan_ccode(request->alpha2)); } void brcms_c_regd_init(struct brcms_c_info *wlc) { struct wiphy *wiphy = wlc->wiphy; const struct brcms_regd *regd = wlc->cmi->world_regd; struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; struct brcms_chanvec sup_chan; struct brcms_band *band; int band_idx, i; /* Disable any channels not supported by the phy */ for (band_idx = 0; band_idx < wlc->pub->_nbands; band_idx++) { band = wlc->bandstate[band_idx]; wlc_phy_chanspec_band_validch(band->pi, band->bandtype, &sup_chan); if (band_idx == BAND_2G_INDEX) sband = wiphy->bands[IEEE80211_BAND_2GHZ]; else sband = wiphy->bands[IEEE80211_BAND_5GHZ]; for (i = 0; i < sband->n_channels; i++) { ch = &sband->channels[i]; if (!isset(sup_chan.vec, ch->hw_value)) ch->flags |= IEEE80211_CHAN_DISABLED; } } wlc->wiphy->reg_notifier = brcms_reg_notifier; wlc->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_STRICT_REGULATORY; wiphy_apply_custom_regulatory(wlc->wiphy, regd->regdomain); brcms_reg_apply_beaconing_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER); }
gpl-2.0
ktoonsez/KTSGS6
drivers/gpu/drm/nouveau/nouveau_vga.c
2327
2476
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

#include "nouveau_drm.h"
#include "nouveau_acpi.h"
#include "nouveau_fbcon.h"
#include "nouveau_vga.h"

/*
 * VGA arbiter decode callback: program the hardware's VGA decode enable
 * (the register differs per chipset generation) and report which legacy
 * resources we decode in each state.
 */
static unsigned int
nouveau_vga_set_decode(void *priv, bool state)
{
	struct nouveau_device *device = nouveau_dev(priv);

	/* NV40 and newer use a different VGA enable register */
	if (device->chipset >= 0x40)
		nv_wr32(device, 0x088054, state);
	else
		nv_wr32(device, 0x001854, state);

	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/*
 * vga_switcheroo power-state callback: resume the device and re-enable
 * output polling when switched on; quiesce polling, run the Optimus DSM
 * and suspend when switched off.
 */
static void
nouveau_switcheroo_set_state(struct pci_dev *pdev,
			     enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (state == VGA_SWITCHEROO_ON) {
		/* informational, not an error: log at KERN_INFO */
		printk(KERN_INFO "VGA switcheroo: switched nouveau on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		nouveau_pmops_resume(&pdev->dev);
		drm_kms_helper_poll_enable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		printk(KERN_INFO "VGA switcheroo: switched nouveau off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		drm_kms_helper_poll_disable(dev);
		nouveau_switcheroo_optimus_dsm();
		nouveau_pmops_suspend(&pdev->dev);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/* Re-detect outputs after a GPU switch (fbcon may need a new mode). */
static void
nouveau_switcheroo_reprobe(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	nouveau_fbcon_output_poll_changed(dev);
}

/* A switch is only safe while no userspace client holds the device open. */
static bool
nouveau_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);

	return can_switch;
}

static const struct vga_switcheroo_client_ops
nouveau_switcheroo_ops = {
	.set_gpu_state = nouveau_switcheroo_set_state,
	.reprobe = nouveau_switcheroo_reprobe,
	.can_switch = nouveau_switcheroo_can_switch,
};

/* Register with the VGA arbiter and vga_switcheroo at driver load. */
void
nouveau_vga_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
	vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
}

/* Undo nouveau_vga_init(); NULL arguments unregister the arbiter client. */
void
nouveau_vga_fini(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	vga_switcheroo_unregister_client(dev->pdev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);
}

/* Complete any switch that was deferred until the last client closed. */
void
nouveau_vga_lastclose(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}
gpl-2.0
ISTweak/android_kernel_sharp_is15sh
drivers/media/rc/keymaps/rc-videomate-s350.c
3095
2091
/* videomate-s350.h - Keytable for videomate_s350 Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>

/*
 * Scancode -> Linux keycode table for the Compro VideoMate S350 remote.
 * Entries without a comment map to the key named by the KEY_* constant.
 */
static struct rc_map_table videomate_s350[] = {
	{ 0x00, KEY_TV},
	{ 0x01, KEY_DVD},
	{ 0x04, KEY_RECORD},
	{ 0x05, KEY_VIDEO},	/* TV/Video */
	{ 0x07, KEY_STOP},
	{ 0x08, KEY_PLAYPAUSE},
	{ 0x0a, KEY_REWIND},
	{ 0x0f, KEY_FASTFORWARD},
	{ 0x10, KEY_CHANNELUP},
	{ 0x12, KEY_VOLUMEUP},
	{ 0x13, KEY_CHANNELDOWN},
	{ 0x14, KEY_MUTE},
	{ 0x15, KEY_VOLUMEDOWN},
	{ 0x16, KEY_1},
	{ 0x17, KEY_2},
	{ 0x18, KEY_3},
	{ 0x19, KEY_4},
	{ 0x1a, KEY_5},
	{ 0x1b, KEY_6},
	{ 0x1c, KEY_7},
	{ 0x1d, KEY_8},
	{ 0x1e, KEY_9},
	{ 0x1f, KEY_0},
	{ 0x21, KEY_SLEEP},
	{ 0x24, KEY_ZOOM},
	{ 0x25, KEY_LAST},	/* Recall */
	{ 0x26, KEY_SUBTITLE},	/* CC */
	{ 0x27, KEY_LANGUAGE},	/* MTS */
	{ 0x29, KEY_CHANNEL},	/* SURF */
	{ 0x2b, KEY_A},
	{ 0x2c, KEY_B},
	{ 0x2f, KEY_CAMERA},	/* Snapshot */
	{ 0x23, KEY_RADIO},
	{ 0x02, KEY_PREVIOUSSONG},
	{ 0x06, KEY_NEXTSONG},
	{ 0x03, KEY_EPG},
	{ 0x09, KEY_SETUP},
	{ 0x22, KEY_BACKSPACE},
	{ 0x0c, KEY_UP},
	{ 0x0e, KEY_DOWN},
	{ 0x0b, KEY_LEFT},
	{ 0x0d, KEY_RIGHT},
	{ 0x11, KEY_ENTER},
	{ 0x20, KEY_TEXT},
};

/* rc-core registration descriptor wrapping the table above */
static struct rc_map_list videomate_s350_map = {
	.map = {
		.scan    = videomate_s350,
		.size    = ARRAY_SIZE(videomate_s350),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_VIDEOMATE_S350,
	}
};

/* Register the keymap with the rc-core keymap registry at module load. */
static int __init init_rc_map_videomate_s350(void)
{
	return rc_map_register(&videomate_s350_map);
}

/* Remove the keymap from the registry at module unload. */
static void __exit exit_rc_map_videomate_s350(void)
{
	rc_map_unregister(&videomate_s350_map);
}

module_init(init_rc_map_videomate_s350)
module_exit(exit_rc_map_videomate_s350)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
Snuzzo/funky_tuna
drivers/media/rc/keymaps/rc-gadmei-rm008z.c
3095
2068
/* gadmei-rm008z.h - Keytable for gadmei_rm008z Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>

/* GADMEI UTV330+ RM008Z remote
   Shine Liu <shinel@foxmail.com>

   Scancode -> Linux keycode table; per-entry comments name the key as
   printed on the remote. */
static struct rc_map_table gadmei_rm008z[] = {
	{ 0x14, KEY_POWER2},		/* POWER OFF */
	{ 0x0c, KEY_MUTE},		/* MUTE */

	{ 0x18, KEY_TV},		/* TV */
	{ 0x0e, KEY_VIDEO},		/* AV */
	{ 0x0b, KEY_AUDIO},		/* SV */
	{ 0x0f, KEY_RADIO},		/* FM */

	{ 0x00, KEY_1},
	{ 0x01, KEY_2},
	{ 0x02, KEY_3},
	{ 0x03, KEY_4},
	{ 0x04, KEY_5},
	{ 0x05, KEY_6},
	{ 0x06, KEY_7},
	{ 0x07, KEY_8},
	{ 0x08, KEY_9},
	{ 0x09, KEY_0},
	{ 0x0a, KEY_INFO},		/* OSD */
	{ 0x1c, KEY_BACKSPACE},		/* LAST */

	{ 0x0d, KEY_PLAY},		/* PLAY */
	{ 0x1e, KEY_CAMERA},		/* SNAPSHOT */
	{ 0x1a, KEY_RECORD},		/* RECORD */
	{ 0x17, KEY_STOP},		/* STOP */

	{ 0x1f, KEY_UP},		/* UP */
	{ 0x44, KEY_DOWN},		/* DOWN */
	{ 0x46, KEY_TAB},		/* BACK */
	{ 0x4a, KEY_ZOOM},		/* FULLSCREEN */

	{ 0x10, KEY_VOLUMEUP},		/* VOLUMEUP */
	{ 0x11, KEY_VOLUMEDOWN},	/* VOLUMEDOWN */
	{ 0x12, KEY_CHANNELUP},		/* CHANNELUP */
	{ 0x13, KEY_CHANNELDOWN},	/* CHANNELDOWN */
	{ 0x15, KEY_ENTER},		/* OK */
};

/* rc-core registration descriptor wrapping the table above */
static struct rc_map_list gadmei_rm008z_map = {
	.map = {
		.scan    = gadmei_rm008z,
		.size    = ARRAY_SIZE(gadmei_rm008z),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_GADMEI_RM008Z,
	}
};

/* Register the keymap with the rc-core keymap registry at module load. */
static int __init init_rc_map_gadmei_rm008z(void)
{
	return rc_map_register(&gadmei_rm008z_map);
}

/* Remove the keymap from the registry at module unload. */
static void __exit exit_rc_map_gadmei_rm008z(void)
{
	rc_map_unregister(&gadmei_rm008z_map);
}

module_init(init_rc_map_gadmei_rm008z)
module_exit(exit_rc_map_gadmei_rm008z)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
araca/Zen-Kernel-Huawei-P7
sound/drivers/mpu401/mpu401.c
4631
7763
/*
 * Driver for generic MPU-401 boards (UART mode only)
 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 * Copyright (c) 2004 by Castet Matthieu <castet.matthieu@free.fr>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/pnp.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/moduleparam.h>
#include <sound/core.h>
#include <sound/mpu401.h>
#include <sound/initval.h>

MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("MPU-401 UART");
MODULE_LICENSE("GPL");

/* Per-card module parameters, one slot per possible ALSA card. */
static int index[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = -2}; /* exclude the first card */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;	/* ID for this card */
static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE;	/* Enable this card */
#ifdef CONFIG_PNP
static int pnp[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1};
#endif
static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;	/* MPU-401 port number */
static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;	/* MPU-401 IRQ */
/* obsolete knob, kept only so old option lines don't break module load */
static int uart_enter[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1};

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for MPU-401 device.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for MPU-401 device.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable MPU-401 device.");
#ifdef CONFIG_PNP
module_param_array(pnp, bool, NULL, 0444);
MODULE_PARM_DESC(pnp, "PnP detection for MPU-401 device.");
#endif
module_param_array(port, long, NULL, 0444);
MODULE_PARM_DESC(port, "Port # for MPU-401 device.");
module_param_array(irq, int, NULL, 0444);
MODULE_PARM_DESC(irq, "IRQ # for MPU-401 device.");
module_param_array(uart_enter, bool, NULL, 0444);
MODULE_PARM_DESC(uart_enter, "Issue UART_ENTER command at open.");

/* platform devices created for non-PnP slots; indexed like the params */
static struct platform_device *platform_devices[SNDRV_CARDS];
static int pnp_registered;		/* nonzero once the PnP driver registered */
static unsigned int snd_mpu401_devices;	/* number of cards actually created */

/*
 * Allocate an ALSA card for slot 'dev' and attach an MPU-401 UART
 * rawmidi device at port[dev]/irq[dev] (irq < 0 means polled mode).
 * On success *rcard holds the unregistered card; on failure the card
 * is freed and a negative errno is returned.
 */
static int snd_mpu401_create(int dev, struct snd_card **rcard)
{
	struct snd_card *card;
	int err;

	if (!uart_enter[dev])
		snd_printk(KERN_ERR "the uart_enter option is obsolete; remove it\n");

	*rcard = NULL;
	err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
	if (err < 0)
		return err;
	strcpy(card->driver, "MPU-401 UART");
	strcpy(card->shortname, card->driver);
	sprintf(card->longname, "%s at %#lx, ", card->shortname, port[dev]);
	if (irq[dev] >= 0) {
		sprintf(card->longname + strlen(card->longname), "irq %d", irq[dev]);
	} else {
		strcat(card->longname, "polled");
	}

	err = snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401, port[dev], 0,
				  irq[dev], irq[dev] >= 0 ? IRQF_DISABLED : 0,
				  NULL);
	if (err < 0) {
		printk(KERN_ERR "MPU401 not detected at 0x%lx\n", port[dev]);
		goto _err;
	}

	*rcard = card;
	return 0;

 _err:
	snd_card_free(card);
	return err;
}

/*
 * Platform-bus probe: requires an explicit port (and an explicit or
 * disabled IRQ), then creates and registers the card.
 */
static int __devinit snd_mpu401_probe(struct platform_device *devptr)
{
	int dev = devptr->id;
	int err;
	struct snd_card *card;

	if (port[dev] == SNDRV_AUTO_PORT) {
		snd_printk(KERN_ERR "specify port\n");
		return -EINVAL;
	}
	if (irq[dev] == SNDRV_AUTO_IRQ) {
		snd_printk(KERN_ERR "specify or disable IRQ\n");
		return -EINVAL;
	}
	err = snd_mpu401_create(dev, &card);
	if (err < 0)
		return err;
	snd_card_set_dev(card, &devptr->dev);
	if ((err = snd_card_register(card)) < 0) {
		snd_card_free(card);
		return err;
	}
	platform_set_drvdata(devptr, card);
	return 0;
}

/* Platform-bus remove: free the card stored in drvdata. */
static int __devexit snd_mpu401_remove(struct platform_device *devptr)
{
	snd_card_free(platform_get_drvdata(devptr));
	platform_set_drvdata(devptr, NULL);
	return 0;
}

#define SND_MPU401_DRIVER	"snd_mpu401"

static struct platform_driver snd_mpu401_driver = {
	.probe		= snd_mpu401_probe,
	.remove		= __devexit_p(snd_mpu401_remove),
	.driver		= {
		.name	= SND_MPU401_DRIVER
	},
};


#ifdef CONFIG_PNP

#define IO_EXTENT 2	/* length of the MPU-401 I/O region (data + status) */

static struct pnp_device_id snd_mpu401_pnpids[] = {
	{ .id = "PNPb006" },
	{ .id = "" }
};

MODULE_DEVICE_TABLE(pnp, snd_mpu401_pnpids);

/*
 * Fill port[dev]/irq[dev] from the PnP resources; a missing IRQ is
 * tolerated (the device falls back to polled mode, irq = -1).
 */
static int __devinit snd_mpu401_pnp(int dev, struct pnp_dev *device,
				    const struct pnp_device_id *id)
{
	if (!pnp_port_valid(device, 0) ||
	    pnp_port_flags(device, 0) & IORESOURCE_DISABLED) {
		snd_printk(KERN_ERR "no PnP port\n");
		return -ENODEV;
	}
	if (pnp_port_len(device, 0) < IO_EXTENT) {
		snd_printk(KERN_ERR "PnP port length is %llu, expected %d\n",
			   (unsigned long long)pnp_port_len(device, 0),
			   IO_EXTENT);
		return -ENODEV;
	}
	port[dev] = pnp_port_start(device, 0);

	if (!pnp_irq_valid(device, 0) ||
	    pnp_irq_flags(device, 0) & IORESOURCE_DISABLED) {
		snd_printk(KERN_WARNING "no PnP irq, using polling\n");
		irq[dev] = -1;
	} else {
		irq[dev] = pnp_irq(device, 0);
	}
	return 0;
}

/*
 * PnP probe: claim the first enabled slot with pnp[] set and build a
 * card for it.  'dev' is static so successive probes continue scanning
 * from where the previous one stopped.
 */
static int __devinit snd_mpu401_pnp_probe(struct pnp_dev *pnp_dev,
					  const struct pnp_device_id *id)
{
	static int dev;
	struct snd_card *card;
	int err;

	for ( ; dev < SNDRV_CARDS; ++dev) {
		if (!enable[dev] || !pnp[dev])
			continue;
		err = snd_mpu401_pnp(dev, pnp_dev, id);
		if (err < 0)
			return err;
		err = snd_mpu401_create(dev, &card);
		if (err < 0)
			return err;
		/* NOTE(review): the struct device is attached only after
		 * snd_card_register() here, unlike the platform path which
		 * sets it before registering -- confirm this ordering is
		 * intended on this kernel. */
		if ((err = snd_card_register(card)) < 0) {
			snd_card_free(card);
			return err;
		}
		snd_card_set_dev(card, &pnp_dev->dev);
		pnp_set_drvdata(pnp_dev, card);
		snd_mpu401_devices++;
		++dev;
		return 0;
	}
	return -ENODEV;
}

/* PnP remove: disconnect now, free once the last file handle closes. */
static void __devexit snd_mpu401_pnp_remove(struct pnp_dev *dev)
{
	struct snd_card *card = (struct snd_card *) pnp_get_drvdata(dev);

	snd_card_disconnect(card);
	snd_card_free_when_closed(card);
}

static struct pnp_driver snd_mpu401_pnp_driver = {
	.name = "mpu401",
	.id_table = snd_mpu401_pnpids,
	.probe = snd_mpu401_pnp_probe,
	.remove = __devexit_p(snd_mpu401_pnp_remove),
};
#else
/* without CONFIG_PNP an empty driver keeps the register call harmless */
static struct pnp_driver snd_mpu401_pnp_driver;
#endif

/* Tear down everything init registered: PnP driver, platform devices,
 * platform driver.  Safe to call on partial initialization. */
static void snd_mpu401_unregister_all(void)
{
	int i;

	if (pnp_registered)
		pnp_unregister_driver(&snd_mpu401_pnp_driver);
	for (i = 0; i < ARRAY_SIZE(platform_devices); ++i)
		platform_device_unregister(platform_devices[i]);
	platform_driver_unregister(&snd_mpu401_driver);
}

/*
 * Module init: register the platform driver, instantiate a platform
 * device for every enabled non-PnP slot, then register the PnP driver.
 * Fails with -ENODEV if no card of either kind came up.
 */
static int __init alsa_card_mpu401_init(void)
{
	int i, err;

	if ((err = platform_driver_register(&snd_mpu401_driver)) < 0)
		return err;

	for (i = 0; i < SNDRV_CARDS; i++) {
		struct platform_device *device;
		if (!enable[i])
			continue;
#ifdef CONFIG_PNP
		if (pnp[i])
			continue;	/* slot handled by the PnP path */
#endif
		device = platform_device_register_simple(SND_MPU401_DRIVER,
							 i, NULL, 0);
		if (IS_ERR(device))
			continue;
		/* probe failed (no drvdata set) -> drop the device again */
		if (!platform_get_drvdata(device)) {
			platform_device_unregister(device);
			continue;
		}
		platform_devices[i] = device;
		snd_mpu401_devices++;
	}
	err = pnp_register_driver(&snd_mpu401_pnp_driver);
	if (!err)
		pnp_registered = 1;

	if (!snd_mpu401_devices) {
#ifdef MODULE
		printk(KERN_ERR "MPU-401 device not found or device busy\n");
#endif
		snd_mpu401_unregister_all();
		return -ENODEV;
	}
	return 0;
}

static void __exit alsa_card_mpu401_exit(void)
{
	snd_mpu401_unregister_all();
}

module_init(alsa_card_mpu401_init)
module_exit(alsa_card_mpu401_exit)
gpl-2.0
StelixROM/android_kernel_htc_msm8974
drivers/ide/buddha.c
7703
5699
/*
 *  Amiga Buddha, Catweasel and X-Surf IDE Driver
 *
 *	Copyright (C) 1997, 2001 by Geert Uytterhoeven and others
 *
 *  This driver was written based on the specifications in README.buddha and
 *  the X-Surf info from Inside_XSurf.txt available at
 *  http://www.jschoenfeld.com
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of this archive for
 *  more details.
 *
 *  TODO:
 *    - test it :-)
 *    - tune the timings using the speed-register
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/zorro.h>
#include <linux/ide.h>
#include <linux/init.h>
#include <linux/module.h>

#include <asm/amigahw.h>
#include <asm/amigaints.h>


    /*
     *  The Buddha has 2 IDE interfaces, the Catweasel has 3, X-Surf has 2
     */

#define BUDDHA_NUM_HWIFS	2
#define CATWEASEL_NUM_HWIFS	3
#define XSURF_NUM_HWIFS		2

#define MAX_NUM_HWIFS		3

    /*
     *  Bases of the IDE interfaces (relative to the board address)
     */

#define BUDDHA_BASE1	0x800
#define BUDDHA_BASE2	0xa00
#define BUDDHA_BASE3	0xc00

#define XSURF_BASE1	0xb000	/* 2.5" Interface */
#define XSURF_BASE2	0xd000	/* 3.5" Interface */

static u_int buddha_bases[CATWEASEL_NUM_HWIFS] __initdata = {
	BUDDHA_BASE1, BUDDHA_BASE2, BUDDHA_BASE3
};

static u_int xsurf_bases[XSURF_NUM_HWIFS] __initdata = {
	XSURF_BASE1, XSURF_BASE2
};

    /*
     *  Offsets from one of the above bases
     */

#define BUDDHA_CONTROL	0x11a

    /*
     *  Other registers
     */

#define BUDDHA_IRQ1	0xf00	/* MSB = 1, Harddisk is source of */
#define BUDDHA_IRQ2	0xf40	/* interrupt */
#define BUDDHA_IRQ3	0xf80

#define XSURF_IRQ1	0x7e
#define XSURF_IRQ2	0x7e

static int buddha_irqports[CATWEASEL_NUM_HWIFS] __initdata = {
	BUDDHA_IRQ1, BUDDHA_IRQ2, BUDDHA_IRQ3
};

static int xsurf_irqports[XSURF_NUM_HWIFS] __initdata = {
	XSURF_IRQ1, XSURF_IRQ2
};

#define BUDDHA_IRQ_MR	0xfc0	/* master interrupt enable */

    /*
     *  Board information
     */

typedef enum BuddhaType_Enum {
	BOARD_BUDDHA, BOARD_CATWEASEL, BOARD_XSURF
} BuddhaType;

static const char *buddha_board_name[] = { "Buddha", "Catweasel", "X-Surf" };

    /*
     *  Check and acknowledge the interrupt status
     */

/* Return 1 if this hwif raised the interrupt (MSB of the IRQ register). */
static int buddha_test_irq(ide_hwif_t *hwif)
{
	unsigned char ch;

	ch = z_readb(hwif->io_ports.irq_addr);
	if (!(ch & 0x80))
		return 0;
	return 1;
}

static void xsurf_clear_irq(ide_drive_t *drive)
{
	/*
	 * X-Surf needs 0 written to IRQ register to ensure ISA bit A11 stays at 0
	 */
	z_writeb(0, drive->hwif->io_ports.irq_addr);
}

/* Fill an ide_hw with the register layout shared by all three boards:
 * task-file registers are 4 bytes apart starting at base + 2. */
static void __init buddha_setup_ports(struct ide_hw *hw, unsigned long base,
				      unsigned long ctl, unsigned long irq_port)
{
	int i;

	memset(hw, 0, sizeof(*hw));

	hw->io_ports.data_addr = base;

	for (i = 1; i < 8; i++)
		hw->io_ports_array[i] = base + 2 + i * 4;

	hw->io_ports.ctl_addr = ctl;
	hw->io_ports.irq_addr = irq_port;

	hw->irq = IRQ_AMIGA_PORTS;
}

static const struct ide_port_ops buddha_port_ops = {
	.test_irq		= buddha_test_irq,
};

/* X-Surf additionally needs its IRQ register cleared on every interrupt */
static const struct ide_port_ops xsurf_port_ops = {
	.clear_irq		= xsurf_clear_irq,
	.test_irq		= buddha_test_irq,
};

static const struct ide_port_info buddha_port_info = {
	.port_ops		= &buddha_port_ops,
	.host_flags		= IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
	.irq_flags		= IRQF_SHARED,
	.chipset		= ide_generic,
};

    /*
     *  Probe for a Buddha or Catweasel IDE interface
     */

/*
 * Walk all Zorro expansion devices, claim every supported board's
 * register regions, and register its IDE ports with the IDE core.
 * Always returns 0; boards that cannot be claimed are skipped.
 */
static int __init buddha_init(void)
{
	struct zorro_dev *z = NULL;
	u_long buddha_board = 0;
	BuddhaType type;
	int buddha_num_hwifs, i;

	while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
		unsigned long board;
		struct ide_hw hw[MAX_NUM_HWIFS], *hws[MAX_NUM_HWIFS];
		struct ide_port_info d = buddha_port_info;

		if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) {
			buddha_num_hwifs = BUDDHA_NUM_HWIFS;
			type=BOARD_BUDDHA;
		} else if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_CATWEASEL) {
			buddha_num_hwifs = CATWEASEL_NUM_HWIFS;
			type=BOARD_CATWEASEL;
		} else if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF) {
			buddha_num_hwifs = XSURF_NUM_HWIFS;
			type=BOARD_XSURF;
			d.port_ops = &xsurf_port_ops;
		} else
			continue;

		board = z->resource.start;

		/* claim the MMIO regions; the X-Surf needs three separate
		 * ranges, unwound via the goto chain below on failure */
		if(type != BOARD_XSURF) {
			if (!request_mem_region(board+BUDDHA_BASE1, 0x800, "IDE"))
				continue;
		} else {
			if (!request_mem_region(board+XSURF_BASE1, 0x1000, "IDE"))
				continue;
			if (!request_mem_region(board+XSURF_BASE2, 0x1000, "IDE"))
				goto fail_base2;
			if (!request_mem_region(board+XSURF_IRQ1, 0x8, "IDE")) {
				release_mem_region(board+XSURF_BASE2, 0x1000);
fail_base2:
				release_mem_region(board+XSURF_BASE1, 0x1000);
				continue;
			}
		}
		buddha_board = ZTWO_VADDR(board);

		/* write to BUDDHA_IRQ_MR to enable the board IRQ */
		/* X-Surf doesn't have this.  IRQs are always on */
		if (type != BOARD_XSURF)
			z_writeb(0, buddha_board+BUDDHA_IRQ_MR);

		printk(KERN_INFO "ide: %s IDE controller\n",
		       buddha_board_name[type]);

		for (i = 0; i < buddha_num_hwifs; i++) {
			unsigned long base, ctl, irq_port;

			if (type != BOARD_XSURF) {
				base = buddha_board + buddha_bases[i];
				ctl = base + BUDDHA_CONTROL;
				irq_port = buddha_board + buddha_irqports[i];
			} else {
				base = buddha_board + xsurf_bases[i];
				/* X-Surf has no CS1* (Control/AltStat) */
				ctl = 0;
				irq_port = buddha_board + xsurf_irqports[i];
			}

			buddha_setup_ports(&hw[i], base, ctl, irq_port);

			hws[i] = &hw[i];
		}

		ide_host_add(&d, hws, i, NULL);
	}

	return 0;
}

module_init(buddha_init);

MODULE_LICENSE("GPL");
gpl-2.0
robreardon/android_kernel_motorola_olympus
arch/mips/loongson/common/reset.c
8727
1672
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * Copyright (C) 2009 Lemote, Inc. * Author: Zhangjin Wu, wuzhangjin@gmail.com */ #include <linux/init.h> #include <linux/pm.h> #include <asm/reboot.h> #include <loongson.h> static inline void loongson_reboot(void) { #ifndef CONFIG_CPU_JUMP_WORKAROUNDS ((void (*)(void))ioremap_nocache(LOONGSON_BOOT_BASE, 4)) (); #else void (*func)(void); func = (void *)ioremap_nocache(LOONGSON_BOOT_BASE, 4); __asm__ __volatile__( " .set noat \n" " jr %[func] \n" " .set at \n" : /* No outputs */ : [func] "r" (func)); #endif } static void loongson_restart(char *command) { /* do preparation for reboot */ mach_prepare_reboot(); /* reboot via jumping to boot base address */ loongson_reboot(); } static void loongson_poweroff(void) { mach_prepare_shutdown(); unreachable(); } static void loongson_halt(void) { pr_notice("\n\n** You can safely turn off the power now **\n\n"); while (1) { if (cpu_wait) cpu_wait(); } } static int __init mips_reboot_setup(void) { _machine_restart = loongson_restart; _machine_halt = loongson_halt; pm_power_off = loongson_poweroff; return 0; } arch_initcall(mips_reboot_setup);
gpl-2.0
kabata1975/android_kernel_c8690
arch/mips/loongson/common/mem.c
8727
2798
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/fs.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <asm/bootinfo.h> #include <loongson.h> #include <mem.h> #include <pci.h> void __init prom_init_memory(void) { add_memory_region(0x0, (memsize << 20), BOOT_MEM_RAM); add_memory_region(memsize << 20, LOONGSON_PCI_MEM_START - (memsize << 20), BOOT_MEM_RESERVED); #ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG { int bit; bit = fls(memsize + highmemsize); if (bit != ffs(memsize + highmemsize)) bit += 20; else bit = bit + 20 - 1; /* set cpu window3 to map CPU to DDR: 2G -> 2G */ LOONGSON_ADDRWIN_CPUTODDR(ADDRWIN_WIN3, 0x80000000ul, 0x80000000ul, (1 << bit)); mmiowb(); } #endif /* !CONFIG_CPU_SUPPORTS_ADDRWINCFG */ #ifdef CONFIG_64BIT if (highmemsize > 0) add_memory_region(LOONGSON_HIGHMEM_START, highmemsize << 20, BOOT_MEM_RAM); add_memory_region(LOONGSON_PCI_MEM_END + 1, LOONGSON_HIGHMEM_START - LOONGSON_PCI_MEM_END - 1, BOOT_MEM_RESERVED); #endif /* !CONFIG_64BIT */ } /* override of arch/mips/mm/cache.c: __uncached_access */ int __uncached_access(struct file *file, unsigned long addr) { if (file->f_flags & O_DSYNC) return 1; return addr >= __pa(high_memory) || ((addr >= LOONGSON_MMIO_MEM_START) && (addr < LOONGSON_MMIO_MEM_END)); } #ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED #include <linux/pci.h> #include <linux/sched.h> #include <asm/current.h> static unsigned long uca_start, uca_end; pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) { unsigned long offset = pfn << PAGE_SHIFT; unsigned long end = offset + size; if (__uncached_access(file, offset)) { if (uca_start && (offset >= uca_start) && (end <= uca_end)) return __pgprot((pgprot_val(vma_prot) & ~_CACHE_MASK) | 
_CACHE_UNCACHED_ACCELERATED); else return pgprot_noncached(vma_prot); } return vma_prot; } static int __init find_vga_mem_init(void) { struct pci_dev *dev = 0; struct resource *r; int idx; if (uca_start) return 0; for_each_pci_dev(dev) { if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) { r = &dev->resource[idx]; if (!r->start && r->end) continue; if (r->flags & IORESOURCE_IO) continue; if (r->flags & IORESOURCE_MEM) { uca_start = r->start; uca_end = r->end; return 0; } } } } return 0; } late_initcall(find_vga_mem_init); #endif /* !CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED */
gpl-2.0
intervigilium/android_kernel_htc_msm7x30
arch/arm/mach-ixp4xx/ixp4xx_npe.c
8983
21541
/* * Intel IXP4xx Network Processor Engine driver for Linux * * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * The code is based on publicly available information: * - Intel IXP4xx Developer's Manual and other e-papers * - Intel IXP400 Access Library Software (BSD license) * - previous works by Christian Hohnstaedt <chohnstaedt@innominate.com> * Thanks, Christian. */ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/firmware.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <mach/npe.h> #define DEBUG_MSG 0 #define DEBUG_FW 0 #define NPE_COUNT 3 #define MAX_RETRIES 1000 /* microseconds */ #define NPE_42X_DATA_SIZE 0x800 /* in dwords */ #define NPE_46X_DATA_SIZE 0x1000 #define NPE_A_42X_INSTR_SIZE 0x1000 #define NPE_B_AND_C_42X_INSTR_SIZE 0x800 #define NPE_46X_INSTR_SIZE 0x1000 #define REGS_SIZE 0x1000 #define NPE_PHYS_REG 32 #define FW_MAGIC 0xFEEDF00D #define FW_BLOCK_TYPE_INSTR 0x0 #define FW_BLOCK_TYPE_DATA 0x1 #define FW_BLOCK_TYPE_EOF 0xF /* NPE exec status (read) and command (write) */ #define CMD_NPE_STEP 0x01 #define CMD_NPE_START 0x02 #define CMD_NPE_STOP 0x03 #define CMD_NPE_CLR_PIPE 0x04 #define CMD_CLR_PROFILE_CNT 0x0C #define CMD_RD_INS_MEM 0x10 /* instruction memory */ #define CMD_WR_INS_MEM 0x11 #define CMD_RD_DATA_MEM 0x12 /* data memory */ #define CMD_WR_DATA_MEM 0x13 #define CMD_RD_ECS_REG 0x14 /* exec access register */ #define CMD_WR_ECS_REG 0x15 #define STAT_RUN 0x80000000 #define STAT_STOP 0x40000000 #define STAT_CLEAR 0x20000000 #define STAT_ECS_K 0x00800000 /* pipeline clean */ #define NPE_STEVT 0x1B #define NPE_STARTPC 0x1C #define NPE_REGMAP 0x1E #define NPE_CINDEX 0x1F #define INSTR_WR_REG_SHORT 0x0000C000 #define INSTR_WR_REG_BYTE 0x00004000 #define INSTR_RD_FIFO 0x0F888220 #define 
INSTR_RESET_MBOX 0x0FAC8210 #define ECS_BG_CTXT_REG_0 0x00 /* Background Executing Context */ #define ECS_BG_CTXT_REG_1 0x01 /* Stack level */ #define ECS_BG_CTXT_REG_2 0x02 #define ECS_PRI_1_CTXT_REG_0 0x04 /* Priority 1 Executing Context */ #define ECS_PRI_1_CTXT_REG_1 0x05 /* Stack level */ #define ECS_PRI_1_CTXT_REG_2 0x06 #define ECS_PRI_2_CTXT_REG_0 0x08 /* Priority 2 Executing Context */ #define ECS_PRI_2_CTXT_REG_1 0x09 /* Stack level */ #define ECS_PRI_2_CTXT_REG_2 0x0A #define ECS_DBG_CTXT_REG_0 0x0C /* Debug Executing Context */ #define ECS_DBG_CTXT_REG_1 0x0D /* Stack level */ #define ECS_DBG_CTXT_REG_2 0x0E #define ECS_INSTRUCT_REG 0x11 /* NPE Instruction Register */ #define ECS_REG_0_ACTIVE 0x80000000 /* all levels */ #define ECS_REG_0_NEXTPC_MASK 0x1FFF0000 /* BG/PRI1/PRI2 levels */ #define ECS_REG_0_LDUR_BITS 8 #define ECS_REG_0_LDUR_MASK 0x00000700 /* all levels */ #define ECS_REG_1_CCTXT_BITS 16 #define ECS_REG_1_CCTXT_MASK 0x000F0000 /* all levels */ #define ECS_REG_1_SELCTXT_BITS 0 #define ECS_REG_1_SELCTXT_MASK 0x0000000F /* all levels */ #define ECS_DBG_REG_2_IF 0x00100000 /* debug level */ #define ECS_DBG_REG_2_IE 0x00080000 /* debug level */ /* NPE watchpoint_fifo register bit */ #define WFIFO_VALID 0x80000000 /* NPE messaging_status register bit definitions */ #define MSGSTAT_OFNE 0x00010000 /* OutFifoNotEmpty */ #define MSGSTAT_IFNF 0x00020000 /* InFifoNotFull */ #define MSGSTAT_OFNF 0x00040000 /* OutFifoNotFull */ #define MSGSTAT_IFNE 0x00080000 /* InFifoNotEmpty */ #define MSGSTAT_MBINT 0x00100000 /* Mailbox interrupt */ #define MSGSTAT_IFINT 0x00200000 /* InFifo interrupt */ #define MSGSTAT_OFINT 0x00400000 /* OutFifo interrupt */ #define MSGSTAT_WFINT 0x00800000 /* WatchFifo interrupt */ /* NPE messaging_control register bit definitions */ #define MSGCTL_OUT_FIFO 0x00010000 /* enable output FIFO */ #define MSGCTL_IN_FIFO 0x00020000 /* enable input FIFO */ #define MSGCTL_OUT_FIFO_WRITE 0x01000000 /* enable FIFO + WRITE */ #define 
MSGCTL_IN_FIFO_WRITE 0x02000000 /* NPE mailbox_status value for reset */ #define RESET_MBOX_STAT 0x0000F0F0 const char *npe_names[] = { "NPE-A", "NPE-B", "NPE-C" }; #define print_npe(pri, npe, fmt, ...) \ printk(pri "%s: " fmt, npe_name(npe), ## __VA_ARGS__) #if DEBUG_MSG #define debug_msg(npe, fmt, ...) \ print_npe(KERN_DEBUG, npe, fmt, ## __VA_ARGS__) #else #define debug_msg(npe, fmt, ...) #endif static struct { u32 reg, val; } ecs_reset[] = { { ECS_BG_CTXT_REG_0, 0xA0000000 }, { ECS_BG_CTXT_REG_1, 0x01000000 }, { ECS_BG_CTXT_REG_2, 0x00008000 }, { ECS_PRI_1_CTXT_REG_0, 0x20000080 }, { ECS_PRI_1_CTXT_REG_1, 0x01000000 }, { ECS_PRI_1_CTXT_REG_2, 0x00008000 }, { ECS_PRI_2_CTXT_REG_0, 0x20000080 }, { ECS_PRI_2_CTXT_REG_1, 0x01000000 }, { ECS_PRI_2_CTXT_REG_2, 0x00008000 }, { ECS_DBG_CTXT_REG_0, 0x20000000 }, { ECS_DBG_CTXT_REG_1, 0x00000000 }, { ECS_DBG_CTXT_REG_2, 0x001E0000 }, { ECS_INSTRUCT_REG, 0x1003C00F }, }; static struct npe npe_tab[NPE_COUNT] = { { .id = 0, .regs = (struct npe_regs __iomem *)IXP4XX_NPEA_BASE_VIRT, .regs_phys = IXP4XX_NPEA_BASE_PHYS, }, { .id = 1, .regs = (struct npe_regs __iomem *)IXP4XX_NPEB_BASE_VIRT, .regs_phys = IXP4XX_NPEB_BASE_PHYS, }, { .id = 2, .regs = (struct npe_regs __iomem *)IXP4XX_NPEC_BASE_VIRT, .regs_phys = IXP4XX_NPEC_BASE_PHYS, } }; int npe_running(struct npe *npe) { return (__raw_readl(&npe->regs->exec_status_cmd) & STAT_RUN) != 0; } static void npe_cmd_write(struct npe *npe, u32 addr, int cmd, u32 data) { __raw_writel(data, &npe->regs->exec_data); __raw_writel(addr, &npe->regs->exec_addr); __raw_writel(cmd, &npe->regs->exec_status_cmd); } static u32 npe_cmd_read(struct npe *npe, u32 addr, int cmd) { __raw_writel(addr, &npe->regs->exec_addr); __raw_writel(cmd, &npe->regs->exec_status_cmd); /* Iintroduce extra read cycles after issuing read command to NPE so that we read the register after the NPE has updated it. 
This is to overcome race condition between XScale and NPE */ __raw_readl(&npe->regs->exec_data); __raw_readl(&npe->regs->exec_data); return __raw_readl(&npe->regs->exec_data); } static void npe_clear_active(struct npe *npe, u32 reg) { u32 val = npe_cmd_read(npe, reg, CMD_RD_ECS_REG); npe_cmd_write(npe, reg, CMD_WR_ECS_REG, val & ~ECS_REG_0_ACTIVE); } static void npe_start(struct npe *npe) { /* ensure only Background Context Stack Level is active */ npe_clear_active(npe, ECS_PRI_1_CTXT_REG_0); npe_clear_active(npe, ECS_PRI_2_CTXT_REG_0); npe_clear_active(npe, ECS_DBG_CTXT_REG_0); __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); __raw_writel(CMD_NPE_START, &npe->regs->exec_status_cmd); } static void npe_stop(struct npe *npe) { __raw_writel(CMD_NPE_STOP, &npe->regs->exec_status_cmd); __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); /*FIXME?*/ } static int __must_check npe_debug_instr(struct npe *npe, u32 instr, u32 ctx, u32 ldur) { u32 wc; int i; /* set the Active bit, and the LDUR, in the debug level */ npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG, ECS_REG_0_ACTIVE | (ldur << ECS_REG_0_LDUR_BITS)); /* set CCTXT at ECS DEBUG L3 to specify in which context to execute the instruction, and set SELCTXT at ECS DEBUG Level to specify which context store to access. 
Debug ECS Level Reg 1 has form 0x000n000n, where n = context number */ npe_cmd_write(npe, ECS_DBG_CTXT_REG_1, CMD_WR_ECS_REG, (ctx << ECS_REG_1_CCTXT_BITS) | (ctx << ECS_REG_1_SELCTXT_BITS)); /* clear the pipeline */ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); /* load NPE instruction into the instruction register */ npe_cmd_write(npe, ECS_INSTRUCT_REG, CMD_WR_ECS_REG, instr); /* we need this value later to wait for completion of NPE execution step */ wc = __raw_readl(&npe->regs->watch_count); /* issue a Step One command via the Execution Control register */ __raw_writel(CMD_NPE_STEP, &npe->regs->exec_status_cmd); /* Watch Count register increments when NPE completes an instruction */ for (i = 0; i < MAX_RETRIES; i++) { if (wc != __raw_readl(&npe->regs->watch_count)) return 0; udelay(1); } print_npe(KERN_ERR, npe, "reset: npe_debug_instr(): timeout\n"); return -ETIMEDOUT; } static int __must_check npe_logical_reg_write8(struct npe *npe, u32 addr, u8 val, u32 ctx) { /* here we build the NPE assembler instruction: mov8 d0, #0 */ u32 instr = INSTR_WR_REG_BYTE | /* OpCode */ addr << 9 | /* base Operand */ (val & 0x1F) << 4 | /* lower 5 bits to immediate data */ (val & ~0x1F) << (18 - 5);/* higher 3 bits to CoProc instr. */ return npe_debug_instr(npe, instr, ctx, 1); /* execute it */ } static int __must_check npe_logical_reg_write16(struct npe *npe, u32 addr, u16 val, u32 ctx) { /* here we build the NPE assembler instruction: mov16 d0, #0 */ u32 instr = INSTR_WR_REG_SHORT | /* OpCode */ addr << 9 | /* base Operand */ (val & 0x1F) << 4 | /* lower 5 bits to immediate data */ (val & ~0x1F) << (18 - 5);/* higher 11 bits to CoProc instr. 
*/ return npe_debug_instr(npe, instr, ctx, 1); /* execute it */ } static int __must_check npe_logical_reg_write32(struct npe *npe, u32 addr, u32 val, u32 ctx) { /* write in 16 bit steps first the high and then the low value */ if (npe_logical_reg_write16(npe, addr, val >> 16, ctx)) return -ETIMEDOUT; return npe_logical_reg_write16(npe, addr + 2, val & 0xFFFF, ctx); } static int npe_reset(struct npe *npe) { u32 val, ctl, exec_count, ctx_reg2; int i; ctl = (__raw_readl(&npe->regs->messaging_control) | 0x3F000000) & 0x3F3FFFFF; /* disable parity interrupt */ __raw_writel(ctl & 0x3F00FFFF, &npe->regs->messaging_control); /* pre exec - debug instruction */ /* turn off the halt bit by clearing Execution Count register. */ exec_count = __raw_readl(&npe->regs->exec_count); __raw_writel(0, &npe->regs->exec_count); /* ensure that IF and IE are on (temporarily), so that we don't end up stepping forever */ ctx_reg2 = npe_cmd_read(npe, ECS_DBG_CTXT_REG_2, CMD_RD_ECS_REG); npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2 | ECS_DBG_REG_2_IF | ECS_DBG_REG_2_IE); /* clear the FIFOs */ while (__raw_readl(&npe->regs->watchpoint_fifo) & WFIFO_VALID) ; while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE) /* read from the outFIFO until empty */ print_npe(KERN_DEBUG, npe, "npe_reset: read FIFO = 0x%X\n", __raw_readl(&npe->regs->in_out_fifo)); while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE) /* step execution of the NPE intruction to read inFIFO using the Debug Executing Context stack */ if (npe_debug_instr(npe, INSTR_RD_FIFO, 0, 0)) return -ETIMEDOUT; /* reset the mailbox reg from the XScale side */ __raw_writel(RESET_MBOX_STAT, &npe->regs->mailbox_status); /* from NPE side */ if (npe_debug_instr(npe, INSTR_RESET_MBOX, 0, 0)) return -ETIMEDOUT; /* Reset the physical registers in the NPE register file */ for (val = 0; val < NPE_PHYS_REG; val++) { if (npe_logical_reg_write16(npe, NPE_REGMAP, val >> 1, 0)) return -ETIMEDOUT; /* address is either 0 
or 4 */ if (npe_logical_reg_write32(npe, (val & 1) * 4, 0, 0)) return -ETIMEDOUT; } /* Reset the context store = each context's Context Store registers */ /* Context 0 has no STARTPC. Instead, this value is used to set NextPC for Background ECS, to set where NPE starts executing code */ val = npe_cmd_read(npe, ECS_BG_CTXT_REG_0, CMD_RD_ECS_REG); val &= ~ECS_REG_0_NEXTPC_MASK; val |= (0 /* NextPC */ << 16) & ECS_REG_0_NEXTPC_MASK; npe_cmd_write(npe, ECS_BG_CTXT_REG_0, CMD_WR_ECS_REG, val); for (i = 0; i < 16; i++) { if (i) { /* Context 0 has no STEVT nor STARTPC */ /* STEVT = off, 0x80 */ if (npe_logical_reg_write8(npe, NPE_STEVT, 0x80, i)) return -ETIMEDOUT; if (npe_logical_reg_write16(npe, NPE_STARTPC, 0, i)) return -ETIMEDOUT; } /* REGMAP = d0->p0, d8->p2, d16->p4 */ if (npe_logical_reg_write16(npe, NPE_REGMAP, 0x820, i)) return -ETIMEDOUT; if (npe_logical_reg_write8(npe, NPE_CINDEX, 0, i)) return -ETIMEDOUT; } /* post exec */ /* clear active bit in debug level */ npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG, 0); /* clear the pipeline */ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); /* restore previous values */ __raw_writel(exec_count, &npe->regs->exec_count); npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2); /* write reset values to Execution Context Stack registers */ for (val = 0; val < ARRAY_SIZE(ecs_reset); val++) npe_cmd_write(npe, ecs_reset[val].reg, CMD_WR_ECS_REG, ecs_reset[val].val); /* clear the profile counter */ __raw_writel(CMD_CLR_PROFILE_CNT, &npe->regs->exec_status_cmd); __raw_writel(0, &npe->regs->exec_count); __raw_writel(0, &npe->regs->action_points[0]); __raw_writel(0, &npe->regs->action_points[1]); __raw_writel(0, &npe->regs->action_points[2]); __raw_writel(0, &npe->regs->action_points[3]); __raw_writel(0, &npe->regs->watch_count); val = ixp4xx_read_feature_bits(); /* reset the NPE */ ixp4xx_write_feature_bits(val & ~(IXP4XX_FEATURE_RESET_NPEA << npe->id)); /* deassert reset */ 
ixp4xx_write_feature_bits(val | (IXP4XX_FEATURE_RESET_NPEA << npe->id)); for (i = 0; i < MAX_RETRIES; i++) { if (ixp4xx_read_feature_bits() & (IXP4XX_FEATURE_RESET_NPEA << npe->id)) break; /* NPE is back alive */ udelay(1); } if (i == MAX_RETRIES) return -ETIMEDOUT; npe_stop(npe); /* restore NPE configuration bus Control Register - parity settings */ __raw_writel(ctl, &npe->regs->messaging_control); return 0; } int npe_send_message(struct npe *npe, const void *msg, const char *what) { const u32 *send = msg; int cycles = 0; debug_msg(npe, "Trying to send message %s [%08X:%08X]\n", what, send[0], send[1]); if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE) { debug_msg(npe, "NPE input FIFO not empty\n"); return -EIO; } __raw_writel(send[0], &npe->regs->in_out_fifo); if (!(__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNF)) { debug_msg(npe, "NPE input FIFO full\n"); return -EIO; } __raw_writel(send[1], &npe->regs->in_out_fifo); while ((cycles < MAX_RETRIES) && (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE)) { udelay(1); cycles++; } if (cycles == MAX_RETRIES) { debug_msg(npe, "Timeout sending message\n"); return -ETIMEDOUT; } #if DEBUG_MSG > 1 debug_msg(npe, "Sending a message took %i cycles\n", cycles); #endif return 0; } int npe_recv_message(struct npe *npe, void *msg, const char *what) { u32 *recv = msg; int cycles = 0, cnt = 0; debug_msg(npe, "Trying to receive message %s\n", what); while (cycles < MAX_RETRIES) { if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE) { recv[cnt++] = __raw_readl(&npe->regs->in_out_fifo); if (cnt == 2) break; } else { udelay(1); cycles++; } } switch(cnt) { case 1: debug_msg(npe, "Received [%08X]\n", recv[0]); break; case 2: debug_msg(npe, "Received [%08X:%08X]\n", recv[0], recv[1]); break; } if (cycles == MAX_RETRIES) { debug_msg(npe, "Timeout waiting for message\n"); return -ETIMEDOUT; } #if DEBUG_MSG > 1 debug_msg(npe, "Receiving a message took %i cycles\n", cycles); #endif return 0; } int 
npe_send_recv_message(struct npe *npe, void *msg, const char *what) { int result; u32 *send = msg, recv[2]; if ((result = npe_send_message(npe, msg, what)) != 0) return result; if ((result = npe_recv_message(npe, recv, what)) != 0) return result; if ((recv[0] != send[0]) || (recv[1] != send[1])) { debug_msg(npe, "Message %s: unexpected message received\n", what); return -EIO; } return 0; } int npe_load_firmware(struct npe *npe, const char *name, struct device *dev) { const struct firmware *fw_entry; struct dl_block { u32 type; u32 offset; } *blk; struct dl_image { u32 magic; u32 id; u32 size; union { u32 data[0]; struct dl_block blocks[0]; }; } *image; struct dl_codeblock { u32 npe_addr; u32 size; u32 data[0]; } *cb; int i, j, err, data_size, instr_size, blocks, table_end; u32 cmd; if ((err = request_firmware(&fw_entry, name, dev)) != 0) return err; err = -EINVAL; if (fw_entry->size < sizeof(struct dl_image)) { print_npe(KERN_ERR, npe, "incomplete firmware file\n"); goto err; } image = (struct dl_image*)fw_entry->data; #if DEBUG_FW print_npe(KERN_DEBUG, npe, "firmware: %08X %08X %08X (0x%X bytes)\n", image->magic, image->id, image->size, image->size * 4); #endif if (image->magic == swab32(FW_MAGIC)) { /* swapped file */ image->id = swab32(image->id); image->size = swab32(image->size); } else if (image->magic != FW_MAGIC) { print_npe(KERN_ERR, npe, "bad firmware file magic: 0x%X\n", image->magic); goto err; } if ((image->size * 4 + sizeof(struct dl_image)) != fw_entry->size) { print_npe(KERN_ERR, npe, "inconsistent size of firmware file\n"); goto err; } if (((image->id >> 24) & 0xF /* NPE ID */) != npe->id) { print_npe(KERN_ERR, npe, "firmware file NPE ID mismatch\n"); goto err; } if (image->magic == swab32(FW_MAGIC)) for (i = 0; i < image->size; i++) image->data[i] = swab32(image->data[i]); if (cpu_is_ixp42x() && ((image->id >> 28) & 0xF /* device ID */)) { print_npe(KERN_INFO, npe, "IXP43x/IXP46x firmware ignored on " "IXP42x\n"); goto err; } if (npe_running(npe)) 
{ print_npe(KERN_INFO, npe, "unable to load firmware, NPE is " "already running\n"); err = -EBUSY; goto err; } #if 0 npe_stop(npe); npe_reset(npe); #endif print_npe(KERN_INFO, npe, "firmware functionality 0x%X, " "revision 0x%X:%X\n", (image->id >> 16) & 0xFF, (image->id >> 8) & 0xFF, image->id & 0xFF); if (cpu_is_ixp42x()) { if (!npe->id) instr_size = NPE_A_42X_INSTR_SIZE; else instr_size = NPE_B_AND_C_42X_INSTR_SIZE; data_size = NPE_42X_DATA_SIZE; } else { instr_size = NPE_46X_INSTR_SIZE; data_size = NPE_46X_DATA_SIZE; } for (blocks = 0; blocks * sizeof(struct dl_block) / 4 < image->size; blocks++) if (image->blocks[blocks].type == FW_BLOCK_TYPE_EOF) break; if (blocks * sizeof(struct dl_block) / 4 >= image->size) { print_npe(KERN_INFO, npe, "firmware EOF block marker not " "found\n"); goto err; } #if DEBUG_FW print_npe(KERN_DEBUG, npe, "%i firmware blocks found\n", blocks); #endif table_end = blocks * sizeof(struct dl_block) / 4 + 1 /* EOF marker */; for (i = 0, blk = image->blocks; i < blocks; i++, blk++) { if (blk->offset > image->size - sizeof(struct dl_codeblock) / 4 || blk->offset < table_end) { print_npe(KERN_INFO, npe, "invalid offset 0x%X of " "firmware block #%i\n", blk->offset, i); goto err; } cb = (struct dl_codeblock*)&image->data[blk->offset]; if (blk->type == FW_BLOCK_TYPE_INSTR) { if (cb->npe_addr + cb->size > instr_size) goto too_big; cmd = CMD_WR_INS_MEM; } else if (blk->type == FW_BLOCK_TYPE_DATA) { if (cb->npe_addr + cb->size > data_size) goto too_big; cmd = CMD_WR_DATA_MEM; } else { print_npe(KERN_INFO, npe, "invalid firmware block #%i " "type 0x%X\n", i, blk->type); goto err; } if (blk->offset + sizeof(*cb) / 4 + cb->size > image->size) { print_npe(KERN_INFO, npe, "firmware block #%i doesn't " "fit in firmware image: type %c, start 0x%X," " length 0x%X\n", i, blk->type == FW_BLOCK_TYPE_INSTR ? 
'I' : 'D', cb->npe_addr, cb->size); goto err; } for (j = 0; j < cb->size; j++) npe_cmd_write(npe, cb->npe_addr + j, cmd, cb->data[j]); } npe_start(npe); if (!npe_running(npe)) print_npe(KERN_ERR, npe, "unable to start\n"); release_firmware(fw_entry); return 0; too_big: print_npe(KERN_INFO, npe, "firmware block #%i doesn't fit in NPE " "memory: type %c, start 0x%X, length 0x%X\n", i, blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D', cb->npe_addr, cb->size); err: release_firmware(fw_entry); return err; } struct npe *npe_request(unsigned id) { if (id < NPE_COUNT) if (npe_tab[id].valid) if (try_module_get(THIS_MODULE)) return &npe_tab[id]; return NULL; } void npe_release(struct npe *npe) { module_put(THIS_MODULE); } static int __init npe_init_module(void) { int i, found = 0; for (i = 0; i < NPE_COUNT; i++) { struct npe *npe = &npe_tab[i]; if (!(ixp4xx_read_feature_bits() & (IXP4XX_FEATURE_RESET_NPEA << i))) continue; /* NPE already disabled or not present */ if (!(npe->mem_res = request_mem_region(npe->regs_phys, REGS_SIZE, npe_name(npe)))) { print_npe(KERN_ERR, npe, "failed to request memory region\n"); continue; } if (npe_reset(npe)) continue; npe->valid = 1; found++; } if (!found) return -ENODEV; return 0; } static void __exit npe_cleanup_module(void) { int i; for (i = 0; i < NPE_COUNT; i++) if (npe_tab[i].mem_res) { npe_reset(&npe_tab[i]); release_resource(npe_tab[i].mem_res); } } module_init(npe_init_module); module_exit(npe_cleanup_module); MODULE_AUTHOR("Krzysztof Halasa"); MODULE_LICENSE("GPL v2"); EXPORT_SYMBOL(npe_names); EXPORT_SYMBOL(npe_running); EXPORT_SYMBOL(npe_request); EXPORT_SYMBOL(npe_release); EXPORT_SYMBOL(npe_load_firmware); EXPORT_SYMBOL(npe_send_message); EXPORT_SYMBOL(npe_recv_message); EXPORT_SYMBOL(npe_send_recv_message);
gpl-2.0
burstlam/zte-blade-35
fs/nls/nls_cp861.c
12567
17508
/* * linux/fs/nls/nls_cp861.c * * Charset cp861 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x00c7, 0x00fc, 0x00e9, 0x00e2, 0x00e4, 0x00e0, 0x00e5, 0x00e7, 0x00ea, 0x00eb, 0x00e8, 0x00d0, 0x00f0, 0x00de, 0x00c4, 0x00c5, /* 0x90*/ 0x00c9, 0x00e6, 0x00c6, 0x00f4, 0x00f6, 0x00fe, 0x00fb, 0x00dd, 0x00fd, 0x00d6, 0x00dc, 0x00f8, 0x00a3, 0x00d8, 0x20a7, 0x0192, /* 0xa0*/ 0x00e1, 0x00ed, 0x00f3, 0x00fa, 0x00c1, 0x00cd, 0x00d3, 0x00da, 0x00bf, 0x2310, 0x00ac, 0x00bd, 0x00bc, 0x00a1, 0x00ab, 0x00bb, /* 0xb0*/ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 
0x2555, 0x2563, 0x2551, 0x2557, 0x255d, 0x255c, 0x255b, 0x2510, /* 0xc0*/ 0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x255e, 0x255f, 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x2567, /* 0xd0*/ 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256b, 0x256a, 0x2518, 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580, /* 0xe0*/ 0x03b1, 0x00df, 0x0393, 0x03c0, 0x03a3, 0x03c3, 0x00b5, 0x03c4, 0x03a6, 0x0398, 0x03a9, 0x03b4, 0x221e, 0x03c6, 0x03b5, 0x2229, /* 0xf0*/ 0x2261, 0x00b1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248, 0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xff, 0xad, 0x00, 0x9c, 0x00, 0x00, 0x00, 0x00, /* 
0xa0-0xa7 */ 0x00, 0x00, 0x00, 0xae, 0xaa, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0xf8, 0xf1, 0xfd, 0x00, 0x00, 0xe6, 0x00, 0xfa, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0xaf, 0xac, 0xab, 0x00, 0xa8, /* 0xb8-0xbf */ 0x00, 0xa4, 0x00, 0x00, 0x8e, 0x8f, 0x92, 0x80, /* 0xc0-0xc7 */ 0x00, 0x90, 0x00, 0x00, 0x00, 0xa5, 0x00, 0x00, /* 0xc8-0xcf */ 0x8b, 0x00, 0x00, 0xa6, 0x00, 0x00, 0x99, 0x00, /* 0xd0-0xd7 */ 0x9d, 0x00, 0xa7, 0x00, 0x9a, 0x97, 0x8d, 0xe1, /* 0xd8-0xdf */ 0x85, 0xa0, 0x83, 0x00, 0x84, 0x86, 0x91, 0x87, /* 0xe0-0xe7 */ 0x8a, 0x82, 0x88, 0x89, 0x00, 0xa1, 0x00, 0x00, /* 0xe8-0xef */ 0x8c, 0x00, 0x00, 0xa2, 0x93, 0x00, 0x94, 0xf6, /* 0xf0-0xf7 */ 0x9b, 0x00, 0xa3, 0x96, 0x81, 0x98, 0x95, 0x00, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ }; static const 
unsigned char page03[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0xe2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0xe8, 0x00, /* 0xa0-0xa7 */ 0x00, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0xe0, 0x00, 0x00, 0xeb, 0xee, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0xe3, 0x00, 0x00, 0xe5, 0xe7, 0x00, 0xed, 0x00, /* 0xc0-0xc7 */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, /* 0xa0-0xa7 */ }; static const unsigned char page22[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0xec, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0xf0, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */ }; static const unsigned char page23[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 
0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0xf4, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ }; static const unsigned char page25[256] = { 0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */ 0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */ 0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */ 0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, NULL, page03, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, page22, page23, NULL, page25, NULL, NULL, }; static const 
unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8c, 0x8c, 0x95, 0x84, 0x86, /* 0x88-0x8f */ 0x82, 0x91, 0x91, 0x93, 0x94, 0x95, 0x96, 0x98, /* 0x90-0x97 */ 0x98, 0x94, 0x81, 0x9b, 0x9c, 0x9b, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa0, 0xa1, 0xa2, 0xa3, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0x00, 0xe3, 0xe5, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xed, 0x00, 0x00, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 
0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x9a, 0x90, 0x00, 0x8e, 0x00, 0x8f, 0x80, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x8b, 0x8b, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x92, 0x92, 0x00, 0x99, 0x8d, 0x00, 0x97, /* 0x90-0x97 */ 0x97, 0x99, 0x9a, 0x9d, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */ 0xa4, 0xa5, 0xa6, 0xa7, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0x00, 0xe1, 0xe2, 0x00, 0xe4, 0xe4, 0x00, 0x00, /* 0xe0-0xe7 */ 
0xe8, 0xe9, 0xea, 0x00, 0xec, 0xe8, 0x00, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp861", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_cp861(void) { return register_nls(&table); } static void __exit exit_nls_cp861(void) { unregister_nls(&table); } module_init(init_nls_cp861) module_exit(exit_nls_cp861) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
holyangel/HTC_M8_GPE-4.4.3
drivers/infiniband/hw/amso1100/c2_mm.c
13335
8887
/* * Copyright (c) 2005 Ammasso, Inc. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/slab.h> #include "c2.h" #include "c2_vq.h" #define PBL_VIRT 1 #define PBL_PHYS 2 /* * Send all the PBL messages to convey the remainder of the PBL * Wait for the adapter's reply on the last one. * This is indicated by setting the MEM_PBL_COMPLETE in the flags. * * NOTE: vq_req is _not_ freed by this function. The VQ Host * Reply buffer _is_ freed by this function. 
*/ static int send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index, unsigned long va, u32 pbl_depth, struct c2_vq_req *vq_req, int pbl_type) { u32 pbe_count; /* amt that fits in a PBL msg */ u32 count; /* amt in this PBL MSG. */ struct c2wr_nsmr_pbl_req *wr; /* PBL WR ptr */ struct c2wr_nsmr_pbl_rep *reply; /* reply ptr */ int err, pbl_virt, pbl_index, i; switch (pbl_type) { case PBL_VIRT: pbl_virt = 1; break; case PBL_PHYS: pbl_virt = 0; break; default: return -EINVAL; break; } pbe_count = (c2dev->req_vq.msg_size - sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64); wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); if (!wr) { return -ENOMEM; } c2_wr_set_id(wr, CCWR_NSMR_PBL); /* * Only the last PBL message will generate a reply from the verbs, * so we set the context to 0 indicating there is no kernel verbs * handler blocked awaiting this reply. */ wr->hdr.context = 0; wr->rnic_handle = c2dev->adapter_handle; wr->stag_index = stag_index; /* already swapped */ wr->flags = 0; pbl_index = 0; while (pbl_depth) { count = min(pbe_count, pbl_depth); wr->addrs_length = cpu_to_be32(count); /* * If this is the last message, then reference the * vq request struct cuz we're gonna wait for a reply. * also make this PBL msg as the last one. */ if (count == pbl_depth) { /* * reference the request struct. dereferenced in the * int handler. */ vq_req_get(c2dev, vq_req); wr->flags = cpu_to_be32(MEM_PBL_COMPLETE); /* * This is the last PBL message. * Set the context to our VQ Request Object so we can * wait for the reply. */ wr->hdr.context = (unsigned long) vq_req; } /* * If pbl_virt is set then va is a virtual address * that describes a virtually contiguous memory * allocation. The wr needs the start of each virtual page * to be converted to the corresponding physical address * of the page. If pbl_virt is not set then va is an array * of physical addresses and there is no conversion to do. * Just fill in the wr with what is in the array. 
*/ for (i = 0; i < count; i++) { if (pbl_virt) { va += PAGE_SIZE; } else { wr->paddrs[i] = cpu_to_be64(((u64 *)va)[pbl_index + i]); } } /* * Send WR to adapter */ err = vq_send_wr(c2dev, (union c2wr *) wr); if (err) { if (count <= pbe_count) { vq_req_put(c2dev, vq_req); } goto bail0; } pbl_depth -= count; pbl_index += count; } /* * Now wait for the reply... */ err = vq_wait_for_reply(c2dev, vq_req); if (err) { goto bail0; } /* * Process reply */ reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg; if (!reply) { err = -ENOMEM; goto bail0; } err = c2_errno(reply); vq_repbuf_free(c2dev, reply); bail0: kfree(wr); return err; } #define C2_PBL_MAX_DEPTH 131072 int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list, int page_size, int pbl_depth, u32 length, u32 offset, u64 *va, enum c2_acf acf, struct c2_mr *mr) { struct c2_vq_req *vq_req; struct c2wr_nsmr_register_req *wr; struct c2wr_nsmr_register_rep *reply; u16 flags; int i, pbe_count, count; int err; if (!va || !length || !addr_list || !pbl_depth) return -EINTR; /* * Verify PBL depth is within rnic max */ if (pbl_depth > C2_PBL_MAX_DEPTH) { return -EINTR; } /* * allocate verbs request object */ vq_req = vq_req_alloc(c2dev); if (!vq_req) return -ENOMEM; wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); if (!wr) { err = -ENOMEM; goto bail0; } /* * build the WR */ c2_wr_set_id(wr, CCWR_NSMR_REGISTER); wr->hdr.context = (unsigned long) vq_req; wr->rnic_handle = c2dev->adapter_handle; flags = (acf | MEM_VA_BASED | MEM_REMOTE); /* * compute how many pbes can fit in the message */ pbe_count = (c2dev->req_vq.msg_size - sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64); if (pbl_depth <= pbe_count) { flags |= MEM_PBL_COMPLETE; } wr->flags = cpu_to_be16(flags); wr->stag_key = 0; //stag_key; wr->va = cpu_to_be64(*va); wr->pd_id = mr->pd->pd_id; wr->pbe_size = cpu_to_be32(page_size); wr->length = cpu_to_be32(length); wr->pbl_depth = cpu_to_be32(pbl_depth); wr->fbo = cpu_to_be32(offset); count = 
min(pbl_depth, pbe_count); wr->addrs_length = cpu_to_be32(count); /* * fill out the PBL for this message */ for (i = 0; i < count; i++) { wr->paddrs[i] = cpu_to_be64(addr_list[i]); } /* * regerence the request struct */ vq_req_get(c2dev, vq_req); /* * send the WR to the adapter */ err = vq_send_wr(c2dev, (union c2wr *) wr); if (err) { vq_req_put(c2dev, vq_req); goto bail1; } /* * wait for reply from adapter */ err = vq_wait_for_reply(c2dev, vq_req); if (err) { goto bail1; } /* * process reply */ reply = (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg); if (!reply) { err = -ENOMEM; goto bail1; } if ((err = c2_errno(reply))) { goto bail2; } //*p_pb_entries = be32_to_cpu(reply->pbl_depth); mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index); vq_repbuf_free(c2dev, reply); /* * if there are still more PBEs we need to send them to * the adapter and wait for a reply on the final one. * reuse vq_req for this purpose. */ pbl_depth -= count; if (pbl_depth) { vq_req->reply_msg = (unsigned long) NULL; atomic_set(&vq_req->reply_ready, 0); err = send_pbl_messages(c2dev, cpu_to_be32(mr->ibmr.lkey), (unsigned long) &addr_list[i], pbl_depth, vq_req, PBL_PHYS); if (err) { goto bail1; } } vq_req_free(c2dev, vq_req); kfree(wr); return err; bail2: vq_repbuf_free(c2dev, reply); bail1: kfree(wr); bail0: vq_req_free(c2dev, vq_req); return err; } int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index) { struct c2_vq_req *vq_req; /* verbs request object */ struct c2wr_stag_dealloc_req wr; /* work request */ struct c2wr_stag_dealloc_rep *reply; /* WR reply */ int err; /* * allocate verbs request object */ vq_req = vq_req_alloc(c2dev); if (!vq_req) { return -ENOMEM; } /* * Build the WR */ c2_wr_set_id(&wr, CCWR_STAG_DEALLOC); wr.hdr.context = (u64) (unsigned long) vq_req; wr.rnic_handle = c2dev->adapter_handle; wr.stag_index = cpu_to_be32(stag_index); /* * reference the request struct. dereferenced in the int handler. 
*/ vq_req_get(c2dev, vq_req); /* * Send WR to adapter */ err = vq_send_wr(c2dev, (union c2wr *) & wr); if (err) { vq_req_put(c2dev, vq_req); goto bail0; } /* * Wait for reply from adapter */ err = vq_wait_for_reply(c2dev, vq_req); if (err) { goto bail0; } /* * Process reply */ reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg; if (!reply) { err = -ENOMEM; goto bail0; } err = c2_errno(reply); vq_repbuf_free(c2dev, reply); bail0: vq_req_free(c2dev, vq_req); return err; }
gpl-2.0
klin1344/kernel_ville_2.31
drivers/infiniband/hw/amso1100/c2_mm.c
13335
8887
/* * Copyright (c) 2005 Ammasso, Inc. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/slab.h> #include "c2.h" #include "c2_vq.h" #define PBL_VIRT 1 #define PBL_PHYS 2 /* * Send all the PBL messages to convey the remainder of the PBL * Wait for the adapter's reply on the last one. * This is indicated by setting the MEM_PBL_COMPLETE in the flags. * * NOTE: vq_req is _not_ freed by this function. The VQ Host * Reply buffer _is_ freed by this function. 
*/ static int send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index, unsigned long va, u32 pbl_depth, struct c2_vq_req *vq_req, int pbl_type) { u32 pbe_count; /* amt that fits in a PBL msg */ u32 count; /* amt in this PBL MSG. */ struct c2wr_nsmr_pbl_req *wr; /* PBL WR ptr */ struct c2wr_nsmr_pbl_rep *reply; /* reply ptr */ int err, pbl_virt, pbl_index, i; switch (pbl_type) { case PBL_VIRT: pbl_virt = 1; break; case PBL_PHYS: pbl_virt = 0; break; default: return -EINVAL; break; } pbe_count = (c2dev->req_vq.msg_size - sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64); wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); if (!wr) { return -ENOMEM; } c2_wr_set_id(wr, CCWR_NSMR_PBL); /* * Only the last PBL message will generate a reply from the verbs, * so we set the context to 0 indicating there is no kernel verbs * handler blocked awaiting this reply. */ wr->hdr.context = 0; wr->rnic_handle = c2dev->adapter_handle; wr->stag_index = stag_index; /* already swapped */ wr->flags = 0; pbl_index = 0; while (pbl_depth) { count = min(pbe_count, pbl_depth); wr->addrs_length = cpu_to_be32(count); /* * If this is the last message, then reference the * vq request struct cuz we're gonna wait for a reply. * also make this PBL msg as the last one. */ if (count == pbl_depth) { /* * reference the request struct. dereferenced in the * int handler. */ vq_req_get(c2dev, vq_req); wr->flags = cpu_to_be32(MEM_PBL_COMPLETE); /* * This is the last PBL message. * Set the context to our VQ Request Object so we can * wait for the reply. */ wr->hdr.context = (unsigned long) vq_req; } /* * If pbl_virt is set then va is a virtual address * that describes a virtually contiguous memory * allocation. The wr needs the start of each virtual page * to be converted to the corresponding physical address * of the page. If pbl_virt is not set then va is an array * of physical addresses and there is no conversion to do. * Just fill in the wr with what is in the array. 
*/ for (i = 0; i < count; i++) { if (pbl_virt) { va += PAGE_SIZE; } else { wr->paddrs[i] = cpu_to_be64(((u64 *)va)[pbl_index + i]); } } /* * Send WR to adapter */ err = vq_send_wr(c2dev, (union c2wr *) wr); if (err) { if (count <= pbe_count) { vq_req_put(c2dev, vq_req); } goto bail0; } pbl_depth -= count; pbl_index += count; } /* * Now wait for the reply... */ err = vq_wait_for_reply(c2dev, vq_req); if (err) { goto bail0; } /* * Process reply */ reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg; if (!reply) { err = -ENOMEM; goto bail0; } err = c2_errno(reply); vq_repbuf_free(c2dev, reply); bail0: kfree(wr); return err; } #define C2_PBL_MAX_DEPTH 131072 int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list, int page_size, int pbl_depth, u32 length, u32 offset, u64 *va, enum c2_acf acf, struct c2_mr *mr) { struct c2_vq_req *vq_req; struct c2wr_nsmr_register_req *wr; struct c2wr_nsmr_register_rep *reply; u16 flags; int i, pbe_count, count; int err; if (!va || !length || !addr_list || !pbl_depth) return -EINTR; /* * Verify PBL depth is within rnic max */ if (pbl_depth > C2_PBL_MAX_DEPTH) { return -EINTR; } /* * allocate verbs request object */ vq_req = vq_req_alloc(c2dev); if (!vq_req) return -ENOMEM; wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); if (!wr) { err = -ENOMEM; goto bail0; } /* * build the WR */ c2_wr_set_id(wr, CCWR_NSMR_REGISTER); wr->hdr.context = (unsigned long) vq_req; wr->rnic_handle = c2dev->adapter_handle; flags = (acf | MEM_VA_BASED | MEM_REMOTE); /* * compute how many pbes can fit in the message */ pbe_count = (c2dev->req_vq.msg_size - sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64); if (pbl_depth <= pbe_count) { flags |= MEM_PBL_COMPLETE; } wr->flags = cpu_to_be16(flags); wr->stag_key = 0; //stag_key; wr->va = cpu_to_be64(*va); wr->pd_id = mr->pd->pd_id; wr->pbe_size = cpu_to_be32(page_size); wr->length = cpu_to_be32(length); wr->pbl_depth = cpu_to_be32(pbl_depth); wr->fbo = cpu_to_be32(offset); count = 
min(pbl_depth, pbe_count); wr->addrs_length = cpu_to_be32(count); /* * fill out the PBL for this message */ for (i = 0; i < count; i++) { wr->paddrs[i] = cpu_to_be64(addr_list[i]); } /* * regerence the request struct */ vq_req_get(c2dev, vq_req); /* * send the WR to the adapter */ err = vq_send_wr(c2dev, (union c2wr *) wr); if (err) { vq_req_put(c2dev, vq_req); goto bail1; } /* * wait for reply from adapter */ err = vq_wait_for_reply(c2dev, vq_req); if (err) { goto bail1; } /* * process reply */ reply = (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg); if (!reply) { err = -ENOMEM; goto bail1; } if ((err = c2_errno(reply))) { goto bail2; } //*p_pb_entries = be32_to_cpu(reply->pbl_depth); mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index); vq_repbuf_free(c2dev, reply); /* * if there are still more PBEs we need to send them to * the adapter and wait for a reply on the final one. * reuse vq_req for this purpose. */ pbl_depth -= count; if (pbl_depth) { vq_req->reply_msg = (unsigned long) NULL; atomic_set(&vq_req->reply_ready, 0); err = send_pbl_messages(c2dev, cpu_to_be32(mr->ibmr.lkey), (unsigned long) &addr_list[i], pbl_depth, vq_req, PBL_PHYS); if (err) { goto bail1; } } vq_req_free(c2dev, vq_req); kfree(wr); return err; bail2: vq_repbuf_free(c2dev, reply); bail1: kfree(wr); bail0: vq_req_free(c2dev, vq_req); return err; } int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index) { struct c2_vq_req *vq_req; /* verbs request object */ struct c2wr_stag_dealloc_req wr; /* work request */ struct c2wr_stag_dealloc_rep *reply; /* WR reply */ int err; /* * allocate verbs request object */ vq_req = vq_req_alloc(c2dev); if (!vq_req) { return -ENOMEM; } /* * Build the WR */ c2_wr_set_id(&wr, CCWR_STAG_DEALLOC); wr.hdr.context = (u64) (unsigned long) vq_req; wr.rnic_handle = c2dev->adapter_handle; wr.stag_index = cpu_to_be32(stag_index); /* * reference the request struct. dereferenced in the int handler. 
*/ vq_req_get(c2dev, vq_req); /* * Send WR to adapter */ err = vq_send_wr(c2dev, (union c2wr *) & wr); if (err) { vq_req_put(c2dev, vq_req); goto bail0; } /* * Wait for reply from adapter */ err = vq_wait_for_reply(c2dev, vq_req); if (err) { goto bail0; } /* * Process reply */ reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg; if (!reply) { err = -ENOMEM; goto bail0; } err = c2_errno(reply); vq_repbuf_free(c2dev, reply); bail0: vq_req_free(c2dev, vq_req); return err; }
gpl-2.0
primiano/udoo_kernel_imx
drivers/infiniband/hw/amso1100/c2_mm.c
13335
8887
/* * Copyright (c) 2005 Ammasso, Inc. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/slab.h> #include "c2.h" #include "c2_vq.h" #define PBL_VIRT 1 #define PBL_PHYS 2 /* * Send all the PBL messages to convey the remainder of the PBL * Wait for the adapter's reply on the last one. * This is indicated by setting the MEM_PBL_COMPLETE in the flags. * * NOTE: vq_req is _not_ freed by this function. The VQ Host * Reply buffer _is_ freed by this function. 
*/ static int send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index, unsigned long va, u32 pbl_depth, struct c2_vq_req *vq_req, int pbl_type) { u32 pbe_count; /* amt that fits in a PBL msg */ u32 count; /* amt in this PBL MSG. */ struct c2wr_nsmr_pbl_req *wr; /* PBL WR ptr */ struct c2wr_nsmr_pbl_rep *reply; /* reply ptr */ int err, pbl_virt, pbl_index, i; switch (pbl_type) { case PBL_VIRT: pbl_virt = 1; break; case PBL_PHYS: pbl_virt = 0; break; default: return -EINVAL; break; } pbe_count = (c2dev->req_vq.msg_size - sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64); wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); if (!wr) { return -ENOMEM; } c2_wr_set_id(wr, CCWR_NSMR_PBL); /* * Only the last PBL message will generate a reply from the verbs, * so we set the context to 0 indicating there is no kernel verbs * handler blocked awaiting this reply. */ wr->hdr.context = 0; wr->rnic_handle = c2dev->adapter_handle; wr->stag_index = stag_index; /* already swapped */ wr->flags = 0; pbl_index = 0; while (pbl_depth) { count = min(pbe_count, pbl_depth); wr->addrs_length = cpu_to_be32(count); /* * If this is the last message, then reference the * vq request struct cuz we're gonna wait for a reply. * also make this PBL msg as the last one. */ if (count == pbl_depth) { /* * reference the request struct. dereferenced in the * int handler. */ vq_req_get(c2dev, vq_req); wr->flags = cpu_to_be32(MEM_PBL_COMPLETE); /* * This is the last PBL message. * Set the context to our VQ Request Object so we can * wait for the reply. */ wr->hdr.context = (unsigned long) vq_req; } /* * If pbl_virt is set then va is a virtual address * that describes a virtually contiguous memory * allocation. The wr needs the start of each virtual page * to be converted to the corresponding physical address * of the page. If pbl_virt is not set then va is an array * of physical addresses and there is no conversion to do. * Just fill in the wr with what is in the array. 
*/ for (i = 0; i < count; i++) { if (pbl_virt) { va += PAGE_SIZE; } else { wr->paddrs[i] = cpu_to_be64(((u64 *)va)[pbl_index + i]); } } /* * Send WR to adapter */ err = vq_send_wr(c2dev, (union c2wr *) wr); if (err) { if (count <= pbe_count) { vq_req_put(c2dev, vq_req); } goto bail0; } pbl_depth -= count; pbl_index += count; } /* * Now wait for the reply... */ err = vq_wait_for_reply(c2dev, vq_req); if (err) { goto bail0; } /* * Process reply */ reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg; if (!reply) { err = -ENOMEM; goto bail0; } err = c2_errno(reply); vq_repbuf_free(c2dev, reply); bail0: kfree(wr); return err; } #define C2_PBL_MAX_DEPTH 131072 int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list, int page_size, int pbl_depth, u32 length, u32 offset, u64 *va, enum c2_acf acf, struct c2_mr *mr) { struct c2_vq_req *vq_req; struct c2wr_nsmr_register_req *wr; struct c2wr_nsmr_register_rep *reply; u16 flags; int i, pbe_count, count; int err; if (!va || !length || !addr_list || !pbl_depth) return -EINTR; /* * Verify PBL depth is within rnic max */ if (pbl_depth > C2_PBL_MAX_DEPTH) { return -EINTR; } /* * allocate verbs request object */ vq_req = vq_req_alloc(c2dev); if (!vq_req) return -ENOMEM; wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); if (!wr) { err = -ENOMEM; goto bail0; } /* * build the WR */ c2_wr_set_id(wr, CCWR_NSMR_REGISTER); wr->hdr.context = (unsigned long) vq_req; wr->rnic_handle = c2dev->adapter_handle; flags = (acf | MEM_VA_BASED | MEM_REMOTE); /* * compute how many pbes can fit in the message */ pbe_count = (c2dev->req_vq.msg_size - sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64); if (pbl_depth <= pbe_count) { flags |= MEM_PBL_COMPLETE; } wr->flags = cpu_to_be16(flags); wr->stag_key = 0; //stag_key; wr->va = cpu_to_be64(*va); wr->pd_id = mr->pd->pd_id; wr->pbe_size = cpu_to_be32(page_size); wr->length = cpu_to_be32(length); wr->pbl_depth = cpu_to_be32(pbl_depth); wr->fbo = cpu_to_be32(offset); count = 
min(pbl_depth, pbe_count); wr->addrs_length = cpu_to_be32(count); /* * fill out the PBL for this message */ for (i = 0; i < count; i++) { wr->paddrs[i] = cpu_to_be64(addr_list[i]); } /* * regerence the request struct */ vq_req_get(c2dev, vq_req); /* * send the WR to the adapter */ err = vq_send_wr(c2dev, (union c2wr *) wr); if (err) { vq_req_put(c2dev, vq_req); goto bail1; } /* * wait for reply from adapter */ err = vq_wait_for_reply(c2dev, vq_req); if (err) { goto bail1; } /* * process reply */ reply = (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg); if (!reply) { err = -ENOMEM; goto bail1; } if ((err = c2_errno(reply))) { goto bail2; } //*p_pb_entries = be32_to_cpu(reply->pbl_depth); mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index); vq_repbuf_free(c2dev, reply); /* * if there are still more PBEs we need to send them to * the adapter and wait for a reply on the final one. * reuse vq_req for this purpose. */ pbl_depth -= count; if (pbl_depth) { vq_req->reply_msg = (unsigned long) NULL; atomic_set(&vq_req->reply_ready, 0); err = send_pbl_messages(c2dev, cpu_to_be32(mr->ibmr.lkey), (unsigned long) &addr_list[i], pbl_depth, vq_req, PBL_PHYS); if (err) { goto bail1; } } vq_req_free(c2dev, vq_req); kfree(wr); return err; bail2: vq_repbuf_free(c2dev, reply); bail1: kfree(wr); bail0: vq_req_free(c2dev, vq_req); return err; } int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index) { struct c2_vq_req *vq_req; /* verbs request object */ struct c2wr_stag_dealloc_req wr; /* work request */ struct c2wr_stag_dealloc_rep *reply; /* WR reply */ int err; /* * allocate verbs request object */ vq_req = vq_req_alloc(c2dev); if (!vq_req) { return -ENOMEM; } /* * Build the WR */ c2_wr_set_id(&wr, CCWR_STAG_DEALLOC); wr.hdr.context = (u64) (unsigned long) vq_req; wr.rnic_handle = c2dev->adapter_handle; wr.stag_index = cpu_to_be32(stag_index); /* * reference the request struct. dereferenced in the int handler. 
*/ vq_req_get(c2dev, vq_req); /* * Send WR to adapter */ err = vq_send_wr(c2dev, (union c2wr *) & wr); if (err) { vq_req_put(c2dev, vq_req); goto bail0; } /* * Wait for reply from adapter */ err = vq_wait_for_reply(c2dev, vq_req); if (err) { goto bail0; } /* * Process reply */ reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg; if (!reply) { err = -ENOMEM; goto bail0; } err = c2_errno(reply); vq_repbuf_free(c2dev, reply); bail0: vq_req_free(c2dev, vq_req); return err; }
gpl-2.0
DustysPatch/OpenJK
codeJK2/game/g_itemLoad.cpp
24
17076
/* =========================================================================== Copyright (C) 2000 - 2013, Raven Software, Inc. Copyright (C) 2001 - 2013, Activision, Inc. Copyright (C) 2013 - 2015, OpenJK contributors This file is part of the OpenJK source code. OpenJK is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, see <http://www.gnu.org/licenses/>. =========================================================================== */ //g_itemLoad.cpp //reads in ext_data\items.dat to bg_itemlist[] #include "g_headers.h" #include "g_local.h" #include "g_items.h" #define PICKUPSOUND "sound/weapons/w_pkup.wav" //qboolean COM_ParseInt( char **data, int *i ); //qboolean COM_ParseString( char **data, char **s ); //qboolean COM_ParseFloat( char **data, float *f ); extern gitem_t bg_itemlist[]; static int itemNum; static void IT_ClassName (const char **holdBuf); static void IT_Count (const char **holdBuf); static void IT_Icon (const char **holdBuf); static void IT_Min (const char **holdBuf); static void IT_Max (const char **holdBuf); static void IT_Name (const char **holdBuf); static void IT_PickupSound (const char **holdBuf); static void IT_Tag (const char **holdBuf); static void IT_Type (const char **holdBuf); static void IT_WorldModel (const char **holdBuf); typedef struct itemParms_s { const char *parmName; void (*func)(const char **holdBuf); } itemParms_t; #define IT_PARM_MAX 10 itemParms_t ItemParms[IT_PARM_MAX] = { { "itemname", IT_Name }, { "classname", IT_ClassName }, { "count", IT_Count }, { "icon", IT_Icon }, { "min", IT_Min }, 
{ "max", IT_Max }, { "pickupsound", IT_PickupSound }, { "tag", IT_Tag }, { "type", IT_Type }, { "worldmodel", IT_WorldModel }, }; static void IT_SetDefaults( void ) { bg_itemlist[itemNum].mins[0] = -16; bg_itemlist[itemNum].mins[1] = -16; bg_itemlist[itemNum].mins[2] = -2; bg_itemlist[itemNum].maxs[0] = 16; bg_itemlist[itemNum].maxs[1] = 16; bg_itemlist[itemNum].maxs[2] = 16; bg_itemlist[itemNum].pickup_sound = PICKUPSOUND; //give it a default sound bg_itemlist[itemNum].precaches = NULL; bg_itemlist[itemNum].sounds = NULL; } static void IT_Name( const char **holdBuf ) { const char *tokenStr; if ( COM_ParseString( holdBuf, &tokenStr ) ) { return; } if ( !Q_stricmp( tokenStr, "ITM_NONE" ) ) { itemNum = ITM_NONE; } else if ( !Q_stricmp( tokenStr, "ITM_STUN_BATON_PICKUP" ) ) { itemNum = ITM_STUN_BATON_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_SABER_PICKUP" ) ) { itemNum = ITM_SABER_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_BRYAR_PISTOL_PICKUP" ) ) { itemNum = ITM_BRYAR_PISTOL_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_BLASTER_PICKUP" ) ) { itemNum = ITM_BLASTER_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_DISRUPTOR_PICKUP" ) ) { itemNum = ITM_DISRUPTOR_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_BOWCASTER_PICKUP" ) ) { itemNum = ITM_BOWCASTER_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_REPEATER_PICKUP" ) ) { itemNum = ITM_REPEATER_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_DEMP2_PICKUP" ) ) { itemNum = ITM_DEMP2_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_FLECHETTE_PICKUP" ) ) { itemNum = ITM_FLECHETTE_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_ROCKET_LAUNCHER_PICKUP" ) ) { itemNum = ITM_ROCKET_LAUNCHER_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_THERMAL_DET_PICKUP" ) ) { itemNum = ITM_THERMAL_DET_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_TRIP_MINE_PICKUP" ) ) { itemNum = ITM_TRIP_MINE_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_DET_PACK_PICKUP" ) ) { itemNum = ITM_DET_PACK_PICKUP; } else if ( !Q_stricmp( tokenStr, 
"ITM_BOT_LASER_PICKUP" ) ) { itemNum = ITM_BOT_LASER_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_EMPLACED_GUN_PICKUP" ) ) { itemNum = ITM_EMPLACED_GUN_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_TURRET_PICKUP" ) ) { itemNum = ITM_TURRET_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_MELEE" ) ) { itemNum = ITM_MELEE; } else if ( !Q_stricmp( tokenStr, "ITM_ATST_MAIN_PICKUP" ) ) { itemNum = ITM_ATST_MAIN_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_ATST_SIDE_PICKUP" ) ) { itemNum = ITM_ATST_SIDE_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_TIE_FIGHTER_PICKUP" ) ) { itemNum = ITM_TIE_FIGHTER_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_RAPID_FIRE_CONC_PICKUP" ) ) { itemNum = ITM_RAPID_FIRE_CONC_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_AMMO_FORCE_PICKUP" ) ) { itemNum = ITM_AMMO_FORCE_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_AMMO_BLASTER_PICKUP" ) ) { itemNum = ITM_AMMO_BLASTER_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_AMMO_POWERCELL_PICKUP" ) ) { itemNum = ITM_AMMO_POWERCELL_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_AMMO_METAL_BOLTS_PICKUP" ) ) { itemNum = ITM_AMMO_METAL_BOLTS_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_AMMO_ROCKETS_PICKUP" ) ) { itemNum = ITM_AMMO_ROCKETS_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_AMMO_EMPLACED_PICKUP" ) ) { itemNum = ITM_AMMO_EMPLACED_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_AMMO_THERMAL_PICKUP" ) ) { itemNum = ITM_AMMO_THERMAL_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_AMMO_TRIPMINE_PICKUP" ) ) { itemNum = ITM_AMMO_TRIPMINE_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_AMMO_DETPACK_PICKUP" ) ) { itemNum = ITM_AMMO_DETPACK_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_FORCE_HEAL_PICKUP" ) ) { itemNum = ITM_FORCE_HEAL_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_FORCE_LEVITATION_PICKUP" ) ) { itemNum = ITM_FORCE_LEVITATION_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_FORCE_SPEED_PICKUP" ) ) { itemNum = ITM_FORCE_SPEED_PICKUP; } else if ( !Q_stricmp( tokenStr, 
"ITM_FORCE_PUSH_PICKUP" ) ) { itemNum = ITM_FORCE_PUSH_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_FORCE_PULL_PICKUP" ) ) { itemNum = ITM_FORCE_PULL_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_FORCE_TELEPATHY_PICKUP" ) ) { itemNum = ITM_FORCE_TELEPATHY_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_FORCE_GRIP_PICKUP" ) ) { itemNum = ITM_FORCE_GRIP_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_FORCE_LIGHTNING_PICKUP" ) ) { itemNum = ITM_FORCE_LIGHTNING_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_FORCE_SABERTHROW_PICKUP" ) ) { itemNum = ITM_FORCE_SABERTHROW_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_BATTERY_PICKUP" ) ) { itemNum = ITM_BATTERY_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_SEEKER_PICKUP" ) ) { itemNum = ITM_SEEKER_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_SHIELD_PICKUP" ) ) { itemNum = ITM_SHIELD_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_BACTA_PICKUP" ) ) { itemNum = ITM_BACTA_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_DATAPAD_PICKUP" ) ) { itemNum = ITM_DATAPAD_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_BINOCULARS_PICKUP" ) ) { itemNum = ITM_BINOCULARS_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_SENTRY_GUN_PICKUP" ) ) { itemNum = ITM_SENTRY_GUN_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_LA_GOGGLES_PICKUP" ) ) { itemNum = ITM_LA_GOGGLES_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_BLASTER_PISTOL_PICKUP" ) ) { itemNum = ITM_BLASTER_PISTOL_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_MEDPAK_PICKUP" ) ) { itemNum = ITM_MEDPAK_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_SHIELD_SM_PICKUP" ) ) { itemNum = ITM_SHIELD_SM_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_SHIELD_LRG_PICKUP" ) ) { itemNum = ITM_SHIELD_LRG_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_GOODIE_KEY_PICKUP" ) ) { itemNum = ITM_GOODIE_KEY_PICKUP; } else if ( !Q_stricmp( tokenStr, "ITM_SECURITY_KEY_PICKUP" ) ) { itemNum = ITM_SECURITY_KEY_PICKUP; } else { itemNum = 0; gi.Printf( "WARNING: bad itemname in external item data '%s'\n", tokenStr 
); } IT_SetDefaults(); } static void IT_ClassName( const char **holdBuf ) { int len; const char *tokenStr; if ( COM_ParseString( holdBuf, &tokenStr ) ) { return; } len = strlen( tokenStr ) + 1; if ( len > 32 ) { len = 32; gi.Printf( "WARNING: weaponclass too long in external ITEMS.DAT '%s'\n", tokenStr ); } bg_itemlist[itemNum].classname = G_NewString( tokenStr ); } static void IT_WorldModel( const char **holdBuf ) { int len; const char *tokenStr; if ( COM_ParseString( holdBuf, &tokenStr ) ) { return; } len = strlen( tokenStr ) + 1; if ( len > 64 ) { len = 64; gi.Printf( "WARNING: world model too long in external ITEMS.DAT '%s'\n", tokenStr ); } bg_itemlist[itemNum].world_model = G_NewString( tokenStr ); } static void IT_Tag( const char **holdBuf ) { int tag; const char *tokenStr; if ( COM_ParseString( holdBuf, &tokenStr ) ) { return; } if ( !Q_stricmp( tokenStr, "WP_NONE" ) ) { tag = WP_NONE; } else if ( !Q_stricmp( tokenStr,"WP_STUN_BATON" ) ) { tag = WP_STUN_BATON; } else if ( !Q_stricmp( tokenStr,"WP_SABER" ) ) { tag = WP_SABER; } else if ( !Q_stricmp( tokenStr,"WP_BRYAR_PISTOL" ) ) { tag = WP_BRYAR_PISTOL; } else if ( !Q_stricmp( tokenStr,"WP_BLASTER" ) ) { tag = WP_BLASTER; } else if ( !Q_stricmp( tokenStr,"WP_DISRUPTOR" ) ) { tag = WP_DISRUPTOR; } else if ( !Q_stricmp( tokenStr,"WP_BOWCASTER" ) ) { tag = WP_BOWCASTER; } else if ( !Q_stricmp( tokenStr,"WP_REPEATER" ) ) { tag = WP_REPEATER; } else if ( !Q_stricmp( tokenStr,"WP_DEMP2" ) ) { tag = WP_DEMP2; } else if ( !Q_stricmp( tokenStr,"WP_FLECHETTE" ) ) { tag = WP_FLECHETTE; } else if ( !Q_stricmp( tokenStr,"WP_ROCKET_LAUNCHER" ) ) { tag = WP_ROCKET_LAUNCHER; } else if ( !Q_stricmp( tokenStr,"WP_THERMAL" ) ) { tag = WP_THERMAL; } else if ( !Q_stricmp( tokenStr,"WP_TRIP_MINE" ) ) { tag = WP_TRIP_MINE; } else if ( !Q_stricmp( tokenStr,"WP_DET_PACK" ) ) { tag = WP_DET_PACK; } else if ( !Q_stricmp( tokenStr,"WP_BOT_LASER" ) ) { tag = WP_BOT_LASER; } else if ( !Q_stricmp( tokenStr,"WP_EMPLACED_GUN" ) ) { tag = 
WP_EMPLACED_GUN; } else if ( !Q_stricmp( tokenStr,"WP_MELEE" ) ) { tag = WP_MELEE; } else if ( !Q_stricmp( tokenStr,"WP_TURRET" ) ) { tag = WP_TURRET; } else if ( !Q_stricmp( tokenStr,"WP_ATST_MAIN" ) ) { tag = WP_ATST_MAIN; } else if ( !Q_stricmp( tokenStr,"WP_ATST_SIDE" ) ) { tag = WP_ATST_SIDE; } else if ( !Q_stricmp( tokenStr,"WP_TIE_FIGHTER" ) ) { tag = WP_TIE_FIGHTER; } else if ( !Q_stricmp( tokenStr,"WP_RAPID_FIRE_CONC" ) ) { tag = WP_RAPID_FIRE_CONC; } else if ( !Q_stricmp( tokenStr,"WP_BLASTER_PISTOL" ) ) { tag = WP_BLASTER_PISTOL; } else if ( !Q_stricmp( tokenStr,"AMMO_FORCE" ) ) { tag = AMMO_FORCE; } else if ( !Q_stricmp( tokenStr,"AMMO_BLASTER" ) ) { tag = AMMO_BLASTER; } else if ( !Q_stricmp( tokenStr,"AMMO_POWERCELL" ) ) { tag = AMMO_POWERCELL; } else if ( !Q_stricmp( tokenStr,"AMMO_METAL_BOLTS" ) ) { tag = AMMO_METAL_BOLTS; } else if ( !Q_stricmp( tokenStr,"AMMO_ROCKETS" ) ) { tag = AMMO_ROCKETS; } else if ( !Q_stricmp( tokenStr,"AMMO_EMPLACED" ) ) { tag = AMMO_EMPLACED; } else if ( !Q_stricmp( tokenStr,"AMMO_THERMAL" ) ) { tag = AMMO_THERMAL; } else if ( !Q_stricmp( tokenStr,"AMMO_TRIPMINE" ) ) { tag = AMMO_TRIPMINE; } else if ( !Q_stricmp( tokenStr,"AMMO_DETPACK" ) ) { tag = AMMO_DETPACK; } else if ( !Q_stricmp( tokenStr,"FP_HEAL" ) ) { tag = FP_HEAL; } else if ( !Q_stricmp( tokenStr,"FP_LEVITATION" ) ) { tag = FP_LEVITATION; } else if ( !Q_stricmp( tokenStr,"FP_SPEED" ) ) { tag = FP_SPEED; } else if ( !Q_stricmp( tokenStr,"FP_PUSH" ) ) { tag = FP_PUSH; } else if ( !Q_stricmp( tokenStr,"FP_PULL" ) ) { tag = FP_PULL; } else if ( !Q_stricmp( tokenStr,"FP_TELEPATHY" ) ) { tag = FP_TELEPATHY; } else if ( !Q_stricmp( tokenStr,"FP_GRIP" ) ) { tag = FP_GRIP; } else if ( !Q_stricmp( tokenStr,"FP_LIGHTNING" ) ) { tag = FP_LIGHTNING; } else if ( !Q_stricmp( tokenStr,"FP_SABERTHROW" ) ) { tag = FP_SABERTHROW; } else if ( !Q_stricmp( tokenStr,"ITM_BATTERY_PICKUP" ) ) { tag = ITM_BATTERY_PICKUP; } else if ( !Q_stricmp( tokenStr,"INV_SEEKER" ) ) { tag = 
INV_SEEKER; } else if ( !Q_stricmp( tokenStr,"ITM_SHIELD_PICKUP" ) ) { tag = ITM_SHIELD_PICKUP; } else if ( !Q_stricmp( tokenStr,"INV_BACTA_CANISTER" ) ) { tag = INV_BACTA_CANISTER; } else if ( !Q_stricmp( tokenStr,"ITM_DATAPAD_PICKUP" ) ) { tag = ITM_DATAPAD_PICKUP; } else if ( !Q_stricmp( tokenStr,"INV_ELECTROBINOCULARS" ) ) { tag = INV_ELECTROBINOCULARS; } else if ( !Q_stricmp( tokenStr,"INV_SENTRY" ) ) { tag = INV_SENTRY; } else if ( !Q_stricmp( tokenStr,"INV_LIGHTAMP_GOGGLES" ) ) { tag = INV_LIGHTAMP_GOGGLES; } else if ( !Q_stricmp( tokenStr,"INV_GOODIE_KEY" ) ) { tag = INV_GOODIE_KEY; } else if ( !Q_stricmp( tokenStr,"INV_SECURITY_KEY" ) ) { tag = INV_SECURITY_KEY; } else if ( !Q_stricmp( tokenStr,"ITM_MEDPAK_PICKUP" ) ) { tag = ITM_MEDPAK_PICKUP; } else if ( !Q_stricmp( tokenStr,"ITM_SHIELD_SM_PICKUP" ) ) { tag = ITM_SHIELD_SM_PICKUP; } else if ( !Q_stricmp( tokenStr,"ITM_SHIELD_LRG_PICKUP" ) ) { tag = ITM_SHIELD_LRG_PICKUP; } else { tag = WP_BRYAR_PISTOL; gi.Printf( "WARNING: bad tagname in external item data '%s'\n", tokenStr ); } bg_itemlist[itemNum].giTag = tag; } static void IT_Type( const char **holdBuf ) { int type; const char *tokenStr; if ( COM_ParseString( holdBuf, &tokenStr ) ) { return; } if ( !Q_stricmp( tokenStr, "IT_BAD" ) ) { type = IT_BAD; } else if ( !Q_stricmp( tokenStr, "IT_WEAPON" ) ) { type = IT_WEAPON; } else if ( !Q_stricmp( tokenStr, "IT_AMMO" ) ) { type = IT_AMMO; } else if ( !Q_stricmp( tokenStr, "IT_ARMOR" ) ) { type = IT_ARMOR; } else if ( !Q_stricmp( tokenStr, "IT_HEALTH" ) ) { type = IT_HEALTH; } else if ( !Q_stricmp( tokenStr, "IT_HOLDABLE" ) ) { type = IT_HOLDABLE; } else if ( !Q_stricmp( tokenStr, "IT_BATTERY" ) ) { type = IT_BATTERY; } else if ( !Q_stricmp( tokenStr, "IT_HOLOCRON" ) ) { type = IT_HOLOCRON; } else { type = IT_BAD; gi.Printf( "WARNING: bad itemname in external item data '%s'\n", tokenStr ); } bg_itemlist[itemNum].giType = (itemType_t)type; } static void IT_Icon( const char **holdBuf ) { int len; const char 
*tokenStr; if ( COM_ParseString( holdBuf, &tokenStr ) ) { return; } len = strlen( tokenStr ) + 1; if ( len > 32 ) { len = 32; gi.Printf( "WARNING: icon too long in external ITEMS.DAT '%s'\n", tokenStr ); } bg_itemlist[itemNum].icon = G_NewString( tokenStr ); } static void IT_Count( const char **holdBuf ) { int tokenInt; if ( COM_ParseInt( holdBuf, &tokenInt ) ) { SkipRestOfLine( holdBuf ); return; } if ( tokenInt < 0 || tokenInt > 1000 ) { gi.Printf( "WARNING: bad Count in external item data '%d'\n", tokenInt ); return; } bg_itemlist[itemNum].quantity = tokenInt; } static void IT_Min( const char **holdBuf ) { int tokenInt; for ( int i = 0; i < 3; i++ ) { if ( COM_ParseInt( holdBuf, &tokenInt ) ) { SkipRestOfLine( holdBuf ); return; } bg_itemlist[itemNum].mins[i] = tokenInt; } } static void IT_Max( const char **holdBuf ) { int tokenInt; for ( int i = 0; i < 3; i++ ) { if ( COM_ParseInt( holdBuf, &tokenInt ) ) { SkipRestOfLine( holdBuf ); return; } bg_itemlist[itemNum].maxs[i] = tokenInt; } } static void IT_PickupSound( const char **holdBuf ) { int len; const char *tokenStr; if ( COM_ParseString( holdBuf, &tokenStr ) ) { return; } len = strlen( tokenStr ) + 1; if ( len > 32 ) { len = 32; gi.Printf("WARNING: Pickup Sound too long in external ITEMS.DAT '%s'\n", tokenStr); } bg_itemlist[itemNum].pickup_sound = G_NewString(tokenStr); } static void IT_ParseWeaponParms( const char **holdBuf ) { int i; while ( holdBuf ) { const char *token = COM_ParseExt( holdBuf, qtrue ); if ( !Q_stricmp( token, "}" ) ) { // end of data for this weapon break; } // loop through possible parameters for ( i = 0; i < IT_PARM_MAX; i++ ) { if ( !Q_stricmp( token, ItemParms[i].parmName ) ) { ItemParms[i].func( holdBuf ); break; } } if ( i < IT_PARM_MAX ) { // find parameter??? 
continue; } Com_Printf( S_COLOR_YELLOW "WARNING: bad parameter in external item data '%s'\n", token ); SkipRestOfLine( holdBuf ); } } static void IT_ParseParms( const char *buffer ) { const char *holdBuf, *token; holdBuf = buffer; COM_BeginParseSession(); while ( holdBuf ) { token = COM_ParseExt( &holdBuf, qtrue ); if ( !Q_stricmp( token, "{" ) ) { IT_ParseWeaponParms( &holdBuf ); } } COM_EndParseSession(); } void IT_LoadItemParms( void ) { char *buffer; gi.FS_ReadFile( "ext_data/items.dat", (void **)&buffer ); IT_ParseParms( buffer ); gi.FS_FreeFile( buffer ); }
gpl-2.0
kmtoki/qmk_firmware
layouts/community/65_ansi/mechmerlin/keymap.c
24
4816
/* Copyright 2019 MechMerlin * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include QMK_KEYBOARD_H #include "mechmerlin.h" const uint16_t PROGMEM keymaps[][MATRIX_ROWS][MATRIX_COLS] = { /* * ┌───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───┬───────┬───┐ * │ ` │ 1 │ 2 │ 3 │ 4 │ 5 │ 6 │ 7 │ 8 │ 9 │ 0 │ - │ = │ Backsp│Hom│ * ├───┴─┬─┴─┬─┴─┬─┴─┬─┴─┬─┴─┬─┴─┬─┴─┬─┴─┬─┴─┬─┴─┬─┴─┬─┴─┬─────┼───┤ * │ Tab │ Q │ W │ E │ R │ T │ Y │ U │ I │ O │ P │ [ │ ] │ \ │PgU│ * ├─────┴┬──┴┬──┴┬──┴┬──┴┬──┴┬──┴┬──┴┬──┴┬──┴┬──┴┬──┴┬──┴─────┼───┤ * │ Ctrl │ A │ S │ D │ F │ G │ H │ J │ K │ L │ ; │ ' │ Enter │PgD│ * ├──────┴─┬─┴─┬─┴─┬─┴─┬─┴─┬─┴─┬─┴─┬─┴─┬─┴─┬─┴─┬─┴─┬─┴────┬───┼───┤ * │ Shift │ Z │ X │ C │ V │ B │ N │ M │ , │ . 
│ / │ Shift│ ↑ │End│ * ├────┬───┴┬──┴─┬─┴───┴───┴───┴───┴───┴──┬┴──┬┴──┬┴──┬───┼───┼───┤ * │Ctrl│GUI │Alt │ │Alt│FN │Ctl│ ← │ ↓ │ → │ * └────┴────┴────┴────────────────────────┴───┴───┴───┴───┴───┴───┘ */ [_BL] = LAYOUT_65_ansi( KC_GESC, KC_1, KC_2, KC_3, KC_4, KC_5, KC_6, KC_7, KC_8, KC_9, KC_0, KC_MINS, KC_EQL, KC_BSPC, KC_HOME, KC_TAB, KC_Q, KC_W, KC_E, KC_R, KC_T, KC_Y, KC_U, KC_I, KC_O, KC_P, KC_LBRC, KC_RBRC, KC_BSLS, KC_PGUP, KC_LCTL, KC_A, KC_S, KC_D, KC_F, KC_G, KC_H, KC_J, KC_K, KC_L, KC_SCLN, KC_QUOT, KC_ENT, KC_PGDN, KC_LSFT, KC_Z, KC_X, KC_C, KC_V, KC_B, KC_N, KC_M, KC_COMM, KC_DOT, KC_SLSH, KC_RSFT, KC_UP, KC_END, KC_LCTL, KC_LGUI, KC_LALT, KC_SPC, KC_RALT, MO(_FL), KC_RCTL, KC_LEFT, KC_DOWN, KC_RGHT ), [_FL] = LAYOUT_65_ansi( KC_GRV, KC_F1, KC_F2, KC_F3, KC_F4, KC_F5, KC_F6, KC_F7, KC_F8, KC_F9, KC_F10, KC_F11, KC_F12, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, RGB_TOG, RGB_MOD, RGB_HUI, RGB_SAI, RGB_VAI, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, RGB_RMOD,RGB_HUD, RGB_SAD, RGB_VAD, _______, _______, KC_MUTE, KC_VOLD, KC_VOLU, _______, _______, KC_PGUP, _______, _______, _______, _______, MO(_CL), _______, _______, _______, KC_HOME, KC_PGDN, KC_END ), [_CL] = LAYOUT_65_ansi( _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, EEP_RST, RESET, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, KC_VER, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______ ), };
gpl-2.0
clearwater/chumby-linux
arch/x86/kernel/dumpstack_32.c
24
9924
/* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs */ #include <linux/kallsyms.h> #include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/utsname.h> #include <linux/hardirq.h> #include <linux/kdebug.h> #include <linux/module.h> #include <linux/ptrace.h> #include <linux/kexec.h> #include <linux/bug.h> #include <linux/nmi.h> #include <linux/sysfs.h> #include <asm/stacktrace.h> #define STACKSLOTS_PER_LINE 8 #define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :) int panic_on_unrecovered_nmi; int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE; static unsigned int code_bytes = 64; static int die_counter; void printk_address(unsigned long address, int reliable) { printk(" [<%p>] %s%pS\n", (void *) address, reliable ? "" : "? ", (void *) address); } static inline int valid_stack_ptr(struct thread_info *tinfo, void *p, unsigned int size, void *end) { void *t = tinfo; if (end) { if (p < end && p >= (end-THREAD_SIZE)) return 1; else return 0; } return p > t && p < t + THREAD_SIZE - size; } /* The form of the top of the frame on the stack */ struct stack_frame { struct stack_frame *next_frame; unsigned long return_address; }; static inline unsigned long print_context_stack(struct thread_info *tinfo, unsigned long *stack, unsigned long bp, const struct stacktrace_ops *ops, void *data, unsigned long *end) { struct stack_frame *frame = (struct stack_frame *)bp; while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) { unsigned long addr; addr = *stack; if (__kernel_text_address(addr)) { if ((unsigned long) stack == bp + sizeof(long)) { ops->address(data, addr, 1); frame = frame->next_frame; bp = (unsigned long) frame; } else { ops->address(data, addr, bp == 0); } } stack++; } return bp; } void dump_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, unsigned long bp, const struct stacktrace_ops *ops, void *data) { if (!task) task = current; if (!stack) { unsigned long dummy; stack = &dummy; 
if (task && task != current) stack = (unsigned long *)task->thread.sp; } #ifdef CONFIG_FRAME_POINTER if (!bp) { if (task == current) { /* Grab bp right from our regs */ get_bp(bp); } else { /* bp is the last reg pushed by switch_to */ bp = *(unsigned long *) task->thread.sp; } } #endif for (;;) { struct thread_info *context; context = (struct thread_info *) ((unsigned long)stack & (~(THREAD_SIZE - 1))); bp = print_context_stack(context, stack, bp, ops, data, NULL); stack = (unsigned long *)context->previous_esp; if (!stack) break; if (ops->stack(data, "IRQ") < 0) break; touch_nmi_watchdog(); } } EXPORT_SYMBOL(dump_trace); static void print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) { printk(data); print_symbol(msg, symbol); printk("\n"); } static void print_trace_warning(void *data, char *msg) { printk("%s%s\n", (char *)data, msg); } static int print_trace_stack(void *data, char *name) { printk("%s <%s> ", (char *)data, name); return 0; } /* * Print one address/symbol entries per line. 
*/ static void print_trace_address(void *data, unsigned long addr, int reliable) { touch_nmi_watchdog(); printk(data); printk_address(addr, reliable); } static const struct stacktrace_ops print_trace_ops = { .warning = print_trace_warning, .warning_symbol = print_trace_warning_symbol, .stack = print_trace_stack, .address = print_trace_address, }; static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, unsigned long bp, char *log_lvl) { printk("%sCall Trace:\n", log_lvl); dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); } void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, unsigned long bp) { show_trace_log_lvl(task, regs, stack, bp, ""); } static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *sp, unsigned long bp, char *log_lvl) { unsigned long *stack; int i; if (sp == NULL) { if (task) sp = (unsigned long *)task->thread.sp; else sp = (unsigned long *)&sp; } stack = sp; for (i = 0; i < kstack_depth_to_print; i++) { if (kstack_end(stack)) break; if (i && ((i % STACKSLOTS_PER_LINE) == 0)) printk("\n%s", log_lvl); printk(" %08lx", *stack++); touch_nmi_watchdog(); } printk("\n"); show_trace_log_lvl(task, regs, sp, bp, log_lvl); } void show_stack(struct task_struct *task, unsigned long *sp) { show_stack_log_lvl(task, NULL, sp, 0, ""); } /* * The architecture-independent dump_stack generator */ void dump_stack(void) { unsigned long bp = 0; unsigned long stack; #ifdef CONFIG_FRAME_POINTER if (!bp) get_bp(bp); #endif printk("Pid: %d, comm: %.20s %s %s %.*s\n", current->pid, current->comm, print_tainted(), init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); show_trace(NULL, NULL, &stack, bp); } EXPORT_SYMBOL(dump_stack); void show_registers(struct pt_regs *regs) { int i; print_modules(); __show_regs(regs, 0); printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n", TASK_COMM_LEN, 
current->comm, task_pid_nr(current), current_thread_info(), current, task_thread_info(current)); /* * When in-kernel, we also print out the stack and code at the * time of the fault.. */ if (!user_mode_vm(regs)) { unsigned int code_prologue = code_bytes * 43 / 64; unsigned int code_len = code_bytes; unsigned char c; u8 *ip; printk(KERN_EMERG "Stack:\n"); show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG); printk(KERN_EMERG "Code: "); ip = (u8 *)regs->ip - code_prologue; if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { /* try starting at IP */ ip = (u8 *)regs->ip; code_len = code_len - code_prologue + 1; } for (i = 0; i < code_len; i++, ip++) { if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { printk(" Bad EIP value."); break; } if (ip == (u8 *)regs->ip) printk("<%02x> ", c); else printk("%02x ", c); } } printk("\n"); } int is_valid_bugaddr(unsigned long ip) { unsigned short ud2; if (ip < PAGE_OFFSET) return 0; if (probe_kernel_address((unsigned short *)ip, ud2)) return 0; return ud2 == 0x0b0f; } static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; static int die_owner = -1; static unsigned int die_nest_count; unsigned __kprobes long oops_begin(void) { unsigned long flags; oops_enter(); if (die_owner != raw_smp_processor_id()) { console_verbose(); raw_local_irq_save(flags); __raw_spin_lock(&die_lock); die_owner = smp_processor_id(); die_nest_count = 0; bust_spinlocks(1); } else { raw_local_irq_save(flags); } die_nest_count++; return flags; } void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) { bust_spinlocks(0); die_owner = -1; add_taint(TAINT_DIE); __raw_spin_unlock(&die_lock); raw_local_irq_restore(flags); if (!regs) return; if (kexec_should_crash(current)) crash_kexec(regs); if (in_interrupt()) panic("Fatal exception in interrupt"); if (panic_on_oops) panic("Fatal exception"); oops_exit(); do_exit(signr); } int __kprobes __die(const char *str, struct pt_regs *regs, long err) { unsigned short ss; 
unsigned long sp; printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); #ifdef CONFIG_PREEMPT printk("PREEMPT "); #endif #ifdef CONFIG_SMP printk("SMP "); #endif #ifdef CONFIG_DEBUG_PAGEALLOC printk("DEBUG_PAGEALLOC"); #endif printk("\n"); sysfs_printk_last_file(); if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) return 1; show_registers(regs); /* Executive summary in case the oops scrolled away */ sp = (unsigned long) (&regs->sp); savesegment(ss, ss); if (user_mode(regs)) { sp = regs->sp; ss = regs->ss & 0xffff; } printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip); print_symbol("%s", regs->ip); printk(" SS:ESP %04x:%08lx\n", ss, sp); return 0; } /* * This is gone through when something in the kernel has done something bad * and is about to be terminated: */ void die(const char *str, struct pt_regs *regs, long err) { unsigned long flags = oops_begin(); if (die_nest_count < 3) { report_bug(regs->ip, regs); if (__die(str, regs, err)) regs = NULL; } else { printk(KERN_EMERG "Recursive die() failure, output suppressed\n"); } oops_end(flags, regs, SIGSEGV); } static DEFINE_SPINLOCK(nmi_print_lock); void notrace __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic) { if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP) return; spin_lock(&nmi_print_lock); /* * We are in trouble anyway, lets at least try * to get a message out: */ bust_spinlocks(1); printk(KERN_EMERG "%s", str); printk(" on CPU%d, ip %08lx, registers:\n", smp_processor_id(), regs->ip); show_registers(regs); if (do_panic) panic("Non maskable interrupt"); console_silent(); spin_unlock(&nmi_print_lock); /* * If we are in kernel we are probably nested up pretty bad * and might aswell get out now while we still can: */ if (!user_mode_vm(regs)) { current->thread.trap_no = 2; crash_kexec(regs); } bust_spinlocks(0); do_exit(SIGSEGV); } static int __init oops_setup(char *s) { if (!s) return -EINVAL; if (!strcmp(s, "panic")) 
panic_on_oops = 1; return 0; } early_param("oops", oops_setup); static int __init kstack_setup(char *s) { if (!s) return -EINVAL; kstack_depth_to_print = simple_strtoul(s, NULL, 0); return 0; } early_param("kstack", kstack_setup); static int __init code_bytes_setup(char *s) { code_bytes = simple_strtoul(s, NULL, 0); if (code_bytes > 8192) code_bytes = 8192; return 1; } __setup("code_bytes=", code_bytes_setup);
gpl-2.0
parvata/liquid-chocolate-jb
arch/arm/mm/mmu.c
24
26186
/* * linux/arch/arm/mm/mmu.c * * Copyright (C) 1995-2005 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/mman.h> #include <linux/nodemask.h> #include <asm/cputype.h> #include <asm/mach-types.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/sizes.h> #include <asm/tlb.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include "mm.h" DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); /* * empty_zero_page is a special page that is used for * zero-initialized data and COW. */ struct page *empty_zero_page; EXPORT_SYMBOL(empty_zero_page); /* * The pmd table for the upper-most set of pages. */ pmd_t *top_pmd; #define CPOLICY_UNCACHED 0 #define CPOLICY_BUFFERED 1 #define CPOLICY_WRITETHROUGH 2 #define CPOLICY_WRITEBACK 3 #define CPOLICY_WRITEALLOC 4 static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK; static unsigned int ecc_mask __initdata = 0; pgprot_t pgprot_user; pgprot_t pgprot_kernel; EXPORT_SYMBOL(pgprot_user); EXPORT_SYMBOL(pgprot_kernel); struct cachepolicy { const char policy[16]; unsigned int cr_mask; unsigned int pmd; unsigned int pte; }; static struct cachepolicy cache_policies[] __initdata = { { .policy = "uncached", .cr_mask = CR_W|CR_C, .pmd = PMD_SECT_UNCACHED, .pte = L_PTE_MT_UNCACHED, }, { .policy = "buffered", .cr_mask = CR_C, .pmd = PMD_SECT_BUFFERED, .pte = L_PTE_MT_BUFFERABLE, }, { .policy = "writethrough", .cr_mask = 0, .pmd = PMD_SECT_WT, .pte = L_PTE_MT_WRITETHROUGH, }, { .policy = "writeback", .cr_mask = 0, .pmd = PMD_SECT_WB, .pte = L_PTE_MT_WRITEBACK, }, { .policy = "writealloc", .cr_mask = 0, .pmd = PMD_SECT_WBWA, .pte = L_PTE_MT_WRITEALLOC, } }; /* * These are useful for identifying cache coherency * problems by allowing 
the cache or the cache and * writebuffer to be turned off. (Note: the write * buffer should not be on and the cache off). */ static void __init early_cachepolicy(char **p) { int i; for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { int len = strlen(cache_policies[i].policy); if (memcmp(*p, cache_policies[i].policy, len) == 0) { cachepolicy = i; cr_alignment &= ~cache_policies[i].cr_mask; cr_no_alignment &= ~cache_policies[i].cr_mask; *p += len; break; } } if (i == ARRAY_SIZE(cache_policies)) printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n"); if (cpu_architecture() >= CPU_ARCH_ARMv6) { printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n"); cachepolicy = CPOLICY_WRITEBACK; } flush_cache_all(); set_cr(cr_alignment); } __early_param("cachepolicy=", early_cachepolicy); static void __init early_nocache(char **__unused) { char *p = "buffered"; printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p); early_cachepolicy(&p); } __early_param("nocache", early_nocache); static void __init early_nowrite(char **__unused) { char *p = "uncached"; printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p); early_cachepolicy(&p); } __early_param("nowb", early_nowrite); static void __init early_ecc(char **p) { if (memcmp(*p, "on", 2) == 0) { ecc_mask = PMD_PROTECTION; *p += 2; } else if (memcmp(*p, "off", 3) == 0) { ecc_mask = 0; *p += 3; } } __early_param("ecc=", early_ecc); static int __init noalign_setup(char *__unused) { cr_alignment &= ~CR_A; cr_no_alignment &= ~CR_A; set_cr(cr_alignment); return 1; } __setup("noalign", noalign_setup); #ifndef CONFIG_SMP void adjust_cr(unsigned long mask, unsigned long set) { unsigned long flags; mask &= ~CR_A; set &= mask; local_irq_save(flags); cr_no_alignment = (cr_no_alignment & ~mask) | set; cr_alignment = (cr_alignment & ~mask) | set; set_cr((get_cr() & ~mask) | set); local_irq_restore(flags); } #endif #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE 
#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE static struct mem_type mem_types[] = { [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | L_PTE_SHARED, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S, .domain = DOMAIN_IO, }, [MT_DEVICE_STRONGLY_ORDERED] = { /* Guaranteed strongly ordered */ .prot_pte = PROT_PTE_DEVICE, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PROT_SECT_DEVICE | PMD_SECT_UNCACHED, .domain = DOMAIN_IO, }, [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PROT_SECT_DEVICE, .domain = DOMAIN_IO, }, [MT_DEVICE_CACHED] = { /* ioremap_cached */ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB, .domain = DOMAIN_IO, }, [MT_DEVICE_WC] = { /* ioremap_wc */ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PROT_SECT_DEVICE, .domain = DOMAIN_IO, }, [MT_UNCACHED] = { .prot_pte = PROT_PTE_DEVICE, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, .domain = DOMAIN_IO, }, [MT_CACHECLEAN] = { .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, .domain = DOMAIN_KERNEL, }, [MT_MINICLEAN] = { .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE, .domain = DOMAIN_KERNEL, }, [MT_LOW_VECTORS] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_EXEC, .prot_l1 = PMD_TYPE_TABLE, .domain = DOMAIN_USER, }, [MT_HIGH_VECTORS] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_USER | L_PTE_EXEC, .prot_l1 = PMD_TYPE_TABLE, .domain = DOMAIN_USER, }, [MT_MEMORY] = { .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, .domain = DOMAIN_KERNEL, }, [MT_ROM] = { .prot_sect = PMD_TYPE_SECT, .domain = DOMAIN_KERNEL, }, }; const struct mem_type *get_mem_type(unsigned int type) { return type < ARRAY_SIZE(mem_types) ? 
&mem_types[type] : NULL; } /* * Adjust the PMD section entries according to the CPU in use. */ static void __init build_mem_type_table(void) { struct cachepolicy *cp; unsigned int cr = get_cr(); unsigned int user_pgprot, kern_pgprot, vecs_pgprot; int cpu_arch = cpu_architecture(); int i; if (cpu_arch < CPU_ARCH_ARMv6) { #if defined(CONFIG_CPU_DCACHE_DISABLE) if (cachepolicy > CPOLICY_BUFFERED) cachepolicy = CPOLICY_BUFFERED; #elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH) if (cachepolicy > CPOLICY_WRITETHROUGH) cachepolicy = CPOLICY_WRITETHROUGH; #endif } if (cpu_arch < CPU_ARCH_ARMv5) { if (cachepolicy >= CPOLICY_WRITEALLOC) cachepolicy = CPOLICY_WRITEBACK; ecc_mask = 0; } #ifdef CONFIG_SMP cachepolicy = CPOLICY_WRITEALLOC; #endif /* * Strip out features not present on earlier architectures. * Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those * without extended page tables don't have the 'Shared' bit. */ if (cpu_arch < CPU_ARCH_ARMv5) for (i = 0; i < ARRAY_SIZE(mem_types); i++) mem_types[i].prot_sect &= ~PMD_SECT_TEX(7); if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3()) for (i = 0; i < ARRAY_SIZE(mem_types); i++) mem_types[i].prot_sect &= ~PMD_SECT_S; /* * ARMv5 and lower, bit 4 must be set for page tables (was: cache * "update-able on write" bit on ARM610). However, Xscale and * Xscale3 require this bit to be cleared. */ if (cpu_is_xscale() || cpu_is_xsc3()) { for (i = 0; i < ARRAY_SIZE(mem_types); i++) { mem_types[i].prot_sect &= ~PMD_BIT4; mem_types[i].prot_l1 &= ~PMD_BIT4; } } else if (cpu_arch < CPU_ARCH_ARMv6) { for (i = 0; i < ARRAY_SIZE(mem_types); i++) { if (mem_types[i].prot_l1) mem_types[i].prot_l1 |= PMD_BIT4; if (mem_types[i].prot_sect) mem_types[i].prot_sect |= PMD_BIT4; } } /* * Mark the device areas according to the CPU/architecture. */ if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) { if (!cpu_is_xsc3()) { /* * Mark device regions on ARMv6+ as execute-never * to prevent speculative instruction fetches. 
*/ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN; mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN; mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN; mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN; mem_types[MT_DEVICE_STRONGLY_ORDERED].prot_sect |= PMD_SECT_XN; } if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { /* * For ARMv7 with TEX remapping, * - shared device is SXCB=1100 * - nonshared device is SXCB=0100 * - write combine device mem is SXCB=0001 * (Uncached Normal memory) */ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1); mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1); mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE; } else if (cpu_is_xsc3()) { /* * For Xscale3, * - shared device is TEXCB=00101 * - nonshared device is TEXCB=01000 * - write combine device mem is TEXCB=00100 * (Inner/Outer Uncacheable in xsc3 parlance) */ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED; mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2); mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1); } else { /* * For ARMv6 and ARMv7 without TEX remapping, * - shared device is TEXCB=00001 * - nonshared device is TEXCB=01000 * - write combine device mem is TEXCB=00100 * (Uncached Normal in ARMv6 parlance). */ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED; mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2); mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1); } } else { /* * On others, write combining is "Uncached/Buffered" */ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE; } /* * Now deal with the memory-type mappings */ cp = &cache_policies[cachepolicy]; vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; #ifndef CONFIG_SMP /* * Only use write-through for non-SMP systems */ if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH) vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte; #endif /* * Enable CPU-specific coherency if supported. * (Only available on XSC3 at the moment.) 
*/ if (arch_is_coherent() && cpu_is_xsc3()) mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; /* * ARMv6 and above have extended page tables. */ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) { /* * Mark cache clean areas and XIP ROM read only * from SVC mode and no access from userspace. */ mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; #ifdef CONFIG_SMP /* * Mark memory with the "shared" attribute for SMP systems */ user_pgprot |= L_PTE_SHARED; kern_pgprot |= L_PTE_SHARED; vecs_pgprot |= L_PTE_SHARED; mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; #endif } for (i = 0; i < 16; i++) { unsigned long v = pgprot_val(protection_map[i]); protection_map[i] = __pgprot(v | user_pgprot); } mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot; mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot; pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | L_PTE_EXEC | kern_pgprot); mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; mem_types[MT_ROM].prot_sect |= cp->pmd; switch (cp->pmd) { case PMD_SECT_WT: mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT; break; case PMD_SECT_WB: case PMD_SECT_WBWA: mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB; break; } printk("Memory policy: ECC %sabled, Data cache %s\n", ecc_mask ? "en" : "dis", cp->policy); for (i = 0; i < ARRAY_SIZE(mem_types); i++) { struct mem_type *t = &mem_types[i]; if (t->prot_l1) t->prot_l1 |= PMD_DOMAIN(t->domain); if (t->prot_sect) t->prot_sect |= PMD_DOMAIN(t->domain); } } #define vectors_base() (vectors_high() ? 
0xffff0000 : 0) static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, const struct mem_type *type) { pte_t *pte; if (pmd_none(*pmd)) { pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t)); __pmd_populate(pmd, __pa(pte) | type->prot_l1); } pte = pte_offset_kernel(pmd, addr); do { set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0); pfn++; } while (pte++, addr += PAGE_SIZE, addr != end); } static void __init alloc_init_section(pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long phys, const struct mem_type *type) { pmd_t *pmd = pmd_offset(pgd, addr); /* * Try a section mapping - end, addr and phys must all be aligned * to a section boundary. Note that PMDs refer to the individual * L1 entries, whereas PGDs refer to a group of L1 entries making * up one logical pointer to an L2 table. */ if (((addr | end | phys) & ~SECTION_MASK) == 0) { pmd_t *p = pmd; if (addr & SECTION_SIZE) pmd++; do { *pmd = __pmd(phys | type->prot_sect); phys += SECTION_SIZE; } while (pmd++, addr += SECTION_SIZE, addr != end); flush_pmd_entry(p); } else { /* * No need to loop; pte's aren't interested in the * individual L1 entries. */ alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); } } static void __init create_36bit_mapping(struct map_desc *md, const struct mem_type *type) { unsigned long phys, addr, length, end; pgd_t *pgd; addr = md->virtual; phys = (unsigned long)__pfn_to_phys(md->pfn); length = PAGE_ALIGN(md->length); if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) { printk(KERN_ERR "MM: CPU does not support supersection " "mapping for 0x%08llx at 0x%08lx\n", __pfn_to_phys((u64)md->pfn), addr); return; } /* N.B. ARMv6 supersections are only defined to work with domain 0. * Since domain assignments can in fact be arbitrary, the * 'domain == 0' check below is required to insure that ARMv6 * supersections are only allocated for domain 0 regardless * of the actual domain assignments in use. 
*/ if (type->domain) { printk(KERN_ERR "MM: invalid domain in supersection " "mapping for 0x%08llx at 0x%08lx\n", __pfn_to_phys((u64)md->pfn), addr); return; } if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) { printk(KERN_ERR "MM: cannot create mapping for " "0x%08llx at 0x%08lx invalid alignment\n", __pfn_to_phys((u64)md->pfn), addr); return; } /* * Shift bits [35:32] of address into bits [23:20] of PMD * (See ARMv6 spec). */ phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20); pgd = pgd_offset_k(addr); end = addr + length; do { pmd_t *pmd = pmd_offset(pgd, addr); int i; for (i = 0; i < 16; i++) *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER); addr += SUPERSECTION_SIZE; phys += SUPERSECTION_SIZE; pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT; } while (addr != end); } /* * Create the page directory entries and any necessary * page tables for the mapping specified by `md'. We * are able to cope here with varying sizes and address * offsets, and we take full advantage of sections and * supersections. 
*/ void __init create_mapping(struct map_desc *md) { unsigned long phys, addr, length, end; const struct mem_type *type; pgd_t *pgd; if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { printk(KERN_WARNING "BUG: not creating mapping for " "0x%08llx at 0x%08lx in user region\n", __pfn_to_phys((u64)md->pfn), md->virtual); return; } if ((md->type == MT_DEVICE || md->type == MT_ROM) && md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) { printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx " "overlaps vmalloc space\n", __pfn_to_phys((u64)md->pfn), md->virtual); } type = &mem_types[md->type]; /* * Catch 36-bit addresses */ if (md->pfn >= 0x100000) { create_36bit_mapping(md, type); return; } addr = md->virtual & PAGE_MASK; phys = (unsigned long)__pfn_to_phys(md->pfn); length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) { printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not " "be mapped using pages, ignoring.\n", __pfn_to_phys(md->pfn), addr); return; } pgd = pgd_offset_k(addr); end = addr + length; do { unsigned long next = pgd_addr_end(addr, end); alloc_init_section(pgd, addr, next, phys, type); phys += next - addr; addr = next; } while (pgd++, addr != end); } /* * Create the architecture specific mappings */ void __init iotable_init(struct map_desc *io_desc, int nr) { int i; for (i = 0; i < nr; i++) create_mapping(io_desc + i); } static unsigned long __initdata vmalloc_reserve = SZ_128M; /* * vmalloc=size forces the vmalloc area to be exactly 'size' * bytes. This can be used to increase (or decrease) the vmalloc * area - the default is 128m. 
*/ static void __init early_vmalloc(char **arg) { vmalloc_reserve = memparse(*arg, arg); if (vmalloc_reserve < SZ_16M) { vmalloc_reserve = SZ_16M; printk(KERN_WARNING "vmalloc area too small, limiting to %luMB\n", vmalloc_reserve >> 20); } if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); printk(KERN_WARNING "vmalloc area is too big, limiting to %luMB\n", vmalloc_reserve >> 20); } } __early_param("vmalloc=", early_vmalloc); #define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) static void __init sanity_check_meminfo(void) { int i, j; for (i = 0, j = 0; i < meminfo.nr_banks; i++) { struct membank *bank = &meminfo.bank[j]; *bank = meminfo.bank[i]; #ifdef CONFIG_HIGHMEM /* * Split those memory banks which are partially overlapping * the vmalloc area greatly simplifying things later. */ if (__va(bank->start) < VMALLOC_MIN && bank->size > VMALLOC_MIN - __va(bank->start)) { if (meminfo.nr_banks >= NR_BANKS) { printk(KERN_CRIT "NR_BANKS too low, " "ignoring high memory\n"); } else { memmove(bank + 1, bank, (meminfo.nr_banks - i) * sizeof(*bank)); meminfo.nr_banks++; i++; bank[1].size -= VMALLOC_MIN - __va(bank->start); bank[1].start = __pa(VMALLOC_MIN - 1) + 1; j++; } bank->size = VMALLOC_MIN - __va(bank->start); } #else /* * Check whether this memory bank would entirely overlap * the vmalloc area. */ if (__va(bank->start) >= VMALLOC_MIN || __va(bank->start) < (void *)PAGE_OFFSET) { printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx " "(vmalloc region overlap).\n", bank->start, bank->start + bank->size - 1); continue; } /* * Check whether this memory bank would partially overlap * the vmalloc area. 
*/ if (__va(bank->start + bank->size) > VMALLOC_MIN || __va(bank->start + bank->size) < __va(bank->start)) { unsigned long newsize = VMALLOC_MIN - __va(bank->start); printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx " "to -%.8lx (vmalloc region overlap).\n", bank->start, bank->start + bank->size - 1, bank->start + newsize - 1); bank->size = newsize; } #endif j++; } meminfo.nr_banks = j; } static inline void prepare_page_table(void) { unsigned long addr; /* * Clear out all the mappings below the kernel image. */ for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); #ifdef CONFIG_XIP_KERNEL /* The XIP kernel is mapped in the module area -- skip over it */ addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK; #endif for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); /* * Clear out all the kernel space mappings, except for the first * memory bank, up to the end of the vmalloc region. */ for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0])); addr < VMALLOC_END; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); } /* * Reserve the various regions of node 0 */ void __init reserve_node_zero(pg_data_t *pgdat) { unsigned long res_size = 0; /* * Register the kernel text and data with bootmem. * Note that this can only be in node 0. */ #ifdef CONFIG_XIP_KERNEL reserve_bootmem_node(pgdat, __pa(_data), _end - _data, BOOTMEM_DEFAULT); #else reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext, BOOTMEM_DEFAULT); #endif /* * Reserve the page tables. These are already in use, * and can only be in node 0. */ reserve_bootmem_node(pgdat, __pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT); /* * Hmm... This should go elsewhere, but we really really need to * stop things allocating the low memory; ideally we need a better * implementation of GFP_DMA which does not assume that DMA-able * memory starts at zero. 
*/ if (machine_is_integrator() || machine_is_cintegrator()) res_size = __pa(swapper_pg_dir) - PHYS_OFFSET; /* * These should likewise go elsewhere. They pre-reserve the * screen memory region at the start of main system memory. */ if (machine_is_edb7211()) res_size = 0x00020000; if (machine_is_p720t()) res_size = 0x00014000; /* H1940 and RX3715 need to reserve this for suspend */ if (machine_is_h1940() || machine_is_rx3715()) { reserve_bootmem_node(pgdat, 0x30003000, 0x1000, BOOTMEM_DEFAULT); reserve_bootmem_node(pgdat, 0x30081000, 0x1000, BOOTMEM_DEFAULT); } #ifdef CONFIG_SA1111 /* * Because of the SA1111 DMA bug, we want to preserve our * precious DMA-able memory... */ res_size = __pa(swapper_pg_dir) - PHYS_OFFSET; #endif #ifdef CONFIG_ARCH_MSM_SCORPION res_size = PAGE_SIZE; #endif if (res_size) reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size, BOOTMEM_DEFAULT); } /* * Set up device the mappings. Since we clear out the page tables for all * mappings above VMALLOC_END, we will remove any debug device mappings. * This means you have to be careful how you debug this function, or any * called function. This means you can't use any function or debugging * method which may touch any device, otherwise the kernel _will_ crash. */ static void __init devicemaps_init(struct machine_desc *mdesc) { struct map_desc map; unsigned long addr; void *vectors; /* * Allocate the vector page early. */ vectors = alloc_bootmem_low_pages(PAGE_SIZE); for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); /* * Map the kernel if it is XIP. * It is always first in the modulearea. */ #ifdef CONFIG_XIP_KERNEL map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); map.virtual = MODULES_VADDR; map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK; map.type = MT_ROM; create_mapping(&map); #endif /* * Map the cache flushing regions. 
*/ #ifdef FLUSH_BASE map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS); map.virtual = FLUSH_BASE; map.length = SZ_1M; map.type = MT_CACHECLEAN; create_mapping(&map); #endif #ifdef FLUSH_BASE_MINICACHE map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M); map.virtual = FLUSH_BASE_MINICACHE; map.length = SZ_1M; map.type = MT_MINICLEAN; create_mapping(&map); #endif /* * Create a mapping for the machine vectors at the high-vectors * location (0xffff0000). If we aren't using high-vectors, also * create a mapping at the low-vectors virtual address. */ map.pfn = __phys_to_pfn(virt_to_phys(vectors)); map.virtual = 0xffff0000; map.length = PAGE_SIZE; map.type = MT_HIGH_VECTORS; create_mapping(&map); if (!vectors_high()) { map.virtual = 0; map.type = MT_LOW_VECTORS; create_mapping(&map); } /* * Ask the machine support to map in the statically mapped devices. */ if (mdesc->map_io) mdesc->map_io(); /* * Finally flush the caches and tlb to ensure that we're in a * consistent state wrt the writebuffer. This also ensures that * any write-allocated cache lines in the vector page are written * back. After this point, we can start to touch devices again. */ local_flush_tlb_all(); flush_cache_all(); } /* * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. */ void __init paging_init(struct machine_desc *mdesc) { void *zero_page; build_mem_type_table(); sanity_check_meminfo(); prepare_page_table(); bootmem_init(); devicemaps_init(mdesc); top_pmd = pmd_off_k(0xffff0000); /* * allocate the zero page. Note that this always succeeds and * returns a zeroed result. */ zero_page = alloc_bootmem_low_pages(PAGE_SIZE); empty_zero_page = virt_to_page(zero_page); flush_dcache_page(empty_zero_page); } /* * In order to soft-boot, we need to insert a 1:1 mapping in place of * the user-mode pages. 
This will then ensure that we have predictable * results when turning the mmu off */ void setup_mm_for_reboot(char mode) { unsigned long base_pmdval; pgd_t *pgd; int i; if (current->mm && current->mm->pgd) pgd = current->mm->pgd; else pgd = init_mm.pgd; base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) base_pmdval |= PMD_BIT4; for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) { unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval; pmd_t *pmd; pmd = pmd_off(pgd, i << PGDIR_SHIFT); pmd[0] = __pmd(pmdval); pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1))); flush_pmd_entry(pmd); } }
gpl-2.0
rickgaiser/linux-2.4.17-ps2
drivers/char/joystick/spaceball.c
24
5973
/* * $Id: spaceball.c,v 1.8 2000/11/23 11:42:39 vojtech Exp $ * * Copyright (c) 1999-2000 Vojtech Pavlik * * Based on the work of: * David Thompson * Joseph Krahn * * Sponsored by SuSE */ /* * SpaceTec SpaceBall 4000 FLX driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail: * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/serio.h> /* * Constants. */ #define JS_SBALL_MAX_LENGTH 128 static int spaceball_axes[] = { ABS_X, ABS_Z, ABS_Y, ABS_RX, ABS_RZ, ABS_RY }; static char *spaceball_name = "SpaceTec SpaceBall 4000 FLX"; /* * Per-Ball data. */ struct spaceball { struct input_dev dev; struct serio *serio; int idx; int escape; unsigned char data[JS_SBALL_MAX_LENGTH]; }; /* * spaceball_process_packet() decodes packets the driver receives from the * SpaceBall. 
*/ static void spaceball_process_packet(struct spaceball* spaceball) { struct input_dev *dev = &spaceball->dev; unsigned char *data = spaceball->data; int i; if (spaceball->idx < 2) return; printk("%c %d\n", spaceball->data[0], spaceball->idx); switch (spaceball->data[0]) { case '@': /* Reset packet */ spaceball->data[spaceball->idx - 1] = 0; for (i = 1; i < spaceball->idx && spaceball->data[i] == ' '; i++); printk(KERN_INFO "input%d: %s [%s] on serio%d\n", spaceball->dev.number, spaceball_name, spaceball->data + i, spaceball->serio->number); break; case 'D': /* Ball data */ if (spaceball->idx != 15) return; for (i = 0; i < 6; i++) { input_report_abs(dev, spaceball_axes[i], (__s16)((data[2 * i + 3] << 8) | data[2 * i + 2])); } break; case '.': /* Button data, part2 */ if (spaceball->idx != 3) return; input_report_key(dev, BTN_0, data[2] & 1); input_report_key(dev, BTN_1, data[2] & 2); break; case '?': /* Error packet */ spaceball->data[spaceball->idx - 1] = 0; printk(KERN_ERR "spaceball: Device error. [%s]\n", spaceball->data + 1); break; } } /* * Spaceball 4000 FLX packets all start with a one letter packet-type decriptor, * and end in 0x0d. It uses '^' as an escape for CR, XOFF and XON characters which * can occur in the axis values. 
*/ static void spaceball_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct spaceball *spaceball = serio->private; switch (data) { case 0xd: spaceball_process_packet(spaceball); spaceball->idx = 0; spaceball->escape = 0; return; case '^': if (!spaceball->escape) { spaceball->escape = 1; return; } spaceball->escape = 0; case 'M': case 'Q': case 'S': if (spaceball->escape) { spaceball->escape = 0; data &= 0x1f; } default: if (spaceball->escape) { spaceball->escape = 0; printk(KERN_WARNING "spaceball.c: Unknown escaped character: %#x (%c)\n", data, data); } if (spaceball->idx < JS_SBALL_MAX_LENGTH) spaceball->data[spaceball->idx++] = data; return; } } /* * spaceball_disconnect() is the opposite of spaceball_connect() */ static void spaceball_disconnect(struct serio *serio) { struct spaceball* spaceball = serio->private; input_unregister_device(&spaceball->dev); serio_close(serio); kfree(spaceball); } /* * spaceball_connect() is the routine that is called when someone adds a * new serio device. It looks for the Magellan, and if found, registers * it as an input device. */ static void spaceball_connect(struct serio *serio, struct serio_dev *dev) { struct spaceball *spaceball; int i, t; if (serio->type != (SERIO_RS232 | SERIO_SPACEBALL)) return; if (!(spaceball = kmalloc(sizeof(struct spaceball), GFP_KERNEL))) return; memset(spaceball, 0, sizeof(struct spaceball)); spaceball->dev.evbit[0] = BIT(EV_KEY) | BIT(EV_ABS); spaceball->dev.keybit[LONG(BTN_0)] = BIT(BTN_0) | BIT(BTN_1); for (i = 0; i < 6; i++) { t = spaceball_axes[i]; set_bit(t, spaceball->dev.absbit); spaceball->dev.absmin[t] = i < 3 ? -8000 : -1600; spaceball->dev.absmax[t] = i < 3 ? 8000 : 1600; spaceball->dev.absflat[t] = i < 3 ? 40 : 8; spaceball->dev.absfuzz[t] = i < 3 ? 
8 : 2; } spaceball->serio = serio; spaceball->dev.private = spaceball; spaceball->dev.name = spaceball_name; spaceball->dev.idbus = BUS_RS232; spaceball->dev.idvendor = SERIO_SPACEBALL; spaceball->dev.idproduct = 0x0001; spaceball->dev.idversion = 0x0100; serio->private = spaceball; if (serio_open(serio, dev)) { kfree(spaceball); return; } input_register_device(&spaceball->dev); } /* * The serio device structure. */ static struct serio_dev spaceball_dev = { interrupt: spaceball_interrupt, connect: spaceball_connect, disconnect: spaceball_disconnect, }; /* * The functions for inserting/removing us as a module. */ int __init spaceball_init(void) { serio_register_device(&spaceball_dev); return 0; } void __exit spaceball_exit(void) { serio_unregister_device(&spaceball_dev); } module_init(spaceball_init); module_exit(spaceball_exit); MODULE_LICENSE("GPL");
gpl-2.0
Naoya-Horiguchi/linux
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
24
23736
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2018 MediaTek Inc. * * Author: Sean Wang <sean.wang@mediatek.com> * */ #include <dt-bindings/pinctrl/mt65xx.h> #include <linux/device.h> #include <linux/err.h> #include <linux/gpio/driver.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of_irq.h> #include "mtk-eint.h" #include "pinctrl-mtk-common-v2.h" /** * struct mtk_drive_desc - the structure that holds the information * of the driving current * @min: the minimum current of this group * @max: the maximum current of this group * @step: the step current of this group * @scal: the weight factor * * formula: output = ((input) / step - 1) * scal */ struct mtk_drive_desc { u8 min; u8 max; u8 step; u8 scal; }; /* The groups of drive strength */ static const struct mtk_drive_desc mtk_drive[] = { [DRV_GRP0] = { 4, 16, 4, 1 }, [DRV_GRP1] = { 4, 16, 4, 2 }, [DRV_GRP2] = { 2, 8, 2, 1 }, [DRV_GRP3] = { 2, 8, 2, 2 }, [DRV_GRP4] = { 2, 16, 2, 1 }, }; static void mtk_w32(struct mtk_pinctrl *pctl, u8 i, u32 reg, u32 val) { writel_relaxed(val, pctl->base[i] + reg); } static u32 mtk_r32(struct mtk_pinctrl *pctl, u8 i, u32 reg) { return readl_relaxed(pctl->base[i] + reg); } void mtk_rmw(struct mtk_pinctrl *pctl, u8 i, u32 reg, u32 mask, u32 set) { u32 val; unsigned long flags; spin_lock_irqsave(&pctl->lock, flags); val = mtk_r32(pctl, i, reg); val &= ~mask; val |= set; mtk_w32(pctl, i, reg, val); spin_unlock_irqrestore(&pctl->lock, flags); } static int mtk_hw_pin_field_lookup(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, int field, struct mtk_pin_field *pfd) { const struct mtk_pin_field_calc *c; const struct mtk_pin_reg_calc *rc; int start = 0, end, check; bool found = false; u32 bits; if (hw->soc->reg_cal && hw->soc->reg_cal[field].range) { rc = &hw->soc->reg_cal[field]; } else { dev_dbg(hw->dev, "Not support field %d for this soc\n", field); return -ENOTSUPP; } end = rc->nranges - 1; while (start <= end) { check 
= (start + end) >> 1; if (desc->number >= rc->range[check].s_pin && desc->number <= rc->range[check].e_pin) { found = true; break; } else if (start == end) break; else if (desc->number < rc->range[check].s_pin) end = check - 1; else start = check + 1; } if (!found) { dev_dbg(hw->dev, "Not support field %d for pin = %d (%s)\n", field, desc->number, desc->name); return -ENOTSUPP; } c = rc->range + check; if (c->i_base > hw->nbase - 1) { dev_err(hw->dev, "Invalid base for field %d for pin = %d (%s)\n", field, desc->number, desc->name); return -EINVAL; } /* Calculated bits as the overall offset the pin is located at, * if c->fixed is held, that determines the all the pins in the * range use the same field with the s_pin. */ bits = c->fixed ? c->s_bit : c->s_bit + (desc->number - c->s_pin) * (c->x_bits); /* Fill pfd from bits. For example 32-bit register applied is assumed * when c->sz_reg is equal to 32. */ pfd->index = c->i_base; pfd->offset = c->s_addr + c->x_addrs * (bits / c->sz_reg); pfd->bitpos = bits % c->sz_reg; pfd->mask = (1 << c->x_bits) - 1; /* pfd->next is used for indicating that bit wrapping-around happens * which requires the manipulation for bit 0 starting in the next * register to form the complete field read/write. */ pfd->next = pfd->bitpos + c->x_bits > c->sz_reg ? 
c->x_addrs : 0; return 0; } static int mtk_hw_pin_field_get(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, int field, struct mtk_pin_field *pfd) { if (field < 0 || field >= PINCTRL_PIN_REG_MAX) { dev_err(hw->dev, "Invalid Field %d\n", field); return -EINVAL; } return mtk_hw_pin_field_lookup(hw, desc, field, pfd); } static void mtk_hw_bits_part(struct mtk_pin_field *pf, int *h, int *l) { *l = 32 - pf->bitpos; *h = get_count_order(pf->mask) - *l; } static void mtk_hw_write_cross_field(struct mtk_pinctrl *hw, struct mtk_pin_field *pf, int value) { int nbits_l, nbits_h; mtk_hw_bits_part(pf, &nbits_h, &nbits_l); mtk_rmw(hw, pf->index, pf->offset, pf->mask << pf->bitpos, (value & pf->mask) << pf->bitpos); mtk_rmw(hw, pf->index, pf->offset + pf->next, BIT(nbits_h) - 1, (value & pf->mask) >> nbits_l); } static void mtk_hw_read_cross_field(struct mtk_pinctrl *hw, struct mtk_pin_field *pf, int *value) { int nbits_l, nbits_h, h, l; mtk_hw_bits_part(pf, &nbits_h, &nbits_l); l = (mtk_r32(hw, pf->index, pf->offset) >> pf->bitpos) & (BIT(nbits_l) - 1); h = (mtk_r32(hw, pf->index, pf->offset + pf->next)) & (BIT(nbits_h) - 1); *value = (h << nbits_l) | l; } int mtk_hw_set_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, int field, int value) { struct mtk_pin_field pf; int err; err = mtk_hw_pin_field_get(hw, desc, field, &pf); if (err) return err; if (value < 0 || value > pf.mask) return -EINVAL; if (!pf.next) mtk_rmw(hw, pf.index, pf.offset, pf.mask << pf.bitpos, (value & pf.mask) << pf.bitpos); else mtk_hw_write_cross_field(hw, &pf, value); return 0; } EXPORT_SYMBOL_GPL(mtk_hw_set_value); int mtk_hw_get_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, int field, int *value) { struct mtk_pin_field pf; int err; err = mtk_hw_pin_field_get(hw, desc, field, &pf); if (err) return err; if (!pf.next) *value = (mtk_r32(hw, pf.index, pf.offset) >> pf.bitpos) & pf.mask; else mtk_hw_read_cross_field(hw, &pf, value); return 0; } 
EXPORT_SYMBOL_GPL(mtk_hw_get_value); static int mtk_xt_find_eint_num(struct mtk_pinctrl *hw, unsigned long eint_n) { const struct mtk_pin_desc *desc; int i = 0; desc = (const struct mtk_pin_desc *)hw->soc->pins; while (i < hw->soc->npins) { if (desc[i].eint.eint_n == eint_n) return desc[i].number; i++; } return EINT_NA; } /* * Virtual GPIO only used inside SOC and not being exported to outside SOC. * Some modules use virtual GPIO as eint (e.g. pmif or usb). * In MTK platform, external interrupt (EINT) and GPIO is 1-1 mapping * and we can set GPIO as eint. * But some modules use specific eint which doesn't have real GPIO pin. * So we use virtual GPIO to map it. */ bool mtk_is_virt_gpio(struct mtk_pinctrl *hw, unsigned int gpio_n) { const struct mtk_pin_desc *desc; bool virt_gpio = false; desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio_n]; /* if the GPIO is not supported for eint mode */ if (desc->eint.eint_m == NO_EINT_SUPPORT) return virt_gpio; if (desc->funcs && !desc->funcs[desc->eint.eint_m].name) virt_gpio = true; return virt_gpio; } EXPORT_SYMBOL_GPL(mtk_is_virt_gpio); static int mtk_xt_get_gpio_n(void *data, unsigned long eint_n, unsigned int *gpio_n, struct gpio_chip **gpio_chip) { struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data; const struct mtk_pin_desc *desc; desc = (const struct mtk_pin_desc *)hw->soc->pins; *gpio_chip = &hw->chip; /* Be greedy to guess first gpio_n is equal to eint_n */ if (desc[eint_n].eint.eint_n == eint_n) *gpio_n = eint_n; else *gpio_n = mtk_xt_find_eint_num(hw, eint_n); return *gpio_n == EINT_NA ? 
-EINVAL : 0; } static int mtk_xt_get_gpio_state(void *data, unsigned long eint_n) { struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data; const struct mtk_pin_desc *desc; struct gpio_chip *gpio_chip; unsigned int gpio_n; int value, err; err = mtk_xt_get_gpio_n(hw, eint_n, &gpio_n, &gpio_chip); if (err) return err; desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio_n]; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DI, &value); if (err) return err; return !!value; } static int mtk_xt_set_gpio_as_eint(void *data, unsigned long eint_n) { struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data; const struct mtk_pin_desc *desc; struct gpio_chip *gpio_chip; unsigned int gpio_n; int err; err = mtk_xt_get_gpio_n(hw, eint_n, &gpio_n, &gpio_chip); if (err) return err; if (mtk_is_virt_gpio(hw, gpio_n)) return 0; desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio_n]; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE, desc->eint.eint_m); if (err) return err; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR, MTK_INPUT); if (err) return err; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT, MTK_ENABLE); /* SMT is supposed to be supported by every real GPIO and doesn't * support virtual GPIOs, so the extra condition err != -ENOTSUPP * is just for adding EINT support to these virtual GPIOs. It should * add an extra flag in the pin descriptor when more pins with * distinctive characteristic come out. 
*/ if (err && err != -ENOTSUPP) return err; return 0; } static const struct mtk_eint_xt mtk_eint_xt = { .get_gpio_n = mtk_xt_get_gpio_n, .get_gpio_state = mtk_xt_get_gpio_state, .set_gpio_as_eint = mtk_xt_set_gpio_as_eint, }; int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; int ret; if (!IS_ENABLED(CONFIG_EINT_MTK)) return 0; if (!of_property_read_bool(np, "interrupt-controller")) return -ENODEV; hw->eint = devm_kzalloc(hw->dev, sizeof(*hw->eint), GFP_KERNEL); if (!hw->eint) return -ENOMEM; hw->eint->base = devm_platform_ioremap_resource_byname(pdev, "eint"); if (IS_ERR(hw->eint->base)) { ret = PTR_ERR(hw->eint->base); goto err_free_eint; } hw->eint->irq = irq_of_parse_and_map(np, 0); if (!hw->eint->irq) { ret = -EINVAL; goto err_free_eint; } if (!hw->soc->eint_hw) { ret = -ENODEV; goto err_free_eint; } hw->eint->dev = &pdev->dev; hw->eint->hw = hw->soc->eint_hw; hw->eint->pctl = hw; hw->eint->gpio_xlate = &mtk_eint_xt; return mtk_eint_do_init(hw->eint); err_free_eint: devm_kfree(hw->dev, hw->eint); hw->eint = NULL; return ret; } EXPORT_SYMBOL_GPL(mtk_build_eint); /* Revision 0 */ int mtk_pinconf_bias_disable_set(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc) { int err; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PU, MTK_DISABLE); if (err) return err; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PD, MTK_DISABLE); if (err) return err; return 0; } EXPORT_SYMBOL_GPL(mtk_pinconf_bias_disable_set); int mtk_pinconf_bias_disable_get(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, int *res) { int v, v2; int err; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PU, &v); if (err) return err; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PD, &v2); if (err) return err; if (v == MTK_ENABLE || v2 == MTK_ENABLE) return -EINVAL; *res = 1; return 0; } EXPORT_SYMBOL_GPL(mtk_pinconf_bias_disable_get); int mtk_pinconf_bias_set(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, 
bool pullup) { int err, arg; arg = pullup ? 1 : 2; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PU, arg & 1); if (err) return err; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PD, !!(arg & 2)); if (err) return err; return 0; } EXPORT_SYMBOL_GPL(mtk_pinconf_bias_set); int mtk_pinconf_bias_get(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, bool pullup, int *res) { int reg, err, v; reg = pullup ? PINCTRL_PIN_REG_PU : PINCTRL_PIN_REG_PD; err = mtk_hw_get_value(hw, desc, reg, &v); if (err) return err; if (!v) return -EINVAL; *res = 1; return 0; } EXPORT_SYMBOL_GPL(mtk_pinconf_bias_get); /* Revision 1 */ int mtk_pinconf_bias_disable_set_rev1(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc) { return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLEN, MTK_DISABLE); } EXPORT_SYMBOL_GPL(mtk_pinconf_bias_disable_set_rev1); int mtk_pinconf_bias_disable_get_rev1(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, int *res) { int v, err; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PULLEN, &v); if (err) return err; if (v == MTK_ENABLE) return -EINVAL; *res = 1; return 0; } EXPORT_SYMBOL_GPL(mtk_pinconf_bias_disable_get_rev1); int mtk_pinconf_bias_set_rev1(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, bool pullup) { int err, arg; arg = pullup ? 
MTK_PULLUP : MTK_PULLDOWN; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLEN, MTK_ENABLE); if (err) return err; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLSEL, arg); if (err) return err; return 0; } EXPORT_SYMBOL_GPL(mtk_pinconf_bias_set_rev1); int mtk_pinconf_bias_get_rev1(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, bool pullup, int *res) { int err, v; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PULLEN, &v); if (err) return err; if (v == MTK_DISABLE) return -EINVAL; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PULLSEL, &v); if (err) return err; if (pullup ^ (v == MTK_PULLUP)) return -EINVAL; *res = 1; return 0; } EXPORT_SYMBOL_GPL(mtk_pinconf_bias_get_rev1); /* Combo for the following pull register type: * 1. PU + PD * 2. PULLSEL + PULLEN * 3. PUPD + R0 + R1 */ static int mtk_pinconf_bias_set_pu_pd(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, u32 pullup, u32 arg) { int err, pu, pd; if (arg == MTK_DISABLE) { pu = 0; pd = 0; } else if ((arg == MTK_ENABLE) && pullup) { pu = 1; pd = 0; } else if ((arg == MTK_ENABLE) && !pullup) { pu = 0; pd = 1; } else { err = -EINVAL; goto out; } err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PU, pu); if (err) goto out; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PD, pd); out: return err; } static int mtk_pinconf_bias_set_pullsel_pullen(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, u32 pullup, u32 arg) { int err, enable; if (arg == MTK_DISABLE) enable = 0; else if (arg == MTK_ENABLE) enable = 1; else { err = -EINVAL; goto out; } err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLEN, enable); if (err) goto out; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLSEL, pullup); out: return err; } static int mtk_pinconf_bias_set_pupd_r1_r0(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, u32 pullup, u32 arg) { int err, r0, r1; if ((arg == MTK_DISABLE) || (arg == MTK_PUPD_SET_R1R0_00)) { pullup = 0; r0 = 0; r1 = 0; } else if (arg == MTK_PUPD_SET_R1R0_01) { r0 = 
1; r1 = 0; } else if (arg == MTK_PUPD_SET_R1R0_10) { r0 = 0; r1 = 1; } else if (arg == MTK_PUPD_SET_R1R0_11) { r0 = 1; r1 = 1; } else { err = -EINVAL; goto out; } /* MTK HW PUPD bit: 1 for pull-down, 0 for pull-up */ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PUPD, !pullup); if (err) goto out; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_R0, r0); if (err) goto out; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_R1, r1); out: return err; } static int mtk_pinconf_bias_get_pu_pd(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, u32 *pullup, u32 *enable) { int err, pu, pd; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PU, &pu); if (err) goto out; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PD, &pd); if (err) goto out; if (pu == 0 && pd == 0) { *pullup = 0; *enable = MTK_DISABLE; } else if (pu == 1 && pd == 0) { *pullup = 1; *enable = MTK_ENABLE; } else if (pu == 0 && pd == 1) { *pullup = 0; *enable = MTK_ENABLE; } else err = -EINVAL; out: return err; } static int mtk_pinconf_bias_get_pullsel_pullen(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, u32 *pullup, u32 *enable) { int err; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PULLSEL, pullup); if (err) goto out; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PULLEN, enable); out: return err; } static int mtk_pinconf_bias_get_pupd_r1_r0(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, u32 *pullup, u32 *enable) { int err, r0, r1; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PUPD, pullup); if (err) goto out; /* MTK HW PUPD bit: 1 for pull-down, 0 for pull-up */ *pullup = !(*pullup); err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_R0, &r0); if (err) goto out; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_R1, &r1); if (err) goto out; if ((r1 == 0) && (r0 == 0)) *enable = MTK_PUPD_SET_R1R0_00; else if ((r1 == 0) && (r0 == 1)) *enable = MTK_PUPD_SET_R1R0_01; else if ((r1 == 1) && (r0 == 0)) *enable = MTK_PUPD_SET_R1R0_10; else if ((r1 == 1) && (r0 == 1)) *enable = 
MTK_PUPD_SET_R1R0_11; else err = -EINVAL; out: return err; } int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, u32 pullup, u32 arg) { int err; err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg); if (!err) goto out; err = mtk_pinconf_bias_set_pullsel_pullen(hw, desc, pullup, arg); if (!err) goto out; err = mtk_pinconf_bias_set_pupd_r1_r0(hw, desc, pullup, arg); out: return err; } EXPORT_SYMBOL_GPL(mtk_pinconf_bias_set_combo); int mtk_pinconf_bias_get_combo(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, u32 *pullup, u32 *enable) { int err; err = mtk_pinconf_bias_get_pu_pd(hw, desc, pullup, enable); if (!err) goto out; err = mtk_pinconf_bias_get_pullsel_pullen(hw, desc, pullup, enable); if (!err) goto out; err = mtk_pinconf_bias_get_pupd_r1_r0(hw, desc, pullup, enable); out: return err; } EXPORT_SYMBOL_GPL(mtk_pinconf_bias_get_combo); /* Revision 0 */ int mtk_pinconf_drive_set(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, u32 arg) { const struct mtk_drive_desc *tb; int err = -ENOTSUPP; tb = &mtk_drive[desc->drv_n]; /* 4mA when (e8, e4) = (0, 0) * 8mA when (e8, e4) = (0, 1) * 12mA when (e8, e4) = (1, 0) * 16mA when (e8, e4) = (1, 1) */ if ((arg >= tb->min && arg <= tb->max) && !(arg % tb->step)) { arg = (arg / tb->step - 1) * tb->scal; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_E4, arg & 0x1); if (err) return err; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_E8, (arg & 0x2) >> 1); if (err) return err; } return err; } EXPORT_SYMBOL_GPL(mtk_pinconf_drive_set); int mtk_pinconf_drive_get(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, int *val) { const struct mtk_drive_desc *tb; int err, val1, val2; tb = &mtk_drive[desc->drv_n]; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_E4, &val1); if (err) return err; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_E8, &val2); if (err) return err; /* 4mA when (e8, e4) = (0, 0); 8mA when (e8, e4) = (0, 1) * 12mA when (e8, e4) = (1, 0); 16mA when (e8, 
e4) = (1, 1) */ *val = (((val2 << 1) + val1) / tb->scal + 1) * tb->step; return 0; } EXPORT_SYMBOL_GPL(mtk_pinconf_drive_get); /* Revision 1 */ int mtk_pinconf_drive_set_rev1(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, u32 arg) { const struct mtk_drive_desc *tb; int err = -ENOTSUPP; tb = &mtk_drive[desc->drv_n]; if ((arg >= tb->min && arg <= tb->max) && !(arg % tb->step)) { arg = (arg / tb->step - 1) * tb->scal; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV, arg); if (err) return err; } return err; } EXPORT_SYMBOL_GPL(mtk_pinconf_drive_set_rev1); int mtk_pinconf_drive_get_rev1(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, int *val) { const struct mtk_drive_desc *tb; int err, val1; tb = &mtk_drive[desc->drv_n]; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV, &val1); if (err) return err; *val = ((val1 & 0x7) / tb->scal + 1) * tb->step; return 0; } EXPORT_SYMBOL_GPL(mtk_pinconf_drive_get_rev1); int mtk_pinconf_drive_set_raw(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, u32 arg) { return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV, arg); } EXPORT_SYMBOL_GPL(mtk_pinconf_drive_set_raw); int mtk_pinconf_drive_get_raw(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, int *val) { return mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV, val); } EXPORT_SYMBOL_GPL(mtk_pinconf_drive_get_raw); int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, bool pullup, u32 arg) { int err; /* 10K off & 50K (75K) off, when (R0, R1) = (0, 0); * 10K off & 50K (75K) on, when (R0, R1) = (0, 1); * 10K on & 50K (75K) off, when (R0, R1) = (1, 0); * 10K on & 50K (75K) on, when (R0, R1) = (1, 1) */ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_R0, arg & 1); if (err) return 0; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_R1, !!(arg & 2)); if (err) return 0; arg = pullup ? 
0 : 1; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PUPD, arg); /* If PUPD register is not supported for that pin, let's fallback to * general bias control. */ if (err == -ENOTSUPP) { if (hw->soc->bias_set) { err = hw->soc->bias_set(hw, desc, pullup); if (err) return err; } else if (hw->soc->bias_set_combo) { err = hw->soc->bias_set_combo(hw, desc, pullup, arg); if (err) return err; } else { return -ENOTSUPP; } } return err; } EXPORT_SYMBOL_GPL(mtk_pinconf_adv_pull_set); int mtk_pinconf_adv_pull_get(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, bool pullup, u32 *val) { u32 t, t2; int err; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PUPD, &t); /* If PUPD register is not supported for that pin, let's fallback to * general bias control. */ if (err == -ENOTSUPP) { if (hw->soc->bias_get) { err = hw->soc->bias_get(hw, desc, pullup, val); if (err) return err; } else { return -ENOTSUPP; } } else { /* t == 0 supposes PULLUP for the customized PULL setup */ if (err) return err; if (pullup ^ !t) return -EINVAL; } err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_R0, &t); if (err) return err; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_R1, &t2); if (err) return err; *val = (t | t2 << 1) & 0x7; return 0; } EXPORT_SYMBOL_GPL(mtk_pinconf_adv_pull_get); int mtk_pinconf_adv_drive_set(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, u32 arg) { int err; int en = arg & 1; int e0 = !!(arg & 2); int e1 = !!(arg & 4); err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV_EN, en); if (err) return err; if (!en) return err; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV_E0, e0); if (err) return err; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV_E1, e1); if (err) return err; return err; } EXPORT_SYMBOL_GPL(mtk_pinconf_adv_drive_set); int mtk_pinconf_adv_drive_get(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, u32 *val) { u32 en, e0, e1; int err; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV_EN, &en); if (err) return err; err = 
mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV_E0, &e0); if (err) return err; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV_E1, &e1); if (err) return err; *val = (en | e0 << 1 | e1 << 2) & 0x7; return 0; } EXPORT_SYMBOL_GPL(mtk_pinconf_adv_drive_get); int mtk_pinconf_adv_drive_set_raw(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, u32 arg) { return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV_ADV, arg); } EXPORT_SYMBOL_GPL(mtk_pinconf_adv_drive_set_raw); int mtk_pinconf_adv_drive_get_raw(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, u32 *val) { return mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV_ADV, val); } EXPORT_SYMBOL_GPL(mtk_pinconf_adv_drive_get_raw); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); MODULE_DESCRIPTION("Pin configuration library module for mediatek SoCs");
gpl-2.0
0xZERO3/vba-rerecording
src/win32/7zip/7z/C/Aes.c
24
7887
/* Aes.c -- AES encryption / decryption 2008-08-05 Igor Pavlov Public domain */ #include "Aes.h" #include "CpuArch.h" static UInt32 T[256 * 4]; static Byte Sbox[256] = { 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16}; static UInt32 D[256 * 4]; static Byte InvS[256]; static Byte Rcon[11] = { 0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 }; #define xtime(x) ((((x) << 1) ^ (((x) & 0x80) != 0 ? 
0x1B : 0)) & 0xFF) #define Ui32(a0, a1, a2, a3) ((UInt32)(a0) | ((UInt32)(a1) << 8) | ((UInt32)(a2) << 16) | ((UInt32)(a3) << 24)) #define gb0(x) ( (x) & 0xFF) #define gb1(x) (((x) >> ( 8)) & 0xFF) #define gb2(x) (((x) >> (16)) & 0xFF) #define gb3(x) (((x) >> (24)) & 0xFF) void AesGenTables(void) { unsigned i; for (i = 0; i < 256; i++) InvS[Sbox[i]] = (Byte)i; for (i = 0; i < 256; i++) { { UInt32 a1 = Sbox[i]; UInt32 a2 = xtime(a1); UInt32 a3 = xtime(a1) ^ a1; T[ i] = Ui32(a2, a1, a1, a3); T[0x100 + i] = Ui32(a3, a2, a1, a1); T[0x200 + i] = Ui32(a1, a3, a2, a1); T[0x300 + i] = Ui32(a1, a1, a3, a2); } { UInt32 a1 = InvS[i]; UInt32 a2 = xtime(a1); UInt32 a4 = xtime(a2); UInt32 a8 = xtime(a4); UInt32 a9 = a8 ^ a1; UInt32 aB = a8 ^ a2 ^ a1; UInt32 aD = a8 ^ a4 ^ a1; UInt32 aE = a8 ^ a4 ^ a2; D[ i] = Ui32(aE, a9, aD, aB); D[0x100 + i] = Ui32(aB, aE, a9, aD); D[0x200 + i] = Ui32(aD, aB, aE, a9); D[0x300 + i] = Ui32(a9, aD, aB, aE); } } } #define HT(i, x, s) (T + (x << 8))[gb ## x(s[(i + x) & 3])] #define HT4(m, i, s, p) m[i] = \ HT(i, 0, s) ^ \ HT(i, 1, s) ^ \ HT(i, 2, s) ^ \ HT(i, 3, s) ^ w[p + i] /* such order (2031) in HT16 is for VC6/K8 speed optimization) */ #define HT16(m, s, p) \ HT4(m, 2, s, p); \ HT4(m, 0, s, p); \ HT4(m, 3, s, p); \ HT4(m, 1, s, p); \ #define FT(i, x) Sbox[gb ## x(m[(i + x) & 3])] #define FT4(i) dest[i] = Ui32(FT(i, 0), FT(i, 1), FT(i, 2), FT(i, 3)) ^ w[i]; #define HD(i, x, s) (D + (x << 8))[gb ## x(s[(i - x) & 3])] #define HD4(m, i, s, p) m[i] = \ HD(i, 0, s) ^ \ HD(i, 1, s) ^ \ HD(i, 2, s) ^ \ HD(i, 3, s) ^ w[p + i]; /* such order (0231) in HD16 is for VC6/K8 speed optimization) */ #define HD16(m, s, p) \ HD4(m, 0, s, p); \ HD4(m, 2, s, p); \ HD4(m, 3, s, p); \ HD4(m, 1, s, p); \ #define FD(i, x) InvS[gb ## x(m[(i - x) & 3])] #define FD4(i) dest[i] = Ui32(FD(i, 0), FD(i, 1), FD(i, 2), FD(i, 3)) ^ w[i]; void Aes_SetKeyEncode(CAes *p, const Byte *key, unsigned keySize) { unsigned i, wSize; UInt32 *w; keySize /= 4; p->numRounds2 = keySize / 2 + 
3; wSize = (p->numRounds2 * 2 + 1) * 4; w = p->rkey; for (i = 0; i < keySize; i++, key += 4) w[i] = Ui32(key[0], key[1], key[2], key[3]); for (; i < wSize; i++) { UInt32 t = w[i - 1]; unsigned rem = i % keySize; if (rem == 0) t = Ui32(Sbox[gb1(t)] ^ Rcon[i / keySize], Sbox[gb2(t)], Sbox[gb3(t)], Sbox[gb0(t)]); else if (keySize > 6 && rem == 4) t = Ui32(Sbox[gb0(t)], Sbox[gb1(t)], Sbox[gb2(t)], Sbox[gb3(t)]); w[i] = w[i - keySize] ^ t; } } void Aes_SetKeyDecode(CAes *p, const Byte *key, unsigned keySize) { unsigned i, num; UInt32 *w; Aes_SetKeyEncode(p, key, keySize); num = p->numRounds2 * 8 - 4; w = p->rkey + 4; for (i = 0; i < num; i++) { UInt32 r = w[i]; w[i] = D[ Sbox[gb0(r)]] ^ D[0x100 + Sbox[gb1(r)]] ^ D[0x200 + Sbox[gb2(r)]] ^ D[0x300 + Sbox[gb3(r)]]; } } static void AesEncode32(UInt32 *dest, const UInt32 *src, const UInt32 *w, unsigned numRounds2) { UInt32 s[4]; UInt32 m[4]; s[0] = src[0] ^ w[0]; s[1] = src[1] ^ w[1]; s[2] = src[2] ^ w[2]; s[3] = src[3] ^ w[3]; w += 4; for (;;) { HT16(m, s, 0); if (--numRounds2 == 0) break; HT16(s, m, 4); w += 8; } w += 4; FT4(0); FT4(1); FT4(2); FT4(3); } static void AesDecode32(UInt32 *dest, const UInt32 *src, const UInt32 *w, unsigned numRounds2) { UInt32 s[4]; UInt32 m[4]; w += numRounds2 * 8; s[0] = src[0] ^ w[0]; s[1] = src[1] ^ w[1]; s[2] = src[2] ^ w[2]; s[3] = src[3] ^ w[3]; for (;;) { w -= 8; HD16(m, s, 4); if (--numRounds2 == 0) break; HD16(s, m, 0); } FD4(0); FD4(1); FD4(2); FD4(3); } void Aes_Encode32(const CAes *p, UInt32 *dest, const UInt32 *src) { AesEncode32(dest, src, p->rkey, p->numRounds2); } void Aes_Decode32(const CAes *p, UInt32 *dest, const UInt32 *src) { AesDecode32(dest, src, p->rkey, p->numRounds2); } void AesCbc_Init(CAesCbc *p, const Byte *iv) { unsigned i; for (i = 0; i < 4; i++) p->prev[i] = GetUi32(iv + i * 4); } SizeT AesCbc_Encode(CAesCbc *p, Byte *data, SizeT size) { SizeT i; if (size == 0) return 0; if (size < AES_BLOCK_SIZE) return AES_BLOCK_SIZE; size -= AES_BLOCK_SIZE; for (i = 0; i <= 
size; i += AES_BLOCK_SIZE, data += AES_BLOCK_SIZE) { p->prev[0] ^= GetUi32(data); p->prev[1] ^= GetUi32(data + 4); p->prev[2] ^= GetUi32(data + 8); p->prev[3] ^= GetUi32(data + 12); AesEncode32(p->prev, p->prev, p->aes.rkey, p->aes.numRounds2); SetUi32(data, p->prev[0]); SetUi32(data + 4, p->prev[1]); SetUi32(data + 8, p->prev[2]); SetUi32(data + 12, p->prev[3]); } return i; } SizeT AesCbc_Decode(CAesCbc *p, Byte *data, SizeT size) { SizeT i; UInt32 in[4], out[4]; if (size == 0) return 0; if (size < AES_BLOCK_SIZE) return AES_BLOCK_SIZE; size -= AES_BLOCK_SIZE; for (i = 0; i <= size; i += AES_BLOCK_SIZE, data += AES_BLOCK_SIZE) { in[0] = GetUi32(data); in[1] = GetUi32(data + 4); in[2] = GetUi32(data + 8); in[3] = GetUi32(data + 12); AesDecode32(out, in, p->aes.rkey, p->aes.numRounds2); SetUi32(data, p->prev[0] ^ out[0]); SetUi32(data + 4, p->prev[1] ^ out[1]); SetUi32(data + 8, p->prev[2] ^ out[2]); SetUi32(data + 12, p->prev[3] ^ out[3]); p->prev[0] = in[0]; p->prev[1] = in[1]; p->prev[2] = in[2]; p->prev[3] = in[3]; } return i; }
gpl-2.0
CyanogenMod/htc-kernel-incrediblec
drivers/usb/core/devio.c
280
48518
/*****************************************************************************/ /* * devio.c -- User space communication with USB devices. * * Copyright (C) 1999-2000 Thomas Sailer (sailer@ife.ee.ethz.ch) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * This file implements the usbfs/x/y files, where * x is the bus number and y the device number. * * It allows user space programs/"drivers" to communicate directly * with USB devices without intervening kernel driver. 
* * Revision history * 22.12.1999 0.1 Initial release (split from proc_usb.c) * 04.01.2000 0.2 Turned into its own filesystem * 30.09.2005 0.3 Fix user-triggerable oops in async URB delivery * (CAN-2005-3055) */ /*****************************************************************************/ #include <linux/fs.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/smp_lock.h> #include <linux/signal.h> #include <linux/poll.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usbdevice_fs.h> #include <linux/cdev.h> #include <linux/notifier.h> #include <linux/security.h> #include <asm/uaccess.h> #include <asm/byteorder.h> #include <linux/moduleparam.h> #include "hcd.h" /* for usbcore internals */ #include "usb.h" #include "hub.h" #define USB_MAXBUS 64 #define USB_DEVICE_MAX USB_MAXBUS * 128 /* Mutual exclusion for removal, open, and release */ DEFINE_MUTEX(usbfs_mutex); struct dev_state { struct list_head list; /* state list */ struct usb_device *dev; struct file *file; spinlock_t lock; /* protects the async urb lists */ struct list_head async_pending; struct list_head async_completed; wait_queue_head_t wait; /* wake up if a request completed */ unsigned int discsignr; struct pid *disc_pid; uid_t disc_uid, disc_euid; void __user *disccontext; unsigned long ifclaimed; u32 secid; u32 disabled_bulk_eps; }; struct async { struct list_head asynclist; struct dev_state *ps; struct pid *pid; uid_t uid, euid; unsigned int signr; unsigned int ifnum; void __user *userbuffer; void __user *userurb; struct urb *urb; int status; u32 secid; u8 bulk_addr; u8 bulk_status; }; static int usbfs_snoop; module_param(usbfs_snoop, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(usbfs_snoop, "true to log all usbfs traffic"); #define snoop(dev, format, arg...) 
\ do { \ if (usbfs_snoop) \ dev_info(dev , format , ## arg); \ } while (0) enum snoop_when { SUBMIT, COMPLETE }; #define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0) #define MAX_USBFS_BUFFER_SIZE 16384 static int connected(struct dev_state *ps) { return (!list_empty(&ps->list) && ps->dev->state != USB_STATE_NOTATTACHED); } static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig) { loff_t ret; lock_kernel(); switch (orig) { case 0: file->f_pos = offset; ret = file->f_pos; break; case 1: file->f_pos += offset; ret = file->f_pos; break; case 2: default: ret = -EINVAL; } unlock_kernel(); return ret; } static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct dev_state *ps = file->private_data; struct usb_device *dev = ps->dev; ssize_t ret = 0; unsigned len; loff_t pos; int i; pos = *ppos; usb_lock_device(dev); if (!connected(ps)) { ret = -ENODEV; goto err; } else if (pos < 0) { ret = -EINVAL; goto err; } if (pos < sizeof(struct usb_device_descriptor)) { /* 18 bytes - fits on the stack */ struct usb_device_descriptor temp_desc; memcpy(&temp_desc, &dev->descriptor, sizeof(dev->descriptor)); le16_to_cpus(&temp_desc.bcdUSB); le16_to_cpus(&temp_desc.idVendor); le16_to_cpus(&temp_desc.idProduct); le16_to_cpus(&temp_desc.bcdDevice); len = sizeof(struct usb_device_descriptor) - pos; if (len > nbytes) len = nbytes; if (copy_to_user(buf, ((char *)&temp_desc) + pos, len)) { ret = -EFAULT; goto err; } *ppos += len; buf += len; nbytes -= len; ret += len; } pos = sizeof(struct usb_device_descriptor); for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) { struct usb_config_descriptor *config = (struct usb_config_descriptor *)dev->rawdescriptors[i]; unsigned int length = le16_to_cpu(config->wTotalLength); if (*ppos < pos + length) { /* The descriptor may claim to be longer than it * really is. Here is the actual allocated length. 
*/ unsigned alloclen = le16_to_cpu(dev->config[i].desc.wTotalLength); len = length - (*ppos - pos); if (len > nbytes) len = nbytes; /* Simply don't write (skip over) unallocated parts */ if (alloclen > (*ppos - pos)) { alloclen -= (*ppos - pos); if (copy_to_user(buf, dev->rawdescriptors[i] + (*ppos - pos), min(len, alloclen))) { ret = -EFAULT; goto err; } } *ppos += len; buf += len; nbytes -= len; ret += len; } pos += length; } err: usb_unlock_device(dev); return ret; } /* * async list handling */ static struct async *alloc_async(unsigned int numisoframes) { struct async *as; as = kzalloc(sizeof(struct async), GFP_KERNEL); if (!as) return NULL; as->urb = usb_alloc_urb(numisoframes, GFP_KERNEL); if (!as->urb) { kfree(as); return NULL; } return as; } static void free_async(struct async *as) { put_pid(as->pid); kfree(as->urb->transfer_buffer); kfree(as->urb->setup_packet); usb_free_urb(as->urb); kfree(as); } static void async_newpending(struct async *as) { struct dev_state *ps = as->ps; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); list_add_tail(&as->asynclist, &ps->async_pending); spin_unlock_irqrestore(&ps->lock, flags); } static void async_removepending(struct async *as) { struct dev_state *ps = as->ps; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); list_del_init(&as->asynclist); spin_unlock_irqrestore(&ps->lock, flags); } static struct async *async_getcompleted(struct dev_state *ps) { unsigned long flags; struct async *as = NULL; spin_lock_irqsave(&ps->lock, flags); if (!list_empty(&ps->async_completed)) { as = list_entry(ps->async_completed.next, struct async, asynclist); list_del_init(&as->asynclist); } spin_unlock_irqrestore(&ps->lock, flags); return as; } static struct async *async_getpending(struct dev_state *ps, void __user *userurb) { unsigned long flags; struct async *as; spin_lock_irqsave(&ps->lock, flags); list_for_each_entry(as, &ps->async_pending, asynclist) if (as->userurb == userurb) { list_del_init(&as->asynclist); 
spin_unlock_irqrestore(&ps->lock, flags); return as; } spin_unlock_irqrestore(&ps->lock, flags); return NULL; } static void snoop_urb(struct usb_device *udev, void __user *userurb, int pipe, unsigned length, int timeout_or_status, enum snoop_when when) { static const char *types[] = {"isoc", "int", "ctrl", "bulk"}; static const char *dirs[] = {"out", "in"}; int ep; const char *t, *d; if (!usbfs_snoop) return; ep = usb_pipeendpoint(pipe); t = types[usb_pipetype(pipe)]; d = dirs[!!usb_pipein(pipe)]; if (userurb) { /* Async */ if (when == SUBMIT) dev_info(&udev->dev, "userurb %p, ep%d %s-%s, " "length %u\n", userurb, ep, t, d, length); else dev_info(&udev->dev, "userurb %p, ep%d %s-%s, " "actual_length %u status %d\n", userurb, ep, t, d, length, timeout_or_status); } else { if (when == SUBMIT) dev_info(&udev->dev, "ep%d %s-%s, length %u, " "timeout %d\n", ep, t, d, length, timeout_or_status); else dev_info(&udev->dev, "ep%d %s-%s, actual_length %u, " "status %d\n", ep, t, d, length, timeout_or_status); } } #define AS_CONTINUATION 1 #define AS_UNLINK 2 static void cancel_bulk_urbs(struct dev_state *ps, unsigned bulk_addr) __releases(ps->lock) __acquires(ps->lock) { struct async *as; /* Mark all the pending URBs that match bulk_addr, up to but not * including the first one without AS_CONTINUATION. If such an * URB is encountered then a new transfer has already started so * the endpoint doesn't need to be disabled; otherwise it does. 
*/ list_for_each_entry(as, &ps->async_pending, asynclist) { if (as->bulk_addr == bulk_addr) { if (as->bulk_status != AS_CONTINUATION) goto rescan; as->bulk_status = AS_UNLINK; as->bulk_addr = 0; } } ps->disabled_bulk_eps |= (1 << bulk_addr); /* Now carefully unlink all the marked pending URBs */ rescan: list_for_each_entry(as, &ps->async_pending, asynclist) { if (as->bulk_status == AS_UNLINK) { as->bulk_status = 0; /* Only once */ spin_unlock(&ps->lock); /* Allow completions */ usb_unlink_urb(as->urb); spin_lock(&ps->lock); goto rescan; } } } static void async_completed(struct urb *urb) { struct async *as = urb->context; struct dev_state *ps = as->ps; struct siginfo sinfo; struct pid *pid = NULL; uid_t uid = 0; uid_t euid = 0; u32 secid = 0; int signr; spin_lock(&ps->lock); list_move_tail(&as->asynclist, &ps->async_completed); as->status = urb->status; signr = as->signr; if (signr) { sinfo.si_signo = as->signr; sinfo.si_errno = as->status; sinfo.si_code = SI_ASYNCIO; sinfo.si_addr = as->userurb; pid = as->pid; uid = as->uid; euid = as->euid; secid = as->secid; } snoop(&urb->dev->dev, "urb complete\n"); snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length, as->status, COMPLETE); if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET && as->status != -ENOENT) cancel_bulk_urbs(ps, as->bulk_addr); spin_unlock(&ps->lock); if (signr) kill_pid_info_as_uid(sinfo.si_signo, &sinfo, pid, uid, euid, secid); wake_up(&ps->wait); } static void destroy_async(struct dev_state *ps, struct list_head *list) { struct async *as; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); while (!list_empty(list)) { as = list_entry(list->next, struct async, asynclist); list_del_init(&as->asynclist); /* drop the spinlock so the completion handler can run */ spin_unlock_irqrestore(&ps->lock, flags); usb_kill_urb(as->urb); spin_lock_irqsave(&ps->lock, flags); } spin_unlock_irqrestore(&ps->lock, flags); } static void destroy_async_on_interface(struct dev_state *ps, unsigned 
int ifnum)
{
	struct list_head *p, *q, hitlist;
	unsigned long flags;

	/* Collect every pending async request belonging to @ifnum onto a
	 * private list under the lock, then kill them with it released. */
	INIT_LIST_HEAD(&hitlist);
	spin_lock_irqsave(&ps->lock, flags);
	list_for_each_safe(p, q, &ps->async_pending)
		if (ifnum == list_entry(p, struct async, asynclist)->ifnum)
			list_move_tail(p, &hitlist);
	spin_unlock_irqrestore(&ps->lock, flags);
	destroy_async(ps, &hitlist);
}

/* Kill every pending async request for this file, on any interface. */
static void destroy_all_async(struct dev_state *ps)
{
	destroy_async(ps, &ps->async_pending);
}

/*
 * interface claims are made only at the request of user level code,
 * which can also release them (explicitly or by closing files).
 * they're also undone when devices disconnect.
 */

/* usbfs never auto-binds; interfaces are claimed explicitly by users. */
static int driver_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	return -ENODEV;
}

static void driver_disconnect(struct usb_interface *intf)
{
	struct dev_state *ps = usb_get_intfdata(intf);
	unsigned int ifnum = intf->altsetting->desc.bInterfaceNumber;

	if (!ps)
		return;

	/* NOTE: this relies on usbcore having canceled and completed
	 * all pending I/O requests; 2.6 does that.
	 */

	if (likely(ifnum < 8*sizeof(ps->ifclaimed)))
		clear_bit(ifnum, &ps->ifclaimed);
	else
		dev_warn(&intf->dev, "interface number %u out of range\n",
			 ifnum);

	usb_set_intfdata(intf, NULL);

	/* force async requests to complete */
	destroy_async_on_interface(ps, ifnum);
}

/* The following routines are merely placeholders.  There is no way
 * to inform a user task about suspend or resumes.
 */
static int driver_suspend(struct usb_interface *intf, pm_message_t msg)
{
	return 0;
}

static int driver_resume(struct usb_interface *intf)
{
	return 0;
}

struct usb_driver usbfs_driver = {
	.name =		"usbfs",
	.probe =	driver_probe,
	.disconnect =	driver_disconnect,
	.suspend =	driver_suspend,
	.resume =	driver_resume,
};

/* Claim interface @ifnum for usbfs on behalf of this open file.
 * Idempotent: returns 0 if this file already holds the claim. */
static int claimintf(struct dev_state *ps, unsigned int ifnum)
{
	struct usb_device *dev = ps->dev;
	struct usb_interface *intf;
	int err;

	if (ifnum >= 8*sizeof(ps->ifclaimed))
		return -EINVAL;
	/* already claimed */
	if (test_bit(ifnum, &ps->ifclaimed))
		return 0;

	intf = usb_ifnum_to_if(dev, ifnum);
	if (!intf)
		err = -ENOENT;
	else
		err = usb_driver_claim_interface(&usbfs_driver, intf, ps);
	if (err == 0)
		set_bit(ifnum, &ps->ifclaimed);
	return err;
}

/* Drop this file's claim on interface @ifnum, if it holds one. */
static int releaseintf(struct dev_state *ps, unsigned int ifnum)
{
	struct usb_device *dev;
	struct usb_interface *intf;
	int err;

	err = -EINVAL;
	if (ifnum >= 8*sizeof(ps->ifclaimed))
		return err;
	dev = ps->dev;
	intf = usb_ifnum_to_if(dev, ifnum);
	if (!intf)
		err = -ENOENT;
	else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) {
		usb_driver_release_interface(&usbfs_driver, intf);
		err = 0;
	}
	return err;
}

/* Verify the interface is claimed by this file; auto-claim it (with a
 * warning) for processes that skipped USBDEVFS_CLAIMINTERFACE. */
static int checkintf(struct dev_state *ps, unsigned int ifnum)
{
	if (ps->dev->state != USB_STATE_CONFIGURED)
		return -EHOSTUNREACH;
	if (ifnum >= 8*sizeof(ps->ifclaimed))
		return -EINVAL;
	if (test_bit(ifnum, &ps->ifclaimed))
		return 0;
	/* if not yet claimed, claim it for the driver */
	dev_warn(&ps->dev->dev, "usbfs: process %d (%s) did not claim "
		 "interface %u before use\n", task_pid_nr(current),
		 current->comm, ifnum);
	return claimintf(ps, ifnum);
}

/* Map an endpoint address to the number of the interface (in the active
 * configuration) providing it, searching every altsetting. */
static int findintfep(struct usb_device *dev, unsigned int ep)
{
	unsigned int i, j, e;
	struct usb_interface *intf;
	struct usb_host_interface *alts;
	struct usb_endpoint_descriptor *endpt;

	if (ep & ~(USB_DIR_IN|0xf))
		return -EINVAL;
	if (!dev->actconfig)
		return -ESRCH;
	for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
		intf = dev->actconfig->interface[i];
		for (j = 0; j <
intf->num_altsetting; j++) {
			alts = &intf->altsetting[j];
			for (e = 0; e < alts->desc.bNumEndpoints; e++) {
				endpt = &alts->endpoint[e].desc;
				if (endpt->bEndpointAddress == ep)
					return alts->desc.bInterfaceNumber;
			}
		}
	}
	return -ENOENT;
}

/* Permission check for a control request: vendor-type requests are
 * always allowed; endpoint/interface-recipient requests require the
 * matching interface to be claimed by this file. */
static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
			   unsigned int index)
{
	int ret = 0;

	if (ps->dev->state != USB_STATE_UNAUTHENTICATED
	 && ps->dev->state != USB_STATE_ADDRESS
	 && ps->dev->state != USB_STATE_CONFIGURED)
		return -EHOSTUNREACH;
	if (USB_TYPE_VENDOR == (USB_TYPE_MASK & requesttype))
		return 0;

	index &= 0xff;
	switch (requesttype & USB_RECIP_MASK) {
	case USB_RECIP_ENDPOINT:
		ret = findintfep(ps->dev, index);
		if (ret >= 0)
			ret = checkintf(ps, ret);
		break;

	case USB_RECIP_INTERFACE:
		ret = checkintf(ps, index);
		break;
	}
	return ret;
}

/* bus_find_device() match callback: compare a device's devt against
 * the dev_t passed through the void pointer. */
static int match_devt(struct device *dev, void *data)
{
	return dev->devt == (dev_t) (unsigned long) data;
}

static struct usb_device *usbdev_lookup_by_devt(dev_t devt)
{
	struct device *dev;

	dev = bus_find_device(&usb_bus_type, NULL,
			      (void *) (unsigned long) devt, match_devt);
	if (!dev)
		return NULL;
	return container_of(dev, struct usb_device, dev);
}

/*
 * file operations
 */

/* Open a usbfs node: resolve the usb_device (from the char-dev devt or
 * the usbfs procfs inode), take an autoresume reference, and set up the
 * per-open dev_state bookkeeping. */
static int usbdev_open(struct inode *inode, struct file *file)
{
	struct usb_device *dev = NULL;
	struct dev_state *ps;
	const struct cred *cred = current_cred();
	int ret;

	lock_kernel();
	/* Protect against simultaneous removal or release */
	mutex_lock(&usbfs_mutex);

	ret = -ENOMEM;
	ps = kmalloc(sizeof(struct dev_state), GFP_KERNEL);
	if (!ps)
		goto out;

	ret = -ENODEV;

	/* usbdev device-node */
	if (imajor(inode) == USB_DEVICE_MAJOR)
		dev = usbdev_lookup_by_devt(inode->i_rdev);
#ifdef CONFIG_USB_DEVICEFS
	/* procfs file */
	if (!dev) {
		dev = inode->i_private;
		if (dev && dev->usbfs_dentry &&
					dev->usbfs_dentry->d_inode == inode)
			usb_get_dev(dev);
		else
			dev = NULL;
	}
#endif
	if (!dev || dev->state == USB_STATE_NOTATTACHED)
		goto out;
	ret = usb_autoresume_device(dev);
	if (ret)
		goto out;

	ret = 0;
	ps->dev = dev;
	ps->file = file;
	spin_lock_init(&ps->lock);
	INIT_LIST_HEAD(&ps->list);
	INIT_LIST_HEAD(&ps->async_pending);
	INIT_LIST_HEAD(&ps->async_completed);
	init_waitqueue_head(&ps->wait);
	ps->discsignr = 0;
	ps->disc_pid = get_pid(task_pid(current));
	ps->disc_uid = cred->uid;
	ps->disc_euid = cred->euid;
	ps->disccontext = NULL;
	ps->ifclaimed = 0;
	security_task_getsecid(current, &ps->secid);
	smp_wmb();	/* publish initialized state before list insertion */
	list_add_tail(&ps->list, &dev->filelist);
	file->private_data = ps;
	snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current),
			current->comm);
 out:
	if (ret) {
		kfree(ps);
		usb_put_dev(dev);
	}
	mutex_unlock(&usbfs_mutex);
	unlock_kernel();
	return ret;
}

/* Release an open usbfs file: drop interface and port claims, kill
 * outstanding URBs, free completed requests, and release the device
 * and autosuspend references taken at open. */
static int usbdev_release(struct inode *inode, struct file *file)
{
	struct dev_state *ps = file->private_data;
	struct usb_device *dev = ps->dev;
	unsigned int ifnum;
	struct async *as;

	usb_lock_device(dev);
	usb_hub_release_all_ports(dev, ps);

	/* Protect against simultaneous open */
	mutex_lock(&usbfs_mutex);
	list_del_init(&ps->list);
	mutex_unlock(&usbfs_mutex);

	for (ifnum = 0; ps->ifclaimed && ifnum < 8*sizeof(ps->ifclaimed);
			ifnum++) {
		if (test_bit(ifnum, &ps->ifclaimed))
			releaseintf(ps, ifnum);
	}
	destroy_all_async(ps);
	usb_autosuspend_device(dev);
	usb_unlock_device(dev);
	usb_put_dev(dev);
	put_pid(ps->disc_pid);

	/* Drain and free anything left on the completed list. */
	as = async_getcompleted(ps);
	while (as) {
		free_async(as);
		as = async_getcompleted(ps);
	}
	kfree(ps);
	return 0;
}

/* USBDEVFS_CONTROL: synchronous control transfer, bounced through one
 * kernel page; returns the byte count or a negative error. */
static int proc_control(struct dev_state *ps, void __user *arg)
{
	struct usb_device *dev = ps->dev;
	struct usbdevfs_ctrltransfer ctrl;
	unsigned int tmo;
	unsigned char *tbuf;
	unsigned wLength;
	int i, pipe, ret;

	if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
		return -EFAULT;
	ret = check_ctrlrecip(ps, ctrl.bRequestType, ctrl.wIndex);
	if (ret)
		return ret;
	wLength = ctrl.wLength;		/* To suppress 64k PAGE_SIZE warning */
	if (wLength > PAGE_SIZE)
		return -EINVAL;
	tbuf = (unsigned char *)__get_free_page(GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;
	tmo = ctrl.timeout;
	if (ctrl.bRequestType & 0x80) {
		/* IN transfer: validate the user buffer up front. */
		if (ctrl.wLength && !access_ok(VERIFY_WRITE,
					       ctrl.data, ctrl.wLength)) {
			free_page((unsigned long)tbuf);
			return -EINVAL;
		}
		pipe = usb_rcvctrlpipe(dev, 0);
		snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT);

		/* The device lock is dropped across the (possibly long)
		 * blocking transfer and re-taken afterwards. */
		usb_unlock_device(dev);
		i = usb_control_msg(dev, pipe, ctrl.bRequest,
				    ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
				    tbuf, ctrl.wLength, tmo);
		usb_lock_device(dev);
		snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE);

		if ((i > 0) && ctrl.wLength) {
			if (copy_to_user(ctrl.data, tbuf, i)) {
				free_page((unsigned long)tbuf);
				return -EFAULT;
			}
		}
	} else {
		/* OUT transfer: stage the payload before submitting. */
		if (ctrl.wLength) {
			if (copy_from_user(tbuf, ctrl.data, ctrl.wLength)) {
				free_page((unsigned long)tbuf);
				return -EFAULT;
			}
		}
		pipe = usb_sndctrlpipe(dev, 0);
		snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT);

		usb_unlock_device(dev);
		i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest,
				    ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
				    tbuf, ctrl.wLength, tmo);
		usb_lock_device(dev);
		snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE);
	}
	free_page((unsigned long)tbuf);
	if (i < 0 && i != -EPIPE) {
		dev_printk(KERN_DEBUG, &dev->dev, "usbfs: USBDEVFS_CONTROL "
			   "failed cmd %s rqt %u rq %u len %u ret %d\n",
			   current->comm, ctrl.bRequestType, ctrl.bRequest,
			   ctrl.wLength, i);
	}
	return i;
}

/* USBDEVFS_BULK: synchronous bulk transfer through a kmalloc bounce
 * buffer; returns the number of bytes transferred. */
static int proc_bulk(struct dev_state *ps, void __user *arg)
{
	struct usb_device *dev = ps->dev;
	struct usbdevfs_bulktransfer bulk;
	unsigned int tmo, len1, pipe;
	int len2;
	unsigned char *tbuf;
	int i, ret;

	if (copy_from_user(&bulk, arg, sizeof(bulk)))
		return -EFAULT;
	ret = findintfep(ps->dev, bulk.ep);
	if (ret < 0)
		return ret;
	ret = checkintf(ps, ret);
	if (ret)
		return ret;
	if (bulk.ep & USB_DIR_IN)
		pipe = usb_rcvbulkpipe(dev, bulk.ep & 0x7f);
	else
		pipe = usb_sndbulkpipe(dev, bulk.ep & 0x7f);
	if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN)))
		return -EINVAL;
	len1 = bulk.len;
	if (len1 > MAX_USBFS_BUFFER_SIZE)
		return -EINVAL;
	if (!(tbuf = kmalloc(len1, GFP_KERNEL)))
		return -ENOMEM;
	tmo = bulk.timeout;
	if (bulk.ep & 0x80) {
		/* IN transfer: validate the user buffer up front. */
		if (len1 && !access_ok(VERIFY_WRITE,
				       bulk.data, len1)) {
			kfree(tbuf);
			return -EINVAL;
		}
		snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT);

		/* Drop the device lock around the blocking transfer. */
		usb_unlock_device(dev);
		i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
		usb_lock_device(dev);
		snoop_urb(dev, NULL, pipe, len2, i, COMPLETE);

		if (!i && len2) {
			if (copy_to_user(bulk.data, tbuf, len2)) {
				kfree(tbuf);
				return -EFAULT;
			}
		}
	} else {
		/* OUT transfer: stage the user's data first. */
		if (len1) {
			if (copy_from_user(tbuf, bulk.data, len1)) {
				kfree(tbuf);
				return -EFAULT;
			}
		}
		snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT);

		usb_unlock_device(dev);
		i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
		usb_lock_device(dev);
		snoop_urb(dev, NULL, pipe, len2, i, COMPLETE);
	}
	kfree(tbuf);
	if (i < 0)
		return i;
	return len2;
}

/* USBDEVFS_RESETEP: reset state (e.g. data toggle) of one endpoint. */
static int proc_resetep(struct dev_state *ps, void __user *arg)
{
	unsigned int ep;
	int ret;

	if (get_user(ep, (unsigned int __user *)arg))
		return -EFAULT;
	ret = findintfep(ps->dev, ep);
	if (ret < 0)
		return ret;
	ret = checkintf(ps, ret);
	if (ret)
		return ret;
	usb_reset_endpoint(ps->dev, ep);
	return 0;
}

/* USBDEVFS_CLEAR_HALT: clear a halt (stall) condition on an endpoint. */
static int proc_clearhalt(struct dev_state *ps, void __user *arg)
{
	unsigned int ep;
	int pipe;
	int ret;

	if (get_user(ep, (unsigned int __user *)arg))
		return -EFAULT;
	ret = findintfep(ps->dev, ep);
	if (ret < 0)
		return ret;
	ret = checkintf(ps, ret);
	if (ret)
		return ret;
	if (ep & USB_DIR_IN)
		pipe = usb_rcvbulkpipe(ps->dev, ep & 0x7f);
	else
		pipe = usb_sndbulkpipe(ps->dev, ep & 0x7f);

	return usb_clear_halt(ps->dev, pipe);
}

/* USBDEVFS_GETDRIVER: report the name of the kernel driver bound to an
 * interface.  NOTE(review): strncpy() may leave gd.driver without a
 * NUL terminator when the name fills the buffer exactly — confirm
 * user-space consumers tolerate that. */
static int proc_getdriver(struct dev_state *ps, void __user *arg)
{
	struct usbdevfs_getdriver gd;
	struct usb_interface *intf;
	int ret;

	if (copy_from_user(&gd, arg, sizeof(gd)))
		return -EFAULT;
	intf = usb_ifnum_to_if(ps->dev, gd.interface);
	if (!intf || !intf->dev.driver)
		ret = -ENODATA;
	else {
		strncpy(gd.driver, intf->dev.driver->name,
				sizeof(gd.driver));
		ret = (copy_to_user(arg, &gd, sizeof(gd)) ?
				-EFAULT : 0);
	}
	return ret;
}

/* USBDEVFS_CONNECTINFO: report devnum and a "slow" (low-speed) flag. */
static int proc_connectinfo(struct dev_state *ps, void __user *arg)
{
	struct usbdevfs_connectinfo ci;

	ci.devnum = ps->dev->devnum;
	ci.slow = ps->dev->speed == USB_SPEED_LOW;
	if (copy_to_user(arg, &ci, sizeof(ci)))
		return -EFAULT;
	return 0;
}

/* USBDEVFS_RESET: perform a USB port reset of the device. */
static int proc_resetdevice(struct dev_state *ps)
{
	return usb_reset_device(ps->dev);
}

/* USBDEVFS_SETINTERFACE: select an altsetting on a claimed interface. */
static int proc_setintf(struct dev_state *ps, void __user *arg)
{
	struct usbdevfs_setinterface setintf;
	int ret;

	if (copy_from_user(&setintf, arg, sizeof(setintf)))
		return -EFAULT;
	if ((ret = checkintf(ps, setintf.interface)))
		return ret;
	return usb_set_interface(ps->dev, setintf.interface,
				 setintf.altsetting);
}

/* USBDEVFS_SETCONFIGURATION: change (or re-assert) the active
 * configuration, refusing while any interface is claimed. */
static int proc_setconfig(struct dev_state *ps, void __user *arg)
{
	int u;
	int status = 0;
	struct usb_host_config *actconfig;

	if (get_user(u, (int __user *)arg))
		return -EFAULT;

	actconfig = ps->dev->actconfig;

	/* Don't touch the device if any interfaces are claimed.
	 * It could interfere with other drivers' operations, and if
	 * an interface is claimed by usbfs it could easily deadlock.
	 */
	if (actconfig) {
		int i;

		for (i = 0; i < actconfig->desc.bNumInterfaces; ++i) {
			if (usb_interface_claimed(actconfig->interface[i])) {
				dev_warn(&ps->dev->dev,
					"usbfs: interface %d claimed by %s "
					"while '%s' sets config #%d\n",
					actconfig->interface[i]
						->cur_altsetting
						->desc.bInterfaceNumber,
					actconfig->interface[i]
						->dev.driver->name,
					current->comm, u);
				status = -EBUSY;
				break;
			}
		}
	}

	/* SET_CONFIGURATION is often abused as a "cheap" driver reset,
	 * so avoid usb_set_configuration()'s kick to sysfs
	 */
	if (status == 0) {
		if (actconfig && actconfig->desc.bConfigurationValue == u)
			status = usb_reset_configuration(ps->dev);
		else
			status = usb_set_configuration(ps->dev, u);
	}

	return status;
}

/*
 * Core of USBDEVFS_SUBMITURB (native and compat): validate the
 * user-supplied URB description, build a kernel URB with a kernel-side
 * transfer buffer, and submit it asynchronously.  @arg is the opaque
 * user token later handed back by the reap interface;
 * @iso_frame_desc points at the user's ISO packet-descriptor array.
 */
static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
			struct usbdevfs_iso_packet_desc __user *iso_frame_desc,
			void __user *arg)
{
	struct usbdevfs_iso_packet_desc *isopkt = NULL;
	struct usb_host_endpoint *ep;
	struct async *as;
	struct usb_ctrlrequest *dr = NULL;
	const struct cred *cred = current_cred();
	unsigned int u, totlen, isofrmlen;
	int ret, ifnum = -1;
	int is_in;

	if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
				USBDEVFS_URB_SHORT_NOT_OK |
				USBDEVFS_URB_BULK_CONTINUATION |
				USBDEVFS_URB_NO_FSBR |
				USBDEVFS_URB_ZERO_PACKET |
				USBDEVFS_URB_NO_INTERRUPT))
		return -EINVAL;
	if (uurb->buffer_length > 0 && !uurb->buffer)
		return -EINVAL;
	if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL &&
	    (uurb->endpoint & ~USB_ENDPOINT_DIR_MASK) == 0)) {
		/* Anything but the default control pipe needs a claimed
		 * interface. */
		ifnum = findintfep(ps->dev, uurb->endpoint);
		if (ifnum < 0)
			return ifnum;
		ret = checkintf(ps, ifnum);
		if (ret)
			return ret;
	}
	if ((uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0) {
		is_in = 1;
		ep = ps->dev->ep_in[uurb->endpoint & USB_ENDPOINT_NUMBER_MASK];
	} else {
		is_in = 0;
		ep = ps->dev->ep_out[uurb->endpoint & USB_ENDPOINT_NUMBER_MASK];
	}
	if (!ep)
		return -ENOENT;
	switch(uurb->type) {
	case USBDEVFS_URB_TYPE_CONTROL:
		if (!usb_endpoint_xfer_control(&ep->desc))
			return -EINVAL;
		/* min 8 byte setup packet,
		 * max 8 byte setup plus an arbitrary data
		 * stage */
		if (uurb->buffer_length < 8 ||
		    uurb->buffer_length > (8 + MAX_USBFS_BUFFER_SIZE))
			return -EINVAL;
		dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
		if (!dr)
			return -ENOMEM;
		/* The first 8 bytes of the user buffer are the SETUP packet. */
		if (copy_from_user(dr, uurb->buffer, 8)) {
			kfree(dr);
			return -EFAULT;
		}
		if (uurb->buffer_length < (le16_to_cpup(&dr->wLength) + 8)) {
			kfree(dr);
			return -EINVAL;
		}
		ret = check_ctrlrecip(ps, dr->bRequestType,
				      le16_to_cpup(&dr->wIndex));
		if (ret) {
			kfree(dr);
			return ret;
		}
		uurb->number_of_packets = 0;
		uurb->buffer_length = le16_to_cpup(&dr->wLength);
		uurb->buffer += 8;	/* data stage follows the setup packet */
		if ((dr->bRequestType & USB_DIR_IN) && uurb->buffer_length) {
			is_in = 1;
			uurb->endpoint |= USB_DIR_IN;
		} else {
			is_in = 0;
			uurb->endpoint &= ~USB_DIR_IN;
		}
		break;

	case USBDEVFS_URB_TYPE_BULK:
		switch (usb_endpoint_type(&ep->desc)) {
		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_ISOC:
			return -EINVAL;
		/* allow single-shot interrupt transfers, at bogus rates */
		}
		uurb->number_of_packets = 0;
		if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
			return -EINVAL;
		break;

	case USBDEVFS_URB_TYPE_ISO:
		/* arbitrary limit */
		if (uurb->number_of_packets < 1 ||
		    uurb->number_of_packets > 128)
			return -EINVAL;
		if (!usb_endpoint_xfer_isoc(&ep->desc))
			return -EINVAL;
		isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) *
				uurb->number_of_packets;
		if (!(isopkt = kmalloc(isofrmlen, GFP_KERNEL)))
			return -ENOMEM;
		if (copy_from_user(isopkt, iso_frame_desc, isofrmlen)) {
			kfree(isopkt);
			return -EFAULT;
		}
		for (totlen = u = 0; u < uurb->number_of_packets; u++) {
			/* arbitrary limit,
			 * sufficient for USB 2.0 high-bandwidth iso */
			if (isopkt[u].length > 8192) {
				kfree(isopkt);
				return -EINVAL;
			}
			totlen += isopkt[u].length;
		}
		/* 3072 * 64 microframes */
		if (totlen > 196608) {
			kfree(isopkt);
			return -EINVAL;
		}
		uurb->buffer_length = totlen;
		break;

	case USBDEVFS_URB_TYPE_INTERRUPT:
		uurb->number_of_packets = 0;
		if (!usb_endpoint_xfer_int(&ep->desc))
			return -EINVAL;
		if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
			return -EINVAL;
		break;

	default:
		return
			-EINVAL;
	}
	/* Validate the whole user buffer once, for the final direction. */
	if (uurb->buffer_length > 0 &&
			!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
				uurb->buffer, uurb->buffer_length)) {
		kfree(isopkt);
		kfree(dr);
		return -EFAULT;
	}
	as = alloc_async(uurb->number_of_packets);
	if (!as) {
		kfree(isopkt);
		kfree(dr);
		return -ENOMEM;
	}
	if (uurb->buffer_length > 0) {
		as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
				GFP_KERNEL);
		if (!as->urb->transfer_buffer) {
			kfree(isopkt);
			kfree(dr);
			free_async(as);
			return -ENOMEM;
		}
	}
	as->urb->dev = ps->dev;
	as->urb->pipe = (uurb->type << 30) |
			__create_pipe(ps->dev, uurb->endpoint & 0xf) |
			(uurb->endpoint & USB_DIR_IN);

	/* This tedious sequence is necessary because the URB_* flags
	 * are internal to the kernel and subject to change, whereas
	 * the USBDEVFS_URB_* flags are a user API and must not be changed.
	 */
	u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
	if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
		u |= URB_ISO_ASAP;
	if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
		u |= URB_SHORT_NOT_OK;
	if (uurb->flags & USBDEVFS_URB_NO_FSBR)
		u |= URB_NO_FSBR;
	if (uurb->flags & USBDEVFS_URB_ZERO_PACKET)
		u |= URB_ZERO_PACKET;
	if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
		u |= URB_NO_INTERRUPT;
	as->urb->transfer_flags = u;

	as->urb->transfer_buffer_length = uurb->buffer_length;
	as->urb->setup_packet = (unsigned char *)dr;
	as->urb->start_frame = uurb->start_frame;
	as->urb->number_of_packets = uurb->number_of_packets;
	if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
			ps->dev->speed == USB_SPEED_HIGH)
		as->urb->interval = 1 << min(15, ep->desc.bInterval - 1);
	else
		as->urb->interval = ep->desc.bInterval;
	as->urb->context = as;
	as->urb->complete = async_completed;
	/* Lay the ISO packets out back-to-back in the transfer buffer. */
	for (totlen = u = 0; u < uurb->number_of_packets; u++) {
		as->urb->iso_frame_desc[u].offset = totlen;
		as->urb->iso_frame_desc[u].length = isopkt[u].length;
		totlen += isopkt[u].length;
	}
	kfree(isopkt);
	as->ps = ps;
	as->userurb = arg;
	if (is_in && uurb->buffer_length > 0)
		as->userbuffer = uurb->buffer;
	else
		as->userbuffer = NULL;
	as->signr = uurb->signr;
	as->ifnum = ifnum;
	as->pid = get_pid(task_pid(current));
	as->uid = cred->uid;
	as->euid = cred->euid;
	security_task_getsecid(current, &as->secid);
	if (!is_in && uurb->buffer_length > 0) {
		/* OUT transfer: copy the payload into the kernel buffer now. */
		if (copy_from_user(as->urb->transfer_buffer, uurb->buffer,
				uurb->buffer_length)) {
			free_async(as);
			return -EFAULT;
		}
	}
	snoop_urb(ps->dev, as->userurb, as->urb->pipe,
			as->urb->transfer_buffer_length, 0, SUBMIT);
	async_newpending(as);

	if (usb_endpoint_xfer_bulk(&ep->desc)) {
		spin_lock_irq(&ps->lock);

		/* Not exactly the endpoint address; the direction bit is
		 * shifted to the 0x10 position so that the value will be
		 * between 0 and 31.
		 */
		as->bulk_addr = usb_endpoint_num(&ep->desc) |
			((ep->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK)
				>> 3);

		/* If this bulk URB is the start of a new transfer, re-enable
		 * the endpoint.  Otherwise mark it as a continuation URB.
		 */
		if (uurb->flags & USBDEVFS_URB_BULK_CONTINUATION)
			as->bulk_status = AS_CONTINUATION;
		else
			ps->disabled_bulk_eps &= ~(1 << as->bulk_addr);

		/* Don't accept continuation URBs if the endpoint is
		 * disabled because of an earlier error.
		 */
		if (ps->disabled_bulk_eps & (1 << as->bulk_addr))
			ret = -EREMOTEIO;
		else
			ret = usb_submit_urb(as->urb, GFP_ATOMIC);
		spin_unlock_irq(&ps->lock);
	} else {
		ret = usb_submit_urb(as->urb, GFP_KERNEL);
	}

	if (ret) {
		dev_printk(KERN_DEBUG, &ps->dev->dev,
			   "usbfs: usb_submit_urb returned %d\n", ret);
		snoop_urb(ps->dev, as->userurb, as->urb->pipe,
				0, ret, COMPLETE);
		async_removepending(as);
		free_async(as);
		return ret;
	}
	return 0;
}

/* USBDEVFS_SUBMITURB: copy the user URB header and hand off. */
static int proc_submiturb(struct dev_state *ps, void __user *arg)
{
	struct usbdevfs_urb uurb;

	if (copy_from_user(&uurb, arg, sizeof(uurb)))
		return -EFAULT;

	return proc_do_submiturb(ps, &uurb,
			(((struct usbdevfs_urb __user *)arg)->iso_frame_desc),
			arg);
}

/* USBDEVFS_DISCARDURB: cancel a pending URB identified by its user
 * pointer; it will still be reported through the reap interface. */
static int proc_unlinkurb(struct dev_state *ps, void __user *arg)
{
	struct async *as;

	as = async_getpending(ps, arg);
	if (!as)
		return -EINVAL;
	usb_kill_urb(as->urb);
	return 0;
}

/* Copy results of a completed URB back to the user's usbdevfs_urb and
 * report its user pointer through *arg. */
static int processcompl(struct async *as, void __user * __user *arg)
{
	struct urb *urb = as->urb;
	struct usbdevfs_urb __user *userurb = as->userurb;
	void __user *addr = as->userurb;
	unsigned int i;

	if (as->userbuffer && urb->actual_length)
		if (copy_to_user(as->userbuffer, urb->transfer_buffer,
				 urb->actual_length))
			goto err_out;
	if (put_user(as->status, &userurb->status))
		goto err_out;
	if (put_user(urb->actual_length, &userurb->actual_length))
		goto err_out;
	if (put_user(urb->error_count, &userurb->error_count))
		goto err_out;

	if (usb_endpoint_xfer_isoc(&urb->ep->desc)) {
		/* Per-packet lengths and statuses for ISO transfers. */
		for (i = 0; i < urb->number_of_packets; i++) {
			if (put_user(urb->iso_frame_desc[i].actual_length,
				     &userurb->iso_frame_desc[i].actual_length))
				goto err_out;
			if (put_user(urb->iso_frame_desc[i].status,
				     &userurb->iso_frame_desc[i].status))
				goto err_out;
		}
	}

	if (put_user(addr, (void __user * __user *)arg))
		return -EFAULT;
	return 0;

err_out:
	return -EFAULT;
}

/* Block until a completed async request is available or a signal
 * arrives; sleeps with the device lock dropped. */
static struct async *reap_as(struct dev_state *ps)
{
	DECLARE_WAITQUEUE(wait, current);
	struct async *as = NULL;
	struct usb_device *dev = ps->dev;

	add_wait_queue(&ps->wait, &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		as = async_getcompleted(ps);
		if (as)
			break;
		if (signal_pending(current))
			break;
		/* Sleep with the device unlocked so completions can run. */
		usb_unlock_device(dev);
		schedule();
		usb_lock_device(dev);
	}
	remove_wait_queue(&ps->wait, &wait);
	set_current_state(TASK_RUNNING);
	return as;
}

/* USBDEVFS_REAPURB: blocking reap of one completed URB. */
static int proc_reapurb(struct dev_state *ps, void __user *arg)
{
	struct async *as = reap_as(ps);
	if (as) {
		int retval = processcompl(as, (void __user * __user *)arg);
		free_async(as);
		return retval;
	}
	if (signal_pending(current))
		return -EINTR;
	return -EIO;
}

/* USBDEVFS_REAPURBNDELAY: non-blocking reap; -EAGAIN if none ready. */
static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
{
	int retval;
	struct async *as;

	as = async_getcompleted(ps);
	retval = -EAGAIN;
	if (as) {
		retval = processcompl(as, (void __user * __user *)arg);
		free_async(as);
	}
	return retval;
}

#ifdef CONFIG_COMPAT

/* Translate a 32-bit usbdevfs_urb into the native layout, widening the
 * 32-bit buffer/context pointers with compat_ptr(). */
static int get_urb32(struct usbdevfs_urb *kurb,
		     struct usbdevfs_urb32 __user *uurb)
{
	__u32  uptr;
	if (!access_ok(VERIFY_READ, uurb, sizeof(*uurb)) ||
	    __get_user(kurb->type, &uurb->type) ||
	    __get_user(kurb->endpoint, &uurb->endpoint) ||
	    __get_user(kurb->status, &uurb->status) ||
	    __get_user(kurb->flags, &uurb->flags) ||
	    __get_user(kurb->buffer_length, &uurb->buffer_length) ||
	    __get_user(kurb->actual_length, &uurb->actual_length) ||
	    __get_user(kurb->start_frame, &uurb->start_frame) ||
	    __get_user(kurb->number_of_packets, &uurb->number_of_packets) ||
	    __get_user(kurb->error_count, &uurb->error_count) ||
	    __get_user(kurb->signr, &uurb->signr))
		return -EFAULT;

	if (__get_user(uptr, &uurb->buffer))
		return -EFAULT;
	kurb->buffer = compat_ptr(uptr);
	if (__get_user(uptr, &uurb->usercontext))
		return -EFAULT;
	kurb->usercontext = compat_ptr(uptr);

	return 0;
}

/* USBDEVFS_SUBMITURB32: compat entry point for URB submission. */
static int proc_submiturb_compat(struct dev_state *ps, void __user *arg)
{
	struct usbdevfs_urb uurb;

	if (get_urb32(&uurb, (struct usbdevfs_urb32 __user *)arg))
		return -EFAULT;

	return proc_do_submiturb(ps, &uurb,
			((struct usbdevfs_urb32 __user *)arg)->iso_frame_desc,
			arg);
}

static int processcompl_compat(struct async *as, void __user * __user
*arg)
{
	struct urb *urb = as->urb;
	struct usbdevfs_urb32 __user *userurb = as->userurb;
	void __user *addr = as->userurb;
	unsigned int i;

	if (as->userbuffer && urb->actual_length)
		if (copy_to_user(as->userbuffer, urb->transfer_buffer,
				 urb->actual_length))
			return -EFAULT;
	if (put_user(as->status, &userurb->status))
		return -EFAULT;
	if (put_user(urb->actual_length, &userurb->actual_length))
		return -EFAULT;
	if (put_user(urb->error_count, &userurb->error_count))
		return -EFAULT;

	if (usb_endpoint_xfer_isoc(&urb->ep->desc)) {
		for (i = 0; i < urb->number_of_packets; i++) {
			if (put_user(urb->iso_frame_desc[i].actual_length,
				     &userurb->iso_frame_desc[i].actual_length))
				return -EFAULT;
			if (put_user(urb->iso_frame_desc[i].status,
				     &userurb->iso_frame_desc[i].status))
				return -EFAULT;
		}
	}

	/* Hand the 32-bit user URB pointer back through *arg. */
	if (put_user(ptr_to_compat(addr), (u32 __user *)arg))
		return -EFAULT;
	return 0;
}

/* USBDEVFS_REAPURB32: blocking compat reap. */
static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
{
	struct async *as = reap_as(ps);
	if (as) {
		int retval = processcompl_compat(as, (void __user * __user *)arg);
		free_async(as);
		return retval;
	}
	if (signal_pending(current))
		return -EINTR;
	return -EIO;
}

/* USBDEVFS_REAPURBNDELAY32: non-blocking compat reap. */
static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
{
	int retval;
	struct async *as;

	retval = -EAGAIN;
	as = async_getcompleted(ps);
	if (as) {
		retval = processcompl_compat(as, (void __user * __user *)arg);
		free_async(as);
	}
	return retval;
}

#endif

/* USBDEVFS_DISCSIGNAL: register a signal number (and context cookie)
 * to be delivered to this process on device disconnect. */
static int proc_disconnectsignal(struct dev_state *ps, void __user *arg)
{
	struct usbdevfs_disconnectsignal ds;

	if (copy_from_user(&ds, arg, sizeof(ds)))
		return -EFAULT;
	ps->discsignr = ds.signr;
	ps->disccontext = ds.context;
	return 0;
}

/* USBDEVFS_CLAIMINTERFACE ioctl wrapper. */
static int proc_claiminterface(struct dev_state *ps, void __user *arg)
{
	unsigned int ifnum;

	if (get_user(ifnum, (unsigned int __user *)arg))
		return -EFAULT;
	return claimintf(ps, ifnum);
}

/* USBDEVFS_RELEASEINTERFACE ioctl wrapper; also kills the interface's
 * pending async requests. */
static int proc_releaseinterface(struct dev_state *ps, void __user *arg)
{
	unsigned int ifnum;
	int ret;

	if (get_user(ifnum, (unsigned int __user *)arg))
		return
			-EFAULT;
	if ((ret = releaseintf(ps, ifnum)) < 0)
		return ret;
	destroy_async_on_interface (ps, ifnum);
	return 0;
}

/*
 * USBDEVFS_IOCTL core: driver-directed operations — force-disconnect
 * the bound kernel driver, let drivers rebind, or forward an ioctl to
 * the bound driver.  Data is bounced through a kernel buffer whose
 * size comes from _IOC_SIZE() of the embedded ioctl code.
 */
static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl)
{
	int size;
	void *buf = NULL;
	int retval = 0;
	struct usb_interface *intf = NULL;
	struct usb_driver *driver = NULL;

	/* alloc buffer */
	if ((size = _IOC_SIZE(ctl->ioctl_code)) > 0) {
		if ((buf = kmalloc(size, GFP_KERNEL)) == NULL)
			return -ENOMEM;
		if ((_IOC_DIR(ctl->ioctl_code) & _IOC_WRITE)) {
			if (copy_from_user(buf, ctl->data, size)) {
				kfree(buf);
				return -EFAULT;
			}
		} else {
			memset(buf, 0, size);
		}
	}

	if (!connected(ps)) {
		kfree(buf);
		return -ENODEV;
	}

	if (ps->dev->state != USB_STATE_CONFIGURED)
		retval = -EHOSTUNREACH;
	else if (!(intf = usb_ifnum_to_if(ps->dev, ctl->ifno)))
		retval = -EINVAL;
	else switch (ctl->ioctl_code) {

	/* disconnect kernel driver from interface */
	case USBDEVFS_DISCONNECT:
		if (intf->dev.driver) {
			driver = to_usb_driver(intf->dev.driver);
			dev_dbg(&intf->dev, "disconnect by usbfs\n");
			usb_driver_release_interface(driver, intf);
		} else
			retval = -ENODATA;
		break;

	/* let kernel drivers try to (re)bind to the interface */
	case USBDEVFS_CONNECT:
		if (!intf->dev.driver)
			retval = device_attach(&intf->dev);
		else
			retval = -EBUSY;
		break;

	/* talk directly to the interface's driver */
	default:
		if (intf->dev.driver)
			driver = to_usb_driver(intf->dev.driver);
		if (driver == NULL || driver->ioctl == NULL) {
			retval = -ENOTTY;
		} else {
			retval = driver->ioctl(intf, ctl->ioctl_code, buf);
			if (retval == -ENOIOCTLCMD)
				retval = -ENOTTY;
		}
	}

	/* cleanup and return */
	if (retval >= 0
			&& (_IOC_DIR(ctl->ioctl_code) & _IOC_READ) != 0
			&& size > 0
			&& copy_to_user(ctl->data, buf, size) != 0)
		retval = -EFAULT;

	kfree(buf);
	return retval;
}

/* USBDEVFS_IOCTL: native-width entry point. */
static int proc_ioctl_default(struct dev_state *ps, void __user *arg)
{
	struct usbdevfs_ioctl ctrl;

	if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
		return -EFAULT;
	return proc_ioctl(ps, &ctrl);
}

#ifdef CONFIG_COMPAT
static int proc_ioctl_compat(struct dev_state *ps,
			compat_uptr_t arg)
{
	struct usbdevfs_ioctl32 __user *uioc;
	struct usbdevfs_ioctl ctrl;
	u32 udata;

	/* Widen the 32-bit ioctl block into the native structure. */
	uioc = compat_ptr((long)arg);
	if (!access_ok(VERIFY_READ, uioc, sizeof(*uioc)) ||
	    __get_user(ctrl.ifno, &uioc->ifno) ||
	    __get_user(ctrl.ioctl_code, &uioc->ioctl_code) ||
	    __get_user(udata, &uioc->data))
		return -EFAULT;
	ctrl.data = compat_ptr(udata);

	return proc_ioctl(ps, &ctrl);
}
#endif

/* USBDEVFS_CLAIM_PORT: give this file exclusive use of a hub port. */
static int proc_claim_port(struct dev_state *ps, void __user *arg)
{
	unsigned portnum;
	int rc;

	if (get_user(portnum, (unsigned __user *) arg))
		return -EFAULT;
	rc = usb_hub_claim_port(ps->dev, portnum, ps);
	if (rc == 0)
		snoop(&ps->dev->dev, "port %d claimed by process %d: %s\n",
			portnum, task_pid_nr(current), current->comm);
	return rc;
}

/* USBDEVFS_RELEASE_PORT: drop a hub-port claim taken above. */
static int proc_release_port(struct dev_state *ps, void __user *arg)
{
	unsigned portnum;

	if (get_user(portnum, (unsigned __user *) arg))
		return -EFAULT;
	return usb_hub_release_port(ps->dev, portnum, ps);
}

/*
 * NOTE:  All requests here that have interface numbers as parameters
 * are assuming that somehow the configuration has been prevented from
 * changing.  But there's no mechanism to ensure that...
 */

/* Main usbfs ioctl dispatcher.  Requires the file open for writing;
 * takes the device lock around every handler and updates the inode's
 * mtime for data-transferring commands, atime for any success. */
static int usbdev_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct dev_state *ps = file->private_data;
	struct usb_device *dev = ps->dev;
	void __user *p = (void __user *)arg;
	int ret = -ENOTTY;

	if (!(file->f_mode & FMODE_WRITE))
		return -EPERM;
	usb_lock_device(dev);
	if (!connected(ps)) {
		usb_unlock_device(dev);
		return -ENODEV;
	}

	switch (cmd) {
	case USBDEVFS_CONTROL:
		snoop(&dev->dev, "%s: CONTROL\n", __func__);
		ret = proc_control(ps, p);
		if (ret >= 0)
			inode->i_mtime = CURRENT_TIME;
		break;

	case USBDEVFS_BULK:
		snoop(&dev->dev, "%s: BULK\n", __func__);
		ret = proc_bulk(ps, p);
		if (ret >= 0)
			inode->i_mtime = CURRENT_TIME;
		break;

	case USBDEVFS_RESETEP:
		snoop(&dev->dev, "%s: RESETEP\n", __func__);
		ret = proc_resetep(ps, p);
		if (ret >= 0)
			inode->i_mtime = CURRENT_TIME;
		break;

	case USBDEVFS_RESET:
		snoop(&dev->dev, "%s: RESET\n", __func__);
		ret = proc_resetdevice(ps);
		break;

	case USBDEVFS_CLEAR_HALT:
		snoop(&dev->dev, "%s: CLEAR_HALT\n", __func__);
		ret = proc_clearhalt(ps, p);
		if (ret >= 0)
			inode->i_mtime = CURRENT_TIME;
		break;

	case USBDEVFS_GETDRIVER:
		snoop(&dev->dev, "%s: GETDRIVER\n", __func__);
		ret = proc_getdriver(ps, p);
		break;

	case USBDEVFS_CONNECTINFO:
		snoop(&dev->dev, "%s: CONNECTINFO\n", __func__);
		ret = proc_connectinfo(ps, p);
		break;

	case USBDEVFS_SETINTERFACE:
		snoop(&dev->dev, "%s: SETINTERFACE\n", __func__);
		ret = proc_setintf(ps, p);
		break;

	case USBDEVFS_SETCONFIGURATION:
		snoop(&dev->dev, "%s: SETCONFIGURATION\n", __func__);
		ret = proc_setconfig(ps, p);
		break;

	case USBDEVFS_SUBMITURB:
		snoop(&dev->dev, "%s: SUBMITURB\n", __func__);
		ret = proc_submiturb(ps, p);
		if (ret >= 0)
			inode->i_mtime = CURRENT_TIME;
		break;

#ifdef CONFIG_COMPAT

	case USBDEVFS_SUBMITURB32:
		snoop(&dev->dev, "%s: SUBMITURB32\n", __func__);
		ret = proc_submiturb_compat(ps, p);
		if (ret >= 0)
			inode->i_mtime = CURRENT_TIME;
		break;

	case USBDEVFS_REAPURB32:
		snoop(&dev->dev, "%s: REAPURB32\n", __func__);
		ret = proc_reapurb_compat(ps, p);
		break;

	case
	     USBDEVFS_REAPURBNDELAY32:
		snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__);
		ret = proc_reapurbnonblock_compat(ps, p);
		break;

	case USBDEVFS_IOCTL32:
		snoop(&dev->dev, "%s: IOCTL\n", __func__);
		ret = proc_ioctl_compat(ps, ptr_to_compat(p));
		break;
#endif

	case USBDEVFS_DISCARDURB:
		snoop(&dev->dev, "%s: DISCARDURB\n", __func__);
		ret = proc_unlinkurb(ps, p);
		break;

	case USBDEVFS_REAPURB:
		snoop(&dev->dev, "%s: REAPURB\n", __func__);
		ret = proc_reapurb(ps, p);
		break;

	case USBDEVFS_REAPURBNDELAY:
		snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__);
		ret = proc_reapurbnonblock(ps, p);
		break;

	case USBDEVFS_DISCSIGNAL:
		snoop(&dev->dev, "%s: DISCSIGNAL\n", __func__);
		ret = proc_disconnectsignal(ps, p);
		break;

	case USBDEVFS_CLAIMINTERFACE:
		snoop(&dev->dev, "%s: CLAIMINTERFACE\n", __func__);
		ret = proc_claiminterface(ps, p);
		break;

	case USBDEVFS_RELEASEINTERFACE:
		snoop(&dev->dev, "%s: RELEASEINTERFACE\n", __func__);
		ret = proc_releaseinterface(ps, p);
		break;

	case USBDEVFS_IOCTL:
		snoop(&dev->dev, "%s: IOCTL\n", __func__);
		ret = proc_ioctl_default(ps, p);
		break;

	case USBDEVFS_CLAIM_PORT:
		snoop(&dev->dev, "%s: CLAIM_PORT\n", __func__);
		ret = proc_claim_port(ps, p);
		break;

	case USBDEVFS_RELEASE_PORT:
		snoop(&dev->dev, "%s: RELEASE_PORT\n", __func__);
		ret = proc_release_port(ps, p);
		break;
	}
	usb_unlock_device(dev);
	if (ret >= 0)
		inode->i_atime = CURRENT_TIME;
	return ret;
}

/* No kernel lock - fine */

/* poll(): writable (POLLOUT) when a completed URB awaits reaping;
 * error/hangup once the device is gone. */
static unsigned int usbdev_poll(struct file *file,
				struct poll_table_struct *wait)
{
	struct dev_state *ps = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &ps->wait, wait);
	if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed))
		mask |= POLLOUT | POLLWRNORM;
	if (!connected(ps))
		mask |= POLLERR | POLLHUP;
	return mask;
}

const struct file_operations usbdev_file_operations = {
	.owner =	THIS_MODULE,
	.llseek =	usbdev_lseek,
	.read =		usbdev_read,
	.poll =		usbdev_poll,
	.ioctl =	usbdev_ioctl,
	.open =		usbdev_open,
	.release =	usbdev_release,
};

static void usbdev_remove(struct
usb_device *udev) { struct dev_state *ps; struct siginfo sinfo; while (!list_empty(&udev->filelist)) { ps = list_entry(udev->filelist.next, struct dev_state, list); destroy_all_async(ps); wake_up_all(&ps->wait); list_del_init(&ps->list); if (ps->discsignr) { sinfo.si_signo = ps->discsignr; sinfo.si_errno = EPIPE; sinfo.si_code = SI_ASYNCIO; sinfo.si_addr = ps->disccontext; kill_pid_info_as_uid(ps->discsignr, &sinfo, ps->disc_pid, ps->disc_uid, ps->disc_euid, ps->secid); } } } #ifdef CONFIG_USB_DEVICE_CLASS static struct class *usb_classdev_class; static int usb_classdev_add(struct usb_device *dev) { struct device *cldev; cldev = device_create(usb_classdev_class, &dev->dev, dev->dev.devt, NULL, "usbdev%d.%d", dev->bus->busnum, dev->devnum); if (IS_ERR(cldev)) return PTR_ERR(cldev); dev->usb_classdev = cldev; return 0; } static void usb_classdev_remove(struct usb_device *dev) { if (dev->usb_classdev) device_unregister(dev->usb_classdev); } #else #define usb_classdev_add(dev) 0 #define usb_classdev_remove(dev) do {} while (0) #endif static int usbdev_notify(struct notifier_block *self, unsigned long action, void *dev) { switch (action) { case USB_DEVICE_ADD: if (usb_classdev_add(dev)) return NOTIFY_BAD; break; case USB_DEVICE_REMOVE: usb_classdev_remove(dev); usbdev_remove(dev); break; } return NOTIFY_OK; } static struct notifier_block usbdev_nb = { .notifier_call = usbdev_notify, }; static struct cdev usb_device_cdev; int __init usb_devio_init(void) { int retval; retval = register_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX, "usb_device"); if (retval) { printk(KERN_ERR "Unable to register minors for usb_device\n"); goto out; } cdev_init(&usb_device_cdev, &usbdev_file_operations); retval = cdev_add(&usb_device_cdev, USB_DEVICE_DEV, USB_DEVICE_MAX); if (retval) { printk(KERN_ERR "Unable to get usb_device major %d\n", USB_DEVICE_MAJOR); goto error_cdev; } #ifdef CONFIG_USB_DEVICE_CLASS usb_classdev_class = class_create(THIS_MODULE, "usb_device"); if 
(IS_ERR(usb_classdev_class)) { printk(KERN_ERR "Unable to register usb_device class\n"); retval = PTR_ERR(usb_classdev_class); cdev_del(&usb_device_cdev); usb_classdev_class = NULL; goto out; } /* devices of this class shadow the major:minor of their parent * device, so clear ->dev_kobj to prevent adding duplicate entries * to /sys/dev */ usb_classdev_class->dev_kobj = NULL; #endif usb_register_notify(&usbdev_nb); out: return retval; error_cdev: unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX); goto out; } void usb_devio_cleanup(void) { usb_unregister_notify(&usbdev_nb); #ifdef CONFIG_USB_DEVICE_CLASS class_destroy(usb_classdev_class); #endif cdev_del(&usb_device_cdev); unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX); }
gpl-2.0
bluechiptechnology/linux-bctrx3
drivers/net/ppp/ppp_generic.c
536
71994
/* * Generic PPP layer for Linux. * * Copyright 1999-2002 Paul Mackerras. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * The generic PPP layer handles the PPP network interfaces, the * /dev/ppp device, packet and VJ compression, and multilink. * It talks to PPP `channels' via the interface defined in * include/linux/ppp_channel.h. Channels provide the basic means for * sending and receiving PPP frames on some kind of communications * channel. * * Part of the code in this driver was inspired by the old async-only * PPP driver, written by Michael Callahan and Al Longyear, and * subsequently hacked by Paul Mackerras. * * ==FILEVERSION 20041108== */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/init.h> #include <linux/list.h> #include <linux/idr.h> #include <linux/netdevice.h> #include <linux/poll.h> #include <linux/ppp_defs.h> #include <linux/filter.h> #include <linux/ppp-ioctl.h> #include <linux/ppp_channel.h> #include <linux/ppp-comp.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/if_arp.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/spinlock.h> #include <linux/rwsem.h> #include <linux/stddef.h> #include <linux/device.h> #include <linux/mutex.h> #include <linux/slab.h> #include <asm/unaligned.h> #include <net/slhc_vj.h> #include <linux/atomic.h> #include <linux/nsproxy.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #define PPP_VERSION "2.4.2" /* * Network protocols we support. */ #define NP_IP 0 /* Internet Protocol V4 */ #define NP_IPV6 1 /* Internet Protocol V6 */ #define NP_IPX 2 /* IPX protocol */ #define NP_AT 3 /* Appletalk protocol */ #define NP_MPLS_UC 4 /* MPLS unicast */ #define NP_MPLS_MC 5 /* MPLS multicast */ #define NUM_NP 6 /* Number of NPs. 
*/ #define MPHDRLEN 6 /* multilink protocol header length */ #define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */ /* * An instance of /dev/ppp can be associated with either a ppp * interface unit or a ppp channel. In both cases, file->private_data * points to one of these. */ struct ppp_file { enum { INTERFACE=1, CHANNEL } kind; struct sk_buff_head xq; /* pppd transmit queue */ struct sk_buff_head rq; /* receive queue for pppd */ wait_queue_head_t rwait; /* for poll on reading /dev/ppp */ atomic_t refcnt; /* # refs (incl /dev/ppp attached) */ int hdrlen; /* space to leave for headers */ int index; /* interface unit / channel number */ int dead; /* unit/channel has been shut down */ }; #define PF_TO_X(pf, X) container_of(pf, X, file) #define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp) #define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel) /* * Data structure to hold primary network stats for which * we want to use 64 bit storage. Other network stats * are stored in dev->stats of the ppp strucute. */ struct ppp_link_stats { u64 rx_packets; u64 tx_packets; u64 rx_bytes; u64 tx_bytes; }; /* * Data structure describing one ppp unit. * A ppp unit corresponds to a ppp network interface device * and represents a multilink bundle. * It can have 0 or more ppp channels connected to it. 
*/ struct ppp { struct ppp_file file; /* stuff for read/write/poll 0 */ struct file *owner; /* file that owns this unit 48 */ struct list_head channels; /* list of attached channels 4c */ int n_channels; /* how many channels are attached 54 */ spinlock_t rlock; /* lock for receive side 58 */ spinlock_t wlock; /* lock for transmit side 5c */ int mru; /* max receive unit 60 */ unsigned int flags; /* control bits 64 */ unsigned int xstate; /* transmit state bits 68 */ unsigned int rstate; /* receive state bits 6c */ int debug; /* debug flags 70 */ struct slcompress *vj; /* state for VJ header compression */ enum NPmode npmode[NUM_NP]; /* what to do with each net proto 78 */ struct sk_buff *xmit_pending; /* a packet ready to go out 88 */ struct compressor *xcomp; /* transmit packet compressor 8c */ void *xc_state; /* its internal state 90 */ struct compressor *rcomp; /* receive decompressor 94 */ void *rc_state; /* its internal state 98 */ unsigned long last_xmit; /* jiffies when last pkt sent 9c */ unsigned long last_recv; /* jiffies when last pkt rcvd a0 */ struct net_device *dev; /* network interface device a4 */ int closing; /* is device closing down? a8 */ #ifdef CONFIG_PPP_MULTILINK int nxchan; /* next channel to send something on */ u32 nxseq; /* next sequence number to send */ int mrru; /* MP: max reconst. 
receive unit */ u32 nextseq; /* MP: seq no of next packet */ u32 minseq; /* MP: min of most recent seqnos */ struct sk_buff_head mrq; /* MP: receive reconstruction queue */ #endif /* CONFIG_PPP_MULTILINK */ #ifdef CONFIG_PPP_FILTER struct sock_filter *pass_filter; /* filter for packets to pass */ struct sock_filter *active_filter;/* filter for pkts to reset idle */ unsigned pass_len, active_len; #endif /* CONFIG_PPP_FILTER */ struct net *ppp_net; /* the net we belong to */ struct ppp_link_stats stats64; /* 64 bit network stats */ }; /* * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC, * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP, * SC_MUST_COMP * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR. * Bits in xstate: SC_COMP_RUN */ #define SC_FLAG_BITS (SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \ |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \ |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP) /* * Private data structure for each channel. * This includes the data structure used for multilink. 
*/ struct channel { struct ppp_file file; /* stuff for read/write/poll */ struct list_head list; /* link in all/new_channels list */ struct ppp_channel *chan; /* public channel data structure */ struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */ spinlock_t downl; /* protects `chan', file.xq dequeue */ struct ppp *ppp; /* ppp unit we're connected to */ struct net *chan_net; /* the net channel belongs to */ struct list_head clist; /* link in list of channels per unit */ rwlock_t upl; /* protects `ppp' */ #ifdef CONFIG_PPP_MULTILINK u8 avail; /* flag used in multilink stuff */ u8 had_frag; /* >= 1 fragments have been sent */ u32 lastseq; /* MP: last sequence # received */ int speed; /* speed of the corresponding ppp channel*/ #endif /* CONFIG_PPP_MULTILINK */ }; /* * SMP locking issues: * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels * list and the ppp.n_channels field, you need to take both locks * before you modify them. * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock -> * channel.downl. */ static DEFINE_MUTEX(ppp_mutex); static atomic_t ppp_unit_count = ATOMIC_INIT(0); static atomic_t channel_count = ATOMIC_INIT(0); /* per-net private data for this module */ static int ppp_net_id __read_mostly; struct ppp_net { /* units to ppp mapping */ struct idr units_idr; /* * all_ppp_mutex protects the units_idr mapping. * It also ensures that finding a ppp unit in the units_idr * map and updating its file.refcnt field is atomic. */ struct mutex all_ppp_mutex; /* channels */ struct list_head all_channels; struct list_head new_channels; int last_channel_index; /* * all_channels_lock protects all_channels and * last_channel_index, and the atomicity of find * a channel and updating its file.refcnt field. 
*/ spinlock_t all_channels_lock; }; /* Get the PPP protocol number from a skb */ #define PPP_PROTO(skb) get_unaligned_be16((skb)->data) /* We limit the length of ppp->file.rq to this (arbitrary) value */ #define PPP_MAX_RQLEN 32 /* * Maximum number of multilink fragments queued up. * This has to be large enough to cope with the maximum latency of * the slowest channel relative to the others. Strictly it should * depend on the number of channels and their characteristics. */ #define PPP_MP_MAX_QLEN 128 /* Multilink header bits. */ #define B 0x80 /* this fragment begins a packet */ #define E 0x40 /* this fragment ends a packet */ /* Compare multilink sequence numbers (assumed to be 32 bits wide) */ #define seq_before(a, b) ((s32)((a) - (b)) < 0) #define seq_after(a, b) ((s32)((a) - (b)) > 0) /* Prototypes. */ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, struct file *file, unsigned int cmd, unsigned long arg); static void ppp_xmit_process(struct ppp *ppp); static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); static void ppp_push(struct ppp *ppp); static void ppp_channel_push(struct channel *pch); static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch); static void ppp_receive_error(struct ppp *ppp); static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb); static struct sk_buff *ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb); #ifdef CONFIG_PPP_MULTILINK static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch); static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb); static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp); static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb); #endif /* CONFIG_PPP_MULTILINK */ static int ppp_set_compress(struct ppp *ppp, unsigned long arg); static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound); static void ppp_ccp_closed(struct ppp *ppp); static struct 
compressor *find_compressor(int type); static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st); static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp); static void init_ppp_file(struct ppp_file *pf, int kind); static void ppp_shutdown_interface(struct ppp *ppp); static void ppp_destroy_interface(struct ppp *ppp); static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit); static struct channel *ppp_find_channel(struct ppp_net *pn, int unit); static int ppp_connect_channel(struct channel *pch, int unit); static int ppp_disconnect_channel(struct channel *pch); static void ppp_destroy_channel(struct channel *pch); static int unit_get(struct idr *p, void *ptr); static int unit_set(struct idr *p, void *ptr, int n); static void unit_put(struct idr *p, int n); static void *unit_find(struct idr *p, int n); static struct class *ppp_class; /* per net-namespace data */ static inline struct ppp_net *ppp_pernet(struct net *net) { BUG_ON(!net); return net_generic(net, ppp_net_id); } /* Translates a PPP protocol number to a NP index (NP == network protocol) */ static inline int proto_to_npindex(int proto) { switch (proto) { case PPP_IP: return NP_IP; case PPP_IPV6: return NP_IPV6; case PPP_IPX: return NP_IPX; case PPP_AT: return NP_AT; case PPP_MPLS_UC: return NP_MPLS_UC; case PPP_MPLS_MC: return NP_MPLS_MC; } return -EINVAL; } /* Translates an NP index into a PPP protocol number */ static const int npindex_to_proto[NUM_NP] = { PPP_IP, PPP_IPV6, PPP_IPX, PPP_AT, PPP_MPLS_UC, PPP_MPLS_MC, }; /* Translates an ethertype into an NP index */ static inline int ethertype_to_npindex(int ethertype) { switch (ethertype) { case ETH_P_IP: return NP_IP; case ETH_P_IPV6: return NP_IPV6; case ETH_P_IPX: return NP_IPX; case ETH_P_PPPTALK: case ETH_P_ATALK: return NP_AT; case ETH_P_MPLS_UC: return NP_MPLS_UC; case ETH_P_MPLS_MC: return NP_MPLS_MC; } return -1; } /* Translates an NP index into an ethertype */ static const int npindex_to_ethertype[NUM_NP] = { 
ETH_P_IP, ETH_P_IPV6, ETH_P_IPX, ETH_P_PPPTALK, ETH_P_MPLS_UC, ETH_P_MPLS_MC, }; /* * Locking shorthand. */ #define ppp_xmit_lock(ppp) spin_lock_bh(&(ppp)->wlock) #define ppp_xmit_unlock(ppp) spin_unlock_bh(&(ppp)->wlock) #define ppp_recv_lock(ppp) spin_lock_bh(&(ppp)->rlock) #define ppp_recv_unlock(ppp) spin_unlock_bh(&(ppp)->rlock) #define ppp_lock(ppp) do { ppp_xmit_lock(ppp); \ ppp_recv_lock(ppp); } while (0) #define ppp_unlock(ppp) do { ppp_recv_unlock(ppp); \ ppp_xmit_unlock(ppp); } while (0) /* * /dev/ppp device routines. * The /dev/ppp device is used by pppd to control the ppp unit. * It supports the read, write, ioctl and poll functions. * Open instances of /dev/ppp can be in one of three states: * unattached, attached to a ppp unit, or attached to a ppp channel. */ static int ppp_open(struct inode *inode, struct file *file) { /* * This could (should?) be enforced by the permissions on /dev/ppp. */ if (!capable(CAP_NET_ADMIN)) return -EPERM; return 0; } static int ppp_release(struct inode *unused, struct file *file) { struct ppp_file *pf = file->private_data; struct ppp *ppp; if (pf) { file->private_data = NULL; if (pf->kind == INTERFACE) { ppp = PF_TO_PPP(pf); if (file == ppp->owner) ppp_shutdown_interface(ppp); } if (atomic_dec_and_test(&pf->refcnt)) { switch (pf->kind) { case INTERFACE: ppp_destroy_interface(PF_TO_PPP(pf)); break; case CHANNEL: ppp_destroy_channel(PF_TO_CHANNEL(pf)); break; } } } return 0; } static ssize_t ppp_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct ppp_file *pf = file->private_data; DECLARE_WAITQUEUE(wait, current); ssize_t ret; struct sk_buff *skb = NULL; struct iovec iov; ret = count; if (!pf) return -ENXIO; add_wait_queue(&pf->rwait, &wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); skb = skb_dequeue(&pf->rq); if (skb) break; ret = 0; if (pf->dead) break; if (pf->kind == INTERFACE) { /* * Return 0 (EOF) on an interface that has no * channels connected, unless it is looping * network 
traffic (demand mode). */ struct ppp *ppp = PF_TO_PPP(pf); if (ppp->n_channels == 0 && (ppp->flags & SC_LOOP_TRAFFIC) == 0) break; } ret = -EAGAIN; if (file->f_flags & O_NONBLOCK) break; ret = -ERESTARTSYS; if (signal_pending(current)) break; schedule(); } set_current_state(TASK_RUNNING); remove_wait_queue(&pf->rwait, &wait); if (!skb) goto out; ret = -EOVERFLOW; if (skb->len > count) goto outf; ret = -EFAULT; iov.iov_base = buf; iov.iov_len = count; if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len)) goto outf; ret = skb->len; outf: kfree_skb(skb); out: return ret; } static ssize_t ppp_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct ppp_file *pf = file->private_data; struct sk_buff *skb; ssize_t ret; if (!pf) return -ENXIO; ret = -ENOMEM; skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL); if (!skb) goto out; skb_reserve(skb, pf->hdrlen); ret = -EFAULT; if (copy_from_user(skb_put(skb, count), buf, count)) { kfree_skb(skb); goto out; } skb_queue_tail(&pf->xq, skb); switch (pf->kind) { case INTERFACE: ppp_xmit_process(PF_TO_PPP(pf)); break; case CHANNEL: ppp_channel_push(PF_TO_CHANNEL(pf)); break; } ret = count; out: return ret; } /* No kernel lock - fine */ static unsigned int ppp_poll(struct file *file, poll_table *wait) { struct ppp_file *pf = file->private_data; unsigned int mask; if (!pf) return 0; poll_wait(file, &pf->rwait, wait); mask = POLLOUT | POLLWRNORM; if (skb_peek(&pf->rq)) mask |= POLLIN | POLLRDNORM; if (pf->dead) mask |= POLLHUP; else if (pf->kind == INTERFACE) { /* see comment in ppp_read */ struct ppp *ppp = PF_TO_PPP(pf); if (ppp->n_channels == 0 && (ppp->flags & SC_LOOP_TRAFFIC) == 0) mask |= POLLIN | POLLRDNORM; } return mask; } #ifdef CONFIG_PPP_FILTER static int get_filter(void __user *arg, struct sock_filter **p) { struct sock_fprog uprog; struct sock_filter *code = NULL; int len, err; if (copy_from_user(&uprog, arg, sizeof(uprog))) return -EFAULT; if (!uprog.len) { *p = NULL; return 0; } len = uprog.len 
* sizeof(struct sock_filter); code = memdup_user(uprog.filter, len); if (IS_ERR(code)) return PTR_ERR(code); err = sk_chk_filter(code, uprog.len); if (err) { kfree(code); return err; } *p = code; return uprog.len; } #endif /* CONFIG_PPP_FILTER */ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct ppp_file *pf = file->private_data; struct ppp *ppp; int err = -EFAULT, val, val2, i; struct ppp_idle idle; struct npioctl npi; int unit, cflags; struct slcompress *vj; void __user *argp = (void __user *)arg; int __user *p = argp; if (!pf) return ppp_unattached_ioctl(current->nsproxy->net_ns, pf, file, cmd, arg); if (cmd == PPPIOCDETACH) { /* * We have to be careful here... if the file descriptor * has been dup'd, we could have another process in the * middle of a poll using the same file *, so we had * better not free the interface data structures - * instead we fail the ioctl. Even in this case, we * shut down the interface if we are the owner of it. * Actually, we should get rid of PPPIOCDETACH, userland * (i.e. pppd) could achieve the same effect by closing * this fd and reopening /dev/ppp. 
*/ err = -EINVAL; mutex_lock(&ppp_mutex); if (pf->kind == INTERFACE) { ppp = PF_TO_PPP(pf); if (file == ppp->owner) ppp_shutdown_interface(ppp); } if (atomic_long_read(&file->f_count) < 2) { ppp_release(NULL, file); err = 0; } else pr_warn("PPPIOCDETACH file->f_count=%ld\n", atomic_long_read(&file->f_count)); mutex_unlock(&ppp_mutex); return err; } if (pf->kind == CHANNEL) { struct channel *pch; struct ppp_channel *chan; mutex_lock(&ppp_mutex); pch = PF_TO_CHANNEL(pf); switch (cmd) { case PPPIOCCONNECT: if (get_user(unit, p)) break; err = ppp_connect_channel(pch, unit); break; case PPPIOCDISCONN: err = ppp_disconnect_channel(pch); break; default: down_read(&pch->chan_sem); chan = pch->chan; err = -ENOTTY; if (chan && chan->ops->ioctl) err = chan->ops->ioctl(chan, cmd, arg); up_read(&pch->chan_sem); } mutex_unlock(&ppp_mutex); return err; } if (pf->kind != INTERFACE) { /* can't happen */ pr_err("PPP: not interface or channel??\n"); return -EINVAL; } mutex_lock(&ppp_mutex); ppp = PF_TO_PPP(pf); switch (cmd) { case PPPIOCSMRU: if (get_user(val, p)) break; ppp->mru = val; err = 0; break; case PPPIOCSFLAGS: if (get_user(val, p)) break; ppp_lock(ppp); cflags = ppp->flags & ~val; ppp->flags = val & SC_FLAG_BITS; ppp_unlock(ppp); if (cflags & SC_CCP_OPEN) ppp_ccp_closed(ppp); err = 0; break; case PPPIOCGFLAGS: val = ppp->flags | ppp->xstate | ppp->rstate; if (put_user(val, p)) break; err = 0; break; case PPPIOCSCOMPRESS: err = ppp_set_compress(ppp, arg); break; case PPPIOCGUNIT: if (put_user(ppp->file.index, p)) break; err = 0; break; case PPPIOCSDEBUG: if (get_user(val, p)) break; ppp->debug = val; err = 0; break; case PPPIOCGDEBUG: if (put_user(ppp->debug, p)) break; err = 0; break; case PPPIOCGIDLE: idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ; idle.recv_idle = (jiffies - ppp->last_recv) / HZ; if (copy_to_user(argp, &idle, sizeof(idle))) break; err = 0; break; case PPPIOCSMAXCID: if (get_user(val, p)) break; val2 = 15; if ((val >> 16) != 0) { val2 = val >> 16; val 
&= 0xffff; } vj = slhc_init(val2+1, val+1); if (!vj) { netdev_err(ppp->dev, "PPP: no memory (VJ compressor)\n"); err = -ENOMEM; break; } ppp_lock(ppp); if (ppp->vj) slhc_free(ppp->vj); ppp->vj = vj; ppp_unlock(ppp); err = 0; break; case PPPIOCGNPMODE: case PPPIOCSNPMODE: if (copy_from_user(&npi, argp, sizeof(npi))) break; err = proto_to_npindex(npi.protocol); if (err < 0) break; i = err; if (cmd == PPPIOCGNPMODE) { err = -EFAULT; npi.mode = ppp->npmode[i]; if (copy_to_user(argp, &npi, sizeof(npi))) break; } else { ppp->npmode[i] = npi.mode; /* we may be able to transmit more packets now (??) */ netif_wake_queue(ppp->dev); } err = 0; break; #ifdef CONFIG_PPP_FILTER case PPPIOCSPASS: { struct sock_filter *code; err = get_filter(argp, &code); if (err >= 0) { ppp_lock(ppp); kfree(ppp->pass_filter); ppp->pass_filter = code; ppp->pass_len = err; ppp_unlock(ppp); err = 0; } break; } case PPPIOCSACTIVE: { struct sock_filter *code; err = get_filter(argp, &code); if (err >= 0) { ppp_lock(ppp); kfree(ppp->active_filter); ppp->active_filter = code; ppp->active_len = err; ppp_unlock(ppp); err = 0; } break; } #endif /* CONFIG_PPP_FILTER */ #ifdef CONFIG_PPP_MULTILINK case PPPIOCSMRRU: if (get_user(val, p)) break; ppp_recv_lock(ppp); ppp->mrru = val; ppp_recv_unlock(ppp); err = 0; break; #endif /* CONFIG_PPP_MULTILINK */ default: err = -ENOTTY; } mutex_unlock(&ppp_mutex); return err; } static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, struct file *file, unsigned int cmd, unsigned long arg) { int unit, err = -EFAULT; struct ppp *ppp; struct channel *chan; struct ppp_net *pn; int __user *p = (int __user *)arg; mutex_lock(&ppp_mutex); switch (cmd) { case PPPIOCNEWUNIT: /* Create a new ppp unit */ if (get_user(unit, p)) break; ppp = ppp_create_interface(net, unit, &err); if (!ppp) break; file->private_data = &ppp->file; ppp->owner = file; err = -EFAULT; if (put_user(ppp->file.index, p)) break; err = 0; break; case PPPIOCATTACH: /* Attach to an existing ppp unit */ 
if (get_user(unit, p)) break; err = -ENXIO; pn = ppp_pernet(net); mutex_lock(&pn->all_ppp_mutex); ppp = ppp_find_unit(pn, unit); if (ppp) { atomic_inc(&ppp->file.refcnt); file->private_data = &ppp->file; err = 0; } mutex_unlock(&pn->all_ppp_mutex); break; case PPPIOCATTCHAN: if (get_user(unit, p)) break; err = -ENXIO; pn = ppp_pernet(net); spin_lock_bh(&pn->all_channels_lock); chan = ppp_find_channel(pn, unit); if (chan) { atomic_inc(&chan->file.refcnt); file->private_data = &chan->file; err = 0; } spin_unlock_bh(&pn->all_channels_lock); break; default: err = -ENOTTY; } mutex_unlock(&ppp_mutex); return err; } static const struct file_operations ppp_device_fops = { .owner = THIS_MODULE, .read = ppp_read, .write = ppp_write, .poll = ppp_poll, .unlocked_ioctl = ppp_ioctl, .open = ppp_open, .release = ppp_release, .llseek = noop_llseek, }; static __net_init int ppp_init_net(struct net *net) { struct ppp_net *pn = net_generic(net, ppp_net_id); idr_init(&pn->units_idr); mutex_init(&pn->all_ppp_mutex); INIT_LIST_HEAD(&pn->all_channels); INIT_LIST_HEAD(&pn->new_channels); spin_lock_init(&pn->all_channels_lock); return 0; } static __net_exit void ppp_exit_net(struct net *net) { struct ppp_net *pn = net_generic(net, ppp_net_id); idr_destroy(&pn->units_idr); } static struct pernet_operations ppp_net_ops = { .init = ppp_init_net, .exit = ppp_exit_net, .id = &ppp_net_id, .size = sizeof(struct ppp_net), }; #define PPP_MAJOR 108 /* Called at boot time if ppp is compiled into the kernel, or at module load time (from init_module) if compiled as a module. 
*/ static int __init ppp_init(void) { int err; pr_info("PPP generic driver version " PPP_VERSION "\n"); err = register_pernet_device(&ppp_net_ops); if (err) { pr_err("failed to register PPP pernet device (%d)\n", err); goto out; } err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); if (err) { pr_err("failed to register PPP device (%d)\n", err); goto out_net; } ppp_class = class_create(THIS_MODULE, "ppp"); if (IS_ERR(ppp_class)) { err = PTR_ERR(ppp_class); goto out_chrdev; } /* not a big deal if we fail here :-) */ device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp"); return 0; out_chrdev: unregister_chrdev(PPP_MAJOR, "ppp"); out_net: unregister_pernet_device(&ppp_net_ops); out: return err; } /* * Network interface unit routines. */ static netdev_tx_t ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ppp *ppp = netdev_priv(dev); int npi, proto; unsigned char *pp; npi = ethertype_to_npindex(ntohs(skb->protocol)); if (npi < 0) goto outf; /* Drop, accept or reject the packet */ switch (ppp->npmode[npi]) { case NPMODE_PASS: break; case NPMODE_QUEUE: /* it would be nice to have a way to tell the network system to queue this one up for later. */ goto outf; case NPMODE_DROP: case NPMODE_ERROR: goto outf; } /* Put the 2-byte PPP protocol number on the front, making sure there is room for the address and control fields. 
*/ if (skb_cow_head(skb, PPP_HDRLEN)) goto outf; pp = skb_push(skb, 2); proto = npindex_to_proto[npi]; put_unaligned_be16(proto, pp); skb_queue_tail(&ppp->file.xq, skb); ppp_xmit_process(ppp); return NETDEV_TX_OK; outf: kfree_skb(skb); ++dev->stats.tx_dropped; return NETDEV_TX_OK; } static int ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct ppp *ppp = netdev_priv(dev); int err = -EFAULT; void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data; struct ppp_stats stats; struct ppp_comp_stats cstats; char *vers; switch (cmd) { case SIOCGPPPSTATS: ppp_get_stats(ppp, &stats); if (copy_to_user(addr, &stats, sizeof(stats))) break; err = 0; break; case SIOCGPPPCSTATS: memset(&cstats, 0, sizeof(cstats)); if (ppp->xc_state) ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c); if (ppp->rc_state) ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d); if (copy_to_user(addr, &cstats, sizeof(cstats))) break; err = 0; break; case SIOCGPPPVER: vers = PPP_VERSION; if (copy_to_user(addr, vers, strlen(vers) + 1)) break; err = 0; break; default: err = -EINVAL; } return err; } static struct rtnl_link_stats64* ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) { struct ppp *ppp = netdev_priv(dev); ppp_recv_lock(ppp); stats64->rx_packets = ppp->stats64.rx_packets; stats64->rx_bytes = ppp->stats64.rx_bytes; ppp_recv_unlock(ppp); ppp_xmit_lock(ppp); stats64->tx_packets = ppp->stats64.tx_packets; stats64->tx_bytes = ppp->stats64.tx_bytes; ppp_xmit_unlock(ppp); stats64->rx_errors = dev->stats.rx_errors; stats64->tx_errors = dev->stats.tx_errors; stats64->rx_dropped = dev->stats.rx_dropped; stats64->tx_dropped = dev->stats.tx_dropped; stats64->rx_length_errors = dev->stats.rx_length_errors; return stats64; } static struct lock_class_key ppp_tx_busylock; static int ppp_dev_init(struct net_device *dev) { dev->qdisc_tx_busylock = &ppp_tx_busylock; return 0; } static const struct net_device_ops ppp_netdev_ops = { .ndo_init = ppp_dev_init, 
.ndo_start_xmit = ppp_start_xmit, .ndo_do_ioctl = ppp_net_ioctl, .ndo_get_stats64 = ppp_get_stats64, }; static void ppp_setup(struct net_device *dev) { dev->netdev_ops = &ppp_netdev_ops; dev->hard_header_len = PPP_HDRLEN; dev->mtu = PPP_MRU; dev->addr_len = 0; dev->tx_queue_len = 3; dev->type = ARPHRD_PPP; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; dev->features |= NETIF_F_NETNS_LOCAL; dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; } /* * Transmit-side routines. */ /* * Called to do any work queued up on the transmit side * that can now be done. */ static void ppp_xmit_process(struct ppp *ppp) { struct sk_buff *skb; ppp_xmit_lock(ppp); if (!ppp->closing) { ppp_push(ppp); while (!ppp->xmit_pending && (skb = skb_dequeue(&ppp->file.xq))) ppp_send_frame(ppp, skb); /* If there's no work left to do, tell the core net code that we can accept some more. */ if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) netif_wake_queue(ppp->dev); else netif_stop_queue(ppp->dev); } ppp_xmit_unlock(ppp); } static inline struct sk_buff * pad_compress_skb(struct ppp *ppp, struct sk_buff *skb) { struct sk_buff *new_skb; int len; int new_skb_size = ppp->dev->mtu + ppp->xcomp->comp_extra + ppp->dev->hard_header_len; int compressor_skb_size = ppp->dev->mtu + ppp->xcomp->comp_extra + PPP_HDRLEN; new_skb = alloc_skb(new_skb_size, GFP_ATOMIC); if (!new_skb) { if (net_ratelimit()) netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n"); return NULL; } if (ppp->dev->hard_header_len > PPP_HDRLEN) skb_reserve(new_skb, ppp->dev->hard_header_len - PPP_HDRLEN); /* compressor still expects A/C bytes in hdr */ len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2, new_skb->data, skb->len + 2, compressor_skb_size); if (len > 0 && (ppp->flags & SC_CCP_UP)) { consume_skb(skb); skb = new_skb; skb_put(skb, len); skb_pull(skb, 2); /* pull off A/C bytes */ } else if (len == 0) { /* didn't compress, or CCP not up yet */ consume_skb(new_skb); new_skb = skb; } else { /* * (len < 0) * MPPE requires that we 
do not send unencrypted * frames. The compressor will return -1 if we * should drop the frame. We cannot simply test * the compress_proto because MPPE and MPPC share * the same number. */ if (net_ratelimit()) netdev_err(ppp->dev, "ppp: compressor dropped pkt\n"); kfree_skb(skb); consume_skb(new_skb); new_skb = NULL; } return new_skb; } /* * Compress and send a frame. * The caller should have locked the xmit path, * and xmit_pending should be 0. */ static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) { int proto = PPP_PROTO(skb); struct sk_buff *new_skb; int len; unsigned char *cp; if (proto < 0x8000) { #ifdef CONFIG_PPP_FILTER /* check if we should pass this packet */ /* the filter instructions are constructed assuming a four-byte PPP header on each packet */ *skb_push(skb, 2) = 1; if (ppp->pass_filter && sk_run_filter(skb, ppp->pass_filter) == 0) { if (ppp->debug & 1) netdev_printk(KERN_DEBUG, ppp->dev, "PPP: outbound frame " "not passed\n"); kfree_skb(skb); return; } /* if this packet passes the active filter, record the time */ if (!(ppp->active_filter && sk_run_filter(skb, ppp->active_filter) == 0)) ppp->last_xmit = jiffies; skb_pull(skb, 2); #else /* for data packets, record the time */ ppp->last_xmit = jiffies; #endif /* CONFIG_PPP_FILTER */ } ++ppp->stats64.tx_packets; ppp->stats64.tx_bytes += skb->len - 2; switch (proto) { case PPP_IP: if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0) break; /* try to do VJ TCP header compression */ new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2, GFP_ATOMIC); if (!new_skb) { netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n"); goto drop; } skb_reserve(new_skb, ppp->dev->hard_header_len - 2); cp = skb->data + 2; len = slhc_compress(ppp->vj, cp, skb->len - 2, new_skb->data + 2, &cp, !(ppp->flags & SC_NO_TCP_CCID)); if (cp == skb->data + 2) { /* didn't compress */ consume_skb(new_skb); } else { if (cp[0] & SL_TYPE_COMPRESSED_TCP) { proto = PPP_VJC_COMP; cp[0] &= ~SL_TYPE_COMPRESSED_TCP; } else { 
proto = PPP_VJC_UNCOMP; cp[0] = skb->data[2]; } consume_skb(skb); skb = new_skb; cp = skb_put(skb, len + 2); cp[0] = 0; cp[1] = proto; } break; case PPP_CCP: /* peek at outbound CCP frames */ ppp_ccp_peek(ppp, skb, 0); break; } /* try to do packet compression */ if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state && proto != PPP_LCP && proto != PPP_CCP) { if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) { if (net_ratelimit()) netdev_err(ppp->dev, "ppp: compression required but " "down - pkt dropped.\n"); goto drop; } skb = pad_compress_skb(ppp, skb); if (!skb) goto drop; } /* * If we are waiting for traffic (demand dialling), * queue it up for pppd to receive. */ if (ppp->flags & SC_LOOP_TRAFFIC) { if (ppp->file.rq.qlen > PPP_MAX_RQLEN) goto drop; skb_queue_tail(&ppp->file.rq, skb); wake_up_interruptible(&ppp->file.rwait); return; } ppp->xmit_pending = skb; ppp_push(ppp); return; drop: kfree_skb(skb); ++ppp->dev->stats.tx_errors; } /* * Try to send the frame in xmit_pending. * The caller should have the xmit path locked. */ static void ppp_push(struct ppp *ppp) { struct list_head *list; struct channel *pch; struct sk_buff *skb = ppp->xmit_pending; if (!skb) return; list = &ppp->channels; if (list_empty(list)) { /* nowhere to send the packet, just drop it */ ppp->xmit_pending = NULL; kfree_skb(skb); return; } if ((ppp->flags & SC_MULTILINK) == 0) { /* not doing multilink: send it down the first channel */ list = list->next; pch = list_entry(list, struct channel, clist); spin_lock_bh(&pch->downl); if (pch->chan) { if (pch->chan->ops->start_xmit(pch->chan, skb)) ppp->xmit_pending = NULL; } else { /* channel got unregistered */ kfree_skb(skb); ppp->xmit_pending = NULL; } spin_unlock_bh(&pch->downl); return; } #ifdef CONFIG_PPP_MULTILINK /* Multilink: fragment the packet over as many links as can take the packet at the moment. 
*/ if (!ppp_mp_explode(ppp, skb)) return; #endif /* CONFIG_PPP_MULTILINK */ ppp->xmit_pending = NULL; kfree_skb(skb); } #ifdef CONFIG_PPP_MULTILINK static bool mp_protocol_compress __read_mostly = true; module_param(mp_protocol_compress, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(mp_protocol_compress, "compress protocol id in multilink fragments"); /* * Divide a packet to be transmitted into fragments and * send them out the individual links. */ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) { int len, totlen; int i, bits, hdrlen, mtu; int flen; int navail, nfree, nzero; int nbigger; int totspeed; int totfree; unsigned char *p, *q; struct list_head *list; struct channel *pch; struct sk_buff *frag; struct ppp_channel *chan; totspeed = 0; /*total bitrate of the bundle*/ nfree = 0; /* # channels which have no packet already queued */ navail = 0; /* total # of usable channels (not deregistered) */ nzero = 0; /* number of channels with zero speed associated*/ totfree = 0; /*total # of channels available and *having no queued packets before *starting the fragmentation*/ hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; i = 0; list_for_each_entry(pch, &ppp->channels, clist) { if (pch->chan) { pch->avail = 1; navail++; pch->speed = pch->chan->speed; } else { pch->avail = 0; } if (pch->avail) { if (skb_queue_empty(&pch->file.xq) || !pch->had_frag) { if (pch->speed == 0) nzero++; else totspeed += pch->speed; pch->avail = 2; ++nfree; ++totfree; } if (!pch->had_frag && i < ppp->nxchan) ppp->nxchan = i; } ++i; } /* * Don't start sending this packet unless at least half of * the channels are free. This gives much better TCP * performance if we have a lot of channels. 
*/ if (nfree == 0 || nfree < navail / 2) return 0; /* can't take now, leave it in xmit_pending */ /* Do protocol field compression */ p = skb->data; len = skb->len; if (*p == 0 && mp_protocol_compress) { ++p; --len; } totlen = len; nbigger = len % nfree; /* skip to the channel after the one we last used and start at that one */ list = &ppp->channels; for (i = 0; i < ppp->nxchan; ++i) { list = list->next; if (list == &ppp->channels) { i = 0; break; } } /* create a fragment for each channel */ bits = B; while (len > 0) { list = list->next; if (list == &ppp->channels) { i = 0; continue; } pch = list_entry(list, struct channel, clist); ++i; if (!pch->avail) continue; /* * Skip this channel if it has a fragment pending already and * we haven't given a fragment to all of the free channels. */ if (pch->avail == 1) { if (nfree > 0) continue; } else { pch->avail = 1; } /* check the channel's mtu and whether it is still attached. */ spin_lock_bh(&pch->downl); if (pch->chan == NULL) { /* can't use this channel, it's being deregistered */ if (pch->speed == 0) nzero--; else totspeed -= pch->speed; spin_unlock_bh(&pch->downl); pch->avail = 0; totlen = len; totfree--; nfree--; if (--navail == 0) break; continue; } /* *if the channel speed is not set divide *the packet evenly among the free channels; *otherwise divide it according to the speed *of the channel we are going to transmit on */ flen = len; if (nfree > 0) { if (pch->speed == 0) { flen = len/nfree; if (nbigger > 0) { flen++; nbigger--; } } else { flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) / ((totspeed*totfree)/pch->speed)) - hdrlen; if (nbigger > 0) { flen += ((totfree - nzero)*pch->speed)/totspeed; nbigger -= ((totfree - nzero)*pch->speed)/ totspeed; } } nfree--; } /* *check if we are on the last channel or *we exceded the length of the data to *fragment */ if ((nfree <= 0) || (flen > len)) flen = len; /* *it is not worth to tx on slow channels: *in that case from the resulting flen according to the *above 
formula will be equal or less than zero. *Skip the channel in this case */ if (flen <= 0) { pch->avail = 2; spin_unlock_bh(&pch->downl); continue; } /* * hdrlen includes the 2-byte PPP protocol field, but the * MTU counts only the payload excluding the protocol field. * (RFC1661 Section 2) */ mtu = pch->chan->mtu - (hdrlen - 2); if (mtu < 4) mtu = 4; if (flen > mtu) flen = mtu; if (flen == len) bits |= E; frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC); if (!frag) goto noskb; q = skb_put(frag, flen + hdrlen); /* make the MP header */ put_unaligned_be16(PPP_MP, q); if (ppp->flags & SC_MP_XSHORTSEQ) { q[2] = bits + ((ppp->nxseq >> 8) & 0xf); q[3] = ppp->nxseq; } else { q[2] = bits; q[3] = ppp->nxseq >> 16; q[4] = ppp->nxseq >> 8; q[5] = ppp->nxseq; } memcpy(q + hdrlen, p, flen); /* try to send it down the channel */ chan = pch->chan; if (!skb_queue_empty(&pch->file.xq) || !chan->ops->start_xmit(chan, frag)) skb_queue_tail(&pch->file.xq, frag); pch->had_frag = 1; p += flen; len -= flen; ++ppp->nxseq; bits = 0; spin_unlock_bh(&pch->downl); } ppp->nxchan = i; return 1; noskb: spin_unlock_bh(&pch->downl); if (ppp->debug & 1) netdev_err(ppp->dev, "PPP: no memory (fragment)\n"); ++ppp->dev->stats.tx_errors; ++ppp->nxseq; return 1; /* abandon the frame */ } #endif /* CONFIG_PPP_MULTILINK */ /* * Try to send data out on a channel. 
*/ static void ppp_channel_push(struct channel *pch) { struct sk_buff *skb; struct ppp *ppp; spin_lock_bh(&pch->downl); if (pch->chan) { while (!skb_queue_empty(&pch->file.xq)) { skb = skb_dequeue(&pch->file.xq); if (!pch->chan->ops->start_xmit(pch->chan, skb)) { /* put the packet back and try again later */ skb_queue_head(&pch->file.xq, skb); break; } } } else { /* channel got deregistered */ skb_queue_purge(&pch->file.xq); } spin_unlock_bh(&pch->downl); /* see if there is anything from the attached unit to be sent */ if (skb_queue_empty(&pch->file.xq)) { read_lock_bh(&pch->upl); ppp = pch->ppp; if (ppp) ppp_xmit_process(ppp); read_unlock_bh(&pch->upl); } } /* * Receive-side routines. */ struct ppp_mp_skb_parm { u32 sequence; u8 BEbits; }; #define PPP_MP_CB(skb) ((struct ppp_mp_skb_parm *)((skb)->cb)) static inline void ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) { ppp_recv_lock(ppp); if (!ppp->closing) ppp_receive_frame(ppp, skb, pch); else kfree_skb(skb); ppp_recv_unlock(ppp); } void ppp_input(struct ppp_channel *chan, struct sk_buff *skb) { struct channel *pch = chan->ppp; int proto; if (!pch) { kfree_skb(skb); return; } read_lock_bh(&pch->upl); if (!pskb_may_pull(skb, 2)) { kfree_skb(skb); if (pch->ppp) { ++pch->ppp->dev->stats.rx_length_errors; ppp_receive_error(pch->ppp); } goto done; } proto = PPP_PROTO(skb); if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) { /* put it on the channel queue */ skb_queue_tail(&pch->file.rq, skb); /* drop old frames if queue too long */ while (pch->file.rq.qlen > PPP_MAX_RQLEN && (skb = skb_dequeue(&pch->file.rq))) kfree_skb(skb); wake_up_interruptible(&pch->file.rwait); } else { ppp_do_recv(pch->ppp, skb, pch); } done: read_unlock_bh(&pch->upl); } /* Put a 0-length skb in the receive queue as an error indication */ void ppp_input_error(struct ppp_channel *chan, int code) { struct channel *pch = chan->ppp; struct sk_buff *skb; if (!pch) return; read_lock_bh(&pch->upl); if (pch->ppp) { skb = 
alloc_skb(0, GFP_ATOMIC); if (skb) { skb->len = 0; /* probably unnecessary */ skb->cb[0] = code; ppp_do_recv(pch->ppp, skb, pch); } } read_unlock_bh(&pch->upl); } /* * We come in here to process a received frame. * The receive side of the ppp unit is locked. */ static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) { /* note: a 0-length skb is used as an error indication */ if (skb->len > 0) { #ifdef CONFIG_PPP_MULTILINK /* XXX do channel-level decompression here */ if (PPP_PROTO(skb) == PPP_MP) ppp_receive_mp_frame(ppp, skb, pch); else #endif /* CONFIG_PPP_MULTILINK */ ppp_receive_nonmp_frame(ppp, skb); } else { kfree_skb(skb); ppp_receive_error(ppp); } } static void ppp_receive_error(struct ppp *ppp) { ++ppp->dev->stats.rx_errors; if (ppp->vj) slhc_toss(ppp->vj); } static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) { struct sk_buff *ns; int proto, len, npi; /* * Decompress the frame, if compressed. * Note that some decompressors need to see uncompressed frames * that come in as well as compressed frames. 
*/ if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) && (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0) skb = ppp_decompress_frame(ppp, skb); if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR) goto err; proto = PPP_PROTO(skb); switch (proto) { case PPP_VJC_COMP: /* decompress VJ compressed packets */ if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP)) goto err; if (skb_tailroom(skb) < 124 || skb_cloned(skb)) { /* copy to a new sk_buff with more tailroom */ ns = dev_alloc_skb(skb->len + 128); if (!ns) { netdev_err(ppp->dev, "PPP: no memory " "(VJ decomp)\n"); goto err; } skb_reserve(ns, 2); skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len); consume_skb(skb); skb = ns; } else skb->ip_summed = CHECKSUM_NONE; len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2); if (len <= 0) { netdev_printk(KERN_DEBUG, ppp->dev, "PPP: VJ decompression error\n"); goto err; } len += 2; if (len > skb->len) skb_put(skb, len - skb->len); else if (len < skb->len) skb_trim(skb, len); proto = PPP_IP; break; case PPP_VJC_UNCOMP: if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP)) goto err; /* Until we fix the decompressor need to make sure * data portion is linear. 
*/ if (!pskb_may_pull(skb, skb->len)) goto err; if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) { netdev_err(ppp->dev, "PPP: VJ uncompressed error\n"); goto err; } proto = PPP_IP; break; case PPP_CCP: ppp_ccp_peek(ppp, skb, 1); break; } ++ppp->stats64.rx_packets; ppp->stats64.rx_bytes += skb->len - 2; npi = proto_to_npindex(proto); if (npi < 0) { /* control or unknown frame - pass it to pppd */ skb_queue_tail(&ppp->file.rq, skb); /* limit queue length by dropping old frames */ while (ppp->file.rq.qlen > PPP_MAX_RQLEN && (skb = skb_dequeue(&ppp->file.rq))) kfree_skb(skb); /* wake up any process polling or blocking on read */ wake_up_interruptible(&ppp->file.rwait); } else { /* network protocol frame - give it to the kernel */ #ifdef CONFIG_PPP_FILTER /* check if the packet passes the pass and active filters */ /* the filter instructions are constructed assuming a four-byte PPP header on each packet */ if (ppp->pass_filter || ppp->active_filter) { if (skb_unclone(skb, GFP_ATOMIC)) goto err; *skb_push(skb, 2) = 0; if (ppp->pass_filter && sk_run_filter(skb, ppp->pass_filter) == 0) { if (ppp->debug & 1) netdev_printk(KERN_DEBUG, ppp->dev, "PPP: inbound frame " "not passed\n"); kfree_skb(skb); return; } if (!(ppp->active_filter && sk_run_filter(skb, ppp->active_filter) == 0)) ppp->last_recv = jiffies; __skb_pull(skb, 2); } else #endif /* CONFIG_PPP_FILTER */ ppp->last_recv = jiffies; if ((ppp->dev->flags & IFF_UP) == 0 || ppp->npmode[npi] != NPMODE_PASS) { kfree_skb(skb); } else { /* chop off protocol */ skb_pull_rcsum(skb, 2); skb->dev = ppp->dev; skb->protocol = htons(npindex_to_ethertype[npi]); skb_reset_mac_header(skb); netif_rx(skb); } } return; err: kfree_skb(skb); ppp_receive_error(ppp); } static struct sk_buff * ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb) { int proto = PPP_PROTO(skb); struct sk_buff *ns; int len; /* Until we fix all the decompressor's need to make sure * data portion is linear. 
*/ if (!pskb_may_pull(skb, skb->len)) goto err; if (proto == PPP_COMP) { int obuff_size; switch(ppp->rcomp->compress_proto) { case CI_MPPE: obuff_size = ppp->mru + PPP_HDRLEN + 1; break; default: obuff_size = ppp->mru + PPP_HDRLEN; break; } ns = dev_alloc_skb(obuff_size); if (!ns) { netdev_err(ppp->dev, "ppp_decompress_frame: " "no memory\n"); goto err; } /* the decompressor still expects the A/C bytes in the hdr */ len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2, skb->len + 2, ns->data, obuff_size); if (len < 0) { /* Pass the compressed frame to pppd as an error indication. */ if (len == DECOMP_FATALERROR) ppp->rstate |= SC_DC_FERROR; kfree_skb(ns); goto err; } consume_skb(skb); skb = ns; skb_put(skb, len); skb_pull(skb, 2); /* pull off the A/C bytes */ } else { /* Uncompressed frame - pass to decompressor so it can update its dictionary if necessary. */ if (ppp->rcomp->incomp) ppp->rcomp->incomp(ppp->rc_state, skb->data - 2, skb->len + 2); } return skb; err: ppp->rstate |= SC_DC_ERROR; ppp_receive_error(ppp); return skb; } #ifdef CONFIG_PPP_MULTILINK /* * Receive a multilink frame. * We put it on the reconstruction queue and then pull off * as many completed frames as we can. */ static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) { u32 mask, seq; struct channel *ch; int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0) goto err; /* no good, throw it away */ /* Decode sequence number and begin/end bits */ if (ppp->flags & SC_MP_SHORTSEQ) { seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3]; mask = 0xfff; } else { seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5]; mask = 0xffffff; } PPP_MP_CB(skb)->BEbits = skb->data[2]; skb_pull(skb, mphdrlen); /* pull off PPP and MP headers */ /* * Do protocol ID decompression on the first fragment of each packet. 
*/ if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1)) *skb_push(skb, 1) = 0; /* * Expand sequence number to 32 bits, making it as close * as possible to ppp->minseq. */ seq |= ppp->minseq & ~mask; if ((int)(ppp->minseq - seq) > (int)(mask >> 1)) seq += mask + 1; else if ((int)(seq - ppp->minseq) > (int)(mask >> 1)) seq -= mask + 1; /* should never happen */ PPP_MP_CB(skb)->sequence = seq; pch->lastseq = seq; /* * If this packet comes before the next one we were expecting, * drop it. */ if (seq_before(seq, ppp->nextseq)) { kfree_skb(skb); ++ppp->dev->stats.rx_dropped; ppp_receive_error(ppp); return; } /* * Reevaluate minseq, the minimum over all channels of the * last sequence number received on each channel. Because of * the increasing sequence number rule, we know that any fragment * before `minseq' which hasn't arrived is never going to arrive. * The list of channels can't change because we have the receive * side of the ppp unit locked. */ list_for_each_entry(ch, &ppp->channels, clist) { if (seq_before(ch->lastseq, seq)) seq = ch->lastseq; } if (seq_before(ppp->minseq, seq)) ppp->minseq = seq; /* Put the fragment on the reconstruction queue */ ppp_mp_insert(ppp, skb); /* If the queue is getting long, don't wait any longer for packets before the start of the queue. */ if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) { struct sk_buff *mskb = skb_peek(&ppp->mrq); if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence)) ppp->minseq = PPP_MP_CB(mskb)->sequence; } /* Pull completed packets off the queue and receive them. */ while ((skb = ppp_mp_reconstruct(ppp))) { if (pskb_may_pull(skb, 2)) ppp_receive_nonmp_frame(ppp, skb); else { ++ppp->dev->stats.rx_length_errors; kfree_skb(skb); ppp_receive_error(ppp); } } return; err: kfree_skb(skb); ppp_receive_error(ppp); } /* * Insert a fragment on the MP reconstruction queue. * The queue is ordered by increasing sequence number. 
*/ static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb) { struct sk_buff *p; struct sk_buff_head *list = &ppp->mrq; u32 seq = PPP_MP_CB(skb)->sequence; /* N.B. we don't need to lock the list lock because we have the ppp unit receive-side lock. */ skb_queue_walk(list, p) { if (seq_before(seq, PPP_MP_CB(p)->sequence)) break; } __skb_queue_before(list, p, skb); } /* * Reconstruct a packet from the MP fragment queue. * We go through increasing sequence numbers until we find a * complete packet, or we get to the sequence number for a fragment * which hasn't arrived but might still do so. */ static struct sk_buff * ppp_mp_reconstruct(struct ppp *ppp) { u32 seq = ppp->nextseq; u32 minseq = ppp->minseq; struct sk_buff_head *list = &ppp->mrq; struct sk_buff *p, *tmp; struct sk_buff *head, *tail; struct sk_buff *skb = NULL; int lost = 0, len = 0; if (ppp->mrru == 0) /* do nothing until mrru is set */ return NULL; head = list->next; tail = NULL; skb_queue_walk_safe(list, p, tmp) { again: if (seq_before(PPP_MP_CB(p)->sequence, seq)) { /* this can't happen, anyway ignore the skb */ netdev_err(ppp->dev, "ppp_mp_reconstruct bad " "seq %u < %u\n", PPP_MP_CB(p)->sequence, seq); __skb_unlink(p, list); kfree_skb(p); continue; } if (PPP_MP_CB(p)->sequence != seq) { u32 oldseq; /* Fragment `seq' is missing. If it is after minseq, it might arrive later, so stop here. */ if (seq_after(seq, minseq)) break; /* Fragment `seq' is lost, keep going. */ lost = 1; oldseq = seq; seq = seq_before(minseq, PPP_MP_CB(p)->sequence)? minseq + 1: PPP_MP_CB(p)->sequence; if (ppp->debug & 1) netdev_printk(KERN_DEBUG, ppp->dev, "lost frag %u..%u\n", oldseq, seq-1); goto again; } /* * At this point we know that all the fragments from * ppp->nextseq to seq are either present or lost. * Also, there are no complete packets in the queue * that have no missing fragments and end before this * fragment. 
*/ /* B bit set indicates this fragment starts a packet */ if (PPP_MP_CB(p)->BEbits & B) { head = p; lost = 0; len = 0; } len += p->len; /* Got a complete packet yet? */ if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) && (PPP_MP_CB(head)->BEbits & B)) { if (len > ppp->mrru + 2) { ++ppp->dev->stats.rx_length_errors; netdev_printk(KERN_DEBUG, ppp->dev, "PPP: reconstructed packet" " is too long (%d)\n", len); } else { tail = p; break; } ppp->nextseq = seq + 1; } /* * If this is the ending fragment of a packet, * and we haven't found a complete valid packet yet, * we can discard up to and including this fragment. */ if (PPP_MP_CB(p)->BEbits & E) { struct sk_buff *tmp2; skb_queue_reverse_walk_from_safe(list, p, tmp2) { if (ppp->debug & 1) netdev_printk(KERN_DEBUG, ppp->dev, "discarding frag %u\n", PPP_MP_CB(p)->sequence); __skb_unlink(p, list); kfree_skb(p); } head = skb_peek(list); if (!head) break; } ++seq; } /* If we have a complete packet, copy it all into one skb. */ if (tail != NULL) { /* If we have discarded any fragments, signal a receive error. 
*/ if (PPP_MP_CB(head)->sequence != ppp->nextseq) { skb_queue_walk_safe(list, p, tmp) { if (p == head) break; if (ppp->debug & 1) netdev_printk(KERN_DEBUG, ppp->dev, "discarding frag %u\n", PPP_MP_CB(p)->sequence); __skb_unlink(p, list); kfree_skb(p); } if (ppp->debug & 1) netdev_printk(KERN_DEBUG, ppp->dev, " missed pkts %u..%u\n", ppp->nextseq, PPP_MP_CB(head)->sequence-1); ++ppp->dev->stats.rx_dropped; ppp_receive_error(ppp); } skb = head; if (head != tail) { struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list; p = skb_queue_next(list, head); __skb_unlink(skb, list); skb_queue_walk_from_safe(list, p, tmp) { __skb_unlink(p, list); *fragpp = p; p->next = NULL; fragpp = &p->next; skb->len += p->len; skb->data_len += p->len; skb->truesize += p->truesize; if (p == tail) break; } } else { __skb_unlink(skb, list); } ppp->nextseq = PPP_MP_CB(tail)->sequence + 1; } return skb; } #endif /* CONFIG_PPP_MULTILINK */ /* * Channel interface. */ /* Create a new, unattached ppp channel. */ int ppp_register_channel(struct ppp_channel *chan) { return ppp_register_net_channel(current->nsproxy->net_ns, chan); } /* Create a new, unattached ppp channel for specified net. */ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan) { struct channel *pch; struct ppp_net *pn; pch = kzalloc(sizeof(struct channel), GFP_KERNEL); if (!pch) return -ENOMEM; pn = ppp_pernet(net); pch->ppp = NULL; pch->chan = chan; pch->chan_net = net; chan->ppp = pch; init_ppp_file(&pch->file, CHANNEL); pch->file.hdrlen = chan->hdrlen; #ifdef CONFIG_PPP_MULTILINK pch->lastseq = -1; #endif /* CONFIG_PPP_MULTILINK */ init_rwsem(&pch->chan_sem); spin_lock_init(&pch->downl); rwlock_init(&pch->upl); spin_lock_bh(&pn->all_channels_lock); pch->file.index = ++pn->last_channel_index; list_add(&pch->list, &pn->new_channels); atomic_inc(&channel_count); spin_unlock_bh(&pn->all_channels_lock); return 0; } /* * Return the index of a channel. 
*/ int ppp_channel_index(struct ppp_channel *chan) { struct channel *pch = chan->ppp; if (pch) return pch->file.index; return -1; } /* * Return the PPP unit number to which a channel is connected. */ int ppp_unit_number(struct ppp_channel *chan) { struct channel *pch = chan->ppp; int unit = -1; if (pch) { read_lock_bh(&pch->upl); if (pch->ppp) unit = pch->ppp->file.index; read_unlock_bh(&pch->upl); } return unit; } /* * Return the PPP device interface name of a channel. */ char *ppp_dev_name(struct ppp_channel *chan) { struct channel *pch = chan->ppp; char *name = NULL; if (pch) { read_lock_bh(&pch->upl); if (pch->ppp && pch->ppp->dev) name = pch->ppp->dev->name; read_unlock_bh(&pch->upl); } return name; } /* * Disconnect a channel from the generic layer. * This must be called in process context. */ void ppp_unregister_channel(struct ppp_channel *chan) { struct channel *pch = chan->ppp; struct ppp_net *pn; if (!pch) return; /* should never happen */ chan->ppp = NULL; /* * This ensures that we have returned from any calls into the * the channel's start_xmit or ioctl routine before we proceed. */ down_write(&pch->chan_sem); spin_lock_bh(&pch->downl); pch->chan = NULL; spin_unlock_bh(&pch->downl); up_write(&pch->chan_sem); ppp_disconnect_channel(pch); pn = ppp_pernet(pch->chan_net); spin_lock_bh(&pn->all_channels_lock); list_del(&pch->list); spin_unlock_bh(&pn->all_channels_lock); pch->file.dead = 1; wake_up_interruptible(&pch->file.rwait); if (atomic_dec_and_test(&pch->file.refcnt)) ppp_destroy_channel(pch); } /* * Callback from a channel when it can accept more to transmit. * This should be called at BH/softirq level, not interrupt level. */ void ppp_output_wakeup(struct ppp_channel *chan) { struct channel *pch = chan->ppp; if (!pch) return; ppp_channel_push(pch); } /* * Compression control. */ /* Process the PPPIOCSCOMPRESS ioctl. 
*/ static int ppp_set_compress(struct ppp *ppp, unsigned long arg) { int err; struct compressor *cp, *ocomp; struct ppp_option_data data; void *state, *ostate; unsigned char ccp_option[CCP_MAX_OPTION_LENGTH]; err = -EFAULT; if (copy_from_user(&data, (void __user *) arg, sizeof(data)) || (data.length <= CCP_MAX_OPTION_LENGTH && copy_from_user(ccp_option, (void __user *) data.ptr, data.length))) goto out; err = -EINVAL; if (data.length > CCP_MAX_OPTION_LENGTH || ccp_option[1] < 2 || ccp_option[1] > data.length) goto out; cp = try_then_request_module( find_compressor(ccp_option[0]), "ppp-compress-%d", ccp_option[0]); if (!cp) goto out; err = -ENOBUFS; if (data.transmit) { state = cp->comp_alloc(ccp_option, data.length); if (state) { ppp_xmit_lock(ppp); ppp->xstate &= ~SC_COMP_RUN; ocomp = ppp->xcomp; ostate = ppp->xc_state; ppp->xcomp = cp; ppp->xc_state = state; ppp_xmit_unlock(ppp); if (ostate) { ocomp->comp_free(ostate); module_put(ocomp->owner); } err = 0; } else module_put(cp->owner); } else { state = cp->decomp_alloc(ccp_option, data.length); if (state) { ppp_recv_lock(ppp); ppp->rstate &= ~SC_DECOMP_RUN; ocomp = ppp->rcomp; ostate = ppp->rc_state; ppp->rcomp = cp; ppp->rc_state = state; ppp_recv_unlock(ppp); if (ostate) { ocomp->decomp_free(ostate); module_put(ocomp->owner); } err = 0; } else module_put(cp->owner); } out: return err; } /* * Look at a CCP packet and update our state accordingly. * We assume the caller has the xmit or recv path locked. */ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound) { unsigned char *dp; int len; if (!pskb_may_pull(skb, CCP_HDRLEN + 2)) return; /* no header */ dp = skb->data + 2; switch (CCP_CODE(dp)) { case CCP_CONFREQ: /* A ConfReq starts negotiation of compression * in one direction of transmission, * and hence brings it down...but which way? 
* * Remember: * A ConfReq indicates what the sender would like to receive */ if(inbound) /* He is proposing what I should send */ ppp->xstate &= ~SC_COMP_RUN; else /* I am proposing to what he should send */ ppp->rstate &= ~SC_DECOMP_RUN; break; case CCP_TERMREQ: case CCP_TERMACK: /* * CCP is going down, both directions of transmission */ ppp->rstate &= ~SC_DECOMP_RUN; ppp->xstate &= ~SC_COMP_RUN; break; case CCP_CONFACK: if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN) break; len = CCP_LENGTH(dp); if (!pskb_may_pull(skb, len + 2)) return; /* too short */ dp += CCP_HDRLEN; len -= CCP_HDRLEN; if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp)) break; if (inbound) { /* we will start receiving compressed packets */ if (!ppp->rc_state) break; if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len, ppp->file.index, 0, ppp->mru, ppp->debug)) { ppp->rstate |= SC_DECOMP_RUN; ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR); } } else { /* we will soon start sending compressed packets */ if (!ppp->xc_state) break; if (ppp->xcomp->comp_init(ppp->xc_state, dp, len, ppp->file.index, 0, ppp->debug)) ppp->xstate |= SC_COMP_RUN; } break; case CCP_RESETACK: /* reset the [de]compressor */ if ((ppp->flags & SC_CCP_UP) == 0) break; if (inbound) { if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) { ppp->rcomp->decomp_reset(ppp->rc_state); ppp->rstate &= ~SC_DC_ERROR; } } else { if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN)) ppp->xcomp->comp_reset(ppp->xc_state); } break; } } /* Free up compression resources. 
*/ static void ppp_ccp_closed(struct ppp *ppp) { void *xstate, *rstate; struct compressor *xcomp, *rcomp; ppp_lock(ppp); ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP); ppp->xstate = 0; xcomp = ppp->xcomp; xstate = ppp->xc_state; ppp->xc_state = NULL; ppp->rstate = 0; rcomp = ppp->rcomp; rstate = ppp->rc_state; ppp->rc_state = NULL; ppp_unlock(ppp); if (xstate) { xcomp->comp_free(xstate); module_put(xcomp->owner); } if (rstate) { rcomp->decomp_free(rstate); module_put(rcomp->owner); } } /* List of compressors. */ static LIST_HEAD(compressor_list); static DEFINE_SPINLOCK(compressor_list_lock); struct compressor_entry { struct list_head list; struct compressor *comp; }; static struct compressor_entry * find_comp_entry(int proto) { struct compressor_entry *ce; list_for_each_entry(ce, &compressor_list, list) { if (ce->comp->compress_proto == proto) return ce; } return NULL; } /* Register a compressor */ int ppp_register_compressor(struct compressor *cp) { struct compressor_entry *ce; int ret; spin_lock(&compressor_list_lock); ret = -EEXIST; if (find_comp_entry(cp->compress_proto)) goto out; ret = -ENOMEM; ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC); if (!ce) goto out; ret = 0; ce->comp = cp; list_add(&ce->list, &compressor_list); out: spin_unlock(&compressor_list_lock); return ret; } /* Unregister a compressor */ void ppp_unregister_compressor(struct compressor *cp) { struct compressor_entry *ce; spin_lock(&compressor_list_lock); ce = find_comp_entry(cp->compress_proto); if (ce && ce->comp == cp) { list_del(&ce->list); kfree(ce); } spin_unlock(&compressor_list_lock); } /* Find a compressor. */ static struct compressor * find_compressor(int type) { struct compressor_entry *ce; struct compressor *cp = NULL; spin_lock(&compressor_list_lock); ce = find_comp_entry(type); if (ce) { cp = ce->comp; if (!try_module_get(cp->owner)) cp = NULL; } spin_unlock(&compressor_list_lock); return cp; } /* * Miscelleneous stuff. 
*/ static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st) { struct slcompress *vj = ppp->vj; memset(st, 0, sizeof(*st)); st->p.ppp_ipackets = ppp->stats64.rx_packets; st->p.ppp_ierrors = ppp->dev->stats.rx_errors; st->p.ppp_ibytes = ppp->stats64.rx_bytes; st->p.ppp_opackets = ppp->stats64.tx_packets; st->p.ppp_oerrors = ppp->dev->stats.tx_errors; st->p.ppp_obytes = ppp->stats64.tx_bytes; if (!vj) return; st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed; st->vj.vjs_compressed = vj->sls_o_compressed; st->vj.vjs_searches = vj->sls_o_searches; st->vj.vjs_misses = vj->sls_o_misses; st->vj.vjs_errorin = vj->sls_i_error; st->vj.vjs_tossed = vj->sls_i_tossed; st->vj.vjs_uncompressedin = vj->sls_i_uncompressed; st->vj.vjs_compressedin = vj->sls_i_compressed; } /* * Stuff for handling the lists of ppp units and channels * and for initialization. */ /* * Create a new ppp interface unit. Fails if it can't allocate memory * or if there is already a unit with the requested number. * unit == -1 means allocate a new number. 
*/ static struct ppp * ppp_create_interface(struct net *net, int unit, int *retp) { struct ppp *ppp; struct ppp_net *pn; struct net_device *dev = NULL; int ret = -ENOMEM; int i; dev = alloc_netdev(sizeof(struct ppp), "", ppp_setup); if (!dev) goto out1; pn = ppp_pernet(net); ppp = netdev_priv(dev); ppp->dev = dev; ppp->mru = PPP_MRU; init_ppp_file(&ppp->file, INTERFACE); ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */ for (i = 0; i < NUM_NP; ++i) ppp->npmode[i] = NPMODE_PASS; INIT_LIST_HEAD(&ppp->channels); spin_lock_init(&ppp->rlock); spin_lock_init(&ppp->wlock); #ifdef CONFIG_PPP_MULTILINK ppp->minseq = -1; skb_queue_head_init(&ppp->mrq); #endif /* CONFIG_PPP_MULTILINK */ /* * drum roll: don't forget to set * the net device is belong to */ dev_net_set(dev, net); mutex_lock(&pn->all_ppp_mutex); if (unit < 0) { unit = unit_get(&pn->units_idr, ppp); if (unit < 0) { ret = unit; goto out2; } } else { ret = -EEXIST; if (unit_find(&pn->units_idr, unit)) goto out2; /* unit already exists */ /* * if caller need a specified unit number * lets try to satisfy him, otherwise -- * he should better ask us for new unit number * * NOTE: yes I know that returning EEXIST it's not * fair but at least pppd will ask us to allocate * new unit in this case so user is happy :) */ unit = unit_set(&pn->units_idr, ppp, unit); if (unit < 0) goto out2; } /* Initialize the new ppp unit */ ppp->file.index = unit; sprintf(dev->name, "ppp%d", unit); ret = register_netdev(dev); if (ret != 0) { unit_put(&pn->units_idr, unit); netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n", dev->name, ret); goto out2; } ppp->ppp_net = net; atomic_inc(&ppp_unit_count); mutex_unlock(&pn->all_ppp_mutex); *retp = 0; return ppp; out2: mutex_unlock(&pn->all_ppp_mutex); free_netdev(dev); out1: *retp = ret; return NULL; } /* * Initialize a ppp_file structure. 
 */
/* Initialize a ppp_file: empty tx/rx queues, one reference, ready waitqueue.
 * kind is INTERFACE or CHANNEL. */
static void
init_ppp_file(struct ppp_file *pf, int kind)
{
	pf->kind = kind;
	skb_queue_head_init(&pf->xq);
	skb_queue_head_init(&pf->rq);
	atomic_set(&pf->refcnt, 1);
	init_waitqueue_head(&pf->rwait);
}

/*
 * Take down a ppp interface unit - called when the owning file
 * (the one that created the unit) is closed or detached.
 */
static void ppp_shutdown_interface(struct ppp *ppp)
{
	struct ppp_net *pn;

	pn = ppp_pernet(ppp->ppp_net);
	mutex_lock(&pn->all_ppp_mutex);

	/* This will call dev_close() for us. */
	ppp_lock(ppp);
	if (!ppp->closing) {
		/* set closing under the lock so xmit/recv paths stop,
		 * then drop it before unregister_netdev (which may sleep) */
		ppp->closing = 1;
		ppp_unlock(ppp);
		unregister_netdev(ppp->dev);
		unit_put(&pn->units_idr, ppp->file.index);
	} else
		ppp_unlock(ppp);

	/* mark dead and wake any blocked readers; the memory itself is
	 * freed by ppp_destroy_interface when the last reference drops */
	ppp->file.dead = 1;
	ppp->owner = NULL;
	wake_up_interruptible(&ppp->file.rwait);

	mutex_unlock(&pn->all_ppp_mutex);
}

/*
 * Free the memory used by a ppp unit.  This is only called once
 * there are no channels connected to the unit and no file structs
 * that reference the unit.
 */
static void ppp_destroy_interface(struct ppp *ppp)
{
	atomic_dec(&ppp_unit_count);

	if (!ppp->file.dead || ppp->n_channels) {
		/* "can't happen" */
		netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
			   "but dead=%d n_channels=%d !\n",
			   ppp, ppp->file.dead, ppp->n_channels);
		return;
	}

	/* release compression state and any queued/partial packets */
	ppp_ccp_closed(ppp);
	if (ppp->vj) {
		slhc_free(ppp->vj);
		ppp->vj = NULL;
	}
	skb_queue_purge(&ppp->file.xq);
	skb_queue_purge(&ppp->file.rq);
#ifdef CONFIG_PPP_MULTILINK
	skb_queue_purge(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	kfree(ppp->pass_filter);
	ppp->pass_filter = NULL;
	kfree(ppp->active_filter);
	ppp->active_filter = NULL;
#endif /* CONFIG_PPP_FILTER */
	/* kfree_skb(NULL) is a no-op, so no check needed */
	kfree_skb(ppp->xmit_pending);

	/* the ppp struct is part of the netdev's private data, so this
	 * frees the unit itself as well */
	free_netdev(ppp->dev);
}

/*
 * Locate an existing ppp unit.
 * The caller should have locked the all_ppp_mutex.
 */
static struct ppp *
ppp_find_unit(struct ppp_net *pn, int unit)
{
	return unit_find(&pn->units_idr, unit);
}

/*
 * Locate an existing ppp channel.
 * The caller should have locked the all_channels_lock.
* First we look in the new_channels list, then in the * all_channels list. If found in the new_channels list, * we move it to the all_channels list. This is for speed * when we have a lot of channels in use. */ static struct channel * ppp_find_channel(struct ppp_net *pn, int unit) { struct channel *pch; list_for_each_entry(pch, &pn->new_channels, list) { if (pch->file.index == unit) { list_move(&pch->list, &pn->all_channels); return pch; } } list_for_each_entry(pch, &pn->all_channels, list) { if (pch->file.index == unit) return pch; } return NULL; } /* * Connect a PPP channel to a PPP interface unit. */ static int ppp_connect_channel(struct channel *pch, int unit) { struct ppp *ppp; struct ppp_net *pn; int ret = -ENXIO; int hdrlen; pn = ppp_pernet(pch->chan_net); mutex_lock(&pn->all_ppp_mutex); ppp = ppp_find_unit(pn, unit); if (!ppp) goto out; write_lock_bh(&pch->upl); ret = -EINVAL; if (pch->ppp) goto outl; ppp_lock(ppp); if (pch->file.hdrlen > ppp->file.hdrlen) ppp->file.hdrlen = pch->file.hdrlen; hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ if (hdrlen > ppp->dev->hard_header_len) ppp->dev->hard_header_len = hdrlen; list_add_tail(&pch->clist, &ppp->channels); ++ppp->n_channels; pch->ppp = ppp; atomic_inc(&ppp->file.refcnt); ppp_unlock(ppp); ret = 0; outl: write_unlock_bh(&pch->upl); out: mutex_unlock(&pn->all_ppp_mutex); return ret; } /* * Disconnect a channel from its ppp unit. */ static int ppp_disconnect_channel(struct channel *pch) { struct ppp *ppp; int err = -EINVAL; write_lock_bh(&pch->upl); ppp = pch->ppp; pch->ppp = NULL; write_unlock_bh(&pch->upl); if (ppp) { /* remove it from the ppp unit's list */ ppp_lock(ppp); list_del(&pch->clist); if (--ppp->n_channels == 0) wake_up_interruptible(&ppp->file.rwait); ppp_unlock(ppp); if (atomic_dec_and_test(&ppp->file.refcnt)) ppp_destroy_interface(ppp); err = 0; } return err; } /* * Free up the resources used by a ppp channel. 
*/ static void ppp_destroy_channel(struct channel *pch) { atomic_dec(&channel_count); if (!pch->file.dead) { /* "can't happen" */ pr_err("ppp: destroying undead channel %p !\n", pch); return; } skb_queue_purge(&pch->file.xq); skb_queue_purge(&pch->file.rq); kfree(pch); } static void __exit ppp_cleanup(void) { /* should never happen */ if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) pr_err("PPP: removing module but units remain!\n"); unregister_chrdev(PPP_MAJOR, "ppp"); device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); class_destroy(ppp_class); unregister_pernet_device(&ppp_net_ops); } /* * Units handling. Caller must protect concurrent access * by holding all_ppp_mutex */ /* associate pointer with specified number */ static int unit_set(struct idr *p, void *ptr, int n) { int unit; unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL); if (unit == -ENOSPC) unit = -EINVAL; return unit; } /* get new free unit number and associate pointer with it */ static int unit_get(struct idr *p, void *ptr) { return idr_alloc(p, ptr, 0, 0, GFP_KERNEL); } /* put unit number back to a pool */ static void unit_put(struct idr *p, int n) { idr_remove(p, n); } /* get pointer associated with the number */ static void *unit_find(struct idr *p, int n) { return idr_find(p, n); } /* Module/initialization stuff */ module_init(ppp_init); module_exit(ppp_cleanup); EXPORT_SYMBOL(ppp_register_net_channel); EXPORT_SYMBOL(ppp_register_channel); EXPORT_SYMBOL(ppp_unregister_channel); EXPORT_SYMBOL(ppp_channel_index); EXPORT_SYMBOL(ppp_unit_number); EXPORT_SYMBOL(ppp_dev_name); EXPORT_SYMBOL(ppp_input); EXPORT_SYMBOL(ppp_input_error); EXPORT_SYMBOL(ppp_output_wakeup); EXPORT_SYMBOL(ppp_register_compressor); EXPORT_SYMBOL(ppp_unregister_compressor); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0); MODULE_ALIAS("devname:ppp");
gpl-2.0
caoxin1988/linux-3.10.33
arch/mips/cavium-octeon/setup.c
1048
31706
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2004-2007 Cavium Networks * Copyright (C) 2008, 2009 Wind River Systems * written by Ralf Baechle <ralf@linux-mips.org> */ #include <linux/compiler.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/serial.h> #include <linux/smp.h> #include <linux/types.h> #include <linux/string.h> /* for memset */ #include <linux/tty.h> #include <linux/time.h> #include <linux/platform_device.h> #include <linux/serial_core.h> #include <linux/serial_8250.h> #include <linux/of_fdt.h> #include <linux/libfdt.h> #include <linux/kexec.h> #include <asm/processor.h> #include <asm/reboot.h> #include <asm/smp-ops.h> #include <asm/irq_cpu.h> #include <asm/mipsregs.h> #include <asm/bootinfo.h> #include <asm/sections.h> #include <asm/time.h> #include <asm/octeon/octeon.h> #include <asm/octeon/pci-octeon.h> #include <asm/octeon/cvmx-mio-defs.h> #ifdef CONFIG_CAVIUM_DECODE_RSL extern void cvmx_interrupt_rsl_decode(void); extern int __cvmx_interrupt_ecc_report_single_bit_errors; extern void cvmx_interrupt_rsl_enable(void); #endif extern struct plat_smp_ops octeon_smp_ops; #ifdef CONFIG_PCI extern void pci_console_init(const char *arg); #endif static unsigned long long MAX_MEMORY = 512ull << 20; struct octeon_boot_descriptor *octeon_boot_desc_ptr; struct cvmx_bootinfo *octeon_bootinfo; EXPORT_SYMBOL(octeon_bootinfo); static unsigned long long RESERVE_LOW_MEM = 0ull; #ifdef CONFIG_KEXEC #ifdef CONFIG_SMP /* * Wait for relocation code is prepared and send * secondary CPUs to spin until kernel is relocated. 
*/ static void octeon_kexec_smp_down(void *ignored) { int cpu = smp_processor_id(); local_irq_disable(); set_cpu_online(cpu, false); while (!atomic_read(&kexec_ready_to_reboot)) cpu_relax(); asm volatile ( " sync \n" " synci ($0) \n"); relocated_kexec_smp_wait(NULL); } #endif #define OCTEON_DDR0_BASE (0x0ULL) #define OCTEON_DDR0_SIZE (0x010000000ULL) #define OCTEON_DDR1_BASE (0x410000000ULL) #define OCTEON_DDR1_SIZE (0x010000000ULL) #define OCTEON_DDR2_BASE (0x020000000ULL) #define OCTEON_DDR2_SIZE (0x3e0000000ULL) #define OCTEON_MAX_PHY_MEM_SIZE (16*1024*1024*1024ULL) static struct kimage *kimage_ptr; static void kexec_bootmem_init(uint64_t mem_size, uint32_t low_reserved_bytes) { int64_t addr; struct cvmx_bootmem_desc *bootmem_desc; bootmem_desc = cvmx_bootmem_get_desc(); if (mem_size > OCTEON_MAX_PHY_MEM_SIZE) { mem_size = OCTEON_MAX_PHY_MEM_SIZE; pr_err("Error: requested memory too large," "truncating to maximum size\n"); } bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER; bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER; addr = (OCTEON_DDR0_BASE + RESERVE_LOW_MEM + low_reserved_bytes); bootmem_desc->head_addr = 0; if (mem_size <= OCTEON_DDR0_SIZE) { __cvmx_bootmem_phy_free(addr, mem_size - RESERVE_LOW_MEM - low_reserved_bytes, 0); return; } __cvmx_bootmem_phy_free(addr, OCTEON_DDR0_SIZE - RESERVE_LOW_MEM - low_reserved_bytes, 0); mem_size -= OCTEON_DDR0_SIZE; if (mem_size > OCTEON_DDR1_SIZE) { __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0); __cvmx_bootmem_phy_free(OCTEON_DDR2_BASE, mem_size - OCTEON_DDR1_SIZE, 0); } else __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0); } static int octeon_kexec_prepare(struct kimage *image) { int i; char *bootloader = "kexec"; octeon_boot_desc_ptr->argc = 0; for (i = 0; i < image->nr_segments; i++) { if (!strncmp(bootloader, (char *)image->segment[i].buf, strlen(bootloader))) { /* * convert command line string to array * of parameters (as bootloader does). 
*/ int argc = 0, offt; char *str = (char *)image->segment[i].buf; char *ptr = strchr(str, ' '); while (ptr && (OCTEON_ARGV_MAX_ARGS > argc)) { *ptr = '\0'; if (ptr[1] != ' ') { offt = (int)(ptr - str + 1); octeon_boot_desc_ptr->argv[argc] = image->segment[i].mem + offt; argc++; } ptr = strchr(ptr + 1, ' '); } octeon_boot_desc_ptr->argc = argc; break; } } /* * Information about segments will be needed during pre-boot memory * initialization. */ kimage_ptr = image; return 0; } static void octeon_generic_shutdown(void) { int i; #ifdef CONFIG_SMP int cpu; #endif struct cvmx_bootmem_desc *bootmem_desc; void *named_block_array_ptr; bootmem_desc = cvmx_bootmem_get_desc(); named_block_array_ptr = cvmx_phys_to_ptr(bootmem_desc->named_block_array_addr); #ifdef CONFIG_SMP /* disable watchdogs */ for_each_online_cpu(cpu) cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0); #else cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0); #endif if (kimage_ptr != kexec_crash_image) { memset(named_block_array_ptr, 0x0, CVMX_BOOTMEM_NUM_NAMED_BLOCKS * sizeof(struct cvmx_bootmem_named_block_desc)); /* * Mark all memory (except low 0x100000 bytes) as free. * It is the same thing that bootloader does. */ kexec_bootmem_init(octeon_bootinfo->dram_size*1024ULL*1024ULL, 0x100000); /* * Allocate all segments to avoid their corruption during boot. */ for (i = 0; i < kimage_ptr->nr_segments; i++) cvmx_bootmem_alloc_address( kimage_ptr->segment[i].memsz + 2*PAGE_SIZE, kimage_ptr->segment[i].mem - PAGE_SIZE, PAGE_SIZE); } else { /* * Do not mark all memory as free. Free only named sections * leaving the rest of memory unchanged. 
*/ struct cvmx_bootmem_named_block_desc *ptr = (struct cvmx_bootmem_named_block_desc *) named_block_array_ptr; for (i = 0; i < bootmem_desc->named_block_num_blocks; i++) if (ptr[i].size) cvmx_bootmem_free_named(ptr[i].name); } kexec_args[2] = 1UL; /* running on octeon_main_processor */ kexec_args[3] = (unsigned long)octeon_boot_desc_ptr; #ifdef CONFIG_SMP secondary_kexec_args[2] = 0UL; /* running on secondary cpu */ secondary_kexec_args[3] = (unsigned long)octeon_boot_desc_ptr; #endif } static void octeon_shutdown(void) { octeon_generic_shutdown(); #ifdef CONFIG_SMP smp_call_function(octeon_kexec_smp_down, NULL, 0); smp_wmb(); while (num_online_cpus() > 1) { cpu_relax(); mdelay(1); } #endif } static void octeon_crash_shutdown(struct pt_regs *regs) { octeon_generic_shutdown(); default_machine_crash_shutdown(regs); } #endif /* CONFIG_KEXEC */ #ifdef CONFIG_CAVIUM_RESERVE32 uint64_t octeon_reserve32_memory; EXPORT_SYMBOL(octeon_reserve32_memory); #endif #ifdef CONFIG_KEXEC /* crashkernel cmdline parameter is parsed _after_ memory setup * we also parse it here (workaround for EHB5200) */ static uint64_t crashk_size, crashk_base; #endif static int octeon_uart; extern asmlinkage void handle_int(void); extern asmlinkage void plat_irq_dispatch(void); /** * Return non zero if we are currently running in the Octeon simulator * * Returns */ int octeon_is_simulation(void) { return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM; } EXPORT_SYMBOL(octeon_is_simulation); /** * Return true if Octeon is in PCI Host mode. This means * Linux can control the PCI bus. * * Returns Non zero if Octeon in host mode. 
*/ int octeon_is_pci_host(void) { #ifdef CONFIG_PCI return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST; #else return 0; #endif } /** * Get the clock rate of Octeon * * Returns Clock rate in HZ */ uint64_t octeon_get_clock_rate(void) { struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get(); return sysinfo->cpu_clock_hz; } EXPORT_SYMBOL(octeon_get_clock_rate); static u64 octeon_io_clock_rate; u64 octeon_get_io_clock_rate(void) { return octeon_io_clock_rate; } EXPORT_SYMBOL(octeon_get_io_clock_rate); /** * Write to the LCD display connected to the bootbus. This display * exists on most Cavium evaluation boards. If it doesn't exist, then * this function doesn't do anything. * * @s: String to write */ void octeon_write_lcd(const char *s) { if (octeon_bootinfo->led_display_base_addr) { void __iomem *lcd_address = ioremap_nocache(octeon_bootinfo->led_display_base_addr, 8); int i; for (i = 0; i < 8; i++, s++) { if (*s) iowrite8(*s, lcd_address + i); else iowrite8(' ', lcd_address + i); } iounmap(lcd_address); } } /** * Return the console uart passed by the bootloader * * Returns uart (0 or 1) */ int octeon_get_boot_uart(void) { int uart; #ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL uart = 1; #else uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ? 1 : 0; #endif return uart; } /** * Get the coremask Linux was booted on. 
* * Returns Core mask */ int octeon_get_boot_coremask(void) { return octeon_boot_desc_ptr->core_mask; } /** * Check the hardware BIST results for a CPU */ void octeon_check_cpu_bist(void) { const int coreid = cvmx_get_core_num(); unsigned long long mask; unsigned long long bist_val; /* Check BIST results for COP0 registers */ mask = 0x1f00000000ull; bist_val = read_octeon_c0_icacheerr(); if (bist_val & mask) pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n", coreid, bist_val); bist_val = read_octeon_c0_dcacheerr(); if (bist_val & 1) pr_err("Core%d L1 Dcache parity error: " "CacheErr(dcache) = 0x%llx\n", coreid, bist_val); mask = 0xfc00000000000000ull; bist_val = read_c0_cvmmemctl(); if (bist_val & mask) pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n", coreid, bist_val); write_octeon_c0_dcacheerr(0); } /** * Reboot Octeon * * @command: Command to pass to the bootloader. Currently ignored. */ static void octeon_restart(char *command) { /* Disable all watchdogs before soft reset. They don't get cleared */ #ifdef CONFIG_SMP int cpu; for_each_online_cpu(cpu) cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0); #else cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0); #endif mb(); while (1) cvmx_write_csr(CVMX_CIU_SOFT_RST, 1); } /** * Permanently stop a core. * * @arg: Ignored. */ static void octeon_kill_core(void *arg) { if (octeon_is_simulation()) /* A break instruction causes the simulator stop a core */ asm volatile ("break" ::: "memory"); local_irq_disable(); /* Disable watchdog on this core. */ cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0); /* Spin in a low power mode. 
*/ while (true) asm volatile ("wait" ::: "memory"); } /** * Halt the system */ static void octeon_halt(void) { smp_call_function(octeon_kill_core, NULL, 0); switch (octeon_bootinfo->board_type) { case CVMX_BOARD_TYPE_NAO38: /* Driving a 1 to GPIO 12 shuts off this board */ cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1); cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000); break; default: octeon_write_lcd("PowerOff"); break; } octeon_kill_core(NULL); } /** * Handle all the error condition interrupts that might occur. * */ #ifdef CONFIG_CAVIUM_DECODE_RSL static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id) { cvmx_interrupt_rsl_decode(); return IRQ_HANDLED; } #endif /** * Return a string representing the system type * * Returns */ const char *octeon_board_type_string(void) { static char name[80]; sprintf(name, "%s (%s)", cvmx_board_type_to_string(octeon_bootinfo->board_type), octeon_model_get_string(read_c0_prid())); return name; } const char *get_system_type(void) __attribute__ ((alias("octeon_board_type_string"))); void octeon_user_io_init(void) { union octeon_cvmemctl cvmmemctl; union cvmx_iob_fau_timeout fau_timeout; union cvmx_pow_nw_tim nm_tim; /* Get the current settings for CP0_CVMMEMCTL_REG */ cvmmemctl.u64 = read_c0_cvmmemctl(); /* R/W If set, marked write-buffer entries time out the same * as as other entries; if clear, marked write-buffer entries * use the maximum timeout. */ cvmmemctl.s.dismarkwblongto = 1; /* R/W If set, a merged store does not clear the write-buffer * entry timeout state. */ cvmmemctl.s.dismrgclrwbto = 0; /* R/W Two bits that are the MSBs of the resultant CVMSEG LM * word location for an IOBDMA. The other 8 bits come from the * SCRADDR field of the IOBDMA. */ cvmmemctl.s.iobdmascrmsb = 0; /* R/W If set, SYNCWS and SYNCS only order marked stores; if * clear, SYNCWS and SYNCS only order unmarked * stores. SYNCWSMARKED has no effect when DISSYNCWS is * set. 
*/ cvmmemctl.s.syncwsmarked = 0; /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */ cvmmemctl.s.dissyncws = 0; /* R/W If set, no stall happens on write buffer full. */ if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) cvmmemctl.s.diswbfst = 1; else cvmmemctl.s.diswbfst = 0; /* R/W If set (and SX set), supervisor-level loads/stores can * use XKPHYS addresses with <48>==0 */ cvmmemctl.s.xkmemenas = 0; /* R/W If set (and UX set), user-level loads/stores can use * XKPHYS addresses with VA<48>==0 */ cvmmemctl.s.xkmemenau = 0; /* R/W If set (and SX set), supervisor-level loads/stores can * use XKPHYS addresses with VA<48>==1 */ cvmmemctl.s.xkioenas = 0; /* R/W If set (and UX set), user-level loads/stores can use * XKPHYS addresses with VA<48>==1 */ cvmmemctl.s.xkioenau = 0; /* R/W If set, all stores act as SYNCW (NOMERGE must be set * when this is set) RW, reset to 0. */ cvmmemctl.s.allsyncw = 0; /* R/W If set, no stores merge, and all stores reach the * coherent bus in order. */ cvmmemctl.s.nomerge = 0; /* R/W Selects the bit in the counter used for DID time-outs 0 * = 231, 1 = 230, 2 = 229, 3 = 214. Actual time-out is * between 1x and 2x this interval. For example, with * DIDTTO=3, expiration interval is between 16K and 32K. */ cvmmemctl.s.didtto = 0; /* R/W If set, the (mem) CSR clock never turns off. */ cvmmemctl.s.csrckalwys = 0; /* R/W If set, mclk never turns off. */ cvmmemctl.s.mclkalwys = 0; /* R/W Selects the bit in the counter used for write buffer * flush time-outs (WBFLT+11) is the bit position in an * internal counter used to determine expiration. The write * buffer expires between 1x and 2x this interval. For * example, with WBFLT = 0, a write buffer expires between 2K * and 4K cycles after the write buffer entry is allocated. */ cvmmemctl.s.wbfltime = 0; /* R/W If set, do not put Istream in the L2 cache. */ cvmmemctl.s.istrnol2 = 0; /* * R/W The write buffer threshold. 
As per erratum Core-14752 * for CN63XX, a sc/scd might fail if the write buffer is * full. Lowering WBTHRESH greatly lowers the chances of the * write buffer ever being full and triggering the erratum. */ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) cvmmemctl.s.wbthresh = 4; else cvmmemctl.s.wbthresh = 10; /* R/W If set, CVMSEG is available for loads/stores in * kernel/debug mode. */ #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 cvmmemctl.s.cvmsegenak = 1; #else cvmmemctl.s.cvmsegenak = 0; #endif /* R/W If set, CVMSEG is available for loads/stores in * supervisor mode. */ cvmmemctl.s.cvmsegenas = 0; /* R/W If set, CVMSEG is available for loads/stores in user * mode. */ cvmmemctl.s.cvmsegenau = 0; /* R/W Size of local memory in cache blocks, 54 (6912 bytes) * is max legal value. */ cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE; write_c0_cvmmemctl(cvmmemctl.u64); if (smp_processor_id() == 0) pr_notice("CVMSEG size: %d cache lines (%d bytes)\n", CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128); /* Set a default for the hardware timeouts */ fau_timeout.u64 = 0; fau_timeout.s.tout_val = 0xfff; /* Disable tagwait FAU timeout */ fau_timeout.s.tout_enb = 0; cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64); nm_tim.u64 = 0; /* 4096 cycles */ nm_tim.s.nw_tim = 3; cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64); write_octeon_c0_icacheerr(0); write_c0_derraddr1(0); } /** * Early entry point for arch setup */ void __init prom_init(void) { struct cvmx_sysinfo *sysinfo; const char *arg; char *p; int i; int argc; #ifdef CONFIG_CAVIUM_RESERVE32 int64_t addr = -1; #endif /* * The bootloader passes a pointer to the boot descriptor in * $a3, this is available as fw_arg3. 
*/ octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3; octeon_bootinfo = cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr); cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr)); sysinfo = cvmx_sysinfo_get(); memset(sysinfo, 0, sizeof(*sysinfo)); sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20; sysinfo->phy_mem_desc_ptr = cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr); sysinfo->core_mask = octeon_bootinfo->core_mask; sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr; sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz; sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2; sysinfo->board_type = octeon_bootinfo->board_type; sysinfo->board_rev_major = octeon_bootinfo->board_rev_major; sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor; memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base, sizeof(sysinfo->mac_addr_base)); sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count; memcpy(sysinfo->board_serial_number, octeon_bootinfo->board_serial_number, sizeof(sysinfo->board_serial_number)); sysinfo->compact_flash_common_base_addr = octeon_bootinfo->compact_flash_common_base_addr; sysinfo->compact_flash_attribute_base_addr = octeon_bootinfo->compact_flash_attribute_base_addr; sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr; sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz; sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags; if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { /* I/O clock runs at a different rate than the CPU. */ union cvmx_mio_rst_boot rst_boot; rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; } else { octeon_io_clock_rate = sysinfo->cpu_clock_hz; } /* * Only enable the LED controller if we're running on a CN38XX, CN58XX, * or CN56XX. The CN30XX and CN31XX don't have an LED controller. 
*/ if (!octeon_is_simulation() && octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) { cvmx_write_csr(CVMX_LED_EN, 0); cvmx_write_csr(CVMX_LED_PRT, 0); cvmx_write_csr(CVMX_LED_DBG, 0); cvmx_write_csr(CVMX_LED_PRT_FMT, 0); cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32); cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32); cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0); cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0); cvmx_write_csr(CVMX_LED_EN, 1); } #ifdef CONFIG_CAVIUM_RESERVE32 /* * We need to temporarily allocate all memory in the reserve32 * region. This makes sure the kernel doesn't allocate this * memory when it is getting memory from the * bootloader. Later, after the memory allocations are * complete, the reserve32 will be freed. * * Allocate memory for RESERVED32 aligned on 2MB boundary. This * is in case we later use hugetlb entries with it. */ addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20, 0, 0, 2 << 20, "CAVIUM_RESERVE32", 0); if (addr < 0) pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n"); else octeon_reserve32_memory = addr; #endif #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2 if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) { pr_info("Skipping L2 locking due to reduced L2 cache size\n"); } else { uint32_t __maybe_unused ebase = read_c0_ebase() & 0x3ffff000; #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB /* TLB refill */ cvmx_l2c_lock_mem_region(ebase, 0x100); #endif #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION /* General exception */ cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80); #endif #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT /* Interrupt handler */ cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80); #endif #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100); cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80); #endif #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480); #endif } #endif octeon_check_cpu_bist(); octeon_uart = octeon_get_boot_uart(); 
#ifdef CONFIG_SMP octeon_write_lcd("LinuxSMP"); #else octeon_write_lcd("Linux"); #endif #ifdef CONFIG_CAVIUM_GDB /* * When debugging the linux kernel, force the cores to enter * the debug exception handler to break in. */ if (octeon_get_boot_debug_flag()) { cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num()); cvmx_read_csr(CVMX_CIU_DINT); } #endif octeon_setup_delays(); /* * BIST should always be enabled when doing a soft reset. L2 * Cache locking for instance is not cleared unless BIST is * enabled. Unfortunately due to a chip errata G-200 for * Cn38XX and CN31XX, BIST msut be disabled on these parts. */ if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) || OCTEON_IS_MODEL(OCTEON_CN31XX)) cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0); else cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1); /* Default to 64MB in the simulator to speed things up */ if (octeon_is_simulation()) MAX_MEMORY = 64ull << 20; arg = strstr(arcs_cmdline, "mem="); if (arg) { MAX_MEMORY = memparse(arg + 4, &p); if (MAX_MEMORY == 0) MAX_MEMORY = 32ull << 30; if (*p == '@') RESERVE_LOW_MEM = memparse(p + 1, &p); } arcs_cmdline[0] = 0; argc = octeon_boot_desc_ptr->argc; for (i = 0; i < argc; i++) { const char *arg = cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]); if ((strncmp(arg, "MEM=", 4) == 0) || (strncmp(arg, "mem=", 4) == 0)) { MAX_MEMORY = memparse(arg + 4, &p); if (MAX_MEMORY == 0) MAX_MEMORY = 32ull << 30; if (*p == '@') RESERVE_LOW_MEM = memparse(p + 1, &p); } else if (strcmp(arg, "ecc_verbose") == 0) { #ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC __cvmx_interrupt_ecc_report_single_bit_errors = 1; pr_notice("Reporting of single bit ECC errors is " "turned on\n"); #endif #ifdef CONFIG_KEXEC } else if (strncmp(arg, "crashkernel=", 12) == 0) { crashk_size = memparse(arg+12, &p); if (*p == '@') crashk_base = memparse(p+1, &p); strcat(arcs_cmdline, " "); strcat(arcs_cmdline, arg); /* * To do: switch parsing to new style, something like: * parse_crashkernel(arg, sysinfo->system_dram_size, * &crashk_size, &crashk_base); 
*/ #endif } else if (strlen(arcs_cmdline) + strlen(arg) + 1 < sizeof(arcs_cmdline) - 1) { strcat(arcs_cmdline, " "); strcat(arcs_cmdline, arg); } } if (strstr(arcs_cmdline, "console=") == NULL) { #ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL strcat(arcs_cmdline, " console=ttyS0,115200"); #else if (octeon_uart == 1) strcat(arcs_cmdline, " console=ttyS1,115200"); else strcat(arcs_cmdline, " console=ttyS0,115200"); #endif } if (octeon_is_simulation()) { /* * The simulator uses a mtdram device pre filled with * the filesystem. Also specify the calibration delay * to avoid calculating it every time. */ strcat(arcs_cmdline, " rw root=1f00 slram=root,0x40000000,+1073741824"); } mips_hpt_frequency = octeon_get_clock_rate(); octeon_init_cvmcount(); _machine_restart = octeon_restart; _machine_halt = octeon_halt; #ifdef CONFIG_KEXEC _machine_kexec_shutdown = octeon_shutdown; _machine_crash_shutdown = octeon_crash_shutdown; _machine_kexec_prepare = octeon_kexec_prepare; #endif octeon_user_io_init(); register_smp_ops(&octeon_smp_ops); } /* Exclude a single page from the regions obtained in plat_mem_setup. */ #ifndef CONFIG_CRASH_DUMP static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size) { if (addr > *mem && addr < *mem + *size) { u64 inc = addr - *mem; add_memory_region(*mem, inc, BOOT_MEM_RAM); *mem += inc; *size -= inc; } if (addr == *mem && *size > PAGE_SIZE) { *mem += PAGE_SIZE; *size -= PAGE_SIZE; } } #endif /* CONFIG_CRASH_DUMP */ void __init plat_mem_setup(void) { uint64_t mem_alloc_size; uint64_t total; uint64_t crashk_end; #ifndef CONFIG_CRASH_DUMP int64_t memory; uint64_t kernel_start; uint64_t kernel_size; #endif total = 0; crashk_end = 0; /* * The Mips memory init uses the first memory location for * some memory vectors. When SPARSEMEM is in use, it doesn't * verify that the size is big enough for the final * vectors. Making the smallest chuck 4MB seems to be enough * to consistently work. 
*/ mem_alloc_size = 4 << 20; if (mem_alloc_size > MAX_MEMORY) mem_alloc_size = MAX_MEMORY; /* Crashkernel ignores bootmem list. It relies on mem=X@Y option */ #ifdef CONFIG_CRASH_DUMP add_memory_region(RESERVE_LOW_MEM, MAX_MEMORY, BOOT_MEM_RAM); total += MAX_MEMORY; #else #ifdef CONFIG_KEXEC if (crashk_size > 0) { add_memory_region(crashk_base, crashk_size, BOOT_MEM_RAM); crashk_end = crashk_base + crashk_size; } #endif /* * When allocating memory, we want incrementing addresses from * bootmem_alloc so the code in add_memory_region can merge * regions next to each other. */ cvmx_bootmem_lock(); while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX) && (total < MAX_MEMORY)) { memory = cvmx_bootmem_phy_alloc(mem_alloc_size, __pa_symbol(&__init_end), -1, 0x100000, CVMX_BOOTMEM_FLAG_NO_LOCKING); if (memory >= 0) { u64 size = mem_alloc_size; #ifdef CONFIG_KEXEC uint64_t end; #endif /* * exclude a page at the beginning and end of * the 256MB PCIe 'hole' so the kernel will not * try to allocate multi-page buffers that * span the discontinuity. */ memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE, &memory, &size); memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE, &memory, &size); #ifdef CONFIG_KEXEC end = memory + mem_alloc_size; /* * This function automatically merges address regions * next to each other if they are received in * incrementing order */ if (memory < crashk_base && end > crashk_end) { /* region is fully in */ add_memory_region(memory, crashk_base - memory, BOOT_MEM_RAM); total += crashk_base - memory; add_memory_region(crashk_end, end - crashk_end, BOOT_MEM_RAM); total += end - crashk_end; continue; } if (memory >= crashk_base && end <= crashk_end) /* * Entire memory region is within the new * kernel's memory, ignore it. */ continue; if (memory > crashk_base && memory < crashk_end && end > crashk_end) { /* * Overlap with the beginning of the region, * reserve the beginning. 
*/ mem_alloc_size -= crashk_end - memory; memory = crashk_end; } else if (memory < crashk_base && end > crashk_base && end < crashk_end) /* * Overlap with the beginning of the region, * chop of end. */ mem_alloc_size -= end - crashk_base; #endif add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM); total += mem_alloc_size; /* Recovering mem_alloc_size */ mem_alloc_size = 4 << 20; } else { break; } } cvmx_bootmem_unlock(); /* Add the memory region for the kernel. */ kernel_start = (unsigned long) _text; kernel_size = _end - _text; /* Adjust for physical offset. */ kernel_start &= ~0xffffffff80000000ULL; add_memory_region(kernel_start, kernel_size, BOOT_MEM_RAM); #endif /* CONFIG_CRASH_DUMP */ #ifdef CONFIG_CAVIUM_RESERVE32 /* * Now that we've allocated the kernel memory it is safe to * free the reserved region. We free it here so that builtin * drivers can use the memory. */ if (octeon_reserve32_memory) cvmx_bootmem_free_named("CAVIUM_RESERVE32"); #endif /* CONFIG_CAVIUM_RESERVE32 */ if (total == 0) panic("Unable to allocate memory from " "cvmx_bootmem_phy_alloc\n"); } /* * Emit one character to the boot UART. Exported for use by the * watchdog timer. */ int prom_putchar(char c) { uint64_t lsrval; /* Spin until there is room */ do { lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart)); } while ((lsrval & 0x20) == 0); /* Write the byte */ cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull); return 1; } EXPORT_SYMBOL(prom_putchar); void prom_free_prom_memory(void) { if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) { /* Check for presence of Core-14449 fix. 
*/ u32 insn; u32 *foo; foo = &insn; asm volatile("# before" : : : "memory"); prefetch(foo); asm volatile( ".set push\n\t" ".set noreorder\n\t" "bal 1f\n\t" "nop\n" "1:\tlw %0,-12($31)\n\t" ".set pop\n\t" : "=r" (insn) : : "$31", "memory"); if ((insn >> 26) != 0x33) panic("No PREF instruction at Core-14449 probe point."); if (((insn >> 16) & 0x1f) != 28) panic("Core-14449 WAR not in place (%04x).\n" "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", insn); } #ifdef CONFIG_CAVIUM_DECODE_RSL cvmx_interrupt_rsl_enable(); /* Add an interrupt handler for general failures. */ if (request_irq(OCTEON_IRQ_RML, octeon_rlm_interrupt, IRQF_SHARED, "RML/RSL", octeon_rlm_interrupt)) { panic("Unable to request_irq(OCTEON_IRQ_RML)"); } #endif } int octeon_prune_device_tree(void); extern const char __dtb_octeon_3xxx_begin; extern const char __dtb_octeon_3xxx_end; extern const char __dtb_octeon_68xx_begin; extern const char __dtb_octeon_68xx_end; void __init device_tree_init(void) { int dt_size; struct boot_param_header *fdt; bool do_prune; if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) { fdt = phys_to_virt(octeon_bootinfo->fdt_addr); if (fdt_check_header(fdt)) panic("Corrupt Device Tree passed to kernel."); dt_size = be32_to_cpu(fdt->totalsize); do_prune = false; } else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) { fdt = (struct boot_param_header *)&__dtb_octeon_68xx_begin; dt_size = &__dtb_octeon_68xx_end - &__dtb_octeon_68xx_begin; do_prune = true; } else { fdt = (struct boot_param_header *)&__dtb_octeon_3xxx_begin; dt_size = &__dtb_octeon_3xxx_end - &__dtb_octeon_3xxx_begin; do_prune = true; } /* Copy the default tree from init memory. 
*/ initial_boot_params = early_init_dt_alloc_memory_arch(dt_size, 8); if (initial_boot_params == NULL) panic("Could not allocate initial_boot_params\n"); memcpy(initial_boot_params, fdt, dt_size); if (do_prune) { octeon_prune_device_tree(); pr_info("Using internal Device Tree.\n"); } else { pr_info("Using passed Device Tree.\n"); } unflatten_device_tree(); } static int __initdata disable_octeon_edac_p; static int __init disable_octeon_edac(char *str) { disable_octeon_edac_p = 1; return 0; } early_param("disable_octeon_edac", disable_octeon_edac); static char *edac_device_names[] = { "octeon_l2c_edac", "octeon_pc_edac", }; static int __init edac_devinit(void) { struct platform_device *dev; int i, err = 0; int num_lmc; char *name; if (disable_octeon_edac_p) return 0; for (i = 0; i < ARRAY_SIZE(edac_device_names); i++) { name = edac_device_names[i]; dev = platform_device_register_simple(name, -1, NULL, 0); if (IS_ERR(dev)) { pr_err("Registation of %s failed!\n", name); err = PTR_ERR(dev); } } num_lmc = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 : (OCTEON_IS_MODEL(OCTEON_CN56XX) ? 2 : 1); for (i = 0; i < num_lmc; i++) { dev = platform_device_register_simple("octeon_lmc_edac", i, NULL, 0); if (IS_ERR(dev)) { pr_err("Registation of octeon_lmc_edac %d failed!\n", i); err = PTR_ERR(dev); } } return err; } device_initcall(edac_devinit);
gpl-2.0
CyanogenMod/android_kernel_samsung_d2
drivers/staging/prima/CORE/BAP/src/bapModule.c
1304
47007
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /*=========================================================================== b a p M o d u l e . C OVERVIEW: This software unit holds the implementation of the WLAN BAP modules Module support functions. 
It is also where the global BAP module context, and per-instance (returned in BAP_Open device open) contexts. The functions externalized by this module are to be called by the device specific BAP Shim Layer (BSL) (in HDD) which implements a stream device on a particular platform. DEPENDENCIES: Are listed for each API below. ===========================================================================*/ /*=========================================================================== EDIT HISTORY FOR FILE This section contains comments describing changes made to the module. Notice that changes are listed in reverse chronological order. $Header: /home/labuser/ampBlueZ_2/CORE/BAP/src/bapModule.c,v 1.1 2010/07/12 19:05:35 labuser Exp labuser $$DateTime$$Author: labuser $ when who what, where, why ---------- --- -------------------------------------------------------- 2008-09-15 jez Created module ===========================================================================*/ /*---------------------------------------------------------------------------- * Include Files * -------------------------------------------------------------------------*/ // Pull in some message types used by BTC #include "sirParams.h" //#include "halFwApi.h" #include "wlan_qct_tl.h" #include "vos_trace.h" // Pick up the sme callback registration API #include "sme_Api.h" #include "ccmApi.h" /* BT-AMP PAL API header file */ #include "bapApi.h" #include "bapInternal.h" // Pick up the BTAMP RSN definitions #include "bapRsnTxRx.h" //#include "assert.h" #include "bapApiTimer.h" #if defined(ANI_OS_TYPE_ANDROID) #include "bap_hdd_main.h" #endif //#define BAP_DEBUG /*---------------------------------------------------------------------------- * Preprocessor Definitions and Constants * -------------------------------------------------------------------------*/ //#define VOS_GET_BAP_CB(ctx) vos_get_context( VOS_MODULE_ID_BAP, ctx) /*---------------------------------------------------------------------------- * 
Type Declarations * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Global Data Definitions * -------------------------------------------------------------------------*/ // include the phy link state machine structure here static tWLAN_BAPbapPhysLinkMachine bapPhysLinkMachineInitial = BTAMPFSM_INSTANCEDATA_INIT; /*---------------------------------------------------------------------------- * External declarations for global context * -------------------------------------------------------------------------*/ // No! Get this from VOS. // The main per-Physical Link (per WLAN association) context. //tBtampContext btampCtx; ptBtampContext gpBtampCtx; // Include the Local AMP Info structure. tBtampHCI_AMP_Info btampHCI_AMP_Info; // Include the Local Data Block Size info structure. tBtampHCI_Data_Block_Size btampHCI_Data_Block_Size; // Include the Local Version info structure. tBtampHCI_Version_Info btampHCI_Version_Info; // Include the Local Supported Cmds info structure. 
tBtampHCI_Supported_Cmds btampHCI_Supported_Cmds; static unsigned char pBtStaOwnMacAddr[WNI_CFG_BSSID_LEN]; /*BT-AMP SSID; per spec should have this format: "AMP-00-0a-f5-04-05-08" */ #define WLAN_BAP_SSID_MAX_LEN 21 static char pBtStaOwnSsid[WLAN_BAP_SSID_MAX_LEN]; /*---------------------------------------------------------------------------- * Static Variable Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Static Function Declarations and Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Externalized Function Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Function Declarations and Documentation * -------------------------------------------------------------------------*/ /*========================================================================== FUNCTION WLANBAP_Open DESCRIPTION Called at driver initialization (vos_open). BAP will initialize all its internal resources and will wait for the call to start to register with the other modules. 
DEPENDENCIES PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to BAP's control block can be extracted from its context RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to BAP cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_Open ( v_PVOID_t pvosGCtx ) { ptBtampContext pBtampCtx = NULL; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Allocate (and sanity check?!) BAP control block ------------------------------------------------------------------------*/ vos_alloc_context(pvosGCtx, VOS_MODULE_ID_BAP, (v_VOID_t**)&pBtampCtx, sizeof(tBtampContext)); pBtampCtx = VOS_GET_BAP_CB(pvosGCtx); if ( NULL == pBtampCtx ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP pointer from pvosGCtx on WLANBAP_Open"); //"Failed to allocate BAP pointer from pvosGCtx on WLANBAP_Open"); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Clean up BAP control block, initialize all values ------------------------------------------------------------------------*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLANBAP_Open"); WLANBAP_CleanCB(pBtampCtx, 0 /*do not empty*/); // Setup the "link back" to the VOSS context pBtampCtx->pvosGCtx = pvosGCtx; // Store a pointer to the BAP context provided by VOSS gpBtampCtx = pBtampCtx; /*------------------------------------------------------------------------ Allocate internal resources ------------------------------------------------------------------------*/ return VOS_STATUS_SUCCESS; }/* WLANBAP_Open */ /*========================================================================== FUNCTION WLANBAP_Start DESCRIPTION Called as part of the overall start 
procedure (vos_start). BAP will use this call to register with TL as the BAP entity for BT-AMP RSN frames. DEPENDENCIES PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to BAP's control block can be extracted from its context RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to BAP cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) Other codes can be returned as a result of a BAL failure; SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_Start ( v_PVOID_t pvosGCtx ) { ptBtampContext pBtampCtx = NULL; VOS_STATUS vosStatus; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check Extract BAP control block ------------------------------------------------------------------------*/ pBtampCtx = VOS_GET_BAP_CB(pvosGCtx); if ( NULL == pBtampCtx ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP pointer from pvosGCtx on WLANBAP_Start"); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Register with TL as an BT-AMP RSN client ------------------------------------------------------------------------*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLANBAP_Start TL register"); /*------------------------------------------------------------------------ Register with CSR for Roam (connection status) Events ------------------------------------------------------------------------*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLANBAP_Start CSR Register"); /* Initialize the BAP Tx packet monitor timer */ WLANBAP_InitConnectionAcceptTimer (pBtampCtx ); WLANBAP_InitLinkSupervisionTimer(pBtampCtx); vosStatus = vos_timer_init( &pBtampCtx->bapTxPktMonitorTimer, VOS_TIMER_TYPE_SW, /* use this type */ 
WLANBAP_TxPacketMonitorHandler, pBtampCtx); vosStatus = vos_lock_init(&pBtampCtx->bapLock); if(!VOS_IS_STATUS_SUCCESS(vosStatus)) { VOS_TRACE(VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,"Lock Init Fail"); } return vosStatus; }/* WLANBAP_Start */ /*========================================================================== FUNCTION WLANBAP_Stop DESCRIPTION Called by vos_stop to stop operation in BAP, before close. BAP will suspend all BT-AMP Protocol Adaption Layer operation and will wait for the close request to clean up its resources. DEPENDENCIES PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to BAP's control block can be extracted from its context RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to BAP cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_Stop ( v_PVOID_t pvosGCtx ) { ptBtampContext pBtampCtx = NULL; VOS_STATUS vosStatus = VOS_STATUS_SUCCESS; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check Extract BAP control block ------------------------------------------------------------------------*/ pBtampCtx = VOS_GET_BAP_CB(pvosGCtx); if ( NULL == pBtampCtx ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP pointer from pvosGCtx on WLANBAP_Stop"); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Stop BAP (de-register RSN handler!?) 
------------------------------------------------------------------------*/ vosStatus = WLANBAP_DeinitConnectionAcceptTimer(pBtampCtx); if ( VOS_STATUS_SUCCESS != vosStatus) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Couldn't destroy bapConnectionAcceptTimer"); } vosStatus = WLANBAP_DeinitLinkSupervisionTimer(pBtampCtx); if ( VOS_STATUS_SUCCESS != vosStatus) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Couldn't destroy bapLinkSupervisionTimer"); } vosStatus = vos_timer_destroy ( &pBtampCtx->bapTxPktMonitorTimer ); if ( VOS_STATUS_SUCCESS != vosStatus) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Couldn't destroy bapTxPktMonitorTimer"); } vos_lock_destroy(&pBtampCtx->bapLock); return VOS_STATUS_SUCCESS; }/* WLANBAP_Stop */ /*========================================================================== FUNCTION WLANBAP_Close DESCRIPTION Called by vos_close during general driver close procedure. BAP will clean up all the internal resources. DEPENDENCIES PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to BAP's control block can be extracted from its context RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to BAP cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_Close ( v_PVOID_t pvosGCtx ) { ptBtampContext pBtampCtx = NULL; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check Extract BAP control block ------------------------------------------------------------------------*/ pBtampCtx = VOS_GET_BAP_CB(pvosGCtx); if ( NULL == pBtampCtx ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP pointer from pvosGCtx on WLANBAP_Close"); return VOS_STATUS_E_FAULT; } 
/*------------------------------------------------------------------------ Cleanup BAP control block. ------------------------------------------------------------------------*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLANBAP_Close"); WLANBAP_CleanCB(pBtampCtx, 1 /* empty queues/lists/pkts if any*/); #if defined(ANI_OS_TYPE_ANDROID) && defined(WLAN_BTAMP_FEATURE) BSL_Deinit(pvosGCtx); #endif /*------------------------------------------------------------------------ Free BAP context from VOSS global ------------------------------------------------------------------------*/ vos_free_context(pvosGCtx, VOS_MODULE_ID_BAP, pBtampCtx); return VOS_STATUS_SUCCESS; }/* WLANBAP_Close */ /*---------------------------------------------------------------------------- HDD interfaces - Per instance initialization ---------------------------------------------------------------------------*/ /*========================================================================== FUNCTION WLANBAP_GetNewHndl DESCRIPTION Called by HDD at driver open (BSL_Open). BAP will initialize allocate a per-instance "file handle" equivalent for this specific open call. There should only ever be one call to BSL_Open. Since the open app user is the BT stack. DEPENDENCIES PARAMETERS IN hBtampHandle: Handle to return btampHandle value in. 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to BAP cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_GetNewHndl ( ptBtampHandle *hBtampHandle /* Handle to return btampHandle value in */ ) { ptBtampContext btampContext = NULL; /*------------------------------------------------------------------------ Sanity check params ------------------------------------------------------------------------*/ if ( NULL == hBtampHandle) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP handle pointer in WLANBAP_GetNewHndl"); return VOS_STATUS_E_FAULT; } #ifndef BTAMP_MULTIPLE_PHY_LINKS /*------------------------------------------------------------------------ Sanity check the BAP control block pointer ------------------------------------------------------------------------*/ if ( NULL == gpBtampCtx ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP pointer in WLANBAP_GetNewHndl"); return VOS_STATUS_E_FAULT; } //*hBtampHandle = (ptBtampHandle) &btampCtx; /* return a pointer to the tBtampContext structure - allocated by VOS for us */ *hBtampHandle = (ptBtampHandle) gpBtampCtx; btampContext = gpBtampCtx; /* Update the MAC address and SSID if in case the Read Local AMP Assoc * Request is made before Create Physical Link creation. */ WLANBAP_ReadMacConfig (btampContext); return VOS_STATUS_SUCCESS; #else // defined(BTAMP_MULTIPLE_PHY_LINKS) #endif //BTAMP_MULTIPLE_PHY_LINKS }/* WLANBAP_GetNewHndl */ /*========================================================================== FUNCTION WLANBAP_ReleaseHndl DESCRIPTION Called by HDD at driver open (BSL_Close). BAP will reclaim (invalidate) the "file handle" passed into this call. DEPENDENCIES PARAMETERS IN btampHandle: btampHandle value to invalidate. 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: btampHandle is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_ReleaseHndl ( ptBtampHandle btampHandle /* btamp handle value to release */ ) { /* obtain btamp Context */ ptBtampContext btampContext = (ptBtampContext) btampHandle; tHalHandle halHandle; eHalStatus halStatus = eHAL_STATUS_SUCCESS; /*------------------------------------------------------------------------ Sanity check params ------------------------------------------------------------------------*/ if ( NULL == btampHandle) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP handle value in WLANBAP_ReleaseHndl"); return VOS_STATUS_E_FAULT; } /* JEZ081001: TODO: Major: */ /* Check to see if any wireless associations are still active */ /* ...if so, I have to call * sme_RoamDisconnect(VOS_GET_HAL_CB(btampHandle->pvosGCtx), * btampHandle->sessionId, * eCSR_DISCONNECT_REASON_UNSPECIFIED); * on all of them */ halHandle = VOS_GET_HAL_CB(btampContext->pvosGCtx); if(NULL == halHandle) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "halHandle is NULL in %s", __func__); return VOS_STATUS_E_FAULT; } if( btampContext->isBapSessionOpen == TRUE ) { halStatus = sme_CloseSession(halHandle, btampContext->sessionId, NULL, NULL); if(eHAL_STATUS_SUCCESS == halStatus) { btampContext->isBapSessionOpen = FALSE; } } /* release the btampHandle */ return VOS_STATUS_SUCCESS; }/* WLANBAP_ReleaseHndl */ /*---------------------------------------------------------------------------- * Utility Function implementations * -------------------------------------------------------------------------*/ /*========================================================================== FUNCTION WLANBAP_CleanCB DESCRIPTION Clear out all fields in the BAP context. 
DEPENDENCIES PARAMETERS IN pBtampCtx: pointer to the BAP control block freeFlag: flag indicating whether to free any allocations. RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to BAP cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_CleanCB ( ptBtampContext pBtampCtx, v_U32_t freeFlag // 0 /*do not empty*/); ) { v_U16_t i; /* Logical Link index */ tpBtampLogLinkCtx pLogLinkContext = NULL; /*------------------------------------------------------------------------ Sanity check BAP control block ------------------------------------------------------------------------*/ if ( NULL == pBtampCtx ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP pointer in WLANBAP_CleanCB"); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Clean up BAP control block, initialize all values ------------------------------------------------------------------------*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLANBAP_CleanCB"); // First, clear out EVERYTHING in the BT-AMP context vos_mem_set( pBtampCtx, sizeof( *pBtampCtx), 0); pBtampCtx->pvosGCtx = NULL; // Initialize physical link state machine to DISCONNECTED state //pBtampCtx->bapPhysLinkMachine = BTAMPFSM_INSTANCEDATA_INIT; // Initialize physical link state machine to DISCONNECTED state vos_mem_copy( &pBtampCtx->bapPhysLinkMachine, &bapPhysLinkMachineInitial, /* BTAMPFSM_INSTANCEDATA_INIT; */ sizeof( pBtampCtx->bapPhysLinkMachine)); VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: Initializing State: %d", __func__, bapPhysLinkMachineInitial.stateVar); VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: Initialized State: %d", __func__, pBtampCtx->bapPhysLinkMachine.stateVar); //VOS_TRACE( VOS_MODULE_ID_BAP, 
VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampContext value: %x", __func__, pBtampCtx); #ifdef BAP_DEBUG /* Trace the tBtampCtx being passed in. */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN BAP Context Monitor: pBtampCtx value = %x in %s:%d", pBtampCtx, __func__, __LINE__ ); #endif //BAP_DEBUG pBtampCtx->sessionId = 0; pBtampCtx->pAppHdl = NULL; // Per-app BSL context pBtampCtx->pHddHdl = NULL; // Per-app BSL context /* 8 bits of phy_link_handle identifies this association */ pBtampCtx->phy_link_handle = 0; pBtampCtx->channel = 0; pBtampCtx->BAPDeviceRole = BT_RESPONDER; pBtampCtx->ucSTAId = 0; // gNeedPhysLinkCompEvent pBtampCtx->gNeedPhysLinkCompEvent = VOS_FALSE; // gPhysLinkStatus pBtampCtx->gPhysLinkStatus = WLANBAP_STATUS_SUCCESS; // gDiscRequested pBtampCtx->gDiscRequested = VOS_FALSE; // gDiscReason pBtampCtx->gDiscReason = WLANBAP_STATUS_SUCCESS; /* Connection Accept Timer interval*/ pBtampCtx->bapConnectionAcceptTimerInterval = WLANBAP_CONNECTION_ACCEPT_TIMEOUT; /* Link Supervision Timer interval*/ pBtampCtx->bapLinkSupervisionTimerInterval = WLANBAP_LINK_SUPERVISION_TIMEOUT; /* Logical Link Accept Timer interval*/ pBtampCtx->bapLogicalLinkAcceptTimerInterval = WLANBAP_LOGICAL_LINK_ACCEPT_TIMEOUT; /* Best Effort Flush timer interval*/ pBtampCtx->bapBEFlushTimerInterval = WLANBAP_BE_FLUSH_TIMEOUT; // Include the associations MAC addresses vos_mem_copy( pBtampCtx->self_mac_addr, pBtStaOwnMacAddr, /* Where do I get the current MAC address? 
*/ sizeof(pBtampCtx->self_mac_addr)); vos_mem_set( pBtampCtx->peer_mac_addr, sizeof(pBtampCtx->peer_mac_addr), 0); // The array of logical links pBtampCtx->current_log_link_index = 0; /* assigned mod 16 */ pBtampCtx->total_log_link_index = 0; /* should never be >16 */ // Clear up the array of logical links for (i = 0; i < WLANBAP_MAX_LOG_LINKS ; i++) { pLogLinkContext = &pBtampCtx->btampLogLinkCtx[i]; pLogLinkContext->present = 0; pLogLinkContext->uTxPktCompleted = 0; pLogLinkContext->log_link_handle = 0; } // Include the HDD BAP Shim Layer callbacks for Fetch, TxComp, and RxPkt pBtampCtx->pfnBtampFetchPktCB = NULL; pBtampCtx->pfnBtamp_STARxCB = NULL; pBtampCtx->pfnBtampTxCompCB = NULL; /* Implements the callback for ALL asynchronous events. */ pBtampCtx->pBapHCIEventCB = NULL; /* Set the default for event mask */ vos_mem_set( pBtampCtx->event_mask_page_2, sizeof(pBtampCtx->event_mask_page_2), 0); /* Set the default for location data. */ pBtampCtx->btamp_Location_Data_Info.loc_options = 0x58; /* Set the default data transfer mode */ pBtampCtx->ucDataTrafficMode = WLANBAP_FLOW_CONTROL_MODE_BLOCK_BASED; return VOS_STATUS_SUCCESS; }/* WLANBAP_CleanCB */ /*========================================================================== FUNCTION WLANBAP_GetCtxFromStaId DESCRIPTION Called inside the BT-AMP PAL (BAP) layer whenever we need either the BSL context or the BTAMP context from the StaId. DEPENDENCIES PARAMETERS IN ucSTAId: The StaId (used by TL, PE, and HAL) OUT hBtampHandle: Handle (pointer to a pointer) to return the btampHandle value in. hHddHdl: Handle to return the BSL context pointer in. 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: NULL pointer; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_GetCtxFromStaId ( v_U8_t ucSTAId, /* The StaId (used by TL, PE, and HAL) */ ptBtampHandle *hBtampHandle, /* Handle to return per app btampHandle value in */ ptBtampContext *hBtampContext, /* Handle to return per assoc btampContext value in */ v_PVOID_t *hHddHdl /* Handle to return BSL context in */ ) { #ifndef BTAMP_MULTIPLE_PHY_LINKS /* For now, we know there is only one application context */ /* ...and only one physical link context */ //*hBtampHandle = &((ptBtampContext) btampCtx); //*hBtampHandle = &btampCtx; *hBtampHandle = (v_VOID_t*)gpBtampCtx; //*hBtampContext = &btampCtx; *hBtampContext = gpBtampCtx; /* Handle to return BSL context in */ //*hHddHdl = btampCtx.pHddHdl; *hHddHdl = gpBtampCtx->pHddHdl; return VOS_STATUS_SUCCESS; #else // defined(BTAMP_MULTIPLE_PHY_LINKS) #endif //BTAMP_MULTIPLE_PHY_LINKS }/* WLANBAP_GetCtxFromStaId */ /*========================================================================== FUNCTION WLANBAP_GetStaIdFromLinkCtx DESCRIPTION Called inside the BT-AMP PAL (BAP) layer whenever we need the StaId (or hHddHdl) from the BTAMP context and phy_link_handle. DEPENDENCIES PARAMETERS IN hBtampHandle: Handle (pointer to a pointer) to return the btampHandle value in. phy_link_handle: physical link handle value. Unique per assoc. OUT pucSTAId: The StaId (used by TL, PE, and HAL) hHddHdl: Handle to return the BSL context pointer in. 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: NULL pointer; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_GetStaIdFromLinkCtx ( ptBtampHandle btampHandle, /* btampHandle value in */ v_U8_t phy_link_handle, /* phy_link_handle value in */ v_U8_t *pucSTAId, /* The StaId (used by TL, PE, and HAL) */ v_PVOID_t *hHddHdl /* Handle to return BSL context */ ) { #ifndef BTAMP_MULTIPLE_PHY_LINKS ptBtampContext pBtampCtx = (ptBtampContext) btampHandle; /*------------------------------------------------------------------------ Sanity check params ------------------------------------------------------------------------*/ if ( NULL == pBtampCtx) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP handle value in %s", __func__); return VOS_STATUS_E_FAULT; } /* Since there is only one physical link...we have stored all * the physical link specific context in the application context */ /* The StaId (used by TL, PE, and HAL) */ *pucSTAId = pBtampCtx->ucSTAId; /* Handle to return BSL context */ *hHddHdl = pBtampCtx->pHddHdl; return VOS_STATUS_SUCCESS; #else // defined(BTAMP_MULTIPLE_PHY_LINKS) #endif //BTAMP_MULTIPLE_PHY_LINKS }/* WLANBAP_GetStaIdFromLinkCtx */ /*========================================================================== FUNCTION WLANBAP_CreateNewPhyLinkCtx DESCRIPTION Called in order to create (or update) a BAP Physical Link "context" DEPENDENCIES PARAMETERS IN btampHandle: BAP app context handle phy_link_handle: phy_link_handle from the Command pHddHdl: BSL passes in its specific context OUT hBtampContext: Handle (pointer to a pointer) to return the per "Phy Link" ptBtampContext value in. 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: NULL pointer; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_CreateNewPhyLinkCtx ( ptBtampHandle btampHandle, v_U8_t phy_link_handle, /* I get phy_link_handle from the Command */ v_PVOID_t pHddHdl, /* BSL passes in its specific context */ ptBtampContext *hBtampContext, /* Handle to return per assoc btampContext value in */ tWLAN_BAPRole BAPDeviceRole ) { #ifndef BTAMP_MULTIPLE_PHY_LINKS ptBtampContext pBtampCtx = gpBtampCtx; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /* Read and Set MAC address and SSID to BT-AMP context */ WLANBAP_ReadMacConfig (pBtampCtx); /*------------------------------------------------------------------------ For now, presume security is not enabled. ------------------------------------------------------------------------*/ pBtampCtx->ucSecEnabled = WLANBAP_SECURITY_ENABLED_STATE; /*------------------------------------------------------------------------ Initial Short Range Mode for this physical link is 'disabled' ------------------------------------------------------------------------*/ pBtampCtx->phy_link_srm = 0; /*------------------------------------------------------------------------ Clear out the logical links. ------------------------------------------------------------------------*/ pBtampCtx->current_log_link_index = 0; pBtampCtx->total_log_link_index = 0; /*------------------------------------------------------------------------ Now configure the roaming profile links. To SSID and bssid. ------------------------------------------------------------------------*/ // We have room for two SSIDs. pBtampCtx->csrRoamProfile.SSIDs.numOfSSIDs = 1; // This is true for now. 
pBtampCtx->csrRoamProfile.SSIDs.SSIDList = pBtampCtx->SSIDList; //Array of two pBtampCtx->csrRoamProfile.SSIDs.SSIDList[0].SSID.length = 0; pBtampCtx->csrRoamProfile.SSIDs.SSIDList[0].handoffPermitted = VOS_FALSE; pBtampCtx->csrRoamProfile.SSIDs.SSIDList[0].ssidHidden = VOS_FALSE; pBtampCtx->csrRoamProfile.BSSIDs.numOfBSSIDs = 1; // This is true for now. pBtampCtx->csrRoamProfile.BSSIDs.bssid = &pBtampCtx->bssid; // Now configure the auth type in the roaming profile. To open. //pBtampCtx->csrRoamProfile.AuthType = eCSR_AUTH_TYPE_OPEN_SYSTEM; // open is the default //pBtampCtx->csrRoamProfile.negotiatedAuthType = eCSR_AUTH_TYPE_OPEN_SYSTEM; // open is the default pBtampCtx->csrRoamProfile.negotiatedAuthType = eCSR_AUTH_TYPE_RSN_PSK; pBtampCtx->csrRoamProfile.negotiatedUCEncryptionType = eCSR_ENCRYPT_TYPE_AES; pBtampCtx->phy_link_handle = phy_link_handle; /* For now, we know there is only one physical link context */ //*hBtampContext = &btampCtx; pBtampCtx->pHddHdl = pHddHdl; *hBtampContext = pBtampCtx; VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Btamp Ctxt = %p", pBtampCtx); return VOS_STATUS_SUCCESS; #else // defined(BTAMP_MULTIPLE_PHY_LINKS) #endif //BTAMP_MULTIPLE_PHY_LINKS }/* WLANBAP_CreateNewPhyLinkCtx */ /*========================================================================== FUNCTION WLANBAP_UpdatePhyLinkCtxStaId DESCRIPTION Called to update the STAId value associated with Physical Link "context" DEPENDENCIES PARAMETERS IN pBtampContext: ptBtampContext to update. 
ucSTAId: The StaId (used by TL, PE, and HAL) RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: NULL pointer; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_UpdatePhyLinkCtxStaId ( ptBtampContext pBtampContext, /* btampContext value in */ v_U8_t ucSTAId ) { #ifndef BTAMP_MULTIPLE_PHY_LINKS /*------------------------------------------------------------------------ Sanity check params ------------------------------------------------------------------------*/ if ( NULL == pBtampContext) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP handle value in %s", __func__); return VOS_STATUS_E_FAULT; } /* The StaId (used by TL, PE, and HAL) */ pBtampContext->ucSTAId = ucSTAId; return VOS_STATUS_SUCCESS; #else // defined(BTAMP_MULTIPLE_PHY_LINKS) #endif //BTAMP_MULTIPLE_PHY_LINKS }/* WLANBAP_UpdatePhyLinkCtxStaId */ v_U8_t bapAllocNextLogLinkIndex ( ptBtampContext pBtampContext, /* Pointer to the per assoc btampContext value */ v_U8_t phy_link_handle /* I get phy_link_handle from the Command */ ) { return ++(pBtampContext->current_log_link_index) % WLANBAP_MAX_LOG_LINKS; }/* bapAllocNextLogLinkIndex */ /*========================================================================== FUNCTION WLANBAP_CreateNewLogLinkCtx DESCRIPTION Called in order to allocate a BAP Logical Link "context" and "index" DEPENDENCIES PARAMETERS IN pBtampContext: Pointer to the ptBtampContext value in. 
phy_link_handle: phy_link_handle involved OUT pLog_link_handle: return the log_link_handle here RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: NULL pointer; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_CreateNewLogLinkCtx ( ptBtampContext pBtampContext, /* Pointer to the per assoc btampContext value */ v_U8_t phy_link_handle, /* I get phy_link_handle from the Command */ v_U8_t tx_flow_spec[18], v_U8_t rx_flow_spec[18], v_U16_t *pLog_link_handle /* Return the logical link index here */ ) { #ifndef BTAMP_MULTIPLE_PHY_LINKS v_U16_t i; /* Logical Link index */ tpBtampLogLinkCtx pLogLinkContext; v_U32_t retval; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ For now, allocate the logical links serially. ------------------------------------------------------------------------*/ i = pBtampContext->current_log_link_index = bapAllocNextLogLinkIndex(pBtampContext, phy_link_handle); pBtampContext->total_log_link_index++; *pLog_link_handle = (i << 8) + ( v_U16_t ) phy_link_handle ; /* Return the logical link index here */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO, " %s:*pLog_link_handle=%x", __func__,*pLog_link_handle); /*------------------------------------------------------------------------ Evaluate the Tx and Rx Flow specification for this logical link. ------------------------------------------------------------------------*/ // Currently we only support flow specs with service types of BE (0x01) #ifdef BAP_DEBUG /* Trace the tBtampCtx being passed in. 
*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN BAP Context Monitor: pBtampContext value = %p in %s:%d", pBtampContext, __func__, __LINE__ ); #endif //BAP_DEBUG /*------------------------------------------------------------------------ Now configure the Logical Link context. ------------------------------------------------------------------------*/ pLogLinkContext = &(pBtampContext->btampLogLinkCtx[i]); /* Extract Tx flow spec into the context structure */ retval = btampUnpackTlvFlow_Spec((void *)pBtampContext, tx_flow_spec, WLAN_BAP_PAL_FLOW_SPEC_TLV_LEN, &pLogLinkContext->btampFlowSpec); if (retval != BTAMP_PARSE_SUCCESS) { /* Flow spec parsing failed, return failure */ return VOS_STATUS_E_BADMSG; } /* Save the Logical link handle in the logical link context As of now, only the index is saved as logical link handle since same is returned in the event. FIXME: Decide whether this index has to be combined with physical link handle to generate the Logical link handle. */ pLogLinkContext->log_link_handle = *pLog_link_handle; // Mark this entry as OCCUPIED pLogLinkContext->present = VOS_TRUE; // Now initialize the Logical Link context pLogLinkContext->btampAC = 1; // Now initialize the values in the Logical Link context pLogLinkContext->ucTID = 0; // Currently we only support BE TID (0x00) pLogLinkContext->ucUP = 0; pLogLinkContext->uTxPktCompleted = 0; return VOS_STATUS_SUCCESS; #else // defined(BTAMP_MULTIPLE_PHY_LINKS) #endif //BTAMP_MULTIPLE_PHY_LINKS }/* WLANBAP_CreateNewLogLinkCtx */ /*========================================================================== FUNCTION WLANBAP_pmcFullPwrReqCB DESCRIPTION Callback provide to PMC in the pmcRequestFullPower API. 
DEPENDENCIES PARAMETERS IN callbackContext: The user passed in a context to identify status: The halStatus RETURN VALUE None SIDE EFFECTS ============================================================================*/ void WLANBAP_pmcFullPwrReqCB ( void *callbackContext, eHalStatus status ) { }/* WLANBAP_pmcFullPwrReqCB */ /*========================================================================== FUNCTION WLANBAP_ReadMacConfig DESCRIPTION This function sets the MAC config (Address and SSID to BT-AMP context DEPENDENCIES PARAMETERS pvosGCtx: pointer to the global vos context; a handle to BAP's control block can be extracted from its context RETURN VALUE None SIDE EFFECTS ============================================================================*/ void WLANBAP_ReadMacConfig ( ptBtampContext pBtampCtx ) { tANI_U32 len = WNI_CFG_BSSID_LEN; tHalHandle pMac = NULL; /*------------------------------------------------------------------------ Temporary method to get the self MAC address ------------------------------------------------------------------------*/ if (NULL == pBtampCtx) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "pBtampCtx is NULL in %s", __func__); return; } pMac = (tHalHandle)vos_get_context( VOS_MODULE_ID_SME, pBtampCtx->pvosGCtx); if (NULL == pMac) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "pMac is NULL in %s", __func__); return; } ccmCfgGetStr( pMac, WNI_CFG_STA_ID, pBtStaOwnMacAddr, &len ); VOS_ASSERT( WNI_CFG_BSSID_LEN == len ); /* Form the SSID from Mac address */ VOS_SNPRINTF( pBtStaOwnSsid, WLAN_BAP_SSID_MAX_LEN, "AMP-%02x-%02x-%02x-%02x-%02x-%02x", pBtStaOwnMacAddr[0], pBtStaOwnMacAddr[1], pBtStaOwnMacAddr[2], pBtStaOwnMacAddr[3], pBtStaOwnMacAddr[4], pBtStaOwnMacAddr[5]); /*------------------------------------------------------------------------ Set the MAC address for this instance ------------------------------------------------------------------------*/ vos_mem_copy( pBtampCtx->self_mac_addr, pBtStaOwnMacAddr, 
sizeof(pBtampCtx->self_mac_addr)); /*------------------------------------------------------------------------ Set our SSID value ------------------------------------------------------------------------*/ pBtampCtx->ownSsidLen = 21; vos_mem_copy( pBtampCtx->ownSsid, pBtStaOwnSsid, pBtampCtx->ownSsidLen); } /*========================================================================== FUNCTION WLANBAP_NeedBTCoexPriority DESCRIPTION This function will cause a message to be sent to BTC firmware if a change in priority has occurred. (From AMP's point-of-view.) DEPENDENCIES PARAMETERS pvosGCtx: pointer to the global vos context; a handle to HAL's control block can be extracted from its context RETURN VALUE None SIDE EFFECTS ============================================================================*/ // Global static int gBapCoexPriority; void WLANBAP_NeedBTCoexPriority ( ptBtampContext pBtampCtx, v_U32_t needCoexPriority ) { tHalHandle pMac = NULL; tSmeBtAmpEvent btAmpEvent; /*------------------------------------------------------------------------ Retrieve the pMac (HAL context) ------------------------------------------------------------------------*/ pMac = (tHalHandle)vos_get_context( VOS_MODULE_ID_SME, pBtampCtx->pvosGCtx); // Is re-entrancy protection needed for this? 
if (needCoexPriority != gBapCoexPriority) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "Calling %s with needCoexPriority=%d.", __func__, needCoexPriority); gBapCoexPriority = needCoexPriority; switch ( needCoexPriority) { case 0: /* Idle */ btAmpEvent.btAmpEventType = BTAMP_EVENT_CONNECTION_TERMINATED; pBtampCtx->btamp_session_on = FALSE; sme_sendBTAmpEvent(pMac, btAmpEvent); break; case 1: /* Associating */ btAmpEvent.btAmpEventType = BTAMP_EVENT_CONNECTION_START; pBtampCtx->btamp_session_on = TRUE; sme_sendBTAmpEvent(pMac, btAmpEvent); break; case 2: /* Post-assoc */ btAmpEvent.btAmpEventType = BTAMP_EVENT_CONNECTION_STOP; sme_sendBTAmpEvent(pMac, btAmpEvent); break; default: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "%s: Invalid Coexistence priority request: %d", __func__, needCoexPriority); } } } /*========================================================================== FUNCTION WLANBAP_RxCallback DESCRIPTION This function is called by TL call this function for all frames except for Data frames DEPENDENCIES PARAMETERS pvosGCtx: pointer to the global vos context; a handle to BAP's control block can be extracted from its context pPacket Vos packet frameType Frame type RETURN VALUE None SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_RxCallback ( v_PVOID_t pvosGCtx, vos_pkt_t *pPacket, WLANTL_BAPFrameEnumType frameType ) { ptBtampContext pBtampCtx = NULL; pBtampCtx = VOS_GET_BAP_CB(pvosGCtx); if ( NULL == pBtampCtx ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP pointer from pvosGCtx on WLANBAP_Start"); return VOS_STATUS_E_FAULT; } switch (frameType) { case WLANTL_BT_AMP_TYPE_LS_REQ: /* Fall through */ case WLANTL_BT_AMP_TYPE_LS_REP: { /* Link supervision frame, process this frame */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: link Supervision packet received over TL: %d, => BAP", __func__, frameType); 
WLANBAP_RxProcLsPkt((ptBtampHandle)pBtampCtx, pBtampCtx->phy_link_handle, frameType, pPacket); break; } case WLANTL_BT_AMP_TYPE_AR: /* Fall through */ case WLANTL_BT_AMP_TYPE_SEC: { /* Call the RSN callback handler */ bapRsnRxCallback (pvosGCtx, pPacket); break; } default: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "%s: Invalid frametype from TL: %d, => BAP", __func__, frameType); } return ( VOS_STATUS_SUCCESS ); }
gpl-2.0
alinuredini/nova
sound/soc/fsl/fsl_ssi.c
1816
22668
/* * Freescale SSI ALSA SoC Digital Audio Interface (DAI) driver * * Author: Timur Tabi <timur@freescale.com> * * Copyright 2007-2010 Freescale Semiconductor, Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/of_platform.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/initval.h> #include <sound/soc.h> #include "fsl_ssi.h" /** * FSLSSI_I2S_RATES: sample rates supported by the I2S * * This driver currently only supports the SSI running in I2S slave mode, * which means the codec determines the sample rate. Therefore, we tell * ALSA that we support all rates and let the codec driver decide what rates * are really supported. */ #define FSLSSI_I2S_RATES (SNDRV_PCM_RATE_5512 | SNDRV_PCM_RATE_8000_192000 | \ SNDRV_PCM_RATE_CONTINUOUS) /** * FSLSSI_I2S_FORMATS: audio formats supported by the SSI * * This driver currently only supports the SSI running in I2S slave mode. * * The SSI has a limitation in that the samples must be in the same byte * order as the host CPU. This is because when multiple bytes are written * to the STX register, the bytes and bits must be written in the same * order. The STX is a shift register, so all the bits need to be aligned * (bit-endianness must match byte-endianness). Processors typically write * the bits within a byte in the same order that the bytes of a word are * written in. So if the host CPU is big-endian, then only big-endian * samples will be written to STX properly. 
*/ #ifdef __BIG_ENDIAN #define FSLSSI_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE | \ SNDRV_PCM_FMTBIT_S18_3BE | SNDRV_PCM_FMTBIT_S20_3BE | \ SNDRV_PCM_FMTBIT_S24_3BE | SNDRV_PCM_FMTBIT_S24_BE) #else #define FSLSSI_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE | \ SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S20_3LE | \ SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE) #endif /* SIER bitflag of interrupts to enable */ #define SIER_FLAGS (CCSR_SSI_SIER_TFRC_EN | CCSR_SSI_SIER_TDMAE | \ CCSR_SSI_SIER_TIE | CCSR_SSI_SIER_TUE0_EN | \ CCSR_SSI_SIER_TUE1_EN | CCSR_SSI_SIER_RFRC_EN | \ CCSR_SSI_SIER_RDMAE | CCSR_SSI_SIER_RIE | \ CCSR_SSI_SIER_ROE0_EN | CCSR_SSI_SIER_ROE1_EN) /** * fsl_ssi_private: per-SSI private data * * @ssi: pointer to the SSI's registers * @ssi_phys: physical address of the SSI registers * @irq: IRQ of this SSI * @first_stream: pointer to the stream that was opened first * @second_stream: pointer to second stream * @playback: the number of playback streams opened * @capture: the number of capture streams opened * @asynchronous: 0=synchronous mode, 1=asynchronous mode * @cpu_dai: the CPU DAI for this device * @dev_attr: the sysfs device attribute structure * @stats: SSI statistics * @name: name for this device */ struct fsl_ssi_private { struct ccsr_ssi __iomem *ssi; dma_addr_t ssi_phys; unsigned int irq; struct snd_pcm_substream *first_stream; struct snd_pcm_substream *second_stream; unsigned int playback; unsigned int capture; int asynchronous; unsigned int fifo_depth; struct snd_soc_dai_driver cpu_dai_drv; struct device_attribute dev_attr; struct platform_device *pdev; struct { unsigned int rfrc; unsigned int tfrc; unsigned int cmdau; unsigned int cmddu; unsigned int rxt; unsigned int rdr1; unsigned int rdr0; unsigned int tde1; unsigned int tde0; unsigned int roe1; unsigned int roe0; unsigned int tue1; unsigned int tue0; unsigned int tfs; unsigned int rfs; unsigned int tls; unsigned int rls; unsigned int rff1; unsigned 
int rff0; unsigned int tfe1; unsigned int tfe0; } stats; char name[1]; }; /** * fsl_ssi_isr: SSI interrupt handler * * Although it's possible to use the interrupt handler to send and receive * data to/from the SSI, we use the DMA instead. Programming is more * complicated, but the performance is much better. * * This interrupt handler is used only to gather statistics. * * @irq: IRQ of the SSI device * @dev_id: pointer to the ssi_private structure for this SSI device */ static irqreturn_t fsl_ssi_isr(int irq, void *dev_id) { struct fsl_ssi_private *ssi_private = dev_id; struct ccsr_ssi __iomem *ssi = ssi_private->ssi; irqreturn_t ret = IRQ_NONE; __be32 sisr; __be32 sisr2 = 0; /* We got an interrupt, so read the status register to see what we were interrupted for. We mask it with the Interrupt Enable register so that we only check for events that we're interested in. */ sisr = in_be32(&ssi->sisr) & SIER_FLAGS; if (sisr & CCSR_SSI_SISR_RFRC) { ssi_private->stats.rfrc++; sisr2 |= CCSR_SSI_SISR_RFRC; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TFRC) { ssi_private->stats.tfrc++; sisr2 |= CCSR_SSI_SISR_TFRC; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_CMDAU) { ssi_private->stats.cmdau++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_CMDDU) { ssi_private->stats.cmddu++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_RXT) { ssi_private->stats.rxt++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_RDR1) { ssi_private->stats.rdr1++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_RDR0) { ssi_private->stats.rdr0++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TDE1) { ssi_private->stats.tde1++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TDE0) { ssi_private->stats.tde0++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_ROE1) { ssi_private->stats.roe1++; sisr2 |= CCSR_SSI_SISR_ROE1; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_ROE0) { ssi_private->stats.roe0++; sisr2 |= CCSR_SSI_SISR_ROE0; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TUE1) { ssi_private->stats.tue1++; sisr2 
|= CCSR_SSI_SISR_TUE1; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TUE0) { ssi_private->stats.tue0++; sisr2 |= CCSR_SSI_SISR_TUE0; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TFS) { ssi_private->stats.tfs++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_RFS) { ssi_private->stats.rfs++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TLS) { ssi_private->stats.tls++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_RLS) { ssi_private->stats.rls++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_RFF1) { ssi_private->stats.rff1++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_RFF0) { ssi_private->stats.rff0++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TFE1) { ssi_private->stats.tfe1++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TFE0) { ssi_private->stats.tfe0++; ret = IRQ_HANDLED; } /* Clear the bits that we set */ if (sisr2) out_be32(&ssi->sisr, sisr2); return ret; } /** * fsl_ssi_startup: create a new substream * * This is the first function called when a stream is opened. * * If this is the first stream open, then grab the IRQ and program most of * the SSI registers. */ static int fsl_ssi_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(rtd->cpu_dai); /* * If this is the first stream opened, then request the IRQ * and initialize the SSI registers. */ if (!ssi_private->playback && !ssi_private->capture) { struct ccsr_ssi __iomem *ssi = ssi_private->ssi; int ret; /* The 'name' should not have any slashes in it. */ ret = request_irq(ssi_private->irq, fsl_ssi_isr, 0, ssi_private->name, ssi_private); if (ret < 0) { dev_err(substream->pcm->card->dev, "could not claim irq %u\n", ssi_private->irq); return ret; } /* * Section 16.5 of the MPC8610 reference manual says that the * SSI needs to be disabled before updating the registers we set * here. 
*/ clrbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN); /* * Program the SSI into I2S Slave Non-Network Synchronous mode. * Also enable the transmit and receive FIFO. * * FIXME: Little-endian samples require a different shift dir */ clrsetbits_be32(&ssi->scr, CCSR_SSI_SCR_I2S_MODE_MASK | CCSR_SSI_SCR_SYN, CCSR_SSI_SCR_TFR_CLK_DIS | CCSR_SSI_SCR_I2S_MODE_SLAVE | (ssi_private->asynchronous ? 0 : CCSR_SSI_SCR_SYN)); out_be32(&ssi->stcr, CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TFEN0 | CCSR_SSI_STCR_TFSI | CCSR_SSI_STCR_TEFS | CCSR_SSI_STCR_TSCKP); out_be32(&ssi->srcr, CCSR_SSI_SRCR_RXBIT0 | CCSR_SSI_SRCR_RFEN0 | CCSR_SSI_SRCR_RFSI | CCSR_SSI_SRCR_REFS | CCSR_SSI_SRCR_RSCKP); /* * The DC and PM bits are only used if the SSI is the clock * master. */ /* 4. Enable the interrupts and DMA requests */ out_be32(&ssi->sier, SIER_FLAGS); /* * Set the watermark for transmit FIFI 0 and receive FIFO 0. We * don't use FIFO 1. We program the transmit water to signal a * DMA transfer if there are only two (or fewer) elements left * in the FIFO. Two elements equals one frame (left channel, * right channel). This value, however, depends on the depth of * the transmit buffer. * * We program the receive FIFO to notify us if at least two * elements (one frame) have been written to the FIFO. We could * make this value larger (and maybe we should), but this way * data will be written to memory as soon as it's available. */ out_be32(&ssi->sfcsr, CCSR_SSI_SFCSR_TFWM0(ssi_private->fifo_depth - 2) | CCSR_SSI_SFCSR_RFWM0(ssi_private->fifo_depth - 2)); /* * We keep the SSI disabled because if we enable it, then the * DMA controller will start. It's not supposed to start until * the SCR.TE (or SCR.RE) bit is set, but it does anyway. The * DMA controller will transfer one "BWC" of data (i.e. the * amount of data that the MR.BWC bits are set to). The reason * this is bad is because at this point, the PCM driver has not * finished initializing the DMA controller. 
*/ } if (!ssi_private->first_stream) ssi_private->first_stream = substream; else { /* This is the second stream open, so we need to impose sample * rate and maybe sample size constraints. Note that this can * cause a race condition if the second stream is opened before * the first stream is fully initialized. * * We provide some protection by checking to make sure the first * stream is initialized, but it's not perfect. ALSA sometimes * re-initializes the driver with a different sample rate or * size. If the second stream is opened before the first stream * has received its final parameters, then the second stream may * be constrained to the wrong sample rate or size. * * FIXME: This code does not handle opening and closing streams * repeatedly. If you open two streams and then close the first * one, you may not be able to open another stream until you * close the second one as well. */ struct snd_pcm_runtime *first_runtime = ssi_private->first_stream->runtime; if (!first_runtime->sample_bits) { dev_err(substream->pcm->card->dev, "set sample size in %s stream first\n", substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? "capture" : "playback"); return -EAGAIN; } /* If we're in synchronous mode, then we need to constrain * the sample size as well. We don't support independent sample * rates in asynchronous mode. */ if (!ssi_private->asynchronous) snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, first_runtime->sample_bits, first_runtime->sample_bits); ssi_private->second_stream = substream; } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ssi_private->playback++; if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ssi_private->capture++; return 0; } /** * fsl_ssi_hw_params - program the sample size * * Most of the SSI registers have been programmed in the startup function, * but the word length must be programmed here. Unfortunately, programming * the SxCCR.WL bits requires the SSI to be temporarily disabled. 
This can * cause a problem with supporting simultaneous playback and capture. If * the SSI is already playing a stream, then that stream may be temporarily * stopped when you start capture. * * Note: The SxCCR.DC and SxCCR.PM bits are only used if the SSI is the * clock master. */ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params, struct snd_soc_dai *cpu_dai) { struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai); if (substream == ssi_private->first_stream) { struct ccsr_ssi __iomem *ssi = ssi_private->ssi; unsigned int sample_size = snd_pcm_format_width(params_format(hw_params)); u32 wl = CCSR_SSI_SxCCR_WL(sample_size); /* The SSI should always be disabled at this points (SSIEN=0) */ /* In synchronous mode, the SSI uses STCCR for capture */ if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) || !ssi_private->asynchronous) clrsetbits_be32(&ssi->stccr, CCSR_SSI_SxCCR_WL_MASK, wl); else clrsetbits_be32(&ssi->srccr, CCSR_SSI_SxCCR_WL_MASK, wl); } return 0; } /** * fsl_ssi_trigger: start and stop the DMA transfer. * * This function is called by ALSA to start, stop, pause, and resume the DMA * transfer of data. * * The DMA channel is in external master start and pause mode, which * means the SSI completely controls the flow of data. 
*/ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(rtd->cpu_dai); struct ccsr_ssi __iomem *ssi = ssi_private->ssi; switch (cmd) { case SNDRV_PCM_TRIGGER_START: clrbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN); case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) setbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE); else setbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_RE); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) clrbits32(&ssi->scr, CCSR_SSI_SCR_TE); else clrbits32(&ssi->scr, CCSR_SSI_SCR_RE); break; default: return -EINVAL; } return 0; } /** * fsl_ssi_shutdown: shutdown the SSI * * Shutdown the SSI if there are no other substreams open. */ static void fsl_ssi_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(rtd->cpu_dai); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ssi_private->playback--; if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ssi_private->capture--; if (ssi_private->first_stream == substream) ssi_private->first_stream = ssi_private->second_stream; ssi_private->second_stream = NULL; /* * If this is the last active substream, disable the SSI and release * the IRQ. 
*/ if (!ssi_private->playback && !ssi_private->capture) { struct ccsr_ssi __iomem *ssi = ssi_private->ssi; clrbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN); free_irq(ssi_private->irq, ssi_private); } } static struct snd_soc_dai_ops fsl_ssi_dai_ops = { .startup = fsl_ssi_startup, .hw_params = fsl_ssi_hw_params, .shutdown = fsl_ssi_shutdown, .trigger = fsl_ssi_trigger, }; /* Template for the CPU dai driver structure */ static struct snd_soc_dai_driver fsl_ssi_dai_template = { .playback = { /* The SSI does not support monaural audio. */ .channels_min = 2, .channels_max = 2, .rates = FSLSSI_I2S_RATES, .formats = FSLSSI_I2S_FORMATS, }, .capture = { .channels_min = 2, .channels_max = 2, .rates = FSLSSI_I2S_RATES, .formats = FSLSSI_I2S_FORMATS, }, .ops = &fsl_ssi_dai_ops, }; /* Show the statistics of a flag only if its interrupt is enabled. The * compiler will optimze this code to a no-op if the interrupt is not * enabled. */ #define SIER_SHOW(flag, name) \ do { \ if (SIER_FLAGS & CCSR_SSI_SIER_##flag) \ length += sprintf(buf + length, #name "=%u\n", \ ssi_private->stats.name); \ } while (0) /** * fsl_sysfs_ssi_show: display SSI statistics * * Display the statistics for the current SSI device. To avoid confusion, * we only show those counts that are enabled. 
*/ static ssize_t fsl_sysfs_ssi_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fsl_ssi_private *ssi_private = container_of(attr, struct fsl_ssi_private, dev_attr); ssize_t length = 0; SIER_SHOW(RFRC_EN, rfrc); SIER_SHOW(TFRC_EN, tfrc); SIER_SHOW(CMDAU_EN, cmdau); SIER_SHOW(CMDDU_EN, cmddu); SIER_SHOW(RXT_EN, rxt); SIER_SHOW(RDR1_EN, rdr1); SIER_SHOW(RDR0_EN, rdr0); SIER_SHOW(TDE1_EN, tde1); SIER_SHOW(TDE0_EN, tde0); SIER_SHOW(ROE1_EN, roe1); SIER_SHOW(ROE0_EN, roe0); SIER_SHOW(TUE1_EN, tue1); SIER_SHOW(TUE0_EN, tue0); SIER_SHOW(TFS_EN, tfs); SIER_SHOW(RFS_EN, rfs); SIER_SHOW(TLS_EN, tls); SIER_SHOW(RLS_EN, rls); SIER_SHOW(RFF1_EN, rff1); SIER_SHOW(RFF0_EN, rff0); SIER_SHOW(TFE1_EN, tfe1); SIER_SHOW(TFE0_EN, tfe0); return length; } /** * Make every character in a string lower-case */ static void make_lowercase(char *s) { char *p = s; char c; while ((c = *p)) { if ((c >= 'A') && (c <= 'Z')) *p = c + ('a' - 'A'); p++; } } static int __devinit fsl_ssi_probe(struct platform_device *pdev) { struct fsl_ssi_private *ssi_private; int ret = 0; struct device_attribute *dev_attr = NULL; struct device_node *np = pdev->dev.of_node; const char *p, *sprop; const uint32_t *iprop; struct resource res; char name[64]; /* SSIs that are not connected on the board should have a * status = "disabled" * property in their device tree nodes. */ if (!of_device_is_available(np)) return -ENODEV; /* Check for a codec-handle property. */ if (!of_get_property(np, "codec-handle", NULL)) { dev_err(&pdev->dev, "missing codec-handle property\n"); return -ENODEV; } /* We only support the SSI in "I2S Slave" mode */ sprop = of_get_property(np, "fsl,mode", NULL); if (!sprop || strcmp(sprop, "i2s-slave")) { dev_notice(&pdev->dev, "mode %s is unsupported\n", sprop); return -ENODEV; } /* The DAI name is the last part of the full name of the node. 
*/ p = strrchr(np->full_name, '/') + 1; ssi_private = kzalloc(sizeof(struct fsl_ssi_private) + strlen(p), GFP_KERNEL); if (!ssi_private) { dev_err(&pdev->dev, "could not allocate DAI object\n"); return -ENOMEM; } strcpy(ssi_private->name, p); /* Initialize this copy of the CPU DAI driver structure */ memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template, sizeof(fsl_ssi_dai_template)); ssi_private->cpu_dai_drv.name = ssi_private->name; /* Get the addresses and IRQ */ ret = of_address_to_resource(np, 0, &res); if (ret) { dev_err(&pdev->dev, "could not determine device resources\n"); kfree(ssi_private); return ret; } ssi_private->ssi = ioremap(res.start, 1 + res.end - res.start); ssi_private->ssi_phys = res.start; ssi_private->irq = irq_of_parse_and_map(np, 0); /* Are the RX and the TX clocks locked? */ if (of_find_property(np, "fsl,ssi-asynchronous", NULL)) ssi_private->asynchronous = 1; else ssi_private->cpu_dai_drv.symmetric_rates = 1; /* Determine the FIFO depth. */ iprop = of_get_property(np, "fsl,fifo-depth", NULL); if (iprop) ssi_private->fifo_depth = *iprop; else /* Older 8610 DTs didn't have the fifo-depth property */ ssi_private->fifo_depth = 8; /* Initialize the the device_attribute structure */ dev_attr = &ssi_private->dev_attr; sysfs_attr_init(&dev_attr->attr); dev_attr->attr.name = "statistics"; dev_attr->attr.mode = S_IRUGO; dev_attr->show = fsl_sysfs_ssi_show; ret = device_create_file(&pdev->dev, dev_attr); if (ret) { dev_err(&pdev->dev, "could not create sysfs %s file\n", ssi_private->dev_attr.attr.name); goto error; } /* Register with ASoC */ dev_set_drvdata(&pdev->dev, ssi_private); ret = snd_soc_register_dai(&pdev->dev, &ssi_private->cpu_dai_drv); if (ret) { dev_err(&pdev->dev, "failed to register DAI: %d\n", ret); goto error; } /* Trigger the machine driver's probe function. The platform driver * name of the machine driver is taken from the /model property of the * device tree. We also pass the address of the CPU DAI driver * structure. 
*/ sprop = of_get_property(of_find_node_by_path("/"), "model", NULL); /* Sometimes the model name has a "fsl," prefix, so we strip that. */ p = strrchr(sprop, ','); if (p) sprop = p + 1; snprintf(name, sizeof(name), "snd-soc-%s", sprop); make_lowercase(name); ssi_private->pdev = platform_device_register_data(&pdev->dev, name, 0, NULL, 0); if (IS_ERR(ssi_private->pdev)) { ret = PTR_ERR(ssi_private->pdev); dev_err(&pdev->dev, "failed to register platform: %d\n", ret); goto error; } return 0; error: snd_soc_unregister_dai(&pdev->dev); dev_set_drvdata(&pdev->dev, NULL); if (dev_attr) device_remove_file(&pdev->dev, dev_attr); irq_dispose_mapping(ssi_private->irq); iounmap(ssi_private->ssi); kfree(ssi_private); return ret; } static int fsl_ssi_remove(struct platform_device *pdev) { struct fsl_ssi_private *ssi_private = dev_get_drvdata(&pdev->dev); platform_device_unregister(ssi_private->pdev); snd_soc_unregister_dai(&pdev->dev); device_remove_file(&pdev->dev, &ssi_private->dev_attr); kfree(ssi_private); dev_set_drvdata(&pdev->dev, NULL); return 0; } static const struct of_device_id fsl_ssi_ids[] = { { .compatible = "fsl,mpc8610-ssi", }, {} }; MODULE_DEVICE_TABLE(of, fsl_ssi_ids); static struct platform_driver fsl_ssi_driver = { .driver = { .name = "fsl-ssi-dai", .owner = THIS_MODULE, .of_match_table = fsl_ssi_ids, }, .probe = fsl_ssi_probe, .remove = fsl_ssi_remove, }; static int __init fsl_ssi_init(void) { printk(KERN_INFO "Freescale Synchronous Serial Interface (SSI) ASoC Driver\n"); return platform_driver_register(&fsl_ssi_driver); } static void __exit fsl_ssi_exit(void) { platform_driver_unregister(&fsl_ssi_driver); } module_init(fsl_ssi_init); module_exit(fsl_ssi_exit); MODULE_AUTHOR("Timur Tabi <timur@freescale.com>"); MODULE_DESCRIPTION("Freescale Synchronous Serial Interface (SSI) ASoC Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
TeamSPR/kernel
drivers/mtd/chips/cfi_cmdset_0002.c
2072
72129
/* * Common Flash Interface support: * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002) * * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp> * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com> * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com> * * 2_by_8 routines added by Simon Munton * * 4_by_16 work by Carolyn J. Smith * * XIP support hooks by Vitaly Wool (based on code for Intel flash * by Nicolas Pitre) * * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0 * * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com * * This code is GPL */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/init.h> #include <asm/io.h> #include <asm/byteorder.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/reboot.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/mtd/map.h> #include <linux/mtd/mtd.h> #include <linux/mtd/cfi.h> #include <linux/mtd/xip.h> #define AMD_BOOTLOC_BUG #define FORCE_WORD_WRITE 0 #define MAX_WORD_RETRIES 3 #define SST49LF004B 0x0060 #define SST49LF040B 0x0050 #define SST49LF008A 0x005a #define AT49BV6416 0x00d6 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *); static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *); static void cfi_amdstd_sync (struct mtd_info *); static int cfi_amdstd_suspend (struct mtd_info *); static void cfi_amdstd_resume (struct mtd_info *); static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *); static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, 
size_t, size_t *, u_char *); static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); static void cfi_amdstd_destroy(struct mtd_info *); struct mtd_info *cfi_cmdset_0002(struct map_info *, int); static struct mtd_info *cfi_amdstd_setup (struct mtd_info *); static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); #include "fwh_lock.h" static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len); static struct mtd_chip_driver cfi_amdstd_chipdrv = { .probe = NULL, /* Not usable directly */ .destroy = cfi_amdstd_destroy, .name = "cfi_cmdset_0002", .module = THIS_MODULE }; /* #define DEBUG_CFI_FEATURES */ #ifdef DEBUG_CFI_FEATURES static void cfi_tell_features(struct cfi_pri_amdstd *extp) { const char* erase_suspend[3] = { "Not supported", "Read only", "Read/write" }; const char* top_bottom[6] = { "No WP", "8x8KiB sectors at top & bottom, no WP", "Bottom boot", "Top boot", "Uniform, Bottom WP", "Uniform, Top WP" }; printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1); printk(" Address sensitive unlock: %s\n", (extp->SiliconRevision & 1) ? "Not required" : "Required"); if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]); else printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend); if (extp->BlkProt == 0) printk(" Block protection: Not supported\n"); else printk(" Block protection: %d sectors per group\n", extp->BlkProt); printk(" Temporary block unprotect: %s\n", extp->TmpBlkUnprotect ? 
"Supported" : "Not supported"); printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot); printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps); printk(" Burst mode: %s\n", extp->BurstMode ? "Supported" : "Not supported"); if (extp->PageMode == 0) printk(" Page mode: Not supported\n"); else printk(" Page mode: %d word page\n", extp->PageMode << 2); printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", extp->VppMin >> 4, extp->VppMin & 0xf); printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", extp->VppMax >> 4, extp->VppMax & 0xf); if (extp->TopBottom < ARRAY_SIZE(top_bottom)) printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]); else printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom); } #endif #ifdef AMD_BOOTLOC_BUG /* Wheee. Bring me the head of someone at AMD. */ static void fixup_amd_bootblock(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; struct cfi_pri_amdstd *extp = cfi->cmdset_priv; __u8 major = extp->MajorVersion; __u8 minor = extp->MinorVersion; if (((major << 8) | minor) < 0x3131) { /* CFI version 1.0 => don't trust bootloc */ pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", map->name, cfi->mfr, cfi->id); /* AFAICS all 29LV400 with a bottom boot block have a device ID * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode. * These were badly detected as they have the 0x80 bit set * so treat them as a special case. */ if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) && /* Macronix added CFI to their 2nd generation * MX29LV400C B/T but AFAICS no other 29LV400 (AMD, * Fujitsu, Spansion, EON, ESI and older Macronix) * has CFI. * * Therefore also check the manufacturer. * This reduces the risk of false detection due to * the 8-bit device ID. 
*/ (cfi->mfr == CFI_MFR_MACRONIX)) { pr_debug("%s: Macronix MX29LV400C with bottom boot block" " detected\n", map->name); extp->TopBottom = 2; /* bottom boot */ } else if (cfi->id & 0x80) { printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); extp->TopBottom = 3; /* top boot */ } else { extp->TopBottom = 2; /* bottom boot */ } pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;" " deduced %s from Device ID\n", map->name, major, minor, extp->TopBottom == 2 ? "bottom" : "top"); } } #endif static void fixup_use_write_buffers(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; if (cfi->cfiq->BufWriteTimeoutTyp) { pr_debug("Using buffer write method\n" ); mtd->_write = cfi_amdstd_write_buffers; } } /* Atmel chips don't use the same PRI format as AMD chips */ static void fixup_convert_atmel_pri(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; struct cfi_pri_amdstd *extp = cfi->cmdset_priv; struct cfi_pri_atmel atmel_pri; memcpy(&atmel_pri, extp, sizeof(atmel_pri)); memset((char *)extp + 5, 0, sizeof(*extp) - 5); if (atmel_pri.Features & 0x02) extp->EraseSuspend = 2; /* Some chips got it backwards... 
*/ if (cfi->id == AT49BV6416) { if (atmel_pri.BottomBoot) extp->TopBottom = 3; else extp->TopBottom = 2; } else { if (atmel_pri.BottomBoot) extp->TopBottom = 2; else extp->TopBottom = 3; } /* burst write mode not supported */ cfi->cfiq->BufWriteTimeoutTyp = 0; cfi->cfiq->BufWriteTimeoutMax = 0; } static void fixup_use_secsi(struct mtd_info *mtd) { /* Setup for chips with a secsi area */ mtd->_read_user_prot_reg = cfi_amdstd_secsi_read; mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read; } static void fixup_use_erase_chip(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; if ((cfi->cfiq->NumEraseRegions == 1) && ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { mtd->_erase = cfi_amdstd_erase_chip; } } /* * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors * locked by default. */ static void fixup_use_atmel_lock(struct mtd_info *mtd) { mtd->_lock = cfi_atmel_lock; mtd->_unlock = cfi_atmel_unlock; mtd->flags |= MTD_POWERUP_LOCK; } static void fixup_old_sst_eraseregion(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; /* * These flashes report two separate eraseblock regions based on the * sector_erase-size and block_erase-size, although they both operate on the * same memory. This is not allowed according to CFI, so we just pick the * sector_erase-size. 
*/ cfi->cfiq->NumEraseRegions = 1; } static void fixup_sst39vf(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; fixup_old_sst_eraseregion(mtd); cfi->addr_unlock1 = 0x5555; cfi->addr_unlock2 = 0x2AAA; } static void fixup_sst39vf_rev_b(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; fixup_old_sst_eraseregion(mtd); cfi->addr_unlock1 = 0x555; cfi->addr_unlock2 = 0x2AA; cfi->sector_erase_cmd = CMD(0x50); } static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; fixup_sst39vf_rev_b(mtd); /* * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where * it should report a size of 8KBytes (0x0020*256). */ cfi->cfiq->EraseRegionInfo[0] = 0x002003ff; pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name); } static void fixup_s29gl064n_sectors(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) { cfi->cfiq->EraseRegionInfo[0] |= 0x0040; pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name); } } static void fixup_s29gl032n_sectors(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) { cfi->cfiq->EraseRegionInfo[1] &= ~0x0040; pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name); } } static void fixup_s29ns512p_sectors(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; /* * S29NS512P flash uses more than 8bits to report number of sectors, * which is not permitted by CFI. 
*/ cfi->cfiq->EraseRegionInfo[0] = 0x020001ff; pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name); } /* Used to fix CFI-Tables of chips without Extended Query Tables */ static struct cfi_fixup cfi_nopri_fixup_table[] = { { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */ { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */ { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */ { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */ { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */ { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */ { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */ { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */ { 0, 0, NULL } }; static struct cfi_fixup cfi_fixup_table[] = { { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri }, #ifdef AMD_BOOTLOC_BUG { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock }, { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock }, { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock }, #endif { CFI_MFR_AMD, 0x0050, fixup_use_secsi }, { CFI_MFR_AMD, 0x0053, fixup_use_secsi }, { CFI_MFR_AMD, 0x0055, fixup_use_secsi }, { CFI_MFR_AMD, 0x0056, fixup_use_secsi }, { CFI_MFR_AMD, 0x005C, fixup_use_secsi }, { CFI_MFR_AMD, 0x005F, fixup_use_secsi }, { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors }, { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors }, { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors }, { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors }, { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors }, { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */ { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */ { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */ { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */ #if !FORCE_WORD_WRITE { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers }, #endif { 0, 0, NULL } }; static struct cfi_fixup jedec_fixup_table[] = { { 
CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock }, { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock }, { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock }, { 0, 0, NULL } }; static struct cfi_fixup fixup_table[] = { /* The CFI vendor ids and the JEDEC vendor IDs appear * to be common. It is like the devices id's are as * well. This table is to pick all cases where * we know that is the case. */ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip }, { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock }, { 0, 0, NULL } }; static void cfi_fixup_major_minor(struct cfi_private *cfi, struct cfi_pri_amdstd *extp) { if (cfi->mfr == CFI_MFR_SAMSUNG) { if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') || (extp->MajorVersion == '3' && extp->MinorVersion == '3')) { /* * Samsung K8P2815UQB and K8D6x16UxM chips * report major=0 / minor=0. * K8D3x16UxC chips report major=3 / minor=3. */ printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu" " Extended Query version to 1.%c\n", extp->MinorVersion); extp->MajorVersion = '1'; } } /* * SST 38VF640x chips report major=0xFF / minor=0xFF. */ if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) { extp->MajorVersion = '1'; extp->MinorVersion = '0'; } } static int is_m29ew(struct cfi_private *cfi) { if (cfi->mfr == CFI_MFR_INTEL && ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) || (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e))) return 1; return 0; } /* * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20: * Some revisions of the M29EW suffer from erase suspend hang ups. In * particular, it can occur when the sequence * Erase Confirm -> Suspend -> Program -> Resume * causes a lockup due to internal timing issues. The consequence is that the * erase cannot be resumed without inserting a dummy command after programming * and prior to resuming. [...] The work-around is to issue a dummy write cycle * that writes an F0 command code before the RESUME command. 
 */
/*
 * Micron M29EW erase-suspend workaround (TN-13-07): before resuming a
 * suspended erase, issue a dummy 0xF0 (read/reset) write cycle.  Without
 * it the Erase Confirm -> Suspend -> Program -> Resume sequence can lock
 * up the part due to internal timing issues.  No-op on other chips.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
					  unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
	if (is_m29ew(cfi))
		map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay. The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive. As a result, it is recommended
 * that a patch be applied. Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur. The problem disappears when a delay is inserted after
 * the RESUME command by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
 * We have chosen 500µs because this latency is acceptable.
 */
/*
 * Micron M29EW resume-delay workaround (TN-13-07): wait after an
 * Erase-Resume so an immediately following Erase-Suspend cannot hang
 * the part.  500us is the worst-case value from the app note.
 * No-op on other chips.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume Issue see Micron TN-13-07
	 * Worst case delay must be 500µs but 30-50µs should be ok as well
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}

/*
 * Probe entry point for AMD/Fujitsu command-set (0002) chips.
 *
 * Allocates and fills an mtd_info, reads and validates the primary
 * vendor-specific extended query table (CFI mode), applies the various
 * fixup tables, un-swaps the erase-region table for top-boot parts, and
 * initialises per-chip timing/waitqueue state before handing off to
 * cfi_amdstd_setup().
 *
 * @map:     the memory-mapped window onto the chip(s)
 * @primary: non-zero to use the primary extended-query address (P_ADR),
 *           zero for the alternate one (A_ADR)
 *
 * Returns the new mtd_info on success, NULL on failure (allocation
 * failure, unsupported extension version, or missing unlock addresses).
 */
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_amdstd_erase_varsize;
	mtd->_write   = cfi_amdstd_write_words;
	mtd->_read    = cfi_amdstd_read;
	mtd->_sync    = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	/* write-buffer size in bytes: interleave * per-chip buffer size */
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
			mtd->writebufsize);

	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
			/* PPB (persistent protection bit) locking, opt-in via DT */
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			}
#endif

			/* TopBottom: 2=bottom boot, 3=top boot (per ext. query) */
			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			/*
			 * The CFI erase-region table is specified bottom-up;
			 * for top-boot parts reverse it so region 0 is at
			 * offset 0 of the device.
			 */
			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;
					__u32 swap;

					swap = cfi->cfiq->EraseRegionInfo[i];
					cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
					cfi->cfiq->EraseRegionInfo[j] = swap;
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		/* CFI timeouts are log2 milliseconds; convert to ms */
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}

/* Command sets 0006 and 0701 are handled identically to 0002. */
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

/*
 * Second-stage setup: build the mtd erase-region geometry from the CFI
 * erase-region table (replicated per interleaved chip), register the
 * reboot notifier and pin the module.  Frees everything and returns
 * NULL if the region table is inconsistent with the device size.
 */
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* EraseRegionInfo: high 16 bits = block size / 256,
		 * low 16 bits = number of blocks - 1 */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done	(particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	/* two back-to-back reads: equal => no toggle bits => ready */
	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done	(particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	/* no toggling AND the array already reads back the expected datum */
	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

/*
 * Acquire exclusive access to @chip for an operation of type @mode
 * (FL_READY, FL_POINT, FL_WRITING, ...), suspending an in-progress erase
 * when the chip supports it and @mode allows.  Called with chip->mutex
 * held; may drop and retake it while waiting.
 *
 * Returns 0 with the chip ready for the caller's operation, or -EIO on
 * timeout/shutdown.  put_chip() undoes any suspend performed here.
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		/* fall through: once ready, FL_STATUS is as good as FL_READY */

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		/* Only suspend if the chip advertises erase-suspend and the
		 * requested mode is permitted while suspended (writing is
		 * only allowed when EraseSuspend bit 1 is set). */
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we trying to
				 * use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* fall through: otherwise wait like any other busy state */

	default:
	sleep:
		/* Chip is busy with an incompatible operation: sleep until
		 * the owner wakes us via chip->wq, then start over. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}

/*
 * Release the chip after an operation obtained via get_chip(): resume a
 * suspended erase if one was suspended (with the M29EW workarounds
 * before/after the resume command) and wake any sleeping waiters.
 * Called with chip->mutex held.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		cfi_fixup_m29ew_erase_suspend(map,
			chip->in_progress_block_addr);
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		cfi_fixup_m29ew_delay_after_resume(cfi);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt what so ever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Also configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */

/* Enter a no-XIP region: flash is about to leave array mode, so no code
 * may be fetched from it and no interrupt may run until xip_enable(). */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

/* Leave a no-XIP region: force the chip back to array mode (0xF0 reset)
 * if needed, prime the icache prefetcher and re-enable interrupts. */
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remaining
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);	/* DQ7 = operation complete */
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);	/* Erase-Suspend */
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			/* DQ6 clear means the erase actually finished: no
			 * suspended state to record, just exit the poll. */
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);	/* back to array mode */
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Correct Erase Suspend Hangups for M29EW */
			cfi_fixup_m29ew_erase_suspend(map, adr);
			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.
 * We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

/* Non-XIP variants: simply drop the chip mutex around the delay so other
 * tasks can make progress. */
#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif

/*
 * Read @len bytes at @adr from a single chip into @buf, taking the chip
 * lock and forcing the chip back into array mode first if necessary.
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}

/*
 * mtd read entry point: split the request at chip boundaries and hand
 * each piece to do_read_onechip().  *retlen accumulates even on partial
 * failure.
 */
static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* clamp to the end of the current chip */
		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

/*
 * Read from one chip's SecSi (secured silicon) sector: enter SecSi mode
 * (AA/55/88 unlock sequence), copy the data, then exit back to array
 * mode (AA/55/90/00).  Waits inline for FL_READY rather than using
 * get_chip() because the chip must not be left in a suspended state.
 */
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	/* Enter SecSi sector region */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	/* Exit SecSi sector region */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

/*
 * mtd entry point for reading the SecSi area: 8 bytes per chip, split
 * accordingly and delegated to do_read_secsi_onechip().
 */
static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

/*
 * Program a single bus-width word @datum at @adr on @chip using the
 * AA/55/A0 program sequence, polling for completion and retrying up to
 * MAX_WORD_RETRIES times on verify failure.  Returns 0 or -EIO.
 */
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does have a field for
	 * maximum timeout, only for typical which can be far too short
	 * depending of the conditions.	 The ' + 1' is to avoid having a
	 * timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		/* re-check readiness after the timeout test to avoid a
		 * spurious timeout when we were merely preempted */
		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}

/*
 * mtd word-write entry point: handles an unaligned head (read-modify-
 * write), a bus-width-aligned middle, and an unaligned tail, crossing
 * chip boundaries as needed.  Each word goes through do_write_oneword().
 */
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
/*
 * Program up to one write-buffer's worth of data (@len bytes, a multiple
 * of the bus width) at @adr using the chip's write-buffer command
 * (25h load / 29h confirm).  On timeout, runs the write-to-buffer-reset
 * sequence and returns -EIO.
 *
 * NOTE(review): the completion poll below uses chip_ready() only;
 * later upstream kernels changed it to chip_good(map, adr, datum) to
 * catch silently-failed buffer programs — consider backporting.
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeo. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;	/* poll on the last word written */

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/*
	 * Recovery from write-buffer programming failures requires
	 * the write-to-buffer-reset sequence.  Since the last part
	 * of the sequence also works as a normal reset, we can run
	 * the same commands regardless of why we are here.
	 * See e.g.
	 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
	 */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}

/*
 * mtd buffered-write entry point: falls back to word writes for the
 * unaligned head and for any trailing fragment, and uses do_write_buffer()
 * for full write-buffer-aligned spans without crossing buffer boundaries.
 */
static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}

/*
 * Wait for the flash chip to become ready to write data
 *
 * This is only called during the panic_write() path. When panic_write()
 * is called, the kernel is in the process of a panic, and will soon be
 * dead. Therefore we don't take any locks, and attempt to get access
 * to the chip as soon as possible.
 */
static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
				 unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int retries = 10;
	int i;

	/*
	 * If the driver thinks the chip is idle, and no toggle bits
	 * are changing, then the chip is actually idle for sure.
	 */
	if (chip->state == FL_READY && chip_ready(map, adr))
		return 0;

	/*
	 * Try several times to reset the chip and then wait for it
	 * to become idle. The upper limit of a few milliseconds of
	 * delay isn't a big problem: the kernel is dying anyway. It
	 * is more important to save the messages.
	 */
	while (retries > 0) {
		const unsigned long timeo = (HZ / 1000) + 1;

		/* send the reset command */
		map_write(map, CMD(0xF0), chip->start);

		/* wait for the chip to become ready */
		for (i = 0; i < jiffies_to_usecs(timeo); i++) {
			if (chip_ready(map, adr))
				return 0;

			udelay(1);
		}
	}

	/* the chip never became ready */
	return -EBUSY;
}

/*
 * Write out one word of data to a single flash chip during a kernel panic
 *
 * This is only called during the panic_write() path.
When panic_write() * is called, the kernel is in the process of a panic, and will soon be * dead. Therefore we don't take any locks, and attempt to get access * to the chip as soon as possible. * * The implementation of this routine is intentionally similar to * do_write_oneword(), in order to ease code maintenance. */ static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum) { const unsigned long uWriteTimeout = (HZ / 1000) + 1; struct cfi_private *cfi = map->fldrv_priv; int retry_cnt = 0; map_word oldd; int ret = 0; int i; adr += chip->start; ret = cfi_amdstd_panic_wait(map, chip, adr); if (ret) return ret; pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr, datum.x[0]); /* * Check for a NOP for the case when the datum to write is already * present - it saves time and works around buggy chips that corrupt * data at other locations when 0xff is written to a location that * already contains 0xff. */ oldd = map_read(map, adr); if (map_word_equal(map, oldd, datum)) { pr_debug("MTD %s(): NOP\n", __func__); goto op_done; } ENABLE_VPP(map); retry: cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); map_write(map, datum, adr); for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) { if (chip_ready(map, adr)) break; udelay(1); } if (!chip_good(map, adr, datum)) { /* reset on all failures. */ map_write(map, CMD(0xF0), chip->start); /* FIXME - should have reset delay before continuing */ if (++retry_cnt <= MAX_WORD_RETRIES) goto retry; ret = -EIO; } op_done: DISABLE_VPP(map); return ret; } /* * Write out some data during a kernel panic * * This is used by the mtdoops driver to save the dying messages from a * kernel which has panic'd. 
* * This routine ignores all of the locking used throughout the rest of the * driver, in order to ensure that the data gets written out no matter what * state this driver (and the flash chip itself) was in when the kernel crashed. * * The implementation of this routine is intentionally similar to * cfi_amdstd_write_words(), in order to ease code maintenance. */ static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; unsigned long ofs, chipstart; int ret = 0; int chipnum; chipnum = to >> cfi->chipshift; ofs = to - (chipnum << cfi->chipshift); chipstart = cfi->chips[chipnum].start; /* If it's not bus aligned, do the first byte write */ if (ofs & (map_bankwidth(map) - 1)) { unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1); int i = ofs - bus_ofs; int n = 0; map_word tmp_buf; ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs); if (ret) return ret; /* Load 'tmp_buf' with old contents of flash */ tmp_buf = map_read(map, bus_ofs + chipstart); /* Number of bytes to copy from buffer */ n = min_t(int, len, map_bankwidth(map) - i); tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); ret = do_panic_write_oneword(map, &cfi->chips[chipnum], bus_ofs, tmp_buf); if (ret) return ret; ofs += n; buf += n; (*retlen) += n; len -= n; if (ofs >> cfi->chipshift) { chipnum++; ofs = 0; if (chipnum == cfi->numchips) return 0; } } /* We are now aligned, write as much as possible */ while (len >= map_bankwidth(map)) { map_word datum; datum = map_word_load(map, buf); ret = do_panic_write_oneword(map, &cfi->chips[chipnum], ofs, datum); if (ret) return ret; ofs += map_bankwidth(map); buf += map_bankwidth(map); (*retlen) += map_bankwidth(map); len -= map_bankwidth(map); if (ofs >> cfi->chipshift) { chipnum++; ofs = 0; if (chipnum == cfi->numchips) return 0; chipstart = cfi->chips[chipnum].start; } } /* Write the trailing bytes if any */ if 
(len & (map_bankwidth(map) - 1)) { map_word tmp_buf; ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs); if (ret) return ret; tmp_buf = map_read(map, ofs + chipstart); tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); ret = do_panic_write_oneword(map, &cfi->chips[chipnum], ofs, tmp_buf); if (ret) return ret; (*retlen) += len; } return 0; } /* * Handle devices with one erase region, that only implement * the chip erase command. */ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) { struct cfi_private *cfi = map->fldrv_priv; unsigned long timeo = jiffies + HZ; unsigned long int adr; DECLARE_WAITQUEUE(wait, current); int ret = 0; adr = cfi->addr_unlock1; mutex_lock(&chip->mutex); ret = get_chip(map, chip, adr, FL_WRITING); if (ret) { mutex_unlock(&chip->mutex); return ret; } pr_debug("MTD %s(): ERASE 0x%.8lx\n", __func__, chip->start ); XIP_INVAL_CACHED_RANGE(map, adr, map->size); ENABLE_VPP(map); xip_disable(map, chip, adr); cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); chip->state = FL_ERASING; chip->erase_suspended = 0; chip->in_progress_block_addr = adr; INVALIDATE_CACHE_UDELAY(map, chip, adr, map->size, chip->erase_time*500); timeo = jiffies + (HZ*20); for (;;) { if (chip->state != FL_ERASING) { /* Someone's suspended the erase. 
Sleep */ set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); mutex_lock(&chip->mutex); continue; } if (chip->erase_suspended) { /* This erase was suspended and resumed. Adjust the timeout */ timeo = jiffies + (HZ*20); /* FIXME */ chip->erase_suspended = 0; } if (chip_ready(map, adr)) break; if (time_after(jiffies, timeo)) { printk(KERN_WARNING "MTD %s(): software timeout\n", __func__ ); break; } /* Latency issues. Drop the lock, wait a while and retry */ UDELAY(map, chip, adr, 1000000/HZ); } /* Did we succeed? */ if (!chip_good(map, adr, map_word_ff(map))) { /* reset on all failures. */ map_write( map, CMD(0xF0), chip->start ); /* FIXME - should have reset delay before continuing */ ret = -EIO; } chip->state = FL_READY; xip_enable(map, chip, adr); DISABLE_VPP(map); put_chip(map, chip, adr); mutex_unlock(&chip->mutex); return ret; } static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) { struct cfi_private *cfi = map->fldrv_priv; unsigned long timeo = jiffies + HZ; DECLARE_WAITQUEUE(wait, current); int ret = 0; adr += chip->start; mutex_lock(&chip->mutex); ret = get_chip(map, chip, adr, FL_ERASING); if (ret) { mutex_unlock(&chip->mutex); return ret; } pr_debug("MTD %s(): ERASE 0x%.8lx\n", __func__, adr ); XIP_INVAL_CACHED_RANGE(map, adr, len); ENABLE_VPP(map); xip_disable(map, chip, adr); cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); map_write(map, cfi->sector_erase_cmd, adr); chip->state = FL_ERASING; 
chip->erase_suspended = 0; chip->in_progress_block_addr = adr; INVALIDATE_CACHE_UDELAY(map, chip, adr, len, chip->erase_time*500); timeo = jiffies + (HZ*20); for (;;) { if (chip->state != FL_ERASING) { /* Someone's suspended the erase. Sleep */ set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); mutex_lock(&chip->mutex); continue; } if (chip->erase_suspended) { /* This erase was suspended and resumed. Adjust the timeout */ timeo = jiffies + (HZ*20); /* FIXME */ chip->erase_suspended = 0; } if (chip_ready(map, adr)) { xip_enable(map, chip, adr); break; } if (time_after(jiffies, timeo)) { xip_enable(map, chip, adr); printk(KERN_WARNING "MTD %s(): software timeout\n", __func__ ); break; } /* Latency issues. Drop the lock, wait a while and retry */ UDELAY(map, chip, adr, 1000000/HZ); } /* Did we succeed? */ if (!chip_good(map, adr, map_word_ff(map))) { /* reset on all failures. */ map_write( map, CMD(0xF0), chip->start ); /* FIXME - should have reset delay before continuing */ ret = -EIO; } chip->state = FL_READY; DISABLE_VPP(map); put_chip(map, chip, adr); mutex_unlock(&chip->mutex); return ret; } static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) { unsigned long ofs, len; int ret; ofs = instr->addr; len = instr->len; ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL); if (ret) return ret; instr->state = MTD_ERASE_DONE; mtd_erase_callback(instr); return 0; } static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; int ret = 0; if (instr->addr != 0) return -EINVAL; if (instr->len != mtd->size) return -EINVAL; ret = do_erase_chip(map, &cfi->chips[0]); if (ret) return ret; instr->state = MTD_ERASE_DONE; mtd_erase_callback(instr); return 0; } static int do_atmel_lock(struct map_info *map, struct flchip *chip, unsigned long adr, 
int len, void *thunk) { struct cfi_private *cfi = map->fldrv_priv; int ret; mutex_lock(&chip->mutex); ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); if (ret) goto out_unlock; chip->state = FL_LOCKING; pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); map_write(map, CMD(0x40), chip->start + adr); chip->state = FL_READY; put_chip(map, chip, adr + chip->start); ret = 0; out_unlock: mutex_unlock(&chip->mutex); return ret; } static int do_atmel_unlock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) { struct cfi_private *cfi = map->fldrv_priv; int ret; mutex_lock(&chip->mutex); ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); if (ret) goto out_unlock; chip->state = FL_UNLOCKING; pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); map_write(map, CMD(0x70), adr); chip->state = FL_READY; put_chip(map, chip, adr + chip->start); ret = 0; out_unlock: mutex_unlock(&chip->mutex); return ret; } static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL); } static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); } /* * Advanced Sector Protection - PPB (Persistent Protection Bit) locking */ struct ppb_lock { struct flchip *chip; loff_t offset; int locked; }; #define MAX_SECTORS 512 #define 
DO_XXLOCK_ONEBLOCK_LOCK ((void *)1) #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2) #define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3) static int __maybe_unused do_ppb_xxlock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) { struct cfi_private *cfi = map->fldrv_priv; unsigned long timeo; int ret; mutex_lock(&chip->mutex); ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); if (ret) { mutex_unlock(&chip->mutex); return ret; } pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len); cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); /* PPB entry command */ cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { chip->state = FL_LOCKING; map_write(map, CMD(0xA0), chip->start + adr); map_write(map, CMD(0x00), chip->start + adr); } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { /* * Unlocking of one specific sector is not supported, so we * have to unlock all sectors of this device instead */ chip->state = FL_UNLOCKING; map_write(map, CMD(0x80), chip->start); map_write(map, CMD(0x30), chip->start); } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) { chip->state = FL_JEDEC_QUERY; /* Return locked status: 0->locked, 1->unlocked */ ret = !cfi_read_query(map, adr); } else BUG(); /* * Wait for some time as unlocking of all sectors takes quite long */ timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */ for (;;) { if (chip_ready(map, adr)) break; if (time_after(jiffies, timeo)) { printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); ret = -EIO; break; } UDELAY(map, chip, adr, 1); } /* Exit BC commands */ map_write(map, CMD(0x90), chip->start); map_write(map, CMD(0x00), chip->start); chip->state = FL_READY; put_chip(map, chip, adr + chip->start); mutex_unlock(&chip->mutex); return ret; } static int __maybe_unused 
cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, DO_XXLOCK_ONEBLOCK_LOCK); } static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct mtd_erase_region_info *regions = mtd->eraseregions; struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; struct ppb_lock *sect; unsigned long adr; loff_t offset; uint64_t length; int chipnum; int i; int sectors; int ret; /* * PPB unlocking always unlocks all sectors of the flash chip. * We need to re-lock all previously locked sectors. So lets * first check the locking status of all sectors and save * it for future use. */ sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL); if (!sect) return -ENOMEM; /* * This code to walk all sectors is a slightly modified version * of the cfi_varsize_frob() code. */ i = 0; chipnum = 0; adr = 0; sectors = 0; offset = 0; length = mtd->size; while (length) { int size = regions[i].erasesize; /* * Only test sectors that shall not be unlocked. The other * sectors shall be unlocked, so lets keep their locking * status at "unlocked" (locked=0) for the final re-locking. */ if ((adr < ofs) || (adr >= (ofs + len))) { sect[sectors].chip = &cfi->chips[chipnum]; sect[sectors].offset = offset; sect[sectors].locked = do_ppb_xxlock( map, &cfi->chips[chipnum], adr, 0, DO_XXLOCK_ONEBLOCK_GETLOCK); } adr += size; offset += size; length -= size; if (offset == regions[i].offset + size * regions[i].numblocks) i++; if (adr >> cfi->chipshift) { adr = 0; chipnum++; if (chipnum >= cfi->numchips) break; } sectors++; if (sectors >= MAX_SECTORS) { printk(KERN_ERR "Only %d sectors for PPB locking supported!\n", MAX_SECTORS); kfree(sect); return -EINVAL; } } /* Now unlock the whole chip */ ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK); if (ret) { kfree(sect); return ret; } /* * PPB unlocking always unlocks all sectors of the flash chip. 
* We need to re-lock all previously locked sectors. */ for (i = 0; i < sectors; i++) { if (sect[i].locked) do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0, DO_XXLOCK_ONEBLOCK_LOCK); } kfree(sect); return ret; } static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) { return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0; } static void cfi_amdstd_sync (struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; int i; struct flchip *chip; int ret = 0; DECLARE_WAITQUEUE(wait, current); for (i=0; !ret && i<cfi->numchips; i++) { chip = &cfi->chips[i]; retry: mutex_lock(&chip->mutex); switch(chip->state) { case FL_READY: case FL_STATUS: case FL_CFI_QUERY: case FL_JEDEC_QUERY: chip->oldstate = chip->state; chip->state = FL_SYNCING; /* No need to wake_up() on this state change - * as the whole point is that nobody can do anything * with the chip now anyway. */ case FL_SYNCING: mutex_unlock(&chip->mutex); break; default: /* Not an idle state */ set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); goto retry; } } /* Unlock the chips again */ for (i--; i >=0; i--) { chip = &cfi->chips[i]; mutex_lock(&chip->mutex); if (chip->state == FL_SYNCING) { chip->state = chip->oldstate; wake_up(&chip->wq); } mutex_unlock(&chip->mutex); } } static int cfi_amdstd_suspend(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; int i; struct flchip *chip; int ret = 0; for (i=0; !ret && i<cfi->numchips; i++) { chip = &cfi->chips[i]; mutex_lock(&chip->mutex); switch(chip->state) { case FL_READY: case FL_STATUS: case FL_CFI_QUERY: case FL_JEDEC_QUERY: chip->oldstate = chip->state; chip->state = FL_PM_SUSPENDED; /* No need to wake_up() on this state change - * as the whole point is that nobody can do anything * with the chip now anyway. 
 */
		case FL_PM_SUSPENDED:
			/* Already suspended (e.g. by a previous pass) - nothing to do */
			break;
		default:
			/* Chip is busy (erase/write in progress) - cannot suspend now */
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		/* Suspend failed part-way: roll the already-suspended chips
		 * back to their previous state and wake any waiters. */
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}

/*
 * Resume all chips after a power-management suspend: issue the 0xF0
 * reset command (same command the error paths above use) to put each
 * chip back into read array mode, then wake any waiters that queued
 * while the chip was in FL_PM_SUSPENDED.
 */
static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			/* 0xF0 = reset/read-array command */
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			/* Should not happen: suspend() must have run first */
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		mutex_unlock(&chip->mutex);
	}
}

/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting.  On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			/* 0xF0 = reset/read-array command (same command used
			 * by the error-recovery paths above). */
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}
		/* If get_chip() failed the chip is left as-is; we still
		 * return 0 - this is best-effort shutdown cleanup. */

		mutex_unlock(&chip->mutex);
	}

	return 0;
}

/*
 * Reboot-notifier callback: put the flash back into read array mode
 * before the system restarts (see the comment above cfi_amdstd_reset()).
 */
static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_amdstd_reset(mtd);
	return NOTIFY_DONE;
}

/*
 * Driver teardown: reset the chips to read array mode, drop the reboot
 * notifier, then free everything allocated at probe time.  Note cfi
 * itself is freed, so cfi-> members must be freed before it.
 */
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");
gpl-2.0
fosser2/tegra-l4t-r16r2-rc
fs/jfs/super.c
2840
21537
/* * Copyright (C) International Business Machines Corp., 2000-2004 * Portions Copyright (C) Christoph Hellwig, 2001-2002 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/module.h> #include <linux/parser.h> #include <linux/completion.h> #include <linux/vfs.h> #include <linux/quotaops.h> #include <linux/mount.h> #include <linux/moduleparam.h> #include <linux/kthread.h> #include <linux/posix_acl.h> #include <linux/buffer_head.h> #include <linux/exportfs.h> #include <linux/crc32.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/seq_file.h> #include "jfs_incore.h" #include "jfs_filsys.h" #include "jfs_inode.h" #include "jfs_metapage.h" #include "jfs_superblock.h" #include "jfs_dmap.h" #include "jfs_imap.h" #include "jfs_acl.h" #include "jfs_debug.h" MODULE_DESCRIPTION("The Journaled Filesystem (JFS)"); MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM"); MODULE_LICENSE("GPL"); static struct kmem_cache * jfs_inode_cachep; static const struct super_operations jfs_super_operations; static const struct export_operations jfs_export_operations; static struct file_system_type jfs_fs_type; #define MAX_COMMIT_THREADS 64 static int commit_threads = 0; module_param(commit_threads, int, 0); MODULE_PARM_DESC(commit_threads, "Number of commit threads"); static struct task_struct 
*jfsCommitThread[MAX_COMMIT_THREADS]; struct task_struct *jfsIOthread; struct task_struct *jfsSyncThread; #ifdef CONFIG_JFS_DEBUG int jfsloglevel = JFS_LOGLEVEL_WARN; module_param(jfsloglevel, int, 0644); MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)"); #endif static void jfs_handle_error(struct super_block *sb) { struct jfs_sb_info *sbi = JFS_SBI(sb); if (sb->s_flags & MS_RDONLY) return; updateSuper(sb, FM_DIRTY); if (sbi->flag & JFS_ERR_PANIC) panic("JFS (device %s): panic forced after error\n", sb->s_id); else if (sbi->flag & JFS_ERR_REMOUNT_RO) { jfs_err("ERROR: (device %s): remounting filesystem " "as read-only\n", sb->s_id); sb->s_flags |= MS_RDONLY; } /* nothing is done for continue beyond marking the superblock dirty */ } void jfs_error(struct super_block *sb, const char * function, ...) { static char error_buf[256]; va_list args; va_start(args, function); vsnprintf(error_buf, sizeof(error_buf), function, args); va_end(args); printk(KERN_ERR "ERROR: (device %s): %s\n", sb->s_id, error_buf); jfs_handle_error(sb); } static struct inode *jfs_alloc_inode(struct super_block *sb) { struct jfs_inode_info *jfs_inode; jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS); if (!jfs_inode) return NULL; return &jfs_inode->vfs_inode; } static void jfs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); struct jfs_inode_info *ji = JFS_IP(inode); INIT_LIST_HEAD(&inode->i_dentry); kmem_cache_free(jfs_inode_cachep, ji); } static void jfs_destroy_inode(struct inode *inode) { struct jfs_inode_info *ji = JFS_IP(inode); BUG_ON(!list_empty(&ji->anon_inode_list)); spin_lock_irq(&ji->ag_lock); if (ji->active_ag != -1) { struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap; atomic_dec(&bmap->db_active[ji->active_ag]); ji->active_ag = -1; } spin_unlock_irq(&ji->ag_lock); call_rcu(&inode->i_rcu, jfs_i_callback); } static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct jfs_sb_info *sbi = 
JFS_SBI(dentry->d_sb); s64 maxinodes; struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap; jfs_info("In jfs_statfs"); buf->f_type = JFS_SUPER_MAGIC; buf->f_bsize = sbi->bsize; buf->f_blocks = sbi->bmap->db_mapsize; buf->f_bfree = sbi->bmap->db_nfree; buf->f_bavail = sbi->bmap->db_nfree; /* * If we really return the number of allocated & free inodes, some * applications will fail because they won't see enough free inodes. * We'll try to calculate some guess as to how may inodes we can * really allocate * * buf->f_files = atomic_read(&imap->im_numinos); * buf->f_ffree = atomic_read(&imap->im_numfree); */ maxinodes = min((s64) atomic_read(&imap->im_numinos) + ((sbi->bmap->db_nfree >> imap->im_l2nbperiext) << L2INOSPEREXT), (s64) 0xffffffffLL); buf->f_files = maxinodes; buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) - atomic_read(&imap->im_numfree)); buf->f_fsid.val[0] = (u32)crc32_le(0, sbi->uuid, sizeof(sbi->uuid)/2); buf->f_fsid.val[1] = (u32)crc32_le(0, sbi->uuid + sizeof(sbi->uuid)/2, sizeof(sbi->uuid)/2); buf->f_namelen = JFS_NAME_MAX; return 0; } static void jfs_put_super(struct super_block *sb) { struct jfs_sb_info *sbi = JFS_SBI(sb); int rc; jfs_info("In jfs_put_super"); dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); rc = jfs_umount(sb); if (rc) jfs_err("jfs_umount failed with return code %d", rc); unload_nls(sbi->nls_tab); truncate_inode_pages(sbi->direct_inode->i_mapping, 0); iput(sbi->direct_inode); kfree(sbi); } enum { Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize, Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota, Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask }; static const match_table_t tokens = { {Opt_integrity, "integrity"}, {Opt_nointegrity, "nointegrity"}, {Opt_iocharset, "iocharset=%s"}, {Opt_resize, "resize=%u"}, {Opt_resize_nosize, "resize"}, {Opt_errors, "errors=%s"}, {Opt_ignore, "noquota"}, {Opt_ignore, "quota"}, {Opt_usrquota, "usrquota"}, {Opt_grpquota, "grpquota"}, {Opt_uid, 
"uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_umask, "umask=%u"}, {Opt_err, NULL} }; static int parse_options(char *options, struct super_block *sb, s64 *newLVSize, int *flag) { void *nls_map = (void *)-1; /* -1: no change; NULL: none */ char *p; struct jfs_sb_info *sbi = JFS_SBI(sb); *newLVSize = 0; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_integrity: *flag &= ~JFS_NOINTEGRITY; break; case Opt_nointegrity: *flag |= JFS_NOINTEGRITY; break; case Opt_ignore: /* Silently ignore the quota options */ /* Don't do anything ;-) */ break; case Opt_iocharset: if (nls_map && nls_map != (void *) -1) unload_nls(nls_map); if (!strcmp(args[0].from, "none")) nls_map = NULL; else { nls_map = load_nls(args[0].from); if (!nls_map) { printk(KERN_ERR "JFS: charset not found\n"); goto cleanup; } } break; case Opt_resize: { char *resize = args[0].from; *newLVSize = simple_strtoull(resize, &resize, 0); break; } case Opt_resize_nosize: { *newLVSize = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits; if (*newLVSize == 0) printk(KERN_ERR "JFS: Cannot determine volume size\n"); break; } case Opt_errors: { char *errors = args[0].from; if (!errors || !*errors) goto cleanup; if (!strcmp(errors, "continue")) { *flag &= ~JFS_ERR_REMOUNT_RO; *flag &= ~JFS_ERR_PANIC; *flag |= JFS_ERR_CONTINUE; } else if (!strcmp(errors, "remount-ro")) { *flag &= ~JFS_ERR_CONTINUE; *flag &= ~JFS_ERR_PANIC; *flag |= JFS_ERR_REMOUNT_RO; } else if (!strcmp(errors, "panic")) { *flag &= ~JFS_ERR_CONTINUE; *flag &= ~JFS_ERR_REMOUNT_RO; *flag |= JFS_ERR_PANIC; } else { printk(KERN_ERR "JFS: %s is an invalid error handler\n", errors); goto cleanup; } break; } #ifdef CONFIG_QUOTA case Opt_quota: case Opt_usrquota: *flag |= JFS_USRQUOTA; break; case Opt_grpquota: *flag |= JFS_GRPQUOTA; break; #else case Opt_usrquota: case Opt_grpquota: case Opt_quota: printk(KERN_ERR "JFS: quota 
operations not supported\n"); break; #endif case Opt_uid: { char *uid = args[0].from; sbi->uid = simple_strtoul(uid, &uid, 0); break; } case Opt_gid: { char *gid = args[0].from; sbi->gid = simple_strtoul(gid, &gid, 0); break; } case Opt_umask: { char *umask = args[0].from; sbi->umask = simple_strtoul(umask, &umask, 8); if (sbi->umask & ~0777) { printk(KERN_ERR "JFS: Invalid value of umask\n"); goto cleanup; } break; } default: printk("jfs: Unrecognized mount option \"%s\" " " or missing value\n", p); goto cleanup; } } if (nls_map != (void *) -1) { /* Discard old (if remount) */ unload_nls(sbi->nls_tab); sbi->nls_tab = nls_map; } return 1; cleanup: if (nls_map && nls_map != (void *) -1) unload_nls(nls_map); return 0; } static int jfs_remount(struct super_block *sb, int *flags, char *data) { s64 newLVSize = 0; int rc = 0; int flag = JFS_SBI(sb)->flag; int ret; if (!parse_options(data, sb, &newLVSize, &flag)) { return -EINVAL; } if (newLVSize) { if (sb->s_flags & MS_RDONLY) { printk(KERN_ERR "JFS: resize requires volume to be mounted read-write\n"); return -EROFS; } rc = jfs_extendfs(sb, newLVSize, 0); if (rc) return rc; } if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) { /* * Invalidate any previously read metadata. 
fsck may have * changed the on-disk data since we mounted r/o */ truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0); JFS_SBI(sb)->flag = flag; ret = jfs_mount_rw(sb, 1); /* mark the fs r/w for quota activity */ sb->s_flags &= ~MS_RDONLY; dquot_resume(sb, -1); return ret; } if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) { rc = dquot_suspend(sb, -1); if (rc < 0) { return rc; } rc = jfs_umount_rw(sb); JFS_SBI(sb)->flag = flag; return rc; } if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY)) if (!(sb->s_flags & MS_RDONLY)) { rc = jfs_umount_rw(sb); if (rc) return rc; JFS_SBI(sb)->flag = flag; ret = jfs_mount_rw(sb, 1); return ret; } JFS_SBI(sb)->flag = flag; return 0; } static int jfs_fill_super(struct super_block *sb, void *data, int silent) { struct jfs_sb_info *sbi; struct inode *inode; int rc; s64 newLVSize = 0; int flag, ret = -EINVAL; jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags); if (!new_valid_dev(sb->s_bdev->bd_dev)) return -EOVERFLOW; sbi = kzalloc(sizeof (struct jfs_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; sb->s_fs_info = sbi; sbi->sb = sb; sbi->uid = sbi->gid = sbi->umask = -1; /* initialize the mount flag and determine the default error handler */ flag = JFS_ERR_REMOUNT_RO; if (!parse_options((char *) data, sb, &newLVSize, &flag)) goto out_kfree; sbi->flag = flag; #ifdef CONFIG_JFS_POSIX_ACL sb->s_flags |= MS_POSIXACL; #endif if (newLVSize) { printk(KERN_ERR "resize option for remount only\n"); goto out_kfree; } /* * Initialize blocksize to 4K. */ sb_set_blocksize(sb, PSIZE); /* * Set method vectors. 
*/ sb->s_op = &jfs_super_operations; sb->s_export_op = &jfs_export_operations; #ifdef CONFIG_QUOTA sb->dq_op = &dquot_operations; sb->s_qcop = &dquot_quotactl_ops; #endif /* * Initialize direct-mapping inode/address-space */ inode = new_inode(sb); if (inode == NULL) { ret = -ENOMEM; goto out_unload; } inode->i_ino = 0; inode->i_nlink = 1; inode->i_size = sb->s_bdev->bd_inode->i_size; inode->i_mapping->a_ops = &jfs_metapage_aops; insert_inode_hash(inode); mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); sbi->direct_inode = inode; rc = jfs_mount(sb); if (rc) { if (!silent) { jfs_err("jfs_mount failed w/return code = %d", rc); } goto out_mount_failed; } if (sb->s_flags & MS_RDONLY) sbi->log = NULL; else { rc = jfs_mount_rw(sb, 0); if (rc) { if (!silent) { jfs_err("jfs_mount_rw failed, return code = %d", rc); } goto out_no_rw; } } sb->s_magic = JFS_SUPER_MAGIC; if (sbi->mntflag & JFS_OS2) sb->s_d_op = &jfs_ci_dentry_operations; inode = jfs_iget(sb, ROOT_I); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto out_no_rw; } sb->s_root = d_alloc_root(inode); if (!sb->s_root) goto out_no_root; /* logical blocks are represented by 40 bits in pxd_t, etc. */ sb->s_maxbytes = ((u64) sb->s_blocksize) << 40; #if BITS_PER_LONG == 32 /* * Page cache is indexed by long. 
* I would use MAX_LFS_FILESIZE, but it's only half as big */ sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, (u64)sb->s_maxbytes); #endif sb->s_time_gran = 1; return 0; out_no_root: jfs_err("jfs_read_super: get root dentry failed"); iput(inode); out_no_rw: rc = jfs_umount(sb); if (rc) { jfs_err("jfs_umount failed with return code %d", rc); } out_mount_failed: filemap_write_and_wait(sbi->direct_inode->i_mapping); truncate_inode_pages(sbi->direct_inode->i_mapping, 0); make_bad_inode(sbi->direct_inode); iput(sbi->direct_inode); sbi->direct_inode = NULL; out_unload: if (sbi->nls_tab) unload_nls(sbi->nls_tab); out_kfree: kfree(sbi); return ret; } static int jfs_freeze(struct super_block *sb) { struct jfs_sb_info *sbi = JFS_SBI(sb); struct jfs_log *log = sbi->log; if (!(sb->s_flags & MS_RDONLY)) { txQuiesce(sb); lmLogShutdown(log); updateSuper(sb, FM_CLEAN); } return 0; } static int jfs_unfreeze(struct super_block *sb) { struct jfs_sb_info *sbi = JFS_SBI(sb); struct jfs_log *log = sbi->log; int rc = 0; if (!(sb->s_flags & MS_RDONLY)) { updateSuper(sb, FM_MOUNT); if ((rc = lmLogInit(log))) jfs_err("jfs_unlock failed with return code %d", rc); else txResume(sb); } return 0; } static struct dentry *jfs_do_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, jfs_fill_super); } static int jfs_sync_fs(struct super_block *sb, int wait) { struct jfs_log *log = JFS_SBI(sb)->log; /* log == NULL indicates read-only mount */ if (log) { jfs_flush_journal(log, wait); jfs_syncpt(log, 0); } return 0; } static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs) { struct jfs_sb_info *sbi = JFS_SBI(vfs->mnt_sb); if (sbi->uid != -1) seq_printf(seq, ",uid=%d", sbi->uid); if (sbi->gid != -1) seq_printf(seq, ",gid=%d", sbi->gid); if (sbi->umask != -1) seq_printf(seq, ",umask=%03o", sbi->umask); if (sbi->flag & JFS_NOINTEGRITY) seq_puts(seq, ",nointegrity"); if (sbi->nls_tab) seq_printf(seq, 
",iocharset=%s", sbi->nls_tab->charset); if (sbi->flag & JFS_ERR_CONTINUE) seq_printf(seq, ",errors=continue"); if (sbi->flag & JFS_ERR_PANIC) seq_printf(seq, ",errors=panic"); #ifdef CONFIG_QUOTA if (sbi->flag & JFS_USRQUOTA) seq_puts(seq, ",usrquota"); if (sbi->flag & JFS_GRPQUOTA) seq_puts(seq, ",grpquota"); #endif return 0; } #ifdef CONFIG_QUOTA /* Read data from quotafile - avoid pagecache and such because we cannot afford * acquiring the locks... As quota files are never truncated and quota code * itself serializes the operations (and no one else should touch the files) * we don't have to be afraid of races */ static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> sb->s_blocksize_bits; int err = 0; int offset = off & (sb->s_blocksize - 1); int tocopy; size_t toread; struct buffer_head tmp_bh; struct buffer_head *bh; loff_t i_size = i_size_read(inode); if (off > i_size) return 0; if (off+len > i_size) len = i_size-off; toread = len; while (toread > 0) { tocopy = sb->s_blocksize - offset < toread ? sb->s_blocksize - offset : toread; tmp_bh.b_state = 0; tmp_bh.b_size = 1 << inode->i_blkbits; err = jfs_get_block(inode, blk, &tmp_bh, 0); if (err) return err; if (!buffer_mapped(&tmp_bh)) /* A hole? 
*/ memset(data, 0, tocopy); else { bh = sb_bread(sb, tmp_bh.b_blocknr); if (!bh) return -EIO; memcpy(data, bh->b_data+offset, tocopy); brelse(bh); } offset = 0; toread -= tocopy; data += tocopy; blk++; } return len; } /* Write to quotafile */ static ssize_t jfs_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> sb->s_blocksize_bits; int err = 0; int offset = off & (sb->s_blocksize - 1); int tocopy; size_t towrite = len; struct buffer_head tmp_bh; struct buffer_head *bh; mutex_lock(&inode->i_mutex); while (towrite > 0) { tocopy = sb->s_blocksize - offset < towrite ? sb->s_blocksize - offset : towrite; tmp_bh.b_state = 0; tmp_bh.b_size = 1 << inode->i_blkbits; err = jfs_get_block(inode, blk, &tmp_bh, 1); if (err) goto out; if (offset || tocopy != sb->s_blocksize) bh = sb_bread(sb, tmp_bh.b_blocknr); else bh = sb_getblk(sb, tmp_bh.b_blocknr); if (!bh) { err = -EIO; goto out; } lock_buffer(bh); memcpy(bh->b_data+offset, data, tocopy); flush_dcache_page(bh->b_page); set_buffer_uptodate(bh); mark_buffer_dirty(bh); unlock_buffer(bh); brelse(bh); offset = 0; towrite -= tocopy; data += tocopy; blk++; } out: if (len == towrite) { mutex_unlock(&inode->i_mutex); return err; } if (inode->i_size < off+len-towrite) i_size_write(inode, off+len-towrite); inode->i_version++; inode->i_mtime = inode->i_ctime = CURRENT_TIME; mark_inode_dirty(inode); mutex_unlock(&inode->i_mutex); return len - towrite; } #endif static const struct super_operations jfs_super_operations = { .alloc_inode = jfs_alloc_inode, .destroy_inode = jfs_destroy_inode, .dirty_inode = jfs_dirty_inode, .write_inode = jfs_write_inode, .evict_inode = jfs_evict_inode, .put_super = jfs_put_super, .sync_fs = jfs_sync_fs, .freeze_fs = jfs_freeze, .unfreeze_fs = jfs_unfreeze, .statfs = jfs_statfs, .remount_fs = jfs_remount, .show_options = jfs_show_options, #ifdef CONFIG_QUOTA .quota_read = jfs_quota_read, 
.quota_write = jfs_quota_write, #endif }; static const struct export_operations jfs_export_operations = { .fh_to_dentry = jfs_fh_to_dentry, .fh_to_parent = jfs_fh_to_parent, .get_parent = jfs_get_parent, }; static struct file_system_type jfs_fs_type = { .owner = THIS_MODULE, .name = "jfs", .mount = jfs_do_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; static void init_once(void *foo) { struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo; memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); INIT_LIST_HEAD(&jfs_ip->anon_inode_list); init_rwsem(&jfs_ip->rdwrlock); mutex_init(&jfs_ip->commit_mutex); init_rwsem(&jfs_ip->xattr_sem); spin_lock_init(&jfs_ip->ag_lock); jfs_ip->active_ag = -1; inode_init_once(&jfs_ip->vfs_inode); } static int __init init_jfs_fs(void) { int i; int rc; jfs_inode_cachep = kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, init_once); if (jfs_inode_cachep == NULL) return -ENOMEM; /* * Metapage initialization */ rc = metapage_init(); if (rc) { jfs_err("metapage_init failed w/rc = %d", rc); goto free_slab; } /* * Transaction Manager initialization */ rc = txInit(); if (rc) { jfs_err("txInit failed w/rc = %d", rc); goto free_metapage; } /* * I/O completion thread (endio) */ jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO"); if (IS_ERR(jfsIOthread)) { rc = PTR_ERR(jfsIOthread); jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); goto end_txmngr; } if (commit_threads < 1) commit_threads = num_online_cpus(); if (commit_threads > MAX_COMMIT_THREADS) commit_threads = MAX_COMMIT_THREADS; for (i = 0; i < commit_threads; i++) { jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL, "jfsCommit"); if (IS_ERR(jfsCommitThread[i])) { rc = PTR_ERR(jfsCommitThread[i]); jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); commit_threads = i; goto kill_committask; } } jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync"); if (IS_ERR(jfsSyncThread)) { rc = PTR_ERR(jfsSyncThread); 
jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); goto kill_committask; } #ifdef PROC_FS_JFS jfs_proc_init(); #endif return register_filesystem(&jfs_fs_type); kill_committask: for (i = 0; i < commit_threads; i++) kthread_stop(jfsCommitThread[i]); kthread_stop(jfsIOthread); end_txmngr: txExit(); free_metapage: metapage_exit(); free_slab: kmem_cache_destroy(jfs_inode_cachep); return rc; } static void __exit exit_jfs_fs(void) { int i; jfs_info("exit_jfs_fs called"); txExit(); metapage_exit(); kthread_stop(jfsIOthread); for (i = 0; i < commit_threads; i++) kthread_stop(jfsCommitThread[i]); kthread_stop(jfsSyncThread); #ifdef PROC_FS_JFS jfs_proc_clean(); #endif unregister_filesystem(&jfs_fs_type); kmem_cache_destroy(jfs_inode_cachep); } module_init(init_jfs_fs) module_exit(exit_jfs_fs)
gpl-2.0
ModADroid/android-omap-tuna
sound/pci/au88x0/au88x0_core.c
3096
79116
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Library General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Vortex core low level functions. Author: Manuel Jander (mjander@users.sourceforge.cl) These functions are mainly the result of translations made from the original disassembly of the au88x0 binary drivers, written by Aureal before they went down. Many thanks to the Jeff Muizelaar, Kester Maddock, and whoever contributed to the OpenVortex project. The author of this file, put the few available pieces together and translated the rest of the riddle (Mix, Src and connection stuff). Some things are still to be discovered, and their meanings are unclear. Some of these functions aren't intended to be really used, rather to help to understand how does the AU88X0 chips work. Keep them in, because they could be used somewhere in the future. This code hasn't been tested or proof read thoroughly. If you wanna help, take a look at the AU88X0 assembly and check if this matches. Functions tested ok so far are (they show the desired effect at least): vortex_routes(); (1 bug fixed). vortex_adb_addroute(); vortex_adb_addroutes(); vortex_connect_codecplay(); vortex_src_flushbuffers(); vortex_adbdma_setmode(); note: still some unknown arguments! vortex_adbdma_startfifo(); vortex_adbdma_stopfifo(); vortex_fifo_setadbctrl(); note: still some unknown arguments! 
vortex_mix_setinputvolumebyte(); vortex_mix_enableinput(); vortex_mixer_addWTD(); (fixed) vortex_connection_adbdma_src_src(); vortex_connection_adbdma_src(); vortex_src_change_convratio(); vortex_src_addWTD(); (fixed) History: 01-03-2003 First revision. 01-21-2003 Some bug fixes. 17-02-2003 many bugfixes after a big versioning mess. 18-02-2003 JAAAAAHHHUUUUUU!!!! The mixer works !! I'm just so happy ! (2 hours later...) I cant believe it! Im really lucky today. Now the SRC is working too! Yeah! XMMS works ! 20-02-2003 First steps into the ALSA world. 28-02-2003 As my birthday present, i discovered how the DMA buffer pages really work :-). It was all wrong. 12-03-2003 ALSA driver starts working (2 channels). 16-03-2003 More srcblock_setupchannel discoveries. 12-04-2003 AU8830 playback support. Recording in the works. 17-04-2003 vortex_route() and vortex_routes() bug fixes. AU8830 recording works now, but chipn' dale effect is still there. 16-05-2003 SrcSetupChannel cleanup. Moved the Src setup stuff entirely into au88x0_pcm.c . 06-06-2003 Buffer shifter bugfix. Mixer volume fix. 07-12-2003 A3D routing finally fixed. Believed to be OK. 25-03-2004 Many thanks to Claudia, for such valuable bug reports. */ #include "au88x0.h" #include "au88x0_a3d.h" #include <linux/delay.h> /* MIXER (CAsp4Mix.s and CAsp4Mixer.s) */ // FIXME: get rid of this. 
static int mchannels[NR_MIXIN]; static int rampchs[NR_MIXIN]; static void vortex_mixer_en_sr(vortex_t * vortex, int channel) { hwwrite(vortex->mmio, VORTEX_MIXER_SR, hwread(vortex->mmio, VORTEX_MIXER_SR) | (0x1 << channel)); } static void vortex_mixer_dis_sr(vortex_t * vortex, int channel) { hwwrite(vortex->mmio, VORTEX_MIXER_SR, hwread(vortex->mmio, VORTEX_MIXER_SR) & ~(0x1 << channel)); } #if 0 static void vortex_mix_muteinputgain(vortex_t * vortex, unsigned char mix, unsigned char channel) { hwwrite(vortex->mmio, VORTEX_MIX_INVOL_A + ((mix << 5) + channel), 0x80); hwwrite(vortex->mmio, VORTEX_MIX_INVOL_B + ((mix << 5) + channel), 0x80); } static int vortex_mix_getvolume(vortex_t * vortex, unsigned char mix) { int a; a = hwread(vortex->mmio, VORTEX_MIX_VOL_A + (mix << 2)) & 0xff; //FP2LinearFrac(a); return (a); } static int vortex_mix_getinputvolume(vortex_t * vortex, unsigned char mix, int channel, int *vol) { int a; if (!(mchannels[mix] & (1 << channel))) return 0; a = hwread(vortex->mmio, VORTEX_MIX_INVOL_A + (((mix << 5) + channel) << 2)); /* if (rampchs[mix] == 0) a = FP2LinearFrac(a); else a = FP2LinearFracWT(a); */ *vol = a; return (0); } static unsigned int vortex_mix_boost6db(unsigned char vol) { return (vol + 8); /* WOW! what a complex function! */ } static void vortex_mix_rampvolume(vortex_t * vortex, int mix) { int ch; char a; // This function is intended for ramping down only (see vortex_disableinput()). 
for (ch = 0; ch < 0x20; ch++) { if (((1 << ch) & rampchs[mix]) == 0) continue; a = hwread(vortex->mmio, VORTEX_MIX_INVOL_B + (((mix << 5) + ch) << 2)); if (a > -126) { a -= 2; hwwrite(vortex->mmio, VORTEX_MIX_INVOL_A + (((mix << 5) + ch) << 2), a); hwwrite(vortex->mmio, VORTEX_MIX_INVOL_B + (((mix << 5) + ch) << 2), a); } else vortex_mix_killinput(vortex, mix, ch); } } static int vortex_mix_getenablebit(vortex_t * vortex, unsigned char mix, int mixin) { int addr, temp; if (mixin >= 0) addr = mixin; else addr = mixin + 3; addr = ((mix << 3) + (addr >> 2)) << 2; temp = hwread(vortex->mmio, VORTEX_MIX_ENIN + addr); return ((temp >> (mixin & 3)) & 1); } #endif static void vortex_mix_setvolumebyte(vortex_t * vortex, unsigned char mix, unsigned char vol) { int temp; hwwrite(vortex->mmio, VORTEX_MIX_VOL_A + (mix << 2), vol); if (1) { /*if (this_10) */ temp = hwread(vortex->mmio, VORTEX_MIX_VOL_B + (mix << 2)); if ((temp != 0x80) || (vol == 0x80)) return; } hwwrite(vortex->mmio, VORTEX_MIX_VOL_B + (mix << 2), vol); } static void vortex_mix_setinputvolumebyte(vortex_t * vortex, unsigned char mix, int mixin, unsigned char vol) { int temp; hwwrite(vortex->mmio, VORTEX_MIX_INVOL_A + (((mix << 5) + mixin) << 2), vol); if (1) { /* this_10, initialized to 1. */ temp = hwread(vortex->mmio, VORTEX_MIX_INVOL_B + (((mix << 5) + mixin) << 2)); if ((temp != 0x80) || (vol == 0x80)) return; } hwwrite(vortex->mmio, VORTEX_MIX_INVOL_B + (((mix << 5) + mixin) << 2), vol); } static void vortex_mix_setenablebit(vortex_t * vortex, unsigned char mix, int mixin, int en) { int temp, addr; if (mixin < 0) addr = (mixin + 3); else addr = mixin; addr = ((mix << 3) + (addr >> 2)) << 2; temp = hwread(vortex->mmio, VORTEX_MIX_ENIN + addr); if (en) temp |= (1 << (mixin & 3)); else temp &= ~(1 << (mixin & 3)); /* Mute input. Astatic void crackling? */ hwwrite(vortex->mmio, VORTEX_MIX_INVOL_B + (((mix << 5) + mixin) << 2), 0x80); /* Looks like clear buffer. 
*/ hwwrite(vortex->mmio, VORTEX_MIX_SMP + (mixin << 2), 0x0); hwwrite(vortex->mmio, VORTEX_MIX_SMP + 4 + (mixin << 2), 0x0); /* Write enable bit. */ hwwrite(vortex->mmio, VORTEX_MIX_ENIN + addr, temp); } static void vortex_mix_killinput(vortex_t * vortex, unsigned char mix, int mixin) { rampchs[mix] &= ~(1 << mixin); vortex_mix_setinputvolumebyte(vortex, mix, mixin, 0x80); mchannels[mix] &= ~(1 << mixin); vortex_mix_setenablebit(vortex, mix, mixin, 0); } static void vortex_mix_enableinput(vortex_t * vortex, unsigned char mix, int mixin) { vortex_mix_killinput(vortex, mix, mixin); if ((mchannels[mix] & (1 << mixin)) == 0) { vortex_mix_setinputvolumebyte(vortex, mix, mixin, 0x80); /*0x80 : mute */ mchannels[mix] |= (1 << mixin); } vortex_mix_setenablebit(vortex, mix, mixin, 1); } static void vortex_mix_disableinput(vortex_t * vortex, unsigned char mix, int channel, int ramp) { if (ramp) { rampchs[mix] |= (1 << channel); // Register callback. //vortex_mix_startrampvolume(vortex); vortex_mix_killinput(vortex, mix, channel); } else vortex_mix_killinput(vortex, mix, channel); } static int vortex_mixer_addWTD(vortex_t * vortex, unsigned char mix, unsigned char ch) { int temp, lifeboat = 0, prev; temp = hwread(vortex->mmio, VORTEX_MIXER_SR); if ((temp & (1 << ch)) == 0) { hwwrite(vortex->mmio, VORTEX_MIXER_CHNBASE + (ch << 2), mix); vortex_mixer_en_sr(vortex, ch); return 1; } prev = VORTEX_MIXER_CHNBASE + (ch << 2); temp = hwread(vortex->mmio, prev); while (temp & 0x10) { prev = VORTEX_MIXER_RTBASE + ((temp & 0xf) << 2); temp = hwread(vortex->mmio, prev); //printk(KERN_INFO "vortex: mixAddWTD: while addr=%x, val=%x\n", prev, temp); if ((++lifeboat) > 0xf) { printk(KERN_ERR "vortex_mixer_addWTD: lifeboat overflow\n"); return 0; } } hwwrite(vortex->mmio, VORTEX_MIXER_RTBASE + ((temp & 0xf) << 2), mix); hwwrite(vortex->mmio, prev, (temp & 0xf) | 0x10); return 1; } static int vortex_mixer_delWTD(vortex_t * vortex, unsigned char mix, unsigned char ch) { int esp14 = -1, esp18, 
eax, ebx, edx, ebp, esi = 0; //int esp1f=edi(while)=src, esp10=ch; eax = hwread(vortex->mmio, VORTEX_MIXER_SR); if (((1 << ch) & eax) == 0) { printk(KERN_ERR "mix ALARM %x\n", eax); return 0; } ebp = VORTEX_MIXER_CHNBASE + (ch << 2); esp18 = hwread(vortex->mmio, ebp); if (esp18 & 0x10) { ebx = (esp18 & 0xf); if (mix == ebx) { ebx = VORTEX_MIXER_RTBASE + (mix << 2); edx = hwread(vortex->mmio, ebx); //7b60 hwwrite(vortex->mmio, ebp, edx); hwwrite(vortex->mmio, ebx, 0); } else { //7ad3 edx = hwread(vortex->mmio, VORTEX_MIXER_RTBASE + (ebx << 2)); //printk(KERN_INFO "vortex: mixdelWTD: 1 addr=%x, val=%x, src=%x\n", ebx, edx, src); while ((edx & 0xf) != mix) { if ((esi) > 0xf) { printk(KERN_ERR "vortex: mixdelWTD: error lifeboat overflow\n"); return 0; } esp14 = ebx; ebx = edx & 0xf; ebp = ebx << 2; edx = hwread(vortex->mmio, VORTEX_MIXER_RTBASE + ebp); //printk(KERN_INFO "vortex: mixdelWTD: while addr=%x, val=%x\n", ebp, edx); esi++; } //7b30 ebp = ebx << 2; if (edx & 0x10) { /* Delete entry in between others */ ebx = VORTEX_MIXER_RTBASE + ((edx & 0xf) << 2); edx = hwread(vortex->mmio, ebx); //7b60 hwwrite(vortex->mmio, VORTEX_MIXER_RTBASE + ebp, edx); hwwrite(vortex->mmio, ebx, 0); //printk(KERN_INFO "vortex mixdelWTD between addr= 0x%x, val= 0x%x\n", ebp, edx); } else { /* Delete last entry */ //7b83 if (esp14 == -1) hwwrite(vortex->mmio, VORTEX_MIXER_CHNBASE + (ch << 2), esp18 & 0xef); else { ebx = (0xffffffe0 & edx) | (0xf & ebx); hwwrite(vortex->mmio, VORTEX_MIXER_RTBASE + (esp14 << 2), ebx); //printk(KERN_INFO "vortex mixdelWTD last addr= 0x%x, val= 0x%x\n", esp14, ebx); } hwwrite(vortex->mmio, VORTEX_MIXER_RTBASE + ebp, 0); return 1; } } } else { //printk(KERN_INFO "removed last mix\n"); //7be0 vortex_mixer_dis_sr(vortex, ch); hwwrite(vortex->mmio, ebp, 0); } return 1; } static void vortex_mixer_init(vortex_t * vortex) { u32 addr; int x; // FIXME: get rid of this crap. 
memset(mchannels, 0, NR_MIXOUT * sizeof(int)); memset(rampchs, 0, NR_MIXOUT * sizeof(int)); addr = VORTEX_MIX_SMP + 0x17c; for (x = 0x5f; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0); addr -= 4; } addr = VORTEX_MIX_ENIN + 0x1fc; for (x = 0x7f; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0); addr -= 4; } addr = VORTEX_MIX_SMP + 0x17c; for (x = 0x5f; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0); addr -= 4; } addr = VORTEX_MIX_INVOL_A + 0x7fc; for (x = 0x1ff; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0x80); addr -= 4; } addr = VORTEX_MIX_VOL_A + 0x3c; for (x = 0xf; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0x80); addr -= 4; } addr = VORTEX_MIX_INVOL_B + 0x7fc; for (x = 0x1ff; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0x80); addr -= 4; } addr = VORTEX_MIX_VOL_B + 0x3c; for (x = 0xf; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0x80); addr -= 4; } addr = VORTEX_MIXER_RTBASE + (MIXER_RTBASE_SIZE - 1) * 4; for (x = (MIXER_RTBASE_SIZE - 1); x >= 0; x--) { hwwrite(vortex->mmio, addr, 0x0); addr -= 4; } hwwrite(vortex->mmio, VORTEX_MIXER_SR, 0); /* Set clipping ceiling (this may be all wrong). */ /* for (x = 0; x < 0x80; x++) { hwwrite(vortex->mmio, VORTEX_MIXER_CLIP + (x << 2), 0x3ffff); } */ /* call CAsp4Mix__Initialize_CAsp4HwIO____CAsp4Mixer____ Register ISR callback for volume smooth fade out. Maybe this avoids clicks when press "stop" ? 
*/ } /* SRC (CAsp4Src.s and CAsp4SrcBlock) */ static void vortex_src_en_sr(vortex_t * vortex, int channel) { hwwrite(vortex->mmio, VORTEX_SRCBLOCK_SR, hwread(vortex->mmio, VORTEX_SRCBLOCK_SR) | (0x1 << channel)); } static void vortex_src_dis_sr(vortex_t * vortex, int channel) { hwwrite(vortex->mmio, VORTEX_SRCBLOCK_SR, hwread(vortex->mmio, VORTEX_SRCBLOCK_SR) & ~(0x1 << channel)); } static void vortex_src_flushbuffers(vortex_t * vortex, unsigned char src) { int i; for (i = 0x1f; i >= 0; i--) hwwrite(vortex->mmio, VORTEX_SRC_DATA0 + (src << 7) + (i << 2), 0); hwwrite(vortex->mmio, VORTEX_SRC_DATA + (src << 3), 0); hwwrite(vortex->mmio, VORTEX_SRC_DATA + (src << 3) + 4, 0); } static void vortex_src_cleardrift(vortex_t * vortex, unsigned char src) { hwwrite(vortex->mmio, VORTEX_SRC_DRIFT0 + (src << 2), 0); hwwrite(vortex->mmio, VORTEX_SRC_DRIFT1 + (src << 2), 0); hwwrite(vortex->mmio, VORTEX_SRC_DRIFT2 + (src << 2), 1); } static void vortex_src_set_throttlesource(vortex_t * vortex, unsigned char src, int en) { int temp; temp = hwread(vortex->mmio, VORTEX_SRC_SOURCE); if (en) temp |= 1 << src; else temp &= ~(1 << src); hwwrite(vortex->mmio, VORTEX_SRC_SOURCE, temp); } static int vortex_src_persist_convratio(vortex_t * vortex, unsigned char src, int ratio) { int temp, lifeboat = 0; do { hwwrite(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2), ratio); temp = hwread(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2)); if ((++lifeboat) > 0x9) { printk(KERN_ERR "Vortex: Src cvr fail\n"); break; } } while (temp != ratio); return temp; } #if 0 static void vortex_src_slowlock(vortex_t * vortex, unsigned char src) { int temp; hwwrite(vortex->mmio, VORTEX_SRC_DRIFT2 + (src << 2), 1); hwwrite(vortex->mmio, VORTEX_SRC_DRIFT0 + (src << 2), 0); temp = hwread(vortex->mmio, VORTEX_SRC_U0 + (src << 2)); if (temp & 0x200) hwwrite(vortex->mmio, VORTEX_SRC_U0 + (src << 2), temp & ~0x200L); } static void vortex_src_change_convratio(vortex_t * vortex, unsigned char src, int ratio) { int temp, 
a; if ((ratio & 0x10000) && (ratio != 0x10000)) { if (ratio & 0x3fff) a = (0x11 - ((ratio >> 0xe) & 0x3)) - 1; else a = (0x11 - ((ratio >> 0xe) & 0x3)) - 2; } else a = 0xc; temp = hwread(vortex->mmio, VORTEX_SRC_U0 + (src << 2)); if (((temp >> 4) & 0xf) != a) hwwrite(vortex->mmio, VORTEX_SRC_U0 + (src << 2), (temp & 0xf) | ((a & 0xf) << 4)); vortex_src_persist_convratio(vortex, src, ratio); } static int vortex_src_checkratio(vortex_t * vortex, unsigned char src, unsigned int desired_ratio) { int hw_ratio, lifeboat = 0; hw_ratio = hwread(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2)); while (hw_ratio != desired_ratio) { hwwrite(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2), desired_ratio); if ((lifeboat++) > 15) { printk(KERN_ERR "Vortex: could not set src-%d from %d to %d\n", src, hw_ratio, desired_ratio); break; } } return hw_ratio; } #endif /* Objective: Set samplerate for given SRC module. Arguments: card: pointer to vortex_t strcut. src: Integer index of the SRC module. cr: Current sample rate conversion factor. b: unknown 16 bit value. sweep: Enable Samplerate fade from cr toward tr flag. dirplay: 1: playback, 0: recording. sl: Slow Lock flag. tr: Target samplerate conversion. thsource: Throttle source flag (no idea what that means). */ static void vortex_src_setupchannel(vortex_t * card, unsigned char src, unsigned int cr, unsigned int b, int sweep, int d, int dirplay, int sl, unsigned int tr, int thsource) { // noplayback: d=2,4,7,0xa,0xb when using first 2 src's. // c: enables pitch sweep. // looks like g is c related. Maybe g is a sweep parameter ? // g = cvr // dirplay: 0 = recording, 1 = playback // d = src hw index. 
int esi, ebp = 0, esp10; vortex_src_flushbuffers(card, src); if (sweep) { if ((tr & 0x10000) && (tr != 0x10000)) { tr = 0; esi = 0x7; } else { if ((((short)tr) < 0) && (tr != 0x8000)) { tr = 0; esi = 0x8; } else { tr = 1; esi = 0xc; } } } else { if ((cr & 0x10000) && (cr != 0x10000)) { tr = 0; /*ebx = 0 */ esi = 0x11 - ((cr >> 0xe) & 7); if (cr & 0x3fff) esi -= 1; else esi -= 2; } else { tr = 1; esi = 0xc; } } vortex_src_cleardrift(card, src); vortex_src_set_throttlesource(card, src, thsource); if ((dirplay == 0) && (sweep == 0)) { if (tr) esp10 = 0xf; else esp10 = 0xc; ebp = 0; } else { if (tr) ebp = 0xf; else ebp = 0xc; esp10 = 0; } hwwrite(card->mmio, VORTEX_SRC_U0 + (src << 2), (sl << 0x9) | (sweep << 0x8) | ((esi & 0xf) << 4) | d); /* 0xc0 esi=0xc c=f=0 d=0 */ vortex_src_persist_convratio(card, src, cr); hwwrite(card->mmio, VORTEX_SRC_U1 + (src << 2), b & 0xffff); /* 0 b=0 */ hwwrite(card->mmio, VORTEX_SRC_U2 + (src << 2), (tr << 0x11) | (dirplay << 0x10) | (ebp << 0x8) | esp10); /* 0x30f00 e=g=1 esp10=0 ebp=f */ //printk(KERN_INFO "vortex: SRC %d, d=0x%x, esi=0x%x, esp10=0x%x, ebp=0x%x\n", src, d, esi, esp10, ebp); } static void vortex_srcblock_init(vortex_t * vortex) { u32 addr; int x; hwwrite(vortex->mmio, VORTEX_SRC_SOURCESIZE, 0x1ff); /* for (x=0; x<0x10; x++) { vortex_src_init(&vortex_src[x], x); } */ //addr = 0xcc3c; //addr = 0x26c3c; addr = VORTEX_SRC_RTBASE + 0x3c; for (x = 0xf; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0); addr -= 4; } //addr = 0xcc94; //addr = 0x26c94; addr = VORTEX_SRC_CHNBASE + 0x54; for (x = 0x15; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0); addr -= 4; } } static int vortex_src_addWTD(vortex_t * vortex, unsigned char src, unsigned char ch) { int temp, lifeboat = 0, prev; // esp13 = src temp = hwread(vortex->mmio, VORTEX_SRCBLOCK_SR); if ((temp & (1 << ch)) == 0) { hwwrite(vortex->mmio, VORTEX_SRC_CHNBASE + (ch << 2), src); vortex_src_en_sr(vortex, ch); return 1; } prev = VORTEX_SRC_CHNBASE + (ch << 2); /*ebp */ temp = 
hwread(vortex->mmio, prev); //while (temp & NR_SRC) { while (temp & 0x10) { prev = VORTEX_SRC_RTBASE + ((temp & 0xf) << 2); /*esp12 */ //prev = VORTEX_SRC_RTBASE + ((temp & (NR_SRC-1)) << 2); /*esp12*/ temp = hwread(vortex->mmio, prev); //printk(KERN_INFO "vortex: srcAddWTD: while addr=%x, val=%x\n", prev, temp); if ((++lifeboat) > 0xf) { printk(KERN_ERR "vortex_src_addWTD: lifeboat overflow\n"); return 0; } } hwwrite(vortex->mmio, VORTEX_SRC_RTBASE + ((temp & 0xf) << 2), src); //hwwrite(vortex->mmio, prev, (temp & (NR_SRC-1)) | NR_SRC); hwwrite(vortex->mmio, prev, (temp & 0xf) | 0x10); return 1; } static int vortex_src_delWTD(vortex_t * vortex, unsigned char src, unsigned char ch) { int esp14 = -1, esp18, eax, ebx, edx, ebp, esi = 0; //int esp1f=edi(while)=src, esp10=ch; eax = hwread(vortex->mmio, VORTEX_SRCBLOCK_SR); if (((1 << ch) & eax) == 0) { printk(KERN_ERR "src alarm\n"); return 0; } ebp = VORTEX_SRC_CHNBASE + (ch << 2); esp18 = hwread(vortex->mmio, ebp); if (esp18 & 0x10) { ebx = (esp18 & 0xf); if (src == ebx) { ebx = VORTEX_SRC_RTBASE + (src << 2); edx = hwread(vortex->mmio, ebx); //7b60 hwwrite(vortex->mmio, ebp, edx); hwwrite(vortex->mmio, ebx, 0); } else { //7ad3 edx = hwread(vortex->mmio, VORTEX_SRC_RTBASE + (ebx << 2)); //printk(KERN_INFO "vortex: srcdelWTD: 1 addr=%x, val=%x, src=%x\n", ebx, edx, src); while ((edx & 0xf) != src) { if ((esi) > 0xf) { printk ("vortex: srcdelWTD: error, lifeboat overflow\n"); return 0; } esp14 = ebx; ebx = edx & 0xf; ebp = ebx << 2; edx = hwread(vortex->mmio, VORTEX_SRC_RTBASE + ebp); //printk(KERN_INFO "vortex: srcdelWTD: while addr=%x, val=%x\n", ebp, edx); esi++; } //7b30 ebp = ebx << 2; if (edx & 0x10) { /* Delete entry in between others */ ebx = VORTEX_SRC_RTBASE + ((edx & 0xf) << 2); edx = hwread(vortex->mmio, ebx); //7b60 hwwrite(vortex->mmio, VORTEX_SRC_RTBASE + ebp, edx); hwwrite(vortex->mmio, ebx, 0); //printk(KERN_INFO "vortex srcdelWTD between addr= 0x%x, val= 0x%x\n", ebp, edx); } else { /* Delete last 
entry */ //7b83 if (esp14 == -1) hwwrite(vortex->mmio, VORTEX_SRC_CHNBASE + (ch << 2), esp18 & 0xef); else { ebx = (0xffffffe0 & edx) | (0xf & ebx); hwwrite(vortex->mmio, VORTEX_SRC_RTBASE + (esp14 << 2), ebx); //printk(KERN_INFO"vortex srcdelWTD last addr= 0x%x, val= 0x%x\n", esp14, ebx); } hwwrite(vortex->mmio, VORTEX_SRC_RTBASE + ebp, 0); return 1; } } } else { //7be0 vortex_src_dis_sr(vortex, ch); hwwrite(vortex->mmio, ebp, 0); } return 1; } /*FIFO*/ static void vortex_fifo_clearadbdata(vortex_t * vortex, int fifo, int x) { for (x--; x >= 0; x--) hwwrite(vortex->mmio, VORTEX_FIFO_ADBDATA + (((fifo << FIFO_SIZE_BITS) + x) << 2), 0); } #if 0 static void vortex_fifo_adbinitialize(vortex_t * vortex, int fifo, int j) { vortex_fifo_clearadbdata(vortex, fifo, FIFO_SIZE); #ifdef CHIP_AU8820 hwwrite(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2), (FIFO_U1 | ((j & FIFO_MASK) << 0xb))); #else hwwrite(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2), (FIFO_U1 | ((j & FIFO_MASK) << 0xc))); #endif } #endif static void vortex_fifo_setadbvalid(vortex_t * vortex, int fifo, int en) { hwwrite(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2), (hwread(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2)) & 0xffffffef) | ((1 & en) << 4) | FIFO_U1); } static void vortex_fifo_setadbctrl(vortex_t * vortex, int fifo, int b, int priority, int empty, int valid, int f) { int temp, lifeboat = 0; //int this_8[NR_ADB] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; /* position */ int this_4 = 0x2; /* f seems priority related. * CAsp4AdbDma::SetPriority is the only place that calls SetAdbCtrl with f set to 1 * every where else it is set to 0. It seems, however, that CAsp4AdbDma::SetPriority * is never called, thus the f related bits remain a mystery for now. 
*/ do { temp = hwread(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2)); if (lifeboat++ > 0xbb8) { printk(KERN_ERR "Vortex: vortex_fifo_setadbctrl fail\n"); break; } } while (temp & FIFO_RDONLY); // AU8830 semes to take some special care about fifo content (data). // But i'm just to lazy to translate that :) if (valid) { if ((temp & FIFO_VALID) == 0) { //this_8[fifo] = 0; vortex_fifo_clearadbdata(vortex, fifo, FIFO_SIZE); // this_4 #ifdef CHIP_AU8820 temp = (this_4 & 0x1f) << 0xb; #else temp = (this_4 & 0x3f) << 0xc; #endif temp = (temp & 0xfffffffd) | ((b & 1) << 1); temp = (temp & 0xfffffff3) | ((priority & 3) << 2); temp = (temp & 0xffffffef) | ((valid & 1) << 4); temp |= FIFO_U1; temp = (temp & 0xffffffdf) | ((empty & 1) << 5); #ifdef CHIP_AU8820 temp = (temp & 0xfffbffff) | ((f & 1) << 0x12); #endif #ifdef CHIP_AU8830 temp = (temp & 0xf7ffffff) | ((f & 1) << 0x1b); temp = (temp & 0xefffffff) | ((f & 1) << 0x1c); #endif #ifdef CHIP_AU8810 temp = (temp & 0xfeffffff) | ((f & 1) << 0x18); temp = (temp & 0xfdffffff) | ((f & 1) << 0x19); #endif } } else { if (temp & FIFO_VALID) { #ifdef CHIP_AU8820 temp = ((f & 1) << 0x12) | (temp & 0xfffbffef); #endif #ifdef CHIP_AU8830 temp = ((f & 1) << 0x1b) | (temp & 0xe7ffffef) | FIFO_BITS; #endif #ifdef CHIP_AU8810 temp = ((f & 1) << 0x18) | (temp & 0xfcffffef) | FIFO_BITS; #endif } else /*if (this_8[fifo]) */ vortex_fifo_clearadbdata(vortex, fifo, FIFO_SIZE); } hwwrite(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2), temp); hwread(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2)); } #ifndef CHIP_AU8810 static void vortex_fifo_clearwtdata(vortex_t * vortex, int fifo, int x) { if (x < 1) return; for (x--; x >= 0; x--) hwwrite(vortex->mmio, VORTEX_FIFO_WTDATA + (((fifo << FIFO_SIZE_BITS) + x) << 2), 0); } static void vortex_fifo_wtinitialize(vortex_t * vortex, int fifo, int j) { vortex_fifo_clearwtdata(vortex, fifo, FIFO_SIZE); #ifdef CHIP_AU8820 hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), (FIFO_U1 | ((j & FIFO_MASK) 
<< 0xb))); #else hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), (FIFO_U1 | ((j & FIFO_MASK) << 0xc))); #endif } static void vortex_fifo_setwtvalid(vortex_t * vortex, int fifo, int en) { hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), (hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2)) & 0xffffffef) | ((en & 1) << 4) | FIFO_U1); } static void vortex_fifo_setwtctrl(vortex_t * vortex, int fifo, int ctrl, int priority, int empty, int valid, int f) { int temp = 0, lifeboat = 0; int this_4 = 2; do { temp = hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2)); if (lifeboat++ > 0xbb8) { printk(KERN_ERR "Vortex: vortex_fifo_setwtctrl fail\n"); break; } } while (temp & FIFO_RDONLY); if (valid) { if ((temp & FIFO_VALID) == 0) { vortex_fifo_clearwtdata(vortex, fifo, FIFO_SIZE); // this_4 #ifdef CHIP_AU8820 temp = (this_4 & 0x1f) << 0xb; #else temp = (this_4 & 0x3f) << 0xc; #endif temp = (temp & 0xfffffffd) | ((ctrl & 1) << 1); temp = (temp & 0xfffffff3) | ((priority & 3) << 2); temp = (temp & 0xffffffef) | ((valid & 1) << 4); temp |= FIFO_U1; temp = (temp & 0xffffffdf) | ((empty & 1) << 5); #ifdef CHIP_AU8820 temp = (temp & 0xfffbffff) | ((f & 1) << 0x12); #endif #ifdef CHIP_AU8830 temp = (temp & 0xf7ffffff) | ((f & 1) << 0x1b); temp = (temp & 0xefffffff) | ((f & 1) << 0x1c); #endif #ifdef CHIP_AU8810 temp = (temp & 0xfeffffff) | ((f & 1) << 0x18); temp = (temp & 0xfdffffff) | ((f & 1) << 0x19); #endif } } else { if (temp & FIFO_VALID) { #ifdef CHIP_AU8820 temp = ((f & 1) << 0x12) | (temp & 0xfffbffef); #endif #ifdef CHIP_AU8830 temp = ((f & 1) << 0x1b) | (temp & 0xe7ffffef) | FIFO_BITS; #endif #ifdef CHIP_AU8810 temp = ((f & 1) << 0x18) | (temp & 0xfcffffef) | FIFO_BITS; #endif } else /*if (this_8[fifo]) */ vortex_fifo_clearwtdata(vortex, fifo, FIFO_SIZE); } hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), temp); hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2)); /* do { temp = hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2)); if 
(lifeboat++ > 0xbb8) { printk(KERN_ERR "Vortex: vortex_fifo_setwtctrl fail (hanging)\n"); break; } } while ((temp & FIFO_RDONLY)&&(temp & FIFO_VALID)&&(temp != 0xFFFFFFFF)); if (valid) { if (temp & FIFO_VALID) { temp = 0x40000; //temp |= 0x08000000; //temp |= 0x10000000; //temp |= 0x04000000; //temp |= 0x00400000; temp |= 0x1c400000; temp &= 0xFFFFFFF3; temp &= 0xFFFFFFEF; temp |= (valid & 1) << 4; hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), temp); return; } else { vortex_fifo_clearwtdata(vortex, fifo, FIFO_SIZE); return; } } else { temp &= 0xffffffef; temp |= 0x08000000; temp |= 0x10000000; temp |= 0x04000000; temp |= 0x00400000; hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), temp); temp = hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2)); //((temp >> 6) & 0x3f) priority = 0; if (((temp & 0x0fc0) ^ ((temp >> 6) & 0x0fc0)) & 0FFFFFFC0) vortex_fifo_clearwtdata(vortex, fifo, FIFO_SIZE); valid = 0xfb; temp = (temp & 0xfffffffd) | ((ctrl & 1) << 1); temp = (temp & 0xfffdffff) | ((f & 1) << 0x11); temp = (temp & 0xfffffff3) | ((priority & 3) << 2); temp = (temp & 0xffffffef) | ((valid & 1) << 4); temp = (temp & 0xffffffdf) | ((empty & 1) << 5); hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), temp); } */ /* temp = (temp & 0xfffffffd) | ((ctrl & 1) << 1); temp = (temp & 0xfffdffff) | ((f & 1) << 0x11); temp = (temp & 0xfffffff3) | ((priority & 3) << 2); temp = (temp & 0xffffffef) | ((valid & 1) << 4); temp = (temp & 0xffffffdf) | ((empty & 1) << 5); #ifdef FIFO_BITS temp = temp | FIFO_BITS | 40000; #endif // 0x1c440010, 0x1c400000 hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), temp); */ } #endif static void vortex_fifo_init(vortex_t * vortex) { int x; u32 addr; /* ADB DMA channels fifos. 
*/ addr = VORTEX_FIFO_ADBCTRL + ((NR_ADB - 1) * 4); for (x = NR_ADB - 1; x >= 0; x--) { hwwrite(vortex->mmio, addr, (FIFO_U0 | FIFO_U1)); if (hwread(vortex->mmio, addr) != (FIFO_U0 | FIFO_U1)) printk(KERN_ERR "bad adb fifo reset!"); vortex_fifo_clearadbdata(vortex, x, FIFO_SIZE); addr -= 4; } #ifndef CHIP_AU8810 /* WT DMA channels fifos. */ addr = VORTEX_FIFO_WTCTRL + ((NR_WT - 1) * 4); for (x = NR_WT - 1; x >= 0; x--) { hwwrite(vortex->mmio, addr, FIFO_U0); if (hwread(vortex->mmio, addr) != FIFO_U0) printk(KERN_ERR "bad wt fifo reset (0x%08x, 0x%08x)!\n", addr, hwread(vortex->mmio, addr)); vortex_fifo_clearwtdata(vortex, x, FIFO_SIZE); addr -= 4; } #endif /* trigger... */ #ifdef CHIP_AU8820 hwwrite(vortex->mmio, 0xf8c0, 0xd03); //0x0843 0xd6b #else #ifdef CHIP_AU8830 hwwrite(vortex->mmio, 0x17000, 0x61); /* wt a */ hwwrite(vortex->mmio, 0x17004, 0x61); /* wt b */ #endif hwwrite(vortex->mmio, 0x17008, 0x61); /* adb */ #endif } /* ADBDMA */ static void vortex_adbdma_init(vortex_t * vortex) { } static void vortex_adbdma_setfirstbuffer(vortex_t * vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; hwwrite(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2), dma->dma_ctrl); } static void vortex_adbdma_setstartbuffer(vortex_t * vortex, int adbdma, int sb) { stream_t *dma = &vortex->dma_adb[adbdma]; //hwwrite(vortex->mmio, VORTEX_ADBDMA_START + (adbdma << 2), sb << (((NR_ADB-1)-((adbdma&0xf)*2)))); hwwrite(vortex->mmio, VORTEX_ADBDMA_START + (adbdma << 2), sb << ((0xf - (adbdma & 0xf)) * 2)); dma->period_real = dma->period_virt = sb; } static void vortex_adbdma_setbuffers(vortex_t * vortex, int adbdma, int psize, int count) { stream_t *dma = &vortex->dma_adb[adbdma]; dma->period_bytes = psize; dma->nr_periods = count; dma->cfg0 = 0; dma->cfg1 = 0; switch (count) { /* Four or more pages */ default: case 4: dma->cfg1 |= 0x88000000 | 0x44000000 | 0x30000000 | (psize - 1); hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (adbdma << 4) + 0xc, 
snd_pcm_sgbuf_get_addr(dma->substream, psize * 3)); /* 3 pages */ case 3: dma->cfg0 |= 0x12000000; dma->cfg1 |= 0x80000000 | 0x40000000 | ((psize - 1) << 0xc); hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (adbdma << 4) + 0x8, snd_pcm_sgbuf_get_addr(dma->substream, psize * 2)); /* 2 pages */ case 2: dma->cfg0 |= 0x88000000 | 0x44000000 | 0x10000000 | (psize - 1); hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (adbdma << 4) + 0x4, snd_pcm_sgbuf_get_addr(dma->substream, psize)); /* 1 page */ case 1: dma->cfg0 |= 0x80000000 | 0x40000000 | ((psize - 1) << 0xc); hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (adbdma << 4), snd_pcm_sgbuf_get_addr(dma->substream, 0)); break; } /* printk(KERN_DEBUG "vortex: cfg0 = 0x%x\nvortex: cfg1=0x%x\n", dma->cfg0, dma->cfg1); */ hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFCFG0 + (adbdma << 3), dma->cfg0); hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFCFG1 + (adbdma << 3), dma->cfg1); vortex_adbdma_setfirstbuffer(vortex, adbdma); vortex_adbdma_setstartbuffer(vortex, adbdma, 0); } static void vortex_adbdma_setmode(vortex_t * vortex, int adbdma, int ie, int dir, int fmt, int d, u32 offset) { stream_t *dma = &vortex->dma_adb[adbdma]; dma->dma_unknown = d; dma->dma_ctrl = ((offset & OFFSET_MASK) | (dma->dma_ctrl & ~OFFSET_MASK)); /* Enable PCMOUT interrupts. 
*/ dma->dma_ctrl = (dma->dma_ctrl & ~IE_MASK) | ((ie << IE_SHIFT) & IE_MASK); dma->dma_ctrl = (dma->dma_ctrl & ~DIR_MASK) | ((dir << DIR_SHIFT) & DIR_MASK); dma->dma_ctrl = (dma->dma_ctrl & ~FMT_MASK) | ((fmt << FMT_SHIFT) & FMT_MASK); hwwrite(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2), dma->dma_ctrl); hwread(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2)); } static int vortex_adbdma_bufshift(vortex_t * vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; int page, p, pp, delta, i; page = (hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2)) & ADB_SUBBUF_MASK) >> ADB_SUBBUF_SHIFT; if (dma->nr_periods >= 4) delta = (page - dma->period_real) & 3; else { delta = (page - dma->period_real); if (delta < 0) delta += dma->nr_periods; } if (delta == 0) return 0; /* refresh hw page table */ if (dma->nr_periods > 4) { for (i = 0; i < delta; i++) { /* p: audio buffer page index */ p = dma->period_virt + i + 4; if (p >= dma->nr_periods) p -= dma->nr_periods; /* pp: hardware DMA page index. */ pp = dma->period_real + i; if (pp >= 4) pp -= 4; //hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFBASE+(((adbdma << 2)+pp) << 2), dma->table[p].addr); hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (((adbdma << 2) + pp) << 2), snd_pcm_sgbuf_get_addr(dma->substream, dma->period_bytes * p)); /* Force write thru cache. 
*/ hwread(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (((adbdma << 2) + pp) << 2)); } } dma->period_virt += delta; dma->period_real = page; if (dma->period_virt >= dma->nr_periods) dma->period_virt -= dma->nr_periods; if (delta != 1) printk(KERN_INFO "vortex: %d virt=%d, real=%d, delta=%d\n", adbdma, dma->period_virt, dma->period_real, delta); return delta; } static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; int p, pp, i; /* refresh hw page table */ for (i=0 ; i < 4 && i < dma->nr_periods; i++) { /* p: audio buffer page index */ p = dma->period_virt + i; if (p >= dma->nr_periods) p -= dma->nr_periods; /* pp: hardware DMA page index. */ pp = dma->period_real + i; if (dma->nr_periods < 4) { if (pp >= dma->nr_periods) pp -= dma->nr_periods; } else { if (pp >= 4) pp -= 4; } hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (((adbdma << 2) + pp) << 2), snd_pcm_sgbuf_get_addr(dma->substream, dma->period_bytes * p)); /* Force write thru cache. */ hwread(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (((adbdma << 2)+pp) << 2)); } } static inline int vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; int temp, page, delta; temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2)); page = (temp & ADB_SUBBUF_MASK) >> ADB_SUBBUF_SHIFT; if (dma->nr_periods >= 4) delta = (page - dma->period_real) & 3; else { delta = (page - dma->period_real); if (delta < 0) delta += dma->nr_periods; } return (dma->period_virt + delta) * dma->period_bytes + (temp & (dma->period_bytes - 1)); } static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma) { int this_8 = 0 /*empty */ , this_4 = 0 /*priority */ ; stream_t *dma = &vortex->dma_adb[adbdma]; switch (dma->fifo_status) { case FIFO_START: vortex_fifo_setadbvalid(vortex, adbdma, dma->fifo_enabled ? 
1 : 0); break; case FIFO_STOP: this_8 = 1; hwwrite(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2), dma->dma_ctrl); vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown, this_4, this_8, dma->fifo_enabled ? 1 : 0, 0); break; case FIFO_PAUSE: vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown, this_4, this_8, dma->fifo_enabled ? 1 : 0, 0); break; } dma->fifo_status = FIFO_START; } static void vortex_adbdma_resumefifo(vortex_t * vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; int this_8 = 1, this_4 = 0; switch (dma->fifo_status) { case FIFO_STOP: hwwrite(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2), dma->dma_ctrl); vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown, this_4, this_8, dma->fifo_enabled ? 1 : 0, 0); break; case FIFO_PAUSE: vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown, this_4, this_8, dma->fifo_enabled ? 1 : 0, 0); break; } dma->fifo_status = FIFO_START; } static void vortex_adbdma_pausefifo(vortex_t * vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; int this_8 = 0, this_4 = 0; switch (dma->fifo_status) { case FIFO_START: vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown, this_4, this_8, 0, 0); break; case FIFO_STOP: hwwrite(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2), dma->dma_ctrl); vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown, this_4, this_8, 0, 0); break; } dma->fifo_status = FIFO_PAUSE; } #if 0 // Using pause instead static void vortex_adbdma_stopfifo(vortex_t * vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; int this_4 = 0, this_8 = 0; if (dma->fifo_status == FIFO_START) vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown, this_4, this_8, 0, 0); else if (dma->fifo_status == FIFO_STOP) return; dma->fifo_status = FIFO_STOP; dma->fifo_enabled = 0; } #endif /* WTDMA */ #ifndef CHIP_AU8810 static void vortex_wtdma_setfirstbuffer(vortex_t * vortex, int wtdma) { //int this_7c=dma_ctrl; stream_t *dma = &vortex->dma_wt[wtdma]; hwwrite(vortex->mmio, 
VORTEX_WTDMA_CTRL + (wtdma << 2), dma->dma_ctrl); } static void vortex_wtdma_setstartbuffer(vortex_t * vortex, int wtdma, int sb) { stream_t *dma = &vortex->dma_wt[wtdma]; //hwwrite(vortex->mmio, VORTEX_WTDMA_START + (wtdma << 2), sb << ((0x1f-(wtdma&0xf)*2))); hwwrite(vortex->mmio, VORTEX_WTDMA_START + (wtdma << 2), sb << ((0xf - (wtdma & 0xf)) * 2)); dma->period_real = dma->period_virt = sb; } static void vortex_wtdma_setbuffers(vortex_t * vortex, int wtdma, int psize, int count) { stream_t *dma = &vortex->dma_wt[wtdma]; dma->period_bytes = psize; dma->nr_periods = count; dma->cfg0 = 0; dma->cfg1 = 0; switch (count) { /* Four or more pages */ default: case 4: dma->cfg1 |= 0x88000000 | 0x44000000 | 0x30000000 | (psize-1); hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4) + 0xc, snd_pcm_sgbuf_get_addr(dma->substream, psize * 3)); /* 3 pages */ case 3: dma->cfg0 |= 0x12000000; dma->cfg1 |= 0x80000000 | 0x40000000 | ((psize-1) << 0xc); hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4) + 0x8, snd_pcm_sgbuf_get_addr(dma->substream, psize * 2)); /* 2 pages */ case 2: dma->cfg0 |= 0x88000000 | 0x44000000 | 0x10000000 | (psize-1); hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4) + 0x4, snd_pcm_sgbuf_get_addr(dma->substream, psize)); /* 1 page */ case 1: dma->cfg0 |= 0x80000000 | 0x40000000 | ((psize-1) << 0xc); hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4), snd_pcm_sgbuf_get_addr(dma->substream, 0)); break; } hwwrite(vortex->mmio, VORTEX_WTDMA_BUFCFG0 + (wtdma << 3), dma->cfg0); hwwrite(vortex->mmio, VORTEX_WTDMA_BUFCFG1 + (wtdma << 3), dma->cfg1); vortex_wtdma_setfirstbuffer(vortex, wtdma); vortex_wtdma_setstartbuffer(vortex, wtdma, 0); } static void vortex_wtdma_setmode(vortex_t * vortex, int wtdma, int ie, int fmt, int d, /*int e, */ u32 offset) { stream_t *dma = &vortex->dma_wt[wtdma]; //dma->this_08 = e; dma->dma_unknown = d; dma->dma_ctrl = 0; dma->dma_ctrl = ((offset & OFFSET_MASK) | (dma->dma_ctrl & ~OFFSET_MASK)); /* PCMOUT 
interrupt */ dma->dma_ctrl = (dma->dma_ctrl & ~IE_MASK) | ((ie << IE_SHIFT) & IE_MASK); /* Always playback. */ dma->dma_ctrl |= (1 << DIR_SHIFT); /* Audio Format */ dma->dma_ctrl = (dma->dma_ctrl & FMT_MASK) | ((fmt << FMT_SHIFT) & FMT_MASK); /* Write into hardware */ hwwrite(vortex->mmio, VORTEX_WTDMA_CTRL + (wtdma << 2), dma->dma_ctrl); } static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma) { stream_t *dma = &vortex->dma_wt[wtdma]; int page, p, pp, delta, i; page = (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) & WT_SUBBUF_MASK) >> WT_SUBBUF_SHIFT; if (dma->nr_periods >= 4) delta = (page - dma->period_real) & 3; else { delta = (page - dma->period_real); if (delta < 0) delta += dma->nr_periods; } if (delta == 0) return 0; /* refresh hw page table */ if (dma->nr_periods > 4) { for (i = 0; i < delta; i++) { /* p: audio buffer page index */ p = dma->period_virt + i + 4; if (p >= dma->nr_periods) p -= dma->nr_periods; /* pp: hardware DMA page index. */ pp = dma->period_real + i; if (pp >= 4) pp -= 4; hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (((wtdma << 2) + pp) << 2), snd_pcm_sgbuf_get_addr(dma->substream, dma->period_bytes * p)); /* Force write thru cache. 
*/ hwread(vortex->mmio, VORTEX_WTDMA_BUFBASE + (((wtdma << 2) + pp) << 2)); } } dma->period_virt += delta; if (dma->period_virt >= dma->nr_periods) dma->period_virt -= dma->nr_periods; dma->period_real = page; if (delta != 1) printk(KERN_WARNING "vortex: wt virt = %d, delta = %d\n", dma->period_virt, delta); return delta; } #if 0 static void vortex_wtdma_getposition(vortex_t * vortex, int wtdma, int *subbuf, int *pos) { int temp; temp = hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)); *subbuf = (temp >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK; *pos = temp & POS_MASK; } static int vortex_wtdma_getcursubuffer(vortex_t * vortex, int wtdma) { return ((hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) >> POS_SHIFT) & POS_MASK); } #endif static inline int vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma) { stream_t *dma = &vortex->dma_wt[wtdma]; int temp; temp = hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)); temp = (dma->period_virt * dma->period_bytes) + (temp & (dma->period_bytes - 1)); return temp; } static void vortex_wtdma_startfifo(vortex_t * vortex, int wtdma) { stream_t *dma = &vortex->dma_wt[wtdma]; int this_8 = 0, this_4 = 0; switch (dma->fifo_status) { case FIFO_START: vortex_fifo_setwtvalid(vortex, wtdma, dma->fifo_enabled ? 1 : 0); break; case FIFO_STOP: this_8 = 1; hwwrite(vortex->mmio, VORTEX_WTDMA_CTRL + (wtdma << 2), dma->dma_ctrl); vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown, this_4, this_8, dma->fifo_enabled ? 1 : 0, 0); break; case FIFO_PAUSE: vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown, this_4, this_8, dma->fifo_enabled ? 1 : 0, 0); break; } dma->fifo_status = FIFO_START; } static void vortex_wtdma_resumefifo(vortex_t * vortex, int wtdma) { stream_t *dma = &vortex->dma_wt[wtdma]; int this_8 = 0, this_4 = 0; switch (dma->fifo_status) { case FIFO_STOP: hwwrite(vortex->mmio, VORTEX_WTDMA_CTRL + (wtdma << 2), dma->dma_ctrl); vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown, this_4, this_8, dma->fifo_enabled ? 
1 : 0, 0);
		break;
	case FIFO_PAUSE:
		vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown,
				      this_4, this_8,
				      dma->fifo_enabled ? 1 : 0, 0);
		break;
	}
	/* FIFO is running again. */
	dma->fifo_status = FIFO_START;
}

/* Pause the WT DMA FIFO: drop its "valid" bit and remember the state. */
static void vortex_wtdma_pausefifo(vortex_t * vortex, int wtdma)
{
	stream_t *dma = &vortex->dma_wt[wtdma];
	int this_8 = 0, this_4 = 0;

	switch (dma->fifo_status) {
	case FIFO_START:
		vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown,
				      this_4, this_8, 0, 0);
		break;
	case FIFO_STOP:
		/* Re-program the DMA control word before touching the FIFO. */
		hwwrite(vortex->mmio, VORTEX_WTDMA_CTRL + (wtdma << 2),
			dma->dma_ctrl);
		vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown,
				      this_4, this_8, 0, 0);
		break;
	}
	dma->fifo_status = FIFO_PAUSE;
}

/* Fully stop the WT DMA FIFO and mark it disabled (no-op if stopped). */
static void vortex_wtdma_stopfifo(vortex_t * vortex, int wtdma)
{
	stream_t *dma = &vortex->dma_wt[wtdma];
	int this_4 = 0, this_8 = 0;

	if (dma->fifo_status == FIFO_START)
		vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown,
				      this_4, this_8, 0, 0);
	else if (dma->fifo_status == FIFO_STOP)
		return;
	dma->fifo_status = FIFO_STOP;
	dma->fifo_enabled = 0;
}
#endif
/* ADB Routes */

/* One ADB routing-RAM link word: (source << ADB_SHIFT) | destination. */
typedef int ADBRamLink;

/*
 * Initialize the ADB router: clear the channel sample-rate enable
 * register, then OR ROUTE_MASK into every RTBASE/CHNBASE entry
 * (presumably marking all routes unused — see in-code note below).
 */
static void vortex_adb_init(vortex_t * vortex)
{
	int i;
	/* it looks like we are writing more than we need to...
	 * if we write what we are supposed to it breaks things...
*/ hwwrite(vortex->mmio, VORTEX_ADB_SR, 0); for (i = 0; i < VORTEX_ADB_RTBASE_COUNT; i++) hwwrite(vortex->mmio, VORTEX_ADB_RTBASE + (i << 2), hwread(vortex->mmio, VORTEX_ADB_RTBASE + (i << 2)) | ROUTE_MASK); for (i = 0; i < VORTEX_ADB_CHNBASE_COUNT; i++) { hwwrite(vortex->mmio, VORTEX_ADB_CHNBASE + (i << 2), hwread(vortex->mmio, VORTEX_ADB_CHNBASE + (i << 2)) | ROUTE_MASK); } } static void vortex_adb_en_sr(vortex_t * vortex, int channel) { hwwrite(vortex->mmio, VORTEX_ADB_SR, hwread(vortex->mmio, VORTEX_ADB_SR) | (0x1 << channel)); } static void vortex_adb_dis_sr(vortex_t * vortex, int channel) { hwwrite(vortex->mmio, VORTEX_ADB_SR, hwread(vortex->mmio, VORTEX_ADB_SR) & ~(0x1 << channel)); } static void vortex_adb_addroutes(vortex_t * vortex, unsigned char channel, ADBRamLink * route, int rnum) { int temp, prev, lifeboat = 0; if ((rnum <= 0) || (route == NULL)) return; /* Write last routes. */ rnum--; hwwrite(vortex->mmio, VORTEX_ADB_RTBASE + ((route[rnum] & ADB_MASK) << 2), ROUTE_MASK); while (rnum > 0) { hwwrite(vortex->mmio, VORTEX_ADB_RTBASE + ((route[rnum - 1] & ADB_MASK) << 2), route[rnum]); rnum--; } /* Write first route. */ temp = hwread(vortex->mmio, VORTEX_ADB_CHNBASE + (channel << 2)) & ADB_MASK; if (temp == ADB_MASK) { /* First entry on this channel. */ hwwrite(vortex->mmio, VORTEX_ADB_CHNBASE + (channel << 2), route[0]); vortex_adb_en_sr(vortex, channel); return; } /* Not first entry on this channel. Need to link. */ do { prev = temp; temp = hwread(vortex->mmio, VORTEX_ADB_RTBASE + (temp << 2)) & ADB_MASK; if ((lifeboat++) > ADB_MASK) { printk(KERN_ERR "vortex_adb_addroutes: unending route! 0x%x\n", *route); return; } } while (temp != ADB_MASK); hwwrite(vortex->mmio, VORTEX_ADB_RTBASE + (prev << 2), route[0]); } static void vortex_adb_delroutes(vortex_t * vortex, unsigned char channel, ADBRamLink route0, ADBRamLink route1) { int temp, lifeboat = 0, prev; /* Find route. 
*/ temp = hwread(vortex->mmio, VORTEX_ADB_CHNBASE + (channel << 2)) & ADB_MASK; if (temp == (route0 & ADB_MASK)) { temp = hwread(vortex->mmio, VORTEX_ADB_RTBASE + ((route1 & ADB_MASK) << 2)); if ((temp & ADB_MASK) == ADB_MASK) vortex_adb_dis_sr(vortex, channel); hwwrite(vortex->mmio, VORTEX_ADB_CHNBASE + (channel << 2), temp); return; } do { prev = temp; temp = hwread(vortex->mmio, VORTEX_ADB_RTBASE + (prev << 2)) & ADB_MASK; if (((lifeboat++) > ADB_MASK) || (temp == ADB_MASK)) { printk(KERN_ERR "vortex_adb_delroutes: route not found! 0x%x\n", route0); return; } } while (temp != (route0 & ADB_MASK)); temp = hwread(vortex->mmio, VORTEX_ADB_RTBASE + (temp << 2)); if ((temp & ADB_MASK) == route1) temp = hwread(vortex->mmio, VORTEX_ADB_RTBASE + (temp << 2)); /* Make bridge over deleted route. */ hwwrite(vortex->mmio, VORTEX_ADB_RTBASE + (prev << 2), temp); } static void vortex_route(vortex_t * vortex, int en, unsigned char channel, unsigned char source, unsigned char dest) { ADBRamLink route; route = ((source & ADB_MASK) << ADB_SHIFT) | (dest & ADB_MASK); if (en) { vortex_adb_addroutes(vortex, channel, &route, 1); if ((source < (OFFSET_SRCOUT + NR_SRC)) && (source >= OFFSET_SRCOUT)) vortex_src_addWTD(vortex, (source - OFFSET_SRCOUT), channel); else if ((source < (OFFSET_MIXOUT + NR_MIXOUT)) && (source >= OFFSET_MIXOUT)) vortex_mixer_addWTD(vortex, (source - OFFSET_MIXOUT), channel); } else { vortex_adb_delroutes(vortex, channel, route, route); if ((source < (OFFSET_SRCOUT + NR_SRC)) && (source >= OFFSET_SRCOUT)) vortex_src_delWTD(vortex, (source - OFFSET_SRCOUT), channel); else if ((source < (OFFSET_MIXOUT + NR_MIXOUT)) && (source >= OFFSET_MIXOUT)) vortex_mixer_delWTD(vortex, (source - OFFSET_MIXOUT), channel); } } #if 0 static void vortex_routes(vortex_t * vortex, int en, unsigned char channel, unsigned char source, unsigned char dest0, unsigned char dest1) { ADBRamLink route[2]; route[0] = ((source & ADB_MASK) << ADB_SHIFT) | (dest0 & ADB_MASK); route[1] = ((source 
& ADB_MASK) << ADB_SHIFT) | (dest1 & ADB_MASK); if (en) { vortex_adb_addroutes(vortex, channel, route, 2); if ((source < (OFFSET_SRCOUT + NR_SRC)) && (source >= (OFFSET_SRCOUT))) vortex_src_addWTD(vortex, (source - OFFSET_SRCOUT), channel); else if ((source < (OFFSET_MIXOUT + NR_MIXOUT)) && (source >= (OFFSET_MIXOUT))) vortex_mixer_addWTD(vortex, (source - OFFSET_MIXOUT), channel); } else { vortex_adb_delroutes(vortex, channel, route[0], route[1]); if ((source < (OFFSET_SRCOUT + NR_SRC)) && (source >= (OFFSET_SRCOUT))) vortex_src_delWTD(vortex, (source - OFFSET_SRCOUT), channel); else if ((source < (OFFSET_MIXOUT + NR_MIXOUT)) && (source >= (OFFSET_MIXOUT))) vortex_mixer_delWTD(vortex, (source - OFFSET_MIXOUT), channel); } } #endif /* Route two sources to same target. Sources must be of same class !!! */ static void vortex_routeLRT(vortex_t * vortex, int en, unsigned char ch, unsigned char source0, unsigned char source1, unsigned char dest) { ADBRamLink route[2]; route[0] = ((source0 & ADB_MASK) << ADB_SHIFT) | (dest & ADB_MASK); route[1] = ((source1 & ADB_MASK) << ADB_SHIFT) | (dest & ADB_MASK); if (dest < 0x10) route[1] = (route[1] & ~ADB_MASK) | (dest + 0x20); /* fifo A */ if (en) { vortex_adb_addroutes(vortex, ch, route, 2); if ((source0 < (OFFSET_SRCOUT + NR_SRC)) && (source0 >= OFFSET_SRCOUT)) { vortex_src_addWTD(vortex, (source0 - OFFSET_SRCOUT), ch); vortex_src_addWTD(vortex, (source1 - OFFSET_SRCOUT), ch); } else if ((source0 < (OFFSET_MIXOUT + NR_MIXOUT)) && (source0 >= OFFSET_MIXOUT)) { vortex_mixer_addWTD(vortex, (source0 - OFFSET_MIXOUT), ch); vortex_mixer_addWTD(vortex, (source1 - OFFSET_MIXOUT), ch); } } else { vortex_adb_delroutes(vortex, ch, route[0], route[1]); if ((source0 < (OFFSET_SRCOUT + NR_SRC)) && (source0 >= OFFSET_SRCOUT)) { vortex_src_delWTD(vortex, (source0 - OFFSET_SRCOUT), ch); vortex_src_delWTD(vortex, (source1 - OFFSET_SRCOUT), ch); } else if ((source0 < (OFFSET_MIXOUT + NR_MIXOUT)) && (source0 >= OFFSET_MIXOUT)) { 
vortex_mixer_delWTD(vortex, (source0 - OFFSET_MIXOUT), ch); vortex_mixer_delWTD(vortex, (source1 - OFFSET_MIXOUT), ch); } } } /* Connection stuff */ // Connect adbdma to src('s). static void vortex_connection_adbdma_src(vortex_t * vortex, int en, unsigned char ch, unsigned char adbdma, unsigned char src) { vortex_route(vortex, en, ch, ADB_DMA(adbdma), ADB_SRCIN(src)); } // Connect SRC to mixin. static void vortex_connection_src_mixin(vortex_t * vortex, int en, unsigned char channel, unsigned char src, unsigned char mixin) { vortex_route(vortex, en, channel, ADB_SRCOUT(src), ADB_MIXIN(mixin)); } // Connect mixin with mix output. static void vortex_connection_mixin_mix(vortex_t * vortex, int en, unsigned char mixin, unsigned char mix, int a) { if (en) { vortex_mix_enableinput(vortex, mix, mixin); vortex_mix_setinputvolumebyte(vortex, mix, mixin, MIX_DEFIGAIN); // added to original code. } else vortex_mix_disableinput(vortex, mix, mixin, a); } // Connect absolut address to mixin. static void vortex_connection_adb_mixin(vortex_t * vortex, int en, unsigned char channel, unsigned char source, unsigned char mixin) { vortex_route(vortex, en, channel, source, ADB_MIXIN(mixin)); } static void vortex_connection_src_adbdma(vortex_t * vortex, int en, unsigned char ch, unsigned char src, unsigned char adbdma) { vortex_route(vortex, en, ch, ADB_SRCOUT(src), ADB_DMA(adbdma)); } static void vortex_connection_src_src_adbdma(vortex_t * vortex, int en, unsigned char ch, unsigned char src0, unsigned char src1, unsigned char adbdma) { vortex_routeLRT(vortex, en, ch, ADB_SRCOUT(src0), ADB_SRCOUT(src1), ADB_DMA(adbdma)); } // mix to absolut address. static void vortex_connection_mix_adb(vortex_t * vortex, int en, unsigned char ch, unsigned char mix, unsigned char dest) { vortex_route(vortex, en, ch, ADB_MIXOUT(mix), dest); vortex_mix_setvolumebyte(vortex, mix, MIX_DEFOGAIN); // added to original code. } // mixer to src. 
static void vortex_connection_mix_src(vortex_t * vortex, int en, unsigned char ch, unsigned char mix, unsigned char src) { vortex_route(vortex, en, ch, ADB_MIXOUT(mix), ADB_SRCIN(src)); vortex_mix_setvolumebyte(vortex, mix, MIX_DEFOGAIN); // added to original code. } #if 0 static void vortex_connection_adbdma_src_src(vortex_t * vortex, int en, unsigned char channel, unsigned char adbdma, unsigned char src0, unsigned char src1) { vortex_routes(vortex, en, channel, ADB_DMA(adbdma), ADB_SRCIN(src0), ADB_SRCIN(src1)); } // Connect two mix to AdbDma. static void vortex_connection_mix_mix_adbdma(vortex_t * vortex, int en, unsigned char ch, unsigned char mix0, unsigned char mix1, unsigned char adbdma) { ADBRamLink routes[2]; routes[0] = (((mix0 + OFFSET_MIXOUT) & ADB_MASK) << ADB_SHIFT) | (adbdma & ADB_MASK); routes[1] = (((mix1 + OFFSET_MIXOUT) & ADB_MASK) << ADB_SHIFT) | ((adbdma + 0x20) & ADB_MASK); if (en) { vortex_adb_addroutes(vortex, ch, routes, 0x2); vortex_mixer_addWTD(vortex, mix0, ch); vortex_mixer_addWTD(vortex, mix1, ch); } else { vortex_adb_delroutes(vortex, ch, routes[0], routes[1]); vortex_mixer_delWTD(vortex, mix0, ch); vortex_mixer_delWTD(vortex, mix1, ch); } } #endif /* CODEC connect. */ static void vortex_connect_codecplay(vortex_t * vortex, int en, unsigned char mixers[]) { #ifdef CHIP_AU8820 vortex_connection_mix_adb(vortex, en, 0x11, mixers[0], ADB_CODECOUT(0)); vortex_connection_mix_adb(vortex, en, 0x11, mixers[1], ADB_CODECOUT(1)); #else #if 1 // Connect front channels through EQ. vortex_connection_mix_adb(vortex, en, 0x11, mixers[0], ADB_EQIN(0)); vortex_connection_mix_adb(vortex, en, 0x11, mixers[1], ADB_EQIN(1)); /* Lower volume, since EQ has some gain. */ vortex_mix_setvolumebyte(vortex, mixers[0], 0); vortex_mix_setvolumebyte(vortex, mixers[1], 0); vortex_route(vortex, en, 0x11, ADB_EQOUT(0), ADB_CODECOUT(0)); vortex_route(vortex, en, 0x11, ADB_EQOUT(1), ADB_CODECOUT(1)); /* Check if reg 0x28 has SDAC bit set. 
*/ if (VORTEX_IS_QUAD(vortex)) { /* Rear channel. Note: ADB_CODECOUT(0+2) and (1+2) is for AC97 modem */ vortex_connection_mix_adb(vortex, en, 0x11, mixers[2], ADB_CODECOUT(0 + 4)); vortex_connection_mix_adb(vortex, en, 0x11, mixers[3], ADB_CODECOUT(1 + 4)); /* printk(KERN_DEBUG "SDAC detected "); */ } #else // Use plain direct output to codec. vortex_connection_mix_adb(vortex, en, 0x11, mixers[0], ADB_CODECOUT(0)); vortex_connection_mix_adb(vortex, en, 0x11, mixers[1], ADB_CODECOUT(1)); #endif #endif } static void vortex_connect_codecrec(vortex_t * vortex, int en, unsigned char mixin0, unsigned char mixin1) { /* Enable: 0x1, 0x1 Channel: 0x11, 0x11 ADB Source address: 0x48, 0x49 Destination Asp4Topology_0x9c,0x98 */ vortex_connection_adb_mixin(vortex, en, 0x11, ADB_CODECIN(0), mixin0); vortex_connection_adb_mixin(vortex, en, 0x11, ADB_CODECIN(1), mixin1); } // Higher level ADB audio path (de)allocator. /* Resource manager */ static int resnum[VORTEX_RESOURCE_LAST] = { NR_ADB, NR_SRC, NR_MIXIN, NR_MIXOUT, NR_A3D }; /* Checkout/Checkin resource of given type. resmap: resource map to be used. If NULL means that we want to allocate a DMA resource (root of all other resources of a dma channel). out: Mean checkout if != 0. Else mean Checkin resource. restype: Indicates type of resource to be checked in or out. */ static char vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out, int restype) { int i, qty = resnum[restype], resinuse = 0; if (out) { /* Gather used resources by all streams. */ for (i = 0; i < NR_ADB; i++) { resinuse |= vortex->dma_adb[i].resources[restype]; } resinuse |= vortex->fixed_res[restype]; /* Find and take free resource. 
*/ for (i = 0; i < qty; i++) { if ((resinuse & (1 << i)) == 0) { if (resmap != NULL) resmap[restype] |= (1 << i); else vortex->dma_adb[i].resources[restype] |= (1 << i); /* printk(KERN_DEBUG "vortex: ResManager: type %d out %d\n", restype, i); */ return i; } } } else { if (resmap == NULL) return -EINVAL; /* Checkin first resource of type restype. */ for (i = 0; i < qty; i++) { if (resmap[restype] & (1 << i)) { resmap[restype] &= ~(1 << i); /* printk(KERN_DEBUG "vortex: ResManager: type %d in %d\n", restype, i); */ return i; } } } printk(KERN_ERR "vortex: FATAL: ResManager: resource type %d exhausted.\n", restype); return -ENOMEM; } /* Default Connections */ static int vortex_adb_allocroute(vortex_t * vortex, int dma, int nr_ch, int dir, int type); static void vortex_connect_default(vortex_t * vortex, int en) { // Connect AC97 codec. vortex->mixplayb[0] = vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXOUT); vortex->mixplayb[1] = vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXOUT); if (VORTEX_IS_QUAD(vortex)) { vortex->mixplayb[2] = vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXOUT); vortex->mixplayb[3] = vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXOUT); } vortex_connect_codecplay(vortex, en, vortex->mixplayb); vortex->mixcapt[0] = vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXIN); vortex->mixcapt[1] = vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXIN); vortex_connect_codecrec(vortex, en, MIX_CAPT(0), MIX_CAPT(1)); // Connect SPDIF #ifndef CHIP_AU8820 vortex->mixspdif[0] = vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXOUT); vortex->mixspdif[1] = vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXOUT); vortex_connection_mix_adb(vortex, en, 0x14, vortex->mixspdif[0], ADB_SPDIFOUT(0)); vortex_connection_mix_adb(vortex, en, 0x14, vortex->mixspdif[1], ADB_SPDIFOUT(1)); 
#endif // Connect WT #ifndef CHIP_AU8810 vortex_wt_connect(vortex, en); #endif // A3D (crosstalk canceler and A3D slices). AU8810 disabled for now. #ifndef CHIP_AU8820 vortex_Vort3D_connect(vortex, en); #endif // Connect I2S // Connect DSP interface for SQ3500 turbo (not here i think...) // Connect AC98 modem codec } /* Allocate nr_ch pcm audio routes if dma < 0. If dma >= 0, existing routes are deallocated. dma: DMA engine routes to be deallocated when dma >= 0. nr_ch: Number of channels to be de/allocated. dir: direction of stream. Uses same values as substream->stream. type: Type of audio output/source (codec, spdif, i2s, dsp, etc) Return: Return allocated DMA or same DMA passed as "dma" when dma >= 0. */ static int vortex_adb_allocroute(vortex_t * vortex, int dma, int nr_ch, int dir, int type) { stream_t *stream; int i, en; if ((nr_ch == 3) || ((dir == SNDRV_PCM_STREAM_CAPTURE) && (nr_ch > 2))) return -EBUSY; if (dma >= 0) { en = 0; vortex_adb_checkinout(vortex, vortex->dma_adb[dma].resources, en, VORTEX_RESOURCE_DMA); } else { en = 1; if ((dma = vortex_adb_checkinout(vortex, NULL, en, VORTEX_RESOURCE_DMA)) < 0) return -EBUSY; } stream = &vortex->dma_adb[dma]; stream->dma = dma; stream->dir = dir; stream->type = type; /* PLAYBACK ROUTES. */ if (dir == SNDRV_PCM_STREAM_PLAYBACK) { int src[4], mix[4], ch_top; #ifndef CHIP_AU8820 int a3d = 0; #endif /* Get SRC and MIXER hardware resources. 
*/ if (stream->type != VORTEX_PCM_SPDIF) { for (i = 0; i < nr_ch; i++) { if ((src[i] = vortex_adb_checkinout(vortex, stream->resources, en, VORTEX_RESOURCE_SRC)) < 0) { memset(stream->resources, 0, sizeof(unsigned char) * VORTEX_RESOURCE_LAST); return -EBUSY; } if (stream->type != VORTEX_PCM_A3D) { if ((mix[i] = vortex_adb_checkinout(vortex, stream->resources, en, VORTEX_RESOURCE_MIXIN)) < 0) { memset(stream->resources, 0, sizeof(unsigned char) * VORTEX_RESOURCE_LAST); return -EBUSY; } } } } #ifndef CHIP_AU8820 if (stream->type == VORTEX_PCM_A3D) { if ((a3d = vortex_adb_checkinout(vortex, stream->resources, en, VORTEX_RESOURCE_A3D)) < 0) { memset(stream->resources, 0, sizeof(unsigned char) * VORTEX_RESOURCE_LAST); printk(KERN_ERR "vortex: out of A3D sources. Sorry\n"); return -EBUSY; } /* (De)Initialize A3D hardware source. */ vortex_Vort3D_InitializeSource(&(vortex->a3d[a3d]), en); } /* Make SPDIF out exclusive to "spdif" device when in use. */ if ((stream->type == VORTEX_PCM_SPDIF) && (en)) { vortex_route(vortex, 0, 0x14, ADB_MIXOUT(vortex->mixspdif[0]), ADB_SPDIFOUT(0)); vortex_route(vortex, 0, 0x14, ADB_MIXOUT(vortex->mixspdif[1]), ADB_SPDIFOUT(1)); } #endif /* Make playback routes. */ for (i = 0; i < nr_ch; i++) { if (stream->type == VORTEX_PCM_ADB) { vortex_connection_adbdma_src(vortex, en, src[nr_ch - 1], dma, src[i]); vortex_connection_src_mixin(vortex, en, 0x11, src[i], mix[i]); vortex_connection_mixin_mix(vortex, en, mix[i], MIX_PLAYB(i), 0); #ifndef CHIP_AU8820 vortex_connection_mixin_mix(vortex, en, mix[i], MIX_SPDIF(i % 2), 0); vortex_mix_setinputvolumebyte(vortex, MIX_SPDIF(i % 2), mix[i], MIX_DEFIGAIN); #endif } #ifndef CHIP_AU8820 if (stream->type == VORTEX_PCM_A3D) { vortex_connection_adbdma_src(vortex, en, src[nr_ch - 1], dma, src[i]); vortex_route(vortex, en, 0x11, ADB_SRCOUT(src[i]), ADB_A3DIN(a3d)); /* XTalk test. 
*/ //vortex_route(vortex, en, 0x11, dma, ADB_XTALKIN(i?9:4)); //vortex_route(vortex, en, 0x11, ADB_SRCOUT(src[i]), ADB_XTALKIN(i?4:9)); } if (stream->type == VORTEX_PCM_SPDIF) vortex_route(vortex, en, 0x14, ADB_DMA(stream->dma), ADB_SPDIFOUT(i)); #endif } if (stream->type != VORTEX_PCM_SPDIF && stream->type != VORTEX_PCM_A3D) { ch_top = (VORTEX_IS_QUAD(vortex) ? 4 : 2); for (i = nr_ch; i < ch_top; i++) { vortex_connection_mixin_mix(vortex, en, mix[i % nr_ch], MIX_PLAYB(i), 0); #ifndef CHIP_AU8820 vortex_connection_mixin_mix(vortex, en, mix[i % nr_ch], MIX_SPDIF(i % 2), 0); vortex_mix_setinputvolumebyte(vortex, MIX_SPDIF(i % 2), mix[i % nr_ch], MIX_DEFIGAIN); #endif } } #ifndef CHIP_AU8820 else { if (nr_ch == 1 && stream->type == VORTEX_PCM_SPDIF) vortex_route(vortex, en, 0x14, ADB_DMA(stream->dma), ADB_SPDIFOUT(1)); } /* Reconnect SPDIF out when "spdif" device is down. */ if ((stream->type == VORTEX_PCM_SPDIF) && (!en)) { vortex_route(vortex, 1, 0x14, ADB_MIXOUT(vortex->mixspdif[0]), ADB_SPDIFOUT(0)); vortex_route(vortex, 1, 0x14, ADB_MIXOUT(vortex->mixspdif[1]), ADB_SPDIFOUT(1)); } #endif /* CAPTURE ROUTES. */ } else { int src[2], mix[2]; /* Get SRC and MIXER hardware resources. */ for (i = 0; i < nr_ch; i++) { if ((mix[i] = vortex_adb_checkinout(vortex, stream->resources, en, VORTEX_RESOURCE_MIXOUT)) < 0) { memset(stream->resources, 0, sizeof(unsigned char) * VORTEX_RESOURCE_LAST); return -EBUSY; } if ((src[i] = vortex_adb_checkinout(vortex, stream->resources, en, VORTEX_RESOURCE_SRC)) < 0) { memset(stream->resources, 0, sizeof(unsigned char) * VORTEX_RESOURCE_LAST); return -EBUSY; } } /* Make capture routes. 
*/ vortex_connection_mixin_mix(vortex, en, MIX_CAPT(0), mix[0], 0); vortex_connection_mix_src(vortex, en, 0x11, mix[0], src[0]); if (nr_ch == 1) { vortex_connection_mixin_mix(vortex, en, MIX_CAPT(1), mix[0], 0); vortex_connection_src_adbdma(vortex, en, src[0], src[0], dma); } else { vortex_connection_mixin_mix(vortex, en, MIX_CAPT(1), mix[1], 0); vortex_connection_mix_src(vortex, en, 0x11, mix[1], src[1]); vortex_connection_src_src_adbdma(vortex, en, src[1], src[0], src[1], dma); } } vortex->dma_adb[dma].nr_ch = nr_ch; #if 0 /* AC97 Codec channel setup. FIXME: this has no effect on some cards !! */ if (nr_ch < 4) { /* Copy stereo to rear channel (surround) */ snd_ac97_write_cache(vortex->codec, AC97_SIGMATEL_DAC2INVERT, snd_ac97_read(vortex->codec, AC97_SIGMATEL_DAC2INVERT) | 4); } else { /* Allow separate front and rear channels. */ snd_ac97_write_cache(vortex->codec, AC97_SIGMATEL_DAC2INVERT, snd_ac97_read(vortex->codec, AC97_SIGMATEL_DAC2INVERT) & ~((u32) 4)); } #endif return dma; } /* Set the SampleRate of the SRC's attached to the given DMA engine. */ static void vortex_adb_setsrc(vortex_t * vortex, int adbdma, unsigned int rate, int dir) { stream_t *stream = &(vortex->dma_adb[adbdma]); int i, cvrt; /* dir=1:play ; dir=0:rec */ if (dir) cvrt = SRC_RATIO(rate, 48000); else cvrt = SRC_RATIO(48000, rate); /* Setup SRC's */ for (i = 0; i < NR_SRC; i++) { if (stream->resources[VORTEX_RESOURCE_SRC] & (1 << i)) vortex_src_setupchannel(vortex, i, cvrt, 0, 0, i, dir, 1, cvrt, dir); } } // Timer and ISR functions. static void vortex_settimer(vortex_t * vortex, int period) { //set the timer period to <period> 48000ths of a second. 
hwwrite(vortex->mmio, VORTEX_IRQ_STAT, period); } #if 0 static void vortex_enable_timer_int(vortex_t * card) { hwwrite(card->mmio, VORTEX_IRQ_CTRL, hwread(card->mmio, VORTEX_IRQ_CTRL) | IRQ_TIMER | 0x60); } static void vortex_disable_timer_int(vortex_t * card) { hwwrite(card->mmio, VORTEX_IRQ_CTRL, hwread(card->mmio, VORTEX_IRQ_CTRL) & ~IRQ_TIMER); } #endif static void vortex_enable_int(vortex_t * card) { // CAsp4ISR__EnableVortexInt_void_ hwwrite(card->mmio, VORTEX_CTRL, hwread(card->mmio, VORTEX_CTRL) | CTRL_IRQ_ENABLE); hwwrite(card->mmio, VORTEX_IRQ_CTRL, (hwread(card->mmio, VORTEX_IRQ_CTRL) & 0xffffefc0) | 0x24); } static void vortex_disable_int(vortex_t * card) { hwwrite(card->mmio, VORTEX_CTRL, hwread(card->mmio, VORTEX_CTRL) & ~CTRL_IRQ_ENABLE); } static irqreturn_t vortex_interrupt(int irq, void *dev_id) { vortex_t *vortex = dev_id; int i, handled; u32 source; //check if the interrupt is ours. if (!(hwread(vortex->mmio, VORTEX_STAT) & 0x1)) return IRQ_NONE; // This is the Interrupt Enable flag we set before (consistency check). if (!(hwread(vortex->mmio, VORTEX_CTRL) & CTRL_IRQ_ENABLE)) return IRQ_NONE; source = hwread(vortex->mmio, VORTEX_IRQ_SOURCE); // Reset IRQ flags. hwwrite(vortex->mmio, VORTEX_IRQ_SOURCE, source); hwread(vortex->mmio, VORTEX_IRQ_SOURCE); // Is at least one IRQ flag set? if (source == 0) { printk(KERN_ERR "vortex: missing irq source\n"); return IRQ_NONE; } handled = 0; // Attend every interrupt source. if (unlikely(source & IRQ_ERR_MASK)) { if (source & IRQ_FATAL) { printk(KERN_ERR "vortex: IRQ fatal error\n"); } if (source & IRQ_PARITY) { printk(KERN_ERR "vortex: IRQ parity error\n"); } if (source & IRQ_REG) { printk(KERN_ERR "vortex: IRQ reg error\n"); } if (source & IRQ_FIFO) { printk(KERN_ERR "vortex: IRQ fifo error\n"); } if (source & IRQ_DMA) { printk(KERN_ERR "vortex: IRQ dma error\n"); } handled = 1; } if (source & IRQ_PCMOUT) { /* ALSA period acknowledge. 
*/ spin_lock(&vortex->lock); for (i = 0; i < NR_ADB; i++) { if (vortex->dma_adb[i].fifo_status == FIFO_START) { if (!vortex_adbdma_bufshift(vortex, i)) continue; spin_unlock(&vortex->lock); snd_pcm_period_elapsed(vortex->dma_adb[i]. substream); spin_lock(&vortex->lock); } } #ifndef CHIP_AU8810 for (i = 0; i < NR_WT; i++) { if (vortex->dma_wt[i].fifo_status == FIFO_START) { if (vortex_wtdma_bufshift(vortex, i)) ; spin_unlock(&vortex->lock); snd_pcm_period_elapsed(vortex->dma_wt[i]. substream); spin_lock(&vortex->lock); } } #endif spin_unlock(&vortex->lock); handled = 1; } //Acknowledge the Timer interrupt if (source & IRQ_TIMER) { hwread(vortex->mmio, VORTEX_IRQ_STAT); handled = 1; } if (source & IRQ_MIDI) { snd_mpu401_uart_interrupt(vortex->irq, vortex->rmidi->private_data); handled = 1; } if (!handled) { printk(KERN_ERR "vortex: unknown irq source %x\n", source); } return IRQ_RETVAL(handled); } /* Codec */ #define POLL_COUNT 1000 static void vortex_codec_init(vortex_t * vortex) { int i; for (i = 0; i < 32; i++) { /* the windows driver writes -i, so we write -i */ hwwrite(vortex->mmio, (VORTEX_CODEC_CHN + (i << 2)), -i); msleep(2); } if (0) { hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x8068); msleep(1); hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x00e8); msleep(1); } else { hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x00a8); msleep(2); hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x80a8); msleep(2); hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x80e8); msleep(2); hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x80a8); msleep(2); hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x00a8); msleep(2); hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x00e8); } for (i = 0; i < 32; i++) { hwwrite(vortex->mmio, (VORTEX_CODEC_CHN + (i << 2)), -i); msleep(5); } hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0xe8); msleep(1); /* Enable codec channels 0 and 1. 
*/ hwwrite(vortex->mmio, VORTEX_CODEC_EN, hwread(vortex->mmio, VORTEX_CODEC_EN) | EN_CODEC); } static void vortex_codec_write(struct snd_ac97 * codec, unsigned short addr, unsigned short data) { vortex_t *card = (vortex_t *) codec->private_data; unsigned int lifeboat = 0; /* wait for transactions to clear */ while (!(hwread(card->mmio, VORTEX_CODEC_CTRL) & 0x100)) { udelay(100); if (lifeboat++ > POLL_COUNT) { printk(KERN_ERR "vortex: ac97 codec stuck busy\n"); return; } } /* write register */ hwwrite(card->mmio, VORTEX_CODEC_IO, ((addr << VORTEX_CODEC_ADDSHIFT) & VORTEX_CODEC_ADDMASK) | ((data << VORTEX_CODEC_DATSHIFT) & VORTEX_CODEC_DATMASK) | VORTEX_CODEC_WRITE | (codec->num << VORTEX_CODEC_ID_SHIFT) ); /* Flush Caches. */ hwread(card->mmio, VORTEX_CODEC_IO); } static unsigned short vortex_codec_read(struct snd_ac97 * codec, unsigned short addr) { vortex_t *card = (vortex_t *) codec->private_data; u32 read_addr, data; unsigned lifeboat = 0; /* wait for transactions to clear */ while (!(hwread(card->mmio, VORTEX_CODEC_CTRL) & 0x100)) { udelay(100); if (lifeboat++ > POLL_COUNT) { printk(KERN_ERR "vortex: ac97 codec stuck busy\n"); return 0xffff; } } /* set up read address */ read_addr = ((addr << VORTEX_CODEC_ADDSHIFT) & VORTEX_CODEC_ADDMASK) | (codec->num << VORTEX_CODEC_ID_SHIFT) ; hwwrite(card->mmio, VORTEX_CODEC_IO, read_addr); /* wait for address */ do { udelay(100); data = hwread(card->mmio, VORTEX_CODEC_IO); if (lifeboat++ > POLL_COUNT) { printk(KERN_ERR "vortex: ac97 address never arrived\n"); return 0xffff; } } while ((data & VORTEX_CODEC_ADDMASK) != (addr << VORTEX_CODEC_ADDSHIFT)); /* return data. 
*/ return (u16) (data & VORTEX_CODEC_DATMASK); } /* SPDIF support */ static void vortex_spdif_init(vortex_t * vortex, int spdif_sr, int spdif_mode) { int i, this_38 = 0, this_04 = 0, this_08 = 0, this_0c = 0; /* CAsp4Spdif::InitializeSpdifHardware(void) */ hwwrite(vortex->mmio, VORTEX_SPDIF_FLAGS, hwread(vortex->mmio, VORTEX_SPDIF_FLAGS) & 0xfff3fffd); //for (i=0x291D4; i<0x29200; i+=4) for (i = 0; i < 11; i++) hwwrite(vortex->mmio, VORTEX_SPDIF_CFG1 + (i << 2), 0); //hwwrite(vortex->mmio, 0x29190, hwread(vortex->mmio, 0x29190) | 0xc0000); hwwrite(vortex->mmio, VORTEX_CODEC_EN, hwread(vortex->mmio, VORTEX_CODEC_EN) | EN_SPDIF); /* CAsp4Spdif::ProgramSRCInHardware(enum SPDIF_SR,enum SPDIFMODE) */ if (this_04 && this_08) { int edi; i = (((0x5DC00000 / spdif_sr) + 1) >> 1); if (i > 0x800) { if (i < 0x1ffff) edi = (i >> 1); else edi = 0x1ffff; } else { i = edi = 0x800; } /* this_04 and this_08 are the CASp4Src's (samplerate converters) */ vortex_src_setupchannel(vortex, this_04, edi, 0, 1, this_0c, 1, 0, edi, 1); vortex_src_setupchannel(vortex, this_08, edi, 0, 1, this_0c, 1, 0, edi, 1); } i = spdif_sr; spdif_sr |= 0x8c; switch (i) { case 32000: this_38 &= 0xFFFFFFFE; this_38 &= 0xFFFFFFFD; this_38 &= 0xF3FFFFFF; this_38 |= 0x03000000; /* set 32khz samplerate */ this_38 &= 0xFFFFFF3F; spdif_sr &= 0xFFFFFFFD; spdif_sr |= 1; break; case 44100: this_38 &= 0xFFFFFFFE; this_38 &= 0xFFFFFFFD; this_38 &= 0xF0FFFFFF; this_38 |= 0x03000000; this_38 &= 0xFFFFFF3F; spdif_sr &= 0xFFFFFFFC; break; case 48000: if (spdif_mode == 1) { this_38 &= 0xFFFFFFFE; this_38 &= 0xFFFFFFFD; this_38 &= 0xF2FFFFFF; this_38 |= 0x02000000; /* set 48khz samplerate */ this_38 &= 0xFFFFFF3F; } else { /* J. Gordon Wolfe: I think this stuff is for AC3 */ this_38 |= 0x00000003; this_38 &= 0xFFFFFFBF; this_38 |= 0x80; } spdif_sr |= 2; spdif_sr &= 0xFFFFFFFE; break; } /* looks like the next 2 lines transfer a 16-bit value into 2 8-bit registers. 
seems to be for the standard IEC/SPDIF initialization stuff */ hwwrite(vortex->mmio, VORTEX_SPDIF_CFG0, this_38 & 0xffff); hwwrite(vortex->mmio, VORTEX_SPDIF_CFG1, this_38 >> 0x10); hwwrite(vortex->mmio, VORTEX_SPDIF_SMPRATE, spdif_sr); } /* Initialization */ static int __devinit vortex_core_init(vortex_t * vortex) { printk(KERN_INFO "Vortex: init.... "); /* Hardware Init. */ hwwrite(vortex->mmio, VORTEX_CTRL, 0xffffffff); msleep(5); hwwrite(vortex->mmio, VORTEX_CTRL, hwread(vortex->mmio, VORTEX_CTRL) & 0xffdfffff); msleep(5); /* Reset IRQ flags */ hwwrite(vortex->mmio, VORTEX_IRQ_SOURCE, 0xffffffff); hwread(vortex->mmio, VORTEX_IRQ_STAT); vortex_codec_init(vortex); #ifdef CHIP_AU8830 hwwrite(vortex->mmio, VORTEX_CTRL, hwread(vortex->mmio, VORTEX_CTRL) | 0x1000000); #endif /* Init audio engine. */ vortex_adbdma_init(vortex); hwwrite(vortex->mmio, VORTEX_ENGINE_CTRL, 0x0); //, 0xc83c7e58, 0xc5f93e58 vortex_adb_init(vortex); /* Init processing blocks. */ vortex_fifo_init(vortex); vortex_mixer_init(vortex); vortex_srcblock_init(vortex); #ifndef CHIP_AU8820 vortex_eq_init(vortex); vortex_spdif_init(vortex, 48000, 1); vortex_Vort3D_enable(vortex); #endif #ifndef CHIP_AU8810 vortex_wt_init(vortex); #endif // Moved to au88x0.c //vortex_connect_default(vortex, 1); vortex_settimer(vortex, 0x90); // Enable Interrupts. // vortex_enable_int() must be first !! // hwwrite(vortex->mmio, VORTEX_IRQ_CTRL, 0); // vortex_enable_int(vortex); //vortex_enable_timer_int(vortex); //vortex_disable_timer_int(vortex); printk(KERN_INFO "done.\n"); spin_lock_init(&vortex->lock); return 0; } static int vortex_core_shutdown(vortex_t * vortex) { printk(KERN_INFO "Vortex: shutdown..."); #ifndef CHIP_AU8820 vortex_eq_free(vortex); vortex_Vort3D_disable(vortex); #endif //vortex_disable_timer_int(vortex); vortex_disable_int(vortex); vortex_connect_default(vortex, 0); /* Reset all DMA fifos. */ vortex_fifo_init(vortex); /* Erase all audio routes. 
*/ vortex_adb_init(vortex); /* Disable MPU401 */ //hwwrite(vortex->mmio, VORTEX_IRQ_CTRL, hwread(vortex->mmio, VORTEX_IRQ_CTRL) & ~IRQ_MIDI); //hwwrite(vortex->mmio, VORTEX_CTRL, hwread(vortex->mmio, VORTEX_CTRL) & ~CTRL_MIDI_EN); hwwrite(vortex->mmio, VORTEX_IRQ_CTRL, 0); hwwrite(vortex->mmio, VORTEX_CTRL, 0); msleep(5); hwwrite(vortex->mmio, VORTEX_IRQ_SOURCE, 0xffff); printk(KERN_INFO "done.\n"); return 0; } /* Alsa support. */ static int vortex_alsafmt_aspfmt(int alsafmt) { int fmt; switch (alsafmt) { case SNDRV_PCM_FORMAT_U8: fmt = 0x1; break; case SNDRV_PCM_FORMAT_MU_LAW: fmt = 0x2; break; case SNDRV_PCM_FORMAT_A_LAW: fmt = 0x3; break; case SNDRV_PCM_FORMAT_SPECIAL: fmt = 0x4; /* guess. */ break; case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE: fmt = 0x5; /* guess. */ break; case SNDRV_PCM_FORMAT_S16_LE: fmt = 0x8; break; case SNDRV_PCM_FORMAT_S16_BE: fmt = 0x9; /* check this... */ break; default: fmt = 0x8; printk(KERN_ERR "vortex: format unsupported %d\n", alsafmt); break; } return fmt; } /* Some not yet useful translations. */ #if 0 typedef enum { ASPFMTLINEAR16 = 0, /* 0x8 */ ASPFMTLINEAR8, /* 0x1 */ ASPFMTULAW, /* 0x2 */ ASPFMTALAW, /* 0x3 */ ASPFMTSPORT, /* ? */ ASPFMTSPDIF, /* ? */ } ASPENCODING; static int vortex_translateformat(vortex_t * vortex, char bits, char nch, int encod) { int a, this_194; if ((bits != 8) && (bits != 16)) return -1; switch (encod) { case 0: if (bits == 0x10) a = 8; // 16 bit break; case 1: if (bits == 8) a = 1; // 8 bit break; case 2: a = 2; // U_LAW break; case 3: a = 3; // A_LAW break; } switch (nch) { case 1: this_194 = 0; break; case 2: this_194 = 1; break; case 4: this_194 = 1; break; case 6: this_194 = 1; break; } return (a); } static void vortex_cdmacore_setformat(vortex_t * vortex, int bits, int nch) { short int d, this_148; d = ((bits >> 3) * nch); this_148 = 0xbb80 / d; } #endif
gpl-2.0
YU-N/android_kernel_cyanogen_msm8916
drivers/net/wireless/libertas_tf/if_usb.c
3608
23354
/* * Copyright (C) 2008, cozybit Inc. * Copyright (C) 2003-2006, Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. */ #define DRV_NAME "lbtf_usb" #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "libertas_tf.h" #include "if_usb.h" #include <linux/delay.h> #include <linux/module.h> #include <linux/firmware.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/usb.h> #define INSANEDEBUG 0 #define lbtf_deb_usb2(...) do { if (INSANEDEBUG) lbtf_deb_usbd(__VA_ARGS__); } while (0) #define MESSAGE_HEADER_LEN 4 static char *lbtf_fw_name = "lbtf_usb.bin"; module_param_named(fw_name, lbtf_fw_name, charp, 0644); MODULE_FIRMWARE("lbtf_usb.bin"); static struct usb_device_id if_usb_table[] = { /* Enter the device signature inside */ { USB_DEVICE(0x1286, 0x2001) }, { USB_DEVICE(0x05a3, 0x8388) }, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, if_usb_table); static void if_usb_receive(struct urb *urb); static void if_usb_receive_fwload(struct urb *urb); static int if_usb_prog_firmware(struct if_usb_card *cardp); static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type, uint8_t *payload, uint16_t nb); static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, uint16_t nb, u8 data); static void if_usb_free(struct if_usb_card *cardp); static int if_usb_submit_rx_urb(struct if_usb_card *cardp); static int if_usb_reset_device(struct if_usb_card *cardp); /** * if_usb_wrike_bulk_callback - call back to handle URB status * * @param urb pointer to urb structure */ static void if_usb_write_bulk_callback(struct urb *urb) { if (urb->status != 0) { /* print the failure status number for debug */ pr_info("URB in failure status: %d\n", urb->status); } else { lbtf_deb_usb2(&urb->dev->dev, "URB status is successful\n"); 
lbtf_deb_usb2(&urb->dev->dev, "Actual length transmitted %d\n", urb->actual_length); } } /** * if_usb_free - free tx/rx urb, skb and rx buffer * * @param cardp pointer if_usb_card */ static void if_usb_free(struct if_usb_card *cardp) { lbtf_deb_enter(LBTF_DEB_USB); /* Unlink tx & rx urb */ usb_kill_urb(cardp->tx_urb); usb_kill_urb(cardp->rx_urb); usb_kill_urb(cardp->cmd_urb); usb_free_urb(cardp->tx_urb); cardp->tx_urb = NULL; usb_free_urb(cardp->rx_urb); cardp->rx_urb = NULL; usb_free_urb(cardp->cmd_urb); cardp->cmd_urb = NULL; kfree(cardp->ep_out_buf); cardp->ep_out_buf = NULL; lbtf_deb_leave(LBTF_DEB_USB); } static void if_usb_setup_firmware(struct lbtf_private *priv) { struct if_usb_card *cardp = priv->card; struct cmd_ds_set_boot2_ver b2_cmd; lbtf_deb_enter(LBTF_DEB_USB); if_usb_submit_rx_urb(cardp); b2_cmd.hdr.size = cpu_to_le16(sizeof(b2_cmd)); b2_cmd.action = 0; b2_cmd.version = cardp->boot2_version; if (lbtf_cmd_with_response(priv, CMD_SET_BOOT2_VER, &b2_cmd)) lbtf_deb_usb("Setting boot2 version failed\n"); lbtf_deb_leave(LBTF_DEB_USB); } static void if_usb_fw_timeo(unsigned long priv) { struct if_usb_card *cardp = (void *)priv; lbtf_deb_enter(LBTF_DEB_USB); if (!cardp->fwdnldover) { /* Download timed out */ cardp->priv->surpriseremoved = 1; pr_err("Download timed out\n"); } else { lbtf_deb_usb("Download complete, no event. 
Assuming success\n"); } wake_up(&cardp->fw_wq); lbtf_deb_leave(LBTF_DEB_USB); } /** * if_usb_probe - sets the configuration values * * @ifnum interface number * @id pointer to usb_device_id * * Returns: 0 on success, error code on failure */ static int if_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; struct lbtf_private *priv; struct if_usb_card *cardp; int i; lbtf_deb_enter(LBTF_DEB_USB); udev = interface_to_usbdev(intf); cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL); if (!cardp) goto error; setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp); init_waitqueue_head(&cardp->fw_wq); cardp->udev = udev; iface_desc = intf->cur_altsetting; lbtf_deb_usbd(&udev->dev, "bcdUSB = 0x%X bDeviceClass = 0x%X" " bDeviceSubClass = 0x%X, bDeviceProtocol = 0x%X\n", le16_to_cpu(udev->descriptor.bcdUSB), udev->descriptor.bDeviceClass, udev->descriptor.bDeviceSubClass, udev->descriptor.bDeviceProtocol); for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (usb_endpoint_is_bulk_in(endpoint)) { cardp->ep_in_size = le16_to_cpu(endpoint->wMaxPacketSize); cardp->ep_in = usb_endpoint_num(endpoint); lbtf_deb_usbd(&udev->dev, "in_endpoint = %d\n", cardp->ep_in); lbtf_deb_usbd(&udev->dev, "Bulk in size is %d\n", cardp->ep_in_size); } else if (usb_endpoint_is_bulk_out(endpoint)) { cardp->ep_out_size = le16_to_cpu(endpoint->wMaxPacketSize); cardp->ep_out = usb_endpoint_num(endpoint); lbtf_deb_usbd(&udev->dev, "out_endpoint = %d\n", cardp->ep_out); lbtf_deb_usbd(&udev->dev, "Bulk out size is %d\n", cardp->ep_out_size); } } if (!cardp->ep_out_size || !cardp->ep_in_size) { lbtf_deb_usbd(&udev->dev, "Endpoints not found\n"); /* Endpoints not found */ goto dealloc; } cardp->rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!cardp->rx_urb) { lbtf_deb_usbd(&udev->dev, "Rx URB allocation failed\n"); goto 
dealloc; } cardp->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!cardp->tx_urb) { lbtf_deb_usbd(&udev->dev, "Tx URB allocation failed\n"); goto dealloc; } cardp->cmd_urb = usb_alloc_urb(0, GFP_KERNEL); if (!cardp->cmd_urb) { lbtf_deb_usbd(&udev->dev, "Cmd URB allocation failed\n"); goto dealloc; } cardp->ep_out_buf = kmalloc(MRVDRV_ETH_TX_PACKET_BUFFER_SIZE, GFP_KERNEL); if (!cardp->ep_out_buf) { lbtf_deb_usbd(&udev->dev, "Could not allocate buffer\n"); goto dealloc; } priv = lbtf_add_card(cardp, &udev->dev); if (!priv) goto dealloc; cardp->priv = priv; priv->hw_host_to_card = if_usb_host_to_card; priv->hw_prog_firmware = if_usb_prog_firmware; priv->hw_reset_device = if_usb_reset_device; cardp->boot2_version = udev->descriptor.bcdDevice; usb_get_dev(udev); usb_set_intfdata(intf, cardp); return 0; dealloc: if_usb_free(cardp); error: lbtf_deb_leave(LBTF_DEB_MAIN); return -ENOMEM; } /** * if_usb_disconnect - free resource and cleanup * * @intf USB interface structure */ static void if_usb_disconnect(struct usb_interface *intf) { struct if_usb_card *cardp = usb_get_intfdata(intf); struct lbtf_private *priv = cardp->priv; lbtf_deb_enter(LBTF_DEB_MAIN); if_usb_reset_device(cardp); if (priv) lbtf_remove_card(priv); /* Unlink and free urb */ if_usb_free(cardp); usb_set_intfdata(intf, NULL); usb_put_dev(interface_to_usbdev(intf)); lbtf_deb_leave(LBTF_DEB_MAIN); } /** * if_usb_send_fw_pkt - This function downloads the FW * * @priv pointer to struct lbtf_private * * Returns: 0 */ static int if_usb_send_fw_pkt(struct if_usb_card *cardp) { struct fwdata *fwdata = cardp->ep_out_buf; u8 *firmware = (u8 *) cardp->fw->data; lbtf_deb_enter(LBTF_DEB_FW); /* If we got a CRC failure on the last block, back up and retry it */ if (!cardp->CRC_OK) { cardp->totalbytes = cardp->fwlastblksent; cardp->fwseqnum--; } lbtf_deb_usb2(&cardp->udev->dev, "totalbytes = %d\n", cardp->totalbytes); /* struct fwdata (which we sent to the card) has an extra __le32 field in between the header and the data, 
which is not in the struct fwheader in the actual firmware binary. Insert the seqnum in the middle... */ memcpy(&fwdata->hdr, &firmware[cardp->totalbytes], sizeof(struct fwheader)); cardp->fwlastblksent = cardp->totalbytes; cardp->totalbytes += sizeof(struct fwheader); memcpy(fwdata->data, &firmware[cardp->totalbytes], le32_to_cpu(fwdata->hdr.datalength)); lbtf_deb_usb2(&cardp->udev->dev, "Data length = %d\n", le32_to_cpu(fwdata->hdr.datalength)); fwdata->seqnum = cpu_to_le32(++cardp->fwseqnum); cardp->totalbytes += le32_to_cpu(fwdata->hdr.datalength); usb_tx_block(cardp, cardp->ep_out_buf, sizeof(struct fwdata) + le32_to_cpu(fwdata->hdr.datalength), 0); if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_DATA_TO_RECV)) { lbtf_deb_usb2(&cardp->udev->dev, "There are data to follow\n"); lbtf_deb_usb2(&cardp->udev->dev, "seqnum = %d totalbytes = %d\n", cardp->fwseqnum, cardp->totalbytes); } else if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) { lbtf_deb_usb2(&cardp->udev->dev, "Host has finished FW downloading\n"); lbtf_deb_usb2(&cardp->udev->dev, "Donwloading FW JUMP BLOCK\n"); /* Host has finished FW downloading * Donwloading FW JUMP BLOCK */ cardp->fwfinalblk = 1; } lbtf_deb_usb2(&cardp->udev->dev, "Firmware download done; size %d\n", cardp->totalbytes); lbtf_deb_leave(LBTF_DEB_FW); return 0; } static int if_usb_reset_device(struct if_usb_card *cardp) { struct cmd_ds_802_11_reset *cmd = cardp->ep_out_buf + 4; int ret; lbtf_deb_enter(LBTF_DEB_USB); *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST); cmd->hdr.command = cpu_to_le16(CMD_802_11_RESET); cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_802_11_reset)); cmd->hdr.result = cpu_to_le16(0); cmd->hdr.seqnum = cpu_to_le16(0x5a5a); cmd->action = cpu_to_le16(CMD_ACT_HALT); usb_tx_block(cardp, cardp->ep_out_buf, 4 + sizeof(struct cmd_ds_802_11_reset), 0); msleep(100); ret = usb_reset_device(cardp->udev); msleep(100); lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret); return ret; } 
EXPORT_SYMBOL_GPL(if_usb_reset_device);

/**
 * usb_tx_block - transfer data to the device
 *
 * @cardp	pointer to struct if_usb_card
 * @payload	pointer to payload data
 * @nb		data length
 * @data	non-zero for data, zero for commands
 *
 * Returns: 0 on success, nonzero otherwise.
 */
static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
			uint16_t nb, u8 data)
{
	int ret = -1;
	struct urb *urb;

	lbtf_deb_enter(LBTF_DEB_USB);
	/* check if device is removed */
	if (cardp->priv->surpriseremoved) {
		lbtf_deb_usbd(&cardp->udev->dev, "Device removed\n");
		goto tx_ret;
	}

	/* Data frames and commands travel on separate pre-allocated URBs */
	if (data)
		urb = cardp->tx_urb;
	else
		urb = cardp->cmd_urb;

	usb_fill_bulk_urb(urb, cardp->udev,
			  usb_sndbulkpipe(cardp->udev, cardp->ep_out),
			  payload, nb, if_usb_write_bulk_callback, cardp);

	urb->transfer_flags |= URB_ZERO_PACKET;

	if (usb_submit_urb(urb, GFP_ATOMIC)) {
		lbtf_deb_usbd(&cardp->udev->dev,
			      "usb_submit_urb failed: %d\n", ret);
		goto tx_ret;
	}

	lbtf_deb_usb2(&cardp->udev->dev, "usb_submit_urb success\n");
	ret = 0;

tx_ret:
	lbtf_deb_leave(LBTF_DEB_USB);
	return ret;
}

/* Allocate an rx skb and (re)submit the bulk-IN URB with the given
 * completion handler.  Returns 0 on success, -1 on failure. */
static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
				  void (*callbackfn)(struct urb *urb))
{
	struct sk_buff *skb;
	int ret = -1;

	lbtf_deb_enter(LBTF_DEB_USB);

	skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
	if (!skb) {
		pr_err("No free skb\n");
		lbtf_deb_leave(LBTF_DEB_USB);
		return -1;
	}

	cardp->rx_skb = skb;

	/* Fill the receive configuration URB and initialise the Rx call back */
	usb_fill_bulk_urb(cardp->rx_urb, cardp->udev,
			  usb_rcvbulkpipe(cardp->udev, cardp->ep_in),
			  skb_tail_pointer(skb),
			  MRVDRV_ETH_RX_PACKET_BUFFER_SIZE,
			  callbackfn, cardp);

	cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;

	lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n",
		      cardp->rx_urb);
	ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC);
	if (ret) {
		lbtf_deb_usbd(&cardp->udev->dev,
			      "Submit Rx URB failed: %d\n", ret);
		kfree_skb(skb);
		cardp->rx_skb = NULL;
		lbtf_deb_leave(LBTF_DEB_USB);
		return -1;
	} else {
		lbtf_deb_usb2(&cardp->udev->dev, "Submit Rx URB success\n");
		lbtf_deb_leave(LBTF_DEB_USB);
		return 0;
	}
}

/* Rx submission used while the firmware is still being downloaded */
static int if_usb_submit_rx_urb_fwload(struct if_usb_card *cardp)
{
	return __if_usb_submit_rx_urb(cardp, &if_usb_receive_fwload);
}

/* Rx submission used during normal operation */
static int if_usb_submit_rx_urb(struct if_usb_card *cardp)
{
	return __if_usb_submit_rx_urb(cardp, &if_usb_receive);
}

/* Bulk-IN completion handler during firmware download: handles the
 * boot-command response, per-block CRC sync headers and the final
 * "firmware ready" event. */
static void if_usb_receive_fwload(struct urb *urb)
{
	struct if_usb_card *cardp = urb->context;
	struct sk_buff *skb = cardp->rx_skb;
	struct fwsyncheader *syncfwheader;
	struct bootcmdresp bcmdresp;

	lbtf_deb_enter(LBTF_DEB_USB);
	if (urb->status) {
		lbtf_deb_usbd(&cardp->udev->dev,
			      "URB status is failed during fw load\n");
		kfree_skb(skb);
		lbtf_deb_leave(LBTF_DEB_USB);
		return;
	}

	if (cardp->fwdnldover) {
		__le32 *tmp = (__le32 *)(skb->data);

		if (tmp[0] == cpu_to_le32(CMD_TYPE_INDICATION) &&
		    tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY)) {
			/* Firmware ready event received */
			pr_info("Firmware ready event received\n");
			wake_up(&cardp->fw_wq);
		} else {
			lbtf_deb_usb("Waiting for confirmation; got %x %x\n",
				     le32_to_cpu(tmp[0]),
				     le32_to_cpu(tmp[1]));
			if_usb_submit_rx_urb_fwload(cardp);
		}
		kfree_skb(skb);
		lbtf_deb_leave(LBTF_DEB_USB);
		return;
	}
	if (cardp->bootcmdresp <= 0) {
		memcpy(&bcmdresp, skb->data, sizeof(bcmdresp));
		/* Old Boot2 versions send no proper response; just accept */
		if (le16_to_cpu(cardp->udev->descriptor.bcdDevice) < 0x3106) {
			kfree_skb(skb);
			if_usb_submit_rx_urb_fwload(cardp);
			cardp->bootcmdresp = 1;
			/* Received valid boot command response */
			lbtf_deb_usbd(&cardp->udev->dev,
				      "Received valid boot command response\n");
			lbtf_deb_leave(LBTF_DEB_USB);
			return;
		}
		if (bcmdresp.magic != cpu_to_le32(BOOT_CMD_MAGIC_NUMBER)) {
			if (bcmdresp.magic == cpu_to_le32(CMD_TYPE_REQUEST) ||
			    bcmdresp.magic == cpu_to_le32(CMD_TYPE_DATA) ||
			    bcmdresp.magic ==
				cpu_to_le32(CMD_TYPE_INDICATION)) {
				if (!cardp->bootcmdresp)
					pr_info("Firmware already seems alive; resetting\n");
				cardp->bootcmdresp = -1;
			} else {
				pr_info("boot cmd response wrong magic number (0x%x)\n",
					le32_to_cpu(bcmdresp.magic));
			}
		} else if (bcmdresp.cmd != BOOT_CMD_FW_BY_USB)
{
			pr_info("boot cmd response cmd_tag error (%d)\n",
				bcmdresp.cmd);
		} else if (bcmdresp.result != BOOT_CMD_RESP_OK) {
			pr_info("boot cmd response result error (%d)\n",
				bcmdresp.result);
		} else {
			cardp->bootcmdresp = 1;
			lbtf_deb_usbd(&cardp->udev->dev,
				      "Received valid boot command response\n");
		}

		kfree_skb(skb);
		if_usb_submit_rx_urb_fwload(cardp);
		lbtf_deb_leave(LBTF_DEB_USB);
		return;
	}

	syncfwheader = kmemdup(skb->data, sizeof(struct fwsyncheader),
			       GFP_ATOMIC);
	if (!syncfwheader) {
		lbtf_deb_usbd(&cardp->udev->dev,
			      "Failure to allocate syncfwheader\n");
		kfree_skb(skb);
		lbtf_deb_leave(LBTF_DEB_USB);
		return;
	}

	/* Non-zero cmd in the sync header means the device saw a CRC error
	 * on the last block; if_usb_send_fw_pkt() will back up and resend. */
	if (!syncfwheader->cmd) {
		lbtf_deb_usb2(&cardp->udev->dev,
			      "FW received Blk with correct CRC\n");
		lbtf_deb_usb2(&cardp->udev->dev,
			      "FW received Blk seqnum = %d\n",
			      le32_to_cpu(syncfwheader->seqnum));
		cardp->CRC_OK = 1;
	} else {
		lbtf_deb_usbd(&cardp->udev->dev,
			      "FW received Blk with CRC error\n");
		cardp->CRC_OK = 0;
	}

	kfree_skb(skb);

	/* reschedule timer for 200ms hence */
	mod_timer(&cardp->fw_timeout, jiffies + (HZ/5));

	if (cardp->fwfinalblk) {
		cardp->fwdnldover = 1;
		goto exit;
	}

	if_usb_send_fw_pkt(cardp);

exit:
	if_usb_submit_rx_urb_fwload(cardp);

	kfree(syncfwheader);

	lbtf_deb_leave(LBTF_DEB_USB);
}

#define MRVDRV_MIN_PKT_LEN	30

/* Validate length bounds, strip the message header, and hand the skb to
 * the rx path.  Consumes the skb either way. */
static inline void process_cmdtypedata(int recvlength, struct sk_buff *skb,
				       struct if_usb_card *cardp,
				       struct lbtf_private *priv)
{
	if (recvlength > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + MESSAGE_HEADER_LEN
	    || recvlength < MRVDRV_MIN_PKT_LEN) {
		lbtf_deb_usbd(&cardp->udev->dev, "Packet length is Invalid\n");
		kfree_skb(skb);
		return;
	}

	skb_put(skb, recvlength);
	skb_pull(skb, MESSAGE_HEADER_LEN);
	lbtf_rx(priv, skb);
}

/* Copy a command response into the driver's response buffer (under
 * driver_lock) and notify the command machinery.  Consumes the skb. */
static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
				      struct sk_buff *skb,
				      struct if_usb_card *cardp,
				      struct lbtf_private *priv)
{
	if (recvlength > LBS_CMD_BUFFER_SIZE) {
		lbtf_deb_usbd(&cardp->udev->dev,
			      "The receive buffer is too large\n");
		kfree_skb(skb);
		return;
	}

	/* runs from URB completion, i.e. interrupt context */
	BUG_ON(!in_interrupt());

	spin_lock(&priv->driver_lock);
	memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN,
	       recvlength - MESSAGE_HEADER_LEN);
	kfree_skb(skb);
	lbtf_cmd_response_rx(priv);
	spin_unlock(&priv->driver_lock);
}

/**
 * if_usb_receive - read data received from the device.
 *
 * @urb	pointer to struct urb
 */
static void if_usb_receive(struct urb *urb)
{
	struct if_usb_card *cardp = urb->context;
	struct sk_buff *skb = cardp->rx_skb;
	struct lbtf_private *priv = cardp->priv;
	int recvlength = urb->actual_length;
	uint8_t *recvbuff = NULL;
	uint32_t recvtype = 0;
	__le32 *pkt = (__le32 *) skb->data;

	lbtf_deb_enter(LBTF_DEB_USB);

	if (recvlength) {
		if (urb->status) {
			lbtf_deb_usbd(&cardp->udev->dev, "RX URB failed: %d\n",
				      urb->status);
			kfree_skb(skb);
			goto setup_for_next;
		}

		recvbuff = skb->data;
		recvtype = le32_to_cpu(pkt[0]);
		lbtf_deb_usbd(&cardp->udev->dev,
			      "Recv length = 0x%x, Recv type = 0x%X\n",
			      recvlength, recvtype);
	} else if (urb->status) {
		kfree_skb(skb);
		lbtf_deb_leave(LBTF_DEB_USB);
		return;
	}

	switch (recvtype) {
	case CMD_TYPE_DATA:
		process_cmdtypedata(recvlength, skb, cardp, priv);
		break;

	case CMD_TYPE_REQUEST:
		process_cmdrequest(recvlength, recvbuff, skb, cardp, priv);
		break;

	case CMD_TYPE_INDICATION:
	{
		/* Event cause handling */
		u32 event_cause = le32_to_cpu(pkt[1]);

		lbtf_deb_usbd(&cardp->udev->dev, "**EVENT** 0x%X\n",
			      event_cause);

		/* Icky undocumented magic special case */
		if (event_cause & 0xffff0000) {
			u16 tmp;
			u8 retrycnt;
			u8 failure;

			tmp = event_cause >> 16;
			retrycnt = tmp & 0x00ff;
			failure = (tmp & 0xff00) >> 8;
			lbtf_send_tx_feedback(priv, retrycnt, failure);
		} else if (event_cause == LBTF_EVENT_BCN_SENT)
			lbtf_bcn_sent(priv);
		else
			lbtf_deb_usbd(&cardp->udev->dev,
				      "Unsupported notification %d received\n",
				      event_cause);
		kfree_skb(skb);
		break;
	}
	default:
		lbtf_deb_usbd(&cardp->udev->dev,
			      "libertastf: unknown command type 0x%X\n",
			      recvtype);
		kfree_skb(skb);
		break;
	}

setup_for_next:
	if_usb_submit_rx_urb(cardp);
	lbtf_deb_leave(LBTF_DEB_USB);
}

/**
 * if_usb_host_to_card -
Download data to the device
 *
 * @priv	pointer to struct lbtf_private structure
 * @type	type of data (MVMS_CMD for commands, anything else is data)
 * @payload	pointer to data buffer
 * @nb		number of bytes
 *
 * Returns: 0 on success, nonzero otherwise
 */
static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type,
			       uint8_t *payload, uint16_t nb)
{
	struct if_usb_card *cardp = priv->card;
	u8 data = 0;

	lbtf_deb_usbd(&cardp->udev->dev, "*** type = %u\n", type);
	lbtf_deb_usbd(&cardp->udev->dev, "size after = %d\n", nb);

	/* First __le32 of the out buffer tags the stream type */
	if (type == MVMS_CMD) {
		*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
	} else {
		*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_DATA);
		data = 1;
	}

	memcpy((cardp->ep_out_buf + MESSAGE_HEADER_LEN), payload, nb);

	return usb_tx_block(cardp, cardp->ep_out_buf,
			    nb + MESSAGE_HEADER_LEN, data);
}

/**
 * if_usb_issue_boot_command - Issue boot command to Boot2.
 *
 * @ivalue	1 boots from FW by USB-Download, 2 boots from FW in EEPROM.
 *
 * Returns: 0
 */
static int if_usb_issue_boot_command(struct if_usb_card *cardp, int ivalue)
{
	struct bootcmd *bootcmd = cardp->ep_out_buf;

	/* Prepare command */
	bootcmd->magic = cpu_to_le32(BOOT_CMD_MAGIC_NUMBER);
	bootcmd->cmd = ivalue;
	memset(bootcmd->pad, 0, sizeof(bootcmd->pad));

	/* Issue command */
	usb_tx_block(cardp, cardp->ep_out_buf, sizeof(*bootcmd), 0);

	return 0;
}

/**
 * check_fwfile_format - Check the validity of Boot2/FW image.
 *
 * @data	pointer to image
 * @totlen	image length
 *
 * Returns: 0 if the image is valid, nonzero otherwise.
 */
static int check_fwfile_format(const u8 *data, u32 totlen)
{
	u32 bincmd, exit;
	u32 blksize, offset, len;
	int ret;

	ret = 1;
	exit = len = 0;

	/* Walk the chain of fwheader blocks until the terminating
	 * FW_HAS_LAST_BLOCK marker (valid) or a malformed entry (invalid). */
	do {
		struct fwheader *fwh = (void *) data;

		bincmd = le32_to_cpu(fwh->dnldcmd);
		blksize = le32_to_cpu(fwh->datalength);
		switch (bincmd) {
		case FW_HAS_DATA_TO_RECV:
			offset = sizeof(struct fwheader) + blksize;
			data += offset;
			len += offset;
			if (len >= totlen)
				exit = 1;
			break;
		case FW_HAS_LAST_BLOCK:
			exit = 1;
			ret = 0;
			break;
		default:
			exit = 1;
			break;
		}
	} while (!exit);

	if (ret)
		pr_err("firmware file format check FAIL\n");
	else
		lbtf_deb_fw("firmware file format check PASS\n");

	return ret;
}

/* Request the firmware image, boot the device from USB download, stream
 * the image block-by-block and wait for the firmware-ready event.
 * Retries via device reset up to (module-lifetime static) reset_count
 * times.  Returns 0 on success, negative on failure. */
static int if_usb_prog_firmware(struct if_usb_card *cardp)
{
	int i = 0;
	static int reset_count = 10;
	int ret = 0;

	lbtf_deb_enter(LBTF_DEB_USB);

	kparam_block_sysfs_write(fw_name);
	ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev);
	if (ret < 0) {
		pr_err("request_firmware() failed with %#x\n", ret);
		pr_err("firmware %s not found\n", lbtf_fw_name);
		kparam_unblock_sysfs_write(fw_name);
		goto done;
	}
	kparam_unblock_sysfs_write(fw_name);

	if (check_fwfile_format(cardp->fw->data, cardp->fw->size))
		goto release_fw;

restart:
	if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
		lbtf_deb_usbd(&cardp->udev->dev, "URB submission is failed\n");
		ret = -1;
		goto release_fw;
	}

	cardp->bootcmdresp = 0;
	do {
		int j = 0;
		i++;
		/* Issue Boot command = 1, Boot from Download-FW */
		if_usb_issue_boot_command(cardp, BOOT_CMD_FW_BY_USB);
		/* wait for command response */
		do {
			j++;
			msleep_interruptible(100);
		} while (cardp->bootcmdresp == 0 && j < 10);
	} while (cardp->bootcmdresp == 0 && i < 5);

	if (cardp->bootcmdresp <= 0) {
		if (--reset_count >= 0) {
			if_usb_reset_device(cardp);
			goto restart;
		}
		/* NOTE(review): this path returns without releasing
		 * cardp->fw — looks like a firmware leak; confirm upstream */
		return -1;
	}

	i = 0;

	/* Reset the download state machine before the first block */
	cardp->totalbytes = 0;
	cardp->fwlastblksent = 0;
	cardp->CRC_OK = 1;
	cardp->fwdnldover = 0;
	cardp->fwseqnum = -1;
	cardp->totalbytes = 0;
	cardp->fwfinalblk = 0;

	/* Send the first firmware packet... */
	if_usb_send_fw_pkt(cardp);

	/* ... and wait for the process to complete */
	wait_event_interruptible(cardp->fw_wq, cardp->priv->surpriseremoved ||
				 cardp->fwdnldover);

	del_timer_sync(&cardp->fw_timeout);
	usb_kill_urb(cardp->rx_urb);

	if (!cardp->fwdnldover) {
		pr_info("failed to load fw, resetting device!\n");
		if (--reset_count >= 0) {
			if_usb_reset_device(cardp);
			goto restart;
		}

		pr_info("FW download failure, time = %d ms\n", i * 100);
		ret = -1;
		goto release_fw;
	}

	cardp->priv->fw_ready = 1;

release_fw:
	release_firmware(cardp->fw);
	cardp->fw = NULL;

	if_usb_setup_firmware(cardp->priv);

done:
	lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret);
	return ret;
}
EXPORT_SYMBOL_GPL(if_usb_prog_firmware);

#define if_usb_suspend NULL
#define if_usb_resume NULL

static struct usb_driver if_usb_driver = {
	.name = DRV_NAME,
	.probe = if_usb_probe,
	.disconnect = if_usb_disconnect,
	.id_table = if_usb_table,
	.suspend = if_usb_suspend,
	.resume = if_usb_resume,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(if_usb_driver);

MODULE_DESCRIPTION("8388 USB WLAN Thinfirm Driver");
MODULE_AUTHOR("Cozybit Inc.");
MODULE_LICENSE("GPL");
gpl-2.0
shambakey1/kernel_sh
arch/arm/mach-msm/clock-debug.c
4120
3039
/* * Copyright (C) 2007 Google, Inc. * Copyright (c) 2007-2010, Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/clk.h> #include "clock.h" static int clock_debug_rate_set(void *data, u64 val) { struct clk *clock = data; int ret; /* Only increases to max rate will succeed, but that's actually good * for debugging purposes so we don't check for error. */ if (clock->flags & CLK_MAX) clk_set_max_rate(clock, val); if (clock->flags & CLK_MIN) ret = clk_set_min_rate(clock, val); else ret = clk_set_rate(clock, val); if (ret != 0) printk(KERN_ERR "clk_set%s_rate failed (%d)\n", (clock->flags & CLK_MIN) ? 
"_min" : "", ret); return ret; } static int clock_debug_rate_get(void *data, u64 *val) { struct clk *clock = data; *val = clk_get_rate(clock); return 0; } DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get, clock_debug_rate_set, "%llu\n"); static int clock_debug_enable_set(void *data, u64 val) { struct clk *clock = data; int rc = 0; if (val) rc = clock->ops->enable(clock->id); else clock->ops->disable(clock->id); return rc; } static int clock_debug_enable_get(void *data, u64 *val) { struct clk *clock = data; *val = clock->ops->is_enabled(clock->id); return 0; } DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get, clock_debug_enable_set, "%llu\n"); static int clock_debug_local_get(void *data, u64 *val) { struct clk *clock = data; *val = clock->ops->is_local(clock->id); return 0; } DEFINE_SIMPLE_ATTRIBUTE(clock_local_fops, clock_debug_local_get, NULL, "%llu\n"); static struct dentry *debugfs_base; int __init clock_debug_init(void) { debugfs_base = debugfs_create_dir("clk", NULL); if (!debugfs_base) return -ENOMEM; return 0; } int __init clock_debug_add(struct clk *clock) { char temp[50], *ptr; struct dentry *clk_dir; if (!debugfs_base) return -ENOMEM; strncpy(temp, clock->dbg_name, ARRAY_SIZE(temp)-1); for (ptr = temp; *ptr; ptr++) *ptr = tolower(*ptr); clk_dir = debugfs_create_dir(temp, debugfs_base); if (!clk_dir) return -ENOMEM; if (!debugfs_create_file("rate", S_IRUGO | S_IWUSR, clk_dir, clock, &clock_rate_fops)) goto error; if (!debugfs_create_file("enable", S_IRUGO | S_IWUSR, clk_dir, clock, &clock_enable_fops)) goto error; if (!debugfs_create_file("is_local", S_IRUGO, clk_dir, clock, &clock_local_fops)) goto error; return 0; error: debugfs_remove_recursive(clk_dir); return -ENOMEM; }
gpl-2.0
OptimusG-Dev-Team/lg-kernel
drivers/staging/et131x/et131x.c
4120
160170
/* * Agere Systems Inc. * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs * * Copyright © 2005 Agere Systems Inc. * All rights reserved. * http://www.agere.com * * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> * *------------------------------------------------------------------------------ * * SOFTWARE LICENSE * * This software is provided subject to the following terms and conditions, * which you should read carefully before using the software. Using this * software indicates your acceptance of these terms and conditions. If you do * not agree with these terms and conditions, do not use the software. * * Copyright © 2005 Agere Systems Inc. * All rights reserved. * * Redistribution and use in source or binary forms, with or without * modifications, are permitted provided that the following conditions are met: * * . Redistributions of source code must retain the above copyright notice, this * list of conditions and the following Disclaimer as comments in the code as * well as in the documentation and/or other materials provided with the * distribution. * * . Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following Disclaimer in the documentation * and/or other materials provided with the distribution. * * . Neither the name of Agere Systems Inc. nor the names of the contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Disclaimer * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. 
OR CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * */ #include <linux/pci.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/delay.h> #include <linux/bitops.h> #include <linux/io.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include <linux/crc32.h> #include <linux/random.h> #include <linux/phy.h> #include "et131x.h" MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>"); MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver " "for the ET1310 by Agere Systems"); /* EEPROM defines */ #define MAX_NUM_REGISTER_POLLS 1000 #define MAX_NUM_WRITE_RETRIES 2 /* MAC defines */ #define COUNTER_WRAP_16_BIT 0x10000 #define COUNTER_WRAP_12_BIT 0x1000 /* PCI defines */ #define INTERNAL_MEM_SIZE 0x400 /* 1024 of internal memory */ #define INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */ /* ISR defines */ /* * For interrupts, normal running is: * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt, * watchdog_interrupt & txdma_xfer_done * * In both cases, when flow control is enabled for either Tx or bi-direction, * we additional enable rx_fbr0_low and rx_fbr1_low, so we know when the * 
buffer rings are running low. */ #define INT_MASK_DISABLE 0xffffffff /* NOTE: Masking out MAC_STAT Interrupt for now... * #define INT_MASK_ENABLE 0xfff6bf17 * #define INT_MASK_ENABLE_NO_FLOW 0xfff6bfd7 */ #define INT_MASK_ENABLE 0xfffebf17 #define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7 /* General defines */ /* Packet and header sizes */ #define NIC_MIN_PACKET_SIZE 60 /* Multicast list size */ #define NIC_MAX_MCAST_LIST 128 /* Supported Filters */ #define ET131X_PACKET_TYPE_DIRECTED 0x0001 #define ET131X_PACKET_TYPE_MULTICAST 0x0002 #define ET131X_PACKET_TYPE_BROADCAST 0x0004 #define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008 #define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010 /* Tx Timeout */ #define ET131X_TX_TIMEOUT (1 * HZ) #define NIC_SEND_HANG_THRESHOLD 0 /* MP_TCB flags */ #define fMP_DEST_MULTI 0x00000001 #define fMP_DEST_BROAD 0x00000002 /* MP_ADAPTER flags */ #define fMP_ADAPTER_RECV_LOOKASIDE 0x00000004 #define fMP_ADAPTER_INTERRUPT_IN_USE 0x00000008 /* MP_SHARED flags */ #define fMP_ADAPTER_LOWER_POWER 0x00200000 #define fMP_ADAPTER_NON_RECOVER_ERROR 0x00800000 #define fMP_ADAPTER_HARDWARE_ERROR 0x04000000 #define fMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000 /* Some offsets in PCI config space that are actually used. 
*/ #define ET1310_PCI_MAC_ADDRESS 0xA4 #define ET1310_PCI_EEPROM_STATUS 0xB2 #define ET1310_PCI_ACK_NACK 0xC0 #define ET1310_PCI_REPLAY 0xC2 #define ET1310_PCI_L0L1LATENCY 0xCF /* PCI Product IDs */ #define ET131X_PCI_DEVICE_ID_GIG 0xED00 /* ET1310 1000 Base-T 8 */ #define ET131X_PCI_DEVICE_ID_FAST 0xED01 /* ET1310 100 Base-T */ /* Define order of magnitude converter */ #define NANO_IN_A_MICRO 1000 #define PARM_RX_NUM_BUFS_DEF 4 #define PARM_RX_TIME_INT_DEF 10 #define PARM_RX_MEM_END_DEF 0x2bc #define PARM_TX_TIME_INT_DEF 40 #define PARM_TX_NUM_BUFS_DEF 4 #define PARM_DMA_CACHE_DEF 0 /* RX defines */ #define USE_FBR0 1 #define FBR_CHUNKS 32 #define MAX_DESC_PER_RING_RX 1024 /* number of RFDs - default and min */ #ifdef USE_FBR0 #define RFD_LOW_WATER_MARK 40 #define NIC_DEFAULT_NUM_RFD 1024 #define NUM_FBRS 2 #else #define RFD_LOW_WATER_MARK 20 #define NIC_DEFAULT_NUM_RFD 256 #define NUM_FBRS 1 #endif #define NIC_MIN_NUM_RFD 64 #define NUM_PACKETS_HANDLED 256 #define ALCATEL_MULTICAST_PKT 0x01000000 #define ALCATEL_BROADCAST_PKT 0x02000000 /* typedefs for Free Buffer Descriptors */ struct fbr_desc { u32 addr_lo; u32 addr_hi; u32 word2; /* Bits 10-31 reserved, 0-9 descriptor */ }; /* Packet Status Ring Descriptors * * Word 0: * * top 16 bits are from the Alcatel Status Word as enumerated in * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2) * * 0: hp hash pass * 1: ipa IP checksum assist * 2: ipp IP checksum pass * 3: tcpa TCP checksum assist * 4: tcpp TCP checksum pass * 5: wol WOL Event * 6: rxmac_error RXMAC Error Indicator * 7: drop Drop packet * 8: ft Frame Truncated * 9: jp Jumbo Packet * 10: vp VLAN Packet * 11-15: unused * 16: asw_prev_pkt_dropped e.g. 
IFG too small on previous * 17: asw_RX_DV_event short receive event detected * 18: asw_false_carrier_event bad carrier since last good packet * 19: asw_code_err one or more nibbles signalled as errors * 20: asw_CRC_err CRC error * 21: asw_len_chk_err frame length field incorrect * 22: asw_too_long frame length > 1518 bytes * 23: asw_OK valid CRC + no code error * 24: asw_multicast has a multicast address * 25: asw_broadcast has a broadcast address * 26: asw_dribble_nibble spurious bits after EOP * 27: asw_control_frame is a control frame * 28: asw_pause_frame is a pause frame * 29: asw_unsupported_op unsupported OP code * 30: asw_VLAN_tag VLAN tag detected * 31: asw_long_evt Rx long event * * Word 1: * 0-15: length length in bytes * 16-25: bi Buffer Index * 26-27: ri Ring Index * 28-31: reserved */ struct pkt_stat_desc { u32 word0; u32 word1; }; /* Typedefs for the RX DMA status word */ /* * rx status word 0 holds part of the status bits of the Rx DMA engine * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word * which contains the Free Buffer ring 0 and 1 available offset. * * bit 0-9 FBR1 offset * bit 10 Wrap flag for FBR1 * bit 16-25 FBR0 offset * bit 26 Wrap flag for FBR0 */ /* * RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word * which contains the Packet Status Ring available offset. * * bit 0-15 reserved * bit 16-27 PSRoffset * bit 28 PSRwrap * bit 29-31 unused */ /* * struct rx_status_block is a structure representing the status of the Rx * DMA engine it sits in free memory, and is pointed to by 0x101c / 0x1020 */ struct rx_status_block { u32 word0; u32 word1; }; /* * Structure for look-up table holding free buffer ring pointers, addresses * and state. 
*/ struct fbr_lookup { void *virt[MAX_DESC_PER_RING_RX]; void *buffer1[MAX_DESC_PER_RING_RX]; void *buffer2[MAX_DESC_PER_RING_RX]; u32 bus_high[MAX_DESC_PER_RING_RX]; u32 bus_low[MAX_DESC_PER_RING_RX]; void *ring_virtaddr; dma_addr_t ring_physaddr; void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS]; dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS]; u64 real_physaddr; u64 offset; u32 local_full; u32 num_entries; u32 buffsize; }; /* * struct rx_ring is the sructure representing the adaptor's local * reference(s) to the rings * ****************************************************************************** * IMPORTANT NOTE :- fbr_lookup *fbr[NUM_FBRS] uses index 0 to refer to FBR1 * and index 1 to refer to FRB0 ****************************************************************************** */ struct rx_ring { struct fbr_lookup *fbr[NUM_FBRS]; void *ps_ring_virtaddr; dma_addr_t ps_ring_physaddr; u32 local_psr_full; u32 psr_num_entries; struct rx_status_block *rx_status_block; dma_addr_t rx_status_bus; /* RECV */ struct list_head recv_list; u32 num_ready_recv; u32 num_rfd; bool unfinished_receives; /* lookaside lists */ struct kmem_cache *recv_lookaside; }; /* TX defines */ /* * word 2 of the control bits in the Tx Descriptor ring for the ET-1310 * * 0-15: length of packet * 16-27: VLAN tag * 28: VLAN CFI * 29-31: VLAN priority * * word 3 of the control bits in the Tx Descriptor ring for the ET-1310 * * 0: last packet in the sequence * 1: first packet in the sequence * 2: interrupt the processor when this pkt sent * 3: Control word - no packet data * 4: Issue half-duplex backpressure : XON/XOFF * 5: send pause frame * 6: Tx frame has error * 7: append CRC * 8: MAC override * 9: pad packet * 10: Packet is a Huge packet * 11: append VLAN tag * 12: IP checksum assist * 13: TCP checksum assist * 14: UDP checksum assist */ /* struct tx_desc represents each descriptor on the ring */ struct tx_desc { u32 addr_hi; u32 addr_lo; u32 len_vlan; /* control words how to 
xmit the */ u32 flags; /* data (detailed above) */ }; /* * The status of the Tx DMA engine it sits in free memory, and is pointed to * by 0x101c / 0x1020. This is a DMA10 type */ /* TCB (Transmit Control Block: Host Side) */ struct tcb { struct tcb *next; /* Next entry in ring */ u32 flags; /* Our flags for the packet */ u32 count; /* Used to spot stuck/lost packets */ u32 stale; /* Used to spot stuck/lost packets */ struct sk_buff *skb; /* Network skb we are tied to */ u32 index; /* Ring indexes */ u32 index_start; }; /* Structure representing our local reference(s) to the ring */ struct tx_ring { /* TCB (Transmit Control Block) memory and lists */ struct tcb *tcb_ring; /* List of TCBs that are ready to be used */ struct tcb *tcb_qhead; struct tcb *tcb_qtail; /* list of TCBs that are currently being sent. NOTE that access to all * three of these (including used) are controlled via the * TCBSendQLock. This lock should be secured prior to incementing / * decrementing used, or any queue manipulation on send_head / * tail */ struct tcb *send_head; struct tcb *send_tail; int used; /* The actual descriptor ring */ struct tx_desc *tx_desc_ring; dma_addr_t tx_desc_ring_pa; /* send_idx indicates where we last wrote to in the descriptor ring. */ u32 send_idx; /* The location of the write-back status block */ u32 *tx_status; dma_addr_t tx_status_pa; /* Packets since the last IRQ: used for interrupt coalescing */ int since_irq; }; /* * Do not change these values: if changed, then change also in respective * TXdma and Rxdma engines */ #define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */ #define NUM_TCB 64 /* * These values are all superseded by registry entries to facilitate tuning. 
* Once the desired performance has been achieved, the optimal registry values * should be re-populated to these #defines: */ #define TX_ERROR_PERIOD 1000 #define LO_MARK_PERCENT_FOR_PSR 15 #define LO_MARK_PERCENT_FOR_RX 15 /* RFD (Receive Frame Descriptor) */ struct rfd { struct list_head list_node; struct sk_buff *skb; u32 len; /* total size of receive frame */ u16 bufferindex; u8 ringindex; }; /* Flow Control */ #define FLOW_BOTH 0 #define FLOW_TXONLY 1 #define FLOW_RXONLY 2 #define FLOW_NONE 3 /* Struct to define some device statistics */ struct ce_stats { /* MIB II variables * * NOTE: atomic_t types are only guaranteed to store 24-bits; if we * MUST have 32, then we'll need another way to perform atomic * operations */ u32 unicast_pkts_rcvd; atomic_t unicast_pkts_xmtd; u32 multicast_pkts_rcvd; atomic_t multicast_pkts_xmtd; u32 broadcast_pkts_rcvd; atomic_t broadcast_pkts_xmtd; u32 rcvd_pkts_dropped; /* Tx Statistics. */ u32 tx_underflows; u32 tx_collisions; u32 tx_excessive_collisions; u32 tx_first_collisions; u32 tx_late_collisions; u32 tx_max_pkt_errs; u32 tx_deferred; /* Rx Statistics. 
*/ u32 rx_overflows; u32 rx_length_errs; u32 rx_align_errs; u32 rx_crc_errs; u32 rx_code_violations; u32 rx_other_errs; u32 synchronous_iterations; u32 interrupt_status; }; /* The private adapter structure */ struct et131x_adapter { struct net_device *netdev; struct pci_dev *pdev; struct mii_bus *mii_bus; struct phy_device *phydev; struct work_struct task; /* Flags that indicate current state of the adapter */ u32 flags; /* local link state, to determine if a state change has occurred */ int link; /* Configuration */ u8 rom_addr[ETH_ALEN]; u8 addr[ETH_ALEN]; bool has_eeprom; u8 eeprom_data[2]; /* Spinlocks */ spinlock_t lock; spinlock_t tcb_send_qlock; spinlock_t tcb_ready_qlock; spinlock_t send_hw_lock; spinlock_t rcv_lock; spinlock_t rcv_pend_lock; spinlock_t fbr_lock; spinlock_t phy_lock; /* Packet Filter and look ahead size */ u32 packet_filter; /* multicast list */ u32 multicast_addr_count; u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN]; /* Pointer to the device's PCI register space */ struct address_map __iomem *regs; /* Registry parameters */ u8 wanted_flow; /* Flow we want for 802.3x flow control */ u32 registry_jumbo_packet; /* Max supported ethernet packet size */ /* Derived from the registry: */ u8 flowcontrol; /* flow control validated by the far-end */ /* Minimize init-time */ struct timer_list error_timer; /* variable putting the phy into coma mode when boot up with no cable * plugged in after 5 seconds */ u8 boot_coma; /* Next two used to save power information at power down. This * information will be used during power up to set up parts of Power * Management in JAGCore */ u16 pdown_speed; u8 pdown_duplex; /* Tx Memory Variables */ struct tx_ring tx_ring; /* Rx Memory Variables */ struct rx_ring rx_ring; /* Stats */ struct ce_stats stats; struct net_device_stats net_stats; }; static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status) { u32 reg; int i; /* * 1. 
Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
	 *    bits 7,1:0 both equal to 1, at least once after reset.
	 *    Subsequent operations need only to check that bits 1:0 are equal
	 *    to 1 prior to starting a single byte read/write
	 */

	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		/* Read registers grouped in DWORD1 */
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
			return -EIO;

		/* I2C idle and Phy Queue Avail both true */
		if ((reg & 0x3000) == 0x3000) {
			if (status)
				*status = reg;
			/* returns the low status byte (non-negative) */
			return reg & 0xFF;
		}
	}
	return -ETIMEDOUT;
}

/**
 * eeprom_write - Write a byte to the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address to write
 * @data: the value to write
 *
 * Returns 1 for a successful write.
 */
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
	struct pci_dev *pdev = adapter->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int i2c_wack = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	/*
	 * For an EEPROM, an I2C single byte write is defined as a START
	 * condition followed by the device address, EEPROM address, one byte
	 * of data and a STOP condition.  The STOP condition will trigger the
	 * EEPROM's internally timed write cycle to the nonvolatile memory.
	 * All inputs are disabled during this write cycle and the EEPROM will
	 * not respond to any access until the internal write is complete.
	 */

	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;

	/*
	 * 2. Write to the LBCIF Control Register:  bit 7=1, bit 6=1, bit 3=0,
	 *    and bits 1:0 both =0.  Bit 5 should be set according to the
	 *    type of EEPROM being accessed (1=two byte addressing, 0=one
	 *    byte addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
			LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	i2c_wack = 1;

	/* Prepare EEPROM address for Step 3 */

	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		/* Write the address to the LBCIF Address Register */
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;
		/*
		 * Write the data to the LBCIF Data Register (the I2C write
		 * will begin).
		 */
		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;
		/*
		 * Monitor bit 1:0 of the LBCIF Status Register.  When bits
		 * 1:0 are both equal to 1, the I2C write has completed and the
		 * internal write cycle of the EEPROM is about to start.
		 * (bits 1:0 = 01 is a legal state while waiting from both
		 * equal to 1, but bits 1:0 = 10 is invalid and implies that
		 * something is broken).
		 */
		err = eeprom_wait_ready(pdev, &status);
		if (err < 0)
			return 0;

		/*
		 * Check bit 3 of the LBCIF Status Register.  If equal to 1,
		 * an error has occurred.  Don't break here if we are revision
		 * 1, this is so we do a blind write for load bug.
		 */
		if ((status & LBCIF_STATUS_GENERAL_ERROR)
			&& adapter->pdev->revision == 0)
			break;

		/*
		 * Check bit 2 of the LBCIF Status Register.  If equal to 1 an
		 * ACK error has occurred on the address phase of the write.
		 * This could be due to an actual hardware failure or the
		 * EEPROM may still be in its internal write cycle from a
		 * previous write.  This write operation was ignored and must
		 * be repeated later.
		 */
		if (status & LBCIF_STATUS_ACK_ERROR) {
			/*
			 * This could be due to an actual hardware failure
			 * or the EEPROM may still be in its internal write
			 * cycle from a previous write.  This write operation
			 * was ignored and must be repeated later.
			 */
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	/*
	 * Set bit 6 of the LBCIF Control Register = 0.
	 */
	udelay(10);

	while (i2c_wack) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
			LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		/* Do read until internal ACK_ERROR goes away meaning write
		 * completed
		 */
		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
					LBCIF_DATA_REGISTER, &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		/* NOTE(review): 'index' caps this poll loop at 10000
		 * iterations; the inner config-space polls are unbounded */
		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}

/**
 * eeprom_read - Read a byte from the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address from which to read
 * @pdata: a pointer to a byte in which to store the value of the read
 * @eeprom_id: the ID of the EEPROM
 * @addrmode: how the EEPROM is to be accessed
 *
 * Returns 1 for a successful read
 */
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;
	u32 status;

	/*
	 * A single byte read is similar to the single byte write, with the
	 * exception of the data flow:
	 */

	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;
	/*
	 * Write to the LBCIF Control Register:  bit 7=1, bit 6=0, bit 3=0,
	 * and bits 1:0 both =0.  Bit 5 should be set according to the type
	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
	 * addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;
	/*
	 * Write the address to the LBCIF Address Register (I2C read will
	 * begin).
	 */
	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;
	/*
	 * Monitor bit 0 of the LBCIF Status Register.  When = 1, I2C read
	 * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
	 * has occurred).
	 */
	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;
	/*
	 * Regardless of error status, read data byte from LBCIF Data
	 * Register.  (eeprom_wait_ready() returned the low status byte,
	 * which carries the data byte on a successful read.)
	 */
	*pdata = err;
	/*
	 * Check bit 2 of the LBCIF Status Register.  If = 1,
	 * then an error has occurred.
*/ return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0; } static int et131x_init_eeprom(struct et131x_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; u8 eestatus; /* We first need to check the EEPROM Status code located at offset * 0xB2 of config space */ pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus); /* THIS IS A WORKAROUND: * I need to call this function twice to get my card in a * LG M1 Express Dual running. I tried also a msleep before this * function, because I thought there could be some time condidions * but it didn't work. Call the whole function twice also work. */ if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) { dev_err(&pdev->dev, "Could not read PCI config space for EEPROM Status\n"); return -EIO; } /* Determine if the error(s) we care about are present. If they are * present we need to fail. */ if (eestatus & 0x4C) { int write_failed = 0; if (pdev->revision == 0x01) { int i; static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF }; /* Re-write the first 4 bytes if we have an eeprom * present and the revision id is 1, this fixes the * corruption seen with 1310 B Silicon */ for (i = 0; i < 3; i++) if (eeprom_write(adapter, i, eedata[i]) < 0) write_failed = 1; } if (pdev->revision != 0x01 || write_failed) { dev_err(&pdev->dev, "Fatal EEPROM Status Error - 0x%04x\n", eestatus); /* This error could mean that there was an error * reading the eeprom or that the eeprom doesn't exist. * We will treat each case the same and not try to * gather additional information that normally would * come from the eeprom, like MAC Address */ adapter->has_eeprom = 0; return -EIO; } } adapter->has_eeprom = 1; /* Read the EEPROM for information regarding LED behavior. Refer to * ET1310_phy.c, et131x_xcvr_init(), for its use. 
*/ eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]); eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]); if (adapter->eeprom_data[0] != 0xcd) /* Disable all optional features */ adapter->eeprom_data[1] = 0x00; return 0; } /** * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310. * @adapter: pointer to our adapter structure */ static void et131x_rx_dma_enable(struct et131x_adapter *adapter) { /* Setup the receive dma configuration register for normal operation */ u32 csr = 0x2000; /* FBR1 enable */ if (adapter->rx_ring.fbr[0]->buffsize == 4096) csr |= 0x0800; else if (adapter->rx_ring.fbr[0]->buffsize == 8192) csr |= 0x1000; else if (adapter->rx_ring.fbr[0]->buffsize == 16384) csr |= 0x1800; #ifdef USE_FBR0 csr |= 0x0400; /* FBR0 enable */ if (adapter->rx_ring.fbr[1]->buffsize == 256) csr |= 0x0100; else if (adapter->rx_ring.fbr[1]->buffsize == 512) csr |= 0x0200; else if (adapter->rx_ring.fbr[1]->buffsize == 1024) csr |= 0x0300; #endif writel(csr, &adapter->regs->rxdma.csr); csr = readl(&adapter->regs->rxdma.csr); if ((csr & 0x00020000) != 0) { udelay(5); csr = readl(&adapter->regs->rxdma.csr); if ((csr & 0x00020000) != 0) { dev_err(&adapter->pdev->dev, "RX Dma failed to exit halt state. CSR 0x%08x\n", csr); } } } /** * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310 * @adapter: pointer to our adapter structure */ static void et131x_rx_dma_disable(struct et131x_adapter *adapter) { u32 csr; /* Setup the receive dma configuration register */ writel(0x00002001, &adapter->regs->rxdma.csr); csr = readl(&adapter->regs->rxdma.csr); if ((csr & 0x00020000) == 0) { /* Check halt status (bit 17) */ udelay(5); csr = readl(&adapter->regs->rxdma.csr); if ((csr & 0x00020000) == 0) dev_err(&adapter->pdev->dev, "RX Dma failed to enter halt state. CSR 0x%08x\n", csr); } } /** * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310. * @adapter: pointer to our adapter structure * * Mainly used after a return to the D0 (full-power) state from a lower state. 
 */
static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
					&adapter->regs->txdma.csr);
}

/* Advance a 10-bit ring index by n, preserving the wrap bit */
static inline void add_10bit(u32 *v, int n)
{
	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

/* Advance a 12-bit ring index by n, preserving the wrap bit */
static inline void add_12bit(u32 *v, int n)
{
	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}

/**
 * et1310_config_mac_regs1 - Initialize the first part of MAC regs
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	/* First we need to reset everything.  Write to MAC configuration
	 * register 1 to perform reset.
	 */
	writel(0xC00F0000, &macregs->cfg1);

	/* Next lets configure the MAC Inter-packet gap register */
	ipg = 0x38005860;	/* IPG1 0x38 IPG2 0x58 B2B 0x60 */
	ipg |= 0x50 << 8;	/* ifg enforce 0x50 */
	writel(ipg, &macregs->ipg);

	/* Next lets configure the MAC Half Duplex register */
	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
	writel(0x00A1F037, &macregs->hfdp);

	/* Next lets configure the MAC Interface Control register */
	writel(0, &macregs->if_ctrl);

	/* Let's move on to setting up the mii management configuration */
	writel(0x07, &macregs->mii_mgmt_cfg);	/* Clock reset 0x7 */

	/* Next lets configure the MAC Station Address register.  These
	 * values are read from the EEPROM during initialization and stored
	 * in the adapter structure.  We write what is stored in the adapter
	 * structure to the MAC Station Address registers high and low.  This
	 * station address is used for generating and checking pause control
	 * packets.
*/ station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) | (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT); station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) | (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) | (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) | adapter->addr[2]; writel(station1, &macregs->station_addr_1); writel(station2, &macregs->station_addr_2); /* Max ethernet packet in bytes that will be passed by the mac without * being truncated. Allow the MAC to pass 4 more than our max packet * size. This is 4 for the Ethernet CRC. * * Packets larger than (registry_jumbo_packet) that do not contain a * VLAN ID will be dropped by the Rx function. */ writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len); /* clear out MAC config reset */ writel(0, &macregs->cfg1); } /** * et1310_config_mac_regs2 - Initialize the second part of MAC regs * @adapter: pointer to our adapter structure */ static void et1310_config_mac_regs2(struct et131x_adapter *adapter) { int32_t delay = 0; struct mac_regs __iomem *mac = &adapter->regs->mac; struct phy_device *phydev = adapter->phydev; u32 cfg1; u32 cfg2; u32 ifctrl; u32 ctl; ctl = readl(&adapter->regs->txmac.ctl); cfg1 = readl(&mac->cfg1); cfg2 = readl(&mac->cfg2); ifctrl = readl(&mac->if_ctrl); /* Set up the if mode bits */ cfg2 &= ~0x300; if (phydev && phydev->speed == SPEED_1000) { cfg2 |= 0x200; /* Phy mode bit */ ifctrl &= ~(1 << 24); } else { cfg2 |= 0x100; ifctrl |= (1 << 24); } /* We need to enable Rx/Tx */ cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW; /* Initialize loop back to off */ cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW); if (adapter->flowcontrol == FLOW_RXONLY || adapter->flowcontrol == FLOW_BOTH) cfg1 |= CFG1_RX_FLOW; writel(cfg1, &mac->cfg1); /* Now we need to initialize the MAC Configuration 2 register */ /* preamble 7, check length, huge frame off, pad crc, crc enable full duplex off */ cfg2 |= 0x7016; cfg2 &= ~0x0021; /* Turn on duplex if needed */ if 
(phydev && phydev->duplex == DUPLEX_FULL) cfg2 |= 0x01; ifctrl &= ~(1 << 26); if (phydev && phydev->duplex == DUPLEX_HALF) ifctrl |= (1<<26); /* Enable ghd */ writel(ifctrl, &mac->if_ctrl); writel(cfg2, &mac->cfg2); do { udelay(10); delay++; cfg1 = readl(&mac->cfg1); } while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100); if (delay == 100) { dev_warn(&adapter->pdev->dev, "Syncd bits did not respond correctly cfg1 word 0x%08x\n", cfg1); } /* Enable txmac */ ctl |= 0x09; /* TX mac enable, FC disable */ writel(ctl, &adapter->regs->txmac.ctl); /* Ready to start the RXDMA/TXDMA engine */ if (adapter->flags & fMP_ADAPTER_LOWER_POWER) { et131x_rx_dma_enable(adapter); et131x_tx_dma_enable(adapter); } } /** * et1310_in_phy_coma - check if the device is in phy coma * @adapter: pointer to our adapter structure * * Returns 0 if the device is not in phy coma, 1 if it is in phy coma */ static int et1310_in_phy_coma(struct et131x_adapter *adapter) { u32 pmcsr; pmcsr = readl(&adapter->regs->global.pm_csr); return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0; } static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter) { struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; u32 hash1 = 0; u32 hash2 = 0; u32 hash3 = 0; u32 hash4 = 0; u32 pm_csr; /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision * the multi-cast LIST. If it is NOT specified, (and "ALL" is not * specified) then we should pass NO multi-cast addresses to the * driver. 
*/ if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) { int i; /* Loop through our multicast array and set up the device */ for (i = 0; i < adapter->multicast_addr_count; i++) { u32 result; result = ether_crc(6, adapter->multicast_list[i]); result = (result & 0x3F800000) >> 23; if (result < 32) { hash1 |= (1 << result); } else if ((31 < result) && (result < 64)) { result -= 32; hash2 |= (1 << result); } else if ((63 < result) && (result < 96)) { result -= 64; hash3 |= (1 << result); } else { result -= 96; hash4 |= (1 << result); } } } /* Write out the new hash to the device */ pm_csr = readl(&adapter->regs->global.pm_csr); if (!et1310_in_phy_coma(adapter)) { writel(hash1, &rxmac->multi_hash1); writel(hash2, &rxmac->multi_hash2); writel(hash3, &rxmac->multi_hash3); writel(hash4, &rxmac->multi_hash4); } } static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter) { struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; u32 uni_pf1; u32 uni_pf2; u32 uni_pf3; u32 pm_csr; /* Set up unicast packet filter reg 3 to be the first two octets of * the MAC address for both address * * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the * MAC address for second address * * Set up unicast packet filter reg 3 to be the octets 2 - 5 of the * MAC address for first address */ uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) | (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) | (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) | adapter->addr[1]; uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) | (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) | (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) | adapter->addr[5]; uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) | (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) | (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) | adapter->addr[5]; pm_csr = readl(&adapter->regs->global.pm_csr); if (!et1310_in_phy_coma(adapter)) { writel(uni_pf1, &rxmac->uni_pf_addr1); writel(uni_pf2, &rxmac->uni_pf_addr2); 
writel(uni_pf3, &rxmac->uni_pf_addr3); } } static void et1310_config_rxmac_regs(struct et131x_adapter *adapter) { struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; struct phy_device *phydev = adapter->phydev; u32 sa_lo; u32 sa_hi = 0; u32 pf_ctrl = 0; /* Disable the MAC while it is being configured (also disable WOL) */ writel(0x8, &rxmac->ctrl); /* Initialize WOL to disabled. */ writel(0, &rxmac->crc0); writel(0, &rxmac->crc12); writel(0, &rxmac->crc34); /* We need to set the WOL mask0 - mask4 next. We initialize it to * its default Values of 0x00000000 because there are not WOL masks * as of this time. */ writel(0, &rxmac->mask0_word0); writel(0, &rxmac->mask0_word1); writel(0, &rxmac->mask0_word2); writel(0, &rxmac->mask0_word3); writel(0, &rxmac->mask1_word0); writel(0, &rxmac->mask1_word1); writel(0, &rxmac->mask1_word2); writel(0, &rxmac->mask1_word3); writel(0, &rxmac->mask2_word0); writel(0, &rxmac->mask2_word1); writel(0, &rxmac->mask2_word2); writel(0, &rxmac->mask2_word3); writel(0, &rxmac->mask3_word0); writel(0, &rxmac->mask3_word1); writel(0, &rxmac->mask3_word2); writel(0, &rxmac->mask3_word3); writel(0, &rxmac->mask4_word0); writel(0, &rxmac->mask4_word1); writel(0, &rxmac->mask4_word2); writel(0, &rxmac->mask4_word3); /* Lets setup the WOL Source Address */ sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) | (adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) | (adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) | adapter->addr[5]; writel(sa_lo, &rxmac->sa_lo); sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) | adapter->addr[1]; writel(sa_hi, &rxmac->sa_hi); /* Disable all Packet Filtering */ writel(0, &rxmac->pf_ctrl); /* Let's initialize the Unicast Packet filtering address */ if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) { et1310_setup_device_for_unicast(adapter); pf_ctrl |= 4; /* Unicast filter */ } else { writel(0, &rxmac->uni_pf_addr1); writel(0, &rxmac->uni_pf_addr2); writel(0, &rxmac->uni_pf_addr3); } /* Let's initialize the 
Multicast hash */ if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) { pf_ctrl |= 2; /* Multicast filter */ et1310_setup_device_for_multicast(adapter); } /* Runt packet filtering. Didn't work in version A silicon. */ pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16; pf_ctrl |= 8; /* Fragment filter */ if (adapter->registry_jumbo_packet > 8192) /* In order to transmit jumbo packets greater than 8k, the * FIFO between RxMAC and RxDMA needs to be reduced in size * to (16k - Jumbo packet size). In order to implement this, * we must use "cut through" mode in the RxMAC, which chops * packets down into segments which are (max_size * 16). In * this case we selected 256 bytes, since this is the size of * the PCI-Express TLP's that the 1310 uses. * * seg_en on, fc_en off, size 0x10 */ writel(0x41, &rxmac->mcif_ctrl_max_seg); else writel(0, &rxmac->mcif_ctrl_max_seg); /* Initialize the MCIF water marks */ writel(0, &rxmac->mcif_water_mark); /* Initialize the MIF control */ writel(0, &rxmac->mif_ctrl); /* Initialize the Space Available Register */ writel(0, &rxmac->space_avail); /* Initialize the the mif_ctrl register * bit 3: Receive code error. One or more nibbles were signaled as * errors during the reception of the packet. Clear this * bit in Gigabit, set it in 100Mbit. This was derived * experimentally at UNH. * bit 4: Receive CRC error. The packet's CRC did not match the * internally generated CRC. * bit 5: Receive length check error. Indicates that frame length * field value in the packet does not match the actual data * byte length and is not a type field. * bit 16: Receive frame truncated. * bit 17: Drop packet enable */ if (phydev && phydev->speed == SPEED_100) writel(0x30038, &rxmac->mif_ctrl); else writel(0x30030, &rxmac->mif_ctrl); /* Finally we initialize RxMac to be enabled & WOL disabled. Packet * filter is always enabled since it is where the runt packets are * supposed to be dropped. 
For version A silicon, runt packet * dropping doesn't work, so it is disabled in the pf_ctrl register, * but we still leave the packet filter on. */ writel(pf_ctrl, &rxmac->pf_ctrl); writel(0x9, &rxmac->ctrl); } static void et1310_config_txmac_regs(struct et131x_adapter *adapter) { struct txmac_regs __iomem *txmac = &adapter->regs->txmac; /* We need to update the Control Frame Parameters * cfpt - control frame pause timer set to 64 (0x40) * cfep - control frame extended pause timer set to 0x0 */ if (adapter->flowcontrol == FLOW_NONE) writel(0, &txmac->cf_param); else writel(0x40, &txmac->cf_param); } static void et1310_config_macstat_regs(struct et131x_adapter *adapter) { struct macstat_regs __iomem *macstat = &adapter->regs->macstat; /* Next we need to initialize all the macstat registers to zero on * the device. */ writel(0, &macstat->txrx_0_64_byte_frames); writel(0, &macstat->txrx_65_127_byte_frames); writel(0, &macstat->txrx_128_255_byte_frames); writel(0, &macstat->txrx_256_511_byte_frames); writel(0, &macstat->txrx_512_1023_byte_frames); writel(0, &macstat->txrx_1024_1518_byte_frames); writel(0, &macstat->txrx_1519_1522_gvln_frames); writel(0, &macstat->rx_bytes); writel(0, &macstat->rx_packets); writel(0, &macstat->rx_fcs_errs); writel(0, &macstat->rx_multicast_packets); writel(0, &macstat->rx_broadcast_packets); writel(0, &macstat->rx_control_frames); writel(0, &macstat->rx_pause_frames); writel(0, &macstat->rx_unknown_opcodes); writel(0, &macstat->rx_align_errs); writel(0, &macstat->rx_frame_len_errs); writel(0, &macstat->rx_code_errs); writel(0, &macstat->rx_carrier_sense_errs); writel(0, &macstat->rx_undersize_packets); writel(0, &macstat->rx_oversize_packets); writel(0, &macstat->rx_fragment_packets); writel(0, &macstat->rx_jabbers); writel(0, &macstat->rx_drops); writel(0, &macstat->tx_bytes); writel(0, &macstat->tx_packets); writel(0, &macstat->tx_multicast_packets); writel(0, &macstat->tx_broadcast_packets); writel(0, &macstat->tx_pause_frames); 
writel(0, &macstat->tx_deferred); writel(0, &macstat->tx_excessive_deferred); writel(0, &macstat->tx_single_collisions); writel(0, &macstat->tx_multiple_collisions); writel(0, &macstat->tx_late_collisions); writel(0, &macstat->tx_excessive_collisions); writel(0, &macstat->tx_total_collisions); writel(0, &macstat->tx_pause_honored_frames); writel(0, &macstat->tx_drops); writel(0, &macstat->tx_jabbers); writel(0, &macstat->tx_fcs_errs); writel(0, &macstat->tx_control_frames); writel(0, &macstat->tx_oversize_frames); writel(0, &macstat->tx_undersize_frames); writel(0, &macstat->tx_fragments); writel(0, &macstat->carry_reg1); writel(0, &macstat->carry_reg2); /* Unmask any counters that we want to track the overflow of. * Initially this will be all counters. It may become clear later * that we do not need to track all counters. */ writel(0xFFFFBE32, &macstat->carry_reg1_mask); writel(0xFFFE7E8B, &macstat->carry_reg2_mask); } /** * et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC * @adapter: pointer to our private adapter structure * @addr: the address of the transceiver * @reg: the register to read * @value: pointer to a 16-bit value in which the value will be stored * * Returns 0 on success, errno on failure (as defined in errno.h) */ static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr, u8 reg, u16 *value) { struct mac_regs __iomem *mac = &adapter->regs->mac; int status = 0; u32 delay = 0; u32 mii_addr; u32 mii_cmd; u32 mii_indicator; /* Save a local copy of the registers we are dealing with so we can * set them back */ mii_addr = readl(&mac->mii_mgmt_addr); mii_cmd = readl(&mac->mii_mgmt_cmd); /* Stop the current operation */ writel(0, &mac->mii_mgmt_cmd); /* Set up the register we need to read from on the correct PHY */ writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr); writel(0x1, &mac->mii_mgmt_cmd); do { udelay(50); delay++; mii_indicator = readl(&mac->mii_mgmt_indicator); } while ((mii_indicator & MGMT_WAIT) && delay 
< 50); /* If we hit the max delay, we could not read the register */ if (delay == 50) { dev_warn(&adapter->pdev->dev, "reg 0x%08x could not be read\n", reg); dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", mii_indicator); status = -EIO; } /* If we hit here we were able to read the register and we need to * return the value to the caller */ *value = readl(&mac->mii_mgmt_stat) & 0xFFFF; /* Stop the read operation */ writel(0, &mac->mii_mgmt_cmd); /* set the registers we touched back to the state at which we entered * this function */ writel(mii_addr, &mac->mii_mgmt_addr); writel(mii_cmd, &mac->mii_mgmt_cmd); return status; } static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) { struct phy_device *phydev = adapter->phydev; if (!phydev) return -EIO; return et131x_phy_mii_read(adapter, phydev->addr, reg, value); } /** * et131x_mii_write - Write to a PHY register through the MII interface of the MAC * @adapter: pointer to our private adapter structure * @reg: the register to read * @value: 16-bit value to write * * FIXME: one caller in netdev still * * Return 0 on success, errno on failure (as defined in errno.h) */ static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value) { struct mac_regs __iomem *mac = &adapter->regs->mac; struct phy_device *phydev = adapter->phydev; int status = 0; u8 addr; u32 delay = 0; u32 mii_addr; u32 mii_cmd; u32 mii_indicator; if (!phydev) return -EIO; addr = phydev->addr; /* Save a local copy of the registers we are dealing with so we can * set them back */ mii_addr = readl(&mac->mii_mgmt_addr); mii_cmd = readl(&mac->mii_mgmt_cmd); /* Stop the current operation */ writel(0, &mac->mii_mgmt_cmd); /* Set up the register we need to write to on the correct PHY */ writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr); /* Add the value to write to the registers to the mac */ writel(value, &mac->mii_mgmt_ctrl); do { udelay(50); delay++; mii_indicator = readl(&mac->mii_mgmt_indicator); } while 
((mii_indicator & MGMT_BUSY) && delay < 100); /* If we hit the max delay, we could not write the register */ if (delay == 100) { u16 tmp; dev_warn(&adapter->pdev->dev, "reg 0x%08x could not be written", reg); dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", mii_indicator); dev_warn(&adapter->pdev->dev, "command is 0x%08x\n", readl(&mac->mii_mgmt_cmd)); et131x_mii_read(adapter, reg, &tmp); status = -EIO; } /* Stop the write operation */ writel(0, &mac->mii_mgmt_cmd); /* * set the registers we touched back to the state at which we entered * this function */ writel(mii_addr, &mac->mii_mgmt_addr); writel(mii_cmd, &mac->mii_mgmt_cmd); return status; } /* Still used from _mac for BIT_READ */ static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, u16 action, u16 regnum, u16 bitnum, u8 *value) { u16 reg; u16 mask = 0x0001 << bitnum; /* Read the requested register */ et131x_mii_read(adapter, regnum, &reg); switch (action) { case TRUEPHY_BIT_READ: *value = (reg & mask) >> bitnum; break; case TRUEPHY_BIT_SET: et131x_mii_write(adapter, regnum, reg | mask); break; case TRUEPHY_BIT_CLEAR: et131x_mii_write(adapter, regnum, reg & ~mask); break; default: break; } } static void et1310_config_flow_control(struct et131x_adapter *adapter) { struct phy_device *phydev = adapter->phydev; if (phydev->duplex == DUPLEX_HALF) { adapter->flowcontrol = FLOW_NONE; } else { char remote_pause, remote_async_pause; et1310_phy_access_mii_bit(adapter, TRUEPHY_BIT_READ, 5, 10, &remote_pause); et1310_phy_access_mii_bit(adapter, TRUEPHY_BIT_READ, 5, 11, &remote_async_pause); if ((remote_pause == TRUEPHY_BIT_SET) && (remote_async_pause == TRUEPHY_BIT_SET)) { adapter->flowcontrol = adapter->wanted_flow; } else if ((remote_pause == TRUEPHY_BIT_SET) && (remote_async_pause == TRUEPHY_BIT_CLEAR)) { if (adapter->wanted_flow == FLOW_BOTH) adapter->flowcontrol = FLOW_BOTH; else adapter->flowcontrol = FLOW_NONE; } else if ((remote_pause == TRUEPHY_BIT_CLEAR) && (remote_async_pause == 
TRUEPHY_BIT_CLEAR)) { adapter->flowcontrol = FLOW_NONE; } else {/* if (remote_pause == TRUEPHY_CLEAR_BIT && remote_async_pause == TRUEPHY_SET_BIT) */ if (adapter->wanted_flow == FLOW_BOTH) adapter->flowcontrol = FLOW_RXONLY; else adapter->flowcontrol = FLOW_NONE; } } } /** * et1310_update_macstat_host_counters - Update the local copy of the statistics * @adapter: pointer to the adapter structure */ static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter) { struct ce_stats *stats = &adapter->stats; struct macstat_regs __iomem *macstat = &adapter->regs->macstat; stats->tx_collisions += readl(&macstat->tx_total_collisions); stats->tx_first_collisions += readl(&macstat->tx_single_collisions); stats->tx_deferred += readl(&macstat->tx_deferred); stats->tx_excessive_collisions += readl(&macstat->tx_multiple_collisions); stats->tx_late_collisions += readl(&macstat->tx_late_collisions); stats->tx_underflows += readl(&macstat->tx_undersize_frames); stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames); stats->rx_align_errs += readl(&macstat->rx_align_errs); stats->rx_crc_errs += readl(&macstat->rx_code_errs); stats->rcvd_pkts_dropped += readl(&macstat->rx_drops); stats->rx_overflows += readl(&macstat->rx_oversize_packets); stats->rx_code_violations += readl(&macstat->rx_fcs_errs); stats->rx_length_errs += readl(&macstat->rx_frame_len_errs); stats->rx_other_errs += readl(&macstat->rx_fragment_packets); } /** * et1310_handle_macstat_interrupt * @adapter: pointer to the adapter structure * * One of the MACSTAT counters has wrapped. Update the local copy of * the statistics held in the adapter structure, checking the "wrap" * bit for each counter. */ static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter) { u32 carry_reg1; u32 carry_reg2; /* Read the interrupt bits from the register(s). These are Clear On * Write. 
*/ carry_reg1 = readl(&adapter->regs->macstat.carry_reg1); carry_reg2 = readl(&adapter->regs->macstat.carry_reg2); writel(carry_reg1, &adapter->regs->macstat.carry_reg1); writel(carry_reg2, &adapter->regs->macstat.carry_reg2); /* We need to do update the host copy of all the MAC_STAT counters. * For each counter, check it's overflow bit. If the overflow bit is * set, then increment the host version of the count by one complete * revolution of the counter. This routine is called when the counter * block indicates that one of the counters has wrapped. */ if (carry_reg1 & (1 << 14)) adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT; if (carry_reg1 & (1 << 8)) adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT; if (carry_reg1 & (1 << 7)) adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT; if (carry_reg1 & (1 << 2)) adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT; if (carry_reg1 & (1 << 6)) adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT; if (carry_reg1 & (1 << 3)) adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT; if (carry_reg1 & (1 << 0)) adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT; if (carry_reg2 & (1 << 16)) adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT; if (carry_reg2 & (1 << 15)) adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT; if (carry_reg2 & (1 << 6)) adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT; if (carry_reg2 & (1 << 8)) adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT; if (carry_reg2 & (1 << 5)) adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT; if (carry_reg2 & (1 << 4)) adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT; if (carry_reg2 & (1 << 2)) adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT; } static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg) { struct net_device *netdev = bus->priv; struct et131x_adapter *adapter = netdev_priv(netdev); u16 value; int ret; ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value); if (ret < 0) return ret; else return 
value;
}

static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
			     int reg, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* NOTE(review): phy_addr is ignored here; et131x_mii_write()
	 * always targets adapter->phydev->addr.  Fine while only one PHY
	 * hangs off this bus, but worth confirming.
	 */
	return et131x_mii_write(adapter, reg, value);
}

static int et131x_mdio_reset(struct mii_bus *bus)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Software-reset the PHY via the basic mode control register */
	et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);

	return 0;
}

/**
 * et1310_phy_power_down - PHY power control
 * @adapter: device to control
 * @down: true for off/false for back on
 *
 * one hundred, ten, one thousand megs
 * How would you like to have your LAN accessed
 * Can't you see that this code processed
 * Phy power, phy power..
 */
static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
{
	u16 data;

	/* Read-modify-write the BMCR power-down bit */
	et131x_mii_read(adapter, MII_BMCR, &data);
	data &= ~BMCR_PDOWN;
	if (down)
		data |= BMCR_PDOWN;
	et131x_mii_write(adapter, MII_BMCR, data);
}

/**
 * et131x_xcvr_init - Init the phy if we are setting it into force mode
 * @adapter: pointer to our private adapter structure
 *
 */
static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
	u16 imr;
	u16 isr;	/* value unused; presumably read to clear pending
			 * PHY interrupts - TODO confirm
			 */
	u16 lcr2;

	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr);
	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr);

	/* Set the link status interrupt only.  Bad behavior when link status
	 * and auto neg are set, we run into a nested interrupt problem
	 *
	 * NOTE(review): the three mask constants are combined with '&',
	 * not '|'; if they are disjoint single-bit masks this expression
	 * is 0 and imr is unchanged.  Verify the constants' definitions
	 * before touching this.
	 */
	imr |= (ET_PHY_INT_MASK_AUTONEGSTAT &
			ET_PHY_INT_MASK_LINKSTAT &
			ET_PHY_INT_MASK_ENABLE);

	et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr);

	/* Set the LED behavior such that LED 1 indicates speed (off =
	 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
	 * link and activity (on for link, blink off for activity).
	 *
	 * NOTE: Some customizations have been added here for specific
	 * vendors; The LED behavior is now determined by vendor data in the
	 * EEPROM. However, the above description is the default.
*/ if ((adapter->eeprom_data[1] & 0x4) == 0) { et131x_mii_read(adapter, PHY_LED_2, &lcr2); lcr2 &= (ET_LED2_LED_100TX & ET_LED2_LED_1000T); lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT); if ((adapter->eeprom_data[1] & 0x8) == 0) lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT); else lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT); et131x_mii_write(adapter, PHY_LED_2, lcr2); } } /** * et131x_configure_global_regs - configure JAGCore global regs * @adapter: pointer to our adapter structure * * Used to configure the global registers on the JAGCore */ static void et131x_configure_global_regs(struct et131x_adapter *adapter) { struct global_regs __iomem *regs = &adapter->regs->global; writel(0, &regs->rxq_start_addr); writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr); if (adapter->registry_jumbo_packet < 2048) { /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word * block of RAM that the driver can split between Tx * and Rx as it desires. Our default is to split it * 50/50: */ writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr); writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr); } else if (adapter->registry_jumbo_packet < 8192) { /* For jumbo packets > 2k but < 8k, split 50-50. */ writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr); writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr); } else { /* 9216 is the only packet size greater than 8k that * is available. The Tx buffer has to be big enough * for one whole packet on the Tx side. We'll make * the Tx 9408, and give the rest to Rx */ writel(0x01b3, &regs->rxq_end_addr); writel(0x01b4, &regs->txq_start_addr); } /* Initialize the loopback register. Disable all loopbacks. */ writel(0, &regs->loopback); /* MSI Register */ writel(0, &regs->msi_config); /* By default, disable the watchdog timer. It will be enabled when * a packet is queued. 
*/ writel(0, &regs->watchdog_timer); } /** * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence * @adapter: pointer to our adapter structure */ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter) { struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; struct rx_ring *rx_local = &adapter->rx_ring; struct fbr_desc *fbr_entry; u32 entry; u32 psr_num_des; unsigned long flags; /* Halt RXDMA to perform the reconfigure. */ et131x_rx_dma_disable(adapter); /* Load the completion writeback physical address * * NOTE : dma_alloc_coherent(), used above to alloc DMA regions, * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses * are ever returned, make sure the high part is retrieved here * before storing the adjusted address. */ writel((u32) ((u64)rx_local->rx_status_bus >> 32), &rx_dma->dma_wb_base_hi); writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo); memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block)); /* Set the address and parameters of the packet status ring into the * 1310's registers */ writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32), &rx_dma->psr_base_hi); writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo); writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des); writel(0, &rx_dma->psr_full_offset); psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF; writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100, &rx_dma->psr_min_des); spin_lock_irqsave(&adapter->rcv_lock, flags); /* These local variables track the PSR in the adapter structure */ rx_local->local_psr_full = 0; /* Now's the best time to initialize FBR1 contents */ fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr; for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) { fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry]; fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry]; fbr_entry->word2 = entry; fbr_entry++; } /* Set the address and parameters of Free buffer ring 1 (and 0 if * required) 
into the 1310's registers */ writel((u32) (rx_local->fbr[0]->real_physaddr >> 32), &rx_dma->fbr1_base_hi); writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo); writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des); writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset); /* This variable tracks the free buffer ring 1 full position, so it * has to match the above. */ rx_local->fbr[0]->local_full = ET_DMA10_WRAP; writel( ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, &rx_dma->fbr1_min_des); #ifdef USE_FBR0 /* Now's the best time to initialize FBR0 contents */ fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr; for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) { fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry]; fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry]; fbr_entry->word2 = entry; fbr_entry++; } writel((u32) (rx_local->fbr[1]->real_physaddr >> 32), &rx_dma->fbr0_base_hi); writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo); writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des); writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset); /* This variable tracks the free buffer ring 0 full position, so it * has to match the above. */ rx_local->fbr[1]->local_full = ET_DMA10_WRAP; writel( ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, &rx_dma->fbr0_min_des); #endif /* Program the number of packets we will receive before generating an * interrupt. * For version B silicon, this value gets updated once autoneg is *complete. */ writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done); /* The "time_done" is not working correctly to coalesce interrupts * after a given time period, but rather is giving us an interrupt * regardless of whether we have received packets. * This value gets updated once autoneg is complete. 
*/ writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time); spin_unlock_irqrestore(&adapter->rcv_lock, flags); } /** * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore. * @adapter: pointer to our private adapter structure * * Configure the transmit engine with the ring buffers we have created * and prepare it for use. */ static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter) { struct txdma_regs __iomem *txdma = &adapter->regs->txdma; /* Load the hardware with the start of the transmit descriptor ring. */ writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32), &txdma->pr_base_hi); writel((u32) adapter->tx_ring.tx_desc_ring_pa, &txdma->pr_base_lo); /* Initialise the transmit DMA engine */ writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des); /* Load the completion writeback physical address */ writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32), &txdma->dma_wb_base_hi); writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo); *adapter->tx_ring.tx_status = 0; writel(0, &txdma->service_request); adapter->tx_ring.send_idx = 0; } /** * et131x_adapter_setup - Set the adapter up as per cassini+ documentation * @adapter: pointer to our private adapter structure * * Returns 0 on success, errno on failure (as defined in errno.h) */ static void et131x_adapter_setup(struct et131x_adapter *adapter) { /* Configure the JAGCore */ et131x_configure_global_regs(adapter); et1310_config_mac_regs1(adapter); /* Configure the MMC registers */ /* All we need to do is initialize the Memory Control Register */ writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl); et1310_config_rxmac_regs(adapter); et1310_config_txmac_regs(adapter); et131x_config_rx_dma_regs(adapter); et131x_config_tx_dma_regs(adapter); et1310_config_macstat_regs(adapter); et1310_phy_power_down(adapter, 0); et131x_xcvr_init(adapter); } /** * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310 * @adapter: pointer to our private adapter structure */ static 
void et131x_soft_reset(struct et131x_adapter *adapter)
{
	/* Disable MAC Core */
	writel(0xc00f0000, &adapter->regs->mac.cfg1);

	/* Set everything to a reset value */
	writel(0x7F, &adapter->regs->global.sw_reset);
	/* Hold the MAC in reset, then release it (cfg1 cleared) */
	writel(0x000f0000, &adapter->regs->mac.cfg1);
	writel(0x00000000, &adapter->regs->mac.cfg1);
}

/**
 * et131x_enable_interrupts	-	enable interrupt
 * @adapter: et131x device
 *
 * Enable the appropriate interrupts on the ET131x according to our
 * configuration
 */
static void et131x_enable_interrupts(struct et131x_adapter *adapter)
{
	u32 mask;

	/* Enable all global interrupts */
	/* The flow-control variants unmask the pause-frame interrupts as
	 * well; otherwise they stay masked. */
	if (adapter->flowcontrol == FLOW_TXONLY ||
			    adapter->flowcontrol == FLOW_BOTH)
		mask = INT_MASK_ENABLE;
	else
		mask = INT_MASK_ENABLE_NO_FLOW;

	writel(mask, &adapter->regs->global.int_mask);
}

/**
 * et131x_disable_interrupts	-	interrupt disable
 * @adapter: et131x device
 *
 * Block all interrupts from the et131x device at the device itself
 */
static void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
	/* Disable all global interrupts */
	writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}

/**
 * et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310
 * @adapter: pointer to our adapter structure
 */
static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
	/* Setup the tramsmit dma configuration register */
	writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
					&adapter->regs->txdma.csr);
}

/**
 * et131x_enable_txrx - Enable tx/rx queues
 * @netdev: device to be enabled
 */
static void et131x_enable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Enable the Tx and Rx DMA engines (if not already enabled) */
	et131x_rx_dma_enable(adapter);
	et131x_tx_dma_enable(adapter);

	/* Enable device interrupts */
	/* Only unmask if the ISR has actually been hooked up */
	if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
		et131x_enable_interrupts(adapter);

	/* We're ready to move some data, so start the queue */
	netif_start_queue(netdev);
}

/**
 * et131x_disable_txrx - Disable tx/rx queues
 * @netdev: device to be disabled
 *
 * -- gate off JAGCore;
 * -- set gigE PHY in Coma mode
 * -- wake on phy_interrupt; Perform software reset JAGCore,
 *    re-initialize jagcore and gigE PHY
 *
 * Add D0-ASPM-PhyLinkDown Support:
 * -- while in D0, when there is a phy_interrupt indicating phy link
 *    down status, call the MPSetPhyComa routine to enter this active
 *    state power saving mode
 * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
 *    indicating linkup status, call the MPDisablePhyComa routine to
 *    restore JAGCore and gigE PHY
 */
static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Save the GbE PHY speed and duplex modes. Need to restore this
	 * when cable is plugged back in
	 */
	/*
	 * TODO - when PM is re-enabled, check if we need to
	 * perform a similar task as this -
	 * adapter->pdown_speed = adapter->ai_force_speed;
	 * adapter->pdown_duplex = adapter->ai_force_duplex;
	 */

	/* Stop sending packets. */
	/* Flag must be set under send_hw_lock so the xmit path sees it */
	spin_lock_irqsave(&adapter->send_hw_lock, flags);
	adapter->flags |= fMP_ADAPTER_LOWER_POWER;
	spin_unlock_irqrestore(&adapter->send_hw_lock, flags);

	/* Wait for outstanding Receive packets */
	et131x_disable_txrx(adapter->netdev);

	/* Gate off JAGCore 3 clock domains */
	pmcsr &= ~ET_PMCSR_INIT;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Program gigE PHY in to Coma mode */
	/* Two separate writes: clocks must be gated before coma is set */
	pmcsr |= ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);
}

/**
 * et1310_disable_phy_coma - Disable the Phy Coma Mode
 * @adapter: pointer to our adapter structure
 */
static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Disable phy_sw_coma register and re-enable JAGCore clocks */
	pmcsr |= ET_PMCSR_INIT;
	pmcsr &= ~ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Restore the GbE PHY speed and duplex modes;
	 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
	 */
	/* TODO - when PM is re-enabled, check if we need to
	 * perform a similar task as this -
	 * adapter->ai_force_speed = adapter->pdown_speed;
	 * adapter->ai_force_duplex = adapter->pdown_duplex;
	 */

	/* Re-initialize the send structures */
	et131x_init_send(adapter);

	/* Bring the device back to the state it was during init prior to
	 * autonegotiation being complete. This way, when we get the auto-neg
	 * complete interrupt, we can complete init by calling ConfigMacREGS2.
	 */
	et131x_soft_reset(adapter);

	/* setup et1310 as per the documentation ?? */
	et131x_adapter_setup(adapter);

	/* Allow Tx to restart */
	adapter->flags &= ~fMP_ADAPTER_LOWER_POWER;

	et131x_enable_txrx(adapter->netdev);
}

/* Advance a 10-bit free-buffer-ring index (bit ET_DMA10_WRAP carries the
 * hardware wrap flag), wrapping back to 0 and toggling the wrap bit once
 * 'limit' is passed. Returns the updated value (also stored back through
 * the pointer). */
static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
{
	u32 tmp_free_buff_ring = *free_buff_ring;
	tmp_free_buff_ring++;
	/* This works for all cases where limit < 1024. The 1023 case
	   works because 1023++ is 1024 which means the if condition is not
	   taken but the carry of the bit into the wrap bit toggles the wrap
	   value correctly */
	if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
		tmp_free_buff_ring &= ~ET_DMA10_MASK;
		tmp_free_buff_ring ^= ET_DMA10_WRAP;
	}
	/* For the 1023 case */
	tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
	*free_buff_ring = tmp_free_buff_ring;
	return tmp_free_buff_ring;
}

/**
 * et131x_align_allocated_memory - Align allocated memory on a given boundary
 * @adapter: pointer to our adapter structure
 * @phys_addr: pointer to Physical address (in/out: rounded up to boundary)
 * @offset: pointer to the offset variable (out: bytes added to align)
 * @mask: correct mask (boundary minus one, e.g. 0x0FFF for 4K)
 *
 * Note: @adapter is currently unused; kept for interface consistency.
 */
static void et131x_align_allocated_memory(struct et131x_adapter *adapter,
					  u64 *phys_addr,
					  u64 *offset, u64 mask)
{
	u64 new_addr = *phys_addr & ~mask;

	*offset = 0;

	if (new_addr != *phys_addr) {
		/* Move to next aligned block */
		new_addr += mask + 1;
		/* Return offset for adjusting virt addr */
		*offset = new_addr - *phys_addr;
		/* Return new physical address */
		*phys_addr = new_addr;
	}
}

/**
 * et131x_rx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and
errno on failure (as defined in errno.h) * * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required, * and the Packet Status Ring. */ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) { u32 i, j; u32 bufsize; u32 pktstat_ringsize, fbr_chunksize; struct rx_ring *rx_ring; /* Setup some convenience pointers */ rx_ring = &adapter->rx_ring; /* Alloc memory for the lookup table */ #ifdef USE_FBR0 rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); #endif rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); /* The first thing we will do is configure the sizes of the buffer * rings. These will change based on jumbo packet support. Larger * jumbo packets increases the size of each entry in FBR0, and the * number of entries in FBR0, while at the same time decreasing the * number of entries in FBR1. * * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1 * entries are huge in order to accommodate a "jumbo" frame, then it * will have less entries. Conversely, FBR1 will now be relied upon * to carry more "normal" frames, thus it's entry size also increases * and the number of entries goes up too (since it now carries * "small" + "regular" packets. * * In this scheme, we try to maintain 512 entries between the two * rings. Also, FBR1 remains a constant size - when it's size doubles * the number of entries halves. FBR0 increases in size, however. 
*/ if (adapter->registry_jumbo_packet < 2048) { #ifdef USE_FBR0 rx_ring->fbr[1]->buffsize = 256; rx_ring->fbr[1]->num_entries = 512; #endif rx_ring->fbr[0]->buffsize = 2048; rx_ring->fbr[0]->num_entries = 512; } else if (adapter->registry_jumbo_packet < 4096) { #ifdef USE_FBR0 rx_ring->fbr[1]->buffsize = 512; rx_ring->fbr[1]->num_entries = 1024; #endif rx_ring->fbr[0]->buffsize = 4096; rx_ring->fbr[0]->num_entries = 512; } else { #ifdef USE_FBR0 rx_ring->fbr[1]->buffsize = 1024; rx_ring->fbr[1]->num_entries = 768; #endif rx_ring->fbr[0]->buffsize = 16384; rx_ring->fbr[0]->num_entries = 128; } #ifdef USE_FBR0 adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[1]->num_entries + adapter->rx_ring.fbr[0]->num_entries; #else adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries; #endif /* Allocate an area of memory for Free Buffer Ring 1 */ bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) + 0xfff; rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, bufsize, &rx_ring->fbr[0]->ring_physaddr, GFP_KERNEL); if (!rx_ring->fbr[0]->ring_virtaddr) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Free Buffer Ring 1\n"); return -ENOMEM; } /* Save physical address * * NOTE: dma_alloc_coherent(), used above to alloc DMA regions, * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses * are ever returned, make sure the high part is retrieved here * before storing the adjusted address. 
*/ rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr; /* Align Free Buffer Ring 1 on a 4K boundary */ et131x_align_allocated_memory(adapter, &rx_ring->fbr[0]->real_physaddr, &rx_ring->fbr[0]->offset, 0x0FFF); rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr + rx_ring->fbr[0]->offset); #ifdef USE_FBR0 /* Allocate an area of memory for Free Buffer Ring 0 */ bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) + 0xfff; rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, bufsize, &rx_ring->fbr[1]->ring_physaddr, GFP_KERNEL); if (!rx_ring->fbr[1]->ring_virtaddr) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Free Buffer Ring 0\n"); return -ENOMEM; } /* Save physical address * * NOTE: dma_alloc_coherent(), used above to alloc DMA regions, * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses * are ever returned, make sure the high part is retrieved here before * storing the adjusted address. */ rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr; /* Align Free Buffer Ring 0 on a 4K boundary */ et131x_align_allocated_memory(adapter, &rx_ring->fbr[1]->real_physaddr, &rx_ring->fbr[1]->offset, 0x0FFF); rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr + rx_ring->fbr[1]->offset); #endif for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) { u64 fbr1_tmp_physaddr; u64 fbr1_offset; u32 fbr1_align; /* This code allocates an area of memory big enough for N * free buffers + (buffer_size - 1) so that the buffers can * be aligned on 4k boundaries. If each buffer were aligned * to a buffer_size boundary, the effect would be to double * the size of FBR0. By allocating N buffers at once, we * reduce this overhead. 
*/ if (rx_ring->fbr[0]->buffsize > 4096) fbr1_align = 4096; else fbr1_align = rx_ring->fbr[0]->buffsize; fbr_chunksize = (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1; rx_ring->fbr[0]->mem_virtaddrs[i] = dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize, &rx_ring->fbr[0]->mem_physaddrs[i], GFP_KERNEL); if (!rx_ring->fbr[0]->mem_virtaddrs[i]) { dev_err(&adapter->pdev->dev, "Could not alloc memory\n"); return -ENOMEM; } /* See NOTE in "Save Physical Address" comment above */ fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i]; et131x_align_allocated_memory(adapter, &fbr1_tmp_physaddr, &fbr1_offset, (fbr1_align - 1)); for (j = 0; j < FBR_CHUNKS; j++) { u32 index = (i * FBR_CHUNKS) + j; /* Save the Virtual address of this index for quick * access later */ rx_ring->fbr[0]->virt[index] = (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] + (j * rx_ring->fbr[0]->buffsize) + fbr1_offset; /* now store the physical address in the descriptor * so the device can access it */ rx_ring->fbr[0]->bus_high[index] = (u32) (fbr1_tmp_physaddr >> 32); rx_ring->fbr[0]->bus_low[index] = (u32) fbr1_tmp_physaddr; fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize; rx_ring->fbr[0]->buffer1[index] = rx_ring->fbr[0]->virt[index]; rx_ring->fbr[0]->buffer2[index] = rx_ring->fbr[0]->virt[index] - 4; } } #ifdef USE_FBR0 /* Same for FBR0 (if in use) */ for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) { u64 fbr0_tmp_physaddr; u64 fbr0_offset; fbr_chunksize = ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1; rx_ring->fbr[1]->mem_virtaddrs[i] = dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize, &rx_ring->fbr[1]->mem_physaddrs[i], GFP_KERNEL); if (!rx_ring->fbr[1]->mem_virtaddrs[i]) { dev_err(&adapter->pdev->dev, "Could not alloc memory\n"); return -ENOMEM; } /* See NOTE in "Save Physical Address" comment above */ fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i]; et131x_align_allocated_memory(adapter, &fbr0_tmp_physaddr, &fbr0_offset, rx_ring->fbr[1]->buffsize - 1); for 
(j = 0; j < FBR_CHUNKS; j++) { u32 index = (i * FBR_CHUNKS) + j; rx_ring->fbr[1]->virt[index] = (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] + (j * rx_ring->fbr[1]->buffsize) + fbr0_offset; rx_ring->fbr[1]->bus_high[index] = (u32) (fbr0_tmp_physaddr >> 32); rx_ring->fbr[1]->bus_low[index] = (u32) fbr0_tmp_physaddr; fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize; rx_ring->fbr[1]->buffer1[index] = rx_ring->fbr[1]->virt[index]; rx_ring->fbr[1]->buffer2[index] = rx_ring->fbr[1]->virt[index] - 4; } } #endif /* Allocate an area of memory for FIFO of Packet Status ring entries */ pktstat_ringsize = sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries; rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, pktstat_ringsize, &rx_ring->ps_ring_physaddr, GFP_KERNEL); if (!rx_ring->ps_ring_virtaddr) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Packet Status Ring\n"); return -ENOMEM; } printk(KERN_INFO "Packet Status Ring %lx\n", (unsigned long) rx_ring->ps_ring_physaddr); /* * NOTE : dma_alloc_coherent(), used above to alloc DMA regions, * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses * are ever returned, make sure the high part is retrieved here before * storing the adjusted address. */ /* Allocate an area of memory for writeback of status information */ rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev, sizeof(struct rx_status_block), &rx_ring->rx_status_bus, GFP_KERNEL); if (!rx_ring->rx_status_block) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Status Block\n"); return -ENOMEM; } rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD; printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus); /* Recv * kmem_cache_create initializes a lookaside list. After successful * creation, nonpaged fixed-size blocks can be allocated from and * freed to the lookaside list. * RFDs will be allocated from this pool. 
*/ rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name, sizeof(struct rfd), 0, SLAB_CACHE_DMA | SLAB_HWCACHE_ALIGN, NULL); adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE; /* The RFDs are going to be put on lists later on, so initialize the * lists now. */ INIT_LIST_HEAD(&rx_ring->recv_list); return 0; } /** * et131x_rx_dma_memory_free - Free all memory allocated within this module. * @adapter: pointer to our private adapter structure */ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) { u32 index; u32 bufsize; u32 pktstat_ringsize; struct rfd *rfd; struct rx_ring *rx_ring; /* Setup some convenience pointers */ rx_ring = &adapter->rx_ring; /* Free RFDs and associated packet descriptors */ WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd); while (!list_empty(&rx_ring->recv_list)) { rfd = (struct rfd *) list_entry(rx_ring->recv_list.next, struct rfd, list_node); list_del(&rfd->list_node); rfd->skb = NULL; kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd); } /* Free Free Buffer Ring 1 */ if (rx_ring->fbr[0]->ring_virtaddr) { /* First the packet memory */ for (index = 0; index < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) { if (rx_ring->fbr[0]->mem_virtaddrs[index]) { u32 fbr1_align; if (rx_ring->fbr[0]->buffsize > 4096) fbr1_align = 4096; else fbr1_align = rx_ring->fbr[0]->buffsize; bufsize = (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) + fbr1_align - 1; dma_free_coherent(&adapter->pdev->dev, bufsize, rx_ring->fbr[0]->mem_virtaddrs[index], rx_ring->fbr[0]->mem_physaddrs[index]); rx_ring->fbr[0]->mem_virtaddrs[index] = NULL; } } /* Now the FIFO itself */ rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset); bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) + 0xfff; dma_free_coherent(&adapter->pdev->dev, bufsize, rx_ring->fbr[0]->ring_virtaddr, rx_ring->fbr[0]->ring_physaddr); rx_ring->fbr[0]->ring_virtaddr = NULL; } #ifdef USE_FBR0 /* Now the same for 
Free Buffer Ring 0 */ if (rx_ring->fbr[1]->ring_virtaddr) { /* First the packet memory */ for (index = 0; index < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) { if (rx_ring->fbr[1]->mem_virtaddrs[index]) { bufsize = (rx_ring->fbr[1]->buffsize * (FBR_CHUNKS + 1)) - 1; dma_free_coherent(&adapter->pdev->dev, bufsize, rx_ring->fbr[1]->mem_virtaddrs[index], rx_ring->fbr[1]->mem_physaddrs[index]); rx_ring->fbr[1]->mem_virtaddrs[index] = NULL; } } /* Now the FIFO itself */ rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset); bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) + 0xfff; dma_free_coherent(&adapter->pdev->dev, bufsize, rx_ring->fbr[1]->ring_virtaddr, rx_ring->fbr[1]->ring_physaddr); rx_ring->fbr[1]->ring_virtaddr = NULL; } #endif /* Free Packet Status Ring */ if (rx_ring->ps_ring_virtaddr) { pktstat_ringsize = sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries; dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize, rx_ring->ps_ring_virtaddr, rx_ring->ps_ring_physaddr); rx_ring->ps_ring_virtaddr = NULL; } /* Free area of memory for the writeback of status information */ if (rx_ring->rx_status_block) { dma_free_coherent(&adapter->pdev->dev, sizeof(struct rx_status_block), rx_ring->rx_status_block, rx_ring->rx_status_bus); rx_ring->rx_status_block = NULL; } /* Destroy the lookaside (RFD) pool */ if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) { kmem_cache_destroy(rx_ring->recv_lookaside); adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE; } /* Free the FBR Lookup Table */ #ifdef USE_FBR0 kfree(rx_ring->fbr[1]); #endif kfree(rx_ring->fbr[0]); /* Reset Counters */ rx_ring->num_ready_recv = 0; } /** * et131x_init_recv - Initialize receive data structures. 
* @adapter: pointer to our private adapter structure * * Returns 0 on success and errno on failure (as defined in errno.h) */ static int et131x_init_recv(struct et131x_adapter *adapter) { int status = -ENOMEM; struct rfd *rfd = NULL; u32 rfdct; u32 numrfd = 0; struct rx_ring *rx_ring; /* Setup some convenience pointers */ rx_ring = &adapter->rx_ring; /* Setup each RFD */ for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) { rfd = kmem_cache_alloc(rx_ring->recv_lookaside, GFP_ATOMIC | GFP_DMA); if (!rfd) { dev_err(&adapter->pdev->dev, "Couldn't alloc RFD out of kmem_cache\n"); status = -ENOMEM; continue; } rfd->skb = NULL; /* Add this RFD to the recv_list */ list_add_tail(&rfd->list_node, &rx_ring->recv_list); /* Increment both the available RFD's, and the total RFD's. */ rx_ring->num_ready_recv++; numrfd++; } if (numrfd > NIC_MIN_NUM_RFD) status = 0; rx_ring->num_rfd = numrfd; if (status != 0) { kmem_cache_free(rx_ring->recv_lookaside, rfd); dev_err(&adapter->pdev->dev, "Allocation problems in et131x_init_recv\n"); } return status; } /** * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate. * @adapter: pointer to our adapter structure */ static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter) { struct phy_device *phydev = adapter->phydev; if (!phydev) return; /* For version B silicon, we do not use the RxDMA timer for 10 and 100 * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing. 
*/ if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) { writel(0, &adapter->regs->rxdma.max_pkt_time); writel(1, &adapter->regs->rxdma.num_pkt_done); } } /** * NICReturnRFD - Recycle a RFD and put it back onto the receive list * @adapter: pointer to our adapter * @rfd: pointer to the RFD */ static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd) { struct rx_ring *rx_local = &adapter->rx_ring; struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; u16 buff_index = rfd->bufferindex; u8 ring_index = rfd->ringindex; unsigned long flags; /* We don't use any of the OOB data besides status. Otherwise, we * need to clean up OOB data */ if ( #ifdef USE_FBR0 (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) || #endif (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) { spin_lock_irqsave(&adapter->fbr_lock, flags); if (ring_index == 1) { struct fbr_desc *next = (struct fbr_desc *) (rx_local->fbr[0]->ring_virtaddr) + INDEX10(rx_local->fbr[0]->local_full); /* Handle the Free Buffer Ring advancement here. Write * the PA / Buffer Index for the returned buffer into * the oldest (next to be freed)FBR entry */ next->addr_hi = rx_local->fbr[0]->bus_high[buff_index]; next->addr_lo = rx_local->fbr[0]->bus_low[buff_index]; next->word2 = buff_index; writel(bump_free_buff_ring( &rx_local->fbr[0]->local_full, rx_local->fbr[0]->num_entries - 1), &rx_dma->fbr1_full_offset); } #ifdef USE_FBR0 else { struct fbr_desc *next = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr + INDEX10(rx_local->fbr[1]->local_full); /* Handle the Free Buffer Ring advancement here. 
Write * the PA / Buffer Index for the returned buffer into * the oldest (next to be freed) FBR entry */ next->addr_hi = rx_local->fbr[1]->bus_high[buff_index]; next->addr_lo = rx_local->fbr[1]->bus_low[buff_index]; next->word2 = buff_index; writel(bump_free_buff_ring( &rx_local->fbr[1]->local_full, rx_local->fbr[1]->num_entries - 1), &rx_dma->fbr0_full_offset); } #endif spin_unlock_irqrestore(&adapter->fbr_lock, flags); } else { dev_err(&adapter->pdev->dev, "%s illegal Buffer Index returned\n", __func__); } /* The processing on this RFD is done, so put it back on the tail of * our list */ spin_lock_irqsave(&adapter->rcv_lock, flags); list_add_tail(&rfd->list_node, &rx_local->recv_list); rx_local->num_ready_recv++; spin_unlock_irqrestore(&adapter->rcv_lock, flags); WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd); } /** * nic_rx_pkts - Checks the hardware for available packets * @adapter: pointer to our adapter * * Returns rfd, a pointer to our MPRFD. * * Checks the hardware for available packets, using completion ring * If packets are available, it gets an RFD from the recv_list, attaches * the packet to it, puts the RFD in the RecvPendList, and also returns * the pointer to the RFD. */ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) { struct rx_ring *rx_local = &adapter->rx_ring; struct rx_status_block *status; struct pkt_stat_desc *psr; struct rfd *rfd; u32 i; u8 *buf; unsigned long flags; struct list_head *element; u8 ring_index; u16 buff_index; u32 len; u32 word0; u32 word1; /* RX Status block is written by the DMA engine prior to every * interrupt. It contains the next to be used entry in the Packet * Status Ring, and also the two Free Buffer rings. 
*/ status = rx_local->rx_status_block; word1 = status->word1 >> 16; /* Get the useful bits */ /* Check the PSR and wrap bits do not match */ if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF)) /* Looks like this ring is not updated yet */ return NULL; /* The packet status ring indicates that data is available. */ psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) + (rx_local->local_psr_full & 0xFFF); /* Grab any information that is required once the PSR is * advanced, since we can no longer rely on the memory being * accurate */ len = psr->word1 & 0xFFFF; ring_index = (psr->word1 >> 26) & 0x03; buff_index = (psr->word1 >> 16) & 0x3FF; word0 = psr->word0; /* Indicate that we have used this PSR entry. */ /* FIXME wrap 12 */ add_12bit(&rx_local->local_psr_full, 1); if ( (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) { /* Clear psr full and toggle the wrap bit */ rx_local->local_psr_full &= ~0xFFF; rx_local->local_psr_full ^= 0x1000; } writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset); #ifndef USE_FBR0 if (ring_index != 1) return NULL; #endif #ifdef USE_FBR0 if (ring_index > 1 || (ring_index == 0 && buff_index > rx_local->fbr[1]->num_entries - 1) || (ring_index == 1 && buff_index > rx_local->fbr[0]->num_entries - 1)) #else if (ring_index != 1 || buff_index > rx_local->fbr[0]->num_entries - 1) #endif { /* Illegal buffer or ring index cannot be used by S/W*/ dev_err(&adapter->pdev->dev, "NICRxPkts PSR Entry %d indicates " "length of %d and/or bad bi(%d)\n", rx_local->local_psr_full & 0xFFF, len, buff_index); return NULL; } /* Get and fill the RFD. 
*/ spin_lock_irqsave(&adapter->rcv_lock, flags); rfd = NULL; element = rx_local->recv_list.next; rfd = (struct rfd *) list_entry(element, struct rfd, list_node); if (rfd == NULL) { spin_unlock_irqrestore(&adapter->rcv_lock, flags); return NULL; } list_del(&rfd->list_node); rx_local->num_ready_recv--; spin_unlock_irqrestore(&adapter->rcv_lock, flags); rfd->bufferindex = buff_index; rfd->ringindex = ring_index; /* In V1 silicon, there is a bug which screws up filtering of * runt packets. Therefore runt packet filtering is disabled * in the MAC and the packets are dropped here. They are * also counted here. */ if (len < (NIC_MIN_PACKET_SIZE + 4)) { adapter->stats.rx_other_errs++; len = 0; } if (len) { /* Determine if this is a multicast packet coming in */ if ((word0 & ALCATEL_MULTICAST_PKT) && !(word0 & ALCATEL_BROADCAST_PKT)) { /* Promiscuous mode and Multicast mode are * not mutually exclusive as was first * thought. I guess Promiscuous is just * considered a super-set of the other * filters. Generally filter is 0x2b when in * promiscuous mode. */ if ((adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) && !(adapter->packet_filter & ET131X_PACKET_TYPE_PROMISCUOUS) && !(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) { /* * Note - ring_index for fbr[] array is reversed * 1 for FBR0 etc */ buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]-> virt[buff_index]; /* Loop through our list to see if the * destination address of this packet * matches one in our list. */ for (i = 0; i < adapter->multicast_addr_count; i++) { if (buf[0] == adapter->multicast_list[i][0] && buf[1] == adapter->multicast_list[i][1] && buf[2] == adapter->multicast_list[i][2] && buf[3] == adapter->multicast_list[i][3] && buf[4] == adapter->multicast_list[i][4] && buf[5] == adapter->multicast_list[i][5]) { break; } } /* If our index is equal to the number * of Multicast address we have, then * this means we did not find this * packet's matching address in our * list. 
Set the len to zero, * so we free our RFD when we return * from this function. */ if (i == adapter->multicast_addr_count) len = 0; } if (len > 0) adapter->stats.multicast_pkts_rcvd++; } else if (word0 & ALCATEL_BROADCAST_PKT) adapter->stats.broadcast_pkts_rcvd++; else /* Not sure what this counter measures in * promiscuous mode. Perhaps we should check * the MAC address to see if it is directed * to us in promiscuous mode. */ adapter->stats.unicast_pkts_rcvd++; } if (len > 0) { struct sk_buff *skb = NULL; /*rfd->len = len - 4; */ rfd->len = len; skb = dev_alloc_skb(rfd->len + 2); if (!skb) { dev_err(&adapter->pdev->dev, "Couldn't alloc an SKB for Rx\n"); return NULL; } adapter->net_stats.rx_bytes += rfd->len; /* * Note - ring_index for fbr[] array is reversed, * 1 for FBR0 etc */ memcpy(skb_put(skb, rfd->len), rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index], rfd->len); skb->dev = adapter->netdev; skb->protocol = eth_type_trans(skb, adapter->netdev); skb->ip_summed = CHECKSUM_NONE; netif_rx_ni(skb); } else { rfd->len = 0; } nic_return_rfd(adapter, rfd); return rfd; } /** * et131x_handle_recv_interrupt - Interrupt handler for receive processing * @adapter: pointer to our adapter * * Assumption, Rcv spinlock has been acquired. */ static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter) { struct rfd *rfd = NULL; u32 count = 0; bool done = true; /* Process up to available RFD's */ while (count < NUM_PACKETS_HANDLED) { if (list_empty(&adapter->rx_ring.recv_list)) { WARN_ON(adapter->rx_ring.num_ready_recv != 0); done = false; break; } rfd = nic_rx_pkts(adapter); if (rfd == NULL) break; /* Do not receive any packets until a filter has been set. * Do not receive any packets until we have link. * If length is zero, return the RFD in order to advance the * Free buffer ring. 
*/ if (!adapter->packet_filter || !netif_carrier_ok(adapter->netdev) || rfd->len == 0) continue; /* Increment the number of packets we received */ adapter->net_stats.rx_packets++; /* Set the status on the packet, either resources or success */ if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) { dev_warn(&adapter->pdev->dev, "RFD's are running out\n"); } count++; } if (count == NUM_PACKETS_HANDLED || !done) { adapter->rx_ring.unfinished_receives = true; writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, &adapter->regs->global.watchdog_timer); } else /* Watchdog timer will disable itself if appropriate. */ adapter->rx_ring.unfinished_receives = false; } /** * et131x_tx_dma_memory_alloc * @adapter: pointer to our private adapter structure * * Returns 0 on success and errno on failure (as defined in errno.h). * * Allocates memory that will be visible both to the device and to the CPU. * The OS will pass us packets, pointers to which we will insert in the Tx * Descriptor queue. The device will read this queue to find the packets in * memory. The device will update the "status" in memory each time it xmits a * packet. */ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) { int desc_size = 0; struct tx_ring *tx_ring = &adapter->tx_ring; /* Allocate memory for the TCB's (Transmit Control Block) */ adapter->tx_ring.tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA); if (!adapter->tx_ring.tcb_ring) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n"); return -ENOMEM; } /* Allocate enough memory for the Tx descriptor ring, and allocate * some extra so that the ring can be aligned on a 4k boundary. 
*/ desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1; tx_ring->tx_desc_ring = (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev, desc_size, &tx_ring->tx_desc_ring_pa, GFP_KERNEL); if (!adapter->tx_ring.tx_desc_ring) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx Ring\n"); return -ENOMEM; } /* Save physical address * * NOTE: dma_alloc_coherent(), used above to alloc DMA regions, * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses * are ever returned, make sure the high part is retrieved here before * storing the adjusted address. */ /* Allocate memory for the Tx status block */ tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev, sizeof(u32), &tx_ring->tx_status_pa, GFP_KERNEL); if (!adapter->tx_ring.tx_status_pa) { dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx status block\n"); return -ENOMEM; } return 0; } /** * et131x_tx_dma_memory_free - Free all memory allocated within this module * @adapter: pointer to our private adapter structure * * Returns 0 on success and errno on failure (as defined in errno.h). */ static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter) { int desc_size = 0; if (adapter->tx_ring.tx_desc_ring) { /* Free memory relating to Tx rings here */ desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1; dma_free_coherent(&adapter->pdev->dev, desc_size, adapter->tx_ring.tx_desc_ring, adapter->tx_ring.tx_desc_ring_pa); adapter->tx_ring.tx_desc_ring = NULL; } /* Free memory for the Tx status block */ if (adapter->tx_ring.tx_status) { dma_free_coherent(&adapter->pdev->dev, sizeof(u32), adapter->tx_ring.tx_status, adapter->tx_ring.tx_status_pa); adapter->tx_ring.tx_status = NULL; } /* Free the memory for the tcb structures */ kfree(adapter->tx_ring.tcb_ring); } /** * nic_send_packet - NIC specific send handler for version B silicon. * @adapter: pointer to our adapter * @tcb: pointer to struct tcb * * Returns 0 or errno. 
*/ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) { u32 i; struct tx_desc desc[24]; /* 24 x 16 byte */ u32 frag = 0; u32 thiscopy, remainder; struct sk_buff *skb = tcb->skb; u32 nr_frags = skb_shinfo(skb)->nr_frags + 1; struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0]; unsigned long flags; struct phy_device *phydev = adapter->phydev; /* Part of the optimizations of this send routine restrict us to * sending 24 fragments at a pass. In practice we should never see * more than 5 fragments. * * NOTE: The older version of this function (below) can handle any * number of fragments. If needed, we can call this function, * although it is less efficient. */ if (nr_frags > 23) return -EIO; memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1)); for (i = 0; i < nr_frags; i++) { /* If there is something in this element, lets get a * descriptor from the ring and get the necessary data */ if (i == 0) { /* If the fragments are smaller than a standard MTU, * then map them to a single descriptor in the Tx * Desc ring. However, if they're larger, as is * possible with support for jumbo packets, then * split them each across 2 descriptors. * * This will work until we determine why the hardware * doesn't seem to like large fragments. */ if ((skb->len - skb->data_len) <= 1514) { desc[frag].addr_hi = 0; /* Low 16bits are length, high is vlan and unused currently so zero */ desc[frag].len_vlan = skb->len - skb->data_len; /* NOTE: Here, the dma_addr_t returned from * dma_map_single() is implicitly cast as a * u32. 
Although dma_addr_t can be * 64-bit, the address returned by * dma_map_single() is always 32-bit * addressable (as defined by the pci/dma * subsystem) */ desc[frag++].addr_lo = dma_map_single(&adapter->pdev->dev, skb->data, skb->len - skb->data_len, DMA_TO_DEVICE); } else { desc[frag].addr_hi = 0; desc[frag].len_vlan = (skb->len - skb->data_len) / 2; /* NOTE: Here, the dma_addr_t returned from * dma_map_single() is implicitly cast as a * u32. Although dma_addr_t can be * 64-bit, the address returned by * dma_map_single() is always 32-bit * addressable (as defined by the pci/dma * subsystem) */ desc[frag++].addr_lo = dma_map_single(&adapter->pdev->dev, skb->data, ((skb->len - skb->data_len) / 2), DMA_TO_DEVICE); desc[frag].addr_hi = 0; desc[frag].len_vlan = (skb->len - skb->data_len) / 2; /* NOTE: Here, the dma_addr_t returned from * dma_map_single() is implicitly cast as a * u32. Although dma_addr_t can be * 64-bit, the address returned by * dma_map_single() is always 32-bit * addressable (as defined by the pci/dma * subsystem) */ desc[frag++].addr_lo = dma_map_single(&adapter->pdev->dev, skb->data + ((skb->len - skb->data_len) / 2), ((skb->len - skb->data_len) / 2), DMA_TO_DEVICE); } } else { desc[frag].addr_hi = 0; desc[frag].len_vlan = frags[i - 1].size; /* NOTE: Here, the dma_addr_t returned from * dma_map_page() is implicitly cast as a u32. 
* Although dma_addr_t can be 64-bit, the address * returned by dma_map_page() is always 32-bit * addressable (as defined by the pci/dma subsystem) */ desc[frag++].addr_lo = skb_frag_dma_map( &adapter->pdev->dev, &frags[i - 1], 0, frags[i - 1].size, DMA_TO_DEVICE); } } if (phydev && phydev->speed == SPEED_1000) { if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) { /* Last element & Interrupt flag */ desc[frag - 1].flags = 0x5; adapter->tx_ring.since_irq = 0; } else { /* Last element */ desc[frag - 1].flags = 0x1; } } else desc[frag - 1].flags = 0x5; desc[0].flags |= 2; /* First element flag */ tcb->index_start = adapter->tx_ring.send_idx; tcb->stale = 0; spin_lock_irqsave(&adapter->send_hw_lock, flags); thiscopy = NUM_DESC_PER_RING_TX - INDEX10(adapter->tx_ring.send_idx); if (thiscopy >= frag) { remainder = 0; thiscopy = frag; } else { remainder = frag - thiscopy; } memcpy(adapter->tx_ring.tx_desc_ring + INDEX10(adapter->tx_ring.send_idx), desc, sizeof(struct tx_desc) * thiscopy); add_10bit(&adapter->tx_ring.send_idx, thiscopy); if (INDEX10(adapter->tx_ring.send_idx) == 0 || INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) { adapter->tx_ring.send_idx &= ~ET_DMA10_MASK; adapter->tx_ring.send_idx ^= ET_DMA10_WRAP; } if (remainder) { memcpy(adapter->tx_ring.tx_desc_ring, desc + thiscopy, sizeof(struct tx_desc) * remainder); add_10bit(&adapter->tx_ring.send_idx, remainder); } if (INDEX10(adapter->tx_ring.send_idx) == 0) { if (adapter->tx_ring.send_idx) tcb->index = NUM_DESC_PER_RING_TX - 1; else tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1); } else tcb->index = adapter->tx_ring.send_idx - 1; spin_lock(&adapter->tcb_send_qlock); if (adapter->tx_ring.send_tail) adapter->tx_ring.send_tail->next = tcb; else adapter->tx_ring.send_head = tcb; adapter->tx_ring.send_tail = tcb; WARN_ON(tcb->next != NULL); adapter->tx_ring.used++; spin_unlock(&adapter->tcb_send_qlock); /* Write the new write pointer back to the device. 
*/ writel(adapter->tx_ring.send_idx, &adapter->regs->txdma.service_request); /* For Gig only, we use Tx Interrupt coalescing. Enable the software * timer to wake us up if this packet isn't followed by N more. */ if (phydev && phydev->speed == SPEED_1000) { writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, &adapter->regs->global.watchdog_timer); } spin_unlock_irqrestore(&adapter->send_hw_lock, flags); return 0; } /** * send_packet - Do the work to send a packet * @skb: the packet(s) to send * @adapter: a pointer to the device's private adapter structure * * Return 0 in almost all cases; non-zero value in extreme hard failure only. * * Assumption: Send spinlock has been acquired */ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) { int status; struct tcb *tcb = NULL; u16 *shbufva; unsigned long flags; /* All packets must have at least a MAC address and a protocol type */ if (skb->len < ETH_HLEN) return -EIO; /* Get a TCB for this packet */ spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); tcb = adapter->tx_ring.tcb_qhead; if (tcb == NULL) { spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); return -ENOMEM; } adapter->tx_ring.tcb_qhead = tcb->next; if (adapter->tx_ring.tcb_qhead == NULL) adapter->tx_ring.tcb_qtail = NULL; spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); tcb->skb = skb; if (skb->data != NULL && skb->len - skb->data_len >= 6) { shbufva = (u16 *) skb->data; if ((shbufva[0] == 0xffff) && (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) { tcb->flags |= fMP_DEST_BROAD; } else if ((shbufva[0] & 0x3) == 0x0001) { tcb->flags |= fMP_DEST_MULTI; } } tcb->next = NULL; /* Call the NIC specific send handler. */ status = nic_send_packet(adapter, tcb); if (status != 0) { spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); if (adapter->tx_ring.tcb_qtail) adapter->tx_ring.tcb_qtail->next = tcb; else /* Apparently ready Q is empty. 
			 */
			adapter->tx_ring.tcb_qhead = tcb;

		adapter->tx_ring.tcb_qtail = tcb;

		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return status;
	}
	/* TCB accepted by the NIC send path; sanity-check the in-flight
	 * count against the total pool size. */
	WARN_ON(adapter->tx_ring.used > NUM_TCB);
	return 0;
}

/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev:device on which to TX the above packet(s)
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only
 */
static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Send these packets
	 *
	 * NOTE: The Linux Tx entry point is only given one packet at a time
	 * to Tx, so the PacketCount and it's array used makes no sense here
	 */

	/* TCB is not available */
	if (adapter->tx_ring.used >= NUM_TCB) {
		/* NOTE: If there's an error on send, no need to queue the
		 * packet under Linux; if we just send an error up to the
		 * netif layer, it will resend the skb to us.
		 */
		/* -ENOMEM tells the caller the skb was NOT consumed and
		 * may be retried; any other failure below frees the skb. */
		status = -ENOMEM;
	} else {
		/* We need to see if the link is up; if it's not, make the
		 * netif layer think we're good and drop the packet
		 */
		if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
					!netif_carrier_ok(netdev)) {
			dev_kfree_skb_any(skb);
			skb = NULL;

			adapter->net_stats.tx_dropped++;
		} else {
			status = send_packet(skb, adapter);
			if (status != 0 && status != -ENOMEM) {
				/* On any other error, make netif think we're
				 * OK and drop the packet
				 */
				dev_kfree_skb_any(skb);
				skb = NULL;
				adapter->net_stats.tx_dropped++;
			}
		}
	}
	return status;
}

/**
 * free_send_packet - Recycle a struct tcb
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Complete the packet if necessary
 * Assumption - Send spinlock has been acquired
 */
static inline void free_send_packet(struct et131x_adapter *adapter,
						struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &adapter->net_stats;

	/* Bump the per-destination-class transmit counter recorded when
	 * the packet was queued (broadcast / multicast / unicast). */
	if (tcb->flags & fMP_DEST_BROAD)
		atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
else if (tcb->flags & fMP_DEST_MULTI) atomic_inc(&adapter->stats.multicast_pkts_xmtd); else atomic_inc(&adapter->stats.unicast_pkts_xmtd); if (tcb->skb) { stats->tx_bytes += tcb->skb->len; /* Iterate through the TX descriptors on the ring * corresponding to this packet and umap the fragments * they point to */ do { desc = (struct tx_desc *) (adapter->tx_ring.tx_desc_ring + INDEX10(tcb->index_start)); dma_unmap_single(&adapter->pdev->dev, desc->addr_lo, desc->len_vlan, DMA_TO_DEVICE); add_10bit(&tcb->index_start, 1); if (INDEX10(tcb->index_start) >= NUM_DESC_PER_RING_TX) { tcb->index_start &= ~ET_DMA10_MASK; tcb->index_start ^= ET_DMA10_WRAP; } } while (desc != (adapter->tx_ring.tx_desc_ring + INDEX10(tcb->index))); dev_kfree_skb_any(tcb->skb); } memset(tcb, 0, sizeof(struct tcb)); /* Add the TCB to the Ready Q */ spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); adapter->net_stats.tx_packets++; if (adapter->tx_ring.tcb_qtail) adapter->tx_ring.tcb_qtail->next = tcb; else /* Apparently ready Q is empty. */ adapter->tx_ring.tcb_qhead = tcb; adapter->tx_ring.tcb_qtail = tcb; spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); WARN_ON(adapter->tx_ring.used < 0); } /** * et131x_free_busy_send_packets - Free and complete the stopped active sends * @adapter: pointer to our adapter * * Assumption - Send spinlock has been acquired */ static void et131x_free_busy_send_packets(struct et131x_adapter *adapter) { struct tcb *tcb; unsigned long flags; u32 freed = 0; /* Any packets being sent? 
Check the first TCB on the send list */ spin_lock_irqsave(&adapter->tcb_send_qlock, flags); tcb = adapter->tx_ring.send_head; while (tcb != NULL && freed < NUM_TCB) { struct tcb *next = tcb->next; adapter->tx_ring.send_head = next; if (next == NULL) adapter->tx_ring.send_tail = NULL; adapter->tx_ring.used--; spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); freed++; free_send_packet(adapter, tcb); spin_lock_irqsave(&adapter->tcb_send_qlock, flags); tcb = adapter->tx_ring.send_head; } WARN_ON(freed == NUM_TCB); spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); adapter->tx_ring.used = 0; } /** * et131x_handle_send_interrupt - Interrupt handler for sending processing * @adapter: pointer to our adapter * * Re-claim the send resources, complete sends and get more to send from * the send wait queue. * * Assumption - Send spinlock has been acquired */ static void et131x_handle_send_interrupt(struct et131x_adapter *adapter) { unsigned long flags; u32 serviced; struct tcb *tcb; u32 index; serviced = readl(&adapter->regs->txdma.new_service_complete); index = INDEX10(serviced); /* Has the ring wrapped? 
Process any descriptors that do not have * the same "wrap" indicator as the current completion indicator */ spin_lock_irqsave(&adapter->tcb_send_qlock, flags); tcb = adapter->tx_ring.send_head; while (tcb && ((serviced ^ tcb->index) & ET_DMA10_WRAP) && index < INDEX10(tcb->index)) { adapter->tx_ring.used--; adapter->tx_ring.send_head = tcb->next; if (tcb->next == NULL) adapter->tx_ring.send_tail = NULL; spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); free_send_packet(adapter, tcb); spin_lock_irqsave(&adapter->tcb_send_qlock, flags); /* Goto the next packet */ tcb = adapter->tx_ring.send_head; } while (tcb && !((serviced ^ tcb->index) & ET_DMA10_WRAP) && index > (tcb->index & ET_DMA10_MASK)) { adapter->tx_ring.used--; adapter->tx_ring.send_head = tcb->next; if (tcb->next == NULL) adapter->tx_ring.send_tail = NULL; spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); free_send_packet(adapter, tcb); spin_lock_irqsave(&adapter->tcb_send_qlock, flags); /* Goto the next packet */ tcb = adapter->tx_ring.send_head; } /* Wake up the queue when we hit a low-water mark */ if (adapter->tx_ring.used <= NUM_TCB / 3) netif_wake_queue(adapter->netdev); spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); } static int et131x_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) { struct et131x_adapter *adapter = netdev_priv(netdev); return phy_ethtool_gset(adapter->phydev, cmd); } static int et131x_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) { struct et131x_adapter *adapter = netdev_priv(netdev); return phy_ethtool_sset(adapter->phydev, cmd); } static int et131x_get_regs_len(struct net_device *netdev) { #define ET131X_REGS_LEN 256 return ET131X_REGS_LEN * sizeof(u32); } static void et131x_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *regs_data) { struct et131x_adapter *adapter = netdev_priv(netdev); struct address_map __iomem *aregs = adapter->regs; u32 *regs_buff = regs_data; u32 num = 0; memset(regs_data, 0, 
et131x_get_regs_len(netdev)); regs->version = (1 << 24) | (adapter->pdev->revision << 16) | adapter->pdev->device; /* PHY regs */ et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]); /* Autoneg next page transmit reg */ et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]); /* Link partner next page reg */ et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]); et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]); /* Global regs */ regs_buff[num++] = readl(&aregs->global.txq_start_addr); regs_buff[num++] = 
readl(&aregs->global.txq_end_addr); regs_buff[num++] = readl(&aregs->global.rxq_start_addr); regs_buff[num++] = readl(&aregs->global.rxq_end_addr); regs_buff[num++] = readl(&aregs->global.pm_csr); regs_buff[num++] = adapter->stats.interrupt_status; regs_buff[num++] = readl(&aregs->global.int_mask); regs_buff[num++] = readl(&aregs->global.int_alias_clr_en); regs_buff[num++] = readl(&aregs->global.int_status_alias); regs_buff[num++] = readl(&aregs->global.sw_reset); regs_buff[num++] = readl(&aregs->global.slv_timer); regs_buff[num++] = readl(&aregs->global.msi_config); regs_buff[num++] = readl(&aregs->global.loopback); regs_buff[num++] = readl(&aregs->global.watchdog_timer); /* TXDMA regs */ regs_buff[num++] = readl(&aregs->txdma.csr); regs_buff[num++] = readl(&aregs->txdma.pr_base_hi); regs_buff[num++] = readl(&aregs->txdma.pr_base_lo); regs_buff[num++] = readl(&aregs->txdma.pr_num_des); regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr); regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext); regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr); regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi); regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo); regs_buff[num++] = readl(&aregs->txdma.service_request); regs_buff[num++] = readl(&aregs->txdma.service_complete); regs_buff[num++] = readl(&aregs->txdma.cache_rd_index); regs_buff[num++] = readl(&aregs->txdma.cache_wr_index); regs_buff[num++] = readl(&aregs->txdma.tx_dma_error); regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt); regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt); regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt); regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt); regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt); regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt); regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt); regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt); regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt); 
regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt); regs_buff[num++] = readl(&aregs->txdma.new_service_complete); regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt); /* RXDMA regs */ regs_buff[num++] = readl(&aregs->rxdma.csr); regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi); regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo); regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done); regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time); regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr); regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext); regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr); regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi); regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo); regs_buff[num++] = readl(&aregs->rxdma.psr_num_des); regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset); regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset); regs_buff[num++] = readl(&aregs->rxdma.psr_access_index); regs_buff[num++] = readl(&aregs->rxdma.psr_min_des); regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo); regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi); regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des); regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset); regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset); regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index); regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des); regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo); regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi); regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des); regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset); regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset); regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index); regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des); } #define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */ static void et131x_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { struct et131x_adapter *adapter = netdev_priv(netdev); 
strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN); strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN); strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN); } static struct ethtool_ops et131x_ethtool_ops = { .get_settings = et131x_get_settings, .set_settings = et131x_set_settings, .get_drvinfo = et131x_get_drvinfo, .get_regs_len = et131x_get_regs_len, .get_regs = et131x_get_regs, .get_link = ethtool_op_get_link, }; static void et131x_set_ethtool_ops(struct net_device *netdev) { SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops); } /** * et131x_hwaddr_init - set up the MAC Address on the ET1310 * @adapter: pointer to our private adapter structure */ static void et131x_hwaddr_init(struct et131x_adapter *adapter) { /* If have our default mac from init and no mac address from * EEPROM then we need to generate the last octet and set it on the * device */ if (adapter->rom_addr[0] == 0x00 && adapter->rom_addr[1] == 0x00 && adapter->rom_addr[2] == 0x00 && adapter->rom_addr[3] == 0x00 && adapter->rom_addr[4] == 0x00 && adapter->rom_addr[5] == 0x00) { /* * We need to randomly generate the last octet so we * decrease our chances of setting the mac address to * same as another one of our cards in the system */ get_random_bytes(&adapter->addr[5], 1); /* * We have the default value in the register we are * working with so we need to copy the current * address into the permanent address */ memcpy(adapter->rom_addr, adapter->addr, ETH_ALEN); } else { /* We do not have an override address, so set the * current address to the permanent address and add * it to the device */ memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN); } } /** * et131x_pci_init - initial PCI setup * @adapter: pointer to our private adapter structure * @pdev: our PCI device * * Perform the initial setup of PCI registers and if possible initialise * the MAC address. 
At this point the I/O registers have yet to be mapped */ static int et131x_pci_init(struct et131x_adapter *adapter, struct pci_dev *pdev) { int cap = pci_pcie_cap(pdev); u16 max_payload; u16 ctl; int i, rc; rc = et131x_init_eeprom(adapter); if (rc < 0) goto out; if (!cap) { dev_err(&pdev->dev, "Missing PCIe capabilities\n"); goto err_out; } /* Let's set up the PORT LOGIC Register. First we need to know what * the max_payload_size is */ if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCAP, &max_payload)) { dev_err(&pdev->dev, "Could not read PCI config space for Max Payload Size\n"); goto err_out; } /* Program the Ack/Nak latency and replay timers */ max_payload &= 0x07; if (max_payload < 2) { static const u16 acknak[2] = { 0x76, 0xD0 }; static const u16 replay[2] = { 0x1E0, 0x2ED }; if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK, acknak[max_payload])) { dev_err(&pdev->dev, "Could not write PCI config space for ACK/NAK\n"); goto err_out; } if (pci_write_config_word(pdev, ET1310_PCI_REPLAY, replay[max_payload])) { dev_err(&pdev->dev, "Could not write PCI config space for Replay Timer\n"); goto err_out; } } /* l0s and l1 latency timers. We are using default values. 
* Representing 001 for L0s and 010 for L1 */ if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) { dev_err(&pdev->dev, "Could not write PCI config space for Latency Timers\n"); goto err_out; } /* Change the max read size to 2k */ if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl)) { dev_err(&pdev->dev, "Could not read PCI config space for Max read size\n"); goto err_out; } ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | ( 0x04 << 12); if (pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl)) { dev_err(&pdev->dev, "Could not write PCI config space for Max read size\n"); goto err_out; } /* Get MAC address from config space if an eeprom exists, otherwise * the MAC address there will not be valid */ if (!adapter->has_eeprom) { et131x_hwaddr_init(adapter); return 0; } for (i = 0; i < ETH_ALEN; i++) { if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i, adapter->rom_addr + i)) { dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n"); goto err_out; } } memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN); out: return rc; err_out: rc = -EIO; goto out; } /** * et131x_error_timer_handler * @data: timer-specific variable; here a pointer to our adapter structure * * The routine called when the error timer expires, to track the number of * recurring errors. */ static void et131x_error_timer_handler(unsigned long data) { struct et131x_adapter *adapter = (struct et131x_adapter *) data; struct phy_device *phydev = adapter->phydev; if (et1310_in_phy_coma(adapter)) { /* Bring the device immediately out of coma, to * prevent it from sleeping indefinitely, this * mechanism could be improved! */ et1310_disable_phy_coma(adapter); adapter->boot_coma = 20; } else { et1310_update_macstat_host_counters(adapter); } if (!phydev->link && adapter->boot_coma < 11) adapter->boot_coma++; if (adapter->boot_coma == 10) { if (!phydev->link) { if (!et1310_in_phy_coma(adapter)) { /* NOTE - This was originally a 'sync with * interrupt'. How to do that under Linux? 
*/ et131x_enable_interrupts(adapter); et1310_enable_phy_coma(adapter); } } } /* This is a periodic timer, so reschedule */ mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000); } /** * et131x_adapter_memory_alloc * @adapter: pointer to our private adapter structure * * Returns 0 on success, errno on failure (as defined in errno.h). * * Allocate all the memory blocks for send, receive and others. */ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) { int status; /* Allocate memory for the Tx Ring */ status = et131x_tx_dma_memory_alloc(adapter); if (status != 0) { dev_err(&adapter->pdev->dev, "et131x_tx_dma_memory_alloc FAILED\n"); return status; } /* Receive buffer memory allocation */ status = et131x_rx_dma_memory_alloc(adapter); if (status != 0) { dev_err(&adapter->pdev->dev, "et131x_rx_dma_memory_alloc FAILED\n"); et131x_tx_dma_memory_free(adapter); return status; } /* Init receive data structures */ status = et131x_init_recv(adapter); if (status != 0) { dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n"); et131x_tx_dma_memory_free(adapter); et131x_rx_dma_memory_free(adapter); } return status; } /** * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx * @adapter: pointer to our private adapter structure */ static void et131x_adapter_memory_free(struct et131x_adapter *adapter) { /* Free DMA memory */ et131x_tx_dma_memory_free(adapter); et131x_rx_dma_memory_free(adapter); } static void et131x_adjust_link(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); struct phy_device *phydev = adapter->phydev; if (netif_carrier_ok(netdev)) { adapter->boot_coma = 20; if (phydev && phydev->speed == SPEED_10) { /* * NOTE - Is there a way to query this without * TruePHY? 
* && TRU_QueryCoreType(adapter->hTruePhy, 0)== * EMI_TRUEPHY_A13O) { */ u16 register18; et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &register18); et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, register18 | 0x4); et131x_mii_write(adapter, PHY_INDEX_REG, register18 | 0x8402); et131x_mii_write(adapter, PHY_DATA_REG, register18 | 511); et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, register18); } et1310_config_flow_control(adapter); if (phydev && phydev->speed == SPEED_1000 && adapter->registry_jumbo_packet > 2048) { u16 reg; et131x_mii_read(adapter, PHY_CONFIG, &reg); reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; et131x_mii_write(adapter, PHY_CONFIG, reg); } et131x_set_rx_dma_timer(adapter); et1310_config_mac_regs2(adapter); } if (phydev && phydev->link != adapter->link) { /* * Check to see if we are in coma mode and if * so, disable it because we will not be able * to read PHY values until we are out. */ if (et1310_in_phy_coma(adapter)) et1310_disable_phy_coma(adapter); if (phydev->link) { adapter->boot_coma = 20; } else { dev_warn(&adapter->pdev->dev, "Link down - cable problem ?\n"); adapter->boot_coma = 0; if (phydev->speed == SPEED_10) { /* NOTE - Is there a way to query this without * TruePHY? * && TRU_QueryCoreType(adapter->hTruePhy, 0) == * EMI_TRUEPHY_A13O) */ u16 register18; et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &register18); et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, register18 | 0x4); et131x_mii_write(adapter, PHY_INDEX_REG, register18 | 0x8402); et131x_mii_write(adapter, PHY_DATA_REG, register18 | 511); et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, register18); } /* Free the packets being actively sent & stopped */ et131x_free_busy_send_packets(adapter); /* Re-initialize the send structures */ et131x_init_send(adapter); /* * Bring the device back to the state it was during * init prior to autonegotiation being complete. 
This * way, when we get the auto-neg complete interrupt, * we can complete init by calling config_mac_regs2. */ et131x_soft_reset(adapter); /* Setup ET1310 as per the documentation */ et131x_adapter_setup(adapter); /* perform reset of tx/rx */ et131x_disable_txrx(netdev); et131x_enable_txrx(netdev); } adapter->link = phydev->link; phy_print_status(phydev); } } static int et131x_mii_probe(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); struct phy_device *phydev = NULL; phydev = phy_find_first(adapter->mii_bus); if (!phydev) { dev_err(&adapter->pdev->dev, "no PHY found\n"); return -ENODEV; } phydev = phy_connect(netdev, dev_name(&phydev->dev), &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { dev_err(&adapter->pdev->dev, "Could not attach to PHY\n"); return PTR_ERR(phydev); } phydev->supported &= (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII | SUPPORTED_TP); if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST) phydev->supported |= SUPPORTED_1000baseT_Full; phydev->advertising = phydev->supported; adapter->phydev = phydev; dev_info(&adapter->pdev->dev, "attached PHY driver [%s] " "(mii_bus:phy_addr=%s)\n", phydev->drv->name, dev_name(&phydev->dev)); return 0; } /** * et131x_adapter_init * @adapter: pointer to the private adapter struct * @pdev: pointer to the PCI device * * Initialize the data structures for the et131x_adapter object and link * them together with the platform provided device structures. 
*/ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev, struct pci_dev *pdev) { static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 }; struct et131x_adapter *adapter; /* Allocate private adapter struct and copy in relevant information */ adapter = netdev_priv(netdev); adapter->pdev = pci_dev_get(pdev); adapter->netdev = netdev; /* Initialize spinlocks here */ spin_lock_init(&adapter->lock); spin_lock_init(&adapter->tcb_send_qlock); spin_lock_init(&adapter->tcb_ready_qlock); spin_lock_init(&adapter->send_hw_lock); spin_lock_init(&adapter->rcv_lock); spin_lock_init(&adapter->rcv_pend_lock); spin_lock_init(&adapter->fbr_lock); spin_lock_init(&adapter->phy_lock); adapter->registry_jumbo_packet = 1514; /* 1514-9216 */ /* Set the MAC address to a default */ memcpy(adapter->addr, default_mac, ETH_ALEN); return adapter; } /** * et131x_pci_remove * @pdev: a pointer to the device's pci_dev structure * * Registered in the pci_driver structure, this function is called when the * PCI subsystem detects that a PCI device which matches the information * contained in the pci_device_id table has been removed. */ static void __devexit et131x_pci_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct et131x_adapter *adapter = netdev_priv(netdev); unregister_netdev(netdev); phy_disconnect(adapter->phydev); mdiobus_unregister(adapter->mii_bus); kfree(adapter->mii_bus->irq); mdiobus_free(adapter->mii_bus); et131x_adapter_memory_free(adapter); iounmap(adapter->regs); pci_dev_put(pdev); free_netdev(netdev); pci_release_regions(pdev); pci_disable_device(pdev); } /** * et131x_up - Bring up a device for use. 
* @netdev: device to be opened */ static void et131x_up(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); et131x_enable_txrx(netdev); phy_start(adapter->phydev); } /** * et131x_down - Bring down the device * @netdev: device to be brought down */ static void et131x_down(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); /* Save the timestamp for the TX watchdog, prevent a timeout */ netdev->trans_start = jiffies; phy_stop(adapter->phydev); et131x_disable_txrx(netdev); } #ifdef CONFIG_PM_SLEEP static int et131x_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); if (netif_running(netdev)) { netif_device_detach(netdev); et131x_down(netdev); pci_save_state(pdev); } return 0; } static int et131x_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); if (netif_running(netdev)) { pci_restore_state(pdev); et131x_up(netdev); netif_device_attach(netdev); } return 0; } static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume); #define ET131X_PM_OPS (&et131x_pm_ops) #else #define ET131X_PM_OPS NULL #endif /** * et131x_isr - The Interrupt Service Routine for the driver. * @irq: the IRQ on which the interrupt was received. * @dev_id: device-specific info (here a pointer to a net_device struct) * * Returns a value indicating if the interrupt was handled. 
*/ irqreturn_t et131x_isr(int irq, void *dev_id) { bool handled = true; struct net_device *netdev = (struct net_device *)dev_id; struct et131x_adapter *adapter = NULL; u32 status; if (!netif_device_present(netdev)) { handled = false; goto out; } adapter = netdev_priv(netdev); /* If the adapter is in low power state, then it should not * recognize any interrupt */ /* Disable Device Interrupts */ et131x_disable_interrupts(adapter); /* Get a copy of the value in the interrupt status register * so we can process the interrupting section */ status = readl(&adapter->regs->global.int_status); if (adapter->flowcontrol == FLOW_TXONLY || adapter->flowcontrol == FLOW_BOTH) { status &= ~INT_MASK_ENABLE; } else { status &= ~INT_MASK_ENABLE_NO_FLOW; } /* Make sure this is our interrupt */ if (!status) { handled = false; et131x_enable_interrupts(adapter); goto out; } /* This is our interrupt, so process accordingly */ if (status & ET_INTR_WATCHDOG) { struct tcb *tcb = adapter->tx_ring.send_head; if (tcb) if (++tcb->stale > 1) status |= ET_INTR_TXDMA_ISR; if (adapter->rx_ring.unfinished_receives) status |= ET_INTR_RXDMA_XFR_DONE; else if (tcb == NULL) writel(0, &adapter->regs->global.watchdog_timer); status &= ~ET_INTR_WATCHDOG; } if (status == 0) { /* This interrupt has in some way been "handled" by * the ISR. Either it was a spurious Rx interrupt, or * it was a Tx interrupt that has been filtered by * the ISR. */ et131x_enable_interrupts(adapter); goto out; } /* We need to save the interrupt status value for use in our * DPC. We will clear the software copy of that in that * routine. */ adapter->stats.interrupt_status = status; /* Schedule the ISR handler as a bottom-half task in the * kernel's tq_immediate queue, and mark the queue for * execution */ schedule_work(&adapter->task); out: return IRQ_RETVAL(handled); } /** * et131x_isr_handler - The ISR handler * @p_adapter, a pointer to the device's private adapter structure * * scheduled to run in a deferred context by the ISR. 
This is where the ISR's * work actually gets done. */ static void et131x_isr_handler(struct work_struct *work) { struct et131x_adapter *adapter = container_of(work, struct et131x_adapter, task); u32 status = adapter->stats.interrupt_status; struct address_map __iomem *iomem = adapter->regs; /* * These first two are by far the most common. Once handled, we clear * their two bits in the status word. If the word is now zero, we * exit. */ /* Handle all the completed Transmit interrupts */ if (status & ET_INTR_TXDMA_ISR) et131x_handle_send_interrupt(adapter); /* Handle all the completed Receives interrupts */ if (status & ET_INTR_RXDMA_XFR_DONE) et131x_handle_recv_interrupt(adapter); status &= 0xffffffd7; if (status) { /* Handle the TXDMA Error interrupt */ if (status & ET_INTR_TXDMA_ERR) { u32 txdma_err; /* Following read also clears the register (COR) */ txdma_err = readl(&iomem->txdma.tx_dma_error); dev_warn(&adapter->pdev->dev, "TXDMA_ERR interrupt, error = %d\n", txdma_err); } /* Handle Free Buffer Ring 0 and 1 Low interrupt */ if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) { /* * This indicates the number of unused buffers in * RXDMA free buffer ring 0 is <= the limit you * programmed. Free buffer resources need to be * returned. Free buffers are consumed as packets * are passed from the network to the host. The host * becomes aware of the packets from the contents of * the packet status ring. This ring is queried when * the packet done interrupt occurs. Packets are then * passed to the OS. When the OS is done with the * packets the resources can be returned to the * ET1310 for re-use. This interrupt is one method of * returning resources. 
*/ /* If the user has flow control on, then we will * send a pause packet, otherwise just exit */ if (adapter->flowcontrol == FLOW_TXONLY || adapter->flowcontrol == FLOW_BOTH) { u32 pm_csr; /* Tell the device to send a pause packet via * the back pressure register (bp req and * bp xon/xoff) */ pm_csr = readl(&iomem->global.pm_csr); if (!et1310_in_phy_coma(adapter)) writel(3, &iomem->txmac.bp_ctrl); } } /* Handle Packet Status Ring Low Interrupt */ if (status & ET_INTR_RXDMA_STAT_LOW) { /* * Same idea as with the two Free Buffer Rings. * Packets going from the network to the host each * consume a free buffer resource and a packet status * resource. These resoures are passed to the OS. * When the OS is done with the resources, they need * to be returned to the ET1310. This is one method * of returning the resources. */ } /* Handle RXDMA Error Interrupt */ if (status & ET_INTR_RXDMA_ERR) { /* * The rxdma_error interrupt is sent when a time-out * on a request issued by the JAGCore has occurred or * a completion is returned with an un-successful * status. In both cases the request is considered * complete. The JAGCore will automatically re-try the * request in question. Normally information on events * like these are sent to the host using the "Advanced * Error Reporting" capability. This interrupt is * another way of getting similar information. The * only thing required is to clear the interrupt by * reading the ISR in the global resources. The * JAGCore will do a re-try on the request. Normally * you should never see this interrupt. If you start * to see this interrupt occurring frequently then * something bad has occurred. A reset might be the * thing to do. */ /* TRAP();*/ dev_warn(&adapter->pdev->dev, "RxDMA_ERR interrupt, error %x\n", readl(&iomem->txmac.tx_test)); } /* Handle the Wake on LAN Event */ if (status & ET_INTR_WOL) { /* * This is a secondary interrupt for wake on LAN. * The driver should never see this, if it does, * something serious is wrong. 
We will TRAP the * message when we are in DBG mode, otherwise we * will ignore it. */ dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n"); } /* Let's move on to the TxMac */ if (status & ET_INTR_TXMAC) { u32 err = readl(&iomem->txmac.err); /* * When any of the errors occur and TXMAC generates * an interrupt to report these errors, it usually * means that TXMAC has detected an error in the data * stream retrieved from the on-chip Tx Q. All of * these errors are catastrophic and TXMAC won't be * able to recover data when these errors occur. In * a nutshell, the whole Tx path will have to be reset * and re-configured afterwards. */ dev_warn(&adapter->pdev->dev, "TXMAC interrupt, error 0x%08x\n", err); /* If we are debugging, we want to see this error, * otherwise we just want the device to be reset and * continue */ } /* Handle RXMAC Interrupt */ if (status & ET_INTR_RXMAC) { /* * These interrupts are catastrophic to the device, * what we need to do is disable the interrupts and * set the flag to cause us to reset so we can solve * this issue. */ /* MP_SET_FLAG( adapter, fMP_ADAPTER_HARDWARE_ERROR); */ dev_warn(&adapter->pdev->dev, "RXMAC interrupt, error 0x%08x. Requesting reset\n", readl(&iomem->rxmac.err_reg)); dev_warn(&adapter->pdev->dev, "Enable 0x%08x, Diag 0x%08x\n", readl(&iomem->rxmac.ctrl), readl(&iomem->rxmac.rxq_diag)); /* * If we are debugging, we want to see this error, * otherwise we just want the device to be reset and * continue */ } /* Handle MAC_STAT Interrupt */ if (status & ET_INTR_MAC_STAT) { /* * This means at least one of the un-masked counters * in the MAC_STAT block has rolled over. Use this * to maintain the top, software managed bits of the * counter(s). */ et1310_handle_macstat_interrupt(adapter); } /* Handle SLV Timeout Interrupt */ if (status & ET_INTR_SLV_TIMEOUT) { /* * This means a timeout has occurred on a read or * write request to one of the JAGCore registers. 
The * Global Resources block has terminated the request * and on a read request, returned a "fake" value. * The most likely reasons are: Bad Address or the * addressed module is in a power-down state and * can't respond. */ } } et131x_enable_interrupts(adapter); } /** * et131x_stats - Return the current device statistics. * @netdev: device whose stats are being queried * * Returns 0 on success, errno on failure (as defined in errno.h) */ static struct net_device_stats *et131x_stats(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); struct net_device_stats *stats = &adapter->net_stats; struct ce_stats *devstat = &adapter->stats; stats->rx_errors = devstat->rx_length_errs + devstat->rx_align_errs + devstat->rx_crc_errs + devstat->rx_code_violations + devstat->rx_other_errs; stats->tx_errors = devstat->tx_max_pkt_errs; stats->multicast = devstat->multicast_pkts_rcvd; stats->collisions = devstat->tx_collisions; stats->rx_length_errors = devstat->rx_length_errs; stats->rx_over_errors = devstat->rx_overflows; stats->rx_crc_errors = devstat->rx_crc_errs; /* NOTE: These stats don't have corresponding values in CE_STATS, * so we're going to have to update these directly from within the * TX/RX code */ /* stats->rx_bytes = 20; devstat->; */ /* stats->tx_bytes = 20; devstat->; */ /* stats->rx_dropped = devstat->; */ /* stats->tx_dropped = devstat->; */ /* NOTE: Not used, can't find analogous statistics */ /* stats->rx_frame_errors = devstat->; */ /* stats->rx_fifo_errors = devstat->; */ /* stats->rx_missed_errors = devstat->; */ /* stats->tx_aborted_errors = devstat->; */ /* stats->tx_carrier_errors = devstat->; */ /* stats->tx_fifo_errors = devstat->; */ /* stats->tx_heartbeat_errors = devstat->; */ /* stats->tx_window_errors = devstat->; */ return stats; } /** * et131x_open - Open the device for use. 
* @netdev: device to be opened * * Returns 0 on success, errno on failure (as defined in errno.h) */ static int et131x_open(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = adapter->pdev; unsigned int irq = pdev->irq; int result; /* Start the timer to track NIC errors */ init_timer(&adapter->error_timer); adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000; adapter->error_timer.function = et131x_error_timer_handler; adapter->error_timer.data = (unsigned long)adapter; add_timer(&adapter->error_timer); result = request_irq(irq, et131x_isr, IRQF_SHARED, netdev->name, netdev); if (result) { dev_err(&pdev->dev, "could not register IRQ %d\n", irq); return result; } adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE; et131x_up(netdev); return result; } /** * et131x_close - Close the device * @netdev: device to be closed * * Returns 0 on success, errno on failure (as defined in errno.h) */ static int et131x_close(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); et131x_down(netdev); adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE; free_irq(adapter->pdev->irq, netdev); /* Stop the error timer */ return del_timer_sync(&adapter->error_timer); } /** * et131x_ioctl - The I/O Control handler for the driver * @netdev: device on which the control request is being made * @reqbuf: a pointer to the IOCTL request buffer * @cmd: the IOCTL command code * * Returns 0 on success, errno on failure (as defined in errno.h) */ static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd) { struct et131x_adapter *adapter = netdev_priv(netdev); if (!adapter->phydev) return -EINVAL; return phy_mii_ioctl(adapter->phydev, reqbuf, cmd); } /** * et131x_set_packet_filter - Configures the Rx Packet filtering on the device * @adapter: pointer to our private adapter structure * * FIXME: lot of dups with MAC code * * Returns 0 on success, errno on failure */ static int 
et131x_set_packet_filter(struct et131x_adapter *adapter) { int filter = adapter->packet_filter; int status = 0; u32 ctrl; u32 pf_ctrl; ctrl = readl(&adapter->regs->rxmac.ctrl); pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl); /* Default to disabled packet filtering. Enable it in the individual * case statements that require the device to filter something */ ctrl |= 0x04; /* Set us to be in promiscuous mode so we receive everything, this * is also true when we get a packet filter of 0 */ if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0) pf_ctrl &= ~7; /* Clear filter bits */ else { /* * Set us up with Multicast packet filtering. Three cases are * possible - (1) we have a multi-cast list, (2) we receive ALL * multicast entries or (3) we receive none. */ if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) pf_ctrl &= ~2; /* Multicast filter bit */ else { et1310_setup_device_for_multicast(adapter); pf_ctrl |= 2; ctrl &= ~0x04; } /* Set us up with Unicast packet filtering */ if (filter & ET131X_PACKET_TYPE_DIRECTED) { et1310_setup_device_for_unicast(adapter); pf_ctrl |= 4; ctrl &= ~0x04; } /* Set us up with Broadcast packet filtering */ if (filter & ET131X_PACKET_TYPE_BROADCAST) { pf_ctrl |= 1; /* Broadcast filter bit */ ctrl &= ~0x04; } else pf_ctrl &= ~1; /* Setup the receive mac configuration registers - Packet * Filter control + the enable / disable for packet filter * in the control reg. */ writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl); writel(ctrl, &adapter->regs->rxmac.ctrl); } return status; } /** * et131x_multicast - The handler to configure multicasting on the interface * @netdev: a pointer to a net_device struct representing the device */ static void et131x_multicast(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); int packet_filter; unsigned long flags; struct netdev_hw_addr *ha; int i; spin_lock_irqsave(&adapter->lock, flags); /* Before we modify the platform-independent filter flags, store them * locally. 
This allows us to determine if anything's changed and if * we even need to bother the hardware */ packet_filter = adapter->packet_filter; /* Clear the 'multicast' flag locally; because we only have a single * flag to check multicast, and multiple multicast addresses can be * set, this is the easiest way to determine if more than one * multicast address is being set. */ packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; /* Check the net_device flags and set the device independent flags * accordingly */ if (netdev->flags & IFF_PROMISC) adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS; else adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS; if (netdev->flags & IFF_ALLMULTI) adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST) adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; if (netdev_mc_count(netdev) < 1) { adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST; adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; } else adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST; /* Set values in the private adapter struct */ i = 0; netdev_for_each_mc_addr(ha, netdev) { if (i == NIC_MAX_MCAST_LIST) break; memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN); } adapter->multicast_addr_count = i; /* Are the new flags different from the previous ones? If not, then no * action is required * * NOTE - This block will always update the multicast_list with the * hardware, even if the addresses aren't the same. 
*/ if (packet_filter != adapter->packet_filter) { /* Call the device's filter function */ et131x_set_packet_filter(adapter); } spin_unlock_irqrestore(&adapter->lock, flags); } /** * et131x_tx - The handler to tx a packet on the device * @skb: data to be Tx'd * @netdev: device on which data is to be Tx'd * * Returns 0 on success, errno on failure (as defined in errno.h) */ static int et131x_tx(struct sk_buff *skb, struct net_device *netdev) { int status = 0; struct et131x_adapter *adapter = netdev_priv(netdev); /* stop the queue if it's getting full */ if (adapter->tx_ring.used >= NUM_TCB - 1 && !netif_queue_stopped(netdev)) netif_stop_queue(netdev); /* Save the timestamp for the TX timeout watchdog */ netdev->trans_start = jiffies; /* Call the device-specific data Tx routine */ status = et131x_send_packets(skb, netdev); /* Check status and manage the netif queue if necessary */ if (status != 0) { if (status == -ENOMEM) status = NETDEV_TX_BUSY; else status = NETDEV_TX_OK; } return status; } /** * et131x_tx_timeout - Timeout handler * @netdev: a pointer to a net_device struct representing the device * * The handler called when a Tx request times out. The timeout period is * specified by the 'tx_timeo" element in the net_device structure (see * et131x_alloc_device() to see how this value is set). */ static void et131x_tx_timeout(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); struct tcb *tcb; unsigned long flags; /* If the device is closed, ignore the timeout */ if (~(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)) return; /* Any nonrecoverable hardware error? * Checks adapter->flags for any failure in phy reading */ if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR) return; /* Hardware failure? */ if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) { dev_err(&adapter->pdev->dev, "hardware error - reset\n"); return; } /* Is send stuck? 
*/ spin_lock_irqsave(&adapter->tcb_send_qlock, flags); tcb = adapter->tx_ring.send_head; if (tcb != NULL) { tcb->count++; if (tcb->count > NIC_SEND_HANG_THRESHOLD) { spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); dev_warn(&adapter->pdev->dev, "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n", tcb->index, tcb->flags); adapter->net_stats.tx_errors++; /* perform reset of tx/rx */ et131x_disable_txrx(netdev); et131x_enable_txrx(netdev); return; } } spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); } /** * et131x_change_mtu - The handler called to change the MTU for the device * @netdev: device whose MTU is to be changed * @new_mtu: the desired MTU * * Returns 0 on success, errno on failure (as defined in errno.h) */ static int et131x_change_mtu(struct net_device *netdev, int new_mtu) { int result = 0; struct et131x_adapter *adapter = netdev_priv(netdev); /* Make sure the requested MTU is valid */ if (new_mtu < 64 || new_mtu > 9216) return -EINVAL; et131x_disable_txrx(netdev); et131x_handle_send_interrupt(adapter); et131x_handle_recv_interrupt(adapter); /* Set the new MTU */ netdev->mtu = new_mtu; /* Free Rx DMA memory */ et131x_adapter_memory_free(adapter); /* Set the config parameter for Jumbo Packet support */ adapter->registry_jumbo_packet = new_mtu + 14; et131x_soft_reset(adapter); /* Alloc and init Rx DMA memory */ result = et131x_adapter_memory_alloc(adapter); if (result != 0) { dev_warn(&adapter->pdev->dev, "Change MTU failed; couldn't re-alloc DMA memory\n"); return result; } et131x_init_send(adapter); et131x_hwaddr_init(adapter); memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); /* Init the device with the new settings */ et131x_adapter_setup(adapter); et131x_enable_txrx(netdev); return result; } /** * et131x_set_mac_addr - handler to change the MAC address for the device * @netdev: device whose MAC is to be changed * @new_mac: the desired MAC address * * Returns 0 on success, errno on failure (as defined in errno.h) * * IMPLEMENTED BY : 
blux http://berndlux.de 22.01.2007 21:14 */ static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac) { int result = 0; struct et131x_adapter *adapter = netdev_priv(netdev); struct sockaddr *address = new_mac; /* begin blux */ if (adapter == NULL) return -ENODEV; /* Make sure the requested MAC is valid */ if (!is_valid_ether_addr(address->sa_data)) return -EADDRNOTAVAIL; et131x_disable_txrx(netdev); et131x_handle_send_interrupt(adapter); et131x_handle_recv_interrupt(adapter); /* Set the new MAC */ /* netdev->set_mac_address = &new_mac; */ memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len); printk(KERN_INFO "%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr); /* Free Rx DMA memory */ et131x_adapter_memory_free(adapter); et131x_soft_reset(adapter); /* Alloc and init Rx DMA memory */ result = et131x_adapter_memory_alloc(adapter); if (result != 0) { dev_err(&adapter->pdev->dev, "Change MAC failed; couldn't re-alloc DMA memory\n"); return result; } et131x_init_send(adapter); et131x_hwaddr_init(adapter); /* Init the device with the new settings */ et131x_adapter_setup(adapter); et131x_enable_txrx(netdev); return result; } static const struct net_device_ops et131x_netdev_ops = { .ndo_open = et131x_open, .ndo_stop = et131x_close, .ndo_start_xmit = et131x_tx, .ndo_set_rx_mode = et131x_multicast, .ndo_tx_timeout = et131x_tx_timeout, .ndo_change_mtu = et131x_change_mtu, .ndo_set_mac_address = et131x_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_get_stats = et131x_stats, .ndo_do_ioctl = et131x_ioctl, }; /** * et131x_pci_setup - Perform device initialization * @pdev: a pointer to the device's pci_dev structure * @ent: this device's entry in the pci_device_id table * * Returns 0 on success, errno on failure (as defined in errno.h) * * Registered in the pci_driver structure, this function is called when the * PCI subsystem finds a new PCI device which matches the information * contained in the pci_device_id table. 
This routine is the equivalent to * a device insertion routine. */ static int __devinit et131x_pci_setup(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct et131x_adapter *adapter; int rc; int ii; rc = pci_enable_device(pdev); if (rc < 0) { dev_err(&pdev->dev, "pci_enable_device() failed\n"); goto out; } /* Perform some basic PCI checks */ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "Can't find PCI device's base address\n"); rc = -ENODEV; goto err_disable; } rc = pci_request_regions(pdev, DRIVER_NAME); if (rc < 0) { dev_err(&pdev->dev, "Can't get PCI resources\n"); goto err_disable; } pci_set_master(pdev); /* Check the DMA addressing support of this device */ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); if (rc < 0) { dev_err(&pdev->dev, "Unable to obtain 64 bit DMA for consistent allocations\n"); goto err_release_res; } } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (rc < 0) { dev_err(&pdev->dev, "Unable to obtain 32 bit DMA for consistent allocations\n"); goto err_release_res; } } else { dev_err(&pdev->dev, "No usable DMA addressing method\n"); rc = -EIO; goto err_release_res; } /* Allocate netdev and private adapter structs */ netdev = alloc_etherdev(sizeof(struct et131x_adapter)); if (!netdev) { dev_err(&pdev->dev, "Couldn't alloc netdev struct\n"); rc = -ENOMEM; goto err_release_res; } netdev->watchdog_timeo = ET131X_TX_TIMEOUT; netdev->netdev_ops = &et131x_netdev_ops; SET_NETDEV_DEV(netdev, &pdev->dev); et131x_set_ethtool_ops(netdev); adapter = et131x_adapter_init(netdev, pdev); rc = et131x_pci_init(adapter, pdev); if (rc < 0) goto err_free_dev; /* Map the bus-relative registers to system virtual memory */ adapter->regs = pci_ioremap_bar(pdev, 0); if (!adapter->regs) { dev_err(&pdev->dev, "Cannot map device registers\n"); rc = -ENOMEM; goto err_free_dev; } 
/* If Phy COMA mode was enabled when we went down, disable it here. */
	/*
	 * NOTE(review): this chunk begins mid-way through et131x_pci_setup();
	 * the function's opening (device enable, region request, ioremap,
	 * netdev/adapter allocation) lies before the visible region.
	 */
	writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);

	/* Issue a global reset to the et1310 */
	et131x_soft_reset(adapter);

	/* Disable all interrupts (paranoid) */
	et131x_disable_interrupts(adapter);

	/* Allocate DMA memory */
	rc = et131x_adapter_memory_alloc(adapter);
	if (rc < 0) {
		dev_err(&pdev->dev, "Could not alloc adapater memory (DMA)\n");
		goto err_iounmap;
	}

	/* Init send data structures */
	et131x_init_send(adapter);

	/* Set up the task structure for the ISR's deferred handler */
	INIT_WORK(&adapter->task, et131x_isr_handler);

	/* Copy address into the net_device struct */
	memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);

	/* Init variable for counting how long we do not have link status */
	adapter->boot_coma = 0;
	et1310_disable_phy_coma(adapter);

	/* Default error for the allocation failures below. */
	rc = -ENOMEM;

	/* Setup the mii_bus struct */
	adapter->mii_bus = mdiobus_alloc();
	if (!adapter->mii_bus) {
		dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
		goto err_mem_free;
	}

	adapter->mii_bus->name = "et131x_eth_mii";
	snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
		 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
	adapter->mii_bus->priv = netdev;
	adapter->mii_bus->read = et131x_mdio_read;
	adapter->mii_bus->write = et131x_mdio_write;
	adapter->mii_bus->reset = et131x_mdio_reset;
	adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!adapter->mii_bus->irq) {
		dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
		goto err_mdio_free;
	}

	/* No dedicated PHY interrupt line: poll every PHY address. */
	for (ii = 0; ii < PHY_MAX_ADDR; ii++)
		adapter->mii_bus->irq[ii] = PHY_POLL;

	rc = mdiobus_register(adapter->mii_bus);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_mdio_free_irq;
	}

	rc = et131x_mii_probe(netdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to probe MII bus\n");
		goto err_mdio_unregister;
	}

	/* Setup et1310 as per the documentation */
	et131x_adapter_setup(adapter);

	/* We can enable interrupts now
	 *
	 *  NOTE - Because registration of interrupt handler is done in the
	 *         device's open(), defer enabling device interrupts to that
	 *         point
	 */

	/* Register the net_device struct with the Linux network layer */
	rc = register_netdev(netdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "register_netdev() failed\n");
		goto err_phy_disconnect;
	}

	/* Register the net_device struct with the PCI subsystem. Save a copy
	 * of the PCI config space for this device now that the device has
	 * been initialized, just in case it needs to be quickly restored.
	 */
	pci_set_drvdata(pdev, netdev);
out:
	return rc;

	/* Error unwind: each label releases what was acquired above it. */
err_phy_disconnect:
	phy_disconnect(adapter->phydev);
err_mdio_unregister:
	mdiobus_unregister(adapter->mii_bus);
err_mdio_free_irq:
	kfree(adapter->mii_bus->irq);
err_mdio_free:
	mdiobus_free(adapter->mii_bus);
err_mem_free:
	et131x_adapter_memory_free(adapter);
err_iounmap:
	iounmap(adapter->regs);
err_free_dev:
	pci_dev_put(pdev);
	free_netdev(netdev);
err_release_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	goto out;
}

/* PCI IDs this driver binds to (gigabit and fast-ethernet variants). */
static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
	{0,}
};
MODULE_DEVICE_TABLE(pci, et131x_pci_table);

static struct pci_driver et131x_driver = {
	.name		= DRIVER_NAME,
	.id_table	= et131x_pci_table,
	.probe		= et131x_pci_setup,
	.remove		= __devexit_p(et131x_pci_remove),
	.driver.pm	= ET131X_PM_OPS,
};

/**
 * et131x_init_module - The "main" entry point called on driver initialization
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
static int __init et131x_init_module(void)
{
	return pci_register_driver(&et131x_driver);
}

/**
 * et131x_cleanup_module - The entry point called on driver cleanup
 */
static void __exit et131x_cleanup_module(void)
{
	pci_unregister_driver(&et131x_driver);
}

module_init(et131x_init_module);
module_exit(et131x_cleanup_module);
gpl-2.0
daemon32/android_kernel_lge_fx3t
arch/um/os-Linux/signal.c
4888
6560
/* * Copyright (C) 2004 PathScale, Inc * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <stdlib.h> #include <stdarg.h> #include <errno.h> #include <signal.h> #include <strings.h> #include "as-layout.h" #include "kern_util.h" #include "os.h" #include "sysdep/mcontext.h" void (*sig_info[NSIG])(int, struct uml_pt_regs *) = { [SIGTRAP] = relay_signal, [SIGFPE] = relay_signal, [SIGILL] = relay_signal, [SIGWINCH] = winch, [SIGBUS] = bus_handler, [SIGSEGV] = segv_handler, [SIGIO] = sigio_handler, [SIGVTALRM] = timer_handler }; static void sig_handler_common(int sig, mcontext_t *mc) { struct uml_pt_regs r; int save_errno = errno; r.is_user = 0; if (sig == SIGSEGV) { /* For segfaults, we want the data from the sigcontext. */ get_regs_from_mc(&r, mc); GET_FAULTINFO_FROM_MC(r.faultinfo, mc); } /* enable signals if sig isn't IRQ signal */ if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGVTALRM)) unblock_signals(); (*sig_info[sig])(sig, &r); errno = save_errno; } /* * These are the asynchronous signals. SIGPROF is excluded because we want to * be able to profile all of UML, not just the non-critical sections. If * profiling is not thread-safe, then that is not my problem. We can disable * profiling when SMP is enabled in that case. 
*/ #define SIGIO_BIT 0 #define SIGIO_MASK (1 << SIGIO_BIT) #define SIGVTALRM_BIT 1 #define SIGVTALRM_MASK (1 << SIGVTALRM_BIT) static int signals_enabled; static unsigned int signals_pending; void sig_handler(int sig, mcontext_t *mc) { int enabled; enabled = signals_enabled; if (!enabled && (sig == SIGIO)) { signals_pending |= SIGIO_MASK; return; } block_signals(); sig_handler_common(sig, mc); set_signals(enabled); } static void real_alarm_handler(mcontext_t *mc) { struct uml_pt_regs regs; if (mc != NULL) get_regs_from_mc(&regs, mc); regs.is_user = 0; unblock_signals(); timer_handler(SIGVTALRM, &regs); } void alarm_handler(int sig, mcontext_t *mc) { int enabled; enabled = signals_enabled; if (!signals_enabled) { signals_pending |= SIGVTALRM_MASK; return; } block_signals(); real_alarm_handler(mc); set_signals(enabled); } void timer_init(void) { set_handler(SIGVTALRM); } void set_sigstack(void *sig_stack, int size) { stack_t stack = ((stack_t) { .ss_flags = 0, .ss_sp = (__ptr_t) sig_stack, .ss_size = size - sizeof(void *) }); if (sigaltstack(&stack, NULL) != 0) panic("enabling signal stack failed, errno = %d\n", errno); } static void (*handlers[_NSIG])(int sig, mcontext_t *mc) = { [SIGSEGV] = sig_handler, [SIGBUS] = sig_handler, [SIGILL] = sig_handler, [SIGFPE] = sig_handler, [SIGTRAP] = sig_handler, [SIGIO] = sig_handler, [SIGWINCH] = sig_handler, [SIGVTALRM] = alarm_handler }; static void hard_handler(int sig, siginfo_t *info, void *p) { struct ucontext *uc = p; mcontext_t *mc = &uc->uc_mcontext; unsigned long pending = 1UL << sig; do { int nested, bail; /* * pending comes back with one bit set for each * interrupt that arrived while setting up the stack, * plus a bit for this interrupt, plus the zero bit is * set if this is a nested interrupt. * If bail is true, then we interrupted another * handler setting up the stack. In this case, we * have to return, and the upper handler will deal * with this interrupt. 
*/ bail = to_irq_stack(&pending); if (bail) return; nested = pending & 1; pending &= ~1; while ((sig = ffs(pending)) != 0){ sig--; pending &= ~(1 << sig); (*handlers[sig])(sig, mc); } /* * Again, pending comes back with a mask of signals * that arrived while tearing down the stack. If this * is non-zero, we just go back, set up the stack * again, and handle the new interrupts. */ if (!nested) pending = from_irq_stack(nested); } while (pending); } void set_handler(int sig) { struct sigaction action; int flags = SA_SIGINFO | SA_ONSTACK; sigset_t sig_mask; action.sa_sigaction = hard_handler; /* block irq ones */ sigemptyset(&action.sa_mask); sigaddset(&action.sa_mask, SIGVTALRM); sigaddset(&action.sa_mask, SIGIO); sigaddset(&action.sa_mask, SIGWINCH); if (sig == SIGSEGV) flags |= SA_NODEFER; if (sigismember(&action.sa_mask, sig)) flags |= SA_RESTART; /* if it's an irq signal */ action.sa_flags = flags; action.sa_restorer = NULL; if (sigaction(sig, &action, NULL) < 0) panic("sigaction failed - errno = %d\n", errno); sigemptyset(&sig_mask); sigaddset(&sig_mask, sig); if (sigprocmask(SIG_UNBLOCK, &sig_mask, NULL) < 0) panic("sigprocmask failed - errno = %d\n", errno); } int change_sig(int signal, int on) { sigset_t sigset; sigemptyset(&sigset); sigaddset(&sigset, signal); if (sigprocmask(on ? SIG_UNBLOCK : SIG_BLOCK, &sigset, NULL) < 0) return -errno; return 0; } void block_signals(void) { signals_enabled = 0; /* * This must return with signals disabled, so this barrier * ensures that writes are flushed out before the return. * This might matter if gcc figures out how to inline this and * decides to shuffle this code into the caller. */ barrier(); } void unblock_signals(void) { int save_pending; if (signals_enabled == 1) return; /* * We loop because the IRQ handler returns with interrupts off. So, * interrupts may have arrived and we need to re-enable them and * recheck signals_pending. */ while (1) { /* * Save and reset save_pending after enabling signals. 
This * way, signals_pending won't be changed while we're reading it. */ signals_enabled = 1; /* * Setting signals_enabled and reading signals_pending must * happen in this order. */ barrier(); save_pending = signals_pending; if (save_pending == 0) return; signals_pending = 0; /* * We have pending interrupts, so disable signals, as the * handlers expect them off when they are called. They will * be enabled again above. */ signals_enabled = 0; /* * Deal with SIGIO first because the alarm handler might * schedule, leaving the pending SIGIO stranded until we come * back here. */ if (save_pending & SIGIO_MASK) sig_handler_common(SIGIO, NULL); if (save_pending & SIGVTALRM_MASK) real_alarm_handler(NULL); } } int get_signals(void) { return signals_enabled; } int set_signals(int enable) { int ret; if (signals_enabled == enable) return enable; ret = signals_enabled; if (enable) unblock_signals(); else block_signals(); return ret; }
gpl-2.0
oadam11/kernel_lge_g3-old
drivers/input/keyboard/omap-keypad.c
4888
12312
/* * linux/drivers/input/keyboard/omap-keypad.c * * OMAP Keypad Driver * * Copyright (C) 2003 Nokia Corporation * Written by Timo Teräs <ext-timo.teras@nokia.com> * * Added support for H2 & H3 Keypad * Copyright (C) 2004 Texas Instruments * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/input.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/errno.h> #include <linux/slab.h> #include <asm/gpio.h> #include <plat/keypad.h> #include <plat/menelaus.h> #include <asm/irq.h> #include <mach/hardware.h> #include <asm/io.h> #include <plat/mux.h> #undef NEW_BOARD_LEARNING_MODE static void omap_kp_tasklet(unsigned long); static void omap_kp_timer(unsigned long); static unsigned char keypad_state[8]; static DEFINE_MUTEX(kp_enable_mutex); static int kp_enable = 1; static int kp_cur_group = -1; struct omap_kp { struct input_dev *input; struct timer_list timer; int irq; unsigned int rows; unsigned int cols; unsigned long delay; unsigned int debounce; }; static DECLARE_TASKLET_DISABLED(kp_tasklet, omap_kp_tasklet, 0); static unsigned int *row_gpios; static unsigned int *col_gpios; #ifdef CONFIG_ARCH_OMAP2 static void set_col_gpio_val(struct 
omap_kp *omap_kp, u8 value) { int col; for (col = 0; col < omap_kp->cols; col++) gpio_set_value(col_gpios[col], value & (1 << col)); } static u8 get_row_gpio_val(struct omap_kp *omap_kp) { int row; u8 value = 0; for (row = 0; row < omap_kp->rows; row++) { if (gpio_get_value(row_gpios[row])) value |= (1 << row); } return value; } #else #define set_col_gpio_val(x, y) do {} while (0) #define get_row_gpio_val(x) 0 #endif static irqreturn_t omap_kp_interrupt(int irq, void *dev_id) { struct omap_kp *omap_kp = dev_id; /* disable keyboard interrupt and schedule for handling */ if (cpu_is_omap24xx()) { int i; for (i = 0; i < omap_kp->rows; i++) { int gpio_irq = gpio_to_irq(row_gpios[i]); /* * The interrupt which we're currently handling should * be disabled _nosync() to avoid deadlocks waiting * for this handler to complete. All others should * be disabled the regular way for SMP safety. */ if (gpio_irq == irq) disable_irq_nosync(gpio_irq); else disable_irq(gpio_irq); } } else /* disable keyboard interrupt and schedule for handling */ omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); tasklet_schedule(&kp_tasklet); return IRQ_HANDLED; } static void omap_kp_timer(unsigned long data) { tasklet_schedule(&kp_tasklet); } static void omap_kp_scan_keypad(struct omap_kp *omap_kp, unsigned char *state) { int col = 0; /* read the keypad status */ if (cpu_is_omap24xx()) { /* read the keypad status */ for (col = 0; col < omap_kp->cols; col++) { set_col_gpio_val(omap_kp, ~(1 << col)); state[col] = ~(get_row_gpio_val(omap_kp)) & 0xff; } set_col_gpio_val(omap_kp, 0); } else { /* disable keyboard interrupt and schedule for handling */ omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); /* read the keypad status */ omap_writew(0xff, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBC); for (col = 0; col < omap_kp->cols; col++) { omap_writew(~(1 << col) & 0xff, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBC); udelay(omap_kp->delay); state[col] = ~omap_readw(OMAP1_MPUIO_BASE + OMAP_MPUIO_KBR_LATCH) & 0xff; } 
omap_writew(0x00, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBC); udelay(2); } } static void omap_kp_tasklet(unsigned long data) { struct omap_kp *omap_kp_data = (struct omap_kp *) data; unsigned short *keycodes = omap_kp_data->input->keycode; unsigned int row_shift = get_count_order(omap_kp_data->cols); unsigned char new_state[8], changed, key_down = 0; int col, row; int spurious = 0; /* check for any changes */ omap_kp_scan_keypad(omap_kp_data, new_state); /* check for changes and print those */ for (col = 0; col < omap_kp_data->cols; col++) { changed = new_state[col] ^ keypad_state[col]; key_down |= new_state[col]; if (changed == 0) continue; for (row = 0; row < omap_kp_data->rows; row++) { int key; if (!(changed & (1 << row))) continue; #ifdef NEW_BOARD_LEARNING_MODE printk(KERN_INFO "omap-keypad: key %d-%d %s\n", col, row, (new_state[col] & (1 << row)) ? "pressed" : "released"); #else key = keycodes[MATRIX_SCAN_CODE(row, col, row_shift)]; if (key < 0) { printk(KERN_WARNING "omap-keypad: Spurious key event %d-%d\n", col, row); /* We scan again after a couple of seconds */ spurious = 1; continue; } if (!(kp_cur_group == (key & GROUP_MASK) || kp_cur_group == -1)) continue; kp_cur_group = key & GROUP_MASK; input_report_key(omap_kp_data->input, key & ~GROUP_MASK, new_state[col] & (1 << row)); #endif } } input_sync(omap_kp_data->input); memcpy(keypad_state, new_state, sizeof(keypad_state)); if (key_down) { int delay = HZ / 20; /* some key is pressed - keep irq disabled and use timer * to poll the keypad */ if (spurious) delay = 2 * HZ; mod_timer(&omap_kp_data->timer, jiffies + delay); } else { /* enable interrupts */ if (cpu_is_omap24xx()) { int i; for (i = 0; i < omap_kp_data->rows; i++) enable_irq(gpio_to_irq(row_gpios[i])); } else { omap_writew(0, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); kp_cur_group = -1; } } } static ssize_t omap_kp_enable_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%u\n", kp_enable); } static ssize_t 
omap_kp_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int state; if (sscanf(buf, "%u", &state) != 1) return -EINVAL; if ((state != 1) && (state != 0)) return -EINVAL; mutex_lock(&kp_enable_mutex); if (state != kp_enable) { if (state) enable_irq(INT_KEYBOARD); else disable_irq(INT_KEYBOARD); kp_enable = state; } mutex_unlock(&kp_enable_mutex); return strnlen(buf, count); } static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, omap_kp_enable_show, omap_kp_enable_store); #ifdef CONFIG_PM static int omap_kp_suspend(struct platform_device *dev, pm_message_t state) { /* Nothing yet */ return 0; } static int omap_kp_resume(struct platform_device *dev) { /* Nothing yet */ return 0; } #else #define omap_kp_suspend NULL #define omap_kp_resume NULL #endif static int __devinit omap_kp_probe(struct platform_device *pdev) { struct omap_kp *omap_kp; struct input_dev *input_dev; struct omap_kp_platform_data *pdata = pdev->dev.platform_data; int i, col_idx, row_idx, irq_idx, ret; unsigned int row_shift, keycodemax; if (!pdata->rows || !pdata->cols || !pdata->keymap_data) { printk(KERN_ERR "No rows, cols or keymap_data from pdata\n"); return -EINVAL; } row_shift = get_count_order(pdata->cols); keycodemax = pdata->rows << row_shift; omap_kp = kzalloc(sizeof(struct omap_kp) + keycodemax * sizeof(unsigned short), GFP_KERNEL); input_dev = input_allocate_device(); if (!omap_kp || !input_dev) { kfree(omap_kp); input_free_device(input_dev); return -ENOMEM; } platform_set_drvdata(pdev, omap_kp); omap_kp->input = input_dev; /* Disable the interrupt for the MPUIO keyboard */ if (!cpu_is_omap24xx()) omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); input_dev->keycode = &omap_kp[1]; input_dev->keycodesize = sizeof(unsigned short); input_dev->keycodemax = keycodemax; if (pdata->rep) __set_bit(EV_REP, input_dev->evbit); if (pdata->delay) omap_kp->delay = pdata->delay; if (pdata->row_gpios && pdata->col_gpios) { row_gpios = pdata->row_gpios; 
col_gpios = pdata->col_gpios; } omap_kp->rows = pdata->rows; omap_kp->cols = pdata->cols; if (cpu_is_omap24xx()) { /* Cols: outputs */ for (col_idx = 0; col_idx < omap_kp->cols; col_idx++) { if (gpio_request(col_gpios[col_idx], "omap_kp_col") < 0) { printk(KERN_ERR "Failed to request" "GPIO%d for keypad\n", col_gpios[col_idx]); goto err1; } gpio_direction_output(col_gpios[col_idx], 0); } /* Rows: inputs */ for (row_idx = 0; row_idx < omap_kp->rows; row_idx++) { if (gpio_request(row_gpios[row_idx], "omap_kp_row") < 0) { printk(KERN_ERR "Failed to request" "GPIO%d for keypad\n", row_gpios[row_idx]); goto err2; } gpio_direction_input(row_gpios[row_idx]); } } else { col_idx = 0; row_idx = 0; } setup_timer(&omap_kp->timer, omap_kp_timer, (unsigned long)omap_kp); /* get the irq and init timer*/ tasklet_enable(&kp_tasklet); kp_tasklet.data = (unsigned long) omap_kp; ret = device_create_file(&pdev->dev, &dev_attr_enable); if (ret < 0) goto err2; /* setup input device */ __set_bit(EV_KEY, input_dev->evbit); matrix_keypad_build_keymap(pdata->keymap_data, row_shift, input_dev->keycode, input_dev->keybit); input_dev->name = "omap-keypad"; input_dev->phys = "omap-keypad/input0"; input_dev->dev.parent = &pdev->dev; input_dev->id.bustype = BUS_HOST; input_dev->id.vendor = 0x0001; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; ret = input_register_device(omap_kp->input); if (ret < 0) { printk(KERN_ERR "Unable to register omap-keypad input device\n"); goto err3; } if (pdata->dbounce) omap_writew(0xff, OMAP1_MPUIO_BASE + OMAP_MPUIO_GPIO_DEBOUNCING); /* scan current status and enable interrupt */ omap_kp_scan_keypad(omap_kp, keypad_state); if (!cpu_is_omap24xx()) { omap_kp->irq = platform_get_irq(pdev, 0); if (omap_kp->irq >= 0) { if (request_irq(omap_kp->irq, omap_kp_interrupt, 0, "omap-keypad", omap_kp) < 0) goto err4; } omap_writew(0, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); } else { for (irq_idx = 0; irq_idx < omap_kp->rows; irq_idx++) { if 
(request_irq(gpio_to_irq(row_gpios[irq_idx]), omap_kp_interrupt, IRQF_TRIGGER_FALLING, "omap-keypad", omap_kp) < 0) goto err5; } } return 0; err5: for (i = irq_idx - 1; i >=0; i--) free_irq(row_gpios[i], omap_kp); err4: input_unregister_device(omap_kp->input); input_dev = NULL; err3: device_remove_file(&pdev->dev, &dev_attr_enable); err2: for (i = row_idx - 1; i >=0; i--) gpio_free(row_gpios[i]); err1: for (i = col_idx - 1; i >=0; i--) gpio_free(col_gpios[i]); kfree(omap_kp); input_free_device(input_dev); return -EINVAL; } static int __devexit omap_kp_remove(struct platform_device *pdev) { struct omap_kp *omap_kp = platform_get_drvdata(pdev); /* disable keypad interrupt handling */ tasklet_disable(&kp_tasklet); if (cpu_is_omap24xx()) { int i; for (i = 0; i < omap_kp->cols; i++) gpio_free(col_gpios[i]); for (i = 0; i < omap_kp->rows; i++) { gpio_free(row_gpios[i]); free_irq(gpio_to_irq(row_gpios[i]), omap_kp); } } else { omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); free_irq(omap_kp->irq, omap_kp); } del_timer_sync(&omap_kp->timer); tasklet_kill(&kp_tasklet); /* unregister everything */ input_unregister_device(omap_kp->input); kfree(omap_kp); return 0; } static struct platform_driver omap_kp_driver = { .probe = omap_kp_probe, .remove = __devexit_p(omap_kp_remove), .suspend = omap_kp_suspend, .resume = omap_kp_resume, .driver = { .name = "omap-keypad", .owner = THIS_MODULE, }, }; module_platform_driver(omap_kp_driver); MODULE_AUTHOR("Timo Teräs"); MODULE_DESCRIPTION("OMAP Keypad Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:omap-keypad");
gpl-2.0
ryrzy/LG-D802-G2-_Android_KK_D802_v20b
fs/dlm/recover.c
4888
20662
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "dir.h"
#include "config.h"
#include "ast.h"
#include "memory.h"
#include "rcom.h"
#include "lock.h"
#include "lowcomms.h"
#include "member.h"
#include "recover.h"


/*
 * Recovery waiting routines: these functions wait for a particular reply from
 * a remote node, or for the remote node to report a certain status.  They
 * need to abort if the lockspace is stopped indicating a node has failed
 * (perhaps the one being waited for).
 */

/*
 * Wait until given function returns non-zero or lockspace is stopped
 * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes).  When another
 * function thinks it could have completed the waited-on task, they should wake
 * up ls_wait_general to get an immediate response rather than waiting for the
 * timer to detect the result.  A timer wakes us up periodically while waiting
 * to see if we should abort due to a node failure.  This should only be
 * called by the dlm_recoverd thread.
 */

/* Periodic kick so dlm_wait_function() re-checks its abort condition. */
static void dlm_wait_timer_fn(unsigned long data)
{
	struct dlm_ls *ls = (struct dlm_ls *) data;
	mod_timer(&ls->ls_timer, jiffies + (dlm_config.ci_recover_timer * HZ));
	wake_up(&ls->ls_wait_general);
}

int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
{
	int error = 0;

	init_timer(&ls->ls_timer);
	ls->ls_timer.function = dlm_wait_timer_fn;
	ls->ls_timer.data = (long) ls;
	ls->ls_timer.expires = jiffies + (dlm_config.ci_recover_timer * HZ);
	add_timer(&ls->ls_timer);

	wait_event(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls));
	del_timer_sync(&ls->ls_timer);

	if (dlm_recovery_stopped(ls)) {
		log_debug(ls, "dlm_wait_function aborted");
		error = -EINTR;
	}
	return error;
}

/*
 * An efficient way for all nodes to wait for all others to have a certain
 * status.  The node with the lowest nodeid polls all the others for their
 * status (wait_status_all) and all the others poll the node with the low id
 * for its accumulated result (wait_status_low).  When all nodes have set
 * status flag X, then status flag X_ALL will be set on the low nodeid.
 */

/* Read the recovery status word under ls_recover_lock. */
uint32_t dlm_recover_status(struct dlm_ls *ls)
{
	uint32_t status;
	spin_lock(&ls->ls_recover_lock);
	status = ls->ls_recover_status;
	spin_unlock(&ls->ls_recover_lock);
	return status;
}

/* Caller must hold ls_recover_lock. */
static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	ls->ls_recover_status |= status;
}

void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	spin_lock(&ls->ls_recover_lock);
	_set_recover_status(ls, status);
	spin_unlock(&ls->ls_recover_lock);
}

/*
 * Low-nodeid side: poll every member until each reports wait_status,
 * backing off up to 1s between polls.  Aborts with -EINTR if recovery is
 * stopped.  save_slots also records each member's slot info from the reply.
 */
static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
			   int save_slots)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	struct dlm_member *memb;
	int error = 0, delay;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		delay = 0;
		for (;;) {
			if (dlm_recovery_stopped(ls)) {
				error = -EINTR;
				goto out;
			}

			error = dlm_rcom_status(ls, memb->nodeid, 0);
			if (error)
				goto out;

			if (save_slots)
				dlm_slot_save(ls, rc, memb);

			if (rc->rc_result & wait_status)
				break;
			if (delay < 1000)
				delay += 20;
			msleep(delay);
		}
	}
 out:
	return error;
}

/*
 * Non-low-nodeid side: poll only the low nodeid until it reports the
 * accumulated wait_status, with the same backoff/abort behaviour.
 */
static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
			   uint32_t status_flags)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;

	for (;;) {
		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			goto out;
		}

		error = dlm_rcom_status(ls, nodeid, status_flags);
		if (error)
			break;

		if (rc->rc_result & wait_status)
			break;
		if (delay < 1000)
			delay += 20;
		msleep(delay);
	}
 out:
	return error;
}

/* Barrier helper: status_all == status << 1 by DLM_RS_* flag layout. */
static int wait_status(struct dlm_ls *ls, uint32_t status)
{
	uint32_t status_all = status << 1;
	int error;

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, status, 0);
		if (!error)
			dlm_set_recover_status(ls, status_all);
	} else
		error = wait_status_low(ls, status_all, 0);

	return error;
}

/*
 * NODES barrier plus slot assignment: the low nodeid collects every
 * member's slot info and assigns slots/generation; other nodes copy the
 * assignment in from the low nodeid's reply.
 */
int dlm_recover_members_wait(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	struct dlm_slot *slots;
	int num_slots, slots_size;
	int error, rv;
	uint32_t gen;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		memb->slot = -1;
		memb->generation = 0;
	}

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, DLM_RS_NODES, 1);
		if (error)
			goto out;

		/* slots array is sparse, slots_size may be > num_slots */

		rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
		if (!rv) {
			spin_lock(&ls->ls_recover_lock);
			_set_recover_status(ls, DLM_RS_NODES_ALL);
			ls->ls_num_slots = num_slots;
			ls->ls_slots_size = slots_size;
			ls->ls_slots = slots;
			ls->ls_generation = gen;
			spin_unlock(&ls->ls_recover_lock);
		} else {
			dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
		}
	} else {
		error = wait_status_low(ls, DLM_RS_NODES_ALL,
					DLM_RSF_NEED_SLOTS);
		if (error)
			goto out;

		dlm_slots_copy_in(ls);
	}
 out:
	return error;
}

int dlm_recover_directory_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_DIR);
}

int dlm_recover_locks_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_LOCKS);
}

int dlm_recover_done_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_DONE);
}

/*
 * The recover_list contains all the rsb's for which we've requested the new
 * master nodeid.  As replies are returned from the resource directories the
 * rsb's are removed from the list.  When the list is empty we're done.
 *
 * The recover_list is later similarly used for all rsb's for which we've sent
 * new lkb's and need to receive new corresponding lkid's.
 *
 * We use the address of the rsb struct as a simple local identifier for the
 * rsb so we can match an rcom reply with the rsb it was sent for.
 */

static int recover_list_empty(struct dlm_ls *ls)
{
	int empty;

	spin_lock(&ls->ls_recover_list_lock);
	empty = list_empty(&ls->ls_recover_list);
	spin_unlock(&ls->ls_recover_list_lock);

	return empty;
}

/* Add r to the recover list (idempotent); holds a ref while listed. */
static void recover_list_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	if (list_empty(&r->res_recover_list)) {
		list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
		ls->ls_recover_list_count++;
		dlm_hold_rsb(r);
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

static void recover_list_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	list_del_init(&r->res_recover_list);
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_list_lock);

	dlm_put_rsb(r);
}

/* Look up a listed rsb by id (id is the rsb's address — see above). */
static struct dlm_rsb *recover_list_find(struct dlm_ls *ls, uint64_t id)
{
	struct dlm_rsb *r = NULL;

	spin_lock(&ls->ls_recover_list_lock);

	list_for_each_entry(r, &ls->ls_recover_list, res_recover_list) {
		if (id == (unsigned long) r)
			goto out;
	}
	r = NULL;
 out:
	spin_unlock(&ls->ls_recover_list_lock);
	return r;
}

/* Drop everything from the recover list, e.g. after an aborted recovery. */
static void recover_list_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *s;

	spin_lock(&ls->ls_recover_list_lock);
	list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
		list_del_init(&r->res_recover_list);
		r->res_recover_locks_count = 0;
		dlm_put_rsb(r);
		ls->ls_recover_list_count--;
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_list_lock);
}


/* Master recovery: find new master node for rsb's that were
   mastered on nodes that have been removed.

   dlm_recover_masters
   recover_master
   dlm_send_rcom_lookup            ->  receive_rcom_lookup
                                       dlm_dir_lookup
   receive_rcom_lookup_reply       <-
   dlm_recover_master_reply
   set_new_master
   set_master_lkbs
   set_lock_master
*/

/*
 * Set the lock master for all LKBs in a lock queue
 * If we are the new master of the rsb, we may have received new
 * MSTCPY locks from other nodes already which we need to ignore
 * when setting the new nodeid.
 */

static void set_lock_master(struct list_head *queue, int nodeid)
{
	struct dlm_lkb *lkb;

	list_for_each_entry(lkb, queue, lkb_statequeue)
		if (!(lkb->lkb_flags & DLM_IFL_MSTCPY))
			lkb->lkb_nodeid = nodeid;
}

static void set_master_lkbs(struct dlm_rsb *r)
{
	set_lock_master(&r->res_grantqueue, r->res_nodeid);
	set_lock_master(&r->res_convertqueue, r->res_nodeid);
	set_lock_master(&r->res_waitqueue, r->res_nodeid);
}

/*
 * Propagate the new master nodeid to locks
 * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
 * The NEW_MASTER2 flag tells recover_lvb() and set_locks_purged() which
 * rsb's to consider.
 */

static void set_new_master(struct dlm_rsb *r, int nodeid)
{
	lock_rsb(r);
	r->res_nodeid = nodeid;
	set_master_lkbs(r);
	rsb_set_flag(r, RSB_NEW_MASTER);
	rsb_set_flag(r, RSB_NEW_MASTER2);
	unlock_rsb(r);
}

/*
 * We do async lookups on rsb's that need new masters.  The rsb's
 * waiting for a lookup reply are kept on the recover_list.
 */

static int recover_master(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();

	dir_nodeid = dlm_dir_nodeid(r);

	if (dir_nodeid == our_nodeid) {
		/* We are the directory node: resolve locally. */
		error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
				       r->res_length, &ret_nodeid);
		if (error)
			log_error(ls, "recover dir lookup error %d", error);

		if (ret_nodeid == our_nodeid)
			ret_nodeid = 0;
		set_new_master(r, ret_nodeid);
	} else {
		/* Remote directory: async lookup, reply handled by
		   dlm_recover_master_reply(). */
		recover_list_add(r);
		error = dlm_send_rcom_lookup(r, dir_nodeid);
	}

	return error;
}

/*
 * When not using a directory, most resource names will hash to a new static
 * master nodeid and the resource will need to be remastered.
 */

static int recover_master_static(struct dlm_rsb *r)
{
	int master = dlm_dir_nodeid(r);

	if (master == dlm_our_nodeid())
		master = 0;

	if (r->res_nodeid != master) {
		if (is_master(r))
			dlm_purge_mstcpy_locks(r);
		set_new_master(r, master);
		return 1;
	}
	return 0;
}

/*
 * Go through local root resources and for each rsb which has a master which
 * has departed, get the new master nodeid from the directory.  The dir will
 * assign mastery to the first node to look up the new master.  That means
 * we'll discover in this lookup if we're the new master of any rsb's.
 *
 * We fire off all the dir lookup requests individually and asynchronously to
 * the correct dir node.
 */

int dlm_recover_masters(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int error = 0, count = 0;

	log_debug(ls, "dlm_recover_masters");

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (dlm_recovery_stopped(ls)) {
			up_read(&ls->ls_root_sem);
			error = -EINTR;
			goto out;
		}

		if (dlm_no_directory(ls))
			count += recover_master_static(r);
		else if (!is_master(r) &&
			 (dlm_is_removed(ls, r->res_nodeid) ||
			  rsb_flag(r, RSB_NEW_MASTER))) {
			recover_master(r);
			count++;
		}

		schedule();
	}
	up_read(&ls->ls_root_sem);

	log_debug(ls, "dlm_recover_masters %d resources", count);

	/* Wait for all outstanding lookup replies. */
	error = dlm_wait_function(ls, &recover_list_empty);
 out:
	if (error)
		recover_list_clear(ls);
	return error;
}

/* Handle an rcom lookup reply: rc_id identifies the rsb (its address). */
int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct dlm_rsb *r;
	int nodeid;

	r = recover_list_find(ls, rc->rc_id);
	if (!r) {
		log_error(ls, "dlm_recover_master_reply no id %llx",
			  (unsigned long long)rc->rc_id);
		goto out;
	}

	nodeid = rc->rc_result;
	if (nodeid == dlm_our_nodeid())
		nodeid = 0;

	set_new_master(r, nodeid);
	recover_list_del(r);

	if (recover_list_empty(ls))
		wake_up(&ls->ls_wait_general);
 out:
	return 0;
}


/* Lock recovery: rebuild the process-copy locks we hold on a
   remastered rsb on the new rsb master.

   dlm_recover_locks
   recover_locks
   recover_locks_queue
   dlm_send_rcom_lock              ->  receive_rcom_lock
                                       dlm_recover_master_copy
   receive_rcom_lock_reply         <-
   dlm_recover_process_copy
*/


/*
 * keep a count of the number of lkb's we send to the new master; when we get
 * an equal number of replies then recovery for the rsb is done
 */

static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
{
	struct dlm_lkb *lkb;
	int error = 0;

	list_for_each_entry(lkb, head, lkb_statequeue) {
		error = dlm_send_rcom_lock(r, lkb);
		if (error)
			break;
		r->res_recover_locks_count++;
	}

	return error;
}

/* Send all of one rsb's locks (grant/convert/wait queues) to its new
   master; the rsb joins the recover list until all replies arrive. */
static int recover_locks(struct dlm_rsb *r)
{
	int error = 0;

	lock_rsb(r);

	DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););

	error = recover_locks_queue(r, &r->res_grantqueue);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_convertqueue);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_waitqueue);
	if (error)
		goto out;

	if (r->res_recover_locks_count)
		recover_list_add(r);
	else
		rsb_clear_flag(r, RSB_NEW_MASTER);
 out:
	unlock_rsb(r);
	return error;
}

int dlm_recover_locks(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int error, count = 0;

	log_debug(ls, "dlm_recover_locks");

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (is_master(r)) {
			rsb_clear_flag(r, RSB_NEW_MASTER);
			continue;
		}

		if (!rsb_flag(r, RSB_NEW_MASTER))
			continue;

		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			up_read(&ls->ls_root_sem);
			goto out;
		}

		error = recover_locks(r);
		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}

		count += r->res_recover_locks_count;
	}
	up_read(&ls->ls_root_sem);

	log_debug(ls, "dlm_recover_locks %d locks", count);

	error = dlm_wait_function(ls, &recover_list_empty);
 out:
	if (error)
		recover_list_clear(ls);
	return error;
}

/* One lock reply received for r; when the count hits zero the rsb is done. */
void dlm_recovered_lock(struct dlm_rsb *r)
{
	DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r););

	r->res_recover_locks_count--;
	if (!r->res_recover_locks_count) {
		rsb_clear_flag(r, RSB_NEW_MASTER);
		recover_list_del(r);
	}

	if (recover_list_empty(r->res_ls))
		wake_up(&r->res_ls->ls_wait_general);
}

/*
 * The lvb needs to be recovered on all master rsb's.  This includes setting
 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
 * based on the lvb's of the locks held on the rsb.
 *
 * RSB_VALNOTVALID is set if there are only NL/CR locks on the rsb.  If it
 * was already set prior to recovery, it's not cleared, regardless of locks.
 *
 * The LVB contents are only considered for changing when this is a new master
 * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with
 * mode > CR.  If no lkb's exist with mode above CR, the lvb contents are taken
 * from the lkb with the largest lvb sequence number.
 */

static void recover_lvb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb, *high_lkb = NULL;
	uint32_t high_seq = 0;
	int lock_lvb_exists = 0;
	int big_lock_exists = 0;
	int lvblen = r->res_ls->ls_lvblen;

	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (lkb->lkb_grmode > DLM_LOCK_CR) {
			big_lock_exists = 1;
			goto setflag;
		}

		/* Signed subtraction handles lvbseq wraparound. */
		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = lkb;
			high_seq = lkb->lkb_lvbseq;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (lkb->lkb_grmode > DLM_LOCK_CR) {
			big_lock_exists = 1;
			goto setflag;
		}

		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = lkb;
			high_seq = lkb->lkb_lvbseq;
		}
	}

 setflag:
	if (!lock_lvb_exists)
		goto out;

	if (!big_lock_exists)
		rsb_set_flag(r, RSB_VALNOTVALID);

	/* don't mess with the lvb unless we're the new master */
	if (!rsb_flag(r, RSB_NEW_MASTER2))
		goto out;

	if (!r->res_lvbptr) {
		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
		if (!r->res_lvbptr)
			goto out;
	}

	if (big_lock_exists) {
		/* lkb still points at the >CR lock found above. */
		r->res_lvbseq = lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
	} else if (high_lkb) {
		r->res_lvbseq = high_lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
	} else {
		r->res_lvbseq = 0;
		memset(r->res_lvbptr, 0, lvblen);
	}
 out:
	return;
}

/* All master rsb's flagged RECOVER_CONVERT need to be looked at.  The locks
   converting PR->CW or CW->PR need to have their lkb_grmode set. */

static void recover_conversion(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb;
	int grmode = -1;

	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (lkb->lkb_grmode == DLM_LOCK_PR ||
		    lkb->lkb_grmode == DLM_LOCK_CW) {
			grmode = lkb->lkb_grmode;
			break;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (lkb->lkb_grmode != DLM_LOCK_IV)
			continue;
		if (grmode == -1)
			lkb->lkb_grmode = lkb->lkb_rqmode;
		else
			lkb->lkb_grmode = grmode;
	}
}

/* We've become the new master for this rsb and waiting/converting locks may
   need to be granted in dlm_grant_after_purge() due to locks that may have
   existed from a removed node. */

static void set_locks_purged(struct dlm_rsb *r)
{
	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
		rsb_set_flag(r, RSB_LOCKS_PURGED);
}

/* Final per-rsb recovery pass over all local root rsb's. */
void dlm_recover_rsbs(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int count = 0;

	log_debug(ls, "dlm_recover_rsbs");

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		lock_rsb(r);
		if (is_master(r)) {
			if (rsb_flag(r, RSB_RECOVER_CONVERT))
				recover_conversion(r);
			if (rsb_flag(r, RSB_NEW_MASTER2))
				set_locks_purged(r);
			recover_lvb(r);
			count++;
		}
		rsb_clear_flag(r, RSB_RECOVER_CONVERT);
		rsb_clear_flag(r, RSB_NEW_MASTER2);
		unlock_rsb(r);
	}
	up_read(&ls->ls_root_sem);

	log_debug(ls, "dlm_recover_rsbs %d rsbs", count);
}

/* Create a single list of all root rsb's to be used during recovery */

int dlm_create_root_list(struct dlm_ls *ls)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int i, error = 0;

	down_write(&ls->ls_root_sem);
	if (!list_empty(&ls->ls_root_list)) {
		log_error(ls, "root list not empty");
		error = -EINVAL;
		goto out;
	}

	/* NOTE(review): the visible chunk ends inside this function; the
	   remainder of the loop body continues past this point. */
	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
spin_lock(&ls->ls_rsbtbl[i].lock); for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); list_add(&r->res_root_list, &ls->ls_root_list); dlm_hold_rsb(r); } /* If we're using a directory, add tossed rsbs to the root list; they'll have entries created in the new directory, but no other recovery steps should do anything with them. */ if (dlm_no_directory(ls)) { spin_unlock(&ls->ls_rsbtbl[i].lock); continue; } for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); list_add(&r->res_root_list, &ls->ls_root_list); dlm_hold_rsb(r); } spin_unlock(&ls->ls_rsbtbl[i].lock); } out: up_write(&ls->ls_root_sem); return error; } void dlm_release_root_list(struct dlm_ls *ls) { struct dlm_rsb *r, *safe; down_write(&ls->ls_root_sem); list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) { list_del_init(&r->res_root_list); dlm_put_rsb(r); } up_write(&ls->ls_root_sem); } /* If not using a directory, clear the entire toss list, there's no benefit to caching the master value since it's fixed. If we are using a dir, keep the rsb's we're the master of. Recovery will add them to the root list and from there they'll be entered in the rebuilt directory. */ void dlm_clear_toss_list(struct dlm_ls *ls) { struct rb_node *n, *next; struct dlm_rsb *rsb; int i; for (i = 0; i < ls->ls_rsbtbl_size; i++) { spin_lock(&ls->ls_rsbtbl[i].lock); for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) { next = rb_next(n);; rsb = rb_entry(n, struct dlm_rsb, res_hashnode); if (dlm_no_directory(ls) || !is_master(rsb)) { rb_erase(n, &ls->ls_rsbtbl[i].toss); dlm_free_rsb(rsb); } } spin_unlock(&ls->ls_rsbtbl[i].lock); } }
gpl-2.0
GameTheory-/android_kernel_lge_fx1s
drivers/mtd/maps/physmap.c
5144
6956
/* * Normal mappings of chips in physical memory * * Copyright (C) 2003 MontaVista Software Inc. * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net * * 031022 - [jsun] add run-time configure and partition setup */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/mtd/concat.h> #include <linux/io.h> #define MAX_RESOURCES 4 struct physmap_flash_info { struct mtd_info *mtd[MAX_RESOURCES]; struct mtd_info *cmtd; struct map_info map[MAX_RESOURCES]; spinlock_t vpp_lock; int vpp_refcnt; }; static int physmap_flash_remove(struct platform_device *dev) { struct physmap_flash_info *info; struct physmap_flash_data *physmap_data; int i; info = platform_get_drvdata(dev); if (info == NULL) return 0; platform_set_drvdata(dev, NULL); physmap_data = dev->dev.platform_data; if (info->cmtd) { mtd_device_unregister(info->cmtd); if (info->cmtd != info->mtd[0]) mtd_concat_destroy(info->cmtd); } for (i = 0; i < MAX_RESOURCES; i++) { if (info->mtd[i] != NULL) map_destroy(info->mtd[i]); } if (physmap_data->exit) physmap_data->exit(dev); return 0; } static void physmap_set_vpp(struct map_info *map, int state) { struct platform_device *pdev; struct physmap_flash_data *physmap_data; struct physmap_flash_info *info; unsigned long flags; pdev = (struct platform_device *)map->map_priv_1; physmap_data = pdev->dev.platform_data; if (!physmap_data->set_vpp) return; info = platform_get_drvdata(pdev); spin_lock_irqsave(&info->vpp_lock, flags); if (state) { if (++info->vpp_refcnt == 1) /* first nested 'on' */ physmap_data->set_vpp(pdev, 1); } else { if (--info->vpp_refcnt == 0) /* last nested 'off' */ physmap_data->set_vpp(pdev, 0); } spin_unlock_irqrestore(&info->vpp_lock, flags); } static const char *rom_probe_types[] = { 
"cfi_probe", "jedec_probe", "qinfo_probe", "map_rom", NULL }; static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", "afs", NULL }; static int physmap_flash_probe(struct platform_device *dev) { struct physmap_flash_data *physmap_data; struct physmap_flash_info *info; const char **probe_type; const char **part_types; int err = 0; int i; int devices_found = 0; physmap_data = dev->dev.platform_data; if (physmap_data == NULL) return -ENODEV; info = devm_kzalloc(&dev->dev, sizeof(struct physmap_flash_info), GFP_KERNEL); if (info == NULL) { err = -ENOMEM; goto err_out; } if (physmap_data->init) { err = physmap_data->init(dev); if (err) goto err_out; } platform_set_drvdata(dev, info); for (i = 0; i < dev->num_resources; i++) { printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n", (unsigned long long)resource_size(&dev->resource[i]), (unsigned long long)dev->resource[i].start); if (!devm_request_mem_region(&dev->dev, dev->resource[i].start, resource_size(&dev->resource[i]), dev_name(&dev->dev))) { dev_err(&dev->dev, "Could not reserve memory region\n"); err = -ENOMEM; goto err_out; } info->map[i].name = dev_name(&dev->dev); info->map[i].phys = dev->resource[i].start; info->map[i].size = resource_size(&dev->resource[i]); info->map[i].bankwidth = physmap_data->width; info->map[i].set_vpp = physmap_set_vpp; info->map[i].pfow_base = physmap_data->pfow_base; info->map[i].map_priv_1 = (unsigned long)dev; info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys, info->map[i].size); if (info->map[i].virt == NULL) { dev_err(&dev->dev, "Failed to ioremap flash region\n"); err = -EIO; goto err_out; } simple_map_init(&info->map[i]); probe_type = rom_probe_types; if (physmap_data->probe_type == NULL) { for (; info->mtd[i] == NULL && *probe_type != NULL; probe_type++) info->mtd[i] = do_map_probe(*probe_type, &info->map[i]); } else info->mtd[i] = do_map_probe(physmap_data->probe_type, &info->map[i]); if (info->mtd[i] == NULL) { dev_err(&dev->dev, 
"map_probe failed\n"); err = -ENXIO; goto err_out; } else { devices_found++; } info->mtd[i]->owner = THIS_MODULE; info->mtd[i]->dev.parent = &dev->dev; } if (devices_found == 1) { info->cmtd = info->mtd[0]; } else if (devices_found > 1) { /* * We detected multiple devices. Concatenate them together. */ info->cmtd = mtd_concat_create(info->mtd, devices_found, dev_name(&dev->dev)); if (info->cmtd == NULL) err = -ENXIO; } if (err) goto err_out; spin_lock_init(&info->vpp_lock); part_types = physmap_data->part_probe_types ? : part_probe_types; mtd_device_parse_register(info->cmtd, part_types, NULL, physmap_data->parts, physmap_data->nr_parts); return 0; err_out: physmap_flash_remove(dev); return err; } #ifdef CONFIG_PM static void physmap_flash_shutdown(struct platform_device *dev) { struct physmap_flash_info *info = platform_get_drvdata(dev); int i; for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++) if (mtd_suspend(info->mtd[i]) == 0) mtd_resume(info->mtd[i]); } #else #define physmap_flash_shutdown NULL #endif static struct platform_driver physmap_flash_driver = { .probe = physmap_flash_probe, .remove = physmap_flash_remove, .shutdown = physmap_flash_shutdown, .driver = { .name = "physmap-flash", .owner = THIS_MODULE, }, }; #ifdef CONFIG_MTD_PHYSMAP_COMPAT static struct physmap_flash_data physmap_flash_data = { .width = CONFIG_MTD_PHYSMAP_BANKWIDTH, }; static struct resource physmap_flash_resource = { .start = CONFIG_MTD_PHYSMAP_START, .end = CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN - 1, .flags = IORESOURCE_MEM, }; static struct platform_device physmap_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &physmap_flash_data, }, .num_resources = 1, .resource = &physmap_flash_resource, }; #endif static int __init physmap_init(void) { int err; err = platform_driver_register(&physmap_flash_driver); #ifdef CONFIG_MTD_PHYSMAP_COMPAT if (err == 0) { err = platform_device_register(&physmap_flash); if (err) 
platform_driver_unregister(&physmap_flash_driver); } #endif return err; } static void __exit physmap_exit(void) { #ifdef CONFIG_MTD_PHYSMAP_COMPAT platform_device_unregister(&physmap_flash); #endif platform_driver_unregister(&physmap_flash_driver); } module_init(physmap_init); module_exit(physmap_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_DESCRIPTION("Generic configurable MTD map driver"); /* legacy platform drivers can't hotplug or coldplg */ #ifndef CONFIG_MTD_PHYSMAP_COMPAT /* work with hotplug and coldplug */ MODULE_ALIAS("platform:physmap-flash"); #endif
gpl-2.0
xiaogaogao/linuxFromDigilent
arch/powerpc/platforms/wsp/wsp.c
6936
2445
/* * Copyright 2008-2011, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/smp.h> #include <linux/delay.h> #include <linux/time.h> #include <asm/scom.h> #include "wsp.h" #include "ics.h" #define WSP_SOC_COMPATIBLE "ibm,wsp-soc" #define PBIC_COMPATIBLE "ibm,wsp-pbic" #define COPRO_COMPATIBLE "ibm,wsp-coprocessor" static int __init wsp_probe_buses(void) { static __initdata struct of_device_id bus_ids[] = { /* * every node in between needs to be here or you won't * find it */ { .compatible = WSP_SOC_COMPATIBLE, }, { .compatible = PBIC_COMPATIBLE, }, { .compatible = COPRO_COMPATIBLE, }, {}, }; of_platform_bus_probe(NULL, bus_ids, NULL); return 0; } void __init wsp_setup_arch(void) { /* init to some ~sane value until calibrate_delay() runs */ loops_per_jiffy = 50000000; scom_init_wsp(); /* Setup SMP callback */ #ifdef CONFIG_SMP a2_setup_smp(); #endif #ifdef CONFIG_PCI wsp_setup_pci(); #endif } void __init wsp_setup_irq(void) { wsp_init_irq(); opb_pic_init(); } int __init wsp_probe_devices(void) { struct device_node *np; /* Our RTC is a ds1500. 
It seems to be programatically compatible * with the ds1511 for which we have a driver so let's use that */ np = of_find_compatible_node(NULL, NULL, "dallas,ds1500"); if (np != NULL) { struct resource res; if (of_address_to_resource(np, 0, &res) == 0) platform_device_register_simple("ds1511", 0, &res, 1); } wsp_probe_buses(); return 0; } void wsp_halt(void) { u64 val; scom_map_t m; struct device_node *dn; struct device_node *mine; struct device_node *me; me = of_get_cpu_node(smp_processor_id(), NULL); mine = scom_find_parent(me); /* This will halt all the A2s but not power off the chip */ for_each_node_with_property(dn, "scom-controller") { if (dn == mine) continue; m = scom_map(dn, 0, 1); /* read-modify-write it so the HW probe does not get * confused */ val = scom_read(m, 0); val |= 1; scom_write(m, 0, val); scom_unmap(m); } m = scom_map(mine, 0, 1); val = scom_read(m, 0); val |= 1; scom_write(m, 0, val); /* should never return */ scom_unmap(m); }
gpl-2.0
isnehalkiran/kernel-msm
fs/hpfs/dentry.c
10520
1540
/* * linux/fs/hpfs/dentry.c * * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999 * * dcache operations */ #include "hpfs_fn.h" /* * Note: the dentry argument is the parent dentry. */ static int hpfs_hash_dentry(const struct dentry *dentry, const struct inode *inode, struct qstr *qstr) { unsigned long hash; int i; unsigned l = qstr->len; if (l == 1) if (qstr->name[0]=='.') goto x; if (l == 2) if (qstr->name[0]=='.' || qstr->name[1]=='.') goto x; hpfs_adjust_length(qstr->name, &l); /*if (hpfs_chk_name(qstr->name,&l))*/ /*return -ENAMETOOLONG;*/ /*return -ENOENT;*/ x: hash = init_name_hash(); for (i = 0; i < l; i++) hash = partial_name_hash(hpfs_upcase(hpfs_sb(dentry->d_sb)->sb_cp_table,qstr->name[i]), hash); qstr->hash = end_name_hash(hash); return 0; } static int hpfs_compare_dentry(const struct dentry *parent, const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { unsigned al = len; unsigned bl = name->len; hpfs_adjust_length(str, &al); /*hpfs_adjust_length(b->name, &bl);*/ /* * 'str' is the nane of an already existing dentry, so the name * must be valid. 'name' must be validated first. */ if (hpfs_chk_name(name->name, &bl)) return 1; if (hpfs_compare_names(parent->d_sb, str, al, name->name, bl, 0)) return 1; return 0; } const struct dentry_operations hpfs_dentry_operations = { .d_hash = hpfs_hash_dentry, .d_compare = hpfs_compare_dentry, };
gpl-2.0
mkl0301/linux
arch/powerpc/boot/ep405.c
14104
1805
/* * Embedded Planet EP405 with PlanetCore firmware * * (c) Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp,\ * * Based on ep88xc.c by * * Scott Wood <scottwood@freescale.com> * * Copyright (c) 2007 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include "ops.h" #include "stdio.h" #include "planetcore.h" #include "dcr.h" #include "4xx.h" #include "io.h" static char *table; static u64 mem_size; static void platform_fixups(void) { u64 val; void *nvrtc; dt_fixup_memory(0, mem_size); planetcore_set_mac_addrs(table); if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ, &val)) { printf("No PlanetCore crystal frequency key.\r\n"); return; } ibm405gp_fixup_clocks(val, 0xa8c000); ibm4xx_quiesce_eth((u32 *)0xef600800, NULL); ibm4xx_fixup_ebc_ranges("/plb/ebc"); if (!planetcore_get_decimal(table, PLANETCORE_KEY_KB_NVRAM, &val)) { printf("No PlanetCore NVRAM size key.\r\n"); return; } nvrtc = finddevice("/plb/ebc/nvrtc@4,200000"); if (nvrtc != NULL) { u32 reg[3] = { 4, 0x200000, 0}; getprop(nvrtc, "reg", reg, 3); reg[2] = (val << 10) & 0xffffffff; setprop(nvrtc, "reg", reg, 3); } } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { table = (char *)r3; planetcore_prepare_table(table); if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size)) return; mem_size *= 1024 * 1024; simple_alloc_init(_end, mem_size - (unsigned long)_end, 32, 64); fdt_init(_dtb_start); planetcore_set_stdout_path(table); serial_console_init(); platform_ops.fixups = platform_fixups; }
gpl-2.0
HofiOne/xbmc
lib/libUPnP/Platinum/Source/Core/PltHttpServer.cpp
25
12330
/***************************************************************** | | Platinum - HTTP Server | | Copyright (c) 2004-2010, Plutinosoft, LLC. | All rights reserved. | http://www.plutinosoft.com | | This program is free software; you can redistribute it and/or | modify it under the terms of the GNU General Public License | as published by the Free Software Foundation; either version 2 | of the License, or (at your option) any later version. | | OEMs, ISVs, VARs and other distributors that combine and | distribute commercially licensed software with Platinum software | and do not wish to distribute the source code for the commercially | licensed software under version 2, or (at your option) any later | version, of the GNU General Public License (the "GPL") must enter | into a commercial license agreement with Plutinosoft, LLC. | licensing@plutinosoft.com | | This program is distributed in the hope that it will be useful, | but WITHOUT ANY WARRANTY; without even the implied warranty of | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | GNU General Public License for more details. | | You should have received a copy of the GNU General Public License | along with this program; see the file LICENSE.txt. If not, write to | the Free Software Foundation, Inc., | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
| http://www.gnu.org/licenses/gpl-2.0.html | ****************************************************************/ /*---------------------------------------------------------------------- | includes +---------------------------------------------------------------------*/ #include "PltTaskManager.h" #include "PltHttpServer.h" #include "PltHttp.h" #include "PltVersion.h" #include "PltUtilities.h" #include "PltProtocolInfo.h" #include "PltMimeType.h" NPT_SET_LOCAL_LOGGER("platinum.core.http.server") /*---------------------------------------------------------------------- | PLT_HttpServer::PLT_HttpServer +---------------------------------------------------------------------*/ PLT_HttpServer::PLT_HttpServer(NPT_IpAddress address, NPT_IpPort port, bool allow_random_port_on_bind_failure, /* = false */ NPT_Cardinal max_clients, /* = 50 */ bool reuse_address) : /* = false */ NPT_HttpServer(address, port, true), m_TaskManager(new PLT_TaskManager(max_clients)), m_Address(address), m_Port(port), m_AllowRandomPortOnBindFailure(allow_random_port_on_bind_failure), m_ReuseAddress(reuse_address), m_Running(false), m_Aborted(false) { } /*---------------------------------------------------------------------- | PLT_HttpServer::~PLT_HttpServer +---------------------------------------------------------------------*/ PLT_HttpServer::~PLT_HttpServer() { Stop(); } /*---------------------------------------------------------------------- | PLT_HttpServer::Start +---------------------------------------------------------------------*/ NPT_Result PLT_HttpServer::Start() { NPT_Result res = NPT_FAILURE; // we can't start an already running server or restart an aborted server // because the socket is shared create a new instance if (m_Running || m_Aborted) NPT_CHECK_WARNING(NPT_ERROR_INVALID_STATE); // if we're given a port for our http server, try it if (m_Port) { res = SetListenPort(m_Port, m_ReuseAddress); // return right away if failed and not allowed to try again randomly if (NPT_FAILED(res) && 
!m_AllowRandomPortOnBindFailure) { NPT_CHECK_SEVERE(res); } } // try random port now if (!m_Port || NPT_FAILED(res)) { int retries = 100; do { int random = NPT_System::GetRandomInteger(); int port = (unsigned short)(1024 + (random % 1024)); if (NPT_SUCCEEDED(SetListenPort(port, m_ReuseAddress))) { break; } } while (--retries > 0); if (retries == 0) NPT_CHECK_SEVERE(NPT_FAILURE); } // keep track of port server has successfully bound m_Port = m_BoundPort; // Tell server to try to listen to more incoming sockets // (this could fail silently) if (m_TaskManager->GetMaxTasks() > 20) { m_Socket.Listen(m_TaskManager->GetMaxTasks()); } // start a task to listen for incoming connections PLT_HttpListenTask *task = new PLT_HttpListenTask(this, &m_Socket, false); NPT_CHECK_SEVERE(m_TaskManager->StartTask(task)); NPT_SocketInfo info; m_Socket.GetInfo(info); NPT_LOG_INFO_2("HttpServer listening on %s:%d", (const char*)info.local_address.GetIpAddress().ToString(), m_Port); m_Running = true; return NPT_SUCCESS; } /*---------------------------------------------------------------------- | PLT_HttpServer::Stop +---------------------------------------------------------------------*/ NPT_Result PLT_HttpServer::Stop() { // we can't restart an aborted server if (m_Aborted || !m_Running) NPT_CHECK_WARNING(NPT_ERROR_INVALID_STATE); // stop all other pending tasks m_TaskManager->Abort(); m_Running = false; m_Aborted = true; return NPT_SUCCESS; } /*---------------------------------------------------------------------- | PLT_HttpServer::SetupResponse +---------------------------------------------------------------------*/ NPT_Result PLT_HttpServer::SetupResponse(NPT_HttpRequest& request, const NPT_HttpRequestContext& context, NPT_HttpResponse& response) { NPT_String prefix = NPT_String::Format("PLT_HttpServer::SetupResponse %s request from %s for \"%s\"", (const char*) request.GetMethod(), (const char*) context.GetRemoteAddress().ToString(), (const char*) request.GetUrl().ToString()); 
PLT_LOG_HTTP_REQUEST(NPT_LOG_LEVEL_FINE, prefix, &request); NPT_List<NPT_HttpRequestHandler*> handlers = FindRequestHandlers(request); if (handlers.GetItemCount() == 0) return NPT_ERROR_NO_SUCH_ITEM; // ask the handler to setup the response NPT_Result result = (*handlers.GetFirstItem())->SetupResponse(request, context, response); // DLNA compliance PLT_UPnPMessageHelper::SetDate(response); if (request.GetHeaders().GetHeader("Accept-Language")) { response.GetHeaders().SetHeader("Content-Language", "en"); } return result; } /*---------------------------------------------------------------------- | PLT_HttpServer::ServeFile +---------------------------------------------------------------------*/ NPT_Result PLT_HttpServer::ServeFile(const NPT_HttpRequest& request, const NPT_HttpRequestContext& context, NPT_HttpResponse& response, NPT_String file_path) { NPT_InputStreamReference stream; NPT_File file(file_path); NPT_FileInfo file_info; // prevent hackers from accessing files outside of our root if ((file_path.Find("/..") >= 0) || (file_path.Find("\\..") >= 0) || NPT_FAILED(NPT_File::GetInfo(file_path, &file_info))) { return NPT_ERROR_NO_SUCH_ITEM; } // check for range requests const NPT_String* range_spec = request.GetHeaders().GetHeaderValue(NPT_HTTP_HEADER_RANGE); // handle potential 304 only if range header not set NPT_DateTime date; NPT_TimeStamp timestamp; if (NPT_SUCCEEDED(PLT_UPnPMessageHelper::GetIfModifiedSince((NPT_HttpMessage&)request, date)) && !range_spec) { date.ToTimeStamp(timestamp); NPT_LOG_INFO_5("File %s timestamps: request=%d (%s) vs file=%d (%s)", (const char*)request.GetUrl().GetPath(), (NPT_UInt32)timestamp.ToSeconds(), (const char*)date.ToString(), (NPT_UInt32)file_info.m_ModificationTime, (const char*)NPT_DateTime(file_info.m_ModificationTime).ToString()); if (timestamp >= file_info.m_ModificationTime) { // it's a match NPT_LOG_FINE_1("Returning 304 for %s", request.GetUrl().GetPath().GetChars()); response.SetStatus(304, "Not Modified", 
NPT_HTTP_PROTOCOL_1_1); return NPT_SUCCESS; } } // open file if (NPT_FAILED(file.Open(NPT_FILE_OPEN_MODE_READ)) || NPT_FAILED(file.GetInputStream(stream)) || stream.IsNull()) { return NPT_ERROR_NO_SUCH_ITEM; } // set Last-Modified and Cache-Control headers if (file_info.m_ModificationTime) { NPT_DateTime last_modified = NPT_DateTime(file_info.m_ModificationTime); response.GetHeaders().SetHeader("Last-Modified", last_modified.ToString(NPT_DateTime::FORMAT_RFC_1123), true); response.GetHeaders().SetHeader("Cache-Control", "max-age=0,must-revalidate", true); //response.GetHeaders().SetHeader("Cache-Control", "max-age=1800", true); } PLT_HttpRequestContext tmp_context(request, context); return ServeStream(request, context, response, stream, PLT_MimeType::GetMimeType(file_path, &tmp_context)); } /*---------------------------------------------------------------------- | PLT_HttpServer::ServeStream +---------------------------------------------------------------------*/ NPT_Result PLT_HttpServer::ServeStream(const NPT_HttpRequest& request, const NPT_HttpRequestContext& context, NPT_HttpResponse& response, NPT_InputStreamReference& body, const char* content_type) { if (body.IsNull()) return NPT_FAILURE; // set date NPT_TimeStamp now; NPT_System::GetCurrentTimeStamp(now); response.GetHeaders().SetHeader("Date", NPT_DateTime(now).ToString(NPT_DateTime::FORMAT_RFC_1123), true); // get entity NPT_HttpEntity* entity = response.GetEntity(); NPT_CHECK_POINTER_FATAL(entity); // set the content type entity->SetContentType(content_type); // check for range requests const NPT_String* range_spec = request.GetHeaders().GetHeaderValue(NPT_HTTP_HEADER_RANGE); // setup entity body NPT_CHECK(NPT_HttpFileRequestHandler::SetupResponseBody(response, body, range_spec)); // set some default headers if (response.GetEntity()->GetTransferEncoding() != NPT_HTTP_TRANSFER_ENCODING_CHUNKED) { // set but don't replace Accept-Range header only if body is seekable NPT_Position offset; if 
(NPT_SUCCEEDED(body->Tell(offset)) && NPT_SUCCEEDED(body->Seek(offset))) { response.GetHeaders().SetHeader(NPT_HTTP_HEADER_ACCEPT_RANGES, "bytes", false); } } // set getcontentFeatures.dlna.org const NPT_String* value = request.GetHeaders().GetHeaderValue("getcontentFeatures.dlna.org"); if (value) { PLT_HttpRequestContext tmp_context(request, context); const char* dlna = PLT_ProtocolInfo::GetDlnaExtension(entity->GetContentType(), &tmp_context); if (dlna) response.GetHeaders().SetHeader("ContentFeatures.DLNA.ORG", dlna, false); } // transferMode.dlna.org value = request.GetHeaders().GetHeaderValue("transferMode.dlna.org"); if (value) { // Interactive mode not supported? /*if (value->Compare("Interactive", true) == 0) { response.SetStatus(406, "Not Acceptable"); return NPT_SUCCESS; }*/ response.GetHeaders().SetHeader("TransferMode.DLNA.ORG", value->GetChars(), false); } else { response.GetHeaders().SetHeader("TransferMode.DLNA.ORG", "Streaming", false); } if (request.GetHeaders().GetHeaderValue("TimeSeekRange.dlna.org")) { response.SetStatus(406, "Not Acceptable"); return NPT_SUCCESS; } return NPT_SUCCESS; }
gpl-2.0
loongson-community/preempt-rt-linux
drivers/serial/imx.c
25
36147
/* * linux/drivers/serial/imx.c * * Driver for Motorola IMX serial ports * * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. * * Author: Sascha Hauer <sascha@saschahauer.de> * Copyright (C) 2004 Pengutronix * * Copyright (C) 2009 emlix GmbH * Author: Fabian Godehardt (added IrDA support for iMX) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * [29-Mar-2005] Mike Lee * Added hardware handshake */ #if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/platform_device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/rational.h> #include <asm/io.h> #include <asm/irq.h> #include <mach/hardware.h> #include <mach/imx-uart.h> /* Register definitions */ #define URXD0 0x0 /* Receiver Register */ #define URTX0 0x40 /* Transmitter Register */ #define UCR1 0x80 /* Control Register 1 */ #define UCR2 0x84 /* Control Register 2 */ #define UCR3 0x88 /* Control Register 3 */ #define UCR4 0x8c /* Control Register 4 */ #define UFCR 0x90 /* FIFO Control Register */ #define USR1 0x94 /* Status Register 1 */ #define USR2 
0x98 /* Status Register 2 */ #define UESC 0x9c /* Escape Character Register */ #define UTIM 0xa0 /* Escape Timer Register */ #define UBIR 0xa4 /* BRM Incremental Register */ #define UBMR 0xa8 /* BRM Modulator Register */ #define UBRC 0xac /* Baud Rate Count Register */ #define MX2_ONEMS 0xb0 /* One Millisecond register */ #define UTS (cpu_is_mx1() ? 0xd0 : 0xb4) /* UART Test Register */ /* UART Control Register Bit Fields.*/ #define URXD_CHARRDY (1<<15) #define URXD_ERR (1<<14) #define URXD_OVRRUN (1<<13) #define URXD_FRMERR (1<<12) #define URXD_BRK (1<<11) #define URXD_PRERR (1<<10) #define UCR1_ADEN (1<<15) /* Auto dectect interrupt */ #define UCR1_ADBR (1<<14) /* Auto detect baud rate */ #define UCR1_TRDYEN (1<<13) /* Transmitter ready interrupt enable */ #define UCR1_IDEN (1<<12) /* Idle condition interrupt */ #define UCR1_RRDYEN (1<<9) /* Recv ready interrupt enable */ #define UCR1_RDMAEN (1<<8) /* Recv ready DMA enable */ #define UCR1_IREN (1<<7) /* Infrared interface enable */ #define UCR1_TXMPTYEN (1<<6) /* Transimitter empty interrupt enable */ #define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */ #define UCR1_SNDBRK (1<<4) /* Send break */ #define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */ #define MX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, mx1 only */ #define UCR1_DOZE (1<<1) /* Doze */ #define UCR1_UARTEN (1<<0) /* UART enabled */ #define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */ #define UCR2_IRTS (1<<14) /* Ignore RTS pin */ #define UCR2_CTSC (1<<13) /* CTS pin control */ #define UCR2_CTS (1<<12) /* Clear to send */ #define UCR2_ESCEN (1<<11) /* Escape enable */ #define UCR2_PREN (1<<8) /* Parity enable */ #define UCR2_PROE (1<<7) /* Parity odd/even */ #define UCR2_STPB (1<<6) /* Stop */ #define UCR2_WS (1<<5) /* Word size */ #define UCR2_RTSEN (1<<4) /* Request to send interrupt enable */ #define UCR2_TXEN (1<<2) /* Transmitter enabled */ #define UCR2_RXEN (1<<1) /* Receiver enabled */ #define UCR2_SRST (1<<0) /* SW 
reset */ #define UCR3_DTREN (1<<13) /* DTR interrupt enable */ #define UCR3_PARERREN (1<<12) /* Parity enable */ #define UCR3_FRAERREN (1<<11) /* Frame error interrupt enable */ #define UCR3_DSR (1<<10) /* Data set ready */ #define UCR3_DCD (1<<9) /* Data carrier detect */ #define UCR3_RI (1<<8) /* Ring indicator */ #define UCR3_TIMEOUTEN (1<<7) /* Timeout interrupt enable */ #define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */ #define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */ #define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */ #define MX1_UCR3_REF25 (1<<3) /* Ref freq 25 MHz, only on mx1 */ #define MX1_UCR3_REF30 (1<<2) /* Ref Freq 30 MHz, only on mx1 */ #define MX2_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select, on mx2/mx3 */ #define UCR3_INVT (1<<1) /* Inverted Infrared transmission */ #define UCR3_BPEN (1<<0) /* Preset registers enable */ #define UCR4_CTSTL_32 (32<<10) /* CTS trigger level (32 chars) */ #define UCR4_INVR (1<<9) /* Inverted infrared reception */ #define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */ #define UCR4_WKEN (1<<7) /* Wake interrupt enable */ #define UCR4_REF16 (1<<6) /* Ref freq 16 MHz */ #define UCR4_IRSC (1<<5) /* IR special case */ #define UCR4_TCEN (1<<3) /* Transmit complete interrupt enable */ #define UCR4_BKEN (1<<2) /* Break condition interrupt enable */ #define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */ #define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */ #define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */ #define UFCR_RFDIV (7<<7) /* Reference freq divider mask */ #define UFCR_RFDIV_REG(x) (((x) < 7 ? 
6 - (x) : 6) << 7) #define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */ #define USR1_PARITYERR (1<<15) /* Parity error interrupt flag */ #define USR1_RTSS (1<<14) /* RTS pin status */ #define USR1_TRDY (1<<13) /* Transmitter ready interrupt/dma flag */ #define USR1_RTSD (1<<12) /* RTS delta */ #define USR1_ESCF (1<<11) /* Escape seq interrupt flag */ #define USR1_FRAMERR (1<<10) /* Frame error interrupt flag */ #define USR1_RRDY (1<<9) /* Receiver ready interrupt/dma flag */ #define USR1_TIMEOUT (1<<7) /* Receive timeout interrupt status */ #define USR1_RXDS (1<<6) /* Receiver idle interrupt flag */ #define USR1_AIRINT (1<<5) /* Async IR wake interrupt flag */ #define USR1_AWAKE (1<<4) /* Aysnc wake interrupt flag */ #define USR2_ADET (1<<15) /* Auto baud rate detect complete */ #define USR2_TXFE (1<<14) /* Transmit buffer FIFO empty */ #define USR2_DTRF (1<<13) /* DTR edge interrupt flag */ #define USR2_IDLE (1<<12) /* Idle condition */ #define USR2_IRINT (1<<8) /* Serial infrared interrupt flag */ #define USR2_WAKE (1<<7) /* Wake */ #define USR2_RTSF (1<<4) /* RTS edge interrupt flag */ #define USR2_TXDC (1<<3) /* Transmitter complete */ #define USR2_BRCD (1<<2) /* Break condition */ #define USR2_ORE (1<<1) /* Overrun error */ #define USR2_RDR (1<<0) /* Recv data ready */ #define UTS_FRCPERR (1<<13) /* Force parity error */ #define UTS_LOOP (1<<12) /* Loop tx and rx */ #define UTS_TXEMPTY (1<<6) /* TxFIFO empty */ #define UTS_RXEMPTY (1<<5) /* RxFIFO empty */ #define UTS_TXFULL (1<<4) /* TxFIFO full */ #define UTS_RXFULL (1<<3) /* RxFIFO full */ #define UTS_SOFTRST (1<<0) /* Software reset */ /* We've been assigned a range on the "Low-density serial ports" major */ #define SERIAL_IMX_MAJOR 207 #define MINOR_START 16 #define DEV_NAME "ttymxc" #define MAX_INTERNAL_IRQ MXC_INTERNAL_IRQS /* * This determines how often we check the modem status signals * for any change. They generally aren't connected to an IRQ * so we have to poll them. 
 We also check immediately before
 * filling the TX fifo incase CTS has been dropped.
 */
#define MCTRL_TIMEOUT	(250*HZ/1000)

#define DRIVER_NAME "IMX-uart"

#define UART_NR 8

/*
 * Per-port driver state; embeds the generic uart_port so the serial
 * core handle can be cast back to an imx_port.
 */
struct imx_port {
	struct uart_port	port;
	struct timer_list	timer;		/* polls modem-status lines (no IRQ for them) */
	unsigned int		old_status;	/* last sampled mctrl bits, for change detection */
	int			txirq,rxirq,rtsirq;	/* i.MX1 has three IRQs; later chips use port.irq */
	unsigned int		have_rtscts:1;	/* RTS/CTS lines are wired up */
	unsigned int		use_irda:1;	/* port drives an IrDA transceiver */
	unsigned int		irda_inv_rx:1;	/* invert IR receive polarity */
	unsigned int		irda_inv_tx:1;	/* invert IR transmit polarity */
	unsigned short		trcv_delay;	/* transceiver delay */
	struct clk		*clk;		/* "uart" clock supplying uartclk */
};

#ifdef CONFIG_IRDA
#define USE_IRDA(sport)	((sport)->use_irda)
#else
#define USE_IRDA(sport)	(0)	/* compile IrDA branches away when unsupported */
#endif

/*
 * Handle any change of modem status signal since we were last called.
 */
static void imx_mctrl_check(struct imx_port *sport)
{
	unsigned int status, changed;

	status = sport->port.ops->get_mctrl(&sport->port);
	changed = status ^ sport->old_status;	/* bits that flipped since last poll */

	if (changed == 0)
		return;

	sport->old_status = status;

	if (changed & TIOCM_RI)
		sport->port.icount.rng++;
	if (changed & TIOCM_DSR)
		sport->port.icount.dsr++;
	if (changed & TIOCM_CAR)
		uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
	if (changed & TIOCM_CTS)
		uart_handle_cts_change(&sport->port, status & TIOCM_CTS);

	/* wake anyone blocked in TIOCMIWAIT */
	wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
}

/*
 * This is our per-port timeout handler, for checking the
 * modem status signals.
 */
static void imx_timeout(unsigned long data)
{
	struct imx_port *sport = (struct imx_port *)data;
	unsigned long flags;

	/* port.state is NULL once the port is shut down; stop rearming then */
	if (sport->port.state) {
		spin_lock_irqsave(&sport->port.lock, flags);
		imx_mctrl_check(sport);
		spin_unlock_irqrestore(&sport->port.lock, flags);

		mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
	}
}

/*
 * interrupts disabled on entry
 */
static void imx_stop_tx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long temp;

	if (USE_IRDA(sport)) {
		/* half duplex - wait for end of transmission */
		int n = 256;
		while ((--n > 0) &&
		      !(readl(sport->port.membase + USR2) & USR2_TXDC)) {
			udelay(5);
			barrier();
		}

		/*
		 * irda transceiver - wait a bit more to avoid
		 * cutoff, hardware dependent
		 */
		udelay(sport->trcv_delay);

		/*
		 * half duplex - reactivate receive mode,
		 * flush receive pipe echo crap
		 */
		if (readl(sport->port.membase + USR2) & USR2_TXDC) {
			/* mask TX interrupts first... */
			temp = readl(sport->port.membase + UCR1);
			temp &= ~(UCR1_TXMPTYEN | UCR1_TRDYEN);
			writel(temp, sport->port.membase + UCR1);

			temp = readl(sport->port.membase + UCR4);
			temp &= ~(UCR4_TCEN);
			writel(temp, sport->port.membase + UCR4);

			/* ...drain echoed characters out of the RX FIFO... */
			while (readl(sport->port.membase + URXD0) &
			       URXD_CHARRDY)
				barrier();

			/* ...then re-enable receive interrupts */
			temp = readl(sport->port.membase + UCR1);
			temp |= UCR1_RRDYEN;
			writel(temp, sport->port.membase + UCR1);

			temp = readl(sport->port.membase + UCR4);
			temp |= UCR4_DREN;
			writel(temp, sport->port.membase + UCR4);
		}
		return;
	}

	/* non-IrDA: just mask the transmitter-empty interrupt */
	temp = readl(sport->port.membase + UCR1);
	writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1);
}

/*
 * interrupts disabled on entry
 */
static void imx_stop_rx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long temp;

	temp = readl(sport->port.membase + UCR2);
	writel(temp &~ UCR2_RXEN, sport->port.membase + UCR2);
}

/*
 * Set the modem control timer to fire immediately.
*/ static void imx_enable_ms(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; mod_timer(&sport->timer, jiffies); } static inline void imx_transmit_buffer(struct imx_port *sport) { struct circ_buf *xmit = &sport->port.state->xmit; while (!(readl(sport->port.membase + UTS) & UTS_TXFULL)) { /* send xmit->buf[xmit->tail] * out the port here */ writel(xmit->buf[xmit->tail], sport->port.membase + URTX0); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); sport->port.icount.tx++; if (uart_circ_empty(xmit)) break; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&sport->port); if (uart_circ_empty(xmit)) imx_stop_tx(&sport->port); } /* * interrupts disabled on entry */ static void imx_start_tx(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; unsigned long temp; if (USE_IRDA(sport)) { /* half duplex in IrDA mode; have to disable receive mode */ temp = readl(sport->port.membase + UCR4); temp &= ~(UCR4_DREN); writel(temp, sport->port.membase + UCR4); temp = readl(sport->port.membase + UCR1); temp &= ~(UCR1_RRDYEN); writel(temp, sport->port.membase + UCR1); } temp = readl(sport->port.membase + UCR1); writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1); if (USE_IRDA(sport)) { temp = readl(sport->port.membase + UCR1); temp |= UCR1_TRDYEN; writel(temp, sport->port.membase + UCR1); temp = readl(sport->port.membase + UCR4); temp |= UCR4_TCEN; writel(temp, sport->port.membase + UCR4); } if (readl(sport->port.membase + UTS) & UTS_TXEMPTY) imx_transmit_buffer(sport); } static irqreturn_t imx_rtsint(int irq, void *dev_id) { struct imx_port *sport = dev_id; unsigned int val = readl(sport->port.membase + USR1) & USR1_RTSS; unsigned long flags; spin_lock_irqsave(&sport->port.lock, flags); writel(USR1_RTSD, sport->port.membase + USR1); uart_handle_cts_change(&sport->port, !!val); wake_up_interruptible(&sport->port.state->port.delta_msr_wait); spin_unlock_irqrestore(&sport->port.lock, flags); return 
IRQ_HANDLED; } static irqreturn_t imx_txint(int irq, void *dev_id) { struct imx_port *sport = dev_id; struct circ_buf *xmit = &sport->port.state->xmit; unsigned long flags; spin_lock_irqsave(&sport->port.lock,flags); if (sport->port.x_char) { /* Send next char */ writel(sport->port.x_char, sport->port.membase + URTX0); goto out; } if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) { imx_stop_tx(&sport->port); goto out; } imx_transmit_buffer(sport); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&sport->port); out: spin_unlock_irqrestore(&sport->port.lock,flags); return IRQ_HANDLED; } static irqreturn_t imx_rxint(int irq, void *dev_id) { struct imx_port *sport = dev_id; unsigned int rx,flg,ignored = 0; struct tty_struct *tty = sport->port.state->port.tty; unsigned long flags, temp; spin_lock_irqsave(&sport->port.lock,flags); while (readl(sport->port.membase + USR2) & USR2_RDR) { flg = TTY_NORMAL; sport->port.icount.rx++; rx = readl(sport->port.membase + URXD0); temp = readl(sport->port.membase + USR2); if (temp & USR2_BRCD) { writel(temp | USR2_BRCD, sport->port.membase + USR2); if (uart_handle_break(&sport->port)) continue; } if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx)) continue; if (rx & (URXD_PRERR | URXD_OVRRUN | URXD_FRMERR) ) { if (rx & URXD_PRERR) sport->port.icount.parity++; else if (rx & URXD_FRMERR) sport->port.icount.frame++; if (rx & URXD_OVRRUN) sport->port.icount.overrun++; if (rx & sport->port.ignore_status_mask) { if (++ignored > 100) goto out; continue; } rx &= sport->port.read_status_mask; if (rx & URXD_PRERR) flg = TTY_PARITY; else if (rx & URXD_FRMERR) flg = TTY_FRAME; if (rx & URXD_OVRRUN) flg = TTY_OVERRUN; #ifdef SUPPORT_SYSRQ sport->port.sysrq = 0; #endif } tty_insert_flip_char(tty, rx, flg); } out: spin_unlock_irqrestore(&sport->port.lock,flags); tty_flip_buffer_push(tty); return IRQ_HANDLED; } static irqreturn_t imx_int(int irq, void *dev_id) { struct imx_port *sport = dev_id; unsigned int sts; 
sts = readl(sport->port.membase + USR1); if (sts & USR1_RRDY) imx_rxint(irq, dev_id); if (sts & USR1_TRDY && readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN) imx_txint(irq, dev_id); if (sts & USR1_RTSD) imx_rtsint(irq, dev_id); return IRQ_HANDLED; } /* * Return TIOCSER_TEMT when transmitter is not busy. */ static unsigned int imx_tx_empty(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; return (readl(sport->port.membase + USR2) & USR2_TXDC) ? TIOCSER_TEMT : 0; } /* * We have a modem side uart, so the meanings of RTS and CTS are inverted. */ static unsigned int imx_get_mctrl(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; unsigned int tmp = TIOCM_DSR | TIOCM_CAR; if (readl(sport->port.membase + USR1) & USR1_RTSS) tmp |= TIOCM_CTS; if (readl(sport->port.membase + UCR2) & UCR2_CTS) tmp |= TIOCM_RTS; return tmp; } static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct imx_port *sport = (struct imx_port *)port; unsigned long temp; temp = readl(sport->port.membase + UCR2) & ~UCR2_CTS; if (mctrl & TIOCM_RTS) temp |= UCR2_CTS; writel(temp, sport->port.membase + UCR2); } /* * Interrupts always disabled. */ static void imx_break_ctl(struct uart_port *port, int break_state) { struct imx_port *sport = (struct imx_port *)port; unsigned long flags, temp; spin_lock_irqsave(&sport->port.lock, flags); temp = readl(sport->port.membase + UCR1) & ~UCR1_SNDBRK; if ( break_state != 0 ) temp |= UCR1_SNDBRK; writel(temp, sport->port.membase + UCR1); spin_unlock_irqrestore(&sport->port.lock, flags); } #define TXTL 2 /* reset default */ #define RXTL 1 /* reset default */ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode) { unsigned int val; unsigned int ufcr_rfdiv; /* set receiver / transmitter trigger level. 
* RFDIV is set such way to satisfy requested uartclk value */ val = TXTL << 10 | RXTL; ufcr_rfdiv = (clk_get_rate(sport->clk) + sport->port.uartclk / 2) / sport->port.uartclk; if(!ufcr_rfdiv) ufcr_rfdiv = 1; val |= UFCR_RFDIV_REG(ufcr_rfdiv); writel(val, sport->port.membase + UFCR); return 0; } static int imx_startup(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; int retval; unsigned long flags, temp; imx_setup_ufcr(sport, 0); /* disable the DREN bit (Data Ready interrupt enable) before * requesting IRQs */ temp = readl(sport->port.membase + UCR4); if (USE_IRDA(sport)) temp |= UCR4_IRSC; writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); if (USE_IRDA(sport)) { /* reset fifo's and state machines */ int i = 100; temp = readl(sport->port.membase + UCR2); temp &= ~UCR2_SRST; writel(temp, sport->port.membase + UCR2); while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0)) { udelay(1); } } /* * Allocate the IRQ(s) i.MX1 has three interrupts whereas later * chips only have one interrupt. */ if (sport->txirq > 0) { retval = request_irq(sport->rxirq, imx_rxint, 0, DRIVER_NAME, sport); if (retval) goto error_out1; retval = request_irq(sport->txirq, imx_txint, 0, DRIVER_NAME, sport); if (retval) goto error_out2; /* do not use RTS IRQ on IrDA */ if (!USE_IRDA(sport)) { retval = request_irq(sport->rtsirq, imx_rtsint, (sport->rtsirq < MAX_INTERNAL_IRQ) ? 
0 : IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, DRIVER_NAME, sport); if (retval) goto error_out3; } } else { retval = request_irq(sport->port.irq, imx_int, 0, DRIVER_NAME, sport); if (retval) { free_irq(sport->port.irq, sport); goto error_out1; } } /* * Finally, clear and enable interrupts */ writel(USR1_RTSD, sport->port.membase + USR1); temp = readl(sport->port.membase + UCR1); temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN; if (USE_IRDA(sport)) { temp |= UCR1_IREN; temp &= ~(UCR1_RTSDEN); } writel(temp, sport->port.membase + UCR1); temp = readl(sport->port.membase + UCR2); temp |= (UCR2_RXEN | UCR2_TXEN); writel(temp, sport->port.membase + UCR2); if (USE_IRDA(sport)) { /* clear RX-FIFO */ int i = 64; while ((--i > 0) && (readl(sport->port.membase + URXD0) & URXD_CHARRDY)) { barrier(); } } if (!cpu_is_mx1()) { temp = readl(sport->port.membase + UCR3); temp |= MX2_UCR3_RXDMUXSEL; writel(temp, sport->port.membase + UCR3); } if (USE_IRDA(sport)) { temp = readl(sport->port.membase + UCR4); if (sport->irda_inv_rx) temp |= UCR4_INVR; else temp &= ~(UCR4_INVR); writel(temp | UCR4_DREN, sport->port.membase + UCR4); temp = readl(sport->port.membase + UCR3); if (sport->irda_inv_tx) temp |= UCR3_INVT; else temp &= ~(UCR3_INVT); writel(temp, sport->port.membase + UCR3); } /* * Enable modem status interrupts */ spin_lock_irqsave(&sport->port.lock,flags); imx_enable_ms(&sport->port); spin_unlock_irqrestore(&sport->port.lock,flags); if (USE_IRDA(sport)) { struct imxuart_platform_data *pdata; pdata = sport->port.dev->platform_data; sport->irda_inv_rx = pdata->irda_inv_rx; sport->irda_inv_tx = pdata->irda_inv_tx; sport->trcv_delay = pdata->transceiver_delay; if (pdata->irda_enable) pdata->irda_enable(1); } return 0; error_out3: if (sport->txirq) free_irq(sport->txirq, sport); error_out2: if (sport->rxirq) free_irq(sport->rxirq, sport); error_out1: return retval; } static void imx_shutdown(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; unsigned long 
temp; temp = readl(sport->port.membase + UCR2); temp &= ~(UCR2_TXEN); writel(temp, sport->port.membase + UCR2); if (USE_IRDA(sport)) { struct imxuart_platform_data *pdata; pdata = sport->port.dev->platform_data; if (pdata->irda_enable) pdata->irda_enable(0); } /* * Stop our timer. */ del_timer_sync(&sport->timer); /* * Free the interrupts */ if (sport->txirq > 0) { if (!USE_IRDA(sport)) free_irq(sport->rtsirq, sport); free_irq(sport->txirq, sport); free_irq(sport->rxirq, sport); } else free_irq(sport->port.irq, sport); /* * Disable all interrupts, port and break condition. */ temp = readl(sport->port.membase + UCR1); temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN); if (USE_IRDA(sport)) temp &= ~(UCR1_IREN); writel(temp, sport->port.membase + UCR1); } static void imx_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct imx_port *sport = (struct imx_port *)port; unsigned long flags; unsigned int ucr2, old_ucr1, old_txrxen, baud, quot; unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8; unsigned int div, ufcr; unsigned long num, denom; uint64_t tdiv64; /* * If we don't support modem control lines, don't allow * these to be set. */ if (0) { termios->c_cflag &= ~(HUPCL | CRTSCTS | CMSPAR); termios->c_cflag |= CLOCAL; } /* * We only support CS7 and CS8. */ while ((termios->c_cflag & CSIZE) != CS7 && (termios->c_cflag & CSIZE) != CS8) { termios->c_cflag &= ~CSIZE; termios->c_cflag |= old_csize; old_csize = CS8; } if ((termios->c_cflag & CSIZE) == CS8) ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS; else ucr2 = UCR2_SRST | UCR2_IRTS; if (termios->c_cflag & CRTSCTS) { if( sport->have_rtscts ) { ucr2 &= ~UCR2_IRTS; ucr2 |= UCR2_CTSC; } else { termios->c_cflag &= ~CRTSCTS; } } if (termios->c_cflag & CSTOPB) ucr2 |= UCR2_STPB; if (termios->c_cflag & PARENB) { ucr2 |= UCR2_PREN; if (termios->c_cflag & PARODD) ucr2 |= UCR2_PROE; } /* * Ask the core to calculate the divisor for us. 
*/ baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); quot = uart_get_divisor(port, baud); spin_lock_irqsave(&sport->port.lock, flags); sport->port.read_status_mask = 0; if (termios->c_iflag & INPCK) sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR); if (termios->c_iflag & (BRKINT | PARMRK)) sport->port.read_status_mask |= URXD_BRK; /* * Characters to ignore */ sport->port.ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) sport->port.ignore_status_mask |= URXD_PRERR; if (termios->c_iflag & IGNBRK) { sport->port.ignore_status_mask |= URXD_BRK; /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). */ if (termios->c_iflag & IGNPAR) sport->port.ignore_status_mask |= URXD_OVRRUN; } del_timer_sync(&sport->timer); /* * Update the per-port timeout. */ uart_update_timeout(port, termios->c_cflag, baud); /* * disable interrupts and drain transmitter */ old_ucr1 = readl(sport->port.membase + UCR1); writel(old_ucr1 & ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN), sport->port.membase + UCR1); while ( !(readl(sport->port.membase + USR2) & USR2_TXDC)) barrier(); /* then, disable everything */ old_txrxen = readl(sport->port.membase + UCR2); writel(old_txrxen & ~( UCR2_TXEN | UCR2_RXEN), sport->port.membase + UCR2); old_txrxen &= (UCR2_TXEN | UCR2_RXEN); if (USE_IRDA(sport)) { /* * use maximum available submodule frequency to * avoid missing short pulses due to low sampling rate */ div = 1; } else { div = sport->port.uartclk / (baud * 16); if (div > 7) div = 7; if (!div) div = 1; } rational_best_approximation(16 * div * baud, sport->port.uartclk, 1 << 16, 1 << 16, &num, &denom); if (port->state && port->state->port.tty) { tdiv64 = sport->port.uartclk; tdiv64 *= num; do_div(tdiv64, denom * 16 * div); tty_encode_baud_rate(sport->port.state->port.tty, (speed_t)tdiv64, (speed_t)tdiv64); } num -= 1; denom -= 1; ufcr = readl(sport->port.membase + UFCR); ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div); 
writel(ufcr, sport->port.membase + UFCR); writel(num, sport->port.membase + UBIR); writel(denom, sport->port.membase + UBMR); if (!cpu_is_mx1()) writel(sport->port.uartclk / div / 1000, sport->port.membase + MX2_ONEMS); writel(old_ucr1, sport->port.membase + UCR1); /* set the parity, stop bits and data size */ writel(ucr2 | old_txrxen, sport->port.membase + UCR2); if (UART_ENABLE_MS(&sport->port, termios->c_cflag)) imx_enable_ms(&sport->port); spin_unlock_irqrestore(&sport->port.lock, flags); } static const char *imx_type(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; return sport->port.type == PORT_IMX ? "IMX" : NULL; } /* * Release the memory region(s) being used by 'port'. */ static void imx_release_port(struct uart_port *port) { struct platform_device *pdev = to_platform_device(port->dev); struct resource *mmres; mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(mmres->start, mmres->end - mmres->start + 1); } /* * Request the memory region(s) being used by 'port'. */ static int imx_request_port(struct uart_port *port) { struct platform_device *pdev = to_platform_device(port->dev); struct resource *mmres; void *ret; mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mmres) return -ENODEV; ret = request_mem_region(mmres->start, mmres->end - mmres->start + 1, "imx-uart"); return ret ? 0 : -EBUSY; } /* * Configure/autoconfigure the port. */ static void imx_config_port(struct uart_port *port, int flags) { struct imx_port *sport = (struct imx_port *)port; if (flags & UART_CONFIG_TYPE && imx_request_port(&sport->port) == 0) sport->port.type = PORT_IMX; } /* * Verify the new serial_struct (for TIOCSSERIAL). 
* The only change we allow are to the flags and type, and * even then only between PORT_IMX and PORT_UNKNOWN */ static int imx_verify_port(struct uart_port *port, struct serial_struct *ser) { struct imx_port *sport = (struct imx_port *)port; int ret = 0; if (ser->type != PORT_UNKNOWN && ser->type != PORT_IMX) ret = -EINVAL; if (sport->port.irq != ser->irq) ret = -EINVAL; if (ser->io_type != UPIO_MEM) ret = -EINVAL; if (sport->port.uartclk / 16 != ser->baud_base) ret = -EINVAL; if ((void *)sport->port.mapbase != ser->iomem_base) ret = -EINVAL; if (sport->port.iobase != ser->port) ret = -EINVAL; if (ser->hub6 != 0) ret = -EINVAL; return ret; } static struct uart_ops imx_pops = { .tx_empty = imx_tx_empty, .set_mctrl = imx_set_mctrl, .get_mctrl = imx_get_mctrl, .stop_tx = imx_stop_tx, .start_tx = imx_start_tx, .stop_rx = imx_stop_rx, .enable_ms = imx_enable_ms, .break_ctl = imx_break_ctl, .startup = imx_startup, .shutdown = imx_shutdown, .set_termios = imx_set_termios, .type = imx_type, .release_port = imx_release_port, .request_port = imx_request_port, .config_port = imx_config_port, .verify_port = imx_verify_port, }; static struct imx_port *imx_ports[UART_NR]; #ifdef CONFIG_SERIAL_IMX_CONSOLE static void imx_console_putchar(struct uart_port *port, int ch) { struct imx_port *sport = (struct imx_port *)port; while (readl(sport->port.membase + UTS) & UTS_TXFULL) barrier(); writel(ch, sport->port.membase + URTX0); } /* * Interrupts are disabled on entering */ static void imx_console_write(struct console *co, const char *s, unsigned int count) { struct imx_port *sport = imx_ports[co->index]; unsigned int old_ucr1, old_ucr2, ucr1; /* * First, save UCR1/2 and then disable interrupts */ ucr1 = old_ucr1 = readl(sport->port.membase + UCR1); old_ucr2 = readl(sport->port.membase + UCR2); if (cpu_is_mx1()) ucr1 |= MX1_UCR1_UARTCLKEN; ucr1 |= UCR1_UARTEN; ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN); writel(ucr1, sport->port.membase + UCR1); writel(old_ucr2 | UCR2_TXEN, 
sport->port.membase + UCR2); uart_console_write(&sport->port, s, count, imx_console_putchar); /* * Finally, wait for transmitter to become empty * and restore UCR1/2 */ while (!(readl(sport->port.membase + USR2) & USR2_TXDC)); writel(old_ucr1, sport->port.membase + UCR1); writel(old_ucr2, sport->port.membase + UCR2); } /* * If the port was already initialised (eg, by a boot loader), * try to determine the current setup. */ static void __init imx_console_get_options(struct imx_port *sport, int *baud, int *parity, int *bits) { if (readl(sport->port.membase + UCR1) & UCR1_UARTEN) { /* ok, the port was enabled */ unsigned int ucr2, ubir,ubmr, uartclk; unsigned int baud_raw; unsigned int ucfr_rfdiv; ucr2 = readl(sport->port.membase + UCR2); *parity = 'n'; if (ucr2 & UCR2_PREN) { if (ucr2 & UCR2_PROE) *parity = 'o'; else *parity = 'e'; } if (ucr2 & UCR2_WS) *bits = 8; else *bits = 7; ubir = readl(sport->port.membase + UBIR) & 0xffff; ubmr = readl(sport->port.membase + UBMR) & 0xffff; ucfr_rfdiv = (readl(sport->port.membase + UFCR) & UFCR_RFDIV) >> 7; if (ucfr_rfdiv == 6) ucfr_rfdiv = 7; else ucfr_rfdiv = 6 - ucfr_rfdiv; uartclk = clk_get_rate(sport->clk); uartclk /= ucfr_rfdiv; { /* * The next code provides exact computation of * baud_raw = round(((uartclk/16) * (ubir + 1)) / (ubmr + 1)) * without need of float support or long long division, * which would be required to prevent 32bit arithmetic overflow */ unsigned int mul = ubir + 1; unsigned int div = 16 * (ubmr + 1); unsigned int rem = uartclk % div; baud_raw = (uartclk / div) * mul; baud_raw += (rem * mul + div / 2) / div; *baud = (baud_raw + 50) / 100 * 100; } if(*baud != baud_raw) printk(KERN_INFO "Serial: Console IMX rounded baud rate from %d to %d\n", baud_raw, *baud); } } static int __init imx_console_setup(struct console *co, char *options) { struct imx_port *sport; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; /* * Check whether an invalid uart number has been specified, and * if so, search 
for the first available port that does have * console support. */ if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports)) co->index = 0; sport = imx_ports[co->index]; if(sport == NULL) return -ENODEV; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); else imx_console_get_options(sport, &baud, &parity, &bits); imx_setup_ufcr(sport, 0); return uart_set_options(&sport->port, co, baud, parity, bits, flow); } static struct uart_driver imx_reg; static struct console imx_console = { .name = DEV_NAME, .write = imx_console_write, .device = uart_console_device, .setup = imx_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &imx_reg, }; #define IMX_CONSOLE &imx_console #else #define IMX_CONSOLE NULL #endif static struct uart_driver imx_reg = { .owner = THIS_MODULE, .driver_name = DRIVER_NAME, .dev_name = DEV_NAME, .major = SERIAL_IMX_MAJOR, .minor = MINOR_START, .nr = ARRAY_SIZE(imx_ports), .cons = IMX_CONSOLE, }; static int serial_imx_suspend(struct platform_device *dev, pm_message_t state) { struct imx_port *sport = platform_get_drvdata(dev); if (sport) uart_suspend_port(&imx_reg, &sport->port); return 0; } static int serial_imx_resume(struct platform_device *dev) { struct imx_port *sport = platform_get_drvdata(dev); if (sport) uart_resume_port(&imx_reg, &sport->port); return 0; } static int serial_imx_probe(struct platform_device *pdev) { struct imx_port *sport; struct imxuart_platform_data *pdata; void __iomem *base; int ret = 0; struct resource *res; sport = kzalloc(sizeof(*sport), GFP_KERNEL); if (!sport) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENODEV; goto free; } base = ioremap(res->start, PAGE_SIZE); if (!base) { ret = -ENOMEM; goto free; } sport->port.dev = &pdev->dev; sport->port.mapbase = res->start; sport->port.membase = base; sport->port.type = PORT_IMX, sport->port.iotype = UPIO_MEM; sport->port.irq = platform_get_irq(pdev, 0); sport->rxirq = platform_get_irq(pdev, 0); 
sport->txirq = platform_get_irq(pdev, 1); sport->rtsirq = platform_get_irq(pdev, 2); sport->port.fifosize = 32; sport->port.ops = &imx_pops; sport->port.flags = UPF_BOOT_AUTOCONF; sport->port.line = pdev->id; init_timer(&sport->timer); sport->timer.function = imx_timeout; sport->timer.data = (unsigned long)sport; sport->clk = clk_get(&pdev->dev, "uart"); if (IS_ERR(sport->clk)) { ret = PTR_ERR(sport->clk); goto unmap; } clk_enable(sport->clk); sport->port.uartclk = clk_get_rate(sport->clk); imx_ports[pdev->id] = sport; pdata = pdev->dev.platform_data; if (pdata && (pdata->flags & IMXUART_HAVE_RTSCTS)) sport->have_rtscts = 1; #ifdef CONFIG_IRDA if (pdata && (pdata->flags & IMXUART_IRDA)) sport->use_irda = 1; #endif if (pdata->init) { ret = pdata->init(pdev); if (ret) goto clkput; } ret = uart_add_one_port(&imx_reg, &sport->port); if (ret) goto deinit; platform_set_drvdata(pdev, &sport->port); return 0; deinit: if (pdata->exit) pdata->exit(pdev); clkput: clk_put(sport->clk); clk_disable(sport->clk); unmap: iounmap(sport->port.membase); free: kfree(sport); return ret; } static int serial_imx_remove(struct platform_device *pdev) { struct imxuart_platform_data *pdata; struct imx_port *sport = platform_get_drvdata(pdev); pdata = pdev->dev.platform_data; platform_set_drvdata(pdev, NULL); if (sport) { uart_remove_one_port(&imx_reg, &sport->port); clk_put(sport->clk); } clk_disable(sport->clk); if (pdata->exit) pdata->exit(pdev); iounmap(sport->port.membase); kfree(sport); return 0; } static struct platform_driver serial_imx_driver = { .probe = serial_imx_probe, .remove = serial_imx_remove, .suspend = serial_imx_suspend, .resume = serial_imx_resume, .driver = { .name = "imx-uart", .owner = THIS_MODULE, }, }; static int __init imx_serial_init(void) { int ret; printk(KERN_INFO "Serial: IMX driver\n"); ret = uart_register_driver(&imx_reg); if (ret) return ret; ret = platform_driver_register(&serial_imx_driver); if (ret != 0) uart_unregister_driver(&imx_reg); return 0; } static 
void __exit imx_serial_exit(void) { platform_driver_unregister(&serial_imx_driver); uart_unregister_driver(&imx_reg); } module_init(imx_serial_init); module_exit(imx_serial_exit); MODULE_AUTHOR("Sascha Hauer"); MODULE_DESCRIPTION("IMX generic serial port driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:imx-uart");
gpl-2.0
xushichao/qemu-kvm-cpuid
hw/puv3_pm.c
25
3337
/*
 * Power Management device simulation in PKUnity SoC
 *
 * Copyright (C) 2010-2012 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation, or any later version.
 * See the COPYING file in the top-level directory.
 */
#include "hw.h"
#include "sysbus.h"

#undef DEBUG_PUV3
#include "puv3.h"

/* Register file of the simulated PM block; only the writable
 * configuration registers are backed by state. */
typedef struct {
    SysBusDevice busdev;
    MemoryRegion iomem;
    uint32_t reg_PMCR;
    uint32_t reg_PCGR;
    uint32_t reg_PLL_SYS_CFG;
    uint32_t reg_PLL_DDR_CFG;
    uint32_t reg_PLL_VGA_CFG;
    uint32_t reg_DIVCFG;
} PUV3PMState;

/*
 * MMIO read handler. Configuration registers return the last value
 * written; the status registers return fixed magic values so guest
 * polling loops see "PLL locked / reset done" immediately.
 */
static uint64_t puv3_pm_read(void *opaque, target_phys_addr_t offset,
        unsigned size)
{
    PUV3PMState *s = opaque;
    uint32_t ret = 0;

    switch (offset) {
    case 0x14:
        ret = s->reg_PCGR;
        break;
    case 0x18:
        ret = s->reg_PLL_SYS_CFG;
        break;
    case 0x1c:
        ret = s->reg_PLL_DDR_CFG;
        break;
    case 0x20:
        ret = s->reg_PLL_VGA_CFG;
        break;
    case 0x24:
        ret = s->reg_DIVCFG;
        break;
    case 0x28: /* PLL SYS STATUS */
        ret = 0x00002401;
        break;
    case 0x2c: /* PLL DDR STATUS */
        ret = 0x00100c00;
        break;
    case 0x30: /* PLL VGA STATUS */
        ret = 0x00003801;
        break;
    case 0x34: /* DIV STATUS */
        ret = 0x22f52015;
        break;
    case 0x38: /* SW RESET */
        ret = 0x0;
        break;
    case 0x44: /* PLL DFC DONE */
        ret = 0x7;
        break;
    default:
        DPRINTF("Bad offset 0x%x\n", offset);
    }
    DPRINTF("offset 0x%x, value 0x%x\n", offset, ret);

    return ret;
}

/*
 * MMIO write handler: latch values into the corresponding config
 * register. Writes to DIVCFG (0x24) and SW RESET (0x38) are accepted
 * but discarded; everything else is reported as a bad offset.
 */
static void puv3_pm_write(void *opaque, target_phys_addr_t offset,
        uint64_t value, unsigned size)
{
    PUV3PMState *s = opaque;

    switch (offset) {
    case 0x0:
        s->reg_PMCR = value;
        break;
    case 0x14:
        s->reg_PCGR = value;
        break;
    case 0x18:
        s->reg_PLL_SYS_CFG = value;
        break;
    case 0x1c:
        s->reg_PLL_DDR_CFG = value;
        break;
    case 0x20:
        s->reg_PLL_VGA_CFG = value;
        break;
    case 0x24:
    case 0x38:
        break;
    default:
        DPRINTF("Bad offset 0x%x\n", offset);
    }
    DPRINTF("offset 0x%x, value 0x%x\n", offset, value);
}

/* 32-bit-only register accesses, native endianness. */
static const MemoryRegionOps puv3_pm_ops = {
    .read = puv3_pm_read,
    .write = puv3_pm_write,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* SysBus init: reset PCGR and map the register window. */
static int puv3_pm_init(SysBusDevice *dev)
{
    PUV3PMState *s = FROM_SYSBUS(PUV3PMState, dev);

    s->reg_PCGR = 0x0;

    memory_region_init_io(&s->iomem, &puv3_pm_ops, s, "puv3_pm",
            PUV3_REGS_OFFSET);
    sysbus_init_mmio(dev, &s->iomem);

    return 0;
}

static void puv3_pm_class_init(ObjectClass *klass, void *data)
{
    SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass);

    sdc->init = puv3_pm_init;
}

static const TypeInfo puv3_pm_info = {
    .name = "puv3_pm",
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PUV3PMState),
    .class_init = puv3_pm_class_init,
};

static void puv3_pm_register_type(void)
{
    type_register_static(&puv3_pm_info);
}

type_init(puv3_pm_register_type)
gpl-2.0
HydraCompany/HydraKernel
arch/arm/mach-imx/pm-imx6.c
25
16440
/* * Copyright 2011-2014 Freescale Semiconductor, Inc. * Copyright 2011 Linaro Ltd. * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/genalloc.h> #include <linux/mfd/syscon.h> #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/regmap.h> #include <linux/suspend.h> #include <asm/cacheflush.h> #include <asm/fncpy.h> #include <asm/proc-fns.h> #include <asm/suspend.h> #include <asm/tlb.h> #include "common.h" #include "hardware.h" #define CCR 0x0 #define BM_CCR_WB_COUNT (0x7 << 16) #define BM_CCR_RBC_BYPASS_COUNT (0x3f << 21) #define BM_CCR_RBC_EN (0x1 << 27) #define CLPCR 0x54 #define BP_CLPCR_LPM 0 #define BM_CLPCR_LPM (0x3 << 0) #define BM_CLPCR_BYPASS_PMIC_READY (0x1 << 2) #define BM_CLPCR_ARM_CLK_DIS_ON_LPM (0x1 << 5) #define BM_CLPCR_SBYOS (0x1 << 6) #define BM_CLPCR_DIS_REF_OSC (0x1 << 7) #define BM_CLPCR_VSTBY (0x1 << 8) #define BP_CLPCR_STBY_COUNT 9 #define BM_CLPCR_STBY_COUNT (0x3 << 9) #define BM_CLPCR_COSC_PWRDOWN (0x1 << 11) #define BM_CLPCR_WB_PER_AT_LPM (0x1 << 16) #define BM_CLPCR_WB_CORE_AT_LPM (0x1 << 17) #define BM_CLPCR_BYP_MMDC_CH0_LPM_HS (0x1 << 19) #define BM_CLPCR_BYP_MMDC_CH1_LPM_HS (0x1 << 21) #define BM_CLPCR_MASK_CORE0_WFI (0x1 << 22) #define BM_CLPCR_MASK_CORE1_WFI (0x1 << 23) #define BM_CLPCR_MASK_CORE2_WFI (0x1 << 24) #define BM_CLPCR_MASK_CORE3_WFI (0x1 << 25) #define BM_CLPCR_MASK_SCU_IDLE (0x1 << 26) #define BM_CLPCR_MASK_L2CC_IDLE (0x1 << 27) #define CGPR 0x64 #define BM_CGPR_INT_MEM_CLK_LPM (0x1 << 17) #define MX6Q_SUSPEND_OCRAM_SIZE 0x1000 #define MX6_MAX_MMDC_IO_NUM 33 static void __iomem *ccm_base; 
static void __iomem *suspend_ocram_base; static void (*imx6_suspend_in_ocram_fn)(void __iomem *ocram_vbase); /* * suspend ocram space layout: * ======================== high address ====================== * . * . * . * ^ * ^ * ^ * imx6_suspend code * PM_INFO structure(imx6_cpu_pm_info) * ======================== low address ======================= */ struct imx6_pm_base { phys_addr_t pbase; void __iomem *vbase; }; struct imx6_pm_socdata { u32 cpu_type; const char *mmdc_compat; const char *src_compat; const char *iomuxc_compat; const char *gpc_compat; const u32 mmdc_io_num; const u32 *mmdc_io_offset; }; static const u32 imx6q_mmdc_io_offset[] __initconst = { 0x5ac, 0x5b4, 0x528, 0x520, /* DQM0 ~ DQM3 */ 0x514, 0x510, 0x5bc, 0x5c4, /* DQM4 ~ DQM7 */ 0x56c, 0x578, 0x588, 0x594, /* CAS, RAS, SDCLK_0, SDCLK_1 */ 0x5a8, 0x5b0, 0x524, 0x51c, /* SDQS0 ~ SDQS3 */ 0x518, 0x50c, 0x5b8, 0x5c0, /* SDQS4 ~ SDQS7 */ 0x784, 0x788, 0x794, 0x79c, /* GPR_B0DS ~ GPR_B3DS */ 0x7a0, 0x7a4, 0x7a8, 0x748, /* GPR_B4DS ~ GPR_B7DS */ 0x59c, 0x5a0, 0x750, 0x774, /* SODT0, SODT1, MODE_CTL, MODE */ 0x74c, /* GPR_ADDS */ }; static const u32 imx6dl_mmdc_io_offset[] __initconst = { 0x470, 0x474, 0x478, 0x47c, /* DQM0 ~ DQM3 */ 0x480, 0x484, 0x488, 0x48c, /* DQM4 ~ DQM7 */ 0x464, 0x490, 0x4ac, 0x4b0, /* CAS, RAS, SDCLK_0, SDCLK_1 */ 0x4bc, 0x4c0, 0x4c4, 0x4c8, /* DRAM_SDQS0 ~ DRAM_SDQS3 */ 0x4cc, 0x4d0, 0x4d4, 0x4d8, /* DRAM_SDQS4 ~ DRAM_SDQS7 */ 0x764, 0x770, 0x778, 0x77c, /* GPR_B0DS ~ GPR_B3DS */ 0x780, 0x784, 0x78c, 0x748, /* GPR_B4DS ~ GPR_B7DS */ 0x4b4, 0x4b8, 0x750, 0x760, /* SODT0, SODT1, MODE_CTL, MODE */ 0x74c, /* GPR_ADDS */ }; static const u32 imx6sl_mmdc_io_offset[] __initconst = { 0x30c, 0x310, 0x314, 0x318, /* DQM0 ~ DQM3 */ 0x5c4, 0x5cc, 0x5d4, 0x5d8, /* GPR_B0DS ~ GPR_B3DS */ 0x300, 0x31c, 0x338, 0x5ac, /* CAS, RAS, SDCLK_0, GPR_ADDS */ 0x33c, 0x340, 0x5b0, 0x5c0, /* SODT0, SODT1, MODE_CTL, MODE */ 0x330, 0x334, 0x320, /* SDCKE0, SDCKE1, RESET */ }; static const u32 
imx6sx_mmdc_io_offset[] __initconst = { 0x2ec, 0x2f0, 0x2f4, 0x2f8, /* DQM0 ~ DQM3 */ 0x60c, 0x610, 0x61c, 0x620, /* GPR_B0DS ~ GPR_B3DS */ 0x300, 0x2fc, 0x32c, 0x5f4, /* CAS, RAS, SDCLK_0, GPR_ADDS */ 0x310, 0x314, 0x5f8, 0x608, /* SODT0, SODT1, MODE_CTL, MODE */ 0x330, 0x334, 0x338, 0x33c, /* SDQS0 ~ SDQS3 */ }; static const struct imx6_pm_socdata imx6q_pm_data __initconst = { .cpu_type = MXC_CPU_IMX6Q, .mmdc_compat = "fsl,imx6q-mmdc", .src_compat = "fsl,imx6q-src", .iomuxc_compat = "fsl,imx6q-iomuxc", .gpc_compat = "fsl,imx6q-gpc", .mmdc_io_num = ARRAY_SIZE(imx6q_mmdc_io_offset), .mmdc_io_offset = imx6q_mmdc_io_offset, }; static const struct imx6_pm_socdata imx6dl_pm_data __initconst = { .cpu_type = MXC_CPU_IMX6DL, .mmdc_compat = "fsl,imx6q-mmdc", .src_compat = "fsl,imx6q-src", .iomuxc_compat = "fsl,imx6dl-iomuxc", .gpc_compat = "fsl,imx6q-gpc", .mmdc_io_num = ARRAY_SIZE(imx6dl_mmdc_io_offset), .mmdc_io_offset = imx6dl_mmdc_io_offset, }; static const struct imx6_pm_socdata imx6sl_pm_data __initconst = { .cpu_type = MXC_CPU_IMX6SL, .mmdc_compat = "fsl,imx6sl-mmdc", .src_compat = "fsl,imx6sl-src", .iomuxc_compat = "fsl,imx6sl-iomuxc", .gpc_compat = "fsl,imx6sl-gpc", .mmdc_io_num = ARRAY_SIZE(imx6sl_mmdc_io_offset), .mmdc_io_offset = imx6sl_mmdc_io_offset, }; static const struct imx6_pm_socdata imx6sx_pm_data __initconst = { .cpu_type = MXC_CPU_IMX6SX, .mmdc_compat = "fsl,imx6sx-mmdc", .src_compat = "fsl,imx6sx-src", .iomuxc_compat = "fsl,imx6sx-iomuxc", .gpc_compat = "fsl,imx6sx-gpc", .mmdc_io_num = ARRAY_SIZE(imx6sx_mmdc_io_offset), .mmdc_io_offset = imx6sx_mmdc_io_offset, }; /* * This structure is for passing necessary data for low level ocram * suspend code(arch/arm/mach-imx/suspend-imx6.S), if this struct * definition is changed, the offset definition in * arch/arm/mach-imx/suspend-imx6.S must be also changed accordingly, * otherwise, the suspend to ocram function will be broken! 
*/ struct imx6_cpu_pm_info { phys_addr_t pbase; /* The physical address of pm_info. */ phys_addr_t resume_addr; /* The physical resume address for asm code */ u32 cpu_type; u32 pm_info_size; /* Size of pm_info. */ struct imx6_pm_base mmdc_base; struct imx6_pm_base src_base; struct imx6_pm_base iomuxc_base; struct imx6_pm_base ccm_base; struct imx6_pm_base gpc_base; struct imx6_pm_base l2_base; u32 mmdc_io_num; /* Number of MMDC IOs which need saved/restored. */ u32 mmdc_io_val[MX6_MAX_MMDC_IO_NUM][2]; /* To save offset and value */ } __aligned(8); void imx6q_set_int_mem_clk_lpm(bool enable) { u32 val = readl_relaxed(ccm_base + CGPR); val &= ~BM_CGPR_INT_MEM_CLK_LPM; if (enable) val |= BM_CGPR_INT_MEM_CLK_LPM; writel_relaxed(val, ccm_base + CGPR); } static void imx6q_enable_rbc(bool enable) { u32 val; /* * need to mask all interrupts in GPC before * operating RBC configurations */ imx_gpc_mask_all(); /* configure RBC enable bit */ val = readl_relaxed(ccm_base + CCR); val &= ~BM_CCR_RBC_EN; val |= enable ? BM_CCR_RBC_EN : 0; writel_relaxed(val, ccm_base + CCR); /* configure RBC count */ val = readl_relaxed(ccm_base + CCR); val &= ~BM_CCR_RBC_BYPASS_COUNT; val |= enable ? BM_CCR_RBC_BYPASS_COUNT : 0; writel(val, ccm_base + CCR); /* * need to delay at least 2 cycles of CKIL(32K) * due to hardware design requirement, which is * ~61us, here we use 65us for safe */ udelay(65); /* restore GPC interrupt mask settings */ imx_gpc_restore_all(); } static void imx6q_enable_wb(bool enable) { u32 val; /* configure well bias enable bit */ val = readl_relaxed(ccm_base + CLPCR); val &= ~BM_CLPCR_WB_PER_AT_LPM; val |= enable ? BM_CLPCR_WB_PER_AT_LPM : 0; writel_relaxed(val, ccm_base + CLPCR); /* configure well bias count */ val = readl_relaxed(ccm_base + CCR); val &= ~BM_CCR_WB_COUNT; val |= enable ? 
BM_CCR_WB_COUNT : 0; writel_relaxed(val, ccm_base + CCR); } int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode) { struct irq_data *iomuxc_irq_data = irq_get_irq_data(32); u32 val = readl_relaxed(ccm_base + CLPCR); val &= ~BM_CLPCR_LPM; switch (mode) { case WAIT_CLOCKED: break; case WAIT_UNCLOCKED: val |= 0x1 << BP_CLPCR_LPM; val |= BM_CLPCR_ARM_CLK_DIS_ON_LPM; break; case STOP_POWER_ON: val |= 0x2 << BP_CLPCR_LPM; val &= ~BM_CLPCR_VSTBY; val &= ~BM_CLPCR_SBYOS; if (cpu_is_imx6sl()) val |= BM_CLPCR_BYPASS_PMIC_READY; if (cpu_is_imx6sl() || cpu_is_imx6sx()) val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS; else val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS; break; case WAIT_UNCLOCKED_POWER_OFF: val |= 0x1 << BP_CLPCR_LPM; val &= ~BM_CLPCR_VSTBY; val &= ~BM_CLPCR_SBYOS; break; case STOP_POWER_OFF: val |= 0x2 << BP_CLPCR_LPM; val |= 0x3 << BP_CLPCR_STBY_COUNT; val |= BM_CLPCR_VSTBY; val |= BM_CLPCR_SBYOS; if (cpu_is_imx6sl() || cpu_is_imx6sx()) val |= BM_CLPCR_BYPASS_PMIC_READY; if (cpu_is_imx6sl() || cpu_is_imx6sx()) val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS; else val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS; break; default: return -EINVAL; } /* * ERR007265: CCM: When improper low-power sequence is used, * the SoC enters low power mode before the ARM core executes WFI. * * Software workaround: * 1) Software should trigger IRQ #32 (IOMUX) to be always pending * by setting IOMUX_GPR1_GINT. * 2) Software should then unmask IRQ #32 in GPC before setting CCM * Low-Power mode. * 3) Software should mask IRQ #32 right after CCM Low-Power mode * is set (set bits 0-1 of CCM_CLPCR). */ imx_gpc_irq_unmask(iomuxc_irq_data); writel_relaxed(val, ccm_base + CLPCR); imx_gpc_irq_mask(iomuxc_irq_data); return 0; } static int imx6q_suspend_finish(unsigned long val) { if (!imx6_suspend_in_ocram_fn) { cpu_do_idle(); } else { /* * call low level suspend function in ocram, * as we need to float DDR IO. 
*/ local_flush_tlb_all(); imx6_suspend_in_ocram_fn(suspend_ocram_base); } return 0; } static int imx6q_pm_enter(suspend_state_t state) { switch (state) { case PM_SUSPEND_STANDBY: imx6q_set_lpm(STOP_POWER_ON); imx6q_set_int_mem_clk_lpm(true); imx_gpc_pre_suspend(false); if (cpu_is_imx6sl()) imx6sl_set_wait_clk(true); /* Zzz ... */ cpu_do_idle(); if (cpu_is_imx6sl()) imx6sl_set_wait_clk(false); imx_gpc_post_resume(); imx6q_set_lpm(WAIT_CLOCKED); break; case PM_SUSPEND_MEM: imx6q_set_lpm(STOP_POWER_OFF); imx6q_set_int_mem_clk_lpm(false); imx6q_enable_wb(true); /* * For suspend into ocram, asm code already take care of * RBC setting, so we do NOT need to do that here. */ if (!imx6_suspend_in_ocram_fn) imx6q_enable_rbc(true); imx_gpc_pre_suspend(true); imx_anatop_pre_suspend(); imx_set_cpu_jump(0, v7_cpu_resume); /* Zzz ... */ cpu_suspend(0, imx6q_suspend_finish); if (cpu_is_imx6q() || cpu_is_imx6dl()) imx_smp_prepare(); imx_anatop_post_resume(); imx_gpc_post_resume(); imx6q_enable_rbc(false); imx6q_enable_wb(false); imx6q_set_int_mem_clk_lpm(true); imx6q_set_lpm(WAIT_CLOCKED); break; default: return -EINVAL; } return 0; } static int imx6q_pm_valid(suspend_state_t state) { return (state == PM_SUSPEND_STANDBY || state == PM_SUSPEND_MEM); } static const struct platform_suspend_ops imx6q_pm_ops = { .enter = imx6q_pm_enter, .valid = imx6q_pm_valid, }; void __init imx6q_pm_set_ccm_base(void __iomem *base) { ccm_base = base; } static int __init imx6_pm_get_base(struct imx6_pm_base *base, const char *compat) { struct device_node *node; struct resource res; int ret = 0; node = of_find_compatible_node(NULL, NULL, compat); if (!node) { ret = -ENODEV; goto out; } ret = of_address_to_resource(node, 0, &res); if (ret) goto put_node; base->pbase = res.start; base->vbase = ioremap(res.start, resource_size(&res)); if (!base->vbase) ret = -ENOMEM; put_node: of_node_put(node); out: return ret; } static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) { phys_addr_t 
ocram_pbase; struct device_node *node; struct platform_device *pdev; struct imx6_cpu_pm_info *pm_info; struct gen_pool *ocram_pool; unsigned long ocram_base; int i, ret = 0; const u32 *mmdc_offset_array; suspend_set_ops(&imx6q_pm_ops); if (!socdata) { pr_warn("%s: invalid argument!\n", __func__); return -EINVAL; } node = of_find_compatible_node(NULL, NULL, "mmio-sram"); if (!node) { pr_warn("%s: failed to find ocram node!\n", __func__); return -ENODEV; } pdev = of_find_device_by_node(node); if (!pdev) { pr_warn("%s: failed to find ocram device!\n", __func__); ret = -ENODEV; goto put_node; } ocram_pool = dev_get_gen_pool(&pdev->dev); if (!ocram_pool) { pr_warn("%s: ocram pool unavailable!\n", __func__); ret = -ENODEV; goto put_node; } ocram_base = gen_pool_alloc(ocram_pool, MX6Q_SUSPEND_OCRAM_SIZE); if (!ocram_base) { pr_warn("%s: unable to alloc ocram!\n", __func__); ret = -ENOMEM; goto put_node; } ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base); suspend_ocram_base = __arm_ioremap_exec(ocram_pbase, MX6Q_SUSPEND_OCRAM_SIZE, false); pm_info = suspend_ocram_base; pm_info->pbase = ocram_pbase; pm_info->resume_addr = virt_to_phys(v7_cpu_resume); pm_info->pm_info_size = sizeof(*pm_info); /* * ccm physical address is not used by asm code currently, * so get ccm virtual address directly, as we already have * it from ccm driver. 
*/ pm_info->ccm_base.vbase = ccm_base; ret = imx6_pm_get_base(&pm_info->mmdc_base, socdata->mmdc_compat); if (ret) { pr_warn("%s: failed to get mmdc base %d!\n", __func__, ret); goto put_node; } ret = imx6_pm_get_base(&pm_info->src_base, socdata->src_compat); if (ret) { pr_warn("%s: failed to get src base %d!\n", __func__, ret); goto src_map_failed; } ret = imx6_pm_get_base(&pm_info->iomuxc_base, socdata->iomuxc_compat); if (ret) { pr_warn("%s: failed to get iomuxc base %d!\n", __func__, ret); goto iomuxc_map_failed; } ret = imx6_pm_get_base(&pm_info->gpc_base, socdata->gpc_compat); if (ret) { pr_warn("%s: failed to get gpc base %d!\n", __func__, ret); goto gpc_map_failed; } ret = imx6_pm_get_base(&pm_info->l2_base, "arm,pl310-cache"); if (ret) { pr_warn("%s: failed to get pl310-cache base %d!\n", __func__, ret); goto pl310_cache_map_failed; } pm_info->cpu_type = socdata->cpu_type; pm_info->mmdc_io_num = socdata->mmdc_io_num; mmdc_offset_array = socdata->mmdc_io_offset; for (i = 0; i < pm_info->mmdc_io_num; i++) { pm_info->mmdc_io_val[i][0] = mmdc_offset_array[i]; pm_info->mmdc_io_val[i][1] = readl_relaxed(pm_info->iomuxc_base.vbase + mmdc_offset_array[i]); } imx6_suspend_in_ocram_fn = fncpy( suspend_ocram_base + sizeof(*pm_info), &imx6_suspend, MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info)); goto put_node; pl310_cache_map_failed: iounmap(&pm_info->gpc_base.vbase); gpc_map_failed: iounmap(&pm_info->iomuxc_base.vbase); iomuxc_map_failed: iounmap(&pm_info->src_base.vbase); src_map_failed: iounmap(&pm_info->mmdc_base.vbase); put_node: of_node_put(node); return ret; } static void __init imx6_pm_common_init(const struct imx6_pm_socdata *socdata) { struct regmap *gpr; int ret; WARN_ON(!ccm_base); if (IS_ENABLED(CONFIG_SUSPEND)) { ret = imx6q_suspend_init(socdata); if (ret) pr_warn("%s: No DDR LPM support with suspend %d!\n", __func__, ret); } /* * This is for SW workaround step #1 of ERR007265, see comments * in imx6q_set_lpm for details of this errata. 
* Force IOMUXC irq pending, so that the interrupt to GPC can be * used to deassert dsm_request signal when the signal gets * asserted unexpectedly. */ gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); if (!IS_ERR(gpr)) regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT, IMX6Q_GPR1_GINT); } void __init imx6q_pm_init(void) { imx6_pm_common_init(&imx6q_pm_data); } void __init imx6dl_pm_init(void) { imx6_pm_common_init(&imx6dl_pm_data); } void __init imx6sl_pm_init(void) { imx6_pm_common_init(&imx6sl_pm_data); } void __init imx6sx_pm_init(void) { imx6_pm_common_init(&imx6sx_pm_data); }
gpl-2.0
Alucard24/Alucard-Kernel-LG-G5
arch/arm/plat-samsung/pm-debug.c
537
2490
/* * Copyright (C) 2013 Samsung Electronics Co., Ltd. * Tomasz Figa <t.figa@samsung.com> * Copyright (C) 2008 Openmoko, Inc. * Copyright (C) 2004-2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * Samsung common power management (suspend to RAM) debug support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/serial_core.h> #include <linux/serial_s3c.h> #include <linux/io.h> #include <asm/mach/map.h> #include <plat/cpu.h> #include <plat/pm-common.h> #ifdef CONFIG_SAMSUNG_ATAGS #include <mach/pm-core.h> #else static inline void s3c_pm_debug_init_uart(void) {} static inline void s3c_pm_arch_update_uart(void __iomem *regs, struct pm_uart_save *save) {} #endif static struct pm_uart_save uart_save; extern void printascii(const char *); void s3c_pm_dbg(const char *fmt, ...) { va_list va; char buff[256]; va_start(va, fmt); vsnprintf(buff, sizeof(buff), fmt, va); va_end(va); printascii(buff); } void s3c_pm_debug_init(void) { /* restart uart clocks so we can use them to output */ s3c_pm_debug_init_uart(); } static inline void __iomem *s3c_pm_uart_base(void) { unsigned long paddr; unsigned long vaddr; debug_ll_addr(&paddr, &vaddr); return (void __iomem *)vaddr; } void s3c_pm_save_uarts(void) { void __iomem *regs = s3c_pm_uart_base(); struct pm_uart_save *save = &uart_save; save->ulcon = __raw_readl(regs + S3C2410_ULCON); save->ucon = __raw_readl(regs + S3C2410_UCON); save->ufcon = __raw_readl(regs + S3C2410_UFCON); save->umcon = __raw_readl(regs + S3C2410_UMCON); save->ubrdiv = __raw_readl(regs + S3C2410_UBRDIV); if (!soc_is_s3c2410()) save->udivslot = __raw_readl(regs + S3C2443_DIVSLOT); S3C_PMDBG("UART[%p]: ULCON=%04x, UCON=%04x, UFCON=%04x, UBRDIV=%04x\n", regs, save->ulcon, save->ucon, save->ufcon, save->ubrdiv); } void s3c_pm_restore_uarts(void) { void __iomem *regs = 
s3c_pm_uart_base(); struct pm_uart_save *save = &uart_save; s3c_pm_arch_update_uart(regs, save); __raw_writel(save->ulcon, regs + S3C2410_ULCON); __raw_writel(save->ucon, regs + S3C2410_UCON); __raw_writel(save->ufcon, regs + S3C2410_UFCON); __raw_writel(save->umcon, regs + S3C2410_UMCON); __raw_writel(save->ubrdiv, regs + S3C2410_UBRDIV); if (!soc_is_s3c2410()) __raw_writel(save->udivslot, regs + S3C2443_DIVSLOT); }
gpl-2.0
bas-t/media_tree
drivers/usb/host/ehci-xilinx-of.c
1305
6582
/* * EHCI HCD (Host Controller Driver) for USB. * * Bus Glue for Xilinx EHCI core on the of_platform bus * * Copyright (c) 2009 Xilinx, Inc. * * Based on "ehci-ppc-of.c" by Valentine Barshak <vbarshak@ru.mvista.com> * and "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de> * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/err.h> #include <linux/signal.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/of_address.h> #include <linux/of_irq.h> /** * ehci_xilinx_port_handed_over - hand the port out if failed to enable it * @hcd: Pointer to the usb_hcd device to which the host controller bound * @portnum:Port number to which the device is attached. * * This function is used as a place to tell the user that the Xilinx USB host * controller does support LS devices. And in an HS only configuration, it * does not support FS devices either. It is hoped that this can help a * confused user. * * There are cases when the host controller fails to enable the port due to, * for example, insufficient power that can be supplied to the device from * the USB bus. In those cases, the messages printed here are not helpful. 
*/ static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum) { dev_warn(hcd->self.controller, "port %d cannot be enabled\n", portnum); if (hcd->has_tt) { dev_warn(hcd->self.controller, "Maybe you have connected a low speed device?\n"); dev_warn(hcd->self.controller, "We do not support low speed devices\n"); } else { dev_warn(hcd->self.controller, "Maybe your device is not a high speed device?\n"); dev_warn(hcd->self.controller, "The USB host controller does not support full speed " "nor low speed devices\n"); dev_warn(hcd->self.controller, "You can reconfigure the host controller to have " "full speed support\n"); } return 0; } static const struct hc_driver ehci_xilinx_of_hc_driver = { .description = hcd_name, .product_desc = "OF EHCI", .hcd_priv_size = sizeof(struct ehci_hcd), /* * generic hardware linkage */ .irq = ehci_irq, .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, /* * basic lifecycle operations */ .reset = ehci_setup, .start = ehci_run, .stop = ehci_stop, .shutdown = ehci_shutdown, /* * managing i/o requests and associated device resources */ .urb_enqueue = ehci_urb_enqueue, .urb_dequeue = ehci_urb_dequeue, .endpoint_disable = ehci_endpoint_disable, .endpoint_reset = ehci_endpoint_reset, /* * scheduling support */ .get_frame_number = ehci_get_frame, /* * root hub support */ .hub_status_data = ehci_hub_status_data, .hub_control = ehci_hub_control, #ifdef CONFIG_PM .bus_suspend = ehci_bus_suspend, .bus_resume = ehci_bus_resume, #endif .relinquish_port = NULL, .port_handed_over = ehci_xilinx_port_handed_over, .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; /** * ehci_hcd_xilinx_of_probe - Probe method for the USB host controller * @op: pointer to the platform_device bound to the host controller * * This function requests resources and sets up appropriate properties for the * host controller. 
Because the Xilinx USB host controller can be configured * as HS only or HS/FS only, it checks the configuration in the device tree * entry, and sets an appropriate value for hcd->has_tt. */ static int ehci_hcd_xilinx_of_probe(struct platform_device *op) { struct device_node *dn = op->dev.of_node; struct usb_hcd *hcd; struct ehci_hcd *ehci; struct resource res; int irq; int rv; int *value; if (usb_disabled()) return -ENODEV; dev_dbg(&op->dev, "initializing XILINX-OF USB Controller\n"); rv = of_address_to_resource(dn, 0, &res); if (rv) return rv; hcd = usb_create_hcd(&ehci_xilinx_of_hc_driver, &op->dev, "XILINX-OF USB"); if (!hcd) return -ENOMEM; hcd->rsrc_start = res.start; hcd->rsrc_len = resource_size(&res); irq = irq_of_parse_and_map(dn, 0); if (!irq) { dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n", __FILE__); rv = -EBUSY; goto err_irq; } hcd->regs = devm_ioremap_resource(&op->dev, &res); if (IS_ERR(hcd->regs)) { rv = PTR_ERR(hcd->regs); goto err_irq; } ehci = hcd_to_ehci(hcd); /* This core always has big-endian register interface and uses * big-endian memory descriptors. */ ehci->big_endian_mmio = 1; ehci->big_endian_desc = 1; /* Check whether the FS support option is selected in the hardware. */ value = (int *)of_get_property(dn, "xlnx,support-usb-fs", NULL); if (value && (*value == 1)) { ehci_dbg(ehci, "USB host controller supports FS devices\n"); hcd->has_tt = 1; } else { ehci_dbg(ehci, "USB host controller is HS only\n"); hcd->has_tt = 0; } /* Debug registers are at the first 0x100 region */ ehci->caps = hcd->regs + 0x100; rv = usb_add_hcd(hcd, irq, 0); if (rv == 0) { device_wakeup_enable(hcd->self.controller); return 0; } err_irq: usb_put_hcd(hcd); return rv; } /** * ehci_hcd_xilinx_of_remove - shutdown hcd and release resources * @op: pointer to platform_device structure that is to be removed * * Remove the hcd structure, and release resources that has been requested * during probe. 
*/ static int ehci_hcd_xilinx_of_remove(struct platform_device *op) { struct usb_hcd *hcd = platform_get_drvdata(op); dev_dbg(&op->dev, "stopping XILINX-OF USB Controller\n"); usb_remove_hcd(hcd); usb_put_hcd(hcd); return 0; } static const struct of_device_id ehci_hcd_xilinx_of_match[] = { {.compatible = "xlnx,xps-usb-host-1.00.a",}, {}, }; MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match); static struct platform_driver ehci_hcd_xilinx_of_driver = { .probe = ehci_hcd_xilinx_of_probe, .remove = ehci_hcd_xilinx_of_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "xilinx-of-ehci", .of_match_table = ehci_hcd_xilinx_of_match, }, };
gpl-2.0
ddikodroid/Finder-Kernel-Source-4.0
arch/sparc/kernel/pci.c
2329
30023
/* pci.c: UltraSparc PCI controller support. * * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com) * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz) * * OF tree based PCI bus probing taken from the PowerPC port * with minor modifications, see there for credits. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/sched.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/msi.h> #include <linux/irq.h> #include <linux/init.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/prom.h> #include <asm/apb.h> #include "pci_impl.h" /* List of all PCI controllers found in the system. */ struct pci_pbm_info *pci_pbm_root = NULL; /* Each PBM found gets a unique index. */ int pci_num_pbms = 0; volatile int pci_poke_in_progress; volatile int pci_poke_cpu = -1; volatile int pci_poke_faulted; static DEFINE_SPINLOCK(pci_poke_lock); void pci_config_read8(u8 *addr, u8 *ret) { unsigned long flags; u8 byte; spin_lock_irqsave(&pci_poke_lock, flags); pci_poke_cpu = smp_processor_id(); pci_poke_in_progress = 1; pci_poke_faulted = 0; __asm__ __volatile__("membar #Sync\n\t" "lduba [%1] %2, %0\n\t" "membar #Sync" : "=r" (byte) : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) : "memory"); pci_poke_in_progress = 0; pci_poke_cpu = -1; if (!pci_poke_faulted) *ret = byte; spin_unlock_irqrestore(&pci_poke_lock, flags); } void pci_config_read16(u16 *addr, u16 *ret) { unsigned long flags; u16 word; spin_lock_irqsave(&pci_poke_lock, flags); pci_poke_cpu = smp_processor_id(); pci_poke_in_progress = 1; pci_poke_faulted = 0; __asm__ __volatile__("membar #Sync\n\t" "lduha [%1] %2, %0\n\t" "membar #Sync" : "=r" (word) : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) : "memory"); pci_poke_in_progress = 0; pci_poke_cpu = -1; if (!pci_poke_faulted) *ret = word; 
spin_unlock_irqrestore(&pci_poke_lock, flags); } void pci_config_read32(u32 *addr, u32 *ret) { unsigned long flags; u32 dword; spin_lock_irqsave(&pci_poke_lock, flags); pci_poke_cpu = smp_processor_id(); pci_poke_in_progress = 1; pci_poke_faulted = 0; __asm__ __volatile__("membar #Sync\n\t" "lduwa [%1] %2, %0\n\t" "membar #Sync" : "=r" (dword) : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) : "memory"); pci_poke_in_progress = 0; pci_poke_cpu = -1; if (!pci_poke_faulted) *ret = dword; spin_unlock_irqrestore(&pci_poke_lock, flags); } void pci_config_write8(u8 *addr, u8 val) { unsigned long flags; spin_lock_irqsave(&pci_poke_lock, flags); pci_poke_cpu = smp_processor_id(); pci_poke_in_progress = 1; pci_poke_faulted = 0; __asm__ __volatile__("membar #Sync\n\t" "stba %0, [%1] %2\n\t" "membar #Sync" : /* no outputs */ : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) : "memory"); pci_poke_in_progress = 0; pci_poke_cpu = -1; spin_unlock_irqrestore(&pci_poke_lock, flags); } void pci_config_write16(u16 *addr, u16 val) { unsigned long flags; spin_lock_irqsave(&pci_poke_lock, flags); pci_poke_cpu = smp_processor_id(); pci_poke_in_progress = 1; pci_poke_faulted = 0; __asm__ __volatile__("membar #Sync\n\t" "stha %0, [%1] %2\n\t" "membar #Sync" : /* no outputs */ : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) : "memory"); pci_poke_in_progress = 0; pci_poke_cpu = -1; spin_unlock_irqrestore(&pci_poke_lock, flags); } void pci_config_write32(u32 *addr, u32 val) { unsigned long flags; spin_lock_irqsave(&pci_poke_lock, flags); pci_poke_cpu = smp_processor_id(); pci_poke_in_progress = 1; pci_poke_faulted = 0; __asm__ __volatile__("membar #Sync\n\t" "stwa %0, [%1] %2\n\t" "membar #Sync" : /* no outputs */ : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) : "memory"); pci_poke_in_progress = 0; pci_poke_cpu = -1; spin_unlock_irqrestore(&pci_poke_lock, flags); } static int ofpci_verbose; static int __init ofpci_debug(char *str) { int val = 0; get_option(&str, &val); if (val) 
ofpci_verbose = 1; return 1; } __setup("ofpci_debug=", ofpci_debug); static unsigned long pci_parse_of_flags(u32 addr0) { unsigned long flags = 0; if (addr0 & 0x02000000) { flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY; flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64; flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M; if (addr0 & 0x40000000) flags |= IORESOURCE_PREFETCH | PCI_BASE_ADDRESS_MEM_PREFETCH; } else if (addr0 & 0x01000000) flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO; return flags; } /* The of_device layer has translated all of the assigned-address properties * into physical address resources, we only have to figure out the register * mapping. */ static void pci_parse_of_addrs(struct platform_device *op, struct device_node *node, struct pci_dev *dev) { struct resource *op_res; const u32 *addrs; int proplen; addrs = of_get_property(node, "assigned-addresses", &proplen); if (!addrs) return; if (ofpci_verbose) printk(" parse addresses (%d bytes) @ %p\n", proplen, addrs); op_res = &op->resource[0]; for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) { struct resource *res; unsigned long flags; int i; flags = pci_parse_of_flags(addrs[0]); if (!flags) continue; i = addrs[0] & 0xff; if (ofpci_verbose) printk(" start: %llx, end: %llx, i: %x\n", op_res->start, op_res->end, i); if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; } else if (i == dev->rom_base_reg) { res = &dev->resource[PCI_ROM_RESOURCE]; flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE; } else { printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i); continue; } res->start = op_res->start; res->end = op_res->end; res->flags = flags; res->name = pci_name(dev); } } static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, struct device_node *node, struct pci_bus *bus, int devfn) { struct dev_archdata *sd; struct pci_slot *slot; struct platform_device *op; struct pci_dev *dev; const char *type; u32 class; 
dev = alloc_pci_dev(); if (!dev) return NULL; sd = &dev->dev.archdata; sd->iommu = pbm->iommu; sd->stc = &pbm->stc; sd->host_controller = pbm; sd->op = op = of_find_device_by_node(node); sd->numa_node = pbm->numa_node; sd = &op->dev.archdata; sd->iommu = pbm->iommu; sd->stc = &pbm->stc; sd->numa_node = pbm->numa_node; if (!strcmp(node->name, "ebus")) of_propagate_archdata(op); type = of_get_property(node, "device_type", NULL); if (type == NULL) type = ""; if (ofpci_verbose) printk(" create device, devfn: %x, type: %s\n", devfn, type); dev->bus = bus; dev->sysdata = node; dev->dev.parent = bus->bridge; dev->dev.bus = &pci_bus_type; dev->dev.of_node = node; dev->devfn = devfn; dev->multifunction = 0; /* maybe a lie? */ set_pcie_port_type(dev); list_for_each_entry(slot, &dev->bus->slots, list) if (PCI_SLOT(dev->devfn) == slot->number) dev->slot = slot; dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff); dev->device = of_getintprop_default(node, "device-id", 0xffff); dev->subsystem_vendor = of_getintprop_default(node, "subsystem-vendor-id", 0); dev->subsystem_device = of_getintprop_default(node, "subsystem-id", 0); dev->cfg_size = pci_cfg_space_size(dev); /* We can't actually use the firmware value, we have * to read what is in the register right now. One * reason is that in the case of IDE interfaces the * firmware can sample the value before the the IDE * interface is programmed into native mode. */ pci_read_config_dword(dev, PCI_CLASS_REVISION, &class); dev->class = class >> 8; dev->revision = class & 0xff; dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus), dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); if (ofpci_verbose) printk(" class: 0x%x device name: %s\n", dev->class, pci_name(dev)); /* I have seen IDE devices which will not respond to * the bmdma simplex check reads if bus mastering is * disabled. 
*/ if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) pci_set_master(dev); dev->current_state = 4; /* unknown power state */ dev->error_state = pci_channel_io_normal; dev->dma_mask = 0xffffffff; if (!strcmp(node->name, "pci")) { /* a PCI-PCI bridge */ dev->hdr_type = PCI_HEADER_TYPE_BRIDGE; dev->rom_base_reg = PCI_ROM_ADDRESS1; } else if (!strcmp(type, "cardbus")) { dev->hdr_type = PCI_HEADER_TYPE_CARDBUS; } else { dev->hdr_type = PCI_HEADER_TYPE_NORMAL; dev->rom_base_reg = PCI_ROM_ADDRESS; dev->irq = sd->op->archdata.irqs[0]; if (dev->irq == 0xffffffff) dev->irq = PCI_IRQ_NONE; } pci_parse_of_addrs(sd->op, node, dev); if (ofpci_verbose) printk(" adding to system ...\n"); pci_device_add(dev, bus); return dev; } static void __devinit apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p) { u32 idx, first, last; first = 8; last = 0; for (idx = 0; idx < 8; idx++) { if ((map & (1 << idx)) != 0) { if (first > idx) first = idx; if (last < idx) last = idx; } } *first_p = first; *last_p = last; } static void pci_resource_adjust(struct resource *res, struct resource *root) { res->start += root->start; res->end += root->start; } /* For PCI bus devices which lack a 'ranges' property we interrogate * the config space values to set the resources, just like the generic * Linux PCI probing code does. 
*/ static void __devinit pci_cfg_fake_ranges(struct pci_dev *dev, struct pci_bus *bus, struct pci_pbm_info *pbm) { struct resource *res; u8 io_base_lo, io_limit_lo; u16 mem_base_lo, mem_limit_lo; unsigned long base, limit; pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo); base = (io_base_lo & PCI_IO_RANGE_MASK) << 8; limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8; if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) { u16 io_base_hi, io_limit_hi; pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi); pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi); base |= (io_base_hi << 16); limit |= (io_limit_hi << 16); } res = bus->resource[0]; if (base <= limit) { res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO; if (!res->start) res->start = base; if (!res->end) res->end = limit + 0xfff; pci_resource_adjust(res, &pbm->io_space); } pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo); pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo); base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16; limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16; res = bus->resource[1]; if (base <= limit) { res->flags = ((mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM); res->start = base; res->end = limit + 0xfffff; pci_resource_adjust(res, &pbm->mem_space); } pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo); base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16; limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16; if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { u32 mem_base_hi, mem_limit_hi; pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi); pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi); /* * Some bridges set the base > limit by default, and some * (broken) BIOSes do not initialize them. 
If we find * this, just assume they are not being used. */ if (mem_base_hi <= mem_limit_hi) { base |= ((long) mem_base_hi) << 32; limit |= ((long) mem_limit_hi) << 32; } } res = bus->resource[2]; if (base <= limit) { res->flags = ((mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH); res->start = base; res->end = limit + 0xfffff; pci_resource_adjust(res, &pbm->mem_space); } } /* Cook up fake bus resources for SUNW,simba PCI bridges which lack * a proper 'ranges' property. */ static void __devinit apb_fake_ranges(struct pci_dev *dev, struct pci_bus *bus, struct pci_pbm_info *pbm) { struct resource *res; u32 first, last; u8 map; pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map); apb_calc_first_last(map, &first, &last); res = bus->resource[0]; res->start = (first << 21); res->end = (last << 21) + ((1 << 21) - 1); res->flags = IORESOURCE_IO; pci_resource_adjust(res, &pbm->io_space); pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map); apb_calc_first_last(map, &first, &last); res = bus->resource[1]; res->start = (first << 21); res->end = (last << 21) + ((1 << 21) - 1); res->flags = IORESOURCE_MEM; pci_resource_adjust(res, &pbm->mem_space); } static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm, struct device_node *node, struct pci_bus *bus); #define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1]) static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm, struct device_node *node, struct pci_dev *dev) { struct pci_bus *bus; const u32 *busrange, *ranges; int len, i, simba; struct resource *res; unsigned int flags; u64 size; if (ofpci_verbose) printk("of_scan_pci_bridge(%s)\n", node->full_name); /* parse bus-range property */ busrange = of_get_property(node, "bus-range", &len); if (busrange == NULL || len != 8) { printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n", node->full_name); return; } ranges = of_get_property(node, "ranges", &len); simba = 0; if (ranges == NULL) { const char *model = 
of_get_property(node, "model", NULL); if (model && !strcmp(model, "SUNW,simba")) simba = 1; } bus = pci_add_new_bus(dev->bus, dev, busrange[0]); if (!bus) { printk(KERN_ERR "Failed to create pci bus for %s\n", node->full_name); return; } bus->primary = dev->bus->number; bus->subordinate = busrange[1]; bus->bridge_ctl = 0; /* parse ranges property, or cook one up by hand for Simba */ /* PCI #address-cells == 3 and #size-cells == 2 always */ res = &dev->resource[PCI_BRIDGE_RESOURCES]; for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) { res->flags = 0; bus->resource[i] = res; ++res; } if (simba) { apb_fake_ranges(dev, bus, pbm); goto after_ranges; } else if (ranges == NULL) { pci_cfg_fake_ranges(dev, bus, pbm); goto after_ranges; } i = 1; for (; len >= 32; len -= 32, ranges += 8) { struct resource *root; flags = pci_parse_of_flags(ranges[0]); size = GET_64BIT(ranges, 6); if (flags == 0 || size == 0) continue; if (flags & IORESOURCE_IO) { res = bus->resource[0]; if (res->flags) { printk(KERN_ERR "PCI: ignoring extra I/O range" " for bridge %s\n", node->full_name); continue; } root = &pbm->io_space; } else { if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) { printk(KERN_ERR "PCI: too many memory ranges" " for bridge %s\n", node->full_name); continue; } res = bus->resource[i]; ++i; root = &pbm->mem_space; } res->start = GET_64BIT(ranges, 1); res->end = res->start + size - 1; res->flags = flags; /* Another way to implement this would be to add an of_device * layer routine that can calculate a resource for a given * range property value in a PCI device. 
*/ pci_resource_adjust(res, root); } after_ranges: sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), bus->number); if (ofpci_verbose) printk(" bus name: %s\n", bus->name); pci_of_scan_bus(pbm, node, bus); } static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm, struct device_node *node, struct pci_bus *bus) { struct device_node *child; const u32 *reg; int reglen, devfn, prev_devfn; struct pci_dev *dev; if (ofpci_verbose) printk("PCI: scan_bus[%s] bus no %d\n", node->full_name, bus->number); child = NULL; prev_devfn = -1; while ((child = of_get_next_child(node, child)) != NULL) { if (ofpci_verbose) printk(" * %s\n", child->full_name); reg = of_get_property(child, "reg", &reglen); if (reg == NULL || reglen < 20) continue; devfn = (reg[0] >> 8) & 0xff; /* This is a workaround for some device trees * which list PCI devices twice. On the V100 * for example, device number 3 is listed twice. * Once as "pm" and once again as "lomp". */ if (devfn == prev_devfn) continue; prev_devfn = devfn; /* create a new pci_dev for this device */ dev = of_create_pci_dev(pbm, child, bus, devfn); if (!dev) continue; if (ofpci_verbose) printk("PCI: dev header type: %x\n", dev->hdr_type); if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) of_scan_pci_bridge(pbm, child, dev); } } static ssize_t show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf) { struct pci_dev *pdev; struct device_node *dp; pdev = to_pci_dev(dev); dp = pdev->dev.of_node; return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name); } static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL); static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus) { struct pci_dev *dev; struct pci_bus *child_bus; int err; list_for_each_entry(dev, &bus->devices, bus_list) { /* we don't really care if we can create this file or * not, but we need to assign the result of the call * or the world will fall under alien 
invasion and * everybody will be frozen on a spaceship ready to be * eaten on alpha centauri by some green and jelly * humanoid. */ err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr); (void) err; } list_for_each_entry(child_bus, &bus->children, node) pci_bus_register_of_sysfs(child_bus); } struct pci_bus * __devinit pci_scan_one_pbm(struct pci_pbm_info *pbm, struct device *parent) { struct device_node *node = pbm->op->dev.of_node; struct pci_bus *bus; printk("PCI: Scanning PBM %s\n", node->full_name); bus = pci_create_bus(parent, pbm->pci_first_busno, pbm->pci_ops, pbm); if (!bus) { printk(KERN_ERR "Failed to create bus for %s\n", node->full_name); return NULL; } bus->secondary = pbm->pci_first_busno; bus->subordinate = pbm->pci_last_busno; bus->resource[0] = &pbm->io_space; bus->resource[1] = &pbm->mem_space; pci_of_scan_bus(pbm, node, bus); pci_bus_add_devices(bus); pci_bus_register_of_sysfs(bus); return bus; } void __devinit pcibios_fixup_bus(struct pci_bus *pbus) { struct pci_pbm_info *pbm = pbus->sysdata; /* Generic PCI bus probing sets these to point at * &io{port,mem}_resouce which is wrong for us. */ pbus->resource[0] = &pbm->io_space; pbus->resource[1] = &pbm->mem_space; } void pcibios_update_irq(struct pci_dev *pdev, int irq) { } resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { return res->start; } int pcibios_enable_device(struct pci_dev *dev, int mask) { u16 cmd, oldcmd; int i; pci_read_config_word(dev, PCI_COMMAND, &cmd); oldcmd = cmd; for (i = 0; i < PCI_NUM_RESOURCES; i++) { struct resource *res = &dev->resource[i]; /* Only set up the requested stuff */ if (!(mask & (1<<i))) continue; if (res->flags & IORESOURCE_IO) cmd |= PCI_COMMAND_IO; if (res->flags & IORESOURCE_MEM) cmd |= PCI_COMMAND_MEMORY; } if (cmd != oldcmd) { printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n", pci_name(dev), cmd); /* Enable the appropriate bits in the PCI command register. 
*/ pci_write_config_word(dev, PCI_COMMAND, cmd); } return 0; } void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region, struct resource *res) { struct pci_pbm_info *pbm = pdev->bus->sysdata; struct resource zero_res, *root; zero_res.start = 0; zero_res.end = 0; zero_res.flags = res->flags; if (res->flags & IORESOURCE_IO) root = &pbm->io_space; else root = &pbm->mem_space; pci_resource_adjust(&zero_res, root); region->start = res->start - zero_res.start; region->end = res->end - zero_res.start; } EXPORT_SYMBOL(pcibios_resource_to_bus); void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res, struct pci_bus_region *region) { struct pci_pbm_info *pbm = pdev->bus->sysdata; struct resource *root; res->start = region->start; res->end = region->end; if (res->flags & IORESOURCE_IO) root = &pbm->io_space; else root = &pbm->mem_space; pci_resource_adjust(res, root); } EXPORT_SYMBOL(pcibios_bus_to_resource); char * __devinit pcibios_setup(char *str) { return str; } /* Platform support for /proc/bus/pci/X/Y mmap()s. */ /* If the user uses a host-bridge as the PCI device, he may use * this to perform a raw mmap() of the I/O or MEM space behind * that controller. * * This can be useful for execution of x86 PCI bios initialization code * on a PCI card, like the xfree86 int10 stuff does. */ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state) { struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; unsigned long space_size, user_offset, user_size; if (mmap_state == pci_mmap_io) { space_size = (pbm->io_space.end - pbm->io_space.start) + 1; } else { space_size = (pbm->mem_space.end - pbm->mem_space.start) + 1; } /* Make sure the request is in range. 
*/ user_offset = vma->vm_pgoff << PAGE_SHIFT; user_size = vma->vm_end - vma->vm_start; if (user_offset >= space_size || (user_offset + user_size) > space_size) return -EINVAL; if (mmap_state == pci_mmap_io) { vma->vm_pgoff = (pbm->io_space.start + user_offset) >> PAGE_SHIFT; } else { vma->vm_pgoff = (pbm->mem_space.start + user_offset) >> PAGE_SHIFT; } return 0; } /* Adjust vm_pgoff of VMA such that it is the physical page offset * corresponding to the 32-bit pci bus offset for DEV requested by the user. * * Basically, the user finds the base address for his device which he wishes * to mmap. They read the 32-bit value from the config space base register, * add whatever PAGE_SIZE multiple offset they wish, and feed this into the * offset parameter of mmap on /proc/bus/pci/XXX for that device. * * Returns negative error code on failure, zero on success. */ static int __pci_mmap_make_offset(struct pci_dev *pdev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state) { unsigned long user_paddr, user_size; int i, err; /* First compute the physical address in vma->vm_pgoff, * making sure the user offset is within range in the * appropriate PCI space. */ err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state); if (err) return err; /* If this is a mapping on a host bridge, any address * is OK. */ if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST) return err; /* Otherwise make sure it's in the range for one of the * device's resources. */ user_paddr = vma->vm_pgoff << PAGE_SHIFT; user_size = vma->vm_end - vma->vm_start; for (i = 0; i <= PCI_ROM_RESOURCE; i++) { struct resource *rp = &pdev->resource[i]; resource_size_t aligned_end; /* Active? */ if (!rp->flags) continue; /* Same type? */ if (i == PCI_ROM_RESOURCE) { if (mmap_state != pci_mmap_mem) continue; } else { if ((mmap_state == pci_mmap_io && (rp->flags & IORESOURCE_IO) == 0) || (mmap_state == pci_mmap_mem && (rp->flags & IORESOURCE_MEM) == 0)) continue; } /* Align the resource end to the next page address. 
* PAGE_SIZE intentionally added instead of (PAGE_SIZE - 1), * because actually we need the address of the next byte * after rp->end. */ aligned_end = (rp->end + PAGE_SIZE) & PAGE_MASK; if ((rp->start <= user_paddr) && (user_paddr + user_size) <= aligned_end) break; } if (i > PCI_ROM_RESOURCE) return -EINVAL; return 0; } /* Set vm_flags of VMA, as appropriate for this architecture, for a pci device * mapping. */ static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state) { vma->vm_flags |= (VM_IO | VM_RESERVED); } /* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci * device mapping. */ static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state) { /* Our io_remap_pfn_range takes care of this, do nothing. */ } /* Perform the actual remap of the pages for a PCI device mapping, as appropriate * for this architecture. The region in the process to map is described by vm_start * and vm_end members of VMA, the base physical address is found in vm_pgoff. * The pci device structure is provided so that architectures may make mapping * decisions on a per-device or per-bus basis. * * Returns a negative error code on failure, zero on success. 
*/ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine) { int ret; ret = __pci_mmap_make_offset(dev, vma, mmap_state); if (ret < 0) return ret; __pci_mmap_set_flags(dev, vma, mmap_state); __pci_mmap_set_pgprot(dev, vma, mmap_state); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot); if (ret) return ret; return 0; } #ifdef CONFIG_NUMA int pcibus_to_node(struct pci_bus *pbus) { struct pci_pbm_info *pbm = pbus->sysdata; return pbm->numa_node; } EXPORT_SYMBOL(pcibus_to_node); #endif /* Return the domain number for this pci bus */ int pci_domain_nr(struct pci_bus *pbus) { struct pci_pbm_info *pbm = pbus->sysdata; int ret; if (!pbm) { ret = -ENXIO; } else { ret = pbm->index; } return ret; } EXPORT_SYMBOL(pci_domain_nr); #ifdef CONFIG_PCI_MSI int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) { struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; unsigned int irq; if (!pbm->setup_msi_irq) return -EINVAL; return pbm->setup_msi_irq(&irq, pdev, desc); } void arch_teardown_msi_irq(unsigned int irq) { struct msi_desc *entry = irq_get_msi_desc(irq); struct pci_dev *pdev = entry->dev; struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; if (pbm->teardown_msi_irq) pbm->teardown_msi_irq(irq, pdev); } #endif /* !(CONFIG_PCI_MSI) */ struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) { return pdev->dev.of_node; } EXPORT_SYMBOL(pci_device_to_OF_node); static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) { struct pci_dev *ali_isa_bridge; u8 val; /* ALI sound chips generate 31-bits of DMA, a special register * determines what bit 31 is emitted as. 
*/ ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); pci_read_config_byte(ali_isa_bridge, 0x7e, &val); if (set_bit) val |= 0x01; else val &= ~0x01; pci_write_config_byte(ali_isa_bridge, 0x7e, val); pci_dev_put(ali_isa_bridge); } int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask) { u64 dma_addr_mask; if (pdev == NULL) { dma_addr_mask = 0xffffffff; } else { struct iommu *iommu = pdev->dev.archdata.iommu; dma_addr_mask = iommu->dma_addr_mask; if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == PCI_DEVICE_ID_AL_M5451 && device_mask == 0x7fffffff) { ali_sound_dma_hack(pdev, (dma_addr_mask & 0x80000000) != 0); return 1; } } if (device_mask >= (1UL << 32UL)) return 0; return (device_mask & dma_addr_mask) == dma_addr_mask; } void pci_resource_to_user(const struct pci_dev *pdev, int bar, const struct resource *rp, resource_size_t *start, resource_size_t *end) { struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; unsigned long offset; if (rp->flags & IORESOURCE_IO) offset = pbm->io_space.start; else offset = pbm->mem_space.start; *start = rp->start - offset; *end = rp->end - offset; } static int __init pcibios_init(void) { pci_dfl_cache_line_size = 64 >> 2; return 0; } subsys_initcall(pcibios_init); #ifdef CONFIG_SYSFS static void __devinit pci_bus_slot_names(struct device_node *node, struct pci_bus *bus) { const struct pci_slot_names { u32 slot_mask; char names[0]; } *prop; const char *sp; int len, i; u32 mask; prop = of_get_property(node, "slot-names", &len); if (!prop) return; mask = prop->slot_mask; sp = prop->names; if (ofpci_verbose) printk("PCI: Making slots for [%s] mask[0x%02x]\n", node->full_name, mask); i = 0; while (mask) { struct pci_slot *pci_slot; u32 this_bit = 1 << i; if (!(mask & this_bit)) { i++; continue; } if (ofpci_verbose) printk("PCI: Making slot [%s]\n", sp); pci_slot = pci_create_slot(bus, i, sp, NULL); if (IS_ERR(pci_slot)) printk(KERN_ERR "PCI: pci_create_slot returned %ld\n", 
PTR_ERR(pci_slot)); sp += strlen(sp) + 1; mask &= ~this_bit; i++; } } static int __init of_pci_slot_init(void) { struct pci_bus *pbus = NULL; while ((pbus = pci_find_next_bus(pbus)) != NULL) { struct device_node *node; if (pbus->self) { /* PCI->PCI bridge */ node = pbus->self->dev.of_node; } else { struct pci_pbm_info *pbm = pbus->sysdata; /* Host PCI controller */ node = pbm->op->dev.of_node; } pci_bus_slot_names(node, pbus); } return 0; } module_init(of_pci_slot_init); #endif
gpl-2.0
Jolocotroco/android_kernel_samsung_smdkv210
drivers/media/video/cx231xx/cx231xx-vbi.c
3097
18093
/* cx231xx_vbi.c - driver for Conexant Cx23100/101/102 USB video capture devices Copyright (C) 2008 <srinivasa.deevi at conexant dot com> Based on cx88 driver This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/bitmap.h> #include <linux/usb.h> #include <linux/i2c.h> #include <linux/mm.h> #include <linux/mutex.h> #include <linux/slab.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-chip-ident.h> #include <media/msp3400.h> #include <media/tuner.h> #include "cx231xx.h" #include "cx231xx-vbi.h" static inline void print_err_status(struct cx231xx *dev, int packet, int status) { char *errmsg = "Unknown"; switch (status) { case -ENOENT: errmsg = "unlinked synchronuously"; break; case -ECONNRESET: errmsg = "unlinked asynchronuously"; break; case -ENOSR: errmsg = "Buffer error (overrun)"; break; case -EPIPE: errmsg = "Stalled (device not responding)"; break; case -EOVERFLOW: errmsg = "Babble (bad cable?)"; break; case -EPROTO: errmsg = "Bit-stuff error (bad cable?)"; break; case -EILSEQ: errmsg = "CRC/Timeout (could be anything)"; break; case -ETIME: errmsg = "Device does not respond"; break; } if (packet < 0) { cx231xx_err(DRIVER_NAME "URB status %d [%s].\n", status, errmsg); } else { cx231xx_err(DRIVER_NAME "URB packet %d, 
status %d [%s].\n", packet, status, errmsg); } } /* * Controls the isoc copy of each urb packet */ static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb) { struct cx231xx_buffer *buf; struct cx231xx_dmaqueue *dma_q = urb->context; int rc = 1; unsigned char *p_buffer; u32 bytes_parsed = 0, buffer_size = 0; u8 sav_eav = 0; if (!dev) return 0; if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED)) return 0; if (urb->status < 0) { print_err_status(dev, -1, urb->status); if (urb->status == -ENOENT) return 0; } buf = dev->vbi_mode.bulk_ctl.buf; /* get buffer pointer and length */ p_buffer = urb->transfer_buffer; buffer_size = urb->actual_length; if (buffer_size > 0) { bytes_parsed = 0; if (dma_q->is_partial_line) { /* Handle the case where we were working on a partial line */ sav_eav = dma_q->last_sav; } else { /* Check for a SAV/EAV overlapping the buffer boundary */ sav_eav = cx231xx_find_boundary_SAV_EAV(p_buffer, dma_q->partial_buf, &bytes_parsed); } sav_eav &= 0xF0; /* Get the first line if we have some portion of an SAV/EAV from the last buffer or a partial line */ if (sav_eav) { bytes_parsed += cx231xx_get_vbi_line(dev, dma_q, sav_eav, /* SAV/EAV */ p_buffer + bytes_parsed, /* p_buffer */ buffer_size - bytes_parsed); /* buffer size */ } /* Now parse data that is completely in this buffer */ dma_q->is_partial_line = 0; while (bytes_parsed < buffer_size) { u32 bytes_used = 0; sav_eav = cx231xx_find_next_SAV_EAV( p_buffer + bytes_parsed, /* p_buffer */ buffer_size - bytes_parsed, /* buffer size */ &bytes_used); /* bytes used to get SAV/EAV */ bytes_parsed += bytes_used; sav_eav &= 0xF0; if (sav_eav && (bytes_parsed < buffer_size)) { bytes_parsed += cx231xx_get_vbi_line(dev, dma_q, sav_eav, /* SAV/EAV */ p_buffer+bytes_parsed, /* p_buffer */ buffer_size-bytes_parsed);/*buf size*/ } } /* Save the last four bytes of the buffer so we can check the buffer boundary condition next time */ memcpy(dma_q->partial_buf, p_buffer + 
buffer_size - 4, 4); bytes_parsed = 0; } return rc; } /* ------------------------------------------------------------------ Vbi buf operations ------------------------------------------------------------------*/ static int vbi_buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size) { struct cx231xx_fh *fh = vq->priv_data; struct cx231xx *dev = fh->dev; u32 height = 0; height = ((dev->norm & V4L2_STD_625_50) ? PAL_VBI_LINES : NTSC_VBI_LINES); *size = (dev->width * height * 2 * 2); if (0 == *count) *count = CX231XX_DEF_VBI_BUF; if (*count < CX231XX_MIN_BUF) *count = CX231XX_MIN_BUF; return 0; } /* This is called *without* dev->slock held; please keep it that way */ static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf) { struct cx231xx_fh *fh = vq->priv_data; struct cx231xx *dev = fh->dev; unsigned long flags = 0; if (in_interrupt()) BUG(); /* We used to wait for the buffer to finish here, but this didn't work because, as we were keeping the state as VIDEOBUF_QUEUED, videobuf_queue_cancel marked it as finished for us. (Also, it could wedge forever if the hardware was misconfigured.) This should be safe; by the time we get here, the buffer isn't queued anymore. If we ever start marking the buffers as VIDEOBUF_ACTIVE, it won't be, though. */ spin_lock_irqsave(&dev->vbi_mode.slock, flags); if (dev->vbi_mode.bulk_ctl.buf == buf) dev->vbi_mode.bulk_ctl.buf = NULL; spin_unlock_irqrestore(&dev->vbi_mode.slock, flags); videobuf_vmalloc_free(&buf->vb); buf->vb.state = VIDEOBUF_NEEDS_INIT; } static int vbi_buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb, enum v4l2_field field) { struct cx231xx_fh *fh = vq->priv_data; struct cx231xx_buffer *buf = container_of(vb, struct cx231xx_buffer, vb); struct cx231xx *dev = fh->dev; int rc = 0, urb_init = 0; u32 height = 0; height = ((dev->norm & V4L2_STD_625_50) ? 
PAL_VBI_LINES : NTSC_VBI_LINES); buf->vb.size = ((dev->width << 1) * height * 2); if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size) return -EINVAL; buf->vb.width = dev->width; buf->vb.height = height; buf->vb.field = field; buf->vb.field = V4L2_FIELD_SEQ_TB; if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { rc = videobuf_iolock(vq, &buf->vb, NULL); if (rc < 0) goto fail; } if (!dev->vbi_mode.bulk_ctl.num_bufs) urb_init = 1; if (urb_init) { rc = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS, CX231XX_NUM_VBI_BUFS, dev->vbi_mode.alt_max_pkt_size[0], cx231xx_isoc_vbi_copy); if (rc < 0) goto fail; } buf->vb.state = VIDEOBUF_PREPARED; return 0; fail: free_buffer(vq, buf); return rc; } static void vbi_buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct cx231xx_buffer *buf = container_of(vb, struct cx231xx_buffer, vb); struct cx231xx_fh *fh = vq->priv_data; struct cx231xx *dev = fh->dev; struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq; buf->vb.state = VIDEOBUF_QUEUED; list_add_tail(&buf->vb.queue, &vidq->active); } static void vbi_buffer_release(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct cx231xx_buffer *buf = container_of(vb, struct cx231xx_buffer, vb); free_buffer(vq, buf); } struct videobuf_queue_ops cx231xx_vbi_qops = { .buf_setup = vbi_buffer_setup, .buf_prepare = vbi_buffer_prepare, .buf_queue = vbi_buffer_queue, .buf_release = vbi_buffer_release, }; /* ------------------------------------------------------------------ URB control ------------------------------------------------------------------*/ /* * IRQ callback, called by URB callback */ static void cx231xx_irq_vbi_callback(struct urb *urb) { struct cx231xx_dmaqueue *dma_q = urb->context; struct cx231xx_video_mode *vmode = container_of(dma_q, struct cx231xx_video_mode, vidq); struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode); int rc; switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ 
case -ENOENT: case -ESHUTDOWN: return; default: /* error */ cx231xx_err(DRIVER_NAME "urb completition error %d.\n", urb->status); break; } /* Copy data from URB */ spin_lock(&dev->vbi_mode.slock); rc = dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb); spin_unlock(&dev->vbi_mode.slock); /* Reset status */ urb->status = 0; urb->status = usb_submit_urb(urb, GFP_ATOMIC); if (urb->status) { cx231xx_err(DRIVER_NAME "urb resubmit failed (error=%i)\n", urb->status); } } /* * Stop and Deallocate URBs */ void cx231xx_uninit_vbi_isoc(struct cx231xx *dev) { struct urb *urb; int i; cx231xx_info(DRIVER_NAME "cx231xx: called cx231xx_uninit_vbi_isoc\n"); dev->vbi_mode.bulk_ctl.nfields = -1; for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) { urb = dev->vbi_mode.bulk_ctl.urb[i]; if (urb) { if (!irqs_disabled()) usb_kill_urb(urb); else usb_unlink_urb(urb); if (dev->vbi_mode.bulk_ctl.transfer_buffer[i]) { kfree(dev->vbi_mode.bulk_ctl. transfer_buffer[i]); dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL; } usb_free_urb(urb); dev->vbi_mode.bulk_ctl.urb[i] = NULL; } dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL; } kfree(dev->vbi_mode.bulk_ctl.urb); kfree(dev->vbi_mode.bulk_ctl.transfer_buffer); dev->vbi_mode.bulk_ctl.urb = NULL; dev->vbi_mode.bulk_ctl.transfer_buffer = NULL; dev->vbi_mode.bulk_ctl.num_bufs = 0; cx231xx_capture_start(dev, 0, Vbi); } EXPORT_SYMBOL_GPL(cx231xx_uninit_vbi_isoc); /* * Allocate URBs and start IRQ */ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets, int num_bufs, int max_pkt_size, int (*bulk_copy) (struct cx231xx *dev, struct urb *urb)) { struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq; int i; int sb_size, pipe; struct urb *urb; int rc; cx231xx_info(DRIVER_NAME "cx231xx: called cx231xx_prepare_isoc\n"); /* De-allocates all pending stuff */ cx231xx_uninit_vbi_isoc(dev); /* clear if any halt */ usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr)); dev->vbi_mode.bulk_ctl.bulk_copy = bulk_copy; 
dev->vbi_mode.bulk_ctl.num_bufs = num_bufs; dma_q->pos = 0; dma_q->is_partial_line = 0; dma_q->last_sav = 0; dma_q->current_field = -1; dma_q->bytes_left_in_line = dev->width << 1; dma_q->lines_per_field = ((dev->norm & V4L2_STD_625_50) ? PAL_VBI_LINES : NTSC_VBI_LINES); dma_q->lines_completed = 0; for (i = 0; i < 8; i++) dma_q->partial_buf[i] = 0; dev->vbi_mode.bulk_ctl.urb = kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL); if (!dev->vbi_mode.bulk_ctl.urb) { cx231xx_errdev("cannot alloc memory for usb buffers\n"); return -ENOMEM; } dev->vbi_mode.bulk_ctl.transfer_buffer = kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL); if (!dev->vbi_mode.bulk_ctl.transfer_buffer) { cx231xx_errdev("cannot allocate memory for usbtransfer\n"); kfree(dev->vbi_mode.bulk_ctl.urb); return -ENOMEM; } dev->vbi_mode.bulk_ctl.max_pkt_size = max_pkt_size; dev->vbi_mode.bulk_ctl.buf = NULL; sb_size = max_packets * dev->vbi_mode.bulk_ctl.max_pkt_size; /* allocate urbs and transfer buffers */ for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) { urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { cx231xx_err(DRIVER_NAME ": cannot alloc bulk_ctl.urb %i\n", i); cx231xx_uninit_vbi_isoc(dev); return -ENOMEM; } dev->vbi_mode.bulk_ctl.urb[i] = urb; urb->transfer_flags = 0; dev->vbi_mode.bulk_ctl.transfer_buffer[i] = kzalloc(sb_size, GFP_KERNEL); if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) { cx231xx_err(DRIVER_NAME ": unable to allocate %i bytes for transfer" " buffer %i%s\n", sb_size, i, in_interrupt() ? 
" while in int" : ""); cx231xx_uninit_vbi_isoc(dev); return -ENOMEM; } pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr); usb_fill_bulk_urb(urb, dev->udev, pipe, dev->vbi_mode.bulk_ctl.transfer_buffer[i], sb_size, cx231xx_irq_vbi_callback, dma_q); } init_waitqueue_head(&dma_q->wq); /* submit urbs and enables IRQ */ for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) { rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC); if (rc) { cx231xx_err(DRIVER_NAME ": submit of urb %i failed (error=%i)\n", i, rc); cx231xx_uninit_vbi_isoc(dev); return rc; } } cx231xx_capture_start(dev, 1, Vbi); return 0; } EXPORT_SYMBOL_GPL(cx231xx_init_vbi_isoc); u32 cx231xx_get_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q, u8 sav_eav, u8 *p_buffer, u32 buffer_size) { u32 bytes_copied = 0; int current_field = -1; switch (sav_eav) { case SAV_VBI_FIELD1: current_field = 1; break; case SAV_VBI_FIELD2: current_field = 2; break; default: break; } if (current_field < 0) return bytes_copied; dma_q->last_sav = sav_eav; bytes_copied = cx231xx_copy_vbi_line(dev, dma_q, p_buffer, buffer_size, current_field); return bytes_copied; } /* * Announces that a buffer were filled and request the next */ static inline void vbi_buffer_filled(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q, struct cx231xx_buffer *buf) { /* Advice that buffer was filled */ /* cx231xx_info(DRIVER_NAME "[%p/%d] wakeup\n", buf, buf->vb.i); */ buf->vb.state = VIDEOBUF_DONE; buf->vb.field_count++; do_gettimeofday(&buf->vb.ts); dev->vbi_mode.bulk_ctl.buf = NULL; list_del(&buf->vb.queue); wake_up(&buf->vb.done); } u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q, u8 *p_line, u32 length, int field_number) { u32 bytes_to_copy; struct cx231xx_buffer *buf; u32 _line_size = dev->width * 2; if (dma_q->current_field == -1) { /* Just starting up */ cx231xx_reset_vbi_buffer(dev, dma_q); } if (dma_q->current_field != field_number) dma_q->lines_completed = 0; /* get the buffer 
pointer */ buf = dev->vbi_mode.bulk_ctl.buf; /* Remember the field number for next time */ dma_q->current_field = field_number; bytes_to_copy = dma_q->bytes_left_in_line; if (bytes_to_copy > length) bytes_to_copy = length; if (dma_q->lines_completed >= dma_q->lines_per_field) { dma_q->bytes_left_in_line -= bytes_to_copy; dma_q->is_partial_line = (dma_q->bytes_left_in_line == 0) ? 0 : 1; return 0; } dma_q->is_partial_line = 1; /* If we don't have a buffer, just return the number of bytes we would have copied if we had a buffer. */ if (!buf) { dma_q->bytes_left_in_line -= bytes_to_copy; dma_q->is_partial_line = (dma_q->bytes_left_in_line == 0) ? 0 : 1; return bytes_to_copy; } /* copy the data to video buffer */ cx231xx_do_vbi_copy(dev, dma_q, p_line, bytes_to_copy); dma_q->pos += bytes_to_copy; dma_q->bytes_left_in_line -= bytes_to_copy; if (dma_q->bytes_left_in_line == 0) { dma_q->bytes_left_in_line = _line_size; dma_q->lines_completed++; dma_q->is_partial_line = 0; if (cx231xx_is_vbi_buffer_done(dev, dma_q) && buf) { vbi_buffer_filled(dev, dma_q, buf); dma_q->pos = 0; dma_q->lines_completed = 0; cx231xx_reset_vbi_buffer(dev, dma_q); } } return bytes_to_copy; } /* * video-buf generic routine to get the next available buffer */ static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q, struct cx231xx_buffer **buf) { struct cx231xx_video_mode *vmode = container_of(dma_q, struct cx231xx_video_mode, vidq); struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode); char *outp; if (list_empty(&dma_q->active)) { cx231xx_err(DRIVER_NAME ": No active queue to serve\n"); dev->vbi_mode.bulk_ctl.buf = NULL; *buf = NULL; return; } /* Get the next buffer */ *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, vb.queue); /* Cleans up buffer - Useful for testing for frame/URB loss */ outp = videobuf_to_vmalloc(&(*buf)->vb); memset(outp, 0, (*buf)->vb.size); dev->vbi_mode.bulk_ctl.buf = *buf; return; } void cx231xx_reset_vbi_buffer(struct cx231xx *dev, 
struct cx231xx_dmaqueue *dma_q) { struct cx231xx_buffer *buf; buf = dev->vbi_mode.bulk_ctl.buf; if (buf == NULL) { /* first try to get the buffer */ get_next_vbi_buf(dma_q, &buf); dma_q->pos = 0; dma_q->current_field = -1; } dma_q->bytes_left_in_line = dev->width << 1; dma_q->lines_completed = 0; } int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q, u8 *p_buffer, u32 bytes_to_copy) { u8 *p_out_buffer = NULL; u32 current_line_bytes_copied = 0; struct cx231xx_buffer *buf; u32 _line_size = dev->width << 1; void *startwrite; int offset, lencopy; buf = dev->vbi_mode.bulk_ctl.buf; if (buf == NULL) return -EINVAL; p_out_buffer = videobuf_to_vmalloc(&buf->vb); if (dma_q->bytes_left_in_line != _line_size) { current_line_bytes_copied = _line_size - dma_q->bytes_left_in_line; } offset = (dma_q->lines_completed * _line_size) + current_line_bytes_copied; if (dma_q->current_field == 2) { /* Populate the second half of the frame */ offset += (dev->width * 2 * dma_q->lines_per_field); } /* prepare destination address */ startwrite = p_out_buffer + offset; lencopy = dma_q->bytes_left_in_line > bytes_to_copy ? bytes_to_copy : dma_q->bytes_left_in_line; memcpy(startwrite, p_buffer, lencopy); return 0; } u8 cx231xx_is_vbi_buffer_done(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q) { u32 height = 0; height = ((dev->norm & V4L2_STD_625_50) ? PAL_VBI_LINES : NTSC_VBI_LINES); if (dma_q->lines_completed == height && dma_q->current_field == 2) return 1; else return 0; }
gpl-2.0
Neves4/DatKernel
net/mac80211/led.c
3097
8095
/* * Copyright 2006, Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* just for IFNAMSIZ */ #include <linux/if.h> #include <linux/slab.h> #include "led.h" void ieee80211_led_rx(struct ieee80211_local *local) { if (unlikely(!local->rx_led)) return; if (local->rx_led_counter++ % 2 == 0) led_trigger_event(local->rx_led, LED_OFF); else led_trigger_event(local->rx_led, LED_FULL); } /* q is 1 if a packet was enqueued, 0 if it has been transmitted */ void ieee80211_led_tx(struct ieee80211_local *local, int q) { if (unlikely(!local->tx_led)) return; /* not sure how this is supposed to work ... */ local->tx_led_counter += 2*q-1; if (local->tx_led_counter % 2 == 0) led_trigger_event(local->tx_led, LED_OFF); else led_trigger_event(local->tx_led, LED_FULL); } void ieee80211_led_assoc(struct ieee80211_local *local, bool associated) { if (unlikely(!local->assoc_led)) return; if (associated) led_trigger_event(local->assoc_led, LED_FULL); else led_trigger_event(local->assoc_led, LED_OFF); } void ieee80211_led_radio(struct ieee80211_local *local, bool enabled) { if (unlikely(!local->radio_led)) return; if (enabled) led_trigger_event(local->radio_led, LED_FULL); else led_trigger_event(local->radio_led, LED_OFF); } void ieee80211_led_names(struct ieee80211_local *local) { snprintf(local->rx_led_name, sizeof(local->rx_led_name), "%srx", wiphy_name(local->hw.wiphy)); snprintf(local->tx_led_name, sizeof(local->tx_led_name), "%stx", wiphy_name(local->hw.wiphy)); snprintf(local->assoc_led_name, sizeof(local->assoc_led_name), "%sassoc", wiphy_name(local->hw.wiphy)); snprintf(local->radio_led_name, sizeof(local->radio_led_name), "%sradio", wiphy_name(local->hw.wiphy)); } void ieee80211_led_init(struct ieee80211_local *local) { local->rx_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL); if (local->rx_led) 
{ local->rx_led->name = local->rx_led_name; if (led_trigger_register(local->rx_led)) { kfree(local->rx_led); local->rx_led = NULL; } } local->tx_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL); if (local->tx_led) { local->tx_led->name = local->tx_led_name; if (led_trigger_register(local->tx_led)) { kfree(local->tx_led); local->tx_led = NULL; } } local->assoc_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL); if (local->assoc_led) { local->assoc_led->name = local->assoc_led_name; if (led_trigger_register(local->assoc_led)) { kfree(local->assoc_led); local->assoc_led = NULL; } } local->radio_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL); if (local->radio_led) { local->radio_led->name = local->radio_led_name; if (led_trigger_register(local->radio_led)) { kfree(local->radio_led); local->radio_led = NULL; } } if (local->tpt_led_trigger) { if (led_trigger_register(&local->tpt_led_trigger->trig)) { kfree(local->tpt_led_trigger); local->tpt_led_trigger = NULL; } } } void ieee80211_led_exit(struct ieee80211_local *local) { if (local->radio_led) { led_trigger_unregister(local->radio_led); kfree(local->radio_led); } if (local->assoc_led) { led_trigger_unregister(local->assoc_led); kfree(local->assoc_led); } if (local->tx_led) { led_trigger_unregister(local->tx_led); kfree(local->tx_led); } if (local->rx_led) { led_trigger_unregister(local->rx_led); kfree(local->rx_led); } if (local->tpt_led_trigger) { led_trigger_unregister(&local->tpt_led_trigger->trig); kfree(local->tpt_led_trigger); } } char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); return local->radio_led_name; } EXPORT_SYMBOL(__ieee80211_get_radio_led_name); char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); return local->assoc_led_name; } EXPORT_SYMBOL(__ieee80211_get_assoc_led_name); char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw) { struct ieee80211_local *local = 
hw_to_local(hw); return local->tx_led_name; } EXPORT_SYMBOL(__ieee80211_get_tx_led_name); char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); return local->rx_led_name; } EXPORT_SYMBOL(__ieee80211_get_rx_led_name); static unsigned long tpt_trig_traffic(struct ieee80211_local *local, struct tpt_led_trigger *tpt_trig) { unsigned long traffic, delta; traffic = tpt_trig->tx_bytes + tpt_trig->rx_bytes; delta = traffic - tpt_trig->prev_traffic; tpt_trig->prev_traffic = traffic; return DIV_ROUND_UP(delta, 1024 / 8); } static void tpt_trig_timer(unsigned long data) { struct ieee80211_local *local = (void *)data; struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger; struct led_classdev *led_cdev; unsigned long on, off, tpt; int i; if (!tpt_trig->running) return; mod_timer(&tpt_trig->timer, round_jiffies(jiffies + HZ)); tpt = tpt_trig_traffic(local, tpt_trig); /* default to just solid on */ on = 1; off = 0; for (i = tpt_trig->blink_table_len - 1; i >= 0; i--) { if (tpt_trig->blink_table[i].throughput < 0 || tpt > tpt_trig->blink_table[i].throughput) { off = tpt_trig->blink_table[i].blink_time / 2; on = tpt_trig->blink_table[i].blink_time - off; break; } } read_lock(&tpt_trig->trig.leddev_list_lock); list_for_each_entry(led_cdev, &tpt_trig->trig.led_cdevs, trig_list) led_blink_set(led_cdev, &on, &off); read_unlock(&tpt_trig->trig.leddev_list_lock); } char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, unsigned int flags, const struct ieee80211_tpt_blink *blink_table, unsigned int blink_table_len) { struct ieee80211_local *local = hw_to_local(hw); struct tpt_led_trigger *tpt_trig; if (WARN_ON(local->tpt_led_trigger)) return NULL; tpt_trig = kzalloc(sizeof(struct tpt_led_trigger), GFP_KERNEL); if (!tpt_trig) return NULL; snprintf(tpt_trig->name, sizeof(tpt_trig->name), "%stpt", wiphy_name(local->hw.wiphy)); tpt_trig->trig.name = tpt_trig->name; tpt_trig->blink_table = blink_table; 
tpt_trig->blink_table_len = blink_table_len; tpt_trig->want = flags; setup_timer(&tpt_trig->timer, tpt_trig_timer, (unsigned long)local); local->tpt_led_trigger = tpt_trig; return tpt_trig->name; } EXPORT_SYMBOL(__ieee80211_create_tpt_led_trigger); static void ieee80211_start_tpt_led_trig(struct ieee80211_local *local) { struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger; if (tpt_trig->running) return; /* reset traffic */ tpt_trig_traffic(local, tpt_trig); tpt_trig->running = true; tpt_trig_timer((unsigned long)local); mod_timer(&tpt_trig->timer, round_jiffies(jiffies + HZ)); } static void ieee80211_stop_tpt_led_trig(struct ieee80211_local *local) { struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger; struct led_classdev *led_cdev; if (!tpt_trig->running) return; tpt_trig->running = false; del_timer_sync(&tpt_trig->timer); read_lock(&tpt_trig->trig.leddev_list_lock); list_for_each_entry(led_cdev, &tpt_trig->trig.led_cdevs, trig_list) led_brightness_set(led_cdev, LED_OFF); read_unlock(&tpt_trig->trig.leddev_list_lock); } void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local, unsigned int types_on, unsigned int types_off) { struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger; bool allowed; WARN_ON(types_on & types_off); if (!tpt_trig) return; tpt_trig->active &= ~types_off; tpt_trig->active |= types_on; /* * Regardless of wanted state, we shouldn't blink when * the radio is disabled -- this can happen due to some * code ordering issues with __ieee80211_recalc_idle() * being called before the radio is started. */ allowed = tpt_trig->active & IEEE80211_TPT_LEDTRIG_FL_RADIO; if (!allowed || !(tpt_trig->active & tpt_trig->want)) ieee80211_stop_tpt_led_trig(local); else ieee80211_start_tpt_led_trig(local); }
gpl-2.0
spezi77/android_kernel_htcbravo-3.0
arch/mips/sgi-ip32/ip32-reset.c
4633
5047
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 Keith M Wesolowski
 * Copyright (C) 2001 Paul Mundt
 * Copyright (C) 2003 Guido Guenther <agx@sigxcpu.org>
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/delay.h>
#include <linux/ds17287rtc.h>
#include <linux/interrupt.h>
#include <linux/pm.h>

#include <asm/addrspace.h>
#include <asm/irq.h>
#include <asm/reboot.h>
#include <asm/system.h>
#include <asm/wbflush.h>
#include <asm/ip32/mace.h>
#include <asm/ip32/crime.h>
#include <asm/ip32/ip32_ints.h>

/* Seconds of grace after the power button before forcing power-off. */
#define POWERDOWN_TIMEOUT 120

/*
 * Blink frequency during reboot grace period and when panicked.
 */
#define POWERDOWN_FREQ (HZ / 4)
#define PANIC_FREQ (HZ / 8)

static struct timer_list power_timer, blink_timer, debounce_timer;
/* set once on panic / once shutdown has been initiated */
static int has_panicked, shuting_down;

static void ip32_machine_restart(char *command) __attribute__((noreturn));
static void ip32_machine_halt(void) __attribute__((noreturn));
static void ip32_machine_power_off(void) __attribute__((noreturn));

/* Hard-reset the machine through the CRIME control register. */
static void ip32_machine_restart(char *cmd)
{
	crime->control = CRIME_CONTROL_HARD_RESET;
	while (1);
}

/* Halt is implemented as a power-off on this platform. */
static inline void ip32_machine_halt(void)
{
	ip32_machine_power_off();
}

/*
 * Power the machine off via the DS17287 RTC's extended control registers.
 * The register write order follows the chip's documented power-off
 * sequence; do not reorder.
 */
static void ip32_machine_power_off(void)
{
	unsigned char reg_a, xctrl_a, xctrl_b;

	disable_irq(MACEISA_RTC_IRQ);

	reg_a = CMOS_READ(RTC_REG_A);

	/* setup for kickstart & wake-up (DS12287 Ref. Man. p. 19) */
	reg_a &= ~DS_REGA_DV2;
	reg_a |= DS_REGA_DV1;

	CMOS_WRITE(reg_a | DS_REGA_DV0, RTC_REG_A);
	wbflush();
	/* enable kickstart and auxiliary battery */
	xctrl_b = CMOS_READ(DS_B1_XCTRL4B)
		   | DS_XCTRL4B_ABE | DS_XCTRL4B_KFE;
	CMOS_WRITE(xctrl_b, DS_B1_XCTRL4B);
	xctrl_a = CMOS_READ(DS_B1_XCTRL4A) & ~DS_XCTRL4A_IFS;
	CMOS_WRITE(xctrl_a, DS_B1_XCTRL4A);
	wbflush();
	/* adios amigos... */
	CMOS_WRITE(xctrl_a | DS_XCTRL4A_PAB, DS_B1_XCTRL4A);
	CMOS_WRITE(reg_a, RTC_REG_A);
	wbflush();
	while (1);
}

/* Grace period expired without a clean shutdown: force power-off. */
static void power_timeout(unsigned long data)
{
	ip32_machine_power_off();
}

/* Toggle the red LED; @data is the reblink interval in jiffies. */
static void blink_timeout(unsigned long data)
{
	unsigned long led = mace->perif.ctrl.misc ^ MACEISA_LED_RED;
	mace->perif.ctrl.misc = led;
	mod_timer(&blink_timer, jiffies + data);
}

/*
 * Debounce the power button: as long as the RTC still signals the
 * interrupt, clear the source and re-check 50 jiffies later.  Once quiet,
 * either restart (if panicked) or re-enable the RTC IRQ.
 */
static void debounce(unsigned long data)
{
	unsigned char reg_a, reg_c, xctrl_a;

	reg_c = CMOS_READ(RTC_INTR_FLAGS);
	reg_a = CMOS_READ(RTC_REG_A);
	CMOS_WRITE(reg_a | DS_REGA_DV0, RTC_REG_A);
	wbflush();
	xctrl_a = CMOS_READ(DS_B1_XCTRL4A);
	if ((xctrl_a & DS_XCTRL4A_IFS) || (reg_c & RTC_IRQF )) {
		/* Interrupt still being sent. */
		debounce_timer.expires = jiffies + 50;
		add_timer(&debounce_timer);

		/* clear interrupt source */
		CMOS_WRITE(xctrl_a & ~DS_XCTRL4A_IFS, DS_B1_XCTRL4A);
		CMOS_WRITE(reg_a & ~DS_REGA_DV0, RTC_REG_A);
		return;
	}
	CMOS_WRITE(reg_a & ~DS_REGA_DV0, RTC_REG_A);

	if (has_panicked)
		ip32_machine_restart(NULL);

	enable_irq(MACEISA_RTC_IRQ);
}

/*
 * React to the power button: first press signals init (SIGINT to cad pid)
 * and starts the blink + forced-power-off grace timers; a second press,
 * or the absence of an init process, powers off immediately.
 */
static inline void ip32_power_button(void)
{
	if (has_panicked)
		return;

	if (shuting_down || kill_cad_pid(SIGINT, 1)) {
		/* No init process or button pressed twice. */
		ip32_machine_power_off();
	}

	shuting_down = 1;
	blink_timer.data = POWERDOWN_FREQ;
	blink_timeout(POWERDOWN_FREQ);

	init_timer(&power_timer);
	power_timer.function = power_timeout;
	power_timer.expires = jiffies + POWERDOWN_TIMEOUT * HZ;
	add_timer(&power_timer);
}

/* RTC interrupt: treated as the power button; kick off debouncing. */
static irqreturn_t ip32_rtc_int(int irq, void *dev_id)
{
	unsigned char reg_c;

	reg_c = CMOS_READ(RTC_INTR_FLAGS);
	if (!(reg_c & RTC_IRQF)) {
		printk(KERN_WARNING
			"%s: RTC IRQ without RTC_IRQF\n", __func__);
	}
	/* Wait until interrupt goes away */
	disable_irq_nosync(MACEISA_RTC_IRQ);
	init_timer(&debounce_timer);
	debounce_timer.function = debounce;
	debounce_timer.expires = jiffies + 50;
	add_timer(&debounce_timer);

	printk(KERN_DEBUG "Power button pressed\n");
	ip32_power_button();
	return IRQ_HANDLED;
}

/* Panic notifier: turn the green LED off and blink red at PANIC_FREQ. */
static int panic_event(struct notifier_block *this, unsigned long event,
		       void *ptr)
{
	unsigned long led;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* turn off the green LED */
	led = mace->perif.ctrl.misc | MACEISA_LED_GREEN;
	mace->perif.ctrl.misc = led;

	blink_timer.data = PANIC_FREQ;
	blink_timeout(PANIC_FREQ);

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = panic_event,
};

/*
 * Wire up restart/halt/power-off hooks, the blink timer, the panic
 * notifier and the RTC (power button) interrupt at boot.
 */
static __init int ip32_reboot_setup(void)
{
	/* turn on the green led only */
	unsigned long led = mace->perif.ctrl.misc;
	led |= MACEISA_LED_RED;
	led &= ~MACEISA_LED_GREEN;
	mace->perif.ctrl.misc = led;

	_machine_restart = ip32_machine_restart;
	_machine_halt = ip32_machine_halt;
	pm_power_off = ip32_machine_power_off;

	init_timer(&blink_timer);
	blink_timer.function = blink_timeout;
	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	if (request_irq(MACEISA_RTC_IRQ, ip32_rtc_int, 0, "rtc", NULL))
		panic("Can't allocate MACEISA RTC IRQ");

	return 0;
}

subsys_initcall(ip32_reboot_setup);
gpl-2.0
yukchou/linux-sunxi-1
arch/arm/mach-omap2/board-4430sdp.c
4633
23897
/* * Board support file for OMAP4430 SDP. * * Copyright (C) 2009 Texas Instruments * * Author: Santosh Shilimkar <santosh.shilimkar@ti.com> * * Based on mach-omap2/board-3430sdp.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/usb/otg.h> #include <linux/spi/spi.h> #include <linux/i2c/twl.h> #include <linux/mfd/twl6040.h> #include <linux/gpio_keys.h> #include <linux/regulator/machine.h> #include <linux/regulator/fixed.h> #include <linux/leds.h> #include <linux/leds_pwm.h> #include <linux/platform_data/omap4-keypad.h> #include <mach/hardware.h> #include <asm/hardware/gic.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <plat/board.h> #include "common.h" #include <plat/usb.h> #include <plat/mmc.h> #include <plat/omap4-keypad.h> #include <video/omapdss.h> #include <video/omap-panel-nokia-dsi.h> #include <video/omap-panel-picodlp.h> #include <linux/wl12xx.h> #include <linux/platform_data/omap-abe-twl6040.h> #include "mux.h" #include "hsmmc.h" #include "control.h" #include "common-board-devices.h" #define ETH_KS8851_IRQ 34 #define ETH_KS8851_POWER_ON 48 #define ETH_KS8851_QUART 138 #define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184 #define OMAP4_SFH7741_ENABLE_GPIO 188 #define HDMI_GPIO_CT_CP_HPD 60 /* HPD mode enable/disable */ #define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */ #define HDMI_GPIO_HPD 63 /* Hotplug detect */ #define DISPLAY_SEL_GPIO 59 /* LCD2/PicoDLP switch */ #define DLP_POWER_ON_GPIO 40 #define GPIO_WIFI_PMENA 54 #define GPIO_WIFI_IRQ 53 static const int sdp4430_keymap[] = { KEY(0, 0, KEY_E), KEY(0, 1, KEY_R), KEY(0, 2, KEY_T), KEY(0, 3, KEY_HOME), KEY(0, 4, KEY_F5), KEY(0, 5, KEY_UNKNOWN), KEY(0, 6, KEY_I), KEY(0, 7, 
KEY_LEFTSHIFT), KEY(1, 0, KEY_D), KEY(1, 1, KEY_F), KEY(1, 2, KEY_G), KEY(1, 3, KEY_SEND), KEY(1, 4, KEY_F6), KEY(1, 5, KEY_UNKNOWN), KEY(1, 6, KEY_K), KEY(1, 7, KEY_ENTER), KEY(2, 0, KEY_X), KEY(2, 1, KEY_C), KEY(2, 2, KEY_V), KEY(2, 3, KEY_END), KEY(2, 4, KEY_F7), KEY(2, 5, KEY_UNKNOWN), KEY(2, 6, KEY_DOT), KEY(2, 7, KEY_CAPSLOCK), KEY(3, 0, KEY_Z), KEY(3, 1, KEY_KPPLUS), KEY(3, 2, KEY_B), KEY(3, 3, KEY_F1), KEY(3, 4, KEY_F8), KEY(3, 5, KEY_UNKNOWN), KEY(3, 6, KEY_O), KEY(3, 7, KEY_SPACE), KEY(4, 0, KEY_W), KEY(4, 1, KEY_Y), KEY(4, 2, KEY_U), KEY(4, 3, KEY_F2), KEY(4, 4, KEY_VOLUMEUP), KEY(4, 5, KEY_UNKNOWN), KEY(4, 6, KEY_L), KEY(4, 7, KEY_LEFT), KEY(5, 0, KEY_S), KEY(5, 1, KEY_H), KEY(5, 2, KEY_J), KEY(5, 3, KEY_F3), KEY(5, 4, KEY_F9), KEY(5, 5, KEY_VOLUMEDOWN), KEY(5, 6, KEY_M), KEY(5, 7, KEY_RIGHT), KEY(6, 0, KEY_Q), KEY(6, 1, KEY_A), KEY(6, 2, KEY_N), KEY(6, 3, KEY_BACK), KEY(6, 4, KEY_BACKSPACE), KEY(6, 5, KEY_UNKNOWN), KEY(6, 6, KEY_P), KEY(6, 7, KEY_UP), KEY(7, 0, KEY_PROG1), KEY(7, 1, KEY_PROG2), KEY(7, 2, KEY_PROG3), KEY(7, 3, KEY_PROG4), KEY(7, 4, KEY_F4), KEY(7, 5, KEY_UNKNOWN), KEY(7, 6, KEY_OK), KEY(7, 7, KEY_DOWN), }; static struct omap_device_pad keypad_pads[] = { { .name = "kpd_col1.kpd_col1", .enable = OMAP_WAKEUP_EN | OMAP_MUX_MODE1, }, { .name = "kpd_col1.kpd_col1", .enable = OMAP_WAKEUP_EN | OMAP_MUX_MODE1, }, { .name = "kpd_col2.kpd_col2", .enable = OMAP_WAKEUP_EN | OMAP_MUX_MODE1, }, { .name = "kpd_col3.kpd_col3", .enable = OMAP_WAKEUP_EN | OMAP_MUX_MODE1, }, { .name = "kpd_col4.kpd_col4", .enable = OMAP_WAKEUP_EN | OMAP_MUX_MODE1, }, { .name = "kpd_col5.kpd_col5", .enable = OMAP_WAKEUP_EN | OMAP_MUX_MODE1, }, { .name = "gpmc_a23.kpd_col7", .enable = OMAP_WAKEUP_EN | OMAP_MUX_MODE1, }, { .name = "gpmc_a22.kpd_col6", .enable = OMAP_WAKEUP_EN | OMAP_MUX_MODE1, }, { .name = "kpd_row0.kpd_row0", .enable = OMAP_PULL_ENA | OMAP_PULL_UP | OMAP_WAKEUP_EN | OMAP_MUX_MODE1 | OMAP_INPUT_EN, }, { .name = "kpd_row1.kpd_row1", .enable = OMAP_PULL_ENA | 
OMAP_PULL_UP | OMAP_WAKEUP_EN | OMAP_MUX_MODE1 | OMAP_INPUT_EN, }, { .name = "kpd_row2.kpd_row2", .enable = OMAP_PULL_ENA | OMAP_PULL_UP | OMAP_WAKEUP_EN | OMAP_MUX_MODE1 | OMAP_INPUT_EN, }, { .name = "kpd_row3.kpd_row3", .enable = OMAP_PULL_ENA | OMAP_PULL_UP | OMAP_WAKEUP_EN | OMAP_MUX_MODE1 | OMAP_INPUT_EN, }, { .name = "kpd_row4.kpd_row4", .enable = OMAP_PULL_ENA | OMAP_PULL_UP | OMAP_WAKEUP_EN | OMAP_MUX_MODE1 | OMAP_INPUT_EN, }, { .name = "kpd_row5.kpd_row5", .enable = OMAP_PULL_ENA | OMAP_PULL_UP | OMAP_WAKEUP_EN | OMAP_MUX_MODE1 | OMAP_INPUT_EN, }, { .name = "gpmc_a18.kpd_row6", .enable = OMAP_PULL_ENA | OMAP_PULL_UP | OMAP_WAKEUP_EN | OMAP_MUX_MODE1 | OMAP_INPUT_EN, }, { .name = "gpmc_a19.kpd_row7", .enable = OMAP_PULL_ENA | OMAP_PULL_UP | OMAP_WAKEUP_EN | OMAP_MUX_MODE1 | OMAP_INPUT_EN, }, }; static struct matrix_keymap_data sdp4430_keymap_data = { .keymap = sdp4430_keymap, .keymap_size = ARRAY_SIZE(sdp4430_keymap), }; static struct omap4_keypad_platform_data sdp4430_keypad_data = { .keymap_data = &sdp4430_keymap_data, .rows = 8, .cols = 8, }; static struct omap_board_data keypad_data = { .id = 1, .pads = keypad_pads, .pads_cnt = ARRAY_SIZE(keypad_pads), }; static struct gpio_led sdp4430_gpio_leds[] = { { .name = "omap4:green:debug0", .gpio = 61, }, { .name = "omap4:green:debug1", .gpio = 30, }, { .name = "omap4:green:debug2", .gpio = 7, }, { .name = "omap4:green:debug3", .gpio = 8, }, { .name = "omap4:green:debug4", .gpio = 50, }, { .name = "omap4:blue:user", .gpio = 169, }, { .name = "omap4:red:user", .gpio = 170, }, { .name = "omap4:green:user", .gpio = 139, }, }; static struct gpio_keys_button sdp4430_gpio_keys[] = { { .desc = "Proximity Sensor", .type = EV_SW, .code = SW_FRONT_PROXIMITY, .gpio = OMAP4_SFH7741_SENSOR_OUTPUT_GPIO, .active_low = 0, } }; static struct gpio_led_platform_data sdp4430_led_data = { .leds = sdp4430_gpio_leds, .num_leds = ARRAY_SIZE(sdp4430_gpio_leds), }; static struct led_pwm sdp4430_pwm_leds[] = { { .name = 
"omap4:green:chrg", .pwm_id = 1, .max_brightness = 255, .pwm_period_ns = 7812500, }, }; static struct led_pwm_platform_data sdp4430_pwm_data = { .num_leds = ARRAY_SIZE(sdp4430_pwm_leds), .leds = sdp4430_pwm_leds, }; static struct platform_device sdp4430_leds_pwm = { .name = "leds_pwm", .id = -1, .dev = { .platform_data = &sdp4430_pwm_data, }, }; static int omap_prox_activate(struct device *dev) { gpio_set_value(OMAP4_SFH7741_ENABLE_GPIO , 1); return 0; } static void omap_prox_deactivate(struct device *dev) { gpio_set_value(OMAP4_SFH7741_ENABLE_GPIO , 0); } static struct gpio_keys_platform_data sdp4430_gpio_keys_data = { .buttons = sdp4430_gpio_keys, .nbuttons = ARRAY_SIZE(sdp4430_gpio_keys), .enable = omap_prox_activate, .disable = omap_prox_deactivate, }; static struct platform_device sdp4430_gpio_keys_device = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &sdp4430_gpio_keys_data, }, }; static struct platform_device sdp4430_leds_gpio = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &sdp4430_led_data, }, }; static struct spi_board_info sdp4430_spi_board_info[] __initdata = { { .modalias = "ks8851", .bus_num = 1, .chip_select = 0, .max_speed_hz = 24000000, /* * .irq is set to gpio_to_irq(ETH_KS8851_IRQ) * in omap_4430sdp_init */ }, }; static struct gpio sdp4430_eth_gpios[] __initdata = { { ETH_KS8851_POWER_ON, GPIOF_OUT_INIT_HIGH, "eth_power" }, { ETH_KS8851_QUART, GPIOF_OUT_INIT_HIGH, "quart" }, { ETH_KS8851_IRQ, GPIOF_IN, "eth_irq" }, }; static int __init omap_ethernet_init(void) { int status; /* Request of GPIO lines */ status = gpio_request_array(sdp4430_eth_gpios, ARRAY_SIZE(sdp4430_eth_gpios)); if (status) pr_err("Cannot request ETH GPIOs\n"); return status; } static struct regulator_consumer_supply sdp4430_vbat_supply[] = { REGULATOR_SUPPLY("vddvibl", "twl6040-vibra"), REGULATOR_SUPPLY("vddvibr", "twl6040-vibra"), }; static struct regulator_init_data sdp4430_vbat_data = { .constraints = { .always_on = 1, }, .num_consumer_supplies = 
ARRAY_SIZE(sdp4430_vbat_supply), .consumer_supplies = sdp4430_vbat_supply, }; static struct fixed_voltage_config sdp4430_vbat_pdata = { .supply_name = "VBAT", .microvolts = 3750000, .init_data = &sdp4430_vbat_data, .gpio = -EINVAL, }; static struct platform_device sdp4430_vbat = { .name = "reg-fixed-voltage", .id = -1, .dev = { .platform_data = &sdp4430_vbat_pdata, }, }; static struct platform_device sdp4430_dmic_codec = { .name = "dmic-codec", .id = -1, }; static struct omap_abe_twl6040_data sdp4430_abe_audio_data = { .card_name = "SDP4430", .has_hs = ABE_TWL6040_LEFT | ABE_TWL6040_RIGHT, .has_hf = ABE_TWL6040_LEFT | ABE_TWL6040_RIGHT, .has_ep = 1, .has_aux = ABE_TWL6040_LEFT | ABE_TWL6040_RIGHT, .has_vibra = ABE_TWL6040_LEFT | ABE_TWL6040_RIGHT, .has_dmic = 1, .has_hsmic = 1, .has_mainmic = 1, .has_submic = 1, .has_afm = ABE_TWL6040_LEFT | ABE_TWL6040_RIGHT, .jack_detection = 1, /* MCLK input is 38.4MHz */ .mclk_freq = 38400000, }; static struct platform_device sdp4430_abe_audio = { .name = "omap-abe-twl6040", .id = -1, .dev = { .platform_data = &sdp4430_abe_audio_data, }, }; static struct platform_device *sdp4430_devices[] __initdata = { &sdp4430_gpio_keys_device, &sdp4430_leds_gpio, &sdp4430_leds_pwm, &sdp4430_vbat, &sdp4430_dmic_codec, &sdp4430_abe_audio, }; static struct omap_musb_board_data musb_board_data = { .interface_type = MUSB_INTERFACE_UTMI, .mode = MUSB_OTG, .power = 100, }; static struct omap2_hsmmc_info mmc[] = { { .mmc = 2, .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA, .gpio_cd = -EINVAL, .gpio_wp = -EINVAL, .nonremovable = true, .ocr_mask = MMC_VDD_29_30, .no_off_init = true, }, { .mmc = 1, .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA, .gpio_cd = -EINVAL, .gpio_wp = -EINVAL, }, { .mmc = 5, .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD, .pm_caps = MMC_PM_KEEP_POWER, .gpio_cd = -EINVAL, .gpio_wp = -EINVAL, .ocr_mask = MMC_VDD_165_195, .nonremovable = true, }, {} /* Terminator */ }; static struct regulator_consumer_supply 
sdp4430_vaux_supply[] = { REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"), }; static struct regulator_consumer_supply omap4_sdp4430_vmmc5_supply = { .supply = "vmmc", .dev_name = "omap_hsmmc.4", }; static struct regulator_init_data sdp4430_vmmc5 = { .constraints = { .valid_ops_mask = REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = 1, .consumer_supplies = &omap4_sdp4430_vmmc5_supply, }; static struct fixed_voltage_config sdp4430_vwlan = { .supply_name = "vwl1271", .microvolts = 1800000, /* 1.8V */ .gpio = GPIO_WIFI_PMENA, .startup_delay = 70000, /* 70msec */ .enable_high = 1, .enabled_at_boot = 0, .init_data = &sdp4430_vmmc5, }; static struct platform_device omap_vwlan_device = { .name = "reg-fixed-voltage", .id = 1, .dev = { .platform_data = &sdp4430_vwlan, }, }; static int omap4_twl6030_hsmmc_late_init(struct device *dev) { int irq = 0; struct platform_device *pdev = container_of(dev, struct platform_device, dev); struct omap_mmc_platform_data *pdata = dev->platform_data; /* Setting MMC1 Card detect Irq */ if (pdev->id == 0) { irq = twl6030_mmc_card_detect_config(); if (irq < 0) { pr_err("Failed configuring MMC1 card detect\n"); return irq; } pdata->slots[0].card_detect_irq = irq; pdata->slots[0].card_detect = twl6030_mmc_card_detect; } return 0; } static __init void omap4_twl6030_hsmmc_set_late_init(struct device *dev) { struct omap_mmc_platform_data *pdata; /* dev can be null if CONFIG_MMC_OMAP_HS is not set */ if (!dev) { pr_err("Failed %s\n", __func__); return; } pdata = dev->platform_data; pdata->init = omap4_twl6030_hsmmc_late_init; } static int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers) { struct omap2_hsmmc_info *c; omap_hsmmc_init(controllers); for (c = controllers; c->mmc; c++) omap4_twl6030_hsmmc_set_late_init(&c->pdev->dev); return 0; } static struct regulator_init_data sdp4430_vaux1 = { .constraints = { .min_uV = 1000000, .max_uV = 3000000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, 
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(sdp4430_vaux_supply), .consumer_supplies = sdp4430_vaux_supply, }; static struct regulator_init_data sdp4430_vusim = { .constraints = { .min_uV = 1200000, .max_uV = 2900000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; static struct twl6040_codec_data twl6040_codec = { /* single-step ramp for headset and handsfree */ .hs_left_step = 0x0f, .hs_right_step = 0x0f, .hf_left_step = 0x1d, .hf_right_step = 0x1d, }; static struct twl6040_vibra_data twl6040_vibra = { .vibldrv_res = 8, .vibrdrv_res = 3, .viblmotor_res = 10, .vibrmotor_res = 10, .vddvibl_uV = 0, /* fixed volt supply - VBAT */ .vddvibr_uV = 0, /* fixed volt supply - VBAT */ }; static struct twl6040_platform_data twl6040_data = { .codec = &twl6040_codec, .vibra = &twl6040_vibra, .audpwron_gpio = 127, .irq_base = TWL6040_CODEC_IRQ_BASE, }; static struct twl4030_platform_data sdp4430_twldata = { /* Regulators */ .vusim = &sdp4430_vusim, .vaux1 = &sdp4430_vaux1, }; static struct i2c_board_info __initdata sdp4430_i2c_3_boardinfo[] = { { I2C_BOARD_INFO("tmp105", 0x48), }, { I2C_BOARD_INFO("bh1780", 0x29), }, }; static struct i2c_board_info __initdata sdp4430_i2c_4_boardinfo[] = { { I2C_BOARD_INFO("hmc5843", 0x1e), }, }; static int __init omap4_i2c_init(void) { omap4_pmic_get_config(&sdp4430_twldata, TWL_COMMON_PDATA_USB, TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VAUX2 | TWL_COMMON_REGULATOR_VAUX3 | TWL_COMMON_REGULATOR_VMMC | TWL_COMMON_REGULATOR_VPP | TWL_COMMON_REGULATOR_VANA | TWL_COMMON_REGULATOR_VCXIO | TWL_COMMON_REGULATOR_VUSB | TWL_COMMON_REGULATOR_CLK32KG); omap4_pmic_init("twl6030", &sdp4430_twldata, &twl6040_data, OMAP44XX_IRQ_SYS_2N); omap_register_i2c_bus(2, 400, NULL, 0); omap_register_i2c_bus(3, 400, sdp4430_i2c_3_boardinfo, 
ARRAY_SIZE(sdp4430_i2c_3_boardinfo)); omap_register_i2c_bus(4, 400, sdp4430_i2c_4_boardinfo, ARRAY_SIZE(sdp4430_i2c_4_boardinfo)); return 0; } static void __init omap_sfh7741prox_init(void) { int error; error = gpio_request_one(OMAP4_SFH7741_ENABLE_GPIO, GPIOF_OUT_INIT_LOW, "sfh7741"); if (error < 0) pr_err("%s:failed to request GPIO %d, error %d\n", __func__, OMAP4_SFH7741_ENABLE_GPIO, error); } static struct gpio sdp4430_hdmi_gpios[] = { { HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ct_cp_hpd" }, { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" }, { HDMI_GPIO_HPD, GPIOF_DIR_IN, "hdmi_gpio_hpd" }, }; static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev) { int status; status = gpio_request_array(sdp4430_hdmi_gpios, ARRAY_SIZE(sdp4430_hdmi_gpios)); if (status) pr_err("%s: Cannot request HDMI GPIOs\n", __func__); return status; } static void sdp4430_panel_disable_hdmi(struct omap_dss_device *dssdev) { gpio_free_array(sdp4430_hdmi_gpios, ARRAY_SIZE(sdp4430_hdmi_gpios)); } static struct nokia_dsi_panel_data dsi1_panel = { .name = "taal", .reset_gpio = 102, .use_ext_te = false, .ext_te_gpio = 101, .esd_interval = 0, }; static struct omap_dss_device sdp4430_lcd_device = { .name = "lcd", .driver_name = "taal", .type = OMAP_DISPLAY_TYPE_DSI, .data = &dsi1_panel, .phy.dsi = { .clk_lane = 1, .clk_pol = 0, .data1_lane = 2, .data1_pol = 0, .data2_lane = 3, .data2_pol = 0, .module = 0, }, .clocks = { .dispc = { .channel = { /* Logic Clock = 172.8 MHz */ .lck_div = 1, /* Pixel Clock = 34.56 MHz */ .pck_div = 5, .lcd_clk_src = OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, }, .dispc_fclk_src = OMAP_DSS_CLK_SRC_FCK, }, .dsi = { .regn = 16, /* Fint = 2.4 MHz */ .regm = 180, /* DDR Clock = 216 MHz */ .regm_dispc = 5, /* PLL1_CLK1 = 172.8 MHz */ .regm_dsi = 5, /* PLL1_CLK2 = 172.8 MHz */ .lp_clk_div = 10, /* LP Clock = 8.64 MHz */ .dsi_fclk_src = OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, }, }, .channel = OMAP_DSS_CHANNEL_LCD, }; static struct nokia_dsi_panel_data 
dsi2_panel = { .name = "taal", .reset_gpio = 104, .use_ext_te = false, .ext_te_gpio = 103, .esd_interval = 0, }; static struct omap_dss_device sdp4430_lcd2_device = { .name = "lcd2", .driver_name = "taal", .type = OMAP_DISPLAY_TYPE_DSI, .data = &dsi2_panel, .phy.dsi = { .clk_lane = 1, .clk_pol = 0, .data1_lane = 2, .data1_pol = 0, .data2_lane = 3, .data2_pol = 0, .module = 1, }, .clocks = { .dispc = { .channel = { /* Logic Clock = 172.8 MHz */ .lck_div = 1, /* Pixel Clock = 34.56 MHz */ .pck_div = 5, .lcd_clk_src = OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, }, .dispc_fclk_src = OMAP_DSS_CLK_SRC_FCK, }, .dsi = { .regn = 16, /* Fint = 2.4 MHz */ .regm = 180, /* DDR Clock = 216 MHz */ .regm_dispc = 5, /* PLL1_CLK1 = 172.8 MHz */ .regm_dsi = 5, /* PLL1_CLK2 = 172.8 MHz */ .lp_clk_div = 10, /* LP Clock = 8.64 MHz */ .dsi_fclk_src = OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, }, }, .channel = OMAP_DSS_CHANNEL_LCD2, }; static void sdp4430_lcd_init(void) { int r; r = gpio_request_one(dsi1_panel.reset_gpio, GPIOF_DIR_OUT, "lcd1_reset_gpio"); if (r) pr_err("%s: Could not get lcd1_reset_gpio\n", __func__); r = gpio_request_one(dsi2_panel.reset_gpio, GPIOF_DIR_OUT, "lcd2_reset_gpio"); if (r) pr_err("%s: Could not get lcd2_reset_gpio\n", __func__); } static struct omap_dss_hdmi_data sdp4430_hdmi_data = { .hpd_gpio = HDMI_GPIO_HPD, }; static struct omap_dss_device sdp4430_hdmi_device = { .name = "hdmi", .driver_name = "hdmi_panel", .type = OMAP_DISPLAY_TYPE_HDMI, .platform_enable = sdp4430_panel_enable_hdmi, .platform_disable = sdp4430_panel_disable_hdmi, .channel = OMAP_DSS_CHANNEL_DIGIT, .data = &sdp4430_hdmi_data, }; static struct picodlp_panel_data sdp4430_picodlp_pdata = { .picodlp_adapter_id = 2, .emu_done_gpio = 44, .pwrgood_gpio = 45, }; static void sdp4430_picodlp_init(void) { int r; const struct gpio picodlp_gpios[] = { {DLP_POWER_ON_GPIO, GPIOF_OUT_INIT_LOW, "DLP POWER ON"}, {sdp4430_picodlp_pdata.emu_done_gpio, GPIOF_IN, "DLP EMU DONE"}, {sdp4430_picodlp_pdata.pwrgood_gpio, 
GPIOF_OUT_INIT_LOW, "DLP PWRGOOD"},
	};

	r = gpio_request_array(picodlp_gpios, ARRAY_SIZE(picodlp_gpios));
	if (r)
		pr_err("Cannot request PicoDLP GPIOs, error %d\n", r);
}

/* DISPLAY_SEL low routes the LCD2 channel to the PicoDLP, then power it on. */
static int sdp4430_panel_enable_picodlp(struct omap_dss_device *dssdev)
{
	gpio_set_value(DISPLAY_SEL_GPIO, 0);
	gpio_set_value(DLP_POWER_ON_GPIO, 1);

	return 0;
}

/* Reverse of enable: cut power, then route LCD2 back to the panel. */
static void sdp4430_panel_disable_picodlp(struct omap_dss_device *dssdev)
{
	gpio_set_value(DLP_POWER_ON_GPIO, 0);
	gpio_set_value(DISPLAY_SEL_GPIO, 1);
}

static struct omap_dss_device sdp4430_picodlp_device = {
	.name			= "picodlp",
	.driver_name		= "picodlp_panel",
	.type			= OMAP_DISPLAY_TYPE_DPI,
	.phy.dpi.data_lines	= 24,
	.channel		= OMAP_DSS_CHANNEL_LCD2,
	.platform_enable	= sdp4430_panel_enable_picodlp,
	.platform_disable	= sdp4430_panel_disable_picodlp,
	.data			= &sdp4430_picodlp_pdata,
};

static struct omap_dss_device *sdp4430_dss_devices[] = {
	&sdp4430_lcd_device,
	&sdp4430_lcd2_device,
	&sdp4430_hdmi_device,
	&sdp4430_picodlp_device,
};

static struct omap_dss_board_info sdp4430_dss_data = {
	.num_devices	= ARRAY_SIZE(sdp4430_dss_devices),
	.devices	= sdp4430_dss_devices,
	.default_device	= &sdp4430_lcd_device,
};

static void __init omap_4430sdp_display_init(void)
{
	int r;

	/* Enable LCD2 by default (instead of Pico DLP) */
	r = gpio_request_one(DISPLAY_SEL_GPIO, GPIOF_OUT_INIT_HIGH,
		"display_sel");
	if (r)
		pr_err("%s: Could not get display_sel GPIO\n", __func__);

	sdp4430_lcd_init();
	sdp4430_picodlp_init();
	omap_display_init(&sdp4430_dss_data);
	/*
	 * OMAP4460SDP/Blaze and OMAP4430 ES2.3 SDP/Blaze boards and
	 * later have external pull up on the HDMI I2C lines
	 */
	if (cpu_is_omap446x() || omap_rev() > OMAP4430_REV_ES2_2)
		omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP);
	else
		omap_hdmi_init(0);

	/* Mux the HDMI control/hot-plug lines after the DSS is up. */
	omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT);
	omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT);
	omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
}

#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux board_mux[] __initdata = {
OMAP4_MUX(USBB2_ULPITLL_CLK, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
	{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#else
#define board_mux	NULL
#endif

/* Pin-mux the WLAN IRQ/power-enable GPIOs and the SDIO (sdmmc5) interface. */
static void __init omap4_sdp4430_wifi_mux_init(void)
{
	/* IRQ pin may also wake the SoC from off mode */
	omap_mux_init_gpio(GPIO_WIFI_IRQ, OMAP_PIN_INPUT |
				OMAP_PIN_OFF_WAKEUPENABLE);
	omap_mux_init_gpio(GPIO_WIFI_PMENA, OMAP_PIN_OUTPUT);

	omap_mux_init_signal("sdmmc5_cmd.sdmmc5_cmd",
				OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP);
	omap_mux_init_signal("sdmmc5_clk.sdmmc5_clk",
				OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP);
	omap_mux_init_signal("sdmmc5_dat0.sdmmc5_dat0",
				OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP);
	omap_mux_init_signal("sdmmc5_dat1.sdmmc5_dat1",
				OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP);
	omap_mux_init_signal("sdmmc5_dat2.sdmmc5_dat2",
				OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP);
	omap_mux_init_signal("sdmmc5_dat3.sdmmc5_dat3",
				OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP);
}

/* 26 MHz reference and TCXO clocks for the wl12xx module on this board. */
static struct wl12xx_platform_data omap4_sdp4430_wlan_data __initdata = {
	.board_ref_clock = WL12XX_REFCLOCK_26,
	.board_tcxo_clock = WL12XX_TCXOCLOCK_26,
};

static void __init omap4_sdp4430_wifi_init(void)
{
	int ret;

	omap4_sdp4430_wifi_mux_init();
	omap4_sdp4430_wlan_data.irq = gpio_to_irq(GPIO_WIFI_IRQ);
	ret = wl12xx_set_platform_data(&omap4_sdp4430_wlan_data);
	if (ret)
		pr_err("Error setting wl12xx data: %d\n", ret);
	ret = platform_device_register(&omap_vwlan_device);
	if (ret)
		pr_err("Error registering wl12xx device: %d\n", ret);
}

/* Board init entry point, wired into MACHINE_START below. */
static void __init omap_4430sdp_init(void)
{
	int status;
	int package = OMAP_PACKAGE_CBS;

	/* ES1.0 silicon ships in the CBL package */
	if (omap_rev() == OMAP4430_REV_ES1_0)
		package = OMAP_PACKAGE_CBL;
	omap4_mux_init(board_mux, NULL, package);

	omap4_i2c_init();
	omap_sfh7741prox_init();
	platform_add_devices(sdp4430_devices, ARRAY_SIZE(sdp4430_devices));
	omap_serial_init();
	omap_sdrc_init(NULL, NULL);
	omap4_sdp4430_wifi_init();
	omap4_twl6030_hsmmc_init(mmc);

	usb_musb_init(&musb_board_data);

	status = omap_ethernet_init();
	if (status) {
		pr_err("Ethernet initialization failed: %d\n", status);
	} else {
		/* only register the SPI ethernet when its IRQ is usable */
		sdp4430_spi_board_info[0].irq =
gpio_to_irq(ETH_KS8851_IRQ);
		spi_register_board_info(sdp4430_spi_board_info,
				ARRAY_SIZE(sdp4430_spi_board_info));
	}

	status = omap4_keyboard_init(&sdp4430_keypad_data, &keypad_data);
	if (status)
		pr_err("Keypad initialization failed: %d\n", status);

	omap_4430sdp_display_init();
}

MACHINE_START(OMAP_4430SDP, "OMAP4430 4430SDP board")
	/* Maintainer: Santosh Shilimkar - Texas Instruments Inc */
	.atag_offset	= 0x100,
	.reserve	= omap_reserve,
	.map_io		= omap4_map_io,
	.init_early	= omap4430_init_early,
	.init_irq	= gic_init_irq,
	.handle_irq	= gic_handle_irq,
	.init_machine	= omap_4430sdp_init,
	.timer		= &omap4_timer,
	.restart	= omap_prcm_restart,
MACHINE_END
gpl-2.0
hiikezoe/android_kernel_nec_n06e
drivers/media/video/davinci/vpif_capture.c
4889
63335
/* * Copyright (C) 2009 Texas Instruments Inc * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * TODO : add support for VBI & HBI data service * add static buffer allocation */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/string.h> #include <linux/videodev2.h> #include <linux/wait.h> #include <linux/time.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-chip-ident.h> #include "vpif_capture.h" #include "vpif.h" MODULE_DESCRIPTION("TI DaVinci VPIF Capture driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(VPIF_CAPTURE_VERSION); #define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg) #define vpif_dbg(level, debug, fmt, arg...) 
\ v4l2_dbg(level, debug, &vpif_obj.v4l2_dev, fmt, ## arg) static int debug = 1; static u32 ch0_numbuffers = 3; static u32 ch1_numbuffers = 3; static u32 ch0_bufsize = 1920 * 1080 * 2; static u32 ch1_bufsize = 720 * 576 * 2; module_param(debug, int, 0644); module_param(ch0_numbuffers, uint, S_IRUGO); module_param(ch1_numbuffers, uint, S_IRUGO); module_param(ch0_bufsize, uint, S_IRUGO); module_param(ch1_bufsize, uint, S_IRUGO); MODULE_PARM_DESC(debug, "Debug level 0-1"); MODULE_PARM_DESC(ch2_numbuffers, "Channel0 buffer count (default:3)"); MODULE_PARM_DESC(ch3_numbuffers, "Channel1 buffer count (default:3)"); MODULE_PARM_DESC(ch2_bufsize, "Channel0 buffer size (default:1920 x 1080 x 2)"); MODULE_PARM_DESC(ch3_bufsize, "Channel1 buffer size (default:720 x 576 x 2)"); static struct vpif_config_params config_params = { .min_numbuffers = 3, .numbuffers[0] = 3, .numbuffers[1] = 3, .min_bufsize[0] = 720 * 480 * 2, .min_bufsize[1] = 720 * 480 * 2, .channel_bufsize[0] = 1920 * 1080 * 2, .channel_bufsize[1] = 720 * 576 * 2, }; /* global variables */ static struct vpif_device vpif_obj = { {NULL} }; static struct device *vpif_dev; /** * vpif_uservirt_to_phys : translate user/virtual address to phy address * @virtp: user/virtual address * * This inline function is used to convert user space virtual address to * physical address. 
*/
static inline u32 vpif_uservirt_to_phys(u32 virtp)
{
	unsigned long physp = 0;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	vma = find_vma(mm, virtp);

	/* For kernel direct-mapped memory, take the easy way */
	if (virtp >= PAGE_OFFSET)
		physp = virt_to_phys((void *)virtp);
	else if (vma && (vma->vm_flags & VM_IO) && (vma->vm_pgoff))
		/**
		 * this will catch, kernel-allocated, mmaped-to-usermode
		 * addresses
		 */
		physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
	else {
		/* otherwise, use get_user_pages() for general userland pages */
		int res, nr_pages = 1;
		struct page *pages;

		/* mmap_sem must be held across get_user_pages() */
		down_read(&current->mm->mmap_sem);

		res = get_user_pages(current, current->mm,
				     virtp, nr_pages, 1, 0, &pages, NULL);
		up_read(&current->mm->mmap_sem);

		if (res == nr_pages)
			/*
			 * NOTE(review): the page reference taken by
			 * get_user_pages() is never released (no put_page),
			 * so the page stays pinned — confirm whether this
			 * leak is intentional for the DMA lifetime.
			 */
			physp = __pa(page_address(&pages[0]) +
				     (virtp & ~PAGE_MASK));
		else {
			vpif_err("get_user_pages failed\n");
			return 0;
		}
	}
	return physp;
}

/**
 * buffer_prepare :  callback function for buffer prepare
 * @q : buffer queue ptr
 * @vb: ptr to video buffer
 * @field: field info
 *
 * This is the callback function for buffer prepare when videobuf_qbuf()
 * function is called.
The buffer is prepared and user space virtual address
 * or user address is converted into  physical address
 */
static int vpif_buffer_prepare(struct videobuf_queue *q,
			       struct videobuf_buffer *vb,
			       enum v4l2_field field)
{
	/* Get the file handle object and channel object */
	struct vpif_fh *fh = q->priv_data;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common;
	unsigned long addr;

	vpif_dbg(2, debug, "vpif_buffer_prepare\n");

	common = &ch->common[VPIF_VIDEO_INDEX];

	/* If buffer is not initialized, initialize it */
	if (VIDEOBUF_NEEDS_INIT == vb->state) {
		vb->width = common->width;
		vb->height = common->height;
		vb->size = vb->width * vb->height;
		vb->field = field;
	}
	/*
	 * NOTE(review): state is marked PREPARED before the alignment
	 * checks below; on failure the buffer stays PREPARED — confirm
	 * this is what the videobuf core expects.
	 */
	vb->state = VIDEOBUF_PREPARED;
	/**
	 * if user pointer memory mechanism is used, get the physical
	 * address of the buffer
	 */
	if (V4L2_MEMORY_USERPTR == common->memory) {
		if (0 == vb->baddr) {
			vpif_dbg(1, debug, "buffer address is 0\n");
			return -EINVAL;
		}
		vb->boff = vpif_uservirt_to_phys(vb->baddr);
		/* hardware requires 8-byte aligned buffer addresses */
		if (!IS_ALIGNED(vb->boff, 8))
			goto exit;
	}

	addr = vb->boff;
	if (q->streaming) {
		if (!IS_ALIGNED((addr + common->ytop_off), 8) ||
		    !IS_ALIGNED((addr + common->ybtm_off), 8) ||
		    !IS_ALIGNED((addr + common->ctop_off), 8) ||
		    !IS_ALIGNED((addr + common->cbtm_off), 8))
			goto exit;
	}
	return 0;
exit:
	vpif_dbg(1, debug, "buffer_prepare:offset is not aligned to 8 bytes\n");
	return -EINVAL;
}

/**
 * vpif_buffer_setup : Callback function for buffer setup.
* @q: buffer queue ptr * @count: number of buffers * @size: size of the buffer * * This callback function is called when reqbuf() is called to adjust * the buffer count and buffer size */ static int vpif_buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size) { /* Get the file handle object and channel object */ struct vpif_fh *fh = q->priv_data; struct channel_obj *ch = fh->channel; struct common_obj *common; common = &ch->common[VPIF_VIDEO_INDEX]; vpif_dbg(2, debug, "vpif_buffer_setup\n"); /* If memory type is not mmap, return */ if (V4L2_MEMORY_MMAP != common->memory) return 0; /* Calculate the size of the buffer */ *size = config_params.channel_bufsize[ch->channel_id]; if (*count < config_params.min_numbuffers) *count = config_params.min_numbuffers; return 0; } /** * vpif_buffer_queue : Callback function to add buffer to DMA queue * @q: ptr to videobuf_queue * @vb: ptr to videobuf_buffer */ static void vpif_buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb) { /* Get the file handle object and channel object */ struct vpif_fh *fh = q->priv_data; struct channel_obj *ch = fh->channel; struct common_obj *common; common = &ch->common[VPIF_VIDEO_INDEX]; vpif_dbg(2, debug, "vpif_buffer_queue\n"); /* add the buffer to the DMA queue */ list_add_tail(&vb->queue, &common->dma_queue); /* Change state of the buffer */ vb->state = VIDEOBUF_QUEUED; } /** * vpif_buffer_release : Callback function to free buffer * @q: buffer queue ptr * @vb: ptr to video buffer * * This function is called from the videobuf layer to free memory * allocated to the buffers */ static void vpif_buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb) { /* Get the file handle object and channel object */ struct vpif_fh *fh = q->priv_data; struct channel_obj *ch = fh->channel; struct common_obj *common; common = &ch->common[VPIF_VIDEO_INDEX]; videobuf_dma_contig_free(q, vb); vb->state = VIDEOBUF_NEEDS_INIT; } static struct videobuf_queue_ops 
video_qops = {
	.buf_setup = vpif_buffer_setup,
	.buf_prepare = vpif_buffer_prepare,
	.buf_queue = vpif_buffer_queue,
	.buf_release = vpif_buffer_release,
};

/* per-object, per-channel "first interrupt" flags consumed by the ISR */
static u8 channel_first_int[VPIF_NUMBER_OF_OBJECTS][2] = { {1, 1} };

/**
 * vpif_process_buffer_complete: process a completed buffer
 * @common: ptr to common channel object
 *
 * This function time stamp the buffer and mark it as DONE. It also
 * wake up any process waiting on the QUEUE and set the next buffer
 * as current
 */
static void vpif_process_buffer_complete(struct common_obj *common)
{
	do_gettimeofday(&common->cur_frm->ts);
	common->cur_frm->state = VIDEOBUF_DONE;
	wake_up_interruptible(&common->cur_frm->done);
	/* Make curFrm pointing to nextFrm */
	common->cur_frm = common->next_frm;
}

/**
 * vpif_schedule_next_buffer: set next buffer address for capture
 * @common : ptr to common channel object
 *
 * This function will get next buffer from the dma queue and
 * set the buffer address in the vpif register for capture.
 * the buffer is marked active
 */
static void vpif_schedule_next_buffer(struct common_obj *common)
{
	unsigned long addr = 0;

	/* caller (the ISR) has already checked the queue is non-empty */
	common->next_frm = list_entry(common->dma_queue.next,
				     struct videobuf_buffer, queue);
	/* Remove that buffer from the buffer queue */
	list_del(&common->next_frm->queue);
	common->next_frm->state = VIDEOBUF_ACTIVE;
	if (V4L2_MEMORY_USERPTR == common->memory)
		addr = common->next_frm->boff;
	else
		addr = videobuf_to_dma_contig(common->next_frm);

	/* Set top and bottom field addresses in VPIF registers */
	common->set_addr(addr + common->ytop_off,
			 addr + common->ybtm_off,
			 addr + common->ctop_off,
			 addr + common->cbtm_off);
}

/**
 * vpif_channel_isr : ISR handler for vpif capture
 * @irq: irq number
 * @dev_id: dev_id ptr
 *
 * It changes status of the captured buffer, takes next buffer from the queue
 * and sets its address in VPIF registers
 */
static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
{
	struct vpif_device *dev = &vpif_obj;
	struct common_obj *common;
	struct channel_obj *ch;
	enum
v4l2_field field; int channel_id = 0; int fid = -1, i; channel_id = *(int *)(dev_id); ch = dev->dev[channel_id]; field = ch->common[VPIF_VIDEO_INDEX].fmt.fmt.pix.field; for (i = 0; i < VPIF_NUMBER_OF_OBJECTS; i++) { common = &ch->common[i]; /* skip If streaming is not started in this channel */ if (0 == common->started) continue; /* Check the field format */ if (1 == ch->vpifparams.std_info.frm_fmt) { /* Progressive mode */ if (list_empty(&common->dma_queue)) continue; if (!channel_first_int[i][channel_id]) vpif_process_buffer_complete(common); channel_first_int[i][channel_id] = 0; vpif_schedule_next_buffer(common); channel_first_int[i][channel_id] = 0; } else { /** * Interlaced mode. If it is first interrupt, ignore * it */ if (channel_first_int[i][channel_id]) { channel_first_int[i][channel_id] = 0; continue; } if (0 == i) { ch->field_id ^= 1; /* Get field id from VPIF registers */ fid = vpif_channel_getfid(ch->channel_id); if (fid != ch->field_id) { /** * If field id does not match stored * field id, make them in sync */ if (0 == fid) ch->field_id = fid; return IRQ_HANDLED; } } /* device field id and local field id are in sync */ if (0 == fid) { /* this is even field */ if (common->cur_frm == common->next_frm) continue; /* mark the current buffer as done */ vpif_process_buffer_complete(common); } else if (1 == fid) { /* odd field */ if (list_empty(&common->dma_queue) || (common->cur_frm != common->next_frm)) continue; vpif_schedule_next_buffer(common); } } } return IRQ_HANDLED; } /** * vpif_update_std_info() - update standard related info * @ch: ptr to channel object * * For a given standard selected by application, update values * in the device data structures */ static int vpif_update_std_info(struct channel_obj *ch) { struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct vpif_params *vpifparams = &ch->vpifparams; const struct vpif_channel_config_params *config; struct vpif_channel_config_params *std_info = &vpifparams->std_info; struct video_obj 
*vid_ch = &ch->video;
	int index;

	vpif_dbg(2, debug, "vpif_update_std_info\n");

	/* scan the static table for a matching SD standard or HD preset */
	for (index = 0; index < vpif_ch_params_count; index++) {
		config = &ch_params[index];
		if (config->hd_sd == 0) {
			vpif_dbg(2, debug, "SD format\n");
			if (config->stdid & vid_ch->stdid) {
				memcpy(std_info, config, sizeof(*config));
				break;
			}
		} else {
			vpif_dbg(2, debug, "HD format\n");
			if (config->dv_preset == vid_ch->dv_preset) {
				memcpy(std_info, config, sizeof(*config));
				break;
			}
		}
	}

	/* standard not found */
	if (index == vpif_ch_params_count)
		return -EINVAL;

	/* propagate the standard geometry into the current pixel format */
	common->fmt.fmt.pix.width = std_info->width;
	common->width = std_info->width;
	common->fmt.fmt.pix.height = std_info->height;
	common->height = std_info->height;
	common->fmt.fmt.pix.bytesperline = std_info->width;
	vpifparams->video_params.hpitch = std_info->width;
	vpifparams->video_params.storage_mode = std_info->frm_fmt;
	return 0;
}

/**
 * vpif_calculate_offsets : This function calculates buffers offsets
 * @ch : ptr to channel object
 *
 * This function calculates buffer offsets for Y and C in the top and
 * bottom field
 */
static void vpif_calculate_offsets(struct channel_obj *ch)
{
	unsigned int hpitch, vpitch, sizeimage;
	struct video_obj *vid_ch = &(ch->video);
	struct vpif_params *vpifparams = &ch->vpifparams;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	enum v4l2_field field = common->fmt.fmt.pix.field;

	vpif_dbg(2, debug, "vpif_calculate_offsets\n");

	/* ANY resolves to NONE (progressive) or INTERLACED per the standard */
	if (V4L2_FIELD_ANY == field) {
		if (vpifparams->std_info.frm_fmt)
			vid_ch->buf_field = V4L2_FIELD_NONE;
		else
			vid_ch->buf_field = V4L2_FIELD_INTERLACED;
	} else
		vid_ch->buf_field = common->fmt.fmt.pix.field;

	if (V4L2_MEMORY_USERPTR == common->memory)
		sizeimage = common->fmt.fmt.pix.sizeimage;
	else
		sizeimage = config_params.channel_bufsize[ch->channel_id];

	hpitch = common->fmt.fmt.pix.bytesperline;
	vpitch = sizeimage / (hpitch * 2);

	if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
	    (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) {
		/* Calculate offsets for Y top, Y Bottom, C top and C Bottom
*/
		common->ytop_off = 0;
		common->ybtm_off = hpitch;
		common->ctop_off = sizeimage / 2;
		common->cbtm_off = sizeimage / 2 + hpitch;
	} else if (V4L2_FIELD_SEQ_TB == vid_ch->buf_field) {
		/* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
		common->ytop_off = 0;
		common->ybtm_off = sizeimage / 4;
		common->ctop_off = sizeimage / 2;
		common->cbtm_off = common->ctop_off + sizeimage / 4;
	} else if (V4L2_FIELD_SEQ_BT == vid_ch->buf_field) {
		/* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
		common->ybtm_off = 0;
		common->ytop_off = sizeimage / 4;
		common->cbtm_off = sizeimage / 2;
		common->ctop_off = common->cbtm_off + sizeimage / 4;
	}
	/* storage mode 1 = field-merged frames, 0 = separate fields */
	if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
	    (V4L2_FIELD_INTERLACED == vid_ch->buf_field))
		vpifparams->video_params.storage_mode = 1;
	else
		vpifparams->video_params.storage_mode = 0;

	if (1 == vpifparams->std_info.frm_fmt)
		vpifparams->video_params.hpitch =
		    common->fmt.fmt.pix.bytesperline;
	else {
		if ((field == V4L2_FIELD_ANY)
		    || (field == V4L2_FIELD_INTERLACED))
			vpifparams->video_params.hpitch =
			    common->fmt.fmt.pix.bytesperline * 2;
		else
			vpifparams->video_params.hpitch =
			    common->fmt.fmt.pix.bytesperline;
	}

	ch->vpifparams.video_params.stdid = vpifparams->std_info.stdid;
}

/**
 * vpif_config_format: configure default frame format in the device
 * ch : ptr to channel object
 */
static void vpif_config_format(struct channel_obj *ch)
{
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];

	vpif_dbg(2, debug, "vpif_config_format\n");

	common->fmt.fmt.pix.field = V4L2_FIELD_ANY;
	/* numbuffers == 0 means the channel relies on user-pointer buffers */
	if (config_params.numbuffers[ch->channel_id] == 0)
		common->memory = V4L2_MEMORY_USERPTR;
	else
		common->memory = V4L2_MEMORY_MMAP;

	common->fmt.fmt.pix.sizeimage
	    = config_params.channel_bufsize[ch->channel_id];

	/* raw-bayer interfaces capture SBGGR8; everything else YUV422P */
	if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER)
		common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;
	else
		common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P;

	common->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
}

/**
 * vpif_get_default_field() -
Get default field type based on interface
 * @vpif_params - ptr to vpif params
 */
static inline enum v4l2_field vpif_get_default_field(
				struct vpif_interface *iface)
{
	return (iface->if_type == VPIF_IF_RAW_BAYER) ? V4L2_FIELD_NONE :
						V4L2_FIELD_INTERLACED;
}

/**
 * vpif_check_format()  - check given pixel format for compatibility
 * @ch - channel  ptr
 * @pixfmt - Given pixel format
 * @update - update the values as per hardware requirement
 *
 * Check the application pixel format for S_FMT and update the input
 * values as per hardware limits for TRY_FMT. The default pixel and
 * field format is selected based on interface type.
 */
static int vpif_check_format(struct channel_obj *ch,
			     struct v4l2_pix_format *pixfmt,
			     int update)
{
	struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]);
	struct vpif_params *vpif_params = &ch->vpifparams;
	enum v4l2_field field = pixfmt->field;
	u32 sizeimage, hpitch, vpitch;
	int ret = -EINVAL;

	vpif_dbg(2, debug, "vpif_check_format\n");
	/**
	 * first check for the pixel format. If if_type is Raw bayer,
	 * only V4L2_PIX_FMT_SBGGR8 format is supported. Otherwise only
	 * V4L2_PIX_FMT_YUV422P is supported
	 */
	if (vpif_params->iface.if_type == VPIF_IF_RAW_BAYER) {
		if (pixfmt->pixelformat != V4L2_PIX_FMT_SBGGR8) {
			/* S_FMT (update==0) rejects; TRY_FMT corrects */
			if (!update) {
				vpif_dbg(2, debug, "invalid pix format\n");
				goto exit;
			}
			pixfmt->pixelformat = V4L2_PIX_FMT_SBGGR8;
		}
	} else {
		if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P) {
			if (!update) {
				vpif_dbg(2, debug, "invalid pixel format\n");
				goto exit;
			}
			pixfmt->pixelformat = V4L2_PIX_FMT_YUV422P;
		}
	}

	if (!(VPIF_VALID_FIELD(field))) {
		if (!update) {
			vpif_dbg(2, debug, "invalid field format\n");
			goto exit;
		}
		/**
		 * By default use FIELD_NONE for RAW Bayer capture
		 * and FIELD_INTERLACED for other interfaces
		 */
		field = vpif_get_default_field(&vpif_params->iface);
	} else if (field == V4L2_FIELD_ANY)
		/* unsupported field.
Use default */ field = vpif_get_default_field(&vpif_params->iface); /* validate the hpitch */ hpitch = pixfmt->bytesperline; if (hpitch < vpif_params->std_info.width) { if (!update) { vpif_dbg(2, debug, "invalid hpitch\n"); goto exit; } hpitch = vpif_params->std_info.width; } if (V4L2_MEMORY_USERPTR == common->memory) sizeimage = pixfmt->sizeimage; else sizeimage = config_params.channel_bufsize[ch->channel_id]; vpitch = sizeimage / (hpitch * 2); /* validate the vpitch */ if (vpitch < vpif_params->std_info.height) { if (!update) { vpif_dbg(2, debug, "Invalid vpitch\n"); goto exit; } vpitch = vpif_params->std_info.height; } /* Check for 8 byte alignment */ if (!ALIGN(hpitch, 8)) { if (!update) { vpif_dbg(2, debug, "invalid pitch alignment\n"); goto exit; } /* adjust to next 8 byte boundary */ hpitch = (((hpitch + 7) / 8) * 8); } /* if update is set, modify the bytesperline and sizeimage */ if (update) { pixfmt->bytesperline = hpitch; pixfmt->sizeimage = hpitch * vpitch * 2; } /** * Image width and height is always based on current standard width and * height */ pixfmt->width = common->fmt.fmt.pix.width; pixfmt->height = common->fmt.fmt.pix.height; return 0; exit: return ret; } /** * vpif_config_addr() - function to configure buffer address in vpif * @ch - channel ptr * @muxmode - channel mux mode */ static void vpif_config_addr(struct channel_obj *ch, int muxmode) { struct common_obj *common; vpif_dbg(2, debug, "vpif_config_addr\n"); common = &(ch->common[VPIF_VIDEO_INDEX]); if (VPIF_CHANNEL1_VIDEO == ch->channel_id) common->set_addr = ch1_set_videobuf_addr; else if (2 == muxmode) common->set_addr = ch0_set_videobuf_addr_yc_nmux; else common->set_addr = ch0_set_videobuf_addr; } /** * vpfe_mmap : It is used to map kernel space buffers into user spaces * @filep: file pointer * @vma: ptr to vm_area_struct */ static int vpif_mmap(struct file *filep, struct vm_area_struct *vma) { /* Get the channel object and file handle object */ struct vpif_fh *fh = filep->private_data; 
struct channel_obj *ch = fh->channel; struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]); vpif_dbg(2, debug, "vpif_mmap\n"); return videobuf_mmap_mapper(&common->buffer_queue, vma); } /** * vpif_poll: It is used for select/poll system call * @filep: file pointer * @wait: poll table to wait */ static unsigned int vpif_poll(struct file *filep, poll_table * wait) { struct vpif_fh *fh = filep->private_data; struct channel_obj *channel = fh->channel; struct common_obj *common = &(channel->common[VPIF_VIDEO_INDEX]); vpif_dbg(2, debug, "vpif_poll\n"); if (common->started) return videobuf_poll_stream(filep, &common->buffer_queue, wait); return 0; } /** * vpif_open : vpif open handler * @filep: file ptr * * It creates object of file handle structure and stores it in private_data * member of filepointer */ static int vpif_open(struct file *filep) { struct vpif_capture_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(filep); struct common_obj *common; struct video_obj *vid_ch; struct channel_obj *ch; struct vpif_fh *fh; int i; vpif_dbg(2, debug, "vpif_open\n"); ch = video_get_drvdata(vdev); vid_ch = &ch->video; common = &ch->common[VPIF_VIDEO_INDEX]; if (NULL == ch->curr_subdev_info) { /** * search through the sub device to see a registered * sub device and make it as current sub device */ for (i = 0; i < config->subdev_count; i++) { if (vpif_obj.sd[i]) { /* the sub device is registered */ ch->curr_subdev_info = &config->subdev_info[i]; /* make first input as the current input */ vid_ch->input_idx = 0; break; } } if (i == config->subdev_count) { vpif_err("No sub device registered\n"); return -ENOENT; } } /* Allocate memory for the file handle object */ fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL); if (NULL == fh) { vpif_err("unable to allocate memory for file handle object\n"); return -ENOMEM; } /* store pointer to fh in private_data member of filep */ filep->private_data = fh; fh->channel = ch; fh->initialized = 0; /* If 
decoder is not initialized. initialize it */ if (!ch->initialized) { fh->initialized = 1; ch->initialized = 1; memset(&(ch->vpifparams), 0, sizeof(struct vpif_params)); } /* Increment channel usrs counter */ ch->usrs++; /* Set io_allowed member to false */ fh->io_allowed[VPIF_VIDEO_INDEX] = 0; /* Initialize priority of this instance to default priority */ fh->prio = V4L2_PRIORITY_UNSET; v4l2_prio_open(&ch->prio, &fh->prio); return 0; } /** * vpif_release : function to clean up file close * @filep: file pointer * * This function deletes buffer queue, frees the buffers and the vpfe file * handle */ static int vpif_release(struct file *filep) { struct vpif_fh *fh = filep->private_data; struct channel_obj *ch = fh->channel; struct common_obj *common; vpif_dbg(2, debug, "vpif_release\n"); common = &ch->common[VPIF_VIDEO_INDEX]; /* if this instance is doing IO */ if (fh->io_allowed[VPIF_VIDEO_INDEX]) { /* Reset io_usrs member of channel object */ common->io_usrs = 0; /* Disable channel as per its device type and channel id */ if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { enable_channel0(0); channel0_intr_enable(0); } if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) || (2 == common->started)) { enable_channel1(0); channel1_intr_enable(0); } common->started = 0; /* Free buffers allocated */ videobuf_queue_cancel(&common->buffer_queue); videobuf_mmap_free(&common->buffer_queue); } /* Decrement channel usrs counter */ ch->usrs--; /* Close the priority */ v4l2_prio_close(&ch->prio, fh->prio); if (fh->initialized) ch->initialized = 0; filep->private_data = NULL; kfree(fh); return 0; } /** * vpif_reqbufs() - request buffer handler * @file: file ptr * @priv: file handle * @reqbuf: request buffer structure ptr */ static int vpif_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *reqbuf) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common; u8 index = 0; vpif_dbg(2, debug, "vpif_reqbufs\n"); /** * This file handle has not 
initialized the channel, * It is not allowed to do settings */ if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) || (VPIF_CHANNEL1_VIDEO == ch->channel_id)) { if (!fh->initialized) { vpif_dbg(1, debug, "Channel Busy\n"); return -EBUSY; } } if (V4L2_BUF_TYPE_VIDEO_CAPTURE != reqbuf->type) return -EINVAL; index = VPIF_VIDEO_INDEX; common = &ch->common[index]; if (0 != common->io_usrs) return -EBUSY; /* Initialize videobuf queue as per the buffer type */ videobuf_queue_dma_contig_init(&common->buffer_queue, &video_qops, NULL, &common->irqlock, reqbuf->type, common->fmt.fmt.pix.field, sizeof(struct videobuf_buffer), fh, &common->lock); /* Set io allowed member of file handle to TRUE */ fh->io_allowed[index] = 1; /* Increment io usrs member of channel object to 1 */ common->io_usrs = 1; /* Store type of memory requested in channel object */ common->memory = reqbuf->memory; INIT_LIST_HEAD(&common->dma_queue); /* Allocate buffers */ return videobuf_reqbufs(&common->buffer_queue, reqbuf); } /** * vpif_querybuf() - query buffer handler * @file: file ptr * @priv: file handle * @buf: v4l2 buffer structure ptr */ static int vpif_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_dbg(2, debug, "vpif_querybuf\n"); if (common->fmt.type != buf->type) return -EINVAL; if (common->memory != V4L2_MEMORY_MMAP) { vpif_dbg(1, debug, "Invalid memory\n"); return -EINVAL; } return videobuf_querybuf(&common->buffer_queue, buf); } /** * vpif_qbuf() - query buffer handler * @file: file ptr * @priv: file handle * @buf: v4l2 buffer structure ptr */ static int vpif_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct v4l2_buffer tbuf = *buf; struct videobuf_buffer *buf1; unsigned long addr = 0; unsigned long flags; int 
ret = 0; vpif_dbg(2, debug, "vpif_qbuf\n"); if (common->fmt.type != tbuf.type) { vpif_err("invalid buffer type\n"); return -EINVAL; } if (!fh->io_allowed[VPIF_VIDEO_INDEX]) { vpif_err("fh io not allowed \n"); return -EACCES; } if (!(list_empty(&common->dma_queue)) || (common->cur_frm != common->next_frm) || !common->started || (common->started && (0 == ch->field_id))) return videobuf_qbuf(&common->buffer_queue, buf); /* bufferqueue is empty store buffer address in VPIF registers */ mutex_lock(&common->buffer_queue.vb_lock); buf1 = common->buffer_queue.bufs[tbuf.index]; if ((buf1->state == VIDEOBUF_QUEUED) || (buf1->state == VIDEOBUF_ACTIVE)) { vpif_err("invalid state\n"); goto qbuf_exit; } switch (buf1->memory) { case V4L2_MEMORY_MMAP: if (buf1->baddr == 0) goto qbuf_exit; break; case V4L2_MEMORY_USERPTR: if (tbuf.length < buf1->bsize) goto qbuf_exit; if ((VIDEOBUF_NEEDS_INIT != buf1->state) && (buf1->baddr != tbuf.m.userptr)) { vpif_buffer_release(&common->buffer_queue, buf1); buf1->baddr = tbuf.m.userptr; } break; default: goto qbuf_exit; } local_irq_save(flags); ret = vpif_buffer_prepare(&common->buffer_queue, buf1, common->buffer_queue.field); if (ret < 0) { local_irq_restore(flags); goto qbuf_exit; } buf1->state = VIDEOBUF_ACTIVE; if (V4L2_MEMORY_USERPTR == common->memory) addr = buf1->boff; else addr = videobuf_to_dma_contig(buf1); common->next_frm = buf1; common->set_addr(addr + common->ytop_off, addr + common->ybtm_off, addr + common->ctop_off, addr + common->cbtm_off); local_irq_restore(flags); list_add_tail(&buf1->stream, &common->buffer_queue.stream); mutex_unlock(&common->buffer_queue.vb_lock); return 0; qbuf_exit: mutex_unlock(&common->buffer_queue.vb_lock); return -EINVAL; } /** * vpif_dqbuf() - query buffer handler * @file: file ptr * @priv: file handle * @buf: v4l2 buffer structure ptr */ static int vpif_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj 
*common = &ch->common[VPIF_VIDEO_INDEX]; vpif_dbg(2, debug, "vpif_dqbuf\n"); return videobuf_dqbuf(&common->buffer_queue, buf, file->f_flags & O_NONBLOCK); } /** * vpif_streamon() - streamon handler * @file: file ptr * @priv: file handle * @buftype: v4l2 buffer type */ static int vpif_streamon(struct file *file, void *priv, enum v4l2_buf_type buftype) { struct vpif_capture_config *config = vpif_dev->platform_data; struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct channel_obj *oth_ch = vpif_obj.dev[!ch->channel_id]; struct vpif_params *vpif; unsigned long addr = 0; int ret = 0; vpif_dbg(2, debug, "vpif_streamon\n"); vpif = &ch->vpifparams; if (buftype != V4L2_BUF_TYPE_VIDEO_CAPTURE) { vpif_dbg(1, debug, "buffer type not supported\n"); return -EINVAL; } /* If file handle is not allowed IO, return error */ if (!fh->io_allowed[VPIF_VIDEO_INDEX]) { vpif_dbg(1, debug, "io not allowed\n"); return -EACCES; } /* If Streaming is already started, return error */ if (common->started) { vpif_dbg(1, debug, "channel->started\n"); return -EBUSY; } if ((ch->channel_id == VPIF_CHANNEL0_VIDEO && oth_ch->common[VPIF_VIDEO_INDEX].started && vpif->std_info.ycmux_mode == 0) || ((ch->channel_id == VPIF_CHANNEL1_VIDEO) && (2 == oth_ch->common[VPIF_VIDEO_INDEX].started))) { vpif_dbg(1, debug, "other channel is being used\n"); return -EBUSY; } ret = vpif_check_format(ch, &common->fmt.fmt.pix, 0); if (ret) return ret; /* Enable streamon on the sub device */ ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video, s_stream, 1); if (ret && (ret != -ENOIOCTLCMD)) { vpif_dbg(1, debug, "stream on failed in subdev\n"); return ret; } /* Call videobuf_streamon to start streaming in videobuf */ ret = videobuf_streamon(&common->buffer_queue); if (ret) { vpif_dbg(1, debug, "videobuf_streamon\n"); return ret; } /* If buffer queue is empty, return error */ if (list_empty(&common->dma_queue)) { vpif_dbg(1, debug, "buffer 
queue is empty\n"); ret = -EIO; goto exit; } /* Get the next frame from the buffer queue */ common->cur_frm = list_entry(common->dma_queue.next, struct videobuf_buffer, queue); common->next_frm = common->cur_frm; /* Remove buffer from the buffer queue */ list_del(&common->cur_frm->queue); /* Mark state of the current frame to active */ common->cur_frm->state = VIDEOBUF_ACTIVE; /* Initialize field_id and started member */ ch->field_id = 0; common->started = 1; if (V4L2_MEMORY_USERPTR == common->memory) addr = common->cur_frm->boff; else addr = videobuf_to_dma_contig(common->cur_frm); /* Calculate the offset for Y and C data in the buffer */ vpif_calculate_offsets(ch); if ((vpif->std_info.frm_fmt && ((common->fmt.fmt.pix.field != V4L2_FIELD_NONE) && (common->fmt.fmt.pix.field != V4L2_FIELD_ANY))) || (!vpif->std_info.frm_fmt && (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) { vpif_dbg(1, debug, "conflict in field format and std format\n"); ret = -EINVAL; goto exit; } /* configure 1 or 2 channel mode */ ret = config->setup_input_channel_mode(vpif->std_info.ycmux_mode); if (ret < 0) { vpif_dbg(1, debug, "can't set vpif channel mode\n"); goto exit; } /* Call vpif_set_params function to set the parameters and addresses */ ret = vpif_set_video_params(vpif, ch->channel_id); if (ret < 0) { vpif_dbg(1, debug, "can't set video params\n"); goto exit; } common->started = ret; vpif_config_addr(ch, ret); common->set_addr(addr + common->ytop_off, addr + common->ybtm_off, addr + common->ctop_off, addr + common->cbtm_off); /** * Set interrupt for both the fields in VPIF Register enable channel in * VPIF register */ if ((VPIF_CHANNEL0_VIDEO == ch->channel_id)) { channel0_intr_assert(); channel0_intr_enable(1); enable_channel0(1); } if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) || (common->started == 2)) { channel1_intr_assert(); channel1_intr_enable(1); enable_channel1(1); } channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1; return ret; exit: 
videobuf_streamoff(&common->buffer_queue); return ret; } /** * vpif_streamoff() - streamoff handler * @file: file ptr * @priv: file handle * @buftype: v4l2 buffer type */ static int vpif_streamoff(struct file *file, void *priv, enum v4l2_buf_type buftype) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; int ret; vpif_dbg(2, debug, "vpif_streamoff\n"); if (buftype != V4L2_BUF_TYPE_VIDEO_CAPTURE) { vpif_dbg(1, debug, "buffer type not supported\n"); return -EINVAL; } /* If io is allowed for this file handle, return error */ if (!fh->io_allowed[VPIF_VIDEO_INDEX]) { vpif_dbg(1, debug, "io not allowed\n"); return -EACCES; } /* If streaming is not started, return error */ if (!common->started) { vpif_dbg(1, debug, "channel->started\n"); return -EINVAL; } /* disable channel */ if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { enable_channel0(0); channel0_intr_enable(0); } else { enable_channel1(0); channel1_intr_enable(0); } common->started = 0; ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video, s_stream, 0); if (ret && (ret != -ENOIOCTLCMD)) vpif_dbg(1, debug, "stream off failed in subdev\n"); return videobuf_streamoff(&common->buffer_queue); } /** * vpif_map_sub_device_to_input() - Maps sub device to input * @ch - ptr to channel * @config - ptr to capture configuration * @input_index - Given input index from application * @sub_device_index - index into sd table * * lookup the sub device information for a given input index. * we report all the inputs to application. 
inputs table also
 * has sub device name for each input
 */
static struct vpif_subdev_info *vpif_map_sub_device_to_input(
				struct channel_obj *ch,
				struct vpif_capture_config *vpif_cfg,
				int input_index,
				int *sub_device_index)
{
	struct vpif_capture_chan_config *chan_cfg;
	struct vpif_subdev_info *subdev_info = NULL;
	const char *subdev_name = NULL;
	int i;

	vpif_dbg(2, debug, "vpif_map_sub_device_to_input\n");

	chan_cfg = &vpif_cfg->chan_config[ch->channel_id];

	/*
	 * search through the inputs to find the sub device supporting
	 * the input
	 */
	for (i = 0; i < chan_cfg->input_count; i++) {
		/* For each sub device, loop through input */
		if (i == input_index) {
			subdev_name = chan_cfg->inputs[i].subdev_name;
			break;
		}
	}

	/* if reached maximum. return null */
	if (i == chan_cfg->input_count || (NULL == subdev_name))
		return NULL;

	/* loop through the sub device list to get the sub device info */
	for (i = 0; i < vpif_cfg->subdev_count; i++) {
		subdev_info = &vpif_cfg->subdev_info[i];
		if (!strcmp(subdev_info->name, subdev_name))
			break;
	}

	/*
	 * No sub device has a matching name: fail the lookup.  The old
	 * code returned the last table entry here, so the caller
	 * (vpif_s_input) believed the lookup succeeded and routed through
	 * the wrong sub device with a stale sd index.
	 */
	if (i == vpif_cfg->subdev_count)
		return NULL;

	/* check if the sub device is registered */
	if (NULL == vpif_obj.sd[i])
		return NULL;

	*sub_device_index = i;
	return subdev_info;
}

/**
 * vpif_querystd() - querystd handler
 * @file: file ptr
 * @priv: file handle
 * @std_id: ptr to std id
 *
 * This function is called to detect standard at the selected input
 */
static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	int ret = 0;

	vpif_dbg(2, debug, "vpif_querystd\n");

	/* Call querystd function of decoder device */
	ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video,
				querystd, std_id);
	if (ret < 0)
		vpif_dbg(1, debug, "Failed to set standard for sub devices\n");

	return ret;
}

/**
 * vpif_g_std() - get STD handler
 * @file: file ptr
 * @priv: file handle
 * @std_id: ptr to std id
 */
static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
	struct
vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; vpif_dbg(2, debug, "vpif_g_std\n"); *std = ch->video.stdid; return 0; } /** * vpif_s_std() - set STD handler * @file: file ptr * @priv: file handle * @std_id: ptr to std id */ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; int ret = 0; vpif_dbg(2, debug, "vpif_s_std\n"); if (common->started) { vpif_err("streaming in progress\n"); return -EBUSY; } if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) || (VPIF_CHANNEL1_VIDEO == ch->channel_id)) { if (!fh->initialized) { vpif_dbg(1, debug, "Channel Busy\n"); return -EBUSY; } } ret = v4l2_prio_check(&ch->prio, fh->prio); if (0 != ret) return ret; fh->initialized = 1; /* Call encoder subdevice function to set the standard */ ch->video.stdid = *std_id; ch->video.dv_preset = V4L2_DV_INVALID; memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings)); /* Get the information about the standard */ if (vpif_update_std_info(ch)) { vpif_err("Error getting the standard info\n"); return -EINVAL; } /* Configure the default format information */ vpif_config_format(ch); /* set standard in the sub device */ ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core, s_std, *std_id); if (ret < 0) vpif_dbg(1, debug, "Failed to set standard for sub devices\n"); return ret; } /** * vpif_enum_input() - ENUMINPUT handler * @file: file ptr * @priv: file handle * @input: ptr to input structure */ static int vpif_enum_input(struct file *file, void *priv, struct v4l2_input *input) { struct vpif_capture_config *config = vpif_dev->platform_data; struct vpif_capture_chan_config *chan_cfg; struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; chan_cfg = &config->chan_config[ch->channel_id]; if (input->index >= chan_cfg->input_count) { vpif_dbg(1, debug, "Invalid input index\n"); return -EINVAL; } memcpy(input, 
&chan_cfg->inputs[input->index].input, sizeof(*input)); return 0; } /** * vpif_g_input() - Get INPUT handler * @file: file ptr * @priv: file handle * @index: ptr to input index */ static int vpif_g_input(struct file *file, void *priv, unsigned int *index) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct video_obj *vid_ch = &ch->video; *index = vid_ch->input_idx; return 0; } /** * vpif_s_input() - Set INPUT handler * @file: file ptr * @priv: file handle * @index: input index */ static int vpif_s_input(struct file *file, void *priv, unsigned int index) { struct vpif_capture_config *config = vpif_dev->platform_data; struct vpif_capture_chan_config *chan_cfg; struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct video_obj *vid_ch = &ch->video; struct vpif_subdev_info *subdev_info; int ret = 0, sd_index = 0; u32 input = 0, output = 0; chan_cfg = &config->chan_config[ch->channel_id]; if (common->started) { vpif_err("Streaming in progress\n"); return -EBUSY; } if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) || (VPIF_CHANNEL1_VIDEO == ch->channel_id)) { if (!fh->initialized) { vpif_dbg(1, debug, "Channel Busy\n"); return -EBUSY; } } ret = v4l2_prio_check(&ch->prio, fh->prio); if (0 != ret) return ret; fh->initialized = 1; subdev_info = vpif_map_sub_device_to_input(ch, config, index, &sd_index); if (NULL == subdev_info) { vpif_dbg(1, debug, "couldn't lookup sub device for the input index\n"); return -EINVAL; } /* first setup input path from sub device to vpif */ if (config->setup_input_path) { ret = config->setup_input_path(ch->channel_id, subdev_info->name); if (ret < 0) { vpif_dbg(1, debug, "couldn't setup input path for the" " sub device %s, for input index %d\n", subdev_info->name, index); return ret; } } if (subdev_info->can_route) { input = subdev_info->input; output = subdev_info->output; ret = v4l2_subdev_call(vpif_obj.sd[sd_index], video, s_routing, input, output, 0); 
if (ret < 0) { vpif_dbg(1, debug, "Failed to set input\n"); return ret; } } vid_ch->input_idx = index; ch->curr_subdev_info = subdev_info; ch->curr_sd_index = sd_index; /* copy interface parameters to vpif */ ch->vpifparams.iface = subdev_info->vpif_if; /* update tvnorms from the sub device input info */ ch->video_dev->tvnorms = chan_cfg->inputs[index].input.std; return ret; } /** * vpif_enum_fmt_vid_cap() - ENUM_FMT handler * @file: file ptr * @priv: file handle * @index: input index */ static int vpif_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *fmt) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; if (fmt->index != 0) { vpif_dbg(1, debug, "Invalid format index\n"); return -EINVAL; } /* Fill in the information about format */ if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) { fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; strcpy(fmt->description, "Raw Mode -Bayer Pattern GrRBGb"); fmt->pixelformat = V4L2_PIX_FMT_SBGGR8; } else { fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; strcpy(fmt->description, "YCbCr4:2:2 YC Planar"); fmt->pixelformat = V4L2_PIX_FMT_YUV422P; } return 0; } /** * vpif_try_fmt_vid_cap() - TRY_FMT handler * @file: file ptr * @priv: file handle * @fmt: ptr to v4l2 format structure */ static int vpif_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; return vpif_check_format(ch, pixfmt, 1); } /** * vpif_g_fmt_vid_cap() - Set INPUT handler * @file: file ptr * @priv: file handle * @fmt: ptr to v4l2 format structure */ static int vpif_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; /* Check the validity of the buffer type */ if (common->fmt.type != fmt->type) return -EINVAL; /* Fill in the information about format */ *fmt = 
common->fmt; return 0; } /** * vpif_s_fmt_vid_cap() - Set FMT handler * @file: file ptr * @priv: file handle * @fmt: ptr to v4l2 format structure */ static int vpif_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct v4l2_pix_format *pixfmt; int ret = 0; vpif_dbg(2, debug, "%s\n", __func__); /* If streaming is started, return error */ if (common->started) { vpif_dbg(1, debug, "Streaming is started\n"); return -EBUSY; } if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) || (VPIF_CHANNEL1_VIDEO == ch->channel_id)) { if (!fh->initialized) { vpif_dbg(1, debug, "Channel Busy\n"); return -EBUSY; } } ret = v4l2_prio_check(&ch->prio, fh->prio); if (0 != ret) return ret; fh->initialized = 1; pixfmt = &fmt->fmt.pix; /* Check for valid field format */ ret = vpif_check_format(ch, pixfmt, 0); if (ret) return ret; /* store the format in the channel object */ common->fmt = *fmt; return 0; } /** * vpif_querycap() - QUERYCAP handler * @file: file ptr * @priv: file handle * @cap: ptr to v4l2_capability structure */ static int vpif_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct vpif_capture_config *config = vpif_dev->platform_data; cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; strlcpy(cap->driver, "vpif capture", sizeof(cap->driver)); strlcpy(cap->bus_info, "DM646x Platform", sizeof(cap->bus_info)); strlcpy(cap->card, config->card_name, sizeof(cap->card)); return 0; } /** * vpif_g_priority() - get priority handler * @file: file ptr * @priv: file handle * @prio: ptr to v4l2_priority structure */ static int vpif_g_priority(struct file *file, void *priv, enum v4l2_priority *prio) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; *prio = v4l2_prio_max(&ch->prio); return 0; } /** * vpif_s_priority() - set priority handler * @file: file ptr * @priv: file handle * @prio: ptr to 
v4l2_priority structure */ static int vpif_s_priority(struct file *file, void *priv, enum v4l2_priority p) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; return v4l2_prio_change(&ch->prio, &fh->prio, p); } /** * vpif_cropcap() - cropcap handler * @file: file ptr * @priv: file handle * @crop: ptr to v4l2_cropcap structure */ static int vpif_cropcap(struct file *file, void *priv, struct v4l2_cropcap *crop) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; if (V4L2_BUF_TYPE_VIDEO_CAPTURE != crop->type) return -EINVAL; crop->bounds.left = 0; crop->bounds.top = 0; crop->bounds.height = common->height; crop->bounds.width = common->width; crop->defrect = crop->bounds; return 0; } /** * vpif_enum_dv_presets() - ENUM_DV_PRESETS handler * @file: file ptr * @priv: file handle * @preset: input preset */ static int vpif_enum_dv_presets(struct file *file, void *priv, struct v4l2_dv_enum_preset *preset) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video, enum_dv_presets, preset); } /** * vpif_query_dv_presets() - QUERY_DV_PRESET handler * @file: file ptr * @priv: file handle * @preset: input preset */ static int vpif_query_dv_preset(struct file *file, void *priv, struct v4l2_dv_preset *preset) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video, query_dv_preset, preset); } /** * vpif_s_dv_presets() - S_DV_PRESETS handler * @file: file ptr * @priv: file handle * @preset: input preset */ static int vpif_s_dv_preset(struct file *file, void *priv, struct v4l2_dv_preset *preset) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; int ret = 0; if (common->started) { vpif_dbg(1, debug, "streaming in progress\n"); return -EBUSY; } if ((VPIF_CHANNEL0_VIDEO == 
ch->channel_id) || (VPIF_CHANNEL1_VIDEO == ch->channel_id)) { if (!fh->initialized) { vpif_dbg(1, debug, "Channel Busy\n"); return -EBUSY; } } ret = v4l2_prio_check(&ch->prio, fh->prio); if (ret) return ret; fh->initialized = 1; /* Call encoder subdevice function to set the standard */ if (mutex_lock_interruptible(&common->lock)) return -ERESTARTSYS; ch->video.dv_preset = preset->preset; ch->video.stdid = V4L2_STD_UNKNOWN; memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings)); /* Get the information about the standard */ if (vpif_update_std_info(ch)) { vpif_dbg(1, debug, "Error getting the standard info\n"); ret = -EINVAL; } else { /* Configure the default format information */ vpif_config_format(ch); ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video, s_dv_preset, preset); } mutex_unlock(&common->lock); return ret; } /** * vpif_g_dv_presets() - G_DV_PRESETS handler * @file: file ptr * @priv: file handle * @preset: input preset */ static int vpif_g_dv_preset(struct file *file, void *priv, struct v4l2_dv_preset *preset) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; preset->preset = ch->video.dv_preset; return 0; } /** * vpif_s_dv_timings() - S_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: digital video timings */ static int vpif_s_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct vpif_params *vpifparams = &ch->vpifparams; struct vpif_channel_config_params *std_info = &vpifparams->std_info; struct video_obj *vid_ch = &ch->video; struct v4l2_bt_timings *bt = &vid_ch->bt_timings; int ret; if (timings->type != V4L2_DV_BT_656_1120) { vpif_dbg(2, debug, "Timing type not defined\n"); return -EINVAL; } /* Configure subdevice timings, if any */ ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video, s_dv_timings, timings); if (ret == -ENOIOCTLCMD) { vpif_dbg(2, debug, "Custom DV timings not supported by " 
"subdevice\n"); return -EINVAL; } if (ret < 0) { vpif_dbg(2, debug, "Error setting custom DV timings\n"); return ret; } if (!(timings->bt.width && timings->bt.height && (timings->bt.hbackporch || timings->bt.hfrontporch || timings->bt.hsync) && timings->bt.vfrontporch && (timings->bt.vbackporch || timings->bt.vsync))) { vpif_dbg(2, debug, "Timings for width, height, " "horizontal back porch, horizontal sync, " "horizontal front porch, vertical back porch, " "vertical sync and vertical back porch " "must be defined\n"); return -EINVAL; } *bt = timings->bt; /* Configure video port timings */ std_info->eav2sav = bt->hbackporch + bt->hfrontporch + bt->hsync - 8; std_info->sav2eav = bt->width; std_info->l1 = 1; std_info->l3 = bt->vsync + bt->vbackporch + 1; if (bt->interlaced) { if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) { std_info->vsize = bt->height * 2 + bt->vfrontporch + bt->vsync + bt->vbackporch + bt->il_vfrontporch + bt->il_vsync + bt->il_vbackporch; std_info->l5 = std_info->vsize/2 - (bt->vfrontporch - 1); std_info->l7 = std_info->vsize/2 + 1; std_info->l9 = std_info->l7 + bt->il_vsync + bt->il_vbackporch + 1; std_info->l11 = std_info->vsize - (bt->il_vfrontporch - 1); } else { vpif_dbg(2, debug, "Required timing values for " "interlaced BT format missing\n"); return -EINVAL; } } else { std_info->vsize = bt->height + bt->vfrontporch + bt->vsync + bt->vbackporch; std_info->l5 = std_info->vsize - (bt->vfrontporch - 1); } strncpy(std_info->name, "Custom timings BT656/1120", VPIF_MAX_NAME); std_info->width = bt->width; std_info->height = bt->height; std_info->frm_fmt = bt->interlaced ? 
0 : 1; std_info->ycmux_mode = 0; std_info->capture_format = 0; std_info->vbi_supported = 0; std_info->hd_sd = 1; std_info->stdid = 0; std_info->dv_preset = V4L2_DV_INVALID; vid_ch->stdid = 0; vid_ch->dv_preset = V4L2_DV_INVALID; return 0; } /** * vpif_g_dv_timings() - G_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: digital video timings */ static int vpif_g_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct video_obj *vid_ch = &ch->video; struct v4l2_bt_timings *bt = &vid_ch->bt_timings; timings->bt = *bt; return 0; } /* * vpif_g_chip_ident() - Identify the chip * @file: file ptr * @priv: file handle * @chip: chip identity * * Returns zero or -EINVAL if read operations fails. */ static int vpif_g_chip_ident(struct file *file, void *priv, struct v4l2_dbg_chip_ident *chip) { chip->ident = V4L2_IDENT_NONE; chip->revision = 0; if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER && chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) { vpif_dbg(2, debug, "match_type is invalid.\n"); return -EINVAL; } return v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 0, core, g_chip_ident, chip); } #ifdef CONFIG_VIDEO_ADV_DEBUG /* * vpif_dbg_g_register() - Read register * @file: file ptr * @priv: file handle * @reg: register to be read * * Debugging only * Returns zero or -EINVAL if read operations fails. */ static int vpif_dbg_g_register(struct file *file, void *priv, struct v4l2_dbg_register *reg){ struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core, g_register, reg); } /* * vpif_dbg_s_register() - Write to register * @file: file ptr * @priv: file handle * @reg: register to be modified * * Debugging only * Returns zero or -EINVAL if write operations fails. 
*/ static int vpif_dbg_s_register(struct file *file, void *priv, struct v4l2_dbg_register *reg){ struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core, s_register, reg); } #endif /* * vpif_log_status() - Status information * @file: file ptr * @priv: file handle * * Returns zero. */ static int vpif_log_status(struct file *filep, void *priv) { /* status for sub devices */ v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status); return 0; } /* vpif capture ioctl operations */ static const struct v4l2_ioctl_ops vpif_ioctl_ops = { .vidioc_querycap = vpif_querycap, .vidioc_g_priority = vpif_g_priority, .vidioc_s_priority = vpif_s_priority, .vidioc_enum_fmt_vid_cap = vpif_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vpif_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vpif_s_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vpif_try_fmt_vid_cap, .vidioc_enum_input = vpif_enum_input, .vidioc_s_input = vpif_s_input, .vidioc_g_input = vpif_g_input, .vidioc_reqbufs = vpif_reqbufs, .vidioc_querybuf = vpif_querybuf, .vidioc_querystd = vpif_querystd, .vidioc_s_std = vpif_s_std, .vidioc_g_std = vpif_g_std, .vidioc_qbuf = vpif_qbuf, .vidioc_dqbuf = vpif_dqbuf, .vidioc_streamon = vpif_streamon, .vidioc_streamoff = vpif_streamoff, .vidioc_cropcap = vpif_cropcap, .vidioc_enum_dv_presets = vpif_enum_dv_presets, .vidioc_s_dv_preset = vpif_s_dv_preset, .vidioc_g_dv_preset = vpif_g_dv_preset, .vidioc_query_dv_preset = vpif_query_dv_preset, .vidioc_s_dv_timings = vpif_s_dv_timings, .vidioc_g_dv_timings = vpif_g_dv_timings, .vidioc_g_chip_ident = vpif_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = vpif_dbg_g_register, .vidioc_s_register = vpif_dbg_s_register, #endif .vidioc_log_status = vpif_log_status, }; /* vpif file operations */ static struct v4l2_file_operations vpif_fops = { .owner = THIS_MODULE, .open = vpif_open, .release = vpif_release, .unlocked_ioctl = video_ioctl2, .mmap = vpif_mmap, .poll = vpif_poll }; 
/* vpif video template: cloned into each channel's allocated video_device */
static struct video_device vpif_video_template = {
        .name           = "vpif",
        .fops           = &vpif_fops,
        .minor          = -1,    /* let the V4L2 core pick the minor number */
        .ioctl_ops      = &vpif_ioctl_ops,
};

/**
 * initialize_vpif() - Initialize vpif data structures
 *
 * Clamps the module-parameter buffer counts/sizes against the driver
 * minimums, copies them into config_params, and allocates one
 * channel_obj per capture device.
 *
 * Returns 0 on success or -ENOMEM if a channel object allocation fails
 * (already-allocated channel objects are freed before returning).
 */
static int initialize_vpif(void)
{
        int err = 0, i, j;
        /* index of the allocation that failed; only read after the goto below */
        int free_channel_objects_index;

        /* Default number of buffers should be 3 */
        if ((ch0_numbuffers > 0) &&
            (ch0_numbuffers < config_params.min_numbuffers))
                ch0_numbuffers = config_params.min_numbuffers;
        if ((ch1_numbuffers > 0) &&
            (ch1_numbuffers < config_params.min_numbuffers))
                ch1_numbuffers = config_params.min_numbuffers;

        /* Set buffer size to min buffers size if it is invalid */
        if (ch0_bufsize < config_params.min_bufsize[VPIF_CHANNEL0_VIDEO])
                ch0_bufsize = config_params.min_bufsize[VPIF_CHANNEL0_VIDEO];
        if (ch1_bufsize < config_params.min_bufsize[VPIF_CHANNEL1_VIDEO])
                ch1_bufsize = config_params.min_bufsize[VPIF_CHANNEL1_VIDEO];

        config_params.numbuffers[VPIF_CHANNEL0_VIDEO] = ch0_numbuffers;
        config_params.numbuffers[VPIF_CHANNEL1_VIDEO] = ch1_numbuffers;

        /* a zero buffer count means "use the compiled-in default size" */
        if (ch0_numbuffers) {
                config_params.channel_bufsize[VPIF_CHANNEL0_VIDEO]
                        = ch0_bufsize;
        }
        if (ch1_numbuffers) {
                config_params.channel_bufsize[VPIF_CHANNEL1_VIDEO]
                        = ch1_bufsize;
        }

        /* Allocate memory for six channel objects */
        for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
                vpif_obj.dev[i] =
                        kzalloc(sizeof(*vpif_obj.dev[i]), GFP_KERNEL);
                /* If memory allocation fails, return error */
                if (!vpif_obj.dev[i]) {
                        free_channel_objects_index = i;
                        err = -ENOMEM;
                        goto vpif_init_free_channel_objects;
                }
        }

        return 0;

vpif_init_free_channel_objects:
        /* free only the objects allocated before the failure */
        for (j = 0; j < free_channel_objects_index; j++)
                kfree(vpif_obj.dev[j]);
        return err;
}

/**
 * vpif_probe() - probe handler for the vpif capture driver
 * @pdev: platform device pointer
 *
 * Registers with the V4L2 core, requests every IRQ listed in the
 * platform IRQ resources, allocates and registers one video device per
 * channel, and registers the I2C sub-devices described in the platform
 * data.  On failure, unwinds whatever was set up via the goto chain at
 * the bottom.
 */
static __init int vpif_probe(struct platform_device *pdev)
{
        struct vpif_subdev_info *subdevdata;
        struct vpif_capture_config *config;
        int i, j, k, m, q, err;
        struct i2c_adapter *i2c_adap;
        struct channel_obj *ch;
        struct common_obj *common;
        struct video_device *vfd;
        struct resource *res;
        int subdev_count;

        vpif_dev = &pdev->dev;

        err = initialize_vpif();
        if (err) {
                v4l2_err(vpif_dev->driver, "Error initializing vpif\n");
                return err;
        }

        err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
        if (err) {
                v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
                return err;
        }

        /*
         * Each IRQ resource may describe a range of lines; the channel's
         * channel_id member is passed as the dev_id cookie so it can be
         * matched again in free_irq().
         */
        k = 0;
        while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, k))) {
                for (i = res->start; i <= res->end; i++) {
                        if (request_irq(i, vpif_channel_isr, IRQF_DISABLED,
                                        "DM646x_Capture",
                                (void *)(&vpif_obj.dev[k]->channel_id))) {
                                err = -EBUSY;
                                /* step back to the last IRQ actually taken */
                                i--;
                                goto vpif_int_err;
                        }
                }
                k++;
        }

        for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
                /* Get the pointer to the channel object */
                ch = vpif_obj.dev[i];
                /* Allocate memory for video device */
                vfd = video_device_alloc();
                if (NULL == vfd) {
                        /* release the video devices allocated so far */
                        for (j = 0; j < i; j++) {
                                ch = vpif_obj.dev[j];
                                video_device_release(ch->video_dev);
                        }
                        err = -ENOMEM;
                        goto vpif_dev_alloc_err;
                }

                /* Initialize field of video device */
                *vfd = vpif_video_template;
                vfd->v4l2_dev = &vpif_obj.v4l2_dev;
                vfd->release = video_device_release;
                snprintf(vfd->name, sizeof(vfd->name),
                         "DM646x_VPIFCapture_DRIVER_V%s",
                         VPIF_CAPTURE_VERSION);
                /* Set video_dev to the video device */
                ch->video_dev = vfd;
        }

        for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) {
                ch = vpif_obj.dev[j];
                ch->channel_id = j;
                common = &(ch->common[VPIF_VIDEO_INDEX]);
                spin_lock_init(&common->irqlock);
                mutex_init(&common->lock);
                ch->video_dev->lock = &common->lock;
                /* Initialize prio member of channel object */
                v4l2_prio_init(&ch->prio);
                /* channel 0 gets /dev/video0, channel 1 gets /dev/video1 */
                err = video_register_device(ch->video_dev,
                                            VFL_TYPE_GRABBER, (j ? 1 : 0));
                if (err)
                        goto probe_out;

                video_set_drvdata(ch->video_dev, ch);
        }

        /* sub-devices live on I2C bus 1 (board-specific, hard-coded) */
        i2c_adap = i2c_get_adapter(1);
        config = pdev->dev.platform_data;
        subdev_count = config->subdev_count;
        vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
                              GFP_KERNEL);
        if (vpif_obj.sd == NULL) {
                vpif_err("unable to allocate memory for subdevice pointers\n");
                err = -ENOMEM;
                goto probe_out;
        }

        for (i = 0; i < subdev_count; i++) {
                subdevdata = &config->subdev_info[i];
                vpif_obj.sd[i] =
                        v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
                                                  i2c_adap,
                                                  &subdevdata->board_info,
                                                  NULL);
                if (!vpif_obj.sd[i]) {
                        vpif_err("Error registering v4l2 subdevice\n");
                        goto probe_subdev_out;
                }
                v4l2_info(&vpif_obj.v4l2_dev, "registered sub device %s\n",
                          subdevdata->name);
                /* NOTE(review): redundant — sd[i] was just checked non-NULL */
                if (vpif_obj.sd[i])
                        vpif_obj.sd[i]->grp_id = 1 << i;
        }
        v4l2_info(&vpif_obj.v4l2_dev, "DM646x VPIF capture driver initialized\n");

        return 0;

probe_subdev_out:
        /* free sub devices memory */
        /*
         * NOTE(review): already-registered sub-devices are not explicitly
         * unregistered here; presumably v4l2_device_unregister() below
         * handles them — verify against the V4L2 core.
         */
        kfree(vpif_obj.sd);
        j = VPIF_CAPTURE_MAX_DEVICES;
probe_out:
        for (k = 0; k < j; k++) {
                /* Get the pointer to the channel object */
                ch = vpif_obj.dev[k];
                /* Unregister video device */
                video_unregister_device(ch->video_dev);
        }
vpif_dev_alloc_err:
        /*
         * NOTE(review): assumes an IRQ resource exists at index
         * VPIF_CAPTURE_MAX_DEVICES-1; if fewer resources were declared,
         * res is NULL and res->end below would oops — TODO confirm the
         * platform always provides one resource per device.
         */
        k = VPIF_CAPTURE_MAX_DEVICES-1;
        res = platform_get_resource(pdev, IORESOURCE_IRQ, k);
        i = res->end;
vpif_int_err:
        /* walk backwards through the resources, freeing each taken IRQ */
        for (q = k; q >= 0; q--) {
                for (m = i; m >= (int)res->start; m--)
                        free_irq(m, (void *)(&vpif_obj.dev[q]->channel_id));
                res = platform_get_resource(pdev, IORESOURCE_IRQ, q-1);
                if (res)
                        i = res->end;
        }
        v4l2_device_unregister(&vpif_obj.v4l2_dev);
        return err;
}

/**
 * vpif_remove() - driver remove handler
 * @device: ptr to platform device structure
 *
 * The video device is unregistered
 */
static int vpif_remove(struct platform_device *device)
{
        int i;
        struct channel_obj *ch;

        v4l2_device_unregister(&vpif_obj.v4l2_dev);

        /* un-register device */
        for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
                /* Get the pointer to the channel object */
                ch = vpif_obj.dev[i];
                /* Unregister video device */
                video_unregister_device(ch->video_dev);
        }

        return 0;
}

/**
 * vpif_suspend: vpif device suspend
 *
 * TODO: Add suspend code here
 *
 * NOTE(review): stub returns -1 (a bare nonzero error), so system
 * suspend will fail while this driver is bound — confirm intended.
 */
static int vpif_suspend(struct device *dev)
{
        return -1;
}

/**
 * vpif_resume: vpif device resume
 *
 * TODO: Add resume code here
 *
 * NOTE(review): stub returns -1, same caveat as vpif_suspend().
 */
static int vpif_resume(struct device *dev)
{
        return -1;
}

static const struct dev_pm_ops vpif_dev_pm_ops = {
        .suspend = vpif_suspend,
        .resume = vpif_resume,
};

/* __refdata: probe is __init, suppress the section-mismatch warning */
static __refdata struct platform_driver vpif_driver = {
        .driver = {
                .name   = "vpif_capture",
                .owner = THIS_MODULE,
                .pm = &vpif_dev_pm_ops,
        },
        .probe = vpif_probe,
        .remove = vpif_remove,
};

/**
 * vpif_init: initialize the vpif driver
 *
 * This function registers device and driver to the kernel, requests irq
 * handler and allocates memory
 * for channel objects
 */
static __init int vpif_init(void)
{
        return platform_driver_register(&vpif_driver);
}

/**
 * vpif_cleanup : This function clean up the vpif capture resources
 *
 * This will un-registers device and driver to the kernel, frees
 * requested irq handler and de-allocates memory allocated for channel
 * objects.
 */
static void vpif_cleanup(void)
{
        struct platform_device *pdev;
        struct resource *res;
        int irq_num;
        int i = 0;

        /* recover the platform device from the saved struct device */
        pdev = container_of(vpif_dev, struct platform_device, dev);
        while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, i))) {
                for (irq_num = res->start; irq_num <= res->end; irq_num++)
                        free_irq(irq_num,
                                 (void *)(&vpif_obj.dev[i]->channel_id));
                i++;
        }

        platform_driver_unregister(&vpif_driver);
        kfree(vpif_obj.sd);
        for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++)
                kfree(vpif_obj.dev[i]);
}

/* Function for module initialization and cleanup */
module_init(vpif_init);
module_exit(vpif_cleanup);
gpl-2.0
Jackeagle/android_kernel_htc_dlxub1
drivers/scsi/pas16.c
5145
18234
#define AUTOSENSE #define PSEUDO_DMA #define FOO #define UNSAFE /* Not unsafe for PAS16 -- use it */ #define PDEBUG 0 /* * This driver adapted from Drew Eckhardt's Trantor T128 driver * * Copyright 1993, Drew Eckhardt * Visionary Computing * (Unix and Linux consulting and custom programming) * drew@colorado.edu * +1 (303) 666-5836 * * ( Based on T128 - DISTRIBUTION RELEASE 3. ) * * Modified to work with the Pro Audio Spectrum/Studio 16 * by John Weidman. * * * For more information, please consult * * Media Vision * (510) 770-8600 * (800) 348-7116 * * and * * NCR 5380 Family * SCSI Protocol Controller * Databook * * NCR Microelectronics * 1635 Aeroplaza Drive * Colorado Springs, CO 80916 * 1+ (719) 578-3400 * 1+ (800) 334-5454 */ /* * Options : * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically * for commands that return with a CHECK CONDITION status. * * LIMIT_TRANSFERSIZE - if defined, limit the pseudo-dma transfers to 512 * bytes at a time. Since interrupts are disabled by default during * these transfers, we might need this to give reasonable interrupt * service time if the transfer size gets too large. * * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance * increase compared to polled I/O. * * PARITY - enable parity checking. Not supported. * * SCSI2 - enable support for SCSI-II tagged queueing. Untested. * * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. This * parameter comes from the NCR5380 code. It is NOT unsafe with * the PAS16 and you should use it. If you don't you will have * a problem with dropped characters during high speed * communications during SCSI transfers. If you really don't * want to use UNSAFE you can try defining LIMIT_TRANSFERSIZE or * twiddle with the transfer size in the high level code. * * USLEEP - enable support for devices that don't disconnect. Untested. * * The card is detected and initialized in one of several ways : * 1. 
Autoprobe (default) - There are many different models of * the Pro Audio Spectrum/Studio 16, and I only have one of * them, so this may require a little tweaking. An interrupt * is triggered to autoprobe for the interrupt line. Note: * with the newer model boards, the interrupt is set via * software after reset using the default_irq for the * current board number. * * 2. With command line overrides - pas16=port,irq may be * used on the LILO command line to override the defaults. * * 3. With the PAS16_OVERRIDE compile time define. This is * specified as an array of address, irq tuples. Ie, for * one board at the default 0x388 address, IRQ10, I could say * -DPAS16_OVERRIDE={{0x388, 10}} * NOTE: Untested. * * 4. When included as a module, with arguments passed on the command line: * pas16_irq=xx the interrupt * pas16_addr=xx the port * e.g. "modprobe pas16 pas16_addr=0x388 pas16_irq=5" * * Note that if the override methods are used, place holders must * be specified for other boards in the system. * * * Configuration notes : * The current driver does not support interrupt sharing with the * sound portion of the card. If you use the same irq for the * scsi port and sound you will have problems. Either use * a different irq for the scsi port or don't use interrupts * for the scsi port. * * If you have problems with your card not being recognized, use * the LILO command line override. Try to get it recognized without * interrupts. Ie, for a board at the default 0x388 base port, * boot: linux pas16=0x388,255 * * SCSI_IRQ_NONE (255) should be specified for no interrupt, * IRQ_AUTO (254) to autoprobe for an IRQ line if overridden * on the command line. 
* * (IRQ_AUTO == 254, SCSI_IRQ_NONE == 255 in NCR5380.h) */ #include <linux/module.h> #include <linux/signal.h> #include <linux/proc_fs.h> #include <asm/io.h> #include <asm/dma.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/stat.h> #include <linux/init.h> #include "scsi.h" #include <scsi/scsi_host.h> #include "pas16.h" #define AUTOPROBE_IRQ #include "NCR5380.h" static int pas_maxi = 0; static int pas_wmaxi = 0; static unsigned short pas16_addr = 0; static int pas16_irq = 0; static const int scsi_irq_translate[] = { 0, 0, 1, 2, 3, 4, 5, 6, 0, 0, 7, 8, 9, 0, 10, 11 }; /* The default_irqs array contains values used to set the irq into the * board via software (as must be done on newer model boards without * irq jumpers on the board). The first value in the array will be * assigned to logical board 0, the next to board 1, etc. */ static int default_irqs[] __initdata = { PAS16_DEFAULT_BOARD_1_IRQ, PAS16_DEFAULT_BOARD_2_IRQ, PAS16_DEFAULT_BOARD_3_IRQ, PAS16_DEFAULT_BOARD_4_IRQ }; static struct override { unsigned short io_port; int irq; } overrides #ifdef PAS16_OVERRIDE [] __initdata = PAS16_OVERRIDE; #else [4] __initdata = {{0,IRQ_AUTO}, {0,IRQ_AUTO}, {0,IRQ_AUTO}, {0,IRQ_AUTO}}; #endif #define NO_OVERRIDES ARRAY_SIZE(overrides) static struct base { unsigned short io_port; int noauto; } bases[] __initdata = { {PAS16_DEFAULT_BASE_1, 0}, {PAS16_DEFAULT_BASE_2, 0}, {PAS16_DEFAULT_BASE_3, 0}, {PAS16_DEFAULT_BASE_4, 0} }; #define NO_BASES ARRAY_SIZE(bases) static const unsigned short pas16_offset[ 8 ] = { 0x1c00, /* OUTPUT_DATA_REG */ 0x1c01, /* INITIATOR_COMMAND_REG */ 0x1c02, /* MODE_REG */ 0x1c03, /* TARGET_COMMAND_REG */ 0x3c00, /* STATUS_REG ro, SELECT_ENABLE_REG wo */ 0x3c01, /* BUS_AND_STATUS_REG ro, START_DMA_SEND_REG wo */ 0x3c02, /* INPUT_DATA_REGISTER ro, (N/A on PAS16 ?) 
* START_DMA_TARGET_RECEIVE_REG wo */ 0x3c03, /* RESET_PARITY_INTERRUPT_REG ro, * START_DMA_INITIATOR_RECEIVE_REG wo */ }; /*----------------------------------------------------------------*/ /* the following will set the monitor border color (useful to find where something crashed or gets stuck at */ /* 1 = blue 2 = green 3 = cyan 4 = red 5 = magenta 6 = yellow 7 = white */ #if 1 #define rtrc(i) {inb(0x3da); outb(0x31, 0x3c0); outb((i), 0x3c0);} #else #define rtrc(i) {} #endif /* * Function : enable_board( int board_num, unsigned short port ) * * Purpose : set address in new model board * * Inputs : board_num - logical board number 0-3, port - base address * */ static void __init enable_board( int board_num, unsigned short port ) { outb( 0xbc + board_num, MASTER_ADDRESS_PTR ); outb( port >> 2, MASTER_ADDRESS_PTR ); } /* * Function : init_board( unsigned short port, int irq ) * * Purpose : Set the board up to handle the SCSI interface * * Inputs : port - base address of the board, * irq - irq to assign to the SCSI port * force_irq - set it even if it conflicts with sound driver * */ static void __init init_board( unsigned short io_port, int irq, int force_irq ) { unsigned int tmp; unsigned int pas_irq_code; /* Initialize the SCSI part of the board */ outb( 0x30, io_port + P_TIMEOUT_COUNTER_REG ); /* Timeout counter */ outb( 0x01, io_port + P_TIMEOUT_STATUS_REG_OFFSET ); /* Reset TC */ outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */ NCR5380_read( RESET_PARITY_INTERRUPT_REG ); /* Set the SCSI interrupt pointer without mucking up the sound * interrupt pointer in the same byte. */ pas_irq_code = ( irq < 16 ) ? 
scsi_irq_translate[irq] : 0; tmp = inb( io_port + IO_CONFIG_3 ); if( (( tmp & 0x0f ) == pas_irq_code) && pas_irq_code > 0 && !force_irq ) { printk( "pas16: WARNING: Can't use same irq as sound " "driver -- interrupts disabled\n" ); /* Set up the drive parameters, disable 5380 interrupts */ outb( 0x4d, io_port + SYS_CONFIG_4 ); } else { tmp = ( tmp & 0x0f ) | ( pas_irq_code << 4 ); outb( tmp, io_port + IO_CONFIG_3 ); /* Set up the drive parameters and enable 5380 interrupts */ outb( 0x6d, io_port + SYS_CONFIG_4 ); } } /* * Function : pas16_hw_detect( unsigned short board_num ) * * Purpose : determine if a pas16 board is present * * Inputs : board_num - logical board number ( 0 - 3 ) * * Returns : 0 if board not found, 1 if found. */ static int __init pas16_hw_detect( unsigned short board_num ) { unsigned char board_rev, tmp; unsigned short io_port = bases[ board_num ].io_port; /* See if we can find a PAS16 board at the address associated * with this logical board number. */ /* First, attempt to take a newer model board out of reset and * give it a base address. This shouldn't affect older boards. */ enable_board( board_num, io_port ); /* Now see if it looks like a PAS16 board */ board_rev = inb( io_port + PCB_CONFIG ); if( board_rev == 0xff ) return 0; tmp = board_rev ^ 0xe0; outb( tmp, io_port + PCB_CONFIG ); tmp = inb( io_port + PCB_CONFIG ); outb( board_rev, io_port + PCB_CONFIG ); if( board_rev != tmp ) /* Not a PAS-16 */ return 0; if( ( inb( io_port + OPERATION_MODE_1 ) & 0x03 ) != 0x03 ) return 0; /* return if no SCSI interface found */ /* Mediavision has some new model boards that return ID bits * that indicate a SCSI interface, but they're not (LMS). We'll * put in an additional test to try to weed them out. */ outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */ NCR5380_write( MODE_REG, 0x20 ); /* Is it really SCSI? */ if( NCR5380_read( MODE_REG ) != 0x20 ) /* Write to a reg. */ return 0; /* and try to read */ NCR5380_write( MODE_REG, 0x00 ); /* it back. 
*/ if( NCR5380_read( MODE_REG ) != 0x00 ) return 0; return 1; } /* * Function : pas16_setup(char *str, int *ints) * * Purpose : LILO command line initialization of the overrides array, * * Inputs : str - unused, ints - array of integer parameters with ints[0] * equal to the number of ints. * */ void __init pas16_setup(char *str, int *ints) { static int commandline_current = 0; int i; if (ints[0] != 2) printk("pas16_setup : usage pas16=io_port,irq\n"); else if (commandline_current < NO_OVERRIDES) { overrides[commandline_current].io_port = (unsigned short) ints[1]; overrides[commandline_current].irq = ints[2]; for (i = 0; i < NO_BASES; ++i) if (bases[i].io_port == (unsigned short) ints[1]) { bases[i].noauto = 1; break; } ++commandline_current; } } /* * Function : int pas16_detect(struct scsi_host_template * tpnt) * * Purpose : detects and initializes PAS16 controllers * that were autoprobed, overridden on the LILO command line, * or specified at compile time. * * Inputs : tpnt - template for this SCSI adapter. * * Returns : 1 if a host adapter was found, 0 if not. * */ int __init pas16_detect(struct scsi_host_template * tpnt) { static int current_override = 0; static unsigned short current_base = 0; struct Scsi_Host *instance; unsigned short io_port; int count; tpnt->proc_name = "pas16"; tpnt->proc_info = &pas16_proc_info; if (pas16_addr != 0) { overrides[0].io_port = pas16_addr; /* * This is how we avoid seeing more than * one host adapter at the same I/O port. * Cribbed shamelessly from pas16_setup(). 
*/ for (count = 0; count < NO_BASES; ++count) if (bases[count].io_port == pas16_addr) { bases[count].noauto = 1; break; } } if (pas16_irq != 0) overrides[0].irq = pas16_irq; for (count = 0; current_override < NO_OVERRIDES; ++current_override) { io_port = 0; if (overrides[current_override].io_port) { io_port = overrides[current_override].io_port; enable_board( current_override, io_port ); init_board( io_port, overrides[current_override].irq, 1 ); } else for (; !io_port && (current_base < NO_BASES); ++current_base) { #if (PDEBUG & PDEBUG_INIT) printk("scsi-pas16 : probing io_port %04x\n", (unsigned int) bases[current_base].io_port); #endif if ( !bases[current_base].noauto && pas16_hw_detect( current_base ) ){ io_port = bases[current_base].io_port; init_board( io_port, default_irqs[ current_base ], 0 ); #if (PDEBUG & PDEBUG_INIT) printk("scsi-pas16 : detected board.\n"); #endif } } #if defined(PDEBUG) && (PDEBUG & PDEBUG_INIT) printk("scsi-pas16 : io_port = %04x\n", (unsigned int) io_port); #endif if (!io_port) break; instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); if(instance == NULL) break; instance->io_port = io_port; NCR5380_init(instance, 0); if (overrides[current_override].irq != IRQ_AUTO) instance->irq = overrides[current_override].irq; else instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS); if (instance->irq != SCSI_IRQ_NONE) if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED, "pas16", instance)) { printk("scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; } if (instance->irq == SCSI_IRQ_NONE) { printk("scsi%d : interrupts not enabled. 
for better interactive performance,\n", instance->host_no); printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); /* Disable 5380 interrupts, leave drive params the same */ outb( 0x4d, io_port + SYS_CONFIG_4 ); outb( (inb(io_port + IO_CONFIG_3) & 0x0f), io_port + IO_CONFIG_3 ); } #if defined(PDEBUG) && (PDEBUG & PDEBUG_INIT) printk("scsi%d : irq = %d\n", instance->host_no, instance->irq); #endif printk("scsi%d : at 0x%04x", instance->host_no, (int) instance->io_port); if (instance->irq == SCSI_IRQ_NONE) printk (" interrupts disabled"); else printk (" irq %d", instance->irq); printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", CAN_QUEUE, CMD_PER_LUN, PAS16_PUBLIC_RELEASE); NCR5380_print_options(instance); printk("\n"); ++current_override; ++count; } return count; } /* * Function : int pas16_biosparam(Disk *disk, struct block_device *dev, int *ip) * * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for * the specified device / size. * * Inputs : size = size of device in sectors (512 bytes), dev = block device * major / minor, ip[] = {heads, sectors, cylinders} * * Returns : always 0 (success), initializes ip * */ /* * XXX Most SCSI boards use this mapping, I could be incorrect. Some one * using hard disks on a trantor should verify that this mapping corresponds * to that used by the BIOS / ASPI driver by running the linux fdisk program * and matching the H_C_S coordinates to what DOS uses. */ int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev, sector_t capacity, int * ip) { int size = capacity; ip[0] = 64; ip[1] = 32; ip[2] = size >> 11; /* I think I have it as /(32*64) */ if( ip[2] > 1024 ) { /* yes, >, not >= */ ip[0]=255; ip[1]=63; ip[2]=size/(63*255); if( ip[2] > 1023 ) /* yes >1023... 
*/ ip[2] = 1023; } return 0; } /* * Function : int NCR5380_pread (struct Scsi_Host *instance, * unsigned char *dst, int len) * * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to * dst * * Inputs : dst = destination, len = length in bytes * * Returns : 0 on success, non zero on a failure such as a watchdog * timeout. */ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, int len) { register unsigned char *d = dst; register unsigned short reg = (unsigned short) (instance->io_port + P_DATA_REG_OFFSET); register int i = len; int ii = 0; while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) ) ++ii; insb( reg, d, i ); if ( inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) { outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET); printk("scsi%d : watchdog timer fired in NCR5380_pread()\n", instance->host_no); return -1; } if (ii > pas_maxi) pas_maxi = ii; return 0; } /* * Function : int NCR5380_pwrite (struct Scsi_Host *instance, * unsigned char *src, int len) * * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from * src * * Inputs : src = source, len = length in bytes * * Returns : 0 on success, non zero on a failure such as a watchdog * timeout. 
*/ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src, int len) { register unsigned char *s = src; register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET); register int i = len; int ii = 0; while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) ) ++ii; outsb( reg, s, i ); if (inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) { outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET); printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n", instance->host_no); return -1; } if (ii > pas_maxi) pas_wmaxi = ii; return 0; } #include "NCR5380.c" static int pas16_release(struct Scsi_Host *shost) { if (shost->irq) free_irq(shost->irq, shost); NCR5380_exit(shost); if (shost->dma_channel != 0xff) free_dma(shost->dma_channel); if (shost->io_port && shost->n_io_port) release_region(shost->io_port, shost->n_io_port); scsi_unregister(shost); return 0; } static struct scsi_host_template driver_template = { .name = "Pro Audio Spectrum-16 SCSI", .detect = pas16_detect, .release = pas16_release, .queuecommand = pas16_queue_command, .eh_abort_handler = pas16_abort, .eh_bus_reset_handler = pas16_bus_reset, .bios_param = pas16_biosparam, .can_queue = CAN_QUEUE, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = CMD_PER_LUN, .use_clustering = DISABLE_CLUSTERING, }; #include "scsi_module.c" #ifdef MODULE module_param(pas16_addr, ushort, 0); module_param(pas16_irq, int, 0); #endif MODULE_LICENSE("GPL");
gpl-2.0
TeamRegular/android_kernel_zara
drivers/mtd/maps/h720x-flash.c
5145
2747
/* * Flash memory access on Hynix GMS30C7201/HMS30C7202 based * evaluation boards * * (C) 2002 Jungjun Kim <jungjun.kim@hynix.com> * 2003 Thomas Gleixner <tglx@linutronix.de> */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <mach/hardware.h> #include <asm/io.h> static struct mtd_info *mymtd; static struct map_info h720x_map = { .name = "H720X", .bankwidth = 4, .size = H720X_FLASH_SIZE, .phys = H720X_FLASH_PHYS, }; static struct mtd_partition h720x_partitions[] = { { .name = "ArMon", .size = 0x00080000, .offset = 0, .mask_flags = MTD_WRITEABLE },{ .name = "Env", .size = 0x00040000, .offset = 0x00080000, .mask_flags = MTD_WRITEABLE },{ .name = "Kernel", .size = 0x00180000, .offset = 0x000c0000, .mask_flags = MTD_WRITEABLE },{ .name = "Ramdisk", .size = 0x00400000, .offset = 0x00240000, .mask_flags = MTD_WRITEABLE },{ .name = "jffs2", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND } }; #define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions) /* * Initialize FLASH support */ static int __init h720x_mtd_init(void) { h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size); if (!h720x_map.virt) { printk(KERN_ERR "H720x-MTD: ioremap failed\n"); return -EIO; } simple_map_init(&h720x_map); // Probe for flash bankwidth 4 printk (KERN_INFO "H720x-MTD probing 32bit FLASH\n"); mymtd = do_map_probe("cfi_probe", &h720x_map); if (!mymtd) { printk (KERN_INFO "H720x-MTD probing 16bit FLASH\n"); // Probe for bankwidth 2 h720x_map.bankwidth = 2; mymtd = do_map_probe("cfi_probe", &h720x_map); } if (mymtd) { mymtd->owner = THIS_MODULE; mtd_device_parse_register(mymtd, NULL, NULL, h720x_partitions, NUM_PARTITIONS); return 0; } iounmap((void *)h720x_map.virt); return -ENXIO; } /* * Cleanup */ static void __exit h720x_mtd_cleanup(void) { if (mymtd) { mtd_device_unregister(mymtd); 
map_destroy(mymtd); } if (h720x_map.virt) { iounmap((void *)h720x_map.virt); h720x_map.virt = 0; } } module_init(h720x_mtd_init); module_exit(h720x_mtd_cleanup); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); MODULE_DESCRIPTION("MTD map driver for Hynix evaluation boards");
gpl-2.0
linxiaoji/kernel_mediatek_g750_t01
drivers/scsi/in2000.c
5145
73530
/* * in2000.c - Linux device driver for the * Always IN2000 ISA SCSI card. * * Copyright (c) 1996 John Shifflett, GeoLog Consulting * john@geolog.com * jshiffle@netcom.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * For the avoidance of doubt the "preferred form" of this code is one which * is in an open non patent encumbered format. Where cryptographic key signing * forms part of the process of creating an executable the information * including keys needed to generate an equivalently functional executable * are deemed to be part of the source code. * * Drew Eckhardt's excellent 'Generic NCR5380' sources provided * much of the inspiration and some of the code for this driver. * The Linux IN2000 driver distributed in the Linux kernels through * version 1.2.13 was an extremely valuable reference on the arcane * (and still mysterious) workings of the IN2000's fifo. It also * is where I lifted in2000_biosparam(), the gist of the card * detection scheme, and other bits of code. Many thanks to the * talented and courageous people who wrote, contributed to, and * maintained that driver (including Brad McLean, Shaun Savage, * Bill Earnest, Larry Doolittle, Roger Sunshine, John Luckey, * Matt Postiff, Peter Lu, zerucha@shell.portal.com, and Eric * Youngdale). I should also mention the driver written by * Hamish Macdonald for the (GASP!) Amiga A2091 card, included * in the Linux-m68k distribution; it gave me a good initial * understanding of the proper way to run a WD33c93 chip, and I * ended up stealing lots of code from it. 
* * _This_ driver is (I feel) an improvement over the old one in * several respects: * - All problems relating to the data size of a SCSI request are * gone (as far as I know). The old driver couldn't handle * swapping to partitions because that involved 4k blocks, nor * could it deal with the st.c tape driver unmodified, because * that usually involved 4k - 32k blocks. The old driver never * quite got away from a morbid dependence on 2k block sizes - * which of course is the size of the card's fifo. * * - Target Disconnection/Reconnection is now supported. Any * system with more than one device active on the SCSI bus * will benefit from this. The driver defaults to what I'm * calling 'adaptive disconnect' - meaning that each command * is evaluated individually as to whether or not it should * be run with the option to disconnect/reselect (if the * device chooses), or as a "SCSI-bus-hog". * * - Synchronous data transfers are now supported. Because there * are a few devices (and many improperly terminated systems) * that choke when doing sync, the default is sync DISABLED * for all devices. This faster protocol can (and should!) * be enabled on selected devices via the command-line. * * - Runtime operating parameters can now be specified through * either the LILO or the 'insmod' command line. For LILO do: * "in2000=blah,blah,blah" * and with insmod go like: * "insmod /usr/src/linux/modules/in2000.o setup_strings=blah,blah" * The defaults should be good for most people. See the comment * for 'setup_strings' below for more details. * * - The old driver relied exclusively on what the Western Digital * docs call "Combination Level 2 Commands", which are a great * idea in that the CPU is relieved of a lot of interrupt * overhead. However, by accepting a certain (user-settable) * amount of additional interrupts, this driver achieves * better control over the SCSI bus, and data transfers are * almost as fast while being much easier to define, track, * and debug. 
* * - You can force detection of a card whose BIOS has been disabled. * * - Multiple IN2000 cards might almost be supported. I've tried to * keep it in mind, but have no way to test... * * * TODO: * tagged queuing. multiple cards. * * * NOTE: * When using this or any other SCSI driver as a module, you'll * find that with the stock kernel, at most _two_ SCSI hard * drives will be linked into the device list (ie, usable). * If your IN2000 card has more than 2 disks on its bus, you * might want to change the define of 'SD_EXTRA_DEVS' in the * 'hosts.h' file from 2 to whatever is appropriate. It took * me a while to track down this surprisingly obscure and * undocumented little "feature". * * * People with bug reports, wish-lists, complaints, comments, * or improvements are asked to pah-leeez email me (John Shifflett) * at john@geolog.com or jshiffle@netcom.com! I'm anxious to get * this thing into as good a shape as possible, and I'm positive * there are lots of lurking bugs and "Stupid Places". * * Updated for Linux 2.5 by Alan Cox <alan@lxorguk.ukuu.org.uk> * - Using new_eh handler * - Hopefully got all the locking right again * See "FIXME" notes for items that could do with more work */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/ioport.h> #include <linux/stat.h> #include <asm/io.h> #include "scsi.h" #include <scsi/scsi_host.h> #define IN2000_VERSION "1.33-2.5" #define IN2000_DATE "2002/11/03" #include "in2000.h" /* * 'setup_strings' is a single string used to pass operating parameters and * settings from the kernel/module command-line to the driver. 'setup_args[]' * is an array of strings that define the compile-time default values for * these settings. If Linux boots with a LILO or insmod command-line, those * settings are combined with 'setup_args[]'. 
 * Note that LILO command-lines
 * are prefixed with "in2000=" while insmod uses a "setup_strings=" prefix.
 * The driver recognizes the following keywords (lower case required) and
 * arguments:
 *
 * -  ioport:addr    -Where addr is IO address of a (usually ROM-less) card.
 * -  noreset        -No optional args. Prevents SCSI bus reset at boot time.
 * -  nosync:x       -x is a bitmask where the 1st 7 bits correspond with
 *                    the 7 possible SCSI devices (bit 0 for device #0, etc).
 *                    Set a bit to PREVENT sync negotiation on that device.
 *                    The driver default is sync DISABLED on all devices.
 * -  period:ns      -ns is the minimum # of nanoseconds in a SCSI data transfer
 *                    period. Default is 500; acceptable values are 250 - 1000.
 * -  disconnect:x   -x = 0 to never allow disconnects, 2 to always allow them.
 *                    x = 1 does 'adaptive' disconnects, which is the default
 *                    and generally the best choice.
 * -  debug:x        -If 'DEBUGGING_ON' is defined, x is a bitmask that causes
 *                    various types of debug output to be printed - see the
 *                    DB_xxx defines in in2000.h
 * -  proc:x         -If 'PROC_INTERFACE' is defined, x is a bitmask that
 *                    determines how the /proc interface works and what it
 *                    does - see the PR_xxx defines in in2000.h
 *
 * Syntax Notes:
 * -  Numeric arguments can be decimal or the '0x' form of hex notation. There
 *    _must_ be a colon between a keyword and its numeric argument, with no
 *    spaces.
 * -  Keywords are separated by commas, no spaces, in the standard kernel
 *    command-line manner.
 * -  A keyword in the 'nth' comma-separated command-line member will overwrite
 *    the 'nth' element of setup_args[]. A blank command-line member (in
 *    other words, a comma with no preceding keyword) will _not_ overwrite
 *    the corresponding setup_args[] element.
 *
 * A few LILO examples (for insmod, use 'setup_strings' instead of 'in2000'):
 * -  in2000=ioport:0x220,noreset
 * -  in2000=period:250,disconnect:2,nosync:0x03
 * -  in2000=debug:0x1e
 * -  in2000=proc:3
 */

/* Normally, no defaults are specified...
*/ static char *setup_args[] = { "", "", "", "", "", "", "", "", "" }; /* filled in by 'insmod' */ static char *setup_strings; module_param(setup_strings, charp, 0); static inline uchar read_3393(struct IN2000_hostdata *hostdata, uchar reg_num) { write1_io(reg_num, IO_WD_ADDR); return read1_io(IO_WD_DATA); } #define READ_AUX_STAT() read1_io(IO_WD_ASR) static inline void write_3393(struct IN2000_hostdata *hostdata, uchar reg_num, uchar value) { write1_io(reg_num, IO_WD_ADDR); write1_io(value, IO_WD_DATA); } static inline void write_3393_cmd(struct IN2000_hostdata *hostdata, uchar cmd) { /* while (READ_AUX_STAT() & ASR_CIP) printk("|");*/ write1_io(WD_COMMAND, IO_WD_ADDR); write1_io(cmd, IO_WD_DATA); } static uchar read_1_byte(struct IN2000_hostdata *hostdata) { uchar asr, x = 0; write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); write_3393_cmd(hostdata, WD_CMD_TRANS_INFO | 0x80); do { asr = READ_AUX_STAT(); if (asr & ASR_DBR) x = read_3393(hostdata, WD_DATA); } while (!(asr & ASR_INT)); return x; } static void write_3393_count(struct IN2000_hostdata *hostdata, unsigned long value) { write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR); write1_io((value >> 16), IO_WD_DATA); write1_io((value >> 8), IO_WD_DATA); write1_io(value, IO_WD_DATA); } static unsigned long read_3393_count(struct IN2000_hostdata *hostdata) { unsigned long value; write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR); value = read1_io(IO_WD_DATA) << 16; value |= read1_io(IO_WD_DATA) << 8; value |= read1_io(IO_WD_DATA); return value; } /* The 33c93 needs to be told which direction a command transfers its * data; we use this function to figure it out. Returns true if there * will be a DATA_OUT phase with this command, false otherwise. * (Thanks to Joerg Dorchain for the research and suggestion.) 
*/ static int is_dir_out(Scsi_Cmnd * cmd) { switch (cmd->cmnd[0]) { case WRITE_6: case WRITE_10: case WRITE_12: case WRITE_LONG: case WRITE_SAME: case WRITE_BUFFER: case WRITE_VERIFY: case WRITE_VERIFY_12: case COMPARE: case COPY: case COPY_VERIFY: case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW: case SEARCH_EQUAL_12: case SEARCH_HIGH_12: case SEARCH_LOW_12: case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE: case MODE_SELECT: case MODE_SELECT_10: case LOG_SELECT: case SEND_DIAGNOSTIC: case CHANGE_DEFINITION: case UPDATE_BLOCK: case SET_WINDOW: case MEDIUM_SCAN: case SEND_VOLUME_TAG: case 0xea: return 1; default: return 0; } } static struct sx_period sx_table[] = { {1, 0x20}, {252, 0x20}, {376, 0x30}, {500, 0x40}, {624, 0x50}, {752, 0x60}, {876, 0x70}, {1000, 0x00}, {0, 0} }; static int round_period(unsigned int period) { int x; for (x = 1; sx_table[x].period_ns; x++) { if ((period <= sx_table[x - 0].period_ns) && (period > sx_table[x - 1].period_ns)) { return x; } } return 7; } static uchar calc_sync_xfer(unsigned int period, unsigned int offset) { uchar result; period *= 4; /* convert SDTR code to ns */ result = sx_table[round_period(period)].reg_value; result |= (offset < OPTIMUM_SX_OFF) ? 
offset : OPTIMUM_SX_OFF; return result; } static void in2000_execute(struct Scsi_Host *instance); static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) { struct Scsi_Host *instance; struct IN2000_hostdata *hostdata; Scsi_Cmnd *tmp; instance = cmd->device->host; hostdata = (struct IN2000_hostdata *) instance->hostdata; DB(DB_QUEUE_COMMAND, scmd_printk(KERN_DEBUG, cmd, "Q-%02x(", cmd->cmnd[0])) /* Set up a few fields in the Scsi_Cmnd structure for our own use: * - host_scribble is the pointer to the next cmd in the input queue * - scsi_done points to the routine we call when a cmd is finished * - result is what you'd expect */ cmd->host_scribble = NULL; cmd->scsi_done = done; cmd->result = 0; /* We use the Scsi_Pointer structure that's included with each command * as a scratchpad (as it's intended to be used!). The handy thing about * the SCp.xxx fields is that they're always associated with a given * cmd, and are preserved across disconnect-reselect. This means we * can pretty much ignore SAVE_POINTERS and RESTORE_POINTERS messages * if we keep all the critical pointers and counters in SCp: * - SCp.ptr is the pointer into the RAM buffer * - SCp.this_residual is the size of that buffer * - SCp.buffer points to the current scatter-gather buffer * - SCp.buffers_residual tells us how many S.G. 
buffers there are * - SCp.have_data_in helps keep track of >2048 byte transfers * - SCp.sent_command is not used * - SCp.phase records this command's SRCID_ER bit setting */ if (scsi_bufflen(cmd)) { cmd->SCp.buffer = scsi_sglist(cmd); cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1; cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); cmd->SCp.this_residual = cmd->SCp.buffer->length; } else { cmd->SCp.buffer = NULL; cmd->SCp.buffers_residual = 0; cmd->SCp.ptr = NULL; cmd->SCp.this_residual = 0; } cmd->SCp.have_data_in = 0; /* We don't set SCp.phase here - that's done in in2000_execute() */ /* WD docs state that at the conclusion of a "LEVEL2" command, the * status byte can be retrieved from the LUN register. Apparently, * this is the case only for *uninterrupted* LEVEL2 commands! If * there are any unexpected phases entered, even if they are 100% * legal (different devices may choose to do things differently), * the LEVEL2 command sequence is exited. This often occurs prior * to receiving the status byte, in which case the driver does a * status phase interrupt and gets the status byte on its own. * While such a command can then be "resumed" (ie restarted to * finish up as a LEVEL2 command), the LUN register will NOT be * a valid status byte at the command's conclusion, and we must * use the byte obtained during the earlier interrupt. Here, we * preset SCp.Status to an illegal value (0xff) so that when * this command finally completes, we can tell where the actual * status byte is stored. */ cmd->SCp.Status = ILLEGAL_STATUS_BYTE; /* We need to disable interrupts before messing with the input * queue and calling in2000_execute(). */ /* * Add the cmd to the end of 'input_Q'. Note that REQUEST_SENSE * commands are added to the head of the queue so that the desired * sense data is not lost before REQUEST_SENSE executes. 
*/ if (!(hostdata->input_Q) || (cmd->cmnd[0] == REQUEST_SENSE)) { cmd->host_scribble = (uchar *) hostdata->input_Q; hostdata->input_Q = cmd; } else { /* find the end of the queue */ for (tmp = (Scsi_Cmnd *) hostdata->input_Q; tmp->host_scribble; tmp = (Scsi_Cmnd *) tmp->host_scribble); tmp->host_scribble = (uchar *) cmd; } /* We know that there's at least one command in 'input_Q' now. * Go see if any of them are runnable! */ in2000_execute(cmd->device->host); DB(DB_QUEUE_COMMAND, printk(")Q ")) return 0; } static DEF_SCSI_QCMD(in2000_queuecommand) /* * This routine attempts to start a scsi command. If the host_card is * already connected, we give up immediately. Otherwise, look through * the input_Q, using the first command we find that's intended * for a currently non-busy target/lun. * Note that this function is always called with interrupts already * disabled (either from in2000_queuecommand() or in2000_intr()). */ static void in2000_execute(struct Scsi_Host *instance) { struct IN2000_hostdata *hostdata; Scsi_Cmnd *cmd, *prev; int i; unsigned short *sp; unsigned short f; unsigned short flushbuf[16]; hostdata = (struct IN2000_hostdata *) instance->hostdata; DB(DB_EXECUTE, printk("EX(")) if (hostdata->selecting || hostdata->connected) { DB(DB_EXECUTE, printk(")EX-0 ")) return; } /* * Search through the input_Q for a command destined * for an idle target/lun. 
*/ cmd = (Scsi_Cmnd *) hostdata->input_Q; prev = NULL; while (cmd) { if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun))) break; prev = cmd; cmd = (Scsi_Cmnd *) cmd->host_scribble; } /* quit if queue empty or all possible targets are busy */ if (!cmd) { DB(DB_EXECUTE, printk(")EX-1 ")) return; } /* remove command from queue */ if (prev) prev->host_scribble = cmd->host_scribble; else hostdata->input_Q = (Scsi_Cmnd *) cmd->host_scribble; #ifdef PROC_STATISTICS hostdata->cmd_cnt[cmd->device->id]++; #endif /* * Start the selection process */ if (is_dir_out(cmd)) write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id); else write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD); /* Now we need to figure out whether or not this command is a good * candidate for disconnect/reselect. We guess to the best of our * ability, based on a set of hierarchical rules. When several * devices are operating simultaneously, disconnects are usually * an advantage. In a single device system, or if only 1 device * is being accessed, transfers usually go faster if disconnects * are not allowed: * * + Commands should NEVER disconnect if hostdata->disconnect = * DIS_NEVER (this holds for tape drives also), and ALWAYS * disconnect if hostdata->disconnect = DIS_ALWAYS. * + Tape drive commands should always be allowed to disconnect. * + Disconnect should be allowed if disconnected_Q isn't empty. * + Commands should NOT disconnect if input_Q is empty. * + Disconnect should be allowed if there are commands in input_Q * for a different target/lun. In this case, the other commands * should be made disconnect-able, if not already. * * I know, I know - this code would flunk me out of any * "C Programming 101" class ever offered. But it's easy * to change around and experiment with for now. 
*/ cmd->SCp.phase = 0; /* assume no disconnect */ if (hostdata->disconnect == DIS_NEVER) goto no; if (hostdata->disconnect == DIS_ALWAYS) goto yes; if (cmd->device->type == 1) /* tape drive? */ goto yes; if (hostdata->disconnected_Q) /* other commands disconnected? */ goto yes; if (!(hostdata->input_Q)) /* input_Q empty? */ goto no; for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble) { if ((prev->device->id != cmd->device->id) || (prev->device->lun != cmd->device->lun)) { for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble) prev->SCp.phase = 1; goto yes; } } goto no; yes: cmd->SCp.phase = 1; #ifdef PROC_STATISTICS hostdata->disc_allowed_cnt[cmd->device->id]++; #endif no: write_3393(hostdata, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0)); write_3393(hostdata, WD_TARGET_LUN, cmd->device->lun); write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]); hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); if ((hostdata->level2 <= L2_NONE) || (hostdata->sync_stat[cmd->device->id] == SS_UNSET)) { /* * Do a 'Select-With-ATN' command. This will end with * one of the following interrupts: * CSR_RESEL_AM: failure - can try again later. * CSR_TIMEOUT: failure - give up. * CSR_SELECT: success - proceed. */ hostdata->selecting = cmd; /* Every target has its own synchronous transfer setting, kept in * the sync_xfer array, and a corresponding status byte in sync_stat[]. * Each target's sync_stat[] entry is initialized to SS_UNSET, and its * sync_xfer[] entry is initialized to the default/safe value. SS_UNSET * means that the parameters are undetermined as yet, and that we * need to send an SDTR message to this device after selection is * complete. We set SS_FIRST to tell the interrupt routine to do so, * unless we don't want to even _try_ synchronous transfers: In this * case we set SS_SET to make the defaults final. 
*/ if (hostdata->sync_stat[cmd->device->id] == SS_UNSET) { if (hostdata->sync_off & (1 << cmd->device->id)) hostdata->sync_stat[cmd->device->id] = SS_SET; else hostdata->sync_stat[cmd->device->id] = SS_FIRST; } hostdata->state = S_SELECTING; write_3393_count(hostdata, 0); /* this guarantees a DATA_PHASE interrupt */ write_3393_cmd(hostdata, WD_CMD_SEL_ATN); } else { /* * Do a 'Select-With-ATN-Xfer' command. This will end with * one of the following interrupts: * CSR_RESEL_AM: failure - can try again later. * CSR_TIMEOUT: failure - give up. * anything else: success - proceed. */ hostdata->connected = cmd; write_3393(hostdata, WD_COMMAND_PHASE, 0); /* copy command_descriptor_block into WD chip * (take advantage of auto-incrementing) */ write1_io(WD_CDB_1, IO_WD_ADDR); for (i = 0; i < cmd->cmd_len; i++) write1_io(cmd->cmnd[i], IO_WD_DATA); /* The wd33c93 only knows about Group 0, 1, and 5 commands when * it's doing a 'select-and-transfer'. To be safe, we write the * size of the CDB into the OWN_ID register for every case. This * way there won't be problems with vendor-unique, audio, etc. */ write_3393(hostdata, WD_OWN_ID, cmd->cmd_len); /* When doing a non-disconnect command, we can save ourselves a DATA * phase interrupt later by setting everything up now. With writes we * need to pre-fill the fifo; if there's room for the 32 flush bytes, * put them in there too - that'll avoid a fifo interrupt. Reads are * somewhat simpler. * KLUDGE NOTE: It seems that you can't completely fill the fifo here: * This results in the IO_FIFO_COUNT register rolling over to zero, * and apparently the gate array logic sees this as empty, not full, * so the 3393 chip is never signalled to start reading from the * fifo. Or maybe it's seen as a permanent fifo interrupt condition. * Regardless, we fix this by temporarily pretending that the fifo * is 16 bytes smaller. (I see now that the old driver has a comment * about "don't fill completely" in an analogous place - must be the * same deal.) 
This results in CDROM, swap partitions, and tape drives * needing an extra interrupt per write command - I think we can live * with that! */ if (!(cmd->SCp.phase)) { write_3393_count(hostdata, cmd->SCp.this_residual); write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS); write1_io(0, IO_FIFO_WRITE); /* clear fifo counter, write mode */ if (is_dir_out(cmd)) { hostdata->fifo = FI_FIFO_WRITING; if ((i = cmd->SCp.this_residual) > (IN2000_FIFO_SIZE - 16)) i = IN2000_FIFO_SIZE - 16; cmd->SCp.have_data_in = i; /* this much data in fifo */ i >>= 1; /* Gulp. Assuming modulo 2. */ sp = (unsigned short *) cmd->SCp.ptr; f = hostdata->io_base + IO_FIFO; #ifdef FAST_WRITE_IO FAST_WRITE2_IO(); #else while (i--) write2_io(*sp++, IO_FIFO); #endif /* Is there room for the flush bytes? */ if (cmd->SCp.have_data_in <= ((IN2000_FIFO_SIZE - 16) - 32)) { sp = flushbuf; i = 16; #ifdef FAST_WRITE_IO FAST_WRITE2_IO(); #else while (i--) write2_io(0, IO_FIFO); #endif } } else { write1_io(0, IO_FIFO_READ); /* put fifo in read mode */ hostdata->fifo = FI_FIFO_READING; cmd->SCp.have_data_in = 0; /* nothing transferred yet */ } } else { write_3393_count(hostdata, 0); /* this guarantees a DATA_PHASE interrupt */ } hostdata->state = S_RUNNING_LEVEL2; write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER); } /* * Since the SCSI bus can handle only 1 connection at a time, * we get out of here now. If the selection fails, or when * the command disconnects, we'll come back to this routine * to search the input_Q again... */ DB(DB_EXECUTE, printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : "")) } static void transfer_pio(uchar * buf, int cnt, int data_in_dir, struct IN2000_hostdata *hostdata) { uchar asr; DB(DB_TRANSFER, printk("(%p,%d,%s)", buf, cnt, data_in_dir ? 
"in" : "out")) write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); write_3393_count(hostdata, cnt); write_3393_cmd(hostdata, WD_CMD_TRANS_INFO); if (data_in_dir) { do { asr = READ_AUX_STAT(); if (asr & ASR_DBR) *buf++ = read_3393(hostdata, WD_DATA); } while (!(asr & ASR_INT)); } else { do { asr = READ_AUX_STAT(); if (asr & ASR_DBR) write_3393(hostdata, WD_DATA, *buf++); } while (!(asr & ASR_INT)); } /* Note: we are returning with the interrupt UN-cleared. * Since (presumably) an entire I/O operation has * completed, the bus phase is probably different, and * the interrupt routine will discover this when it * responds to the uncleared int. */ } static void transfer_bytes(Scsi_Cmnd * cmd, int data_in_dir) { struct IN2000_hostdata *hostdata; unsigned short *sp; unsigned short f; int i; hostdata = (struct IN2000_hostdata *) cmd->device->host->hostdata; /* Normally, you'd expect 'this_residual' to be non-zero here. * In a series of scatter-gather transfers, however, this * routine will usually be called with 'this_residual' equal * to 0 and 'buffers_residual' non-zero. This means that a * previous transfer completed, clearing 'this_residual', and * now we need to setup the next scatter-gather buffer as the * source or destination for THIS transfer. */ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { ++cmd->SCp.buffer; --cmd->SCp.buffers_residual; cmd->SCp.this_residual = cmd->SCp.buffer->length; cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); } /* Set up hardware registers */ write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]); write_3393_count(hostdata, cmd->SCp.this_residual); write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS); write1_io(0, IO_FIFO_WRITE); /* zero counter, assume write */ /* Reading is easy. Just issue the command and return - we'll * get an interrupt later when we have actual data to worry about. 
*/ if (data_in_dir) { write1_io(0, IO_FIFO_READ); if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) { write_3393(hostdata, WD_COMMAND_PHASE, 0x45); write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER); hostdata->state = S_RUNNING_LEVEL2; } else write_3393_cmd(hostdata, WD_CMD_TRANS_INFO); hostdata->fifo = FI_FIFO_READING; cmd->SCp.have_data_in = 0; return; } /* Writing is more involved - we'll start the WD chip and write as * much data to the fifo as we can right now. Later interrupts will * write any bytes that don't make it at this stage. */ if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) { write_3393(hostdata, WD_COMMAND_PHASE, 0x45); write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER); hostdata->state = S_RUNNING_LEVEL2; } else write_3393_cmd(hostdata, WD_CMD_TRANS_INFO); hostdata->fifo = FI_FIFO_WRITING; sp = (unsigned short *) cmd->SCp.ptr; if ((i = cmd->SCp.this_residual) > IN2000_FIFO_SIZE) i = IN2000_FIFO_SIZE; cmd->SCp.have_data_in = i; i >>= 1; /* Gulp. We assume this_residual is modulo 2 */ f = hostdata->io_base + IO_FIFO; #ifdef FAST_WRITE_IO FAST_WRITE2_IO(); #else while (i--) write2_io(*sp++, IO_FIFO); #endif } /* We need to use spin_lock_irqsave() & spin_unlock_irqrestore() in this * function in order to work in an SMP environment. (I'd be surprised * if the driver is ever used by anyone on a real multi-CPU motherboard, * but it _does_ need to be able to compile and run in an SMP kernel.) 
*/ static irqreturn_t in2000_intr(int irqnum, void *dev_id) { struct Scsi_Host *instance = dev_id; struct IN2000_hostdata *hostdata; Scsi_Cmnd *patch, *cmd; uchar asr, sr, phs, id, lun, *ucp, msg; int i, j; unsigned long length; unsigned short *sp; unsigned short f; unsigned long flags; hostdata = (struct IN2000_hostdata *) instance->hostdata; /* Get the spin_lock and disable further ints, for SMP */ spin_lock_irqsave(instance->host_lock, flags); #ifdef PROC_STATISTICS hostdata->int_cnt++; #endif /* The IN2000 card has 2 interrupt sources OR'ed onto its IRQ line - the * WD3393 chip and the 2k fifo (which is actually a dual-port RAM combined * with a big logic array, so it's a little different than what you might * expect). As far as I know, there's no reason that BOTH can't be active * at the same time, but there's a problem: while we can read the 3393 * to tell if _it_ wants an interrupt, I don't know of a way to ask the * fifo the same question. The best we can do is check the 3393 and if * it _isn't_ the source of the interrupt, then we can be pretty sure * that the fifo is the culprit. * UPDATE: I have it on good authority (Bill Earnest) that bit 0 of the * IO_FIFO_COUNT register mirrors the fifo interrupt state. I * assume that bit clear means interrupt active. As it turns * out, the driver really doesn't need to check for this after * all, so my remarks above about a 'problem' can safely be * ignored. The way the logic is set up, there's no advantage * (that I can see) to worrying about it. * * It seems that the fifo interrupt signal is negated when we extract * bytes during read or write bytes during write. * - fifo will interrupt when data is moving from it to the 3393, and * there are 31 (or less?) bytes left to go. This is sort of short- * sighted: what if you don't WANT to do more? In any case, our * response is to push more into the fifo - either actual data or * dummy bytes if need be. 
Note that we apparently have to write at * least 32 additional bytes to the fifo after an interrupt in order * to get it to release the ones it was holding on to - writing fewer * than 32 will result in another fifo int. * UPDATE: Again, info from Bill Earnest makes this more understandable: * 32 bytes = two counts of the fifo counter register. He tells * me that the fifo interrupt is a non-latching signal derived * from a straightforward boolean interpretation of the 7 * highest bits of the fifo counter and the fifo-read/fifo-write * state. Who'd a thought? */ write1_io(0, IO_LED_ON); asr = READ_AUX_STAT(); if (!(asr & ASR_INT)) { /* no WD33c93 interrupt? */ /* Ok. This is definitely a FIFO-only interrupt. * * If FI_FIFO_READING is set, there are up to 2048 bytes waiting to be read, * maybe more to come from the SCSI bus. Read as many as we can out of the * fifo and into memory at the location of SCp.ptr[SCp.have_data_in], and * update have_data_in afterwards. * * If we have FI_FIFO_WRITING, the FIFO has almost run out of bytes to move * into the WD3393 chip (I think the interrupt happens when there are 31 * bytes left, but it may be fewer...). The 3393 is still waiting, so we * shove some more into the fifo, which gets things moving again. If the * original SCSI command specified more than 2048 bytes, there may still * be some of that data left: fine - use it (from SCp.ptr[SCp.have_data_in]). * Don't forget to update have_data_in. If we've already written out the * entire buffer, feed 32 dummy bytes to the fifo - they're needed to * push out the remaining real data. * (Big thanks to Bill Earnest for getting me out of the mud in here.) 
*/ cmd = (Scsi_Cmnd *) hostdata->connected; /* assume we're connected */ CHECK_NULL(cmd, "fifo_int") if (hostdata->fifo == FI_FIFO_READING) { DB(DB_FIFO, printk("{R:%02x} ", read1_io(IO_FIFO_COUNT))) sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in); i = read1_io(IO_FIFO_COUNT) & 0xfe; i <<= 2; /* # of words waiting in the fifo */ f = hostdata->io_base + IO_FIFO; #ifdef FAST_READ_IO FAST_READ2_IO(); #else while (i--) *sp++ = read2_io(IO_FIFO); #endif i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in); i <<= 1; cmd->SCp.have_data_in += i; } else if (hostdata->fifo == FI_FIFO_WRITING) { DB(DB_FIFO, printk("{W:%02x} ", read1_io(IO_FIFO_COUNT))) /* If all bytes have been written to the fifo, flush out the stragglers. * Note that while writing 16 dummy words seems arbitrary, we don't * have another choice that I can see. What we really want is to read * the 3393 transfer count register (that would tell us how many bytes * needed flushing), but the TRANSFER_INFO command hasn't completed * yet (not enough bytes!) and that register won't be accessible. So, * we use 16 words - a number obtained through trial and error. * UPDATE: Bill says this is exactly what Always does, so there. * More thanks due him for help in this section. */ if (cmd->SCp.this_residual == cmd->SCp.have_data_in) { i = 16; while (i--) /* write 32 dummy bytes */ write2_io(0, IO_FIFO); } /* If there are still bytes left in the SCSI buffer, write as many as we * can out to the fifo. 
*/ else { sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in); i = cmd->SCp.this_residual - cmd->SCp.have_data_in; /* bytes yet to go */ j = read1_io(IO_FIFO_COUNT) & 0xfe; j <<= 2; /* how many words the fifo has room for */ if ((j << 1) > i) j = (i >> 1); while (j--) write2_io(*sp++, IO_FIFO); i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in); i <<= 1; cmd->SCp.have_data_in += i; } } else { printk("*** Spurious FIFO interrupt ***"); } write1_io(0, IO_LED_OFF); /* release the SMP spin_lock and restore irq state */ spin_unlock_irqrestore(instance->host_lock, flags); return IRQ_HANDLED; } /* This interrupt was triggered by the WD33c93 chip. The fifo interrupt * may also be asserted, but we don't bother to check it: we get more * detailed info from FIFO_READING and FIFO_WRITING (see below). */ cmd = (Scsi_Cmnd *) hostdata->connected; /* assume we're connected */ sr = read_3393(hostdata, WD_SCSI_STATUS); /* clear the interrupt */ phs = read_3393(hostdata, WD_COMMAND_PHASE); if (!cmd && (sr != CSR_RESEL_AM && sr != CSR_TIMEOUT && sr != CSR_SELECT)) { printk("\nNR:wd-intr-1\n"); write1_io(0, IO_LED_OFF); /* release the SMP spin_lock and restore irq state */ spin_unlock_irqrestore(instance->host_lock, flags); return IRQ_HANDLED; } DB(DB_INTR, printk("{%02x:%02x-", asr, sr)) /* After starting a FIFO-based transfer, the next _WD3393_ interrupt is * guaranteed to be in response to the completion of the transfer. * If we were reading, there's probably data in the fifo that needs * to be copied into RAM - do that here. Also, we have to update * 'this_residual' and 'ptr' based on the contents of the * TRANSFER_COUNT register, in case the device decided to do an * intermediate disconnect (a device may do this if it has to * do a seek, or just to be nice and let other devices have * some bus time during long transfers). * After doing whatever is necessary with the fifo, we go on and * service the WD3393 interrupt normally. 
*/ if (hostdata->fifo == FI_FIFO_READING) { /* buffer index = start-of-buffer + #-of-bytes-already-read */ sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in); /* bytes remaining in fifo = (total-wanted - #-not-got) - #-already-read */ i = (cmd->SCp.this_residual - read_3393_count(hostdata)) - cmd->SCp.have_data_in; i >>= 1; /* Gulp. We assume this will always be modulo 2 */ f = hostdata->io_base + IO_FIFO; #ifdef FAST_READ_IO FAST_READ2_IO(); #else while (i--) *sp++ = read2_io(IO_FIFO); #endif hostdata->fifo = FI_FIFO_UNUSED; length = cmd->SCp.this_residual; cmd->SCp.this_residual = read_3393_count(hostdata); cmd->SCp.ptr += (length - cmd->SCp.this_residual); DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual)) } else if (hostdata->fifo == FI_FIFO_WRITING) { hostdata->fifo = FI_FIFO_UNUSED; length = cmd->SCp.this_residual; cmd->SCp.this_residual = read_3393_count(hostdata); cmd->SCp.ptr += (length - cmd->SCp.this_residual); DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual)) } /* Respond to the specific WD3393 interrupt - there are quite a few! */ switch (sr) { case CSR_TIMEOUT: DB(DB_INTR, printk("TIMEOUT")) if (hostdata->state == S_RUNNING_LEVEL2) hostdata->connected = NULL; else { cmd = (Scsi_Cmnd *) hostdata->selecting; /* get a valid cmd */ CHECK_NULL(cmd, "csr_timeout") hostdata->selecting = NULL; } cmd->result = DID_NO_CONNECT << 16; hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); hostdata->state = S_UNCONNECTED; cmd->scsi_done(cmd); /* We are not connected to a target - check to see if there * are commands waiting to be executed. 
*/ in2000_execute(instance); break; /* Note: this interrupt should not occur in a LEVEL2 command */ case CSR_SELECT: DB(DB_INTR, printk("SELECT")) hostdata->connected = cmd = (Scsi_Cmnd *) hostdata->selecting; CHECK_NULL(cmd, "csr_select") hostdata->selecting = NULL; /* construct an IDENTIFY message with correct disconnect bit */ hostdata->outgoing_msg[0] = (0x80 | 0x00 | cmd->device->lun); if (cmd->SCp.phase) hostdata->outgoing_msg[0] |= 0x40; if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) { #ifdef SYNC_DEBUG printk(" sending SDTR "); #endif hostdata->sync_stat[cmd->device->id] = SS_WAITING; /* tack on a 2nd message to ask about synchronous transfers */ hostdata->outgoing_msg[1] = EXTENDED_MESSAGE; hostdata->outgoing_msg[2] = 3; hostdata->outgoing_msg[3] = EXTENDED_SDTR; hostdata->outgoing_msg[4] = OPTIMUM_SX_PER / 4; hostdata->outgoing_msg[5] = OPTIMUM_SX_OFF; hostdata->outgoing_len = 6; } else hostdata->outgoing_len = 1; hostdata->state = S_CONNECTED; break; case CSR_XFER_DONE | PHS_DATA_IN: case CSR_UNEXP | PHS_DATA_IN: case CSR_SRV_REQ | PHS_DATA_IN: DB(DB_INTR, printk("IN-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual)) transfer_bytes(cmd, DATA_IN_DIR); if (hostdata->state != S_RUNNING_LEVEL2) hostdata->state = S_CONNECTED; break; case CSR_XFER_DONE | PHS_DATA_OUT: case CSR_UNEXP | PHS_DATA_OUT: case CSR_SRV_REQ | PHS_DATA_OUT: DB(DB_INTR, printk("OUT-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual)) transfer_bytes(cmd, DATA_OUT_DIR); if (hostdata->state != S_RUNNING_LEVEL2) hostdata->state = S_CONNECTED; break; /* Note: this interrupt should not occur in a LEVEL2 command */ case CSR_XFER_DONE | PHS_COMMAND: case CSR_UNEXP | PHS_COMMAND: case CSR_SRV_REQ | PHS_COMMAND: DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0])) transfer_pio(cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata); hostdata->state = S_CONNECTED; break; case CSR_XFER_DONE | PHS_STATUS: case CSR_UNEXP | PHS_STATUS: case CSR_SRV_REQ | PHS_STATUS: DB(DB_INTR, 
printk("STATUS=")) cmd->SCp.Status = read_1_byte(hostdata); DB(DB_INTR, printk("%02x", cmd->SCp.Status)) if (hostdata->level2 >= L2_BASIC) { sr = read_3393(hostdata, WD_SCSI_STATUS); /* clear interrupt */ hostdata->state = S_RUNNING_LEVEL2; write_3393(hostdata, WD_COMMAND_PHASE, 0x50); write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER); } else { hostdata->state = S_CONNECTED; } break; case CSR_XFER_DONE | PHS_MESS_IN: case CSR_UNEXP | PHS_MESS_IN: case CSR_SRV_REQ | PHS_MESS_IN: DB(DB_INTR, printk("MSG_IN=")) msg = read_1_byte(hostdata); sr = read_3393(hostdata, WD_SCSI_STATUS); /* clear interrupt */ hostdata->incoming_msg[hostdata->incoming_ptr] = msg; if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE) msg = EXTENDED_MESSAGE; else hostdata->incoming_ptr = 0; cmd->SCp.Message = msg; switch (msg) { case COMMAND_COMPLETE: DB(DB_INTR, printk("CCMP")) write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK); hostdata->state = S_PRE_CMP_DISC; break; case SAVE_POINTERS: DB(DB_INTR, printk("SDP")) write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; break; case RESTORE_POINTERS: DB(DB_INTR, printk("RDP")) if (hostdata->level2 >= L2_BASIC) { write_3393(hostdata, WD_COMMAND_PHASE, 0x45); write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER); hostdata->state = S_RUNNING_LEVEL2; } else { write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; } break; case DISCONNECT: DB(DB_INTR, printk("DIS")) cmd->device->disconnect = 1; write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK); hostdata->state = S_PRE_TMP_DISC; break; case MESSAGE_REJECT: DB(DB_INTR, printk("REJ")) #ifdef SYNC_DEBUG printk("-REJ-"); #endif if (hostdata->sync_stat[cmd->device->id] == SS_WAITING) hostdata->sync_stat[cmd->device->id] = SS_SET; write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; break; case EXTENDED_MESSAGE: DB(DB_INTR, printk("EXT")) ucp = hostdata->incoming_msg; #ifdef SYNC_DEBUG printk("%02x", ucp[hostdata->incoming_ptr]); #endif /* Is this the last byte of the 
extended message? */ if ((hostdata->incoming_ptr >= 2) && (hostdata->incoming_ptr == (ucp[1] + 1))) { switch (ucp[2]) { /* what's the EXTENDED code? */ case EXTENDED_SDTR: id = calc_sync_xfer(ucp[3], ucp[4]); if (hostdata->sync_stat[cmd->device->id] != SS_WAITING) { /* A device has sent an unsolicited SDTR message; rather than go * through the effort of decoding it and then figuring out what * our reply should be, we're just gonna say that we have a * synchronous fifo depth of 0. This will result in asynchronous * transfers - not ideal but so much easier. * Actually, this is OK because it assures us that if we don't * specifically ask for sync transfers, we won't do any. */ write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN); /* want MESS_OUT */ hostdata->outgoing_msg[0] = EXTENDED_MESSAGE; hostdata->outgoing_msg[1] = 3; hostdata->outgoing_msg[2] = EXTENDED_SDTR; hostdata->outgoing_msg[3] = hostdata->default_sx_per / 4; hostdata->outgoing_msg[4] = 0; hostdata->outgoing_len = 5; hostdata->sync_xfer[cmd->device->id] = calc_sync_xfer(hostdata->default_sx_per / 4, 0); } else { hostdata->sync_xfer[cmd->device->id] = id; } #ifdef SYNC_DEBUG printk("sync_xfer=%02x", hostdata->sync_xfer[cmd->device->id]); #endif hostdata->sync_stat[cmd->device->id] = SS_SET; write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; break; case EXTENDED_WDTR: write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN); /* want MESS_OUT */ printk("sending WDTR "); hostdata->outgoing_msg[0] = EXTENDED_MESSAGE; hostdata->outgoing_msg[1] = 2; hostdata->outgoing_msg[2] = EXTENDED_WDTR; hostdata->outgoing_msg[3] = 0; /* 8 bit transfer width */ hostdata->outgoing_len = 4; write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; break; default: write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN); /* want MESS_OUT */ printk("Rejecting Unknown Extended Message(%02x). 
", ucp[2]); hostdata->outgoing_msg[0] = MESSAGE_REJECT; hostdata->outgoing_len = 1; write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; break; } hostdata->incoming_ptr = 0; } /* We need to read more MESS_IN bytes for the extended message */ else { hostdata->incoming_ptr++; write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; } break; default: printk("Rejecting Unknown Message(%02x) ", msg); write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN); /* want MESS_OUT */ hostdata->outgoing_msg[0] = MESSAGE_REJECT; hostdata->outgoing_len = 1; write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; } break; /* Note: this interrupt will occur only after a LEVEL2 command */ case CSR_SEL_XFER_DONE: /* Make sure that reselection is enabled at this point - it may * have been turned off for the command that just completed. */ write_3393(hostdata, WD_SOURCE_ID, SRCID_ER); if (phs == 0x60) { DB(DB_INTR, printk("SX-DONE")) cmd->SCp.Message = COMMAND_COMPLETE; lun = read_3393(hostdata, WD_TARGET_LUN); DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun)) hostdata->connected = NULL; hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); hostdata->state = S_UNCONNECTED; if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE) cmd->SCp.Status = lun; if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD) cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); else cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); cmd->scsi_done(cmd); /* We are no longer connected to a target - check to see if * there are commands waiting to be executed. 
*/ in2000_execute(instance); } else { printk("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs); } break; /* Note: this interrupt will occur only after a LEVEL2 command */ case CSR_SDP: DB(DB_INTR, printk("SDP")) hostdata->state = S_RUNNING_LEVEL2; write_3393(hostdata, WD_COMMAND_PHASE, 0x41); write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER); break; case CSR_XFER_DONE | PHS_MESS_OUT: case CSR_UNEXP | PHS_MESS_OUT: case CSR_SRV_REQ | PHS_MESS_OUT: DB(DB_INTR, printk("MSG_OUT=")) /* To get here, we've probably requested MESSAGE_OUT and have * already put the correct bytes in outgoing_msg[] and filled * in outgoing_len. We simply send them out to the SCSI bus. * Sometimes we get MESSAGE_OUT phase when we're not expecting * it - like when our SDTR message is rejected by a target. Some * targets send the REJECT before receiving all of the extended * message, and then seem to go back to MESSAGE_OUT for a byte * or two. Not sure why, or if I'm doing something wrong to * cause this to happen. Regardless, it seems that sending * NOP messages in these situations results in no harm and * makes everyone happy. */ if (hostdata->outgoing_len == 0) { hostdata->outgoing_len = 1; hostdata->outgoing_msg[0] = NOP; } transfer_pio(hostdata->outgoing_msg, hostdata->outgoing_len, DATA_OUT_DIR, hostdata); DB(DB_INTR, printk("%02x", hostdata->outgoing_msg[0])) hostdata->outgoing_len = 0; hostdata->state = S_CONNECTED; break; case CSR_UNEXP_DISC: /* I think I've seen this after a request-sense that was in response * to an error condition, but not sure. We certainly need to do * something when we get this interrupt - the question is 'what?'. * Let's think positively, and assume some command has finished * in a legal manner (like a command that provokes a request-sense), * so we treat it as a normal command-complete-disconnect. */ /* Make sure that reselection is enabled at this point - it may * have been turned off for the command that just completed. 
*/ write_3393(hostdata, WD_SOURCE_ID, SRCID_ER); if (cmd == NULL) { printk(" - Already disconnected! "); hostdata->state = S_UNCONNECTED; /* release the SMP spin_lock and restore irq state */ spin_unlock_irqrestore(instance->host_lock, flags); return IRQ_HANDLED; } DB(DB_INTR, printk("UNEXP_DISC")) hostdata->connected = NULL; hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); hostdata->state = S_UNCONNECTED; if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD) cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); else cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); cmd->scsi_done(cmd); /* We are no longer connected to a target - check to see if * there are commands waiting to be executed. */ in2000_execute(instance); break; case CSR_DISC: /* Make sure that reselection is enabled at this point - it may * have been turned off for the command that just completed. */ write_3393(hostdata, WD_SOURCE_ID, SRCID_ER); DB(DB_INTR, printk("DISC")) if (cmd == NULL) { printk(" - Already disconnected! "); hostdata->state = S_UNCONNECTED; } switch (hostdata->state) { case S_PRE_CMP_DISC: hostdata->connected = NULL; hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); hostdata->state = S_UNCONNECTED; DB(DB_INTR, printk(":%d", cmd->SCp.Status)) if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD) cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); else cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); cmd->scsi_done(cmd); break; case S_PRE_TMP_DISC: case S_RUNNING_LEVEL2: cmd->host_scribble = (uchar *) hostdata->disconnected_Q; hostdata->disconnected_Q = cmd; hostdata->connected = NULL; hostdata->state = S_UNCONNECTED; #ifdef PROC_STATISTICS hostdata->disc_done_cnt[cmd->device->id]++; #endif break; default: printk("*** Unexpected DISCONNECT interrupt! ***"); hostdata->state = S_UNCONNECTED; } /* We are no longer connected to a target - check to see if * there are commands waiting to be executed. 
*/ in2000_execute(instance); break; case CSR_RESEL_AM: DB(DB_INTR, printk("RESEL")) /* First we have to make sure this reselection didn't */ /* happen during Arbitration/Selection of some other device. */ /* If yes, put losing command back on top of input_Q. */ if (hostdata->level2 <= L2_NONE) { if (hostdata->selecting) { cmd = (Scsi_Cmnd *) hostdata->selecting; hostdata->selecting = NULL; hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); cmd->host_scribble = (uchar *) hostdata->input_Q; hostdata->input_Q = cmd; } } else { if (cmd) { if (phs == 0x00) { hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); cmd->host_scribble = (uchar *) hostdata->input_Q; hostdata->input_Q = cmd; } else { printk("---%02x:%02x:%02x-TROUBLE: Intrusive ReSelect!---", asr, sr, phs); while (1) printk("\r"); } } } /* OK - find out which device reselected us. */ id = read_3393(hostdata, WD_SOURCE_ID); id &= SRCID_MASK; /* and extract the lun from the ID message. (Note that we don't * bother to check for a valid message here - I guess this is * not the right way to go, but....) */ lun = read_3393(hostdata, WD_DATA); if (hostdata->level2 < L2_RESELECT) write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK); lun &= 7; /* Now we look for the command that's reconnecting. */ cmd = (Scsi_Cmnd *) hostdata->disconnected_Q; patch = NULL; while (cmd) { if (id == cmd->device->id && lun == cmd->device->lun) break; patch = cmd; cmd = (Scsi_Cmnd *) cmd->host_scribble; } /* Hmm. Couldn't find a valid command.... What to do? */ if (!cmd) { printk("---TROUBLE: target %d.%d not in disconnect queue---", id, lun); break; } /* Ok, found the command - now start it up again. */ if (patch) patch->host_scribble = cmd->host_scribble; else hostdata->disconnected_Q = (Scsi_Cmnd *) cmd->host_scribble; hostdata->connected = cmd; /* We don't need to worry about 'initialize_SCp()' or 'hostdata->busy[]' * because these things are preserved over a disconnect. 
* But we DO need to fix the DPD bit so it's correct for this command. */ if (is_dir_out(cmd)) write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id); else write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD); if (hostdata->level2 >= L2_RESELECT) { write_3393_count(hostdata, 0); /* we want a DATA_PHASE interrupt */ write_3393(hostdata, WD_COMMAND_PHASE, 0x45); write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER); hostdata->state = S_RUNNING_LEVEL2; } else hostdata->state = S_CONNECTED; break; default: printk("--UNKNOWN INTERRUPT:%02x:%02x:%02x--", asr, sr, phs); } write1_io(0, IO_LED_OFF); DB(DB_INTR, printk("} ")) /* release the SMP spin_lock and restore irq state */ spin_unlock_irqrestore(instance->host_lock, flags); return IRQ_HANDLED; } #define RESET_CARD 0 #define RESET_CARD_AND_BUS 1 #define B_FLAG 0x80 /* * Caller must hold instance lock! */ static int reset_hardware(struct Scsi_Host *instance, int type) { struct IN2000_hostdata *hostdata; int qt, x; hostdata = (struct IN2000_hostdata *) instance->hostdata; write1_io(0, IO_LED_ON); if (type == RESET_CARD_AND_BUS) { write1_io(0, IO_CARD_RESET); x = read1_io(IO_HARDWARE); } x = read_3393(hostdata, WD_SCSI_STATUS); /* clear any WD intrpt */ write_3393(hostdata, WD_OWN_ID, instance->this_id | OWNID_EAF | OWNID_RAF | OWNID_FS_8); write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, calc_sync_xfer(hostdata->default_sx_per / 4, DEFAULT_SX_OFF)); write1_io(0, IO_FIFO_WRITE); /* clear fifo counter */ write1_io(0, IO_FIFO_READ); /* start fifo out in read mode */ write_3393(hostdata, WD_COMMAND, WD_CMD_RESET); /* FIXME: timeout ?? 
*/ while (!(READ_AUX_STAT() & ASR_INT)) cpu_relax(); /* wait for RESET to complete */ x = read_3393(hostdata, WD_SCSI_STATUS); /* clear interrupt */ write_3393(hostdata, WD_QUEUE_TAG, 0xa5); /* any random number */ qt = read_3393(hostdata, WD_QUEUE_TAG); if (qt == 0xa5) { x |= B_FLAG; write_3393(hostdata, WD_QUEUE_TAG, 0); } write_3393(hostdata, WD_TIMEOUT_PERIOD, TIMEOUT_PERIOD_VALUE); write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); write1_io(0, IO_LED_OFF); return x; } static int in2000_bus_reset(Scsi_Cmnd * cmd) { struct Scsi_Host *instance; struct IN2000_hostdata *hostdata; int x; unsigned long flags; instance = cmd->device->host; hostdata = (struct IN2000_hostdata *) instance->hostdata; printk(KERN_WARNING "scsi%d: Reset. ", instance->host_no); spin_lock_irqsave(instance->host_lock, flags); /* do scsi-reset here */ reset_hardware(instance, RESET_CARD_AND_BUS); for (x = 0; x < 8; x++) { hostdata->busy[x] = 0; hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF); hostdata->sync_stat[x] = SS_UNSET; /* using default sync values */ } hostdata->input_Q = NULL; hostdata->selecting = NULL; hostdata->connected = NULL; hostdata->disconnected_Q = NULL; hostdata->state = S_UNCONNECTED; hostdata->fifo = FI_FIFO_UNUSED; hostdata->incoming_ptr = 0; hostdata->outgoing_len = 0; cmd->result = DID_RESET << 16; spin_unlock_irqrestore(instance->host_lock, flags); return SUCCESS; } static int __in2000_abort(Scsi_Cmnd * cmd) { struct Scsi_Host *instance; struct IN2000_hostdata *hostdata; Scsi_Cmnd *tmp, *prev; uchar sr, asr; unsigned long timeout; instance = cmd->device->host; hostdata = (struct IN2000_hostdata *) instance->hostdata; printk(KERN_DEBUG "scsi%d: Abort-", instance->host_no); printk("(asr=%02x,count=%ld,resid=%d,buf_resid=%d,have_data=%d,FC=%02x)- ", READ_AUX_STAT(), read_3393_count(hostdata), cmd->SCp.this_residual, cmd->SCp.buffers_residual, cmd->SCp.have_data_in, read1_io(IO_FIFO_COUNT)); /* * Case 1 : If the command hasn't 
been issued yet, we simply remove it * from the inout_Q. */ tmp = (Scsi_Cmnd *) hostdata->input_Q; prev = NULL; while (tmp) { if (tmp == cmd) { if (prev) prev->host_scribble = cmd->host_scribble; cmd->host_scribble = NULL; cmd->result = DID_ABORT << 16; printk(KERN_WARNING "scsi%d: Abort - removing command from input_Q. ", instance->host_no); cmd->scsi_done(cmd); return SUCCESS; } prev = tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble; } /* * Case 2 : If the command is connected, we're going to fail the abort * and let the high level SCSI driver retry at a later time or * issue a reset. * * Timeouts, and therefore aborted commands, will be highly unlikely * and handling them cleanly in this situation would make the common * case of noresets less efficient, and would pollute our code. So, * we fail. */ if (hostdata->connected == cmd) { printk(KERN_WARNING "scsi%d: Aborting connected command - ", instance->host_no); printk("sending wd33c93 ABORT command - "); write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); write_3393_cmd(hostdata, WD_CMD_ABORT); /* Now we have to attempt to flush out the FIFO... */ printk("flushing fifo - "); timeout = 1000000; do { asr = READ_AUX_STAT(); if (asr & ASR_DBR) read_3393(hostdata, WD_DATA); } while (!(asr & ASR_INT) && timeout-- > 0); sr = read_3393(hostdata, WD_SCSI_STATUS); printk("asr=%02x, sr=%02x, %ld bytes un-transferred (timeout=%ld) - ", asr, sr, read_3393_count(hostdata), timeout); /* * Abort command processed. * Still connected. * We must disconnect. 
*/ printk("sending wd33c93 DISCONNECT command - "); write_3393_cmd(hostdata, WD_CMD_DISCONNECT); timeout = 1000000; asr = READ_AUX_STAT(); while ((asr & ASR_CIP) && timeout-- > 0) asr = READ_AUX_STAT(); sr = read_3393(hostdata, WD_SCSI_STATUS); printk("asr=%02x, sr=%02x.", asr, sr); hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); hostdata->connected = NULL; hostdata->state = S_UNCONNECTED; cmd->result = DID_ABORT << 16; cmd->scsi_done(cmd); in2000_execute(instance); return SUCCESS; } /* * Case 3: If the command is currently disconnected from the bus, * we're not going to expend much effort here: Let's just return * an ABORT_SNOOZE and hope for the best... */ for (tmp = (Scsi_Cmnd *) hostdata->disconnected_Q; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble) if (cmd == tmp) { printk(KERN_DEBUG "scsi%d: unable to abort disconnected command.\n", instance->host_no); return FAILED; } /* * Case 4 : If we reached this point, the command was not found in any of * the queues. * * We probably reached this point because of an unlikely race condition * between the command completing successfully and the abortion code, * so we won't panic, but we will notify the user in case something really * broke. */ in2000_execute(instance); printk("scsi%d: warning : SCSI command probably completed successfully" " before abortion. 
", instance->host_no); return SUCCESS; } static int in2000_abort(Scsi_Cmnd * cmd) { int rc; spin_lock_irq(cmd->device->host->host_lock); rc = __in2000_abort(cmd); spin_unlock_irq(cmd->device->host->host_lock); return rc; } #define MAX_IN2000_HOSTS 3 #define MAX_SETUP_ARGS ARRAY_SIZE(setup_args) #define SETUP_BUFFER_SIZE 200 static char setup_buffer[SETUP_BUFFER_SIZE]; static char setup_used[MAX_SETUP_ARGS]; static int done_setup = 0; static void __init in2000_setup(char *str, int *ints) { int i; char *p1, *p2; strlcpy(setup_buffer, str, SETUP_BUFFER_SIZE); p1 = setup_buffer; i = 0; while (*p1 && (i < MAX_SETUP_ARGS)) { p2 = strchr(p1, ','); if (p2) { *p2 = '\0'; if (p1 != p2) setup_args[i] = p1; p1 = p2 + 1; i++; } else { setup_args[i] = p1; break; } } for (i = 0; i < MAX_SETUP_ARGS; i++) setup_used[i] = 0; done_setup = 1; } /* check_setup_args() returns index if key found, 0 if not */ static int __init check_setup_args(char *key, int *val, char *buf) { int x; char *cp; for (x = 0; x < MAX_SETUP_ARGS; x++) { if (setup_used[x]) continue; if (!strncmp(setup_args[x], key, strlen(key))) break; } if (x == MAX_SETUP_ARGS) return 0; setup_used[x] = 1; cp = setup_args[x] + strlen(key); *val = -1; if (*cp != ':') return ++x; cp++; if ((*cp >= '0') && (*cp <= '9')) { *val = simple_strtoul(cp, NULL, 0); } return ++x; } /* The "correct" (ie portable) way to access memory-mapped hardware * such as the IN2000 EPROM and dip switch is through the use of * special macros declared in 'asm/io.h'. We use readb() and readl() * when reading from the card's BIOS area in in2000_detect(). 
*/ static u32 bios_tab[] in2000__INITDATA = { 0xc8000, 0xd0000, 0xd8000, 0 }; static unsigned short base_tab[] in2000__INITDATA = { 0x220, 0x200, 0x110, 0x100, }; static int int_tab[] in2000__INITDATA = { 15, 14, 11, 10 }; static int probe_bios(u32 addr, u32 *s1, uchar *switches) { void __iomem *p = ioremap(addr, 0x34); if (!p) return 0; *s1 = readl(p + 0x10); if (*s1 == 0x41564f4e || readl(p + 0x30) == 0x61776c41) { /* Read the switch image that's mapped into EPROM space */ *switches = ~readb(p + 0x20); iounmap(p); return 1; } iounmap(p); return 0; } static int __init in2000_detect(struct scsi_host_template * tpnt) { struct Scsi_Host *instance; struct IN2000_hostdata *hostdata; int detect_count; int bios; int x; unsigned short base; uchar switches; uchar hrev; unsigned long flags; int val; char buf[32]; /* Thanks to help from Bill Earnest, probing for IN2000 cards is a * pretty straightforward and fool-proof operation. There are 3 * possible locations for the IN2000 EPROM in memory space - if we * find a BIOS signature, we can read the dip switch settings from * the byte at BIOS+32 (shadowed in by logic on the card). From 2 * of the switch bits we get the card's address in IO space. There's * an image of the dip switch there, also, so we have a way to back- * check that this really is an IN2000 card. Very nifty. Use the * 'ioport:xx' command-line parameter if your BIOS EPROM is absent * or disabled. */ if (!done_setup && setup_strings) in2000_setup(setup_strings, NULL); detect_count = 0; for (bios = 0; bios_tab[bios]; bios++) { u32 s1 = 0; if (check_setup_args("ioport", &val, buf)) { base = val; switches = ~inb(base + IO_SWITCHES) & 0xff; printk("Forcing IN2000 detection at IOport 0x%x ", base); bios = 2; } /* * There have been a couple of BIOS versions with different layouts * for the obvious ID strings. We look for the 2 most common ones and * hope that they cover all the cases... 
*/ else if (probe_bios(bios_tab[bios], &s1, &switches)) { printk("Found IN2000 BIOS at 0x%x ", (unsigned int) bios_tab[bios]); /* Find out where the IO space is */ x = switches & (SW_ADDR0 | SW_ADDR1); base = base_tab[x]; /* Check for the IN2000 signature in IO space. */ x = ~inb(base + IO_SWITCHES) & 0xff; if (x != switches) { printk("Bad IO signature: %02x vs %02x.\n", x, switches); continue; } } else continue; /* OK. We have a base address for the IO ports - run a few safety checks */ if (!(switches & SW_BIT7)) { /* I _think_ all cards do this */ printk("There is no IN-2000 SCSI card at IOport 0x%03x!\n", base); continue; } /* Let's assume any hardware version will work, although the driver * has only been tested on 0x21, 0x22, 0x25, 0x26, and 0x27. We'll * print out the rev number for reference later, but accept them all. */ hrev = inb(base + IO_HARDWARE); /* Bit 2 tells us if interrupts are disabled */ if (switches & SW_DISINT) { printk("The IN-2000 SCSI card at IOport 0x%03x ", base); printk("is not configured for interrupt operation!\n"); printk("This driver requires an interrupt: cancelling detection.\n"); continue; } /* Ok. We accept that there's an IN2000 at ioaddr 'base'. Now * initialize it. 
*/ tpnt->proc_name = "in2000"; instance = scsi_register(tpnt, sizeof(struct IN2000_hostdata)); if (instance == NULL) continue; detect_count++; hostdata = (struct IN2000_hostdata *) instance->hostdata; instance->io_port = hostdata->io_base = base; hostdata->dip_switch = switches; hostdata->hrev = hrev; write1_io(0, IO_FIFO_WRITE); /* clear fifo counter */ write1_io(0, IO_FIFO_READ); /* start fifo out in read mode */ write1_io(0, IO_INTR_MASK); /* allow all ints */ x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT]; if (request_irq(x, in2000_intr, IRQF_DISABLED, "in2000", instance)) { printk("in2000_detect: Unable to allocate IRQ.\n"); detect_count--; continue; } instance->irq = x; instance->n_io_port = 13; request_region(base, 13, "in2000"); /* lock in this IO space for our use */ for (x = 0; x < 8; x++) { hostdata->busy[x] = 0; hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF); hostdata->sync_stat[x] = SS_UNSET; /* using default sync values */ #ifdef PROC_STATISTICS hostdata->cmd_cnt[x] = 0; hostdata->disc_allowed_cnt[x] = 0; hostdata->disc_done_cnt[x] = 0; #endif } hostdata->input_Q = NULL; hostdata->selecting = NULL; hostdata->connected = NULL; hostdata->disconnected_Q = NULL; hostdata->state = S_UNCONNECTED; hostdata->fifo = FI_FIFO_UNUSED; hostdata->level2 = L2_BASIC; hostdata->disconnect = DIS_ADAPTIVE; hostdata->args = DEBUG_DEFAULTS; hostdata->incoming_ptr = 0; hostdata->outgoing_len = 0; hostdata->default_sx_per = DEFAULT_SX_PER; /* Older BIOS's had a 'sync on/off' switch - use its setting */ if (s1 == 0x41564f4e && (switches & SW_SYNC_DOS5)) hostdata->sync_off = 0x00; /* sync defaults to on */ else hostdata->sync_off = 0xff; /* sync defaults to off */ #ifdef PROC_INTERFACE hostdata->proc = PR_VERSION | PR_INFO | PR_STATISTICS | PR_CONNECTED | PR_INPUTQ | PR_DISCQ | PR_STOP; #ifdef PROC_STATISTICS hostdata->int_cnt = 0; #endif #endif if (check_setup_args("nosync", &val, buf)) hostdata->sync_off = val; if 
(check_setup_args("period", &val, buf)) hostdata->default_sx_per = sx_table[round_period((unsigned int) val)].period_ns; if (check_setup_args("disconnect", &val, buf)) { if ((val >= DIS_NEVER) && (val <= DIS_ALWAYS)) hostdata->disconnect = val; else hostdata->disconnect = DIS_ADAPTIVE; } if (check_setup_args("noreset", &val, buf)) hostdata->args ^= A_NO_SCSI_RESET; if (check_setup_args("level2", &val, buf)) hostdata->level2 = val; if (check_setup_args("debug", &val, buf)) hostdata->args = (val & DB_MASK); #ifdef PROC_INTERFACE if (check_setup_args("proc", &val, buf)) hostdata->proc = val; #endif /* FIXME: not strictly needed I think but the called code expects to be locked */ spin_lock_irqsave(instance->host_lock, flags); x = reset_hardware(instance, (hostdata->args & A_NO_SCSI_RESET) ? RESET_CARD : RESET_CARD_AND_BUS); spin_unlock_irqrestore(instance->host_lock, flags); hostdata->microcode = read_3393(hostdata, WD_CDB_1); if (x & 0x01) { if (x & B_FLAG) hostdata->chip = C_WD33C93B; else hostdata->chip = C_WD33C93A; } else hostdata->chip = C_WD33C93; printk("dip_switch=%02x irq=%d ioport=%02x floppy=%s sync/DOS5=%s ", (switches & 0x7f), instance->irq, hostdata->io_base, (switches & SW_FLOPPY) ? "Yes" : "No", (switches & SW_SYNC_DOS5) ? "Yes" : "No"); printk("hardware_ver=%02x chip=%s microcode=%02x\n", hrev, (hostdata->chip == C_WD33C93) ? "WD33c93" : (hostdata->chip == C_WD33C93A) ? "WD33c93A" : (hostdata->chip == C_WD33C93B) ? 
"WD33c93B" : "unknown", hostdata->microcode); #ifdef DEBUGGING_ON printk("setup_args = "); for (x = 0; x < MAX_SETUP_ARGS; x++) printk("%s,", setup_args[x]); printk("\n"); #endif if (hostdata->sync_off == 0xff) printk("Sync-transfer DISABLED on all devices: ENABLE from command-line\n"); printk("IN2000 driver version %s - %s\n", IN2000_VERSION, IN2000_DATE); } return detect_count; } static int in2000_release(struct Scsi_Host *shost) { if (shost->irq) free_irq(shost->irq, shost); if (shost->io_port && shost->n_io_port) release_region(shost->io_port, shost->n_io_port); return 0; } /* NOTE: I lifted this function straight out of the old driver, * and have not tested it. Presumably it does what it's * supposed to do... */ static int in2000_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *iinfo) { int size; size = capacity; iinfo[0] = 64; iinfo[1] = 32; iinfo[2] = size >> 11; /* This should approximate the large drive handling that the DOS ASPI manager uses. Drives very near the boundaries may not be handled correctly (i.e. near 2.0 Gb and 4.0 Gb) */ if (iinfo[2] > 1024) { iinfo[0] = 64; iinfo[1] = 63; iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]); } if (iinfo[2] > 1024) { iinfo[0] = 128; iinfo[1] = 63; iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]); } if (iinfo[2] > 1024) { iinfo[0] = 255; iinfo[1] = 63; iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]); } return 0; } static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off, int len, int in) { #ifdef PROC_INTERFACE char *bp; char tbuf[128]; unsigned long flags; struct IN2000_hostdata *hd; Scsi_Cmnd *cmd; int x, i; static int stop = 0; hd = (struct IN2000_hostdata *) instance->hostdata; /* If 'in' is TRUE we need to _read_ the proc file. 
We accept the following * keywords (same format as command-line, but only ONE per read): * debug * disconnect * period * resync * proc */ if (in) { buf[len] = '\0'; bp = buf; if (!strncmp(bp, "debug:", 6)) { bp += 6; hd->args = simple_strtoul(bp, NULL, 0) & DB_MASK; } else if (!strncmp(bp, "disconnect:", 11)) { bp += 11; x = simple_strtoul(bp, NULL, 0); if (x < DIS_NEVER || x > DIS_ALWAYS) x = DIS_ADAPTIVE; hd->disconnect = x; } else if (!strncmp(bp, "period:", 7)) { bp += 7; x = simple_strtoul(bp, NULL, 0); hd->default_sx_per = sx_table[round_period((unsigned int) x)].period_ns; } else if (!strncmp(bp, "resync:", 7)) { bp += 7; x = simple_strtoul(bp, NULL, 0); for (i = 0; i < 7; i++) if (x & (1 << i)) hd->sync_stat[i] = SS_UNSET; } else if (!strncmp(bp, "proc:", 5)) { bp += 5; hd->proc = simple_strtoul(bp, NULL, 0); } else if (!strncmp(bp, "level2:", 7)) { bp += 7; hd->level2 = simple_strtoul(bp, NULL, 0); } return len; } spin_lock_irqsave(instance->host_lock, flags); bp = buf; *bp = '\0'; if (hd->proc & PR_VERSION) { sprintf(tbuf, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE); strcat(bp, tbuf); } if (hd->proc & PR_INFO) { sprintf(tbuf, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? 
"Yes" : "No"); strcat(bp, tbuf); strcat(bp, "\nsync_xfer[] = "); for (x = 0; x < 7; x++) { sprintf(tbuf, "\t%02x", hd->sync_xfer[x]); strcat(bp, tbuf); } strcat(bp, "\nsync_stat[] = "); for (x = 0; x < 7; x++) { sprintf(tbuf, "\t%02x", hd->sync_stat[x]); strcat(bp, tbuf); } } #ifdef PROC_STATISTICS if (hd->proc & PR_STATISTICS) { strcat(bp, "\ncommands issued: "); for (x = 0; x < 7; x++) { sprintf(tbuf, "\t%ld", hd->cmd_cnt[x]); strcat(bp, tbuf); } strcat(bp, "\ndisconnects allowed:"); for (x = 0; x < 7; x++) { sprintf(tbuf, "\t%ld", hd->disc_allowed_cnt[x]); strcat(bp, tbuf); } strcat(bp, "\ndisconnects done: "); for (x = 0; x < 7; x++) { sprintf(tbuf, "\t%ld", hd->disc_done_cnt[x]); strcat(bp, tbuf); } sprintf(tbuf, "\ninterrupts: \t%ld", hd->int_cnt); strcat(bp, tbuf); } #endif if (hd->proc & PR_CONNECTED) { strcat(bp, "\nconnected: "); if (hd->connected) { cmd = (Scsi_Cmnd *) hd->connected; sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); strcat(bp, tbuf); } } if (hd->proc & PR_INPUTQ) { strcat(bp, "\ninput_Q: "); cmd = (Scsi_Cmnd *) hd->input_Q; while (cmd) { sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); strcat(bp, tbuf); cmd = (Scsi_Cmnd *) cmd->host_scribble; } } if (hd->proc & PR_DISCQ) { strcat(bp, "\ndisconnected_Q:"); cmd = (Scsi_Cmnd *) hd->disconnected_Q; while (cmd) { sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); strcat(bp, tbuf); cmd = (Scsi_Cmnd *) cmd->host_scribble; } } if (hd->proc & PR_TEST) { ; /* insert your own custom function here */ } strcat(bp, "\n"); spin_unlock_irqrestore(instance->host_lock, flags); *start = buf; if (stop) { stop = 0; return 0; /* return 0 to signal end-of-file */ } if (off > 0x40000) /* ALWAYS stop after 256k bytes have been read */ stop = 1; if (hd->proc & PR_STOP) /* stop every other time */ stop = 1; return strlen(bp); #else /* PROC_INTERFACE */ return 0; #endif /* PROC_INTERFACE */ } MODULE_LICENSE("GPL"); static 
/*
 * SCSI host template for the Always IN2000 ISA adapter.  Wires the
 * probe/teardown, command-submission and error-recovery entry points
 * defined earlier in this file into the mid-layer.  (The `static`
 * storage-class keyword for this definition precedes this block.)
 */
struct scsi_host_template driver_template = {
	.proc_name = "in2000",			/* /proc/scsi/in2000 directory name */
	.proc_info = in2000_proc_info,		/* read/write handler for the proc file */
	.name = "Always IN2000",
	.detect = in2000_detect,		/* legacy probe: scans BIOS/IO locations */
	.release = in2000_release,		/* frees IRQ and IO region */
	.queuecommand = in2000_queuecommand,
	.eh_abort_handler = in2000_abort,	/* locked wrapper around __in2000_abort */
	.eh_bus_reset_handler = in2000_bus_reset,
	.bios_param = in2000_biosparam,		/* DOS-ASPI-compatible CHS mapping */
	.can_queue = IN2000_CAN_Q,
	.this_id = IN2000_HOST_ID,
	.sg_tablesize = IN2000_SG,
	.cmd_per_lun = IN2000_CPL,
	.use_clustering = DISABLE_CLUSTERING,
};

/*
 * Legacy-driver glue: scsi_module.c supplies the module init/exit code
 * that invokes .detect/.release through driver_template above.
 */
#include "scsi_module.c"
gpl-2.0
daeiron/kenzo_caf_kernel
arch/arm/mach-rpc/irq.c
6425
3512
#include <linux/init.h> #include <linux/list.h> #include <linux/io.h> #include <asm/mach/irq.h> #include <asm/hardware/iomd.h> #include <asm/irq.h> #include <asm/fiq.h> static void iomd_ack_irq_a(struct irq_data *d) { unsigned int val, mask; mask = 1 << d->irq; val = iomd_readb(IOMD_IRQMASKA); iomd_writeb(val & ~mask, IOMD_IRQMASKA); iomd_writeb(mask, IOMD_IRQCLRA); } static void iomd_mask_irq_a(struct irq_data *d) { unsigned int val, mask; mask = 1 << d->irq; val = iomd_readb(IOMD_IRQMASKA); iomd_writeb(val & ~mask, IOMD_IRQMASKA); } static void iomd_unmask_irq_a(struct irq_data *d) { unsigned int val, mask; mask = 1 << d->irq; val = iomd_readb(IOMD_IRQMASKA); iomd_writeb(val | mask, IOMD_IRQMASKA); } static struct irq_chip iomd_a_chip = { .irq_ack = iomd_ack_irq_a, .irq_mask = iomd_mask_irq_a, .irq_unmask = iomd_unmask_irq_a, }; static void iomd_mask_irq_b(struct irq_data *d) { unsigned int val, mask; mask = 1 << (d->irq & 7); val = iomd_readb(IOMD_IRQMASKB); iomd_writeb(val & ~mask, IOMD_IRQMASKB); } static void iomd_unmask_irq_b(struct irq_data *d) { unsigned int val, mask; mask = 1 << (d->irq & 7); val = iomd_readb(IOMD_IRQMASKB); iomd_writeb(val | mask, IOMD_IRQMASKB); } static struct irq_chip iomd_b_chip = { .irq_ack = iomd_mask_irq_b, .irq_mask = iomd_mask_irq_b, .irq_unmask = iomd_unmask_irq_b, }; static void iomd_mask_irq_dma(struct irq_data *d) { unsigned int val, mask; mask = 1 << (d->irq & 7); val = iomd_readb(IOMD_DMAMASK); iomd_writeb(val & ~mask, IOMD_DMAMASK); } static void iomd_unmask_irq_dma(struct irq_data *d) { unsigned int val, mask; mask = 1 << (d->irq & 7); val = iomd_readb(IOMD_DMAMASK); iomd_writeb(val | mask, IOMD_DMAMASK); } static struct irq_chip iomd_dma_chip = { .irq_ack = iomd_mask_irq_dma, .irq_mask = iomd_mask_irq_dma, .irq_unmask = iomd_unmask_irq_dma, }; static void iomd_mask_irq_fiq(struct irq_data *d) { unsigned int val, mask; mask = 1 << (d->irq & 7); val = iomd_readb(IOMD_FIQMASK); iomd_writeb(val & ~mask, IOMD_FIQMASK); } 
/* Enable a FIQ source by setting its bit in the IOMD FIQ mask register. */
static void iomd_unmask_irq_fiq(struct irq_data *d)
{
	unsigned int val, mask;

	mask = 1 << (d->irq & 7);
	val = iomd_readb(IOMD_FIQMASK);
	iomd_writeb(val | mask, IOMD_FIQMASK);
}

/*
 * FIQ sources have no dedicated clear register; masking the source is
 * used as the acknowledge as well.
 */
static struct irq_chip iomd_fiq_chip = {
	.irq_ack	= iomd_mask_irq_fiq,
	.irq_mask	= iomd_mask_irq_fiq,
	.irq_unmask	= iomd_unmask_irq_fiq,
};

/* Default FIQ handler code, installed into the FIQ vector at init time. */
extern unsigned char rpc_default_fiq_start, rpc_default_fiq_end;

/*
 * RiscPC interrupt initialisation:
 *  - mask every IOMD IRQ/FIQ/DMA source,
 *  - install the default FIQ handler,
 *  - hook the four irq_chips onto their respective IRQ number ranges.
 */
void __init rpc_init_irq(void)
{
	unsigned int irq, flags;

	/* Start with every source masked. */
	iomd_writeb(0, IOMD_IRQMASKA);
	iomd_writeb(0, IOMD_IRQMASKB);
	iomd_writeb(0, IOMD_FIQMASK);
	iomd_writeb(0, IOMD_DMAMASK);

	set_fiq_handler(&rpc_default_fiq_start,
		&rpc_default_fiq_end - &rpc_default_fiq_start);

	for (irq = 0; irq < NR_IRQS; irq++) {
		flags = IRQF_VALID;

		/* Lines that may be autoprobed by drivers. */
		if (irq <= 6 || (irq >= 9 && irq <= 15))
			flags |= IRQF_PROBE;

		/* Lines that must not be enabled automatically on request. */
		if (irq == 21 || (irq >= 16 && irq <= 19) ||
		    irq == IRQ_KEYBOARDTX)
			flags |= IRQF_NOAUTOEN;

		switch (irq) {
		case 0 ... 7:		/* IOMD bank A */
			irq_set_chip_and_handler(irq, &iomd_a_chip,
						 handle_level_irq);
			set_irq_flags(irq, flags);
			break;

		case 8 ... 15:		/* IOMD bank B */
			irq_set_chip_and_handler(irq, &iomd_b_chip,
						 handle_level_irq);
			set_irq_flags(irq, flags);
			break;

		case 16 ... 21:		/* DMA completion interrupts */
			irq_set_chip_and_handler(irq, &iomd_dma_chip,
						 handle_level_irq);
			set_irq_flags(irq, flags);
			break;

		case 64 ... 71:		/* FIQ sources: chip only, no flow handler */
			irq_set_chip(irq, &iomd_fiq_chip);
			set_irq_flags(irq, IRQF_VALID);
			break;
		}
	}

	init_FIQ(FIQ_START);
}
gpl-2.0
jfdsmabalot/kernel_legacy_exynos5410
arch/x86/math-emu/reg_constant.c
14361
3807
/*---------------------------------------------------------------------------+
 |  reg_constant.c                                                           |
 |                                                                           |
 |  All of the constant FPU_REGs                                             |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1997                                         |
 |                     W. Metzenthen, 22 Parker St, Ormond, Vic 3163,        |
 |                     Australia.  E-mail   billm@suburbia.net               |
 |                                                                           |
 |                                                                           |
 +---------------------------------------------------------------------------*/

#include "fpu_system.h"
#include "fpu_emu.h"
#include "status_w.h"
#include "reg_constant.h"
#include "control_w.h"

/*
 * Build an extended-precision FPU_REG literal: @l/@h are the low/high 32
 * bits of the 64-bit significand, @e the unbiased exponent, @s POS or NEG.
 */
#define MAKE_REG(s, e, l, h) { l, h, \
		((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }

FPU_REG const CONST_1 = MAKE_REG(POS, 0, 0x00000000, 0x80000000);
#if 0
FPU_REG const CONST_2 = MAKE_REG(POS, 1, 0x00000000, 0x80000000);
FPU_REG const CONST_HALF = MAKE_REG(POS, -1, 0x00000000, 0x80000000);
#endif /* 0 */
/* log2(10), used by fldl2t */
static FPU_REG const CONST_L2T = MAKE_REG(POS, 1, 0xcd1b8afe, 0xd49a784b);
/* log2(e), used by fldl2e */
static FPU_REG const CONST_L2E = MAKE_REG(POS, 0, 0x5c17f0bc, 0xb8aa3b29);
FPU_REG const CONST_PI = MAKE_REG(POS, 1, 0x2168c235, 0xc90fdaa2);
FPU_REG const CONST_PI2 = MAKE_REG(POS, 0, 0x2168c235, 0xc90fdaa2);
FPU_REG const CONST_PI4 = MAKE_REG(POS, -1, 0x2168c235, 0xc90fdaa2);
/* log10(2), used by fldlg2 */
static FPU_REG const CONST_LG2 = MAKE_REG(POS, -2, 0xfbcff799, 0x9a209a84);
/* ln(2), used by fldln2 */
static FPU_REG const CONST_LN2 = MAKE_REG(POS, -1, 0xd1cf79ac, 0xb17217f7);

/* Extra bits to take pi/2 to more than 128 bits precision. */
FPU_REG const CONST_PI2extra = MAKE_REG(NEG, -66,
					0xfc8f8cbb, 0xece675d1);

/* Only the sign (and tag) is used in internal zeroes */
FPU_REG const CONST_Z = MAKE_REG(POS, EXP_UNDER, 0x0, 0x0);

/* Only the sign and significand (and tag) are used in internal NaNs */
/* The 80486 never generates one of these
FPU_REG const CONST_SNAN = MAKE_REG(POS, EXP_OVER, 0x00000001, 0x80000000);
 */
/* This is the real indefinite QNaN */
FPU_REG const CONST_QNaN = MAKE_REG(NEG, EXP_OVER, 0x00000000, 0xC0000000);

/* Only the sign (and tag) is used in internal infinities */
FPU_REG const CONST_INF = MAKE_REG(POS, EXP_OVER, 0x00000000, 0x80000000);

/*
 * Push @c onto the emulated register stack, add @adj to the low significand
 * word (directed-rounding correction of +/-1 ulp) and tag the new st(0).
 * NOTE(review): st_new_ptr looks like it is assigned as a side effect of
 * the STACK_OVERFLOW / push() macros from the emulator headers -- confirm
 * against fpu_system.h before restructuring this function.
 */
static void fld_const(FPU_REG const * c, int adj, u_char tag)
{
	FPU_REG *st_new_ptr;

	if (STACK_OVERFLOW) {
		FPU_stack_overflow();
		return;
	}
	push();
	reg_copy(c, st_new_ptr);
	st_new_ptr->sigl += adj;	/* For all our fldxxx constants, we
					   don't need to borrow or carry. */
	FPU_settag0(tag);
	clear_C1();
}

/* A fast way to find out whether x is one of RC_DOWN or RC_CHOP
   (and not one of RC_RND or RC_UP). */
#define DOWN_OR_CHOP(x)  (x & RC_DOWN)

/* FLD1: exact constant, no rounding adjustment required. */
static void fld1(int rc)
{
	fld_const(&CONST_1, 0, TAG_Valid);
}

/* FLDL2T: bump the significand by one ulp in round-towards-+inf mode. */
static void fldl2t(int rc)
{
	fld_const(&CONST_L2T, (rc == RC_UP) ? 1 : 0, TAG_Valid);
}

/* The remaining constants subtract one ulp when rounding down or chopping. */
static void fldl2e(int rc)
{
	fld_const(&CONST_L2E, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
}

static void fldpi(int rc)
{
	fld_const(&CONST_PI, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
}

static void fldlg2(int rc)
{
	fld_const(&CONST_LG2, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
}

static void fldln2(int rc)
{
	fld_const(&CONST_LN2, DOWN_OR_CHOP(rc) ? -1 : 0, TAG_Valid);
}

/* FLDZ: push +0 with the Zero tag. */
static void fldz(int rc)
{
	fld_const(&CONST_Z, 0, TAG_Zero);
}

typedef void (*FUNC_RC) (int);

/* Indexed by the instruction's rm field; rm == 7 is an illegal encoding. */
static FUNC_RC constants_table[] = {
	fld1, fldl2t, fldl2e, fldpi, fldlg2, fldln2, fldz, (FUNC_RC) FPU_illegal
};

/* Dispatch an FLDxx load-constant opcode using the current rounding mode. */
void fconst(void)
{
	(constants_table[FPU_rm]) (control_word & CW_RC);
}
gpl-2.0
tonyho/TQ2440-linux-2.6.30.4
net/mac80211/tx.c
26
62269
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * Transmit and frame generation functions.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/bitmap.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>

#include "ieee80211_i.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "wme.h"
#include "rate.h"

/* return values of __ieee80211_tx() */
#define IEEE80211_TX_OK		0
#define IEEE80211_TX_AGAIN	1
#define IEEE80211_TX_PENDING	2

/* misc utils */

/*
 * Compute the Duration/ID field for a data/mgmt frame (little-endian
 * microseconds): the ACK time at the selected response rate plus SIFS,
 * doubled and extended by the next fragment's airtime when fragmented.
 * Returns 0 for MCS rates (HW handles it), control frames and group
 * addressed frames.
 */
static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
				 int next_frag_len)
{
	int rate, mrate, erp, dur, i;
	struct ieee80211_rate *txrate;
	struct ieee80211_local *local = tx->local;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);

	/* assume HW handles this */
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
		return 0;

	/* uh huh? */
	if (WARN_ON_ONCE(info->control.rates[0].idx < 0))
		return 0;

	sband = local->hw.wiphy->bands[tx->channel->band];
	txrate = &sband->bitrates[info->control.rates[0].idx];

	erp = txrate->flags & IEEE80211_RATE_ERP_G;

	/*
	 * data and mgmt (except PS Poll):
	 * - during CFP: 32768
	 * - during contention period:
	 *   if addr1 is group address: 0
	 *   if more fragments = 0 and addr1 is individual address: time to
	 *      transmit one ACK plus SIFS
	 *   if more fragments = 1 and addr1 is individual address: time to
	 *      transmit next fragment plus 2 x ACK plus 3 x SIFS
	 *
	 * IEEE 802.11, 9.6:
	 * - control response frame (CTS or ACK) shall be transmitted using the
	 *   same rate as the immediately previous frame in the frame exchange
	 *   sequence, if this rate belongs to the PHY mandatory rates, or else
	 *   at the highest possible rate belonging to the PHY rates in the
	 *   BSSBasicRateSet
	 */
	hdr = (struct ieee80211_hdr *)tx->skb->data;
	if (ieee80211_is_ctl(hdr->frame_control)) {
		/* TODO: These control frames are not currently sent by
		 * mac80211, but should they be implemented, this function
		 * needs to be updated to support duration field calculation.
		 *
		 * RTS: time needed to transmit pending data/mgmt frame plus
		 *    one CTS frame plus one ACK frame plus 3 x SIFS
		 * CTS: duration of immediately previous RTS minus time
		 *    required to transmit CTS and its SIFS
		 * ACK: 0 if immediately previous directed data/mgmt had
		 *    more=0, with more=1 duration in ACK frame is duration
		 *    from previous frame minus time needed to transmit ACK
		 *    and its SIFS
		 * PS Poll: BIT(15) | BIT(14) | aid
		 */
		return 0;
	}

	/* data/mgmt */
	if (0 /* FIX: data/mgmt during CFP */)
		return cpu_to_le16(32768);

	if (group_addr) /* Group address as the destination - no ACK */
		return 0;

	/* Individual destination address:
	 * IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes)
	 * CTS and ACK frames shall be transmitted using the highest rate in
	 * basic rate set that is less than or equal to the rate of the
	 * immediately previous frame and that is using the same modulation
	 * (CCK or OFDM). If no basic rate set matches with these requirements,
	 * the highest mandatory rate of the PHY that is less than or equal to
	 * the rate of the previous frame is used.
	 * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps
	 */
	rate = -1;
	/* use lowest available if everything fails */
	mrate = sband->bitrates[0].bitrate;
	for (i = 0; i < sband->n_bitrates; i++) {
		struct ieee80211_rate *r = &sband->bitrates[i];

		if (r->bitrate > txrate->bitrate)
			break;

		if (tx->sdata->vif.bss_conf.basic_rates & BIT(i))
			rate = r->bitrate;

		switch (sband->band) {
		case IEEE80211_BAND_2GHZ: {
			u32 flag;
			if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
				flag = IEEE80211_RATE_MANDATORY_G;
			else
				flag = IEEE80211_RATE_MANDATORY_B;
			if (r->flags & flag)
				mrate = r->bitrate;
			break;
		}
		case IEEE80211_BAND_5GHZ:
			if (r->flags & IEEE80211_RATE_MANDATORY_A)
				mrate = r->bitrate;
			break;
		case IEEE80211_NUM_BANDS:
			WARN_ON(1);
			break;
		}
	}
	if (rate == -1) {
		/* No matching basic rate found; use highest suitable mandatory
		 * PHY rate */
		rate = mrate;
	}

	/* Time needed to transmit ACK
	 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
	 * to closest integer */

	dur = ieee80211_frame_duration(local, 10, rate, erp,
				tx->sdata->vif.bss_conf.use_short_preamble);

	if (next_frag_len) {
		/* Frame is fragmented: duration increases with time needed to
		 * transmit next fragment plus ACK and 2 x SIFS. */
		dur *= 2; /* ACK + SIFS */
		/* next fragment */
		dur += ieee80211_frame_duration(local, next_frag_len,
				txrate->bitrate, erp,
				tx->sdata->vif.bss_conf.use_short_preamble);
	}

	return cpu_to_le16(dur);
}

/* True iff @dev belongs to this mac80211 instance. */
static int inline is_ieee80211_device(struct ieee80211_local *local,
				      struct net_device *dev)
{
	return local == wdev_priv(dev->ieee80211_ptr);
}

/* tx handlers */

/*
 * Drop frames that must not go out in the current association/scan state:
 * off-channel frames during software scan, and data frames to stations we
 * are not associated with. Injected and PS-buffered frames pass through.
 */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	u32 sta_flags;

	if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
		return TX_CONTINUE;

	if (unlikely(tx->local->sw_scanning) &&
	    !ieee80211_is_probe_req(hdr->frame_control) &&
	    !ieee80211_is_nullfunc(hdr->frame_control))
		/*
		 * When software scanning only nullfunc frames (to notify
		 * the sleep state to the AP) and probe requests (for the
		 * active scan) are allowed, all other frames should not be
		 * sent and we should not get here, but if we do
		 * nonetheless, drop them to avoid sending them
		 * off-channel. See the link below and
		 * ieee80211_start_scan() for more.
		 *
		 * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
		 */
		return TX_DROP;

	if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
		return TX_CONTINUE;

	if (tx->flags & IEEE80211_TX_PS_BUFFERED)
		return TX_CONTINUE;

	sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;

	if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
		if (unlikely(!(sta_flags & WLAN_STA_ASSOC) &&
			     tx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
			     ieee80211_is_data(hdr->frame_control))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
			printk(KERN_DEBUG "%s: dropped data frame to not "
			       "associated station %pM\n",
			       tx->dev->name, hdr->addr1);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
			I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
			return TX_DROP;
		}
	} else {
		if (unlikely(ieee80211_is_data(hdr->frame_control) &&
			     tx->local->num_sta == 0 &&
			     tx->sdata->vif.type != NL80211_IFTYPE_ADHOC)) {
			/*
			 * No associated STAs - no need to send multicast
			 * frames.
			 */
			return TX_DROP;
		}
		return TX_CONTINUE;
	}

	return TX_CONTINUE;
}

/* This function is called whenever the AP is about to exceed the maximum limit
 * of buffered frames for power saving STAs. This situation should not really
 * happen often during normal operation, so dropping the oldest buffered packet
 * from each queue should be OK to make some room for new frames.
 */
static void purge_old_ps_buffers(struct ieee80211_local *local)
{
	int total = 0, purged = 0;
	struct sk_buff *skb;
	struct ieee80211_sub_if_data *sdata;
	struct sta_info *sta;

	/*
	 * virtual interfaces are protected by RCU
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		struct ieee80211_if_ap *ap;
		if (sdata->vif.type != NL80211_IFTYPE_AP)
			continue;
		ap = &sdata->u.ap;
		skb = skb_dequeue(&ap->ps_bc_buf);
		if (skb) {
			purged++;
			dev_kfree_skb(skb);
		}
		total += skb_queue_len(&ap->ps_bc_buf);
	}

	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		skb = skb_dequeue(&sta->ps_tx_buf);
		if (skb) {
			purged++;
			dev_kfree_skb(skb);
		}
		total += skb_queue_len(&sta->ps_tx_buf);
	}

	rcu_read_unlock();

	local->total_ps_buffered = total;
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
	printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n",
	       wiphy_name(local->hw.wiphy), purged);
#endif
}

/*
 * Buffer a broadcast/multicast frame for post-DTIM delivery when any
 * associated station is in power save; returns TX_QUEUED when mac80211
 * buffered it, TX_CONTINUE otherwise (possibly flagging the frame for
 * hardware buffering).
 */
static ieee80211_tx_result
ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;

	/*
	 * broadcast/multicast frame
	 *
	 * If any of the associated stations is in power save mode,
	 * the frame is buffered to be sent after DTIM beacon frame.
	 * This is done either by the hardware or us.
	 */

	/* powersaving STAs only in AP/VLAN mode */
	if (!tx->sdata->bss)
		return TX_CONTINUE;

	/* no buffering for ordered frames */
	if (ieee80211_has_order(hdr->frame_control))
		return TX_CONTINUE;

	/* no stations in PS mode */
	if (!atomic_read(&tx->sdata->bss->num_sta_ps))
		return TX_CONTINUE;

	/* buffered in mac80211 */
	if (tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING) {
		if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
			purge_old_ps_buffers(tx->local);
		if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >=
		    AP_MAX_BC_BUFFER) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
			if (net_ratelimit()) {
				printk(KERN_DEBUG "%s: BC TX buffer full - "
				       "dropping the oldest frame\n",
				       tx->dev->name);
			}
#endif
			dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
		} else
			tx->local->total_ps_buffered++;
		skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb);
		return TX_QUEUED;
	}

	/* buffered in hardware */
	info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
	return TX_CONTINUE;
}

/*
 * Whether management frame protection (802.11w) applies: robust mgmt
 * frame to a station with the MFP flag set.
 */
static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
			     struct sk_buff *skb)
{
	if (!ieee80211_is_mgmt(fc))
		return 0;

	if (sta == NULL || !test_sta_flags(sta, WLAN_STA_MFP))
		return 0;

	if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)
					    skb->data))
		return 0;

	return 1;
}

/*
 * Buffer a unicast frame while its destination STA sleeps (PS set,
 * PSPOLL clear); returns TX_QUEUED when buffered, TX_CONTINUE otherwise.
 */
static ieee80211_tx_result
ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
{
	struct sta_info *sta = tx->sta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	u32 staflags;

	if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control)))
		return TX_CONTINUE;

	staflags = get_sta_flags(sta);

	if (unlikely((staflags & WLAN_STA_PS) &&
		     !(staflags & WLAN_STA_PSPOLL))) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
		printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries "
		       "before %d)\n",
		       sta->sta.addr, sta->sta.aid,
		       skb_queue_len(&sta->ps_tx_buf));
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
		if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
			purge_old_ps_buffers(tx->local);
		if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) {
			struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
			if (net_ratelimit()) {
				printk(KERN_DEBUG "%s: STA %pM TX "
				       "buffer full - dropping oldest frame\n",
				       tx->dev->name, sta->sta.addr);
			}
#endif
			dev_kfree_skb(old);
		} else
			tx->local->total_ps_buffered++;

		/* Queue frame to be sent after STA sends an PS Poll frame */
		if (skb_queue_empty(&sta->ps_tx_buf))
			sta_info_set_tim_bit(sta);

		info->control.jiffies = jiffies;
		skb_queue_tail(&sta->ps_tx_buf, tx->skb);
		return TX_QUEUED;
	}
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
	else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) {
		printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll "
		       "set -> send frame\n", tx->dev->name, sta->sta.addr);
	}
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
	clear_sta_flags(sta, WLAN_STA_PSPOLL);

	return TX_CONTINUE;
}

/* Dispatch to the unicast or multicast PS-buffering handler. */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
{
	if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
		return TX_CONTINUE;

	if (tx->flags & IEEE80211_TX_UNICAST)
		return ieee80211_tx_h_unicast_ps_buf(tx);
	else
		return ieee80211_tx_h_multicast_ps_buf(tx);
}

/*
 * Select the encryption key for this frame (per-STA key, default mgmt
 * key, or interface default key) under rcu_read_lock held by the caller,
 * or drop unencrypted frames when drop_unencrypted is set.
 */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
{
	struct ieee80211_key *key;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;

	if (unlikely(tx->skb->do_not_encrypt))
		tx->key = NULL;
	else if (tx->sta && (key = rcu_dereference(tx->sta->key)))
		tx->key = key;
	else if (ieee80211_is_mgmt(hdr->frame_control) &&
		 (key = rcu_dereference(tx->sdata->default_mgmt_key)))
		tx->key = key;
	else if ((key = rcu_dereference(tx->sdata->default_key)))
		tx->key = key;
	else if (tx->sdata->drop_unencrypted &&
		 (tx->skb->protocol != cpu_to_be16(ETH_P_PAE)) &&
		 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
		 (!ieee80211_is_robust_mgmt_frame(hdr) ||
		  (ieee80211_is_action(hdr->frame_control) &&
		   tx->sta && test_sta_flags(tx->sta, WLAN_STA_MFP)))) {
		I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
		return TX_DROP;
	} else
		tx->key = NULL;

	if (tx->key) {
		tx->key->tx_rx_count++;
		/* TODO: add threshold stuff again */

		switch (tx->key->conf.alg) {
		case ALG_WEP:
			if (ieee80211_is_auth(hdr->frame_control))
				break;
			/* fall through: non-auth WEP frames need a data
			 * payload, same as TKIP below */
		case ALG_TKIP:
			if (!ieee80211_is_data_present(hdr->frame_control))
				tx->key = NULL;
			break;
		case ALG_CCMP:
			if (!ieee80211_is_data_present(hdr->frame_control) &&
			    !ieee80211_use_mfp(hdr->frame_control, tx->sta,
					       tx->skb))
				tx->key = NULL;
			break;
		case ALG_AES_CMAC:
			if (!ieee80211_is_mgmt(hdr->frame_control))
				tx->key = NULL;
			break;
		}
	}

	/* no hardware key uploaded -> software encryption will run */
	if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
		tx->skb->do_not_encrypt = 1;

	return TX_CONTINUE;
}

/*
 * Ask the rate control algorithm for the TX rate set, then derive the
 * RTS/CTS rate, short preamble and protection flags for every retry rate.
 */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (void *)tx->skb->data;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rate *rate;
	int i, len;
	bool inval = false, rts = false, short_preamble = false;
	struct ieee80211_tx_rate_control txrc;

	memset(&txrc, 0, sizeof(txrc));

	sband = tx->local->hw.wiphy->bands[tx->channel->band];

	len = min_t(int, tx->skb->len + FCS_LEN,
			 tx->local->fragmentation_threshold);

	/* set up the tx rate control struct we give the RC algo */
	txrc.hw = local_to_hw(tx->local);
	txrc.sband = sband;
	txrc.bss_conf = &tx->sdata->vif.bss_conf;
	txrc.skb = tx->skb;
	txrc.reported_rate.idx = -1;
	txrc.max_rate_idx = tx->sdata->max_ratectrl_rateidx;

	/* set up RTS protection if desired */
	if (tx->local->rts_threshold < IEEE80211_MAX_RTS_THRESHOLD &&
	    len > tx->local->rts_threshold) {
		txrc.rts = rts = true;
	}

	/*
	 * Use short preamble if the BSS can handle it, but not for
	 * management frames unless we know the receiver can handle
	 * that -- the management frame might be to a station that
	 * just wants a probe response.
	 */
	if (tx->sdata->vif.bss_conf.use_short_preamble &&
	    (ieee80211_is_data(hdr->frame_control) ||
	     (tx->sta && test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
		txrc.short_preamble = short_preamble = true;

	rate_control_get_rate(tx->sdata, tx->sta, &txrc);

	if (unlikely(info->control.rates[0].idx < 0))
		return TX_DROP;

	if (txrc.reported_rate.idx < 0)
		txrc.reported_rate = info->control.rates[0];

	if (tx->sta)
		tx->sta->last_tx_rate = txrc.reported_rate;

	if (unlikely(!info->control.rates[0].count))
		info->control.rates[0].count = 1;

	if (is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * XXX: verify the rate is in the basic rateset
		 */
		return TX_CONTINUE;
	}

	/*
	 * set up the RTS/CTS rate as the fastest basic rate
	 * that is not faster than the data rate
	 *
	 * XXX: Should this check all retry rates?
	 */
	if (!(info->control.rates[0].flags & IEEE80211_TX_RC_MCS)) {
		s8 baserate = 0;

		rate = &sband->bitrates[info->control.rates[0].idx];

		for (i = 0; i < sband->n_bitrates; i++) {
			/* must be a basic rate */
			if (!(tx->sdata->vif.bss_conf.basic_rates & BIT(i)))
				continue;
			/* must not be faster than the data rate */
			if (sband->bitrates[i].bitrate > rate->bitrate)
				continue;
			/* maximum */
			if (sband->bitrates[baserate].bitrate <
			     sband->bitrates[i].bitrate)
				baserate = i;
		}

		info->control.rts_cts_rate_idx = baserate;
	}

	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		/*
		 * make sure there's no valid rate following
		 * an invalid one, just in case drivers don't
		 * take the API seriously to stop at -1.
		 */
		if (inval) {
			info->control.rates[i].idx = -1;
			continue;
		}
		if (info->control.rates[i].idx < 0) {
			inval = true;
			continue;
		}

		/*
		 * For now assume MCS is already set up correctly, this
		 * needs to be fixed.
		 */
		if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS) {
			WARN_ON(info->control.rates[i].idx > 76);
			continue;
		}

		/* set up RTS protection if desired */
		if (rts)
			info->control.rates[i].flags |=
				IEEE80211_TX_RC_USE_RTS_CTS;

		/* RC is busted */
		if (WARN_ON_ONCE(info->control.rates[i].idx >=
				 sband->n_bitrates)) {
			info->control.rates[i].idx = -1;
			continue;
		}

		rate = &sband->bitrates[info->control.rates[i].idx];

		/* set up short preamble */
		if (short_preamble &&
		    rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)
			info->control.rates[i].flags |=
				IEEE80211_TX_RC_USE_SHORT_PREAMBLE;

		/* set up G protection */
		if (!rts && tx->sdata->vif.bss_conf.use_cts_prot &&
		    rate->flags & IEEE80211_RATE_ERP_G)
			info->control.rates[i].flags |=
				IEEE80211_TX_RC_USE_CTS_PROTECT;
	}

	return TX_CONTINUE;
}

/* Hand the destination station to the driver via info->control.sta. */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);

	if (tx->sta)
		info->control.sta = &tx->sta->sta;

	return TX_CONTINUE;
}

/*
 * Assign the sequence number: per-interface counter for non-QoS frames,
 * per-STA per-TID counter for QoS data frames.
 */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	u16 *seq;
	u8 *qc;
	int tid;

	/*
	 * Packet injection may want to control the sequence
	 * number, if we have no matching interface then we
	 * neither assign one ourselves nor ask the driver to.
	 */
	if (unlikely(!info->control.vif))
		return TX_CONTINUE;

	if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
		return TX_CONTINUE;

	if (ieee80211_hdrlen(hdr->frame_control) < 24)
		return TX_CONTINUE;

	/*
	 * Anything but QoS data that has a sequence number field
	 * (is long enough) gets a sequence number from the global
	 * counter.
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control)) {
		/* driver should assign sequence number */
		info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
		/* for pure STA mode without beacons, we can do it */
		hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
		tx->sdata->sequence_number += 0x10;
		tx->sdata->sequence_number &= IEEE80211_SCTL_SEQ;
		return TX_CONTINUE;
	}

	/*
	 * This should be true for injected/management frames only, for
	 * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ
	 * above since they are not QoS-data frames.
	 */
	if (!tx->sta)
		return TX_CONTINUE;

	/* include per-STA, per-TID sequence counter */
	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	seq = &tx->sta->tid_seq[tid];

	hdr->seq_ctrl = cpu_to_le16(*seq);

	/* Increase the sequence number. */
	*seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ;

	return TX_CONTINUE;
}

/*
 * Split @skb into frag_threshold-sized pieces chained on skb->next; the
 * original skb is trimmed down to become the first fragment. Returns 0
 * on success, -EINVAL/-ENOMEM on error.
 */
static int ieee80211_fragment(struct ieee80211_local *local,
			      struct sk_buff *skb, int hdrlen,
			      int frag_threshold)
{
	struct sk_buff *tail = skb, *tmp;
	int per_fragm = frag_threshold - hdrlen - FCS_LEN;
	int pos = hdrlen + per_fragm;
	int rem = skb->len - hdrlen - per_fragm;

	if (WARN_ON(rem < 0))
		return -EINVAL;

	while (rem) {
		int fraglen = per_fragm;

		if (fraglen > rem)
			fraglen = rem;
		rem -= fraglen;
		tmp = dev_alloc_skb(local->tx_headroom +
				    frag_threshold +
				    IEEE80211_ENCRYPT_HEADROOM +
				    IEEE80211_ENCRYPT_TAILROOM);
		if (!tmp)
			return -ENOMEM;
		tail->next = tmp;
		tail = tmp;
		skb_reserve(tmp, local->tx_headroom +
				 IEEE80211_ENCRYPT_HEADROOM);
		/* copy control information */
		memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
		skb_copy_queue_mapping(tmp, skb);
		tmp->priority = skb->priority;
		tmp->do_not_encrypt = skb->do_not_encrypt;
		tmp->dev = skb->dev;
		tmp->iif = skb->iif;

		/* copy header and data */
		memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen);
		memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen);

		pos += fraglen;
	}

	skb->len = hdrlen + per_fragm;
	return 0;
}

/*
 * Fragment the frame when IEEE80211_TX_FRAGMENTED is set and fix up
 * the morefrags bit, duration and fragment number of every fragment.
 */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb = tx->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	int frag_threshold = tx->local->fragmentation_threshold;
	int hdrlen;
	int fragnum;

	if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
		return TX_CONTINUE;

	/*
	 * Warn when submitting a fragmented A-MPDU frame and drop it.
	 * This scenario is handled in __ieee80211_tx_prepare but extra
	 * caution taken here as fragmented ampdu may cause Tx stop.
	 */
	if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
		return TX_DROP;

	hdrlen = ieee80211_hdrlen(hdr->frame_control);

	/* internal error, why is TX_FRAGMENTED set? */
	if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
		return TX_DROP;

	/*
	 * Now fragment the frame. This will allocate all the fragments and
	 * chain them (using skb as the first fragment) to skb->next.
	 * During transmission, we will remove the successfully transmitted
	 * fragments from this list. When the low-level driver rejects one
	 * of the fragments then we will simply pretend to accept the skb
	 * but store it away as pending.
	 */
	if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold))
		return TX_DROP;

	/* update duration/seq/flags of fragments */
	fragnum = 0;
	do {
		int next_len;
		const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);

		hdr = (void *)skb->data;
		info = IEEE80211_SKB_CB(skb);

		if (skb->next) {
			hdr->frame_control |= morefrags;
			next_len = skb->next->len;

			/*
			 * No multi-rate retries for fragmented frames, that
			 * would completely throw off the NAV at other STAs.
			 */
			info->control.rates[1].idx = -1;
			info->control.rates[2].idx = -1;
			info->control.rates[3].idx = -1;
			info->control.rates[4].idx = -1;
			BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5);
			info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
		} else {
			hdr->frame_control &= ~morefrags;
			next_len = 0;
		}
		hdr->duration_id = ieee80211_duration(tx, 0, next_len);
		hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
		fragnum++;
	} while ((skb = skb->next));

	return TX_CONTINUE;
}

/* Run the software encryption routine matching the selected key's alg. */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
{
	if (!tx->key)
		return TX_CONTINUE;

	switch (tx->key->conf.alg) {
	case ALG_WEP:
		return ieee80211_crypto_wep_encrypt(tx);
	case ALG_TKIP:
		return ieee80211_crypto_tkip_encrypt(tx);
	case ALG_CCMP:
		return ieee80211_crypto_ccmp_encrypt(tx);
	case ALG_AES_CMAC:
		return ieee80211_crypto_aes_cmac_encrypt(tx);
	}

	/* not reached */
	WARN_ON(1);
	return TX_DROP;
}

/* Fill in the Duration/ID field of every frame on the skb chain. */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb = tx->skb;
	struct ieee80211_hdr *hdr;
	int next_len;
	bool group_addr;

	do {
		hdr = (void *) skb->data;
		next_len = skb->next ?
			skb->next->len : 0;
		group_addr = is_multicast_ether_addr(hdr->addr1);

		hdr->duration_id =
			ieee80211_duration(tx, group_addr, next_len);
	} while ((skb = skb->next));

	return TX_CONTINUE;
}

/* Account packet/fragment/byte counters on the destination station. */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb = tx->skb;

	if (!tx->sta)
		return TX_CONTINUE;

	tx->sta->tx_packets++;
	do {
		tx->sta->tx_fragments++;
		tx->sta->tx_bytes += skb->len;
	} while ((skb = skb->next));

	return TX_CONTINUE;
}

/* actual transmit path */

/*
 * deal with packet injection down monitor interface
 * with Radiotap Header -- only called for monitor mode interface
 */
static ieee80211_tx_result
__ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
			      struct sk_buff *skb)
{
	/*
	 * this is the moment to interpret and discard the radiotap header that
	 * must be at the start of the packet injected in Monitor mode
	 *
	 * Need to take some care with endian-ness since radiotap
	 * args are little-endian
	 */
	struct ieee80211_radiotap_iterator iterator;
	struct ieee80211_radiotap_header *rthdr =
		(struct ieee80211_radiotap_header *) skb->data;
	struct ieee80211_supported_band *sband;
	int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len);

	sband = tx->local->hw.wiphy->bands[tx->channel->band];

	/* injected frames default to "no encryption, no fragmentation" */
	skb->do_not_encrypt = 1;
	tx->flags &= ~IEEE80211_TX_FRAGMENTED;

	/*
	 * for every radiotap entry that is present
	 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
	 * entries present, or -EINVAL on error)
	 */

	while (!ret) {
		ret = ieee80211_radiotap_iterator_next(&iterator);

		if (ret)
			continue;

		/* see if this argument is something we can use */
		switch (iterator.this_arg_index) {
		/*
		 * You must take care when dereferencing iterator.this_arg
		 * for multibyte types... the pointer is not aligned.  Use
		 * get_unaligned((type *)iterator.this_arg) to dereference
		 * iterator.this_arg for type "type" safely on all arches.
		 */
		case IEEE80211_RADIOTAP_FLAGS:
			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
				/*
				 * this indicates that the skb we have been
				 * handed has the 32-bit FCS CRC at the end...
				 * we should react to that by snipping it off
				 * because it will be recomputed and added
				 * on transmission
				 */
				if (skb->len < (iterator.max_length + FCS_LEN))
					return TX_DROP;

				skb_trim(skb, skb->len - FCS_LEN);
			}
			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
				tx->skb->do_not_encrypt = 0;
			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
				tx->flags |= IEEE80211_TX_FRAGMENTED;
			break;

		/*
		 * Please update the file
		 * Documentation/networking/mac80211-injection.txt
		 * when parsing new fields here.
		 */

		default:
			break;
		}
	}

	if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
		return TX_DROP;

	/*
	 * remove the radiotap header
	 * iterator->max_length was sanity-checked against
	 * skb->len by iterator init
	 */
	skb_pull(skb, iterator.max_length);

	return TX_CONTINUE;
}

/*
 * initialises @tx
 */
static ieee80211_tx_result
__ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
		       struct sk_buff *skb,
		       struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_hdr *hdr;
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int hdrlen, tid;
	u8 *qc, *state;
	bool queued = false;

	memset(tx, 0, sizeof(*tx));
	tx->skb = skb;
	tx->dev = dev; /* use original interface */
	tx->local = local;
	tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	tx->channel = local->hw.conf.channel;
	/*
	 * Set this flag (used below to indicate "automatic fragmentation"),
	 * it will be cleared/left by radiotap as desired.
	 */
	tx->flags |= IEEE80211_TX_FRAGMENTED;

	/* process and remove the injection radiotap header */
	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) {
		if (__ieee80211_parse_tx_radiotap(tx, skb) == TX_DROP)
			return TX_DROP;

		/*
		 * __ieee80211_parse_tx_radiotap has now removed
		 * the radiotap header that was present and pre-filled
		 * 'tx' with tx control information.
		 */
	}

	/*
	 * If this flag is set to true anywhere, and we get here,
	 * we are doing the needed processing, so remove the flag
	 * now.
	 */
	info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING;

	hdr = (struct ieee80211_hdr *) skb->data;

	tx->sta = sta_info_get(local, hdr->addr1);

	if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
	    (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
		unsigned long flags;
		struct tid_ampdu_tx *tid_tx;

		qc = ieee80211_get_qos_ctl(hdr);
		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;

		spin_lock_irqsave(&tx->sta->lock, flags);
		/*
		 * XXX: This spinlock could be fairly expensive, but see the
		 *	comment in agg-tx.c:ieee80211_agg_tx_operational().
		 *	One way to solve this would be to do something RCU-like
		 *	for managing the tid_tx struct and using atomic bitops
		 *	for the actual state -- by introducing an actual
		 *	'operational' bit that would be possible. It would
		 *	require changing ieee80211_agg_tx_operational() to
		 *	set that bit, and changing the way tid_tx is managed
		 *	everywhere, including races between that bit and
		 *	tid_tx going away (tid_tx being added can be easily
		 *	committed to memory before the 'operational' bit).
		 */
		tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
		state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
		if (*state == HT_AGG_STATE_OPERATIONAL) {
			info->flags |= IEEE80211_TX_CTL_AMPDU;
		} else if (*state != HT_AGG_STATE_IDLE) {
			/* in progress */
			queued = true;
			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
			__skb_queue_tail(&tid_tx->pending, skb);
		}
		spin_unlock_irqrestore(&tx->sta->lock, flags);

		if (unlikely(queued))
			return TX_QUEUED;
	}

	if (is_multicast_ether_addr(hdr->addr1)) {
		tx->flags &= ~IEEE80211_TX_UNICAST;
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	} else {
		tx->flags |= IEEE80211_TX_UNICAST;
		info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
	}

	/* only fragment long-enough unicast non-A-MPDU frames */
	if (tx->flags & IEEE80211_TX_FRAGMENTED) {
		if ((tx->flags & IEEE80211_TX_UNICAST) &&
		    skb->len + FCS_LEN > local->fragmentation_threshold &&
		    !(info->flags & IEEE80211_TX_CTL_AMPDU))
			tx->flags |= IEEE80211_TX_FRAGMENTED;
		else
			tx->flags &= ~IEEE80211_TX_FRAGMENTED;
	}

	if (!tx->sta)
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
	else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
		u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
		tx->ethertype = (pos[0] << 8) | pos[1];
	}
	info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;

	return TX_CONTINUE;
}

/*
 * NB: @tx is uninitialised when passed in here
 */
static int ieee80211_tx_prepare(struct ieee80211_local *local,
				struct ieee80211_tx_data *tx,
				struct sk_buff *skb)
{
	struct net_device *dev;

	dev = dev_get_by_index(&init_net, skb->iif);
	if (unlikely(dev && !is_ieee80211_device(local, dev))) {
		dev_put(dev);
		dev = NULL;
	}
	if (unlikely(!dev))
		return -ENODEV;
	/*
	 * initialises tx with control
	 *
	 * return value is safe to ignore here because this function
	 * can only be invoked for multicast frames
	 *
	 * XXX: clean up
	 */
	__ieee80211_tx_prepare(tx, skb, dev);
	dev_put(dev);
	return 0;
}

static int __ieee80211_tx(struct ieee80211_local *local, struct
sk_buff **skbp, struct sta_info *sta) { struct sk_buff *skb = *skbp, *next; struct ieee80211_tx_info *info; int ret, len; bool fragm = false; local->mdev->trans_start = jiffies; while (skb) { if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) return IEEE80211_TX_PENDING; info = IEEE80211_SKB_CB(skb); if (fragm) info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_FIRST_FRAGMENT); next = skb->next; len = skb->len; ret = local->ops->tx(local_to_hw(local), skb); if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) { dev_kfree_skb(skb); ret = NETDEV_TX_OK; } if (ret != NETDEV_TX_OK) return IEEE80211_TX_AGAIN; *skbp = skb = next; ieee80211_led_tx(local, 1); fragm = true; } return IEEE80211_TX_OK; } /* * Invoke TX handlers, return 0 on success and non-zero if the * frame was dropped or queued. */ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) { struct sk_buff *skb = tx->skb; ieee80211_tx_result res = TX_DROP; #define CALL_TXH(txh) \ res = txh(tx); \ if (res != TX_CONTINUE) \ goto txh_done; CALL_TXH(ieee80211_tx_h_check_assoc) CALL_TXH(ieee80211_tx_h_ps_buf) CALL_TXH(ieee80211_tx_h_select_key) CALL_TXH(ieee80211_tx_h_michael_mic_add) CALL_TXH(ieee80211_tx_h_rate_ctrl) CALL_TXH(ieee80211_tx_h_misc) CALL_TXH(ieee80211_tx_h_sequence) CALL_TXH(ieee80211_tx_h_fragment) /* handlers after fragment must be aware of tx info fragmentation! 
*/ CALL_TXH(ieee80211_tx_h_encrypt) CALL_TXH(ieee80211_tx_h_calculate_duration) CALL_TXH(ieee80211_tx_h_stats) #undef CALL_TXH txh_done: if (unlikely(res == TX_DROP)) { I802_DEBUG_INC(tx->local->tx_handlers_drop); while (skb) { struct sk_buff *next; next = skb->next; dev_kfree_skb(skb); skb = next; } return -1; } else if (unlikely(res == TX_QUEUED)) { I802_DEBUG_INC(tx->local->tx_handlers_queued); return -1; } return 0; } static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb, bool txpending) { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct sta_info *sta; struct ieee80211_tx_data tx; ieee80211_tx_result res_prepare; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct sk_buff *next; unsigned long flags; int ret, retries; u16 queue; queue = skb_get_queue_mapping(skb); WARN_ON(!txpending && !skb_queue_empty(&local->pending[queue])); if (unlikely(skb->len < 10)) { dev_kfree_skb(skb); return; } rcu_read_lock(); /* initialises tx */ res_prepare = __ieee80211_tx_prepare(&tx, skb, dev); if (unlikely(res_prepare == TX_DROP)) { dev_kfree_skb(skb); rcu_read_unlock(); return; } else if (unlikely(res_prepare == TX_QUEUED)) { rcu_read_unlock(); return; } sta = tx.sta; tx.channel = local->hw.conf.channel; info->band = tx.channel->band; if (invoke_tx_handlers(&tx)) goto out; retries = 0; retry: ret = __ieee80211_tx(local, &tx.skb, tx.sta); switch (ret) { case IEEE80211_TX_OK: break; case IEEE80211_TX_AGAIN: /* * Since there are no fragmented frames on A-MPDU * queues, there's no reason for a driver to reject * a frame there, warn and drop it. 
*/ if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU)) goto drop; /* fall through */ case IEEE80211_TX_PENDING: skb = tx.skb; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); if (__netif_subqueue_stopped(local->mdev, queue)) { do { next = skb->next; skb->next = NULL; if (unlikely(txpending)) skb_queue_head(&local->pending[queue], skb); else skb_queue_tail(&local->pending[queue], skb); } while ((skb = next)); /* * Make sure nobody will enable the queue on us * (without going through the tasklet) nor disable the * netdev queue underneath the pending handling code. */ __set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING, &local->queue_stop_reasons[queue]); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } else { spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); retries++; if (WARN(retries > 10, "tx refused but queue active")) goto drop; goto retry; } } out: rcu_read_unlock(); return; drop: rcu_read_unlock(); skb = tx.skb; while (skb) { next = skb->next; dev_kfree_skb(skb); skb = next; } } /* device xmit handlers */ static int ieee80211_skb_resize(struct ieee80211_local *local, struct sk_buff *skb, int head_need, bool may_encrypt) { int tail_need = 0; /* * This could be optimised, devices that do full hardware * crypto (including TKIP MMIC) need no tailroom... But we * have no drivers for such devices currently. */ if (may_encrypt) { tail_need = IEEE80211_ENCRYPT_TAILROOM; tail_need -= skb_tailroom(skb); tail_need = max_t(int, tail_need, 0); } if (head_need || tail_need) { /* Sorry. 
Can't account for this any more */ skb_orphan(skb); } if (skb_header_cloned(skb)) I802_DEBUG_INC(local->tx_expand_skb_head_cloned); else I802_DEBUG_INC(local->tx_expand_skb_head); if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) { printk(KERN_DEBUG "%s: failed to reallocate TX buffer\n", wiphy_name(local->hw.wiphy)); return -ENOMEM; } /* update truesize too */ skb->truesize += head_need + tail_need; return 0; } int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_master_priv *mpriv = netdev_priv(dev); struct ieee80211_local *local = mpriv->local; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct net_device *odev = NULL; struct ieee80211_sub_if_data *osdata; int headroom; bool may_encrypt; enum { NOT_MONITOR, FOUND_SDATA, UNKNOWN_ADDRESS, } monitor_iface = NOT_MONITOR; if (skb->iif) odev = dev_get_by_index(&init_net, skb->iif); if (unlikely(odev && !is_ieee80211_device(local, odev))) { dev_put(odev); odev = NULL; } if (unlikely(!odev)) { #ifdef CONFIG_MAC80211_VERBOSE_DEBUG printk(KERN_DEBUG "%s: Discarded packet with nonexistent " "originating device\n", dev->name); #endif dev_kfree_skb(skb); return NETDEV_TX_OK; } if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && local->hw.conf.dynamic_ps_timeout > 0) { if (local->hw.conf.flags & IEEE80211_CONF_PS) { ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_QUEUE_STOP_REASON_PS); queue_work(local->hw.workqueue, &local->dynamic_ps_disable_work); } mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); } memset(info, 0, sizeof(*info)); info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; osdata = IEEE80211_DEV_TO_SUB_IF(odev); if (ieee80211_vif_is_mesh(&osdata->vif) && ieee80211_is_data(hdr->frame_control)) { if (is_multicast_ether_addr(hdr->addr3)) memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); else if (mesh_nexthop_lookup(skb, osdata)) { 
dev_put(odev); return NETDEV_TX_OK; } if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0) IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh, fwded_frames); } else if (unlikely(osdata->vif.type == NL80211_IFTYPE_MONITOR)) { struct ieee80211_sub_if_data *sdata; int hdrlen; u16 len_rthdr; info->flags |= IEEE80211_TX_CTL_INJECTED; monitor_iface = UNKNOWN_ADDRESS; len_rthdr = ieee80211_get_radiotap_len(skb->data); hdr = (struct ieee80211_hdr *)skb->data + len_rthdr; hdrlen = ieee80211_hdrlen(hdr->frame_control); /* check the header is complete in the frame */ if (likely(skb->len >= len_rthdr + hdrlen)) { /* * We process outgoing injected frames that have a * local address we handle as though they are our * own frames. * This code here isn't entirely correct, the local * MAC address is not necessarily enough to find * the interface to use; for that proper VLAN/WDS * support we will need a different mechanism. */ rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { if (!netif_running(sdata->dev)) continue; if (sdata->vif.type != NL80211_IFTYPE_AP) continue; if (compare_ether_addr(sdata->dev->dev_addr, hdr->addr2)) { dev_hold(sdata->dev); dev_put(odev); osdata = sdata; odev = osdata->dev; skb->iif = sdata->dev->ifindex; monitor_iface = FOUND_SDATA; break; } } rcu_read_unlock(); } } may_encrypt = !skb->do_not_encrypt; headroom = osdata->local->tx_headroom; if (may_encrypt) headroom += IEEE80211_ENCRYPT_HEADROOM; headroom -= skb_headroom(skb); headroom = max_t(int, 0, headroom); if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) { dev_kfree_skb(skb); dev_put(odev); return NETDEV_TX_OK; } if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN) osdata = container_of(osdata->bss, struct ieee80211_sub_if_data, u.ap); if (likely(monitor_iface != UNKNOWN_ADDRESS)) info->control.vif = &osdata->vif; ieee80211_tx(odev, skb, false); dev_put(odev); return NETDEV_TX_OK; } int ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev) { 
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_channel *chan = local->hw.conf.channel; struct ieee80211_radiotap_header *prthdr = (struct ieee80211_radiotap_header *)skb->data; u16 len_rthdr; /* * Frame injection is not allowed if beaconing is not allowed * or if we need radar detection. Beaconing is usually not allowed when * the mode or operation (Adhoc, AP, Mesh) does not support DFS. * Passive scan is also used in world regulatory domains where * your country is not known and as such it should be treated as * NO TX unless the channel is explicitly allowed in which case * your current regulatory domain would not have the passive scan * flag. * * Since AP mode uses monitor interfaces to inject/TX management * frames we can make AP mode the exception to this rule once it * supports radar detection as its implementation can deal with * radar detection by itself. We can do that later by adding a * monitor flag interfaces used for AP support. */ if ((chan->flags & (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_RADAR | IEEE80211_CHAN_PASSIVE_SCAN))) goto fail; /* check for not even having the fixed radiotap header part */ if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) goto fail; /* too short to be possibly valid */ /* is it a header version we can trust to find length from? */ if (unlikely(prthdr->it_version)) goto fail; /* only version 0 is supported */ /* then there must be a radiotap header with a length we can use */ len_rthdr = ieee80211_get_radiotap_len(skb->data); /* does the skb contain enough to deliver on the alleged length? 
*/ if (unlikely(skb->len < len_rthdr)) goto fail; /* skb too short for claimed rt header extent */ skb->dev = local->mdev; /* needed because we set skb device to master */ skb->iif = dev->ifindex; /* sometimes we do encrypt injected frames, will be fixed * up in radiotap parser if not wanted */ skb->do_not_encrypt = 0; /* * fix up the pointers accounting for the radiotap * header still being in there. We are being given * a precooked IEEE80211 header so no need for * normal processing */ skb_set_mac_header(skb, len_rthdr); /* * these are just fixed to the end of the rt area since we * don't have any better information and at this point, nobody cares */ skb_set_network_header(skb, len_rthdr); skb_set_transport_header(skb, len_rthdr); /* pass the radiotap header up to the next stage intact */ dev_queue_xmit(skb); return NETDEV_TX_OK; fail: dev_kfree_skb(skb); return NETDEV_TX_OK; /* meaning, we dealt with the skb */ } /** * ieee80211_subif_start_xmit - netif start_xmit function for Ethernet-type * subinterfaces (wlan#, WDS, and VLAN interfaces) * @skb: packet to be sent * @dev: incoming interface * * Returns: 0 on success (and frees skb in this case) or 1 on failure (skb will * not be freed, and caller is responsible for either retrying later or freeing * skb). * * This function takes in an Ethernet header and encapsulates it with suitable * IEEE 802.11 header based on which interface the packet is coming in. The * encapsulated packet will then be passed to master interface, wlan#.11, for * transmission (through low-level driver). 
*/ int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; int ret = 1, head_need; u16 ethertype, hdrlen, meshhdrlen = 0; __le16 fc; struct ieee80211_hdr hdr; struct ieee80211s_hdr mesh_hdr; const u8 *encaps_data; int encaps_len, skip_header_bytes; int nh_pos, h_pos; struct sta_info *sta; u32 sta_flags = 0; if (unlikely(skb->len < ETH_HLEN)) { ret = 0; goto fail; } nh_pos = skb_network_header(skb) - skb->data; h_pos = skb_transport_header(skb) - skb->data; /* convert Ethernet header to proper 802.11 header (based on * operation mode) */ ethertype = (skb->data[12] << 8) | skb->data[13]; fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); switch (sdata->vif.type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); /* DA BSSID SA */ memcpy(hdr.addr1, skb->data, ETH_ALEN); memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); hdrlen = 24; break; case NL80211_IFTYPE_WDS: fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); memcpy(hdr.addr3, skb->data, ETH_ALEN); memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); hdrlen = 30; break; #ifdef CONFIG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); if (!sdata->u.mesh.mshcfg.dot11MeshTTL) { /* Do not send frames with mesh_ttl == 0 */ sdata->u.mesh.mshstats.dropped_frames_ttl++; ret = 0; goto fail; } memset(&mesh_hdr, 0, sizeof(mesh_hdr)); if (compare_ether_addr(dev->dev_addr, skb->data + ETH_ALEN) == 0) { /* RA TA DA SA */ memset(hdr.addr1, 0, ETH_ALEN); memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); memcpy(hdr.addr3, skb->data, ETH_ALEN); memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); meshhdrlen = 
ieee80211_new_mesh_header(&mesh_hdr, sdata); } else { /* packet from other interface */ struct mesh_path *mppath; memset(hdr.addr1, 0, ETH_ALEN); memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); memcpy(hdr.addr4, dev->dev_addr, ETH_ALEN); if (is_multicast_ether_addr(skb->data)) memcpy(hdr.addr3, skb->data, ETH_ALEN); else { rcu_read_lock(); mppath = mpp_path_lookup(skb->data, sdata); if (mppath) memcpy(hdr.addr3, mppath->mpp, ETH_ALEN); else memset(hdr.addr3, 0xff, ETH_ALEN); rcu_read_unlock(); } mesh_hdr.flags |= MESH_FLAGS_AE_A5_A6; mesh_hdr.ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &mesh_hdr.seqnum); memcpy(mesh_hdr.eaddr1, skb->data, ETH_ALEN); memcpy(mesh_hdr.eaddr2, skb->data + ETH_ALEN, ETH_ALEN); sdata->u.mesh.mesh_seqnum++; meshhdrlen = 18; } hdrlen = 30; break; #endif case NL80211_IFTYPE_STATION: fc |= cpu_to_le16(IEEE80211_FCTL_TODS); /* BSSID SA DA */ memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN); memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); memcpy(hdr.addr3, skb->data, ETH_ALEN); hdrlen = 24; break; case NL80211_IFTYPE_ADHOC: /* DA SA BSSID */ memcpy(hdr.addr1, skb->data, ETH_ALEN); memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN); hdrlen = 24; break; default: ret = 0; goto fail; } /* * There's no need to try to look up the destination * if it is a multicast address (which can only happen * in AP mode) */ if (!is_multicast_ether_addr(hdr.addr1)) { rcu_read_lock(); sta = sta_info_get(local, hdr.addr1); if (sta) sta_flags = get_sta_flags(sta); rcu_read_unlock(); } /* receiver and we are QoS enabled, use a QoS type frame */ if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) { fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); hdrlen += 2; } /* * Drop unicast frames to unauthorised stations unless they are * EAPOL frames from the local station. 
*/ if (!ieee80211_vif_is_mesh(&sdata->vif) && unlikely(!is_multicast_ether_addr(hdr.addr1) && !(sta_flags & WLAN_STA_AUTHORIZED) && !(ethertype == ETH_P_PAE && compare_ether_addr(dev->dev_addr, skb->data + ETH_ALEN) == 0))) { #ifdef CONFIG_MAC80211_VERBOSE_DEBUG if (net_ratelimit()) printk(KERN_DEBUG "%s: dropped frame to %pM" " (unauthorized port)\n", dev->name, hdr.addr1); #endif I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); ret = 0; goto fail; } hdr.frame_control = fc; hdr.duration_id = 0; hdr.seq_ctrl = 0; skip_header_bytes = ETH_HLEN; if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) { encaps_data = bridge_tunnel_header; encaps_len = sizeof(bridge_tunnel_header); skip_header_bytes -= 2; } else if (ethertype >= 0x600) { encaps_data = rfc1042_header; encaps_len = sizeof(rfc1042_header); skip_header_bytes -= 2; } else { encaps_data = NULL; encaps_len = 0; } skb_pull(skb, skip_header_bytes); nh_pos -= skip_header_bytes; h_pos -= skip_header_bytes; head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb); /* * So we need to modify the skb header and hence need a copy of * that. The head_need variable above doesn't, so far, include * the needed header space that we don't need right away. If we * can, then we don't reallocate right now but only after the * frame arrives at the master device (if it does...) * * If we cannot, however, then we will reallocate to include all * the ever needed space. Also, if we need to reallocate it anyway, * make it big enough for everything we may ever need. 
*/ if (head_need > 0 || skb_cloned(skb)) { head_need += IEEE80211_ENCRYPT_HEADROOM; head_need += local->tx_headroom; head_need = max_t(int, 0, head_need); if (ieee80211_skb_resize(local, skb, head_need, true)) goto fail; } if (encaps_data) { memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len); nh_pos += encaps_len; h_pos += encaps_len; } if (meshhdrlen > 0) { memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen); nh_pos += meshhdrlen; h_pos += meshhdrlen; } if (ieee80211_is_data_qos(fc)) { __le16 *qos_control; qos_control = (__le16*) skb_push(skb, 2); memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2); /* * Maybe we could actually set some fields here, for now just * initialise to zero to indicate no special operation. */ *qos_control = 0; } else memcpy(skb_push(skb, hdrlen), &hdr, hdrlen); nh_pos += hdrlen; h_pos += hdrlen; skb->iif = dev->ifindex; skb->dev = local->mdev; dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; /* Update skb pointers to various headers since this modified frame * is going to go through Linux networking code that may potentially * need things like pointer to IP header. */ skb_set_mac_header(skb, 0); skb_set_network_header(skb, nh_pos); skb_set_transport_header(skb, h_pos); dev->trans_start = jiffies; dev_queue_xmit(skb); return 0; fail: if (!ret) dev_kfree_skb(skb); return ret; } /* * ieee80211_clear_tx_pending may not be called in a context where * it is possible that it packets could come in again. */ void ieee80211_clear_tx_pending(struct ieee80211_local *local) { int i; for (i = 0; i < local->hw.queues; i++) skb_queue_purge(&local->pending[i]); } static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_sub_if_data *sdata; struct sta_info *sta; struct ieee80211_hdr *hdr; struct net_device *dev; int ret; bool result = true; /* does interface still exist? 
*/ dev = dev_get_by_index(&init_net, skb->iif); if (!dev) { dev_kfree_skb(skb); return true; } /* validate info->control.vif against skb->iif */ sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); if (unlikely(info->control.vif && info->control.vif != &sdata->vif)) { dev_kfree_skb(skb); result = true; goto out; } if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) { ieee80211_tx(dev, skb, true); } else { hdr = (struct ieee80211_hdr *)skb->data; sta = sta_info_get(local, hdr->addr1); ret = __ieee80211_tx(local, &skb, sta); if (ret != IEEE80211_TX_OK) result = false; } out: dev_put(dev); return result; } /* * Transmit all pending packets. Called from tasklet, locks master device * TX lock so that no new packets can come in. */ void ieee80211_tx_pending(unsigned long data) { struct ieee80211_local *local = (struct ieee80211_local *)data; struct net_device *dev = local->mdev; unsigned long flags; int i; bool next; rcu_read_lock(); netif_tx_lock_bh(dev); for (i = 0; i < local->hw.queues; i++) { /* * If queue is stopped by something other than due to pending * frames, or we have no pending frames, proceed to next queue. */ spin_lock_irqsave(&local->queue_stop_reason_lock, flags); next = false; if (local->queue_stop_reasons[i] != BIT(IEEE80211_QUEUE_STOP_REASON_PENDING) || skb_queue_empty(&local->pending[i])) next = true; spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); if (next) continue; /* * start the queue now to allow processing our packets, * we're under the tx lock here anyway so nothing will * happen as a result of this */ netif_start_subqueue(local->mdev, i); while (!skb_queue_empty(&local->pending[i])) { struct sk_buff *skb = skb_dequeue(&local->pending[i]); if (!ieee80211_tx_pending_skb(local, skb)) { skb_queue_head(&local->pending[i], skb); break; } } /* Start regular packet processing again. 
*/ if (skb_queue_empty(&local->pending[i])) ieee80211_wake_queue_by_reason(&local->hw, i, IEEE80211_QUEUE_STOP_REASON_PENDING); } netif_tx_unlock_bh(dev); rcu_read_unlock(); } /* functions for drivers to get certain frames */ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss, struct sk_buff *skb, struct beacon_data *beacon) { u8 *pos, *tim; int aid0 = 0; int i, have_bits = 0, n1, n2; /* Generate bitmap for TIM only if there are any STAs in power save * mode. */ if (atomic_read(&bss->num_sta_ps) > 0) /* in the hope that this is faster than * checking byte-for-byte */ have_bits = !bitmap_empty((unsigned long*)bss->tim, IEEE80211_MAX_AID+1); if (bss->dtim_count == 0) bss->dtim_count = beacon->dtim_period - 1; else bss->dtim_count--; tim = pos = (u8 *) skb_put(skb, 6); *pos++ = WLAN_EID_TIM; *pos++ = 4; *pos++ = bss->dtim_count; *pos++ = beacon->dtim_period; if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf)) aid0 = 1; if (have_bits) { /* Find largest even number N1 so that bits numbered 1 through * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits * (N2 + 1) x 8 through 2007 are 0. 
*/ n1 = 0; for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) { if (bss->tim[i]) { n1 = i & 0xfe; break; } } n2 = n1; for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) { if (bss->tim[i]) { n2 = i; break; } } /* Bitmap control */ *pos++ = n1 | aid0; /* Part Virt Bitmap */ memcpy(pos, bss->tim + n1, n2 - n1 + 1); tim[1] = n2 - n1 + 4; skb_put(skb, n2 - n1); } else { *pos++ = aid0; /* Bitmap control */ *pos++ = 0; /* Part Virt Bitmap */ } } struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ieee80211_local *local = hw_to_local(hw); struct sk_buff *skb = NULL; struct ieee80211_tx_info *info; struct ieee80211_sub_if_data *sdata = NULL; struct ieee80211_if_ap *ap = NULL; struct beacon_data *beacon; struct ieee80211_supported_band *sband; enum ieee80211_band band = local->hw.conf.channel->band; sband = local->hw.wiphy->bands[band]; rcu_read_lock(); sdata = vif_to_sdata(vif); if (sdata->vif.type == NL80211_IFTYPE_AP) { ap = &sdata->u.ap; beacon = rcu_dereference(ap->beacon); if (ap && beacon) { /* * headroom, head length, * tail length and maximum TIM length */ skb = dev_alloc_skb(local->tx_headroom + beacon->head_len + beacon->tail_len + 256); if (!skb) goto out; skb_reserve(skb, local->tx_headroom); memcpy(skb_put(skb, beacon->head_len), beacon->head, beacon->head_len); /* * Not very nice, but we want to allow the driver to call * ieee80211_beacon_get() as a response to the set_tim() * callback. That, however, is already invoked under the * sta_lock to guarantee consistent and race-free update * of the tim bitmap in mac80211 and the driver. 
*/ if (local->tim_in_locked_section) { ieee80211_beacon_add_tim(ap, skb, beacon); } else { unsigned long flags; spin_lock_irqsave(&local->sta_lock, flags); ieee80211_beacon_add_tim(ap, skb, beacon); spin_unlock_irqrestore(&local->sta_lock, flags); } if (beacon->tail) memcpy(skb_put(skb, beacon->tail_len), beacon->tail, beacon->tail_len); } else goto out; } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_hdr *hdr; if (!ifibss->probe_resp) goto out; skb = skb_copy(ifibss->probe_resp, GFP_ATOMIC); if (!skb) goto out; hdr = (struct ieee80211_hdr *) skb->data; hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); } else if (ieee80211_vif_is_mesh(&sdata->vif)) { struct ieee80211_mgmt *mgmt; u8 *pos; /* headroom, head length, tail length and maximum TIM length */ skb = dev_alloc_skb(local->tx_headroom + 400); if (!skb) goto out; skb_reserve(skb, local->hw.extra_tx_headroom); mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + sizeof(mgmt->u.beacon)); memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); memset(mgmt->da, 0xff, ETH_ALEN); memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); /* BSSID is left zeroed, wildcard value */ mgmt->u.beacon.beacon_int = cpu_to_le16(local->hw.conf.beacon_int); mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */ pos = skb_put(skb, 2); *pos++ = WLAN_EID_SSID; *pos++ = 0x0; mesh_mgmt_ies_add(skb, sdata); } else { WARN_ON(1); goto out; } info = IEEE80211_SKB_CB(skb); skb->do_not_encrypt = 1; info->band = band; /* * XXX: For now, always use the lowest rate */ info->control.rates[0].idx = 0; info->control.rates[0].count = 1; info->control.rates[1].idx = -1; info->control.rates[2].idx = -1; info->control.rates[3].idx = -1; info->control.rates[4].idx = -1; BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5); info->control.vif = vif; info->flags |= IEEE80211_TX_CTL_NO_ACK; info->flags |= 
IEEE80211_TX_CTL_CLEAR_PS_FILT; info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; out: rcu_read_unlock(); return skb; } EXPORT_SYMBOL(ieee80211_beacon_get); void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const void *frame, size_t frame_len, const struct ieee80211_tx_info *frame_txctl, struct ieee80211_rts *rts) { const struct ieee80211_hdr *hdr = frame; rts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); rts->duration = ieee80211_rts_duration(hw, vif, frame_len, frame_txctl); memcpy(rts->ra, hdr->addr1, sizeof(rts->ra)); memcpy(rts->ta, hdr->addr2, sizeof(rts->ta)); } EXPORT_SYMBOL(ieee80211_rts_get); void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const void *frame, size_t frame_len, const struct ieee80211_tx_info *frame_txctl, struct ieee80211_cts *cts) { const struct ieee80211_hdr *hdr = frame; cts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); cts->duration = ieee80211_ctstoself_duration(hw, vif, frame_len, frame_txctl); memcpy(cts->ra, hdr->addr1, sizeof(cts->ra)); } EXPORT_SYMBOL(ieee80211_ctstoself_get); struct sk_buff * ieee80211_get_buffered_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ieee80211_local *local = hw_to_local(hw); struct sk_buff *skb = NULL; struct sta_info *sta; struct ieee80211_tx_data tx; struct ieee80211_sub_if_data *sdata; struct ieee80211_if_ap *bss = NULL; struct beacon_data *beacon; struct ieee80211_tx_info *info; sdata = vif_to_sdata(vif); bss = &sdata->u.ap; if (!bss) return NULL; rcu_read_lock(); beacon = rcu_dereference(bss->beacon); if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head) goto out; if (bss->dtim_count != 0) goto out; /* send buffered bc/mc only after DTIM beacon */ while (1) { skb = skb_dequeue(&bss->ps_bc_buf); if (!skb) goto out; local->total_ps_buffered--; if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 
/* more buffered multicast/broadcast frames ==> set * MoreData flag in IEEE 802.11 header to inform PS * STAs */ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); } if (!ieee80211_tx_prepare(local, &tx, skb)) break; dev_kfree_skb_any(skb); } info = IEEE80211_SKB_CB(skb); sta = tx.sta; tx.flags |= IEEE80211_TX_PS_BUFFERED; tx.channel = local->hw.conf.channel; info->band = tx.channel->band; if (invoke_tx_handlers(&tx)) skb = NULL; out: rcu_read_unlock(); return skb; } EXPORT_SYMBOL(ieee80211_get_buffered_bc);
gpl-2.0
daeinki/drm
kernel/jump_label.c
26
9464
/* * jump label support * * Copyright (C) 2009 Jason Baron <jbaron@redhat.com> * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com> * */ #include <linux/memory.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/sort.h> #include <linux/err.h> #include <linux/jump_label.h> #ifdef HAVE_JUMP_LABEL /* mutex to protect coming/going of the the jump_label table */ static DEFINE_MUTEX(jump_label_mutex); void jump_label_lock(void) { mutex_lock(&jump_label_mutex); } void jump_label_unlock(void) { mutex_unlock(&jump_label_mutex); } bool jump_label_enabled(struct jump_label_key *key) { return !!atomic_read(&key->enabled); } static int jump_label_cmp(const void *a, const void *b) { const struct jump_entry *jea = a; const struct jump_entry *jeb = b; if (jea->key < jeb->key) return -1; if (jea->key > jeb->key) return 1; return 0; } static void jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) { unsigned long size; size = (((unsigned long)stop - (unsigned long)start) / sizeof(struct jump_entry)); sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); } static void jump_label_update(struct jump_label_key *key, int enable); void jump_label_inc(struct jump_label_key *key) { if (atomic_inc_not_zero(&key->enabled)) return; jump_label_lock(); if (atomic_add_return(1, &key->enabled) == 1) jump_label_update(key, JUMP_LABEL_ENABLE); jump_label_unlock(); } void jump_label_dec(struct jump_label_key *key) { if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) return; jump_label_update(key, JUMP_LABEL_DISABLE); jump_label_unlock(); } static int addr_conflict(struct jump_entry *entry, void *start, void *end) { if (entry->code <= (unsigned long)end && entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start) return 1; return 0; } static int __jump_label_text_reserved(struct jump_entry *iter_start, struct jump_entry *iter_stop, void *start, void *end) { struct jump_entry 
*iter; iter = iter_start; while (iter < iter_stop) { if (addr_conflict(iter, start, end)) return 1; iter++; } return 0; } /* * Update code which is definitely not currently executing. * Architectures which need heavyweight synchronization to modify * running code can override this to make the non-live update case * cheaper. */ void __weak arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type) { arch_jump_label_transform(entry, type); } static void __jump_label_update(struct jump_label_key *key, struct jump_entry *entry, struct jump_entry *stop, int enable) { for (; (entry < stop) && (entry->key == (jump_label_t)(unsigned long)key); entry++) { /* * entry->code set to 0 invalidates module init text sections * kernel_text_address() verifies we are not in core kernel * init code, see jump_label_invalidate_module_init(). */ if (entry->code && kernel_text_address(entry->code)) arch_jump_label_transform(entry, enable); } } void __init jump_label_init(void) { struct jump_entry *iter_start = __start___jump_table; struct jump_entry *iter_stop = __stop___jump_table; struct jump_label_key *key = NULL; struct jump_entry *iter; jump_label_lock(); jump_label_sort_entries(iter_start, iter_stop); for (iter = iter_start; iter < iter_stop; iter++) { struct jump_label_key *iterk; iterk = (struct jump_label_key *)(unsigned long)iter->key; arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ? 
JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE); if (iterk == key) continue; key = iterk; key->entries = iter; #ifdef CONFIG_MODULES key->next = NULL; #endif } jump_label_unlock(); } #ifdef CONFIG_MODULES struct jump_label_mod { struct jump_label_mod *next; struct jump_entry *entries; struct module *mod; }; static int __jump_label_mod_text_reserved(void *start, void *end) { struct module *mod; mod = __module_text_address((unsigned long)start); if (!mod) return 0; WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); return __jump_label_text_reserved(mod->jump_entries, mod->jump_entries + mod->num_jump_entries, start, end); } static void __jump_label_mod_update(struct jump_label_key *key, int enable) { struct jump_label_mod *mod = key->next; while (mod) { struct module *m = mod->mod; __jump_label_update(key, mod->entries, m->jump_entries + m->num_jump_entries, enable); mod = mod->next; } } /*** * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop() * @mod: module to patch * * Allow for run-time selection of the optimal nops. Before the module * loads patch these with arch_get_jump_label_nop(), which is specified by * the arch specific jump label code. 
*/ void jump_label_apply_nops(struct module *mod) { struct jump_entry *iter_start = mod->jump_entries; struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; struct jump_entry *iter; /* if the module doesn't have jump label entries, just return */ if (iter_start == iter_stop) return; for (iter = iter_start; iter < iter_stop; iter++) arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE); } static int jump_label_add_module(struct module *mod) { struct jump_entry *iter_start = mod->jump_entries; struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; struct jump_entry *iter; struct jump_label_key *key = NULL; struct jump_label_mod *jlm; /* if the module doesn't have jump label entries, just return */ if (iter_start == iter_stop) return 0; jump_label_sort_entries(iter_start, iter_stop); for (iter = iter_start; iter < iter_stop; iter++) { if (iter->key == (jump_label_t)(unsigned long)key) continue; key = (struct jump_label_key *)(unsigned long)iter->key; if (__module_address(iter->key) == mod) { atomic_set(&key->enabled, 0); key->entries = iter; key->next = NULL; continue; } jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL); if (!jlm) return -ENOMEM; jlm->mod = mod; jlm->entries = iter; jlm->next = key->next; key->next = jlm; if (jump_label_enabled(key)) __jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE); } return 0; } static void jump_label_del_module(struct module *mod) { struct jump_entry *iter_start = mod->jump_entries; struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; struct jump_entry *iter; struct jump_label_key *key = NULL; struct jump_label_mod *jlm, **prev; for (iter = iter_start; iter < iter_stop; iter++) { if (iter->key == (jump_label_t)(unsigned long)key) continue; key = (struct jump_label_key *)(unsigned long)iter->key; if (__module_address(iter->key) == mod) continue; prev = &key->next; jlm = key->next; while (jlm && jlm->mod != mod) { prev = &jlm->next; jlm = jlm->next; } if (jlm) { *prev = 
jlm->next; kfree(jlm); } } } static void jump_label_invalidate_module_init(struct module *mod) { struct jump_entry *iter_start = mod->jump_entries; struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; struct jump_entry *iter; for (iter = iter_start; iter < iter_stop; iter++) { if (within_module_init(iter->code, mod)) iter->code = 0; } } static int jump_label_module_notify(struct notifier_block *self, unsigned long val, void *data) { struct module *mod = data; int ret = 0; switch (val) { case MODULE_STATE_COMING: jump_label_lock(); ret = jump_label_add_module(mod); if (ret) jump_label_del_module(mod); jump_label_unlock(); break; case MODULE_STATE_GOING: jump_label_lock(); jump_label_del_module(mod); jump_label_unlock(); break; case MODULE_STATE_LIVE: jump_label_lock(); jump_label_invalidate_module_init(mod); jump_label_unlock(); break; } return notifier_from_errno(ret); } struct notifier_block jump_label_module_nb = { .notifier_call = jump_label_module_notify, .priority = 1, /* higher than tracepoints */ }; static __init int jump_label_init_module(void) { return register_module_notifier(&jump_label_module_nb); } early_initcall(jump_label_init_module); #endif /* CONFIG_MODULES */ /*** * jump_label_text_reserved - check if addr range is reserved * @start: start text addr * @end: end text addr * * checks if the text addr located between @start and @end * overlaps with any of the jump label patch addresses. Code * that wants to modify kernel text should first verify that * it does not overlap with any of the jump label addresses. * Caller must hold jump_label_mutex. 
* * returns 1 if there is an overlap, 0 otherwise */ int jump_label_text_reserved(void *start, void *end) { int ret = __jump_label_text_reserved(__start___jump_table, __stop___jump_table, start, end); if (ret) return ret; #ifdef CONFIG_MODULES ret = __jump_label_mod_text_reserved(start, end); #endif return ret; } static void jump_label_update(struct jump_label_key *key, int enable) { struct jump_entry *entry = key->entries, *stop = __stop___jump_table; #ifdef CONFIG_MODULES struct module *mod = __module_address((jump_label_t)key); __jump_label_mod_update(key, enable); if (mod) stop = mod->jump_entries + mod->num_jump_entries; #endif /* if there are no users, entry can be NULL */ if (entry) __jump_label_update(key, entry, stop, enable); } #endif
gpl-2.0
thillux/coreboot
payloads/libpayload/arch/arm64/timer.c
26
1935
/* * This file is part of the libpayload project. * * Copyright (C) 2008 Advanced Micro Devices, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * @file arm64/timer.c * ARM64 specific timer routines */ #include <libpayload.h> /** * @ingroup arch * Global variable containing the speed of the processor in KHz. */ u32 cpu_khz; /** * Calculate the speed of the processor for use in delays. * * @return The CPU speed in kHz. */ unsigned int get_cpu_speed(void) { /* FIXME */ cpu_khz = 1000000U; return cpu_khz; }
gpl-2.0
noewin/papercrop
luabind-0.9/test/main.cpp
26
3141
// Copyright (c) 2005 Daniel Wallin, Arvid Norberg // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF // ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED // TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A // PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT // SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR // ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN // ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE // OR OTHER DEALINGS IN THE SOFTWARE. 
#include <iostream> #include <cstring> extern "C" { #include "lauxlib.h" #include "lualib.h" } #include <luabind/open.hpp> #include "test.hpp" extern "C" struct lua_State; void test_main(lua_State*); struct lua_state { lua_state(); ~lua_state(); operator lua_State*() const; void check() const; private: lua_State* m_state; int m_top; }; lua_state::lua_state() : m_state(lua_open()) { luaopen_base(m_state); #if defined(LUA_VERSION_NUM) && LUA_VERSION_NUM >= 501 // lua 5.1 or newer luaL_openlibs(m_state); #else // lua 5.0.2 or older lua_baselibopen(m_state); #endif m_top = lua_gettop(m_state); luabind::open(m_state); } lua_state::~lua_state() { lua_close(m_state); } void lua_state::check() const { TEST_CHECK(lua_gettop(m_state) == m_top); } lua_state::operator lua_State*() const { return m_state; } int pcall_handler(lua_State* L) { return 1; } void dostring(lua_State* state, char const* str) { lua_pushcclosure(state, &pcall_handler, 0); if (luaL_loadbuffer(state, str, std::strlen(str), str)) { std::string err(lua_tostring(state, -1)); lua_pop(state, 2); throw err; } if (lua_pcall(state, 0, 0, -2)) { std::string err(lua_tostring(state, -1)); lua_pop(state, 2); throw err; } lua_pop(state, 1); } bool tests_failure = false; void report_failure(char const* err, char const* file, int line) { std::cerr << file << ":" << line << "\"" << err << "\"\n"; tests_failure = true; } int main() { lua_state L; try { test_main(L); L.check(); return tests_failure ? 1 : 0; } catch (luabind::error const& e) { std::cerr << "Terminated with exception: \"" << e.what() << "\"\n" << lua_tostring(e.state(), -1) << "\n"; return 1; } catch (std::exception const& e) { std::cerr << "Terminated with exception: \"" << e.what() << "\"\n"; return 1; } catch (...) { std::cerr << "Terminated with unknown exception\n"; return 1; } }
gpl-2.0
LeChuck42/or1k-gcc
gcc/gengtype-state.c
26
74191
/* Gengtype persistent state serialization & de-serialization. Useful for gengtype in plugin mode. Copyright (C) 2010-2014 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. Contributed by Jeremie Salvucci <jeremie.salvucci@free.fr> and Basile Starynkevitch <basile@starynkevitch.net> */ #ifdef GENERATOR_FILE #include "bconfig.h" #else #include "config.h" #endif #include "system.h" #include "errors.h" /* For fatal. */ #include "double-int.h" #include "hashtab.h" #include "version.h" /* For version_string & pkgversion_string. */ #include "obstack.h" #include "gengtype.h" /* Gives the file location of a type, if any. */ static inline struct fileloc* type_lineloc (const_type_p ty) { if (!ty) return NULL; switch (ty->kind) { case TYPE_NONE: gcc_unreachable (); case TYPE_STRUCT: case TYPE_UNION: case TYPE_LANG_STRUCT: case TYPE_USER_STRUCT: case TYPE_UNDEFINED: return CONST_CAST (struct fileloc*, &ty->u.s.line); case TYPE_PARAM_STRUCT: return CONST_CAST (struct fileloc*, &ty->u.param_struct.line); case TYPE_SCALAR: case TYPE_STRING: case TYPE_POINTER: case TYPE_ARRAY: return NULL; default: gcc_unreachable (); } } /* The state file has simplistic lispy lexical tokens. Its lexer gives a linked list of struct state_token_st, through the peek_state_token function. Lexical tokens are consumed with next_state_tokens. */ /* The lexical kind of each lispy token. 
*/ enum state_token_en { STOK_NONE, /* Never used. */ STOK_INTEGER, /* Integer token. */ STOK_STRING, /* String token. */ STOK_LEFTPAR, /* Left opening parenthesis. */ STOK_RIGHTPAR, /* Right closing parenthesis. */ STOK_NAME /* hash-consed name or identifier. */ }; /* Structure and hash-table used to share identifiers or names. */ struct state_ident_st { /* TODO: We could improve the parser by reserving identifiers for state keywords and adding a keyword number for them. That would mean adding another field in this state_ident_st struct. */ char stid_name[1]; /* actually bigger & null terminated */ }; static htab_t state_ident_tab; /* The state_token_st structure is for lexical tokens in the read state file. The stok_kind field discriminates the union. Tokens are allocated by peek_state_token which calls read_a_state_token which allocate them. Tokens are freed by calls to next_state_tokens. Token are organized in a FIFO look-ahead queue filled by peek_state_token. */ struct state_token_st { enum state_token_en stok_kind; /* the lexical kind discriminates the stok_un union */ int stok_line; /* the line number */ int stok_col; /* the column number */ const char *stok_file; /* the file path */ struct state_token_st *stok_next; /* the next token in the queue, when peeked */ union /* discriminated by stok_kind! */ { int stok_num; /* when STOK_INTEGER */ char stok_string[1]; /* when STOK_STRING, actual size is bigger and null terminated */ struct state_ident_st *stok_ident; /* when STOK_IDENT */ void *stok_ptr; /* null otherwise */ } stok_un; }; #define NULL_STATE_TOKEN (struct state_token_st*)0 /* the state_token pointer contains the leftmost current token. The tokens are organized in a linked queue, using stok_next, for token look-ahead. */ struct state_token_st *state_token = NULL_STATE_TOKEN; /* Used by the reading lexer. 
*/ static FILE *state_file; static const char *state_path = NULL; static int state_line = 0; static long state_bol = 0; /* offset of beginning of line */ /* A class for writing out s-expressions, keeping track of newlines and nested indentation. */ class s_expr_writer { public: s_expr_writer (); void write_new_line (); void write_any_indent (int leading_spaces); void begin_s_expr (const char *tag); void end_s_expr (); private: int m_indent_amount; int m_had_recent_newline; }; // class s_expr_writer /* A class for writing out "gtype.state". */ class state_writer : public s_expr_writer { public: state_writer (); private: void write_state_fileloc (struct fileloc *floc); void write_state_fields (pair_p fields); void write_state_a_string (const char *s); void write_state_string_option (options_p current); void write_state_type_option (options_p current); void write_state_nested_option (options_p current); void write_state_option (options_p current); void write_state_options (options_p opt); void write_state_lang_bitmap (lang_bitmap bitmap); void write_state_version (const char *version); void write_state_scalar_type (type_p current); void write_state_string_type (type_p current); void write_state_undefined_type (type_p current); void write_state_struct_union_type (type_p current, const char *kindstr); void write_state_struct_type (type_p current); void write_state_user_struct_type (type_p current); void write_state_union_type (type_p current); void write_state_lang_struct_type (type_p current); void write_state_param_struct_type (type_p current); void write_state_pointer_type (type_p current); void write_state_array_type (type_p current); void write_state_gc_used (enum gc_used_enum gus); void write_state_common_type_content (type_p current); void write_state_type (type_p current); void write_state_pair (pair_p current); int write_state_pair_list (pair_p list); void write_state_typedefs (void); void write_state_structures (void); void write_state_param_structs (void); 
void write_state_variables (void); void write_state_srcdir (void); void write_state_files_list (void); void write_state_languages (void); friend void write_state (const char *state_path); private: /* Counter of written types. */ int m_state_written_type_count; }; // class state_writer /* class s_expr_writer's trivial constructor. */ s_expr_writer::s_expr_writer () : m_indent_amount (0), m_had_recent_newline (0) { } /* Write a newline to the output file, merging adjacent newlines. */ void s_expr_writer::write_new_line (void) { /* Don't add a newline if we've just had one. */ if (!m_had_recent_newline) { fprintf (state_file, "\n"); m_had_recent_newline = 1; } } /* If we've just had a newline, write the indentation amount, potentially omitting some spaces. LEADING_SPACES exists to support code that writes strings with leading spaces (e.g " foo") which might occur within a line, or could be the first thing on a line. By passing leading_spaces == 1, when such a string is the first thing on a line, write_any_indent () swallows the successive leading spaces into the indentation so that the "foo" begins at the expected column. */ void s_expr_writer::write_any_indent (int leading_spaces) { int i; int amount = m_indent_amount - leading_spaces; if (m_had_recent_newline) for (i = 0; i < amount; i++) fprintf (state_file, " "); m_had_recent_newline = 0; } /* Write the beginning of a new s-expresion e.g. "(!foo " The writer automatically adds whitespace to show the hierarchical structure of the expressions, so each one starts on a new line, and any within it will be at an increased indentation level. */ void s_expr_writer::begin_s_expr (const char *tag) { write_new_line (); write_any_indent (0); fprintf (state_file, "(!%s ", tag); m_indent_amount++; } /* Write out the end of an s-expression: any necssessary indentation, a closing parenthesis, and a new line. 
*/ void s_expr_writer::end_s_expr (void) { m_indent_amount--; write_any_indent (0); fprintf (state_file, ")"); write_new_line (); } /* class state_writer's trivial constructor. */ state_writer::state_writer () : s_expr_writer (), m_state_written_type_count (0) { } /* Fatal error messages when reading the state. They are extremely unlikely, and only appear when this gengtype-state.c file is buggy, or when reading a gengtype state which was not generated by the same version of gengtype or GCC. */ /* Fatal message while reading state. */ static inline void fatal_reading_state (struct state_token_st* tok, const char*msg) { if (tok) fatal ("%s:%d:%d: Invalid state file; %s", tok->stok_file, tok->stok_line, tok->stok_col, msg); else fatal ("%s:%d: Invalid state file; %s", state_path, state_line, msg); } /* Fatal printf-like message while reading state. This can't be a function, because there is no way to pass a va_arg to a variant of fatal. */ #define fatal_reading_state_printf(Tok,Fmt,...) do { \ struct state_token_st* badtok = Tok; \ if (badtok) \ fatal ("%s:%d:%d: Invalid state file; " Fmt, \ badtok->stok_file, \ badtok->stok_line, \ badtok->stok_col, __VA_ARGS__); \ else \ fatal ("%s:%d: Invalid state file; " Fmt, \ state_path, state_line, __VA_ARGS__); \ } while (0) /* Find or allocate an identifier in our name hash table. */ static struct state_ident_st * state_ident_by_name (const char *name, enum insert_option optins) { PTR *slot = NULL; int namlen = 0; struct state_ident_st *stid = NULL; if (!name || !name[0]) return NULL; slot = htab_find_slot (state_ident_tab, name, optins); if (!slot) return NULL; namlen = strlen (name); stid = (struct state_ident_st *) xmalloc (sizeof (struct state_ident_st) + namlen); memset (stid, 0, sizeof (struct state_ident_st) + namlen); strcpy (stid->stid_name, name); *slot = stid; return stid; } /* Our token lexer is heavily inspired by MELT's lexer, and share some code with the file gcc/melt-runtime.c of the GCC MELT branch! 
We really want the gengtype state to be easily parsable by MELT. This is a usual lispy lexing routine, dealing with spaces and comments, numbers, parenthesis, names, strings. */ static struct state_token_st * read_a_state_token (void) { int c = 0; long curoff = 0; struct state_token_st *tk = NULL; again: /* Read again, e.g. after a comment or spaces. */ c = getc (state_file); if (c == EOF) return NULL; /* Handle spaces, count lines. */ if (c == '\n') { state_line++; state_bol = curoff = ftell (state_file); goto again; }; if (ISSPACE (c)) goto again; /* Skip comments starting with semi-colon. */ if (c == ';') { do { c = getc (state_file); } while (c > 0 && c != '\n'); if (c == '\n') { state_line++; state_bol = curoff = ftell (state_file); } goto again; }; /* Read signed numbers. */ if (ISDIGIT (c) || c == '-' || c == '+') { /* number */ int n = 0; ungetc (c, state_file); curoff = ftell (state_file); if (fscanf (state_file, "%d", &n) <= 0) fatal_reading_state (NULL_STATE_TOKEN, "Lexical error in number"); tk = XCNEW (struct state_token_st); tk->stok_kind = STOK_INTEGER; tk->stok_line = state_line; tk->stok_col = curoff - state_bol; tk->stok_file = state_path; tk->stok_next = NULL; tk->stok_un.stok_num = n; return tk; } /* Read an opening left parenthesis. */ else if (c == '(') { curoff = ftell (state_file); tk = XCNEW (struct state_token_st); tk->stok_kind = STOK_LEFTPAR; tk->stok_line = state_line; tk->stok_col = curoff - state_bol; tk->stok_file = state_path; tk->stok_next = NULL; return tk; } /* Read an closing right parenthesis. */ else if (c == ')') { curoff = ftell (state_file); tk = XCNEW (struct state_token_st); tk->stok_kind = STOK_RIGHTPAR; tk->stok_line = state_line; tk->stok_col = curoff - state_bol; tk->stok_file = state_path; tk->stok_next = NULL; return tk; } /* Read identifiers, using an obstack. */ else if (ISALPHA (c) || c == '_' || c == '$' || c == '!' 
/* NOTE(review): this span is the tail of read_a_state_token, whose head
   lies before this chunk; the test below continues an if-condition
   started above — it is not a complete definition on its own.  */
    || c == '#')
    {
      /* Read an identifier-like name token; names may contain letters,
         digits, '_', '$', '!' and '#'.  */
      struct obstack id_obstack;
      struct state_ident_st *sid = NULL;
      char *ids = NULL;
      obstack_init (&id_obstack);
      curoff = ftell (state_file);
      while (ISALNUM (c) || c == '_' || c == '$' || c == '!' || c == '#')
        {
          obstack_1grow (&id_obstack, c);
          c = getc (state_file);
          if (c < 0)
            break;
        };
      /* The last read character is not part of the name: push it back.  */
      if (c >= 0)
        ungetc (c, state_file);
      obstack_1grow (&id_obstack, (char) 0);
      ids = XOBFINISH (&id_obstack, char *);
      /* Names are interned: equal spellings share one state_ident_st.  */
      sid = state_ident_by_name (ids, INSERT);
      obstack_free (&id_obstack, NULL);
      ids = NULL;
      tk = XCNEW (struct state_token_st);
      tk->stok_kind = STOK_NAME;
      tk->stok_line = state_line;
      tk->stok_col = curoff - state_bol;
      tk->stok_file = state_path;
      tk->stok_next = NULL;
      tk->stok_un.stok_ident = sid;
      return tk;
    }
  /* Read a string, dealing with escape sequences a la C! */
  else if (c == '"')
    {
      char *cstr = NULL;
      int cslen = 0;
      struct obstack bstring_obstack;
      obstack_init (&bstring_obstack);
      curoff = ftell (state_file);
      while ((c = getc (state_file)) != '"' && c >= 0)
        {
          /* Printable and (non-newline) space characters are kept
             verbatim; backslash starts a C-like escape sequence.  */
          if (ISPRINT (c) && c != '\\')
            obstack_1grow (&bstring_obstack, (char) c);
          else if (ISSPACE (c) && c != '\n')
            obstack_1grow (&bstring_obstack, (char) c);
          else if (c == '\\')
            {
              c = getc (state_file);
              switch (c)
                {
                case 'a':
                  obstack_1grow (&bstring_obstack, '\a');
                  c = getc (state_file);
                  break;
                case 'b':
                  obstack_1grow (&bstring_obstack, '\b');
                  c = getc (state_file);
                  break;
                case 't':
                  obstack_1grow (&bstring_obstack, '\t');
                  c = getc (state_file);
                  break;
                case 'n':
                  obstack_1grow (&bstring_obstack, '\n');
                  c = getc (state_file);
                  break;
                case 'v':
                  obstack_1grow (&bstring_obstack, '\v');
                  c = getc (state_file);
                  break;
                case 'f':
                  obstack_1grow (&bstring_obstack, '\f');
                  c = getc (state_file);
                  break;
                case 'r':
                  obstack_1grow (&bstring_obstack, '\r');
                  c = getc (state_file);
                  break;
                case '"':
                  obstack_1grow (&bstring_obstack, '\"');
                  c = getc (state_file);
                  break;
                case '\\':
                  obstack_1grow (&bstring_obstack, '\\');
                  c = getc (state_file);
                  break;
                case ' ':
                  obstack_1grow (&bstring_obstack, ' ');
                  c = getc (state_file);
                  break;
                case 'x':
                  {
                    /* Hex escape \xNN: up to two hex digits; a zero
                       byte is rejected since strings are
                       NUL-terminated.  */
                    unsigned int cx = 0;
                    if (fscanf (state_file, "%02x", &cx) > 0 && cx > 0)
                      obstack_1grow (&bstring_obstack, cx);
                    else
                      fatal_reading_state
                        (NULL_STATE_TOKEN,
                         "Lexical error in string hex escape");
                    c = getc (state_file);
                    break;
                  }
                default:
                  fatal_reading_state
                    (NULL_STATE_TOKEN,
                     "Lexical error - unknown string escape");
                }
            }
          else
            fatal_reading_state (NULL_STATE_TOKEN, "Lexical error...");
        };
      if (c != '"')
        fatal_reading_state (NULL_STATE_TOKEN, "Unterminated string");
      obstack_1grow (&bstring_obstack, '\0');
      cstr = XOBFINISH (&bstring_obstack, char *);
      cslen = strlen (cstr);
      /* String tokens carry their contents in trailing storage of
         state_token_st, hence the over-allocation by CSLEN.  */
      tk = (struct state_token_st *)
        xcalloc (sizeof (struct state_token_st) + cslen, 1);
      tk->stok_kind = STOK_STRING;
      tk->stok_line = state_line;
      tk->stok_col = curoff - state_bol;
      tk->stok_file = state_path;
      tk->stok_next = NULL;
      strcpy (tk->stok_un.stok_string, cstr);
      obstack_free (&bstring_obstack, NULL);
      return tk;
    }

  /* Got an unexpected character. */
  fatal_reading_state_printf
    (NULL_STATE_TOKEN,
     "Lexical error at offset %ld - bad character \\%03o = '%c'",
     ftell (state_file), c, c);
}

/* Used for lexical look-ahead.  Retrieves the lexical token of rank
   DEPTH, starting with 0 when reading the state file.  Gives null on
   end of file.  Tokens are buffered on the state_token list so a
   later next_state_tokens can consume them in order.  */
static struct state_token_st *
peek_state_token (int depth)
{
  int remdepth = depth;
  struct state_token_st **ptoken = &state_token;
  struct state_token_st *tok = NULL;

  while (remdepth >= 0)
    {
      if (*ptoken == NULL)
        {
          *ptoken = tok = read_a_state_token ();
          if (tok == NULL)
            return NULL;
        }
      tok = *ptoken;
      ptoken = &((*ptoken)->stok_next);
      remdepth--;
    }
  return tok;
}

/* Consume the next DEPTH tokens and free them.  It is a fatal error
   to consume more tokens than have been buffered by peek_state_token.  */
static void
next_state_tokens (int depth)
{
  struct state_token_st *n;

  while (depth > 0)
    {
      if (state_token != NULL)
        {
          n = state_token->stok_next;
          free (state_token);
          state_token = n;
        }
      else
        fatal_reading_state (NULL_STATE_TOKEN, "Tokens stack empty");
      depth--;
    }
}

/* Safely retrieve the lexical kind of a token.
*/
static inline enum state_token_en
state_token_kind (struct state_token_st *p)
{
  if (p == NULL)
    return STOK_NONE;
  else
    return p->stok_kind;
}

/* Test if a token is a given name i.e. an identifier. */
static inline bool
state_token_is_name (struct state_token_st *p, const char *name)
{
  if (p == NULL)
    return false;
  if (p->stok_kind != STOK_NAME)
    return false;
  return !strcmp (p->stok_un.stok_ident->stid_name, name);
}

/* Following routines are useful for serializing data.
 *
 * We want to serialize :
 *          - typedefs list
 *          - structures list
 *          - param_structs list
 *          - variables list
 *
 * So, we have one routine for each kind of data.  The main writing
 * routine is write_state.  The main reading routine is
 * read_state.  Most writing routines write_state_FOO have a
 * corresponding reading routine read_state_FOO.  Reading is done in a
 * recursive descending way, and any read error is fatal.
 */

/* When reading the state, we need to remember the previously seen
   types by their state_number, since GTY-ed types are usually
   shared.  */
static htab_t state_seen_types;

/* Return the length of a linked list made of pairs.  */
static int pair_list_length (pair_p list);

/* Compute the length of a list of pairs, starting from the first
   one.  */
static int
pair_list_length (pair_p list)
{
  int nbpair = 0;
  pair_p l = NULL;
  for (l = list; l; l = l->next)
    nbpair++;
  return nbpair;
}

/* Write a file location.  Files relative to $(srcdir) are quite
   frequent and are handled specially.  This ensures that two gengtype
   state file-s produced by gengtype on the same GCC source tree are
   very similar and can be reasonably compared with diff, even if the
   two GCC source trees have different absolute paths.  */
void
state_writer::write_state_fileloc (struct fileloc *floc)
{
  if (floc != NULL && floc->line > 0)
    {
      const char *srcrelpath = NULL;
      gcc_assert (floc->file != NULL);
      /* Most of the files are inside $(srcdir) so it is worth to
         handle them specially.  */
      srcrelpath = get_file_srcdir_relative_path (floc->file);
      if (srcrelpath != NULL)
        {
          begin_s_expr ("srcfileloc");
          write_state_a_string (srcrelpath);
        }
      else
        {
          begin_s_expr ("fileloc");
          write_state_a_string (get_input_file_name (floc->file));
        }
      fprintf (state_file, " %d", floc->line);
      end_s_expr ();
    }
  else
    /* A missing or line-less location is serialized as nil.  */
    fprintf (state_file, "nil ");
}

/* Write a list of fields, prefixed by their count so the reader can
   verify it parsed the right number of pairs.  */
void
state_writer::write_state_fields (pair_p fields)
{
  int nbfields = pair_list_length (fields);
  int nbpairs = 0;
  begin_s_expr ("fields");
  fprintf (state_file, "%d ", nbfields);
  nbpairs = write_state_pair_list (fields);
  gcc_assert (nbpairs == nbfields);
  end_s_expr ();
}

/* Write a null-terminated string in our lexical convention, very
   similar to the convention of C.  Non-printable characters are
   emitted as \xNN hex escapes; the reader undoes this in
   read_a_state_token.  */
void
state_writer::write_state_a_string (const char *s)
{
  char c;
  write_any_indent (1);
  fputs (" \"", state_file);
  for (; *s != 0; s++)
    {
      c = *s;
      switch (c)
        {
        case '\a':
          fputs ("\\a", state_file);
          break;
        case '\b':
          fputs ("\\b", state_file);
          break;
        case '\t':
          fputs ("\\t", state_file);
          break;
        case '\n':
          fputs ("\\n", state_file);
          break;
        case '\v':
          fputs ("\\v", state_file);
          break;
        case '\f':
          fputs ("\\f", state_file);
          break;
        case '\r':
          fputs ("\\r", state_file);
          break;
        case '\"':
          fputs ("\\\"", state_file);
          break;
        case '\\':
          fputs ("\\\\", state_file);
          break;
        default:
          if (ISPRINT (c))
            putc (c, state_file);
          else
            fprintf (state_file, "\\x%02x", (unsigned) c);
        }
    }
  fputs ("\"", state_file);
}

/* Our option-s have three kinds, each with its writer.
*/ void state_writer::write_state_string_option (options_p current) { write_any_indent (0); fprintf (state_file, "string "); if (current->info.string != NULL) write_state_a_string (current->info.string); else fprintf (state_file, " nil "); } void state_writer::write_state_type_option (options_p current) { write_any_indent (0); fprintf (state_file, "type "); write_state_type (current->info.type); } void state_writer::write_state_nested_option (options_p current) { write_any_indent (0); fprintf (state_file, "nested "); write_state_type (current->info.nested->type); if (current->info.nested->convert_from != NULL) write_state_a_string (current->info.nested->convert_from); else { write_any_indent (1); fprintf (state_file, " nil "); } if (current->info.nested->convert_to != NULL) write_state_a_string (current->info.nested->convert_to); else { write_any_indent (1); fprintf (state_file, " nil "); } } void state_writer::write_state_option (options_p current) { begin_s_expr ("option"); write_any_indent (0); if (current->name != NULL) fprintf (state_file, "%s ", current->name); else fprintf (state_file, "nil "); switch (current->kind) { case OPTION_STRING: write_state_string_option (current); break; case OPTION_TYPE: write_state_type_option (current); break; case OPTION_NESTED: write_state_nested_option (current); break; default: fatal ("Option tag unknown"); } /* Terminate the "option" s-expression. */ end_s_expr (); } /* Write a list of GTY options. */ void state_writer::write_state_options (options_p opt) { options_p current; if (opt == NULL) { write_any_indent (0); fprintf (state_file, "nil "); return; } begin_s_expr ("options"); for (current = opt; current != NULL; current = current->next) write_state_option (current); end_s_expr (); } /* Write a bitmap representing a set of GCC front-end languages. */ void state_writer::write_state_lang_bitmap (lang_bitmap bitmap) { write_any_indent (0); fprintf (state_file, "%d ", (int) bitmap); } /* Write version information. 
*/ void state_writer::write_state_version (const char *version) { begin_s_expr ("version"); write_state_a_string (version); end_s_expr (); } /* Write a scalar type. We have only two of these. */ void state_writer::write_state_scalar_type (type_p current) { write_any_indent (0); if (current == &scalar_nonchar) fprintf (state_file, "scalar_nonchar "); else if (current == &scalar_char) fprintf (state_file, "scalar_char "); else fatal ("Unexpected type in write_state_scalar_type"); write_state_common_type_content (current); } /* Write the string type. There is only one such thing! */ void state_writer::write_state_string_type (type_p current) { if (current == &string_type) { write_any_indent (0); fprintf (state_file, "string "); write_state_common_type_content (current); } else fatal ("Unexpected type in write_state_string_type"); } /* Write an undefined type. */ void state_writer::write_state_undefined_type (type_p current) { DBGPRINTF ("undefined type @ %p #%d '%s'", (void *) current, current->state_number, current->u.s.tag); write_any_indent (0); fprintf (state_file, "undefined "); gcc_assert (current->gc_used == GC_UNUSED); write_state_common_type_content (current); if (current->u.s.tag != NULL) write_state_a_string (current->u.s.tag); else { write_any_indent (0); fprintf (state_file, "nil"); } write_state_fileloc (type_lineloc (current)); } /* Common code to write structure like types. 
*/ void state_writer::write_state_struct_union_type (type_p current, const char *kindstr) { DBGPRINTF ("%s type @ %p #%d '%s'", kindstr, (void *) current, current->state_number, current->u.s.tag); write_any_indent (0); fprintf (state_file, "%s ", kindstr); write_state_common_type_content (current); if (current->u.s.tag != NULL) write_state_a_string (current->u.s.tag); else { write_any_indent (0); fprintf (state_file, "nil"); } write_state_fileloc (type_lineloc (current)); write_state_fields (current->u.s.fields); write_state_options (current->u.s.opt); write_state_lang_bitmap (current->u.s.bitmap); } /* Write a GTY struct type. */ void state_writer::write_state_struct_type (type_p current) { write_state_struct_union_type (current, "struct"); write_state_type (current->u.s.lang_struct); write_state_type (current->u.s.base_class); } /* Write a GTY user-defined struct type. */ void state_writer::write_state_user_struct_type (type_p current) { DBGPRINTF ("user_struct type @ %p #%d '%s'", (void *) current, current->state_number, current->u.s.tag); write_any_indent (0); fprintf (state_file, "user_struct "); write_state_common_type_content (current); if (current->u.s.tag != NULL) write_state_a_string (current->u.s.tag); else { write_any_indent (0); fprintf (state_file, "nil"); } write_state_fileloc (type_lineloc (current)); write_state_fields (current->u.s.fields); } /* write a GTY union type. */ void state_writer::write_state_union_type (type_p current) { write_state_struct_union_type (current, "union"); write_state_type (current->u.s.lang_struct); } /* Write a lang_struct type. This is tricky and was painful to debug, we deal with the next field specifically within their lang_struct subfield, which points to a linked list of homonumous types. Change this function with extreme care, see also read_state_lang_struct_type. 
*/ void state_writer::write_state_lang_struct_type (type_p current) { int nbhomontype = 0; type_p hty = NULL; const char *homoname = 0; write_state_struct_union_type (current, "lang_struct"); /* lang_struct-ures are particularly tricky, since their u.s.lang_struct field gives a list of homonymous struct-s or union-s! */ DBGPRINTF ("lang_struct @ %p #%d", (void *) current, current->state_number); for (hty = current->u.s.lang_struct; hty != NULL; hty = hty->next) { nbhomontype++; DBGPRINTF ("homonymous #%d hty @ %p #%d '%s'", nbhomontype, (void *) hty, hty->state_number, hty->u.s.tag); /* Every member of the homonymous list should have the same tag. */ gcc_assert (union_or_struct_p (hty)); gcc_assert (hty->u.s.lang_struct == current); if (!homoname) homoname = hty->u.s.tag; gcc_assert (strcmp (homoname, hty->u.s.tag) == 0); } begin_s_expr ("homotypes"); fprintf (state_file, "%d", nbhomontype); for (hty = current->u.s.lang_struct; hty != NULL; hty = hty->next) write_state_type (hty); end_s_expr (); } /* Write a parametrized structure GTY type. */ void state_writer::write_state_param_struct_type (type_p current) { int i; write_any_indent (0); fprintf (state_file, "param_struct "); write_state_common_type_content (current); write_state_type (current->u.param_struct.stru); for (i = 0; i < NUM_PARAM; i++) { if (current->u.param_struct.param[i] != NULL) write_state_type (current->u.param_struct.param[i]); else { write_any_indent (0); fprintf (state_file, "nil "); } } write_state_fileloc (&current->u.param_struct.line); } /* Write a pointer type. */ void state_writer::write_state_pointer_type (type_p current) { write_any_indent (0); fprintf (state_file, "pointer "); write_state_common_type_content (current); write_state_type (current->u.p); } /* Write an array type. 
*/ void state_writer::write_state_array_type (type_p current) { write_any_indent (0); fprintf (state_file, "array "); write_state_common_type_content (current); if (current->u.a.len != NULL) write_state_a_string (current->u.a.len); else { write_any_indent (1); fprintf (state_file, " nil"); } write_any_indent (1); fprintf (state_file, " "); write_state_type (current->u.a.p); } /* Write the gc_used information. */ void state_writer::write_state_gc_used (enum gc_used_enum gus) { write_any_indent (1); switch (gus) { case GC_UNUSED: fprintf (state_file, " gc_unused"); break; case GC_USED: fprintf (state_file, " gc_used"); break; case GC_MAYBE_POINTED_TO: fprintf (state_file, " gc_maybe_pointed_to"); break; case GC_POINTED_TO: fprintf (state_file, " gc_pointed_to"); break; default: gcc_unreachable (); } } /* Utility routine to write the common content of all types. Notice that the next field is *not* written on purpose. */ void state_writer::write_state_common_type_content (type_p current) { write_any_indent (0); fprintf (state_file, "%d ", current->state_number); /* We do not write the next type, because list of types are explicitly written. However, lang_struct are special in that respect. See function write_state_lang_struct_type for more. */ write_state_type (current->pointer_to); write_state_gc_used (current->gc_used); } /* The important and recursive routine writing GTY types as understood by gengtype. Types which have a positive state_number have already been seen and written. 
*/ void state_writer::write_state_type (type_p current) { write_any_indent (0); if (current == NULL) { fprintf (state_file, "nil "); return; } begin_s_expr ("type"); if (current->state_number > 0) { write_any_indent (0); fprintf (state_file, "already_seen %d", current->state_number); } else { m_state_written_type_count++; DBGPRINTF ("writing type #%d @%p old number %d", m_state_written_type_count, (void *) current, current->state_number); current->state_number = m_state_written_type_count; switch (current->kind) { case TYPE_NONE: gcc_unreachable (); case TYPE_UNDEFINED: write_state_undefined_type (current); break; case TYPE_STRUCT: write_state_struct_type (current); break; case TYPE_USER_STRUCT: write_state_user_struct_type (current); break; case TYPE_UNION: write_state_union_type (current); break; case TYPE_POINTER: write_state_pointer_type (current); break; case TYPE_ARRAY: write_state_array_type (current); break; case TYPE_LANG_STRUCT: write_state_lang_struct_type (current); break; case TYPE_PARAM_STRUCT: write_state_param_struct_type (current); break; case TYPE_SCALAR: write_state_scalar_type (current); break; case TYPE_STRING: write_state_string_type (current); break; } } /* Terminate the "type" s-expression. */ end_s_expr (); } /* Write a pair. */ void state_writer::write_state_pair (pair_p current) { if (current == NULL) { write_any_indent (0); fprintf (state_file, "nil)"); return; } begin_s_expr ("pair"); if (current->name != NULL) write_state_a_string (current->name); else write_state_a_string ("nil"); write_state_type (current->type); write_state_fileloc (&(current->line)); write_state_options (current->opt); /* Terminate the "pair" s-expression. */ end_s_expr (); } /* Write a pair list and return the number of pairs written. 
*/ int state_writer::write_state_pair_list (pair_p list) { int nbpair = 0; pair_p current; for (current = list; current != NULL; current = current->next) { write_state_pair (current); nbpair++; } return nbpair; } /* When writing imported linked lists, like typedefs, structures, param_structs, ... we count their length first and write it. These eases the reading, and enables an extra verification on the number of actually read items. */ /* Write our typedefs. */ void state_writer::write_state_typedefs (void) { int nbtypedefs = pair_list_length (typedefs); int nbpairs = 0; begin_s_expr ("typedefs"); fprintf (state_file, "%d", nbtypedefs); nbpairs = write_state_pair_list (typedefs); gcc_assert (nbpairs == nbtypedefs); end_s_expr (); if (verbosity_level >= 2) printf ("%s wrote %d typedefs\n", progname, nbtypedefs); } /* Write our structures. */ void state_writer::write_state_structures (void) { int nbstruct = 0; type_p current; for (current = structures; current != NULL; current = current->next) nbstruct++; begin_s_expr ("structures"); fprintf (state_file, "%d", nbstruct); for (current = structures; current != NULL; current = current->next) { write_new_line (); write_state_type (current); } /* Terminate the "structures" s-expression. */ end_s_expr (); if (verbosity_level >= 2) printf ("%s wrote %d structures in state\n", progname, nbstruct); } /* Write our param_struct-s. */ void state_writer::write_state_param_structs (void) { int nbparamstruct = 0; type_p current; for (current = param_structs; current != NULL; current = current->next) nbparamstruct++; begin_s_expr ("param_structs"); fprintf (state_file, "%d", nbparamstruct); for (current = param_structs; current != NULL; current = current->next) write_state_type (current); end_s_expr (); } /* Write our variables. 
*/ void state_writer::write_state_variables (void) { int nbvars = pair_list_length (variables); int nbpairs = 0; begin_s_expr ("variables"); fprintf (state_file, "%d", nbvars); nbpairs = write_state_pair_list (variables); gcc_assert (nbpairs == nbvars); end_s_expr (); if (verbosity_level >= 2) printf ("%s wrote %d variables.\n", progname, nbvars); } /* Write the source directory. File locations within the source directory have been written specifically. */ void state_writer::write_state_srcdir (void) { begin_s_expr ("srcdir"); write_state_a_string (srcdir); end_s_expr (); } /* Count and write the list of our files. */ void state_writer::write_state_files_list (void) { int i = 0; /* Write the list of files with their lang_bitmap. */ begin_s_expr ("fileslist"); fprintf (state_file, "%d", (int) num_gt_files); for (i = 0; i < (int) num_gt_files; i++) { const char *cursrcrelpath = NULL; const input_file *curfil = gt_files[i]; /* Most of the files are inside $(srcdir) so it is worth to handle them specially. */ cursrcrelpath = get_file_srcdir_relative_path (curfil); if (cursrcrelpath) { begin_s_expr ("srcfile"); fprintf (state_file, "%d ", get_lang_bitmap (curfil)); write_state_a_string (cursrcrelpath); } else { begin_s_expr ("file"); fprintf (state_file, "%d ", get_lang_bitmap (curfil)); write_state_a_string (get_input_file_name (curfil)); } /* Terminate the inner s-expression (either "srcfile" or "file"). */ end_s_expr (); } /* Terminate the "fileslist" s-expression. */ end_s_expr (); } /* Write the list of GCC front-end languages. */ void state_writer::write_state_languages (void) { int i = 0; begin_s_expr ("languages"); fprintf (state_file, "%d", (int) num_lang_dirs); for (i = 0; i < (int) num_lang_dirs; i++) { /* Languages names are identifiers, we expect only letters or underscores or digits in them. In particular, C++ is not a valid language name, but cp is valid. */ fprintf (state_file, " %s", lang_dir_names[i]); } end_s_expr (); } /* Write the trailer. 
*/ static void write_state_trailer (void) { /* This test should probably catch IO errors like disk full... */ if (fputs ("\n(!endfile)\n", state_file) == EOF) fatal ("failed to write state trailer [%s]", xstrerror (errno)); } /* The write_state routine is the only writing routine called by main in gengtype.c. To avoid messing the state if gengtype is interrupted or aborted, we write a temporary file and rename it after having written it in totality. */ void write_state (const char *state_path) { long statelen = 0; time_t now = 0; char *temp_state_path = NULL; char tempsuffix[40]; time (&now); /* We write a unique temporary file which is renamed when complete * only. So even if gengtype is interrupted, the written state file * won't be partially written, since the temporary file is not yet * renamed in that case. */ memset (tempsuffix, 0, sizeof (tempsuffix)); snprintf (tempsuffix, sizeof (tempsuffix) - 1, "-%ld-%d.tmp", (long) now, (int) getpid ()); temp_state_path = concat (state_path, tempsuffix, NULL); state_file = fopen (temp_state_path, "w"); if (state_file == NULL) fatal ("Failed to open file %s for writing state: %s", temp_state_path, xstrerror (errno)); if (verbosity_level >= 3) printf ("%s writing state file %s temporarily in %s\n", progname, state_path, temp_state_path); /* This is the first line of the state. Perhaps the file utility could know about that, so don't change it often. */ fprintf (state_file, ";;;;@@@@ GCC gengtype state\n"); /* Output a few comments for humans. */ fprintf (state_file, ";;; DON'T EDIT THIS FILE, since generated by GCC's gengtype\n"); fprintf (state_file, ";;; The format of this file is tied to a particular version of GCC.\n"); fprintf (state_file, ";;; Don't parse this file wihout knowing GCC gengtype internals.\n"); fprintf (state_file, ";;; This file should be parsed by the same %s which wrote it.\n", progname); state_writer sw; /* The first non-comment significant line gives the version string. 
*/ sw.write_state_version (version_string); sw.write_state_srcdir (); sw.write_state_languages (); sw.write_state_files_list (); sw.write_state_structures (); sw.write_state_typedefs (); sw.write_state_param_structs (); sw.write_state_variables (); write_state_trailer (); statelen = ftell (state_file); if (ferror (state_file)) fatal ("output error when writing state file %s [%s]", temp_state_path, xstrerror (errno)); if (fclose (state_file)) fatal ("failed to close state file %s [%s]", temp_state_path, xstrerror (errno)); if (rename (temp_state_path, state_path)) fatal ("failed to rename %s to state file %s [%s]", temp_state_path, state_path, xstrerror (errno)); free (temp_state_path); if (verbosity_level >= 1) printf ("%s wrote state file %s of %ld bytes with %d GTY-ed types\n", progname, state_path, statelen, sw.m_state_written_type_count); } /** End of writing routines! The corresponding reading routines follow. **/ /* Forward declarations, since some read_state_* functions are recursive! */ static void read_state_fileloc (struct fileloc *line); static void read_state_options (options_p *opt); static void read_state_type (type_p *current); static void read_state_pair (pair_p *pair); /* Return the number of pairs actually read. */ static int read_state_pair_list (pair_p *list); static void read_state_fields (pair_p *fields); static void read_state_common_type_content (type_p current); /* Record into the state_seen_types hash-table a type which we are reading, to enable recursive or circular references to it. */ static void record_type (type_p type) { PTR *slot; slot = htab_find_slot (state_seen_types, type, INSERT); gcc_assert (slot); *slot = type; } /* Read an already seen type. 
*/ static void read_state_already_seen_type (type_p *type) { struct state_token_st *t0 = peek_state_token (0); if (state_token_kind (t0) == STOK_INTEGER) { PTR *slot = NULL; struct type loctype = { TYPE_SCALAR, 0, 0, 0, GC_UNUSED, {0} }; loctype.state_number = t0->stok_un.stok_num; slot = htab_find_slot (state_seen_types, &loctype, NO_INSERT); if (slot == NULL) { fatal_reading_state (t0, "Unknown type"); } next_state_tokens (1); *type = (type_p) *slot; } else { fatal_reading_state (t0, "Bad seen type"); } } /* Read the scalar_nonchar type. */ static void read_state_scalar_nonchar_type (type_p *type) { *type = &scalar_nonchar; read_state_common_type_content (*type); } /* Read the scalar_char type. */ static void read_state_scalar_char_type (type_p *type) { *type = &scalar_char; read_state_common_type_content (*type); } /* Read the string_type. */ static void read_state_string_type (type_p *type) { *type = &string_type; read_state_common_type_content (*type); } /* Read a lang_bitmap representing a set of GCC front-end languages. */ static void read_state_lang_bitmap (lang_bitmap *bitmap) { struct state_token_st *t; t = peek_state_token (0); if (state_token_kind (t) == STOK_INTEGER) { *bitmap = t->stok_un.stok_num; next_state_tokens (1); } else { fatal_reading_state (t, "Bad syntax for bitmap"); } } /* Read an undefined type. 
*/ static void read_state_undefined_type (type_p type) { struct state_token_st *t0; type->kind = TYPE_UNDEFINED; read_state_common_type_content (type); t0 = peek_state_token (0); if (state_token_kind (t0) == STOK_STRING) { if (state_token_is_name (t0, "nil")) { type->u.s.tag = NULL; DBGPRINTF ("read anonymous undefined type @%p #%d", (void *) type, type->state_number); } else { type->u.s.tag = xstrdup (t0->stok_un.stok_string); DBGPRINTF ("read undefined type @%p #%d '%s'", (void *) type, type->state_number, type->u.s.tag); } next_state_tokens (1); read_state_fileloc (&(type->u.s.line)); } else { fatal_reading_state (t0, "Bad tag in undefined type"); } } /* Read a GTY-ed struct type. */ static void read_state_struct_type (type_p type) { struct state_token_st *t0; type->kind = TYPE_STRUCT; read_state_common_type_content (type); t0 = peek_state_token (0); if (state_token_kind (t0) == STOK_STRING) { if (state_token_is_name (t0, "nil")) { type->u.s.tag = NULL; DBGPRINTF ("read anonymous struct type @%p #%d", (void *) type, type->state_number); } else { type->u.s.tag = xstrdup (t0->stok_un.stok_string); DBGPRINTF ("read struct type @%p #%d '%s'", (void *) type, type->state_number, type->u.s.tag); } next_state_tokens (1); read_state_fileloc (&(type->u.s.line)); read_state_fields (&(type->u.s.fields)); read_state_options (&(type->u.s.opt)); read_state_lang_bitmap (&(type->u.s.bitmap)); read_state_type (&(type->u.s.lang_struct)); read_state_type (&(type->u.s.base_class)); if (type->u.s.base_class) add_subclass (type->u.s.base_class, type); } else { fatal_reading_state (t0, "Bad tag in struct type"); } } /* Read a GTY-ed user-provided struct TYPE. 
*/ static void read_state_user_struct_type (type_p type) { struct state_token_st *t0; type->kind = TYPE_USER_STRUCT; read_state_common_type_content (type); t0 = peek_state_token (0); if (state_token_kind (t0) == STOK_STRING) { if (state_token_is_name (t0, "nil")) { type->u.s.tag = NULL; DBGPRINTF ("read anonymous struct type @%p #%d", (void *) type, type->state_number); } else { type->u.s.tag = xstrdup (t0->stok_un.stok_string); DBGPRINTF ("read struct type @%p #%d '%s'", (void *) type, type->state_number, type->u.s.tag); } next_state_tokens (1); read_state_fileloc (&(type->u.s.line)); read_state_fields (&(type->u.s.fields)); } else { fatal_reading_state (t0, "Bad tag in user-struct type"); } } /* Read a GTY-ed union type. */ static void read_state_union_type (type_p type) { struct state_token_st *t0; type->kind = TYPE_UNION; read_state_common_type_content (type); t0 = peek_state_token (0); if (state_token_kind (t0) == STOK_STRING) { if (state_token_is_name (t0, "nil")) { type->u.s.tag = NULL; DBGPRINTF ("read anonymous union type @%p #%d", (void *) type, type->state_number); } else { type->u.s.tag = xstrdup (t0->stok_un.stok_string); DBGPRINTF ("read union type @%p #%d '%s'", (void *) type, type->state_number, type->u.s.tag); } next_state_tokens (1); read_state_fileloc (&(type->u.s.line)); read_state_fields (&(type->u.s.fields)); read_state_options (&(type->u.s.opt)); read_state_lang_bitmap (&(type->u.s.bitmap)); read_state_type (&(type->u.s.lang_struct)); } else fatal_reading_state (t0, "Bad tag in union type"); } /* Read a GTY-ed pointer type. */ static void read_state_pointer_type (type_p type) { type->kind = TYPE_POINTER; read_state_common_type_content (type); DBGPRINTF ("read pointer type @%p #%d", (void *) type, type->state_number); read_state_type (&(type->u.p)); } /* Read a GTY-ed array type. 
*/
static void
read_state_array_type (type_p type)
{
  struct state_token_st *t0;

  type->kind = TYPE_ARRAY;
  read_state_common_type_content (type);
  t0 = peek_state_token (0);
  /* The array length is either a quoted string or the bare name nil.  */
  if (state_token_kind (t0) == STOK_STRING)
    {
      type->u.a.len = xstrdup (t0->stok_un.stok_string);
      DBGPRINTF ("read array type @%p #%d length '%s'",
		 (void *) type, type->state_number, type->u.a.len);
      next_state_tokens (1);
    }
  else if (state_token_is_name (t0, "nil"))
    {
      type->u.a.len = NULL;
      DBGPRINTF ("read array type @%p #%d without length",
		 (void *) type, type->state_number);
      next_state_tokens (1);
    }
  else
    fatal_reading_state (t0, "Bad array name type");
  read_state_type (&(type->u.a.p));
}

/* Read a lang_struct type for GTY-ed struct-s which depends upon GCC
   front-end languages.  This is a tricky function and it was painful
   to debug.  Change it with extreme care.  See also
   write_state_lang_struct_type.  */
static void
read_state_lang_struct_type (type_p type)
{
  struct state_token_st *t0 = NULL;
  struct state_token_st *t1 = NULL;
  struct state_token_st *t2 = NULL;

  type->kind = TYPE_LANG_STRUCT;
  read_state_common_type_content (type);
  t0 = peek_state_token (0);
  if (state_token_kind (t0) == STOK_STRING)
    {
      if (state_token_is_name (t0, "nil"))
	{
	  DBGPRINTF ("read anonymous lang_struct type @%p #%d",
		     (void *) type, type->state_number);
	  type->u.s.tag = NULL;
	}
      else
	{
	  type->u.s.tag = xstrdup (t0->stok_un.stok_string);
	  DBGPRINTF ("read lang_struct type @%p #%d '%s'",
		     (void *) type, type->state_number, type->u.s.tag);
	}
      next_state_tokens (1);
    }
  else
    fatal_reading_state (t0, "Bad tag in lang struct type");
  read_state_fileloc (&(type->u.s.line));
  read_state_fields (&(type->u.s.fields));
  read_state_options (&(type->u.s.opt));
  read_state_lang_bitmap (&(type->u.s.bitmap));
  /* Within lang_struct-ures, the lang_struct field is a linked list
     of homonymous types! */
  t0 = peek_state_token (0);
  t1 = peek_state_token (1);
  t2 = peek_state_token (2);
  /* Parse (!homotypes <number-types> <type-1> .... <type-n>) */
  if (state_token_kind (t0) == STOK_LEFTPAR
      && state_token_is_name (t1, "!homotypes")
      && state_token_kind (t2) == STOK_INTEGER)
    {
      type_p *prevty = &type->u.s.lang_struct;
      int nbhomotype = t2->stok_un.stok_num;
      int i = 0;
      t0 = t1 = t2 = NULL;
      next_state_tokens (3);
      /* Chain the homonymous types onto u.s.lang_struct via their
         next fields, in state-file order.  */
      for (i = 0; i < nbhomotype; i++)
	{
	  read_state_type (prevty);
	  t0 = peek_state_token (0);
	  if (*prevty)
	    prevty = &(*prevty)->next;
	  else
	    fatal_reading_state (t0,
				 "expecting type in homotype list for lang_struct");
	};
      if (state_token_kind (t0) != STOK_RIGHTPAR)
	fatal_reading_state (t0,
			     "expecting ) in homotype list for lang_struct");
      next_state_tokens (1);
    }
  else
    fatal_reading_state (t0, "expecting !homotypes for lang_struct");
}

/* Read a param_struct type for GTY parametrized structures.  */
static void
read_state_param_struct_type (type_p type)
{
  int i;
  struct state_token_st *t0;
  type->kind = TYPE_PARAM_STRUCT;
  read_state_common_type_content (type);
  DBGPRINTF ("read param_struct type @%p #%d",
	     (void *) type, type->state_number);
  read_state_type (&(type->u.param_struct.stru));

  /* Each of the NUM_PARAM slots is either nil or a type.  */
  for (i = 0; i < NUM_PARAM; i++)
    {
      t0 = peek_state_token (0);
      if (state_token_is_name (t0, "nil"))
	{
	  type->u.param_struct.param[i] = NULL;
	  next_state_tokens (1);
	}
      else
	read_state_type (&(type->u.param_struct.param[i]));
    }
  read_state_fileloc (&(type->u.param_struct.line));
}

/* Read the gc used information.  */
static void
read_state_gc_used (enum gc_used_enum *pgus)
{
  struct state_token_st *t0 = peek_state_token (0);
  if (state_token_is_name (t0, "gc_unused"))
    *pgus = GC_UNUSED;
  else if (state_token_is_name (t0, "gc_used"))
    *pgus = GC_USED;
  else if (state_token_is_name (t0, "gc_maybe_pointed_to"))
    *pgus = GC_MAYBE_POINTED_TO;
  else if (state_token_is_name (t0, "gc_pointed_to"))
    *pgus = GC_POINTED_TO;
  else
    fatal_reading_state (t0, "invalid gc_used information");
  next_state_tokens (1);
}

/* Utility function to read the common content of types.
*/ static void read_state_common_type_content (type_p current) { struct state_token_st *t0 = peek_state_token (0); if (state_token_kind (t0) == STOK_INTEGER) { current->state_number = t0->stok_un.stok_num; next_state_tokens (1); record_type (current); } else fatal_reading_state_printf (t0, "Expected integer for state_number line %d", state_line); /* We don't read the next field of the type. */ read_state_type (&current->pointer_to); read_state_gc_used (&current->gc_used); } /* Read a GTY-ed type. */ void read_state_type (type_p *current) { struct state_token_st *t0 = peek_state_token (0); struct state_token_st *t1 = peek_state_token (1); if (state_token_kind (t0) == STOK_LEFTPAR && state_token_is_name (t1, "!type")) { next_state_tokens (2); t0 = peek_state_token (0); if (state_token_is_name (t0, "already_seen")) { next_state_tokens (1); read_state_already_seen_type (current); } else { t0 = peek_state_token (0); if (state_token_is_name (t0, "scalar_nonchar")) { next_state_tokens (1); read_state_scalar_nonchar_type (current); } else if (state_token_is_name (t0, "scalar_char")) { next_state_tokens (1); read_state_scalar_char_type (current); } else if (state_token_is_name (t0, "string")) { next_state_tokens (1); read_state_string_type (current); } else if (state_token_is_name (t0, "undefined")) { *current = XCNEW (struct type); next_state_tokens (1); read_state_undefined_type (*current); } else if (state_token_is_name (t0, "struct")) { *current = XCNEW (struct type); next_state_tokens (1); read_state_struct_type (*current); } else if (state_token_is_name (t0, "union")) { *current = XCNEW (struct type); next_state_tokens (1); read_state_union_type (*current); } else if (state_token_is_name (t0, "lang_struct")) { *current = XCNEW (struct type); next_state_tokens (1); read_state_lang_struct_type (*current); } else if (state_token_is_name (t0, "param_struct")) { *current = XCNEW (struct type); next_state_tokens (1); read_state_param_struct_type (*current); } else if 
(state_token_is_name (t0, "pointer")) { *current = XCNEW (struct type); next_state_tokens (1); read_state_pointer_type (*current); } else if (state_token_is_name (t0, "array")) { *current = XCNEW (struct type); next_state_tokens (1); read_state_array_type (*current); } else if (state_token_is_name (t0, "user_struct")) { *current = XCNEW (struct type); next_state_tokens (1); read_state_user_struct_type (*current); } else fatal_reading_state (t0, "bad type in (!type"); } t0 = peek_state_token (0); if (state_token_kind (t0) != STOK_RIGHTPAR) fatal_reading_state (t0, "missing ) in type"); next_state_tokens (1); } else if (state_token_is_name (t0, "nil")) { next_state_tokens (1); *current = NULL; } else fatal_reading_state (t0, "bad type syntax"); } /* Read a file location. Files within the source directory are dealt with specifically. */ void read_state_fileloc (struct fileloc *floc) { bool issrcfile = false; struct state_token_st *t0 = peek_state_token (0); struct state_token_st *t1 = peek_state_token (1); gcc_assert (floc != NULL); gcc_assert (srcdir != NULL); if (state_token_kind (t0) == STOK_LEFTPAR && (state_token_is_name (t1, "!fileloc") || (issrcfile = state_token_is_name (t1, "!srcfileloc")))) { next_state_tokens (2); t0 = peek_state_token (0); t1 = peek_state_token (1); if (state_token_kind (t0) == STOK_STRING && state_token_kind (t1) == STOK_INTEGER) { char *path = t0->stok_un.stok_string; if (issrcfile) { static const char dirsepstr[2] = { DIR_SEPARATOR, (char) 0 }; char *fullpath = concat (srcdir, dirsepstr, path, NULL); floc->file = input_file_by_name (fullpath); free (fullpath); } else floc->file = input_file_by_name (path); floc->line = t1->stok_un.stok_num; next_state_tokens (2); } else fatal_reading_state (t0, "Bad fileloc syntax, expected path string and line"); t0 = peek_state_token (0); if (state_token_kind (t0) != STOK_RIGHTPAR) fatal_reading_state (t0, "Bad fileloc syntax, expected )"); next_state_tokens (1); } else if (state_token_is_name (t0, 
"nil")) { next_state_tokens (1); floc->file = NULL; floc->line = 0; } else fatal_reading_state (t0, "Bad fileloc syntax"); } /* Read the fields of a GTY-ed type. */ void read_state_fields (pair_p *fields) { pair_p tmp = NULL; struct state_token_st *t0 = peek_state_token (0); struct state_token_st *t1 = peek_state_token (1); struct state_token_st *t2 = peek_state_token (2); if (state_token_kind (t0) == STOK_LEFTPAR && state_token_is_name (t1, "!fields") && state_token_kind (t2) == STOK_INTEGER) { int nbfields = t2->stok_un.stok_num; int nbpairs = 0; next_state_tokens (3); nbpairs = read_state_pair_list (&tmp); t0 = peek_state_token (0); if (nbpairs != nbfields) fatal_reading_state_printf (t0, "Mismatched fields number, expected %d got %d", nbpairs, nbfields); if (state_token_kind (t0) == STOK_RIGHTPAR) next_state_tokens (1); else fatal_reading_state (t0, "Bad fields expecting )"); } *fields = tmp; } /* Read a string option. */ static void read_state_string_option (options_p opt) { struct state_token_st *t0 = peek_state_token (0); opt->kind = OPTION_STRING; if (state_token_kind (t0) == STOK_STRING) { opt->info.string = xstrdup (t0->stok_un.stok_string); next_state_tokens (1); } else if (state_token_is_name (t0, "nil")) { opt->info.string = NULL; next_state_tokens (1); } else fatal_reading_state (t0, "Missing name in string option"); } /* Read a type option. */ static void read_state_type_option (options_p opt) { opt->kind = OPTION_TYPE; read_state_type (&(opt->info.type)); } /* Read a nested option. 
*/ static void read_state_nested_option (options_p opt) { struct state_token_st *t0; opt->info.nested = XCNEW (struct nested_ptr_data); opt->kind = OPTION_NESTED; read_state_type (&(opt->info.nested->type)); t0 = peek_state_token (0); if (state_token_kind (t0) == STOK_STRING) { opt->info.nested->convert_from = xstrdup (t0->stok_un.stok_string); next_state_tokens (1); } else if (state_token_is_name (t0, "nil")) { opt->info.nested->convert_from = NULL; next_state_tokens (1); } else fatal_reading_state (t0, "Bad nested convert_from option"); t0 = peek_state_token (0); if (state_token_kind (t0) == STOK_STRING) { opt->info.nested->convert_to = xstrdup (t0->stok_un.stok_string); next_state_tokens (1); } else if (state_token_is_name (t0, "nil")) { opt->info.nested->convert_to = NULL; next_state_tokens (1); } else fatal_reading_state (t0, "Bad nested convert_from option"); } /* Read an GTY option. */ static void read_state_option (options_p *opt) { struct state_token_st *t0 = peek_state_token (0); struct state_token_st *t1 = peek_state_token (1); if (state_token_kind (t0) == STOK_LEFTPAR && state_token_is_name (t1, "!option")) { next_state_tokens (2); t0 = peek_state_token (0); if (state_token_kind (t0) == STOK_NAME) { *opt = XCNEW (struct options); if (state_token_is_name (t0, "nil")) (*opt)->name = NULL; else (*opt)->name = t0->stok_un.stok_ident->stid_name; next_state_tokens (1); t0 = peek_state_token (0); if (state_token_kind (t0) == STOK_NAME) { if (state_token_is_name (t0, "string")) { next_state_tokens (1); read_state_string_option (*opt); } else if (state_token_is_name (t0, "type")) { next_state_tokens (1); read_state_type_option (*opt); } else if (state_token_is_name (t0, "nested")) { next_state_tokens (1); read_state_nested_option (*opt); } else fatal_reading_state (t0, "Bad option type"); t0 = peek_state_token (0); if (state_token_kind (t0) != STOK_RIGHTPAR) fatal_reading_state (t0, "Bad syntax in option, expecting )"); next_state_tokens (1); } else 
fatal_reading_state (t0, "Missing option type"); } else fatal_reading_state (t0, "Bad name for option"); } else fatal_reading_state (t0, "Bad option, waiting for )"); } /* Read a list of options. */ void read_state_options (options_p *opt) { options_p head = NULL; options_p previous = NULL; options_p current_option = NULL; struct state_token_st *t0 = peek_state_token (0); struct state_token_st *t1 = peek_state_token (1); if (state_token_kind (t0) == STOK_LEFTPAR && state_token_is_name (t1, "!options")) { next_state_tokens (2); t0 = peek_state_token (0); while (state_token_kind (t0) != STOK_RIGHTPAR) { read_state_option (&current_option); if (head == NULL) { head = current_option; previous = head; } else { previous->next = current_option; previous = current_option; } t0 = peek_state_token (0); } next_state_tokens (1); } else if (state_token_is_name (t0, "nil")) { next_state_tokens (1); } else fatal_reading_state (t0, "Bad options syntax"); *opt = head; } /* Read a version, and check against the version of the gengtype. */ static void read_state_version (const char *version_string) { struct state_token_st *t0 = peek_state_token (0); struct state_token_st *t1 = peek_state_token (1); if (state_token_kind (t0) == STOK_LEFTPAR && state_token_is_name (t1, "!version")) { next_state_tokens (2); t0 = peek_state_token (0); t1 = peek_state_token (1); if (state_token_kind (t0) == STOK_STRING && state_token_kind (t1) == STOK_RIGHTPAR) { /* Check that the read version string is the same as current version. */ if (strcmp (version_string, t0->stok_un.stok_string)) fatal_reading_state_printf (t0, "version string mismatch; expecting %s but got %s", version_string, t0->stok_un.stok_string); next_state_tokens (2); } else fatal_reading_state (t0, "Missing version or right parenthesis"); } else fatal_reading_state (t0, "Bad version syntax"); } /* Read a pair. 
*/ void read_state_pair (pair_p *current) { struct state_token_st *t0 = peek_state_token (0); struct state_token_st *t1 = peek_state_token (1); if (state_token_kind (t0) == STOK_LEFTPAR && state_token_is_name (t1, "!pair")) { *current = XCNEW (struct pair); next_state_tokens (2); t0 = peek_state_token (0); if (state_token_kind (t0) == STOK_STRING) { if (strcmp (t0->stok_un.stok_string, "nil") == 0) { (*current)->name = NULL; } else { (*current)->name = xstrdup (t0->stok_un.stok_string); } next_state_tokens (1); read_state_type (&((*current)->type)); read_state_fileloc (&((*current)->line)); read_state_options (&((*current)->opt));; t0 = peek_state_token (0); if (state_token_kind (t0) == STOK_RIGHTPAR) { next_state_tokens (1); } else { fatal_reading_state (t0, "Bad syntax for pair, )"); } } else { fatal_reading_state (t0, "Bad name for pair"); } } else if (state_token_kind (t0) == STOK_NAME && state_token_is_name (t0, "nil")) { next_state_tokens (1); *current = NULL; } else fatal_reading_state_printf (t0, "Bad syntax for pair, (!pair %d", state_token->stok_kind); } /* Return the number of pairs actually read. */ int read_state_pair_list (pair_p *list) { int nbpair = 0; pair_p head = NULL; pair_p previous = NULL; pair_p tmp = NULL; struct state_token_st *t0 = peek_state_token (0); while (t0 && state_token_kind (t0) != STOK_RIGHTPAR) { read_state_pair (&tmp); if (head == NULL) { head = tmp; previous = head; } else { previous->next = tmp; previous = tmp; } t0 = peek_state_token (0); nbpair++; } /* don't consume the ); the caller will eat it. */ *list = head; return nbpair; } /* Read the typedefs. 
*/ static void read_state_typedefs (pair_p *typedefs) { int nbtypedefs = 0; pair_p list = NULL; struct state_token_st *t0 = peek_state_token (0); struct state_token_st *t1 = peek_state_token (1); struct state_token_st *t2 = peek_state_token (2); if (state_token_kind (t0) == STOK_LEFTPAR && state_token_is_name (t1, "!typedefs") && state_token_kind (t2) == STOK_INTEGER) { int nbpairs = 0; nbtypedefs = t2->stok_un.stok_num; next_state_tokens (3); nbpairs = read_state_pair_list (&list); t0 = peek_state_token (0); if (nbpairs != nbtypedefs) fatal_reading_state_printf (t0, "invalid number of typedefs, expected %d but got %d", nbtypedefs, nbpairs); if (state_token_kind (t0) == STOK_RIGHTPAR) next_state_tokens (1); else fatal_reading_state (t0, "Bad typedefs syntax )"); } else fatal_reading_state (t0, "Bad typedefs syntax (!typedefs"); if (verbosity_level >= 2) printf ("%s read %d typedefs from state\n", progname, nbtypedefs); *typedefs = list; } /* Read the structures. */ static void read_state_structures (type_p *structures) { type_p head = NULL; type_p previous = NULL; type_p tmp; int nbstruct = 0, countstruct = 0; struct state_token_st *t0 = peek_state_token (0); struct state_token_st *t1 = peek_state_token (1); struct state_token_st *t2 = peek_state_token (2); if (state_token_kind (t0) == STOK_LEFTPAR && state_token_is_name (t1, "!structures") && state_token_kind (t2) == STOK_INTEGER) { nbstruct = t2->stok_un.stok_num; next_state_tokens (3); t0 = peek_state_token (0); while (t0 && state_token_kind (t0) != STOK_RIGHTPAR) { tmp = NULL; read_state_type (&tmp); countstruct++; if (head == NULL) { head = tmp; previous = head; } else { previous->next = tmp; previous = tmp; } t0 = peek_state_token (0); } next_state_tokens (1); } else fatal_reading_state (t0, "Bad structures syntax"); if (countstruct != nbstruct) fatal_reading_state_printf (NULL_STATE_TOKEN, "expected %d structures but got %d", nbstruct, countstruct); if (verbosity_level >= 2) printf ("%s read %d structures 
from state\n", progname, nbstruct); *structures = head; } /* Read the param_struct-s. */ static void read_state_param_structs (type_p *param_structs) { int nbparamstructs = 0; int countparamstructs = 0; type_p head = NULL; type_p previous = NULL; type_p tmp; struct state_token_st *t0 = peek_state_token (0); struct state_token_st *t1 = peek_state_token (1); struct state_token_st *t2 = peek_state_token (2); if (state_token_kind (t0) == STOK_LEFTPAR && state_token_is_name (t1, "!param_structs") && state_token_kind (t2) == STOK_INTEGER) { nbparamstructs = t2->stok_un.stok_num; next_state_tokens (3); t0 = t1 = t2 = NULL; t0 = peek_state_token (0); while (state_token_kind (t0) != STOK_RIGHTPAR) { tmp = NULL; read_state_type (&tmp); if (head == NULL) { head = tmp; previous = head; } else { previous->next = tmp; previous = tmp; } t0 = peek_state_token (0); countparamstructs++; } next_state_tokens (1); } else fatal_reading_state (t0, "Bad param_structs syntax"); t0 = peek_state_token (0); if (countparamstructs != nbparamstructs) fatal_reading_state_printf (t0, "invalid number of param_structs expected %d got %d", nbparamstructs, countparamstructs); *param_structs = head; } /* Read the variables. 
*/ static void read_state_variables (pair_p *variables) { pair_p list = NULL; int nbvars = 0; struct state_token_st *t0 = peek_state_token (0); struct state_token_st *t1 = peek_state_token (1); struct state_token_st *t2 = peek_state_token (2); if (state_token_kind (t0) == STOK_LEFTPAR && state_token_is_name (t1, "!variables") && state_token_kind (t2) == STOK_INTEGER) { int nbpairs = 0; nbvars = t2->stok_un.stok_num; next_state_tokens (3); nbpairs = read_state_pair_list (&list); t0 = peek_state_token (0); if (nbpairs != nbvars) fatal_reading_state_printf (t0, "Invalid number of variables, expected %d but got %d", nbvars, nbpairs); if (state_token_kind (t0) == STOK_RIGHTPAR) next_state_tokens (1); else fatal_reading_state (t0, "Waiting for ) in variables"); } else fatal_reading_state (t0, "Bad variables syntax"); *variables = list; if (verbosity_level >= 2) printf ("%s read %d variables from state\n", progname, nbvars); } /* Read the source directory. */ static void read_state_srcdir (void) { struct state_token_st *t0 = peek_state_token (0); struct state_token_st *t1 = peek_state_token (1); if (state_token_kind (t0) == STOK_LEFTPAR && state_token_is_name (t1, "!srcdir")) { next_state_tokens (2); t0 = peek_state_token (0); t1 = peek_state_token (1); if (state_token_kind (t0) == STOK_STRING && state_token_kind (t1) == STOK_RIGHTPAR) { srcdir = xstrdup (t0->stok_un.stok_string); srcdir_len = strlen (srcdir); next_state_tokens (2); return; } } fatal_reading_state (t0, "Bad srcdir in state_file"); } /* Read the sequence of GCC front-end languages. 
*/ static void read_state_languages (void) { struct state_token_st *t0 = peek_state_token (0); struct state_token_st *t1 = peek_state_token (1); struct state_token_st *t2 = peek_state_token (2); if (state_token_kind (t0) == STOK_LEFTPAR && state_token_is_name (t1, "!languages") && state_token_kind (t2) == STOK_INTEGER) { int i = 0; num_lang_dirs = t2->stok_un.stok_num; lang_dir_names = XCNEWVEC (const char *, num_lang_dirs); next_state_tokens (3); t0 = t1 = t2 = NULL; for (i = 0; i < (int) num_lang_dirs; i++) { t0 = peek_state_token (0); if (state_token_kind (t0) != STOK_NAME) fatal_reading_state (t0, "expecting language name in state file"); lang_dir_names[i] = t0->stok_un.stok_ident->stid_name; next_state_tokens (1); } t0 = peek_state_token (0); if (state_token_kind (t0) != STOK_RIGHTPAR) fatal_reading_state (t0, "missing ) in languages list of state file"); next_state_tokens (1); } else fatal_reading_state (t0, "expecting languages list in state file"); } /* Read the sequence of files. 
*/ static void read_state_files_list (void) { struct state_token_st *t0 = peek_state_token (0); struct state_token_st *t1 = peek_state_token (1); struct state_token_st *t2 = peek_state_token (2); if (state_token_kind (t0) == STOK_LEFTPAR && state_token_is_name (t1, "!fileslist") && state_token_kind (t2) == STOK_INTEGER) { int i = 0; num_gt_files = t2->stok_un.stok_num; next_state_tokens (3); t0 = t1 = t2 = NULL; gt_files = XCNEWVEC (const input_file *, num_gt_files); for (i = 0; i < (int) num_gt_files; i++) { bool issrcfile = FALSE; t0 = t1 = t2 = NULL; t0 = peek_state_token (0); t1 = peek_state_token (1); t2 = peek_state_token (2); if (state_token_kind (t0) == STOK_LEFTPAR && (state_token_is_name (t1, "!file") || (issrcfile = state_token_is_name (t1, "!srcfile"))) && state_token_kind (t2) == STOK_INTEGER) { lang_bitmap bmap = t2->stok_un.stok_num; next_state_tokens (3); t0 = t1 = t2 = NULL; t0 = peek_state_token (0); t1 = peek_state_token (1); if (state_token_kind (t0) == STOK_STRING && state_token_kind (t1) == STOK_RIGHTPAR) { const char *fnam = t0->stok_un.stok_string; /* Allocate & fill a gt_file entry with space for the lang_bitmap before! */ input_file *curgt = NULL; if (issrcfile) { static const char dirsepstr[2] = { DIR_SEPARATOR, (char) 0 }; char *fullpath = concat (srcdir, dirsepstr, fnam, NULL); curgt = input_file_by_name (fullpath); free (fullpath); } else curgt = input_file_by_name (fnam); set_lang_bitmap (curgt, bmap); gt_files[i] = curgt; next_state_tokens (2); } else fatal_reading_state (t0, "bad file in !fileslist of state file"); } else fatal_reading_state (t0, "expecting file in !fileslist of state file"); }; t0 = peek_state_token (0); if (state_token_kind (t0) != STOK_RIGHTPAR) fatal_reading_state (t0, "missing ) for !fileslist in state file"); next_state_tokens (1); } else fatal_reading_state (t0, "missing !fileslist in state file"); } /* Read the trailer. 
*/ static void read_state_trailer (void) { struct state_token_st *t0 = peek_state_token (0); struct state_token_st *t1 = peek_state_token (1); struct state_token_st *t2 = peek_state_token (2); if (state_token_kind (t0) == STOK_LEFTPAR && state_token_is_name (t1, "!endfile") && state_token_kind (t2) == STOK_RIGHTPAR) next_state_tokens (3); else fatal_reading_state (t0, "missing !endfile in state file"); } /* Utility functions for the state_seen_types hash table. */ static unsigned hash_type_number (const void *ty) { const struct type *type = (const struct type *) ty; return type->state_number; } static int equals_type_number (const void *ty1, const void *ty2) { const struct type *type1 = (const struct type *) ty1; const struct type *type2 = (const struct type *) ty2; return type1->state_number == type2->state_number; } static int string_eq (const void *a, const void *b) { const char *a0 = (const char *)a; const char *b0 = (const char *)b; return (strcmp (a0, b0) == 0); } /* The function reading the state, called by main from gengtype.c. 
*/ void read_state (const char *path) { state_file = fopen (path, "r"); if (state_file == NULL) fatal ("Failed to open state file %s for reading [%s]", path, xstrerror (errno)); state_path = path; state_line = 1; if (verbosity_level >= 1) { printf ("%s reading state file %s;", progname, state_path); if (verbosity_level >= 2) putchar ('\n'); fflush (stdout); } state_seen_types = htab_create (2017, hash_type_number, equals_type_number, NULL); state_ident_tab = htab_create (4027, htab_hash_string, string_eq, NULL); read_state_version (version_string); read_state_srcdir (); read_state_languages (); read_state_files_list (); read_state_structures (&structures); if (ferror (state_file)) fatal_reading_state_printf (NULL_STATE_TOKEN, "input error while reading state [%s]", xstrerror (errno)); read_state_typedefs (&typedefs); read_state_param_structs (&param_structs); read_state_variables (&variables); read_state_trailer (); if (verbosity_level >= 1) { printf ("%s read %ld bytes.\n", progname, ftell (state_file)); fflush (stdout); }; if (fclose (state_file)) fatal ("failed to close read state file %s [%s]", path, xstrerror (errno)); state_file = NULL; state_path = NULL; } /* End of file gengtype-state.c. */
gpl-2.0
mina86/linux
arch/x86/kvm/mtrr.c
282
16526
/* * vMTRR implementation * * Copyright (C) 2006 Qumranet, Inc. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * Copyright(C) 2015 Intel Corporation. * * Authors: * Yaniv Kamay <yaniv@qumranet.com> * Avi Kivity <avi@qumranet.com> * Marcelo Tosatti <mtosatti@redhat.com> * Paolo Bonzini <pbonzini@redhat.com> * Xiao Guangrong <guangrong.xiao@linux.intel.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. */ #include <linux/kvm_host.h> #include <asm/mtrr.h> #include "cpuid.h" #include "mmu.h" #define IA32_MTRR_DEF_TYPE_E (1ULL << 11) #define IA32_MTRR_DEF_TYPE_FE (1ULL << 10) #define IA32_MTRR_DEF_TYPE_TYPE_MASK (0xff) static bool msr_mtrr_valid(unsigned msr) { switch (msr) { case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1: case MSR_MTRRfix64K_00000: case MSR_MTRRfix16K_80000: case MSR_MTRRfix16K_A0000: case MSR_MTRRfix4K_C0000: case MSR_MTRRfix4K_C8000: case MSR_MTRRfix4K_D0000: case MSR_MTRRfix4K_D8000: case MSR_MTRRfix4K_E0000: case MSR_MTRRfix4K_E8000: case MSR_MTRRfix4K_F0000: case MSR_MTRRfix4K_F8000: case MSR_MTRRdefType: case MSR_IA32_CR_PAT: return true; } return false; } static bool valid_pat_type(unsigned t) { return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */ } static bool valid_mtrr_type(unsigned t) { return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */ } bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) { int i; u64 mask; if (!msr_mtrr_valid(msr)) return false; if (msr == MSR_IA32_CR_PAT) { for (i = 0; i < 8; i++) if (!valid_pat_type((data >> (i * 8)) & 0xff)) return false; return true; } else if (msr == MSR_MTRRdefType) { if (data & ~0xcff) return false; return valid_mtrr_type(data & 0xff); } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) { for (i = 0; i < 8 ; i++) if (!valid_mtrr_type((data >> (i * 8)) & 0xff)) return false; return true; } /* variable MTRRs */ WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR)); mask = (~0ULL) 
<< cpuid_maxphyaddr(vcpu); if ((msr & 1) == 0) { /* MTRR base */ if (!valid_mtrr_type(data & 0xff)) return false; mask |= 0xf00; } else /* MTRR mask */ mask |= 0x7ff; if (data & mask) { kvm_inject_gp(vcpu, 0); return false; } return true; } EXPORT_SYMBOL_GPL(kvm_mtrr_valid); static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state) { return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E); } static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state) { return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE); } static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state) { return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; } static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu) { /* * Intel SDM 11.11.2.2: all MTRRs are disabled when * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC * memory type is applied to all of physical memory. * * However, virtual machines can be run with CPUID such that * there are no MTRRs. In that case, the firmware will never * enable MTRRs and it is obviously undesirable to run the * guest entirely with UC memory and we use WB. */ if (guest_cpuid_has_mtrr(vcpu)) return MTRR_TYPE_UNCACHABLE; else return MTRR_TYPE_WRBACK; } /* * Three terms are used in the following code: * - segment, it indicates the address segments covered by fixed MTRRs. * - unit, it corresponds to the MSR entry in the segment. * - range, a range is covered in one memory cache type. */ struct fixed_mtrr_segment { u64 start; u64 end; int range_shift; /* the start position in kvm_mtrr.fixed_ranges[]. */ int range_start; }; static struct fixed_mtrr_segment fixed_seg_table[] = { /* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */ { .start = 0x0, .end = 0x80000, .range_shift = 16, /* 64K */ .range_start = 0, }, /* * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units, * 16K fixed mtrr. */ { .start = 0x80000, .end = 0xc0000, .range_shift = 14, /* 16K */ .range_start = 8, }, /* * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units, * 4K fixed mtrr. 
*/ { .start = 0xc0000, .end = 0x100000, .range_shift = 12, /* 12K */ .range_start = 24, } }; /* * The size of unit is covered in one MSR, one MSR entry contains * 8 ranges so that unit size is always 8 * 2^range_shift. */ static u64 fixed_mtrr_seg_unit_size(int seg) { return 8 << fixed_seg_table[seg].range_shift; } static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit) { switch (msr) { case MSR_MTRRfix64K_00000: *seg = 0; *unit = 0; break; case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000: *seg = 1; *unit = msr - MSR_MTRRfix16K_80000; break; case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000: *seg = 2; *unit = msr - MSR_MTRRfix4K_C0000; break; default: return false; } return true; } static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end) { struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg]; u64 unit_size = fixed_mtrr_seg_unit_size(seg); *start = mtrr_seg->start + unit * unit_size; *end = *start + unit_size; WARN_ON(*end > mtrr_seg->end); } static int fixed_mtrr_seg_unit_range_index(int seg, int unit) { struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg]; WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg) > mtrr_seg->end); /* each unit has 8 ranges. 
*/ return mtrr_seg->range_start + 8 * unit; } static int fixed_mtrr_seg_end_range_index(int seg) { struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg]; int n; n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift; return mtrr_seg->range_start + n - 1; } static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end) { int seg, unit; if (!fixed_msr_to_seg_unit(msr, &seg, &unit)) return false; fixed_mtrr_seg_unit_range(seg, unit, start, end); return true; } static int fixed_msr_to_range_index(u32 msr) { int seg, unit; if (!fixed_msr_to_seg_unit(msr, &seg, &unit)) return -1; return fixed_mtrr_seg_unit_range_index(seg, unit); } static int fixed_mtrr_addr_to_seg(u64 addr) { struct fixed_mtrr_segment *mtrr_seg; int seg, seg_num = ARRAY_SIZE(fixed_seg_table); for (seg = 0; seg < seg_num; seg++) { mtrr_seg = &fixed_seg_table[seg]; if (mtrr_seg->start <= addr && addr < mtrr_seg->end) return seg; } return -1; } static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg) { struct fixed_mtrr_segment *mtrr_seg; int index; mtrr_seg = &fixed_seg_table[seg]; index = mtrr_seg->range_start; index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift; return index; } static u64 fixed_mtrr_range_end_addr(int seg, int index) { struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg]; int pos = index - mtrr_seg->range_start; return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift); } static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end) { u64 mask; *start = range->base & PAGE_MASK; mask = range->mask & PAGE_MASK; /* This cannot overflow because writing to the reserved bits of * variable MTRRs causes a #GP. 
*/ *end = (*start | ~mask) + 1; } static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr) { struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state; gfn_t start, end; int index; if (msr == MSR_IA32_CR_PAT || !tdp_enabled || !kvm_arch_has_noncoherent_dma(vcpu->kvm)) return; if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType) return; /* fixed MTRRs. */ if (fixed_msr_to_range(msr, &start, &end)) { if (!fixed_mtrr_is_enabled(mtrr_state)) return; } else if (msr == MSR_MTRRdefType) { start = 0x0; end = ~0ULL; } else { /* variable range MTRRs. */ index = (msr - 0x200) / 2; var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end); } kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end)); } static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range) { return (range->mask & (1 << 11)) != 0; } static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state; struct kvm_mtrr_range *tmp, *cur; int index, is_mtrr_mask; index = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * index; cur = &mtrr_state->var_ranges[index]; /* remove the entry if it's in the list. */ if (var_mtrr_range_is_valid(cur)) list_del(&mtrr_state->var_ranges[index].node); /* Extend the mask with all 1 bits to the left, since those * bits must implicitly be 0. The bits are then cleared * when reading them. */ if (!is_mtrr_mask) cur->base = data; else cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu)); /* add it to the list if it's enabled. 
*/ if (var_mtrr_range_is_valid(cur)) { list_for_each_entry(tmp, &mtrr_state->head, node) if (cur->base >= tmp->base) break; list_add_tail(&cur->node, &tmp->node); } } int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { int index; if (!kvm_mtrr_valid(vcpu, msr, data)) return 1; index = fixed_msr_to_range_index(msr); if (index >= 0) *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data; else if (msr == MSR_MTRRdefType) vcpu->arch.mtrr_state.deftype = data; else if (msr == MSR_IA32_CR_PAT) vcpu->arch.pat = data; else set_var_mtrr_msr(vcpu, msr, data); update_mtrr(vcpu, msr); return 0; } int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { int index; /* MSR_MTRRcap is a readonly MSR. */ if (msr == MSR_MTRRcap) { /* * SMRR = 0 * WC = 1 * FIX = 1 * VCNT = KVM_NR_VAR_MTRR */ *pdata = 0x500 | KVM_NR_VAR_MTRR; return 0; } if (!msr_mtrr_valid(msr)) return 1; index = fixed_msr_to_range_index(msr); if (index >= 0) *pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index]; else if (msr == MSR_MTRRdefType) *pdata = vcpu->arch.mtrr_state.deftype; else if (msr == MSR_IA32_CR_PAT) *pdata = vcpu->arch.pat; else { /* Variable MTRRs */ int is_mtrr_mask; index = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * index; if (!is_mtrr_mask) *pdata = vcpu->arch.mtrr_state.var_ranges[index].base; else *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask; *pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1; } return 0; } void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu) { INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head); } struct mtrr_iter { /* input fields. */ struct kvm_mtrr *mtrr_state; u64 start; u64 end; /* output fields. */ int mem_type; /* mtrr is completely disabled? */ bool mtrr_disabled; /* [start, end) is not fully covered in MTRRs? */ bool partial_map; /* private fields. */ union { /* used for fixed MTRRs. */ struct { int index; int seg; }; /* used for var MTRRs. */ struct { struct kvm_mtrr_range *range; /* max address has been covered in var MTRRs. 
*/ u64 start_max; }; }; bool fixed; }; static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter) { int seg, index; if (!fixed_mtrr_is_enabled(iter->mtrr_state)) return false; seg = fixed_mtrr_addr_to_seg(iter->start); if (seg < 0) return false; iter->fixed = true; index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg); iter->index = index; iter->seg = seg; return true; } static bool match_var_range(struct mtrr_iter *iter, struct kvm_mtrr_range *range) { u64 start, end; var_mtrr_range(range, &start, &end); if (!(start >= iter->end || end <= iter->start)) { iter->range = range; /* * the function is called when we do kvm_mtrr.head walking. * Range has the minimum base address which interleaves * [looker->start_max, looker->end). */ iter->partial_map |= iter->start_max < start; /* update the max address has been covered. */ iter->start_max = max(iter->start_max, end); return true; } return false; } static void __mtrr_lookup_var_next(struct mtrr_iter *iter) { struct kvm_mtrr *mtrr_state = iter->mtrr_state; list_for_each_entry_continue(iter->range, &mtrr_state->head, node) if (match_var_range(iter, iter->range)) return; iter->range = NULL; iter->partial_map |= iter->start_max < iter->end; } static void mtrr_lookup_var_start(struct mtrr_iter *iter) { struct kvm_mtrr *mtrr_state = iter->mtrr_state; iter->fixed = false; iter->start_max = iter->start; iter->range = NULL; iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node); __mtrr_lookup_var_next(iter); } static void mtrr_lookup_fixed_next(struct mtrr_iter *iter) { /* terminate the lookup. */ if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) { iter->fixed = false; iter->range = NULL; return; } iter->index++; /* have looked up for all fixed MTRRs. */ if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges)) return mtrr_lookup_var_start(iter); /* switch to next segment. 
*/ if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg)) iter->seg++; } static void mtrr_lookup_var_next(struct mtrr_iter *iter) { __mtrr_lookup_var_next(iter); } static void mtrr_lookup_start(struct mtrr_iter *iter) { if (!mtrr_is_enabled(iter->mtrr_state)) { iter->mtrr_disabled = true; return; } if (!mtrr_lookup_fixed_start(iter)) mtrr_lookup_var_start(iter); } static void mtrr_lookup_init(struct mtrr_iter *iter, struct kvm_mtrr *mtrr_state, u64 start, u64 end) { iter->mtrr_state = mtrr_state; iter->start = start; iter->end = end; iter->mtrr_disabled = false; iter->partial_map = false; iter->fixed = false; iter->range = NULL; mtrr_lookup_start(iter); } static bool mtrr_lookup_okay(struct mtrr_iter *iter) { if (iter->fixed) { iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index]; return true; } if (iter->range) { iter->mem_type = iter->range->base & 0xff; return true; } return false; } static void mtrr_lookup_next(struct mtrr_iter *iter) { if (iter->fixed) mtrr_lookup_fixed_next(iter); else mtrr_lookup_var_next(iter); } #define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \ for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \ mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_)) u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) { struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state; struct mtrr_iter iter; u64 start, end; int type = -1; const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK) | (1 << MTRR_TYPE_WRTHROUGH); start = gfn_to_gpa(gfn); end = start + PAGE_SIZE; mtrr_for_each_mem_type(&iter, mtrr_state, start, end) { int curr_type = iter.mem_type; /* * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR * Precedences. */ if (type == -1) { type = curr_type; continue; } /* * If two or more variable memory ranges match and the * memory types are identical, then that memory type is * used. 
*/ if (type == curr_type) continue; /* * If two or more variable memory ranges match and one of * the memory types is UC, the UC memory type used. */ if (curr_type == MTRR_TYPE_UNCACHABLE) return MTRR_TYPE_UNCACHABLE; /* * If two or more variable memory ranges match and the * memory types are WT and WB, the WT memory type is used. */ if (((1 << type) & wt_wb_mask) && ((1 << curr_type) & wt_wb_mask)) { type = MTRR_TYPE_WRTHROUGH; continue; } /* * For overlaps not defined by the above rules, processor * behavior is undefined. */ /* We use WB for this undefined behavior. :( */ return MTRR_TYPE_WRBACK; } if (iter.mtrr_disabled) return mtrr_disabled_type(vcpu); /* not contained in any MTRRs. */ if (type == -1) return mtrr_default_type(mtrr_state); /* * We just check one page, partially covered by MTRRs is * impossible. */ WARN_ON(iter.partial_map); return type; } EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int page_num) { struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state; struct mtrr_iter iter; u64 start, end; int type = -1; start = gfn_to_gpa(gfn); end = gfn_to_gpa(gfn + page_num); mtrr_for_each_mem_type(&iter, mtrr_state, start, end) { if (type == -1) { type = iter.mem_type; continue; } if (type != iter.mem_type) return false; } if (iter.mtrr_disabled) return true; if (!iter.partial_map) return true; if (type == -1) return true; return type == mtrr_default_type(mtrr_state); }
gpl-2.0
RidaShamasneh/nethunter_kernel_g5
drivers/mtd/tests/nandbiterrs.c
538
10406
/* * Copyright © 2012 NetCommWireless * Iwo Mergler <Iwo.Mergler@netcommwireless.com.au> * * Test for multi-bit error recovery on a NAND page This mostly tests the * ECC controller / driver. * * There are two test modes: * * 0 - artificially inserting bit errors until the ECC fails * This is the default method and fairly quick. It should * be independent of the quality of the FLASH. * * 1 - re-writing the same pattern repeatedly until the ECC fails. * This method relies on the physics of NAND FLASH to eventually * generate '0' bits if '1' has been written sufficient times. * Depending on the NAND, the first bit errors will appear after * 1000 or more writes and then will usually snowball, reaching the * limits of the ECC quickly. * * The test stops after 10000 cycles, should your FLASH be * exceptionally good and not generate bit errors before that. Try * a different page in that case. * * Please note that neither of these tests will significantly 'use up' any * FLASH endurance. Only a maximum of two erase operations will be performed. * * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; see the file COPYING. If not, write to the Free Software * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/mtd/mtd.h> #include <linux/err.h> #include <linux/mtd/nand.h> #include <linux/slab.h> #include "mtd_test.h" static int dev; module_param(dev, int, S_IRUGO); MODULE_PARM_DESC(dev, "MTD device number to use"); static unsigned page_offset; module_param(page_offset, uint, S_IRUGO); MODULE_PARM_DESC(page_offset, "Page number relative to dev start"); static unsigned seed; module_param(seed, uint, S_IRUGO); MODULE_PARM_DESC(seed, "Random seed"); static int mode; module_param(mode, int, S_IRUGO); MODULE_PARM_DESC(mode, "0=incremental errors, 1=overwrite test"); static unsigned max_overwrite = 10000; static loff_t offset; /* Offset of the page we're using. */ static unsigned eraseblock; /* Eraseblock number for our page. */ /* We assume that the ECC can correct up to a certain number * of biterrors per subpage. */ static unsigned subsize; /* Size of subpages */ static unsigned subcount; /* Number of subpages per page */ static struct mtd_info *mtd; /* MTD device */ static uint8_t *wbuffer; /* One page write / compare buffer */ static uint8_t *rbuffer; /* One page read buffer */ /* 'random' bytes from known offsets */ static uint8_t hash(unsigned offset) { unsigned v = offset; unsigned char c; v ^= 0x7f7edfd3; v = v ^ (v >> 3); v = v ^ (v >> 5); v = v ^ (v >> 13); c = v & 0xFF; /* Reverse bits of result. */ c = (c & 0x0F) << 4 | (c & 0xF0) >> 4; c = (c & 0x33) << 2 | (c & 0xCC) >> 2; c = (c & 0x55) << 1 | (c & 0xAA) >> 1; return c; } /* Writes wbuffer to page */ static int write_page(int log) { if (log) pr_info("write_page\n"); return mtdtest_write(mtd, offset, mtd->writesize, wbuffer); } /* Re-writes the data area while leaving the OOB alone. 
*/ static int rewrite_page(int log) { int err = 0; struct mtd_oob_ops ops; if (log) pr_info("rewrite page\n"); ops.mode = MTD_OPS_RAW; /* No ECC */ ops.len = mtd->writesize; ops.retlen = 0; ops.ooblen = 0; ops.oobretlen = 0; ops.ooboffs = 0; ops.datbuf = wbuffer; ops.oobbuf = NULL; err = mtd_write_oob(mtd, offset, &ops); if (err || ops.retlen != mtd->writesize) { pr_err("error: write_oob failed (%d)\n", err); if (!err) err = -EIO; } return err; } /* Reads page into rbuffer. Returns number of corrected bit errors (>=0) * or error (<0) */ static int read_page(int log) { int err = 0; size_t read; struct mtd_ecc_stats oldstats; if (log) pr_info("read_page\n"); /* Saving last mtd stats */ memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats)); err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer); if (err == -EUCLEAN) err = mtd->ecc_stats.corrected - oldstats.corrected; if (err < 0 || read != mtd->writesize) { pr_err("error: read failed at %#llx\n", (long long)offset); if (err >= 0) err = -EIO; } return err; } /* Verifies rbuffer against random sequence */ static int verify_page(int log) { unsigned i, errs = 0; if (log) pr_info("verify_page\n"); for (i = 0; i < mtd->writesize; i++) { if (rbuffer[i] != hash(i+seed)) { pr_err("Error: page offset %u, expected %02x, got %02x\n", i, hash(i+seed), rbuffer[i]); errs++; } } if (errs) return -EIO; else return 0; } #define CBIT(v, n) ((v) & (1 << (n))) #define BCLR(v, n) ((v) = (v) & ~(1 << (n))) /* Finds the first '1' bit in wbuffer starting at offset 'byte' * and sets it to '0'. */ static int insert_biterror(unsigned byte) { int bit; while (byte < mtd->writesize) { for (bit = 7; bit >= 0; bit--) { if (CBIT(wbuffer[byte], bit)) { BCLR(wbuffer[byte], bit); pr_info("Inserted biterror @ %u/%u\n", byte, bit); return 0; } } byte++; } pr_err("biterror: Failed to find a '1' bit\n"); return -EIO; } /* Writes 'random' data to page and then introduces deliberate bit * errors into the page, while verifying each step. 
*/ static int incremental_errors_test(void) { int err = 0; unsigned i; unsigned errs_per_subpage = 0; pr_info("incremental biterrors test\n"); for (i = 0; i < mtd->writesize; i++) wbuffer[i] = hash(i+seed); err = write_page(1); if (err) goto exit; while (1) { err = rewrite_page(1); if (err) goto exit; err = read_page(1); if (err > 0) pr_info("Read reported %d corrected bit errors\n", err); if (err < 0) { pr_err("After %d biterrors per subpage, read reported error %d\n", errs_per_subpage, err); err = 0; goto exit; } err = verify_page(1); if (err) { pr_err("ECC failure, read data is incorrect despite read success\n"); goto exit; } pr_info("Successfully corrected %d bit errors per subpage\n", errs_per_subpage); for (i = 0; i < subcount; i++) { err = insert_biterror(i * subsize); if (err < 0) goto exit; } errs_per_subpage++; } exit: return err; } /* Writes 'random' data to page and then re-writes that same data repeatedly. This eventually develops bit errors (bits written as '1' will slowly become '0'), which are corrected as far as the ECC is capable of. */ static int overwrite_test(void) { int err = 0; unsigned i; unsigned max_corrected = 0; unsigned opno = 0; /* We don't expect more than this many correctable bit errors per * page. */ #define MAXBITS 512 static unsigned bitstats[MAXBITS]; /* bit error histogram. 
*/ memset(bitstats, 0, sizeof(bitstats)); pr_info("overwrite biterrors test\n"); for (i = 0; i < mtd->writesize; i++) wbuffer[i] = hash(i+seed); err = write_page(1); if (err) goto exit; while (opno < max_overwrite) { err = rewrite_page(0); if (err) break; err = read_page(0); if (err >= 0) { if (err >= MAXBITS) { pr_info("Implausible number of bit errors corrected\n"); err = -EIO; break; } bitstats[err]++; if (err > max_corrected) { max_corrected = err; pr_info("Read reported %d corrected bit errors\n", err); } } else { /* err < 0 */ pr_info("Read reported error %d\n", err); err = 0; break; } err = verify_page(0); if (err) { bitstats[max_corrected] = opno; pr_info("ECC failure, read data is incorrect despite read success\n"); break; } opno++; } /* At this point bitstats[0] contains the number of ops with no bit * errors, bitstats[1] the number of ops with 1 bit error, etc. */ pr_info("Bit error histogram (%d operations total):\n", opno); for (i = 0; i < max_corrected; i++) pr_info("Page reads with %3d corrected bit errors: %d\n", i, bitstats[i]); exit: return err; } static int __init mtd_nandbiterrs_init(void) { int err = 0; printk("\n"); printk(KERN_INFO "==================================================\n"); pr_info("MTD device: %d\n", dev); mtd = get_mtd_device(NULL, dev); if (IS_ERR(mtd)) { err = PTR_ERR(mtd); pr_err("error: cannot get MTD device\n"); goto exit_mtddev; } if (!mtd_type_is_nand(mtd)) { pr_info("this test requires NAND flash\n"); err = -ENODEV; goto exit_nand; } pr_info("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n", (unsigned long long)mtd->size, mtd->erasesize, mtd->writesize, mtd->oobsize); subsize = mtd->writesize >> mtd->subpage_sft; subcount = mtd->writesize / subsize; pr_info("Device uses %d subpages of %d bytes\n", subcount, subsize); offset = (loff_t)page_offset * mtd->writesize; eraseblock = mtd_div_by_eb(offset, mtd); pr_info("Using page=%u, offset=%llu, eraseblock=%u\n", page_offset, offset, eraseblock); wbuffer = 
kmalloc(mtd->writesize, GFP_KERNEL); if (!wbuffer) { err = -ENOMEM; goto exit_wbuffer; } rbuffer = kmalloc(mtd->writesize, GFP_KERNEL); if (!rbuffer) { err = -ENOMEM; goto exit_rbuffer; } err = mtdtest_erase_eraseblock(mtd, eraseblock); if (err) goto exit_error; if (mode == 0) err = incremental_errors_test(); else err = overwrite_test(); if (err) goto exit_error; /* We leave the block un-erased in case of test failure. */ err = mtdtest_erase_eraseblock(mtd, eraseblock); if (err) goto exit_error; err = -EIO; pr_info("finished successfully.\n"); printk(KERN_INFO "==================================================\n"); exit_error: kfree(rbuffer); exit_rbuffer: kfree(wbuffer); exit_wbuffer: /* Nothing */ exit_nand: put_mtd_device(mtd); exit_mtddev: return err; } static void __exit mtd_nandbiterrs_exit(void) { return; } module_init(mtd_nandbiterrs_init); module_exit(mtd_nandbiterrs_exit); MODULE_DESCRIPTION("NAND bit error recovery test"); MODULE_AUTHOR("Iwo Mergler"); MODULE_LICENSE("GPL");
gpl-2.0
cooldroid/android_kernel_oneplus_msm8974
drivers/platform/msm/ipa/ipa_client.c
1818
12264
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/delay.h> #include "ipa_i.h" /* * These values were determined empirically and shows good E2E bi- * directional throughputs */ #define IPA_A2_HOLB_TMR_EN 0x1 #define IPA_A2_HOLB_TMR_DEFAULT_VAL 0x1ff #define IPA_PKT_FLUSH_TO_US 100 static void ipa_enable_data_path(u32 clnt_hdl) { if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_VIRTUAL) { /* IPA_HW_MODE_VIRTUAL lacks support for TAG IC & EP suspend */ return; } if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_CTRL_n_OFST(clnt_hdl), 0); } static int ipa_disable_data_path(u32 clnt_hdl) { struct ipa_ep_context *ep = &ipa_ctx->ep[clnt_hdl]; if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_VIRTUAL) { /* IPA_HW_MODE_VIRTUAL lacks support for TAG IC & EP suspend */ return 0; } if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) { ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_CTRL_n_OFST(clnt_hdl), 1); udelay(IPA_PKT_FLUSH_TO_US); if (IPA_CLIENT_IS_CONS(ep->client) && ep->cfg.aggr.aggr_en == IPA_ENABLE_AGGR && ep->cfg.aggr.aggr_time_limit) msleep(ep->cfg.aggr.aggr_time_limit); } return 0; } static int ipa_connect_configure_sps(const struct ipa_connect_params *in, struct ipa_ep_context *ep, int ipa_ep_idx) { int result = -EFAULT; /* Default Config */ ep->ep_hdl = sps_alloc_endpoint(); if (ep->ep_hdl == NULL) { IPAERR("SPS EP alloc failed EP.\n"); return -EFAULT; } result = sps_get_config(ep->ep_hdl, &ep->connect); if (result) { IPAERR("fail to get config.\n"); return -EFAULT; } /* Specific 
Config */ if (IPA_CLIENT_IS_CONS(in->client)) { ep->connect.mode = SPS_MODE_SRC; ep->connect.destination = in->client_bam_hdl; ep->connect.source = ipa_ctx->bam_handle; ep->connect.dest_pipe_index = in->client_ep_idx; ep->connect.src_pipe_index = ipa_ep_idx; } else { ep->connect.mode = SPS_MODE_DEST; ep->connect.source = in->client_bam_hdl; ep->connect.destination = ipa_ctx->bam_handle; ep->connect.src_pipe_index = in->client_ep_idx; ep->connect.dest_pipe_index = ipa_ep_idx; } return 0; } static int ipa_connect_allocate_fifo(const struct ipa_connect_params *in, struct sps_mem_buffer *mem_buff_ptr, bool *fifo_in_pipe_mem_ptr, u32 *fifo_pipe_mem_ofst_ptr, u32 fifo_size, int ipa_ep_idx) { dma_addr_t dma_addr; u32 ofst; int result = -EFAULT; mem_buff_ptr->size = fifo_size; if (in->pipe_mem_preferred) { if (ipa_pipe_mem_alloc(&ofst, fifo_size)) { IPAERR("FIFO pipe mem alloc fail ep %u\n", ipa_ep_idx); mem_buff_ptr->base = dma_alloc_coherent(NULL, mem_buff_ptr->size, &dma_addr, GFP_KERNEL); } else { memset(mem_buff_ptr, 0, sizeof(struct sps_mem_buffer)); result = sps_setup_bam2bam_fifo(mem_buff_ptr, ofst, fifo_size, 1); WARN_ON(result); *fifo_in_pipe_mem_ptr = 1; dma_addr = mem_buff_ptr->phys_base; *fifo_pipe_mem_ofst_ptr = ofst; } } else { mem_buff_ptr->base = dma_alloc_coherent(NULL, mem_buff_ptr->size, &dma_addr, GFP_KERNEL); } mem_buff_ptr->phys_base = dma_addr; if (mem_buff_ptr->base == NULL) { IPAERR("fail to get DMA memory.\n"); return -EFAULT; } return 0; } static void ipa_program_holb(struct ipa_ep_context *ep, int ipa_ep_idx) { struct ipa_ep_cfg_holb holb; if (IPA_CLIENT_IS_PROD(ep->client)) return; switch (ep->client) { case IPA_CLIENT_A2_TETHERED_CONS: case IPA_CLIENT_A2_EMBEDDED_CONS: holb.en = IPA_A2_HOLB_TMR_EN; holb.tmr_val = IPA_A2_HOLB_TMR_DEFAULT_VAL; break; default: return; } ipa_cfg_ep_holb(ipa_ep_idx, &holb); } /** * ipa_connect() - low-level IPA client connect * @in: [in] input parameters from client * @sps: [out] sps output from IPA needed by 
client for sps_connect * @clnt_hdl: [out] opaque client handle assigned by IPA to client * * Should be called by the driver of the peripheral that wants to connect to * IPA in BAM-BAM mode. these peripherals are A2, USB and HSIC. this api * expects caller to take responsibility to add any needed headers, routing * and filtering tables and rules as needed. * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps, u32 *clnt_hdl) { int ipa_ep_idx; int result = -EFAULT; struct ipa_ep_context *ep; ipa_inc_client_enable_clks(); if (in == NULL || sps == NULL || clnt_hdl == NULL || in->client >= IPA_CLIENT_MAX || in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) { IPAERR("bad parm.\n"); result = -EINVAL; goto fail; } ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, in->client); if (ipa_ep_idx == -1) { IPAERR("fail to alloc EP.\n"); goto fail; } ep = &ipa_ctx->ep[ipa_ep_idx]; if (ep->valid) { IPAERR("EP already allocated.\n"); goto fail; } memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context)); ipa_enable_data_path(ipa_ep_idx); ep->valid = 1; ep->client = in->client; ep->client_notify = in->notify; ep->priv = in->priv; if (ipa_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) { IPAERR("fail to configure EP.\n"); goto ipa_cfg_ep_fail; } result = ipa_connect_configure_sps(in, ep, ipa_ep_idx); if (result) { IPAERR("fail to configure SPS.\n"); goto ipa_cfg_ep_fail; } if (in->desc.base == NULL) { result = ipa_connect_allocate_fifo(in, &ep->connect.desc, &ep->desc_fifo_in_pipe_mem, &ep->desc_fifo_pipe_mem_ofst, in->desc_fifo_sz, ipa_ep_idx); if (result) { IPAERR("fail to allocate DESC FIFO.\n"); goto desc_mem_alloc_fail; } } else { IPADBG("client allocated DESC FIFO\n"); ep->connect.desc = in->desc; ep->desc_fifo_client_allocated = 1; } IPADBG("Descriptor FIFO pa=0x%x, size=%d\n", ep->connect.desc.phys_base, ep->connect.desc.size); if (in->data.base == NULL) { 
result = ipa_connect_allocate_fifo(in, &ep->connect.data, &ep->data_fifo_in_pipe_mem, &ep->data_fifo_pipe_mem_ofst, in->data_fifo_sz, ipa_ep_idx); if (result) { IPAERR("fail to allocate DATA FIFO.\n"); goto data_mem_alloc_fail; } } else { IPADBG("client allocated DATA FIFO\n"); ep->connect.data = in->data; ep->data_fifo_client_allocated = 1; } IPADBG("Data FIFO pa=0x%x, size=%d\n", ep->connect.data.phys_base, ep->connect.data.size); ep->connect.event_thresh = IPA_EVENT_THRESHOLD; ep->connect.options = SPS_O_AUTO_ENABLE; /* BAM-to-BAM */ if (IPA_CLIENT_IS_CONS(in->client)) ep->connect.options |= SPS_O_NO_DISABLE; result = sps_connect(ep->ep_hdl, &ep->connect); if (result) { IPAERR("sps_connect fails.\n"); goto sps_connect_fail; } sps->ipa_bam_hdl = ipa_ctx->bam_handle; sps->ipa_ep_idx = ipa_ep_idx; *clnt_hdl = ipa_ep_idx; memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer)); memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer)); ipa_program_holb(ep, ipa_ep_idx); IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx); return 0; sps_connect_fail: if (!ep->data_fifo_in_pipe_mem) dma_free_coherent(NULL, ep->connect.data.size, ep->connect.data.base, ep->connect.data.phys_base); else ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst, ep->connect.data.size); data_mem_alloc_fail: if (!ep->desc_fifo_in_pipe_mem) dma_free_coherent(NULL, ep->connect.desc.size, ep->connect.desc.base, ep->connect.desc.phys_base); else ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst, ep->connect.desc.size); desc_mem_alloc_fail: sps_free_endpoint(ep->ep_hdl); ipa_cfg_ep_fail: memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context)); fail: ipa_dec_client_disable_clks(); return result; } EXPORT_SYMBOL(ipa_connect); /** * ipa_disconnect() - low-level IPA client disconnect * @clnt_hdl: [in] opaque client handle assigned by IPA to client * * Should be called by the driver of the peripheral that wants to disconnect * from IPA in BAM-BAM mode. 
this api expects caller to take responsibility to * free any needed headers, routing and filtering tables and rules as needed. * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa_disconnect(u32 clnt_hdl) { int result; struct ipa_ep_context *ep; if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) { IPAERR("bad parm.\n"); return -EINVAL; } ep = &ipa_ctx->ep[clnt_hdl]; if (ep->suspended) { ipa_inc_client_enable_clks(); ep->suspended = false; } result = ipa_disable_data_path(clnt_hdl); if (result) { IPAERR("disable data path failed res=%d clnt=%d.\n", result, clnt_hdl); return -EPERM; } result = sps_disconnect(ep->ep_hdl); if (result) { IPAERR("SPS disconnect failed.\n"); return -EPERM; } if (!ep->desc_fifo_client_allocated && ep->connect.desc.base) { if (!ep->desc_fifo_in_pipe_mem) dma_free_coherent(NULL, ep->connect.desc.size, ep->connect.desc.base, ep->connect.desc.phys_base); else ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst, ep->connect.desc.size); } if (!ep->data_fifo_client_allocated && ep->connect.data.base) { if (!ep->data_fifo_in_pipe_mem) dma_free_coherent(NULL, ep->connect.data.size, ep->connect.data.base, ep->connect.data.phys_base); else ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst, ep->connect.data.size); } result = sps_free_endpoint(ep->ep_hdl); if (result) { IPAERR("SPS de-alloc EP failed.\n"); return -EPERM; } memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context)); ipa_dec_client_disable_clks(); IPADBG("client (ep: %d) disconnected\n", clnt_hdl); return 0; } EXPORT_SYMBOL(ipa_disconnect); /** * ipa_resume() - low-level IPA client resume * @clnt_hdl: [in] opaque client handle assigned by IPA to client * * Should be called by the driver of the peripheral that wants to resume IPA * connection. Resume IPA connection results in turning on IPA clocks in * case they were off as a result of suspend. * this api can be called only if a call to ipa_suspend() was * made. 
* * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa_resume(u32 clnt_hdl) { struct ipa_ep_context *ep; if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) { IPAERR("bad parm. clnt_hdl %d\n", clnt_hdl); return -EINVAL; } ep = &ipa_ctx->ep[clnt_hdl]; if (!ep->suspended) { IPAERR("EP not suspended. clnt_hdl %d\n", clnt_hdl); return -EPERM; } ipa_inc_client_enable_clks(); ep->suspended = false; return 0; } EXPORT_SYMBOL(ipa_resume); /** * ipa_suspend() - low-level IPA client suspend * @clnt_hdl: [in] opaque client handle assigned by IPA to client * * Should be called by the driver of the peripheral that wants to suspend IPA * connection. Suspend IPA connection results in turning off IPA clocks in * case that there is no active clients using IPA. Pipes remains connected in * case of suspend. * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa_suspend(u32 clnt_hdl) { struct ipa_ep_context *ep; if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) { IPAERR("bad parm. clnt_hdl %d\n", clnt_hdl); return -EINVAL; } ep = &ipa_ctx->ep[clnt_hdl]; if (ep->suspended) { IPAERR("EP already suspended. clnt_hdl %d\n", clnt_hdl); return -EPERM; } if (IPA_CLIENT_IS_CONS(ep->client) && ep->cfg.aggr.aggr_en == IPA_ENABLE_AGGR && ep->cfg.aggr.aggr_time_limit) msleep(ep->cfg.aggr.aggr_time_limit); ipa_dec_client_disable_clks(); ep->suspended = true; return 0; } EXPORT_SYMBOL(ipa_suspend);
gpl-2.0
tsuibin/linux-4.x.y
drivers/isdn/hisax/elsa.c
1818
34443
/* $Id: elsa.c,v 2.32.2.4 2004/01/24 20:47:21 keil Exp $ * * low level stuff for Elsa isdn cards * * Author Karsten Keil * Copyright by Karsten Keil <keil@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * For changes and modifications please read * Documentation/isdn/HiSax.cert * * Thanks to Elsa GmbH for documents and information * * Klaus Lichtenwalder (Klaus.Lichtenwalder@WebForum.DE) * for ELSA PCMCIA support * */ #include <linux/init.h> #include <linux/slab.h> #include "hisax.h" #include "arcofi.h" #include "isac.h" #include "ipac.h" #include "hscx.h" #include "isdnl1.h" #include <linux/pci.h> #include <linux/isapnp.h> #include <linux/serial.h> #include <linux/serial_reg.h> static const char *Elsa_revision = "$Revision: 2.32.2.4 $"; static const char *Elsa_Types[] = {"None", "PC", "PCC-8", "PCC-16", "PCF", "PCF-Pro", "PCMCIA", "QS 1000", "QS 3000", "Microlink PCI", "QS 3000 PCI", "PCMCIA-IPAC" }; static const char *ITACVer[] = {"?0?", "?1?", "?2?", "?3?", "?4?", "V2.2", "B1", "A1"}; #define byteout(addr, val) outb(val, addr) #define bytein(addr) inb(addr) #define ELSA_ISAC 0 #define ELSA_ISAC_PCM 1 #define ELSA_ITAC 1 #define ELSA_HSCX 2 #define ELSA_ALE 3 #define ELSA_ALE_PCM 4 #define ELSA_CONTROL 4 #define ELSA_CONFIG 5 #define ELSA_START_TIMER 6 #define ELSA_TRIG_IRQ 7 #define ELSA_PC 1 #define ELSA_PCC8 2 #define ELSA_PCC16 3 #define ELSA_PCF 4 #define ELSA_PCFPRO 5 #define ELSA_PCMCIA 6 #define ELSA_QS1000 7 #define ELSA_QS3000 8 #define ELSA_QS1000PCI 9 #define ELSA_QS3000PCI 10 #define ELSA_PCMCIA_IPAC 11 /* PCI stuff */ #define ELSA_PCI_IRQ_MASK 0x04 /* ITAC Registeradressen (only Microlink PC) */ #define ITAC_SYS 0x34 #define ITAC_ISEN 0x48 #define ITAC_RFIE 0x4A #define ITAC_XFIE 0x4C #define ITAC_SCIE 0x4E #define ITAC_STIE 0x46 /*** *** *** Makros als Befehle fuer die Kartenregister *** *** (mehrere Befehle werden durch Bit-Oderung 
kombiniert) *** *** ***/ /* Config-Register (Read) */ #define ELIRQF_TIMER_RUN 0x02 /* Bit 1 des Config-Reg */ #define ELIRQF_TIMER_RUN_PCC8 0x01 /* Bit 0 des Config-Reg bei PCC */ #define ELSA_IRQ_IDX 0x38 /* Bit 3,4,5 des Config-Reg */ #define ELSA_IRQ_IDX_PCC8 0x30 /* Bit 4,5 des Config-Reg */ #define ELSA_IRQ_IDX_PC 0x0c /* Bit 2,3 des Config-Reg */ /* Control-Register (Write) */ #define ELSA_LINE_LED 0x02 /* Bit 1 Gelbe LED */ #define ELSA_STAT_LED 0x08 /* Bit 3 Gruene LED */ #define ELSA_ISDN_RESET 0x20 /* Bit 5 Reset-Leitung */ #define ELSA_ENA_TIMER_INT 0x80 /* Bit 7 Freigabe Timer Interrupt */ /* ALE-Register (Read) */ #define ELSA_HW_RELEASE 0x07 /* Bit 0-2 Hardwarerkennung */ #define ELSA_S0_POWER_BAD 0x08 /* Bit 3 S0-Bus Spannung fehlt */ /* Status Flags */ #define ELIRQF_TIMER_AKTIV 1 #define ELSA_BAD_PWR 2 #define ELSA_ASSIGN 4 #define RS_ISR_PASS_LIMIT 256 #define FLG_MODEM_ACTIVE 1 /* IPAC AUX */ #define ELSA_IPAC_LINE_LED 0x40 /* Bit 6 Gelbe LED */ #define ELSA_IPAC_STAT_LED 0x80 /* Bit 7 Gruene LED */ #if ARCOFI_USE static struct arcofi_msg ARCOFI_XOP_F = {NULL,0,2,{0xa1,0x3f,0,0,0,0,0,0,0,0}}; /* Normal OP */ static struct arcofi_msg ARCOFI_XOP_1 = {&ARCOFI_XOP_F,0,2,{0xa1,0x31,0,0,0,0,0,0,0,0}}; /* PWR UP */ static struct arcofi_msg ARCOFI_SOP_F = {&ARCOFI_XOP_1,0,10,{0xa1,0x1f,0x00,0x50,0x10,0x00,0x00,0x80,0x02,0x12}}; static struct arcofi_msg ARCOFI_COP_9 = {&ARCOFI_SOP_F,0,10,{0xa1,0x29,0x80,0xcb,0xe9,0x88,0x00,0xc8,0xd8,0x80}}; /* RX */ static struct arcofi_msg ARCOFI_COP_8 = {&ARCOFI_COP_9,0,10,{0xa1,0x28,0x49,0x31,0x8,0x13,0x6e,0x88,0x2a,0x61}}; /* TX */ static struct arcofi_msg ARCOFI_COP_7 = {&ARCOFI_COP_8,0,4,{0xa1,0x27,0x80,0x80,0,0,0,0,0,0}}; /* GZ */ static struct arcofi_msg ARCOFI_COP_6 = {&ARCOFI_COP_7,0,6,{0xa1,0x26,0,0,0x82,0x7c,0,0,0,0}}; /* GRL GRH */ static struct arcofi_msg ARCOFI_COP_5 = {&ARCOFI_COP_6,0,4,{0xa1,0x25,0xbb,0x4a,0,0,0,0,0,0}}; /* GTX */ static struct arcofi_msg ARCOFI_VERSION = 
{NULL,1,2,{0xa0,0,0,0,0,0,0,0,0,0}}; static struct arcofi_msg ARCOFI_XOP_0 = {NULL,0,2,{0xa1,0x30,0,0,0,0,0,0,0,0}}; /* PWR Down */ static void set_arcofi(struct IsdnCardState *cs, int bc); #include "elsa_ser.c" #endif /* ARCOFI_USE */ static inline u_char readreg(unsigned int ale, unsigned int adr, u_char off) { register u_char ret; byteout(ale, off); ret = bytein(adr); return (ret); } static inline void readfifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size) { byteout(ale, off); insb(adr, data, size); } static inline void writereg(unsigned int ale, unsigned int adr, u_char off, u_char data) { byteout(ale, off); byteout(adr, data); } static inline void writefifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size) { byteout(ale, off); outsb(adr, data, size); } /* Interface functions */ static u_char ReadISAC(struct IsdnCardState *cs, u_char offset) { return (readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, offset)); } static void WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value) { writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, offset, value); } static void ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size) { readfifo(cs->hw.elsa.ale, cs->hw.elsa.isac, 0, data, size); } static void WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size) { writefifo(cs->hw.elsa.ale, cs->hw.elsa.isac, 0, data, size); } static u_char ReadISAC_IPAC(struct IsdnCardState *cs, u_char offset) { return (readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, offset + 0x80)); } static void WriteISAC_IPAC(struct IsdnCardState *cs, u_char offset, u_char value) { writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, offset | 0x80, value); } static void ReadISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size) { readfifo(cs->hw.elsa.ale, cs->hw.elsa.isac, 0x80, data, size); } static void WriteISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size) { writefifo(cs->hw.elsa.ale, cs->hw.elsa.isac, 0x80, data, size); } static u_char 
ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset) { return (readreg(cs->hw.elsa.ale, cs->hw.elsa.hscx, offset + (hscx ? 0x40 : 0))); } static void WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value) { writereg(cs->hw.elsa.ale, cs->hw.elsa.hscx, offset + (hscx ? 0x40 : 0), value); } static inline u_char readitac(struct IsdnCardState *cs, u_char off) { register u_char ret; byteout(cs->hw.elsa.ale, off); ret = bytein(cs->hw.elsa.itac); return (ret); } static inline void writeitac(struct IsdnCardState *cs, u_char off, u_char data) { byteout(cs->hw.elsa.ale, off); byteout(cs->hw.elsa.itac, data); } static inline int TimerRun(struct IsdnCardState *cs) { register u_char v; v = bytein(cs->hw.elsa.cfg); if ((cs->subtyp == ELSA_QS1000) || (cs->subtyp == ELSA_QS3000)) return (0 == (v & ELIRQF_TIMER_RUN)); else if (cs->subtyp == ELSA_PCC8) return (v & ELIRQF_TIMER_RUN_PCC8); return (v & ELIRQF_TIMER_RUN); } /* * fast interrupt HSCX stuff goes here */ #define READHSCX(cs, nr, reg) readreg(cs->hw.elsa.ale, \ cs->hw.elsa.hscx, reg + (nr ? 0x40 : 0)) #define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.elsa.ale, \ cs->hw.elsa.hscx, reg + (nr ? 0x40 : 0), data) #define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.elsa.ale, \ cs->hw.elsa.hscx, (nr ? 0x40 : 0), ptr, cnt) #define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.elsa.ale, \ cs->hw.elsa.hscx, (nr ? 0x40 : 0), ptr, cnt) #include "hscx_irq.c" static irqreturn_t elsa_interrupt(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_long flags; u_char val; int icnt = 5; if ((cs->typ == ISDN_CTYPE_ELSA_PCMCIA) && (*cs->busy_flag == 1)) { /* The card tends to generate interrupts while being removed causing us to just crash the kernel. bad. 
*/ printk(KERN_WARNING "Elsa: card not available!\n"); return IRQ_NONE; } spin_lock_irqsave(&cs->lock, flags); #if ARCOFI_USE if (cs->hw.elsa.MFlag) { val = serial_inp(cs, UART_IIR); if (!(val & UART_IIR_NO_INT)) { debugl1(cs, "IIR %02x", val); rs_interrupt_elsa(cs); } } #endif val = readreg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_ISTA + 0x40); Start_HSCX: if (val) { hscx_int_main(cs, val); } val = readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, ISAC_ISTA); Start_ISAC: if (val) { isac_interrupt(cs, val); } val = readreg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_ISTA + 0x40); if (val && icnt) { if (cs->debug & L1_DEB_HSCX) debugl1(cs, "HSCX IntStat after IntRoutine"); icnt--; goto Start_HSCX; } val = readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, ISAC_ISTA); if (val && icnt) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ISAC IntStat after IntRoutine"); icnt--; goto Start_ISAC; } if (!icnt) printk(KERN_WARNING"ELSA IRQ LOOP\n"); writereg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_MASK, 0xFF); writereg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_MASK + 0x40, 0xFF); writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, ISAC_MASK, 0xFF); if (cs->hw.elsa.status & ELIRQF_TIMER_AKTIV) { if (!TimerRun(cs)) { /* Timer Restart */ byteout(cs->hw.elsa.timer, 0); cs->hw.elsa.counter++; } } #if ARCOFI_USE if (cs->hw.elsa.MFlag) { val = serial_inp(cs, UART_MCR); val ^= 0x8; serial_outp(cs, UART_MCR, val); val = serial_inp(cs, UART_MCR); val ^= 0x8; serial_outp(cs, UART_MCR, val); } #endif if (cs->hw.elsa.trig) byteout(cs->hw.elsa.trig, 0x00); writereg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_MASK, 0x0); writereg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_MASK + 0x40, 0x0); writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, ISAC_MASK, 0x0); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static irqreturn_t elsa_interrupt_ipac(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_long flags; u_char ista, val; int icnt = 5; spin_lock_irqsave(&cs->lock, flags); if (cs->subtyp == ELSA_QS1000PCI || 
cs->subtyp == ELSA_QS3000PCI) { val = bytein(cs->hw.elsa.cfg + 0x4c); /* PCI IRQ */ if (!(val & ELSA_PCI_IRQ_MASK)) { spin_unlock_irqrestore(&cs->lock, flags); return IRQ_NONE; } } #if ARCOFI_USE if (cs->hw.elsa.MFlag) { val = serial_inp(cs, UART_IIR); if (!(val & UART_IIR_NO_INT)) { debugl1(cs, "IIR %02x", val); rs_interrupt_elsa(cs); } } #endif ista = readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ISTA); Start_IPAC: if (cs->debug & L1_DEB_IPAC) debugl1(cs, "IPAC ISTA %02X", ista); if (ista & 0x0f) { val = readreg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_ISTA + 0x40); if (ista & 0x01) val |= 0x01; if (ista & 0x04) val |= 0x02; if (ista & 0x08) val |= 0x04; if (val) hscx_int_main(cs, val); } if (ista & 0x20) { val = 0xfe & readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, ISAC_ISTA + 0x80); if (val) { isac_interrupt(cs, val); } } if (ista & 0x10) { val = 0x01; isac_interrupt(cs, val); } ista = readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ISTA); if ((ista & 0x3f) && icnt) { icnt--; goto Start_IPAC; } if (!icnt) printk(KERN_WARNING "ELSA IRQ LOOP\n"); writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_MASK, 0xFF); writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_MASK, 0xC0); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static void release_io_elsa(struct IsdnCardState *cs) { int bytecnt = 8; del_timer(&cs->hw.elsa.tl); #if ARCOFI_USE clear_arcofi(cs); #endif if (cs->hw.elsa.ctrl) byteout(cs->hw.elsa.ctrl, 0); /* LEDs Out */ if (cs->subtyp == ELSA_QS1000PCI) { byteout(cs->hw.elsa.cfg + 0x4c, 0x01); /* disable IRQ */ writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ATX, 0xff); bytecnt = 2; release_region(cs->hw.elsa.cfg, 0x80); } if (cs->subtyp == ELSA_QS3000PCI) { byteout(cs->hw.elsa.cfg + 0x4c, 0x03); /* disable ELSA PCI IRQ */ writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ATX, 0xff); release_region(cs->hw.elsa.cfg, 0x80); } if (cs->subtyp == ELSA_PCMCIA_IPAC) { writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ATX, 0xff); } if ((cs->subtyp == 
ELSA_PCFPRO) || (cs->subtyp == ELSA_QS3000) || (cs->subtyp == ELSA_PCF) || (cs->subtyp == ELSA_QS3000PCI)) { bytecnt = 16; #if ARCOFI_USE release_modem(cs); #endif } if (cs->hw.elsa.base) release_region(cs->hw.elsa.base, bytecnt); } static void reset_elsa(struct IsdnCardState *cs) { if (cs->hw.elsa.timer) { /* Wait 1 Timer */ byteout(cs->hw.elsa.timer, 0); while (TimerRun(cs)); cs->hw.elsa.ctrl_reg |= 0x50; cs->hw.elsa.ctrl_reg &= ~ELSA_ISDN_RESET; /* Reset On */ byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg); /* Wait 1 Timer */ byteout(cs->hw.elsa.timer, 0); while (TimerRun(cs)); cs->hw.elsa.ctrl_reg |= ELSA_ISDN_RESET; /* Reset Off */ byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg); /* Wait 1 Timer */ byteout(cs->hw.elsa.timer, 0); while (TimerRun(cs)); if (cs->hw.elsa.trig) byteout(cs->hw.elsa.trig, 0xff); } if ((cs->subtyp == ELSA_QS1000PCI) || (cs->subtyp == ELSA_QS3000PCI) || (cs->subtyp == ELSA_PCMCIA_IPAC)) { writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_POTA2, 0x20); mdelay(10); writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_POTA2, 0x00); writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_MASK, 0xc0); mdelay(10); if (cs->subtyp != ELSA_PCMCIA_IPAC) { writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ACFG, 0x0); writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_AOE, 0x3c); } else { writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_PCFG, 0x10); writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ACFG, 0x4); writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_AOE, 0xf8); } writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ATX, 0xff); if (cs->subtyp == ELSA_QS1000PCI) byteout(cs->hw.elsa.cfg + 0x4c, 0x41); /* enable ELSA PCI IRQ */ else if (cs->subtyp == ELSA_QS3000PCI) byteout(cs->hw.elsa.cfg + 0x4c, 0x43); /* enable ELSA PCI IRQ */ } } #if ARCOFI_USE static void set_arcofi(struct IsdnCardState *cs, int bc) { cs->dc.isac.arcofi_bc = bc; arcofi_fsm(cs, ARCOFI_START, &ARCOFI_COP_5); wait_event_interruptible(cs->dc.isac.arcofi_wait, cs->dc.isac.arcofi_state == 
ARCOFI_NOP); } static int check_arcofi(struct IsdnCardState *cs) { int arcofi_present = 0; char tmp[40]; char *t; u_char *p; if (!cs->dc.isac.mon_tx) if (!(cs->dc.isac.mon_tx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "ISAC MON TX out of buffers!"); return (0); } cs->dc.isac.arcofi_bc = 0; arcofi_fsm(cs, ARCOFI_START, &ARCOFI_VERSION); wait_event_interruptible(cs->dc.isac.arcofi_wait, cs->dc.isac.arcofi_state == ARCOFI_NOP); if (!test_and_clear_bit(FLG_ARCOFI_ERROR, &cs->HW_Flags)) { debugl1(cs, "Arcofi response received %d bytes", cs->dc.isac.mon_rxp); p = cs->dc.isac.mon_rx; t = tmp; t += sprintf(tmp, "Arcofi data"); QuickHex(t, p, cs->dc.isac.mon_rxp); debugl1(cs, "%s", tmp); if ((cs->dc.isac.mon_rxp == 2) && (cs->dc.isac.mon_rx[0] == 0xa0)) { switch (cs->dc.isac.mon_rx[1]) { case 0x80: debugl1(cs, "Arcofi 2160 detected"); arcofi_present = 1; break; case 0x82: debugl1(cs, "Arcofi 2165 detected"); arcofi_present = 2; break; case 0x84: debugl1(cs, "Arcofi 2163 detected"); arcofi_present = 3; break; default: debugl1(cs, "unknown Arcofi response"); break; } } else debugl1(cs, "undefined Monitor response"); cs->dc.isac.mon_rxp = 0; } else if (cs->dc.isac.mon_tx) { debugl1(cs, "Arcofi not detected"); } if (arcofi_present) { if (cs->subtyp == ELSA_QS1000) { cs->subtyp = ELSA_QS3000; printk(KERN_INFO "Elsa: %s detected modem at 0x%lx\n", Elsa_Types[cs->subtyp], cs->hw.elsa.base + 8); release_region(cs->hw.elsa.base, 8); if (!request_region(cs->hw.elsa.base, 16, "elsa isdn modem")) { printk(KERN_WARNING "HiSax: %s config port %lx-%lx already in use\n", Elsa_Types[cs->subtyp], cs->hw.elsa.base + 8, cs->hw.elsa.base + 16); } } else if (cs->subtyp == ELSA_PCC16) { cs->subtyp = ELSA_PCF; printk(KERN_INFO "Elsa: %s detected modem at 0x%lx\n", Elsa_Types[cs->subtyp], cs->hw.elsa.base + 8); release_region(cs->hw.elsa.base, 8); if (!request_region(cs->hw.elsa.base, 16, "elsa isdn modem")) { printk(KERN_WARNING "HiSax: %s config port %lx-%lx 
already in use\n", Elsa_Types[cs->subtyp], cs->hw.elsa.base + 8, cs->hw.elsa.base + 16); } } else printk(KERN_INFO "Elsa: %s detected modem at 0x%lx\n", Elsa_Types[cs->subtyp], cs->hw.elsa.base + 8); arcofi_fsm(cs, ARCOFI_START, &ARCOFI_XOP_0); wait_event_interruptible(cs->dc.isac.arcofi_wait, cs->dc.isac.arcofi_state == ARCOFI_NOP); return (1); } return (0); } #endif /* ARCOFI_USE */ static void elsa_led_handler(struct IsdnCardState *cs) { int blink = 0; if (cs->subtyp == ELSA_PCMCIA || cs->subtyp == ELSA_PCMCIA_IPAC) return; del_timer(&cs->hw.elsa.tl); if (cs->hw.elsa.status & ELSA_ASSIGN) cs->hw.elsa.ctrl_reg |= ELSA_STAT_LED; else if (cs->hw.elsa.status & ELSA_BAD_PWR) cs->hw.elsa.ctrl_reg &= ~ELSA_STAT_LED; else { cs->hw.elsa.ctrl_reg ^= ELSA_STAT_LED; blink = 250; } if (cs->hw.elsa.status & 0xf000) cs->hw.elsa.ctrl_reg |= ELSA_LINE_LED; else if (cs->hw.elsa.status & 0x0f00) { cs->hw.elsa.ctrl_reg ^= ELSA_LINE_LED; blink = 500; } else cs->hw.elsa.ctrl_reg &= ~ELSA_LINE_LED; if ((cs->subtyp == ELSA_QS1000PCI) || (cs->subtyp == ELSA_QS3000PCI)) { u_char led = 0xff; if (cs->hw.elsa.ctrl_reg & ELSA_LINE_LED) led ^= ELSA_IPAC_LINE_LED; if (cs->hw.elsa.ctrl_reg & ELSA_STAT_LED) led ^= ELSA_IPAC_STAT_LED; writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ATX, led); } else byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg); if (blink) { init_timer(&cs->hw.elsa.tl); cs->hw.elsa.tl.expires = jiffies + ((blink * HZ) / 1000); add_timer(&cs->hw.elsa.tl); } } static int Elsa_card_msg(struct IsdnCardState *cs, int mt, void *arg) { int ret = 0; u_long flags; switch (mt) { case CARD_RESET: spin_lock_irqsave(&cs->lock, flags); reset_elsa(cs); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_RELEASE: release_io_elsa(cs); return (0); case CARD_INIT: spin_lock_irqsave(&cs->lock, flags); cs->debug |= L1_DEB_IPAC; reset_elsa(cs); inithscxisac(cs, 1); if ((cs->subtyp == ELSA_QS1000) || (cs->subtyp == ELSA_QS3000)) { byteout(cs->hw.elsa.timer, 0); } if (cs->hw.elsa.trig) 
byteout(cs->hw.elsa.trig, 0xff); inithscxisac(cs, 2); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_TEST: if ((cs->subtyp == ELSA_PCMCIA) || (cs->subtyp == ELSA_PCMCIA_IPAC) || (cs->subtyp == ELSA_QS1000PCI)) { return (0); } else if (cs->subtyp == ELSA_QS3000PCI) { ret = 0; } else { spin_lock_irqsave(&cs->lock, flags); cs->hw.elsa.counter = 0; cs->hw.elsa.ctrl_reg |= ELSA_ENA_TIMER_INT; cs->hw.elsa.status |= ELIRQF_TIMER_AKTIV; byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg); byteout(cs->hw.elsa.timer, 0); spin_unlock_irqrestore(&cs->lock, flags); msleep(110); spin_lock_irqsave(&cs->lock, flags); cs->hw.elsa.ctrl_reg &= ~ELSA_ENA_TIMER_INT; byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg); cs->hw.elsa.status &= ~ELIRQF_TIMER_AKTIV; spin_unlock_irqrestore(&cs->lock, flags); printk(KERN_INFO "Elsa: %d timer tics in 110 msek\n", cs->hw.elsa.counter); if ((cs->hw.elsa.counter > 10) && (cs->hw.elsa.counter < 16)) { printk(KERN_INFO "Elsa: timer and irq OK\n"); ret = 0; } else { printk(KERN_WARNING "Elsa: timer tic problem (%d/12) maybe an IRQ(%d) conflict\n", cs->hw.elsa.counter, cs->irq); ret = 1; } } #if ARCOFI_USE if (check_arcofi(cs)) { init_modem(cs); } #endif elsa_led_handler(cs); return (ret); case (MDL_REMOVE | REQUEST): cs->hw.elsa.status &= 0; break; case (MDL_ASSIGN | REQUEST): cs->hw.elsa.status |= ELSA_ASSIGN; break; case MDL_INFO_SETUP: if ((long) arg) cs->hw.elsa.status |= 0x0200; else cs->hw.elsa.status |= 0x0100; break; case MDL_INFO_CONN: if ((long) arg) cs->hw.elsa.status |= 0x2000; else cs->hw.elsa.status |= 0x1000; break; case MDL_INFO_REL: if ((long) arg) { cs->hw.elsa.status &= ~0x2000; cs->hw.elsa.status &= ~0x0200; } else { cs->hw.elsa.status &= ~0x1000; cs->hw.elsa.status &= ~0x0100; } break; #if ARCOFI_USE case CARD_AUX_IND: if (cs->hw.elsa.MFlag) { int len; u_char *msg; if (!arg) return (0); msg = arg; len = *msg; msg++; modem_write_cmd(cs, msg, len); } break; #endif } if (cs->typ == ISDN_CTYPE_ELSA) { int pwr = 
bytein(cs->hw.elsa.ale); if (pwr & 0x08) cs->hw.elsa.status |= ELSA_BAD_PWR; else cs->hw.elsa.status &= ~ELSA_BAD_PWR; } elsa_led_handler(cs); return (ret); } static unsigned char probe_elsa_adr(unsigned int adr, int typ) { int i, in1, in2, p16_1 = 0, p16_2 = 0, p8_1 = 0, p8_2 = 0, pc_1 = 0, pc_2 = 0, pfp_1 = 0, pfp_2 = 0; /* In case of the elsa pcmcia card, this region is in use, reserved for us by the card manager. So we do not check it here, it would fail. */ if (typ != ISDN_CTYPE_ELSA_PCMCIA) { if (request_region(adr, 8, "elsa card")) { release_region(adr, 8); } else { printk(KERN_WARNING "Elsa: Probing Port 0x%x: already in use\n", adr); return (0); } } for (i = 0; i < 16; i++) { in1 = inb(adr + ELSA_CONFIG); /* 'toggelt' bei */ in2 = inb(adr + ELSA_CONFIG); /* jedem Zugriff */ p16_1 += 0x04 & in1; p16_2 += 0x04 & in2; p8_1 += 0x02 & in1; p8_2 += 0x02 & in2; pc_1 += 0x01 & in1; pc_2 += 0x01 & in2; pfp_1 += 0x40 & in1; pfp_2 += 0x40 & in2; } printk(KERN_INFO "Elsa: Probing IO 0x%x", adr); if (65 == ++p16_1 * ++p16_2) { printk(" PCC-16/PCF found\n"); return (ELSA_PCC16); } else if (1025 == ++pfp_1 * ++pfp_2) { printk(" PCF-Pro found\n"); return (ELSA_PCFPRO); } else if (33 == ++p8_1 * ++p8_2) { printk(" PCC8 found\n"); return (ELSA_PCC8); } else if (17 == ++pc_1 * ++pc_2) { printk(" PC found\n"); return (ELSA_PC); } else { printk(" failed\n"); return (0); } } static unsigned int probe_elsa(struct IsdnCardState *cs) { int i; unsigned int CARD_portlist[] = {0x160, 0x170, 0x260, 0x360, 0}; for (i = 0; CARD_portlist[i]; i++) { if ((cs->subtyp = probe_elsa_adr(CARD_portlist[i], cs->typ))) break; } return (CARD_portlist[i]); } static int setup_elsa_isa(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; u_char val; cs->hw.elsa.base = card->para[0]; printk(KERN_INFO "Elsa: Microlink IO probing\n"); if (cs->hw.elsa.base) { if (!(cs->subtyp = probe_elsa_adr(cs->hw.elsa.base, cs->typ))) { printk(KERN_WARNING "Elsa: no Elsa Microlink at %#lx\n", 
cs->hw.elsa.base); return (0); } } else cs->hw.elsa.base = probe_elsa(cs); if (!cs->hw.elsa.base) { printk(KERN_WARNING "No Elsa Microlink found\n"); return (0); } cs->hw.elsa.cfg = cs->hw.elsa.base + ELSA_CONFIG; cs->hw.elsa.ctrl = cs->hw.elsa.base + ELSA_CONTROL; cs->hw.elsa.ale = cs->hw.elsa.base + ELSA_ALE; cs->hw.elsa.isac = cs->hw.elsa.base + ELSA_ISAC; cs->hw.elsa.itac = cs->hw.elsa.base + ELSA_ITAC; cs->hw.elsa.hscx = cs->hw.elsa.base + ELSA_HSCX; cs->hw.elsa.trig = cs->hw.elsa.base + ELSA_TRIG_IRQ; cs->hw.elsa.timer = cs->hw.elsa.base + ELSA_START_TIMER; val = bytein(cs->hw.elsa.cfg); if (cs->subtyp == ELSA_PC) { const u_char CARD_IrqTab[8] = {7, 3, 5, 9, 0, 0, 0, 0}; cs->irq = CARD_IrqTab[(val & ELSA_IRQ_IDX_PC) >> 2]; } else if (cs->subtyp == ELSA_PCC8) { const u_char CARD_IrqTab[8] = {7, 3, 5, 9, 0, 0, 0, 0}; cs->irq = CARD_IrqTab[(val & ELSA_IRQ_IDX_PCC8) >> 4]; } else { const u_char CARD_IrqTab[8] = {15, 10, 15, 3, 11, 5, 11, 9}; cs->irq = CARD_IrqTab[(val & ELSA_IRQ_IDX) >> 3]; } val = bytein(cs->hw.elsa.ale) & ELSA_HW_RELEASE; if (val < 3) val |= 8; val += 'A' - 3; if (val == 'B' || val == 'C') val ^= 1; if ((cs->subtyp == ELSA_PCFPRO) && (val == 'G')) val = 'C'; printk(KERN_INFO "Elsa: %s found at %#lx Rev.:%c IRQ %d\n", Elsa_Types[cs->subtyp], cs->hw.elsa.base, val, cs->irq); val = bytein(cs->hw.elsa.ale) & ELSA_S0_POWER_BAD; if (val) { printk(KERN_WARNING "Elsa: Microlink S0 bus power bad\n"); cs->hw.elsa.status |= ELSA_BAD_PWR; } return (1); } #ifdef __ISAPNP__ static struct isapnp_device_id elsa_ids[] = { { ISAPNP_VENDOR('E', 'L', 'S'), ISAPNP_FUNCTION(0x0133), ISAPNP_VENDOR('E', 'L', 'S'), ISAPNP_FUNCTION(0x0133), (unsigned long) "Elsa QS1000" }, { ISAPNP_VENDOR('E', 'L', 'S'), ISAPNP_FUNCTION(0x0134), ISAPNP_VENDOR('E', 'L', 'S'), ISAPNP_FUNCTION(0x0134), (unsigned long) "Elsa QS3000" }, { 0, } }; static struct isapnp_device_id *ipid = &elsa_ids[0]; static struct pnp_card *pnp_c = NULL; #endif /* __ISAPNP__ */ static int 
setup_elsa_isapnp(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; #ifdef __ISAPNP__ if (!card->para[1] && isapnp_present()) { struct pnp_dev *pnp_d; while (ipid->card_vendor) { if ((pnp_c = pnp_find_card(ipid->card_vendor, ipid->card_device, pnp_c))) { pnp_d = NULL; if ((pnp_d = pnp_find_dev(pnp_c, ipid->vendor, ipid->function, pnp_d))) { int err; printk(KERN_INFO "HiSax: %s detected\n", (char *)ipid->driver_data); pnp_disable_dev(pnp_d); err = pnp_activate_dev(pnp_d); if (err < 0) { printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n", __func__, err); return (0); } card->para[1] = pnp_port_start(pnp_d, 0); card->para[0] = pnp_irq(pnp_d, 0); if (!card->para[0] || !card->para[1]) { printk(KERN_ERR "Elsa PnP:some resources are missing %ld/%lx\n", card->para[0], card->para[1]); pnp_disable_dev(pnp_d); return (0); } if (ipid->function == ISAPNP_FUNCTION(0x133)) cs->subtyp = ELSA_QS1000; else cs->subtyp = ELSA_QS3000; break; } else { printk(KERN_ERR "Elsa PnP: PnP error card found, no device\n"); return (0); } } ipid++; pnp_c = NULL; } if (!ipid->card_vendor) { printk(KERN_INFO "Elsa PnP: no ISAPnP card found\n"); return (0); } } #endif /* __ISAPNP__ */ if (card->para[1] && card->para[0]) { cs->hw.elsa.base = card->para[1]; cs->irq = card->para[0]; if (!cs->subtyp) cs->subtyp = ELSA_QS1000; } else { printk(KERN_ERR "Elsa PnP: no parameter\n"); } cs->hw.elsa.cfg = cs->hw.elsa.base + ELSA_CONFIG; cs->hw.elsa.ale = cs->hw.elsa.base + ELSA_ALE; cs->hw.elsa.isac = cs->hw.elsa.base + ELSA_ISAC; cs->hw.elsa.hscx = cs->hw.elsa.base + ELSA_HSCX; cs->hw.elsa.trig = cs->hw.elsa.base + ELSA_TRIG_IRQ; cs->hw.elsa.timer = cs->hw.elsa.base + ELSA_START_TIMER; cs->hw.elsa.ctrl = cs->hw.elsa.base + ELSA_CONTROL; printk(KERN_INFO "Elsa: %s defined at %#lx IRQ %d\n", Elsa_Types[cs->subtyp], cs->hw.elsa.base, cs->irq); return (1); } static void setup_elsa_pcmcia(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; u_char val; cs->hw.elsa.base = card->para[1]; cs->irq 
= card->para[0]; val = readreg(cs->hw.elsa.base + 0, cs->hw.elsa.base + 2, IPAC_ID); if ((val == 1) || (val == 2)) { /* IPAC version 1.1/1.2 */ cs->subtyp = ELSA_PCMCIA_IPAC; cs->hw.elsa.ale = cs->hw.elsa.base + 0; cs->hw.elsa.isac = cs->hw.elsa.base + 2; cs->hw.elsa.hscx = cs->hw.elsa.base + 2; test_and_set_bit(HW_IPAC, &cs->HW_Flags); } else { cs->subtyp = ELSA_PCMCIA; cs->hw.elsa.ale = cs->hw.elsa.base + ELSA_ALE_PCM; cs->hw.elsa.isac = cs->hw.elsa.base + ELSA_ISAC_PCM; cs->hw.elsa.hscx = cs->hw.elsa.base + ELSA_HSCX; } cs->hw.elsa.timer = 0; cs->hw.elsa.trig = 0; cs->hw.elsa.ctrl = 0; cs->irq_flags |= IRQF_SHARED; printk(KERN_INFO "Elsa: %s defined at %#lx IRQ %d\n", Elsa_Types[cs->subtyp], cs->hw.elsa.base, cs->irq); } #ifdef CONFIG_PCI static struct pci_dev *dev_qs1000 = NULL; static struct pci_dev *dev_qs3000 = NULL; static int setup_elsa_pci(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; cs->subtyp = 0; if ((dev_qs1000 = hisax_find_pci_device(PCI_VENDOR_ID_ELSA, PCI_DEVICE_ID_ELSA_MICROLINK, dev_qs1000))) { if (pci_enable_device(dev_qs1000)) return (0); cs->subtyp = ELSA_QS1000PCI; cs->irq = dev_qs1000->irq; cs->hw.elsa.cfg = pci_resource_start(dev_qs1000, 1); cs->hw.elsa.base = pci_resource_start(dev_qs1000, 3); } else if ((dev_qs3000 = hisax_find_pci_device(PCI_VENDOR_ID_ELSA, PCI_DEVICE_ID_ELSA_QS3000, dev_qs3000))) { if (pci_enable_device(dev_qs3000)) return (0); cs->subtyp = ELSA_QS3000PCI; cs->irq = dev_qs3000->irq; cs->hw.elsa.cfg = pci_resource_start(dev_qs3000, 1); cs->hw.elsa.base = pci_resource_start(dev_qs3000, 3); } else { printk(KERN_WARNING "Elsa: No PCI card found\n"); return (0); } if (!cs->irq) { printk(KERN_WARNING "Elsa: No IRQ for PCI card found\n"); return (0); } if (!(cs->hw.elsa.base && cs->hw.elsa.cfg)) { printk(KERN_WARNING "Elsa: No IO-Adr for PCI card found\n"); return (0); } if ((cs->hw.elsa.cfg & 0xff) || (cs->hw.elsa.base & 0xf)) { printk(KERN_WARNING "Elsa: You may have a wrong PCI bios\n"); printk(KERN_WARNING 
"Elsa: If your system hangs now, read\n"); printk(KERN_WARNING "Elsa: Documentation/isdn/README.HiSax\n"); } cs->hw.elsa.ale = cs->hw.elsa.base; cs->hw.elsa.isac = cs->hw.elsa.base + 1; cs->hw.elsa.hscx = cs->hw.elsa.base + 1; test_and_set_bit(HW_IPAC, &cs->HW_Flags); cs->hw.elsa.timer = 0; cs->hw.elsa.trig = 0; cs->irq_flags |= IRQF_SHARED; printk(KERN_INFO "Elsa: %s defined at %#lx/0x%x IRQ %d\n", Elsa_Types[cs->subtyp], cs->hw.elsa.base, cs->hw.elsa.cfg, cs->irq); return (1); } #else static int setup_elsa_pci(struct IsdnCard *card) { return (1); } #endif /* CONFIG_PCI */ static int setup_elsa_common(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; u_char val; int bytecnt; switch (cs->subtyp) { case ELSA_PC: case ELSA_PCC8: case ELSA_PCC16: case ELSA_QS1000: case ELSA_PCMCIA: case ELSA_PCMCIA_IPAC: bytecnt = 8; break; case ELSA_PCFPRO: case ELSA_PCF: case ELSA_QS3000: case ELSA_QS3000PCI: bytecnt = 16; break; case ELSA_QS1000PCI: bytecnt = 2; break; default: printk(KERN_WARNING "Unknown ELSA subtype %d\n", cs->subtyp); return (0); } /* In case of the elsa pcmcia card, this region is in use, reserved for us by the card manager. So we do not check it here, it would fail. 
*/ if (cs->typ != ISDN_CTYPE_ELSA_PCMCIA && !request_region(cs->hw.elsa.base, bytecnt, "elsa isdn")) { printk(KERN_WARNING "HiSax: ELSA config port %#lx-%#lx already in use\n", cs->hw.elsa.base, cs->hw.elsa.base + bytecnt); return (0); } if ((cs->subtyp == ELSA_QS1000PCI) || (cs->subtyp == ELSA_QS3000PCI)) { if (!request_region(cs->hw.elsa.cfg, 0x80, "elsa isdn pci")) { printk(KERN_WARNING "HiSax: ELSA pci port %x-%x already in use\n", cs->hw.elsa.cfg, cs->hw.elsa.cfg + 0x80); release_region(cs->hw.elsa.base, bytecnt); return (0); } } #if ARCOFI_USE init_arcofi(cs); #endif setup_isac(cs); cs->hw.elsa.tl.function = (void *) elsa_led_handler; cs->hw.elsa.tl.data = (long) cs; init_timer(&cs->hw.elsa.tl); /* Teste Timer */ if (cs->hw.elsa.timer) { byteout(cs->hw.elsa.trig, 0xff); byteout(cs->hw.elsa.timer, 0); if (!TimerRun(cs)) { byteout(cs->hw.elsa.timer, 0); /* 2. Versuch */ if (!TimerRun(cs)) { printk(KERN_WARNING "Elsa: timer do not start\n"); release_io_elsa(cs); return (0); } } HZDELAY((HZ / 100) + 1); /* wait >=10 ms */ if (TimerRun(cs)) { printk(KERN_WARNING "Elsa: timer do not run down\n"); release_io_elsa(cs); return (0); } printk(KERN_INFO "Elsa: timer OK; resetting card\n"); } cs->BC_Read_Reg = &ReadHSCX; cs->BC_Write_Reg = &WriteHSCX; cs->BC_Send_Data = &hscx_fill_fifo; cs->cardmsg = &Elsa_card_msg; if ((cs->subtyp == ELSA_QS1000PCI) || (cs->subtyp == ELSA_QS3000PCI) || (cs->subtyp == ELSA_PCMCIA_IPAC)) { cs->readisac = &ReadISAC_IPAC; cs->writeisac = &WriteISAC_IPAC; cs->readisacfifo = &ReadISACfifo_IPAC; cs->writeisacfifo = &WriteISACfifo_IPAC; cs->irq_func = &elsa_interrupt_ipac; val = readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ID); printk(KERN_INFO "Elsa: IPAC version %x\n", val); } else { cs->readisac = &ReadISAC; cs->writeisac = &WriteISAC; cs->readisacfifo = &ReadISACfifo; cs->writeisacfifo = &WriteISACfifo; cs->irq_func = &elsa_interrupt; ISACVersion(cs, "Elsa:"); if (HscxVersion(cs, "Elsa:")) { printk(KERN_WARNING "Elsa: wrong HSCX versions 
check IO address\n"); release_io_elsa(cs); return (0); } } if (cs->subtyp == ELSA_PC) { val = readitac(cs, ITAC_SYS); printk(KERN_INFO "Elsa: ITAC version %s\n", ITACVer[val & 7]); writeitac(cs, ITAC_ISEN, 0); writeitac(cs, ITAC_RFIE, 0); writeitac(cs, ITAC_XFIE, 0); writeitac(cs, ITAC_SCIE, 0); writeitac(cs, ITAC_STIE, 0); } return (1); } int setup_elsa(struct IsdnCard *card) { int rc; struct IsdnCardState *cs = card->cs; char tmp[64]; strcpy(tmp, Elsa_revision); printk(KERN_INFO "HiSax: Elsa driver Rev. %s\n", HiSax_getrev(tmp)); cs->hw.elsa.ctrl_reg = 0; cs->hw.elsa.status = 0; cs->hw.elsa.MFlag = 0; cs->subtyp = 0; if (cs->typ == ISDN_CTYPE_ELSA) { rc = setup_elsa_isa(card); if (!rc) return (0); } else if (cs->typ == ISDN_CTYPE_ELSA_PNP) { rc = setup_elsa_isapnp(card); if (!rc) return (0); } else if (cs->typ == ISDN_CTYPE_ELSA_PCMCIA) setup_elsa_pcmcia(card); else if (cs->typ == ISDN_CTYPE_ELSA_PCI) { rc = setup_elsa_pci(card); if (!rc) return (0); } else return (0); return setup_elsa_common(card); }
gpl-2.0
dmitry-pervushin/linux-netfilter
fs/ioprio.c
2074
5074
/* * fs/ioprio.c * * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk> * * Helper functions for setting/querying io priorities of processes. The * system calls closely mimmick getpriority/setpriority, see the man page for * those. The prio argument is a composite of prio class and prio data, where * the data argument has meaning within that class. The standard scheduling * classes have 8 distinct prio levels, with 0 being the highest prio and 7 * being the lowest. * * IOW, setting BE scheduling class with prio 2 is done ala: * * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2; * * ioprio_set(PRIO_PROCESS, pid, prio); * * See also Documentation/block/ioprio.txt * */ #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/ioprio.h> #include <linux/blkdev.h> #include <linux/capability.h> #include <linux/syscalls.h> #include <linux/security.h> #include <linux/pid_namespace.h> int set_task_ioprio(struct task_struct *task, int ioprio) { int err; struct io_context *ioc; const struct cred *cred = current_cred(), *tcred; rcu_read_lock(); tcred = __task_cred(task); if (!uid_eq(tcred->uid, cred->euid) && !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) { rcu_read_unlock(); return -EPERM; } rcu_read_unlock(); err = security_task_setioprio(task, ioprio); if (err) return err; ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE); if (ioc) { ioc->ioprio = ioprio; put_io_context(ioc); } return err; } EXPORT_SYMBOL_GPL(set_task_ioprio); SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio) { int class = IOPRIO_PRIO_CLASS(ioprio); int data = IOPRIO_PRIO_DATA(ioprio); struct task_struct *p, *g; struct user_struct *user; struct pid *pgrp; kuid_t uid; int ret; switch (class) { case IOPRIO_CLASS_RT: if (!capable(CAP_SYS_ADMIN)) return -EPERM; /* fall through, rt has prio field too */ case IOPRIO_CLASS_BE: if (data >= IOPRIO_BE_NR || data < 0) return -EINVAL; break; case IOPRIO_CLASS_IDLE: break; case 
IOPRIO_CLASS_NONE: if (data) return -EINVAL; break; default: return -EINVAL; } ret = -ESRCH; rcu_read_lock(); switch (which) { case IOPRIO_WHO_PROCESS: if (!who) p = current; else p = find_task_by_vpid(who); if (p) ret = set_task_ioprio(p, ioprio); break; case IOPRIO_WHO_PGRP: if (!who) pgrp = task_pgrp(current); else pgrp = find_vpid(who); do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { ret = set_task_ioprio(p, ioprio); if (ret) break; } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); break; case IOPRIO_WHO_USER: uid = make_kuid(current_user_ns(), who); if (!uid_valid(uid)) break; if (!who) user = current_user(); else user = find_user(uid); if (!user) break; do_each_thread(g, p) { if (!uid_eq(task_uid(p), uid)) continue; ret = set_task_ioprio(p, ioprio); if (ret) goto free_uid; } while_each_thread(g, p); free_uid: if (who) free_uid(user); break; default: ret = -EINVAL; } rcu_read_unlock(); return ret; } static int get_task_ioprio(struct task_struct *p) { int ret; ret = security_task_getioprio(p); if (ret) goto out; ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM); if (p->io_context) ret = p->io_context->ioprio; out: return ret; } int ioprio_best(unsigned short aprio, unsigned short bprio) { unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); if (aclass == IOPRIO_CLASS_NONE) aclass = IOPRIO_CLASS_BE; if (bclass == IOPRIO_CLASS_NONE) bclass = IOPRIO_CLASS_BE; if (aclass == bclass) return min(aprio, bprio); if (aclass > bclass) return bprio; else return aprio; } SYSCALL_DEFINE2(ioprio_get, int, which, int, who) { struct task_struct *g, *p; struct user_struct *user; struct pid *pgrp; kuid_t uid; int ret = -ESRCH; int tmpio; rcu_read_lock(); switch (which) { case IOPRIO_WHO_PROCESS: if (!who) p = current; else p = find_task_by_vpid(who); if (p) ret = get_task_ioprio(p); break; case IOPRIO_WHO_PGRP: if (!who) pgrp = task_pgrp(current); else pgrp = find_vpid(who); do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { tmpio = 
get_task_ioprio(p); if (tmpio < 0) continue; if (ret == -ESRCH) ret = tmpio; else ret = ioprio_best(ret, tmpio); } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); break; case IOPRIO_WHO_USER: uid = make_kuid(current_user_ns(), who); if (!who) user = current_user(); else user = find_user(uid); if (!user) break; do_each_thread(g, p) { if (!uid_eq(task_uid(p), user->uid)) continue; tmpio = get_task_ioprio(p); if (tmpio < 0) continue; if (ret == -ESRCH) ret = tmpio; else ret = ioprio_best(ret, tmpio); } while_each_thread(g, p); if (who) free_uid(user); break; default: ret = -EINVAL; } rcu_read_unlock(); return ret; }
gpl-2.0
sk806/N5_Kernel
drivers/edac/amd64_edac_inj.c
2074
6590
#include "amd64_edac.h" static ssize_t amd64_inject_section_show(struct device *dev, struct device_attribute *mattr, char *buf) { struct mem_ctl_info *mci = to_mci(dev); struct amd64_pvt *pvt = mci->pvt_info; return sprintf(buf, "0x%x\n", pvt->injection.section); } /* * store error injection section value which refers to one of 4 16-byte sections * within a 64-byte cacheline * * range: 0..3 */ static ssize_t amd64_inject_section_store(struct device *dev, struct device_attribute *mattr, const char *data, size_t count) { struct mem_ctl_info *mci = to_mci(dev); struct amd64_pvt *pvt = mci->pvt_info; unsigned long value; int ret; ret = strict_strtoul(data, 10, &value); if (ret < 0) return ret; if (value > 3) { amd64_warn("%s: invalid section 0x%lx\n", __func__, value); return -EINVAL; } pvt->injection.section = (u32) value; return count; } static ssize_t amd64_inject_word_show(struct device *dev, struct device_attribute *mattr, char *buf) { struct mem_ctl_info *mci = to_mci(dev); struct amd64_pvt *pvt = mci->pvt_info; return sprintf(buf, "0x%x\n", pvt->injection.word); } /* * store error injection word value which refers to one of 9 16-bit word of the * 16-byte (128-bit + ECC bits) section * * range: 0..8 */ static ssize_t amd64_inject_word_store(struct device *dev, struct device_attribute *mattr, const char *data, size_t count) { struct mem_ctl_info *mci = to_mci(dev); struct amd64_pvt *pvt = mci->pvt_info; unsigned long value; int ret; ret = strict_strtoul(data, 10, &value); if (ret < 0) return ret; if (value > 8) { amd64_warn("%s: invalid word 0x%lx\n", __func__, value); return -EINVAL; } pvt->injection.word = (u32) value; return count; } static ssize_t amd64_inject_ecc_vector_show(struct device *dev, struct device_attribute *mattr, char *buf) { struct mem_ctl_info *mci = to_mci(dev); struct amd64_pvt *pvt = mci->pvt_info; return sprintf(buf, "0x%x\n", pvt->injection.bit_map); } /* * store 16 bit error injection vector which enables injecting errors to the * 
corresponding bit within the error injection word above. When used during a * DRAM ECC read, it holds the contents of the of the DRAM ECC bits. */ static ssize_t amd64_inject_ecc_vector_store(struct device *dev, struct device_attribute *mattr, const char *data, size_t count) { struct mem_ctl_info *mci = to_mci(dev); struct amd64_pvt *pvt = mci->pvt_info; unsigned long value; int ret; ret = strict_strtoul(data, 16, &value); if (ret < 0) return ret; if (value & 0xFFFF0000) { amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value); return -EINVAL; } pvt->injection.bit_map = (u32) value; return count; } /* * Do a DRAM ECC read. Assemble staged values in the pvt area, format into * fields needed by the injection registers and read the NB Array Data Port. */ static ssize_t amd64_inject_read_store(struct device *dev, struct device_attribute *mattr, const char *data, size_t count) { struct mem_ctl_info *mci = to_mci(dev); struct amd64_pvt *pvt = mci->pvt_info; unsigned long value; u32 section, word_bits; int ret; ret = strict_strtoul(data, 10, &value); if (ret < 0) return ret; /* Form value to choose 16-byte section of cacheline */ section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section); amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section); word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection); /* Issue 'word' and 'bit' along with the READ request */ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits); return count; } /* * Do a DRAM ECC write. Assemble staged values in the pvt area and format into * fields needed by the injection registers. 
*/ static ssize_t amd64_inject_write_store(struct device *dev, struct device_attribute *mattr, const char *data, size_t count) { struct mem_ctl_info *mci = to_mci(dev); struct amd64_pvt *pvt = mci->pvt_info; u32 section, word_bits, tmp; unsigned long value; int ret; ret = strict_strtoul(data, 10, &value); if (ret < 0) return ret; /* Form value to choose 16-byte section of cacheline */ section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section); amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section); word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection); pr_notice_once("Don't forget to decrease MCE polling interval in\n" "/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n" "so that you can get the error report faster.\n"); on_each_cpu(disable_caches, NULL, 1); /* Issue 'word' and 'bit' along with the READ request */ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); retry: /* wait until injection happens */ amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp); if (tmp & F10_NB_ARR_ECC_WR_REQ) { cpu_relax(); goto retry; } on_each_cpu(enable_caches, NULL, 1); edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits); return count; } /* * update NUM_INJ_ATTRS in case you add new members */ static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR, amd64_inject_section_show, amd64_inject_section_store); static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR, amd64_inject_word_show, amd64_inject_word_store); static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR, amd64_inject_ecc_vector_show, amd64_inject_ecc_vector_store); static DEVICE_ATTR(inject_write, S_IWUSR, NULL, amd64_inject_write_store); static DEVICE_ATTR(inject_read, S_IWUSR, NULL, amd64_inject_read_store); int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci) { int rc; rc = device_create_file(&mci->dev, &dev_attr_inject_section); if (rc < 0) return rc; rc = device_create_file(&mci->dev, &dev_attr_inject_word); if (rc < 0) return rc; rc = 
device_create_file(&mci->dev, &dev_attr_inject_ecc_vector); if (rc < 0) return rc; rc = device_create_file(&mci->dev, &dev_attr_inject_write); if (rc < 0) return rc; rc = device_create_file(&mci->dev, &dev_attr_inject_read); if (rc < 0) return rc; return 0; } void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci) { device_remove_file(&mci->dev, &dev_attr_inject_section); device_remove_file(&mci->dev, &dev_attr_inject_word); device_remove_file(&mci->dev, &dev_attr_inject_ecc_vector); device_remove_file(&mci->dev, &dev_attr_inject_write); device_remove_file(&mci->dev, &dev_attr_inject_read); }
gpl-2.0
gundal/zerofltetmo
drivers/net/wireless/airo.c
2074
222700
/*====================================================================== Aironet driver for 4500 and 4800 series cards This code is released under both the GPL version 2 and BSD licenses. Either license may be used. The respective licenses are found at the end of this file. This code was developed by Benjamin Reed <breed@users.sourceforge.net> including portions of which come from the Aironet PC4500 Developer's Reference Manual and used with permission. Copyright (C) 1999 Benjamin Reed. All Rights Reserved. Permission to use code in the Developer's manual was granted for this driver by Aironet. Major code contributions were received from Javier Achirica <achirica@users.sourceforge.net> and Jean Tourrilhes <jt@hpl.hp.com>. Code was also integrated from the Cisco Aironet driver for Linux. Support for MPI350 cards was added by Fabrice Bellet <fabrice@bellet.info>. ======================================================================*/ #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/bitops.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <asm/io.h> #include <asm/unaligned.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include <linux/pci.h> #include <asm/uaccess.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/ieee80211.h> #include <net/iw_handler.h> #include "airo.h" #define DRV_NAME "airo" #ifdef CONFIG_PCI static DEFINE_PCI_DEVICE_TABLE(card_ids) = { { 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, }, { 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID }, { 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, }, { 0x14b9, 0x0340, PCI_ANY_ID, PCI_ANY_ID, }, { 0x14b9, 0x0350, PCI_ANY_ID, PCI_ANY_ID, 
}, { 0x14b9, 0x5000, PCI_ANY_ID, PCI_ANY_ID, }, { 0x14b9, 0xa504, PCI_ANY_ID, PCI_ANY_ID, }, { 0, } }; MODULE_DEVICE_TABLE(pci, card_ids); static int airo_pci_probe(struct pci_dev *, const struct pci_device_id *); static void airo_pci_remove(struct pci_dev *); static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state); static int airo_pci_resume(struct pci_dev *pdev); static struct pci_driver airo_driver = { .name = DRV_NAME, .id_table = card_ids, .probe = airo_pci_probe, .remove = airo_pci_remove, .suspend = airo_pci_suspend, .resume = airo_pci_resume, }; #endif /* CONFIG_PCI */ /* Include Wireless Extension definition and check version - Jean II */ #include <linux/wireless.h> #define WIRELESS_SPY /* enable iwspy support */ #define CISCO_EXT /* enable Cisco extensions */ #ifdef CISCO_EXT #include <linux/delay.h> #endif /* Hack to do some power saving */ #define POWER_ON_DOWN /* As you can see this list is HUGH! I really don't know what a lot of these counts are about, but they are all here for completeness. 
If the IGNLABEL macro is put in infront of the label, that statistic will not be included in the list of statistics in the /proc filesystem */ #define IGNLABEL(comment) NULL static const char *statsLabels[] = { "RxOverrun", IGNLABEL("RxPlcpCrcErr"), IGNLABEL("RxPlcpFormatErr"), IGNLABEL("RxPlcpLengthErr"), "RxMacCrcErr", "RxMacCrcOk", "RxWepErr", "RxWepOk", "RetryLong", "RetryShort", "MaxRetries", "NoAck", "NoCts", "RxAck", "RxCts", "TxAck", "TxRts", "TxCts", "TxMc", "TxBc", "TxUcFrags", "TxUcPackets", "TxBeacon", "RxBeacon", "TxSinColl", "TxMulColl", "DefersNo", "DefersProt", "DefersEngy", "DupFram", "RxFragDisc", "TxAged", "RxAged", "LostSync-MaxRetry", "LostSync-MissedBeacons", "LostSync-ArlExceeded", "LostSync-Deauth", "LostSync-Disassoced", "LostSync-TsfTiming", "HostTxMc", "HostTxBc", "HostTxUc", "HostTxFail", "HostRxMc", "HostRxBc", "HostRxUc", "HostRxDiscard", IGNLABEL("HmacTxMc"), IGNLABEL("HmacTxBc"), IGNLABEL("HmacTxUc"), IGNLABEL("HmacTxFail"), IGNLABEL("HmacRxMc"), IGNLABEL("HmacRxBc"), IGNLABEL("HmacRxUc"), IGNLABEL("HmacRxDiscard"), IGNLABEL("HmacRxAccepted"), "SsidMismatch", "ApMismatch", "RatesMismatch", "AuthReject", "AuthTimeout", "AssocReject", "AssocTimeout", IGNLABEL("ReasonOutsideTable"), IGNLABEL("ReasonStatus1"), IGNLABEL("ReasonStatus2"), IGNLABEL("ReasonStatus3"), IGNLABEL("ReasonStatus4"), IGNLABEL("ReasonStatus5"), IGNLABEL("ReasonStatus6"), IGNLABEL("ReasonStatus7"), IGNLABEL("ReasonStatus8"), IGNLABEL("ReasonStatus9"), IGNLABEL("ReasonStatus10"), IGNLABEL("ReasonStatus11"), IGNLABEL("ReasonStatus12"), IGNLABEL("ReasonStatus13"), IGNLABEL("ReasonStatus14"), IGNLABEL("ReasonStatus15"), IGNLABEL("ReasonStatus16"), IGNLABEL("ReasonStatus17"), IGNLABEL("ReasonStatus18"), IGNLABEL("ReasonStatus19"), "RxMan", "TxMan", "RxRefresh", "TxRefresh", "RxPoll", "TxPoll", "HostRetries", "LostSync-HostReq", "HostTxBytes", "HostRxBytes", "ElapsedUsec", "ElapsedSec", "LostSyncBetterAP", "PrivacyMismatch", "Jammed", "DiscRxNotWepped", "PhyEleMismatch", 
(char*)-1 }; #ifndef RUN_AT #define RUN_AT(x) (jiffies+(x)) #endif /* These variables are for insmod, since it seems that the rates can only be set in setup_card. Rates should be a comma separated (no spaces) list of rates (up to 8). */ static int rates[8]; static char *ssids[3]; static int io[4]; static int irq[4]; static int maxencrypt /* = 0 */; /* The highest rate that the card can encrypt at. 0 means no limit. For old cards this was 4 */ static int auto_wep /* = 0 */; /* If set, it tries to figure out the wep mode */ static int aux_bap /* = 0 */; /* Checks to see if the aux ports are needed to read the bap, needed on some older cards and buses. */ static int adhoc; static int probe = 1; static kuid_t proc_kuid; static int proc_uid /* = 0 */; static kgid_t proc_kgid; static int proc_gid /* = 0 */; static int airo_perm = 0555; static int proc_perm = 0644; MODULE_AUTHOR("Benjamin Reed"); MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet cards. " "Direct support for ISA/PCI/MPI cards and support for PCMCIA when used with airo_cs."); MODULE_LICENSE("Dual BSD/GPL"); MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340/350"); module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param_array(rates, int, NULL, 0); module_param_array(ssids, charp, NULL, 0); module_param(auto_wep, int, 0); MODULE_PARM_DESC(auto_wep, "If non-zero, the driver will keep looping through the authentication options until an association is made. " "The value of auto_wep is number of the wep keys to check. " "A value of 2 will try using the key at index 0 and index 1."); module_param(aux_bap, int, 0); MODULE_PARM_DESC(aux_bap, "If non-zero, the driver will switch into a mode that seems to work better for older cards with some older buses. " "Before switching it checks that the switch is needed."); module_param(maxencrypt, int, 0); MODULE_PARM_DESC(maxencrypt, "The maximum speed that the card can do encryption. " "Units are in 512kbs. 
" "Zero (default) means there is no limit. " "Older cards used to be limited to 2mbs (4)."); module_param(adhoc, int, 0); MODULE_PARM_DESC(adhoc, "If non-zero, the card will start in adhoc mode."); module_param(probe, int, 0); MODULE_PARM_DESC(probe, "If zero, the driver won't start the card."); module_param(proc_uid, int, 0); MODULE_PARM_DESC(proc_uid, "The uid that the /proc files will belong to."); module_param(proc_gid, int, 0); MODULE_PARM_DESC(proc_gid, "The gid that the /proc files will belong to."); module_param(airo_perm, int, 0); MODULE_PARM_DESC(airo_perm, "The permission bits of /proc/[driver/]aironet."); module_param(proc_perm, int, 0); MODULE_PARM_DESC(proc_perm, "The permission bits of the files in /proc"); /* This is a kind of sloppy hack to get this information to OUT4500 and IN4500. I would be extremely interested in the situation where this doesn't work though!!! */ static int do8bitIO /* = 0 */; /* Return codes */ #define SUCCESS 0 #define ERROR -1 #define NO_PACKET -2 /* Commands */ #define NOP2 0x0000 #define MAC_ENABLE 0x0001 #define MAC_DISABLE 0x0002 #define CMD_LOSE_SYNC 0x0003 /* Not sure what this does... 
*/ #define CMD_SOFTRESET 0x0004 #define HOSTSLEEP 0x0005 #define CMD_MAGIC_PKT 0x0006 #define CMD_SETWAKEMASK 0x0007 #define CMD_READCFG 0x0008 #define CMD_SETMODE 0x0009 #define CMD_ALLOCATETX 0x000a #define CMD_TRANSMIT 0x000b #define CMD_DEALLOCATETX 0x000c #define NOP 0x0010 #define CMD_WORKAROUND 0x0011 #define CMD_ALLOCATEAUX 0x0020 #define CMD_ACCESS 0x0021 #define CMD_PCIBAP 0x0022 #define CMD_PCIAUX 0x0023 #define CMD_ALLOCBUF 0x0028 #define CMD_GETTLV 0x0029 #define CMD_PUTTLV 0x002a #define CMD_DELTLV 0x002b #define CMD_FINDNEXTTLV 0x002c #define CMD_PSPNODES 0x0030 #define CMD_SETCW 0x0031 #define CMD_SETPCF 0x0032 #define CMD_SETPHYREG 0x003e #define CMD_TXTEST 0x003f #define MAC_ENABLETX 0x0101 #define CMD_LISTBSS 0x0103 #define CMD_SAVECFG 0x0108 #define CMD_ENABLEAUX 0x0111 #define CMD_WRITERID 0x0121 #define CMD_USEPSPNODES 0x0130 #define MAC_ENABLERX 0x0201 /* Command errors */ #define ERROR_QUALIF 0x00 #define ERROR_ILLCMD 0x01 #define ERROR_ILLFMT 0x02 #define ERROR_INVFID 0x03 #define ERROR_INVRID 0x04 #define ERROR_LARGE 0x05 #define ERROR_NDISABL 0x06 #define ERROR_ALLOCBSY 0x07 #define ERROR_NORD 0x0B #define ERROR_NOWR 0x0C #define ERROR_INVFIDTX 0x0D #define ERROR_TESTACT 0x0E #define ERROR_TAGNFND 0x12 #define ERROR_DECODE 0x20 #define ERROR_DESCUNAV 0x21 #define ERROR_BADLEN 0x22 #define ERROR_MODE 0x80 #define ERROR_HOP 0x81 #define ERROR_BINTER 0x82 #define ERROR_RXMODE 0x83 #define ERROR_MACADDR 0x84 #define ERROR_RATES 0x85 #define ERROR_ORDER 0x86 #define ERROR_SCAN 0x87 #define ERROR_AUTH 0x88 #define ERROR_PSMODE 0x89 #define ERROR_RTYPE 0x8A #define ERROR_DIVER 0x8B #define ERROR_SSID 0x8C #define ERROR_APLIST 0x8D #define ERROR_AUTOWAKE 0x8E #define ERROR_LEAP 0x8F /* Registers */ #define COMMAND 0x00 #define PARAM0 0x02 #define PARAM1 0x04 #define PARAM2 0x06 #define STATUS 0x08 #define RESP0 0x0a #define RESP1 0x0c #define RESP2 0x0e #define LINKSTAT 0x10 #define SELECT0 0x18 #define OFFSET0 0x1c #define RXFID 0x20 #define 
TXALLOCFID 0x22 #define TXCOMPLFID 0x24 #define DATA0 0x36 #define EVSTAT 0x30 #define EVINTEN 0x32 #define EVACK 0x34 #define SWS0 0x28 #define SWS1 0x2a #define SWS2 0x2c #define SWS3 0x2e #define AUXPAGE 0x3A #define AUXOFF 0x3C #define AUXDATA 0x3E #define FID_TX 1 #define FID_RX 2 /* Offset into aux memory for descriptors */ #define AUX_OFFSET 0x800 /* Size of allocated packets */ #define PKTSIZE 1840 #define RIDSIZE 2048 /* Size of the transmit queue */ #define MAXTXQ 64 /* BAP selectors */ #define BAP0 0 /* Used for receiving packets */ #define BAP1 2 /* Used for xmiting packets and working with RIDS */ /* Flags */ #define COMMAND_BUSY 0x8000 #define BAP_BUSY 0x8000 #define BAP_ERR 0x4000 #define BAP_DONE 0x2000 #define PROMISC 0xffff #define NOPROMISC 0x0000 #define EV_CMD 0x10 #define EV_CLEARCOMMANDBUSY 0x4000 #define EV_RX 0x01 #define EV_TX 0x02 #define EV_TXEXC 0x04 #define EV_ALLOC 0x08 #define EV_LINK 0x80 #define EV_AWAKE 0x100 #define EV_TXCPY 0x400 #define EV_UNKNOWN 0x800 #define EV_MIC 0x1000 /* Message Integrity Check Interrupt */ #define EV_AWAKEN 0x2000 #define STATUS_INTS (EV_AWAKE|EV_LINK|EV_TXEXC|EV_TX|EV_TXCPY|EV_RX|EV_MIC) #ifdef CHECK_UNKNOWN_INTS #define IGNORE_INTS ( EV_CMD | EV_UNKNOWN) #else #define IGNORE_INTS (~STATUS_INTS) #endif /* RID TYPES */ #define RID_RW 0x20 /* The RIDs */ #define RID_CAPABILITIES 0xFF00 #define RID_APINFO 0xFF01 #define RID_RADIOINFO 0xFF02 #define RID_UNKNOWN3 0xFF03 #define RID_RSSI 0xFF04 #define RID_CONFIG 0xFF10 #define RID_SSID 0xFF11 #define RID_APLIST 0xFF12 #define RID_DRVNAME 0xFF13 #define RID_ETHERENCAP 0xFF14 #define RID_WEP_TEMP 0xFF15 #define RID_WEP_PERM 0xFF16 #define RID_MODULATION 0xFF17 #define RID_OPTIONS 0xFF18 #define RID_ACTUALCONFIG 0xFF20 /*readonly*/ #define RID_FACTORYCONFIG 0xFF21 #define RID_UNKNOWN22 0xFF22 #define RID_LEAPUSERNAME 0xFF23 #define RID_LEAPPASSWORD 0xFF24 #define RID_STATUS 0xFF50 #define RID_BEACON_HST 0xFF51 #define RID_BUSY_HST 0xFF52 #define 
RID_RETRIES_HST 0xFF53 #define RID_UNKNOWN54 0xFF54 #define RID_UNKNOWN55 0xFF55 #define RID_UNKNOWN56 0xFF56 #define RID_MIC 0xFF57 #define RID_STATS16 0xFF60 #define RID_STATS16DELTA 0xFF61 #define RID_STATS16DELTACLEAR 0xFF62 #define RID_STATS 0xFF68 #define RID_STATSDELTA 0xFF69 #define RID_STATSDELTACLEAR 0xFF6A #define RID_ECHOTEST_RID 0xFF70 #define RID_ECHOTEST_RESULTS 0xFF71 #define RID_BSSLISTFIRST 0xFF72 #define RID_BSSLISTNEXT 0xFF73 #define RID_WPA_BSSLISTFIRST 0xFF74 #define RID_WPA_BSSLISTNEXT 0xFF75 typedef struct { u16 cmd; u16 parm0; u16 parm1; u16 parm2; } Cmd; typedef struct { u16 status; u16 rsp0; u16 rsp1; u16 rsp2; } Resp; /* * Rids and endian-ness: The Rids will always be in cpu endian, since * this all the patches from the big-endian guys end up doing that. * so all rid access should use the read/writeXXXRid routines. */ /* This structure came from an email sent to me from an engineer at aironet for inclusion into this driver */ typedef struct WepKeyRid WepKeyRid; struct WepKeyRid { __le16 len; __le16 kindex; u8 mac[ETH_ALEN]; __le16 klen; u8 key[16]; } __packed; /* These structures are from the Aironet's PC4500 Developers Manual */ typedef struct Ssid Ssid; struct Ssid { __le16 len; u8 ssid[32]; } __packed; typedef struct SsidRid SsidRid; struct SsidRid { __le16 len; Ssid ssids[3]; } __packed; typedef struct ModulationRid ModulationRid; struct ModulationRid { __le16 len; __le16 modulation; #define MOD_DEFAULT cpu_to_le16(0) #define MOD_CCK cpu_to_le16(1) #define MOD_MOK cpu_to_le16(2) } __packed; typedef struct ConfigRid ConfigRid; struct ConfigRid { __le16 len; /* sizeof(ConfigRid) */ __le16 opmode; /* operating mode */ #define MODE_STA_IBSS cpu_to_le16(0) #define MODE_STA_ESS cpu_to_le16(1) #define MODE_AP cpu_to_le16(2) #define MODE_AP_RPTR cpu_to_le16(3) #define MODE_CFG_MASK cpu_to_le16(0xff) #define MODE_ETHERNET_HOST cpu_to_le16(0<<8) /* rx payloads converted */ #define MODE_LLC_HOST cpu_to_le16(1<<8) /* rx payloads left as is */ 
#define MODE_AIRONET_EXTEND cpu_to_le16(1<<9) /* enable Aironet extenstions */ #define MODE_AP_INTERFACE cpu_to_le16(1<<10) /* enable ap interface extensions */ #define MODE_ANTENNA_ALIGN cpu_to_le16(1<<11) /* enable antenna alignment */ #define MODE_ETHER_LLC cpu_to_le16(1<<12) /* enable ethernet LLC */ #define MODE_LEAF_NODE cpu_to_le16(1<<13) /* enable leaf node bridge */ #define MODE_CF_POLLABLE cpu_to_le16(1<<14) /* enable CF pollable */ #define MODE_MIC cpu_to_le16(1<<15) /* enable MIC */ __le16 rmode; /* receive mode */ #define RXMODE_BC_MC_ADDR cpu_to_le16(0) #define RXMODE_BC_ADDR cpu_to_le16(1) /* ignore multicasts */ #define RXMODE_ADDR cpu_to_le16(2) /* ignore multicast and broadcast */ #define RXMODE_RFMON cpu_to_le16(3) /* wireless monitor mode */ #define RXMODE_RFMON_ANYBSS cpu_to_le16(4) #define RXMODE_LANMON cpu_to_le16(5) /* lan style monitor -- data packets only */ #define RXMODE_MASK cpu_to_le16(255) #define RXMODE_DISABLE_802_3_HEADER cpu_to_le16(1<<8) /* disables 802.3 header on rx */ #define RXMODE_FULL_MASK (RXMODE_MASK | RXMODE_DISABLE_802_3_HEADER) #define RXMODE_NORMALIZED_RSSI cpu_to_le16(1<<9) /* return normalized RSSI */ __le16 fragThresh; __le16 rtsThres; u8 macAddr[ETH_ALEN]; u8 rates[8]; __le16 shortRetryLimit; __le16 longRetryLimit; __le16 txLifetime; /* in kusec */ __le16 rxLifetime; /* in kusec */ __le16 stationary; __le16 ordering; __le16 u16deviceType; /* for overriding device type */ __le16 cfpRate; __le16 cfpDuration; __le16 _reserved1[3]; /*---------- Scanning/Associating ----------*/ __le16 scanMode; #define SCANMODE_ACTIVE cpu_to_le16(0) #define SCANMODE_PASSIVE cpu_to_le16(1) #define SCANMODE_AIROSCAN cpu_to_le16(2) __le16 probeDelay; /* in kusec */ __le16 probeEnergyTimeout; /* in kusec */ __le16 probeResponseTimeout; __le16 beaconListenTimeout; __le16 joinNetTimeout; __le16 authTimeout; __le16 authType; #define AUTH_OPEN cpu_to_le16(0x1) #define AUTH_ENCRYPT cpu_to_le16(0x101) #define AUTH_SHAREDKEY cpu_to_le16(0x102) 
#define AUTH_ALLOW_UNENCRYPTED cpu_to_le16(0x200) __le16 associationTimeout; __le16 specifiedApTimeout; __le16 offlineScanInterval; __le16 offlineScanDuration; __le16 linkLossDelay; __le16 maxBeaconLostTime; __le16 refreshInterval; #define DISABLE_REFRESH cpu_to_le16(0xFFFF) __le16 _reserved1a[1]; /*---------- Power save operation ----------*/ __le16 powerSaveMode; #define POWERSAVE_CAM cpu_to_le16(0) #define POWERSAVE_PSP cpu_to_le16(1) #define POWERSAVE_PSPCAM cpu_to_le16(2) __le16 sleepForDtims; __le16 listenInterval; __le16 fastListenInterval; __le16 listenDecay; __le16 fastListenDelay; __le16 _reserved2[2]; /*---------- Ap/Ibss config items ----------*/ __le16 beaconPeriod; __le16 atimDuration; __le16 hopPeriod; __le16 channelSet; __le16 channel; __le16 dtimPeriod; __le16 bridgeDistance; __le16 radioID; /*---------- Radio configuration ----------*/ __le16 radioType; #define RADIOTYPE_DEFAULT cpu_to_le16(0) #define RADIOTYPE_802_11 cpu_to_le16(1) #define RADIOTYPE_LEGACY cpu_to_le16(2) u8 rxDiversity; u8 txDiversity; __le16 txPower; #define TXPOWER_DEFAULT 0 __le16 rssiThreshold; #define RSSI_DEFAULT 0 __le16 modulation; #define PREAMBLE_AUTO cpu_to_le16(0) #define PREAMBLE_LONG cpu_to_le16(1) #define PREAMBLE_SHORT cpu_to_le16(2) __le16 preamble; __le16 homeProduct; __le16 radioSpecific; /*---------- Aironet Extensions ----------*/ u8 nodeName[16]; __le16 arlThreshold; __le16 arlDecay; __le16 arlDelay; __le16 _reserved4[1]; /*---------- Aironet Extensions ----------*/ u8 magicAction; #define MAGIC_ACTION_STSCHG 1 #define MAGIC_ACTION_RESUME 2 #define MAGIC_IGNORE_MCAST (1<<8) #define MAGIC_IGNORE_BCAST (1<<9) #define MAGIC_SWITCH_TO_PSP (0<<10) #define MAGIC_STAY_IN_CAM (1<<10) u8 magicControl; __le16 autoWake; } __packed; typedef struct StatusRid StatusRid; struct StatusRid { __le16 len; u8 mac[ETH_ALEN]; __le16 mode; __le16 errorCode; __le16 sigQuality; __le16 SSIDlen; char SSID[32]; char apName[16]; u8 bssid[4][ETH_ALEN]; __le16 beaconPeriod; __le16 
dimPeriod; __le16 atimDuration; __le16 hopPeriod; __le16 channelSet; __le16 channel; __le16 hopsToBackbone; __le16 apTotalLoad; __le16 generatedLoad; __le16 accumulatedArl; __le16 signalQuality; __le16 currentXmitRate; __le16 apDevExtensions; __le16 normalizedSignalStrength; __le16 shortPreamble; u8 apIP[4]; u8 noisePercent; /* Noise percent in last second */ u8 noisedBm; /* Noise dBm in last second */ u8 noiseAvePercent; /* Noise percent in last minute */ u8 noiseAvedBm; /* Noise dBm in last minute */ u8 noiseMaxPercent; /* Highest noise percent in last minute */ u8 noiseMaxdBm; /* Highest noise dbm in last minute */ __le16 load; u8 carrier[4]; __le16 assocStatus; #define STAT_NOPACKETS 0 #define STAT_NOCARRIERSET 10 #define STAT_GOTCARRIERSET 11 #define STAT_WRONGSSID 20 #define STAT_BADCHANNEL 25 #define STAT_BADBITRATES 30 #define STAT_BADPRIVACY 35 #define STAT_APFOUND 40 #define STAT_APREJECTED 50 #define STAT_AUTHENTICATING 60 #define STAT_DEAUTHENTICATED 61 #define STAT_AUTHTIMEOUT 62 #define STAT_ASSOCIATING 70 #define STAT_DEASSOCIATED 71 #define STAT_ASSOCTIMEOUT 72 #define STAT_NOTAIROAP 73 #define STAT_ASSOCIATED 80 #define STAT_LEAPING 90 #define STAT_LEAPFAILED 91 #define STAT_LEAPTIMEDOUT 92 #define STAT_LEAPCOMPLETE 93 } __packed; typedef struct StatsRid StatsRid; struct StatsRid { __le16 len; __le16 spacer; __le32 vals[100]; } __packed; typedef struct APListRid APListRid; struct APListRid { __le16 len; u8 ap[4][ETH_ALEN]; } __packed; typedef struct CapabilityRid CapabilityRid; struct CapabilityRid { __le16 len; char oui[3]; char zero; __le16 prodNum; char manName[32]; char prodName[16]; char prodVer[8]; char factoryAddr[ETH_ALEN]; char aironetAddr[ETH_ALEN]; __le16 radioType; __le16 country; char callid[ETH_ALEN]; char supportedRates[8]; char rxDiversity; char txDiversity; __le16 txPowerLevels[8]; __le16 hardVer; __le16 hardCap; __le16 tempRange; __le16 softVer; __le16 softSubVer; __le16 interfaceVer; __le16 softCap; __le16 bootBlockVer; __le16 
requiredHard; __le16 extSoftCap; } __packed; /* Only present on firmware >= 5.30.17 */ typedef struct BSSListRidExtra BSSListRidExtra; struct BSSListRidExtra { __le16 unknown[4]; u8 fixed[12]; /* WLAN management frame */ u8 iep[624]; } __packed; typedef struct BSSListRid BSSListRid; struct BSSListRid { __le16 len; __le16 index; /* First is 0 and 0xffff means end of list */ #define RADIO_FH 1 /* Frequency hopping radio type */ #define RADIO_DS 2 /* Direct sequence radio type */ #define RADIO_TMA 4 /* Proprietary radio used in old cards (2500) */ __le16 radioType; u8 bssid[ETH_ALEN]; /* Mac address of the BSS */ u8 zero; u8 ssidLen; u8 ssid[32]; __le16 dBm; #define CAP_ESS cpu_to_le16(1<<0) #define CAP_IBSS cpu_to_le16(1<<1) #define CAP_PRIVACY cpu_to_le16(1<<4) #define CAP_SHORTHDR cpu_to_le16(1<<5) __le16 cap; __le16 beaconInterval; u8 rates[8]; /* Same as rates for config rid */ struct { /* For frequency hopping only */ __le16 dwell; u8 hopSet; u8 hopPattern; u8 hopIndex; u8 fill; } fh; __le16 dsChannel; __le16 atimWindow; /* Only present on firmware >= 5.30.17 */ BSSListRidExtra extra; } __packed; typedef struct { BSSListRid bss; struct list_head list; } BSSListElement; typedef struct tdsRssiEntry tdsRssiEntry; struct tdsRssiEntry { u8 rssipct; u8 rssidBm; } __packed; typedef struct tdsRssiRid tdsRssiRid; struct tdsRssiRid { u16 len; tdsRssiEntry x[256]; } __packed; typedef struct MICRid MICRid; struct MICRid { __le16 len; __le16 state; __le16 multicastValid; u8 multicast[16]; __le16 unicastValid; u8 unicast[16]; } __packed; typedef struct MICBuffer MICBuffer; struct MICBuffer { __be16 typelen; union { u8 snap[8]; struct { u8 dsap; u8 ssap; u8 control; u8 orgcode[3]; u8 fieldtype[2]; } llc; } u; __be32 mic; __be32 seq; } __packed; typedef struct { u8 da[ETH_ALEN]; u8 sa[ETH_ALEN]; } etherHead; #define TXCTL_TXOK (1<<1) /* report if tx is ok */ #define TXCTL_TXEX (1<<2) /* report if tx fails */ #define TXCTL_802_3 (0<<3) /* 802.3 packet */ #define TXCTL_802_11 
(1<<3) /* 802.11 mac packet */ #define TXCTL_ETHERNET (0<<4) /* payload has ethertype */ #define TXCTL_LLC (1<<4) /* payload is llc */ #define TXCTL_RELEASE (0<<5) /* release after completion */ #define TXCTL_NORELEASE (1<<5) /* on completion returns to host */ #define BUSY_FID 0x10000 #ifdef CISCO_EXT #define AIROMAGIC 0xa55a /* Warning : SIOCDEVPRIVATE may disapear during 2.5.X - Jean II */ #ifdef SIOCIWFIRSTPRIV #ifdef SIOCDEVPRIVATE #define AIROOLDIOCTL SIOCDEVPRIVATE #define AIROOLDIDIFC AIROOLDIOCTL + 1 #endif /* SIOCDEVPRIVATE */ #else /* SIOCIWFIRSTPRIV */ #define SIOCIWFIRSTPRIV SIOCDEVPRIVATE #endif /* SIOCIWFIRSTPRIV */ /* This may be wrong. When using the new SIOCIWFIRSTPRIV range, we probably * should use only "GET" ioctls (last bit set to 1). "SET" ioctls are root * only and don't return the modified struct ifreq to the application which * is usually a problem. - Jean II */ #define AIROIOCTL SIOCIWFIRSTPRIV #define AIROIDIFC AIROIOCTL + 1 /* Ioctl constants to be used in airo_ioctl.command */ #define AIROGCAP 0 // Capability rid #define AIROGCFG 1 // USED A LOT #define AIROGSLIST 2 // System ID list #define AIROGVLIST 3 // List of specified AP's #define AIROGDRVNAM 4 // NOTUSED #define AIROGEHTENC 5 // NOTUSED #define AIROGWEPKTMP 6 #define AIROGWEPKNV 7 #define AIROGSTAT 8 #define AIROGSTATSC32 9 #define AIROGSTATSD32 10 #define AIROGMICRID 11 #define AIROGMICSTATS 12 #define AIROGFLAGS 13 #define AIROGID 14 #define AIRORRID 15 #define AIRORSWVERSION 17 /* Leave gap of 40 commands after AIROGSTATSD32 for future */ #define AIROPCAP AIROGSTATSD32 + 40 #define AIROPVLIST AIROPCAP + 1 #define AIROPSLIST AIROPVLIST + 1 #define AIROPCFG AIROPSLIST + 1 #define AIROPSIDS AIROPCFG + 1 #define AIROPAPLIST AIROPSIDS + 1 #define AIROPMACON AIROPAPLIST + 1 /* Enable mac */ #define AIROPMACOFF AIROPMACON + 1 /* Disable mac */ #define AIROPSTCLR AIROPMACOFF + 1 #define AIROPWEPKEY AIROPSTCLR + 1 #define AIROPWEPKEYNV AIROPWEPKEY + 1 #define AIROPLEAPPWD 
AIROPWEPKEYNV + 1 #define AIROPLEAPUSR AIROPLEAPPWD + 1 /* Flash codes */ #define AIROFLSHRST AIROPWEPKEYNV + 40 #define AIROFLSHGCHR AIROFLSHRST + 1 #define AIROFLSHSTFL AIROFLSHGCHR + 1 #define AIROFLSHPCHR AIROFLSHSTFL + 1 #define AIROFLPUTBUF AIROFLSHPCHR + 1 #define AIRORESTART AIROFLPUTBUF + 1 #define FLASHSIZE 32768 #define AUXMEMSIZE (256 * 1024) typedef struct aironet_ioctl { unsigned short command; // What to do unsigned short len; // Len of data unsigned short ridnum; // rid number unsigned char __user *data; // d-data } aironet_ioctl; static const char swversion[] = "2.1"; #endif /* CISCO_EXT */ #define NUM_MODULES 2 #define MIC_MSGLEN_MAX 2400 #define EMMH32_MSGLEN_MAX MIC_MSGLEN_MAX #define AIRO_DEF_MTU 2312 typedef struct { u32 size; // size u8 enabled; // MIC enabled or not u32 rxSuccess; // successful packets received u32 rxIncorrectMIC; // pkts dropped due to incorrect MIC comparison u32 rxNotMICed; // pkts dropped due to not being MIC'd u32 rxMICPlummed; // pkts dropped due to not having a MIC plummed u32 rxWrongSequence; // pkts dropped due to sequence number violation u32 reserve[32]; } mic_statistics; typedef struct { u32 coeff[((EMMH32_MSGLEN_MAX)+3)>>2]; u64 accum; // accumulated mic, reduced to u32 in final() int position; // current position (byte offset) in message union { u8 d8[4]; __be32 d32; } part; // saves partial message word across update() calls } emmh32_context; typedef struct { emmh32_context seed; // Context - the seed u32 rx; // Received sequence number u32 tx; // Tx sequence number u32 window; // Start of window u8 valid; // Flag to say if context is valid or not u8 key[16]; } miccntx; typedef struct { miccntx mCtx; // Multicast context miccntx uCtx; // Unicast context } mic_module; typedef struct { unsigned int rid: 16; unsigned int len: 15; unsigned int valid: 1; dma_addr_t host_addr; } Rid; typedef struct { unsigned int offset: 15; unsigned int eoc: 1; unsigned int len: 15; unsigned int valid: 1; dma_addr_t host_addr; } 
TxFid; struct rx_hdr { __le16 status, len; u8 rssi[2]; u8 rate; u8 freq; __le16 tmp[4]; } __packed; typedef struct { unsigned int ctl: 15; unsigned int rdy: 1; unsigned int len: 15; unsigned int valid: 1; dma_addr_t host_addr; } RxFid; /* * Host receive descriptor */ typedef struct { unsigned char __iomem *card_ram_off; /* offset into card memory of the desc */ RxFid rx_desc; /* card receive descriptor */ char *virtual_host_addr; /* virtual address of host receive buffer */ int pending; } HostRxDesc; /* * Host transmit descriptor */ typedef struct { unsigned char __iomem *card_ram_off; /* offset into card memory of the desc */ TxFid tx_desc; /* card transmit descriptor */ char *virtual_host_addr; /* virtual address of host receive buffer */ int pending; } HostTxDesc; /* * Host RID descriptor */ typedef struct { unsigned char __iomem *card_ram_off; /* offset into card memory of the descriptor */ Rid rid_desc; /* card RID descriptor */ char *virtual_host_addr; /* virtual address of host receive buffer */ } HostRidDesc; typedef struct { u16 sw0; u16 sw1; u16 status; u16 len; #define HOST_SET (1 << 0) #define HOST_INT_TX (1 << 1) /* Interrupt on successful TX */ #define HOST_INT_TXERR (1 << 2) /* Interrupt on unseccessful TX */ #define HOST_LCC_PAYLOAD (1 << 4) /* LLC payload, 0 = Ethertype */ #define HOST_DONT_RLSE (1 << 5) /* Don't release buffer when done */ #define HOST_DONT_RETRY (1 << 6) /* Don't retry trasmit */ #define HOST_CLR_AID (1 << 7) /* clear AID failure */ #define HOST_RTS (1 << 9) /* Force RTS use */ #define HOST_SHORT (1 << 10) /* Do short preamble */ u16 ctl; u16 aid; u16 retries; u16 fill; } TxCtlHdr; typedef struct { u16 ctl; u16 duration; char addr1[6]; char addr2[6]; char addr3[6]; u16 seq; char addr4[6]; } WifiHdr; typedef struct { TxCtlHdr ctlhdr; u16 fill1; u16 fill2; WifiHdr wifihdr; u16 gaplen; u16 status; } WifiCtlHdr; static WifiCtlHdr wifictlhdr8023 = { .ctlhdr = { .ctl = HOST_DONT_RLSE, } }; // A few details needed for WEP (Wireless 
Equivalent Privacy) #define MAX_KEY_SIZE 13 // 128 (?) bits #define MIN_KEY_SIZE 5 // 40 bits RC4 - WEP typedef struct wep_key_t { u16 len; u8 key[16]; /* 40-bit and 104-bit keys */ } wep_key_t; /* List of Wireless Handlers (new API) */ static const struct iw_handler_def airo_handler_def; static const char version[] = "airo.c 0.6 (Ben Reed & Javier Achirica)"; struct airo_info; static int get_dec_u16( char *buffer, int *start, int limit ); static void OUT4500( struct airo_info *, u16 register, u16 value ); static unsigned short IN4500( struct airo_info *, u16 register ); static u16 setup_card(struct airo_info*, u8 *mac, int lock); static int enable_MAC(struct airo_info *ai, int lock); static void disable_MAC(struct airo_info *ai, int lock); static void enable_interrupts(struct airo_info*); static void disable_interrupts(struct airo_info*); static u16 issuecommand(struct airo_info*, Cmd *pCmd, Resp *pRsp); static int bap_setup(struct airo_info*, u16 rid, u16 offset, int whichbap); static int aux_bap_read(struct airo_info*, __le16 *pu16Dst, int bytelen, int whichbap); static int fast_bap_read(struct airo_info*, __le16 *pu16Dst, int bytelen, int whichbap); static int bap_write(struct airo_info*, const __le16 *pu16Src, int bytelen, int whichbap); static int PC4500_accessrid(struct airo_info*, u16 rid, u16 accmd); static int PC4500_readrid(struct airo_info*, u16 rid, void *pBuf, int len, int lock); static int PC4500_writerid(struct airo_info*, u16 rid, const void *pBuf, int len, int lock); static int do_writerid( struct airo_info*, u16 rid, const void *rid_data, int len, int dummy ); static u16 transmit_allocate(struct airo_info*, int lenPayload, int raw); static int transmit_802_3_packet(struct airo_info*, int len, char *pPacket); static int transmit_802_11_packet(struct airo_info*, int len, char *pPacket); static int mpi_send_packet (struct net_device *dev); static void mpi_unmap_card(struct pci_dev *pci); static void mpi_receive_802_3(struct airo_info *ai); static 
void mpi_receive_802_11(struct airo_info *ai); static int waitbusy (struct airo_info *ai); static irqreturn_t airo_interrupt( int irq, void* dev_id); static int airo_thread(void *data); static void timer_func( struct net_device *dev ); static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static struct iw_statistics *airo_get_wireless_stats (struct net_device *dev); static void airo_read_wireless_stats (struct airo_info *local); #ifdef CISCO_EXT static int readrids(struct net_device *dev, aironet_ioctl *comp); static int writerids(struct net_device *dev, aironet_ioctl *comp); static int flashcard(struct net_device *dev, aironet_ioctl *comp); #endif /* CISCO_EXT */ static void micinit(struct airo_info *ai); static int micsetup(struct airo_info *ai); static int encapsulate(struct airo_info *ai, etherHead *pPacket, MICBuffer *buffer, int len); static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *pPacket, u16 payLen); static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi); static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm); static void airo_networks_free(struct airo_info *ai); struct airo_info { struct net_device *dev; struct list_head dev_list; /* Note, we can have MAX_FIDS outstanding. FIDs are 16-bits, so we use the high bit to mark whether it is in use. 
*/ #define MAX_FIDS 6 #define MPI_MAX_FIDS 1 u32 fids[MAX_FIDS]; ConfigRid config; char keyindex; // Used with auto wep char defindex; // Used with auto wep struct proc_dir_entry *proc_entry; spinlock_t aux_lock; #define FLAG_RADIO_OFF 0 /* User disabling of MAC */ #define FLAG_RADIO_DOWN 1 /* ifup/ifdown disabling of MAC */ #define FLAG_RADIO_MASK 0x03 #define FLAG_ENABLED 2 #define FLAG_ADHOC 3 /* Needed by MIC */ #define FLAG_MIC_CAPABLE 4 #define FLAG_UPDATE_MULTI 5 #define FLAG_UPDATE_UNI 6 #define FLAG_802_11 7 #define FLAG_PROMISC 8 /* IFF_PROMISC 0x100 - include/linux/if.h */ #define FLAG_PENDING_XMIT 9 #define FLAG_PENDING_XMIT11 10 #define FLAG_MPI 11 #define FLAG_REGISTERED 12 #define FLAG_COMMIT 13 #define FLAG_RESET 14 #define FLAG_FLASHING 15 #define FLAG_WPA_CAPABLE 16 unsigned long flags; #define JOB_DIE 0 #define JOB_XMIT 1 #define JOB_XMIT11 2 #define JOB_STATS 3 #define JOB_PROMISC 4 #define JOB_MIC 5 #define JOB_EVENT 6 #define JOB_AUTOWEP 7 #define JOB_WSTATS 8 #define JOB_SCAN_RESULTS 9 unsigned long jobs; int (*bap_read)(struct airo_info*, __le16 *pu16Dst, int bytelen, int whichbap); unsigned short *flash; tdsRssiEntry *rssi; struct task_struct *list_bss_task; struct task_struct *airo_thread_task; struct semaphore sem; wait_queue_head_t thr_wait; unsigned long expires; struct { struct sk_buff *skb; int fid; } xmit, xmit11; struct net_device *wifidev; struct iw_statistics wstats; // wireless stats unsigned long scan_timeout; /* Time scan should be read */ struct iw_spy_data spy_data; struct iw_public_data wireless_data; /* MIC stuff */ struct crypto_cipher *tfm; mic_module mod[2]; mic_statistics micstats; HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors HostTxDesc txfids[MPI_MAX_FIDS]; HostRidDesc config_desc; unsigned long ridbus; // phys addr of config_desc struct sk_buff_head txq;// tx queue used by mpi350 code struct pci_dev *pci; unsigned char __iomem *pcimem; unsigned char __iomem *pciaux; unsigned char *shared; 
dma_addr_t shared_dma; pm_message_t power; SsidRid *SSID; APListRid *APList; #define PCI_SHARED_LEN 2*MPI_MAX_FIDS*PKTSIZE+RIDSIZE char proc_name[IFNAMSIZ]; int wep_capable; int max_wep_idx; /* WPA-related stuff */ unsigned int bssListFirst; unsigned int bssListNext; unsigned int bssListRidLen; struct list_head network_list; struct list_head network_free_list; BSSListElement *networks; }; static inline int bap_read(struct airo_info *ai, __le16 *pu16Dst, int bytelen, int whichbap) { return ai->bap_read(ai, pu16Dst, bytelen, whichbap); } static int setup_proc_entry( struct net_device *dev, struct airo_info *apriv ); static int takedown_proc_entry( struct net_device *dev, struct airo_info *apriv ); static int cmdreset(struct airo_info *ai); static int setflashmode (struct airo_info *ai); static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime); static int flashputbuf(struct airo_info *ai); static int flashrestart(struct airo_info *ai,struct net_device *dev); #define airo_print(type, name, fmt, args...) \ printk(type DRV_NAME "(%s): " fmt "\n", name, ##args) #define airo_print_info(name, fmt, args...) \ airo_print(KERN_INFO, name, fmt, ##args) #define airo_print_dbg(name, fmt, args...) \ airo_print(KERN_DEBUG, name, fmt, ##args) #define airo_print_warn(name, fmt, args...) \ airo_print(KERN_WARNING, name, fmt, ##args) #define airo_print_err(name, fmt, args...) 
\
	airo_print(KERN_ERR, name, fmt, ##args)

/* Shorthand for the firmware-flash staging buffer hung off a net_device. */
#define AIRO_FLASH(dev) (((struct airo_info *)dev->ml_priv)->flash)

/***********************************************************************
 *                              MIC ROUTINES                           *
 ***********************************************************************
 */

static int RxSeqValid(struct airo_info *ai, miccntx *context, int mcast, u32 micSeq);
static void MoveWindow(miccntx *context, u32 micSeq);
static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
			   struct crypto_cipher *tfm);
static void emmh32_init(emmh32_context *context);
static void emmh32_update(emmh32_context *context, u8 *pOctets, int len);
static void emmh32_final(emmh32_context *context, u8 digest[4]);
static int flashpchar(struct airo_info *ai, int byte, int dwelltime);

/*
 * age_mic_context - install a (possibly) new MIC key into @cur.
 *
 * If @cur is already valid and holds the same key, nothing changes.
 * Otherwise the current context is saved into @old, the new key is
 * copied in, the rx/tx sequence state is reset, and the EMMH32 seed
 * is derived from the key via @tfm.
 */
static void age_mic_context(miccntx *cur, miccntx *old, u8 *key,
			    int key_len, struct crypto_cipher *tfm)
{
	/* If the current MIC context is valid and its key is the same as
	 * the MIC register, there's nothing to do. */
	if (cur->valid && (memcmp(cur->key, key, key_len) == 0))
		return;

	/* Age current mic Context */
	memcpy(old, cur, sizeof(*cur));

	/* Initialize new context */
	memcpy(cur->key, key, key_len);
	cur->window = 33;	/* Window always points to the middle */
	cur->rx     = 0;	/* Rx Sequence numbers */
	cur->tx     = 0;	/* Tx sequence numbers */
	cur->valid  = 1;	/* Key is now valid */

	/* Give key to mic seed */
	emmh32_setseed(&cur->seed, key, key_len, tfm);
}

/* micinit - Initialize mic seed.
 *
 * Reads RID_MIC from the card, releases ai->sem (the caller is expected
 * to hold it), records whether MIC is enabled, and refreshes the
 * multicast/unicast key contexts via age_mic_context(). */
static void micinit(struct airo_info *ai)
{
	MICRid mic_rid;

	clear_bit(JOB_MIC, &ai->jobs);
	PC4500_readrid(ai, RID_MIC, &mic_rid, sizeof(mic_rid), 0);
	up(&ai->sem);

	ai->micstats.enabled = (le16_to_cpu(mic_rid.state) & 0x00FF) ? 1 : 0;

	if (!ai->micstats.enabled) {
		/* So next time we have a valid key and mic is enabled, we will
		 * update the sequence number if the key is the same as before.
		 */
		ai->mod[0].uCtx.valid = 0;
		ai->mod[0].mCtx.valid = 0;
		return;
	}

	if (mic_rid.multicastValid) {
		age_mic_context(&ai->mod[0].mCtx, &ai->mod[1].mCtx,
				mic_rid.multicast, sizeof(mic_rid.multicast),
				ai->tfm);
	}

	if (mic_rid.unicastValid) {
		age_mic_context(&ai->mod[0].uCtx, &ai->mod[1].uCtx,
				mic_rid.unicast, sizeof(mic_rid.unicast),
				ai->tfm);
	}
}

/* micsetup - Get ready for business.
 *
 * Allocates the AES cipher used to derive EMMH32 seeds (once per
 * adapter) and zeroes both MIC context pairs.  Returns SUCCESS, or
 * ERROR if the cipher cannot be loaded. */
static int micsetup(struct airo_info *ai)
{
	int i;

	if (ai->tfm == NULL)
		ai->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);

	if (IS_ERR(ai->tfm)) {
		airo_print_err(ai->dev->name, "failed to load transform for AES");
		ai->tfm = NULL;
		return ERROR;
	}

	for (i = 0; i < NUM_MODULES; i++) {
		memset(&ai->mod[i].mCtx, 0, sizeof(miccntx));
		memset(&ai->mod[i].uCtx, 0, sizeof(miccntx));
	}
	return SUCCESS;
}

/* SNAP header that marks a MIC'd frame (Cisco OUI 00:40:96, type 0x0002). */
static const u8 micsnap[] = {0xAA, 0xAA, 0x03, 0x00, 0x40, 0x96, 0x00, 0x02};

/*===========================================================================
 * Description: Mic a packet
 *
 * Inputs: etherHead * pointer to an 802.3 frame
 *
 * Returns: BOOLEAN if successful, otherwise false.
 *          PacketTxLen will be updated with the mic'd packets size.
 *
 * Caveats: It is assumed that the frame buffer will already
 *          be big enough to hold the largest mic message possible.
 *          (No memory allocation is done here).
 * Author:  sbraneky (10/15/01)
 *          Merciless hacks by rwilcher (1/14/02)
 */
static int encapsulate(struct airo_info *ai, etherHead *frame,
		       MICBuffer *mic, int payLen)
{
	miccntx *context;

	/* Determine correct context.
	 * If not adhoc, always use unicast key. */
	if (test_bit(FLAG_ADHOC, &ai->flags) && (frame->da[0] & 0x1))
		context = &ai->mod[0].mCtx;
	else
		context = &ai->mod[0].uCtx;

	if (!context->valid)
		return ERROR;

	mic->typelen = htons(payLen + 16);	/* Length of Mic'd packet */

	memcpy(&mic->u.snap, micsnap, sizeof(micsnap));	/* Add Snap */

	/* Add Tx sequence */
	mic->seq = htonl(context->tx);
	context->tx += 2;

	emmh32_init(&context->seed);	/* Mic the packet */
	emmh32_update(&context->seed, frame->da, ETH_ALEN * 2);		/* DA, SA */
	emmh32_update(&context->seed, (u8 *)&mic->typelen, 10);		/* Type/Length and Snap */
	emmh32_update(&context->seed, (u8 *)&mic->seq, sizeof(mic->seq)); /* SEQ */
	emmh32_update(&context->seed, (u8 *)(frame + 1), payLen);	/* payload */
	emmh32_final(&context->seed, (u8 *)&mic->mic);

	/* New Type/length ?????????? */
	mic->typelen = 0;	/* Let NIC know it could be an oversized packet */
	return SUCCESS;
}

/* Classification of why a received MIC'd frame was rejected; used only
 * to decide which micstats counter to bump in decapsulate(). */
typedef enum {
	NONE,
	NOMIC,
	NOMICPLUMMED,
	SEQUENCE,
	INCORRECTMIC,
} mic_error;

/*===========================================================================
 * Description: Decapsulates a MIC'd packet and returns the 802.3 packet
 *              (removes the MIC stuff) if packet is a valid packet.
 *
 * Inputs: etherHead pointer to the 802.3 packet
 *
 * Returns: BOOLEAN - TRUE if packet should be dropped otherwise FALSE
 *
 * Author: sbraneky (10/15/01)
 *         Merciless hacks by rwilcher (1/14/02)
 *---------------------------------------------------------------------------
 */
static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16 payLen)
{
	int i;
	u32 micSEQ;
	miccntx *context;
	u8 digest[4];
	mic_error micError = NONE;

	/* Check if the packet is a Mic'd packet */
	if (!ai->micstats.enabled) {
		/* No Mic set or Mic OFF but we received a MIC'd packet. */
		if (memcmp((u8 *)eth + 14, micsnap, sizeof(micsnap)) == 0) {
			ai->micstats.rxMICPlummed++;
			return ERROR;
		}
		return SUCCESS;
	}

	/* EAPOL (0x888E) frames bypass the MIC check */
	if (ntohs(mic->typelen) == 0x888E)
		return SUCCESS;

	if (memcmp(mic->u.snap, micsnap, sizeof(micsnap)) != 0) {
		/* Mic enabled but packet isn't Mic'd */
		ai->micstats.rxMICPlummed++;
		return ERROR;
	}

	micSEQ = ntohl(mic->seq);	/* store SEQ as CPU order */

	/* At this point we have a mic'd packet and mic is enabled;
	 * now do the mic error checking. */

	/* Receive seq must be odd */
	if ((micSEQ & 1) == 0) {
		ai->micstats.rxWrongSequence++;
		return ERROR;
	}

	/* Try both the current and the aged context (NUM_MODULES == 2) */
	for (i = 0; i < NUM_MODULES; i++) {
		int mcast = eth->da[0] & 1;
		/* Determine proper context */
		context = mcast ? &ai->mod[i].mCtx : &ai->mod[i].uCtx;

		/* Make sure context is valid */
		if (!context->valid) {
			if (i == 0)
				micError = NOMICPLUMMED;
			continue;
		}
		/* DeMic it */
		if (!mic->typelen)
			mic->typelen = htons(payLen + sizeof(MICBuffer) - 2);

		emmh32_init(&context->seed);
		emmh32_update(&context->seed, eth->da, ETH_ALEN * 2);
		emmh32_update(&context->seed, (u8 *)&mic->typelen,
			      sizeof(mic->typelen) + sizeof(mic->u.snap));
		emmh32_update(&context->seed, (u8 *)&mic->seq, sizeof(mic->seq));
		emmh32_update(&context->seed, (u8 *)(eth + 1), payLen);
		/* Calculate MIC */
		emmh32_final(&context->seed, digest);

		if (memcmp(digest, &mic->mic, 4)) { /* Make sure the mics match */
			/* Invalid Mic */
			if (i == 0)
				micError = INCORRECTMIC;
			continue;
		}

		/* Check Sequence number if mics pass */
		if (RxSeqValid(ai, context, mcast, micSEQ) == SUCCESS) {
			ai->micstats.rxSuccess++;
			return SUCCESS;
		}
		if (i == 0)
			micError = SEQUENCE;
	}

	/* Update statistics */
	switch (micError) {
	case NOMICPLUMMED:
		ai->micstats.rxMICPlummed++;
		break;
	case SEQUENCE:
		ai->micstats.rxWrongSequence++;
		break;
	case INCORRECTMIC:
		ai->micstats.rxIncorrectMIC++;
		break;
	case NONE:
		break;
	case NOMIC:
		break;
	}
	return ERROR;
}

/*===========================================================================
 * Description: Checks the Rx Seq number to make sure it is valid
 *              and hasn't already been received
 *
 * Inputs:
 *         miccntx - mic context to check seq against
 *         micSeq  - the Mic seq number
 *
 * Returns: TRUE if valid otherwise FALSE.
 *
 * Author: sbraneky (10/15/01)
 *         Merciless hacks by rwilcher (1/14/02)
 *---------------------------------------------------------------------------
 */
static int RxSeqValid(struct airo_info *ai, miccntx *context, int mcast, u32 micSeq)
{
	u32 seq, index;

	/* Allow for the ap being rebooted - if it is then use the next
	 * sequence number of the current sequence number - might go backwards */
	if (mcast) {
		if (test_bit(FLAG_UPDATE_MULTI, &ai->flags)) {
			clear_bit(FLAG_UPDATE_MULTI, &ai->flags);
			context->window = (micSeq > 33) ? micSeq : 33;
			context->rx = 0;	/* Reset rx */
		}
	} else if (test_bit(FLAG_UPDATE_UNI, &ai->flags)) {
		clear_bit(FLAG_UPDATE_UNI, &ai->flags);
		context->window = (micSeq > 33) ? micSeq : 33;	/* Move window */
		context->rx = 0;	/* Reset rx */
	}

	/* Make sequence number relative to START of window */
	seq = micSeq - (context->window - 33);

	/* Too old of a SEQ number to check. */
	if ((s32)seq < 0)
		return ERROR;

	if (seq > 64) {
		/* Window is infinite forward */
		MoveWindow(context, micSeq);
		return SUCCESS;
	}

	/* We are in the window. Now check the context rx bit to see
	 * if it was already sent. */
	seq >>= 1;		/* divide by 2 because we only have odd numbers */
	index = 1 << seq;	/* Get an index number */

	if (!(context->rx & index)) {
		/* micSEQ falls inside the window.
		 * Add sequence number to the list of received numbers. */
		context->rx |= index;
		MoveWindow(context, micSeq);
		return SUCCESS;
	}
	return ERROR;
}

/* Slide the 64-entry replay window forward so @micSeq becomes its
 * midpoint, shifting already-seen bits out of context->rx. */
static void MoveWindow(miccntx *context, u32 micSeq)
{
	u32 shift;

	/* Move window if seq greater than the middle of the window */
	if (micSeq > context->window) {
		shift = (micSeq - context->window) >> 1;

		/* Shift out old */
		if (shift < 32)
			context->rx >>= shift;
		else
			context->rx = 0;

		context->window = micSeq;	/* Move window */
	}
}

/*==============================================*/
/*========== EMMH ROUTINES  ====================*/
/*==============================================*/

/* mic accumulate
 * NOTE: deliberately captures the local 'coeff_position' of the
 * enclosing function - only usable inside emmh32_update()/_final(). */
#define MIC_ACCUM(val) \
	context->accum += (u64)(val) * context->coeff[coeff_position++];

/* Shared AES-CTR counter block.
 * NOTE(review): file-scope static, so emmh32_setseed() is not
 * reentrant - presumably serialized by ai->sem; confirm at callers. */
static unsigned char aes_counter[16];

/* expand the key to fill the MMH coefficient array */
static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
			   struct crypto_cipher *tfm)
{
	/* take the keying material, expand if necessary, truncate at 16-bytes */
	/* run through AES counter mode to generate context->coeff[] */
	int i, j;
	u32 counter;
	u8 *cipher, plain[16];

	crypto_cipher_setkey(tfm, pkey, 16);
	counter = 0;
	for (i = 0; i < ARRAY_SIZE(context->coeff); ) {
		/* little-endian counter in the last 4 bytes of the block */
		aes_counter[15] = (u8)(counter >> 0);
		aes_counter[14] = (u8)(counter >> 8);
		aes_counter[13] = (u8)(counter >> 16);
		aes_counter[12] = (u8)(counter >> 24);
		counter++;
		memcpy(plain, aes_counter, 16);
		crypto_cipher_encrypt_one(tfm, plain, plain);
		cipher = plain;
		for (j = 0; (j < 16) && (i < ARRAY_SIZE(context->coeff)); ) {
			context->coeff[i++] = ntohl(*(__be32 *)&cipher[j]);
			j += 4;
		}
	}
}

/* prepare for calculation of a new mic */
static void emmh32_init(emmh32_context *context)
{
	/* prepare for new mic calculation */
	context->accum = 0;
	context->position = 0;
}

/* add some bytes to the mic calculation */
static void emmh32_update(emmh32_context *context, u8 *pOctets, int len)
{
	int coeff_position, byte_position;

	if (len == 0)
		return;

	coeff_position = context->position >> 2;

	/* deal with partial 32-bit word left over from last update */
byte_position = context->position & 3; if (byte_position) { /* have a partial word in part to deal with */ do { if (len == 0) return; context->part.d8[byte_position++] = *pOctets++; context->position++; len--; } while (byte_position < 4); MIC_ACCUM(ntohl(context->part.d32)); } /* deal with full 32-bit words */ while (len >= 4) { MIC_ACCUM(ntohl(*(__be32 *)pOctets)); context->position += 4; pOctets += 4; len -= 4; } /* deal with partial 32-bit word that will be left over from this update */ byte_position = 0; while (len > 0) { context->part.d8[byte_position++] = *pOctets++; context->position++; len--; } } /* mask used to zero empty bytes for final partial word */ static u32 mask32[4] = { 0x00000000L, 0xFF000000L, 0xFFFF0000L, 0xFFFFFF00L }; /* calculate the mic */ static void emmh32_final(emmh32_context *context, u8 digest[4]) { int coeff_position, byte_position; u32 val; u64 sum, utmp; s64 stmp; coeff_position = context->position >> 2; /* deal with partial 32-bit word left over from last update */ byte_position = context->position & 3; if (byte_position) { /* have a partial word in part to deal with */ val = ntohl(context->part.d32); MIC_ACCUM(val & mask32[byte_position]); /* zero empty bytes */ } /* reduce the accumulated u64 to a 32-bit MIC */ sum = context->accum; stmp = (sum & 0xffffffffLL) - ((sum >> 32) * 15); utmp = (stmp & 0xffffffffLL) - ((stmp >> 32) * 15); sum = utmp & 0xffffffffLL; if (utmp > 0x10000000fLL) sum -= 15; val = (u32)sum; digest[0] = (val>>24) & 0xFF; digest[1] = (val>>16) & 0xFF; digest[2] = (val>>8) & 0xFF; digest[3] = val & 0xFF; } static int readBSSListRid(struct airo_info *ai, int first, BSSListRid *list) { Cmd cmd; Resp rsp; if (first == 1) { if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN; memset(&cmd, 0, sizeof(cmd)); cmd.cmd=CMD_LISTBSS; if (down_interruptible(&ai->sem)) return -ERESTARTSYS; ai->list_bss_task = current; issuecommand(ai, &cmd, &rsp); up(&ai->sem); /* Let the command take effect */ schedule_timeout_uninterruptible(3 
* HZ); ai->list_bss_task = NULL; } return PC4500_readrid(ai, first ? ai->bssListFirst : ai->bssListNext, list, ai->bssListRidLen, 1); } static int readWepKeyRid(struct airo_info *ai, WepKeyRid *wkr, int temp, int lock) { return PC4500_readrid(ai, temp ? RID_WEP_TEMP : RID_WEP_PERM, wkr, sizeof(*wkr), lock); } static int writeWepKeyRid(struct airo_info *ai, WepKeyRid *wkr, int perm, int lock) { int rc; rc = PC4500_writerid(ai, RID_WEP_TEMP, wkr, sizeof(*wkr), lock); if (rc!=SUCCESS) airo_print_err(ai->dev->name, "WEP_TEMP set %x", rc); if (perm) { rc = PC4500_writerid(ai, RID_WEP_PERM, wkr, sizeof(*wkr), lock); if (rc!=SUCCESS) airo_print_err(ai->dev->name, "WEP_PERM set %x", rc); } return rc; } static int readSsidRid(struct airo_info*ai, SsidRid *ssidr) { return PC4500_readrid(ai, RID_SSID, ssidr, sizeof(*ssidr), 1); } static int writeSsidRid(struct airo_info*ai, SsidRid *pssidr, int lock) { return PC4500_writerid(ai, RID_SSID, pssidr, sizeof(*pssidr), lock); } static int readConfigRid(struct airo_info *ai, int lock) { int rc; ConfigRid cfg; if (ai->config.len) return SUCCESS; rc = PC4500_readrid(ai, RID_ACTUALCONFIG, &cfg, sizeof(cfg), lock); if (rc != SUCCESS) return rc; ai->config = cfg; return SUCCESS; } static inline void checkThrottle(struct airo_info *ai) { int i; /* Old hardware had a limit on encryption speed */ if (ai->config.authType != AUTH_OPEN && maxencrypt) { for(i=0; i<8; i++) { if (ai->config.rates[i] > maxencrypt) { ai->config.rates[i] = 0; } } } } static int writeConfigRid(struct airo_info *ai, int lock) { ConfigRid cfgr; if (!test_bit (FLAG_COMMIT, &ai->flags)) return SUCCESS; clear_bit (FLAG_COMMIT, &ai->flags); clear_bit (FLAG_RESET, &ai->flags); checkThrottle(ai); cfgr = ai->config; if ((cfgr.opmode & MODE_CFG_MASK) == MODE_STA_IBSS) set_bit(FLAG_ADHOC, &ai->flags); else clear_bit(FLAG_ADHOC, &ai->flags); return PC4500_writerid( ai, RID_CONFIG, &cfgr, sizeof(cfgr), lock); } static int readStatusRid(struct airo_info *ai, StatusRid *statr, int 
lock) { return PC4500_readrid(ai, RID_STATUS, statr, sizeof(*statr), lock); } static int readAPListRid(struct airo_info *ai, APListRid *aplr) { return PC4500_readrid(ai, RID_APLIST, aplr, sizeof(*aplr), 1); } static int writeAPListRid(struct airo_info *ai, APListRid *aplr, int lock) { return PC4500_writerid(ai, RID_APLIST, aplr, sizeof(*aplr), lock); } static int readCapabilityRid(struct airo_info *ai, CapabilityRid *capr, int lock) { return PC4500_readrid(ai, RID_CAPABILITIES, capr, sizeof(*capr), lock); } static int readStatsRid(struct airo_info*ai, StatsRid *sr, int rid, int lock) { return PC4500_readrid(ai, rid, sr, sizeof(*sr), lock); } static void try_auto_wep(struct airo_info *ai) { if (auto_wep && !test_bit(FLAG_RADIO_DOWN, &ai->flags)) { ai->expires = RUN_AT(3*HZ); wake_up_interruptible(&ai->thr_wait); } } static int airo_open(struct net_device *dev) { struct airo_info *ai = dev->ml_priv; int rc = 0; if (test_bit(FLAG_FLASHING, &ai->flags)) return -EIO; /* Make sure the card is configured. * Wireless Extensions may postpone config changes until the card * is open (to pipeline changes and speed-up card setup). 
If
	 * those changes are not yet committed, do it now - Jean II */
	if (test_bit(FLAG_COMMIT, &ai->flags)) {
		disable_MAC(ai, 1);
		writeConfigRid(ai, 1);
	}

	if (ai->wifidev != dev) {
		clear_bit(JOB_DIE, &ai->jobs);
		ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name);
		if (IS_ERR(ai->airo_thread_task))
			return (int)PTR_ERR(ai->airo_thread_task);

		rc = request_irq(dev->irq, airo_interrupt, IRQF_SHARED,
				 dev->name, dev);
		if (rc) {
			airo_print_err(dev->name,
				"register interrupt %d failed, rc %d",
				dev->irq, rc);
			set_bit(JOB_DIE, &ai->jobs);
			kthread_stop(ai->airo_thread_task);
			return rc;
		}

		/* Power on the MAC controller (which may have been disabled) */
		clear_bit(FLAG_RADIO_DOWN, &ai->flags);
		enable_interrupts(ai);

		try_auto_wep(ai);
	}
	enable_MAC(ai, 1);

	netif_start_queue(dev);
	return 0;
}

/* ndo_start_xmit handler used with the MPI350 tx path: queue the skb on
 * ai->txq and, if no transmit is pending, kick mpi_send_packet().  The
 * queue is stopped when txq approaches MAXTXQ. */
static netdev_tx_t mpi_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	int npacks, pending;
	unsigned long flags;
	struct airo_info *ai = dev->ml_priv;

	if (!skb) {
		airo_print_err(dev->name, "%s: skb == NULL!", __func__);
		return NETDEV_TX_OK;
	}
	npacks = skb_queue_len(&ai->txq);

	if (npacks >= MAXTXQ - 1) {
		netif_stop_queue(dev);
		if (npacks > MAXTXQ) {
			dev->stats.tx_fifo_errors++;
			return NETDEV_TX_BUSY;
		}
		skb_queue_tail(&ai->txq, skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&ai->aux_lock, flags);
	skb_queue_tail(&ai->txq, skb);
	pending = test_bit(FLAG_PENDING_XMIT, &ai->flags);
	spin_unlock_irqrestore(&ai->aux_lock, flags);
	netif_wake_queue(dev);

	if (pending == 0) {
		set_bit(FLAG_PENDING_XMIT, &ai->flags);
		mpi_send_packet(dev);
	}
	return NETDEV_TX_OK;
}

/*
 * @mpi_send_packet
 *
 * Attempt to transmit a packet. Can be called from interrupt
 * or transmit .
return number of packets we tried to send */
static int mpi_send_packet(struct net_device *dev)
{
	struct sk_buff *skb;
	unsigned char *buffer;
	s16 len;
	__le16 *payloadLen;
	struct airo_info *ai = dev->ml_priv;
	u8 *sendbuf;

	/* get a packet to send */
	if ((skb = skb_dequeue(&ai->txq)) == NULL) {
		airo_print_err(dev->name,
			"%s: Dequeue'd zero in send_packet()",
			__func__);
		return 0;
	}

	/* check min length */
	len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
	buffer = skb->data;

	ai->txfids[0].tx_desc.offset = 0;
	ai->txfids[0].tx_desc.valid = 1;
	ai->txfids[0].tx_desc.eoc = 1;
	ai->txfids[0].tx_desc.len = len + sizeof(WifiHdr);

	/*
	 * Magic, the cards firmware needs a length count (2 bytes) in the
	 * host buffer right after TXFID_HDR. The TXFID_HDR contains the
	 * status short so payloadlen is immediately after it.
	 * ------------------------------------------------
	 * |TXFIDHDR+STATUS|PAYLOADLEN|802.3HDR|PACKETDATA|
	 * ------------------------------------------------
	 */
	memcpy(ai->txfids[0].virtual_host_addr,
		(char *)&wifictlhdr8023, sizeof(wifictlhdr8023));

	payloadLen = (__le16 *)(ai->txfids[0].virtual_host_addr +
		sizeof(wifictlhdr8023));
	sendbuf = ai->txfids[0].virtual_host_addr +
		sizeof(wifictlhdr8023) + 2;

	/*
	 * Firmware automatically puts 802 header on so
	 * we don't need to account for it in the length
	 */
	if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled &&
		(ntohs(((__be16 *)buffer)[6]) != 0x888E)) {
		MICBuffer pMic;

		/* NOTE(review): the dequeued skb appears to be leaked on
		 * this error return - confirm against mainline airo.c. */
		if (encapsulate(ai, (etherHead *)buffer, &pMic,
				len - sizeof(etherHead)) != SUCCESS)
			return ERROR;

		*payloadLen = cpu_to_le16(len - sizeof(etherHead) + sizeof(pMic));
		ai->txfids[0].tx_desc.len += sizeof(pMic);
		/* copy data into airo dma buffer */
		memcpy(sendbuf, buffer, sizeof(etherHead));
		buffer += sizeof(etherHead);
		sendbuf += sizeof(etherHead);
		memcpy(sendbuf, &pMic, sizeof(pMic));
		sendbuf += sizeof(pMic);
		memcpy(sendbuf, buffer, len - sizeof(etherHead));
	} else {
		*payloadLen = cpu_to_le16(len - sizeof(etherHead));

		dev->trans_start = jiffies;

		/* copy data into airo dma buffer */
		memcpy(sendbuf, buffer, len);
	}

	/* hand the descriptor to the card and ack the event */
	memcpy_toio(ai->txfids[0].card_ram_off, &ai->txfids[0].tx_desc,
		sizeof(TxFid));

	OUT4500(ai, EVACK, 8);

	dev_kfree_skb_any(skb);
	return 1;
}

/* Read back the tx status word for @fid (or the MPI350 host descriptor
 * when fid < 0), bump the matching error counters, and raise an
 * IWEVTXDROP wireless event for retry/lifetime failures. */
static void get_tx_error(struct airo_info *ai, s32 fid)
{
	__le16 status;

	if (fid < 0)
		status = ((WifiCtlHdr *)ai->txfids[0].virtual_host_addr)->ctlhdr.status;
	else {
		if (bap_setup(ai, ai->fids[fid] & 0xffff, 4, BAP0) != SUCCESS)
			return;
		bap_read(ai, &status, 2, BAP0);
	}
	if (le16_to_cpu(status) & 2) /* Too many retries */
		ai->dev->stats.tx_aborted_errors++;
	if (le16_to_cpu(status) & 4) /* Transmit lifetime exceeded */
		ai->dev->stats.tx_heartbeat_errors++;
	if (le16_to_cpu(status) & 8) /* Aid fail */
		{ }
	if (le16_to_cpu(status) & 0x10) /* MAC disabled */
		ai->dev->stats.tx_carrier_errors++;
	if (le16_to_cpu(status) & 0x20) /* Association lost */
		{ }
	/* We produce a TXDROP event only for retry or lifetime
	 * exceeded, because that's the only status that really mean
	 * that this particular node went away.
	 * Other errors means that *we* screwed up. - Jean II */
	if ((le16_to_cpu(status) & 2) ||
	    (le16_to_cpu(status) & 4)) {
		union iwreq_data wrqu;
		char junk[0x18];

		/* Faster to skip over useless data than to do
		 * another bap_setup(). We are at offset 0x6 and
		 * need to go to 0x18 and read 6 bytes - Jean II */
		bap_read(ai, (__le16 *) junk, 0x18, BAP0);

		/* Copy 802.11 dest address.
		 * We use the 802.11 header because the frame may
		 * not be 802.3 or may be mangled...
		 * In Ad-Hoc mode, it will be the node address.
		 * In managed mode, it will be most likely the AP addr
		 * User space will figure out how to convert it to
		 * whatever it needs (IP address or else).
* - Jean II */ memcpy(wrqu.addr.sa_data, junk + 0x12, ETH_ALEN); wrqu.addr.sa_family = ARPHRD_ETHER; /* Send event to user space */ wireless_send_event(ai->dev, IWEVTXDROP, &wrqu, NULL); } } static void airo_end_xmit(struct net_device *dev) { u16 status; int i; struct airo_info *priv = dev->ml_priv; struct sk_buff *skb = priv->xmit.skb; int fid = priv->xmit.fid; u32 *fids = priv->fids; clear_bit(JOB_XMIT, &priv->jobs); clear_bit(FLAG_PENDING_XMIT, &priv->flags); status = transmit_802_3_packet (priv, fids[fid], skb->data); up(&priv->sem); i = 0; if ( status == SUCCESS ) { dev->trans_start = jiffies; for (; i < MAX_FIDS / 2 && (priv->fids[i] & 0xffff0000); i++); } else { priv->fids[fid] &= 0xffff; dev->stats.tx_window_errors++; } if (i < MAX_FIDS / 2) netif_wake_queue(dev); dev_kfree_skb(skb); } static netdev_tx_t airo_start_xmit(struct sk_buff *skb, struct net_device *dev) { s16 len; int i, j; struct airo_info *priv = dev->ml_priv; u32 *fids = priv->fids; if ( skb == NULL ) { airo_print_err(dev->name, "%s: skb == NULL!", __func__); return NETDEV_TX_OK; } /* Find a vacant FID */ for( i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++ ); for( j = i + 1; j < MAX_FIDS / 2 && (fids[j] & 0xffff0000); j++ ); if ( j >= MAX_FIDS / 2 ) { netif_stop_queue(dev); if (i == MAX_FIDS / 2) { dev->stats.tx_fifo_errors++; return NETDEV_TX_BUSY; } } /* check min length*/ len = ETH_ZLEN < skb->len ? 
skb->len : ETH_ZLEN; /* Mark fid as used & save length for later */ fids[i] |= (len << 16); priv->xmit.skb = skb; priv->xmit.fid = i; if (down_trylock(&priv->sem) != 0) { set_bit(FLAG_PENDING_XMIT, &priv->flags); netif_stop_queue(dev); set_bit(JOB_XMIT, &priv->jobs); wake_up_interruptible(&priv->thr_wait); } else airo_end_xmit(dev); return NETDEV_TX_OK; } static void airo_end_xmit11(struct net_device *dev) { u16 status; int i; struct airo_info *priv = dev->ml_priv; struct sk_buff *skb = priv->xmit11.skb; int fid = priv->xmit11.fid; u32 *fids = priv->fids; clear_bit(JOB_XMIT11, &priv->jobs); clear_bit(FLAG_PENDING_XMIT11, &priv->flags); status = transmit_802_11_packet (priv, fids[fid], skb->data); up(&priv->sem); i = MAX_FIDS / 2; if ( status == SUCCESS ) { dev->trans_start = jiffies; for (; i < MAX_FIDS && (priv->fids[i] & 0xffff0000); i++); } else { priv->fids[fid] &= 0xffff; dev->stats.tx_window_errors++; } if (i < MAX_FIDS) netif_wake_queue(dev); dev_kfree_skb(skb); } static netdev_tx_t airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) { s16 len; int i, j; struct airo_info *priv = dev->ml_priv; u32 *fids = priv->fids; if (test_bit(FLAG_MPI, &priv->flags)) { /* Not implemented yet for MPI350 */ netif_stop_queue(dev); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } if ( skb == NULL ) { airo_print_err(dev->name, "%s: skb == NULL!", __func__); return NETDEV_TX_OK; } /* Find a vacant FID */ for( i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++ ); for( j = i + 1; j < MAX_FIDS && (fids[j] & 0xffff0000); j++ ); if ( j >= MAX_FIDS ) { netif_stop_queue(dev); if (i == MAX_FIDS) { dev->stats.tx_fifo_errors++; return NETDEV_TX_BUSY; } } /* check min length*/ len = ETH_ZLEN < skb->len ? 
skb->len : ETH_ZLEN; /* Mark fid as used & save length for later */ fids[i] |= (len << 16); priv->xmit11.skb = skb; priv->xmit11.fid = i; if (down_trylock(&priv->sem) != 0) { set_bit(FLAG_PENDING_XMIT11, &priv->flags); netif_stop_queue(dev); set_bit(JOB_XMIT11, &priv->jobs); wake_up_interruptible(&priv->thr_wait); } else airo_end_xmit11(dev); return NETDEV_TX_OK; } static void airo_read_stats(struct net_device *dev) { struct airo_info *ai = dev->ml_priv; StatsRid stats_rid; __le32 *vals = stats_rid.vals; clear_bit(JOB_STATS, &ai->jobs); if (ai->power.event) { up(&ai->sem); return; } readStatsRid(ai, &stats_rid, RID_STATS, 0); up(&ai->sem); dev->stats.rx_packets = le32_to_cpu(vals[43]) + le32_to_cpu(vals[44]) + le32_to_cpu(vals[45]); dev->stats.tx_packets = le32_to_cpu(vals[39]) + le32_to_cpu(vals[40]) + le32_to_cpu(vals[41]); dev->stats.rx_bytes = le32_to_cpu(vals[92]); dev->stats.tx_bytes = le32_to_cpu(vals[91]); dev->stats.rx_errors = le32_to_cpu(vals[0]) + le32_to_cpu(vals[2]) + le32_to_cpu(vals[3]) + le32_to_cpu(vals[4]); dev->stats.tx_errors = le32_to_cpu(vals[42]) + dev->stats.tx_fifo_errors; dev->stats.multicast = le32_to_cpu(vals[43]); dev->stats.collisions = le32_to_cpu(vals[89]); /* detailed rx_errors: */ dev->stats.rx_length_errors = le32_to_cpu(vals[3]); dev->stats.rx_crc_errors = le32_to_cpu(vals[4]); dev->stats.rx_frame_errors = le32_to_cpu(vals[2]); dev->stats.rx_fifo_errors = le32_to_cpu(vals[0]); } static struct net_device_stats *airo_get_stats(struct net_device *dev) { struct airo_info *local = dev->ml_priv; if (!test_bit(JOB_STATS, &local->jobs)) { /* Get stats out of the card if available */ if (down_trylock(&local->sem) != 0) { set_bit(JOB_STATS, &local->jobs); wake_up_interruptible(&local->thr_wait); } else airo_read_stats(dev); } return &dev->stats; } static void airo_set_promisc(struct airo_info *ai) { Cmd cmd; Resp rsp; memset(&cmd, 0, sizeof(cmd)); cmd.cmd=CMD_SETMODE; clear_bit(JOB_PROMISC, &ai->jobs); cmd.parm0=(ai->flags&IFF_PROMISC) ? 
PROMISC : NOPROMISC;
	issuecommand(ai, &cmd, &rsp);
	up(&ai->sem);
}

/*
 * net_device rx-mode hook: push the interface's promiscuous setting down
 * to the card.  The SETMODE command needs ai->sem; if the semaphore is
 * contended the work is deferred to the worker thread via JOB_PROMISC.
 */
static void airo_set_multicast_list(struct net_device *dev)
{
	struct airo_info *ai = dev->ml_priv;

	/* NOTE(review): the XOR tests bit IFF_PROMISC in both flag words but
	 * then flips FLAG_PROMISC — this assumes FLAG_PROMISC is defined to
	 * the same bit value as IFF_PROMISC; confirm against the flag
	 * definitions earlier in this file. */
	if ((dev->flags ^ ai->flags) & IFF_PROMISC) {
		change_bit(FLAG_PROMISC, &ai->flags);
		if (down_trylock(&ai->sem) != 0) {
			/* Semaphore busy: hand off to airo_thread. */
			set_bit(JOB_PROMISC, &ai->jobs);
			wake_up_interruptible(&ai->thr_wait);
		} else
			airo_set_promisc(ai);
	}

	if ((dev->flags&IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
		/* Turn on multicast.  (Should be already setup...) */
	}
}

/*
 * Set a new MAC address: store it in the config RID (applied through a
 * MAC disable/enable cycle) and mirror it into the ethernet and wifi
 * net_devices.  Always returns 0.
 */
static int airo_set_mac_address(struct net_device *dev, void *p)
{
	struct airo_info *ai = dev->ml_priv;
	struct sockaddr *addr = p;

	readConfigRid(ai, 1);
	memcpy (ai->config.macAddr, addr->sa_data, dev->addr_len);
	set_bit (FLAG_COMMIT, &ai->flags);
	disable_MAC(ai, 1);
	writeConfigRid (ai, 1);
	enable_MAC(ai, 1);
	memcpy (ai->dev->dev_addr, addr->sa_data, dev->addr_len);
	if (ai->wifidev)
		memcpy (ai->wifidev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

/* Constrain the MTU to the range this hardware supports (68..2400). */
static int airo_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 2400))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/* Registry of non-PCI (PCMCIA/ISA) cards, used at module unload time. */
static LIST_HEAD(airo_devices);

static void add_airo_dev(struct airo_info *ai)
{
	/* Upper layers already keep track of PCI devices,
	 * so we only need to remember our non-PCI cards. */
	if (!ai->pci)
		list_add_tail(&ai->dev_list, &airo_devices);
}

static void del_airo_dev(struct airo_info *ai)
{
	if (!ai->pci)
		list_del(&ai->dev_list);
}

/*
 * net_device stop hook.  For the primary ethernet device this (optionally)
 * powers the radio down, masks interrupts, releases the IRQ and stops the
 * worker thread; the wifi shadow device only gets its queue stopped.
 */
static int airo_close(struct net_device *dev)
{
	struct airo_info *ai = dev->ml_priv;

	netif_stop_queue(dev);

	if (ai->wifidev != dev) {
#ifdef POWER_ON_DOWN
		/* Shut power to the card. The idea is that the user can save
		 * power when he doesn't need the card with "ifconfig down".
		 * That's the method that is most friendly towards the network
		 * stack (i.e. the network stack won't try to broadcast
		 * anything on the interface and routes are gone.
Jean II */ set_bit(FLAG_RADIO_DOWN, &ai->flags); disable_MAC(ai, 1); #endif disable_interrupts( ai ); free_irq(dev->irq, dev); set_bit(JOB_DIE, &ai->jobs); kthread_stop(ai->airo_thread_task); } return 0; } void stop_airo_card( struct net_device *dev, int freeres ) { struct airo_info *ai = dev->ml_priv; set_bit(FLAG_RADIO_DOWN, &ai->flags); disable_MAC(ai, 1); disable_interrupts(ai); takedown_proc_entry( dev, ai ); if (test_bit(FLAG_REGISTERED, &ai->flags)) { unregister_netdev( dev ); if (ai->wifidev) { unregister_netdev(ai->wifidev); free_netdev(ai->wifidev); ai->wifidev = NULL; } clear_bit(FLAG_REGISTERED, &ai->flags); } /* * Clean out tx queue */ if (test_bit(FLAG_MPI, &ai->flags) && !skb_queue_empty(&ai->txq)) { struct sk_buff *skb = NULL; for (;(skb = skb_dequeue(&ai->txq));) dev_kfree_skb(skb); } airo_networks_free (ai); kfree(ai->flash); kfree(ai->rssi); kfree(ai->APList); kfree(ai->SSID); if (freeres) { /* PCMCIA frees this stuff, so only for PCI and ISA */ release_region( dev->base_addr, 64 ); if (test_bit(FLAG_MPI, &ai->flags)) { if (ai->pci) mpi_unmap_card(ai->pci); if (ai->pcimem) iounmap(ai->pcimem); if (ai->pciaux) iounmap(ai->pciaux); pci_free_consistent(ai->pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma); } } crypto_free_cipher(ai->tfm); del_airo_dev(ai); free_netdev( dev ); } EXPORT_SYMBOL(stop_airo_card); static int wll_header_parse(const struct sk_buff *skb, unsigned char *haddr) { memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); return ETH_ALEN; } static void mpi_unmap_card(struct pci_dev *pci) { unsigned long mem_start = pci_resource_start(pci, 1); unsigned long mem_len = pci_resource_len(pci, 1); unsigned long aux_start = pci_resource_start(pci, 2); unsigned long aux_len = AUXMEMSIZE; release_mem_region(aux_start, aux_len); release_mem_region(mem_start, mem_len); } /************************************************************* * This routine assumes that descriptors have been setup . 
* Run at insmod time or after reset when the decriptors * have been initialized . Returns 0 if all is well nz * otherwise . Does not allocate memory but sets up card * using previously allocated descriptors. */ static int mpi_init_descriptors (struct airo_info *ai) { Cmd cmd; Resp rsp; int i; int rc = SUCCESS; /* Alloc card RX descriptors */ netif_stop_queue(ai->dev); memset(&rsp,0,sizeof(rsp)); memset(&cmd,0,sizeof(cmd)); cmd.cmd = CMD_ALLOCATEAUX; cmd.parm0 = FID_RX; cmd.parm1 = (ai->rxfids[0].card_ram_off - ai->pciaux); cmd.parm2 = MPI_MAX_FIDS; rc=issuecommand(ai, &cmd, &rsp); if (rc != SUCCESS) { airo_print_err(ai->dev->name, "Couldn't allocate RX FID"); return rc; } for (i=0; i<MPI_MAX_FIDS; i++) { memcpy_toio(ai->rxfids[i].card_ram_off, &ai->rxfids[i].rx_desc, sizeof(RxFid)); } /* Alloc card TX descriptors */ memset(&rsp,0,sizeof(rsp)); memset(&cmd,0,sizeof(cmd)); cmd.cmd = CMD_ALLOCATEAUX; cmd.parm0 = FID_TX; cmd.parm1 = (ai->txfids[0].card_ram_off - ai->pciaux); cmd.parm2 = MPI_MAX_FIDS; for (i=0; i<MPI_MAX_FIDS; i++) { ai->txfids[i].tx_desc.valid = 1; memcpy_toio(ai->txfids[i].card_ram_off, &ai->txfids[i].tx_desc, sizeof(TxFid)); } ai->txfids[i-1].tx_desc.eoc = 1; /* Last descriptor has EOC set */ rc=issuecommand(ai, &cmd, &rsp); if (rc != SUCCESS) { airo_print_err(ai->dev->name, "Couldn't allocate TX FID"); return rc; } /* Alloc card Rid descriptor */ memset(&rsp,0,sizeof(rsp)); memset(&cmd,0,sizeof(cmd)); cmd.cmd = CMD_ALLOCATEAUX; cmd.parm0 = RID_RW; cmd.parm1 = (ai->config_desc.card_ram_off - ai->pciaux); cmd.parm2 = 1; /* Magic number... */ rc=issuecommand(ai, &cmd, &rsp); if (rc != SUCCESS) { airo_print_err(ai->dev->name, "Couldn't allocate RID"); return rc; } memcpy_toio(ai->config_desc.card_ram_off, &ai->config_desc.rid_desc, sizeof(Rid)); return rc; } /* * We are setting up three things here: * 1) Map AUX memory for descriptors: Rid, TxFid, or RxFid. * 2) Map PCI memory for issuing commands. 
* 3) Allocate memory (shared) to send and receive ethernet frames. */ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci) { unsigned long mem_start, mem_len, aux_start, aux_len; int rc = -1; int i; dma_addr_t busaddroff; unsigned char *vpackoff; unsigned char __iomem *pciaddroff; mem_start = pci_resource_start(pci, 1); mem_len = pci_resource_len(pci, 1); aux_start = pci_resource_start(pci, 2); aux_len = AUXMEMSIZE; if (!request_mem_region(mem_start, mem_len, DRV_NAME)) { airo_print_err("", "Couldn't get region %x[%x]", (int)mem_start, (int)mem_len); goto out; } if (!request_mem_region(aux_start, aux_len, DRV_NAME)) { airo_print_err("", "Couldn't get region %x[%x]", (int)aux_start, (int)aux_len); goto free_region1; } ai->pcimem = ioremap(mem_start, mem_len); if (!ai->pcimem) { airo_print_err("", "Couldn't map region %x[%x]", (int)mem_start, (int)mem_len); goto free_region2; } ai->pciaux = ioremap(aux_start, aux_len); if (!ai->pciaux) { airo_print_err("", "Couldn't map region %x[%x]", (int)aux_start, (int)aux_len); goto free_memmap; } /* Reserve PKTSIZE for each fid and 2K for the Rids */ ai->shared = pci_alloc_consistent(pci, PCI_SHARED_LEN, &ai->shared_dma); if (!ai->shared) { airo_print_err("", "Couldn't alloc_consistent %d", PCI_SHARED_LEN); goto free_auxmap; } /* * Setup descriptor RX, TX, CONFIG */ busaddroff = ai->shared_dma; pciaddroff = ai->pciaux + AUX_OFFSET; vpackoff = ai->shared; /* RX descriptor setup */ for(i = 0; i < MPI_MAX_FIDS; i++) { ai->rxfids[i].pending = 0; ai->rxfids[i].card_ram_off = pciaddroff; ai->rxfids[i].virtual_host_addr = vpackoff; ai->rxfids[i].rx_desc.host_addr = busaddroff; ai->rxfids[i].rx_desc.valid = 1; ai->rxfids[i].rx_desc.len = PKTSIZE; ai->rxfids[i].rx_desc.rdy = 0; pciaddroff += sizeof(RxFid); busaddroff += PKTSIZE; vpackoff += PKTSIZE; } /* TX descriptor setup */ for(i = 0; i < MPI_MAX_FIDS; i++) { ai->txfids[i].card_ram_off = pciaddroff; ai->txfids[i].virtual_host_addr = vpackoff; ai->txfids[i].tx_desc.valid 
= 1; ai->txfids[i].tx_desc.host_addr = busaddroff; memcpy(ai->txfids[i].virtual_host_addr, &wifictlhdr8023, sizeof(wifictlhdr8023)); pciaddroff += sizeof(TxFid); busaddroff += PKTSIZE; vpackoff += PKTSIZE; } ai->txfids[i-1].tx_desc.eoc = 1; /* Last descriptor has EOC set */ /* Rid descriptor setup */ ai->config_desc.card_ram_off = pciaddroff; ai->config_desc.virtual_host_addr = vpackoff; ai->config_desc.rid_desc.host_addr = busaddroff; ai->ridbus = busaddroff; ai->config_desc.rid_desc.rid = 0; ai->config_desc.rid_desc.len = RIDSIZE; ai->config_desc.rid_desc.valid = 1; pciaddroff += sizeof(Rid); busaddroff += RIDSIZE; vpackoff += RIDSIZE; /* Tell card about descriptors */ if (mpi_init_descriptors (ai) != SUCCESS) goto free_shared; return 0; free_shared: pci_free_consistent(pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma); free_auxmap: iounmap(ai->pciaux); free_memmap: iounmap(ai->pcimem); free_region2: release_mem_region(aux_start, aux_len); free_region1: release_mem_region(mem_start, mem_len); out: return rc; } static const struct header_ops airo_header_ops = { .parse = wll_header_parse, }; static const struct net_device_ops airo11_netdev_ops = { .ndo_open = airo_open, .ndo_stop = airo_close, .ndo_start_xmit = airo_start_xmit11, .ndo_get_stats = airo_get_stats, .ndo_set_mac_address = airo_set_mac_address, .ndo_do_ioctl = airo_ioctl, .ndo_change_mtu = airo_change_mtu, }; static void wifi_setup(struct net_device *dev) { dev->netdev_ops = &airo11_netdev_ops; dev->header_ops = &airo_header_ops; dev->wireless_handlers = &airo_handler_def; dev->type = ARPHRD_IEEE80211; dev->hard_header_len = ETH_HLEN; dev->mtu = AIRO_DEF_MTU; dev->addr_len = ETH_ALEN; dev->tx_queue_len = 100; memset(dev->broadcast,0xFF, ETH_ALEN); dev->flags = IFF_BROADCAST|IFF_MULTICAST; } static struct net_device *init_wifidev(struct airo_info *ai, struct net_device *ethdev) { int err; struct net_device *dev = alloc_netdev(0, "wifi%d", wifi_setup); if (!dev) return NULL; dev->ml_priv = ethdev->ml_priv; 
	dev->irq = ethdev->irq;
	dev->base_addr = ethdev->base_addr;
	dev->wireless_data = ethdev->wireless_data;
	SET_NETDEV_DEV(dev, ethdev->dev.parent);
	memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len);
	err = register_netdev(dev);
	if (err<0) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}

/*
 * Soft-reset the card and wait for it to settle.  Takes ai->sem when
 * @lock is set; returns 0 on success, -1 if the semaphore wait was
 * interrupted by a signal.
 */
static int reset_card( struct net_device *dev , int lock) {
	struct airo_info *ai = dev->ml_priv;

	if (lock && down_interruptible(&ai->sem))
		return -1;
	waitbusy (ai);
	OUT4500(ai,COMMAND,CMD_SOFTRESET);
	msleep(200);
	waitbusy (ai);
	msleep(200);
	if (lock)
		up(&ai->sem);
	return 0;
}

#define AIRO_MAX_NETWORK_COUNT	64

/* Allocate the fixed pool of scan-result (BSS) elements; idempotent. */
static int airo_networks_allocate(struct airo_info *ai)
{
	if (ai->networks)
		return 0;

	ai->networks = kcalloc(AIRO_MAX_NETWORK_COUNT, sizeof(BSSListElement),
			       GFP_KERNEL);
	if (!ai->networks) {
		airo_print_warn("", "Out of memory allocating beacons");
		return -ENOMEM;
	}

	return 0;
}

/* Release the BSS element pool allocated by airo_networks_allocate(). */
static void airo_networks_free(struct airo_info *ai)
{
	kfree(ai->networks);
	ai->networks = NULL;
}

/* Start with every BSS element on the free list; in-use list is empty. */
static void airo_networks_initialize(struct airo_info *ai)
{
	int i;

	INIT_LIST_HEAD(&ai->network_free_list);
	INIT_LIST_HEAD(&ai->network_list);
	for (i = 0; i < AIRO_MAX_NETWORK_COUNT; i++)
		list_add_tail(&ai->networks[i].list,
			      &ai->network_free_list);
}

/* netdev ops for ISA/PCMCIA cards (card-resident FIDs, airo_start_xmit). */
static const struct net_device_ops airo_netdev_ops = {
	.ndo_open		= airo_open,
	.ndo_stop		= airo_close,
	.ndo_start_xmit		= airo_start_xmit,
	.ndo_get_stats		= airo_get_stats,
	.ndo_set_rx_mode	= airo_set_multicast_list,
	.ndo_set_mac_address	= airo_set_mac_address,
	.ndo_do_ioctl		= airo_ioctl,
	.ndo_change_mtu		= airo_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

/* netdev ops for MPI350 (PCI) cards, which transmit via mpi_start_xmit. */
static const struct net_device_ops mpi_netdev_ops = {
	.ndo_open		= airo_open,
	.ndo_stop		= airo_close,
	.ndo_start_xmit		= mpi_start_xmit,
	.ndo_get_stats		= airo_get_stats,
	.ndo_set_rx_mode	= airo_set_multicast_list,
	.ndo_set_mac_address	= airo_set_mac_address,
	.ndo_do_ioctl		= airo_ioctl,
	.ndo_change_mtu		= airo_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct net_device *_init_airo_card(
unsigned short irq, int port, int is_pcmcia, struct pci_dev *pci, struct device *dmdev ) { struct net_device *dev; struct airo_info *ai; int i, rc; CapabilityRid cap_rid; /* Create the network device object. */ dev = alloc_netdev(sizeof(*ai), "", ether_setup); if (!dev) { airo_print_err("", "Couldn't alloc_etherdev"); return NULL; } ai = dev->ml_priv = netdev_priv(dev); ai->wifidev = NULL; ai->flags = 1 << FLAG_RADIO_DOWN; ai->jobs = 0; ai->dev = dev; if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) { airo_print_dbg("", "Found an MPI350 card"); set_bit(FLAG_MPI, &ai->flags); } spin_lock_init(&ai->aux_lock); sema_init(&ai->sem, 1); ai->config.len = 0; ai->pci = pci; init_waitqueue_head (&ai->thr_wait); ai->tfm = NULL; add_airo_dev(ai); if (airo_networks_allocate (ai)) goto err_out_free; airo_networks_initialize (ai); skb_queue_head_init (&ai->txq); /* The Airo-specific entries in the device structure. */ if (test_bit(FLAG_MPI,&ai->flags)) dev->netdev_ops = &mpi_netdev_ops; else dev->netdev_ops = &airo_netdev_ops; dev->wireless_handlers = &airo_handler_def; ai->wireless_data.spy_data = &ai->spy_data; dev->wireless_data = &ai->wireless_data; dev->irq = irq; dev->base_addr = port; dev->priv_flags &= ~IFF_TX_SKB_SHARING; SET_NETDEV_DEV(dev, dmdev); reset_card (dev, 1); msleep(400); if (!is_pcmcia) { if (!request_region(dev->base_addr, 64, DRV_NAME)) { rc = -EBUSY; airo_print_err(dev->name, "Couldn't request region"); goto err_out_nets; } } if (test_bit(FLAG_MPI,&ai->flags)) { if (mpi_map_card(ai, pci)) { airo_print_err("", "Could not map memory"); goto err_out_res; } } if (probe) { if (setup_card(ai, dev->dev_addr, 1) != SUCCESS) { airo_print_err(dev->name, "MAC could not be enabled" ); rc = -EIO; goto err_out_map; } } else if (!test_bit(FLAG_MPI,&ai->flags)) { ai->bap_read = fast_bap_read; set_bit(FLAG_FLASHING, &ai->flags); } strcpy(dev->name, "eth%d"); rc = register_netdev(dev); if (rc) { airo_print_err(dev->name, "Couldn't register_netdev"); goto 
err_out_map; } ai->wifidev = init_wifidev(ai, dev); if (!ai->wifidev) goto err_out_reg; rc = readCapabilityRid(ai, &cap_rid, 1); if (rc != SUCCESS) { rc = -EIO; goto err_out_wifi; } /* WEP capability discovery */ ai->wep_capable = (cap_rid.softCap & cpu_to_le16(0x02)) ? 1 : 0; ai->max_wep_idx = (cap_rid.softCap & cpu_to_le16(0x80)) ? 3 : 0; airo_print_info(dev->name, "Firmware version %x.%x.%02d", ((le16_to_cpu(cap_rid.softVer) >> 8) & 0xF), (le16_to_cpu(cap_rid.softVer) & 0xFF), le16_to_cpu(cap_rid.softSubVer)); /* Test for WPA support */ /* Only firmware versions 5.30.17 or better can do WPA */ if (le16_to_cpu(cap_rid.softVer) > 0x530 || (le16_to_cpu(cap_rid.softVer) == 0x530 && le16_to_cpu(cap_rid.softSubVer) >= 17)) { airo_print_info(ai->dev->name, "WPA supported."); set_bit(FLAG_WPA_CAPABLE, &ai->flags); ai->bssListFirst = RID_WPA_BSSLISTFIRST; ai->bssListNext = RID_WPA_BSSLISTNEXT; ai->bssListRidLen = sizeof(BSSListRid); } else { airo_print_info(ai->dev->name, "WPA unsupported with firmware " "versions older than 5.30.17."); ai->bssListFirst = RID_BSSLISTFIRST; ai->bssListNext = RID_BSSLISTNEXT; ai->bssListRidLen = sizeof(BSSListRid) - sizeof(BSSListRidExtra); } set_bit(FLAG_REGISTERED,&ai->flags); airo_print_info(dev->name, "MAC enabled %pM", dev->dev_addr); /* Allocate the transmit buffers */ if (probe && !test_bit(FLAG_MPI,&ai->flags)) for( i = 0; i < MAX_FIDS; i++ ) ai->fids[i] = transmit_allocate(ai,AIRO_DEF_MTU,i>=MAX_FIDS/2); if (setup_proc_entry(dev, dev->ml_priv) < 0) goto err_out_wifi; return dev; err_out_wifi: unregister_netdev(ai->wifidev); free_netdev(ai->wifidev); err_out_reg: unregister_netdev(dev); err_out_map: if (test_bit(FLAG_MPI,&ai->flags) && pci) { pci_free_consistent(pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma); iounmap(ai->pciaux); iounmap(ai->pcimem); mpi_unmap_card(ai->pci); } err_out_res: if (!is_pcmcia) release_region( dev->base_addr, 64 ); err_out_nets: airo_networks_free(ai); err_out_free: del_airo_dev(ai); free_netdev(dev); 
	return NULL;
}

/* Public entry point used by the PCMCIA/ISA attach code (no PCI device). */
struct net_device *init_airo_card( unsigned short irq, int port, int is_pcmcia,
				  struct device *dmdev)
{
	return _init_airo_card ( irq, port, is_pcmcia, NULL, dmdev);
}

EXPORT_SYMBOL(init_airo_card);

/*
 * Busy-wait (up to 10000 iterations of udelay(10)) for the COMMAND
 * register to go idle, periodically poking EV_CLEARCOMMANDBUSY in case
 * the card is wedged.  Returns non-zero on success, 0 on timeout.
 */
static int waitbusy (struct airo_info *ai) {
	int delay = 0;
	while ((IN4500(ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) {
		udelay (10);
		if ((++delay % 20) == 0)
			OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
	}
	return delay < 10000;
}

/*
 * Fully reset and reinitialise a card.  Returns 0 on success, -1 if
 * either the soft reset or the subsequent MAC enable failed.
 */
int reset_airo_card( struct net_device *dev )
{
	int i;
	struct airo_info *ai = dev->ml_priv;

	if (reset_card (dev, 1))
		return -1;

	if ( setup_card(ai, dev->dev_addr, 1 ) != SUCCESS ) {
		airo_print_err(dev->name, "MAC could not be enabled");
		return -1;
	}
	airo_print_info(dev->name, "MAC enabled %pM", dev->dev_addr);
	/* Allocate the transmit buffers if needed */
	if (!test_bit(FLAG_MPI,&ai->flags))
		for( i = 0; i < MAX_FIDS; i++ )
			ai->fids[i] = transmit_allocate (ai,AIRO_DEF_MTU,i>=MAX_FIDS/2);

	enable_interrupts( ai );
	netif_wake_queue(dev);
	return 0;
}

EXPORT_SYMBOL(reset_airo_card);

/*
 * Report the current BSSID to user space as a SIOCGIWAP wireless event.
 * Entered with ai->sem held (job deferred from the link handler); the
 * semaphore is dropped once the status RID has been read.
 */
static void airo_send_event(struct net_device *dev) {
	struct airo_info *ai = dev->ml_priv;
	union iwreq_data wrqu;
	StatusRid status_rid;

	clear_bit(JOB_EVENT, &ai->jobs);
	PC4500_readrid(ai, RID_STATUS, &status_rid, sizeof(status_rid), 0);
	up(&ai->sem);
	wrqu.data.length = 0;
	wrqu.data.flags = 0;
	memcpy(wrqu.ap_addr.sa_data, status_rid.bssid[0], ETH_ALEN);
	wrqu.ap_addr.sa_family = ARPHRD_ETHER;

	/* Send event to user space */
	wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
}

/*
 * Harvest a completed firmware scan: recycle the previous BSS list onto
 * the free list, then walk the scan-result RIDs into ai->network_list.
 * Entered with ai->sem held; it is released before returning.
 */
static void airo_process_scan_results (struct airo_info *ai) {
	union iwreq_data	wrqu;
	BSSListRid bss;
	int rc;
	BSSListElement * loop_net;
	BSSListElement * tmp_net;

	/* Blow away current list of scan results */
	list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
		list_move_tail (&loop_net->list, &ai->network_free_list);
		/* Don't blow away ->list, just BSS data */
		memset (loop_net, 0, sizeof (loop_net->bss));
	}

	/* Try to read the first entry of the scan result */
	rc =
PC4500_readrid(ai, ai->bssListFirst, &bss, ai->bssListRidLen, 0); if((rc) || (bss.index == cpu_to_le16(0xffff))) { /* No scan results */ goto out; } /* Read and parse all entries */ tmp_net = NULL; while((!rc) && (bss.index != cpu_to_le16(0xffff))) { /* Grab a network off the free list */ if (!list_empty(&ai->network_free_list)) { tmp_net = list_entry(ai->network_free_list.next, BSSListElement, list); list_del(ai->network_free_list.next); } if (tmp_net != NULL) { memcpy(tmp_net, &bss, sizeof(tmp_net->bss)); list_add_tail(&tmp_net->list, &ai->network_list); tmp_net = NULL; } /* Read next entry */ rc = PC4500_readrid(ai, ai->bssListNext, &bss, ai->bssListRidLen, 0); } out: ai->scan_timeout = 0; clear_bit(JOB_SCAN_RESULTS, &ai->jobs); up(&ai->sem); /* Send an empty event to user space. * We don't send the received data on * the event because it would require * us to do complex transcoding, and * we want to minimise the work done in * the irq handler. Use a request to * extract the data - Jean II */ wrqu.data.length = 0; wrqu.data.flags = 0; wireless_send_event(ai->dev, SIOCGIWSCAN, &wrqu, NULL); } static int airo_thread(void *data) { struct net_device *dev = data; struct airo_info *ai = dev->ml_priv; int locked; set_freezable(); while(1) { /* make swsusp happy with our thread */ try_to_freeze(); if (test_bit(JOB_DIE, &ai->jobs)) break; if (ai->jobs) { locked = down_interruptible(&ai->sem); } else { wait_queue_t wait; init_waitqueue_entry(&wait, current); add_wait_queue(&ai->thr_wait, &wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (ai->jobs) break; if (ai->expires || ai->scan_timeout) { if (ai->scan_timeout && time_after_eq(jiffies,ai->scan_timeout)){ set_bit(JOB_SCAN_RESULTS, &ai->jobs); break; } else if (ai->expires && time_after_eq(jiffies,ai->expires)){ set_bit(JOB_AUTOWEP, &ai->jobs); break; } if (!kthread_should_stop() && !freezing(current)) { unsigned long wake_at; if (!ai->expires || !ai->scan_timeout) { wake_at = max(ai->expires, 
ai->scan_timeout); } else { wake_at = min(ai->expires, ai->scan_timeout); } schedule_timeout(wake_at - jiffies); continue; } } else if (!kthread_should_stop() && !freezing(current)) { schedule(); continue; } break; } current->state = TASK_RUNNING; remove_wait_queue(&ai->thr_wait, &wait); locked = 1; } if (locked) continue; if (test_bit(JOB_DIE, &ai->jobs)) { up(&ai->sem); break; } if (ai->power.event || test_bit(FLAG_FLASHING, &ai->flags)) { up(&ai->sem); continue; } if (test_bit(JOB_XMIT, &ai->jobs)) airo_end_xmit(dev); else if (test_bit(JOB_XMIT11, &ai->jobs)) airo_end_xmit11(dev); else if (test_bit(JOB_STATS, &ai->jobs)) airo_read_stats(dev); else if (test_bit(JOB_WSTATS, &ai->jobs)) airo_read_wireless_stats(ai); else if (test_bit(JOB_PROMISC, &ai->jobs)) airo_set_promisc(ai); else if (test_bit(JOB_MIC, &ai->jobs)) micinit(ai); else if (test_bit(JOB_EVENT, &ai->jobs)) airo_send_event(dev); else if (test_bit(JOB_AUTOWEP, &ai->jobs)) timer_func(dev); else if (test_bit(JOB_SCAN_RESULTS, &ai->jobs)) airo_process_scan_results(ai); else /* Shouldn't get here, but we make sure to unlock */ up(&ai->sem); } return 0; } static int header_len(__le16 ctl) { u16 fc = le16_to_cpu(ctl); switch (fc & 0xc) { case 4: if ((fc & 0xe0) == 0xc0) return 10; /* one-address control packet */ return 16; /* two-address control packet */ case 8: if ((fc & 0x300) == 0x300) return 30; /* WDS packet */ } return 24; } static void airo_handle_cisco_mic(struct airo_info *ai) { if (test_bit(FLAG_MIC_CAPABLE, &ai->flags)) { set_bit(JOB_MIC, &ai->jobs); wake_up_interruptible(&ai->thr_wait); } } /* Airo Status codes */ #define STAT_NOBEACON 0x8000 /* Loss of sync - missed beacons */ #define STAT_MAXRETRIES 0x8001 /* Loss of sync - max retries */ #define STAT_MAXARL 0x8002 /* Loss of sync - average retry level exceeded*/ #define STAT_FORCELOSS 0x8003 /* Loss of sync - host request */ #define STAT_TSFSYNC 0x8004 /* Loss of sync - TSF synchronization */ #define STAT_DEAUTH 0x8100 /* low byte is 802.11 
reason code */ #define STAT_DISASSOC 0x8200 /* low byte is 802.11 reason code */ #define STAT_ASSOC_FAIL 0x8400 /* low byte is 802.11 reason code */ #define STAT_AUTH_FAIL 0x0300 /* low byte is 802.11 reason code */ #define STAT_ASSOC 0x0400 /* Associated */ #define STAT_REASSOC 0x0600 /* Reassociated? Only on firmware >= 5.30.17 */ static void airo_print_status(const char *devname, u16 status) { u8 reason = status & 0xFF; switch (status & 0xFF00) { case STAT_NOBEACON: switch (status) { case STAT_NOBEACON: airo_print_dbg(devname, "link lost (missed beacons)"); break; case STAT_MAXRETRIES: case STAT_MAXARL: airo_print_dbg(devname, "link lost (max retries)"); break; case STAT_FORCELOSS: airo_print_dbg(devname, "link lost (local choice)"); break; case STAT_TSFSYNC: airo_print_dbg(devname, "link lost (TSF sync lost)"); break; default: airo_print_dbg(devname, "unknow status %x\n", status); break; } break; case STAT_DEAUTH: airo_print_dbg(devname, "deauthenticated (reason: %d)", reason); break; case STAT_DISASSOC: airo_print_dbg(devname, "disassociated (reason: %d)", reason); break; case STAT_ASSOC_FAIL: airo_print_dbg(devname, "association failed (reason: %d)", reason); break; case STAT_AUTH_FAIL: airo_print_dbg(devname, "authentication failed (reason: %d)", reason); break; case STAT_ASSOC: case STAT_REASSOC: break; default: airo_print_dbg(devname, "unknow status %x\n", status); break; } } static void airo_handle_link(struct airo_info *ai) { union iwreq_data wrqu; int scan_forceloss = 0; u16 status; /* Get new status and acknowledge the link change */ status = le16_to_cpu(IN4500(ai, LINKSTAT)); OUT4500(ai, EVACK, EV_LINK); if ((status == STAT_FORCELOSS) && (ai->scan_timeout > 0)) scan_forceloss = 1; airo_print_status(ai->dev->name, status); if ((status == STAT_ASSOC) || (status == STAT_REASSOC)) { if (auto_wep) ai->expires = 0; if (ai->list_bss_task) wake_up_process(ai->list_bss_task); set_bit(FLAG_UPDATE_UNI, &ai->flags); set_bit(FLAG_UPDATE_MULTI, &ai->flags); if 
(down_trylock(&ai->sem) != 0) { set_bit(JOB_EVENT, &ai->jobs); wake_up_interruptible(&ai->thr_wait); } else airo_send_event(ai->dev); } else if (!scan_forceloss) { if (auto_wep && !ai->expires) { ai->expires = RUN_AT(3*HZ); wake_up_interruptible(&ai->thr_wait); } /* Send event to user space */ memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN); wrqu.ap_addr.sa_family = ARPHRD_ETHER; wireless_send_event(ai->dev, SIOCGIWAP, &wrqu, NULL); } } static void airo_handle_rx(struct airo_info *ai) { struct sk_buff *skb = NULL; __le16 fc, v, *buffer, tmpbuf[4]; u16 len, hdrlen = 0, gap, fid; struct rx_hdr hdr; int success = 0; if (test_bit(FLAG_MPI, &ai->flags)) { if (test_bit(FLAG_802_11, &ai->flags)) mpi_receive_802_11(ai); else mpi_receive_802_3(ai); OUT4500(ai, EVACK, EV_RX); return; } fid = IN4500(ai, RXFID); /* Get the packet length */ if (test_bit(FLAG_802_11, &ai->flags)) { bap_setup (ai, fid, 4, BAP0); bap_read (ai, (__le16*)&hdr, sizeof(hdr), BAP0); /* Bad CRC. Ignore packet */ if (le16_to_cpu(hdr.status) & 2) hdr.len = 0; if (ai->wifidev == NULL) hdr.len = 0; } else { bap_setup(ai, fid, 0x36, BAP0); bap_read(ai, &hdr.len, 2, BAP0); } len = le16_to_cpu(hdr.len); if (len > AIRO_DEF_MTU) { airo_print_err(ai->dev->name, "Bad size %d", len); goto done; } if (len == 0) goto done; if (test_bit(FLAG_802_11, &ai->flags)) { bap_read(ai, &fc, sizeof (fc), BAP0); hdrlen = header_len(fc); } else hdrlen = ETH_ALEN * 2; skb = dev_alloc_skb(len + hdrlen + 2 + 2); if (!skb) { ai->dev->stats.rx_dropped++; goto done; } skb_reserve(skb, 2); /* This way the IP header is aligned */ buffer = (__le16 *) skb_put(skb, len + hdrlen); if (test_bit(FLAG_802_11, &ai->flags)) { buffer[0] = fc; bap_read(ai, buffer + 1, hdrlen - 2, BAP0); if (hdrlen == 24) bap_read(ai, tmpbuf, 6, BAP0); bap_read(ai, &v, sizeof(v), BAP0); gap = le16_to_cpu(v); if (gap) { if (gap <= 8) { bap_read(ai, tmpbuf, gap, BAP0); } else { airo_print_err(ai->dev->name, "gaplen too " "big. 
Problems will follow..."); } } bap_read(ai, buffer + hdrlen/2, len, BAP0); } else { MICBuffer micbuf; bap_read(ai, buffer, ETH_ALEN * 2, BAP0); if (ai->micstats.enabled) { bap_read(ai, (__le16 *) &micbuf, sizeof (micbuf), BAP0); if (ntohs(micbuf.typelen) > 0x05DC) bap_setup(ai, fid, 0x44, BAP0); else { if (len <= sizeof (micbuf)) { dev_kfree_skb_irq(skb); goto done; } len -= sizeof(micbuf); skb_trim(skb, len + hdrlen); } } bap_read(ai, buffer + ETH_ALEN, len, BAP0); if (decapsulate(ai, &micbuf, (etherHead*) buffer, len)) dev_kfree_skb_irq (skb); else success = 1; } #ifdef WIRELESS_SPY if (success && (ai->spy_data.spy_number > 0)) { char *sa; struct iw_quality wstats; /* Prepare spy data : addr + qual */ if (!test_bit(FLAG_802_11, &ai->flags)) { sa = (char *) buffer + 6; bap_setup(ai, fid, 8, BAP0); bap_read(ai, (__le16 *) hdr.rssi, 2, BAP0); } else sa = (char *) buffer + 10; wstats.qual = hdr.rssi[0]; if (ai->rssi) wstats.level = 0x100 - ai->rssi[hdr.rssi[1]].rssidBm; else wstats.level = (hdr.rssi[1] + 321) / 2; wstats.noise = ai->wstats.qual.noise; wstats.updated = IW_QUAL_LEVEL_UPDATED | IW_QUAL_QUAL_UPDATED | IW_QUAL_DBM; /* Update spy records */ wireless_spy_update(ai->dev, sa, &wstats); } #endif /* WIRELESS_SPY */ done: OUT4500(ai, EVACK, EV_RX); if (success) { if (test_bit(FLAG_802_11, &ai->flags)) { skb_reset_mac_header(skb); skb->pkt_type = PACKET_OTHERHOST; skb->dev = ai->wifidev; skb->protocol = htons(ETH_P_802_2); } else skb->protocol = eth_type_trans(skb, ai->dev); skb->ip_summed = CHECKSUM_NONE; netif_rx(skb); } } static void airo_handle_tx(struct airo_info *ai, u16 status) { int i, len = 0, index = -1; u16 fid; if (test_bit(FLAG_MPI, &ai->flags)) { unsigned long flags; if (status & EV_TXEXC) get_tx_error(ai, -1); spin_lock_irqsave(&ai->aux_lock, flags); if (!skb_queue_empty(&ai->txq)) { spin_unlock_irqrestore(&ai->aux_lock,flags); mpi_send_packet(ai->dev); } else { clear_bit(FLAG_PENDING_XMIT, &ai->flags); spin_unlock_irqrestore(&ai->aux_lock,flags); 
netif_wake_queue(ai->dev); } OUT4500(ai, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC)); return; } fid = IN4500(ai, TXCOMPLFID); for(i = 0; i < MAX_FIDS; i++) { if ((ai->fids[i] & 0xffff) == fid) { len = ai->fids[i] >> 16; index = i; } } if (index != -1) { if (status & EV_TXEXC) get_tx_error(ai, index); OUT4500(ai, EVACK, status & (EV_TX | EV_TXEXC)); /* Set up to be used again */ ai->fids[index] &= 0xffff; if (index < MAX_FIDS / 2) { if (!test_bit(FLAG_PENDING_XMIT, &ai->flags)) netif_wake_queue(ai->dev); } else { if (!test_bit(FLAG_PENDING_XMIT11, &ai->flags)) netif_wake_queue(ai->wifidev); } } else { OUT4500(ai, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC)); airo_print_err(ai->dev->name, "Unallocated FID was used to xmit"); } } static irqreturn_t airo_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; u16 status, savedInterrupts = 0; struct airo_info *ai = dev->ml_priv; int handled = 0; if (!netif_device_present(dev)) return IRQ_NONE; for (;;) { status = IN4500(ai, EVSTAT); if (!(status & STATUS_INTS) || (status == 0xffff)) break; handled = 1; if (status & EV_AWAKE) { OUT4500(ai, EVACK, EV_AWAKE); OUT4500(ai, EVACK, EV_AWAKE); } if (!savedInterrupts) { savedInterrupts = IN4500(ai, EVINTEN); OUT4500(ai, EVINTEN, 0); } if (status & EV_MIC) { OUT4500(ai, EVACK, EV_MIC); airo_handle_cisco_mic(ai); } if (status & EV_LINK) { /* Link status changed */ airo_handle_link(ai); } /* Check to see if there is something to receive */ if (status & EV_RX) airo_handle_rx(ai); /* Check to see if a packet has been transmitted */ if (status & (EV_TX | EV_TXCPY | EV_TXEXC)) airo_handle_tx(ai, status); if ( status & ~STATUS_INTS & ~IGNORE_INTS ) { airo_print_warn(ai->dev->name, "Got weird status %x", status & ~STATUS_INTS & ~IGNORE_INTS ); } } if (savedInterrupts) OUT4500(ai, EVINTEN, savedInterrupts); return IRQ_RETVAL(handled); } /* * Routines to talk to the card */ /* * This was originally written for the 4500, hence the name * NOTE: If use with 8bit mode and SMP 
bad things will happen! * Why would some one do 8 bit IO in an SMP machine?!? */ static void OUT4500( struct airo_info *ai, u16 reg, u16 val ) { if (test_bit(FLAG_MPI,&ai->flags)) reg <<= 1; if ( !do8bitIO ) outw( val, ai->dev->base_addr + reg ); else { outb( val & 0xff, ai->dev->base_addr + reg ); outb( val >> 8, ai->dev->base_addr + reg + 1 ); } } static u16 IN4500( struct airo_info *ai, u16 reg ) { unsigned short rc; if (test_bit(FLAG_MPI,&ai->flags)) reg <<= 1; if ( !do8bitIO ) rc = inw( ai->dev->base_addr + reg ); else { rc = inb( ai->dev->base_addr + reg ); rc += ((int)inb( ai->dev->base_addr + reg + 1 )) << 8; } return rc; } static int enable_MAC(struct airo_info *ai, int lock) { int rc; Cmd cmd; Resp rsp; /* FLAG_RADIO_OFF : Radio disabled via /proc or Wireless Extensions * FLAG_RADIO_DOWN : Radio disabled via "ifconfig ethX down" * Note : we could try to use !netif_running(dev) in enable_MAC() * instead of this flag, but I don't trust it *within* the * open/close functions, and testing both flags together is * "cheaper" - Jean II */ if (ai->flags & FLAG_RADIO_MASK) return SUCCESS; if (lock && down_interruptible(&ai->sem)) return -ERESTARTSYS; if (!test_bit(FLAG_ENABLED, &ai->flags)) { memset(&cmd, 0, sizeof(cmd)); cmd.cmd = MAC_ENABLE; rc = issuecommand(ai, &cmd, &rsp); if (rc == SUCCESS) set_bit(FLAG_ENABLED, &ai->flags); } else rc = SUCCESS; if (lock) up(&ai->sem); if (rc) airo_print_err(ai->dev->name, "Cannot enable MAC"); else if ((rsp.status & 0xFF00) != 0) { airo_print_err(ai->dev->name, "Bad MAC enable reason=%x, " "rid=%x, offset=%d", rsp.rsp0, rsp.rsp1, rsp.rsp2); rc = ERROR; } return rc; } static void disable_MAC( struct airo_info *ai, int lock ) { Cmd cmd; Resp rsp; if (lock && down_interruptible(&ai->sem)) return; if (test_bit(FLAG_ENABLED, &ai->flags)) { memset(&cmd, 0, sizeof(cmd)); cmd.cmd = MAC_DISABLE; // disable in case already enabled issuecommand(ai, &cmd, &rsp); clear_bit(FLAG_ENABLED, &ai->flags); } if (lock) up(&ai->sem); } static 
/* Unmask all card interrupt sources. */
void enable_interrupts( struct airo_info *ai ) {
	/* Enable the interrupts */
	OUT4500( ai, EVINTEN, STATUS_INTS );
}

/* Mask all card interrupt sources. */
static void disable_interrupts( struct airo_info *ai ) {
	OUT4500( ai, EVINTEN, 0 );
}

/*
 * Pull one received 802.3 frame out of the MPI card's shared-memory RX
 * descriptor (rxfids[0]), optionally strip and verify the Cisco MIC
 * header, and hand the frame to the stack via netif_rx().  The
 * descriptor is re-armed (valid=1, rdy=0) on the way out.
 */
static void mpi_receive_802_3(struct airo_info *ai)
{
	RxFid rxd;
	int len = 0;
	struct sk_buff *skb;
	char *buffer;
	int off = 0;
	MICBuffer micbuf;

	memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd));
	/* Make sure we got something */
	if (rxd.rdy && rxd.valid == 0) {
		/* +12 covers the two 6-byte hardware addresses */
		len = rxd.len + 12;
		if (len < 12 || len > 2048)
			goto badrx;
		skb = dev_alloc_skb(len);
		if (!skb) {
			ai->dev->stats.rx_dropped++;
			goto badrx;
		}
		buffer = skb_put(skb,len);
		memcpy(buffer, ai->rxfids[0].virtual_host_addr, ETH_ALEN * 2);
		if (ai->micstats.enabled) {
			memcpy(&micbuf,
				ai->rxfids[0].virtual_host_addr + ETH_ALEN * 2,
				sizeof(micbuf));
			/* <= 0x05DC (1500): an 802.3 length field rather
			 * than an EtherType, i.e. a MIC header may follow */
			if (ntohs(micbuf.typelen) <= 0x05DC) {
				if (len <= sizeof(micbuf) + ETH_ALEN * 2)
					goto badmic;

				off = sizeof(micbuf);
				skb_trim (skb, len - off);
			}
		}
		memcpy(buffer + ETH_ALEN * 2,
			ai->rxfids[0].virtual_host_addr + ETH_ALEN * 2 + off,
			len - ETH_ALEN * 2 - off);
		if (decapsulate (ai, &micbuf, (etherHead*)buffer, len - off - ETH_ALEN * 2)) {
badmic:
			dev_kfree_skb_irq (skb);
			goto badrx;
		}
#ifdef WIRELESS_SPY
		if (ai->spy_data.spy_number > 0) {
			char *sa;
			struct iw_quality wstats;

			/* Prepare spy data : addr + qual */
			sa = buffer + ETH_ALEN;
			wstats.qual = 0; /* XXX Where do I get that info from ???
*/ wstats.level = 0; wstats.updated = 0; /* Update spy records */ wireless_spy_update(ai->dev, sa, &wstats); } #endif /* WIRELESS_SPY */ skb->ip_summed = CHECKSUM_NONE; skb->protocol = eth_type_trans(skb, ai->dev); netif_rx(skb); } badrx: if (rxd.valid == 0) { rxd.valid = 1; rxd.rdy = 0; rxd.len = PKTSIZE; memcpy_toio(ai->rxfids[0].card_ram_off, &rxd, sizeof(rxd)); } } static void mpi_receive_802_11(struct airo_info *ai) { RxFid rxd; struct sk_buff *skb = NULL; u16 len, hdrlen = 0; __le16 fc; struct rx_hdr hdr; u16 gap; u16 *buffer; char *ptr = ai->rxfids[0].virtual_host_addr + 4; memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd)); memcpy ((char *)&hdr, ptr, sizeof(hdr)); ptr += sizeof(hdr); /* Bad CRC. Ignore packet */ if (le16_to_cpu(hdr.status) & 2) hdr.len = 0; if (ai->wifidev == NULL) hdr.len = 0; len = le16_to_cpu(hdr.len); if (len > AIRO_DEF_MTU) { airo_print_err(ai->dev->name, "Bad size %d", len); goto badrx; } if (len == 0) goto badrx; fc = get_unaligned((__le16 *)ptr); hdrlen = header_len(fc); skb = dev_alloc_skb( len + hdrlen + 2 ); if ( !skb ) { ai->dev->stats.rx_dropped++; goto badrx; } buffer = (u16*)skb_put (skb, len + hdrlen); memcpy ((char *)buffer, ptr, hdrlen); ptr += hdrlen; if (hdrlen == 24) ptr += 6; gap = get_unaligned_le16(ptr); ptr += sizeof(__le16); if (gap) { if (gap <= 8) ptr += gap; else airo_print_err(ai->dev->name, "gaplen too big. 
Problems will follow..."); } memcpy ((char *)buffer + hdrlen, ptr, len); ptr += len; #ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ if (ai->spy_data.spy_number > 0) { char *sa; struct iw_quality wstats; /* Prepare spy data : addr + qual */ sa = (char*)buffer + 10; wstats.qual = hdr.rssi[0]; if (ai->rssi) wstats.level = 0x100 - ai->rssi[hdr.rssi[1]].rssidBm; else wstats.level = (hdr.rssi[1] + 321) / 2; wstats.noise = ai->wstats.qual.noise; wstats.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_DBM; /* Update spy records */ wireless_spy_update(ai->dev, sa, &wstats); } #endif /* IW_WIRELESS_SPY */ skb_reset_mac_header(skb); skb->pkt_type = PACKET_OTHERHOST; skb->dev = ai->wifidev; skb->protocol = htons(ETH_P_802_2); skb->ip_summed = CHECKSUM_NONE; netif_rx( skb ); badrx: if (rxd.valid == 0) { rxd.valid = 1; rxd.rdy = 0; rxd.len = PKTSIZE; memcpy_toio(ai->rxfids[0].card_ram_off, &rxd, sizeof(rxd)); } } static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) { Cmd cmd; Resp rsp; int status; SsidRid mySsid; __le16 lastindex; WepKeyRid wkr; int rc; memset( &mySsid, 0, sizeof( mySsid ) ); kfree (ai->flash); ai->flash = NULL; /* The NOP is the first step in getting the card going */ cmd.cmd = NOP; cmd.parm0 = cmd.parm1 = cmd.parm2 = 0; if (lock && down_interruptible(&ai->sem)) return ERROR; if ( issuecommand( ai, &cmd, &rsp ) != SUCCESS ) { if (lock) up(&ai->sem); return ERROR; } disable_MAC( ai, 0); // Let's figure out if we need to use the AUX port if (!test_bit(FLAG_MPI,&ai->flags)) { cmd.cmd = CMD_ENABLEAUX; if (issuecommand(ai, &cmd, &rsp) != SUCCESS) { if (lock) up(&ai->sem); airo_print_err(ai->dev->name, "Error checking for AUX port"); return ERROR; } if (!aux_bap || rsp.status & 0xff00) { ai->bap_read = fast_bap_read; airo_print_dbg(ai->dev->name, "Doing fast bap_reads"); } else { ai->bap_read = aux_bap_read; airo_print_dbg(ai->dev->name, "Doing AUX bap_reads"); } } if (lock) up(&ai->sem); if (ai->config.len == 0) { int i; tdsRssiRid 
rssi_rid; CapabilityRid cap_rid; kfree(ai->APList); ai->APList = NULL; kfree(ai->SSID); ai->SSID = NULL; // general configuration (read/modify/write) status = readConfigRid(ai, lock); if ( status != SUCCESS ) return ERROR; status = readCapabilityRid(ai, &cap_rid, lock); if ( status != SUCCESS ) return ERROR; status = PC4500_readrid(ai,RID_RSSI,&rssi_rid,sizeof(rssi_rid),lock); if ( status == SUCCESS ) { if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL) memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */ } else { kfree(ai->rssi); ai->rssi = NULL; if (cap_rid.softCap & cpu_to_le16(8)) ai->config.rmode |= RXMODE_NORMALIZED_RSSI; else airo_print_warn(ai->dev->name, "unknown received signal " "level scale"); } ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS; ai->config.authType = AUTH_OPEN; ai->config.modulation = MOD_CCK; if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) && (cap_rid.extSoftCap & cpu_to_le16(1)) && micsetup(ai) == SUCCESS) { ai->config.opmode |= MODE_MIC; set_bit(FLAG_MIC_CAPABLE, &ai->flags); } /* Save off the MAC */ for( i = 0; i < ETH_ALEN; i++ ) { mac[i] = ai->config.macAddr[i]; } /* Check to see if there are any insmod configured rates to add */ if ( rates[0] ) { memset(ai->config.rates,0,sizeof(ai->config.rates)); for( i = 0; i < 8 && rates[i]; i++ ) { ai->config.rates[i] = rates[i]; } } set_bit (FLAG_COMMIT, &ai->flags); } /* Setup the SSIDs if present */ if ( ssids[0] ) { int i; for( i = 0; i < 3 && ssids[i]; i++ ) { size_t len = strlen(ssids[i]); if (len > 32) len = 32; mySsid.ssids[i].len = cpu_to_le16(len); memcpy(mySsid.ssids[i].ssid, ssids[i], len); } mySsid.len = cpu_to_le16(sizeof(mySsid)); } status = writeConfigRid(ai, lock); if ( status != SUCCESS ) return ERROR; /* Set up the SSID list */ if ( ssids[0] ) { status = writeSsidRid(ai, &mySsid, lock); if ( status != SUCCESS ) return ERROR; } status = enable_MAC(ai, lock); if (status != SUCCESS) return ERROR; /* Grab the initial wep key, we gotta save 
it for auto_wep */
	/* Walk the WEP key RIDs; the kindex==0xffff record names the
	 * default key index, saved for try_auto_wep(). */
	rc = readWepKeyRid(ai, &wkr, 1, lock);
	if (rc == SUCCESS) do {
		lastindex = wkr.kindex;
		if (wkr.kindex == cpu_to_le16(0xffff)) {
			ai->defindex = wkr.mac[0];
		}
		rc = readWepKeyRid(ai, &wkr, 0, lock);
	} while(lastindex != wkr.kindex);
	try_auto_wep(ai);

	return SUCCESS;
}

/*
 * Push one command into the firmware mailbox (PARAM0..2 + COMMAND) and
 * busy-wait, bounded, for the EV_CMD completion event; then collect the
 * STATUS/RESP0..2 response into @pRsp and acknowledge the event.
 * Clears a stuck COMMAND_BUSY condition if it sees one.
 *
 * Returns SUCCESS, or ERROR if the card never signalled completion.
 */
static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) {
	// Im really paranoid about letting it run forever!
	int max_tries = 600000;

	/* Acknowledge a stale completion before issuing */
	if (IN4500(ai, EVSTAT) & EV_CMD)
		OUT4500(ai, EVACK, EV_CMD);

	OUT4500(ai, PARAM0, pCmd->parm0);
	OUT4500(ai, PARAM1, pCmd->parm1);
	OUT4500(ai, PARAM2, pCmd->parm2);
	OUT4500(ai, COMMAND, pCmd->cmd);

	while (max_tries-- && (IN4500(ai, EVSTAT) & EV_CMD) == 0) {
		if ((IN4500(ai, COMMAND)) == pCmd->cmd)
			// PC4500 didn't notice command, try again
			OUT4500(ai, COMMAND, pCmd->cmd);
		/* Be polite while polling when we may sleep */
		if (!in_atomic() && (max_tries & 255) == 0)
			schedule();
	}

	if ( max_tries == -1 ) {
		airo_print_err(ai->dev->name,
			"Max tries exceeded when issuing command");
		if (IN4500(ai, COMMAND) & COMMAND_BUSY)
			OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
		return ERROR;
	}

	// command completed
	pRsp->status = IN4500(ai, STATUS);
	pRsp->rsp0 = IN4500(ai, RESP0);
	pRsp->rsp1 = IN4500(ai, RESP1);
	pRsp->rsp2 = IN4500(ai, RESP2);
	if ((pRsp->status & 0xff00)!=0 && pCmd->cmd != CMD_SOFTRESET)
		airo_print_err(ai->dev->name,
			"cmd:%x status:%x rsp0:%x rsp1:%x rsp2:%x",
			pCmd->cmd, pRsp->status, pRsp->rsp0, pRsp->rsp1,
			pRsp->rsp2);

	// clear stuck command busy if necessary
	if (IN4500(ai, COMMAND) & COMMAND_BUSY) {
		OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
	}
	// acknowledge processing the status/response
	OUT4500(ai, EVACK, EV_CMD);
	return SUCCESS;
}

/* Sets up the bap to start exchange data.  whichbap should
 * be one of the BAP0 or BAP1 defines.  Locks should be held before
 * calling!
 */
/*
 * Program SELECT/OFFSET for the chosen Buffer Access Path and poll the
 * OFFSET register until the card reports BAP_DONE.  BAP_BUSY is retried
 * up to `timeout` polls; a missed setup is re-issued up to `max_tries`
 * times.  Returns SUCCESS or ERROR (bad rid/offset, or too many
 * retries).
 */
static int bap_setup(struct airo_info *ai, u16 rid, u16 offset, int whichbap )
{
	int timeout = 50;
	int max_tries = 3;

	OUT4500(ai, SELECT0+whichbap, rid);
	OUT4500(ai, OFFSET0+whichbap, offset);
	while (1) {
		int status = IN4500(ai, OFFSET0+whichbap);
		if (status & BAP_BUSY) {
			/* This isn't really a timeout, but its kinda close */
			if (timeout--) {
				continue;
			}
		} else if ( status & BAP_ERR ) {
			/* invalid rid or offset */
			airo_print_err(ai->dev->name, "BAP error %x %d",
				status, whichbap );
			return ERROR;
		} else if (status & BAP_DONE) { // success
			return SUCCESS;
		}
		if ( !(max_tries--) ) {
			airo_print_err(ai->dev->name,
				"BAP setup error too many retries\n");
			return ERROR;
		}
		// -- PC4500 missed it, try again
		OUT4500(ai, SELECT0+whichbap, rid);
		OUT4500(ai, OFFSET0+whichbap, offset);
		timeout = 50;
	}
}

/* should only be called by aux_bap_read.  This aux function and the
   following use concepts not documented in the developers guide.  I
   got them from a patch given to my by Aironet */
/*
 * Select an AUX page and return the chained "next" page plus the byte
 * count available on this page (via @len).
 */
static u16 aux_setup(struct airo_info *ai, u16 page,
		     u16 offset, u16 *len)
{
	u16 next;

	OUT4500(ai, AUXPAGE, page);
	OUT4500(ai, AUXOFF, 0);
	next = IN4500(ai, AUXDATA);
	*len = IN4500(ai, AUXDATA)&0xff;
	if (offset != 4) OUT4500(ai, AUXOFF, offset);
	return next;
}

/* requires call to bap_setup() first */
/*
 * Read @bytelen bytes through the AUX port, following the page chain
 * returned by aux_setup() under ai->aux_lock.
 */
static int aux_bap_read(struct airo_info *ai, __le16 *pu16Dst,
			int bytelen, int whichbap)
{
	u16 len;
	u16 page;
	u16 offset;
	u16 next;
	int words;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ai->aux_lock, flags);
	page = IN4500(ai, SWS0+whichbap);
	offset = IN4500(ai, SWS2+whichbap);
	next = aux_setup(ai, page, offset, &len);
	words = (bytelen+1)>>1;	/* round byte count up to whole words */

	for (i=0; i<words;) {
		int count;
		/* take at most what this AUX page still holds */
		count = (len>>1) < (words-i) ?
(len>>1) : (words-i);
		if ( !do8bitIO )
			insw( ai->dev->base_addr+DATA0+whichbap,
			      pu16Dst+i, count );
		else
			insb( ai->dev->base_addr+DATA0+whichbap,
			      pu16Dst+i, count << 1 );
		i += count;
		if (i<words) {
			/* advance to the next page in the AUX chain */
			next = aux_setup(ai, next, 4, &len);
		}
	}
	spin_unlock_irqrestore(&ai->aux_lock, flags);
	return SUCCESS;
}

/* requires call to bap_setup() first */
/*
 * Read @bytelen bytes straight from the BAP data port (no AUX paging).
 * The count is rounded up to an even number of bytes.
 */
static int fast_bap_read(struct airo_info *ai, __le16 *pu16Dst,
			 int bytelen, int whichbap)
{
	bytelen = (bytelen + 1) & (~1); // round up to even value
	if ( !do8bitIO )
		insw( ai->dev->base_addr+DATA0+whichbap,
		      pu16Dst, bytelen>>1 );
	else
		insb( ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen );
	return SUCCESS;
}

/* requires call to bap_setup() first */
/*
 * Write @bytelen bytes to the BAP data port.  The count is rounded up
 * to an even number of bytes.
 */
static int bap_write(struct airo_info *ai, const __le16 *pu16Src,
		     int bytelen, int whichbap)
{
	bytelen = (bytelen + 1) & (~1); // round up to even value
	if ( !do8bitIO )
		outsw( ai->dev->base_addr+DATA0+whichbap,
		       pu16Src, bytelen>>1 );
	else
		outsb( ai->dev->base_addr+DATA0+whichbap, pu16Src, bytelen );
	return SUCCESS;
}

/*
 * Issue an ACCESS-style command (@accmd) against RID @rid.  Returns 0
 * on success, the issuecommand() status on transport failure, or a
 * composite (accmd << 8) + rsp0 code when the firmware rejects the
 * access.
 */
static int PC4500_accessrid(struct airo_info *ai, u16 rid, u16 accmd)
{
	Cmd cmd; /* for issuing commands */
	Resp rsp; /* response from commands */
	u16 status;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = accmd;
	cmd.parm0 = rid;
	status = issuecommand(ai, &cmd, &rsp);
	if (status != 0) return status;
	if ( (rsp.status & 0x7F00) != 0) {
		return (accmd << 8) + (rsp.rsp0 & 0xFF);
	}
	return 0;
}

/* Note, that we are using BAP1 which is also used by transmit, so
 * we must get a lock.
*/ static int PC4500_readrid(struct airo_info *ai, u16 rid, void *pBuf, int len, int lock) { u16 status; int rc = SUCCESS; if (lock) { if (down_interruptible(&ai->sem)) return ERROR; } if (test_bit(FLAG_MPI,&ai->flags)) { Cmd cmd; Resp rsp; memset(&cmd, 0, sizeof(cmd)); memset(&rsp, 0, sizeof(rsp)); ai->config_desc.rid_desc.valid = 1; ai->config_desc.rid_desc.len = RIDSIZE; ai->config_desc.rid_desc.rid = 0; ai->config_desc.rid_desc.host_addr = ai->ridbus; cmd.cmd = CMD_ACCESS; cmd.parm0 = rid; memcpy_toio(ai->config_desc.card_ram_off, &ai->config_desc.rid_desc, sizeof(Rid)); rc = issuecommand(ai, &cmd, &rsp); if (rsp.status & 0x7f00) rc = rsp.rsp0; if (!rc) memcpy(pBuf, ai->config_desc.virtual_host_addr, len); goto done; } else { if ((status = PC4500_accessrid(ai, rid, CMD_ACCESS))!=SUCCESS) { rc = status; goto done; } if (bap_setup(ai, rid, 0, BAP1) != SUCCESS) { rc = ERROR; goto done; } // read the rid length field bap_read(ai, pBuf, 2, BAP1); // length for remaining part of rid len = min(len, (int)le16_to_cpu(*(__le16*)pBuf)) - 2; if ( len <= 2 ) { airo_print_err(ai->dev->name, "Rid %x has a length of %d which is too short", (int)rid, (int)len ); rc = ERROR; goto done; } // read remainder of the rid rc = bap_read(ai, ((__le16*)pBuf)+1, len, BAP1); } done: if (lock) up(&ai->sem); return rc; } /* Note, that we are using BAP1 which is also used by transmit, so * make sure this isn't called when a transmit is happening */ static int PC4500_writerid(struct airo_info *ai, u16 rid, const void *pBuf, int len, int lock) { u16 status; int rc = SUCCESS; *(__le16*)pBuf = cpu_to_le16((u16)len); if (lock) { if (down_interruptible(&ai->sem)) return ERROR; } if (test_bit(FLAG_MPI,&ai->flags)) { Cmd cmd; Resp rsp; if (test_bit(FLAG_ENABLED, &ai->flags) && (RID_WEP_TEMP != rid)) airo_print_err(ai->dev->name, "%s: MAC should be disabled (rid=%04x)", __func__, rid); memset(&cmd, 0, sizeof(cmd)); memset(&rsp, 0, sizeof(rsp)); ai->config_desc.rid_desc.valid = 1; 
ai->config_desc.rid_desc.len = *((u16 *)pBuf); ai->config_desc.rid_desc.rid = 0; cmd.cmd = CMD_WRITERID; cmd.parm0 = rid; memcpy_toio(ai->config_desc.card_ram_off, &ai->config_desc.rid_desc, sizeof(Rid)); if (len < 4 || len > 2047) { airo_print_err(ai->dev->name, "%s: len=%d", __func__, len); rc = -1; } else { memcpy(ai->config_desc.virtual_host_addr, pBuf, len); rc = issuecommand(ai, &cmd, &rsp); if ((rc & 0xff00) != 0) { airo_print_err(ai->dev->name, "%s: Write rid Error %d", __func__, rc); airo_print_err(ai->dev->name, "%s: Cmd=%04x", __func__, cmd.cmd); } if ((rsp.status & 0x7f00)) rc = rsp.rsp0; } } else { // --- first access so that we can write the rid data if ( (status = PC4500_accessrid(ai, rid, CMD_ACCESS)) != 0) { rc = status; goto done; } // --- now write the rid data if (bap_setup(ai, rid, 0, BAP1) != SUCCESS) { rc = ERROR; goto done; } bap_write(ai, pBuf, len, BAP1); // ---now commit the rid data rc = PC4500_accessrid(ai, rid, 0x100|CMD_ACCESS); } done: if (lock) up(&ai->sem); return rc; } /* Allocates a FID to be used for transmitting packets. We only use one for now. */ static u16 transmit_allocate(struct airo_info *ai, int lenPayload, int raw) { unsigned int loop = 3000; Cmd cmd; Resp rsp; u16 txFid; __le16 txControl; cmd.cmd = CMD_ALLOCATETX; cmd.parm0 = lenPayload; if (down_interruptible(&ai->sem)) return ERROR; if (issuecommand(ai, &cmd, &rsp) != SUCCESS) { txFid = ERROR; goto done; } if ( (rsp.status & 0xFF00) != 0) { txFid = ERROR; goto done; } /* wait for the allocate event/indication * It makes me kind of nervous that this can just sit here and spin, * but in practice it only loops like four times. */ while (((IN4500(ai, EVSTAT) & EV_ALLOC) == 0) && --loop); if (!loop) { txFid = ERROR; goto done; } // get the allocated fid and acknowledge txFid = IN4500(ai, TXALLOCFID); OUT4500(ai, EVACK, EV_ALLOC); /* The CARD is pretty cool since it converts the ethernet packet * into 802.11. 
Also note that we don't release the FID since we * will be using the same one over and over again. */ /* We only have to setup the control once since we are not * releasing the fid. */ if (raw) txControl = cpu_to_le16(TXCTL_TXOK | TXCTL_TXEX | TXCTL_802_11 | TXCTL_ETHERNET | TXCTL_NORELEASE); else txControl = cpu_to_le16(TXCTL_TXOK | TXCTL_TXEX | TXCTL_802_3 | TXCTL_ETHERNET | TXCTL_NORELEASE); if (bap_setup(ai, txFid, 0x0008, BAP1) != SUCCESS) txFid = ERROR; else bap_write(ai, &txControl, sizeof(txControl), BAP1); done: up(&ai->sem); return txFid; } /* In general BAP1 is dedicated to transmiting packets. However, since we need a BAP when accessing RIDs, we also use BAP1 for that. Make sure the BAP1 spinlock is held when this is called. */ static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket) { __le16 payloadLen; Cmd cmd; Resp rsp; int miclen = 0; u16 txFid = len; MICBuffer pMic; len >>= 16; if (len <= ETH_ALEN * 2) { airo_print_warn(ai->dev->name, "Short packet %d", len); return ERROR; } len -= ETH_ALEN * 2; if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled && (ntohs(((__be16 *)pPacket)[6]) != 0x888E)) { if (encapsulate(ai,(etherHead *)pPacket,&pMic,len) != SUCCESS) return ERROR; miclen = sizeof(pMic); } // packet is destination[6], source[6], payload[len-12] // write the payload length and dst/src/payload if (bap_setup(ai, txFid, 0x0036, BAP1) != SUCCESS) return ERROR; /* The hardware addresses aren't counted as part of the payload, so * we have to subtract the 12 bytes for the addresses off */ payloadLen = cpu_to_le16(len + miclen); bap_write(ai, &payloadLen, sizeof(payloadLen),BAP1); bap_write(ai, (__le16*)pPacket, sizeof(etherHead), BAP1); if (miclen) bap_write(ai, (__le16*)&pMic, miclen, BAP1); bap_write(ai, (__le16*)(pPacket + sizeof(etherHead)), len, BAP1); // issue the transmit command memset( &cmd, 0, sizeof( cmd ) ); cmd.cmd = CMD_TRANSMIT; cmd.parm0 = txFid; if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return 
ERROR; if ( (rsp.status & 0xFF00) != 0) return ERROR; return SUCCESS; } static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket) { __le16 fc, payloadLen; Cmd cmd; Resp rsp; int hdrlen; static u8 tail[(30-10) + 2 + 6] = {[30-10] = 6}; /* padding of header to full size + le16 gaplen (6) + gaplen bytes */ u16 txFid = len; len >>= 16; fc = *(__le16*)pPacket; hdrlen = header_len(fc); if (len < hdrlen) { airo_print_warn(ai->dev->name, "Short packet %d", len); return ERROR; } /* packet is 802.11 header + payload * write the payload length and dst/src/payload */ if (bap_setup(ai, txFid, 6, BAP1) != SUCCESS) return ERROR; /* The 802.11 header aren't counted as part of the payload, so * we have to subtract the header bytes off */ payloadLen = cpu_to_le16(len-hdrlen); bap_write(ai, &payloadLen, sizeof(payloadLen),BAP1); if (bap_setup(ai, txFid, 0x0014, BAP1) != SUCCESS) return ERROR; bap_write(ai, (__le16 *)pPacket, hdrlen, BAP1); bap_write(ai, (__le16 *)(tail + (hdrlen - 10)), 38 - hdrlen, BAP1); bap_write(ai, (__le16 *)(pPacket + hdrlen), len - hdrlen, BAP1); // issue the transmit command memset( &cmd, 0, sizeof( cmd ) ); cmd.cmd = CMD_TRANSMIT; cmd.parm0 = txFid; if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return ERROR; if ( (rsp.status & 0xFF00) != 0) return ERROR; return SUCCESS; } /* * This is the proc_fs routines. It is a bit messier than I would * like! Feel free to clean it up! 
 */

/* Forward declarations for the /proc file handlers below. */
static ssize_t proc_read( struct file *file,
			  char __user *buffer,
			  size_t len,
			  loff_t *offset);

static ssize_t proc_write( struct file *file,
			   const char __user *buffer,
			   size_t len,
			   loff_t *offset );
static int proc_close( struct inode *inode, struct file *file );

static int proc_stats_open( struct inode *inode, struct file *file );
static int proc_statsdelta_open( struct inode *inode, struct file *file );
static int proc_status_open( struct inode *inode, struct file *file );
static int proc_SSID_open( struct inode *inode, struct file *file );
static int proc_APList_open( struct inode *inode, struct file *file );
static int proc_BSSList_open( struct inode *inode, struct file *file );
static int proc_config_open( struct inode *inode, struct file *file );
static int proc_wepkey_open( struct inode *inode, struct file *file );

/* Read-only /proc entries: StatsDelta, Stats, Status. */
static const struct file_operations proc_statsdelta_ops = {
	.owner		= THIS_MODULE,
	.read		= proc_read,
	.open		= proc_statsdelta_open,
	.release	= proc_close,
	.llseek		= default_llseek,
};

static const struct file_operations proc_stats_ops = {
	.owner		= THIS_MODULE,
	.read		= proc_read,
	.open		= proc_stats_open,
	.release	= proc_close,
	.llseek		= default_llseek,
};

static const struct file_operations proc_status_ops = {
	.owner		= THIS_MODULE,
	.read		= proc_read,
	.open		= proc_status_open,
	.release	= proc_close,
	.llseek		= default_llseek,
};

/* Read/write /proc entries: SSID, BSSList, APList, Config, WepKey. */
static const struct file_operations proc_SSID_ops = {
	.owner		= THIS_MODULE,
	.read		= proc_read,
	.write		= proc_write,
	.open		= proc_SSID_open,
	.release	= proc_close,
	.llseek		= default_llseek,
};

static const struct file_operations proc_BSSList_ops = {
	.owner		= THIS_MODULE,
	.read		= proc_read,
	.write		= proc_write,
	.open		= proc_BSSList_open,
	.release	= proc_close,
	.llseek		= default_llseek,
};

static const struct file_operations proc_APList_ops = {
	.owner		= THIS_MODULE,
	.read		= proc_read,
	.write		= proc_write,
	.open		= proc_APList_open,
	.release	= proc_close,
	.llseek		= default_llseek,
};

static const struct file_operations
proc_config_ops = { .owner = THIS_MODULE, .read = proc_read, .write = proc_write, .open = proc_config_open, .release = proc_close, .llseek = default_llseek, }; static const struct file_operations proc_wepkey_ops = { .owner = THIS_MODULE, .read = proc_read, .write = proc_write, .open = proc_wepkey_open, .release = proc_close, .llseek = default_llseek, }; static struct proc_dir_entry *airo_entry; struct proc_data { int release_buffer; int readlen; char *rbuffer; int writelen; int maxwritelen; char *wbuffer; void (*on_close) (struct inode *, struct file *); }; static int setup_proc_entry( struct net_device *dev, struct airo_info *apriv ) { struct proc_dir_entry *entry; /* First setup the device directory */ strcpy(apriv->proc_name,dev->name); apriv->proc_entry = proc_mkdir_mode(apriv->proc_name, airo_perm, airo_entry); if (!apriv->proc_entry) return -ENOMEM; proc_set_user(apriv->proc_entry, proc_kuid, proc_kgid); /* Setup the StatsDelta */ entry = proc_create_data("StatsDelta", S_IRUGO & proc_perm, apriv->proc_entry, &proc_statsdelta_ops, dev); if (!entry) goto fail; proc_set_user(entry, proc_kuid, proc_kgid); /* Setup the Stats */ entry = proc_create_data("Stats", S_IRUGO & proc_perm, apriv->proc_entry, &proc_stats_ops, dev); if (!entry) goto fail; proc_set_user(entry, proc_kuid, proc_kgid); /* Setup the Status */ entry = proc_create_data("Status", S_IRUGO & proc_perm, apriv->proc_entry, &proc_status_ops, dev); if (!entry) goto fail; proc_set_user(entry, proc_kuid, proc_kgid); /* Setup the Config */ entry = proc_create_data("Config", proc_perm, apriv->proc_entry, &proc_config_ops, dev); if (!entry) goto fail; proc_set_user(entry, proc_kuid, proc_kgid); /* Setup the SSID */ entry = proc_create_data("SSID", proc_perm, apriv->proc_entry, &proc_SSID_ops, dev); if (!entry) goto fail; proc_set_user(entry, proc_kuid, proc_kgid); /* Setup the APList */ entry = proc_create_data("APList", proc_perm, apriv->proc_entry, &proc_APList_ops, dev); if (!entry) goto fail; 
proc_set_user(entry, proc_kuid, proc_kgid); /* Setup the BSSList */ entry = proc_create_data("BSSList", proc_perm, apriv->proc_entry, &proc_BSSList_ops, dev); if (!entry) goto fail; proc_set_user(entry, proc_kuid, proc_kgid); /* Setup the WepKey */ entry = proc_create_data("WepKey", proc_perm, apriv->proc_entry, &proc_wepkey_ops, dev); if (!entry) goto fail; proc_set_user(entry, proc_kuid, proc_kgid); return 0; fail: remove_proc_subtree(apriv->proc_name, airo_entry); return -ENOMEM; } static int takedown_proc_entry( struct net_device *dev, struct airo_info *apriv ) { remove_proc_subtree(apriv->proc_name, airo_entry); return 0; } /* * What we want from the proc_fs is to be able to efficiently read * and write the configuration. To do this, we want to read the * configuration when the file is opened and write it when the file is * closed. So basically we allocate a read buffer at open and fill it * with data, and allocate a write buffer and read it at close. */ /* * The read routine is generic, it relies on the preallocated rbuffer * to supply the data. */ static ssize_t proc_read( struct file *file, char __user *buffer, size_t len, loff_t *offset ) { struct proc_data *priv = file->private_data; if (!priv->rbuffer) return -EINVAL; return simple_read_from_buffer(buffer, len, offset, priv->rbuffer, priv->readlen); } /* * The write routine is generic, it fills in a preallocated rbuffer * to supply the data. 
*/ static ssize_t proc_write( struct file *file, const char __user *buffer, size_t len, loff_t *offset ) { ssize_t ret; struct proc_data *priv = file->private_data; if (!priv->wbuffer) return -EINVAL; ret = simple_write_to_buffer(priv->wbuffer, priv->maxwritelen, offset, buffer, len); if (ret > 0) priv->writelen = max_t(int, priv->writelen, *offset); return ret; } static int proc_status_open(struct inode *inode, struct file *file) { struct proc_data *data; struct net_device *dev = PDE_DATA(inode); struct airo_info *apriv = dev->ml_priv; CapabilityRid cap_rid; StatusRid status_rid; u16 mode; int i; if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; data = file->private_data; if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) { kfree (file->private_data); return -ENOMEM; } readStatusRid(apriv, &status_rid, 1); readCapabilityRid(apriv, &cap_rid, 1); mode = le16_to_cpu(status_rid.mode); i = sprintf(data->rbuffer, "Status: %s%s%s%s%s%s%s%s%s\n", mode & 1 ? "CFG ": "", mode & 2 ? "ACT ": "", mode & 0x10 ? "SYN ": "", mode & 0x20 ? "LNK ": "", mode & 0x40 ? "LEAP ": "", mode & 0x80 ? "PRIV ": "", mode & 0x100 ? "KEY ": "", mode & 0x200 ? "WEP ": "", mode & 0x8000 ? 
"ERR ": ""); sprintf( data->rbuffer+i, "Mode: %x\n" "Signal Strength: %d\n" "Signal Quality: %d\n" "SSID: %-.*s\n" "AP: %-.16s\n" "Freq: %d\n" "BitRate: %dmbs\n" "Driver Version: %s\n" "Device: %s\nManufacturer: %s\nFirmware Version: %s\n" "Radio type: %x\nCountry: %x\nHardware Version: %x\n" "Software Version: %x\nSoftware Subversion: %x\n" "Boot block version: %x\n", le16_to_cpu(status_rid.mode), le16_to_cpu(status_rid.normalizedSignalStrength), le16_to_cpu(status_rid.signalQuality), le16_to_cpu(status_rid.SSIDlen), status_rid.SSID, status_rid.apName, le16_to_cpu(status_rid.channel), le16_to_cpu(status_rid.currentXmitRate) / 2, version, cap_rid.prodName, cap_rid.manName, cap_rid.prodVer, le16_to_cpu(cap_rid.radioType), le16_to_cpu(cap_rid.country), le16_to_cpu(cap_rid.hardVer), le16_to_cpu(cap_rid.softVer), le16_to_cpu(cap_rid.softSubVer), le16_to_cpu(cap_rid.bootBlockVer)); data->readlen = strlen( data->rbuffer ); return 0; } static int proc_stats_rid_open(struct inode*, struct file*, u16); static int proc_statsdelta_open( struct inode *inode, struct file *file ) { if (file->f_mode&FMODE_WRITE) { return proc_stats_rid_open(inode, file, RID_STATSDELTACLEAR); } return proc_stats_rid_open(inode, file, RID_STATSDELTA); } static int proc_stats_open( struct inode *inode, struct file *file ) { return proc_stats_rid_open(inode, file, RID_STATS); } static int proc_stats_rid_open( struct inode *inode, struct file *file, u16 rid ) { struct proc_data *data; struct net_device *dev = PDE_DATA(inode); struct airo_info *apriv = dev->ml_priv; StatsRid stats; int i, j; __le32 *vals = stats.vals; int len; if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; data = file->private_data; if ((data->rbuffer = kmalloc( 4096, GFP_KERNEL )) == NULL) { kfree (file->private_data); return -ENOMEM; } readStatsRid(apriv, &stats, rid, 1); len = le16_to_cpu(stats.len); j = 0; for(i=0; statsLabels[i]!=(char *)-1 && i*4<len; i++) { if (!statsLabels[i]) 
continue; if (j+strlen(statsLabels[i])+16>4096) { airo_print_warn(apriv->dev->name, "Potentially disastrous buffer overflow averted!"); break; } j+=sprintf(data->rbuffer+j, "%s: %u\n", statsLabels[i], le32_to_cpu(vals[i])); } if (i*4 >= len) { airo_print_warn(apriv->dev->name, "Got a short rid"); } data->readlen = j; return 0; } static int get_dec_u16( char *buffer, int *start, int limit ) { u16 value; int valid = 0; for (value = 0; *start < limit && buffer[*start] >= '0' && buffer[*start] <= '9'; (*start)++) { valid = 1; value *= 10; value += buffer[*start] - '0'; } if ( !valid ) return -1; return value; } static int airo_config_commit(struct net_device *dev, struct iw_request_info *info, void *zwrq, char *extra); static inline int sniffing_mode(struct airo_info *ai) { return (le16_to_cpu(ai->config.rmode) & le16_to_cpu(RXMODE_MASK)) >= le16_to_cpu(RXMODE_RFMON); } static void proc_config_on_close(struct inode *inode, struct file *file) { struct proc_data *data = file->private_data; struct net_device *dev = PDE_DATA(inode); struct airo_info *ai = dev->ml_priv; char *line; if ( !data->writelen ) return; readConfigRid(ai, 1); set_bit (FLAG_COMMIT, &ai->flags); line = data->wbuffer; while( line[0] ) { /*** Mode processing */ if ( !strncmp( line, "Mode: ", 6 ) ) { line += 6; if (sniffing_mode(ai)) set_bit (FLAG_RESET, &ai->flags); ai->config.rmode &= ~RXMODE_FULL_MASK; clear_bit (FLAG_802_11, &ai->flags); ai->config.opmode &= ~MODE_CFG_MASK; ai->config.scanMode = SCANMODE_ACTIVE; if ( line[0] == 'a' ) { ai->config.opmode |= MODE_STA_IBSS; } else { ai->config.opmode |= MODE_STA_ESS; if ( line[0] == 'r' ) { ai->config.rmode |= RXMODE_RFMON | RXMODE_DISABLE_802_3_HEADER; ai->config.scanMode = SCANMODE_PASSIVE; set_bit (FLAG_802_11, &ai->flags); } else if ( line[0] == 'y' ) { ai->config.rmode |= RXMODE_RFMON_ANYBSS | RXMODE_DISABLE_802_3_HEADER; ai->config.scanMode = SCANMODE_PASSIVE; set_bit (FLAG_802_11, &ai->flags); } else if ( line[0] == 'l' ) ai->config.rmode |= 
RXMODE_LANMON; } set_bit (FLAG_COMMIT, &ai->flags); } /*** Radio status */ else if (!strncmp(line,"Radio: ", 7)) { line += 7; if (!strncmp(line,"off",3)) { set_bit (FLAG_RADIO_OFF, &ai->flags); } else { clear_bit (FLAG_RADIO_OFF, &ai->flags); } } /*** NodeName processing */ else if ( !strncmp( line, "NodeName: ", 10 ) ) { int j; line += 10; memset( ai->config.nodeName, 0, 16 ); /* Do the name, assume a space between the mode and node name */ for( j = 0; j < 16 && line[j] != '\n'; j++ ) { ai->config.nodeName[j] = line[j]; } set_bit (FLAG_COMMIT, &ai->flags); } /*** PowerMode processing */ else if ( !strncmp( line, "PowerMode: ", 11 ) ) { line += 11; if ( !strncmp( line, "PSPCAM", 6 ) ) { ai->config.powerSaveMode = POWERSAVE_PSPCAM; set_bit (FLAG_COMMIT, &ai->flags); } else if ( !strncmp( line, "PSP", 3 ) ) { ai->config.powerSaveMode = POWERSAVE_PSP; set_bit (FLAG_COMMIT, &ai->flags); } else { ai->config.powerSaveMode = POWERSAVE_CAM; set_bit (FLAG_COMMIT, &ai->flags); } } else if ( !strncmp( line, "DataRates: ", 11 ) ) { int v, i = 0, k = 0; /* i is index into line, k is index to rates */ line += 11; while((v = get_dec_u16(line, &i, 3))!=-1) { ai->config.rates[k++] = (u8)v; line += i + 1; i = 0; } set_bit (FLAG_COMMIT, &ai->flags); } else if ( !strncmp( line, "Channel: ", 9 ) ) { int v, i = 0; line += 9; v = get_dec_u16(line, &i, i+3); if ( v != -1 ) { ai->config.channelSet = cpu_to_le16(v); set_bit (FLAG_COMMIT, &ai->flags); } } else if ( !strncmp( line, "XmitPower: ", 11 ) ) { int v, i = 0; line += 11; v = get_dec_u16(line, &i, i+3); if ( v != -1 ) { ai->config.txPower = cpu_to_le16(v); set_bit (FLAG_COMMIT, &ai->flags); } } else if ( !strncmp( line, "WEP: ", 5 ) ) { line += 5; switch( line[0] ) { case 's': ai->config.authType = AUTH_SHAREDKEY; break; case 'e': ai->config.authType = AUTH_ENCRYPT; break; default: ai->config.authType = AUTH_OPEN; break; } set_bit (FLAG_COMMIT, &ai->flags); } else if ( !strncmp( line, "LongRetryLimit: ", 16 ) ) { int v, i = 0; line 
+= 16; v = get_dec_u16(line, &i, 3); v = (v<0) ? 0 : ((v>255) ? 255 : v); ai->config.longRetryLimit = cpu_to_le16(v); set_bit (FLAG_COMMIT, &ai->flags); } else if ( !strncmp( line, "ShortRetryLimit: ", 17 ) ) { int v, i = 0; line += 17; v = get_dec_u16(line, &i, 3); v = (v<0) ? 0 : ((v>255) ? 255 : v); ai->config.shortRetryLimit = cpu_to_le16(v); set_bit (FLAG_COMMIT, &ai->flags); } else if ( !strncmp( line, "RTSThreshold: ", 14 ) ) { int v, i = 0; line += 14; v = get_dec_u16(line, &i, 4); v = (v<0) ? 0 : ((v>AIRO_DEF_MTU) ? AIRO_DEF_MTU : v); ai->config.rtsThres = cpu_to_le16(v); set_bit (FLAG_COMMIT, &ai->flags); } else if ( !strncmp( line, "TXMSDULifetime: ", 16 ) ) { int v, i = 0; line += 16; v = get_dec_u16(line, &i, 5); v = (v<0) ? 0 : v; ai->config.txLifetime = cpu_to_le16(v); set_bit (FLAG_COMMIT, &ai->flags); } else if ( !strncmp( line, "RXMSDULifetime: ", 16 ) ) { int v, i = 0; line += 16; v = get_dec_u16(line, &i, 5); v = (v<0) ? 0 : v; ai->config.rxLifetime = cpu_to_le16(v); set_bit (FLAG_COMMIT, &ai->flags); } else if ( !strncmp( line, "TXDiversity: ", 13 ) ) { ai->config.txDiversity = (line[13]=='l') ? 1 : ((line[13]=='r')? 2: 3); set_bit (FLAG_COMMIT, &ai->flags); } else if ( !strncmp( line, "RXDiversity: ", 13 ) ) { ai->config.rxDiversity = (line[13]=='l') ? 1 : ((line[13]=='r')? 2: 3); set_bit (FLAG_COMMIT, &ai->flags); } else if ( !strncmp( line, "FragThreshold: ", 15 ) ) { int v, i = 0; line += 15; v = get_dec_u16(line, &i, 4); v = (v<256) ? 256 : ((v>AIRO_DEF_MTU) ? 
AIRO_DEF_MTU : v); v = v & 0xfffe; /* Make sure its even */ ai->config.fragThresh = cpu_to_le16(v); set_bit (FLAG_COMMIT, &ai->flags); } else if (!strncmp(line, "Modulation: ", 12)) { line += 12; switch(*line) { case 'd': ai->config.modulation=MOD_DEFAULT; set_bit(FLAG_COMMIT, &ai->flags); break; case 'c': ai->config.modulation=MOD_CCK; set_bit(FLAG_COMMIT, &ai->flags); break; case 'm': ai->config.modulation=MOD_MOK; set_bit(FLAG_COMMIT, &ai->flags); break; default: airo_print_warn(ai->dev->name, "Unknown modulation"); } } else if (!strncmp(line, "Preamble: ", 10)) { line += 10; switch(*line) { case 'a': ai->config.preamble=PREAMBLE_AUTO; set_bit(FLAG_COMMIT, &ai->flags); break; case 'l': ai->config.preamble=PREAMBLE_LONG; set_bit(FLAG_COMMIT, &ai->flags); break; case 's': ai->config.preamble=PREAMBLE_SHORT; set_bit(FLAG_COMMIT, &ai->flags); break; default: airo_print_warn(ai->dev->name, "Unknown preamble"); } } else { airo_print_warn(ai->dev->name, "Couldn't figure out %s", line); } while( line[0] && line[0] != '\n' ) line++; if ( line[0] ) line++; } airo_config_commit(dev, NULL, NULL, NULL); } static const char *get_rmode(__le16 mode) { switch(mode & RXMODE_MASK) { case RXMODE_RFMON: return "rfmon"; case RXMODE_RFMON_ANYBSS: return "yna (any) bss rfmon"; case RXMODE_LANMON: return "lanmon"; } return "ESS"; } static int proc_config_open(struct inode *inode, struct file *file) { struct proc_data *data; struct net_device *dev = PDE_DATA(inode); struct airo_info *ai = dev->ml_priv; int i; __le16 mode; if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; data = file->private_data; if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) { kfree (file->private_data); return -ENOMEM; } if ((data->wbuffer = kzalloc( 2048, GFP_KERNEL )) == NULL) { kfree (data->rbuffer); kfree (file->private_data); return -ENOMEM; } data->maxwritelen = 2048; data->on_close = proc_config_on_close; readConfigRid(ai, 1); mode = ai->config.opmode 
& MODE_CFG_MASK; i = sprintf( data->rbuffer, "Mode: %s\n" "Radio: %s\n" "NodeName: %-16s\n" "PowerMode: %s\n" "DataRates: %d %d %d %d %d %d %d %d\n" "Channel: %d\n" "XmitPower: %d\n", mode == MODE_STA_IBSS ? "adhoc" : mode == MODE_STA_ESS ? get_rmode(ai->config.rmode): mode == MODE_AP ? "AP" : mode == MODE_AP_RPTR ? "AP RPTR" : "Error", test_bit(FLAG_RADIO_OFF, &ai->flags) ? "off" : "on", ai->config.nodeName, ai->config.powerSaveMode == POWERSAVE_CAM ? "CAM" : ai->config.powerSaveMode == POWERSAVE_PSP ? "PSP" : ai->config.powerSaveMode == POWERSAVE_PSPCAM ? "PSPCAM" : "Error", (int)ai->config.rates[0], (int)ai->config.rates[1], (int)ai->config.rates[2], (int)ai->config.rates[3], (int)ai->config.rates[4], (int)ai->config.rates[5], (int)ai->config.rates[6], (int)ai->config.rates[7], le16_to_cpu(ai->config.channelSet), le16_to_cpu(ai->config.txPower) ); sprintf( data->rbuffer + i, "LongRetryLimit: %d\n" "ShortRetryLimit: %d\n" "RTSThreshold: %d\n" "TXMSDULifetime: %d\n" "RXMSDULifetime: %d\n" "TXDiversity: %s\n" "RXDiversity: %s\n" "FragThreshold: %d\n" "WEP: %s\n" "Modulation: %s\n" "Preamble: %s\n", le16_to_cpu(ai->config.longRetryLimit), le16_to_cpu(ai->config.shortRetryLimit), le16_to_cpu(ai->config.rtsThres), le16_to_cpu(ai->config.txLifetime), le16_to_cpu(ai->config.rxLifetime), ai->config.txDiversity == 1 ? "left" : ai->config.txDiversity == 2 ? "right" : "both", ai->config.rxDiversity == 1 ? "left" : ai->config.rxDiversity == 2 ? "right" : "both", le16_to_cpu(ai->config.fragThresh), ai->config.authType == AUTH_ENCRYPT ? "encrypt" : ai->config.authType == AUTH_SHAREDKEY ? "shared" : "open", ai->config.modulation == MOD_DEFAULT ? "default" : ai->config.modulation == MOD_CCK ? "cck" : ai->config.modulation == MOD_MOK ? "mok" : "error", ai->config.preamble == PREAMBLE_AUTO ? "auto" : ai->config.preamble == PREAMBLE_LONG ? "long" : ai->config.preamble == PREAMBLE_SHORT ? 
					"short" : "error" );
	data->readlen = strlen( data->rbuffer );
	return 0;
}

/*
 * proc_SSID_on_close - flush the SSID list written to /proc back to the card.
 *
 * Parses up to 3 newline-separated SSIDs (32 characters max each) out of
 * data->wbuffer and programs them via writeSsidRid() with the MAC disabled.
 * A '\n' sentinel is stored at wbuffer[writelen]; the matching open routine
 * allocates maxwritelen + 1 bytes, so the store is in bounds.
 */
static void proc_SSID_on_close(struct inode *inode, struct file *file)
{
	struct proc_data *data = file->private_data;
	struct net_device *dev = PDE_DATA(inode);
	struct airo_info *ai = dev->ml_priv;
	SsidRid SSID_rid;
	int i;
	char *p = data->wbuffer;
	char *end = p + data->writelen;

	if (!data->writelen)
		return;
	*end = '\n'; /* sentinel; we have space for it */

	memset(&SSID_rid, 0, sizeof(SSID_rid));

	for (i = 0; i < 3 && p < end; i++) {
		int j = 0;
		/* copy up to 32 characters from this line */
		while (*p != '\n' && j < 32)
			SSID_rid.ssids[i].ssid[j++] = *p++;
		if (j == 0)
			break;
		SSID_rid.ssids[i].len = cpu_to_le16(j);
		/* skip to the beginning of the next line */
		while (*p++ != '\n')
			;
	}
	/* len is only set when at least one SSID was parsed; an all-zero
	 * RID is written otherwise (clears the list on the card). */
	if (i)
		SSID_rid.len = cpu_to_le16(sizeof(SSID_rid));
	disable_MAC(ai, 1);
	writeSsidRid(ai, &SSID_rid, 1);
	enable_MAC(ai, 1);
}

/*
 * proc_APList_on_close - flush the preferred-AP list written to /proc.
 *
 * Reads up to 4 MAC addresses, each encoded as 6 hex byte pairs plus a
 * one-character separator (18 input characters per address), and writes
 * the resulting APListRid with the MAC disabled.
 */
static void proc_APList_on_close( struct inode *inode, struct file *file ) {
	struct proc_data *data = file->private_data;
	struct net_device *dev = PDE_DATA(inode);
	struct airo_info *ai = dev->ml_priv;
	APListRid APList_rid;
	int i;

	if ( !data->writelen ) return;

	memset( &APList_rid, 0, sizeof(APList_rid) );
	APList_rid.len = cpu_to_le16(sizeof(APList_rid));

	for( i = 0; i < 4 && data->writelen >= (i+1)*6*3; i++ ) {
		int j;
		for( j = 0; j < 6*3 && data->wbuffer[j+i*6*3]; j++ ) {
			switch(j%3) {
			case 0:	/* high nibble of this MAC byte */
				APList_rid.ap[i][j/3]=
					hex_to_bin(data->wbuffer[j+i*6*3])<<4;
				break;
			case 1:	/* low nibble; j%3 == 2 is the separator */
				APList_rid.ap[i][j/3]|=
					hex_to_bin(data->wbuffer[j+i*6*3]);
				break;
			}
		}
	}
	disable_MAC(ai, 1);
	writeAPListRid(ai, &APList_rid, 1);
	enable_MAC(ai, 1);
}

/* This function wraps PC4500_writerid with a MAC disable */
static int do_writerid( struct airo_info *ai, u16 rid, const void *rid_data,
			int len, int dummy )
{
	int rc;

	disable_MAC(ai, 1);
	rc = PC4500_writerid(ai, rid, rid_data, len, 1);
	enable_MAC(ai, 1);
	return rc;
}

/* Returns the WEP key at the specified index, or -1 if that key does
 * not exist.
The buffer is assumed to be at least 16 bytes in length. */ static int get_wep_key(struct airo_info *ai, u16 index, char *buf, u16 buflen) { WepKeyRid wkr; int rc; __le16 lastindex; rc = readWepKeyRid(ai, &wkr, 1, 1); if (rc != SUCCESS) return -1; do { lastindex = wkr.kindex; if (le16_to_cpu(wkr.kindex) == index) { int klen = min_t(int, buflen, le16_to_cpu(wkr.klen)); memcpy(buf, wkr.key, klen); return klen; } rc = readWepKeyRid(ai, &wkr, 0, 1); if (rc != SUCCESS) return -1; } while (lastindex != wkr.kindex); return -1; } static int get_wep_tx_idx(struct airo_info *ai) { WepKeyRid wkr; int rc; __le16 lastindex; rc = readWepKeyRid(ai, &wkr, 1, 1); if (rc != SUCCESS) return -1; do { lastindex = wkr.kindex; if (wkr.kindex == cpu_to_le16(0xffff)) return wkr.mac[0]; rc = readWepKeyRid(ai, &wkr, 0, 1); if (rc != SUCCESS) return -1; } while (lastindex != wkr.kindex); return -1; } static int set_wep_key(struct airo_info *ai, u16 index, const char *key, u16 keylen, int perm, int lock) { static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; WepKeyRid wkr; int rc; if (WARN_ON(keylen == 0)) return -1; memset(&wkr, 0, sizeof(wkr)); wkr.len = cpu_to_le16(sizeof(wkr)); wkr.kindex = cpu_to_le16(index); wkr.klen = cpu_to_le16(keylen); memcpy(wkr.key, key, keylen); memcpy(wkr.mac, macaddr, ETH_ALEN); if (perm) disable_MAC(ai, lock); rc = writeWepKeyRid(ai, &wkr, perm, lock); if (perm) enable_MAC(ai, lock); return rc; } static int set_wep_tx_idx(struct airo_info *ai, u16 index, int perm, int lock) { WepKeyRid wkr; int rc; memset(&wkr, 0, sizeof(wkr)); wkr.len = cpu_to_le16(sizeof(wkr)); wkr.kindex = cpu_to_le16(0xffff); wkr.mac[0] = (char)index; if (perm) { ai->defindex = (char)index; disable_MAC(ai, lock); } rc = writeWepKeyRid(ai, &wkr, perm, lock); if (perm) enable_MAC(ai, lock); return rc; } static void proc_wepkey_on_close( struct inode *inode, struct file *file ) { struct proc_data *data; struct net_device *dev = PDE_DATA(inode); struct airo_info *ai = 
dev->ml_priv; int i, rc; char key[16]; u16 index = 0; int j = 0; memset(key, 0, sizeof(key)); data = file->private_data; if ( !data->writelen ) return; if (data->wbuffer[0] >= '0' && data->wbuffer[0] <= '3' && (data->wbuffer[1] == ' ' || data->wbuffer[1] == '\n')) { index = data->wbuffer[0] - '0'; if (data->wbuffer[1] == '\n') { rc = set_wep_tx_idx(ai, index, 1, 1); if (rc < 0) { airo_print_err(ai->dev->name, "failed to set " "WEP transmit index to %d: %d.", index, rc); } return; } j = 2; } else { airo_print_err(ai->dev->name, "WepKey passed invalid key index"); return; } for( i = 0; i < 16*3 && data->wbuffer[i+j]; i++ ) { switch(i%3) { case 0: key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4; break; case 1: key[i/3] |= hex_to_bin(data->wbuffer[i+j]); break; } } rc = set_wep_key(ai, index, key, i/3, 1, 1); if (rc < 0) { airo_print_err(ai->dev->name, "failed to set WEP key at index " "%d: %d.", index, rc); } } static int proc_wepkey_open( struct inode *inode, struct file *file ) { struct proc_data *data; struct net_device *dev = PDE_DATA(inode); struct airo_info *ai = dev->ml_priv; char *ptr; WepKeyRid wkr; __le16 lastindex; int j=0; int rc; if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; memset(&wkr, 0, sizeof(wkr)); data = file->private_data; if ((data->rbuffer = kzalloc( 180, GFP_KERNEL )) == NULL) { kfree (file->private_data); return -ENOMEM; } data->writelen = 0; data->maxwritelen = 80; if ((data->wbuffer = kzalloc( 80, GFP_KERNEL )) == NULL) { kfree (data->rbuffer); kfree (file->private_data); return -ENOMEM; } data->on_close = proc_wepkey_on_close; ptr = data->rbuffer; strcpy(ptr, "No wep keys\n"); rc = readWepKeyRid(ai, &wkr, 1, 1); if (rc == SUCCESS) do { lastindex = wkr.kindex; if (wkr.kindex == cpu_to_le16(0xffff)) { j += sprintf(ptr+j, "Tx key = %d\n", (int)wkr.mac[0]); } else { j += sprintf(ptr+j, "Key %d set with length = %d\n", le16_to_cpu(wkr.kindex), le16_to_cpu(wkr.klen)); } readWepKeyRid(ai, &wkr, 0, 
1); } while((lastindex != wkr.kindex) && (j < 180-30)); data->readlen = strlen( data->rbuffer ); return 0; } static int proc_SSID_open(struct inode *inode, struct file *file) { struct proc_data *data; struct net_device *dev = PDE_DATA(inode); struct airo_info *ai = dev->ml_priv; int i; char *ptr; SsidRid SSID_rid; if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; data = file->private_data; if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) { kfree (file->private_data); return -ENOMEM; } data->writelen = 0; data->maxwritelen = 33*3; /* allocate maxwritelen + 1; we'll want a sentinel */ if ((data->wbuffer = kzalloc(33*3 + 1, GFP_KERNEL)) == NULL) { kfree (data->rbuffer); kfree (file->private_data); return -ENOMEM; } data->on_close = proc_SSID_on_close; readSsidRid(ai, &SSID_rid); ptr = data->rbuffer; for (i = 0; i < 3; i++) { int j; size_t len = le16_to_cpu(SSID_rid.ssids[i].len); if (!len) break; if (len > 32) len = 32; for (j = 0; j < len && SSID_rid.ssids[i].ssid[j]; j++) *ptr++ = SSID_rid.ssids[i].ssid[j]; *ptr++ = '\n'; } *ptr = '\0'; data->readlen = strlen( data->rbuffer ); return 0; } static int proc_APList_open( struct inode *inode, struct file *file ) { struct proc_data *data; struct net_device *dev = PDE_DATA(inode); struct airo_info *ai = dev->ml_priv; int i; char *ptr; APListRid APList_rid; if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; data = file->private_data; if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) { kfree (file->private_data); return -ENOMEM; } data->writelen = 0; data->maxwritelen = 4*6*3; if ((data->wbuffer = kzalloc( data->maxwritelen, GFP_KERNEL )) == NULL) { kfree (data->rbuffer); kfree (file->private_data); return -ENOMEM; } data->on_close = proc_APList_on_close; readAPListRid(ai, &APList_rid); ptr = data->rbuffer; for( i = 0; i < 4; i++ ) { // We end when we find a zero MAC if ( !*(int*)APList_rid.ap[i] && 
!*(int*)&APList_rid.ap[i][2]) break; ptr += sprintf(ptr, "%pM\n", APList_rid.ap[i]); } if (i==0) ptr += sprintf(ptr, "Not using specific APs\n"); *ptr = '\0'; data->readlen = strlen( data->rbuffer ); return 0; } static int proc_BSSList_open( struct inode *inode, struct file *file ) { struct proc_data *data; struct net_device *dev = PDE_DATA(inode); struct airo_info *ai = dev->ml_priv; char *ptr; BSSListRid BSSList_rid; int rc; /* If doLoseSync is not 1, we won't do a Lose Sync */ int doLoseSync = -1; if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; data = file->private_data; if ((data->rbuffer = kmalloc( 1024, GFP_KERNEL )) == NULL) { kfree (file->private_data); return -ENOMEM; } data->writelen = 0; data->maxwritelen = 0; data->wbuffer = NULL; data->on_close = NULL; if (file->f_mode & FMODE_WRITE) { if (!(file->f_mode & FMODE_READ)) { Cmd cmd; Resp rsp; if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN; memset(&cmd, 0, sizeof(cmd)); cmd.cmd=CMD_LISTBSS; if (down_interruptible(&ai->sem)) return -ERESTARTSYS; issuecommand(ai, &cmd, &rsp); up(&ai->sem); data->readlen = 0; return 0; } doLoseSync = 1; } ptr = data->rbuffer; /* There is a race condition here if there are concurrent opens. Since it is a rare condition, we'll just live with it, otherwise we have to add a spin lock... */ rc = readBSSListRid(ai, doLoseSync, &BSSList_rid); while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) { ptr += sprintf(ptr, "%pM %*s rssi = %d", BSSList_rid.bssid, (int)BSSList_rid.ssidLen, BSSList_rid.ssid, le16_to_cpu(BSSList_rid.dBm)); ptr += sprintf(ptr, " channel = %d %s %s %s %s\n", le16_to_cpu(BSSList_rid.dsChannel), BSSList_rid.cap & CAP_ESS ? "ESS" : "", BSSList_rid.cap & CAP_IBSS ? "adhoc" : "", BSSList_rid.cap & CAP_PRIVACY ? "wep" : "", BSSList_rid.cap & CAP_SHORTHDR ? 
"shorthdr" : ""); rc = readBSSListRid(ai, 0, &BSSList_rid); } *ptr = '\0'; data->readlen = strlen( data->rbuffer ); return 0; } static int proc_close( struct inode *inode, struct file *file ) { struct proc_data *data = file->private_data; if (data->on_close != NULL) data->on_close(inode, file); kfree(data->rbuffer); kfree(data->wbuffer); kfree(data); return 0; } /* Since the card doesn't automatically switch to the right WEP mode, we will make it do it. If the card isn't associated, every secs we will switch WEP modes to see if that will help. If the card is associated we will check every minute to see if anything has changed. */ static void timer_func( struct net_device *dev ) { struct airo_info *apriv = dev->ml_priv; /* We don't have a link so try changing the authtype */ readConfigRid(apriv, 0); disable_MAC(apriv, 0); switch(apriv->config.authType) { case AUTH_ENCRYPT: /* So drop to OPEN */ apriv->config.authType = AUTH_OPEN; break; case AUTH_SHAREDKEY: if (apriv->keyindex < auto_wep) { set_wep_tx_idx(apriv, apriv->keyindex, 0, 0); apriv->config.authType = AUTH_SHAREDKEY; apriv->keyindex++; } else { /* Drop to ENCRYPT */ apriv->keyindex = 0; set_wep_tx_idx(apriv, apriv->defindex, 0, 0); apriv->config.authType = AUTH_ENCRYPT; } break; default: /* We'll escalate to SHAREDKEY */ apriv->config.authType = AUTH_SHAREDKEY; } set_bit (FLAG_COMMIT, &apriv->flags); writeConfigRid(apriv, 0); enable_MAC(apriv, 0); up(&apriv->sem); /* Schedule check to see if the change worked */ clear_bit(JOB_AUTOWEP, &apriv->jobs); apriv->expires = RUN_AT(HZ*3); } #ifdef CONFIG_PCI static int airo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pent) { struct net_device *dev; if (pci_enable_device(pdev)) return -ENODEV; pci_set_master(pdev); if (pdev->device == 0x5000 || pdev->device == 0xa504) dev = _init_airo_card(pdev->irq, pdev->resource[0].start, 0, pdev, &pdev->dev); else dev = _init_airo_card(pdev->irq, pdev->resource[2].start, 0, pdev, &pdev->dev); if (!dev) { 
pci_disable_device(pdev); return -ENODEV; } pci_set_drvdata(pdev, dev); return 0; } static void airo_pci_remove(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); airo_print_info(dev->name, "Unregistering..."); stop_airo_card(dev, 1); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata(pdev); struct airo_info *ai = dev->ml_priv; Cmd cmd; Resp rsp; if (!ai->APList) ai->APList = kmalloc(sizeof(APListRid), GFP_KERNEL); if (!ai->APList) return -ENOMEM; if (!ai->SSID) ai->SSID = kmalloc(sizeof(SsidRid), GFP_KERNEL); if (!ai->SSID) return -ENOMEM; readAPListRid(ai, ai->APList); readSsidRid(ai, ai->SSID); memset(&cmd, 0, sizeof(cmd)); /* the lock will be released at the end of the resume callback */ if (down_interruptible(&ai->sem)) return -EAGAIN; disable_MAC(ai, 0); netif_device_detach(dev); ai->power = state; cmd.cmd = HOSTSLEEP; issuecommand(ai, &cmd, &rsp); pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); pci_save_state(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static int airo_pci_resume(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct airo_info *ai = dev->ml_priv; pci_power_t prev_state = pdev->current_state; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); pci_enable_wake(pdev, PCI_D0, 0); if (prev_state != PCI_D1) { reset_card(dev, 0); mpi_init_descriptors(ai); setup_card(ai, dev->dev_addr, 0); clear_bit(FLAG_RADIO_OFF, &ai->flags); clear_bit(FLAG_PENDING_XMIT, &ai->flags); } else { OUT4500(ai, EVACK, EV_AWAKEN); OUT4500(ai, EVACK, EV_AWAKEN); msleep(100); } set_bit(FLAG_COMMIT, &ai->flags); disable_MAC(ai, 0); msleep(200); if (ai->SSID) { writeSsidRid(ai, ai->SSID, 0); kfree(ai->SSID); ai->SSID = NULL; } if (ai->APList) { writeAPListRid(ai, ai->APList, 0); kfree(ai->APList); ai->APList = NULL; } writeConfigRid(ai, 0); enable_MAC(ai, 0); 
ai->power = PMSG_ON; netif_device_attach(dev); netif_wake_queue(dev); enable_interrupts(ai); up(&ai->sem); return 0; } #endif static int __init airo_init_module( void ) { int i; proc_kuid = make_kuid(&init_user_ns, proc_uid); proc_kgid = make_kgid(&init_user_ns, proc_gid); if (!uid_valid(proc_kuid) || !gid_valid(proc_kgid)) return -EINVAL; airo_entry = proc_mkdir_mode("driver/aironet", airo_perm, NULL); if (airo_entry) proc_set_user(airo_entry, proc_kuid, proc_kgid); for (i = 0; i < 4 && io[i] && irq[i]; i++) { airo_print_info("", "Trying to configure ISA adapter at irq=%d " "io=0x%x", irq[i], io[i] ); if (init_airo_card( irq[i], io[i], 0, NULL )) /* do nothing */ ; } #ifdef CONFIG_PCI airo_print_info("", "Probing for PCI adapters"); i = pci_register_driver(&airo_driver); airo_print_info("", "Finished probing for PCI adapters"); if (i) { remove_proc_entry("driver/aironet", NULL); return i; } #endif /* Always exit with success, as we are a library module * as well as a driver module */ return 0; } static void __exit airo_cleanup_module( void ) { struct airo_info *ai; while(!list_empty(&airo_devices)) { ai = list_entry(airo_devices.next, struct airo_info, dev_list); airo_print_info(ai->dev->name, "Unregistering..."); stop_airo_card(ai->dev, 1); } #ifdef CONFIG_PCI pci_unregister_driver(&airo_driver); #endif remove_proc_entry("driver/aironet", NULL); } /* * Initial Wireless Extension code for Aironet driver by : * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 17 November 00 * Conversion to new driver API by : * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 26 March 02 * Javier also did a good amount of work here, adding some new extensions * and fixing my code. Let's just say that without him this code just * would not work at all... 
- Jean II */ static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi) { if (!rssi_rid) return 0; return (0x100 - rssi_rid[rssi].rssidBm); } static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm) { int i; if (!rssi_rid) return 0; for (i = 0; i < 256; i++) if (rssi_rid[i].rssidBm == dbm) return rssi_rid[i].rssipct; return 0; } static int airo_get_quality (StatusRid *status_rid, CapabilityRid *cap_rid) { int quality = 0; u16 sq; if ((status_rid->mode & cpu_to_le16(0x3f)) != cpu_to_le16(0x3f)) return 0; if (!(cap_rid->hardCap & cpu_to_le16(8))) return 0; sq = le16_to_cpu(status_rid->signalQuality); if (memcmp(cap_rid->prodName, "350", 3)) if (sq > 0x20) quality = 0; else quality = 0x20 - sq; else if (sq > 0xb0) quality = 0; else if (sq < 0x10) quality = 0xa0; else quality = 0xb0 - sq; return quality; } #define airo_get_max_quality(cap_rid) (memcmp((cap_rid)->prodName, "350", 3) ? 0x20 : 0xa0) #define airo_get_avg_quality(cap_rid) (memcmp((cap_rid)->prodName, "350", 3) ? 0x10 : 0x50); /*------------------------------------------------------------------*/ /* * Wireless Handler : get protocol name */ static int airo_get_name(struct net_device *dev, struct iw_request_info *info, char *cwrq, char *extra) { strcpy(cwrq, "IEEE 802.11-DS"); return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set frequency */ static int airo_set_freq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *fwrq, char *extra) { struct airo_info *local = dev->ml_priv; int rc = -EINPROGRESS; /* Call commit handler */ /* If setting by frequency, convert to a channel */ if(fwrq->e == 1) { int f = fwrq->m / 100000; /* Hack to fall through... */ fwrq->e = 0; fwrq->m = ieee80211_freq_to_dsss_chan(f); } /* Setting by channel number */ if((fwrq->m > 1000) || (fwrq->e > 0)) rc = -EOPNOTSUPP; else { int channel = fwrq->m; /* We should do a better check than that, * based on the card capability !!! 
*/ if((channel < 1) || (channel > 14)) { airo_print_dbg(dev->name, "New channel value of %d is invalid!", fwrq->m); rc = -EINVAL; } else { readConfigRid(local, 1); /* Yes ! We can set it !!! */ local->config.channelSet = cpu_to_le16(channel); set_bit (FLAG_COMMIT, &local->flags); } } return rc; } /*------------------------------------------------------------------*/ /* * Wireless Handler : get frequency */ static int airo_get_freq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *fwrq, char *extra) { struct airo_info *local = dev->ml_priv; StatusRid status_rid; /* Card status info */ int ch; readConfigRid(local, 1); if ((local->config.opmode & MODE_CFG_MASK) == MODE_STA_ESS) status_rid.channel = local->config.channelSet; else readStatusRid(local, &status_rid, 1); ch = le16_to_cpu(status_rid.channel); if((ch > 0) && (ch < 15)) { fwrq->m = ieee80211_dsss_chan_to_freq(ch) * 100000; fwrq->e = 1; } else { fwrq->m = ch; fwrq->e = 0; } return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set ESSID */ static int airo_set_essid(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { struct airo_info *local = dev->ml_priv; SsidRid SSID_rid; /* SSIDs */ /* Reload the list of current SSID */ readSsidRid(local, &SSID_rid); /* Check if we asked for `any' */ if (dwrq->flags == 0) { /* Just send an empty SSID list */ memset(&SSID_rid, 0, sizeof(SSID_rid)); } else { unsigned index = (dwrq->flags & IW_ENCODE_INDEX) - 1; /* Check the size of the string */ if (dwrq->length > IW_ESSID_MAX_SIZE) return -E2BIG ; /* Check if index is valid */ if (index >= ARRAY_SIZE(SSID_rid.ssids)) return -EINVAL; /* Set the SSID */ memset(SSID_rid.ssids[index].ssid, 0, sizeof(SSID_rid.ssids[index].ssid)); memcpy(SSID_rid.ssids[index].ssid, extra, dwrq->length); SSID_rid.ssids[index].len = cpu_to_le16(dwrq->length); } SSID_rid.len = cpu_to_le16(sizeof(SSID_rid)); /* Write it to the card */ 
disable_MAC(local, 1); writeSsidRid(local, &SSID_rid, 1); enable_MAC(local, 1); return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : get ESSID */ static int airo_get_essid(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { struct airo_info *local = dev->ml_priv; StatusRid status_rid; /* Card status info */ readStatusRid(local, &status_rid, 1); /* Note : if dwrq->flags != 0, we should * get the relevant SSID from the SSID list... */ /* Get the current SSID */ memcpy(extra, status_rid.SSID, le16_to_cpu(status_rid.SSIDlen)); /* If none, we may want to get the one that was set */ /* Push it out ! */ dwrq->length = le16_to_cpu(status_rid.SSIDlen); dwrq->flags = 1; /* active */ return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set AP address */ static int airo_set_wap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *awrq, char *extra) { struct airo_info *local = dev->ml_priv; Cmd cmd; Resp rsp; APListRid APList_rid; if (awrq->sa_family != ARPHRD_ETHER) return -EINVAL; else if (is_broadcast_ether_addr(awrq->sa_data) || is_zero_ether_addr(awrq->sa_data)) { memset(&cmd, 0, sizeof(cmd)); cmd.cmd=CMD_LOSE_SYNC; if (down_interruptible(&local->sem)) return -ERESTARTSYS; issuecommand(local, &cmd, &rsp); up(&local->sem); } else { memset(&APList_rid, 0, sizeof(APList_rid)); APList_rid.len = cpu_to_le16(sizeof(APList_rid)); memcpy(APList_rid.ap[0], awrq->sa_data, ETH_ALEN); disable_MAC(local, 1); writeAPListRid(local, &APList_rid, 1); enable_MAC(local, 1); } return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : get AP address */ static int airo_get_wap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *awrq, char *extra) { struct airo_info *local = dev->ml_priv; StatusRid status_rid; /* Card status info */ readStatusRid(local, &status_rid, 
1); /* Tentative. This seems to work, wow, I'm lucky !!! */ memcpy(awrq->sa_data, status_rid.bssid[0], ETH_ALEN); awrq->sa_family = ARPHRD_ETHER; return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set Nickname */ static int airo_set_nick(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { struct airo_info *local = dev->ml_priv; /* Check the size of the string */ if(dwrq->length > 16) { return -E2BIG; } readConfigRid(local, 1); memset(local->config.nodeName, 0, sizeof(local->config.nodeName)); memcpy(local->config.nodeName, extra, dwrq->length); set_bit (FLAG_COMMIT, &local->flags); return -EINPROGRESS; /* Call commit handler */ } /*------------------------------------------------------------------*/ /* * Wireless Handler : get Nickname */ static int airo_get_nick(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); strncpy(extra, local->config.nodeName, 16); extra[16] = '\0'; dwrq->length = strlen(extra); return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set Bit-Rate */ static int airo_set_rate(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct airo_info *local = dev->ml_priv; CapabilityRid cap_rid; /* Card capability info */ u8 brate = 0; int i; /* First : get a valid bit rate value */ readCapabilityRid(local, &cap_rid, 1); /* Which type of value ? 
	 */
	if((vwrq->value < 8) && (vwrq->value >= 0)) {
		/* Setting by rate index */
		/* Find value in the magic rate table */
		brate = cap_rid.supportedRates[vwrq->value];
	} else {
		/* Setting by frequency value */
		u8 normvalue = (u8) (vwrq->value/500000);

		/* Check if rate is valid */
		for(i = 0 ; i < 8 ; i++) {
			if(normvalue == cap_rid.supportedRates[i]) {
				brate = normvalue;
				break;
			}
		}
	}
	/* -1 designed the max rate (mostly auto mode) */
	if(vwrq->value == -1) {
		/* Get the highest available rate */
		for(i = 0 ; i < 8 ; i++) {
			if(cap_rid.supportedRates[i] == 0)
				break;
		}
		if(i != 0)
			brate = cap_rid.supportedRates[i - 1];
	}
	/* Check that it is valid */
	if(brate == 0) {
		return -EINVAL;
	}

	readConfigRid(local, 1);
	/* Now, check if we want a fixed or auto value */
	if(vwrq->fixed == 0) {
		/* Fill all the rates up to this max rate */
		memset(local->config.rates, 0, 8);
		for(i = 0 ; i < 8 ; i++) {
			local->config.rates[i] = cap_rid.supportedRates[i];
			if(local->config.rates[i] == brate)
				break;
		}
	} else {
		/* Fixed mode */
		/* One rate, fixed */
		memset(local->config.rates, 0, 8);
		local->config.rates[0] = brate;
	}
	set_bit (FLAG_COMMIT, &local->flags);

	return -EINPROGRESS;		/* Call commit handler */
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : get Bit-Rate
 *
 * Reports the current transmit rate from the status RID, converted to
 * bps (units of 500 kbps); "fixed" is set when only one rate is
 * configured.
 */
static int airo_get_rate(struct net_device *dev,
			 struct iw_request_info *info,
			 struct iw_param *vwrq,
			 char *extra)
{
	struct airo_info *local = dev->ml_priv;
	StatusRid status_rid;		/* Card status info */

	readStatusRid(local, &status_rid, 1);

	vwrq->value = le16_to_cpu(status_rid.currentXmitRate) * 500000;
	/* If more than one rate, set auto */
	readConfigRid(local, 1);
	vwrq->fixed = (local->config.rates[1] == 0);

	return 0;
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : set RTS threshold
 *
 * Valid range is 0..AIRO_DEF_MTU; "disabled" maps to AIRO_DEF_MTU.
 */
static int airo_set_rts(struct net_device *dev,
			struct iw_request_info *info,
			struct iw_param *vwrq,
			char *extra)
{
	struct airo_info *local = dev->ml_priv;
	int rthr = vwrq->value;

	if(vwrq->disabled)
		rthr = AIRO_DEF_MTU;
	if((rthr < 0) || (rthr > AIRO_DEF_MTU)) {
		return -EINVAL;
	}
	readConfigRid(local, 1);
	local->config.rtsThres = cpu_to_le16(rthr);
	set_bit (FLAG_COMMIT, &local->flags);

	return -EINPROGRESS;		/* Call commit handler */
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : get RTS threshold
 */
static int airo_get_rts(struct net_device *dev,
			struct iw_request_info *info,
			struct iw_param *vwrq,
			char *extra)
{
	struct airo_info *local = dev->ml_priv;

	readConfigRid(local, 1);
	vwrq->value = le16_to_cpu(local->config.rtsThres);
	vwrq->disabled = (vwrq->value >= AIRO_DEF_MTU);
	vwrq->fixed = 1;

	return 0;
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : set Fragmentation threshold
 *
 * Valid range is 256..AIRO_DEF_MTU; "disabled" maps to AIRO_DEF_MTU.
 * The value is forced to be even before being committed.
 */
static int airo_set_frag(struct net_device *dev,
			 struct iw_request_info *info,
			 struct iw_param *vwrq,
			 char *extra)
{
	struct airo_info *local = dev->ml_priv;
	int fthr = vwrq->value;

	if(vwrq->disabled)
		fthr = AIRO_DEF_MTU;
	if((fthr < 256) || (fthr > AIRO_DEF_MTU)) {
		return -EINVAL;
	}
	fthr &= ~0x1;	/* Get an even value - is it really needed ???
*/ readConfigRid(local, 1); local->config.fragThresh = cpu_to_le16(fthr); set_bit (FLAG_COMMIT, &local->flags); return -EINPROGRESS; /* Call commit handler */ } /*------------------------------------------------------------------*/ /* * Wireless Handler : get Fragmentation threshold */ static int airo_get_frag(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); vwrq->value = le16_to_cpu(local->config.fragThresh); vwrq->disabled = (vwrq->value >= AIRO_DEF_MTU); vwrq->fixed = 1; return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set Mode of Operation */ static int airo_set_mode(struct net_device *dev, struct iw_request_info *info, __u32 *uwrq, char *extra) { struct airo_info *local = dev->ml_priv; int reset = 0; readConfigRid(local, 1); if (sniffing_mode(local)) reset = 1; switch(*uwrq) { case IW_MODE_ADHOC: local->config.opmode &= ~MODE_CFG_MASK; local->config.opmode |= MODE_STA_IBSS; local->config.rmode &= ~RXMODE_FULL_MASK; local->config.scanMode = SCANMODE_ACTIVE; clear_bit (FLAG_802_11, &local->flags); break; case IW_MODE_INFRA: local->config.opmode &= ~MODE_CFG_MASK; local->config.opmode |= MODE_STA_ESS; local->config.rmode &= ~RXMODE_FULL_MASK; local->config.scanMode = SCANMODE_ACTIVE; clear_bit (FLAG_802_11, &local->flags); break; case IW_MODE_MASTER: local->config.opmode &= ~MODE_CFG_MASK; local->config.opmode |= MODE_AP; local->config.rmode &= ~RXMODE_FULL_MASK; local->config.scanMode = SCANMODE_ACTIVE; clear_bit (FLAG_802_11, &local->flags); break; case IW_MODE_REPEAT: local->config.opmode &= ~MODE_CFG_MASK; local->config.opmode |= MODE_AP_RPTR; local->config.rmode &= ~RXMODE_FULL_MASK; local->config.scanMode = SCANMODE_ACTIVE; clear_bit (FLAG_802_11, &local->flags); break; case IW_MODE_MONITOR: local->config.opmode &= ~MODE_CFG_MASK; local->config.opmode |= MODE_STA_ESS; local->config.rmode &= 
~RXMODE_FULL_MASK; local->config.rmode |= RXMODE_RFMON | RXMODE_DISABLE_802_3_HEADER; local->config.scanMode = SCANMODE_PASSIVE; set_bit (FLAG_802_11, &local->flags); break; default: return -EINVAL; } if (reset) set_bit (FLAG_RESET, &local->flags); set_bit (FLAG_COMMIT, &local->flags); return -EINPROGRESS; /* Call commit handler */ } /*------------------------------------------------------------------*/ /* * Wireless Handler : get Mode of Operation */ static int airo_get_mode(struct net_device *dev, struct iw_request_info *info, __u32 *uwrq, char *extra) { struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); /* If not managed, assume it's ad-hoc */ switch (local->config.opmode & MODE_CFG_MASK) { case MODE_STA_ESS: *uwrq = IW_MODE_INFRA; break; case MODE_AP: *uwrq = IW_MODE_MASTER; break; case MODE_AP_RPTR: *uwrq = IW_MODE_REPEAT; break; default: *uwrq = IW_MODE_ADHOC; } return 0; } static inline int valid_index(struct airo_info *ai, int index) { return (index >= 0) && (index <= ai->max_wep_idx); } /*------------------------------------------------------------------*/ /* * Wireless Handler : set Encryption Key */ static int airo_set_encode(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { struct airo_info *local = dev->ml_priv; int perm = (dwrq->flags & IW_ENCODE_TEMP ? 0 : 1); __le16 currentAuthType = local->config.authType; int rc = 0; if (!local->wep_capable) return -EOPNOTSUPP; readConfigRid(local, 1); /* Basic checking: do we have a key to set ? * Note : with the new API, it's impossible to get a NULL pointer. * Therefore, we need to check a key size == 0 instead. * New version of iwconfig properly set the IW_ENCODE_NOKEY flag * when no key is present (only change flags), but older versions * don't do it. 
- Jean II */ if (dwrq->length > 0) { wep_key_t key; int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; int current_index; /* Check the size of the key */ if (dwrq->length > MAX_KEY_SIZE) { return -EINVAL; } current_index = get_wep_tx_idx(local); if (current_index < 0) current_index = 0; /* Check the index (none -> use current) */ if (!valid_index(local, index)) index = current_index; /* Set the length */ if (dwrq->length > MIN_KEY_SIZE) key.len = MAX_KEY_SIZE; else key.len = MIN_KEY_SIZE; /* Check if the key is not marked as invalid */ if(!(dwrq->flags & IW_ENCODE_NOKEY)) { /* Cleanup */ memset(key.key, 0, MAX_KEY_SIZE); /* Copy the key in the driver */ memcpy(key.key, extra, dwrq->length); /* Send the key to the card */ rc = set_wep_key(local, index, key.key, key.len, perm, 1); if (rc < 0) { airo_print_err(local->dev->name, "failed to set" " WEP key at index %d: %d.", index, rc); return rc; } } /* WE specify that if a valid key is set, encryption * should be enabled (user may turn it off later) * This is also how "iwconfig ethX key on" works */ if((index == current_index) && (key.len > 0) && (local->config.authType == AUTH_OPEN)) { local->config.authType = AUTH_ENCRYPT; } } else { /* Do we want to just set the transmit key index ? 
*/ int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; if (valid_index(local, index)) { rc = set_wep_tx_idx(local, index, perm, 1); if (rc < 0) { airo_print_err(local->dev->name, "failed to set" " WEP transmit index to %d: %d.", index, rc); return rc; } } else { /* Don't complain if only change the mode */ if (!(dwrq->flags & IW_ENCODE_MODE)) return -EINVAL; } } /* Read the flags */ if(dwrq->flags & IW_ENCODE_DISABLED) local->config.authType = AUTH_OPEN; // disable encryption if(dwrq->flags & IW_ENCODE_RESTRICTED) local->config.authType = AUTH_SHAREDKEY; // Only Both if(dwrq->flags & IW_ENCODE_OPEN) local->config.authType = AUTH_ENCRYPT; // Only Wep /* Commit the changes to flags if needed */ if (local->config.authType != currentAuthType) set_bit (FLAG_COMMIT, &local->flags); return -EINPROGRESS; /* Call commit handler */ } /*------------------------------------------------------------------*/ /* * Wireless Handler : get Encryption Key */ static int airo_get_encode(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { struct airo_info *local = dev->ml_priv; int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; int wep_key_len; u8 buf[16]; if (!local->wep_capable) return -EOPNOTSUPP; readConfigRid(local, 1); /* Check encryption mode */ switch(local->config.authType) { case AUTH_ENCRYPT: dwrq->flags = IW_ENCODE_OPEN; break; case AUTH_SHAREDKEY: dwrq->flags = IW_ENCODE_RESTRICTED; break; default: case AUTH_OPEN: dwrq->flags = IW_ENCODE_DISABLED; break; } /* We can't return the key, so set the proper flag and return zero */ dwrq->flags |= IW_ENCODE_NOKEY; memset(extra, 0, 16); /* Which key do we want ? 
-1 -> tx index */ if (!valid_index(local, index)) { index = get_wep_tx_idx(local); if (index < 0) index = 0; } dwrq->flags |= index + 1; /* Copy the key to the user buffer */ wep_key_len = get_wep_key(local, index, &buf[0], sizeof(buf)); if (wep_key_len < 0) { dwrq->length = 0; } else { dwrq->length = wep_key_len; memcpy(extra, buf, dwrq->length); } return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set extended Encryption parameters */ static int airo_set_encodeext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct airo_info *local = dev->ml_priv; struct iw_point *encoding = &wrqu->encoding; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; int perm = ( encoding->flags & IW_ENCODE_TEMP ? 0 : 1 ); __le16 currentAuthType = local->config.authType; int idx, key_len, alg = ext->alg, set_key = 1, rc; wep_key_t key; if (!local->wep_capable) return -EOPNOTSUPP; readConfigRid(local, 1); /* Determine and validate the key index */ idx = encoding->flags & IW_ENCODE_INDEX; if (idx) { if (!valid_index(local, idx - 1)) return -EINVAL; idx--; } else { idx = get_wep_tx_idx(local); if (idx < 0) idx = 0; } if (encoding->flags & IW_ENCODE_DISABLED) alg = IW_ENCODE_ALG_NONE; if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { /* Only set transmit key index here, actual * key is set below if needed. */ rc = set_wep_tx_idx(local, idx, perm, 1); if (rc < 0) { airo_print_err(local->dev->name, "failed to set " "WEP transmit index to %d: %d.", idx, rc); return rc; } set_key = ext->key_len > 0 ? 
1 : 0; } if (set_key) { /* Set the requested key first */ memset(key.key, 0, MAX_KEY_SIZE); switch (alg) { case IW_ENCODE_ALG_NONE: key.len = 0; break; case IW_ENCODE_ALG_WEP: if (ext->key_len > MIN_KEY_SIZE) { key.len = MAX_KEY_SIZE; } else if (ext->key_len > 0) { key.len = MIN_KEY_SIZE; } else { return -EINVAL; } key_len = min (ext->key_len, key.len); memcpy(key.key, ext->key, key_len); break; default: return -EINVAL; } if (key.len == 0) { rc = set_wep_tx_idx(local, idx, perm, 1); if (rc < 0) { airo_print_err(local->dev->name, "failed to set WEP transmit index to %d: %d.", idx, rc); return rc; } } else { rc = set_wep_key(local, idx, key.key, key.len, perm, 1); if (rc < 0) { airo_print_err(local->dev->name, "failed to set WEP key at index %d: %d.", idx, rc); return rc; } } } /* Read the flags */ if(encoding->flags & IW_ENCODE_DISABLED) local->config.authType = AUTH_OPEN; // disable encryption if(encoding->flags & IW_ENCODE_RESTRICTED) local->config.authType = AUTH_SHAREDKEY; // Only Both if(encoding->flags & IW_ENCODE_OPEN) local->config.authType = AUTH_ENCRYPT; // Only Wep /* Commit the changes to flags if needed */ if (local->config.authType != currentAuthType) set_bit (FLAG_COMMIT, &local->flags); return -EINPROGRESS; } /*------------------------------------------------------------------*/ /* * Wireless Handler : get extended Encryption parameters */ static int airo_get_encodeext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct airo_info *local = dev->ml_priv; struct iw_point *encoding = &wrqu->encoding; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; int idx, max_key_len, wep_key_len; u8 buf[16]; if (!local->wep_capable) return -EOPNOTSUPP; readConfigRid(local, 1); max_key_len = encoding->length - sizeof(*ext); if (max_key_len < 0) return -EINVAL; idx = encoding->flags & IW_ENCODE_INDEX; if (idx) { if (!valid_index(local, idx - 1)) return -EINVAL; idx--; } else { idx = get_wep_tx_idx(local); if (idx 
< 0) idx = 0; } encoding->flags = idx + 1; memset(ext, 0, sizeof(*ext)); /* Check encryption mode */ switch(local->config.authType) { case AUTH_ENCRYPT: encoding->flags = IW_ENCODE_ALG_WEP | IW_ENCODE_ENABLED; break; case AUTH_SHAREDKEY: encoding->flags = IW_ENCODE_ALG_WEP | IW_ENCODE_ENABLED; break; default: case AUTH_OPEN: encoding->flags = IW_ENCODE_ALG_NONE | IW_ENCODE_DISABLED; break; } /* We can't return the key, so set the proper flag and return zero */ encoding->flags |= IW_ENCODE_NOKEY; memset(extra, 0, 16); /* Copy the key to the user buffer */ wep_key_len = get_wep_key(local, idx, &buf[0], sizeof(buf)); if (wep_key_len < 0) { ext->key_len = 0; } else { ext->key_len = wep_key_len; memcpy(extra, buf, ext->key_len); } return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set extended authentication parameters */ static int airo_set_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct airo_info *local = dev->ml_priv; struct iw_param *param = &wrqu->param; __le16 currentAuthType = local->config.authType; switch (param->flags & IW_AUTH_INDEX) { case IW_AUTH_WPA_VERSION: case IW_AUTH_CIPHER_PAIRWISE: case IW_AUTH_CIPHER_GROUP: case IW_AUTH_KEY_MGMT: case IW_AUTH_RX_UNENCRYPTED_EAPOL: case IW_AUTH_PRIVACY_INVOKED: /* * airo does not use these parameters */ break; case IW_AUTH_DROP_UNENCRYPTED: if (param->value) { /* Only change auth type if unencrypted */ if (currentAuthType == AUTH_OPEN) local->config.authType = AUTH_ENCRYPT; } else { local->config.authType = AUTH_OPEN; } /* Commit the changes to flags if needed */ if (local->config.authType != currentAuthType) set_bit (FLAG_COMMIT, &local->flags); break; case IW_AUTH_80211_AUTH_ALG: { /* FIXME: What about AUTH_OPEN? This API seems to * disallow setting our auth to AUTH_OPEN. 
*/ if (param->value & IW_AUTH_ALG_SHARED_KEY) { local->config.authType = AUTH_SHAREDKEY; } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) { local->config.authType = AUTH_ENCRYPT; } else return -EINVAL; /* Commit the changes to flags if needed */ if (local->config.authType != currentAuthType) set_bit (FLAG_COMMIT, &local->flags); break; } case IW_AUTH_WPA_ENABLED: /* Silently accept disable of WPA */ if (param->value > 0) return -EOPNOTSUPP; break; default: return -EOPNOTSUPP; } return -EINPROGRESS; } /*------------------------------------------------------------------*/ /* * Wireless Handler : get extended authentication parameters */ static int airo_get_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct airo_info *local = dev->ml_priv; struct iw_param *param = &wrqu->param; __le16 currentAuthType = local->config.authType; switch (param->flags & IW_AUTH_INDEX) { case IW_AUTH_DROP_UNENCRYPTED: switch (currentAuthType) { case AUTH_SHAREDKEY: case AUTH_ENCRYPT: param->value = 1; break; default: param->value = 0; break; } break; case IW_AUTH_80211_AUTH_ALG: switch (currentAuthType) { case AUTH_SHAREDKEY: param->value = IW_AUTH_ALG_SHARED_KEY; break; case AUTH_ENCRYPT: default: param->value = IW_AUTH_ALG_OPEN_SYSTEM; break; } break; case IW_AUTH_WPA_ENABLED: param->value = 0; break; default: return -EOPNOTSUPP; } return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set Tx-Power */ static int airo_set_txpow(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct airo_info *local = dev->ml_priv; CapabilityRid cap_rid; /* Card capability info */ int i; int rc = -EINVAL; __le16 v = cpu_to_le16(vwrq->value); readCapabilityRid(local, &cap_rid, 1); if (vwrq->disabled) { set_bit (FLAG_RADIO_OFF, &local->flags); set_bit (FLAG_COMMIT, &local->flags); return -EINPROGRESS; /* Call commit handler */ } if (vwrq->flags != 
IW_TXPOW_MWATT) { return -EINVAL; } clear_bit (FLAG_RADIO_OFF, &local->flags); for (i = 0; i < 8 && cap_rid.txPowerLevels[i]; i++) if (v == cap_rid.txPowerLevels[i]) { readConfigRid(local, 1); local->config.txPower = v; set_bit (FLAG_COMMIT, &local->flags); rc = -EINPROGRESS; /* Call commit handler */ break; } return rc; } /*------------------------------------------------------------------*/ /* * Wireless Handler : get Tx-Power */ static int airo_get_txpow(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); vwrq->value = le16_to_cpu(local->config.txPower); vwrq->fixed = 1; /* No power control */ vwrq->disabled = test_bit(FLAG_RADIO_OFF, &local->flags); vwrq->flags = IW_TXPOW_MWATT; return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set Retry limits */ static int airo_set_retry(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct airo_info *local = dev->ml_priv; int rc = -EINVAL; if(vwrq->disabled) { return -EINVAL; } readConfigRid(local, 1); if(vwrq->flags & IW_RETRY_LIMIT) { __le16 v = cpu_to_le16(vwrq->value); if(vwrq->flags & IW_RETRY_LONG) local->config.longRetryLimit = v; else if (vwrq->flags & IW_RETRY_SHORT) local->config.shortRetryLimit = v; else { /* No modifier : set both */ local->config.longRetryLimit = v; local->config.shortRetryLimit = v; } set_bit (FLAG_COMMIT, &local->flags); rc = -EINPROGRESS; /* Call commit handler */ } if(vwrq->flags & IW_RETRY_LIFETIME) { local->config.txLifetime = cpu_to_le16(vwrq->value / 1024); set_bit (FLAG_COMMIT, &local->flags); rc = -EINPROGRESS; /* Call commit handler */ } return rc; } /*------------------------------------------------------------------*/ /* * Wireless Handler : get Retry limits */ static int airo_get_retry(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { 
struct airo_info *local = dev->ml_priv; vwrq->disabled = 0; /* Can't be disabled */ readConfigRid(local, 1); /* Note : by default, display the min retry number */ if((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) { vwrq->flags = IW_RETRY_LIFETIME; vwrq->value = le16_to_cpu(local->config.txLifetime) * 1024; } else if((vwrq->flags & IW_RETRY_LONG)) { vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG; vwrq->value = le16_to_cpu(local->config.longRetryLimit); } else { vwrq->flags = IW_RETRY_LIMIT; vwrq->value = le16_to_cpu(local->config.shortRetryLimit); if(local->config.shortRetryLimit != local->config.longRetryLimit) vwrq->flags |= IW_RETRY_SHORT; } return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : get range info */ static int airo_get_range(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { struct airo_info *local = dev->ml_priv; struct iw_range *range = (struct iw_range *) extra; CapabilityRid cap_rid; /* Card capability info */ int i; int k; readCapabilityRid(local, &cap_rid, 1); dwrq->length = sizeof(struct iw_range); memset(range, 0, sizeof(*range)); range->min_nwid = 0x0000; range->max_nwid = 0x0000; range->num_channels = 14; /* Should be based on cap_rid.country to give only * what the current card support */ k = 0; for(i = 0; i < 14; i++) { range->freq[k].i = i + 1; /* List index */ range->freq[k].m = ieee80211_dsss_chan_to_freq(i + 1) * 100000; range->freq[k++].e = 1; /* Values in MHz -> * 10^5 * 10 */ } range->num_frequency = k; range->sensitivity = 65535; /* Hum... Should put the right values there */ if (local->rssi) range->max_qual.qual = 100; /* % */ else range->max_qual.qual = airo_get_max_quality(&cap_rid); range->max_qual.level = 0x100 - 120; /* -120 dBm */ range->max_qual.noise = 0x100 - 120; /* -120 dBm */ /* Experimental measurements - boundary 11/5.5 Mb/s */ /* Note : with or without the (local->rssi), results * are somewhat different. 
- Jean II */ if (local->rssi) { range->avg_qual.qual = 50; /* % */ range->avg_qual.level = 0x100 - 70; /* -70 dBm */ } else { range->avg_qual.qual = airo_get_avg_quality(&cap_rid); range->avg_qual.level = 0x100 - 80; /* -80 dBm */ } range->avg_qual.noise = 0x100 - 85; /* -85 dBm */ for(i = 0 ; i < 8 ; i++) { range->bitrate[i] = cap_rid.supportedRates[i] * 500000; if(range->bitrate[i] == 0) break; } range->num_bitrates = i; /* Set an indication of the max TCP throughput * in bit/s that we can expect using this interface. * May be use for QoS stuff... Jean II */ if(i > 2) range->throughput = 5000 * 1000; else range->throughput = 1500 * 1000; range->min_rts = 0; range->max_rts = AIRO_DEF_MTU; range->min_frag = 256; range->max_frag = AIRO_DEF_MTU; if(cap_rid.softCap & cpu_to_le16(2)) { // WEP: RC4 40 bits range->encoding_size[0] = 5; // RC4 ~128 bits if (cap_rid.softCap & cpu_to_le16(0x100)) { range->encoding_size[1] = 13; range->num_encoding_sizes = 2; } else range->num_encoding_sizes = 1; range->max_encoding_tokens = cap_rid.softCap & cpu_to_le16(0x80) ? 4 : 1; } else { range->num_encoding_sizes = 0; range->max_encoding_tokens = 0; } range->min_pmp = 0; range->max_pmp = 5000000; /* 5 secs */ range->min_pmt = 0; range->max_pmt = 65535 * 1024; /* ??? 
*/ range->pmp_flags = IW_POWER_PERIOD; range->pmt_flags = IW_POWER_TIMEOUT; range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R; /* Transmit Power - values are in mW */ for(i = 0 ; i < 8 ; i++) { range->txpower[i] = le16_to_cpu(cap_rid.txPowerLevels[i]); if(range->txpower[i] == 0) break; } range->num_txpower = i; range->txpower_capa = IW_TXPOW_MWATT; range->we_version_source = 19; range->we_version_compiled = WIRELESS_EXT; range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME; range->retry_flags = IW_RETRY_LIMIT; range->r_time_flags = IW_RETRY_LIFETIME; range->min_retry = 1; range->max_retry = 65535; range->min_r_time = 1024; range->max_r_time = 65535 * 1024; /* Event capability (kernel + driver) */ range->event_capa[0] = (IW_EVENT_CAPA_K_0 | IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) | IW_EVENT_CAPA_MASK(SIOCGIWAP) | IW_EVENT_CAPA_MASK(SIOCGIWSCAN)); range->event_capa[1] = IW_EVENT_CAPA_K_1; range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVTXDROP); return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set Power Management */ static int airo_set_power(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); if (vwrq->disabled) { if (sniffing_mode(local)) return -EINVAL; local->config.powerSaveMode = POWERSAVE_CAM; local->config.rmode &= ~RXMODE_MASK; local->config.rmode |= RXMODE_BC_MC_ADDR; set_bit (FLAG_COMMIT, &local->flags); return -EINPROGRESS; /* Call commit handler */ } if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { local->config.fastListenDelay = cpu_to_le16((vwrq->value + 500) / 1024); local->config.powerSaveMode = POWERSAVE_PSPCAM; set_bit (FLAG_COMMIT, &local->flags); } else if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) { local->config.fastListenInterval = local->config.listenInterval = cpu_to_le16((vwrq->value + 500) / 1024); local->config.powerSaveMode = POWERSAVE_PSPCAM; 
set_bit (FLAG_COMMIT, &local->flags); } switch (vwrq->flags & IW_POWER_MODE) { case IW_POWER_UNICAST_R: if (sniffing_mode(local)) return -EINVAL; local->config.rmode &= ~RXMODE_MASK; local->config.rmode |= RXMODE_ADDR; set_bit (FLAG_COMMIT, &local->flags); break; case IW_POWER_ALL_R: if (sniffing_mode(local)) return -EINVAL; local->config.rmode &= ~RXMODE_MASK; local->config.rmode |= RXMODE_BC_MC_ADDR; set_bit (FLAG_COMMIT, &local->flags); case IW_POWER_ON: /* This is broken, fixme ;-) */ break; default: return -EINVAL; } // Note : we may want to factor local->need_commit here // Note2 : may also want to factor RXMODE_RFMON test return -EINPROGRESS; /* Call commit handler */ } /*------------------------------------------------------------------*/ /* * Wireless Handler : get Power Management */ static int airo_get_power(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct airo_info *local = dev->ml_priv; __le16 mode; readConfigRid(local, 1); mode = local->config.powerSaveMode; if ((vwrq->disabled = (mode == POWERSAVE_CAM))) return 0; if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { vwrq->value = le16_to_cpu(local->config.fastListenDelay) * 1024; vwrq->flags = IW_POWER_TIMEOUT; } else { vwrq->value = le16_to_cpu(local->config.fastListenInterval) * 1024; vwrq->flags = IW_POWER_PERIOD; } if ((local->config.rmode & RXMODE_MASK) == RXMODE_ADDR) vwrq->flags |= IW_POWER_UNICAST_R; else vwrq->flags |= IW_POWER_ALL_R; return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set Sensitivity */ static int airo_set_sens(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); local->config.rssiThreshold = cpu_to_le16(vwrq->disabled ? 
RSSI_DEFAULT : vwrq->value); set_bit (FLAG_COMMIT, &local->flags); return -EINPROGRESS; /* Call commit handler */ } /*------------------------------------------------------------------*/ /* * Wireless Handler : get Sensitivity */ static int airo_get_sens(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); vwrq->value = le16_to_cpu(local->config.rssiThreshold); vwrq->disabled = (vwrq->value == 0); vwrq->fixed = 1; return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : get AP List * Note : this is deprecated in favor of IWSCAN */ static int airo_get_aplist(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { struct airo_info *local = dev->ml_priv; struct sockaddr *address = (struct sockaddr *) extra; struct iw_quality *qual; BSSListRid BSSList; int i; int loseSync = capable(CAP_NET_ADMIN) ? 1: -1; qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL); if (!qual) return -ENOMEM; for (i = 0; i < IW_MAX_AP; i++) { u16 dBm; if (readBSSListRid(local, loseSync, &BSSList)) break; loseSync = 0; memcpy(address[i].sa_data, BSSList.bssid, ETH_ALEN); address[i].sa_family = ARPHRD_ETHER; dBm = le16_to_cpu(BSSList.dBm); if (local->rssi) { qual[i].level = 0x100 - dBm; qual[i].qual = airo_dbm_to_pct(local->rssi, dBm); qual[i].updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_DBM; } else { qual[i].level = (dBm + 321) / 2; qual[i].qual = 0; qual[i].updated = IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_UPDATED | IW_QUAL_DBM; } qual[i].noise = local->wstats.qual.noise; if (BSSList.index == cpu_to_le16(0xffff)) break; } if (!i) { StatusRid status_rid; /* Card status info */ readStatusRid(local, &status_rid, 1); for (i = 0; i < min(IW_MAX_AP, 4) && (status_rid.bssid[i][0] & status_rid.bssid[i][1] & status_rid.bssid[i][2] & status_rid.bssid[i][3] & status_rid.bssid[i][4] & 
status_rid.bssid[i][5])!=0xff && (status_rid.bssid[i][0] | status_rid.bssid[i][1] | status_rid.bssid[i][2] | status_rid.bssid[i][3] | status_rid.bssid[i][4] | status_rid.bssid[i][5]); i++) { memcpy(address[i].sa_data, status_rid.bssid[i], ETH_ALEN); address[i].sa_family = ARPHRD_ETHER; } } else { dwrq->flags = 1; /* Should be define'd */ memcpy(extra + sizeof(struct sockaddr) * i, qual, sizeof(struct iw_quality) * i); } dwrq->length = i; kfree(qual); return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : Initiate Scan */ static int airo_set_scan(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { struct airo_info *ai = dev->ml_priv; Cmd cmd; Resp rsp; int wake = 0; /* Note : you may have realised that, as this is a SET operation, * this is privileged and therefore a normal user can't * perform scanning. * This is not an error, while the device perform scanning, * traffic doesn't flow, so it's a perfect DoS... * Jean II */ if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN; if (down_interruptible(&ai->sem)) return -ERESTARTSYS; /* If there's already a scan in progress, don't * trigger another one. 
*/ if (ai->scan_timeout > 0) goto out; /* Initiate a scan command */ ai->scan_timeout = RUN_AT(3*HZ); memset(&cmd, 0, sizeof(cmd)); cmd.cmd=CMD_LISTBSS; issuecommand(ai, &cmd, &rsp); wake = 1; out: up(&ai->sem); if (wake) wake_up_interruptible(&ai->thr_wait); return 0; } /*------------------------------------------------------------------*/ /* * Translate scan data returned from the card to a card independent * format that the Wireless Tools will understand - Jean II */ static inline char *airo_translate_scan(struct net_device *dev, struct iw_request_info *info, char *current_ev, char *end_buf, BSSListRid *bss) { struct airo_info *ai = dev->ml_priv; struct iw_event iwe; /* Temporary buffer */ __le16 capabilities; char * current_val; /* For rates */ int i; char * buf; u16 dBm; /* First entry *MUST* be the AP MAC address */ iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN); current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_ADDR_LEN); /* Other entries will be displayed in the order we give them */ /* Add the ESSID */ iwe.u.data.length = bss->ssidLen; if(iwe.u.data.length > 32) iwe.u.data.length = 32; iwe.cmd = SIOCGIWESSID; iwe.u.data.flags = 1; current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, bss->ssid); /* Add mode */ iwe.cmd = SIOCGIWMODE; capabilities = bss->cap; if(capabilities & (CAP_ESS | CAP_IBSS)) { if(capabilities & CAP_ESS) iwe.u.mode = IW_MODE_MASTER; else iwe.u.mode = IW_MODE_ADHOC; current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_UINT_LEN); } /* Add frequency */ iwe.cmd = SIOCGIWFREQ; iwe.u.freq.m = le16_to_cpu(bss->dsChannel); iwe.u.freq.m = ieee80211_dsss_chan_to_freq(iwe.u.freq.m) * 100000; iwe.u.freq.e = 1; current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_FREQ_LEN); dBm = le16_to_cpu(bss->dBm); /* Add quality statistics */ iwe.cmd = IWEVQUAL; if (ai->rssi) { iwe.u.qual.level = 0x100 - dBm; 
iwe.u.qual.qual = airo_dbm_to_pct(ai->rssi, dBm); iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_DBM; } else { iwe.u.qual.level = (dBm + 321) / 2; iwe.u.qual.qual = 0; iwe.u.qual.updated = IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_UPDATED | IW_QUAL_DBM; } iwe.u.qual.noise = ai->wstats.qual.noise; current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_QUAL_LEN); /* Add encryption capability */ iwe.cmd = SIOCGIWENCODE; if(capabilities & CAP_PRIVACY) iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; else iwe.u.data.flags = IW_ENCODE_DISABLED; iwe.u.data.length = 0; current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, bss->ssid); /* Rate : stuffing multiple values in a single event require a bit * more of magic - Jean II */ current_val = current_ev + iwe_stream_lcp_len(info); iwe.cmd = SIOCGIWRATE; /* Those two flags are ignored... */ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; /* Max 8 values */ for(i = 0 ; i < 8 ; i++) { /* NULL terminated */ if(bss->rates[i] == 0) break; /* Bit rate given in 500 kb/s units (+ 0x80) */ iwe.u.bitrate.value = ((bss->rates[i] & 0x7f) * 500000); /* Add new value to event */ current_val = iwe_stream_add_value(info, current_ev, current_val, end_buf, &iwe, IW_EV_PARAM_LEN); } /* Check if we added any event */ if ((current_val - current_ev) > iwe_stream_lcp_len(info)) current_ev = current_val; /* Beacon interval */ buf = kmalloc(30, GFP_KERNEL); if (buf) { iwe.cmd = IWEVCUSTOM; sprintf(buf, "bcn_int=%d", bss->beaconInterval); iwe.u.data.length = strlen(buf); current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, buf); kfree(buf); } /* Put WPA/RSN Information Elements into the event stream */ if (test_bit(FLAG_WPA_CAPABLE, &ai->flags)) { unsigned int num_null_ies = 0; u16 length = sizeof (bss->extra.iep); u8 *ie = (void *)&bss->extra.iep; while ((length >= 2) && (num_null_ies < 2)) { if (2 + ie[1] > length) { /* Invalid element, don't continue parsing IE */ break; 
} switch (ie[0]) { case WLAN_EID_SSID: /* Two zero-length SSID elements * mean we're done parsing elements */ if (!ie[1]) num_null_ies++; break; case WLAN_EID_VENDOR_SPECIFIC: if (ie[1] >= 4 && ie[2] == 0x00 && ie[3] == 0x50 && ie[4] == 0xf2 && ie[5] == 0x01) { iwe.cmd = IWEVGENIE; /* 64 is an arbitrary cut-off */ iwe.u.data.length = min(ie[1] + 2, 64); current_ev = iwe_stream_add_point( info, current_ev, end_buf, &iwe, ie); } break; case WLAN_EID_RSN: iwe.cmd = IWEVGENIE; /* 64 is an arbitrary cut-off */ iwe.u.data.length = min(ie[1] + 2, 64); current_ev = iwe_stream_add_point( info, current_ev, end_buf, &iwe, ie); break; default: break; } length -= 2 + ie[1]; ie += 2 + ie[1]; } } return current_ev; } /*------------------------------------------------------------------*/ /* * Wireless Handler : Read Scan Results */ static int airo_get_scan(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { struct airo_info *ai = dev->ml_priv; BSSListElement *net; int err = 0; char *current_ev = extra; /* If a scan is in-progress, return -EAGAIN */ if (ai->scan_timeout > 0) return -EAGAIN; if (down_interruptible(&ai->sem)) return -EAGAIN; list_for_each_entry (net, &ai->network_list, list) { /* Translate to WE format this entry */ current_ev = airo_translate_scan(dev, info, current_ev, extra + dwrq->length, &net->bss); /* Check if there is space for one more entry */ if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) { /* Ask user space to try again with a bigger buffer */ err = -E2BIG; goto out; } } /* Length of data */ dwrq->length = (current_ev - extra); dwrq->flags = 0; /* todo */ out: up(&ai->sem); return err; } /*------------------------------------------------------------------*/ /* * Commit handler : called after a bunch of SET operations */ static int airo_config_commit(struct net_device *dev, struct iw_request_info *info, /* NULL */ void *zwrq, /* NULL */ char *extra) /* NULL */ { struct airo_info *local = dev->ml_priv; if 
(!test_bit (FLAG_COMMIT, &local->flags)) return 0; /* Some of the "SET" function may have modified some of the * parameters. It's now time to commit them in the card */ disable_MAC(local, 1); if (test_bit (FLAG_RESET, &local->flags)) { APListRid APList_rid; SsidRid SSID_rid; readAPListRid(local, &APList_rid); readSsidRid(local, &SSID_rid); if (test_bit(FLAG_MPI,&local->flags)) setup_card(local, dev->dev_addr, 1 ); else reset_airo_card(dev); disable_MAC(local, 1); writeSsidRid(local, &SSID_rid, 1); writeAPListRid(local, &APList_rid, 1); } if (down_interruptible(&local->sem)) return -ERESTARTSYS; writeConfigRid(local, 0); enable_MAC(local, 0); if (test_bit (FLAG_RESET, &local->flags)) airo_set_promisc(local); else up(&local->sem); return 0; } /*------------------------------------------------------------------*/ /* * Structures to export the Wireless Handlers */ static const struct iw_priv_args airo_private_args[] = { /*{ cmd, set_args, get_args, name } */ { AIROIOCTL, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof (aironet_ioctl), IW_PRIV_TYPE_BYTE | 2047, "airoioctl" }, { AIROIDIFC, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof (aironet_ioctl), IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "airoidifc" }, }; static const iw_handler airo_handler[] = { (iw_handler) airo_config_commit, /* SIOCSIWCOMMIT */ (iw_handler) airo_get_name, /* SIOCGIWNAME */ (iw_handler) NULL, /* SIOCSIWNWID */ (iw_handler) NULL, /* SIOCGIWNWID */ (iw_handler) airo_set_freq, /* SIOCSIWFREQ */ (iw_handler) airo_get_freq, /* SIOCGIWFREQ */ (iw_handler) airo_set_mode, /* SIOCSIWMODE */ (iw_handler) airo_get_mode, /* SIOCGIWMODE */ (iw_handler) airo_set_sens, /* SIOCSIWSENS */ (iw_handler) airo_get_sens, /* SIOCGIWSENS */ (iw_handler) NULL, /* SIOCSIWRANGE */ (iw_handler) airo_get_range, /* SIOCGIWRANGE */ (iw_handler) NULL, /* SIOCSIWPRIV */ (iw_handler) NULL, /* SIOCGIWPRIV */ (iw_handler) NULL, /* SIOCSIWSTATS */ (iw_handler) NULL, /* SIOCGIWSTATS */ iw_handler_set_spy, /* SIOCSIWSPY */ 
iw_handler_get_spy, /* SIOCGIWSPY */ iw_handler_set_thrspy, /* SIOCSIWTHRSPY */ iw_handler_get_thrspy, /* SIOCGIWTHRSPY */ (iw_handler) airo_set_wap, /* SIOCSIWAP */ (iw_handler) airo_get_wap, /* SIOCGIWAP */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) airo_get_aplist, /* SIOCGIWAPLIST */ (iw_handler) airo_set_scan, /* SIOCSIWSCAN */ (iw_handler) airo_get_scan, /* SIOCGIWSCAN */ (iw_handler) airo_set_essid, /* SIOCSIWESSID */ (iw_handler) airo_get_essid, /* SIOCGIWESSID */ (iw_handler) airo_set_nick, /* SIOCSIWNICKN */ (iw_handler) airo_get_nick, /* SIOCGIWNICKN */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) airo_set_rate, /* SIOCSIWRATE */ (iw_handler) airo_get_rate, /* SIOCGIWRATE */ (iw_handler) airo_set_rts, /* SIOCSIWRTS */ (iw_handler) airo_get_rts, /* SIOCGIWRTS */ (iw_handler) airo_set_frag, /* SIOCSIWFRAG */ (iw_handler) airo_get_frag, /* SIOCGIWFRAG */ (iw_handler) airo_set_txpow, /* SIOCSIWTXPOW */ (iw_handler) airo_get_txpow, /* SIOCGIWTXPOW */ (iw_handler) airo_set_retry, /* SIOCSIWRETRY */ (iw_handler) airo_get_retry, /* SIOCGIWRETRY */ (iw_handler) airo_set_encode, /* SIOCSIWENCODE */ (iw_handler) airo_get_encode, /* SIOCGIWENCODE */ (iw_handler) airo_set_power, /* SIOCSIWPOWER */ (iw_handler) airo_get_power, /* SIOCGIWPOWER */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) NULL, /* SIOCSIWGENIE */ (iw_handler) NULL, /* SIOCGIWGENIE */ (iw_handler) airo_set_auth, /* SIOCSIWAUTH */ (iw_handler) airo_get_auth, /* SIOCGIWAUTH */ (iw_handler) airo_set_encodeext, /* SIOCSIWENCODEEXT */ (iw_handler) airo_get_encodeext, /* SIOCGIWENCODEEXT */ (iw_handler) NULL, /* SIOCSIWPMKSA */ }; /* Note : don't describe AIROIDIFC and AIROOLDIDIFC in here. * We want to force the use of the ioctl code, because those can't be * won't work the iw_handler code (because they simultaneously read * and write data and iw_handler can't do that). 
* Note that it's perfectly legal to read/write on a single ioctl command, * you just can't use iwpriv and need to force it via the ioctl handler. * Jean II */ static const iw_handler airo_private_handler[] = { NULL, /* SIOCIWFIRSTPRIV */ }; static const struct iw_handler_def airo_handler_def = { .num_standard = ARRAY_SIZE(airo_handler), .num_private = ARRAY_SIZE(airo_private_handler), .num_private_args = ARRAY_SIZE(airo_private_args), .standard = airo_handler, .private = airo_private_handler, .private_args = airo_private_args, .get_wireless_stats = airo_get_wireless_stats, }; /* * This defines the configuration part of the Wireless Extensions * Note : irq and spinlock protection will occur in the subroutines * * TODO : * o Check input value more carefully and fill correct values in range * o Test and shakeout the bugs (if any) * * Jean II * * Javier Achirica did a great job of merging code from the unnamed CISCO * developer that added support for flashing the card. */ static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { int rc = 0; struct airo_info *ai = dev->ml_priv; if (ai->power.event) return 0; switch (cmd) { #ifdef CISCO_EXT case AIROIDIFC: #ifdef AIROOLDIDIFC case AIROOLDIDIFC: #endif { int val = AIROMAGIC; aironet_ioctl com; if (copy_from_user(&com,rq->ifr_data,sizeof(com))) rc = -EFAULT; else if (copy_to_user(com.data,(char *)&val,sizeof(val))) rc = -EFAULT; } break; case AIROIOCTL: #ifdef AIROOLDIOCTL case AIROOLDIOCTL: #endif /* Get the command struct and hand it off for evaluation by * the proper subfunction */ { aironet_ioctl com; if (copy_from_user(&com,rq->ifr_data,sizeof(com))) { rc = -EFAULT; break; } /* Separate R/W functions bracket legality here */ if ( com.command == AIRORSWVERSION ) { if (copy_to_user(com.data, swversion, sizeof(swversion))) rc = -EFAULT; else rc = 0; } else if ( com.command <= AIRORRID) rc = readrids(dev,&com); else if ( com.command >= AIROPCAP && com.command <= (AIROPLEAPUSR+2) ) rc = 
writerids(dev,&com); else if ( com.command >= AIROFLSHRST && com.command <= AIRORESTART ) rc = flashcard(dev,&com); else rc = -EINVAL; /* Bad command in ioctl */ } break; #endif /* CISCO_EXT */ // All other calls are currently unsupported default: rc = -EOPNOTSUPP; } return rc; } /* * Get the Wireless stats out of the driver * Note : irq and spinlock protection will occur in the subroutines * * TODO : * o Check if work in Ad-Hoc mode (otherwise, use SPY, as in wvlan_cs) * * Jean */ static void airo_read_wireless_stats(struct airo_info *local) { StatusRid status_rid; StatsRid stats_rid; CapabilityRid cap_rid; __le32 *vals = stats_rid.vals; /* Get stats out of the card */ clear_bit(JOB_WSTATS, &local->jobs); if (local->power.event) { up(&local->sem); return; } readCapabilityRid(local, &cap_rid, 0); readStatusRid(local, &status_rid, 0); readStatsRid(local, &stats_rid, RID_STATS, 0); up(&local->sem); /* The status */ local->wstats.status = le16_to_cpu(status_rid.mode); /* Signal quality and co */ if (local->rssi) { local->wstats.qual.level = airo_rssi_to_dbm(local->rssi, le16_to_cpu(status_rid.sigQuality)); /* normalizedSignalStrength appears to be a percentage */ local->wstats.qual.qual = le16_to_cpu(status_rid.normalizedSignalStrength); } else { local->wstats.qual.level = (le16_to_cpu(status_rid.normalizedSignalStrength) + 321) / 2; local->wstats.qual.qual = airo_get_quality(&status_rid, &cap_rid); } if (le16_to_cpu(status_rid.len) >= 124) { local->wstats.qual.noise = 0x100 - status_rid.noisedBm; local->wstats.qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; } else { local->wstats.qual.noise = 0; local->wstats.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID | IW_QUAL_DBM; } /* Packets discarded in the wireless adapter due to wireless * specific problems */ local->wstats.discard.nwid = le32_to_cpu(vals[56]) + le32_to_cpu(vals[57]) + le32_to_cpu(vals[58]); /* SSID Mismatch */ local->wstats.discard.code = le32_to_cpu(vals[6]);/* 
RxWepErr */ local->wstats.discard.fragment = le32_to_cpu(vals[30]); local->wstats.discard.retries = le32_to_cpu(vals[10]); local->wstats.discard.misc = le32_to_cpu(vals[1]) + le32_to_cpu(vals[32]); local->wstats.miss.beacon = le32_to_cpu(vals[34]); } static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev) { struct airo_info *local = dev->ml_priv; if (!test_bit(JOB_WSTATS, &local->jobs)) { /* Get stats out of the card if available */ if (down_trylock(&local->sem) != 0) { set_bit(JOB_WSTATS, &local->jobs); wake_up_interruptible(&local->thr_wait); } else airo_read_wireless_stats(local); } return &local->wstats; } #ifdef CISCO_EXT /* * This just translates from driver IOCTL codes to the command codes to * feed to the radio's host interface. Things can be added/deleted * as needed. This represents the READ side of control I/O to * the card */ static int readrids(struct net_device *dev, aironet_ioctl *comp) { unsigned short ridcode; unsigned char *iobuf; int len; struct airo_info *ai = dev->ml_priv; if (test_bit(FLAG_FLASHING, &ai->flags)) return -EIO; switch(comp->command) { case AIROGCAP: ridcode = RID_CAPABILITIES; break; case AIROGCFG: ridcode = RID_CONFIG; if (test_bit(FLAG_COMMIT, &ai->flags)) { disable_MAC (ai, 1); writeConfigRid (ai, 1); enable_MAC(ai, 1); } break; case AIROGSLIST: ridcode = RID_SSID; break; case AIROGVLIST: ridcode = RID_APLIST; break; case AIROGDRVNAM: ridcode = RID_DRVNAME; break; case AIROGEHTENC: ridcode = RID_ETHERENCAP; break; case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; /* Only super-user can read WEP keys */ if (!capable(CAP_NET_ADMIN)) return -EPERM; break; case AIROGWEPKNV: ridcode = RID_WEP_PERM; /* Only super-user can read WEP keys */ if (!capable(CAP_NET_ADMIN)) return -EPERM; break; case AIROGSTAT: ridcode = RID_STATUS; break; case AIROGSTATSD32: ridcode = RID_STATSDELTA; break; case AIROGSTATSC32: ridcode = RID_STATS; break; case AIROGMICSTATS: if (copy_to_user(comp->data, &ai->micstats, 
min((int)comp->len,(int)sizeof(ai->micstats)))) return -EFAULT; return 0; case AIRORRID: ridcode = comp->ridnum; break; default: return -EINVAL; break; } if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL) return -ENOMEM; PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1); /* get the count of bytes in the rid docs say 1st 2 bytes is it. * then return it to the user * 9/22/2000 Honor user given length */ len = comp->len; if (copy_to_user(comp->data, iobuf, min(len, (int)RIDSIZE))) { kfree (iobuf); return -EFAULT; } kfree (iobuf); return 0; } /* * Danger Will Robinson write the rids here */ static int writerids(struct net_device *dev, aironet_ioctl *comp) { struct airo_info *ai = dev->ml_priv; int ridcode; int enabled; static int (* writer)(struct airo_info *, u16 rid, const void *, int, int); unsigned char *iobuf; /* Only super-user can write RIDs */ if (!capable(CAP_NET_ADMIN)) return -EPERM; if (test_bit(FLAG_FLASHING, &ai->flags)) return -EIO; ridcode = 0; writer = do_writerid; switch(comp->command) { case AIROPSIDS: ridcode = RID_SSID; break; case AIROPCAP: ridcode = RID_CAPABILITIES; break; case AIROPAPLIST: ridcode = RID_APLIST; break; case AIROPCFG: ai->config.len = 0; clear_bit(FLAG_COMMIT, &ai->flags); ridcode = RID_CONFIG; break; case AIROPWEPKEYNV: ridcode = RID_WEP_PERM; break; case AIROPLEAPUSR: ridcode = RID_LEAPUSERNAME; break; case AIROPLEAPPWD: ridcode = RID_LEAPPASSWORD; break; case AIROPWEPKEY: ridcode = RID_WEP_TEMP; writer = PC4500_writerid; break; case AIROPLEAPUSR+1: ridcode = 0xFF2A; break; case AIROPLEAPUSR+2: ridcode = 0xFF2B; break; /* this is not really a rid but a command given to the card * same with MAC off */ case AIROPMACON: if (enable_MAC(ai, 1) != 0) return -EIO; return 0; /* * Evidently this code in the airo driver does not get a symbol * as disable_MAC. it's probably so short the compiler does not gen one. 
*/ case AIROPMACOFF: disable_MAC(ai, 1); return 0; /* This command merely clears the counts does not actually store any data * only reads rid. But as it changes the cards state, I put it in the * writerid routines. */ case AIROPSTCLR: if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL) return -ENOMEM; PC4500_readrid(ai,RID_STATSDELTACLEAR,iobuf,RIDSIZE, 1); enabled = ai->micstats.enabled; memset(&ai->micstats,0,sizeof(ai->micstats)); ai->micstats.enabled = enabled; if (copy_to_user(comp->data, iobuf, min((int)comp->len, (int)RIDSIZE))) { kfree (iobuf); return -EFAULT; } kfree (iobuf); return 0; default: return -EOPNOTSUPP; /* Blarg! */ } if(comp->len > RIDSIZE) return -EINVAL; if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL) return -ENOMEM; if (copy_from_user(iobuf,comp->data,comp->len)) { kfree (iobuf); return -EFAULT; } if (comp->command == AIROPCFG) { ConfigRid *cfg = (ConfigRid *)iobuf; if (test_bit(FLAG_MIC_CAPABLE, &ai->flags)) cfg->opmode |= MODE_MIC; if ((cfg->opmode & MODE_CFG_MASK) == MODE_STA_IBSS) set_bit (FLAG_ADHOC, &ai->flags); else clear_bit (FLAG_ADHOC, &ai->flags); } if((*writer)(ai, ridcode, iobuf,comp->len,1)) { kfree (iobuf); return -EIO; } kfree (iobuf); return 0; } /***************************************************************************** * Ancillary flash / mod functions much black magic lurkes here * ***************************************************************************** */ /* * Flash command switch table */ static int flashcard(struct net_device *dev, aironet_ioctl *comp) { int z; /* Only super-user can modify flash */ if (!capable(CAP_NET_ADMIN)) return -EPERM; switch(comp->command) { case AIROFLSHRST: return cmdreset((struct airo_info *)dev->ml_priv); case AIROFLSHSTFL: if (!AIRO_FLASH(dev) && (AIRO_FLASH(dev) = kmalloc(FLASHSIZE, GFP_KERNEL)) == NULL) return -ENOMEM; return setflashmode((struct airo_info *)dev->ml_priv); case AIROFLSHGCHR: /* Get char from aux */ if(comp->len != sizeof(int)) return -EINVAL; if 
(copy_from_user(&z,comp->data,comp->len)) return -EFAULT; return flashgchar((struct airo_info *)dev->ml_priv, z, 8000); case AIROFLSHPCHR: /* Send char to card. */ if(comp->len != sizeof(int)) return -EINVAL; if (copy_from_user(&z,comp->data,comp->len)) return -EFAULT; return flashpchar((struct airo_info *)dev->ml_priv, z, 8000); case AIROFLPUTBUF: /* Send 32k to card */ if (!AIRO_FLASH(dev)) return -ENOMEM; if(comp->len > FLASHSIZE) return -EINVAL; if (copy_from_user(AIRO_FLASH(dev), comp->data, comp->len)) return -EFAULT; flashputbuf((struct airo_info *)dev->ml_priv); return 0; case AIRORESTART: if (flashrestart((struct airo_info *)dev->ml_priv, dev)) return -EIO; return 0; } return -EINVAL; } #define FLASH_COMMAND 0x7e7e /* * STEP 1) * Disable MAC and do soft reset on * card. */ static int cmdreset(struct airo_info *ai) { disable_MAC(ai, 1); if(!waitbusy (ai)){ airo_print_info(ai->dev->name, "Waitbusy hang before RESET"); return -EBUSY; } OUT4500(ai,COMMAND,CMD_SOFTRESET); ssleep(1); /* WAS 600 12/7/00 */ if(!waitbusy (ai)){ airo_print_info(ai->dev->name, "Waitbusy hang AFTER RESET"); return -EBUSY; } return 0; } /* STEP 2) * Put the card in legendary flash * mode */ static int setflashmode (struct airo_info *ai) { set_bit (FLAG_FLASHING, &ai->flags); OUT4500(ai, SWS0, FLASH_COMMAND); OUT4500(ai, SWS1, FLASH_COMMAND); if (probe) { OUT4500(ai, SWS0, FLASH_COMMAND); OUT4500(ai, COMMAND,0x10); } else { OUT4500(ai, SWS2, FLASH_COMMAND); OUT4500(ai, SWS3, FLASH_COMMAND); OUT4500(ai, COMMAND,0); } msleep(500); /* 500ms delay */ if(!waitbusy(ai)) { clear_bit (FLAG_FLASHING, &ai->flags); airo_print_info(ai->dev->name, "Waitbusy hang after setflash mode"); return -EIO; } return 0; } /* Put character to SWS0 wait for dwelltime * x 50us for echo . 
*/ static int flashpchar(struct airo_info *ai,int byte,int dwelltime) { int echo; int waittime; byte |= 0x8000; if(dwelltime == 0 ) dwelltime = 200; waittime=dwelltime; /* Wait for busy bit d15 to go false indicating buffer empty */ while ((IN4500 (ai, SWS0) & 0x8000) && waittime > 0) { udelay (50); waittime -= 50; } /* timeout for busy clear wait */ if(waittime <= 0 ){ airo_print_info(ai->dev->name, "flash putchar busywait timeout!"); return -EBUSY; } /* Port is clear now write byte and wait for it to echo back */ do { OUT4500(ai,SWS0,byte); udelay(50); dwelltime -= 50; echo = IN4500(ai,SWS1); } while (dwelltime >= 0 && echo != byte); OUT4500(ai,SWS1,0); return (echo == byte) ? 0 : -EIO; } /* * Get a character from the card matching matchbyte * Step 3) */ static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime){ int rchar; unsigned char rbyte=0; do { rchar = IN4500(ai,SWS1); if(dwelltime && !(0x8000 & rchar)){ dwelltime -= 10; mdelay(10); continue; } rbyte = 0xff & rchar; if( (rbyte == matchbyte) && (0x8000 & rchar) ){ OUT4500(ai,SWS1,0); return 0; } if( rbyte == 0x81 || rbyte == 0x82 || rbyte == 0x83 || rbyte == 0x1a || 0xffff == rchar) break; OUT4500(ai,SWS1,0); }while(dwelltime > 0); return -EIO; } /* * Transfer 32k of firmware data from user buffer to our buffer and * send to the card */ static int flashputbuf(struct airo_info *ai){ int nwords; /* Write stuff */ if (test_bit(FLAG_MPI,&ai->flags)) memcpy_toio(ai->pciaux + 0x8000, ai->flash, FLASHSIZE); else { OUT4500(ai,AUXPAGE,0x100); OUT4500(ai,AUXOFF,0); for(nwords=0;nwords != FLASHSIZE / 2;nwords++){ OUT4500(ai,AUXDATA,ai->flash[nwords] & 0xffff); } } OUT4500(ai,SWS0,0x8000); return 0; } /* * */ static int flashrestart(struct airo_info *ai,struct net_device *dev){ int i,status; ssleep(1); /* Added 12/7/00 */ clear_bit (FLAG_FLASHING, &ai->flags); if (test_bit(FLAG_MPI, &ai->flags)) { status = mpi_init_descriptors(ai); if (status != SUCCESS) return status; } status = setup_card(ai, 
dev->dev_addr, 1); if (!test_bit(FLAG_MPI,&ai->flags)) for( i = 0; i < MAX_FIDS; i++ ) { ai->fids[i] = transmit_allocate ( ai, AIRO_DEF_MTU, i >= MAX_FIDS / 2 ); } ssleep(1); /* Added 12/7/00 */ return status; } #endif /* CISCO_EXT */ /* This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. In addition: Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ module_init(airo_init_module); module_exit(airo_cleanup_module);
gpl-2.0
PureNexusProject/android_kernel_htc_flounder
arch/mips/alchemy/common/platform.c
2586
12047
/* * Platform device support for Au1x00 SoCs. * * Copyright 2004, Matt Porter <mporter@kernel.crashing.org> * * (C) Copyright Embedded Alley Solutions, Inc 2005 * Author: Pantelis Antoniou <pantelis@embeddedalley.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/serial_8250.h> #include <linux/slab.h> #include <linux/usb/ehci_pdriver.h> #include <linux/usb/ohci_pdriver.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1xxx_dbdma.h> #include <asm/mach-au1x00/au1100_mmc.h> #include <asm/mach-au1x00/au1xxx_eth.h> #include <prom.h> static void alchemy_8250_pm(struct uart_port *port, unsigned int state, unsigned int old_state) { #ifdef CONFIG_SERIAL_8250 switch (state) { case 0: alchemy_uart_enable(CPHYSADDR(port->membase)); serial8250_do_pm(port, state, old_state); break; case 3: /* power off */ serial8250_do_pm(port, state, old_state); alchemy_uart_disable(CPHYSADDR(port->membase)); break; default: serial8250_do_pm(port, state, old_state); break; } #endif } #define PORT(_base, _irq) \ { \ .mapbase = _base, \ .irq = _irq, \ .regshift = 2, \ .iotype = UPIO_AU, \ .flags = UPF_SKIP_TEST | UPF_IOREMAP | \ UPF_FIXED_TYPE, \ .type = PORT_16550A, \ .pm = alchemy_8250_pm, \ } static struct plat_serial8250_port au1x00_uart_data[][4] __initdata = { [ALCHEMY_CPU_AU1000] = { PORT(AU1000_UART0_PHYS_ADDR, AU1000_UART0_INT), PORT(AU1000_UART1_PHYS_ADDR, AU1000_UART1_INT), PORT(AU1000_UART2_PHYS_ADDR, AU1000_UART2_INT), PORT(AU1000_UART3_PHYS_ADDR, AU1000_UART3_INT), }, [ALCHEMY_CPU_AU1500] = { PORT(AU1000_UART0_PHYS_ADDR, AU1500_UART0_INT), PORT(AU1000_UART3_PHYS_ADDR, AU1500_UART3_INT), }, [ALCHEMY_CPU_AU1100] = { PORT(AU1000_UART0_PHYS_ADDR, AU1100_UART0_INT), PORT(AU1000_UART1_PHYS_ADDR, 
AU1100_UART1_INT), PORT(AU1000_UART3_PHYS_ADDR, AU1100_UART3_INT), }, [ALCHEMY_CPU_AU1550] = { PORT(AU1000_UART0_PHYS_ADDR, AU1550_UART0_INT), PORT(AU1000_UART1_PHYS_ADDR, AU1550_UART1_INT), PORT(AU1000_UART3_PHYS_ADDR, AU1550_UART3_INT), }, [ALCHEMY_CPU_AU1200] = { PORT(AU1000_UART0_PHYS_ADDR, AU1200_UART0_INT), PORT(AU1000_UART1_PHYS_ADDR, AU1200_UART1_INT), }, [ALCHEMY_CPU_AU1300] = { PORT(AU1300_UART0_PHYS_ADDR, AU1300_UART0_INT), PORT(AU1300_UART1_PHYS_ADDR, AU1300_UART1_INT), PORT(AU1300_UART2_PHYS_ADDR, AU1300_UART2_INT), PORT(AU1300_UART3_PHYS_ADDR, AU1300_UART3_INT), }, }; static struct platform_device au1xx0_uart_device = { .name = "serial8250", .id = PLAT8250_DEV_AU1X00, }; static void __init alchemy_setup_uarts(int ctype) { unsigned int uartclk = get_au1x00_uart_baud_base() * 16; int s = sizeof(struct plat_serial8250_port); int c = alchemy_get_uarts(ctype); struct plat_serial8250_port *ports; ports = kzalloc(s * (c + 1), GFP_KERNEL); if (!ports) { printk(KERN_INFO "Alchemy: no memory for UART data\n"); return; } memcpy(ports, au1x00_uart_data[ctype], s * c); au1xx0_uart_device.dev.platform_data = ports; /* Fill up uartclk. 
*/ for (s = 0; s < c; s++) ports[s].uartclk = uartclk; if (platform_device_register(&au1xx0_uart_device)) printk(KERN_INFO "Alchemy: failed to register UARTs\n"); } /* The dmamask must be set for OHCI/EHCI to work */ static u64 alchemy_ohci_dmamask = DMA_BIT_MASK(32); static u64 __maybe_unused alchemy_ehci_dmamask = DMA_BIT_MASK(32); /* Power on callback for the ehci platform driver */ static int alchemy_ehci_power_on(struct platform_device *pdev) { return alchemy_usb_control(ALCHEMY_USB_EHCI0, 1); } /* Power off/suspend callback for the ehci platform driver */ static void alchemy_ehci_power_off(struct platform_device *pdev) { alchemy_usb_control(ALCHEMY_USB_EHCI0, 0); } static struct usb_ehci_pdata alchemy_ehci_pdata = { .no_io_watchdog = 1, .power_on = alchemy_ehci_power_on, .power_off = alchemy_ehci_power_off, .power_suspend = alchemy_ehci_power_off, }; /* Power on callback for the ohci platform driver */ static int alchemy_ohci_power_on(struct platform_device *pdev) { int unit; unit = (pdev->id == 1) ? ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0; return alchemy_usb_control(unit, 1); } /* Power off/suspend callback for the ohci platform driver */ static void alchemy_ohci_power_off(struct platform_device *pdev) { int unit; unit = (pdev->id == 1) ? 
ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0; alchemy_usb_control(unit, 0); } static struct usb_ohci_pdata alchemy_ohci_pdata = { .power_on = alchemy_ohci_power_on, .power_off = alchemy_ohci_power_off, .power_suspend = alchemy_ohci_power_off, }; static unsigned long alchemy_ohci_data[][2] __initdata = { [ALCHEMY_CPU_AU1000] = { AU1000_USB_OHCI_PHYS_ADDR, AU1000_USB_HOST_INT }, [ALCHEMY_CPU_AU1500] = { AU1000_USB_OHCI_PHYS_ADDR, AU1500_USB_HOST_INT }, [ALCHEMY_CPU_AU1100] = { AU1000_USB_OHCI_PHYS_ADDR, AU1100_USB_HOST_INT }, [ALCHEMY_CPU_AU1550] = { AU1550_USB_OHCI_PHYS_ADDR, AU1550_USB_HOST_INT }, [ALCHEMY_CPU_AU1200] = { AU1200_USB_OHCI_PHYS_ADDR, AU1200_USB_INT }, [ALCHEMY_CPU_AU1300] = { AU1300_USB_OHCI0_PHYS_ADDR, AU1300_USB_INT }, }; static unsigned long alchemy_ehci_data[][2] __initdata = { [ALCHEMY_CPU_AU1200] = { AU1200_USB_EHCI_PHYS_ADDR, AU1200_USB_INT }, [ALCHEMY_CPU_AU1300] = { AU1300_USB_EHCI_PHYS_ADDR, AU1300_USB_INT }, }; static int __init _new_usbres(struct resource **r, struct platform_device **d) { *r = kzalloc(sizeof(struct resource) * 2, GFP_KERNEL); if (!*r) return -ENOMEM; *d = kzalloc(sizeof(struct platform_device), GFP_KERNEL); if (!*d) { kfree(*r); return -ENOMEM; } (*d)->dev.coherent_dma_mask = DMA_BIT_MASK(32); (*d)->num_resources = 2; (*d)->resource = *r; return 0; } static void __init alchemy_setup_usb(int ctype) { struct resource *res; struct platform_device *pdev; /* setup OHCI0. 
Every variant has one */ if (_new_usbres(&res, &pdev)) return; res[0].start = alchemy_ohci_data[ctype][0]; res[0].end = res[0].start + 0x100 - 1; res[0].flags = IORESOURCE_MEM; res[1].start = alchemy_ohci_data[ctype][1]; res[1].end = res[1].start; res[1].flags = IORESOURCE_IRQ; pdev->name = "ohci-platform"; pdev->id = 0; pdev->dev.dma_mask = &alchemy_ohci_dmamask; pdev->dev.platform_data = &alchemy_ohci_pdata; if (platform_device_register(pdev)) printk(KERN_INFO "Alchemy USB: cannot add OHCI0\n"); /* setup EHCI0: Au1200/Au1300 */ if ((ctype == ALCHEMY_CPU_AU1200) || (ctype == ALCHEMY_CPU_AU1300)) { if (_new_usbres(&res, &pdev)) return; res[0].start = alchemy_ehci_data[ctype][0]; res[0].end = res[0].start + 0x100 - 1; res[0].flags = IORESOURCE_MEM; res[1].start = alchemy_ehci_data[ctype][1]; res[1].end = res[1].start; res[1].flags = IORESOURCE_IRQ; pdev->name = "ehci-platform"; pdev->id = 0; pdev->dev.dma_mask = &alchemy_ehci_dmamask; pdev->dev.platform_data = &alchemy_ehci_pdata; if (platform_device_register(pdev)) printk(KERN_INFO "Alchemy USB: cannot add EHCI0\n"); } /* Au1300: OHCI1 */ if (ctype == ALCHEMY_CPU_AU1300) { if (_new_usbres(&res, &pdev)) return; res[0].start = AU1300_USB_OHCI1_PHYS_ADDR; res[0].end = res[0].start + 0x100 - 1; res[0].flags = IORESOURCE_MEM; res[1].start = AU1300_USB_INT; res[1].end = res[1].start; res[1].flags = IORESOURCE_IRQ; pdev->name = "ohci-platform"; pdev->id = 1; pdev->dev.dma_mask = &alchemy_ohci_dmamask; pdev->dev.platform_data = &alchemy_ohci_pdata; if (platform_device_register(pdev)) printk(KERN_INFO "Alchemy USB: cannot add OHCI1\n"); } } /* Macro to help defining the Ethernet MAC resources */ #define MAC_RES_COUNT 4 /* MAC regs, MAC en, MAC INT, MACDMA regs */ #define MAC_RES(_base, _enable, _irq, _macdma) \ { \ .start = _base, \ .end = _base + 0xffff, \ .flags = IORESOURCE_MEM, \ }, \ { \ .start = _enable, \ .end = _enable + 0x3, \ .flags = IORESOURCE_MEM, \ }, \ { \ .start = _irq, \ .end = _irq, \ .flags = 
IORESOURCE_IRQ \ }, \ { \ .start = _macdma, \ .end = _macdma + 0x1ff, \ .flags = IORESOURCE_MEM, \ } static struct resource au1xxx_eth0_resources[][MAC_RES_COUNT] __initdata = { [ALCHEMY_CPU_AU1000] = { MAC_RES(AU1000_MAC0_PHYS_ADDR, AU1000_MACEN_PHYS_ADDR, AU1000_MAC0_DMA_INT, AU1000_MACDMA0_PHYS_ADDR) }, [ALCHEMY_CPU_AU1500] = { MAC_RES(AU1500_MAC0_PHYS_ADDR, AU1500_MACEN_PHYS_ADDR, AU1500_MAC0_DMA_INT, AU1000_MACDMA0_PHYS_ADDR) }, [ALCHEMY_CPU_AU1100] = { MAC_RES(AU1000_MAC0_PHYS_ADDR, AU1000_MACEN_PHYS_ADDR, AU1100_MAC0_DMA_INT, AU1000_MACDMA0_PHYS_ADDR) }, [ALCHEMY_CPU_AU1550] = { MAC_RES(AU1000_MAC0_PHYS_ADDR, AU1000_MACEN_PHYS_ADDR, AU1550_MAC0_DMA_INT, AU1000_MACDMA0_PHYS_ADDR) }, }; static struct au1000_eth_platform_data au1xxx_eth0_platform_data = { .phy1_search_mac0 = 1, }; static struct platform_device au1xxx_eth0_device = { .name = "au1000-eth", .id = 0, .num_resources = MAC_RES_COUNT, .dev.platform_data = &au1xxx_eth0_platform_data, }; static struct resource au1xxx_eth1_resources[][MAC_RES_COUNT] __initdata = { [ALCHEMY_CPU_AU1000] = { MAC_RES(AU1000_MAC1_PHYS_ADDR, AU1000_MACEN_PHYS_ADDR + 4, AU1000_MAC1_DMA_INT, AU1000_MACDMA1_PHYS_ADDR) }, [ALCHEMY_CPU_AU1500] = { MAC_RES(AU1500_MAC1_PHYS_ADDR, AU1500_MACEN_PHYS_ADDR + 4, AU1500_MAC1_DMA_INT, AU1000_MACDMA1_PHYS_ADDR) }, [ALCHEMY_CPU_AU1550] = { MAC_RES(AU1000_MAC1_PHYS_ADDR, AU1000_MACEN_PHYS_ADDR + 4, AU1550_MAC1_DMA_INT, AU1000_MACDMA1_PHYS_ADDR) }, }; static struct au1000_eth_platform_data au1xxx_eth1_platform_data = { .phy1_search_mac0 = 1, }; static struct platform_device au1xxx_eth1_device = { .name = "au1000-eth", .id = 1, .num_resources = MAC_RES_COUNT, .dev.platform_data = &au1xxx_eth1_platform_data, }; void __init au1xxx_override_eth_cfg(unsigned int port, struct au1000_eth_platform_data *eth_data) { if (!eth_data || port > 1) return; if (port == 0) memcpy(&au1xxx_eth0_platform_data, eth_data, sizeof(struct au1000_eth_platform_data)); else memcpy(&au1xxx_eth1_platform_data, eth_data, 
sizeof(struct au1000_eth_platform_data)); } static void __init alchemy_setup_macs(int ctype) { int ret, i; unsigned char ethaddr[6]; struct resource *macres; /* Handle 1st MAC */ if (alchemy_get_macs(ctype) < 1) return; macres = kmemdup(au1xxx_eth0_resources[ctype], sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL); if (!macres) { printk(KERN_INFO "Alchemy: no memory for MAC0 resources\n"); return; } au1xxx_eth0_device.resource = macres; i = prom_get_ethernet_addr(ethaddr); if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac)) memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6); ret = platform_device_register(&au1xxx_eth0_device); if (ret) printk(KERN_INFO "Alchemy: failed to register MAC0\n"); /* Handle 2nd MAC */ if (alchemy_get_macs(ctype) < 2) return; macres = kmemdup(au1xxx_eth1_resources[ctype], sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL); if (!macres) { printk(KERN_INFO "Alchemy: no memory for MAC1 resources\n"); return; } au1xxx_eth1_device.resource = macres; ethaddr[5] += 1; /* next addr for 2nd MAC */ if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac)) memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6); /* Register second MAC if enabled in pinfunc */ if (!(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2)) { ret = platform_device_register(&au1xxx_eth1_device); if (ret) printk(KERN_INFO "Alchemy: failed to register MAC1\n"); } } static int __init au1xxx_platform_init(void) { int ctype = alchemy_get_cputype(); alchemy_setup_uarts(ctype); alchemy_setup_macs(ctype); alchemy_setup_usb(ctype); return 0; } arch_initcall(au1xxx_platform_init);
gpl-2.0
iHateWEBos/vigor_aosp_kernel
arch/arm/mach-shmobile/intc-sh7377.c
2842
24467
/* * sh7377 processor support - INTC hardware block * * Copyright (C) 2010 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/sh_intc.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> enum { UNUSED_INTCA = 0, ENABLED, DISABLED, /* interrupt sources INTCA */ IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A, IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A, IRQ16A, IRQ17A, IRQ18A, IRQ19A, IRQ20A, IRQ21A, IRQ22A, IRQ23A, IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A, DIRC, _2DG, CRYPT_STD, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1, AP_ARM_IRQPMU, AP_ARM_COMMTX, AP_ARM_COMMRX, MFI_MFIM, MFI_MFIS, BBIF1, BBIF2, USBDMAC_USHDMI, USBHS_USHI0, USBHS_USHI1, _3DG_SGX540, CMT1_CMT10, CMT1_CMT11, CMT1_CMT12, CMT1_CMT13, CMT2, CMT3, KEYSC_KEY, SCIFA0, SCIFA1, SCIFA2, SCIFA3, MSIOF2, MSIOF1, SCIFA4, SCIFA5, SCIFB, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, SDHI0, SDHI1, MSU_MSU, MSU_MSU2, IRREM, MSUG, IRDA, TPU0, TPU1, TPU2, TPU3, TPU4, LCRC, PINTCA_PINT1, PINTCA_PINT2, TTI20, MISTY, DDM, RWDT0, RWDT1, DMAC_1_DEI0, DMAC_1_DEI1, DMAC_1_DEI2, DMAC_1_DEI3, DMAC_2_DEI4, DMAC_2_DEI5, DMAC_2_DADERR, DMAC2_1_DEI0, DMAC2_1_DEI1, DMAC2_1_DEI2, DMAC2_1_DEI3, DMAC2_2_DEI4, 
DMAC2_2_DEI5, DMAC2_2_DADERR, DMAC3_1_DEI0, DMAC3_1_DEI1, DMAC3_1_DEI2, DMAC3_1_DEI3, DMAC3_2_DEI4, DMAC3_2_DEI5, DMAC3_2_DADERR, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM, ICUSB_ICUSB0, ICUSB_ICUSB1, ICUDMC_ICUDMC1, ICUDMC_ICUDMC2, SPU2_SPU0, SPU2_SPU1, FSI, FMSI, SCUV, IPMMU_IPMMUB, AP_ARM_CTIIRQ, AP_ARM_DMAEXTERRIRQ, AP_ARM_DMAIRQ, AP_ARM_DMASIRQ, MFIS2, CPORTR2S, CMT14, CMT15, SCIFA6, /* interrupt groups INTCA */ DMAC_1, DMAC_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT, AP_ARM1, AP_ARM2, USBHS, SPU2, FLCTL, IIC1, ICUSB, ICUDMC }; static struct intc_vect intca_vectors[] __initdata = { INTC_VECT(IRQ0A, 0x0200), INTC_VECT(IRQ1A, 0x0220), INTC_VECT(IRQ2A, 0x0240), INTC_VECT(IRQ3A, 0x0260), INTC_VECT(IRQ4A, 0x0280), INTC_VECT(IRQ5A, 0x02a0), INTC_VECT(IRQ6A, 0x02c0), INTC_VECT(IRQ7A, 0x02e0), INTC_VECT(IRQ8A, 0x0300), INTC_VECT(IRQ9A, 0x0320), INTC_VECT(IRQ10A, 0x0340), INTC_VECT(IRQ11A, 0x0360), INTC_VECT(IRQ12A, 0x0380), INTC_VECT(IRQ13A, 0x03a0), INTC_VECT(IRQ14A, 0x03c0), INTC_VECT(IRQ15A, 0x03e0), INTC_VECT(IRQ16A, 0x3200), INTC_VECT(IRQ17A, 0x3220), INTC_VECT(IRQ18A, 0x3240), INTC_VECT(IRQ19A, 0x3260), INTC_VECT(IRQ20A, 0x3280), INTC_VECT(IRQ31A, 0x32a0), INTC_VECT(IRQ22A, 0x32c0), INTC_VECT(IRQ23A, 0x32e0), INTC_VECT(IRQ24A, 0x3300), INTC_VECT(IRQ25A, 0x3320), INTC_VECT(IRQ26A, 0x3340), INTC_VECT(IRQ27A, 0x3360), INTC_VECT(IRQ28A, 0x3380), INTC_VECT(IRQ29A, 0x33a0), INTC_VECT(IRQ30A, 0x33c0), INTC_VECT(IRQ31A, 0x33e0), INTC_VECT(DIRC, 0x0560), INTC_VECT(_2DG, 0x05e0), INTC_VECT(CRYPT_STD, 0x0700), INTC_VECT(IIC1_ALI1, 0x0780), INTC_VECT(IIC1_TACKI1, 0x07a0), INTC_VECT(IIC1_WAITI1, 0x07c0), INTC_VECT(IIC1_DTEI1, 0x07e0), INTC_VECT(AP_ARM_IRQPMU, 0x0800), INTC_VECT(AP_ARM_COMMTX, 0x0840), INTC_VECT(AP_ARM_COMMRX, 0x0860), INTC_VECT(MFI_MFIM, 0x0900), INTC_VECT(MFI_MFIS, 0x0920), INTC_VECT(BBIF1, 0x0940), INTC_VECT(BBIF2, 0x0960), INTC_VECT(USBDMAC_USHDMI, 0x0a00), INTC_VECT(USBHS_USHI0, 0x0a20), INTC_VECT(USBHS_USHI1, 0x0a40), INTC_VECT(_3DG_SGX540, 0x0a60), 
INTC_VECT(CMT1_CMT10, 0x0b00), INTC_VECT(CMT1_CMT11, 0x0b20), INTC_VECT(CMT1_CMT12, 0x0b40), INTC_VECT(CMT1_CMT13, 0x0b60), INTC_VECT(CMT2, 0x0b80), INTC_VECT(CMT3, 0x0ba0), INTC_VECT(KEYSC_KEY, 0x0be0), INTC_VECT(SCIFA0, 0x0c00), INTC_VECT(SCIFA1, 0x0c20), INTC_VECT(SCIFA2, 0x0c40), INTC_VECT(SCIFA3, 0x0c60), INTC_VECT(MSIOF2, 0x0c80), INTC_VECT(MSIOF1, 0x0d00), INTC_VECT(SCIFA4, 0x0d20), INTC_VECT(SCIFA5, 0x0d40), INTC_VECT(SCIFB, 0x0d60), INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0), INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0), INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20), INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60), INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0), INTC_VECT(SDHI1, 0x0ec0), INTC_VECT(SDHI1, 0x0ee0), INTC_VECT(MSU_MSU, 0x0f20), INTC_VECT(MSU_MSU2, 0x0f40), INTC_VECT(IRREM, 0x0f60), INTC_VECT(MSUG, 0x0fa0), INTC_VECT(IRDA, 0x0480), INTC_VECT(TPU0, 0x04a0), INTC_VECT(TPU1, 0x04c0), INTC_VECT(TPU2, 0x04e0), INTC_VECT(TPU3, 0x0500), INTC_VECT(TPU4, 0x0520), INTC_VECT(LCRC, 0x0540), INTC_VECT(PINTCA_PINT1, 0x1000), INTC_VECT(PINTCA_PINT2, 0x1020), INTC_VECT(TTI20, 0x1100), INTC_VECT(MISTY, 0x1120), INTC_VECT(DDM, 0x1140), INTC_VECT(RWDT0, 0x1280), INTC_VECT(RWDT1, 0x12a0), INTC_VECT(DMAC_1_DEI0, 0x2000), INTC_VECT(DMAC_1_DEI1, 0x2020), INTC_VECT(DMAC_1_DEI2, 0x2040), INTC_VECT(DMAC_1_DEI3, 0x2060), INTC_VECT(DMAC_2_DEI4, 0x2080), INTC_VECT(DMAC_2_DEI5, 0x20a0), INTC_VECT(DMAC_2_DADERR, 0x20c0), INTC_VECT(DMAC2_1_DEI0, 0x2100), INTC_VECT(DMAC2_1_DEI1, 0x2120), INTC_VECT(DMAC2_1_DEI2, 0x2140), INTC_VECT(DMAC2_1_DEI3, 0x2160), INTC_VECT(DMAC2_2_DEI4, 0x2180), INTC_VECT(DMAC2_2_DEI5, 0x21a0), INTC_VECT(DMAC2_2_DADERR, 0x21c0), INTC_VECT(DMAC3_1_DEI0, 0x2200), INTC_VECT(DMAC3_1_DEI1, 0x2220), INTC_VECT(DMAC3_1_DEI2, 0x2240), INTC_VECT(DMAC3_1_DEI3, 0x2260), INTC_VECT(DMAC3_2_DEI4, 0x2280), INTC_VECT(DMAC3_2_DEI5, 0x22a0), INTC_VECT(DMAC3_2_DADERR, 0x22c0), INTC_VECT(SHWYSTAT_RT, 0x1300), 
INTC_VECT(SHWYSTAT_HS, 0x1d20), INTC_VECT(SHWYSTAT_COM, 0x1340), INTC_VECT(ICUSB_ICUSB0, 0x1700), INTC_VECT(ICUSB_ICUSB1, 0x1720), INTC_VECT(ICUDMC_ICUDMC1, 0x1780), INTC_VECT(ICUDMC_ICUDMC2, 0x17a0), INTC_VECT(SPU2_SPU0, 0x1800), INTC_VECT(SPU2_SPU1, 0x1820), INTC_VECT(FSI, 0x1840), INTC_VECT(FMSI, 0x1860), INTC_VECT(SCUV, 0x1880), INTC_VECT(IPMMU_IPMMUB, 0x1900), INTC_VECT(AP_ARM_CTIIRQ, 0x1980), INTC_VECT(AP_ARM_DMAEXTERRIRQ, 0x19a0), INTC_VECT(AP_ARM_DMAIRQ, 0x19c0), INTC_VECT(AP_ARM_DMASIRQ, 0x19e0), INTC_VECT(MFIS2, 0x1a00), INTC_VECT(CPORTR2S, 0x1a20), INTC_VECT(CMT14, 0x1a40), INTC_VECT(CMT15, 0x1a60), INTC_VECT(SCIFA6, 0x1a80), }; static struct intc_group intca_groups[] __initdata = { INTC_GROUP(DMAC_1, DMAC_1_DEI0, DMAC_1_DEI1, DMAC_1_DEI2, DMAC_1_DEI3), INTC_GROUP(DMAC_2, DMAC_2_DEI4, DMAC_2_DEI5, DMAC_2_DADERR), INTC_GROUP(DMAC2_1, DMAC2_1_DEI0, DMAC2_1_DEI1, DMAC2_1_DEI2, DMAC2_1_DEI3), INTC_GROUP(DMAC2_2, DMAC2_2_DEI4, DMAC2_2_DEI5, DMAC2_2_DADERR), INTC_GROUP(DMAC3_1, DMAC3_1_DEI0, DMAC3_1_DEI1, DMAC3_1_DEI2, DMAC3_1_DEI3), INTC_GROUP(DMAC3_2, DMAC3_2_DEI4, DMAC3_2_DEI5, DMAC3_2_DADERR), INTC_GROUP(AP_ARM1, AP_ARM_IRQPMU, AP_ARM_COMMTX, AP_ARM_COMMTX), INTC_GROUP(USBHS, USBHS_USHI0, USBHS_USHI1), INTC_GROUP(SPU2, SPU2_SPU0, SPU2_SPU1), INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1), INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM), INTC_GROUP(ICUSB, ICUSB_ICUSB0, ICUSB_ICUSB1), INTC_GROUP(ICUDMC, ICUDMC_ICUDMC1, ICUDMC_ICUDMC2), }; static struct intc_mask_reg intca_mask_registers[] __initdata = { { 0xe6900040, 0xe6900060, 8, /* INTMSK00A / INTMSKCLR00A */ { IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A } }, { 0xe6900044, 0xe6900064, 8, /* INTMSK10A / INTMSKCLR10A */ { IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } }, { 0xe6900048, 0xe6900068, 8, /* INTMSK20A / INTMSKCLR20A */ { IRQ16A, IRQ17A, IRQ18A, IRQ19A, IRQ20A, 
IRQ21A, IRQ22A, IRQ23A } }, { 0xe690004c, 0xe690006c, 8, /* INTMSK30A / INTMSKCLR30A */ { IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } }, { 0xe6940080, 0xe69400c0, 8, /* IMR0A / IMCR0A */ { DMAC2_1_DEI3, DMAC2_1_DEI2, DMAC2_1_DEI1, DMAC2_1_DEI0, AP_ARM_IRQPMU, 0, AP_ARM_COMMTX, AP_ARM_COMMRX } }, { 0xe6940084, 0xe69400c4, 8, /* IMR1A / IMCR1A */ { _2DG, CRYPT_STD, DIRC, 0, DMAC_1_DEI3, DMAC_1_DEI2, DMAC_1_DEI1, DMAC_1_DEI0 } }, { 0xe6940088, 0xe69400c8, 8, /* IMR2A / IMCR2A */ { PINTCA_PINT1, PINTCA_PINT2, 0, 0, BBIF1, BBIF2, MFI_MFIS, MFI_MFIM } }, { 0xe694008c, 0xe69400cc, 8, /* IMR3A / IMCR3A */ { DMAC3_1_DEI3, DMAC3_1_DEI2, DMAC3_1_DEI1, DMAC3_1_DEI0, DMAC3_2_DADERR, DMAC3_2_DEI5, DMAC3_2_DEI4, IRDA } }, { 0xe6940090, 0xe69400d0, 8, /* IMR4A / IMCR4A */ { DDM, 0, 0, 0, 0, 0, 0, 0 } }, { 0xe6940094, 0xe69400d4, 8, /* IMR5A / IMCR5A */ { KEYSC_KEY, DMAC_2_DADERR, DMAC_2_DEI5, DMAC_2_DEI4, SCIFA3, SCIFA2, SCIFA1, SCIFA0 } }, { 0xe6940098, 0xe69400d8, 8, /* IMR6A / IMCR6A */ { SCIFB, SCIFA5, SCIFA4, MSIOF1, 0, 0, MSIOF2, 0 } }, { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ { DISABLED, ENABLED, ENABLED, ENABLED, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ { DISABLED, ENABLED, ENABLED, ENABLED, TTI20, USBDMAC_USHDMI, 0, MSUG } }, { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, CMT2, USBHS_USHI1, USBHS_USHI0, _3DG_SGX540 } }, { 0xe69400a8, 0xe69400e8, 8, /* IMR10A / IMCR10A */ { 0, DMAC2_2_DADERR, DMAC2_2_DEI5, DMAC2_2_DEI4, 0, 0, 0, 0 } }, { 0xe69400ac, 0xe69400ec, 8, /* IMR11A / IMCR11A */ { IIC1_DTEI1, IIC1_WAITI1, IIC1_TACKI1, IIC1_ALI1, LCRC, MSU_MSU2, IRREM, MSU_MSU } }, { 0xe69400b0, 0xe69400f0, 8, /* IMR12A / IMCR12A */ { 0, 0, TPU0, TPU1, TPU2, TPU3, TPU4, 0 } }, { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */ { 0, 0, 0, 0, MISTY, CMT3, RWDT1, RWDT0 } }, { 0xe6950080, 0xe69500c0, 8, /* IMR0A3 / IMCR0A3 */ { 
SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM, 0, 0, 0, 0, 0 } }, { 0xe6950090, 0xe69500d0, 8, /* IMR4A3 / IMCR4A3 */ { ICUSB_ICUSB0, ICUSB_ICUSB1, 0, 0, ICUDMC_ICUDMC1, ICUDMC_ICUDMC2, 0, 0 } }, { 0xe6950094, 0xe69500d4, 8, /* IMR5A3 / IMCR5A3 */ { SPU2_SPU0, SPU2_SPU1, FSI, FMSI, SCUV, 0, 0, 0 } }, { 0xe6950098, 0xe69500d8, 8, /* IMR6A3 / IMCR6A3 */ { IPMMU_IPMMUB, 0, 0, 0, AP_ARM_CTIIRQ, AP_ARM_DMAEXTERRIRQ, AP_ARM_DMAIRQ, AP_ARM_DMASIRQ } }, { 0xe695009c, 0xe69500dc, 8, /* IMR7A3 / IMCR7A3 */ { MFIS2, CPORTR2S, CMT14, CMT15, SCIFA6, 0, 0, 0 } }, }; static struct intc_prio_reg intca_prio_registers[] __initdata = { { 0xe6900010, 0, 32, 4, /* INTPRI00A */ { IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A } }, { 0xe6900014, 0, 32, 4, /* INTPRI10A */ { IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } }, { 0xe6900018, 0, 32, 4, /* INTPRI10A */ { IRQ16A, IRQ17A, IRQ18A, IRQ19A, IRQ20A, IRQ21A, IRQ22A, IRQ23A } }, { 0xe690001c, 0, 32, 4, /* INTPRI30A */ { IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } }, { 0xe6940000, 0, 16, 4, /* IPRAA */ { DMAC3_1, DMAC3_2, CMT2, LCRC } }, { 0xe6940004, 0, 16, 4, /* IPRBA */ { IRDA, 0, BBIF1, BBIF2 } }, { 0xe6940008, 0, 16, 4, /* IPRCA */ { _2DG, CRYPT_STD, CMT1_CMT11, AP_ARM1 } }, { 0xe694000c, 0, 16, 4, /* IPRDA */ { PINTCA_PINT1, PINTCA_PINT2, CMT1_CMT12, TPU4 } }, { 0xe6940010, 0, 16, 4, /* IPREA */ { DMAC_1, MFI_MFIS, MFI_MFIM, USBHS } }, { 0xe6940014, 0, 16, 4, /* IPRFA */ { KEYSC_KEY, DMAC_2, _3DG_SGX540, CMT1_CMT10 } }, { 0xe6940018, 0, 16, 4, /* IPRGA */ { SCIFA0, SCIFA1, SCIFA2, SCIFA3 } }, { 0xe694001c, 0, 16, 4, /* IPRGH */ { MSIOF2, USBDMAC_USHDMI, FLCTL, SDHI0 } }, { 0xe6940020, 0, 16, 4, /* IPRIA */ { MSIOF1, SCIFA4, MSU_MSU, IIC1 } }, { 0xe6940024, 0, 16, 4, /* IPRJA */ { DMAC2_1, DMAC2_2, MSUG, TTI20 } }, { 0xe6940028, 0, 16, 4, /* IPRKA */ { 0, CMT1_CMT13, IRREM, SDHI1 } }, { 0xe694002c, 0, 16, 4, /* IPRLA */ { TPU0, TPU1, TPU2, TPU3 } }, { 0xe6940030, 0, 16, 4, /* IPRMA */ { MISTY, 
CMT3, RWDT1, RWDT0 } }, { 0xe6940034, 0, 16, 4, /* IPRNA */ { SCIFB, SCIFA5, 0, DDM } }, { 0xe6940038, 0, 16, 4, /* IPROA */ { 0, 0, DIRC, 0 } }, { 0xe6950000, 0, 16, 4, /* IPRAA3 */ { SHWYSTAT, 0, 0, 0 } }, { 0xe6950020, 0, 16, 4, /* IPRIA3 */ { ICUSB, 0, 0, 0 } }, { 0xe6950024, 0, 16, 4, /* IPRJA3 */ { ICUDMC, 0, 0, 0 } }, { 0xe6950028, 0, 16, 4, /* IPRKA3 */ { SPU2, 0, FSI, FMSI } }, { 0xe695002c, 0, 16, 4, /* IPRLA3 */ { SCUV, 0, 0, 0 } }, { 0xe6950030, 0, 16, 4, /* IPRMA3 */ { IPMMU_IPMMUB, 0, 0, 0 } }, { 0xe6950034, 0, 16, 4, /* IPRNA3 */ { AP_ARM2, 0, 0, 0 } }, { 0xe6950038, 0, 16, 4, /* IPROA3 */ { MFIS2, CPORTR2S, CMT14, CMT15 } }, { 0xe694003c, 0, 16, 4, /* IPRPA3 */ { SCIFA6, 0, 0, 0 } }, }; static struct intc_sense_reg intca_sense_registers[] __initdata = { { 0xe6900000, 16, 2, /* ICR1A */ { IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A } }, { 0xe6900004, 16, 2, /* ICR2A */ { IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } }, { 0xe6900008, 16, 2, /* ICR3A */ { IRQ16A, IRQ17A, IRQ18A, IRQ19A, IRQ20A, IRQ21A, IRQ22A, IRQ23A } }, { 0xe690000c, 16, 2, /* ICR4A */ { IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } }, }; static struct intc_mask_reg intca_ack_registers[] __initdata = { { 0xe6900020, 0, 8, /* INTREQ00A */ { IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A } }, { 0xe6900024, 0, 8, /* INTREQ10A */ { IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } }, { 0xe6900028, 0, 8, /* INTREQ20A */ { IRQ16A, IRQ17A, IRQ18A, IRQ19A, IRQ20A, IRQ21A, IRQ22A, IRQ23A } }, { 0xe690002c, 0, 8, /* INTREQ30A */ { IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } }, }; static struct intc_desc intca_desc __initdata = { .name = "sh7377-intca", .force_enable = ENABLED, .force_disable = DISABLED, .hw = INTC_HW_DESC(intca_vectors, intca_groups, intca_mask_registers, intca_prio_registers, intca_sense_registers, intca_ack_registers), }; /* this macro ignore entry which is also in INTCA */ #define 
__IGNORE(a...) #define __IGNORE0(a...) 0 enum { UNUSED_INTCS = 0, INTCS, /* interrupt sources INTCS */ VEU_VEU0, VEU_VEU1, VEU_VEU2, VEU_VEU3, RTDMAC1_1_DEI0, RTDMAC1_1_DEI1, RTDMAC1_1_DEI2, RTDMAC1_1_DEI3, CEU, BEU_BEU0, BEU_BEU1, BEU_BEU2, __IGNORE(MFI) __IGNORE(BBIF2) VPU, TSIF1, __IGNORE(SGX540) _2DDMAC, IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2, IPMMU_IPMMUR, IPMMU_IPMMUR2, RTDMAC1_2_DEI4, RTDMAC1_2_DEI5, RTDMAC1_2_DADERR, __IGNORE(KEYSC) __IGNORE(TTI20) __IGNORE(MSIOF) IIC0_ALI0, IIC0_TACKI0, IIC0_WAITI0, IIC0_DTEI0, TMU_TUNI0, TMU_TUNI1, TMU_TUNI2, CMT0, TSIF0, __IGNORE(CMT2) LMB, __IGNORE(MSUG) __IGNORE(MSU_MSU, MSU_MSU2) __IGNORE(CTI) MVI3, __IGNORE(RWDT0) __IGNORE(RWDT1) ICB, PEP, ASA, __IGNORE(_2DG) HQE, JPU, LCDC0, __IGNORE(LCRC) RTDMAC2_1_DEI0, RTDMAC2_1_DEI1, RTDMAC2_1_DEI2, RTDMAC2_1_DEI3, RTDMAC2_2_DEI4, RTDMAC2_2_DEI5, RTDMAC2_2_DADERR, FRC, LCDC1, CSIRX, DSITX_DSITX0, DSITX_DSITX1, __IGNORE(SPU2_SPU0, SPU2_SPU1) __IGNORE(FSI) __IGNORE(FMSI) __IGNORE(SCUV) TMU1_TUNI10, TMU1_TUNI11, TMU1_TUNI12, TSIF2, CMT4, __IGNORE(MFIS2) CPORTS2R, /* interrupt groups INTCS */ RTDMAC1_1, RTDMAC1_2, VEU, BEU, IIC0, __IGNORE(MSU) IPMMU, IIC2, RTDMAC2_1, RTDMAC2_2, DSITX, __IGNORE(SPU2) TMU1, }; #define INTCS_INTVECT 0x0F80 static struct intc_vect intcs_vectors[] __initdata = { INTCS_VECT(VEU_VEU0, 0x0700), INTCS_VECT(VEU_VEU1, 0x0720), INTCS_VECT(VEU_VEU2, 0x0740), INTCS_VECT(VEU_VEU3, 0x0760), INTCS_VECT(RTDMAC1_1_DEI0, 0x0800), INTCS_VECT(RTDMAC1_1_DEI1, 0x0820), INTCS_VECT(RTDMAC1_1_DEI2, 0x0840), INTCS_VECT(RTDMAC1_1_DEI3, 0x0860), INTCS_VECT(CEU, 0x0880), INTCS_VECT(BEU_BEU0, 0x08A0), INTCS_VECT(BEU_BEU1, 0x08C0), INTCS_VECT(BEU_BEU2, 0x08E0), __IGNORE(INTCS_VECT(MFI, 0x0900)) __IGNORE(INTCS_VECT(BBIF2, 0x0960)) INTCS_VECT(VPU, 0x0980), INTCS_VECT(TSIF1, 0x09A0), __IGNORE(INTCS_VECT(SGX540, 0x09E0)) INTCS_VECT(_2DDMAC, 0x0A00), INTCS_VECT(IIC2_ALI2, 0x0A80), INTCS_VECT(IIC2_TACKI2, 0x0AA0), INTCS_VECT(IIC2_WAITI2, 0x0AC0), INTCS_VECT(IIC2_DTEI2, 0x0AE0), 
INTCS_VECT(IPMMU_IPMMUR, 0x0B00), INTCS_VECT(IPMMU_IPMMUR2, 0x0B20), INTCS_VECT(RTDMAC1_2_DEI4, 0x0B80), INTCS_VECT(RTDMAC1_2_DEI5, 0x0BA0), INTCS_VECT(RTDMAC1_2_DADERR, 0x0BC0), __IGNORE(INTCS_VECT(KEYSC 0x0BE0)) __IGNORE(INTCS_VECT(TTI20, 0x0C80)) __IGNORE(INTCS_VECT(MSIOF, 0x0D20)) INTCS_VECT(IIC0_ALI0, 0x0E00), INTCS_VECT(IIC0_TACKI0, 0x0E20), INTCS_VECT(IIC0_WAITI0, 0x0E40), INTCS_VECT(IIC0_DTEI0, 0x0E60), INTCS_VECT(TMU_TUNI0, 0x0E80), INTCS_VECT(TMU_TUNI1, 0x0EA0), INTCS_VECT(TMU_TUNI2, 0x0EC0), INTCS_VECT(CMT0, 0x0F00), INTCS_VECT(TSIF0, 0x0F20), __IGNORE(INTCS_VECT(CMT2, 0x0F40)) INTCS_VECT(LMB, 0x0F60), __IGNORE(INTCS_VECT(MSUG, 0x0F80)) __IGNORE(INTCS_VECT(MSU_MSU, 0x0FA0)) __IGNORE(INTCS_VECT(MSU_MSU2, 0x0FC0)) __IGNORE(INTCS_VECT(CTI, 0x0400)) INTCS_VECT(MVI3, 0x0420), __IGNORE(INTCS_VECT(RWDT0, 0x0440)) __IGNORE(INTCS_VECT(RWDT1, 0x0460)) INTCS_VECT(ICB, 0x0480), INTCS_VECT(PEP, 0x04A0), INTCS_VECT(ASA, 0x04C0), __IGNORE(INTCS_VECT(_2DG, 0x04E0)) INTCS_VECT(HQE, 0x0540), INTCS_VECT(JPU, 0x0560), INTCS_VECT(LCDC0, 0x0580), __IGNORE(INTCS_VECT(LCRC, 0x05A0)) INTCS_VECT(RTDMAC2_1_DEI0, 0x1300), INTCS_VECT(RTDMAC2_1_DEI1, 0x1320), INTCS_VECT(RTDMAC2_1_DEI2, 0x1340), INTCS_VECT(RTDMAC2_1_DEI3, 0x1360), INTCS_VECT(RTDMAC2_2_DEI4, 0x1380), INTCS_VECT(RTDMAC2_2_DEI5, 0x13A0), INTCS_VECT(RTDMAC2_2_DADERR, 0x13C0), INTCS_VECT(FRC, 0x1700), INTCS_VECT(LCDC1, 0x1780), INTCS_VECT(CSIRX, 0x17A0), INTCS_VECT(DSITX_DSITX0, 0x17C0), INTCS_VECT(DSITX_DSITX1, 0x17E0), __IGNORE(INTCS_VECT(SPU2_SPU0, 0x1800)) __IGNORE(INTCS_VECT(SPU2_SPU1, 0x1820)) __IGNORE(INTCS_VECT(FSI, 0x1840)) __IGNORE(INTCS_VECT(FMSI, 0x1860)) __IGNORE(INTCS_VECT(SCUV, 0x1880)) INTCS_VECT(TMU1_TUNI10, 0x1900), INTCS_VECT(TMU1_TUNI11, 0x1920), INTCS_VECT(TMU1_TUNI12, 0x1940), INTCS_VECT(TSIF2, 0x1960), INTCS_VECT(CMT4, 0x1980), __IGNORE(INTCS_VECT(MFIS2, 0x1A00)) INTCS_VECT(CPORTS2R, 0x1A20), INTC_VECT(INTCS, INTCS_INTVECT), }; static struct intc_group intcs_groups[] __initdata = { 
INTC_GROUP(RTDMAC1_1, RTDMAC1_1_DEI0, RTDMAC1_1_DEI1, RTDMAC1_1_DEI2, RTDMAC1_1_DEI3), INTC_GROUP(RTDMAC1_2, RTDMAC1_2_DEI4, RTDMAC1_2_DEI5, RTDMAC1_2_DADERR), INTC_GROUP(VEU, VEU_VEU0, VEU_VEU1, VEU_VEU2, VEU_VEU3), INTC_GROUP(BEU, BEU_BEU0, BEU_BEU1, BEU_BEU2), INTC_GROUP(IIC0, IIC0_ALI0, IIC0_TACKI0, IIC0_WAITI0, IIC0_DTEI0), __IGNORE(INTC_GROUP(MSU, MSU_MSU, MSU_MSU2)) INTC_GROUP(IPMMU, IPMMU_IPMMUR, IPMMU_IPMMUR2), INTC_GROUP(IIC2, IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2), INTC_GROUP(RTDMAC2_1, RTDMAC2_1_DEI0, RTDMAC2_1_DEI1, RTDMAC2_1_DEI2, RTDMAC2_1_DEI3), INTC_GROUP(RTDMAC2_2, RTDMAC2_2_DEI4, RTDMAC2_2_DEI5, RTDMAC2_2_DADERR), INTC_GROUP(DSITX, DSITX_DSITX0, DSITX_DSITX1), __IGNORE(INTC_GROUP(SPU2, SPU2_SPU0, SPU2_SPU1)) INTC_GROUP(TMU1, TMU1_TUNI10, TMU1_TUNI11, TMU1_TUNI12), }; static struct intc_mask_reg intcs_mask_registers[] __initdata = { { 0xE6940184, 0xE69401C4, 8, /* IMR1AS / IMCR1AS */ { BEU_BEU2, BEU_BEU1, BEU_BEU0, CEU, VEU_VEU3, VEU_VEU2, VEU_VEU1, VEU_VEU0 } }, { 0xE6940188, 0xE69401C8, 8, /* IMR2AS / IMCR2AS */ { 0, 0, 0, VPU, __IGNORE0(BBIF2), 0, 0, __IGNORE0(MFI) } }, { 0xE694018C, 0xE69401CC, 8, /* IMR3AS / IMCR3AS */ { 0, 0, 0, _2DDMAC, __IGNORE0(_2DG), ASA, PEP, ICB } }, { 0xE6940190, 0xE69401D0, 8, /* IMR4AS / IMCR4AS */ { 0, 0, MVI3, __IGNORE0(CTI), JPU, HQE, __IGNORE0(LCRC), LCDC0 } }, { 0xE6940194, 0xE69401D4, 8, /* IMR5AS / IMCR5AS */ { __IGNORE0(KEYSC), RTDMAC1_2_DADERR, RTDMAC1_2_DEI5, RTDMAC1_2_DEI4, RTDMAC1_1_DEI3, RTDMAC1_1_DEI2, RTDMAC1_1_DEI1, RTDMAC1_1_DEI0 } }, __IGNORE({ 0xE6940198, 0xE69401D8, 8, /* IMR6AS / IMCR6AS */ { 0, 0, MSIOF, 0, SGX540, 0, TTI20, 0 } }) { 0xE694019C, 0xE69401DC, 8, /* IMR7AS / IMCR7AS */ { 0, TMU_TUNI2, TMU_TUNI1, TMU_TUNI0, 0, 0, 0, 0 } }, __IGNORE({ 0xE69401A0, 0xE69401E0, 8, /* IMR8AS / IMCR8AS */ { 0, 0, 0, 0, 0, MSU_MSU, MSU_MSU2, MSUG } }) { 0xE69401A4, 0xE69401E4, 8, /* IMR9AS / IMCR9AS */ { __IGNORE0(RWDT1), __IGNORE0(RWDT0), __IGNORE0(CMT2), CMT0, IIC2_DTEI2, IIC2_WAITI2, 
IIC2_TACKI2, IIC2_ALI2 } }, { 0xE69401A8, 0xE69401E8, 8, /* IMR10AS / IMCR10AS */ { 0, 0, IPMMU_IPMMUR, IPMMU_IPMMUR2, 0, 0, 0, 0 } }, { 0xE69401AC, 0xE69401EC, 8, /* IMR11AS / IMCR11AS */ { IIC0_DTEI0, IIC0_WAITI0, IIC0_TACKI0, IIC0_ALI0, 0, TSIF1, LMB, TSIF0 } }, { 0xE6950180, 0xE69501C0, 8, /* IMR0AS3 / IMCR0AS3 */ { RTDMAC2_1_DEI0, RTDMAC2_1_DEI1, RTDMAC2_1_DEI2, RTDMAC2_1_DEI3, RTDMAC2_2_DEI4, RTDMAC2_2_DEI5, RTDMAC2_2_DADERR, 0 } }, { 0xE6950190, 0xE69501D0, 8, /* IMR4AS3 / IMCR4AS3 */ { FRC, 0, 0, 0, LCDC1, CSIRX, DSITX_DSITX0, DSITX_DSITX1 } }, __IGNORE({ 0xE6950194, 0xE69501D4, 8, /* IMR5AS3 / IMCR5AS3 */ {SPU2_SPU0, SPU2_SPU1, FSI, FMSI, SCUV, 0, 0, 0 } }) { 0xE6950198, 0xE69501D8, 8, /* IMR6AS3 / IMCR6AS3 */ { TMU1_TUNI10, TMU1_TUNI11, TMU1_TUNI12, TSIF2, CMT4, 0, 0, 0 } }, { 0xE695019C, 0xE69501DC, 8, /* IMR7AS3 / IMCR7AS3 */ { __IGNORE0(MFIS2), CPORTS2R, 0, 0, 0, 0, 0, 0 } }, { 0xFFD20104, 0, 16, /* INTAMASK */ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, INTCS } } }; static struct intc_prio_reg intcs_prio_registers[] __initdata = { /* IPRAS */ { 0xFFD20000, 0, 16, 4, { __IGNORE0(CTI), MVI3, _2DDMAC, ICB } }, /* IPRBS */ { 0xFFD20004, 0, 16, 4, { JPU, LCDC0, 0, __IGNORE0(LCRC) } }, /* IPRCS */ __IGNORE({ 0xFFD20008, 0, 16, 4, { BBIF2, 0, 0, 0 } }) /* IPRES */ { 0xFFD20010, 0, 16, 4, { RTDMAC1_1, CEU, __IGNORE0(MFI), VPU } }, /* IPRFS */ { 0xFFD20014, 0, 16, 4, { __IGNORE0(KEYSC), RTDMAC1_2, __IGNORE0(CMT2), CMT0 } }, /* IPRGS */ { 0xFFD20018, 0, 16, 4, { TMU_TUNI0, TMU_TUNI1, TMU_TUNI2, TSIF1 } }, /* IPRHS */ { 0xFFD2001C, 0, 16, 4, { __IGNORE0(TTI20), 0, VEU, BEU } }, /* IPRIS */ { 0xFFD20020, 0, 16, 4, { 0, __IGNORE0(MSIOF), TSIF0, IIC0 } }, /* IPRJS */ __IGNORE({ 0xFFD20024, 0, 16, 4, { 0, SGX540, MSUG, MSU } }) /* IPRKS */ { 0xFFD20028, 0, 16, 4, { __IGNORE0(_2DG), ASA, LMB, PEP } }, /* IPRLS */ { 0xFFD2002C, 0, 16, 4, { IPMMU, 0, 0, HQE } }, /* IPRMS */ { 0xFFD20030, 0, 16, 4, { IIC2, 0, __IGNORE0(RWDT1), __IGNORE0(RWDT0) } }, /* IPRAS3 */ { 
0xFFD50000, 0, 16, 4, { RTDMAC2_1, 0, 0, 0 } }, /* IPRBS3 */ { 0xFFD50004, 0, 16, 4, { RTDMAC2_2, 0, 0, 0 } }, /* IPRIS3 */ { 0xFFD50020, 0, 16, 4, { FRC, 0, 0, 0 } }, /* IPRJS3 */ { 0xFFD50024, 0, 16, 4, { LCDC1, CSIRX, DSITX, 0 } }, /* IPRKS3 */ __IGNORE({ 0xFFD50028, 0, 16, 4, { SPU2, 0, FSI, FMSI } }) /* IPRLS3 */ __IGNORE({ 0xFFD5002C, 0, 16, 4, { SCUV, 0, 0, 0 } }) /* IPRMS3 */ { 0xFFD50030, 0, 16, 4, { TMU1, 0, 0, TSIF2 } }, /* IPRNS3 */ { 0xFFD50034, 0, 16, 4, { CMT4, 0, 0, 0 } }, /* IPROS3 */ { 0xFFD50038, 0, 16, 4, { __IGNORE0(MFIS2), CPORTS2R, 0, 0 } }, }; static struct resource intcs_resources[] __initdata = { [0] = { .start = 0xffd20000, .end = 0xffd500ff, .flags = IORESOURCE_MEM, } }; static struct intc_desc intcs_desc __initdata = { .name = "sh7377-intcs", .resource = intcs_resources, .num_resources = ARRAY_SIZE(intcs_resources), .hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers, intcs_prio_registers, NULL, NULL), }; static void intcs_demux(unsigned int irq, struct irq_desc *desc) { void __iomem *reg = (void *)irq_get_handler_data(irq); unsigned int evtcodeas = ioread32(reg); generic_handle_irq(intcs_evt2irq(evtcodeas)); } #define INTEVTSA 0xFFD20100 void __init sh7377_init_irq(void) { void __iomem *intevtsa = ioremap_nocache(INTEVTSA, PAGE_SIZE); register_intc_controller(&intca_desc); register_intc_controller(&intcs_desc); /* demux using INTEVTSA */ irq_set_handler_data(evt2irq(INTCS_INTVECT), (void *)intevtsa); irq_set_chained_handler(evt2irq(INTCS_INTVECT), intcs_demux); }
gpl-2.0
samuaz/kernel_msm_gee
drivers/net/wireless/wl1251/spi.c
2842
7451
/*
 * This file is part of wl1251
 *
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/wl12xx.h>

#include "wl1251.h"
#include "reg.h"
#include "spi.h"

/* Top-half IRQ handler: defer all work to the driver's workqueue. */
static irqreturn_t wl1251_irq(int irq, void *cookie)
{
	struct wl1251 *wl;

	wl1251_debug(DEBUG_IRQ, "IRQ");

	wl = cookie;

	ieee80211_queue_work(wl->hw, &wl->irq_work);

	return IRQ_HANDLED;
}

static struct spi_device *wl_to_spi(struct wl1251 *wl)
{
	return wl->if_priv;
}

/* Reset the WSPI interface by clocking out WSPI_INIT_CMD_LEN 0xff bytes. */
static void wl1251_spi_reset(struct wl1251 *wl)
{
	u8 *cmd;
	struct spi_transfer t;
	struct spi_message m;

	cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
	if (!cmd) {
		wl1251_error("could not allocate cmd for spi reset");
		return;
	}

	memset(&t, 0, sizeof(t));
	spi_message_init(&m);

	memset(cmd, 0xff, WSPI_INIT_CMD_LEN);

	t.tx_buf = cmd;
	t.len = WSPI_INIT_CMD_LEN;
	spi_message_add_tail(&t, &m);

	spi_sync(wl_to_spi(wl), &m);

	wl1251_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
}

/* Send the WSPI init command that wakes the chip and configures the bus. */
static void wl1251_spi_wake(struct wl1251 *wl)
{
	u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
	struct spi_transfer t;
	struct spi_message m;

	cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
	if (!cmd) {
		wl1251_error("could not allocate cmd for spi init");
		return;
	}

	memset(crc, 0, sizeof(crc));
	memset(&t, 0, sizeof(t));
	spi_message_init(&m);

	/*
	 * Set WSPI_INIT_COMMAND
	 * the data is being send from the MSB to LSB
	 */
	cmd[2] = 0xff;
	cmd[3] = 0xff;
	cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
	cmd[0] = 0;
	cmd[7] = 0;
	cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
	cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;

	if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
		cmd[5] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
	else
		cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY;

	cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
		| WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;

	crc[0] = cmd[1];
	crc[1] = cmd[0];
	crc[2] = cmd[7];
	crc[3] = cmd[6];
	crc[4] = cmd[5];

	cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1;
	cmd[4] |= WSPI_INIT_CMD_END;

	t.tx_buf = cmd;
	t.len = WSPI_INIT_CMD_LEN;
	spi_message_add_tail(&t, &m);

	spi_sync(wl_to_spi(wl), &m);

	wl1251_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
}

static void wl1251_spi_reset_wake(struct wl1251 *wl)
{
	wl1251_spi_reset(wl);
	wl1251_spi_wake(wl);
}

static void wl1251_spi_read(struct wl1251 *wl, int addr, void *buf,
			    size_t len)
{
	struct spi_transfer t[3];
	struct spi_message m;
	u8 *busy_buf;
	u32 *cmd;

	cmd = &wl->buffer_cmd;
	busy_buf = wl->buffer_busyword;

	*cmd = 0;
	*cmd |= WSPI_CMD_READ;
	*cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH;
	*cmd |= addr & WSPI_CMD_BYTE_ADDR;

	spi_message_init(&m);
	memset(t, 0, sizeof(t));

	t[0].tx_buf = cmd;
	t[0].len = 4;
	spi_message_add_tail(&t[0], &m);

	/* Busy and non busy words read */
	t[1].rx_buf = busy_buf;
	t[1].len = WL1251_BUSY_WORD_LEN;
	spi_message_add_tail(&t[1], &m);

	t[2].rx_buf = buf;
	t[2].len = len;
	spi_message_add_tail(&t[2], &m);

	spi_sync(wl_to_spi(wl), &m);

	/* FIXME: check busy words */

	wl1251_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
	wl1251_dump(DEBUG_SPI, "spi_read buf <- ", buf, len);
}

static void wl1251_spi_write(struct wl1251 *wl, int addr, void *buf,
			     size_t len)
{
	struct spi_transfer t[2];
	struct spi_message m;
	u32 *cmd;

	cmd = &wl->buffer_cmd;

	*cmd = 0;
	*cmd |= WSPI_CMD_WRITE;
	*cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH;
	*cmd |= addr & WSPI_CMD_BYTE_ADDR;

	spi_message_init(&m);
	memset(t, 0, sizeof(t));

	t[0].tx_buf = cmd;
	t[0].len = sizeof(*cmd);
	spi_message_add_tail(&t[0], &m);

	t[1].tx_buf = buf;
	t[1].len = len;
	spi_message_add_tail(&t[1], &m);

	spi_sync(wl_to_spi(wl), &m);

	wl1251_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
	wl1251_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
}

static void wl1251_spi_enable_irq(struct wl1251 *wl)
{
	return enable_irq(wl->irq);
}

static void wl1251_spi_disable_irq(struct wl1251 *wl)
{
	return disable_irq(wl->irq);
}

static int wl1251_spi_set_power(struct wl1251 *wl, bool enable)
{
	if (wl->set_power)
		wl->set_power(enable);

	return 0;
}

static const struct wl1251_if_operations wl1251_spi_ops = {
	.read = wl1251_spi_read,
	.write = wl1251_spi_write,
	.reset = wl1251_spi_reset_wake,
	.enable_irq = wl1251_spi_enable_irq,
	.disable_irq = wl1251_spi_disable_irq,
	.power = wl1251_spi_set_power,
};

static int __devinit wl1251_spi_probe(struct spi_device *spi)
{
	struct wl12xx_platform_data *pdata;
	struct ieee80211_hw *hw;
	struct wl1251 *wl;
	int ret;

	pdata = spi->dev.platform_data;
	if (!pdata) {
		wl1251_error("no platform data");
		return -ENODEV;
	}

	hw = wl1251_alloc_hw();
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	wl = hw->priv;

	SET_IEEE80211_DEV(hw, &spi->dev);
	dev_set_drvdata(&spi->dev, wl);
	wl->if_priv = spi;
	wl->if_ops = &wl1251_spi_ops;

	/* This is the only SPI value that we need to set here, the rest
	 * comes from the board-peripherals file */
	spi->bits_per_word = 32;

	ret = spi_setup(spi);
	if (ret < 0) {
		wl1251_error("spi_setup failed");
		goto out_free;
	}

	wl->set_power = pdata->set_power;
	if (!wl->set_power) {
		wl1251_error("set power function missing in platform data");
		/* Fix: returning here directly leaked the ieee80211_hw
		 * allocated by wl1251_alloc_hw() above */
		ret = -ENODEV;
		goto out_free;
	}

	wl->irq = spi->irq;
	if (wl->irq < 0) {
		wl1251_error("irq missing in platform data");
		/* Fix: same leak as above — free hw on this path too */
		ret = -ENODEV;
		goto out_free;
	}

	wl->use_eeprom = pdata->use_eeprom;

	ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
	if (ret < 0) {
		wl1251_error("request_irq() failed: %d", ret);
		goto out_free;
	}

	irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);

	disable_irq(wl->irq);

	ret = wl1251_init_ieee80211(wl);
	if (ret)
		goto out_irq;

	return 0;

out_irq:
	free_irq(wl->irq, wl);

out_free:
	ieee80211_free_hw(hw);

	return ret;
}

static int __devexit wl1251_spi_remove(struct spi_device *spi)
{
	struct wl1251 *wl = dev_get_drvdata(&spi->dev);

	free_irq(wl->irq, wl);
	wl1251_free_hw(wl);

	return 0;
}

static struct spi_driver wl1251_spi_driver = {
	.driver = {
		.name		= DRIVER_NAME,
		.owner		= THIS_MODULE,
	},

	.probe		= wl1251_spi_probe,
	.remove		= __devexit_p(wl1251_spi_remove),
};

static int __init wl1251_spi_init(void)
{
	int ret;

	ret = spi_register_driver(&wl1251_spi_driver);
	if (ret < 0) {
		wl1251_error("failed to register spi driver: %d", ret);
		goto out;
	}

out:
	return ret;
}

static void __exit wl1251_spi_exit(void)
{
	spi_unregister_driver(&wl1251_spi_driver);

	wl1251_notice("unloaded");
}

module_init(wl1251_spi_init);
module_exit(wl1251_spi_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
MODULE_ALIAS("spi:wl1251");
gpl-2.0
pacificIT/linux_kernel
crypto/crct10dif_common.c
3098
3728
/*
 * Cryptographic API.
 *
 * T10 Data Integrity Field CRC16 Crypto Transform
 *
 * Copyright (c) 2007 Oracle Corporation.  All rights reserved.
 * Written by Martin K. Petersen <martin.petersen@oracle.com>
 * Copyright (C) 2013 Intel Corporation
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/crc-t10dif.h>
#include <linux/module.h>
#include <linux/kernel.h>

/* Byte-at-a-time lookup table generated from the T10-DIF polynomial:
 * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
 * gt: 0x8bb7
 * (table contents are generated; do not hand-edit individual entries)
 */
static const __u16 t10_dif_crc_table[256] = {
	0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
	0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
	0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
	0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
	0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
	0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
	0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
	0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
	0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
	0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
	0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
	0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
	0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
	0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
	0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
	0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
	0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
	0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
	0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
	0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
	0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
	0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
	0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
	0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
	0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
	0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
	0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
	0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
	0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
	0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
	0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
	0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
};

/*
 * crc_t10dif_generic - update a T10-DIF CRC16 over a buffer
 * @crc:    running CRC value (pass 0 to start a new computation)
 * @buffer: bytes to fold into the CRC
 * @len:    number of bytes in @buffer
 *
 * Table-driven (one table lookup per input byte) reference
 * implementation; architecture-specific accelerated versions may
 * override it at the crypto-API level.  Returns the updated CRC, so
 * calls can be chained for scattered data.
 */
__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len)
{
	unsigned int i;

	for (i = 0 ; i < len ; i++)
		crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];

	return crc;
}
EXPORT_SYMBOL(crc_t10dif_generic);

MODULE_DESCRIPTION("T10 DIF CRC calculation common code");
MODULE_LICENSE("GPL");
gpl-2.0
brymaster5000/m7-GPE-L
sound/soc/omap/omap-pcm.c
4378
12271
/* * omap-pcm.c -- ALSA PCM interface for the OMAP SoC * * Copyright (C) 2008 Nokia Corporation * * Contact: Jarkko Nikula <jarkko.nikula@bitmer.com> * Peter Ujfalusi <peter.ujfalusi@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <plat/dma.h> #include "omap-pcm.h" static const struct snd_pcm_hardware omap_pcm_hardware = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_NO_PERIOD_WAKEUP, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE, .period_bytes_min = 32, .period_bytes_max = 64 * 1024, .periods_min = 2, .periods_max = 255, .buffer_bytes_max = 128 * 1024, }; struct omap_runtime_data { spinlock_t lock; struct omap_pcm_dma_data *dma_data; int dma_ch; int period_index; }; static void omap_pcm_dma_irq(int ch, u16 stat, void *data) { struct snd_pcm_substream *substream = data; struct snd_pcm_runtime *runtime = substream->runtime; struct omap_runtime_data *prtd = runtime->private_data; unsigned long flags; if ((cpu_is_omap1510())) { /* * OMAP1510 doesn't fully support DMA progress counter * and there is no software emulation implemented yet, * so have to maintain our own 
progress counters * that can be used by omap_pcm_pointer() instead. */ spin_lock_irqsave(&prtd->lock, flags); if ((stat == OMAP_DMA_LAST_IRQ) && (prtd->period_index == runtime->periods - 1)) { /* we are in sync, do nothing */ spin_unlock_irqrestore(&prtd->lock, flags); return; } if (prtd->period_index >= 0) { if (stat & OMAP_DMA_BLOCK_IRQ) { /* end of buffer reached, loop back */ prtd->period_index = 0; } else if (stat & OMAP_DMA_LAST_IRQ) { /* update the counter for the last period */ prtd->period_index = runtime->periods - 1; } else if (++prtd->period_index >= runtime->periods) { /* end of buffer missed? loop back */ prtd->period_index = 0; } } spin_unlock_irqrestore(&prtd->lock, flags); } snd_pcm_period_elapsed(substream); } /* this may get called several times by oss emulation */ static int omap_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct omap_runtime_data *prtd = runtime->private_data; struct omap_pcm_dma_data *dma_data; int err = 0; dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); /* return if this is a bufferless transfer e.g. 
* codec <--> BT codec or GSM modem -- lg FIXME */ if (!dma_data) return 0; snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); runtime->dma_bytes = params_buffer_bytes(params); if (prtd->dma_data) return 0; prtd->dma_data = dma_data; err = omap_request_dma(dma_data->dma_req, dma_data->name, omap_pcm_dma_irq, substream, &prtd->dma_ch); if (!err) { /* * Link channel with itself so DMA doesn't need any * reprogramming while looping the buffer */ omap_dma_link_lch(prtd->dma_ch, prtd->dma_ch); } return err; } static int omap_pcm_hw_free(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct omap_runtime_data *prtd = runtime->private_data; if (prtd->dma_data == NULL) return 0; omap_dma_unlink_lch(prtd->dma_ch, prtd->dma_ch); omap_free_dma(prtd->dma_ch); prtd->dma_data = NULL; snd_pcm_set_runtime_buffer(substream, NULL); return 0; } static int omap_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct omap_runtime_data *prtd = runtime->private_data; struct omap_pcm_dma_data *dma_data = prtd->dma_data; struct omap_dma_channel_params dma_params; int bytes; /* return if this is a bufferless transfer e.g. 
* codec <--> BT codec or GSM modem -- lg FIXME */ if (!prtd->dma_data) return 0; memset(&dma_params, 0, sizeof(dma_params)); dma_params.data_type = dma_data->data_type; dma_params.trigger = dma_data->dma_req; dma_params.sync_mode = dma_data->sync_mode; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { dma_params.src_amode = OMAP_DMA_AMODE_POST_INC; dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT; dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC; dma_params.src_start = runtime->dma_addr; dma_params.dst_start = dma_data->port_addr; dma_params.dst_port = OMAP_DMA_PORT_MPUI; dma_params.dst_fi = dma_data->packet_size; } else { dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT; dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC; dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC; dma_params.src_start = dma_data->port_addr; dma_params.dst_start = runtime->dma_addr; dma_params.src_port = OMAP_DMA_PORT_MPUI; dma_params.src_fi = dma_data->packet_size; } /* * Set DMA transfer frame size equal to ALSA period size and frame * count as no. of ALSA periods. Then with DMA frame interrupt enabled, * we can transfer the whole ALSA buffer with single DMA transfer but * still can get an interrupt at each period bounary */ bytes = snd_pcm_lib_period_bytes(substream); dma_params.elem_count = bytes >> dma_data->data_type; dma_params.frame_count = runtime->periods; omap_set_dma_params(prtd->dma_ch, &dma_params); if ((cpu_is_omap1510())) omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ | OMAP_DMA_LAST_IRQ | OMAP_DMA_BLOCK_IRQ); else if (!substream->runtime->no_period_wakeup) omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ); else { /* * No period wakeup: * we need to disable BLOCK_IRQ, which is enabled by the omap * dma core at request dma time. 
*/ omap_disable_dma_irq(prtd->dma_ch, OMAP_DMA_BLOCK_IRQ); } if (!(cpu_class_is_omap1())) { omap_set_dma_src_burst_mode(prtd->dma_ch, OMAP_DMA_DATA_BURST_16); omap_set_dma_dest_burst_mode(prtd->dma_ch, OMAP_DMA_DATA_BURST_16); } return 0; } static int omap_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_pcm_runtime *runtime = substream->runtime; struct omap_runtime_data *prtd = runtime->private_data; struct omap_pcm_dma_data *dma_data = prtd->dma_data; unsigned long flags; int ret = 0; spin_lock_irqsave(&prtd->lock, flags); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: prtd->period_index = 0; /* Configure McBSP internal buffer usage */ if (dma_data->set_threshold) dma_data->set_threshold(substream); omap_start_dma(prtd->dma_ch); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: prtd->period_index = -1; omap_stop_dma(prtd->dma_ch); break; default: ret = -EINVAL; } spin_unlock_irqrestore(&prtd->lock, flags); return ret; } static snd_pcm_uframes_t omap_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct omap_runtime_data *prtd = runtime->private_data; dma_addr_t ptr; snd_pcm_uframes_t offset; if (cpu_is_omap1510()) { offset = prtd->period_index * runtime->period_size; } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { ptr = omap_get_dma_dst_pos(prtd->dma_ch); offset = bytes_to_frames(runtime, ptr - runtime->dma_addr); } else { ptr = omap_get_dma_src_pos(prtd->dma_ch); offset = bytes_to_frames(runtime, ptr - runtime->dma_addr); } if (offset >= runtime->buffer_size) offset = 0; return offset; } static int omap_pcm_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct omap_runtime_data *prtd; int ret; snd_soc_set_runtime_hwparams(substream, &omap_pcm_hardware); /* Ensure that buffer size is a multiple of period 
size */ ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) goto out; prtd = kzalloc(sizeof(*prtd), GFP_KERNEL); if (prtd == NULL) { ret = -ENOMEM; goto out; } spin_lock_init(&prtd->lock); runtime->private_data = prtd; out: return ret; } static int omap_pcm_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; kfree(runtime->private_data); return 0; } static int omap_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { struct snd_pcm_runtime *runtime = substream->runtime; return dma_mmap_writecombine(substream->pcm->card->dev, vma, runtime->dma_area, runtime->dma_addr, runtime->dma_bytes); } static struct snd_pcm_ops omap_pcm_ops = { .open = omap_pcm_open, .close = omap_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = omap_pcm_hw_params, .hw_free = omap_pcm_hw_free, .prepare = omap_pcm_prepare, .trigger = omap_pcm_trigger, .pointer = omap_pcm_pointer, .mmap = omap_pcm_mmap, }; static u64 omap_pcm_dmamask = DMA_BIT_MASK(64); static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream) { struct snd_pcm_substream *substream = pcm->streams[stream].substream; struct snd_dma_buffer *buf = &substream->dma_buffer; size_t size = omap_pcm_hardware.buffer_bytes_max; buf->dev.type = SNDRV_DMA_TYPE_DEV; buf->dev.dev = pcm->card->dev; buf->private_data = NULL; buf->area = dma_alloc_writecombine(pcm->card->dev, size, &buf->addr, GFP_KERNEL); if (!buf->area) return -ENOMEM; buf->bytes = size; return 0; } static void omap_pcm_free_dma_buffers(struct snd_pcm *pcm) { struct snd_pcm_substream *substream; struct snd_dma_buffer *buf; int stream; for (stream = 0; stream < 2; stream++) { substream = pcm->streams[stream].substream; if (!substream) continue; buf = &substream->dma_buffer; if (!buf->area) continue; dma_free_writecombine(pcm->card->dev, buf->bytes, buf->area, buf->addr); buf->area = NULL; } } static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd) { struct 
snd_card *card = rtd->card->snd_card; struct snd_pcm *pcm = rtd->pcm; int ret = 0; if (!card->dev->dma_mask) card->dev->dma_mask = &omap_pcm_dmamask; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = DMA_BIT_MASK(64); if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { ret = omap_pcm_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_PLAYBACK); if (ret) goto out; } if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) { ret = omap_pcm_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_CAPTURE); if (ret) goto out; } out: /* free preallocated buffers in case of error */ if (ret) omap_pcm_free_dma_buffers(pcm); return ret; } static struct snd_soc_platform_driver omap_soc_platform = { .ops = &omap_pcm_ops, .pcm_new = omap_pcm_new, .pcm_free = omap_pcm_free_dma_buffers, }; static __devinit int omap_pcm_probe(struct platform_device *pdev) { return snd_soc_register_platform(&pdev->dev, &omap_soc_platform); } static int __devexit omap_pcm_remove(struct platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); return 0; } static struct platform_driver omap_pcm_driver = { .driver = { .name = "omap-pcm-audio", .owner = THIS_MODULE, }, .probe = omap_pcm_probe, .remove = __devexit_p(omap_pcm_remove), }; module_platform_driver(omap_pcm_driver); MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>"); MODULE_DESCRIPTION("OMAP PCM DMA module"); MODULE_LICENSE("GPL");
gpl-2.0
pawitp/android_kernel_samsung_i9082
arch/powerpc/oprofile/op_model_7450.c
4634
5381
/* * arch/powerpc/oprofile/op_model_7450.c * * Freescale 745x/744x oprofile support, based on fsl_booke support * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM * * Copyright (c) 2004 Freescale Semiconductor, Inc * * Author: Andy Fleming * Maintainer: Kumar Gala <galak@kernel.crashing.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/oprofile.h> #include <linux/init.h> #include <linux/smp.h> #include <asm/ptrace.h> #include <asm/system.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/page.h> #include <asm/pmc.h> #include <asm/oprofile_impl.h> static unsigned long reset_value[OP_MAX_COUNTER]; static int oprofile_running; static u32 mmcr0_val, mmcr1_val, mmcr2_val, num_pmcs; #define MMCR0_PMC1_SHIFT 6 #define MMCR0_PMC2_SHIFT 0 #define MMCR1_PMC3_SHIFT 27 #define MMCR1_PMC4_SHIFT 22 #define MMCR1_PMC5_SHIFT 17 #define MMCR1_PMC6_SHIFT 11 #define mmcr0_event1(event) \ ((event << MMCR0_PMC1_SHIFT) & MMCR0_PMC1SEL) #define mmcr0_event2(event) \ ((event << MMCR0_PMC2_SHIFT) & MMCR0_PMC2SEL) #define mmcr1_event3(event) \ ((event << MMCR1_PMC3_SHIFT) & MMCR1_PMC3SEL) #define mmcr1_event4(event) \ ((event << MMCR1_PMC4_SHIFT) & MMCR1_PMC4SEL) #define mmcr1_event5(event) \ ((event << MMCR1_PMC5_SHIFT) & MMCR1_PMC5SEL) #define mmcr1_event6(event) \ ((event << MMCR1_PMC6_SHIFT) & MMCR1_PMC6SEL) #define MMCR0_INIT (MMCR0_FC | MMCR0_FCS | MMCR0_FCP | MMCR0_FCM1 | MMCR0_FCM0) /* Unfreezes the counters on this CPU, enables the interrupt, * enables the counters to trigger the interrupt, and sets the * counters to only count when the mark bit is not set. 
*/ static void pmc_start_ctrs(void) { u32 mmcr0 = mfspr(SPRN_MMCR0); mmcr0 &= ~(MMCR0_FC | MMCR0_FCM0); mmcr0 |= (MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE); mtspr(SPRN_MMCR0, mmcr0); } /* Disables the counters on this CPU, and freezes them */ static void pmc_stop_ctrs(void) { u32 mmcr0 = mfspr(SPRN_MMCR0); mmcr0 |= MMCR0_FC; mmcr0 &= ~(MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE); mtspr(SPRN_MMCR0, mmcr0); } /* Configures the counters on this CPU based on the global * settings */ static int fsl7450_cpu_setup(struct op_counter_config *ctr) { /* freeze all counters */ pmc_stop_ctrs(); mtspr(SPRN_MMCR0, mmcr0_val); mtspr(SPRN_MMCR1, mmcr1_val); if (num_pmcs > 4) mtspr(SPRN_MMCR2, mmcr2_val); return 0; } /* Configures the global settings for the countes on all CPUs. */ static int fsl7450_reg_setup(struct op_counter_config *ctr, struct op_system_config *sys, int num_ctrs) { int i; num_pmcs = num_ctrs; /* Our counters count up, and "count" refers to * how much before the next interrupt, and we interrupt * on overflow. So we calculate the starting value * which will give us "count" until overflow. 
* Then we set the events on the enabled counters */ for (i = 0; i < num_ctrs; ++i) reset_value[i] = 0x80000000UL - ctr[i].count; /* Set events for Counters 1 & 2 */ mmcr0_val = MMCR0_INIT | mmcr0_event1(ctr[0].event) | mmcr0_event2(ctr[1].event); /* Setup user/kernel bits */ if (sys->enable_kernel) mmcr0_val &= ~(MMCR0_FCS); if (sys->enable_user) mmcr0_val &= ~(MMCR0_FCP); /* Set events for Counters 3-6 */ mmcr1_val = mmcr1_event3(ctr[2].event) | mmcr1_event4(ctr[3].event); if (num_ctrs > 4) mmcr1_val |= mmcr1_event5(ctr[4].event) | mmcr1_event6(ctr[5].event); mmcr2_val = 0; return 0; } /* Sets the counters on this CPU to the chosen values, and starts them */ static int fsl7450_start(struct op_counter_config *ctr) { int i; mtmsr(mfmsr() | MSR_PMM); for (i = 0; i < num_pmcs; ++i) { if (ctr[i].enabled) classic_ctr_write(i, reset_value[i]); else classic_ctr_write(i, 0); } /* Clear the freeze bit, and enable the interrupt. * The counters won't actually start until the rfi clears * the PMM bit */ pmc_start_ctrs(); oprofile_running = 1; return 0; } /* Stop the counters on this CPU */ static void fsl7450_stop(void) { /* freeze counters */ pmc_stop_ctrs(); oprofile_running = 0; mb(); } /* Handle the interrupt on this CPU, and log a sample for each * event that triggered the interrupt */ static void fsl7450_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr) { unsigned long pc; int is_kernel; int val; int i; /* set the PMM bit (see comment below) */ mtmsr(mfmsr() | MSR_PMM); pc = mfspr(SPRN_SIAR); is_kernel = is_kernel_addr(pc); for (i = 0; i < num_pmcs; ++i) { val = classic_ctr_read(i); if (val < 0) { if (oprofile_running && ctr[i].enabled) { oprofile_add_ext_sample(pc, regs, i, is_kernel); classic_ctr_write(i, reset_value[i]); } else { classic_ctr_write(i, 0); } } } /* The freeze bit was set by the interrupt. */ /* Clear the freeze bit, and reenable the interrupt. 
* The counters won't actually start until the rfi clears * the PM/M bit */ pmc_start_ctrs(); } struct op_powerpc_model op_model_7450= { .reg_setup = fsl7450_reg_setup, .cpu_setup = fsl7450_cpu_setup, .start = fsl7450_start, .stop = fsl7450_stop, .handle_interrupt = fsl7450_handle_interrupt, };
gpl-2.0
percy-g2/android_kernel_msm8625
drivers/usb/host/ohci-hub.c
4890
21840
/* * OHCI HCD (Host Controller Driver) for USB. * * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at> * (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net> * * This file is licenced under GPL */ /*-------------------------------------------------------------------------*/ /* * OHCI Root Hub ... the nonsharable stuff */ #define dbg_port(hc,label,num,value) \ ohci_dbg (hc, \ "%s roothub.portstatus [%d] " \ "= 0x%08x%s%s%s%s%s%s%s%s%s%s%s%s\n", \ label, num, temp, \ (temp & RH_PS_PRSC) ? " PRSC" : "", \ (temp & RH_PS_OCIC) ? " OCIC" : "", \ (temp & RH_PS_PSSC) ? " PSSC" : "", \ (temp & RH_PS_PESC) ? " PESC" : "", \ (temp & RH_PS_CSC) ? " CSC" : "", \ \ (temp & RH_PS_LSDA) ? " LSDA" : "", \ (temp & RH_PS_PPS) ? " PPS" : "", \ (temp & RH_PS_PRS) ? " PRS" : "", \ (temp & RH_PS_POCI) ? " POCI" : "", \ (temp & RH_PS_PSS) ? " PSS" : "", \ \ (temp & RH_PS_PES) ? " PES" : "", \ (temp & RH_PS_CCS) ? " CCS" : "" \ ); /*-------------------------------------------------------------------------*/ #define OHCI_SCHED_ENABLES \ (OHCI_CTRL_CLE|OHCI_CTRL_BLE|OHCI_CTRL_PLE|OHCI_CTRL_IE) static void dl_done_list (struct ohci_hcd *); static void finish_unlinks (struct ohci_hcd *, u16); #ifdef CONFIG_PM static int ohci_rh_suspend (struct ohci_hcd *ohci, int autostop) __releases(ohci->lock) __acquires(ohci->lock) { int status = 0; ohci->hc_control = ohci_readl (ohci, &ohci->regs->control); switch (ohci->hc_control & OHCI_CTRL_HCFS) { case OHCI_USB_RESUME: ohci_dbg (ohci, "resume/suspend?\n"); ohci->hc_control &= ~OHCI_CTRL_HCFS; ohci->hc_control |= OHCI_USB_RESET; ohci_writel (ohci, ohci->hc_control, &ohci->regs->control); (void) ohci_readl (ohci, &ohci->regs->control); /* FALL THROUGH */ case OHCI_USB_RESET: status = -EBUSY; ohci_dbg (ohci, "needs reinit!\n"); goto done; case OHCI_USB_SUSPEND: if (!ohci->autostop) { ohci_dbg (ohci, "already suspended\n"); goto done; } } ohci_dbg (ohci, "%s root hub\n", autostop ? 
"auto-stop" : "suspend"); /* First stop any processing */ if (!autostop && (ohci->hc_control & OHCI_SCHED_ENABLES)) { ohci->hc_control &= ~OHCI_SCHED_ENABLES; ohci_writel (ohci, ohci->hc_control, &ohci->regs->control); ohci->hc_control = ohci_readl (ohci, &ohci->regs->control); ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus); /* sched disables take effect on the next frame, * then the last WDH could take 6+ msec */ ohci_dbg (ohci, "stopping schedules ...\n"); ohci->autostop = 0; spin_unlock_irq (&ohci->lock); msleep (8); spin_lock_irq (&ohci->lock); } dl_done_list (ohci); finish_unlinks (ohci, ohci_frame_no(ohci)); /* maybe resume can wake root hub */ if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) { ohci->hc_control |= OHCI_CTRL_RWE; } else { ohci_writel(ohci, OHCI_INTR_RHSC | OHCI_INTR_RD, &ohci->regs->intrdisable); ohci->hc_control &= ~OHCI_CTRL_RWE; } /* Suspend hub ... this is the "global (to this bus) suspend" mode, * which doesn't imply ports will first be individually suspended. 
*/ ohci->hc_control &= ~OHCI_CTRL_HCFS; ohci->hc_control |= OHCI_USB_SUSPEND; ohci_writel (ohci, ohci->hc_control, &ohci->regs->control); (void) ohci_readl (ohci, &ohci->regs->control); /* no resumes until devices finish suspending */ if (!autostop) { ohci->next_statechange = jiffies + msecs_to_jiffies (5); ohci->autostop = 0; ohci->rh_state = OHCI_RH_SUSPENDED; } done: return status; } static inline struct ed *find_head (struct ed *ed) { /* for bulk and control lists */ while (ed->ed_prev) ed = ed->ed_prev; return ed; } /* caller has locked the root hub */ static int ohci_rh_resume (struct ohci_hcd *ohci) __releases(ohci->lock) __acquires(ohci->lock) { struct usb_hcd *hcd = ohci_to_hcd (ohci); u32 temp, enables; int status = -EINPROGRESS; int autostopped = ohci->autostop; ohci->autostop = 0; ohci->hc_control = ohci_readl (ohci, &ohci->regs->control); if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) { /* this can happen after resuming a swsusp snapshot */ if (ohci->rh_state != OHCI_RH_RUNNING) { ohci_dbg (ohci, "BIOS/SMM active, control %03x\n", ohci->hc_control); status = -EBUSY; /* this happens when pmcore resumes HC then root */ } else { ohci_dbg (ohci, "duplicate resume\n"); status = 0; } } else switch (ohci->hc_control & OHCI_CTRL_HCFS) { case OHCI_USB_SUSPEND: ohci->hc_control &= ~(OHCI_CTRL_HCFS|OHCI_SCHED_ENABLES); ohci->hc_control |= OHCI_USB_RESUME; ohci_writel (ohci, ohci->hc_control, &ohci->regs->control); (void) ohci_readl (ohci, &ohci->regs->control); ohci_dbg (ohci, "%s root hub\n", autostopped ? "auto-start" : "resume"); break; case OHCI_USB_RESUME: /* HCFS changes sometime after INTR_RD */ ohci_dbg(ohci, "%swakeup root hub\n", autostopped ? "auto-" : ""); break; case OHCI_USB_OPER: /* this can happen after resuming a swsusp snapshot */ ohci_dbg (ohci, "snapshot resume? 
reinit\n"); status = -EBUSY; break; default: /* RESET, we lost power */ ohci_dbg (ohci, "lost power\n"); status = -EBUSY; } if (status == -EBUSY) { if (!autostopped) { spin_unlock_irq (&ohci->lock); (void) ohci_init (ohci); status = ohci_restart (ohci); usb_root_hub_lost_power(hcd->self.root_hub); spin_lock_irq (&ohci->lock); } return status; } if (status != -EINPROGRESS) return status; if (autostopped) goto skip_resume; spin_unlock_irq (&ohci->lock); /* Some controllers (lucent erratum) need extra-long delays */ msleep (20 /* usb 11.5.1.10 */ + 12 /* 32 msec counter */ + 1); temp = ohci_readl (ohci, &ohci->regs->control); temp &= OHCI_CTRL_HCFS; if (temp != OHCI_USB_RESUME) { ohci_err (ohci, "controller won't resume\n"); spin_lock_irq(&ohci->lock); return -EBUSY; } /* disable old schedule state, reinit from scratch */ ohci_writel (ohci, 0, &ohci->regs->ed_controlhead); ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent); ohci_writel (ohci, 0, &ohci->regs->ed_bulkhead); ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent); ohci_writel (ohci, 0, &ohci->regs->ed_periodcurrent); ohci_writel (ohci, (u32) ohci->hcca_dma, &ohci->regs->hcca); /* Sometimes PCI D3 suspend trashes frame timings ... 
*/ periodic_reinit (ohci); /* the following code is executed with ohci->lock held and * irqs disabled if and only if autostopped is true */ skip_resume: /* interrupts might have been disabled */ ohci_writel (ohci, OHCI_INTR_INIT, &ohci->regs->intrenable); if (ohci->ed_rm_list) ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable); /* Then re-enable operations */ ohci_writel (ohci, OHCI_USB_OPER, &ohci->regs->control); (void) ohci_readl (ohci, &ohci->regs->control); if (!autostopped) msleep (3); temp = ohci->hc_control; temp &= OHCI_CTRL_RWC; temp |= OHCI_CONTROL_INIT | OHCI_USB_OPER; ohci->hc_control = temp; ohci_writel (ohci, temp, &ohci->regs->control); (void) ohci_readl (ohci, &ohci->regs->control); /* TRSMRCY */ if (!autostopped) { msleep (10); spin_lock_irq (&ohci->lock); } /* now ohci->lock is always held and irqs are always disabled */ /* keep it alive for more than ~5x suspend + resume costs */ ohci->next_statechange = jiffies + STATECHANGE_DELAY; /* maybe turn schedules back on */ enables = 0; temp = 0; if (!ohci->ed_rm_list) { if (ohci->ed_controltail) { ohci_writel (ohci, find_head (ohci->ed_controltail)->dma, &ohci->regs->ed_controlhead); enables |= OHCI_CTRL_CLE; temp |= OHCI_CLF; } if (ohci->ed_bulktail) { ohci_writel (ohci, find_head (ohci->ed_bulktail)->dma, &ohci->regs->ed_bulkhead); enables |= OHCI_CTRL_BLE; temp |= OHCI_BLF; } } if (hcd->self.bandwidth_isoc_reqs || hcd->self.bandwidth_int_reqs) enables |= OHCI_CTRL_PLE|OHCI_CTRL_IE; if (enables) { ohci_dbg (ohci, "restarting schedules ... 
%08x\n", enables); ohci->hc_control |= enables; ohci_writel (ohci, ohci->hc_control, &ohci->regs->control); if (temp) ohci_writel (ohci, temp, &ohci->regs->cmdstatus); (void) ohci_readl (ohci, &ohci->regs->control); } ohci->rh_state = OHCI_RH_RUNNING; return 0; } static int ohci_bus_suspend (struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci (hcd); int rc; spin_lock_irq (&ohci->lock); if (unlikely(!HCD_HW_ACCESSIBLE(hcd))) rc = -ESHUTDOWN; else rc = ohci_rh_suspend (ohci, 0); spin_unlock_irq (&ohci->lock); return rc; } static int ohci_bus_resume (struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci (hcd); int rc; if (time_before (jiffies, ohci->next_statechange)) msleep(5); spin_lock_irq (&ohci->lock); if (unlikely(!HCD_HW_ACCESSIBLE(hcd))) rc = -ESHUTDOWN; else rc = ohci_rh_resume (ohci); spin_unlock_irq (&ohci->lock); /* poll until we know a device is connected or we autostop */ if (rc == 0) usb_hcd_poll_rh_status(hcd); return rc; } /* Carry out the final steps of resuming the controller device */ static void ohci_finish_controller_resume(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); int port; bool need_reinit = false; /* See if the controller is already running or has been reset */ ohci->hc_control = ohci_readl(ohci, &ohci->regs->control); if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) { need_reinit = true; } else { switch (ohci->hc_control & OHCI_CTRL_HCFS) { case OHCI_USB_OPER: case OHCI_USB_RESET: need_reinit = true; } } /* If needed, reinitialize and suspend the root hub */ if (need_reinit) { spin_lock_irq(&ohci->lock); ohci_rh_resume(ohci); ohci_rh_suspend(ohci, 0); spin_unlock_irq(&ohci->lock); } /* Normally just turn on port power and enable interrupts */ else { ohci_dbg(ohci, "powerup ports\n"); for (port = 0; port < ohci->num_ports; port++) ohci_writel(ohci, RH_PS_PPS, &ohci->regs->roothub.portstatus[port]); ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrenable); ohci_readl(ohci, &ohci->regs->intrenable); 
msleep(20); } usb_hcd_resume_root_hub(hcd); } /* Carry out polling-, autostop-, and autoresume-related state changes */ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed, int any_connected, int rhsc_status) { int poll_rh = 1; int rhsc_enable; /* Some broken controllers never turn off RHSC in the interrupt * status register. For their sake we won't re-enable RHSC * interrupts if the interrupt bit is already active. */ rhsc_enable = ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC; switch (ohci->hc_control & OHCI_CTRL_HCFS) { case OHCI_USB_OPER: /* If no status changes are pending, enable RHSC interrupts. */ if (!rhsc_enable && !rhsc_status && !changed) { rhsc_enable = OHCI_INTR_RHSC; ohci_writel(ohci, rhsc_enable, &ohci->regs->intrenable); } /* Keep on polling until we know a device is connected * and RHSC is enabled, or until we autostop. */ if (!ohci->autostop) { if (any_connected || !device_may_wakeup(&ohci_to_hcd(ohci) ->self.root_hub->dev)) { if (rhsc_enable) poll_rh = 0; } else { ohci->autostop = 1; ohci->next_statechange = jiffies + HZ; } /* if no devices have been attached for one second, autostop */ } else { if (changed || any_connected) { ohci->autostop = 0; ohci->next_statechange = jiffies + STATECHANGE_DELAY; } else if (time_after_eq(jiffies, ohci->next_statechange) && !ohci->ed_rm_list && !(ohci->hc_control & OHCI_SCHED_ENABLES)) { ohci_rh_suspend(ohci, 1); if (rhsc_enable) poll_rh = 0; } } break; case OHCI_USB_SUSPEND: case OHCI_USB_RESUME: /* if there is a port change, autostart or ask to be resumed */ if (changed) { if (ohci->autostop) ohci_rh_resume(ohci); else usb_hcd_resume_root_hub(ohci_to_hcd(ohci)); /* If remote wakeup is disabled, stop polling */ } else if (!ohci->autostop && !ohci_to_hcd(ohci)->self.root_hub-> do_remote_wakeup) { poll_rh = 0; } else { /* If no status changes are pending, * enable RHSC interrupts */ if (!rhsc_enable && !rhsc_status) { rhsc_enable = OHCI_INTR_RHSC; ohci_writel(ohci, 
rhsc_enable, &ohci->regs->intrenable); } /* Keep polling until RHSC is enabled */ if (rhsc_enable) poll_rh = 0; } break; } return poll_rh; } #else /* CONFIG_PM */ static inline int ohci_rh_resume(struct ohci_hcd *ohci) { return 0; } /* Carry out polling-related state changes. * autostop isn't used when CONFIG_PM is turned off. */ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed, int any_connected, int rhsc_status) { /* If RHSC is enabled, don't poll */ if (ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC) return 0; /* If status changes are pending, continue polling. * Conversely, if no status changes are pending but the RHSC * status bit was set, then RHSC may be broken so continue polling. */ if (changed || rhsc_status) return 1; /* It's safe to re-enable RHSC interrupts */ ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable); return 0; } #endif /* CONFIG_PM */ /*-------------------------------------------------------------------------*/ /* build "status change" packet (one or two bytes) from HC registers */ static int ohci_hub_status_data (struct usb_hcd *hcd, char *buf) { struct ohci_hcd *ohci = hcd_to_ohci (hcd); int i, changed = 0, length = 1; int any_connected = 0; int rhsc_status; unsigned long flags; spin_lock_irqsave (&ohci->lock, flags); if (!HCD_HW_ACCESSIBLE(hcd)) goto done; /* undocumented erratum seen on at least rev D */ if ((ohci->flags & OHCI_QUIRK_AMD756) && (roothub_a (ohci) & RH_A_NDP) > MAX_ROOT_PORTS) { ohci_warn (ohci, "bogus NDP, rereads as NDP=%d\n", ohci_readl (ohci, &ohci->regs->roothub.a) & RH_A_NDP); /* retry later; "should not happen" */ goto done; } /* init status */ if (roothub_status (ohci) & (RH_HS_LPSC | RH_HS_OCIC)) buf [0] = changed = 1; else buf [0] = 0; if (ohci->num_ports > 7) { buf [1] = 0; length++; } /* Clear the RHSC status flag before reading the port statuses */ ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrstatus); rhsc_status = ohci_readl(ohci, &ohci->regs->intrstatus) & 
OHCI_INTR_RHSC; /* look at each port */ for (i = 0; i < ohci->num_ports; i++) { u32 status = roothub_portstatus (ohci, i); /* can't autostop if ports are connected */ any_connected |= (status & RH_PS_CCS); if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC | RH_PS_OCIC | RH_PS_PRSC)) { changed = 1; if (i < 7) buf [0] |= 1 << (i + 1); else buf [1] |= 1 << (i - 7); } } if (ohci_root_hub_state_changes(ohci, changed, any_connected, rhsc_status)) set_bit(HCD_FLAG_POLL_RH, &hcd->flags); else clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); done: spin_unlock_irqrestore (&ohci->lock, flags); return changed ? length : 0; } /*-------------------------------------------------------------------------*/ static void ohci_hub_descriptor ( struct ohci_hcd *ohci, struct usb_hub_descriptor *desc ) { u32 rh = roothub_a (ohci); u16 temp; desc->bDescriptorType = 0x29; desc->bPwrOn2PwrGood = (rh & RH_A_POTPGT) >> 24; desc->bHubContrCurrent = 0; desc->bNbrPorts = ohci->num_ports; temp = 1 + (ohci->num_ports / 8); desc->bDescLength = 7 + 2 * temp; temp = 0; if (rh & RH_A_NPS) /* no power switching? */ temp |= 0x0002; if (rh & RH_A_PSM) /* per-port power switching? */ temp |= 0x0001; if (rh & RH_A_NOCP) /* no overcurrent reporting? */ temp |= 0x0010; else if (rh & RH_A_OCPM) /* per-port overcurrent reporting? 
*/ temp |= 0x0008; desc->wHubCharacteristics = (__force __u16)cpu_to_hc16(ohci, temp); /* ports removable, and usb 1.0 legacy PortPwrCtrlMask */ rh = roothub_b (ohci); memset(desc->u.hs.DeviceRemovable, 0xff, sizeof(desc->u.hs.DeviceRemovable)); desc->u.hs.DeviceRemovable[0] = rh & RH_B_DR; if (ohci->num_ports > 7) { desc->u.hs.DeviceRemovable[1] = (rh & RH_B_DR) >> 8; desc->u.hs.DeviceRemovable[2] = 0xff; } else desc->u.hs.DeviceRemovable[1] = 0xff; } /*-------------------------------------------------------------------------*/ #ifdef CONFIG_USB_OTG static int ohci_start_port_reset (struct usb_hcd *hcd, unsigned port) { struct ohci_hcd *ohci = hcd_to_ohci (hcd); u32 status; if (!port) return -EINVAL; port--; /* start port reset before HNP protocol times out */ status = ohci_readl(ohci, &ohci->regs->roothub.portstatus [port]); if (!(status & RH_PS_CCS)) return -ENODEV; /* khubd will finish the reset later */ ohci_writel(ohci, RH_PS_PRS, &ohci->regs->roothub.portstatus [port]); return 0; } #else #define ohci_start_port_reset NULL #endif /*-------------------------------------------------------------------------*/ /* See usb 7.1.7.5: root hubs must issue at least 50 msec reset signaling, * not necessarily continuous ... to guard against resume signaling. * The short timeout is safe for non-root hubs, and is backward-compatible * with earlier Linux hosts. */ #ifdef CONFIG_USB_SUSPEND #define PORT_RESET_MSEC 50 #else #define PORT_RESET_MSEC 10 #endif /* this timer value might be vendor-specific ... 
*/ #define PORT_RESET_HW_MSEC 10 /* wrap-aware logic morphed from <linux/jiffies.h> */ #define tick_before(t1,t2) ((s16)(((s16)(t1))-((s16)(t2))) < 0) /* called from some task, normally khubd */ static inline int root_port_reset (struct ohci_hcd *ohci, unsigned port) { __hc32 __iomem *portstat = &ohci->regs->roothub.portstatus [port]; u32 temp = 0; u16 now = ohci_readl(ohci, &ohci->regs->fmnumber); u16 reset_done = now + PORT_RESET_MSEC; int limit_1 = DIV_ROUND_UP(PORT_RESET_MSEC, PORT_RESET_HW_MSEC); /* build a "continuous enough" reset signal, with up to * 3msec gap between pulses. scheduler HZ==100 must work; * this might need to be deadline-scheduled. */ do { int limit_2; /* spin until any current reset finishes */ limit_2 = PORT_RESET_HW_MSEC * 2; while (--limit_2 >= 0) { temp = ohci_readl (ohci, portstat); /* handle e.g. CardBus eject */ if (temp == ~(u32)0) return -ESHUTDOWN; if (!(temp & RH_PS_PRS)) break; udelay (500); } /* timeout (a hardware error) has been observed when * EHCI sets CF while this driver is resetting a port; * presumably other disconnect paths might do it too. */ if (limit_2 < 0) { ohci_dbg(ohci, "port[%d] reset timeout, stat %08x\n", port, temp); break; } if (!(temp & RH_PS_CCS)) break; if (temp & RH_PS_PRSC) ohci_writel (ohci, RH_PS_PRSC, portstat); /* start the next reset, sleep till it's probably done */ ohci_writel (ohci, RH_PS_PRS, portstat); msleep(PORT_RESET_HW_MSEC); now = ohci_readl(ohci, &ohci->regs->fmnumber); } while (tick_before(now, reset_done) && --limit_1 >= 0); /* caller synchronizes using PRSC ... and handles PRS * still being set when this returns. 
*/ return 0; } static int ohci_hub_control ( struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength ) { struct ohci_hcd *ohci = hcd_to_ohci (hcd); int ports = ohci->num_ports; u32 temp; int retval = 0; if (unlikely(!HCD_HW_ACCESSIBLE(hcd))) return -ESHUTDOWN; switch (typeReq) { case ClearHubFeature: switch (wValue) { case C_HUB_OVER_CURRENT: ohci_writel (ohci, RH_HS_OCIC, &ohci->regs->roothub.status); case C_HUB_LOCAL_POWER: break; default: goto error; } break; case ClearPortFeature: if (!wIndex || wIndex > ports) goto error; wIndex--; switch (wValue) { case USB_PORT_FEAT_ENABLE: temp = RH_PS_CCS; break; case USB_PORT_FEAT_C_ENABLE: temp = RH_PS_PESC; break; case USB_PORT_FEAT_SUSPEND: temp = RH_PS_POCI; break; case USB_PORT_FEAT_C_SUSPEND: temp = RH_PS_PSSC; break; case USB_PORT_FEAT_POWER: temp = RH_PS_LSDA; break; case USB_PORT_FEAT_C_CONNECTION: temp = RH_PS_CSC; break; case USB_PORT_FEAT_C_OVER_CURRENT: temp = RH_PS_OCIC; break; case USB_PORT_FEAT_C_RESET: temp = RH_PS_PRSC; break; default: goto error; } ohci_writel (ohci, temp, &ohci->regs->roothub.portstatus [wIndex]); // ohci_readl (ohci, &ohci->regs->roothub.portstatus [wIndex]); break; case GetHubDescriptor: ohci_hub_descriptor (ohci, (struct usb_hub_descriptor *) buf); break; case GetHubStatus: temp = roothub_status (ohci) & ~(RH_HS_CRWE | RH_HS_DRWE); put_unaligned_le32(temp, buf); break; case GetPortStatus: if (!wIndex || wIndex > ports) goto error; wIndex--; temp = roothub_portstatus (ohci, wIndex); put_unaligned_le32(temp, buf); #ifndef OHCI_VERBOSE_DEBUG if (*(u16*)(buf+2)) /* only if wPortChange is interesting */ #endif dbg_port (ohci, "GetStatus", wIndex, temp); break; case SetHubFeature: switch (wValue) { case C_HUB_OVER_CURRENT: // FIXME: this can be cleared, yes? 
case C_HUB_LOCAL_POWER: break; default: goto error; } break; case SetPortFeature: if (!wIndex || wIndex > ports) goto error; wIndex--; switch (wValue) { case USB_PORT_FEAT_SUSPEND: #ifdef CONFIG_USB_OTG if (hcd->self.otg_port == (wIndex + 1) && hcd->self.b_hnp_enable) ohci->start_hnp(ohci); else #endif ohci_writel (ohci, RH_PS_PSS, &ohci->regs->roothub.portstatus [wIndex]); break; case USB_PORT_FEAT_POWER: ohci_writel (ohci, RH_PS_PPS, &ohci->regs->roothub.portstatus [wIndex]); break; case USB_PORT_FEAT_RESET: retval = root_port_reset (ohci, wIndex); break; default: goto error; } break; default: error: /* "protocol stall" on error */ retval = -EPIPE; } return retval; }
gpl-2.0
chillstep1998/AK-OnePone
drivers/staging/vt6655/rxtx.c
7962
133309
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: rxtx.c * * Purpose: handle WMAC/802.3/802.11 rx & tx functions * * Author: Lyndon Chen * * Date: May 20, 2003 * * Functions: * s_vGenerateTxParameter - Generate tx dma required parameter. * vGenerateMACHeader - Translate 802.3 to 802.11 header * cbGetFragCount - Caculate fragment number count * csBeacon_xmit - beacon tx function * csMgmt_xmit - management tx function * s_cbFillTxBufHead - fulfill tx dma buffer header * s_uGetDataDuration - get tx data required duration * s_uFillDataHead- fulfill tx data duration header * s_uGetRTSCTSDuration- get rtx/cts required duration * s_uGetRTSCTSRsvTime- get rts/cts reserved time * s_uGetTxRsvTime- get frame reserved time * s_vFillCTSHead- fulfill CTS ctl header * s_vFillFragParameter- Set fragment ctl parameter. 
* s_vFillRTSHead- fulfill RTS ctl header * s_vFillTxKey- fulfill tx encrypt key * s_vSWencryption- Software encrypt header * vDMA0_tx_80211- tx 802.11 frame via dma0 * vGenerateFIFOHeader- Generate tx FIFO ctl header * * Revision History: * */ #include "device.h" #include "rxtx.h" #include "tether.h" #include "card.h" #include "bssdb.h" #include "mac.h" #include "baseband.h" #include "michael.h" #include "tkip.h" #include "tcrc.h" #include "wctl.h" #include "wroute.h" #include "hostap.h" #include "rf.h" /*--------------------- Static Definitions -------------------------*/ /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ //static int msglevel =MSG_LEVEL_DEBUG; static int msglevel =MSG_LEVEL_INFO; #define PLICE_DEBUG /*--------------------- Static Functions --------------------------*/ /*--------------------- Static Definitions -------------------------*/ #define CRITICAL_PACKET_LEN 256 // if packet size < 256 -> in-direct send // packet size >= 256 -> direct send const unsigned short wTimeStampOff[2][MAX_RATE] = { {384, 288, 226, 209, 54, 43, 37, 31, 28, 25, 24, 23}, // Long Preamble {384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23}, // Short Preamble }; const unsigned short wFB_Opt0[2][5] = { {RATE_12M, RATE_18M, RATE_24M, RATE_36M, RATE_48M}, // fallback_rate0 {RATE_12M, RATE_12M, RATE_18M, RATE_24M, RATE_36M}, // fallback_rate1 }; const unsigned short wFB_Opt1[2][5] = { {RATE_12M, RATE_18M, RATE_24M, RATE_24M, RATE_36M}, // fallback_rate0 {RATE_6M , RATE_6M, RATE_12M, RATE_12M, RATE_18M}, // fallback_rate1 }; #define RTSDUR_BB 0 #define RTSDUR_BA 1 #define RTSDUR_AA 2 #define CTSDUR_BA 3 #define RTSDUR_BA_F0 4 #define RTSDUR_AA_F0 5 #define RTSDUR_BA_F1 6 #define RTSDUR_AA_F1 7 #define CTSDUR_BA_F0 8 #define CTSDUR_BA_F1 9 #define DATADUR_B 10 #define DATADUR_A 11 #define DATADUR_A_F0 12 #define DATADUR_A_F1 13 /*--------------------- Static Functions 
--------------------------*/ static void s_vFillTxKey( PSDevice pDevice, unsigned char *pbyBuf, unsigned char *pbyIVHead, PSKeyItem pTransmitKey, unsigned char *pbyHdrBuf, unsigned short wPayloadLen, unsigned char *pMICHDR ); static void s_vFillRTSHead( PSDevice pDevice, unsigned char byPktType, void * pvRTS, unsigned int cbFrameLength, bool bNeedAck, bool bDisCRC, PSEthernetHeader psEthHeader, unsigned short wCurrentRate, unsigned char byFBOption ); static void s_vGenerateTxParameter( PSDevice pDevice, unsigned char byPktType, void * pTxBufHead, void * pvRrvTime, void * pvRTS, void * pvCTS, unsigned int cbFrameSize, bool bNeedACK, unsigned int uDMAIdx, PSEthernetHeader psEthHeader, unsigned short wCurrentRate ); static void s_vFillFragParameter( PSDevice pDevice, unsigned char *pbyBuffer, unsigned int uTxType, void * pvtdCurr, unsigned short wFragType, unsigned int cbReqCount ); static unsigned int s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyTxBufferAddr, unsigned int cbFrameBodySize, unsigned int uDMAIdx, PSTxDesc pHeadTD, PSEthernetHeader psEthHeader, unsigned char *pPacket, bool bNeedEncrypt, PSKeyItem pTransmitKey, unsigned int uNodeIndex, unsigned int *puMACfragNum); static unsigned int s_uFillDataHead ( PSDevice pDevice, unsigned char byPktType, void * pTxDataHead, unsigned int cbFrameLength, unsigned int uDMAIdx, bool bNeedAck, unsigned int uFragIdx, unsigned int cbLastFragmentSize, unsigned int uMACfragNum, unsigned char byFBOption, unsigned short wCurrentRate ); /*--------------------- Export Variables --------------------------*/ static void s_vFillTxKey ( PSDevice pDevice, unsigned char *pbyBuf, unsigned char *pbyIVHead, PSKeyItem pTransmitKey, unsigned char *pbyHdrBuf, unsigned short wPayloadLen, unsigned char *pMICHDR ) { unsigned long *pdwIV = (unsigned long *) pbyIVHead; unsigned long *pdwExtIV = (unsigned long *) ((unsigned char *)pbyIVHead+4); unsigned short wValue; PS802_11Header pMACHeader = 
(PS802_11Header)pbyHdrBuf; unsigned long dwRevIVCounter; unsigned char byKeyIndex = 0; //Fill TXKEY if (pTransmitKey == NULL) return; dwRevIVCounter = cpu_to_le32(pDevice->dwIVCounter); *pdwIV = pDevice->dwIVCounter; byKeyIndex = pTransmitKey->dwKeyIndex & 0xf; if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) { if (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN ){ memcpy(pDevice->abyPRNG, (unsigned char *)&(dwRevIVCounter), 3); memcpy(pDevice->abyPRNG+3, pTransmitKey->abyKey, pTransmitKey->uKeyLength); } else { memcpy(pbyBuf, (unsigned char *)&(dwRevIVCounter), 3); memcpy(pbyBuf+3, pTransmitKey->abyKey, pTransmitKey->uKeyLength); if(pTransmitKey->uKeyLength == WLAN_WEP40_KEYLEN) { memcpy(pbyBuf+8, (unsigned char *)&(dwRevIVCounter), 3); memcpy(pbyBuf+11, pTransmitKey->abyKey, pTransmitKey->uKeyLength); } memcpy(pDevice->abyPRNG, pbyBuf, 16); } // Append IV after Mac Header *pdwIV &= WEP_IV_MASK;//00000000 11111111 11111111 11111111 *pdwIV |= (byKeyIndex << 30); *pdwIV = cpu_to_le32(*pdwIV); pDevice->dwIVCounter++; if (pDevice->dwIVCounter > WEP_IV_MASK) { pDevice->dwIVCounter = 0; } } else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) { pTransmitKey->wTSC15_0++; if (pTransmitKey->wTSC15_0 == 0) { pTransmitKey->dwTSC47_16++; } TKIPvMixKey(pTransmitKey->abyKey, pDevice->abyCurrentNetAddr, pTransmitKey->wTSC15_0, pTransmitKey->dwTSC47_16, pDevice->abyPRNG); memcpy(pbyBuf, pDevice->abyPRNG, 16); // Make IV memcpy(pdwIV, pDevice->abyPRNG, 3); *(pbyIVHead+3) = (unsigned char)(((byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV // Append IV&ExtIV after Mac Header *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %lx\n", *pdwExtIV); } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) { pTransmitKey->wTSC15_0++; if (pTransmitKey->wTSC15_0 == 0) { pTransmitKey->dwTSC47_16++; } memcpy(pbyBuf, pTransmitKey->abyKey, 16); // Make IV *pdwIV = 0; *(pbyIVHead+3) = (unsigned char)(((byKeyIndex << 6) & 0xc0) | 
0x20); // 0x20 is ExtIV *pdwIV |= cpu_to_le16((unsigned short)(pTransmitKey->wTSC15_0)); //Append IV&ExtIV after Mac Header *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16); //Fill MICHDR0 *pMICHDR = 0x59; *((unsigned char *)(pMICHDR+1)) = 0; // TxPriority memcpy(pMICHDR+2, &(pMACHeader->abyAddr2[0]), 6); *((unsigned char *)(pMICHDR+8)) = HIBYTE(HIWORD(pTransmitKey->dwTSC47_16)); *((unsigned char *)(pMICHDR+9)) = LOBYTE(HIWORD(pTransmitKey->dwTSC47_16)); *((unsigned char *)(pMICHDR+10)) = HIBYTE(LOWORD(pTransmitKey->dwTSC47_16)); *((unsigned char *)(pMICHDR+11)) = LOBYTE(LOWORD(pTransmitKey->dwTSC47_16)); *((unsigned char *)(pMICHDR+12)) = HIBYTE(pTransmitKey->wTSC15_0); *((unsigned char *)(pMICHDR+13)) = LOBYTE(pTransmitKey->wTSC15_0); *((unsigned char *)(pMICHDR+14)) = HIBYTE(wPayloadLen); *((unsigned char *)(pMICHDR+15)) = LOBYTE(wPayloadLen); //Fill MICHDR1 *((unsigned char *)(pMICHDR+16)) = 0; // HLEN[15:8] if (pDevice->bLongHeader) { *((unsigned char *)(pMICHDR+17)) = 28; // HLEN[7:0] } else { *((unsigned char *)(pMICHDR+17)) = 22; // HLEN[7:0] } wValue = cpu_to_le16(pMACHeader->wFrameCtl & 0xC78F); memcpy(pMICHDR+18, (unsigned char *)&wValue, 2); // MSKFRACTL memcpy(pMICHDR+20, &(pMACHeader->abyAddr1[0]), 6); memcpy(pMICHDR+26, &(pMACHeader->abyAddr2[0]), 6); //Fill MICHDR2 memcpy(pMICHDR+32, &(pMACHeader->abyAddr3[0]), 6); wValue = pMACHeader->wSeqCtl; wValue &= 0x000F; wValue = cpu_to_le16(wValue); memcpy(pMICHDR+38, (unsigned char *)&wValue, 2); // MSKSEQCTL if (pDevice->bLongHeader) { memcpy(pMICHDR+40, &(pMACHeader->abyAddr4[0]), 6); } } } static void s_vSWencryption ( PSDevice pDevice, PSKeyItem pTransmitKey, unsigned char *pbyPayloadHead, unsigned short wPayloadSize ) { unsigned int cbICVlen = 4; unsigned long dwICV = 0xFFFFFFFFL; unsigned long *pdwICV; if (pTransmitKey == NULL) return; if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) { //======================================================================= // Append ICV after payload dwICV = 
CRCdwGetCrc32Ex(pbyPayloadHead, wPayloadSize, dwICV);//ICV(Payload) pdwICV = (unsigned long *)(pbyPayloadHead + wPayloadSize); // finally, we must invert dwCRC to get the correct answer *pdwICV = cpu_to_le32(~dwICV); // RC4 encryption rc4_init(&pDevice->SBox, pDevice->abyPRNG, pTransmitKey->uKeyLength + 3); rc4_encrypt(&pDevice->SBox, pbyPayloadHead, pbyPayloadHead, wPayloadSize+cbICVlen); //======================================================================= } else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) { //======================================================================= //Append ICV after payload dwICV = CRCdwGetCrc32Ex(pbyPayloadHead, wPayloadSize, dwICV);//ICV(Payload) pdwICV = (unsigned long *)(pbyPayloadHead + wPayloadSize); // finally, we must invert dwCRC to get the correct answer *pdwICV = cpu_to_le32(~dwICV); // RC4 encryption rc4_init(&pDevice->SBox, pDevice->abyPRNG, TKIP_KEY_LEN); rc4_encrypt(&pDevice->SBox, pbyPayloadHead, pbyPayloadHead, wPayloadSize+cbICVlen); //======================================================================= } } /*byPktType : PK_TYPE_11A 0 PK_TYPE_11B 1 PK_TYPE_11GB 2 PK_TYPE_11GA 3 */ static unsigned int s_uGetTxRsvTime ( PSDevice pDevice, unsigned char byPktType, unsigned int cbFrameLength, unsigned short wRate, bool bNeedAck ) { unsigned int uDataTime, uAckTime; uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wRate); #ifdef PLICE_DEBUG //printk("s_uGetTxRsvTime is %d\n",uDataTime); #endif if (byPktType == PK_TYPE_11B) {//llb,CCK mode uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (unsigned short)pDevice->byTopCCKBasicRate); } else {//11g 2.4G OFDM mode & 11a 5G OFDM mode uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (unsigned short)pDevice->byTopOFDMBasicRate); } if (bNeedAck) { return (uDataTime + pDevice->uSIFS + uAckTime); } else { return uDataTime; } } //byFreqType: 0=>5GHZ 1=>2.4GHZ static unsigned int s_uGetRTSCTSRsvTime ( 
PSDevice pDevice, unsigned char byRTSRsvType, unsigned char byPktType, unsigned int cbFrameLength, unsigned short wCurrentRate ) { unsigned int uRrvTime , uRTSTime, uCTSTime, uAckTime, uDataTime; uRrvTime = uRTSTime = uCTSTime = uAckTime = uDataTime = 0; uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wCurrentRate); if (byRTSRsvType == 0) { //RTSTxRrvTime_bb uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate); uCTSTime = uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate); } else if (byRTSRsvType == 1){ //RTSTxRrvTime_ba, only in 2.4GHZ uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate); uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate); uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate); } else if (byRTSRsvType == 2) { //RTSTxRrvTime_aa uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopOFDMBasicRate); uCTSTime = uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate); } else if (byRTSRsvType == 3) { //CTSTxRrvTime_ba, only in 2.4GHZ uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate); uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate); uRrvTime = uCTSTime + uAckTime + uDataTime + 2*pDevice->uSIFS; return uRrvTime; } //RTSRrvTime uRrvTime = uRTSTime + uCTSTime + uAckTime + uDataTime + 3*pDevice->uSIFS; return uRrvTime; } //byFreqType 0: 5GHz, 1:2.4Ghz static unsigned int s_uGetDataDuration ( PSDevice pDevice, unsigned char byDurType, unsigned int cbFrameLength, unsigned char byPktType, unsigned short wRate, bool bNeedAck, unsigned int uFragIdx, unsigned int cbLastFragmentSize, unsigned int uMACfragNum, unsigned char byFBOption ) { bool bLastFrag = 0; unsigned int 
uAckTime =0, uNextPktTime = 0; if (uFragIdx == (uMACfragNum-1)) { bLastFrag = 1; } switch (byDurType) { case DATADUR_B: //DATADUR_B if (((uMACfragNum == 1)) || (bLastFrag == 1)) {//Non Frag or Last Frag if (bNeedAck) { uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate); return (pDevice->uSIFS + uAckTime); } else { return 0; } } else {//First Frag or Mid Frag if (uFragIdx == (uMACfragNum-2)) { uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wRate, bNeedAck); } else { uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck); } if (bNeedAck) { uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate); return (pDevice->uSIFS + uAckTime + uNextPktTime); } else { return (pDevice->uSIFS + uNextPktTime); } } break; case DATADUR_A: //DATADUR_A if (((uMACfragNum==1)) || (bLastFrag==1)) {//Non Frag or Last Frag if(bNeedAck){ uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate); return (pDevice->uSIFS + uAckTime); } else { return 0; } } else {//First Frag or Mid Frag if(uFragIdx == (uMACfragNum-2)){ uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wRate, bNeedAck); } else { uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck); } if(bNeedAck){ uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate); return (pDevice->uSIFS + uAckTime + uNextPktTime); } else { return (pDevice->uSIFS + uNextPktTime); } } break; case DATADUR_A_F0: //DATADUR_A_F0 if (((uMACfragNum==1)) || (bLastFrag==1)) {//Non Frag or Last Frag if(bNeedAck){ uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate); return (pDevice->uSIFS + uAckTime); } else { return 0; } } else { //First Frag or Mid Frag if (byFBOption == AUTO_FB_0) { if (wRate < RATE_18M) wRate = RATE_18M; else if (wRate > RATE_54M) wRate = 
RATE_54M; if(uFragIdx == (uMACfragNum-2)){ uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck); } else { uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck); } } else { // (byFBOption == AUTO_FB_1) if (wRate < RATE_18M) wRate = RATE_18M; else if (wRate > RATE_54M) wRate = RATE_54M; if(uFragIdx == (uMACfragNum-2)){ uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck); } else { uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck); } } if(bNeedAck){ uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate); return (pDevice->uSIFS + uAckTime + uNextPktTime); } else { return (pDevice->uSIFS + uNextPktTime); } } break; case DATADUR_A_F1: //DATADUR_A_F1 if (((uMACfragNum==1)) || (bLastFrag==1)) {//Non Frag or Last Frag if(bNeedAck){ uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate); return (pDevice->uSIFS + uAckTime); } else { return 0; } } else { //First Frag or Mid Frag if (byFBOption == AUTO_FB_0) { if (wRate < RATE_18M) wRate = RATE_18M; else if (wRate > RATE_54M) wRate = RATE_54M; if(uFragIdx == (uMACfragNum-2)){ uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck); } else { uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck); } } else { // (byFBOption == AUTO_FB_1) if (wRate < RATE_18M) wRate = RATE_18M; else if (wRate > RATE_54M) wRate = RATE_54M; if(uFragIdx == (uMACfragNum-2)){ uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck); } else { uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck); } 
} if(bNeedAck){ uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate); return (pDevice->uSIFS + uAckTime + uNextPktTime); } else { return (pDevice->uSIFS + uNextPktTime); } } break; default: break; } ASSERT(false); return 0; } //byFreqType: 0=>5GHZ 1=>2.4GHZ static unsigned int s_uGetRTSCTSDuration ( PSDevice pDevice, unsigned char byDurType, unsigned int cbFrameLength, unsigned char byPktType, unsigned short wRate, bool bNeedAck, unsigned char byFBOption ) { unsigned int uCTSTime = 0, uDurTime = 0; switch (byDurType) { case RTSDUR_BB: //RTSDuration_bb uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate); uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck); break; case RTSDUR_BA: //RTSDuration_ba uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate); uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck); break; case RTSDUR_AA: //RTSDuration_aa uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate); uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck); break; case CTSDUR_BA: //CTSDuration_ba uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck); break; case RTSDUR_BA_F0: //RTSDuration_ba_f0 uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate); if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) { uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck); } else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) { uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, 
wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck); } break; case RTSDUR_AA_F0: //RTSDuration_aa_f0 uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate); if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) { uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck); } else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) { uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck); } break; case RTSDUR_BA_F1: //RTSDuration_ba_f1 uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate); if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) { uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck); } else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) { uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck); } break; case RTSDUR_AA_F1: //RTSDuration_aa_f1 uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate); if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) { uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck); } else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) { uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck); } break; case CTSDUR_BA_F0: //CTSDuration_ba_f0 if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) { uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, 
cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck); } else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) { uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck); } break; case CTSDUR_BA_F1: //CTSDuration_ba_f1 if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) { uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck); } else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) { uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck); } break; default: break; } return uDurTime; } static unsigned int s_uFillDataHead ( PSDevice pDevice, unsigned char byPktType, void * pTxDataHead, unsigned int cbFrameLength, unsigned int uDMAIdx, bool bNeedAck, unsigned int uFragIdx, unsigned int cbLastFragmentSize, unsigned int uMACfragNum, unsigned char byFBOption, unsigned short wCurrentRate ) { unsigned short wLen = 0x0000; if (pTxDataHead == NULL) { return 0; } if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) { if (byFBOption == AUTO_FB_NONE) { PSTxDataHead_g pBuf = (PSTxDataHead_g)pTxDataHead; //Get SignalField,ServiceField,Length BBvCaculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType, (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_a), (unsigned char *)&(pBuf->bySignalField_a) ); pBuf->wTransmitLength_a = cpu_to_le16(wLen); BBvCaculateParameter(pDevice, cbFrameLength, pDevice->byTopCCKBasicRate, PK_TYPE_11B, (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b) ); pBuf->wTransmitLength_b = cpu_to_le16(wLen); //Get Duration and TimeStamp pBuf->wDuration_a = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType, wCurrentRate, bNeedAck, uFragIdx, 
cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4GHz pBuf->wDuration_b = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, PK_TYPE_11B, pDevice->byTopCCKBasicRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4 pBuf->wTimeStampOff_a = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]); pBuf->wTimeStampOff_b = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][pDevice->byTopCCKBasicRate%MAX_RATE]); return (pBuf->wDuration_a); } else { // Auto Fallback PSTxDataHead_g_FB pBuf = (PSTxDataHead_g_FB)pTxDataHead; //Get SignalField,ServiceField,Length BBvCaculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType, (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_a), (unsigned char *)&(pBuf->bySignalField_a) ); pBuf->wTransmitLength_a = cpu_to_le16(wLen); BBvCaculateParameter(pDevice, cbFrameLength, pDevice->byTopCCKBasicRate, PK_TYPE_11B, (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b) ); pBuf->wTransmitLength_b = cpu_to_le16(wLen); //Get Duration and TimeStamp pBuf->wDuration_a = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType, wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4GHz pBuf->wDuration_b = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, PK_TYPE_11B, pDevice->byTopCCKBasicRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4GHz pBuf->wDuration_a_f0 = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4GHz pBuf->wDuration_a_f1 = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 
2.4GHz pBuf->wTimeStampOff_a = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]); pBuf->wTimeStampOff_b = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][pDevice->byTopCCKBasicRate%MAX_RATE]); return (pBuf->wDuration_a); } //if (byFBOption == AUTO_FB_NONE) } else if (byPktType == PK_TYPE_11A) { if ((byFBOption != AUTO_FB_NONE)) { // Auto Fallback PSTxDataHead_a_FB pBuf = (PSTxDataHead_a_FB)pTxDataHead; //Get SignalField,ServiceField,Length BBvCaculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType, (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField) ); pBuf->wTransmitLength = cpu_to_le16(wLen); //Get Duration and TimeStampOff pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType, wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //0: 5GHz pBuf->wDuration_f0 = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //0: 5GHz pBuf->wDuration_f1 = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //0: 5GHz pBuf->wTimeStampOff = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]); return (pBuf->wDuration); } else { PSTxDataHead_ab pBuf = (PSTxDataHead_ab)pTxDataHead; //Get SignalField,ServiceField,Length BBvCaculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType, (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField) ); pBuf->wTransmitLength = cpu_to_le16(wLen); //Get Duration and TimeStampOff pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType, wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, 
uMACfragNum, byFBOption)); pBuf->wTimeStampOff = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]); return (pBuf->wDuration); } } else { PSTxDataHead_ab pBuf = (PSTxDataHead_ab)pTxDataHead; //Get SignalField,ServiceField,Length BBvCaculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType, (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField) ); pBuf->wTransmitLength = cpu_to_le16(wLen); //Get Duration and TimeStampOff pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, byPktType, wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); pBuf->wTimeStampOff = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]); return (pBuf->wDuration); } return 0; } static void s_vFillRTSHead ( PSDevice pDevice, unsigned char byPktType, void * pvRTS, unsigned int cbFrameLength, bool bNeedAck, bool bDisCRC, PSEthernetHeader psEthHeader, unsigned short wCurrentRate, unsigned char byFBOption ) { unsigned int uRTSFrameLen = 20; unsigned short wLen = 0x0000; if (pvRTS == NULL) return; if (bDisCRC) { // When CRCDIS bit is on, H/W forgot to generate FCS for RTS frame, // in this case we need to decrease its length by 4. uRTSFrameLen -= 4; } // Note: So far RTSHead dosen't appear in ATIM & Beacom DMA, so we don't need to take them into account. // Otherwise, we need to modify codes for them. 
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) { if (byFBOption == AUTO_FB_NONE) { PSRTS_g pBuf = (PSRTS_g)pvRTS; //Get SignalField,ServiceField,Length BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B, (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b) ); pBuf->wTransmitLength_b = cpu_to_le16(wLen); BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType, (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_a), (unsigned char *)&(pBuf->bySignalField_a) ); pBuf->wTransmitLength_a = cpu_to_le16(wLen); //Get Duration pBuf->wDuration_bb = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, PK_TYPE_11B, pDevice->byTopCCKBasicRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData pBuf->wDuration_aa = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //2:RTSDuration_aa, 1:2.4G, 2,3: 2.4G OFDMData pBuf->wDuration_ba = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //1:RTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data pBuf->Data.wDurationID = pBuf->wDuration_aa; //Get RTS Frame body pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4 if ((pDevice->eOPMode == OP_MODE_ADHOC) || (pDevice->eOPMode == OP_MODE_AP)) { memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN); } else { memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN); } if (pDevice->eOPMode == OP_MODE_AP) { memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN); } else { memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN); } } else { PSRTS_g_FB pBuf = (PSRTS_g_FB)pvRTS; //Get SignalField,ServiceField,Length BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B, (unsigned short 
*)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b) ); pBuf->wTransmitLength_b = cpu_to_le16(wLen); BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType, (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_a), (unsigned char *)&(pBuf->bySignalField_a) ); pBuf->wTransmitLength_a = cpu_to_le16(wLen); //Get Duration pBuf->wDuration_bb = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, PK_TYPE_11B, pDevice->byTopCCKBasicRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData pBuf->wDuration_aa = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //2:RTSDuration_aa, 1:2.4G, 2,3:2.4G OFDMData pBuf->wDuration_ba = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //1:RTSDuration_ba, 1:2.4G, 2,3:2.4G OFDMData pBuf->wRTSDuration_ba_f0 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //4:wRTSDuration_ba_f0, 1:2.4G, 1:CCKData pBuf->wRTSDuration_aa_f0 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //5:wRTSDuration_aa_f0, 1:2.4G, 1:CCKData pBuf->wRTSDuration_ba_f1 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //6:wRTSDuration_ba_f1, 1:2.4G, 1:CCKData pBuf->wRTSDuration_aa_f1 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //7:wRTSDuration_aa_f1, 1:2.4G, 1:CCKData pBuf->Data.wDurationID = pBuf->wDuration_aa; //Get RTS Frame body pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4 if ((pDevice->eOPMode == OP_MODE_ADHOC) || 
(pDevice->eOPMode == OP_MODE_AP)) { memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN); } else { memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN); } if (pDevice->eOPMode == OP_MODE_AP) { memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN); } else { memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN); } } // if (byFBOption == AUTO_FB_NONE) } else if (byPktType == PK_TYPE_11A) { if (byFBOption == AUTO_FB_NONE) { PSRTS_ab pBuf = (PSRTS_ab)pvRTS; //Get SignalField,ServiceField,Length BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType, (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField) ); pBuf->wTransmitLength = cpu_to_le16(wLen); //Get Duration pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_aa, 0:5G, 0: 5G OFDMData pBuf->Data.wDurationID = pBuf->wDuration; //Get RTS Frame body pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4 if ((pDevice->eOPMode == OP_MODE_ADHOC) || (pDevice->eOPMode == OP_MODE_AP)) { memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN); } else { memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN); } if (pDevice->eOPMode == OP_MODE_AP) { memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN); } else { memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN); } } else { PSRTS_a_FB pBuf = (PSRTS_a_FB)pvRTS; //Get SignalField,ServiceField,Length BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType, (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField) ); pBuf->wTransmitLength = cpu_to_le16(wLen); //Get Duration pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, 
bNeedAck, byFBOption)); //0:RTSDuration_aa, 0:5G, 0: 5G OFDMData pBuf->wRTSDuration_f0 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //5:RTSDuration_aa_f0, 0:5G, 0: 5G OFDMData pBuf->wRTSDuration_f1 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //7:RTSDuration_aa_f1, 0:5G, 0: pBuf->Data.wDurationID = pBuf->wDuration; //Get RTS Frame body pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4 if ((pDevice->eOPMode == OP_MODE_ADHOC) || (pDevice->eOPMode == OP_MODE_AP)) { memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN); } else { memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN); } if (pDevice->eOPMode == OP_MODE_AP) { memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN); } else { memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN); } } } else if (byPktType == PK_TYPE_11B) { PSRTS_ab pBuf = (PSRTS_ab)pvRTS; //Get SignalField,ServiceField,Length BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B, (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField) ); pBuf->wTransmitLength = cpu_to_le16(wLen); //Get Duration pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData pBuf->Data.wDurationID = pBuf->wDuration; //Get RTS Frame body pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4 if ((pDevice->eOPMode == OP_MODE_ADHOC) || (pDevice->eOPMode == OP_MODE_AP)) { memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN); } else { memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN); } if (pDevice->eOPMode == OP_MODE_AP) { memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN); } 
	else {
		/* Tail of s_vFillRTSHead: STA mode - TA is our own source address. */
		memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
	}
    }
}

/*+
 *
 * Description:
 *      Fill the CTS (CTS-to-self) control-frame header that precedes an
 *      802.11g data frame.  Only meaningful for the mixed-mode packet types
 *      (PK_TYPE_11GB / PK_TYPE_11GA); for pure 11a/11b packets this routine
 *      is a no-op.
 *
 * Parameters:
 *  In:
 *      pDevice       - Pointer to adapter
 *      uDMAIdx       - TX DMA queue index (ATIM/beacon queues never use
 *                      the auto-fallback CTS layout)
 *      byPktType     - PHY packet type (PK_TYPE_11GB / PK_TYPE_11GA / ...)
 *      pvCTS         - CTS buffer to fill (PSCTS or PSCTS_FB layout,
 *                      selected below); NULL means "no CTS needed"
 *      cbFrameLength - Length of the data frame the CTS protects
 *      bNeedAck      - Whether the protected frame expects an ACK
 *      bDisCRC       - FIFOCTL_CRCDIS set: H/W omits the FCS, so the CTS
 *                      frame length must be shortened by 4
 *      wCurrentRate  - Current TX rate (used for duration computation)
 *      byFBOption    - Auto-fallback option (AUTO_FB_NONE/AUTO_FB_0/AUTO_FB_1)
 *  Out:
 *      none
 *
 * Return Value: none
 *
-*/
static void
s_vFillCTSHead (
    PSDevice pDevice,
    unsigned int uDMAIdx,
    unsigned char byPktType,
    void * pvCTS,
    unsigned int cbFrameLength,
    bool bNeedAck,
    bool bDisCRC,
    unsigned short wCurrentRate,
    unsigned char byFBOption
    )
{
    unsigned int uCTSFrameLen = 14;   /* CTS frame: FC(2)+Dur(2)+RA(6)+FCS(4) */
    unsigned short wLen = 0x0000;

    if (pvCTS == NULL) {
        return;
    }

    if (bDisCRC) {
        // When CRCDIS bit is on, H/W forgot to generate FCS for CTS frame,
        // in this case we need to decrease its length by 4.
        uCTSFrameLen -= 4;
    }

    if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
        if (byFBOption != AUTO_FB_NONE && uDMAIdx != TYPE_ATIMDMA && uDMAIdx != TYPE_BEACONDMA) {
            // Auto Fall back: extended CTS layout with per-fallback-rate durations
            PSCTS_FB pBuf = (PSCTS_FB)pvCTS;
            //Get SignalField,ServiceField,Length (CTS always sent at a CCK basic rate)
            BBvCaculateParameter(pDevice, uCTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
                (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b)
            );
            pBuf->wTransmitLength_b = cpu_to_le16(wLen);
            pBuf->wDuration_ba = (unsigned short)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //3:CTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
            pBuf->wDuration_ba += pDevice->wCTSDuration;
            pBuf->wDuration_ba = cpu_to_le16(pBuf->wDuration_ba);
            //Get CTSDuration_ba_f0 (fallback rate 0)
            pBuf->wCTSDuration_ba_f0 = (unsigned short)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //8:CTSDuration_ba_f0, 1:2.4G, 2,3:2.4G OFDM Data
            pBuf->wCTSDuration_ba_f0 += pDevice->wCTSDuration;
            pBuf->wCTSDuration_ba_f0 = cpu_to_le16(pBuf->wCTSDuration_ba_f0);
            //Get CTSDuration_ba_f1 (fallback rate 1)
            pBuf->wCTSDuration_ba_f1 = (unsigned short)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //9:CTSDuration_ba_f1, 1:2.4G, 2,3:2.4G OFDM Data
            pBuf->wCTSDuration_ba_f1 += pDevice->wCTSDuration;
            pBuf->wCTSDuration_ba_f1 = cpu_to_le16(pBuf->wCTSDuration_ba_f1);
            //Get CTS Frame body: CTS-to-self, so RA is our own MAC address
            pBuf->Data.wDurationID = pBuf->wDuration_ba;
            pBuf->Data.wFrameControl = TYPE_CTL_CTS;//0x00C4
            pBuf->Data.wReserved = 0x0000;
            memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyCurrentNetAddr[0]), ETH_ALEN);
        } else { //if (byFBOption != AUTO_FB_NONE && uDMAIdx != TYPE_ATIMDMA && uDMAIdx != TYPE_BEACONDMA)
            // No auto-fallback (or ATIM/beacon queue): plain CTS layout
            PSCTS pBuf = (PSCTS)pvCTS;
            //Get SignalField,ServiceField,Length
            BBvCaculateParameter(pDevice, uCTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
                (unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b)
            );
            pBuf->wTransmitLength_b = cpu_to_le16(wLen);
            //Get CTSDuration_ba
            // NOTE(review): here the duration is stored already byte-swapped by
            // cpu_to_le16(), then wCTSDuration is added and cpu_to_le16() is
            // applied a second time.  Both swaps are no-ops on little-endian
            // CPUs, but on big-endian this double conversion looks wrong --
            // compare the AUTO_FB branch above, which swaps only once at the
            // end.  Confirm before relying on big-endian behavior.
            pBuf->wDuration_ba = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //3:CTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
            pBuf->wDuration_ba += pDevice->wCTSDuration;
            pBuf->wDuration_ba = cpu_to_le16(pBuf->wDuration_ba);
            //Get CTS Frame body
            pBuf->Data.wDurationID = pBuf->wDuration_ba;
            pBuf->Data.wFrameControl = TYPE_CTL_CTS;//0x00C4
            pBuf->Data.wReserved = 0x0000;
            memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyCurrentNetAddr[0]), ETH_ALEN);
        }
    }
}

/*+
 *
 * Description:
 *      Generate FIFO control for MAC & Baseband controller: fill the
 *      reserve-time block and either the RTS or the CTS protection header
 *      (layout chosen by packet type and fallback option) ahead of a
 *      transmit frame.
 *
 * Parameters:
 *  In:
 *      pDevice      - Pointer to adapter
 *      byPktType    - PHY packet type
 *      pTxBufHead   - TX buffer header (PSTxBufHead; wFIFOCtl is consulted)
 *      pvRrvTime    - Reserve-time block to fill (may be NULL)
 *      pvRTS        - RTS buffer; non-NULL selects RTS protection
 *      pvCTS        - CTS buffer; used when pvRTS is NULL (11g only)
 *      cbFrameSize  - Transmit Data Length (Hdr+Payload+FCS)
 *      bNeedACK     - If need ACK
 *      uDMAIdx      - TX DMA queue index
 *      psEthHeader  - Ethernet header of the frame (for RTS RA/TA)
 *      wCurrentRate - Current TX rate
 *  Out:
 *      none
 *
 * Return Value: none
 *
-*/
// unsigned int cbFrameSize,//Hdr+Payload+FCS
static void
s_vGenerateTxParameter (
    PSDevice pDevice,
    unsigned char byPktType,
    void * pTxBufHead,
    void * pvRrvTime,
    void * pvRTS,
    void * pvCTS,
    unsigned int cbFrameSize,
    bool bNeedACK,
    unsigned int uDMAIdx,
    PSEthernetHeader psEthHeader,
    unsigned short wCurrentRate
    )
{
    unsigned int cbMACHdLen = WLAN_HDR_ADDR3_LEN; //24
    unsigned
short wFifoCtl;
    bool bDisCRC = false;
    unsigned char byFBOption = AUTO_FB_NONE;
    // unsigned short wCurrentRate = pDevice->wCurrentRate;

    //DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vGenerateTxParameter...\n");
    PSTxBufHead pFifoHead = (PSTxBufHead)pTxBufHead;
    // The wReserved field of the TX buffer head is used to stash the
    // current rate for later stages of the TX path.
    pFifoHead->wReserved = wCurrentRate;
    wFifoCtl = pFifoHead->wFIFOCtl;

    if (wFifoCtl & FIFOCTL_CRCDIS) {
        bDisCRC = true;
    }

    if (wFifoCtl & FIFOCTL_AUTO_FB_0) {
        byFBOption = AUTO_FB_0;
    }
    else if (wFifoCtl & FIFOCTL_AUTO_FB_1) {
        byFBOption = AUTO_FB_1;
    }

    if (pDevice->bLongHeader)
        cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;

    if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
        // 802.11g packet: reserve-time block carries both OFDM and CCK times
        if (pvRTS != NULL) { //RTS_need
            //Fill RsvTime
            if (pvRrvTime) {
                PSRrvTime_gRTS pBuf = (PSRrvTime_gRTS)pvRrvTime;
                pBuf->wRTSTxRrvTime_aa = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 2, byPktType, cbFrameSize, wCurrentRate));//2:RTSTxRrvTime_aa, 1:2.4GHz
                pBuf->wRTSTxRrvTime_ba = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 1, byPktType, cbFrameSize, wCurrentRate));//1:RTSTxRrvTime_ba, 1:2.4GHz
                pBuf->wRTSTxRrvTime_bb = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 0, byPktType, cbFrameSize, wCurrentRate));//0:RTSTxRrvTime_bb, 1:2.4GHz
                pBuf->wTxRrvTime_a = cpu_to_le16((unsigned short) s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//2.4G OFDM
                pBuf->wTxRrvTime_b = cpu_to_le16((unsigned short) s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate, bNeedACK));//1:CCK
            }
            //Fill RTS
            s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
        }
        else {//RTS_needless, PCF mode
            //Fill RsvTime
            if (pvRrvTime) {
                PSRrvTime_gCTS pBuf = (PSRrvTime_gCTS)pvRrvTime;
                pBuf->wTxRrvTime_a = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//2.4G OFDM
                pBuf->wTxRrvTime_b = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate, bNeedACK));//1:CCK
                pBuf->wCTSTxRrvTime_ba = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 3, byPktType, cbFrameSize, wCurrentRate));//3:CTSTxRrvTime_Ba, 1:2.4GHz
            }
            //Fill CTS
            s_vFillCTSHead(pDevice, uDMAIdx, byPktType, pvCTS, cbFrameSize, bNeedACK, bDisCRC, wCurrentRate, byFBOption);
        }
    }
    else if (byPktType == PK_TYPE_11A) {
        if (pvRTS != NULL) {//RTS_need, non PCF mode
            //Fill RsvTime
            if (pvRrvTime) {
                PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
                pBuf->wRTSTxRrvTime = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 2, byPktType, cbFrameSize, wCurrentRate));//2:RTSTxRrvTime_aa, 0:5GHz
                pBuf->wTxRrvTime = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//0:OFDM
            }
            //Fill RTS
            s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
        }
        else if (pvRTS == NULL) {//RTS_needless, non PCF mode
            //Fill RsvTime
            if (pvRrvTime) {
                PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
                pBuf->wTxRrvTime = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, PK_TYPE_11A, cbFrameSize, wCurrentRate, bNeedACK)); //0:OFDM
            }
        }
    }
    else if (byPktType == PK_TYPE_11B) {
        if ((pvRTS != NULL)) {//RTS_need, non PCF mode
            //Fill RsvTime
            if (pvRrvTime) {
                PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
                pBuf->wRTSTxRrvTime = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 0, byPktType, cbFrameSize, wCurrentRate));//0:RTSTxRrvTime_bb, 1:2.4GHz
                pBuf->wTxRrvTime = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, wCurrentRate, bNeedACK));//1:CCK
            }
            //Fill RTS
            s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
        }
        else { //RTS_needless, non PCF mode
            //Fill RsvTime
            if (pvRrvTime) {
                PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
                pBuf->wTxRrvTime = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, wCurrentRate, bNeedACK)); //1:CCK
            }
        }
    }
    //DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vGenerateTxParameter END.\n");
}

/*
    unsigned char *pbyBuffer,//point to pTxBufHead
    unsigned short wFragType,//00:Non-Frag, 01:Start, 02:Mid, 03:Last
    unsigned int cbFragmentSize,//Hdr+payoad+FCS
*/
/*
 * Finalize a fragment's TX descriptor: copy FIFO control / timestamp into a
 * sync descriptor (SYNC DMA queue only), program the request byte count and
 * TCR start/end-of-packet bits (plus EDMSDU on the last fragment), and OR the
 * fragment type into the buffer head's wFragCtl.
 */
static void
s_vFillFragParameter(
    PSDevice pDevice,
    unsigned char *pbyBuffer,
    unsigned int uTxType,
    void * pvtdCurr,
    unsigned short wFragType,
    unsigned int cbReqCount
    )
{
    PSTxBufHead pTxBufHead = (PSTxBufHead) pbyBuffer;
    //DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vFillFragParameter...\n");

    if (uTxType == TYPE_SYNCDMA) {
        //PSTxSyncDesc ptdCurr = (PSTxSyncDesc)s_pvGetTxDescHead(pDevice, uTxType, uCurIdx);
        PSTxSyncDesc ptdCurr = (PSTxSyncDesc)pvtdCurr;

        //Set FIFOCtl & TimeStamp in TxSyncDesc
        ptdCurr->m_wFIFOCtl = pTxBufHead->wFIFOCtl;
        ptdCurr->m_wTimeStamp = pTxBufHead->wTimeStamp;
        //Set TSR1 & ReqCount in TxDescHead
        ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
        if (wFragType == FRAGCTL_ENDFRAG) { //Last Fragmentation
            ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
        }
        else {
            ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP);
        }
    }
    else {
        //PSTxDesc ptdCurr = (PSTxDesc)s_pvGetTxDescHead(pDevice, uTxType, uCurIdx);
        PSTxDesc ptdCurr = (PSTxDesc)pvtdCurr;
        //Set TSR1 & ReqCount in TxDescHead
        ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
        if (wFragType == FRAGCTL_ENDFRAG) { //Last Fragmentation
            ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
        }
        else {
            ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP);
        }
    }

    pTxBufHead->wFragCtl |= (unsigned short)wFragType;//0x0001; //0000 0000 0000 0001

    //DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vFillFragParameter END\n");
}

static unsigned int
s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyTxBufferAddr,
        unsigned int cbFrameBodySize, unsigned int uDMAIdx, PSTxDesc pHeadTD,
        PSEthernetHeader psEthHeader, unsigned char *pPacket, bool bNeedEncrypt,
        PSKeyItem pTransmitKey, unsigned int uNodeIndex, unsigned int *puMACfragNum)
{
    unsigned int cbMACHdLen;
unsigned int cbFrameSize; unsigned int cbFragmentSize; //Hdr+(IV)+payoad+(MIC)+(ICV)+FCS unsigned int cbFragPayloadSize; unsigned int cbLastFragmentSize; //Hdr+(IV)+payoad+(MIC)+(ICV)+FCS unsigned int cbLastFragPayloadSize; unsigned int uFragIdx; unsigned char *pbyPayloadHead; unsigned char *pbyIVHead; unsigned char *pbyMacHdr; unsigned short wFragType; //00:Non-Frag, 01:Start, 10:Mid, 11:Last unsigned int uDuration; unsigned char *pbyBuffer; // unsigned int uKeyEntryIdx = NUM_KEY_ENTRY+1; // unsigned char byKeySel = 0xFF; unsigned int cbIVlen = 0; unsigned int cbICVlen = 0; unsigned int cbMIClen = 0; unsigned int cbFCSlen = 4; unsigned int cb802_1_H_len = 0; unsigned int uLength = 0; unsigned int uTmpLen = 0; // unsigned char abyTmp[8]; // unsigned long dwCRC; unsigned int cbMICHDR = 0; unsigned long dwMICKey0, dwMICKey1; unsigned long dwMIC_Priority; unsigned long *pdwMIC_L; unsigned long *pdwMIC_R; unsigned long dwSafeMIC_L, dwSafeMIC_R; //Fix "Last Frag Size" < "MIC length". bool bMIC2Frag = false; unsigned int uMICFragLen = 0; unsigned int uMACfragNum = 1; unsigned int uPadding = 0; unsigned int cbReqCount = 0; bool bNeedACK; bool bRTS; bool bIsAdhoc; unsigned char *pbyType; PSTxDesc ptdCurr; PSTxBufHead psTxBufHd = (PSTxBufHead) pbyTxBufferAddr; // unsigned int tmpDescIdx; unsigned int cbHeaderLength = 0; void * pvRrvTime; PSMICHDRHead pMICHDR; void * pvRTS; void * pvCTS; void * pvTxDataHd; unsigned short wTxBufSize; // FFinfo size unsigned int uTotalCopyLength = 0; unsigned char byFBOption = AUTO_FB_NONE; bool bIsWEP256 = false; PSMgmtObject pMgmt = pDevice->pMgmt; pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL; //DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_cbFillTxBufHead...\n"); if ((pDevice->eOPMode == OP_MODE_ADHOC) || (pDevice->eOPMode == OP_MODE_AP)) { if (is_multicast_ether_addr(&(psEthHeader->abyDstAddr[0]))) bNeedACK = false; else bNeedACK = true; bIsAdhoc = true; } else { // MSDUs in Infra mode always need ACK bNeedACK = true; bIsAdhoc = false; } 
if (pDevice->bLongHeader) cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6; else cbMACHdLen = WLAN_HDR_ADDR3_LEN; if ((bNeedEncrypt == true) && (pTransmitKey != NULL)) { if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) { cbIVlen = 4; cbICVlen = 4; if (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN) { bIsWEP256 = true; } } if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) { cbIVlen = 8;//IV+ExtIV cbMIClen = 8; cbICVlen = 4; } if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) { cbIVlen = 8;//RSN Header cbICVlen = 8;//MIC cbMICHDR = sizeof(SMICHDRHead); } if (pDevice->byLocalID > REV_ID_VT3253_A1) { //MAC Header should be padding 0 to DW alignment. uPadding = 4 - (cbMACHdLen%4); uPadding %= 4; } } cbFrameSize = cbMACHdLen + cbIVlen + (cbFrameBodySize + cbMIClen) + cbICVlen + cbFCSlen; if ((bNeedACK == false) || (cbFrameSize < pDevice->wRTSThreshold) || ((cbFrameSize >= pDevice->wFragmentationThreshold) && (pDevice->wFragmentationThreshold <= pDevice->wRTSThreshold)) ) { bRTS = false; } else { bRTS = true; psTxBufHd->wFIFOCtl |= (FIFOCTL_RTS | FIFOCTL_LRETRY); } // // Use for AUTO FALL BACK // if (psTxBufHd->wFIFOCtl & FIFOCTL_AUTO_FB_0) { byFBOption = AUTO_FB_0; } else if (psTxBufHd->wFIFOCtl & FIFOCTL_AUTO_FB_1) { byFBOption = AUTO_FB_1; } ////////////////////////////////////////////////////// //Set RrvTime/RTS/CTS Buffer wTxBufSize = sizeof(STxBufHead); if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet if (byFBOption == AUTO_FB_NONE) { if (bRTS == true) {//RTS_need pvRrvTime = (PSRrvTime_gRTS) (pbyTxBufferAddr + wTxBufSize); pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS)); pvRTS = (PSRTS_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR); pvCTS = NULL; pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g)); cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g) + sizeof(STxDataHead_g); } else { //RTS_needless pvRrvTime = 
(PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize); pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS)); pvRTS = NULL; pvCTS = (PSCTS) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR); pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS)); cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS) + sizeof(STxDataHead_g); } } else { // Auto Fall Back if (bRTS == true) {//RTS_need pvRrvTime = (PSRrvTime_gRTS) (pbyTxBufferAddr + wTxBufSize); pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS)); pvRTS = (PSRTS_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR); pvCTS = NULL; pvTxDataHd = (PSTxDataHead_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g_FB)); cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g_FB) + sizeof(STxDataHead_g_FB); } else { //RTS_needless pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize); pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS)); pvRTS = NULL; pvCTS = (PSCTS_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR); pvTxDataHd = (PSTxDataHead_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS_FB)); cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS_FB) + sizeof(STxDataHead_g_FB); } } // Auto Fall Back } else {//802.11a/b packet if (byFBOption == AUTO_FB_NONE) { if (bRTS == true) { pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize); pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab)); pvRTS = (PSRTS_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR); pvCTS = NULL; pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(PSRrvTime_ab) + cbMICHDR + sizeof(SRTS_ab)); cbHeaderLength = wTxBufSize + sizeof(PSRrvTime_ab) + cbMICHDR + 
sizeof(SRTS_ab) + sizeof(STxDataHead_ab); } else { //RTS_needless, need MICHDR pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize); pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab)); pvRTS = NULL; pvCTS = NULL; pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR); cbHeaderLength = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(STxDataHead_ab); } } else { // Auto Fall Back if (bRTS == true) {//RTS_need pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize); pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab)); pvRTS = (PSRTS_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR); pvCTS = NULL; pvTxDataHd = (PSTxDataHead_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(PSRrvTime_ab) + cbMICHDR + sizeof(SRTS_a_FB)); cbHeaderLength = wTxBufSize + sizeof(PSRrvTime_ab) + cbMICHDR + sizeof(SRTS_a_FB) + sizeof(STxDataHead_a_FB); } else { //RTS_needless pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize); pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab)); pvRTS = NULL; pvCTS = NULL; pvTxDataHd = (PSTxDataHead_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR); cbHeaderLength = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(STxDataHead_a_FB); } } // Auto Fall Back } memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderLength - wTxBufSize)); ////////////////////////////////////////////////////////////////// if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) { if (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) { dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[16]); dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[20]); } else if ((pTransmitKey->dwKeyIndex & AUTHENTICATOR_KEY) != 0) { dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[16]); dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[20]); } else { dwMICKey0 = *(unsigned 
long *)(&pTransmitKey->abyKey[24]); dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[28]); } // DO Software Michael MIC_vInit(dwMICKey0, dwMICKey1); MIC_vAppend((unsigned char *)&(psEthHeader->abyDstAddr[0]), 12); dwMIC_Priority = 0; MIC_vAppend((unsigned char *)&dwMIC_Priority, 4); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1); } /////////////////////////////////////////////////////////////////// pbyMacHdr = (unsigned char *)(pbyTxBufferAddr + cbHeaderLength); pbyPayloadHead = (unsigned char *)(pbyMacHdr + cbMACHdLen + uPadding + cbIVlen); pbyIVHead = (unsigned char *)(pbyMacHdr + cbMACHdLen + uPadding); if ((cbFrameSize > pDevice->wFragmentationThreshold) && (bNeedACK == true) && (bIsWEP256 == false)) { // Fragmentation // FragThreshold = Fragment size(Hdr+(IV)+fragment payload+(MIC)+(ICV)+FCS) cbFragmentSize = pDevice->wFragmentationThreshold; cbFragPayloadSize = cbFragmentSize - cbMACHdLen - cbIVlen - cbICVlen - cbFCSlen; //FragNum = (FrameSize-(Hdr+FCS))/(Fragment Size -(Hrd+FCS))) uMACfragNum = (unsigned short) ((cbFrameBodySize + cbMIClen) / cbFragPayloadSize); cbLastFragPayloadSize = (cbFrameBodySize + cbMIClen) % cbFragPayloadSize; if (cbLastFragPayloadSize == 0) { cbLastFragPayloadSize = cbFragPayloadSize; } else { uMACfragNum++; } //[Hdr+(IV)+last fragment payload+(MIC)+(ICV)+FCS] cbLastFragmentSize = cbMACHdLen + cbLastFragPayloadSize + cbIVlen + cbICVlen + cbFCSlen; for (uFragIdx = 0; uFragIdx < uMACfragNum; uFragIdx ++) { if (uFragIdx == 0) { //========================= // Start Fragmentation //========================= DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Start Fragmentation...\n"); wFragType = FRAGCTL_STAFRAG; //Fill FIFO,RrvTime,RTS,and CTS s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS, cbFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate); //Fill DataHead uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFragmentSize, uDMAIdx, bNeedACK, 
uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate); // Generate TX MAC Header vGenerateMACHeader(pDevice, pbyMacHdr, (unsigned short)uDuration, psEthHeader, bNeedEncrypt, wFragType, uDMAIdx, uFragIdx); if (bNeedEncrypt == true) { //Fill TXKEY s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey, pbyMacHdr, (unsigned short)cbFragPayloadSize, (unsigned char *)pMICHDR); //Fill IV(ExtIV,RSNHDR) if (pDevice->bEnableHostWEP) { pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16; pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0; } } // 802.1H if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) { if ((psEthHeader->wType == TYPE_PKT_IPX) || (psEthHeader->wType == cpu_to_le16(0xF380))) { memcpy((unsigned char *) (pbyPayloadHead), &pDevice->abySNAP_Bridgetunnel[0], 6); } else { memcpy((unsigned char *) (pbyPayloadHead), &pDevice->abySNAP_RFC1042[0], 6); } pbyType = (unsigned char *) (pbyPayloadHead + 6); memcpy(pbyType, &(psEthHeader->wType), sizeof(unsigned short)); cb802_1_H_len = 8; } cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cbFragPayloadSize; //--------------------------- // S/W or H/W Encryption //--------------------------- //Fill MICHDR //if (pDevice->bAES) { // s_vFillMICHDR(pDevice, (unsigned char *)pMICHDR, pbyMacHdr, (unsigned short)cbFragPayloadSize); //} //cbReqCount += s_uDoEncryption(pDevice, psEthHeader, (void *)psTxBufHd, byKeySel, // pbyPayloadHead, (unsigned short)cbFragPayloadSize, uDMAIdx); //pbyBuffer = (unsigned char *)pDevice->aamTxBuf[uDMAIdx][uDescIdx].pbyVAddr; pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf; uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cb802_1_H_len; //copy TxBufferHeader + MacHeader to desc memcpy(pbyBuffer, (void *)psTxBufHd, uLength); // Copy the Packet into a tx Buffer memcpy((pbyBuffer + uLength), (pPacket + 14), (cbFragPayloadSize - cb802_1_H_len)); uTotalCopyLength += cbFragPayloadSize - 
cb802_1_H_len; if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Start MIC: %d\n", cbFragPayloadSize); MIC_vAppend((pbyBuffer + uLength - cb802_1_H_len), cbFragPayloadSize); } //--------------------------- // S/W Encryption //--------------------------- if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) { if (bNeedEncrypt) { s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength - cb802_1_H_len), (unsigned short)cbFragPayloadSize); cbReqCount += cbICVlen; } } ptdCurr = (PSTxDesc)pHeadTD; //-------------------- //1.Set TSR1 & ReqCount in TxDescHead //2.Set FragCtl in TxBufferHead //3.Set Frame Control //4.Set Sequence Control //5.Get S/W generate FCS //-------------------- s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount); ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding; ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength; ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma; ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma); pDevice->iTDUsed[uDMAIdx]++; pHeadTD = ptdCurr->next; } else if (uFragIdx == (uMACfragNum-1)) { //========================= // Last Fragmentation //========================= DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Last Fragmentation...\n"); //tmpDescIdx = (uDescIdx + uFragIdx) % pDevice->cbTD[uDMAIdx]; wFragType = FRAGCTL_ENDFRAG; //Fill FIFO,RrvTime,RTS,and CTS s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS, cbLastFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate); //Fill DataHead uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbLastFragmentSize, uDMAIdx, bNeedACK, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate); // Generate TX MAC Header vGenerateMACHeader(pDevice, pbyMacHdr, (unsigned short)uDuration, psEthHeader, bNeedEncrypt, wFragType, uDMAIdx, uFragIdx); if (bNeedEncrypt == true) { //Fill TXKEY 
s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey, pbyMacHdr, (unsigned short)cbLastFragPayloadSize, (unsigned char *)pMICHDR); if (pDevice->bEnableHostWEP) { pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16; pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0; } } cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cbLastFragPayloadSize; //--------------------------- // S/W or H/W Encryption //--------------------------- pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf; //pbyBuffer = (unsigned char *)pDevice->aamTxBuf[uDMAIdx][tmpDescIdx].pbyVAddr; uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen; //copy TxBufferHeader + MacHeader to desc memcpy(pbyBuffer, (void *)psTxBufHd, uLength); // Copy the Packet into a tx Buffer if (bMIC2Frag == false) { memcpy((pbyBuffer + uLength), (pPacket + 14 + uTotalCopyLength), (cbLastFragPayloadSize - cbMIClen) ); //TODO check uTmpLen ! uTmpLen = cbLastFragPayloadSize - cbMIClen; } if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"LAST: uMICFragLen:%d, cbLastFragPayloadSize:%d, uTmpLen:%d\n", uMICFragLen, cbLastFragPayloadSize, uTmpLen); if (bMIC2Frag == false) { if (uTmpLen != 0) MIC_vAppend((pbyBuffer + uLength), uTmpLen); pdwMIC_L = (unsigned long *)(pbyBuffer + uLength + uTmpLen); pdwMIC_R = (unsigned long *)(pbyBuffer + uLength + uTmpLen + 4); MIC_vGetMIC(pdwMIC_L, pdwMIC_R); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Last MIC:%lX, %lX\n", *pdwMIC_L, *pdwMIC_R); } else { if (uMICFragLen >= 4) { memcpy((pbyBuffer + uLength), ((unsigned char *)&dwSafeMIC_R + (uMICFragLen - 4)), (cbMIClen - uMICFragLen)); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"LAST: uMICFragLen >= 4: %X, %d\n", *(unsigned char *)((unsigned char *)&dwSafeMIC_R + (uMICFragLen - 4)), (cbMIClen - uMICFragLen)); } else { memcpy((pbyBuffer + uLength), ((unsigned char *)&dwSafeMIC_L + uMICFragLen), 
(4 - uMICFragLen)); memcpy((pbyBuffer + uLength + (4 - uMICFragLen)), &dwSafeMIC_R, 4); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"LAST: uMICFragLen < 4: %X, %d\n", *(unsigned char *)((unsigned char *)&dwSafeMIC_R + uMICFragLen - 4), (cbMIClen - uMICFragLen)); } /* for (ii = 0; ii < cbLastFragPayloadSize + 8 + 24; ii++) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *((unsigned char *)((pbyBuffer + uLength) + ii - 8 - 24))); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n\n"); */ } MIC_vUnInit(); } else { ASSERT(uTmpLen == (cbLastFragPayloadSize - cbMIClen)); } //--------------------------- // S/W Encryption //--------------------------- if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) { if (bNeedEncrypt) { s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength), (unsigned short)cbLastFragPayloadSize); cbReqCount += cbICVlen; } } ptdCurr = (PSTxDesc)pHeadTD; //-------------------- //1.Set TSR1 & ReqCount in TxDescHead //2.Set FragCtl in TxBufferHead //3.Set Frame Control //4.Set Sequence Control //5.Get S/W generate FCS //-------------------- s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount); ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding; ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength; ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma; ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma); pDevice->iTDUsed[uDMAIdx]++; pHeadTD = ptdCurr->next; } else { //========================= // Middle Fragmentation //========================= DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Middle Fragmentation...\n"); //tmpDescIdx = (uDescIdx + uFragIdx) % pDevice->cbTD[uDMAIdx]; wFragType = FRAGCTL_MIDFRAG; //Fill FIFO,RrvTime,RTS,and CTS s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS, cbFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate); //Fill DataHead uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFragmentSize, uDMAIdx, bNeedACK, uFragIdx, cbLastFragmentSize, uMACfragNum, 
byFBOption, pDevice->wCurrentRate); // Generate TX MAC Header vGenerateMACHeader(pDevice, pbyMacHdr, (unsigned short)uDuration, psEthHeader, bNeedEncrypt, wFragType, uDMAIdx, uFragIdx); if (bNeedEncrypt == true) { //Fill TXKEY s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey, pbyMacHdr, (unsigned short)cbFragPayloadSize, (unsigned char *)pMICHDR); if (pDevice->bEnableHostWEP) { pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16; pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0; } } cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cbFragPayloadSize; //--------------------------- // S/W or H/W Encryption //--------------------------- //Fill MICHDR //if (pDevice->bAES) { // s_vFillMICHDR(pDevice, (unsigned char *)pMICHDR, pbyMacHdr, (unsigned short)cbFragPayloadSize); //} //cbReqCount += s_uDoEncryption(pDevice, psEthHeader, (void *)psTxBufHd, byKeySel, // pbyPayloadHead, (unsigned short)cbFragPayloadSize, uDMAIdx); pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf; //pbyBuffer = (unsigned char *)pDevice->aamTxBuf[uDMAIdx][tmpDescIdx].pbyVAddr; uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen; //copy TxBufferHeader + MacHeader to desc memcpy(pbyBuffer, (void *)psTxBufHd, uLength); // Copy the Packet into a tx Buffer memcpy((pbyBuffer + uLength), (pPacket + 14 + uTotalCopyLength), cbFragPayloadSize ); uTmpLen = cbFragPayloadSize; uTotalCopyLength += uTmpLen; if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) { MIC_vAppend((pbyBuffer + uLength), uTmpLen); if (uTmpLen < cbFragPayloadSize) { bMIC2Frag = true; uMICFragLen = cbFragPayloadSize - uTmpLen; ASSERT(uMICFragLen < cbMIClen); pdwMIC_L = (unsigned long *)(pbyBuffer + uLength + uTmpLen); pdwMIC_R = (unsigned long *)(pbyBuffer + uLength + uTmpLen + 4); MIC_vGetMIC(pdwMIC_L, pdwMIC_R); dwSafeMIC_L = *pdwMIC_L; dwSafeMIC_R = *pdwMIC_R; DBG_PRT(MSG_LEVEL_DEBUG, 
KERN_INFO"MIDDLE: uMICFragLen:%d, cbFragPayloadSize:%d, uTmpLen:%d\n", uMICFragLen, cbFragPayloadSize, uTmpLen); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Fill MIC in Middle frag [%d]\n", uMICFragLen); /* for (ii = 0; ii < uMICFragLen; ii++) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *((unsigned char *)((pbyBuffer + uLength + uTmpLen) + ii))); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); */ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Get MIC:%lX, %lX\n", *pdwMIC_L, *pdwMIC_R); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Middle frag len: %d\n", uTmpLen); /* for (ii = 0; ii < uTmpLen; ii++) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *((unsigned char *)((pbyBuffer + uLength) + ii))); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n\n"); */ } else { ASSERT(uTmpLen == (cbFragPayloadSize)); } if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) { if (bNeedEncrypt) { s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength), (unsigned short)cbFragPayloadSize); cbReqCount += cbICVlen; } } ptdCurr = (PSTxDesc)pHeadTD; //-------------------- //1.Set TSR1 & ReqCount in TxDescHead //2.Set FragCtl in TxBufferHead //3.Set Frame Control //4.Set Sequence Control //5.Get S/W generate FCS //-------------------- s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount); ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding; ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength; ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma; ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma); pDevice->iTDUsed[uDMAIdx]++; pHeadTD = ptdCurr->next; } } // for (uMACfragNum) } else { //========================= // No Fragmentation //========================= //DBG_PRTGRP03(("No Fragmentation...\n")); //DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"No Fragmentation...\n"); wFragType = FRAGCTL_NONFRAG; //Set FragCtl in TxBufferHead psTxBufHd->wFragCtl |= (unsigned short)wFragType; //Fill FIFO,RrvTime,RTS,and CTS s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS, 
cbFrameSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate); //Fill DataHead uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, uDMAIdx, bNeedACK, 0, 0, uMACfragNum, byFBOption, pDevice->wCurrentRate); // Generate TX MAC Header vGenerateMACHeader(pDevice, pbyMacHdr, (unsigned short)uDuration, psEthHeader, bNeedEncrypt, wFragType, uDMAIdx, 0); if (bNeedEncrypt == true) { //Fill TXKEY s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey, pbyMacHdr, (unsigned short)cbFrameBodySize, (unsigned char *)pMICHDR); if (pDevice->bEnableHostWEP) { pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16; pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0; } } // 802.1H if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) { if ((psEthHeader->wType == TYPE_PKT_IPX) || (psEthHeader->wType == cpu_to_le16(0xF380))) { memcpy((unsigned char *) (pbyPayloadHead), &pDevice->abySNAP_Bridgetunnel[0], 6); } else { memcpy((unsigned char *) (pbyPayloadHead), &pDevice->abySNAP_RFC1042[0], 6); } pbyType = (unsigned char *) (pbyPayloadHead + 6); memcpy(pbyType, &(psEthHeader->wType), sizeof(unsigned short)); cb802_1_H_len = 8; } cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + (cbFrameBodySize + cbMIClen); //--------------------------- // S/W or H/W Encryption //--------------------------- //Fill MICHDR //if (pDevice->bAES) { // DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Fill MICHDR...\n"); // s_vFillMICHDR(pDevice, (unsigned char *)pMICHDR, pbyMacHdr, (unsigned short)cbFrameBodySize); //} pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf; //pbyBuffer = (unsigned char *)pDevice->aamTxBuf[uDMAIdx][uDescIdx].pbyVAddr; uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cb802_1_H_len; //copy TxBufferHeader + MacHeader to desc memcpy(pbyBuffer, (void *)psTxBufHd, uLength); // Copy the Packet into a tx Buffer memcpy((pbyBuffer + uLength), (pPacket + 14), cbFrameBodySize - cb802_1_H_len ); if 
((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)){ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Length:%d, %d\n", cbFrameBodySize - cb802_1_H_len, uLength); /* for (ii = 0; ii < (cbFrameBodySize - cb802_1_H_len); ii++) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *((unsigned char *)((pbyBuffer + uLength) + ii))); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); */ MIC_vAppend((pbyBuffer + uLength - cb802_1_H_len), cbFrameBodySize); pdwMIC_L = (unsigned long *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize); pdwMIC_R = (unsigned long *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize + 4); MIC_vGetMIC(pdwMIC_L, pdwMIC_R); MIC_vUnInit(); if (pDevice->bTxMICFail == true) { *pdwMIC_L = 0; *pdwMIC_R = 0; pDevice->bTxMICFail = false; } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uLength: %d, %d\n", uLength, cbFrameBodySize); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderLength, uPadding, cbIVlen); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R); /* for (ii = 0; ii < 8; ii++) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *(((unsigned char *)(pdwMIC_L) + ii))); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); */ } if ((pDevice->byLocalID <= REV_ID_VT3253_A1)){ if (bNeedEncrypt) { s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength - cb802_1_H_len), (unsigned short)(cbFrameBodySize + cbMIClen)); cbReqCount += cbICVlen; } } ptdCurr = (PSTxDesc)pHeadTD; ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding; ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength; ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma; ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma); //Set TSR1 & ReqCount in TxDescHead ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU); ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount)); pDevice->iTDUsed[uDMAIdx]++; // DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" ptdCurr->m_dwReserved0[%d] ptdCurr->m_dwReserved1[%d].\n", 
ptdCurr->pTDInfo->dwReqCount, ptdCurr->pTDInfo->dwHeaderLength); // DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cbHeaderLength[%d]\n", cbHeaderLength); } *puMACfragNum = uMACfragNum; //DBG_PRTGRP03(("s_cbFillTxBufHead END\n")); return cbHeaderLength; } void vGenerateFIFOHeader(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyTxBufferAddr, bool bNeedEncrypt, unsigned int cbPayloadSize, unsigned int uDMAIdx, PSTxDesc pHeadTD, PSEthernetHeader psEthHeader, unsigned char *pPacket, PSKeyItem pTransmitKey, unsigned int uNodeIndex, unsigned int *puMACfragNum, unsigned int *pcbHeaderSize) { unsigned int wTxBufSize; // FFinfo size bool bNeedACK; bool bIsAdhoc; unsigned short cbMacHdLen; PSTxBufHead pTxBufHead = (PSTxBufHead) pbyTxBufferAddr; wTxBufSize = sizeof(STxBufHead); memset(pTxBufHead, 0, wTxBufSize); //Set FIFOCTL_NEEDACK if ((pDevice->eOPMode == OP_MODE_ADHOC) || (pDevice->eOPMode == OP_MODE_AP)) { if (is_multicast_ether_addr(&(psEthHeader->abyDstAddr[0]))) { bNeedACK = false; pTxBufHead->wFIFOCtl = pTxBufHead->wFIFOCtl & (~FIFOCTL_NEEDACK); } else { bNeedACK = true; pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK; } bIsAdhoc = true; } else { // MSDUs in Infra mode always need ACK bNeedACK = true; pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK; bIsAdhoc = false; } pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN; pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MSDU_LIFETIME_RES_64us); //Set FIFOCTL_LHEAD if (pDevice->bLongHeader) pTxBufHead->wFIFOCtl |= FIFOCTL_LHEAD; //Set FIFOCTL_GENINT pTxBufHead->wFIFOCtl |= FIFOCTL_GENINT; //Set FIFOCTL_ISDMA0 if (TYPE_TXDMA0 == uDMAIdx) { pTxBufHead->wFIFOCtl |= FIFOCTL_ISDMA0; } //Set FRAGCTL_MACHDCNT if (pDevice->bLongHeader) { cbMacHdLen = WLAN_HDR_ADDR3_LEN + 6; } else { cbMacHdLen = WLAN_HDR_ADDR3_LEN; } pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)(cbMacHdLen << 10)); //Set packet type if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000 ; } else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000 pTxBufHead->wFIFOCtl |= FIFOCTL_11B; } 
else if (byPktType == PK_TYPE_11GB) {//0000 0010 0000 0000 pTxBufHead->wFIFOCtl |= FIFOCTL_11GB; } else if (byPktType == PK_TYPE_11GA) {//0000 0011 0000 0000 pTxBufHead->wFIFOCtl |= FIFOCTL_11GA; } //Set FIFOCTL_GrpAckPolicy if (pDevice->bGrpAckPolicy == true) {//0000 0100 0000 0000 pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK; } //Set Auto Fallback Ctl if (pDevice->wCurrentRate >= RATE_18M) { if (pDevice->byAutoFBCtrl == AUTO_FB_0) { pTxBufHead->wFIFOCtl |= FIFOCTL_AUTO_FB_0; } else if (pDevice->byAutoFBCtrl == AUTO_FB_1) { pTxBufHead->wFIFOCtl |= FIFOCTL_AUTO_FB_1; } } //Set FRAGCTL_WEPTYP pDevice->bAES = false; //Set FRAGCTL_WEPTYP if (pDevice->byLocalID > REV_ID_VT3253_A1) { if ((bNeedEncrypt) && (pTransmitKey != NULL)) { //WEP enabled if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) { pTxBufHead->wFragCtl |= FRAGCTL_TKIP; } else if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) { //WEP40 or WEP104 if (pTransmitKey->uKeyLength != WLAN_WEP232_KEYLEN) pTxBufHead->wFragCtl |= FRAGCTL_LEGACY; } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) { //CCMP pTxBufHead->wFragCtl |= FRAGCTL_AES; } } } #ifdef PLICE_DEBUG //printk("Func:vGenerateFIFOHeader:TxDataRate is %d,TxPower is %d\n",pDevice->wCurrentRate,pDevice->byCurPwr); //if (pDevice->wCurrentRate <= 3) //{ // RFbRawSetPower(pDevice,36,pDevice->wCurrentRate); //} //else RFbSetPower(pDevice, pDevice->wCurrentRate, pDevice->byCurrentCh); #endif //if (pDevice->wCurrentRate == 3) //pDevice->byCurPwr = 46; pTxBufHead->byTxPower = pDevice->byCurPwr; /* if(pDevice->bEnableHostWEP) pTxBufHead->wFragCtl &= ~(FRAGCTL_TKIP | FRAGCTL_LEGACY |FRAGCTL_AES); */ *pcbHeaderSize = s_cbFillTxBufHead(pDevice, byPktType, pbyTxBufferAddr, cbPayloadSize, uDMAIdx, pHeadTD, psEthHeader, pPacket, bNeedEncrypt, pTransmitKey, uNodeIndex, puMACfragNum); return; } /*+ * * Description: * Translate 802.3 to 802.11 header * * Parameters: * In: * pDevice - Pointer to adapter * dwTxBufferAddr - Transmit Buffer * pPacket - Packet from upper layer * 
cbPacketSize - Transmit Data Length * Out: * pcbHeadSize - Header size of MAC&Baseband control and 802.11 Header * pcbAppendPayload - size of append payload for 802.1H translation * * Return Value: none * -*/ void vGenerateMACHeader ( PSDevice pDevice, unsigned char *pbyBufferAddr, unsigned short wDuration, PSEthernetHeader psEthHeader, bool bNeedEncrypt, unsigned short wFragType, unsigned int uDMAIdx, unsigned int uFragIdx ) { PS802_11Header pMACHeader = (PS802_11Header)pbyBufferAddr; memset(pMACHeader, 0, (sizeof(S802_11Header))); //- sizeof(pMACHeader->dwIV))); if (uDMAIdx == TYPE_ATIMDMA) { pMACHeader->wFrameCtl = TYPE_802_11_ATIM; } else { pMACHeader->wFrameCtl = TYPE_802_11_DATA; } if (pDevice->eOPMode == OP_MODE_AP) { memcpy(&(pMACHeader->abyAddr1[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN); memcpy(&(pMACHeader->abyAddr2[0]), &(pDevice->abyBSSID[0]), ETH_ALEN); memcpy(&(pMACHeader->abyAddr3[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN); pMACHeader->wFrameCtl |= FC_FROMDS; } else { if (pDevice->eOPMode == OP_MODE_ADHOC) { memcpy(&(pMACHeader->abyAddr1[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN); memcpy(&(pMACHeader->abyAddr2[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN); memcpy(&(pMACHeader->abyAddr3[0]), &(pDevice->abyBSSID[0]), ETH_ALEN); } else { memcpy(&(pMACHeader->abyAddr3[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN); memcpy(&(pMACHeader->abyAddr2[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN); memcpy(&(pMACHeader->abyAddr1[0]), &(pDevice->abyBSSID[0]), ETH_ALEN); pMACHeader->wFrameCtl |= FC_TODS; } } if (bNeedEncrypt) pMACHeader->wFrameCtl |= cpu_to_le16((unsigned short)WLAN_SET_FC_ISWEP(1)); pMACHeader->wDurationID = cpu_to_le16(wDuration); if (pDevice->bLongHeader) { PWLAN_80211HDR_A4 pMACA4Header = (PWLAN_80211HDR_A4) pbyBufferAddr; pMACHeader->wFrameCtl |= (FC_TODS | FC_FROMDS); memcpy(pMACA4Header->abyAddr4, pDevice->abyBSSID, WLAN_ADDR_LEN); } pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4); //Set FragNumber in Sequence Control 
pMACHeader->wSeqCtl |= cpu_to_le16((unsigned short)uFragIdx); if ((wFragType == FRAGCTL_ENDFRAG) || (wFragType == FRAGCTL_NONFRAG)) { pDevice->wSeqCounter++; if (pDevice->wSeqCounter > 0x0fff) pDevice->wSeqCounter = 0; } if ((wFragType == FRAGCTL_STAFRAG) || (wFragType == FRAGCTL_MIDFRAG)) { //StartFrag or MidFrag pMACHeader->wFrameCtl |= FC_MOREFRAG; } } CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) { PSTxDesc pFrstTD; unsigned char byPktType; unsigned char *pbyTxBufferAddr; void * pvRTS; PSCTS pCTS; void * pvTxDataHd; unsigned int uDuration; unsigned int cbReqCount; PS802_11Header pMACHeader; unsigned int cbHeaderSize; unsigned int cbFrameBodySize; bool bNeedACK; bool bIsPSPOLL = false; PSTxBufHead pTxBufHead; unsigned int cbFrameSize; unsigned int cbIVlen = 0; unsigned int cbICVlen = 0; unsigned int cbMIClen = 0; unsigned int cbFCSlen = 4; unsigned int uPadding = 0; unsigned short wTxBufSize; unsigned int cbMacHdLen; SEthernetHeader sEthHeader; void * pvRrvTime; void * pMICHDR; PSMgmtObject pMgmt = pDevice->pMgmt; unsigned short wCurrentRate = RATE_1M; if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 0) { return CMD_STATUS_RESOURCES; } pFrstTD = pDevice->apCurrTD[TYPE_TXDMA0]; pbyTxBufferAddr = (unsigned char *)pFrstTD->pTDInfo->buf; cbFrameBodySize = pPacket->cbPayloadLen; pTxBufHead = (PSTxBufHead) pbyTxBufferAddr; wTxBufSize = sizeof(STxBufHead); memset(pTxBufHead, 0, wTxBufSize); if (pDevice->eCurrentPHYType == PHY_TYPE_11A) { wCurrentRate = RATE_6M; byPktType = PK_TYPE_11A; } else { wCurrentRate = RATE_1M; byPktType = PK_TYPE_11B; } // SetPower will cause error power TX state for OFDM Date packet in TX buffer. // 2004.11.11 Kyle -- Using OFDM power to tx MngPkt will decrease the connection capability. // And cmd timer will wait data pkt TX finish before scanning so it's OK // to set power here. 
if (pDevice->pMgmt->eScanState != WMAC_NO_SCANNING) { RFbSetPower(pDevice, wCurrentRate, pDevice->byCurrentCh); } else { RFbSetPower(pDevice, wCurrentRate, pMgmt->uCurrChannel); } pTxBufHead->byTxPower = pDevice->byCurPwr; //+++++++++++++++++++++ Patch VT3253 A1 performance +++++++++++++++++++++++++++ if (pDevice->byFOETuning) { if ((pPacket->p80211Header->sA3.wFrameCtl & TYPE_DATE_NULL) == TYPE_DATE_NULL) { wCurrentRate = RATE_24M; byPktType = PK_TYPE_11GA; } } //Set packet type if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000 pTxBufHead->wFIFOCtl = 0; } else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000 pTxBufHead->wFIFOCtl |= FIFOCTL_11B; } else if (byPktType == PK_TYPE_11GB) {//0000 0010 0000 0000 pTxBufHead->wFIFOCtl |= FIFOCTL_11GB; } else if (byPktType == PK_TYPE_11GA) {//0000 0011 0000 0000 pTxBufHead->wFIFOCtl |= FIFOCTL_11GA; } pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN; pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us); if (is_multicast_ether_addr(&(pPacket->p80211Header->sA3.abyAddr1[0]))) bNeedACK = false; else { bNeedACK = true; pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK; }; if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) || (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ) { pTxBufHead->wFIFOCtl |= FIFOCTL_LRETRY; //Set Preamble type always long //pDevice->byPreambleType = PREAMBLE_LONG; // probe-response don't retry //if ((pPacket->p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_MGMT_PROBE_RSP) { // bNeedACK = false; // pTxBufHead->wFIFOCtl &= (~FIFOCTL_NEEDACK); //} } pTxBufHead->wFIFOCtl |= (FIFOCTL_GENINT | FIFOCTL_ISDMA0); if ((pPacket->p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL) { bIsPSPOLL = true; cbMacHdLen = WLAN_HDR_ADDR2_LEN; } else { cbMacHdLen = WLAN_HDR_ADDR3_LEN; } //Set FRAGCTL_MACHDCNT pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)(cbMacHdLen << 10)); // Notes: // Although spec says MMPDU can be fragmented; In most case, // no one will send a MMPDU under fragmentation. 
With RTS may occur. pDevice->bAES = false; //Set FRAGCTL_WEPTYP if (WLAN_GET_FC_ISWEP(pPacket->p80211Header->sA4.wFrameCtl) != 0) { if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) { cbIVlen = 4; cbICVlen = 4; pTxBufHead->wFragCtl |= FRAGCTL_LEGACY; } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) { cbIVlen = 8;//IV+ExtIV cbMIClen = 8; cbICVlen = 4; pTxBufHead->wFragCtl |= FRAGCTL_TKIP; //We need to get seed here for filling TxKey entry. //TKIPvMixKey(pTransmitKey->abyKey, pDevice->abyCurrentNetAddr, // pTransmitKey->wTSC15_0, pTransmitKey->dwTSC47_16, pDevice->abyPRNG); } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) { cbIVlen = 8;//RSN Header cbICVlen = 8;//MIC pTxBufHead->wFragCtl |= FRAGCTL_AES; pDevice->bAES = true; } //MAC Header should be padding 0 to DW alignment. uPadding = 4 - (cbMacHdLen%4); uPadding %= 4; } cbFrameSize = cbMacHdLen + cbFrameBodySize + cbIVlen + cbMIClen + cbICVlen + cbFCSlen; //Set FIFOCTL_GrpAckPolicy if (pDevice->bGrpAckPolicy == true) {//0000 0100 0000 0000 pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK; } //the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter() //Set RrvTime/RTS/CTS Buffer if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize); pMICHDR = NULL; pvRTS = NULL; pCTS = (PSCTS) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS)); pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + sizeof(SCTS)); cbHeaderSize = wTxBufSize + sizeof(SRrvTime_gCTS) + sizeof(SCTS) + sizeof(STxDataHead_g); } else { // 802.11a/b packet pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize); pMICHDR = NULL; pvRTS = NULL; pCTS = NULL; pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab)); cbHeaderSize = wTxBufSize + sizeof(SRrvTime_ab) + sizeof(STxDataHead_ab); } memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, 
(cbHeaderSize - wTxBufSize)); memcpy(&(sEthHeader.abyDstAddr[0]), &(pPacket->p80211Header->sA3.abyAddr1[0]), ETH_ALEN); memcpy(&(sEthHeader.abySrcAddr[0]), &(pPacket->p80211Header->sA3.abyAddr2[0]), ETH_ALEN); //========================= // No Fragmentation //========================= pTxBufHead->wFragCtl |= (unsigned short)FRAGCTL_NONFRAG; //Fill FIFO,RrvTime,RTS,and CTS s_vGenerateTxParameter(pDevice, byPktType, pbyTxBufferAddr, pvRrvTime, pvRTS, pCTS, cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader, wCurrentRate); //Fill DataHead uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, TYPE_TXDMA0, bNeedACK, 0, 0, 1, AUTO_FB_NONE, wCurrentRate); pMACHeader = (PS802_11Header) (pbyTxBufferAddr + cbHeaderSize); cbReqCount = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen + cbFrameBodySize; if (WLAN_GET_FC_ISWEP(pPacket->p80211Header->sA4.wFrameCtl) != 0) { unsigned char *pbyIVHead; unsigned char *pbyPayloadHead; unsigned char *pbyBSSID; PSKeyItem pTransmitKey = NULL; pbyIVHead = (unsigned char *)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding); pbyPayloadHead = (unsigned char *)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding + cbIVlen); //Fill TXKEY //Kyle: Need fix: TKIP and AES did't encryt Mnt Packet. 
//s_vFillTxKey(pDevice, (unsigned char *)pTxBufHead->adwTxKey, NULL); //Fill IV(ExtIV,RSNHDR) //s_vFillPrePayload(pDevice, pbyIVHead, NULL); //--------------------------- // S/W or H/W Encryption //--------------------------- //Fill MICHDR //if (pDevice->bAES) { // s_vFillMICHDR(pDevice, (unsigned char *)pMICHDR, (unsigned char *)pMACHeader, (unsigned short)cbFrameBodySize); //} do { if ((pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) && (pDevice->bLinkPass == true)) { pbyBSSID = pDevice->abyBSSID; // get pairwise key if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == false) { // get group key if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == true) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Get GTK.\n"); break; } } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Get PTK.\n"); break; } } // get group key pbyBSSID = pDevice->abyBroadcastAddr; if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) { pTransmitKey = NULL; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KEY is NULL. OP Mode[%d]\n", pDevice->eOPMode); } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Get GTK.\n"); } } while(false); //Fill TXKEY s_vFillTxKey(pDevice, (unsigned char *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey, (unsigned char *)pMACHeader, (unsigned short)cbFrameBodySize, NULL); memcpy(pMACHeader, pPacket->p80211Header, cbMacHdLen); memcpy(pbyPayloadHead, ((unsigned char *)(pPacket->p80211Header) + cbMacHdLen), cbFrameBodySize); } else { // Copy the Packet into a tx Buffer memcpy(pMACHeader, pPacket->p80211Header, pPacket->cbMPDULen); } pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4); pDevice->wSeqCounter++ ; if (pDevice->wSeqCounter > 0x0fff) pDevice->wSeqCounter = 0; if (bIsPSPOLL) { // The MAC will automatically replace the Duration-field of MAC header by Duration-field // of FIFO control header. 
// This will cause AID-field of PS-POLL packet be incorrect (Because PS-POLL's AID field is // in the same place of other packet's Duration-field). // And it will cause Cisco-AP to issue Disassociation-packet if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) { ((PSTxDataHead_g)pvTxDataHd)->wDuration_a = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID); ((PSTxDataHead_g)pvTxDataHd)->wDuration_b = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID); } else { ((PSTxDataHead_ab)pvTxDataHd)->wDuration = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID); } } // first TD is the only TD //Set TSR1 & ReqCount in TxDescHead pFrstTD->m_td1TD1.byTCR = (TCR_STP | TCR_EDP | EDMSDU); pFrstTD->pTDInfo->skb_dma = pFrstTD->pTDInfo->buf_dma; pFrstTD->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount)); pFrstTD->buff_addr = cpu_to_le32(pFrstTD->pTDInfo->skb_dma); pFrstTD->pTDInfo->byFlags = 0; if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) { // Disable PS MACbPSWakeup(pDevice->PortOffset); } pDevice->bPWBitOn = false; wmb(); pFrstTD->m_td0TD0.f1Owner = OWNED_BY_NIC; wmb(); pDevice->iTDUsed[TYPE_TXDMA0]++; if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 1) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " available td0 <= 1\n"); } pDevice->apCurrTD[TYPE_TXDMA0] = pFrstTD->next; #ifdef PLICE_DEBUG //printk("SCAN:CurrentRate is %d,TxPower is %d\n",wCurrentRate,pTxBufHead->byTxPower); #endif #ifdef TxInSleep pDevice->nTxDataTimeCout=0; //2008-8-21 chester <add> for send null packet #endif // Poll Transmit the adapter MACvTransmit0(pDevice->PortOffset); return CMD_STATUS_PENDING; } CMD_STATUS csBeacon_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) { unsigned char byPktType; unsigned char *pbyBuffer = (unsigned char *)pDevice->tx_beacon_bufs; unsigned int cbFrameSize = pPacket->cbMPDULen + WLAN_FCS_LEN; unsigned int cbHeaderSize = 0; unsigned short wTxBufSize = sizeof(STxShortBufHead); PSTxShortBufHead pTxBufHead = (PSTxShortBufHead) pbyBuffer; PSTxDataHead_ab 
	pTxDataHead = (PSTxDataHead_ab) (pbyBuffer + wTxBufSize);
	PS802_11Header   pMACHeader;
	unsigned short wCurrentRate;
	unsigned short wLen = 0x0000;

	memset(pTxBufHead, 0, wTxBufSize);

	// Beacons go out at the PHY's lowest supported beacon rate.
	if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
		wCurrentRate = RATE_6M;
		byPktType = PK_TYPE_11A;
	} else {
		wCurrentRate = RATE_2M;
		byPktType = PK_TYPE_11B;
	}

	//Set Preamble type always long
	pDevice->byPreambleType = PREAMBLE_LONG;

	//Set FIFOCTL_GENINT
	pTxBufHead->wFIFOCtl |= FIFOCTL_GENINT;

	//Set packet type & Get Duration
	if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
		pTxDataHead->wDuration = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameSize, byPktType,
											 wCurrentRate, false, 0, 0, 1, AUTO_FB_NONE));
	} else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000
		pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
		pTxDataHead->wDuration = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameSize, byPktType,
											 wCurrentRate, false, 0, 0, 1, AUTO_FB_NONE));
	}

	// Baseband parameters: transmit length, service and signal fields.
	BBvCaculateParameter(pDevice, cbFrameSize, wCurrentRate, byPktType,
			     (unsigned short *)&(wLen), (unsigned char *)&(pTxDataHead->byServiceField),
			     (unsigned char *)&(pTxDataHead->bySignalField));
	pTxDataHead->wTransmitLength = cpu_to_le16(wLen);
	//Get TimeStampOff
	pTxDataHead->wTimeStampOff = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]);
	cbHeaderSize = wTxBufSize + sizeof(STxDataHead_ab);

	//Generate Beacon Header
	pMACHeader = (PS802_11Header)(pbyBuffer + cbHeaderSize);
	memcpy(pMACHeader, pPacket->p80211Header, pPacket->cbMPDULen);

	pMACHeader->wDurationID = 0;
	pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
	pDevice->wSeqCounter++;
	if (pDevice->wSeqCounter > 0x0fff)
		pDevice->wSeqCounter = 0;

	// Set Beacon buffer length
	pDevice->wBCNBufLen = pPacket->cbMPDULen + cbHeaderSize;

	MACvSetCurrBCNTxDescAddr(pDevice->PortOffset, (pDevice->tx_beacon_dma));

	MACvSetCurrBCNLength(pDevice->PortOffset, pDevice->wBCNBufLen);
	// Set auto Transmit on
	MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
	// Poll Transmit the adapter
	MACvTransmitBCN(pDevice->PortOffset);

	return CMD_STATUS_PENDING;
}

/*
 * cbGetFragCount - predict how many MAC fragments a frame body needs.
 *
 * Adds the per-cipher IV/MIC/ICV overhead and the FCS to the MAC header
 * length; when the resulting frame exceeds the fragmentation threshold
 * and the frame requires an ACK (unicast, or any infrastructure MSDU),
 * derives the fragment count from the per-fragment payload capacity.
 *
 * Returns the fragment count (at least 1).
 */
unsigned int
cbGetFragCount (
	PSDevice         pDevice,
	PSKeyItem        pTransmitKey,
	unsigned int cbFrameBodySize,
	PSEthernetHeader psEthHeader
)
{
	unsigned int cbMACHdLen;
	unsigned int cbFrameSize;
	unsigned int cbFragmentSize; //Hdr+(IV)+payoad+(MIC)+(ICV)+FCS
	unsigned int cbFragPayloadSize;
	unsigned int cbLastFragPayloadSize;
	unsigned int cbIVlen = 0;
	unsigned int cbICVlen = 0;
	unsigned int cbMIClen = 0;
	unsigned int cbFCSlen = 4;
	unsigned int uMACfragNum = 1;
	bool bNeedACK;

	// Multicast frames in ad-hoc/AP mode are never acknowledged and are
	// therefore never fragmented below.
	if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
	    (pDevice->eOPMode == OP_MODE_AP)) {
		if (is_multicast_ether_addr(&(psEthHeader->abyDstAddr[0])))
			bNeedACK = false;
		else
			bNeedACK = true;
	} else {
		// MSDUs in Infra mode always need ACK
		bNeedACK = true;
	}

	if (pDevice->bLongHeader)
		cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
	else
		cbMACHdLen = WLAN_HDR_ADDR3_LEN;

	// Per-cipher IV/MIC/ICV overhead; with no explicit key the device's
	// configured encryption status decides.
	if (pDevice->bEncryptionEnable == true) {
		if (pTransmitKey == NULL) {
			if ((pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) ||
			    (pDevice->pMgmt->eAuthenMode < WMAC_AUTH_WPA)) {
				cbIVlen = 4;
				cbICVlen = 4;
			} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
				cbIVlen = 8;//IV+ExtIV
				cbMIClen = 8;
				cbICVlen = 4;
			} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
				cbIVlen = 8;//RSN Header
				cbICVlen = 8;//MIC
			}
		} else if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
			cbIVlen = 4;
			cbICVlen = 4;
		} else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
			cbIVlen = 8;//IV+ExtIV
			cbMIClen = 8;
			cbICVlen = 4;
		} else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
			cbIVlen = 8;//RSN Header
			cbICVlen = 8;//MIC
		}
	}

	cbFrameSize = cbMACHdLen + cbIVlen + (cbFrameBodySize + cbMIClen) + cbICVlen + cbFCSlen;

	if ((cbFrameSize > pDevice->wFragmentationThreshold) && (bNeedACK == true)) {
		// Fragmentation
		cbFragmentSize = pDevice->wFragmentationThreshold;
		cbFragPayloadSize = cbFragmentSize - cbMACHdLen - cbIVlen - cbICVlen - cbFCSlen;
		uMACfragNum = (unsigned short) ((cbFrameBodySize + cbMIClen) / cbFragPayloadSize);
		cbLastFragPayloadSize = (cbFrameBodySize + cbMIClen) % cbFragPayloadSize;
		if (cbLastFragPayloadSize == 0) {
			// Body divides evenly: the "last" fragment is full-size.
			cbLastFragPayloadSize = cbFragPayloadSize;
		} else {
			uMACfragNum++;
		}
	}
	return uMACfragNum;
}

/*
 * vDMA0_tx_80211 - build and transmit a raw 802.11 MPDU (pbMPDU,
 * cbMPDULen) on TX DMA queue 0; used for frames handed down already in
 * 802.11 format (see the hostapd support-rate patching below).
 */
void vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb,
		    unsigned char *pbMPDU, unsigned int cbMPDULen)
{
	PSTxDesc        pFrstTD;
	unsigned char byPktType;
	unsigned char *pbyTxBufferAddr;
	void *pvRTS;
	void *pvCTS;
	void *pvTxDataHd;
	unsigned int uDuration;
	unsigned int cbReqCount;
	PS802_11Header  pMACHeader;
	unsigned int cbHeaderSize;
	unsigned int cbFrameBodySize;
	bool bNeedACK;
	bool bIsPSPOLL = false;
	PSTxBufHead     pTxBufHead;
	unsigned int cbFrameSize;
	unsigned int cbIVlen = 0;
	unsigned int cbICVlen = 0;
	unsigned int cbMIClen = 0;
	unsigned int cbFCSlen = 4;
	unsigned int uPadding = 0;
	unsigned int cbMICHDR = 0;
	unsigned int uLength = 0;
	unsigned long dwMICKey0, dwMICKey1;
	unsigned long dwMIC_Priority;
	unsigned long *pdwMIC_L;
	unsigned long *pdwMIC_R;
	unsigned short wTxBufSize;
	unsigned int cbMacHdLen;
	SEthernetHeader sEthHeader;
	void *pvRrvTime;
	void *pMICHDR;
	PSMgmtObject    pMgmt = pDevice->pMgmt;
	unsigned short wCurrentRate = RATE_1M;
	PUWLAN_80211HDR  p80211Header;
	unsigned int uNodeIndex = 0;
	bool bNodeExist = false;
	SKeyItem        STempKey;
	PSKeyItem       pTransmitKey = NULL;
	unsigned char *pbyIVHead;
	unsigned char *pbyPayloadHead;
	unsigned char *pbyMacHdr;
	unsigned int cbExtSuppRate = 0;
//	PWLAN_IE        pItem;

	pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;

	// Frame body is whatever follows the three-address MAC header.
	if (cbMPDULen <= WLAN_HDR_ADDR3_LEN) {
		cbFrameBodySize = 0;
	} else {
		cbFrameBodySize = cbMPDULen - WLAN_HDR_ADDR3_LEN;
	}

	p80211Header = (PUWLAN_80211HDR)pbMPDU;

	pFrstTD = pDevice->apCurrTD[TYPE_TXDMA0];
	pbyTxBufferAddr = (unsigned char *)pFrstTD->pTDInfo->buf;
	pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
	wTxBufSize = sizeof(STxBufHead);
	memset(pTxBufHead, 0, wTxBufSize);

	if
(pDevice->eCurrentPHYType == PHY_TYPE_11A) { wCurrentRate = RATE_6M; byPktType = PK_TYPE_11A; } else { wCurrentRate = RATE_1M; byPktType = PK_TYPE_11B; } // SetPower will cause error power TX state for OFDM Date packet in TX buffer. // 2004.11.11 Kyle -- Using OFDM power to tx MngPkt will decrease the connection capability. // And cmd timer will wait data pkt TX finish before scanning so it's OK // to set power here. if (pDevice->pMgmt->eScanState != WMAC_NO_SCANNING) { RFbSetPower(pDevice, wCurrentRate, pDevice->byCurrentCh); } else { RFbSetPower(pDevice, wCurrentRate, pMgmt->uCurrChannel); } pTxBufHead->byTxPower = pDevice->byCurPwr; //+++++++++++++++++++++ Patch VT3253 A1 performance +++++++++++++++++++++++++++ if (pDevice->byFOETuning) { if ((p80211Header->sA3.wFrameCtl & TYPE_DATE_NULL) == TYPE_DATE_NULL) { wCurrentRate = RATE_24M; byPktType = PK_TYPE_11GA; } } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vDMA0_tx_80211: p80211Header->sA3.wFrameCtl = %x \n", p80211Header->sA3.wFrameCtl); //Set packet type if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000 pTxBufHead->wFIFOCtl = 0; } else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000 pTxBufHead->wFIFOCtl |= FIFOCTL_11B; } else if (byPktType == PK_TYPE_11GB) {//0000 0010 0000 0000 pTxBufHead->wFIFOCtl |= FIFOCTL_11GB; } else if (byPktType == PK_TYPE_11GA) {//0000 0011 0000 0000 pTxBufHead->wFIFOCtl |= FIFOCTL_11GA; } pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN; pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us); if (is_multicast_ether_addr(&(p80211Header->sA3.abyAddr1[0]))) { bNeedACK = false; if (pDevice->bEnableHostWEP) { uNodeIndex = 0; bNodeExist = true; } } else { if (pDevice->bEnableHostWEP) { if (BSSDBbIsSTAInNodeDB(pDevice->pMgmt, (unsigned char *)(p80211Header->sA3.abyAddr1), &uNodeIndex)) bNodeExist = true; } bNeedACK = true; pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK; }; if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) || (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ) { pTxBufHead->wFIFOCtl |= 
FIFOCTL_LRETRY; //Set Preamble type always long //pDevice->byPreambleType = PREAMBLE_LONG; // probe-response don't retry //if ((p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_MGMT_PROBE_RSP) { // bNeedACK = false; // pTxBufHead->wFIFOCtl &= (~FIFOCTL_NEEDACK); //} } pTxBufHead->wFIFOCtl |= (FIFOCTL_GENINT | FIFOCTL_ISDMA0); if ((p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL) { bIsPSPOLL = true; cbMacHdLen = WLAN_HDR_ADDR2_LEN; } else { cbMacHdLen = WLAN_HDR_ADDR3_LEN; } // hostapd deamon ext support rate patch if (WLAN_GET_FC_FSTYPE(p80211Header->sA4.wFrameCtl) == WLAN_FSTYPE_ASSOCRESP) { if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len != 0) { cbExtSuppRate += ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len + WLAN_IEHDR_LEN; } if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len != 0) { cbExtSuppRate += ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len + WLAN_IEHDR_LEN; } if (cbExtSuppRate >0) { cbFrameBodySize = WLAN_ASSOCRESP_OFF_SUPP_RATES; } } //Set FRAGCTL_MACHDCNT pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)cbMacHdLen << 10); // Notes: // Although spec says MMPDU can be fragmented; In most case, // no one will send a MMPDU under fragmentation. With RTS may occur. pDevice->bAES = false; //Set FRAGCTL_WEPTYP if (WLAN_GET_FC_ISWEP(p80211Header->sA4.wFrameCtl) != 0) { if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) { cbIVlen = 4; cbICVlen = 4; pTxBufHead->wFragCtl |= FRAGCTL_LEGACY; } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) { cbIVlen = 8;//IV+ExtIV cbMIClen = 8; cbICVlen = 4; pTxBufHead->wFragCtl |= FRAGCTL_TKIP; //We need to get seed here for filling TxKey entry. 
//TKIPvMixKey(pTransmitKey->abyKey, pDevice->abyCurrentNetAddr, // pTransmitKey->wTSC15_0, pTransmitKey->dwTSC47_16, pDevice->abyPRNG); } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) { cbIVlen = 8;//RSN Header cbICVlen = 8;//MIC cbMICHDR = sizeof(SMICHDRHead); pTxBufHead->wFragCtl |= FRAGCTL_AES; pDevice->bAES = true; } //MAC Header should be padding 0 to DW alignment. uPadding = 4 - (cbMacHdLen%4); uPadding %= 4; } cbFrameSize = cbMacHdLen + cbFrameBodySize + cbIVlen + cbMIClen + cbICVlen + cbFCSlen + cbExtSuppRate; //Set FIFOCTL_GrpAckPolicy if (pDevice->bGrpAckPolicy == true) {//0000 0100 0000 0000 pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK; } //the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter() if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize); pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS)); pvRTS = NULL; pvCTS = (PSCTS) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR); pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS)); cbHeaderSize = wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS) + sizeof(STxDataHead_g); } else {//802.11a/b packet pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize); pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab)); pvRTS = NULL; pvCTS = NULL; pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR); cbHeaderSize = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(STxDataHead_ab); } memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderSize - wTxBufSize)); memcpy(&(sEthHeader.abyDstAddr[0]), &(p80211Header->sA3.abyAddr1[0]), ETH_ALEN); memcpy(&(sEthHeader.abySrcAddr[0]), &(p80211Header->sA3.abyAddr2[0]), ETH_ALEN); //========================= // No Fragmentation //========================= 
pTxBufHead->wFragCtl |= (unsigned short)FRAGCTL_NONFRAG; //Fill FIFO,RrvTime,RTS,and CTS s_vGenerateTxParameter(pDevice, byPktType, pbyTxBufferAddr, pvRrvTime, pvRTS, pvCTS, cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader, wCurrentRate); //Fill DataHead uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, TYPE_TXDMA0, bNeedACK, 0, 0, 1, AUTO_FB_NONE, wCurrentRate); pMACHeader = (PS802_11Header) (pbyTxBufferAddr + cbHeaderSize); cbReqCount = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen + (cbFrameBodySize + cbMIClen) + cbExtSuppRate; pbyMacHdr = (unsigned char *)(pbyTxBufferAddr + cbHeaderSize); pbyPayloadHead = (unsigned char *)(pbyMacHdr + cbMacHdLen + uPadding + cbIVlen); pbyIVHead = (unsigned char *)(pbyMacHdr + cbMacHdLen + uPadding); // Copy the Packet into a tx Buffer memcpy(pbyMacHdr, pbMPDU, cbMacHdLen); // version set to 0, patch for hostapd deamon pMACHeader->wFrameCtl &= cpu_to_le16(0xfffc); memcpy(pbyPayloadHead, (pbMPDU + cbMacHdLen), cbFrameBodySize); // replace support rate, patch for hostapd deamon( only support 11M) if (WLAN_GET_FC_FSTYPE(p80211Header->sA4.wFrameCtl) == WLAN_FSTYPE_ASSOCRESP) { if (cbExtSuppRate != 0) { if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len != 0) memcpy((pbyPayloadHead + cbFrameBodySize), pMgmt->abyCurrSuppRates, ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len + WLAN_IEHDR_LEN ); if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len != 0) memcpy((pbyPayloadHead + cbFrameBodySize) + ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len + WLAN_IEHDR_LEN, pMgmt->abyCurrExtSuppRates, ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len + WLAN_IEHDR_LEN ); } } // Set wep if (WLAN_GET_FC_ISWEP(p80211Header->sA4.wFrameCtl) != 0) { if (pDevice->bEnableHostWEP) { pTransmitKey = &STempKey; pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite; pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex; pTransmitKey->uKeyLength = 
pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength; pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16; pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0; memcpy(pTransmitKey->abyKey, &pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0], pTransmitKey->uKeyLength ); } if ((pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) { dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[16]); dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[20]); // DO Software Michael MIC_vInit(dwMICKey0, dwMICKey1); MIC_vAppend((unsigned char *)&(sEthHeader.abyDstAddr[0]), 12); dwMIC_Priority = 0; MIC_vAppend((unsigned char *)&dwMIC_Priority, 4); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1); uLength = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen; MIC_vAppend((pbyTxBufferAddr + uLength), cbFrameBodySize); pdwMIC_L = (unsigned long *)(pbyTxBufferAddr + uLength + cbFrameBodySize); pdwMIC_R = (unsigned long *)(pbyTxBufferAddr + uLength + cbFrameBodySize + 4); MIC_vGetMIC(pdwMIC_L, pdwMIC_R); MIC_vUnInit(); if (pDevice->bTxMICFail == true) { *pdwMIC_L = 0; *pdwMIC_R = 0; pDevice->bTxMICFail = false; } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uLength: %d, %d\n", uLength, cbFrameBodySize); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderSize, uPadding, cbIVlen); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R); } s_vFillTxKey(pDevice, (unsigned char *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey, pbyMacHdr, (unsigned short)cbFrameBodySize, (unsigned char *)pMICHDR); if (pDevice->bEnableHostWEP) { pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16; pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0; } if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) { s_vSWencryption(pDevice, pTransmitKey, pbyPayloadHead, (unsigned short)(cbFrameBodySize + cbMIClen)); } } pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 
4); pDevice->wSeqCounter++ ; if (pDevice->wSeqCounter > 0x0fff) pDevice->wSeqCounter = 0; if (bIsPSPOLL) { // The MAC will automatically replace the Duration-field of MAC header by Duration-field // of FIFO control header. // This will cause AID-field of PS-POLL packet be incorrect (Because PS-POLL's AID field is // in the same place of other packet's Duration-field). // And it will cause Cisco-AP to issue Disassociation-packet if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) { ((PSTxDataHead_g)pvTxDataHd)->wDuration_a = cpu_to_le16(p80211Header->sA2.wDurationID); ((PSTxDataHead_g)pvTxDataHd)->wDuration_b = cpu_to_le16(p80211Header->sA2.wDurationID); } else { ((PSTxDataHead_ab)pvTxDataHd)->wDuration = cpu_to_le16(p80211Header->sA2.wDurationID); } } // first TD is the only TD //Set TSR1 & ReqCount in TxDescHead pFrstTD->pTDInfo->skb = skb; pFrstTD->m_td1TD1.byTCR = (TCR_STP | TCR_EDP | EDMSDU); pFrstTD->pTDInfo->skb_dma = pFrstTD->pTDInfo->buf_dma; pFrstTD->m_td1TD1.wReqCount = cpu_to_le16(cbReqCount); pFrstTD->buff_addr = cpu_to_le32(pFrstTD->pTDInfo->skb_dma); pFrstTD->pTDInfo->byFlags = 0; pFrstTD->pTDInfo->byFlags |= TD_FLAGS_PRIV_SKB; if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) { // Disable PS MACbPSWakeup(pDevice->PortOffset); } pDevice->bPWBitOn = false; wmb(); pFrstTD->m_td0TD0.f1Owner = OWNED_BY_NIC; wmb(); pDevice->iTDUsed[TYPE_TXDMA0]++; if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 1) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " available td0 <= 1\n"); } pDevice->apCurrTD[TYPE_TXDMA0] = pFrstTD->next; // Poll Transmit the adapter MACvTransmit0(pDevice->PortOffset); return; }
gpl-2.0
EnJens/kernel_tf201_stock
sound/pci/emu10k1/emufx.c
7962
101686
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Creative Labs, Inc. * Routines for effect processor FX8010 * * Copyright (c) by James Courtier-Dutton <James@superbug.co.uk> * Added EMU 1010 support. * * BUGS: * -- * * TODO: * -- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/pci.h> #include <linux/capability.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/tlv.h> #include <sound/emu10k1.h> #if 0 /* for testing purposes - digital out -> capture */ #define EMU10K1_CAPTURE_DIGITAL_OUT #endif #if 0 /* for testing purposes - set S/PDIF to AC3 output */ #define EMU10K1_SET_AC3_IEC958 #endif #if 0 /* for testing purposes - feed the front signal to Center/LFE outputs */ #define EMU10K1_CENTER_LFE_FROM_FRONT #endif static bool high_res_gpr_volume; module_param(high_res_gpr_volume, bool, 0444); MODULE_PARM_DESC(high_res_gpr_volume, "GPR mixer controls use 31-bit range."); /* * Tables */ static char *fxbuses[16] = { /* 0x00 */ "PCM Left", /* 0x01 */ "PCM Right", /* 0x02 */ "PCM Surround Left", /* 0x03 */ "PCM Surround Right", /* 0x04 */ "MIDI Left", /* 0x05 */ "MIDI Right", /* 0x06 */ "Center", /* 0x07 */ "LFE", /* 0x08 */ NULL, /* 0x09 */ NULL, /* 0x0a */ 
NULL, /* 0x0b */ NULL, /* 0x0c */ "MIDI Reverb", /* 0x0d */ "MIDI Chorus", /* 0x0e */ NULL, /* 0x0f */ NULL }; static char *creative_ins[16] = { /* 0x00 */ "AC97 Left", /* 0x01 */ "AC97 Right", /* 0x02 */ "TTL IEC958 Left", /* 0x03 */ "TTL IEC958 Right", /* 0x04 */ "Zoom Video Left", /* 0x05 */ "Zoom Video Right", /* 0x06 */ "Optical IEC958 Left", /* 0x07 */ "Optical IEC958 Right", /* 0x08 */ "Line/Mic 1 Left", /* 0x09 */ "Line/Mic 1 Right", /* 0x0a */ "Coaxial IEC958 Left", /* 0x0b */ "Coaxial IEC958 Right", /* 0x0c */ "Line/Mic 2 Left", /* 0x0d */ "Line/Mic 2 Right", /* 0x0e */ NULL, /* 0x0f */ NULL }; static char *audigy_ins[16] = { /* 0x00 */ "AC97 Left", /* 0x01 */ "AC97 Right", /* 0x02 */ "Audigy CD Left", /* 0x03 */ "Audigy CD Right", /* 0x04 */ "Optical IEC958 Left", /* 0x05 */ "Optical IEC958 Right", /* 0x06 */ NULL, /* 0x07 */ NULL, /* 0x08 */ "Line/Mic 2 Left", /* 0x09 */ "Line/Mic 2 Right", /* 0x0a */ "SPDIF Left", /* 0x0b */ "SPDIF Right", /* 0x0c */ "Aux2 Left", /* 0x0d */ "Aux2 Right", /* 0x0e */ NULL, /* 0x0f */ NULL }; static char *creative_outs[32] = { /* 0x00 */ "AC97 Left", /* 0x01 */ "AC97 Right", /* 0x02 */ "Optical IEC958 Left", /* 0x03 */ "Optical IEC958 Right", /* 0x04 */ "Center", /* 0x05 */ "LFE", /* 0x06 */ "Headphone Left", /* 0x07 */ "Headphone Right", /* 0x08 */ "Surround Left", /* 0x09 */ "Surround Right", /* 0x0a */ "PCM Capture Left", /* 0x0b */ "PCM Capture Right", /* 0x0c */ "MIC Capture", /* 0x0d */ "AC97 Surround Left", /* 0x0e */ "AC97 Surround Right", /* 0x0f */ NULL, /* 0x10 */ NULL, /* 0x11 */ "Analog Center", /* 0x12 */ "Analog LFE", /* 0x13 */ NULL, /* 0x14 */ NULL, /* 0x15 */ NULL, /* 0x16 */ NULL, /* 0x17 */ NULL, /* 0x18 */ NULL, /* 0x19 */ NULL, /* 0x1a */ NULL, /* 0x1b */ NULL, /* 0x1c */ NULL, /* 0x1d */ NULL, /* 0x1e */ NULL, /* 0x1f */ NULL, }; static char *audigy_outs[32] = { /* 0x00 */ "Digital Front Left", /* 0x01 */ "Digital Front Right", /* 0x02 */ "Digital Center", /* 0x03 */ "Digital LEF", /* 0x04 */ 
"Headphone Left", /* 0x05 */ "Headphone Right", /* 0x06 */ "Digital Rear Left", /* 0x07 */ "Digital Rear Right", /* 0x08 */ "Front Left", /* 0x09 */ "Front Right", /* 0x0a */ "Center", /* 0x0b */ "LFE", /* 0x0c */ NULL, /* 0x0d */ NULL, /* 0x0e */ "Rear Left", /* 0x0f */ "Rear Right", /* 0x10 */ "AC97 Front Left", /* 0x11 */ "AC97 Front Right", /* 0x12 */ "ADC Caputre Left", /* 0x13 */ "ADC Capture Right", /* 0x14 */ NULL, /* 0x15 */ NULL, /* 0x16 */ NULL, /* 0x17 */ NULL, /* 0x18 */ NULL, /* 0x19 */ NULL, /* 0x1a */ NULL, /* 0x1b */ NULL, /* 0x1c */ NULL, /* 0x1d */ NULL, /* 0x1e */ NULL, /* 0x1f */ NULL, }; static const u32 bass_table[41][5] = { { 0x3e4f844f, 0x84ed4cc3, 0x3cc69927, 0x7b03553a, 0xc4da8486 }, { 0x3e69a17a, 0x84c280fb, 0x3cd77cd4, 0x7b2f2a6f, 0xc4b08d1d }, { 0x3e82ff42, 0x849991d5, 0x3ce7466b, 0x7b5917c6, 0xc48863ee }, { 0x3e9bab3c, 0x847267f0, 0x3cf5ffe8, 0x7b813560, 0xc461f22c }, { 0x3eb3b275, 0x844ced29, 0x3d03b295, 0x7ba79a1c, 0xc43d223b }, { 0x3ecb2174, 0x84290c8b, 0x3d106714, 0x7bcc5ba3, 0xc419dfa5 }, { 0x3ee2044b, 0x8406b244, 0x3d1c2561, 0x7bef8e77, 0xc3f8170f }, { 0x3ef86698, 0x83e5cb96, 0x3d26f4d8, 0x7c114600, 0xc3d7b625 }, { 0x3f0e5390, 0x83c646c9, 0x3d30dc39, 0x7c319498, 0xc3b8ab97 }, { 0x3f23d60b, 0x83a81321, 0x3d39e1af, 0x7c508b9c, 0xc39ae704 }, { 0x3f38f884, 0x838b20d2, 0x3d420ad2, 0x7c6e3b75, 0xc37e58f1 }, { 0x3f4dc52c, 0x836f60ef, 0x3d495cab, 0x7c8ab3a6, 0xc362f2be }, { 0x3f6245e8, 0x8354c565, 0x3d4fdbb8, 0x7ca602d6, 0xc348a69b }, { 0x3f76845f, 0x833b40ec, 0x3d558bf0, 0x7cc036df, 0xc32f677c }, { 0x3f8a8a03, 0x8322c6fb, 0x3d5a70c4, 0x7cd95cd7, 0xc317290b }, { 0x3f9e6014, 0x830b4bc3, 0x3d5e8d25, 0x7cf1811a, 0xc2ffdfa5 }, { 0x3fb20fae, 0x82f4c420, 0x3d61e37f, 0x7d08af56, 0xc2e9804a }, { 0x3fc5a1cc, 0x82df2592, 0x3d6475c3, 0x7d1ef294, 0xc2d40096 }, { 0x3fd91f55, 0x82ca6632, 0x3d664564, 0x7d345541, 0xc2bf56b9 }, { 0x3fec9120, 0x82b67cac, 0x3d675356, 0x7d48e138, 0xc2ab796e }, { 0x40000000, 0x82a36037, 0x3d67a012, 0x7d5c9fc9, 0xc2985fee }, 
{ 0x401374c7, 0x8291088a, 0x3d672b93, 0x7d6f99c3, 0xc28601f2 }, { 0x4026f857, 0x827f6dd7, 0x3d65f559, 0x7d81d77c, 0xc27457a3 }, { 0x403a939f, 0x826e88c5, 0x3d63fc63, 0x7d9360d4, 0xc2635996 }, { 0x404e4faf, 0x825e5266, 0x3d613f32, 0x7da43d42, 0xc25300c6 }, { 0x406235ba, 0x824ec434, 0x3d5dbbc3, 0x7db473d7, 0xc243468e }, { 0x40764f1f, 0x823fd80c, 0x3d596f8f, 0x7dc40b44, 0xc23424a2 }, { 0x408aa576, 0x82318824, 0x3d545787, 0x7dd309e2, 0xc2259509 }, { 0x409f4296, 0x8223cf0b, 0x3d4e7012, 0x7de175b5, 0xc2179218 }, { 0x40b430a0, 0x8216a7a1, 0x3d47b505, 0x7def5475, 0xc20a1670 }, { 0x40c97a0a, 0x820a0d12, 0x3d4021a1, 0x7dfcab8d, 0xc1fd1cf5 }, { 0x40df29a6, 0x81fdfad6, 0x3d37b08d, 0x7e098028, 0xc1f0a0ca }, { 0x40f54ab1, 0x81f26ca9, 0x3d2e5bd1, 0x7e15d72b, 0xc1e49d52 }, { 0x410be8da, 0x81e75e89, 0x3d241cce, 0x7e21b544, 0xc1d90e24 }, { 0x41231051, 0x81dcccb3, 0x3d18ec37, 0x7e2d1ee6, 0xc1cdef10 }, { 0x413acdd0, 0x81d2b39e, 0x3d0cc20a, 0x7e38184e, 0xc1c33c13 }, { 0x41532ea7, 0x81c90ffb, 0x3cff9585, 0x7e42a58b, 0xc1b8f15a }, { 0x416c40cd, 0x81bfdeb2, 0x3cf15d21, 0x7e4cca7c, 0xc1af0b3f }, { 0x418612ea, 0x81b71cdc, 0x3ce20e85, 0x7e568ad3, 0xc1a58640 }, { 0x41a0b465, 0x81aec7c5, 0x3cd19e7c, 0x7e5fea1e, 0xc19c5f03 }, { 0x41bc3573, 0x81a6dcea, 0x3cc000e9, 0x7e68ebc2, 0xc1939250 } }; static const u32 treble_table[41][5] = { { 0x0125cba9, 0xfed5debd, 0x00599b6c, 0x0d2506da, 0xfa85b354 }, { 0x0142f67e, 0xfeb03163, 0x0066cd0f, 0x0d14c69d, 0xfa914473 }, { 0x016328bd, 0xfe860158, 0x0075b7f2, 0x0d03eb27, 0xfa9d32d2 }, { 0x0186b438, 0xfe56c982, 0x00869234, 0x0cf27048, 0xfaa97fca }, { 0x01adf358, 0xfe21f5fe, 0x00999842, 0x0ce051c2, 0xfab62ca5 }, { 0x01d949fa, 0xfde6e287, 0x00af0d8d, 0x0ccd8b4a, 0xfac33aa7 }, { 0x02092669, 0xfda4d8bf, 0x00c73d4c, 0x0cba1884, 0xfad0ab07 }, { 0x023e0268, 0xfd5b0e4a, 0x00e27b54, 0x0ca5f509, 0xfade7ef2 }, { 0x0278645c, 0xfd08a2b0, 0x01012509, 0x0c911c63, 0xfaecb788 }, { 0x02b8e091, 0xfcac9d1a, 0x0123a262, 0x0c7b8a14, 0xfafb55df }, { 0x03001a9a, 0xfc45e9ce, 
0x014a6709, 0x0c65398f, 0xfb0a5aff }, { 0x034ec6d7, 0xfbd3576b, 0x0175f397, 0x0c4e2643, 0xfb19c7e4 }, { 0x03a5ac15, 0xfb5393ee, 0x01a6d6ed, 0x0c364b94, 0xfb299d7c }, { 0x0405a562, 0xfac52968, 0x01ddafae, 0x0c1da4e2, 0xfb39dca5 }, { 0x046fa3fe, 0xfa267a66, 0x021b2ddd, 0x0c042d8d, 0xfb4a8631 }, { 0x04e4b17f, 0xf975be0f, 0x0260149f, 0x0be9e0f2, 0xfb5b9ae0 }, { 0x0565f220, 0xf8b0fbe5, 0x02ad3c29, 0x0bceba73, 0xfb6d1b60 }, { 0x05f4a745, 0xf7d60722, 0x030393d4, 0x0bb2b578, 0xfb7f084d }, { 0x06923236, 0xf6e279bd, 0x03642465, 0x0b95cd75, 0xfb916233 }, { 0x07401713, 0xf5d3aef9, 0x03d01283, 0x0b77fded, 0xfba42984 }, { 0x08000000, 0xf4a6bd88, 0x0448a161, 0x0b594278, 0xfbb75e9f }, { 0x08d3c097, 0xf3587131, 0x04cf35a4, 0x0b3996c9, 0xfbcb01cb }, { 0x09bd59a2, 0xf1e543f9, 0x05655880, 0x0b18f6b2, 0xfbdf1333 }, { 0x0abefd0f, 0xf04956ca, 0x060cbb12, 0x0af75e2c, 0xfbf392e8 }, { 0x0bdb123e, 0xee806984, 0x06c739fe, 0x0ad4c962, 0xfc0880dd }, { 0x0d143a94, 0xec85d287, 0x0796e150, 0x0ab134b0, 0xfc1ddce5 }, { 0x0e6d5664, 0xea547598, 0x087df0a0, 0x0a8c9cb6, 0xfc33a6ad }, { 0x0fe98a2a, 0xe7e6ba35, 0x097edf83, 0x0a66fe5b, 0xfc49ddc2 }, { 0x118c4421, 0xe536813a, 0x0a9c6248, 0x0a4056d7, 0xfc608185 }, { 0x1359422e, 0xe23d19eb, 0x0bd96efb, 0x0a18a3bf, 0xfc77912c }, { 0x1554982b, 0xdef33645, 0x0d3942bd, 0x09efe312, 0xfc8f0bc1 }, { 0x1782b68a, 0xdb50deb1, 0x0ebf676d, 0x09c6133f, 0xfca6f019 }, { 0x19e8715d, 0xd74d64fd, 0x106fb999, 0x099b3337, 0xfcbf3cd6 }, { 0x1c8b07b8, 0xd2df56ab, 0x124e6ec8, 0x096f4274, 0xfcd7f060 }, { 0x1f702b6d, 0xcdfc6e92, 0x14601c10, 0x0942410b, 0xfcf108e5 }, { 0x229e0933, 0xc89985cd, 0x16a9bcfa, 0x09142fb5, 0xfd0a8451 }, { 0x261b5118, 0xc2aa8409, 0x1930bab6, 0x08e50fdc, 0xfd24604d }, { 0x29ef3f5d, 0xbc224f28, 0x1bfaf396, 0x08b4e3aa, 0xfd3e9a3b }, { 0x2e21a59b, 0xb4f2ba46, 0x1f0ec2d6, 0x0883ae15, 0xfd592f33 }, { 0x32baf44b, 0xad0c7429, 0x227308a3, 0x085172eb, 0xfd741bfd }, { 0x37c4448b, 0xa45ef51d, 0x262f3267, 0x081e36dc, 0xfd8f5d14 } }; /* dB gain = (float) 20 * log10( 
float(db_table_value) / 0x8000000 ) */ static const u32 db_table[101] = { 0x00000000, 0x01571f82, 0x01674b41, 0x01783a1b, 0x0189f540, 0x019c8651, 0x01aff763, 0x01c45306, 0x01d9a446, 0x01eff6b8, 0x0207567a, 0x021fd03d, 0x0239714c, 0x02544792, 0x027061a1, 0x028dcebb, 0x02ac9edc, 0x02cce2bf, 0x02eeabe8, 0x03120cb0, 0x0337184e, 0x035de2df, 0x03868173, 0x03b10a18, 0x03dd93e9, 0x040c3713, 0x043d0cea, 0x04702ff3, 0x04a5bbf2, 0x04ddcdfb, 0x0518847f, 0x0555ff62, 0x05966005, 0x05d9c95d, 0x06206005, 0x066a4a52, 0x06b7b067, 0x0708bc4c, 0x075d9a01, 0x07b6779d, 0x08138561, 0x0874f5d5, 0x08dafde1, 0x0945d4ed, 0x09b5b4fd, 0x0a2adad1, 0x0aa58605, 0x0b25f936, 0x0bac7a24, 0x0c3951d8, 0x0ccccccc, 0x0d673b17, 0x0e08f093, 0x0eb24510, 0x0f639481, 0x101d3f2d, 0x10dfa9e6, 0x11ab3e3f, 0x12806ac3, 0x135fa333, 0x144960c5, 0x153e2266, 0x163e6cfe, 0x174acbb7, 0x1863d04d, 0x198a1357, 0x1abe349f, 0x1c00db77, 0x1d52b712, 0x1eb47ee6, 0x2026f30f, 0x21aadcb6, 0x23410e7e, 0x24ea64f9, 0x26a7c71d, 0x287a26c4, 0x2a62812c, 0x2c61df84, 0x2e795779, 0x30aa0bcf, 0x32f52cfe, 0x355bf9d8, 0x37dfc033, 0x3a81dda4, 0x3d43c038, 0x4026e73c, 0x432ce40f, 0x46575af8, 0x49a8040f, 0x4d20ac2a, 0x50c335d3, 0x54919a57, 0x588dead1, 0x5cba514a, 0x611911ea, 0x65ac8c2f, 0x6a773c39, 0x6f7bbc23, 0x74bcc56c, 0x7a3d3272, 0x7fffffff, }; /* EMU10k1/EMU10k2 DSP control db gain */ static const DECLARE_TLV_DB_SCALE(snd_emu10k1_db_scale1, -4000, 40, 1); static const DECLARE_TLV_DB_LINEAR(snd_emu10k1_db_linear, TLV_DB_GAIN_MUTE, 0); /* EMU10K1 bass/treble db gain */ static const DECLARE_TLV_DB_SCALE(snd_emu10k1_bass_treble_db_scale, -1200, 60, 0); static const u32 onoff_table[2] = { 0x00000000, 0x00000001 }; /* */ static inline mm_segment_t snd_enter_user(void) { mm_segment_t fs = get_fs(); set_fs(get_ds()); return fs; } static inline void snd_leave_user(mm_segment_t fs) { set_fs(fs); } /* * controls */ static int snd_emu10k1_gpr_ctl_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct snd_emu10k1_fx8010_ctl *ctl = 
(struct snd_emu10k1_fx8010_ctl *) kcontrol->private_value;

	/* a 0..1 range is presented to user space as a boolean switch */
	if (ctl->min == 0 && ctl->max == 1)
		uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
	else
		uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = ctl->vcount;
	uinfo->value.integer.min = ctl->min;
	uinfo->value.integer.max = ctl->max;
	return 0;
}

/* .get callback: report the cached control values under reg_lock. */
static int snd_emu10k1_gpr_ctl_get(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
	struct snd_emu10k1_fx8010_ctl *ctl =
		(struct snd_emu10k1_fx8010_ctl *) kcontrol->private_value;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&emu->reg_lock, flags);
	for (i = 0; i < ctl->vcount; i++)
		ucontrol->value.integer.value[i] = ctl->value[i];
	spin_unlock_irqrestore(&emu->reg_lock, flags);
	return 0;
}

/*
 * .put callback: clamp each incoming value into [min, max], cache it,
 * and translate it into the backing GPR(s) according to the control's
 * translation mode.  Returns 1 if any value changed, 0 if not, or -EIO
 * when a BASS/TREBLE control is mis-sized (count must be 5 * vcount).
 */
static int snd_emu10k1_gpr_ctl_put(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
	struct snd_emu10k1_fx8010_ctl *ctl =
		(struct snd_emu10k1_fx8010_ctl *) kcontrol->private_value;
	unsigned long flags;
	unsigned int nval, val;
	unsigned int i, j;
	int change = 0;

	spin_lock_irqsave(&emu->reg_lock, flags);
	for (i = 0; i < ctl->vcount; i++) {
		nval = ucontrol->value.integer.value[i];
		if (nval < ctl->min)
			nval = ctl->min;
		if (nval > ctl->max)
			nval = ctl->max;
		if (nval != ctl->value[i])
			change = 1;
		val = ctl->value[i] = nval;
		switch (ctl->translation) {
		case EMU10K1_GPR_TRANSLATION_NONE:
			/* raw value goes straight into the GPR */
			snd_emu10k1_ptr_write(emu, emu->gpr_base + ctl->gpr[i], 0, val);
			break;
		case EMU10K1_GPR_TRANSLATION_TABLE100:
			/* 0..100 volume mapped through the dB lookup table */
			snd_emu10k1_ptr_write(emu, emu->gpr_base + ctl->gpr[i], 0, db_table[val]);
			break;
		case EMU10K1_GPR_TRANSLATION_BASS:
			/* five filter coefficients per channel (bass_table row) */
			if ((ctl->count % 5) != 0 || (ctl->count / 5) != ctl->vcount) {
				change = -EIO;
				goto __error;
			}
			for (j = 0; j < 5; j++)
				snd_emu10k1_ptr_write(emu, emu->gpr_base + ctl->gpr[j * ctl->vcount + i], 0, bass_table[val][j]);
			break;
		case EMU10K1_GPR_TRANSLATION_TREBLE:
			/* same 5-coefficient layout as BASS, from treble_table */
			if ((ctl->count % 5) != 0 || (ctl->count / 5) != ctl->vcount) {
				change
= -EIO;
				goto __error;
			}
			for (j = 0; j < 5; j++)
				snd_emu10k1_ptr_write(emu, emu->gpr_base + ctl->gpr[j * ctl->vcount + i], 0, treble_table[val][j]);
			break;
		case EMU10K1_GPR_TRANSLATION_ONOFF:
			/* boolean mapped through onoff_table (0 / 1) */
			snd_emu10k1_ptr_write(emu, emu->gpr_base + ctl->gpr[i], 0, onoff_table[val]);
			break;
		}
	}
      __error:
	spin_unlock_irqrestore(&emu->reg_lock, flags);
	return change;
}

/*
 * Interrupt handler
 */

/*
 * Called from the card interrupt path when the FX8010 DSP raises an
 * interrupt: walk the registered handler list and invoke each handler
 * whose "running" GPR has its upper 16 bits set, then write 1 back to
 * that GPR to acknowledge/re-arm it.
 */
static void snd_emu10k1_fx8010_interrupt(struct snd_emu10k1 *emu)
{
	struct snd_emu10k1_fx8010_irq *irq, *nirq;

	irq = emu->fx8010.irq_handlers;
	while (irq) {
		nirq = irq->next;	/* irq ptr can be removed from list */
		if (snd_emu10k1_ptr_read(emu, emu->gpr_base + irq->gpr_running, 0) & 0xffff0000) {
			if (irq->handler)
				irq->handler(emu, irq->private_data);
			/* acknowledge by writing 1 into the running GPR */
			snd_emu10k1_ptr_write(emu, emu->gpr_base + irq->gpr_running, 0, 1);
		}
		irq = nirq;
	}
}

/*
 * Register @handler to be invoked from the FX8010 interrupt whenever the
 * DSP sets GPR @gpr_running.  Allocated with GFP_ATOMIC so it may be
 * called from atomic context.  On success *r_irq (if non-NULL) receives
 * the handle needed for unregistering.  Returns 0 or -ENOMEM.
 */
int snd_emu10k1_fx8010_register_irq_handler(struct snd_emu10k1 *emu,
					    snd_fx8010_irq_handler_t *handler,
					    unsigned char gpr_running,
					    void *private_data,
					    struct snd_emu10k1_fx8010_irq **r_irq)
{
	struct snd_emu10k1_fx8010_irq *irq;
	unsigned long flags;

	irq = kmalloc(sizeof(*irq), GFP_ATOMIC);
	if (irq == NULL)
		return -ENOMEM;
	irq->handler = handler;
	irq->gpr_running = gpr_running;
	irq->private_data = private_data;
	irq->next = NULL;
	spin_lock_irqsave(&emu->fx8010.irq_lock, flags);
	if (emu->fx8010.irq_handlers == NULL) {
		/* first handler: hook up the DSP interrupt and enable it */
		emu->fx8010.irq_handlers = irq;
		emu->dsp_interrupt = snd_emu10k1_fx8010_interrupt;
		snd_emu10k1_intr_enable(emu, INTE_FXDSPENABLE);
	} else {
		/* push onto the head of the singly linked handler list */
		irq->next = emu->fx8010.irq_handlers;
		emu->fx8010.irq_handlers = irq;
	}
	spin_unlock_irqrestore(&emu->fx8010.irq_lock, flags);
	if (r_irq)
		*r_irq = irq;
	return 0;
}

/*
 * Unlink @irq from the handler list and free it.  When the last handler
 * goes away the DSP interrupt is disabled and unhooked.
 */
int snd_emu10k1_fx8010_unregister_irq_handler(struct snd_emu10k1 *emu,
					      struct snd_emu10k1_fx8010_irq *irq)
{
	struct snd_emu10k1_fx8010_irq *tmp;
	unsigned long flags;

	spin_lock_irqsave(&emu->fx8010.irq_lock, flags);
	if ((tmp = emu->fx8010.irq_handlers) == irq) {
		emu->fx8010.irq_handlers = tmp->next;
		if (emu->fx8010.irq_handlers == NULL) {
snd_emu10k1_intr_disable(emu, INTE_FXDSPENABLE);
			emu->dsp_interrupt = NULL;
		}
	} else {
		/* not the head: walk the list to find the predecessor */
		while (tmp && tmp->next != irq)
			tmp = tmp->next;
		if (tmp)
			tmp->next = tmp->next->next;
	}
	spin_unlock_irqrestore(&emu->fx8010.irq_lock, flags);
	kfree(irq);
	return 0;
}

/*************************************************************************
 * EMU10K1 effect manager
 *************************************************************************/

/*
 * Append one FX8010 instruction at *ptr into @icode and advance *ptr.
 * Each instruction is two 32-bit words; operand fields are 10 bits wide
 * on the original EMU10K1, whose code store holds 512 instructions.
 */
static void snd_emu10k1_write_op(struct snd_emu10k1_fx8010_code *icode,
				 unsigned int *ptr, u32 op, u32 r, u32 a, u32 x, u32 y)
{
	u_int32_t *code;
	if (snd_BUG_ON(*ptr >= 512))
		return;
	code = (u_int32_t __force *)icode->code + (*ptr) * 2;
	set_bit(*ptr, icode->code_valid);
	code[0] = ((x & 0x3ff) << 10) | (y & 0x3ff);
	code[1] = ((op & 0x0f) << 20) | ((r & 0x3ff) << 10) | (a & 0x3ff);
	(*ptr)++;
}

#define OP(icode, ptr, op, r, a, x, y) \
	snd_emu10k1_write_op(icode, ptr, op, r, a, x, y)

/*
 * Audigy variant of the above: 1024 instructions, 11-bit operand fields
 * and a different opcode position.
 */
static void snd_emu10k1_audigy_write_op(struct snd_emu10k1_fx8010_code *icode,
					unsigned int *ptr, u32 op, u32 r, u32 a, u32 x, u32 y)
{
	u_int32_t *code;
	if (snd_BUG_ON(*ptr >= 1024))
		return;
	code = (u_int32_t __force *)icode->code + (*ptr) * 2;
	set_bit(*ptr, icode->code_valid);
	code[0] = ((x & 0x7ff) << 12) | (y & 0x7ff);
	code[1] = ((op & 0x0f) << 24) | ((r & 0x7ff) << 12) | (a & 0x7ff);
	(*ptr)++;
}

#define A_OP(icode, ptr, op, r, a, x, y) \
	snd_emu10k1_audigy_write_op(icode, ptr, op, r, a, x, y)

/* Write one microcode word at program counter @pc (base chosen per chip). */
static void snd_emu10k1_efx_write(struct snd_emu10k1 *emu, unsigned int pc, unsigned int data)
{
	pc += emu->audigy ? A_MICROCODEBASE : MICROCODEBASE;
	snd_emu10k1_ptr_write(emu, pc, 0, data);
}

/* Read back one microcode word at program counter @pc. */
unsigned int snd_emu10k1_efx_read(struct snd_emu10k1 *emu, unsigned int pc)
{
	pc += emu->audigy ? A_MICROCODEBASE : MICROCODEBASE;
	return snd_emu10k1_ptr_read(emu, pc, 0);
}

/*
 * Copy the GPR values flagged in @icode->gpr_valid from the user-space
 * map into the chip's GPR file.  Returns 0 or -EFAULT.
 */
static int snd_emu10k1_gpr_poke(struct snd_emu10k1 *emu,
				struct snd_emu10k1_fx8010_code *icode)
{
	int gpr;
	u32 val;

	/* Audigy exposes 0x200 GPRs, the original EMU10K1 0x100 */
	for (gpr = 0; gpr < (emu->audigy ?
0x200 : 0x100); gpr++) {
		if (!test_bit(gpr, icode->gpr_valid))
			continue;	/* only GPRs flagged valid are written */
		if (get_user(val, &icode->gpr_map[gpr]))
			return -EFAULT;
		snd_emu10k1_ptr_write(emu, emu->gpr_base + gpr, 0, val);
	}
	return 0;
}

/*
 * Read the whole GPR file back into @icode->gpr_map, marking every GPR
 * valid.  Returns 0 or -EFAULT if the user buffer is not writable.
 */
static int snd_emu10k1_gpr_peek(struct snd_emu10k1 *emu,
				struct snd_emu10k1_fx8010_code *icode)
{
	int gpr;
	u32 val;

	for (gpr = 0; gpr < (emu->audigy ? 0x200 : 0x100); gpr++) {
		set_bit(gpr, icode->gpr_valid);
		val = snd_emu10k1_ptr_read(emu, emu->gpr_base + gpr, 0);
		if (put_user(val, &icode->gpr_map[gpr]))
			return -EFAULT;
	}
	return 0;
}

/*
 * Load the TRAM (tank memory) data/address pairs flagged in
 * @icode->tram_valid into the chip.  Returns 0 or -EFAULT.
 */
static int snd_emu10k1_tram_poke(struct snd_emu10k1 *emu,
				 struct snd_emu10k1_fx8010_code *icode)
{
	int tram;
	u32 addr, val;

	/* Audigy has 0x100 TRAM slots, the original EMU10K1 0xa0 */
	for (tram = 0; tram < (emu->audigy ? 0x100 : 0xa0); tram++) {
		if (!test_bit(tram, icode->tram_valid))
			continue;
		if (get_user(val, &icode->tram_data_map[tram]) ||
		    get_user(addr, &icode->tram_addr_map[tram]))
			return -EFAULT;
		snd_emu10k1_ptr_write(emu, TANKMEMDATAREGBASE + tram, 0, val);
		if (!emu->audigy) {
			snd_emu10k1_ptr_write(emu, TANKMEMADDRREGBASE + tram, 0, addr);
		} else {
			/* Audigy splits the address: low part shifted into the
			   address register, high bits into the control register */
			snd_emu10k1_ptr_write(emu, TANKMEMADDRREGBASE + tram, 0, addr << 12);
			snd_emu10k1_ptr_write(emu, A_TANKMEMCTLREGBASE + tram, 0, addr >> 20);
		}
	}
	return 0;
}

/* Read back all TRAM slots into @icode, marking every slot valid. */
static int snd_emu10k1_tram_peek(struct snd_emu10k1 *emu,
				 struct snd_emu10k1_fx8010_code *icode)
{
	int tram;
	u32 val, addr;

	memset(icode->tram_valid, 0, sizeof(icode->tram_valid));
	for (tram = 0; tram < (emu->audigy ?
0x100 : 0xa0); tram++) {
		set_bit(tram, icode->tram_valid);
		val = snd_emu10k1_ptr_read(emu, TANKMEMDATAREGBASE + tram, 0);
		if (!emu->audigy) {
			addr = snd_emu10k1_ptr_read(emu, TANKMEMADDRREGBASE + tram, 0);
		} else {
			/* reassemble the Audigy split address (see tram_poke) */
			addr = snd_emu10k1_ptr_read(emu, TANKMEMADDRREGBASE + tram, 0) >> 12;
			addr |= snd_emu10k1_ptr_read(emu, A_TANKMEMCTLREGBASE + tram, 0) << 20;
		}
		if (put_user(val, &icode->tram_data_map[tram]) ||
		    put_user(addr, &icode->tram_addr_map[tram]))
			return -EFAULT;
	}
	return 0;
}

/*
 * Download the instruction pairs flagged in @icode->code_valid into the
 * DSP microcode store.  code[] holds two 32-bit words per instruction,
 * while code_valid is indexed per instruction — hence pc / 2.
 * Returns 0 or -EFAULT.
 */
static int snd_emu10k1_code_poke(struct snd_emu10k1 *emu,
				 struct snd_emu10k1_fx8010_code *icode)
{
	u32 pc, lo, hi;

	for (pc = 0; pc < (emu->audigy ? 2*1024 : 2*512); pc += 2) {
		if (!test_bit(pc / 2, icode->code_valid))
			continue;
		if (get_user(lo, &icode->code[pc + 0]) ||
		    get_user(hi, &icode->code[pc + 1]))
			return -EFAULT;
		snd_emu10k1_efx_write(emu, pc + 0, lo);
		snd_emu10k1_efx_write(emu, pc + 1, hi);
	}
	return 0;
}

/* Upload the entire microcode store into @icode, marking all entries valid. */
static int snd_emu10k1_code_peek(struct snd_emu10k1 *emu,
				 struct snd_emu10k1_fx8010_code *icode)
{
	u32 pc;

	memset(icode->code_valid, 0, sizeof(icode->code_valid));
	for (pc = 0; pc < (emu->audigy ?
2*1024 : 2*512); pc += 2) {
		set_bit(pc / 2, icode->code_valid);
		if (put_user(snd_emu10k1_efx_read(emu, pc + 0), &icode->code[pc + 0]))
			return -EFAULT;
		if (put_user(snd_emu10k1_efx_read(emu, pc + 1), &icode->code[pc + 1]))
			return -EFAULT;
	}
	return 0;
}

/*
 * Look up one of our GPR controls by control-element id (iface, name,
 * index).  Returns the bookkeeping entry or NULL when not registered.
 */
static struct snd_emu10k1_fx8010_ctl *
snd_emu10k1_look_for_ctl(struct snd_emu10k1 *emu, struct snd_ctl_elem_id *id)
{
	struct snd_emu10k1_fx8010_ctl *ctl;
	struct snd_kcontrol *kcontrol;

	list_for_each_entry(ctl, &emu->fx8010.gpr_ctl, list) {
		kcontrol = ctl->kcontrol;
		if (kcontrol->id.iface == id->iface &&
		    !strcmp(kcontrol->id.name, id->name) &&
		    kcontrol->id.index == id->index)
			return ctl;
	}
	return NULL;
}

/* upper bound (bytes) for a user-supplied TLV payload */
#define MAX_TLV_SIZE 256

/*
 * Copy a user-space TLV block (header word pair + data[1] payload bytes).
 * Returns a kmalloc'ed buffer the caller must kfree, or NULL on any
 * error: no TLV given, fault, or oversized payload.
 */
static unsigned int *copy_tlv(const unsigned int __user *_tlv)
{
	unsigned int data[2];
	unsigned int *tlv;

	if (!_tlv)
		return NULL;
	if (copy_from_user(data, _tlv, sizeof(data)))
		return NULL;
	/* data[1] is the payload length in bytes; reject oversized requests */
	if (data[1] >= MAX_TLV_SIZE)
		return NULL;
	tlv = kmalloc(data[1] + sizeof(data), GFP_KERNEL);
	if (!tlv)
		return NULL;
	memcpy(tlv, data, sizeof(data));
	if (copy_from_user(tlv + 2, _tlv + 2, data[1])) {
		kfree(tlv);
		return NULL;
	}
	return tlv;
}

/*
 * Copy the idx-th control description from user space into @gctl,
 * transparently accepting the pre-TLV ("old") layout when the client
 * did not announce TLV support (emu->support_tlv).
 */
static int copy_gctl(struct snd_emu10k1 *emu,
		     struct snd_emu10k1_fx8010_control_gpr *gctl,
		     struct snd_emu10k1_fx8010_control_gpr __user *_gctl,
		     int idx)
{
	struct snd_emu10k1_fx8010_control_old_gpr __user *octl;

	if (emu->support_tlv)
		return copy_from_user(gctl, &_gctl[idx], sizeof(*gctl));
	octl = (struct snd_emu10k1_fx8010_control_old_gpr __user *)_gctl;
	if (copy_from_user(gctl, &octl[idx], sizeof(*octl)))
		return -EFAULT;
	gctl->tlv = NULL;	/* old layout carries no TLV pointer */
	return 0;
}

/* Mirror of copy_gctl() for the kernel-to-user direction. */
static int copy_gctl_to_user(struct snd_emu10k1 *emu,
			     struct snd_emu10k1_fx8010_control_gpr __user *_gctl,
			     struct snd_emu10k1_fx8010_control_gpr *gctl,
			     int idx)
{
	struct snd_emu10k1_fx8010_control_old_gpr __user *octl;

	if (emu->support_tlv)
		return copy_to_user(&_gctl[idx], gctl, sizeof(*gctl));
	octl = (struct snd_emu10k1_fx8010_control_old_gpr __user *)_gctl;
	return copy_to_user(&octl[idx], gctl, sizeof(*octl));
}

static int
snd_emu10k1_verify_controls(struct snd_emu10k1 *emu,
			    struct snd_emu10k1_fx8010_code *icode)
{
	/* Validate a program load before anything is modified:
	 *  - every control to delete must currently exist,
	 *  - every control to add must either already be one of ours or be
	 *    a new MIXER/PCM element not owned by anyone else,
	 *  - the list-request array must be readable.
	 */
	unsigned int i;
	struct snd_ctl_elem_id __user *_id;
	struct snd_ctl_elem_id id;
	struct snd_emu10k1_fx8010_control_gpr *gctl;
	int err;

	for (i = 0, _id = icode->gpr_del_controls;
	     i < icode->gpr_del_control_count; i++, _id++) {
		if (copy_from_user(&id, _id, sizeof(id)))
			return -EFAULT;
		if (snd_emu10k1_look_for_ctl(emu, &id) == NULL)
			return -ENOENT;
	}
	gctl = kmalloc(sizeof(*gctl), GFP_KERNEL);
	if (! gctl)
		return -ENOMEM;
	err = 0;
	for (i = 0; i < icode->gpr_add_control_count; i++) {
		if (copy_gctl(emu, gctl, icode->gpr_add_controls, i)) {
			err = -EFAULT;
			goto __error;
		}
		if (snd_emu10k1_look_for_ctl(emu, &gctl->id))
			continue;	/* already ours: will be overwritten */
		down_read(&emu->card->controls_rwsem);
		if (snd_ctl_find_id(emu->card, &gctl->id) != NULL) {
			up_read(&emu->card->controls_rwsem);
			err = -EEXIST;	/* clashes with a foreign control */
			goto __error;
		}
		up_read(&emu->card->controls_rwsem);
		if (gctl->id.iface != SNDRV_CTL_ELEM_IFACE_MIXER &&
		    gctl->id.iface != SNDRV_CTL_ELEM_IFACE_PCM) {
			err = -EINVAL;
			goto __error;
		}
	}
	for (i = 0; i < icode->gpr_list_control_count; i++) {
		/* FIXME: we need to check the WRITE access */
		if (copy_gctl(emu, gctl, icode->gpr_list_controls, i)) {
			err = -EFAULT;
			goto __error;
		}
	}
      __error:
	kfree(gctl);
	return err;
}

/*
 * private_free callback for GPR kcontrols: unlink our bookkeeping entry
 * and release it together with any TLV block attached to the kcontrol.
 */
static void snd_emu10k1_ctl_private_free(struct snd_kcontrol *kctl)
{
	struct snd_emu10k1_fx8010_ctl *ctl;

	ctl = (struct snd_emu10k1_fx8010_ctl *) kctl->private_value;
	kctl->private_value = 0;
	list_del(&ctl->list);
	kfree(ctl);
	/* NOTE(review): kfree(NULL) is a no-op, so this guard is redundant */
	if (kctl->tlv.p)
		kfree(kctl->tlv.p);
}

/*
 * Create (or overwrite) the GPR mixer controls requested by @icode,
 * then push each control's initial values to the chip through the
 * normal .put path.  Returns 0, -ENOMEM, -EFAULT or -EINVAL.
 */
static int snd_emu10k1_add_controls(struct snd_emu10k1 *emu,
				    struct snd_emu10k1_fx8010_code *icode)
{
	unsigned int i, j;
	struct snd_emu10k1_fx8010_control_gpr *gctl;
	struct snd_emu10k1_fx8010_ctl *ctl, *nctl;
	struct snd_kcontrol_new knew;
	struct snd_kcontrol *kctl;
	struct snd_ctl_elem_value *val;
	int err = 0;

	val = kmalloc(sizeof(*val), GFP_KERNEL);
	gctl = kmalloc(sizeof(*gctl), GFP_KERNEL);
	nctl = kmalloc(sizeof(*nctl), GFP_KERNEL);
	if (!val || !gctl ||
!nctl) { err = -ENOMEM; goto __error; } for (i = 0; i < icode->gpr_add_control_count; i++) { if (copy_gctl(emu, gctl, icode->gpr_add_controls, i)) { err = -EFAULT; goto __error; } if (gctl->id.iface != SNDRV_CTL_ELEM_IFACE_MIXER && gctl->id.iface != SNDRV_CTL_ELEM_IFACE_PCM) { err = -EINVAL; goto __error; } if (! gctl->id.name[0]) { err = -EINVAL; goto __error; } ctl = snd_emu10k1_look_for_ctl(emu, &gctl->id); memset(&knew, 0, sizeof(knew)); knew.iface = gctl->id.iface; knew.name = gctl->id.name; knew.index = gctl->id.index; knew.device = gctl->id.device; knew.subdevice = gctl->id.subdevice; knew.info = snd_emu10k1_gpr_ctl_info; knew.tlv.p = copy_tlv(gctl->tlv); if (knew.tlv.p) knew.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ; knew.get = snd_emu10k1_gpr_ctl_get; knew.put = snd_emu10k1_gpr_ctl_put; memset(nctl, 0, sizeof(*nctl)); nctl->vcount = gctl->vcount; nctl->count = gctl->count; for (j = 0; j < 32; j++) { nctl->gpr[j] = gctl->gpr[j]; nctl->value[j] = ~gctl->value[j]; /* inverted, we want to write new value in gpr_ctl_put() */ val->value.integer.value[j] = gctl->value[j]; } nctl->min = gctl->min; nctl->max = gctl->max; nctl->translation = gctl->translation; if (ctl == NULL) { ctl = kmalloc(sizeof(*ctl), GFP_KERNEL); if (ctl == NULL) { err = -ENOMEM; kfree(knew.tlv.p); goto __error; } knew.private_value = (unsigned long)ctl; *ctl = *nctl; if ((err = snd_ctl_add(emu->card, kctl = snd_ctl_new1(&knew, emu))) < 0) { kfree(ctl); kfree(knew.tlv.p); goto __error; } kctl->private_free = snd_emu10k1_ctl_private_free; ctl->kcontrol = kctl; list_add_tail(&ctl->list, &emu->fx8010.gpr_ctl); } else { /* overwrite */ nctl->list = ctl->list; nctl->kcontrol = ctl->kcontrol; *ctl = *nctl; snd_ctl_notify(emu->card, SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO, &ctl->kcontrol->id); } snd_emu10k1_gpr_ctl_put(ctl->kcontrol, val); } __error: kfree(nctl); kfree(gctl); kfree(val); return err; } static int snd_emu10k1_del_controls(struct 
snd_emu10k1 *emu, struct snd_emu10k1_fx8010_code *icode) { unsigned int i; struct snd_ctl_elem_id id; struct snd_ctl_elem_id __user *_id; struct snd_emu10k1_fx8010_ctl *ctl; struct snd_card *card = emu->card; for (i = 0, _id = icode->gpr_del_controls; i < icode->gpr_del_control_count; i++, _id++) { if (copy_from_user(&id, _id, sizeof(id))) return -EFAULT; down_write(&card->controls_rwsem); ctl = snd_emu10k1_look_for_ctl(emu, &id); if (ctl) snd_ctl_remove(card, ctl->kcontrol); up_write(&card->controls_rwsem); } return 0; } static int snd_emu10k1_list_controls(struct snd_emu10k1 *emu, struct snd_emu10k1_fx8010_code *icode) { unsigned int i = 0, j; unsigned int total = 0; struct snd_emu10k1_fx8010_control_gpr *gctl; struct snd_emu10k1_fx8010_ctl *ctl; struct snd_ctl_elem_id *id; gctl = kmalloc(sizeof(*gctl), GFP_KERNEL); if (! gctl) return -ENOMEM; list_for_each_entry(ctl, &emu->fx8010.gpr_ctl, list) { total++; if (icode->gpr_list_controls && i < icode->gpr_list_control_count) { memset(gctl, 0, sizeof(*gctl)); id = &ctl->kcontrol->id; gctl->id.iface = id->iface; strlcpy(gctl->id.name, id->name, sizeof(gctl->id.name)); gctl->id.index = id->index; gctl->id.device = id->device; gctl->id.subdevice = id->subdevice; gctl->vcount = ctl->vcount; gctl->count = ctl->count; for (j = 0; j < 32; j++) { gctl->gpr[j] = ctl->gpr[j]; gctl->value[j] = ctl->value[j]; } gctl->min = ctl->min; gctl->max = ctl->max; gctl->translation = ctl->translation; if (copy_gctl_to_user(emu, icode->gpr_list_controls, gctl, i)) { kfree(gctl); return -EFAULT; } i++; } } icode->gpr_list_control_total = total; kfree(gctl); return 0; } static int snd_emu10k1_icode_poke(struct snd_emu10k1 *emu, struct snd_emu10k1_fx8010_code *icode) { int err = 0; mutex_lock(&emu->fx8010.lock); if ((err = snd_emu10k1_verify_controls(emu, icode)) < 0) goto __error; strlcpy(emu->fx8010.name, icode->name, sizeof(emu->fx8010.name)); /* stop FX processor - this may be dangerous, but it's better to miss some samples than generate 
wrong ones - [jk] */ if (emu->audigy) snd_emu10k1_ptr_write(emu, A_DBG, 0, emu->fx8010.dbg | A_DBG_SINGLE_STEP); else snd_emu10k1_ptr_write(emu, DBG, 0, emu->fx8010.dbg | EMU10K1_DBG_SINGLE_STEP); /* ok, do the main job */ if ((err = snd_emu10k1_del_controls(emu, icode)) < 0 || (err = snd_emu10k1_gpr_poke(emu, icode)) < 0 || (err = snd_emu10k1_tram_poke(emu, icode)) < 0 || (err = snd_emu10k1_code_poke(emu, icode)) < 0 || (err = snd_emu10k1_add_controls(emu, icode)) < 0) goto __error; /* start FX processor when the DSP code is updated */ if (emu->audigy) snd_emu10k1_ptr_write(emu, A_DBG, 0, emu->fx8010.dbg); else snd_emu10k1_ptr_write(emu, DBG, 0, emu->fx8010.dbg); __error: mutex_unlock(&emu->fx8010.lock); return err; } static int snd_emu10k1_icode_peek(struct snd_emu10k1 *emu, struct snd_emu10k1_fx8010_code *icode) { int err; mutex_lock(&emu->fx8010.lock); strlcpy(icode->name, emu->fx8010.name, sizeof(icode->name)); /* ok, do the main job */ err = snd_emu10k1_gpr_peek(emu, icode); if (err >= 0) err = snd_emu10k1_tram_peek(emu, icode); if (err >= 0) err = snd_emu10k1_code_peek(emu, icode); if (err >= 0) err = snd_emu10k1_list_controls(emu, icode); mutex_unlock(&emu->fx8010.lock); return err; } static int snd_emu10k1_ipcm_poke(struct snd_emu10k1 *emu, struct snd_emu10k1_fx8010_pcm_rec *ipcm) { unsigned int i; int err = 0; struct snd_emu10k1_fx8010_pcm *pcm; if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT) return -EINVAL; if (ipcm->channels > 32) return -EINVAL; pcm = &emu->fx8010.pcm[ipcm->substream]; mutex_lock(&emu->fx8010.lock); spin_lock_irq(&emu->reg_lock); if (pcm->opened) { err = -EBUSY; goto __error; } if (ipcm->channels == 0) { /* remove */ pcm->valid = 0; } else { /* FIXME: we need to add universal code to the PCM transfer routine */ if (ipcm->channels != 2) { err = -EINVAL; goto __error; } pcm->valid = 1; pcm->opened = 0; pcm->channels = ipcm->channels; pcm->tram_start = ipcm->tram_start; pcm->buffer_size = ipcm->buffer_size; pcm->gpr_size = 
ipcm->gpr_size; pcm->gpr_count = ipcm->gpr_count; pcm->gpr_tmpcount = ipcm->gpr_tmpcount; pcm->gpr_ptr = ipcm->gpr_ptr; pcm->gpr_trigger = ipcm->gpr_trigger; pcm->gpr_running = ipcm->gpr_running; for (i = 0; i < pcm->channels; i++) pcm->etram[i] = ipcm->etram[i]; } __error: spin_unlock_irq(&emu->reg_lock); mutex_unlock(&emu->fx8010.lock); return err; } static int snd_emu10k1_ipcm_peek(struct snd_emu10k1 *emu, struct snd_emu10k1_fx8010_pcm_rec *ipcm) { unsigned int i; int err = 0; struct snd_emu10k1_fx8010_pcm *pcm; if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT) return -EINVAL; pcm = &emu->fx8010.pcm[ipcm->substream]; mutex_lock(&emu->fx8010.lock); spin_lock_irq(&emu->reg_lock); ipcm->channels = pcm->channels; ipcm->tram_start = pcm->tram_start; ipcm->buffer_size = pcm->buffer_size; ipcm->gpr_size = pcm->gpr_size; ipcm->gpr_ptr = pcm->gpr_ptr; ipcm->gpr_count = pcm->gpr_count; ipcm->gpr_tmpcount = pcm->gpr_tmpcount; ipcm->gpr_trigger = pcm->gpr_trigger; ipcm->gpr_running = pcm->gpr_running; for (i = 0; i < pcm->channels; i++) ipcm->etram[i] = pcm->etram[i]; ipcm->res1 = ipcm->res2 = 0; ipcm->pad = 0; spin_unlock_irq(&emu->reg_lock); mutex_unlock(&emu->fx8010.lock); return err; } #define SND_EMU10K1_GPR_CONTROLS 44 #define SND_EMU10K1_INPUTS 12 #define SND_EMU10K1_PLAYBACK_CHANNELS 8 #define SND_EMU10K1_CAPTURE_CHANNELS 4 static void __devinit snd_emu10k1_init_mono_control(struct snd_emu10k1_fx8010_control_gpr *ctl, const char *name, int gpr, int defval) { ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; strcpy(ctl->id.name, name); ctl->vcount = ctl->count = 1; ctl->gpr[0] = gpr + 0; ctl->value[0] = defval; if (high_res_gpr_volume) { ctl->min = 0; ctl->max = 0x7fffffff; ctl->tlv = snd_emu10k1_db_linear; ctl->translation = EMU10K1_GPR_TRANSLATION_NONE; } else { ctl->min = 0; ctl->max = 100; ctl->tlv = snd_emu10k1_db_scale1; ctl->translation = EMU10K1_GPR_TRANSLATION_TABLE100; } } static void __devinit snd_emu10k1_init_stereo_control(struct snd_emu10k1_fx8010_control_gpr 
*ctl, const char *name, int gpr, int defval) { ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; strcpy(ctl->id.name, name); ctl->vcount = ctl->count = 2; ctl->gpr[0] = gpr + 0; ctl->value[0] = defval; ctl->gpr[1] = gpr + 1; ctl->value[1] = defval; if (high_res_gpr_volume) { ctl->min = 0; ctl->max = 0x7fffffff; ctl->tlv = snd_emu10k1_db_linear; ctl->translation = EMU10K1_GPR_TRANSLATION_NONE; } else { ctl->min = 0; ctl->max = 100; ctl->tlv = snd_emu10k1_db_scale1; ctl->translation = EMU10K1_GPR_TRANSLATION_TABLE100; } } static void __devinit snd_emu10k1_init_mono_onoff_control(struct snd_emu10k1_fx8010_control_gpr *ctl, const char *name, int gpr, int defval) { ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; strcpy(ctl->id.name, name); ctl->vcount = ctl->count = 1; ctl->gpr[0] = gpr + 0; ctl->value[0] = defval; ctl->min = 0; ctl->max = 1; ctl->translation = EMU10K1_GPR_TRANSLATION_ONOFF; } static void __devinit snd_emu10k1_init_stereo_onoff_control(struct snd_emu10k1_fx8010_control_gpr *ctl, const char *name, int gpr, int defval) { ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; strcpy(ctl->id.name, name); ctl->vcount = ctl->count = 2; ctl->gpr[0] = gpr + 0; ctl->value[0] = defval; ctl->gpr[1] = gpr + 1; ctl->value[1] = defval; ctl->min = 0; ctl->max = 1; ctl->translation = EMU10K1_GPR_TRANSLATION_ONOFF; } /* * Used for emu1010 - conversion from 32-bit capture inputs from HANA * to 2 x 16-bit registers in audigy - their values are read via DMA. * Conversion is performed by Audigy DSP instructions of FX8010. 
*/ static int snd_emu10k1_audigy_dsp_convert_32_to_2x16( struct snd_emu10k1_fx8010_code *icode, u32 *ptr, int tmp, int bit_shifter16, int reg_in, int reg_out) { A_OP(icode, ptr, iACC3, A_GPR(tmp + 1), reg_in, A_C_00000000, A_C_00000000); A_OP(icode, ptr, iANDXOR, A_GPR(tmp), A_GPR(tmp + 1), A_GPR(bit_shifter16 - 1), A_C_00000000); A_OP(icode, ptr, iTSTNEG, A_GPR(tmp + 2), A_GPR(tmp), A_C_80000000, A_GPR(bit_shifter16 - 2)); A_OP(icode, ptr, iANDXOR, A_GPR(tmp + 2), A_GPR(tmp + 2), A_C_80000000, A_C_00000000); A_OP(icode, ptr, iANDXOR, A_GPR(tmp), A_GPR(tmp), A_GPR(bit_shifter16 - 3), A_C_00000000); A_OP(icode, ptr, iMACINT0, A_GPR(tmp), A_C_00000000, A_GPR(tmp), A_C_00010000); A_OP(icode, ptr, iANDXOR, reg_out, A_GPR(tmp), A_C_ffffffff, A_GPR(tmp + 2)); A_OP(icode, ptr, iACC3, reg_out + 1, A_GPR(tmp + 1), A_C_00000000, A_C_00000000); return 1; } /* * initial DSP configuration for Audigy */ static int __devinit _snd_emu10k1_audigy_init_efx(struct snd_emu10k1 *emu) { int err, i, z, gpr, nctl; int bit_shifter16; const int playback = 10; const int capture = playback + (SND_EMU10K1_PLAYBACK_CHANNELS * 2); /* we reserve 10 voices */ const int stereo_mix = capture + 2; const int tmp = 0x88; u32 ptr; struct snd_emu10k1_fx8010_code *icode = NULL; struct snd_emu10k1_fx8010_control_gpr *controls = NULL, *ctl; u32 *gpr_map; mm_segment_t seg; if ((icode = kzalloc(sizeof(*icode), GFP_KERNEL)) == NULL || (icode->gpr_map = (u_int32_t __user *) kcalloc(512 + 256 + 256 + 2 * 1024, sizeof(u_int32_t), GFP_KERNEL)) == NULL || (controls = kcalloc(SND_EMU10K1_GPR_CONTROLS, sizeof(*controls), GFP_KERNEL)) == NULL) { err = -ENOMEM; goto __err; } gpr_map = (u32 __force *)icode->gpr_map; icode->tram_data_map = icode->gpr_map + 512; icode->tram_addr_map = icode->tram_data_map + 256; icode->code = icode->tram_addr_map + 256; /* clear free GPRs */ for (i = 0; i < 512; i++) set_bit(i, icode->gpr_valid); /* clear TRAM data & address lines */ for (i = 0; i < 256; i++) set_bit(i, 
icode->tram_valid); strcpy(icode->name, "Audigy DSP code for ALSA"); ptr = 0; nctl = 0; gpr = stereo_mix + 10; gpr_map[gpr++] = 0x00007fff; gpr_map[gpr++] = 0x00008000; gpr_map[gpr++] = 0x0000ffff; bit_shifter16 = gpr; /* stop FX processor */ snd_emu10k1_ptr_write(emu, A_DBG, 0, (emu->fx8010.dbg = 0) | A_DBG_SINGLE_STEP); #if 1 /* PCM front Playback Volume (independent from stereo mix) * playback = 0 + ( gpr * FXBUS_PCM_LEFT_FRONT >> 31) * where gpr contains attenuation from corresponding mixer control * (snd_emu10k1_init_stereo_control) */ A_OP(icode, &ptr, iMAC0, A_GPR(playback), A_C_00000000, A_GPR(gpr), A_FXBUS(FXBUS_PCM_LEFT_FRONT)); A_OP(icode, &ptr, iMAC0, A_GPR(playback+1), A_C_00000000, A_GPR(gpr+1), A_FXBUS(FXBUS_PCM_RIGHT_FRONT)); snd_emu10k1_init_stereo_control(&controls[nctl++], "PCM Front Playback Volume", gpr, 100); gpr += 2; /* PCM Surround Playback (independent from stereo mix) */ A_OP(icode, &ptr, iMAC0, A_GPR(playback+2), A_C_00000000, A_GPR(gpr), A_FXBUS(FXBUS_PCM_LEFT_REAR)); A_OP(icode, &ptr, iMAC0, A_GPR(playback+3), A_C_00000000, A_GPR(gpr+1), A_FXBUS(FXBUS_PCM_RIGHT_REAR)); snd_emu10k1_init_stereo_control(&controls[nctl++], "PCM Surround Playback Volume", gpr, 100); gpr += 2; /* PCM Side Playback (independent from stereo mix) */ if (emu->card_capabilities->spk71) { A_OP(icode, &ptr, iMAC0, A_GPR(playback+6), A_C_00000000, A_GPR(gpr), A_FXBUS(FXBUS_PCM_LEFT_SIDE)); A_OP(icode, &ptr, iMAC0, A_GPR(playback+7), A_C_00000000, A_GPR(gpr+1), A_FXBUS(FXBUS_PCM_RIGHT_SIDE)); snd_emu10k1_init_stereo_control(&controls[nctl++], "PCM Side Playback Volume", gpr, 100); gpr += 2; } /* PCM Center Playback (independent from stereo mix) */ A_OP(icode, &ptr, iMAC0, A_GPR(playback+4), A_C_00000000, A_GPR(gpr), A_FXBUS(FXBUS_PCM_CENTER)); snd_emu10k1_init_mono_control(&controls[nctl++], "PCM Center Playback Volume", gpr, 100); gpr++; /* PCM LFE Playback (independent from stereo mix) */ A_OP(icode, &ptr, iMAC0, A_GPR(playback+5), A_C_00000000, A_GPR(gpr), 
A_FXBUS(FXBUS_PCM_LFE)); snd_emu10k1_init_mono_control(&controls[nctl++], "PCM LFE Playback Volume", gpr, 100); gpr++; /* * Stereo Mix */ /* Wave (PCM) Playback Volume (will be renamed later) */ A_OP(icode, &ptr, iMAC0, A_GPR(stereo_mix), A_C_00000000, A_GPR(gpr), A_FXBUS(FXBUS_PCM_LEFT)); A_OP(icode, &ptr, iMAC0, A_GPR(stereo_mix+1), A_C_00000000, A_GPR(gpr+1), A_FXBUS(FXBUS_PCM_RIGHT)); snd_emu10k1_init_stereo_control(&controls[nctl++], "Wave Playback Volume", gpr, 100); gpr += 2; /* Synth Playback */ A_OP(icode, &ptr, iMAC0, A_GPR(stereo_mix+0), A_GPR(stereo_mix+0), A_GPR(gpr), A_FXBUS(FXBUS_MIDI_LEFT)); A_OP(icode, &ptr, iMAC0, A_GPR(stereo_mix+1), A_GPR(stereo_mix+1), A_GPR(gpr+1), A_FXBUS(FXBUS_MIDI_RIGHT)); snd_emu10k1_init_stereo_control(&controls[nctl++], "Synth Playback Volume", gpr, 100); gpr += 2; /* Wave (PCM) Capture */ A_OP(icode, &ptr, iMAC0, A_GPR(capture+0), A_C_00000000, A_GPR(gpr), A_FXBUS(FXBUS_PCM_LEFT)); A_OP(icode, &ptr, iMAC0, A_GPR(capture+1), A_C_00000000, A_GPR(gpr+1), A_FXBUS(FXBUS_PCM_RIGHT)); snd_emu10k1_init_stereo_control(&controls[nctl++], "PCM Capture Volume", gpr, 0); gpr += 2; /* Synth Capture */ A_OP(icode, &ptr, iMAC0, A_GPR(capture+0), A_GPR(capture+0), A_GPR(gpr), A_FXBUS(FXBUS_MIDI_LEFT)); A_OP(icode, &ptr, iMAC0, A_GPR(capture+1), A_GPR(capture+1), A_GPR(gpr+1), A_FXBUS(FXBUS_MIDI_RIGHT)); snd_emu10k1_init_stereo_control(&controls[nctl++], "Synth Capture Volume", gpr, 0); gpr += 2; /* * inputs */ #define A_ADD_VOLUME_IN(var,vol,input) \ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input)) /* emu1212 DSP 0 and DSP 1 Capture */ if (emu->card_capabilities->emu_model) { if (emu->card_capabilities->ca0108_chip) { /* Note:JCD:No longer bit shift lower 16bits to upper 16bits of 32bit value. 
*/ A_OP(icode, &ptr, iMACINT0, A_GPR(tmp), A_C_00000000, A3_EMU32IN(0x0), A_C_00000001); A_OP(icode, &ptr, iMAC0, A_GPR(capture+0), A_GPR(capture+0), A_GPR(gpr), A_GPR(tmp)); A_OP(icode, &ptr, iMACINT0, A_GPR(tmp), A_C_00000000, A3_EMU32IN(0x1), A_C_00000001); A_OP(icode, &ptr, iMAC0, A_GPR(capture+1), A_GPR(capture+1), A_GPR(gpr), A_GPR(tmp)); } else { A_OP(icode, &ptr, iMAC0, A_GPR(capture+0), A_GPR(capture+0), A_GPR(gpr), A_P16VIN(0x0)); A_OP(icode, &ptr, iMAC0, A_GPR(capture+1), A_GPR(capture+1), A_GPR(gpr+1), A_P16VIN(0x1)); } snd_emu10k1_init_stereo_control(&controls[nctl++], "EMU Capture Volume", gpr, 0); gpr += 2; } /* AC'97 Playback Volume - used only for mic (renamed later) */ A_ADD_VOLUME_IN(stereo_mix, gpr, A_EXTIN_AC97_L); A_ADD_VOLUME_IN(stereo_mix+1, gpr+1, A_EXTIN_AC97_R); snd_emu10k1_init_stereo_control(&controls[nctl++], "AMic Playback Volume", gpr, 0); gpr += 2; /* AC'97 Capture Volume - used only for mic */ A_ADD_VOLUME_IN(capture, gpr, A_EXTIN_AC97_L); A_ADD_VOLUME_IN(capture+1, gpr+1, A_EXTIN_AC97_R); snd_emu10k1_init_stereo_control(&controls[nctl++], "Mic Capture Volume", gpr, 0); gpr += 2; /* mic capture buffer */ A_OP(icode, &ptr, iINTERP, A_EXTOUT(A_EXTOUT_MIC_CAP), A_EXTIN(A_EXTIN_AC97_L), 0xcd, A_EXTIN(A_EXTIN_AC97_R)); /* Audigy CD Playback Volume */ A_ADD_VOLUME_IN(stereo_mix, gpr, A_EXTIN_SPDIF_CD_L); A_ADD_VOLUME_IN(stereo_mix+1, gpr+1, A_EXTIN_SPDIF_CD_R); snd_emu10k1_init_stereo_control(&controls[nctl++], emu->card_capabilities->ac97_chip ? "Audigy CD Playback Volume" : "CD Playback Volume", gpr, 0); gpr += 2; /* Audigy CD Capture Volume */ A_ADD_VOLUME_IN(capture, gpr, A_EXTIN_SPDIF_CD_L); A_ADD_VOLUME_IN(capture+1, gpr+1, A_EXTIN_SPDIF_CD_R); snd_emu10k1_init_stereo_control(&controls[nctl++], emu->card_capabilities->ac97_chip ? 
"Audigy CD Capture Volume" : "CD Capture Volume", gpr, 0); gpr += 2; /* Optical SPDIF Playback Volume */ A_ADD_VOLUME_IN(stereo_mix, gpr, A_EXTIN_OPT_SPDIF_L); A_ADD_VOLUME_IN(stereo_mix+1, gpr+1, A_EXTIN_OPT_SPDIF_R); snd_emu10k1_init_stereo_control(&controls[nctl++], SNDRV_CTL_NAME_IEC958("Optical ",PLAYBACK,VOLUME), gpr, 0); gpr += 2; /* Optical SPDIF Capture Volume */ A_ADD_VOLUME_IN(capture, gpr, A_EXTIN_OPT_SPDIF_L); A_ADD_VOLUME_IN(capture+1, gpr+1, A_EXTIN_OPT_SPDIF_R); snd_emu10k1_init_stereo_control(&controls[nctl++], SNDRV_CTL_NAME_IEC958("Optical ",CAPTURE,VOLUME), gpr, 0); gpr += 2; /* Line2 Playback Volume */ A_ADD_VOLUME_IN(stereo_mix, gpr, A_EXTIN_LINE2_L); A_ADD_VOLUME_IN(stereo_mix+1, gpr+1, A_EXTIN_LINE2_R); snd_emu10k1_init_stereo_control(&controls[nctl++], emu->card_capabilities->ac97_chip ? "Line2 Playback Volume" : "Line Playback Volume", gpr, 0); gpr += 2; /* Line2 Capture Volume */ A_ADD_VOLUME_IN(capture, gpr, A_EXTIN_LINE2_L); A_ADD_VOLUME_IN(capture+1, gpr+1, A_EXTIN_LINE2_R); snd_emu10k1_init_stereo_control(&controls[nctl++], emu->card_capabilities->ac97_chip ? "Line2 Capture Volume" : "Line Capture Volume", gpr, 0); gpr += 2; /* Philips ADC Playback Volume */ A_ADD_VOLUME_IN(stereo_mix, gpr, A_EXTIN_ADC_L); A_ADD_VOLUME_IN(stereo_mix+1, gpr+1, A_EXTIN_ADC_R); snd_emu10k1_init_stereo_control(&controls[nctl++], "Analog Mix Playback Volume", gpr, 0); gpr += 2; /* Philips ADC Capture Volume */ A_ADD_VOLUME_IN(capture, gpr, A_EXTIN_ADC_L); A_ADD_VOLUME_IN(capture+1, gpr+1, A_EXTIN_ADC_R); snd_emu10k1_init_stereo_control(&controls[nctl++], "Analog Mix Capture Volume", gpr, 0); gpr += 2; /* Aux2 Playback Volume */ A_ADD_VOLUME_IN(stereo_mix, gpr, A_EXTIN_AUX2_L); A_ADD_VOLUME_IN(stereo_mix+1, gpr+1, A_EXTIN_AUX2_R); snd_emu10k1_init_stereo_control(&controls[nctl++], emu->card_capabilities->ac97_chip ? 
"Aux2 Playback Volume" : "Aux Playback Volume", gpr, 0); gpr += 2; /* Aux2 Capture Volume */ A_ADD_VOLUME_IN(capture, gpr, A_EXTIN_AUX2_L); A_ADD_VOLUME_IN(capture+1, gpr+1, A_EXTIN_AUX2_R); snd_emu10k1_init_stereo_control(&controls[nctl++], emu->card_capabilities->ac97_chip ? "Aux2 Capture Volume" : "Aux Capture Volume", gpr, 0); gpr += 2; /* Stereo Mix Front Playback Volume */ A_OP(icode, &ptr, iMAC0, A_GPR(playback), A_GPR(playback), A_GPR(gpr), A_GPR(stereo_mix)); A_OP(icode, &ptr, iMAC0, A_GPR(playback+1), A_GPR(playback+1), A_GPR(gpr+1), A_GPR(stereo_mix+1)); snd_emu10k1_init_stereo_control(&controls[nctl++], "Front Playback Volume", gpr, 100); gpr += 2; /* Stereo Mix Surround Playback */ A_OP(icode, &ptr, iMAC0, A_GPR(playback+2), A_GPR(playback+2), A_GPR(gpr), A_GPR(stereo_mix)); A_OP(icode, &ptr, iMAC0, A_GPR(playback+3), A_GPR(playback+3), A_GPR(gpr+1), A_GPR(stereo_mix+1)); snd_emu10k1_init_stereo_control(&controls[nctl++], "Surround Playback Volume", gpr, 0); gpr += 2; /* Stereo Mix Center Playback */ /* Center = sub = Left/2 + Right/2 */ A_OP(icode, &ptr, iINTERP, A_GPR(tmp), A_GPR(stereo_mix), 0xcd, A_GPR(stereo_mix+1)); A_OP(icode, &ptr, iMAC0, A_GPR(playback+4), A_GPR(playback+4), A_GPR(gpr), A_GPR(tmp)); snd_emu10k1_init_mono_control(&controls[nctl++], "Center Playback Volume", gpr, 0); gpr++; /* Stereo Mix LFE Playback */ A_OP(icode, &ptr, iMAC0, A_GPR(playback+5), A_GPR(playback+5), A_GPR(gpr), A_GPR(tmp)); snd_emu10k1_init_mono_control(&controls[nctl++], "LFE Playback Volume", gpr, 0); gpr++; if (emu->card_capabilities->spk71) { /* Stereo Mix Side Playback */ A_OP(icode, &ptr, iMAC0, A_GPR(playback+6), A_GPR(playback+6), A_GPR(gpr), A_GPR(stereo_mix)); A_OP(icode, &ptr, iMAC0, A_GPR(playback+7), A_GPR(playback+7), A_GPR(gpr+1), A_GPR(stereo_mix+1)); snd_emu10k1_init_stereo_control(&controls[nctl++], "Side Playback Volume", gpr, 0); gpr += 2; } /* * outputs */ #define A_PUT_OUTPUT(out,src) A_OP(icode, &ptr, iACC3, A_EXTOUT(out), A_C_00000000, 
A_C_00000000, A_GPR(src)) #define A_PUT_STEREO_OUTPUT(out1,out2,src) \ {A_PUT_OUTPUT(out1,src); A_PUT_OUTPUT(out2,src+1);} #define _A_SWITCH(icode, ptr, dst, src, sw) \ A_OP((icode), ptr, iMACINT0, dst, A_C_00000000, src, sw); #define A_SWITCH(icode, ptr, dst, src, sw) \ _A_SWITCH(icode, ptr, A_GPR(dst), A_GPR(src), A_GPR(sw)) #define _A_SWITCH_NEG(icode, ptr, dst, src) \ A_OP((icode), ptr, iANDXOR, dst, src, A_C_00000001, A_C_00000001); #define A_SWITCH_NEG(icode, ptr, dst, src) \ _A_SWITCH_NEG(icode, ptr, A_GPR(dst), A_GPR(src)) /* * Process tone control */ A_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 0), A_GPR(playback + 0), A_C_00000000, A_C_00000000); /* left */ A_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 1), A_GPR(playback + 1), A_C_00000000, A_C_00000000); /* right */ A_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 2), A_GPR(playback + 2), A_C_00000000, A_C_00000000); /* rear left */ A_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 3), A_GPR(playback + 3), A_C_00000000, A_C_00000000); /* rear right */ A_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 4), A_GPR(playback + 4), A_C_00000000, A_C_00000000); /* center */ A_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 5), A_GPR(playback + 5), A_C_00000000, A_C_00000000); /* LFE */ if (emu->card_capabilities->spk71) { A_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 6), A_GPR(playback + 6), A_C_00000000, A_C_00000000); /* side left */ A_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 7), A_GPR(playback + 7), A_C_00000000, A_C_00000000); /* side right */ } ctl = &controls[nctl + 0]; ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; strcpy(ctl->id.name, "Tone Control - Bass"); ctl->vcount = 2; ctl->count = 10; ctl->min = 0; ctl->max = 40; ctl->value[0] = ctl->value[1] = 20; ctl->translation = 
EMU10K1_GPR_TRANSLATION_BASS; ctl = &controls[nctl + 1]; ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; strcpy(ctl->id.name, "Tone Control - Treble"); ctl->vcount = 2; ctl->count = 10; ctl->min = 0; ctl->max = 40; ctl->value[0] = ctl->value[1] = 20; ctl->translation = EMU10K1_GPR_TRANSLATION_TREBLE; #define BASS_GPR 0x8c #define TREBLE_GPR 0x96 for (z = 0; z < 5; z++) { int j; for (j = 0; j < 2; j++) { controls[nctl + 0].gpr[z * 2 + j] = BASS_GPR + z * 2 + j; controls[nctl + 1].gpr[z * 2 + j] = TREBLE_GPR + z * 2 + j; } } for (z = 0; z < 4; z++) { /* front/rear/center-lfe/side */ int j, k, l, d; for (j = 0; j < 2; j++) { /* left/right */ k = 0xb0 + (z * 8) + (j * 4); l = 0xe0 + (z * 8) + (j * 4); d = playback + SND_EMU10K1_PLAYBACK_CHANNELS + z * 2 + j; A_OP(icode, &ptr, iMAC0, A_C_00000000, A_C_00000000, A_GPR(d), A_GPR(BASS_GPR + 0 + j)); A_OP(icode, &ptr, iMACMV, A_GPR(k+1), A_GPR(k), A_GPR(k+1), A_GPR(BASS_GPR + 4 + j)); A_OP(icode, &ptr, iMACMV, A_GPR(k), A_GPR(d), A_GPR(k), A_GPR(BASS_GPR + 2 + j)); A_OP(icode, &ptr, iMACMV, A_GPR(k+3), A_GPR(k+2), A_GPR(k+3), A_GPR(BASS_GPR + 8 + j)); A_OP(icode, &ptr, iMAC0, A_GPR(k+2), A_GPR_ACCU, A_GPR(k+2), A_GPR(BASS_GPR + 6 + j)); A_OP(icode, &ptr, iACC3, A_GPR(k+2), A_GPR(k+2), A_GPR(k+2), A_C_00000000); A_OP(icode, &ptr, iMAC0, A_C_00000000, A_C_00000000, A_GPR(k+2), A_GPR(TREBLE_GPR + 0 + j)); A_OP(icode, &ptr, iMACMV, A_GPR(l+1), A_GPR(l), A_GPR(l+1), A_GPR(TREBLE_GPR + 4 + j)); A_OP(icode, &ptr, iMACMV, A_GPR(l), A_GPR(k+2), A_GPR(l), A_GPR(TREBLE_GPR + 2 + j)); A_OP(icode, &ptr, iMACMV, A_GPR(l+3), A_GPR(l+2), A_GPR(l+3), A_GPR(TREBLE_GPR + 8 + j)); A_OP(icode, &ptr, iMAC0, A_GPR(l+2), A_GPR_ACCU, A_GPR(l+2), A_GPR(TREBLE_GPR + 6 + j)); A_OP(icode, &ptr, iMACINT0, A_GPR(l+2), A_C_00000000, A_GPR(l+2), A_C_00000010); A_OP(icode, &ptr, iACC3, A_GPR(d), A_GPR(l+2), A_C_00000000, A_C_00000000); if (z == 2) /* center */ break; } } nctl += 2; #undef BASS_GPR #undef TREBLE_GPR for (z = 0; z < 8; z++) { A_SWITCH(icode, 
&ptr, tmp + 0, playback + SND_EMU10K1_PLAYBACK_CHANNELS + z, gpr + 0); A_SWITCH_NEG(icode, &ptr, tmp + 1, gpr + 0); A_SWITCH(icode, &ptr, tmp + 1, playback + z, tmp + 1); A_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + z), A_GPR(tmp + 0), A_GPR(tmp + 1), A_C_00000000); } snd_emu10k1_init_stereo_onoff_control(controls + nctl++, "Tone Control - Switch", gpr, 0); gpr += 2; /* Master volume (will be renamed later) */ A_OP(icode, &ptr, iMAC0, A_GPR(playback+0+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+0+SND_EMU10K1_PLAYBACK_CHANNELS)); A_OP(icode, &ptr, iMAC0, A_GPR(playback+1+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+1+SND_EMU10K1_PLAYBACK_CHANNELS)); A_OP(icode, &ptr, iMAC0, A_GPR(playback+2+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+2+SND_EMU10K1_PLAYBACK_CHANNELS)); A_OP(icode, &ptr, iMAC0, A_GPR(playback+3+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+3+SND_EMU10K1_PLAYBACK_CHANNELS)); A_OP(icode, &ptr, iMAC0, A_GPR(playback+4+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+4+SND_EMU10K1_PLAYBACK_CHANNELS)); A_OP(icode, &ptr, iMAC0, A_GPR(playback+5+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+5+SND_EMU10K1_PLAYBACK_CHANNELS)); A_OP(icode, &ptr, iMAC0, A_GPR(playback+6+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+6+SND_EMU10K1_PLAYBACK_CHANNELS)); A_OP(icode, &ptr, iMAC0, A_GPR(playback+7+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+7+SND_EMU10K1_PLAYBACK_CHANNELS)); snd_emu10k1_init_mono_control(&controls[nctl++], "Wave Master Playback Volume", gpr, 0); gpr += 2; /* analog speakers */ A_PUT_STEREO_OUTPUT(A_EXTOUT_AFRONT_L, A_EXTOUT_AFRONT_R, playback + SND_EMU10K1_PLAYBACK_CHANNELS); A_PUT_STEREO_OUTPUT(A_EXTOUT_AREAR_L, A_EXTOUT_AREAR_R, playback+2 + SND_EMU10K1_PLAYBACK_CHANNELS); A_PUT_OUTPUT(A_EXTOUT_ACENTER, 
playback+4 + SND_EMU10K1_PLAYBACK_CHANNELS); A_PUT_OUTPUT(A_EXTOUT_ALFE, playback+5 + SND_EMU10K1_PLAYBACK_CHANNELS); if (emu->card_capabilities->spk71) A_PUT_STEREO_OUTPUT(A_EXTOUT_ASIDE_L, A_EXTOUT_ASIDE_R, playback+6 + SND_EMU10K1_PLAYBACK_CHANNELS); /* headphone */ A_PUT_STEREO_OUTPUT(A_EXTOUT_HEADPHONE_L, A_EXTOUT_HEADPHONE_R, playback + SND_EMU10K1_PLAYBACK_CHANNELS); /* digital outputs */ /* A_PUT_STEREO_OUTPUT(A_EXTOUT_FRONT_L, A_EXTOUT_FRONT_R, playback + SND_EMU10K1_PLAYBACK_CHANNELS); */ if (emu->card_capabilities->emu_model) { /* EMU1010 Outputs from PCM Front, Rear, Center, LFE, Side */ snd_printk(KERN_INFO "EMU outputs on\n"); for (z = 0; z < 8; z++) { if (emu->card_capabilities->ca0108_chip) { A_OP(icode, &ptr, iACC3, A3_EMU32OUT(z), A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + z), A_C_00000000, A_C_00000000); } else { A_OP(icode, &ptr, iACC3, A_EMU32OUTL(z), A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + z), A_C_00000000, A_C_00000000); } } } /* IEC958 Optical Raw Playback Switch */ gpr_map[gpr++] = 0; gpr_map[gpr++] = 0x1008; gpr_map[gpr++] = 0xffff0000; for (z = 0; z < 2; z++) { A_OP(icode, &ptr, iMAC0, A_GPR(tmp + 2), A_FXBUS(FXBUS_PT_LEFT + z), A_C_00000000, A_C_00000000); A_OP(icode, &ptr, iSKIP, A_GPR_COND, A_GPR_COND, A_GPR(gpr - 2), A_C_00000001); A_OP(icode, &ptr, iACC3, A_GPR(tmp + 2), A_C_00000000, A_C_00010000, A_GPR(tmp + 2)); A_OP(icode, &ptr, iANDXOR, A_GPR(tmp + 2), A_GPR(tmp + 2), A_GPR(gpr - 1), A_C_00000000); A_SWITCH(icode, &ptr, tmp + 0, tmp + 2, gpr + z); A_SWITCH_NEG(icode, &ptr, tmp + 1, gpr + z); A_SWITCH(icode, &ptr, tmp + 1, playback + SND_EMU10K1_PLAYBACK_CHANNELS + z, tmp + 1); if ((z==1) && (emu->card_capabilities->spdif_bug)) { /* Due to a SPDIF output bug on some Audigy cards, this code delays the Right channel by 1 sample */ snd_printk(KERN_INFO "Installing spdif_bug patch: %s\n", emu->card_capabilities->name); A_OP(icode, &ptr, iACC3, A_EXTOUT(A_EXTOUT_FRONT_L + z), A_GPR(gpr - 3), A_C_00000000, A_C_00000000); 
A_OP(icode, &ptr, iACC3, A_GPR(gpr - 3), A_GPR(tmp + 0), A_GPR(tmp + 1), A_C_00000000); } else { A_OP(icode, &ptr, iACC3, A_EXTOUT(A_EXTOUT_FRONT_L + z), A_GPR(tmp + 0), A_GPR(tmp + 1), A_C_00000000); } } snd_emu10k1_init_stereo_onoff_control(controls + nctl++, SNDRV_CTL_NAME_IEC958("Optical Raw ",PLAYBACK,SWITCH), gpr, 0); gpr += 2; A_PUT_STEREO_OUTPUT(A_EXTOUT_REAR_L, A_EXTOUT_REAR_R, playback+2 + SND_EMU10K1_PLAYBACK_CHANNELS); A_PUT_OUTPUT(A_EXTOUT_CENTER, playback+4 + SND_EMU10K1_PLAYBACK_CHANNELS); A_PUT_OUTPUT(A_EXTOUT_LFE, playback+5 + SND_EMU10K1_PLAYBACK_CHANNELS); /* ADC buffer */ #ifdef EMU10K1_CAPTURE_DIGITAL_OUT A_PUT_STEREO_OUTPUT(A_EXTOUT_ADC_CAP_L, A_EXTOUT_ADC_CAP_R, playback + SND_EMU10K1_PLAYBACK_CHANNELS); #else A_PUT_OUTPUT(A_EXTOUT_ADC_CAP_L, capture); A_PUT_OUTPUT(A_EXTOUT_ADC_CAP_R, capture+1); #endif if (emu->card_capabilities->emu_model) { if (emu->card_capabilities->ca0108_chip) { snd_printk(KERN_INFO "EMU2 inputs on\n"); for (z = 0; z < 0x10; z++) { snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A3_EMU32IN(z), A_FXBUS2(z*2) ); } } else { snd_printk(KERN_INFO "EMU inputs on\n"); /* Capture 16 (originally 8) channels of S32_LE sound */ /* printk(KERN_DEBUG "emufx.c: gpr=0x%x, tmp=0x%x\n", gpr, tmp); */ /* For the EMU1010: How to get 32bit values from the DSP. High 16bits into L, low 16bits into R. */ /* A_P16VIN(0) is delayed by one sample, * so all other A_P16VIN channels will need to also be delayed */ /* Left ADC in. 
1 of 2 */ snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_P16VIN(0x0), A_FXBUS2(0) ); /* Right ADC in 1 of 2 */ gpr_map[gpr++] = 0x00000000; /* Delaying by one sample: instead of copying the input * value A_P16VIN to output A_FXBUS2 as in the first channel, * we use an auxiliary register, delaying the value by one * sample */ snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(2) ); A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x1), A_C_00000000, A_C_00000000); gpr_map[gpr++] = 0x00000000; snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(4) ); A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x2), A_C_00000000, A_C_00000000); gpr_map[gpr++] = 0x00000000; snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(6) ); A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x3), A_C_00000000, A_C_00000000); /* For 96kHz mode */ /* Left ADC in. 
2 of 2 */ gpr_map[gpr++] = 0x00000000; snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0x8) ); A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x4), A_C_00000000, A_C_00000000); /* Right ADC in 2 of 2 */ gpr_map[gpr++] = 0x00000000; snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xa) ); A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x5), A_C_00000000, A_C_00000000); gpr_map[gpr++] = 0x00000000; snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xc) ); A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x6), A_C_00000000, A_C_00000000); gpr_map[gpr++] = 0x00000000; snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xe) ); A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x7), A_C_00000000, A_C_00000000); /* Pavel Hofman - we still have voices, A_FXBUS2s, and * A_P16VINs available - * let's add 8 more capture channels - total of 16 */ gpr_map[gpr++] = 0x00000000; snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0x10)); A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x8), A_C_00000000, A_C_00000000); gpr_map[gpr++] = 0x00000000; snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0x12)); A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x9), A_C_00000000, A_C_00000000); gpr_map[gpr++] = 0x00000000; snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0x14)); A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xa), A_C_00000000, A_C_00000000); gpr_map[gpr++] = 0x00000000; snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0x16)); A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xb), A_C_00000000, A_C_00000000); gpr_map[gpr++] = 0x00000000; 
snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0x18)); A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xc), A_C_00000000, A_C_00000000); gpr_map[gpr++] = 0x00000000; snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0x1a)); A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xd), A_C_00000000, A_C_00000000); gpr_map[gpr++] = 0x00000000; snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0x1c)); A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xe), A_C_00000000, A_C_00000000); gpr_map[gpr++] = 0x00000000; snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0x1e)); A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xf), A_C_00000000, A_C_00000000); } #if 0 for (z = 4; z < 8; z++) { A_OP(icode, &ptr, iACC3, A_FXBUS2(z), A_C_00000000, A_C_00000000, A_C_00000000); } for (z = 0xc; z < 0x10; z++) { A_OP(icode, &ptr, iACC3, A_FXBUS2(z), A_C_00000000, A_C_00000000, A_C_00000000); } #endif } else { /* EFX capture - capture the 16 EXTINs */ /* Capture 16 channels of S16_LE sound */ for (z = 0; z < 16; z++) { A_OP(icode, &ptr, iACC3, A_FXBUS2(z), A_C_00000000, A_C_00000000, A_EXTIN(z)); } } #endif /* JCD test */ /* * ok, set up done.. 
 */
	/* sanity: the dynamically allocated GPRs must not run into the
	 * temporary-GPR area starting at 'tmp' */
	if (gpr > tmp) {
		snd_BUG();
		err = -EIO;
		goto __err;
	}
	/* clear remaining instruction memory */
	while (ptr < 0x400)
		A_OP(icode, &ptr, 0x0f, 0xc0, 0xc0, 0xcf, 0xc0);

	/* download the assembled netlist + controls to the DSP;
	 * snd_enter_user() lets the in-kernel buffers pass the
	 * copy_from_user() checks inside snd_emu10k1_icode_poke() */
	seg = snd_enter_user();
	icode->gpr_add_control_count = nctl;
	icode->gpr_add_controls = (struct snd_emu10k1_fx8010_control_gpr __user *)controls;
	emu->support_tlv = 1; /* support TLV */
	err = snd_emu10k1_icode_poke(emu, icode);
	emu->support_tlv = 0; /* clear again */
	snd_leave_user(seg);

 __err:
	kfree(controls);
	if (icode != NULL) {
		kfree((void __force *)icode->gpr_map);
		kfree(icode);
	}
	return err;
}

/*
 * initial DSP configuration for Emu10k1
 */

/* when volume = max, then copy only to avoid volume modification */
/* with iMAC0 (negative values) */
/* Emit microcode: dst = src * vol.  The iANDXOR/iSKIP pair tests
 * vol against 0x7fffffff; at unity gain the multiply result is
 * overwritten by a plain copy so full volume stays bit-exact. */
static void __devinit _volume(struct snd_emu10k1_fx8010_code *icode, u32 *ptr, u32 dst, u32 src, u32 vol)
{
	OP(icode, ptr, iMAC0, dst, C_00000000, src, vol);
	OP(icode, ptr, iANDXOR, C_00000000, vol, C_ffffffff, C_7fffffff);
	OP(icode, ptr, iSKIP, GPR_COND, GPR_COND, CC_REG_NONZERO, C_00000001);
	OP(icode, ptr, iACC3, dst, src, C_00000000, C_00000000);
}

/* Emit microcode: dst += src * vol, with the same unity-gain
 * special case as _volume() (plain add when vol is at maximum). */
static void __devinit _volume_add(struct snd_emu10k1_fx8010_code *icode, u32 *ptr, u32 dst, u32 src, u32 vol)
{
	OP(icode, ptr, iANDXOR, C_00000000, vol, C_ffffffff, C_7fffffff);
	OP(icode, ptr, iSKIP, GPR_COND, GPR_COND, CC_REG_NONZERO, C_00000002);
	OP(icode, ptr, iMACINT0, dst, dst, src, C_00000001);
	OP(icode, ptr, iSKIP, C_00000000, C_7fffffff, C_7fffffff, C_00000001);
	OP(icode, ptr, iMAC0, dst, dst, src, vol);
}

/* Emit microcode: dst = src * vol for an output register, copying
 * verbatim at unity gain (same skip pattern as above). */
static void __devinit _volume_out(struct snd_emu10k1_fx8010_code *icode, u32 *ptr, u32 dst, u32 src, u32 vol)
{
	OP(icode, ptr, iANDXOR, C_00000000, vol, C_ffffffff, C_7fffffff);
	OP(icode, ptr, iSKIP, GPR_COND, GPR_COND, CC_REG_NONZERO, C_00000002);
	OP(icode, ptr, iACC3, dst, src, C_00000000, C_00000000);
	OP(icode, ptr, iSKIP, C_00000000, C_7fffffff, C_7fffffff, C_00000001);
	OP(icode, ptr, iMAC0, dst, C_00000000, src, vol);
}

/* Convenience wrappers mapping mixer-element indices onto the DSP
 * register spaces (GPR / EXTIN / EXTOUT). */
#define VOLUME(icode, ptr, dst, src, vol) \
		_volume(icode, ptr, GPR(dst), GPR(src), GPR(vol))
#define VOLUME_IN(icode, ptr, dst, src, vol) \
		_volume(icode, ptr, GPR(dst), EXTIN(src), GPR(vol))
#define VOLUME_ADD(icode, ptr, dst, src, vol) \
		_volume_add(icode, ptr, GPR(dst), GPR(src), GPR(vol))
#define VOLUME_ADDIN(icode, ptr, dst, src, vol) \
		_volume_add(icode, ptr, GPR(dst), EXTIN(src), GPR(vol))
#define VOLUME_OUT(icode, ptr, dst, src, vol) \
		_volume_out(icode, ptr, EXTOUT(dst), GPR(src), GPR(vol))

/* dst = src * sw: multiplying by the 0/1 switch GPR gates the signal */
#define _SWITCH(icode, ptr, dst, src, sw) \
	OP((icode), ptr, iMACINT0, dst, C_00000000, src, sw);
#define SWITCH(icode, ptr, dst, src, sw) \
		_SWITCH(icode, ptr, GPR(dst), GPR(src), GPR(sw))
#define SWITCH_IN(icode, ptr, dst, src, sw) \
		_SWITCH(icode, ptr, GPR(dst), EXTIN(src), GPR(sw))
/* dst = !src for a 0/1 switch value (AND 1, XOR 1) */
#define _SWITCH_NEG(icode, ptr, dst, src) \
	OP((icode), ptr, iANDXOR, dst, src, C_00000001, C_00000001);
#define SWITCH_NEG(icode, ptr, dst, src) \
		_SWITCH_NEG(icode, ptr, GPR(dst), GPR(src))

/*
 * Build and download the default FX8010 netlist for the original
 * (non-Audigy) EMU10K1: routes the FX buses through volume/switch
 * controls, sets up tone control, capture and the raw S/PDIF PCM.
 * Returns 0 on success or a negative error code.
 */
static int __devinit _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
{
	int err, i, z, gpr, tmp, playback, capture;
	u32 ptr;
	struct snd_emu10k1_fx8010_code *icode;
	struct snd_emu10k1_fx8010_pcm_rec *ipcm = NULL;
	struct snd_emu10k1_fx8010_control_gpr *controls = NULL, *ctl;
	u32 *gpr_map;
	mm_segment_t seg;

	if ((icode = kzalloc(sizeof(*icode), GFP_KERNEL)) == NULL)
		return -ENOMEM;
	/* one buffer holds the GPR map (256), TRAM data+addr maps
	 * (160 each) and the 512-instruction code image (2 words each) */
	if ((icode->gpr_map = (u_int32_t __user *)
	     kcalloc(256 + 160 + 160 + 2 * 512, sizeof(u_int32_t),
		     GFP_KERNEL)) == NULL ||
	    (controls = kcalloc(SND_EMU10K1_GPR_CONTROLS,
				sizeof(struct snd_emu10k1_fx8010_control_gpr),
				GFP_KERNEL)) == NULL ||
	    (ipcm = kzalloc(sizeof(*ipcm), GFP_KERNEL)) == NULL) {
		err = -ENOMEM;
		goto __err;
	}
	gpr_map = (u32 __force *)icode->gpr_map;

	icode->tram_data_map = icode->gpr_map + 256;
	icode->tram_addr_map = icode->tram_data_map + 160;
	icode->code = icode->tram_addr_map + 160;

	/* clear free GPRs */
	for (i = 0; i < 256; i++)
		set_bit(i, icode->gpr_valid);

	/* clear TRAM data & address lines */
	for (i = 0; i < 160; i++)
		set_bit(i, icode->tram_valid);

	strcpy(icode->name, "SB Live! FX8010 code for ALSA v1.2 by Jaroslav Kysela");
	ptr = 0;
	i = 0;
	/* we have 12 inputs */
	playback = SND_EMU10K1_INPUTS;
	/* we have 6 playback channels and tone control doubles */
	capture = playback + (SND_EMU10K1_PLAYBACK_CHANNELS * 2);
	gpr = capture + SND_EMU10K1_CAPTURE_CHANNELS;
	tmp = 0x88;	/* we need 4 temporary GPR */
			/* from 0x8c to 0xff is the area for tone control */

	/* stop FX processor */
	snd_emu10k1_ptr_write(emu, DBG, 0, (emu->fx8010.dbg = 0) | EMU10K1_DBG_SINGLE_STEP);

	/*
	 *  Process FX Buses
	 *  (each bus is scaled by 4 into a working GPR; GPR 8/9 are
	 *  zeroed here and later carry the raw S/PDIF PCM samples)
	 */
	OP(icode, &ptr, iMACINT0, GPR(0), C_00000000, FXBUS(FXBUS_PCM_LEFT), C_00000004);
	OP(icode, &ptr, iMACINT0, GPR(1), C_00000000, FXBUS(FXBUS_PCM_RIGHT), C_00000004);
	OP(icode, &ptr, iMACINT0, GPR(2), C_00000000, FXBUS(FXBUS_MIDI_LEFT), C_00000004);
	OP(icode, &ptr, iMACINT0, GPR(3), C_00000000, FXBUS(FXBUS_MIDI_RIGHT), C_00000004);
	OP(icode, &ptr, iMACINT0, GPR(4), C_00000000, FXBUS(FXBUS_PCM_LEFT_REAR), C_00000004);
	OP(icode, &ptr, iMACINT0, GPR(5), C_00000000, FXBUS(FXBUS_PCM_RIGHT_REAR), C_00000004);
	OP(icode, &ptr, iMACINT0, GPR(6), C_00000000, FXBUS(FXBUS_PCM_CENTER), C_00000004);
	OP(icode, &ptr, iMACINT0, GPR(7), C_00000000, FXBUS(FXBUS_PCM_LFE), C_00000004);
	OP(icode, &ptr, iMACINT0, GPR(8), C_00000000, C_00000000, C_00000000); /* S/PDIF left */
	OP(icode, &ptr, iMACINT0, GPR(9), C_00000000, C_00000000, C_00000000); /* S/PDIF right */
	OP(icode, &ptr, iMACINT0, GPR(10), C_00000000, FXBUS(FXBUS_PCM_LEFT_FRONT), C_00000004);
	OP(icode, &ptr, iMACINT0, GPR(11), C_00000000, FXBUS(FXBUS_PCM_RIGHT_FRONT), C_00000004);

	/* Raw S/PDIF PCM */
	ipcm->substream = 0;
	ipcm->channels = 2;
	ipcm->tram_start = 0;
	ipcm->buffer_size = (64 * 1024) / 2;
	/* allocate control GPRs for the FX8010 PCM state machine below */
	ipcm->gpr_size = gpr++;
	ipcm->gpr_ptr = gpr++;
	ipcm->gpr_count = gpr++;
	ipcm->gpr_tmpcount = gpr++;
	ipcm->gpr_trigger = gpr++;
	ipcm->gpr_running = gpr++;
	ipcm->etram[0] = 0;
	ipcm->etram[1] = 1;

	/* constants used by the S/PDIF PCM feeder microcode */
	gpr_map[gpr + 0] = 0xfffff000;
	gpr_map[gpr + 1] = 0xffff0000;
	gpr_map[gpr + 2] = 0x70000000;
	gpr_map[gpr + 3] = 0x00000007;
	gpr_map[gpr + 4] =
0x001f << 11; gpr_map[gpr + 5] = 0x001c << 11; gpr_map[gpr + 6] = (0x22 - 0x01) - 1; /* skip at 01 to 22 */ gpr_map[gpr + 7] = (0x22 - 0x06) - 1; /* skip at 06 to 22 */ gpr_map[gpr + 8] = 0x2000000 + (2<<11); gpr_map[gpr + 9] = 0x4000000 + (2<<11); gpr_map[gpr + 10] = 1<<11; gpr_map[gpr + 11] = (0x24 - 0x0a) - 1; /* skip at 0a to 24 */ gpr_map[gpr + 12] = 0; /* if the trigger flag is not set, skip */ /* 00: */ OP(icode, &ptr, iMAC0, C_00000000, GPR(ipcm->gpr_trigger), C_00000000, C_00000000); /* 01: */ OP(icode, &ptr, iSKIP, GPR_COND, GPR_COND, CC_REG_ZERO, GPR(gpr + 6)); /* if the running flag is set, we're running */ /* 02: */ OP(icode, &ptr, iMAC0, C_00000000, GPR(ipcm->gpr_running), C_00000000, C_00000000); /* 03: */ OP(icode, &ptr, iSKIP, GPR_COND, GPR_COND, CC_REG_NONZERO, C_00000004); /* wait until ((GPR_DBAC>>11) & 0x1f) == 0x1c) */ /* 04: */ OP(icode, &ptr, iANDXOR, GPR(tmp + 0), GPR_DBAC, GPR(gpr + 4), C_00000000); /* 05: */ OP(icode, &ptr, iMACINT0, C_00000000, GPR(tmp + 0), C_ffffffff, GPR(gpr + 5)); /* 06: */ OP(icode, &ptr, iSKIP, GPR_COND, GPR_COND, CC_REG_NONZERO, GPR(gpr + 7)); /* 07: */ OP(icode, &ptr, iACC3, GPR(gpr + 12), C_00000010, C_00000001, C_00000000); /* 08: */ OP(icode, &ptr, iANDXOR, GPR(ipcm->gpr_running), GPR(ipcm->gpr_running), C_00000000, C_00000001); /* 09: */ OP(icode, &ptr, iACC3, GPR(gpr + 12), GPR(gpr + 12), C_ffffffff, C_00000000); /* 0a: */ OP(icode, &ptr, iSKIP, GPR_COND, GPR_COND, CC_REG_NONZERO, GPR(gpr + 11)); /* 0b: */ OP(icode, &ptr, iACC3, GPR(gpr + 12), C_00000001, C_00000000, C_00000000); /* 0c: */ OP(icode, &ptr, iANDXOR, GPR(tmp + 0), ETRAM_DATA(ipcm->etram[0]), GPR(gpr + 0), C_00000000); /* 0d: */ OP(icode, &ptr, iLOG, GPR(tmp + 0), GPR(tmp + 0), GPR(gpr + 3), C_00000000); /* 0e: */ OP(icode, &ptr, iANDXOR, GPR(8), GPR(tmp + 0), GPR(gpr + 1), GPR(gpr + 2)); /* 0f: */ OP(icode, &ptr, iSKIP, C_00000000, GPR_COND, CC_REG_MINUS, C_00000001); /* 10: */ OP(icode, &ptr, iANDXOR, GPR(8), GPR(8), GPR(gpr + 1), GPR(gpr + 
2)); /* 11: */ OP(icode, &ptr, iANDXOR, GPR(tmp + 0), ETRAM_DATA(ipcm->etram[1]), GPR(gpr + 0), C_00000000); /* 12: */ OP(icode, &ptr, iLOG, GPR(tmp + 0), GPR(tmp + 0), GPR(gpr + 3), C_00000000); /* 13: */ OP(icode, &ptr, iANDXOR, GPR(9), GPR(tmp + 0), GPR(gpr + 1), GPR(gpr + 2)); /* 14: */ OP(icode, &ptr, iSKIP, C_00000000, GPR_COND, CC_REG_MINUS, C_00000001); /* 15: */ OP(icode, &ptr, iANDXOR, GPR(9), GPR(9), GPR(gpr + 1), GPR(gpr + 2)); /* 16: */ OP(icode, &ptr, iACC3, GPR(tmp + 0), GPR(ipcm->gpr_ptr), C_00000001, C_00000000); /* 17: */ OP(icode, &ptr, iMACINT0, C_00000000, GPR(tmp + 0), C_ffffffff, GPR(ipcm->gpr_size)); /* 18: */ OP(icode, &ptr, iSKIP, GPR_COND, GPR_COND, CC_REG_MINUS, C_00000001); /* 19: */ OP(icode, &ptr, iACC3, GPR(tmp + 0), C_00000000, C_00000000, C_00000000); /* 1a: */ OP(icode, &ptr, iACC3, GPR(ipcm->gpr_ptr), GPR(tmp + 0), C_00000000, C_00000000); /* 1b: */ OP(icode, &ptr, iACC3, GPR(ipcm->gpr_tmpcount), GPR(ipcm->gpr_tmpcount), C_ffffffff, C_00000000); /* 1c: */ OP(icode, &ptr, iSKIP, GPR_COND, GPR_COND, CC_REG_NONZERO, C_00000002); /* 1d: */ OP(icode, &ptr, iACC3, GPR(ipcm->gpr_tmpcount), GPR(ipcm->gpr_count), C_00000000, C_00000000); /* 1e: */ OP(icode, &ptr, iACC3, GPR_IRQ, C_80000000, C_00000000, C_00000000); /* 1f: */ OP(icode, &ptr, iANDXOR, GPR(ipcm->gpr_running), GPR(ipcm->gpr_running), C_00000001, C_00010000); /* 20: */ OP(icode, &ptr, iANDXOR, GPR(ipcm->gpr_running), GPR(ipcm->gpr_running), C_00010000, C_00000001); /* 21: */ OP(icode, &ptr, iSKIP, C_00000000, C_7fffffff, C_7fffffff, C_00000002); /* 22: */ OP(icode, &ptr, iMACINT1, ETRAM_ADDR(ipcm->etram[0]), GPR(gpr + 8), GPR_DBAC, C_ffffffff); /* 23: */ OP(icode, &ptr, iMACINT1, ETRAM_ADDR(ipcm->etram[1]), GPR(gpr + 9), GPR_DBAC, C_ffffffff); /* 24: */ gpr += 13; /* Wave Playback Volume */ for (z = 0; z < 2; z++) VOLUME(icode, &ptr, playback + z, z, gpr + z); snd_emu10k1_init_stereo_control(controls + i++, "Wave Playback Volume", gpr, 100); gpr += 2; /* Wave Surround Playback 
Volume */ for (z = 0; z < 2; z++) VOLUME(icode, &ptr, playback + 2 + z, z, gpr + z); snd_emu10k1_init_stereo_control(controls + i++, "Wave Surround Playback Volume", gpr, 0); gpr += 2; /* Wave Center/LFE Playback Volume */ OP(icode, &ptr, iACC3, GPR(tmp + 0), FXBUS(FXBUS_PCM_LEFT), FXBUS(FXBUS_PCM_RIGHT), C_00000000); OP(icode, &ptr, iMACINT0, GPR(tmp + 0), C_00000000, GPR(tmp + 0), C_00000002); VOLUME(icode, &ptr, playback + 4, tmp + 0, gpr); snd_emu10k1_init_mono_control(controls + i++, "Wave Center Playback Volume", gpr++, 0); VOLUME(icode, &ptr, playback + 5, tmp + 0, gpr); snd_emu10k1_init_mono_control(controls + i++, "Wave LFE Playback Volume", gpr++, 0); /* Wave Capture Volume + Switch */ for (z = 0; z < 2; z++) { SWITCH(icode, &ptr, tmp + 0, z, gpr + 2 + z); VOLUME(icode, &ptr, capture + z, tmp + 0, gpr + z); } snd_emu10k1_init_stereo_control(controls + i++, "Wave Capture Volume", gpr, 0); snd_emu10k1_init_stereo_onoff_control(controls + i++, "Wave Capture Switch", gpr + 2, 0); gpr += 4; /* Synth Playback Volume */ for (z = 0; z < 2; z++) VOLUME_ADD(icode, &ptr, playback + z, 2 + z, gpr + z); snd_emu10k1_init_stereo_control(controls + i++, "Synth Playback Volume", gpr, 100); gpr += 2; /* Synth Capture Volume + Switch */ for (z = 0; z < 2; z++) { SWITCH(icode, &ptr, tmp + 0, 2 + z, gpr + 2 + z); VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z); } snd_emu10k1_init_stereo_control(controls + i++, "Synth Capture Volume", gpr, 0); snd_emu10k1_init_stereo_onoff_control(controls + i++, "Synth Capture Switch", gpr + 2, 0); gpr += 4; /* Surround Digital Playback Volume (renamed later without Digital) */ for (z = 0; z < 2; z++) VOLUME_ADD(icode, &ptr, playback + 2 + z, 4 + z, gpr + z); snd_emu10k1_init_stereo_control(controls + i++, "Surround Digital Playback Volume", gpr, 100); gpr += 2; /* Surround Capture Volume + Switch */ for (z = 0; z < 2; z++) { SWITCH(icode, &ptr, tmp + 0, 4 + z, gpr + 2 + z); VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z); } 
snd_emu10k1_init_stereo_control(controls + i++, "Surround Capture Volume", gpr, 0); snd_emu10k1_init_stereo_onoff_control(controls + i++, "Surround Capture Switch", gpr + 2, 0); gpr += 4; /* Center Playback Volume (renamed later without Digital) */ VOLUME_ADD(icode, &ptr, playback + 4, 6, gpr); snd_emu10k1_init_mono_control(controls + i++, "Center Digital Playback Volume", gpr++, 100); /* LFE Playback Volume + Switch (renamed later without Digital) */ VOLUME_ADD(icode, &ptr, playback + 5, 7, gpr); snd_emu10k1_init_mono_control(controls + i++, "LFE Digital Playback Volume", gpr++, 100); /* Front Playback Volume */ for (z = 0; z < 2; z++) VOLUME_ADD(icode, &ptr, playback + z, 10 + z, gpr + z); snd_emu10k1_init_stereo_control(controls + i++, "Front Playback Volume", gpr, 100); gpr += 2; /* Front Capture Volume + Switch */ for (z = 0; z < 2; z++) { SWITCH(icode, &ptr, tmp + 0, 10 + z, gpr + 2); VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z); } snd_emu10k1_init_stereo_control(controls + i++, "Front Capture Volume", gpr, 0); snd_emu10k1_init_mono_onoff_control(controls + i++, "Front Capture Switch", gpr + 2, 0); gpr += 3; /* * Process inputs */ if (emu->fx8010.extin_mask & ((1<<EXTIN_AC97_L)|(1<<EXTIN_AC97_R))) { /* AC'97 Playback Volume */ VOLUME_ADDIN(icode, &ptr, playback + 0, EXTIN_AC97_L, gpr); gpr++; VOLUME_ADDIN(icode, &ptr, playback + 1, EXTIN_AC97_R, gpr); gpr++; snd_emu10k1_init_stereo_control(controls + i++, "AC97 Playback Volume", gpr-2, 0); /* AC'97 Capture Volume */ VOLUME_ADDIN(icode, &ptr, capture + 0, EXTIN_AC97_L, gpr); gpr++; VOLUME_ADDIN(icode, &ptr, capture + 1, EXTIN_AC97_R, gpr); gpr++; snd_emu10k1_init_stereo_control(controls + i++, "AC97 Capture Volume", gpr-2, 100); } if (emu->fx8010.extin_mask & ((1<<EXTIN_SPDIF_CD_L)|(1<<EXTIN_SPDIF_CD_R))) { /* IEC958 TTL Playback Volume */ for (z = 0; z < 2; z++) VOLUME_ADDIN(icode, &ptr, playback + z, EXTIN_SPDIF_CD_L + z, gpr + z); snd_emu10k1_init_stereo_control(controls + i++, 
SNDRV_CTL_NAME_IEC958("TTL ",PLAYBACK,VOLUME), gpr, 0); gpr += 2; /* IEC958 TTL Capture Volume + Switch */ for (z = 0; z < 2; z++) { SWITCH_IN(icode, &ptr, tmp + 0, EXTIN_SPDIF_CD_L + z, gpr + 2 + z); VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z); } snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("TTL ",CAPTURE,VOLUME), gpr, 0); snd_emu10k1_init_stereo_onoff_control(controls + i++, SNDRV_CTL_NAME_IEC958("TTL ",CAPTURE,SWITCH), gpr + 2, 0); gpr += 4; } if (emu->fx8010.extin_mask & ((1<<EXTIN_ZOOM_L)|(1<<EXTIN_ZOOM_R))) { /* Zoom Video Playback Volume */ for (z = 0; z < 2; z++) VOLUME_ADDIN(icode, &ptr, playback + z, EXTIN_ZOOM_L + z, gpr + z); snd_emu10k1_init_stereo_control(controls + i++, "Zoom Video Playback Volume", gpr, 0); gpr += 2; /* Zoom Video Capture Volume + Switch */ for (z = 0; z < 2; z++) { SWITCH_IN(icode, &ptr, tmp + 0, EXTIN_ZOOM_L + z, gpr + 2 + z); VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z); } snd_emu10k1_init_stereo_control(controls + i++, "Zoom Video Capture Volume", gpr, 0); snd_emu10k1_init_stereo_onoff_control(controls + i++, "Zoom Video Capture Switch", gpr + 2, 0); gpr += 4; } if (emu->fx8010.extin_mask & ((1<<EXTIN_TOSLINK_L)|(1<<EXTIN_TOSLINK_R))) { /* IEC958 Optical Playback Volume */ for (z = 0; z < 2; z++) VOLUME_ADDIN(icode, &ptr, playback + z, EXTIN_TOSLINK_L + z, gpr + z); snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("LiveDrive ",PLAYBACK,VOLUME), gpr, 0); gpr += 2; /* IEC958 Optical Capture Volume */ for (z = 0; z < 2; z++) { SWITCH_IN(icode, &ptr, tmp + 0, EXTIN_TOSLINK_L + z, gpr + 2 + z); VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z); } snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("LiveDrive ",CAPTURE,VOLUME), gpr, 0); snd_emu10k1_init_stereo_onoff_control(controls + i++, SNDRV_CTL_NAME_IEC958("LiveDrive ",CAPTURE,SWITCH), gpr + 2, 0); gpr += 4; } if (emu->fx8010.extin_mask & ((1<<EXTIN_LINE1_L)|(1<<EXTIN_LINE1_R))) { /* Line 
LiveDrive Playback Volume */ for (z = 0; z < 2; z++) VOLUME_ADDIN(icode, &ptr, playback + z, EXTIN_LINE1_L + z, gpr + z); snd_emu10k1_init_stereo_control(controls + i++, "Line LiveDrive Playback Volume", gpr, 0); gpr += 2; /* Line LiveDrive Capture Volume + Switch */ for (z = 0; z < 2; z++) { SWITCH_IN(icode, &ptr, tmp + 0, EXTIN_LINE1_L + z, gpr + 2 + z); VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z); } snd_emu10k1_init_stereo_control(controls + i++, "Line LiveDrive Capture Volume", gpr, 0); snd_emu10k1_init_stereo_onoff_control(controls + i++, "Line LiveDrive Capture Switch", gpr + 2, 0); gpr += 4; } if (emu->fx8010.extin_mask & ((1<<EXTIN_COAX_SPDIF_L)|(1<<EXTIN_COAX_SPDIF_R))) { /* IEC958 Coax Playback Volume */ for (z = 0; z < 2; z++) VOLUME_ADDIN(icode, &ptr, playback + z, EXTIN_COAX_SPDIF_L + z, gpr + z); snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("Coaxial ",PLAYBACK,VOLUME), gpr, 0); gpr += 2; /* IEC958 Coax Capture Volume + Switch */ for (z = 0; z < 2; z++) { SWITCH_IN(icode, &ptr, tmp + 0, EXTIN_COAX_SPDIF_L + z, gpr + 2 + z); VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z); } snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("Coaxial ",CAPTURE,VOLUME), gpr, 0); snd_emu10k1_init_stereo_onoff_control(controls + i++, SNDRV_CTL_NAME_IEC958("Coaxial ",CAPTURE,SWITCH), gpr + 2, 0); gpr += 4; } if (emu->fx8010.extin_mask & ((1<<EXTIN_LINE2_L)|(1<<EXTIN_LINE2_R))) { /* Line LiveDrive Playback Volume */ for (z = 0; z < 2; z++) VOLUME_ADDIN(icode, &ptr, playback + z, EXTIN_LINE2_L + z, gpr + z); snd_emu10k1_init_stereo_control(controls + i++, "Line2 LiveDrive Playback Volume", gpr, 0); controls[i-1].id.index = 1; gpr += 2; /* Line LiveDrive Capture Volume */ for (z = 0; z < 2; z++) { SWITCH_IN(icode, &ptr, tmp + 0, EXTIN_LINE2_L + z, gpr + 2 + z); VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z); } snd_emu10k1_init_stereo_control(controls + i++, "Line2 LiveDrive Capture Volume", gpr, 0); 
controls[i-1].id.index = 1; snd_emu10k1_init_stereo_onoff_control(controls + i++, "Line2 LiveDrive Capture Switch", gpr + 2, 0); controls[i-1].id.index = 1; gpr += 4; } /* * Process tone control */ OP(icode, &ptr, iACC3, GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 0), GPR(playback + 0), C_00000000, C_00000000); /* left */ OP(icode, &ptr, iACC3, GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 1), GPR(playback + 1), C_00000000, C_00000000); /* right */ OP(icode, &ptr, iACC3, GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 2), GPR(playback + 2), C_00000000, C_00000000); /* rear left */ OP(icode, &ptr, iACC3, GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 3), GPR(playback + 3), C_00000000, C_00000000); /* rear right */ OP(icode, &ptr, iACC3, GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 4), GPR(playback + 4), C_00000000, C_00000000); /* center */ OP(icode, &ptr, iACC3, GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 5), GPR(playback + 5), C_00000000, C_00000000); /* LFE */ ctl = &controls[i + 0]; ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; strcpy(ctl->id.name, "Tone Control - Bass"); ctl->vcount = 2; ctl->count = 10; ctl->min = 0; ctl->max = 40; ctl->value[0] = ctl->value[1] = 20; ctl->tlv = snd_emu10k1_bass_treble_db_scale; ctl->translation = EMU10K1_GPR_TRANSLATION_BASS; ctl = &controls[i + 1]; ctl->id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; strcpy(ctl->id.name, "Tone Control - Treble"); ctl->vcount = 2; ctl->count = 10; ctl->min = 0; ctl->max = 40; ctl->value[0] = ctl->value[1] = 20; ctl->tlv = snd_emu10k1_bass_treble_db_scale; ctl->translation = EMU10K1_GPR_TRANSLATION_TREBLE; #define BASS_GPR 0x8c #define TREBLE_GPR 0x96 for (z = 0; z < 5; z++) { int j; for (j = 0; j < 2; j++) { controls[i + 0].gpr[z * 2 + j] = BASS_GPR + z * 2 + j; controls[i + 1].gpr[z * 2 + j] = TREBLE_GPR + z * 2 + j; } } for (z = 0; z < 3; z++) { /* front/rear/center-lfe */ int j, k, l, d; for (j = 0; j < 2; j++) { /* left/right */ k = 0xa0 + (z * 8) + (j * 4); l = 0xd0 + (z * 8) + (j * 4); d = 
playback + SND_EMU10K1_PLAYBACK_CHANNELS + z * 2 + j; OP(icode, &ptr, iMAC0, C_00000000, C_00000000, GPR(d), GPR(BASS_GPR + 0 + j)); OP(icode, &ptr, iMACMV, GPR(k+1), GPR(k), GPR(k+1), GPR(BASS_GPR + 4 + j)); OP(icode, &ptr, iMACMV, GPR(k), GPR(d), GPR(k), GPR(BASS_GPR + 2 + j)); OP(icode, &ptr, iMACMV, GPR(k+3), GPR(k+2), GPR(k+3), GPR(BASS_GPR + 8 + j)); OP(icode, &ptr, iMAC0, GPR(k+2), GPR_ACCU, GPR(k+2), GPR(BASS_GPR + 6 + j)); OP(icode, &ptr, iACC3, GPR(k+2), GPR(k+2), GPR(k+2), C_00000000); OP(icode, &ptr, iMAC0, C_00000000, C_00000000, GPR(k+2), GPR(TREBLE_GPR + 0 + j)); OP(icode, &ptr, iMACMV, GPR(l+1), GPR(l), GPR(l+1), GPR(TREBLE_GPR + 4 + j)); OP(icode, &ptr, iMACMV, GPR(l), GPR(k+2), GPR(l), GPR(TREBLE_GPR + 2 + j)); OP(icode, &ptr, iMACMV, GPR(l+3), GPR(l+2), GPR(l+3), GPR(TREBLE_GPR + 8 + j)); OP(icode, &ptr, iMAC0, GPR(l+2), GPR_ACCU, GPR(l+2), GPR(TREBLE_GPR + 6 + j)); OP(icode, &ptr, iMACINT0, GPR(l+2), C_00000000, GPR(l+2), C_00000010); OP(icode, &ptr, iACC3, GPR(d), GPR(l+2), C_00000000, C_00000000); if (z == 2) /* center */ break; } } i += 2; #undef BASS_GPR #undef TREBLE_GPR for (z = 0; z < 6; z++) { SWITCH(icode, &ptr, tmp + 0, playback + SND_EMU10K1_PLAYBACK_CHANNELS + z, gpr + 0); SWITCH_NEG(icode, &ptr, tmp + 1, gpr + 0); SWITCH(icode, &ptr, tmp + 1, playback + z, tmp + 1); OP(icode, &ptr, iACC3, GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + z), GPR(tmp + 0), GPR(tmp + 1), C_00000000); } snd_emu10k1_init_stereo_onoff_control(controls + i++, "Tone Control - Switch", gpr, 0); gpr += 2; /* * Process outputs */ if (emu->fx8010.extout_mask & ((1<<EXTOUT_AC97_L)|(1<<EXTOUT_AC97_R))) { /* AC'97 Playback Volume */ for (z = 0; z < 2; z++) OP(icode, &ptr, iACC3, EXTOUT(EXTOUT_AC97_L + z), GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + z), C_00000000, C_00000000); } if (emu->fx8010.extout_mask & ((1<<EXTOUT_TOSLINK_L)|(1<<EXTOUT_TOSLINK_R))) { /* IEC958 Optical Raw Playback Switch */ for (z = 0; z < 2; z++) { SWITCH(icode, &ptr, tmp + 0, 8 + z, gpr + 
z); SWITCH_NEG(icode, &ptr, tmp + 1, gpr + z); SWITCH(icode, &ptr, tmp + 1, playback + SND_EMU10K1_PLAYBACK_CHANNELS + z, tmp + 1); OP(icode, &ptr, iACC3, EXTOUT(EXTOUT_TOSLINK_L + z), GPR(tmp + 0), GPR(tmp + 1), C_00000000); #ifdef EMU10K1_CAPTURE_DIGITAL_OUT OP(icode, &ptr, iACC3, EXTOUT(EXTOUT_ADC_CAP_L + z), GPR(tmp + 0), GPR(tmp + 1), C_00000000); #endif } snd_emu10k1_init_stereo_onoff_control(controls + i++, SNDRV_CTL_NAME_IEC958("Optical Raw ",PLAYBACK,SWITCH), gpr, 0); gpr += 2; } if (emu->fx8010.extout_mask & ((1<<EXTOUT_HEADPHONE_L)|(1<<EXTOUT_HEADPHONE_R))) { /* Headphone Playback Volume */ for (z = 0; z < 2; z++) { SWITCH(icode, &ptr, tmp + 0, playback + SND_EMU10K1_PLAYBACK_CHANNELS + 4 + z, gpr + 2 + z); SWITCH_NEG(icode, &ptr, tmp + 1, gpr + 2 + z); SWITCH(icode, &ptr, tmp + 1, playback + SND_EMU10K1_PLAYBACK_CHANNELS + z, tmp + 1); OP(icode, &ptr, iACC3, GPR(tmp + 0), GPR(tmp + 0), GPR(tmp + 1), C_00000000); VOLUME_OUT(icode, &ptr, EXTOUT_HEADPHONE_L + z, tmp + 0, gpr + z); } snd_emu10k1_init_stereo_control(controls + i++, "Headphone Playback Volume", gpr + 0, 0); controls[i-1].id.index = 1; /* AC'97 can have also Headphone control */ snd_emu10k1_init_mono_onoff_control(controls + i++, "Headphone Center Playback Switch", gpr + 2, 0); controls[i-1].id.index = 1; snd_emu10k1_init_mono_onoff_control(controls + i++, "Headphone LFE Playback Switch", gpr + 3, 0); controls[i-1].id.index = 1; gpr += 4; } if (emu->fx8010.extout_mask & ((1<<EXTOUT_REAR_L)|(1<<EXTOUT_REAR_R))) for (z = 0; z < 2; z++) OP(icode, &ptr, iACC3, EXTOUT(EXTOUT_REAR_L + z), GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 2 + z), C_00000000, C_00000000); if (emu->fx8010.extout_mask & ((1<<EXTOUT_AC97_REAR_L)|(1<<EXTOUT_AC97_REAR_R))) for (z = 0; z < 2; z++) OP(icode, &ptr, iACC3, EXTOUT(EXTOUT_AC97_REAR_L + z), GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 2 + z), C_00000000, C_00000000); if (emu->fx8010.extout_mask & (1<<EXTOUT_AC97_CENTER)) { #ifndef EMU10K1_CENTER_LFE_FROM_FRONT 
OP(icode, &ptr, iACC3, EXTOUT(EXTOUT_AC97_CENTER), GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 4), C_00000000, C_00000000); OP(icode, &ptr, iACC3, EXTOUT(EXTOUT_ACENTER), GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 4), C_00000000, C_00000000); #else OP(icode, &ptr, iACC3, EXTOUT(EXTOUT_AC97_CENTER), GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 0), C_00000000, C_00000000); OP(icode, &ptr, iACC3, EXTOUT(EXTOUT_ACENTER), GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 0), C_00000000, C_00000000); #endif } if (emu->fx8010.extout_mask & (1<<EXTOUT_AC97_LFE)) { #ifndef EMU10K1_CENTER_LFE_FROM_FRONT OP(icode, &ptr, iACC3, EXTOUT(EXTOUT_AC97_LFE), GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 5), C_00000000, C_00000000); OP(icode, &ptr, iACC3, EXTOUT(EXTOUT_ALFE), GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 5), C_00000000, C_00000000); #else OP(icode, &ptr, iACC3, EXTOUT(EXTOUT_AC97_LFE), GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 1), C_00000000, C_00000000); OP(icode, &ptr, iACC3, EXTOUT(EXTOUT_ALFE), GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 1), C_00000000, C_00000000); #endif } #ifndef EMU10K1_CAPTURE_DIGITAL_OUT for (z = 0; z < 2; z++) OP(icode, &ptr, iACC3, EXTOUT(EXTOUT_ADC_CAP_L + z), GPR(capture + z), C_00000000, C_00000000); #endif if (emu->fx8010.extout_mask & (1<<EXTOUT_MIC_CAP)) OP(icode, &ptr, iACC3, EXTOUT(EXTOUT_MIC_CAP), GPR(capture + 2), C_00000000, C_00000000); /* EFX capture - capture the 16 EXTINS */ if (emu->card_capabilities->sblive51) { /* On the Live! 5.1, FXBUS2(1) and FXBUS(2) are shared with EXTOUT_ACENTER * and EXTOUT_ALFE, so we can't connect inputs to them for multitrack recording. * * Since only 14 of the 16 EXTINs are used, this is not a big problem. * We route AC97L and R to FX capture 14 and 15, SPDIF CD in to FX capture * 0 and 3, then the rest of the EXTINs to the corresponding FX capture * channel. Multitrack recorders will still see the center/lfe output signal * on the second and third channels. 
*/ OP(icode, &ptr, iACC3, FXBUS2(14), C_00000000, C_00000000, EXTIN(0)); OP(icode, &ptr, iACC3, FXBUS2(15), C_00000000, C_00000000, EXTIN(1)); OP(icode, &ptr, iACC3, FXBUS2(0), C_00000000, C_00000000, EXTIN(2)); OP(icode, &ptr, iACC3, FXBUS2(3), C_00000000, C_00000000, EXTIN(3)); for (z = 4; z < 14; z++) OP(icode, &ptr, iACC3, FXBUS2(z), C_00000000, C_00000000, EXTIN(z)); } else { for (z = 0; z < 16; z++) OP(icode, &ptr, iACC3, FXBUS2(z), C_00000000, C_00000000, EXTIN(z)); } if (gpr > tmp) { snd_BUG(); err = -EIO; goto __err; } if (i > SND_EMU10K1_GPR_CONTROLS) { snd_BUG(); err = -EIO; goto __err; } /* clear remaining instruction memory */ while (ptr < 0x200) OP(icode, &ptr, iACC3, C_00000000, C_00000000, C_00000000, C_00000000); if ((err = snd_emu10k1_fx8010_tram_setup(emu, ipcm->buffer_size)) < 0) goto __err; seg = snd_enter_user(); icode->gpr_add_control_count = i; icode->gpr_add_controls = (struct snd_emu10k1_fx8010_control_gpr __user *)controls; emu->support_tlv = 1; /* support TLV */ err = snd_emu10k1_icode_poke(emu, icode); emu->support_tlv = 0; /* clear again */ snd_leave_user(seg); if (err >= 0) err = snd_emu10k1_ipcm_poke(emu, ipcm); __err: kfree(ipcm); kfree(controls); if (icode != NULL) { kfree((void __force *)icode->gpr_map); kfree(icode); } return err; } int __devinit snd_emu10k1_init_efx(struct snd_emu10k1 *emu) { spin_lock_init(&emu->fx8010.irq_lock); INIT_LIST_HEAD(&emu->fx8010.gpr_ctl); if (emu->audigy) return _snd_emu10k1_audigy_init_efx(emu); else return _snd_emu10k1_init_efx(emu); } void snd_emu10k1_free_efx(struct snd_emu10k1 *emu) { /* stop processor */ if (emu->audigy) snd_emu10k1_ptr_write(emu, A_DBG, 0, emu->fx8010.dbg = A_DBG_SINGLE_STEP); else snd_emu10k1_ptr_write(emu, DBG, 0, emu->fx8010.dbg = EMU10K1_DBG_SINGLE_STEP); } #if 0 /* FIXME: who use them? 
 */
/* Set/clear the tone-control bypass switch GPR (0x94 + output) for one
 * of the 6 playback channels.  NOTE(review): compiled out by the
 * "#if 0" opened just above — kept for reference only. */
int snd_emu10k1_fx8010_tone_control_activate(struct snd_emu10k1 *emu, int output)
{
	if (output < 0 || output >= 6)
		return -EINVAL;
	snd_emu10k1_ptr_write(emu, emu->gpr_base + 0x94 + output, 0, 1);
	return 0;
}

int snd_emu10k1_fx8010_tone_control_deactivate(struct snd_emu10k1 *emu, int output)
{
	if (output < 0 || output >= 6)
		return -EINVAL;
	snd_emu10k1_ptr_write(emu, emu->gpr_base + 0x94 + output, 0, 0);
	return 0;
}
#endif

/*
 * (Re)allocate the external TRAM (tank memory) DMA buffer.
 * 'size' is in samples; it is rounded up to the next supported
 * power-of-two bucket (0x2000 << size_reg).  The tank cache is
 * locked via HCFG while TCB/TCBS are reprogrammed.
 * Returns 0 on success, -ENOMEM if the DMA allocation fails.
 */
int snd_emu10k1_fx8010_tram_setup(struct snd_emu10k1 *emu, u32 size)
{
	u8 size_reg = 0;

	/* size is in samples */
	if (size != 0) {
		size = (size - 1) >> 13;

		/* compute log2-style size register for the hardware */
		while (size) {
			size >>= 1;
			size_reg++;
		}
		size = 0x2000 << size_reg;
	}
	/* already configured at this size — nothing to do */
	if ((emu->fx8010.etram_pages.bytes / 2) == size)
		return 0;
	spin_lock_irq(&emu->emu_lock);
	outl(HCFG_LOCKTANKCACHE_MASK | inl(emu->port + HCFG), emu->port + HCFG);
	spin_unlock_irq(&emu->emu_lock);
	/* detach the old buffer from the hardware before freeing it */
	snd_emu10k1_ptr_write(emu, TCB, 0, 0);
	snd_emu10k1_ptr_write(emu, TCBS, 0, 0);
	if (emu->fx8010.etram_pages.area != NULL) {
		snd_dma_free_pages(&emu->fx8010.etram_pages);
		emu->fx8010.etram_pages.area = NULL;
		emu->fx8010.etram_pages.bytes = 0;
	}

	if (size > 0) {
		/* 2 bytes per sample */
		if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci),
					size * 2, &emu->fx8010.etram_pages) < 0)
			return -ENOMEM;
		memset(emu->fx8010.etram_pages.area, 0, size * 2);
		snd_emu10k1_ptr_write(emu, TCB, 0, emu->fx8010.etram_pages.addr);
		snd_emu10k1_ptr_write(emu, TCBS, 0, size_reg);
		spin_lock_irq(&emu->emu_lock);
		outl(inl(emu->port + HCFG) & ~HCFG_LOCKTANKCACHE_MASK, emu->port + HCFG);
		spin_unlock_irq(&emu->emu_lock);
	}

	return 0;
}

/* hwdep open callback — nothing to prepare */
static int snd_emu10k1_fx8010_open(struct snd_hwdep * hw, struct file *file)
{
	return 0;
}

/* Copy 'src' into 'dst', or synthesize a placeholder name
 * ("<null> <idx hex>") when no name is assigned. */
static void copy_string(char *dst, char *src, char *null, int idx)
{
	if (src == NULL)
		sprintf(dst, "%s %02X", null, idx);
	else
		strcpy(dst, src);
}

/*
 * Fill the userspace info structure: TRAM sizes, the names of all
 * FX buses and external inputs/outputs (masked entries get
 * placeholder names), and the number of GPR controls.
 */
static void snd_emu10k1_fx8010_info(struct snd_emu10k1 *emu,
				   struct snd_emu10k1_fx8010_info *info)
{
	char **fxbus, **extin, **extout;
	unsigned short fxbus_mask, extin_mask, extout_mask;
	int res;

	info->internal_tram_size = emu->fx8010.itram_size;
	info->external_tram_size = emu->fx8010.etram_pages.bytes / 2;
	fxbus = fxbuses;
	/* Audigy and original EMU10K1 have different pin name tables */
	extin = emu->audigy ? audigy_ins : creative_ins;
	extout = emu->audigy ? audigy_outs : creative_outs;
	fxbus_mask = emu->fx8010.fxbus_mask;
	extin_mask = emu->fx8010.extin_mask;
	extout_mask = emu->fx8010.extout_mask;
	for (res = 0; res < 16; res++, fxbus++, extin++, extout++) {
		copy_string(info->fxbus_names[res], fxbus_mask & (1 << res) ? *fxbus : NULL, "FXBUS", res);
		copy_string(info->extin_names[res], extin_mask & (1 << res) ? *extin : NULL, "Unused", res);
		copy_string(info->extout_names[res], extout_mask & (1 << res) ? *extout : NULL, "Unused", res);
	}
	/* outputs 16..31 have no matching fxbus/extin entries */
	for (res = 16; res < 32; res++, extout++)
		copy_string(info->extout_names[res], extout_mask & (1 << res) ? *extout : NULL, "Unused", res);
	info->gpr_controls = emu->fx8010.gpr_count;
}

/*
 * hwdep ioctl dispatcher for the FX8010: query info, poke/peek DSP
 * code and FX8010-PCM setup, TRAM sizing and debug-register access.
 * Mutating operations require CAP_SYS_ADMIN.
 */
static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, unsigned int cmd, unsigned long arg)
{
	struct snd_emu10k1 *emu = hw->private_data;
	struct snd_emu10k1_fx8010_info *info;
	struct snd_emu10k1_fx8010_code *icode;
	struct snd_emu10k1_fx8010_pcm_rec *ipcm;
	unsigned int addr;
	void __user *argp = (void __user *)arg;
	int res;

	switch (cmd) {
	case SNDRV_EMU10K1_IOCTL_PVERSION:
		emu->support_tlv = 1;
		return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp);
	case SNDRV_EMU10K1_IOCTL_INFO:
		info = kmalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;
		snd_emu10k1_fx8010_info(emu, info);
		if (copy_to_user(argp, info, sizeof(*info))) {
			kfree(info);
			return -EFAULT;
		}
		kfree(info);
		return 0;
	case SNDRV_EMU10K1_IOCTL_CODE_POKE:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		icode = memdup_user(argp, sizeof(*icode));
		if (IS_ERR(icode))
			return PTR_ERR(icode);
		res = snd_emu10k1_icode_poke(emu, icode);
		kfree(icode);
		return res;
	case SNDRV_EMU10K1_IOCTL_CODE_PEEK:
		/* read-only: no capability check needed */
		icode = memdup_user(argp, sizeof(*icode));
		if (IS_ERR(icode))
			return PTR_ERR(icode);
		res = snd_emu10k1_icode_peek(emu, icode);
		if (res == 0
&& copy_to_user(argp, icode, sizeof(*icode))) { kfree(icode); return -EFAULT; } kfree(icode); return res; case SNDRV_EMU10K1_IOCTL_PCM_POKE: ipcm = memdup_user(argp, sizeof(*ipcm)); if (IS_ERR(ipcm)) return PTR_ERR(ipcm); res = snd_emu10k1_ipcm_poke(emu, ipcm); kfree(ipcm); return res; case SNDRV_EMU10K1_IOCTL_PCM_PEEK: ipcm = memdup_user(argp, sizeof(*ipcm)); if (IS_ERR(ipcm)) return PTR_ERR(ipcm); res = snd_emu10k1_ipcm_peek(emu, ipcm); if (res == 0 && copy_to_user(argp, ipcm, sizeof(*ipcm))) { kfree(ipcm); return -EFAULT; } kfree(ipcm); return res; case SNDRV_EMU10K1_IOCTL_TRAM_SETUP: if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(addr, (unsigned int __user *)argp)) return -EFAULT; mutex_lock(&emu->fx8010.lock); res = snd_emu10k1_fx8010_tram_setup(emu, addr); mutex_unlock(&emu->fx8010.lock); return res; case SNDRV_EMU10K1_IOCTL_STOP: if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (emu->audigy) snd_emu10k1_ptr_write(emu, A_DBG, 0, emu->fx8010.dbg |= A_DBG_SINGLE_STEP); else snd_emu10k1_ptr_write(emu, DBG, 0, emu->fx8010.dbg |= EMU10K1_DBG_SINGLE_STEP); return 0; case SNDRV_EMU10K1_IOCTL_CONTINUE: if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (emu->audigy) snd_emu10k1_ptr_write(emu, A_DBG, 0, emu->fx8010.dbg = 0); else snd_emu10k1_ptr_write(emu, DBG, 0, emu->fx8010.dbg = 0); return 0; case SNDRV_EMU10K1_IOCTL_ZERO_TRAM_COUNTER: if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (emu->audigy) snd_emu10k1_ptr_write(emu, A_DBG, 0, emu->fx8010.dbg | A_DBG_ZC); else snd_emu10k1_ptr_write(emu, DBG, 0, emu->fx8010.dbg | EMU10K1_DBG_ZC); udelay(10); if (emu->audigy) snd_emu10k1_ptr_write(emu, A_DBG, 0, emu->fx8010.dbg); else snd_emu10k1_ptr_write(emu, DBG, 0, emu->fx8010.dbg); return 0; case SNDRV_EMU10K1_IOCTL_SINGLE_STEP: if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(addr, (unsigned int __user *)argp)) return -EFAULT; if (addr > 0x1ff) return -EINVAL; if (emu->audigy) snd_emu10k1_ptr_write(emu, A_DBG, 0, emu->fx8010.dbg |= A_DBG_SINGLE_STEP | addr); 
else snd_emu10k1_ptr_write(emu, DBG, 0, emu->fx8010.dbg |= EMU10K1_DBG_SINGLE_STEP | addr); udelay(10); if (emu->audigy) snd_emu10k1_ptr_write(emu, A_DBG, 0, emu->fx8010.dbg |= A_DBG_SINGLE_STEP | A_DBG_STEP_ADDR | addr); else snd_emu10k1_ptr_write(emu, DBG, 0, emu->fx8010.dbg |= EMU10K1_DBG_SINGLE_STEP | EMU10K1_DBG_STEP | addr); return 0; case SNDRV_EMU10K1_IOCTL_DBG_READ: if (emu->audigy) addr = snd_emu10k1_ptr_read(emu, A_DBG, 0); else addr = snd_emu10k1_ptr_read(emu, DBG, 0); if (put_user(addr, (unsigned int __user *)argp)) return -EFAULT; return 0; } return -ENOTTY; } static int snd_emu10k1_fx8010_release(struct snd_hwdep * hw, struct file *file) { return 0; } int __devinit snd_emu10k1_fx8010_new(struct snd_emu10k1 *emu, int device, struct snd_hwdep ** rhwdep) { struct snd_hwdep *hw; int err; if (rhwdep) *rhwdep = NULL; if ((err = snd_hwdep_new(emu->card, "FX8010", device, &hw)) < 0) return err; strcpy(hw->name, "EMU10K1 (FX8010)"); hw->iface = SNDRV_HWDEP_IFACE_EMU10K1; hw->ops.open = snd_emu10k1_fx8010_open; hw->ops.ioctl = snd_emu10k1_fx8010_ioctl; hw->ops.release = snd_emu10k1_fx8010_release; hw->private_data = emu; if (rhwdep) *rhwdep = hw; return 0; } #ifdef CONFIG_PM int __devinit snd_emu10k1_efx_alloc_pm_buffer(struct snd_emu10k1 *emu) { int len; len = emu->audigy ? 0x200 : 0x100; emu->saved_gpr = kmalloc(len * 4, GFP_KERNEL); if (! emu->saved_gpr) return -ENOMEM; len = emu->audigy ? 0x100 : 0xa0; emu->tram_val_saved = kmalloc(len * 4, GFP_KERNEL); emu->tram_addr_saved = kmalloc(len * 4, GFP_KERNEL); if (! emu->tram_val_saved || ! emu->tram_addr_saved) return -ENOMEM; len = emu->audigy ? 2 * 1024 : 2 * 512; emu->saved_icode = vmalloc(len * 4); if (! 
emu->saved_icode) return -ENOMEM; return 0; } void snd_emu10k1_efx_free_pm_buffer(struct snd_emu10k1 *emu) { kfree(emu->saved_gpr); kfree(emu->tram_val_saved); kfree(emu->tram_addr_saved); vfree(emu->saved_icode); } /* * save/restore GPR, TRAM and codes */ void snd_emu10k1_efx_suspend(struct snd_emu10k1 *emu) { int i, len; len = emu->audigy ? 0x200 : 0x100; for (i = 0; i < len; i++) emu->saved_gpr[i] = snd_emu10k1_ptr_read(emu, emu->gpr_base + i, 0); len = emu->audigy ? 0x100 : 0xa0; for (i = 0; i < len; i++) { emu->tram_val_saved[i] = snd_emu10k1_ptr_read(emu, TANKMEMDATAREGBASE + i, 0); emu->tram_addr_saved[i] = snd_emu10k1_ptr_read(emu, TANKMEMADDRREGBASE + i, 0); if (emu->audigy) { emu->tram_addr_saved[i] >>= 12; emu->tram_addr_saved[i] |= snd_emu10k1_ptr_read(emu, A_TANKMEMCTLREGBASE + i, 0) << 20; } } len = emu->audigy ? 2 * 1024 : 2 * 512; for (i = 0; i < len; i++) emu->saved_icode[i] = snd_emu10k1_efx_read(emu, i); } void snd_emu10k1_efx_resume(struct snd_emu10k1 *emu) { int i, len; /* set up TRAM */ if (emu->fx8010.etram_pages.bytes > 0) { unsigned size, size_reg = 0; size = emu->fx8010.etram_pages.bytes / 2; size = (size - 1) >> 13; while (size) { size >>= 1; size_reg++; } outl(HCFG_LOCKTANKCACHE_MASK | inl(emu->port + HCFG), emu->port + HCFG); snd_emu10k1_ptr_write(emu, TCB, 0, emu->fx8010.etram_pages.addr); snd_emu10k1_ptr_write(emu, TCBS, 0, size_reg); outl(inl(emu->port + HCFG) & ~HCFG_LOCKTANKCACHE_MASK, emu->port + HCFG); } if (emu->audigy) snd_emu10k1_ptr_write(emu, A_DBG, 0, emu->fx8010.dbg | A_DBG_SINGLE_STEP); else snd_emu10k1_ptr_write(emu, DBG, 0, emu->fx8010.dbg | EMU10K1_DBG_SINGLE_STEP); len = emu->audigy ? 0x200 : 0x100; for (i = 0; i < len; i++) snd_emu10k1_ptr_write(emu, emu->gpr_base + i, 0, emu->saved_gpr[i]); len = emu->audigy ? 0x100 : 0xa0; for (i = 0; i < len; i++) { snd_emu10k1_ptr_write(emu, TANKMEMDATAREGBASE + i, 0, emu->tram_val_saved[i]); if (! 
emu->audigy) snd_emu10k1_ptr_write(emu, TANKMEMADDRREGBASE + i, 0, emu->tram_addr_saved[i]); else { snd_emu10k1_ptr_write(emu, TANKMEMADDRREGBASE + i, 0, emu->tram_addr_saved[i] << 12); snd_emu10k1_ptr_write(emu, TANKMEMADDRREGBASE + i, 0, emu->tram_addr_saved[i] >> 20); } } len = emu->audigy ? 2 * 1024 : 2 * 512; for (i = 0; i < len; i++) snd_emu10k1_efx_write(emu, i, emu->saved_icode[i]); /* start FX processor when the DSP code is updated */ if (emu->audigy) snd_emu10k1_ptr_write(emu, A_DBG, 0, emu->fx8010.dbg); else snd_emu10k1_ptr_write(emu, DBG, 0, emu->fx8010.dbg); } #endif
gpl-2.0
Dm47021/Linux-kernel_4.1.15-rt17_MusicOS
arch/arm/mach-w90x900/mfp.c
9754
4272
/*
 * linux/arch/arm/mach-w90x900/mfp.c
 *
 * Copyright (c) 2008 Nuvoton technology corporation
 *
 * Wan ZongShun <mcuos.com@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation;version 2 of the License.
 */

/*
 * Multi-function pin (MFP) selection for the Nuvoton W90X900 SoC family.
 *
 * Every pin group shares the single GCR multi-function select register
 * (REG_MFSEL).  Each mfp_set_groupX() helper below routes one group of
 * pins either to a peripheral function or back to plain GPIO, keyed on
 * the name of the requesting device (or an explicit subname string).
 * All helpers perform a locked read-modify-write of REG_MFSEL under
 * mfp_mutex so concurrent callers cannot clobber each other's bits.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/io.h>

#include <mach/hardware.h>

/* Multi-function pin select register, offset 0xC into the GCR block. */
#define REG_MFSEL	(W90X900_VA_GCR + 0xC)

/* Group select fields inside REG_MFSEL. */
#define GPSELF		(0x01 << 1)	/* group F: MAC vs GPIOF[9:0] */
#define GPSELC		(0x03 << 2)	/* group C function field */
#define GPSELD		(0x0f << 4)	/* group D function field */

#define GPSELEI0	(0x01 << 26)	/* group I pin EI0 */
#define GPSELEI1	(0x01 << 27)	/* group I pin EI1 */

/* Group G sub-field masks (cleared before programming a function). */
#define GPIOG0TO1	(0x03 << 14)
#define GPIOG2TO3	(0x03 << 16)
#define GPIOG22TO23	(0x03 << 22)
#define GPIOG18TO20	(0x07 << 18)

/* Function-enable encodings written into the fields above. */
#define ENSPI		(0x0a << 14)
#define ENI2C0		(0x01 << 14)
#define ENI2C1		(0x01 << 16)
#define ENAC97		(0x02 << 22)
#define ENSD1		(0x02 << 18)
#define ENSD0		(0x0a << 4)
#define ENKPI		(0x02 << 2)
#define ENNAND		(0x01 << 2)

/* Serializes all read-modify-write access to REG_MFSEL. */
static DEFINE_MUTEX(mfp_mutex);

/*
 * mfp_set_groupf - route group F pins for @dev.
 *
 * "nuc900-emc" gets the Ethernet MAC function; any other device name
 * returns the group to GPIOF[9:0].  @dev must not be NULL.
 */
void mfp_set_groupf(struct device *dev)
{
	unsigned long mfpen;
	const char *dev_id;

	BUG_ON(!dev);

	mutex_lock(&mfp_mutex);

	dev_id = dev_name(dev);

	mfpen = __raw_readl(REG_MFSEL);

	if (strcmp(dev_id, "nuc900-emc") == 0)
		mfpen |= GPSELF;/*enable mac*/
	else
		mfpen &= ~GPSELF;/*GPIOF[9:0]*/

	__raw_writel(mfpen, REG_MFSEL);

	mutex_unlock(&mfp_mutex);
}
EXPORT_SYMBOL(mfp_set_groupf);

/*
 * mfp_set_groupc - route group C pins for @dev.
 *
 * Selects LCD, KPI or NAND depending on the device name; the GPSELC
 * field is always cleared first, so an unrecognized name leaves the
 * group as GPIOC[14:0].  @dev must not be NULL.
 */
void mfp_set_groupc(struct device *dev)
{
	unsigned long mfpen;
	const char *dev_id;

	BUG_ON(!dev);

	mutex_lock(&mfp_mutex);

	dev_id = dev_name(dev);

	mfpen = __raw_readl(REG_MFSEL);

	if (strcmp(dev_id, "nuc900-lcd") == 0)
		mfpen |= GPSELC;/*enable lcd*/
	else if (strcmp(dev_id, "nuc900-kpi") == 0) {
		mfpen &= (~GPSELC);/*enable kpi*/
		mfpen |= ENKPI;
	} else if (strcmp(dev_id, "nuc900-nand") == 0) {
		mfpen &= (~GPSELC);/*enable nand*/
		mfpen |= ENNAND;
	} else
		mfpen &= (~GPSELC);/*GPIOC[14:0]*/

	__raw_writel(mfpen, REG_MFSEL);

	mutex_unlock(&mfp_mutex);
}
EXPORT_SYMBOL(mfp_set_groupc);

/*
 * mfp_set_groupi - route group I pins for @dev.
 *
 * EI1 defaults to GPIO16 (cleared unconditionally) and is only set for
 * the watchdog; EI0 is set for ATAPI and cleared for the keypad.  Other
 * device names leave EI0 untouched.  @dev must not be NULL.
 */
void mfp_set_groupi(struct device *dev)
{
	unsigned long mfpen;
	const char *dev_id;

	BUG_ON(!dev);

	mutex_lock(&mfp_mutex);

	dev_id = dev_name(dev);

	mfpen = __raw_readl(REG_MFSEL);

	mfpen &= ~GPSELEI1;/*default gpio16*/

	if (strcmp(dev_id, "nuc900-wdog") == 0)
		mfpen |= GPSELEI1;/*enable wdog*/
	else if (strcmp(dev_id, "nuc900-atapi") == 0)
		mfpen |= GPSELEI0;/*enable atapi*/
	else if (strcmp(dev_id, "nuc900-keypad") == 0)
		mfpen &= ~GPSELEI0;/*enable keypad*/

	__raw_writel(mfpen, REG_MFSEL);

	mutex_unlock(&mfp_mutex);
}
EXPORT_SYMBOL(mfp_set_groupi);

/*
 * mfp_set_groupg - route group G pins for @dev / @subname.
 *
 * @subname, when non-NULL, overrides the device name as the lookup key
 * (used when one device owns several pin functions).  Recognized names
 * select SPI, I2C0, I2C1, AC97 or SD port 1; anything else reverts
 * GPIOG[3:0] to GPIO.  At least one of @dev and @subname must be
 * non-NULL; if @subname is NULL, @dev must be a valid device.
 */
void mfp_set_groupg(struct device *dev, const char *subname)
{
	unsigned long mfpen;
	const char *dev_id;

	BUG_ON((!dev) && (!subname));

	mutex_lock(&mfp_mutex);

	if (subname != NULL)
		dev_id = subname;
	else
		dev_id = dev_name(dev);

	mfpen = __raw_readl(REG_MFSEL);

	if (strcmp(dev_id, "nuc900-spi") == 0) {
		mfpen &= ~(GPIOG0TO1 | GPIOG2TO3);
		mfpen |= ENSPI;/*enable spi*/
	} else if (strcmp(dev_id, "nuc900-i2c0") == 0) {
		mfpen &= ~(GPIOG0TO1);
		mfpen |= ENI2C0;/*enable i2c0*/
	} else if (strcmp(dev_id, "nuc900-i2c1") == 0) {
		mfpen &= ~(GPIOG2TO3);
		mfpen |= ENI2C1;/*enable i2c1*/
	} else if (strcmp(dev_id, "nuc900-ac97") == 0) {
		mfpen &= ~(GPIOG22TO23);
		mfpen |= ENAC97;/*enable AC97*/
	} else if (strcmp(dev_id, "nuc900-mmc-port1") == 0) {
		mfpen &= ~(GPIOG18TO20);
		mfpen |= (ENSD1 | 0x01);/*enable sd1*/
	} else {
		mfpen &= ~(GPIOG0TO1 | GPIOG2TO3);/*GPIOG[3:0]*/
	}

	__raw_writel(mfpen, REG_MFSEL);

	mutex_unlock(&mfp_mutex);
}
EXPORT_SYMBOL(mfp_set_groupg);

/*
 * mfp_set_groupd - route group D pins for @dev / @subname.
 *
 * "nuc900-mmc-port0" selects SD port 0; any other key reverts the
 * group D field to its GPIO default.  Same @dev/@subname contract as
 * mfp_set_groupg().
 */
void mfp_set_groupd(struct device *dev, const char *subname)
{
	unsigned long mfpen;
	const char *dev_id;

	BUG_ON((!dev) && (!subname));

	mutex_lock(&mfp_mutex);

	if (subname != NULL)
		dev_id = subname;
	else
		dev_id = dev_name(dev);

	mfpen = __raw_readl(REG_MFSEL);

	if (strcmp(dev_id, "nuc900-mmc-port0") == 0) {
		mfpen &= ~GPSELD;/*enable sd0*/
		mfpen |= ENSD0;
	} else
		mfpen &= (~GPSELD);

	__raw_writel(mfpen, REG_MFSEL);

	mutex_unlock(&mfp_mutex);
}
EXPORT_SYMBOL(mfp_set_groupd);
gpl-2.0
davidmueller13/kernel_lt03lte_twv2_5.1.1
drivers/net/wireless/wl1251/init.c
11034
9169
/* * This file is part of wl1251 * * Copyright (C) 2009 Nokia Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include "init.h" #include "wl12xx_80211.h" #include "acx.h" #include "cmd.h" #include "reg.h" int wl1251_hw_init_hwenc_config(struct wl1251 *wl) { int ret; ret = wl1251_acx_feature_cfg(wl); if (ret < 0) { wl1251_warning("couldn't set feature config"); return ret; } ret = wl1251_acx_default_key(wl, wl->default_key); if (ret < 0) { wl1251_warning("couldn't set default key"); return ret; } return 0; } int wl1251_hw_init_templates_config(struct wl1251 *wl) { int ret; u8 partial_vbm[PARTIAL_VBM_MAX]; /* send empty templates for fw memory reservation */ ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, NULL, sizeof(struct wl12xx_probe_req_template)); if (ret < 0) return ret; ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA, NULL, sizeof(struct wl12xx_null_data_template)); if (ret < 0) return ret; ret = wl1251_cmd_template_set(wl, CMD_PS_POLL, NULL, sizeof(struct wl12xx_ps_poll_template)); if (ret < 0) return ret; ret = wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, NULL, sizeof (struct wl12xx_qos_null_data_template)); if (ret < 0) return ret; ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, NULL, sizeof (struct wl12xx_probe_resp_template)); if (ret < 0) return ret; ret = 
wl1251_cmd_template_set(wl, CMD_BEACON, NULL, sizeof (struct wl12xx_beacon_template)); if (ret < 0) return ret; /* tim templates, first reserve space then allocate an empty one */ memset(partial_vbm, 0, PARTIAL_VBM_MAX); ret = wl1251_cmd_vbm(wl, TIM_ELE_ID, partial_vbm, PARTIAL_VBM_MAX, 0); if (ret < 0) return ret; ret = wl1251_cmd_vbm(wl, TIM_ELE_ID, partial_vbm, 1, 0); if (ret < 0) return ret; return 0; } int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter) { int ret; ret = wl1251_acx_rx_msdu_life_time(wl, RX_MSDU_LIFETIME_DEF); if (ret < 0) return ret; ret = wl1251_acx_rx_config(wl, config, filter); if (ret < 0) return ret; return 0; } int wl1251_hw_init_phy_config(struct wl1251 *wl) { int ret; ret = wl1251_acx_pd_threshold(wl); if (ret < 0) return ret; ret = wl1251_acx_slot(wl, DEFAULT_SLOT_TIME); if (ret < 0) return ret; ret = wl1251_acx_group_address_tbl(wl); if (ret < 0) return ret; ret = wl1251_acx_service_period_timeout(wl); if (ret < 0) return ret; ret = wl1251_acx_rts_threshold(wl, RTS_THRESHOLD_DEF); if (ret < 0) return ret; return 0; } int wl1251_hw_init_beacon_filter(struct wl1251 *wl) { int ret; /* disable beacon filtering at this stage */ ret = wl1251_acx_beacon_filter_opt(wl, false); if (ret < 0) return ret; ret = wl1251_acx_beacon_filter_table(wl); if (ret < 0) return ret; return 0; } int wl1251_hw_init_pta(struct wl1251 *wl) { int ret; ret = wl1251_acx_sg_enable(wl); if (ret < 0) return ret; ret = wl1251_acx_sg_cfg(wl); if (ret < 0) return ret; return 0; } int wl1251_hw_init_energy_detection(struct wl1251 *wl) { int ret; ret = wl1251_acx_cca_threshold(wl); if (ret < 0) return ret; return 0; } int wl1251_hw_init_beacon_broadcast(struct wl1251 *wl) { int ret; ret = wl1251_acx_bcn_dtim_options(wl); if (ret < 0) return ret; return 0; } int wl1251_hw_init_power_auth(struct wl1251 *wl) { return wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM); } int wl1251_hw_init_mem_config(struct wl1251 *wl) { int ret; ret = wl1251_acx_mem_cfg(wl); if 
(ret < 0) return ret; wl->target_mem_map = kzalloc(sizeof(struct wl1251_acx_mem_map), GFP_KERNEL); if (!wl->target_mem_map) { wl1251_error("couldn't allocate target memory map"); return -ENOMEM; } /* we now ask for the firmware built memory map */ ret = wl1251_acx_mem_map(wl, wl->target_mem_map, sizeof(struct wl1251_acx_mem_map)); if (ret < 0) { wl1251_error("couldn't retrieve firmware memory map"); kfree(wl->target_mem_map); wl->target_mem_map = NULL; return ret; } return 0; } static int wl1251_hw_init_txq_fill(u8 qid, struct acx_tx_queue_qos_config *config, u32 num_blocks) { config->qid = qid; switch (qid) { case QOS_AC_BE: config->high_threshold = (QOS_TX_HIGH_BE_DEF * num_blocks) / 100; config->low_threshold = (QOS_TX_LOW_BE_DEF * num_blocks) / 100; break; case QOS_AC_BK: config->high_threshold = (QOS_TX_HIGH_BK_DEF * num_blocks) / 100; config->low_threshold = (QOS_TX_LOW_BK_DEF * num_blocks) / 100; break; case QOS_AC_VI: config->high_threshold = (QOS_TX_HIGH_VI_DEF * num_blocks) / 100; config->low_threshold = (QOS_TX_LOW_VI_DEF * num_blocks) / 100; break; case QOS_AC_VO: config->high_threshold = (QOS_TX_HIGH_VO_DEF * num_blocks) / 100; config->low_threshold = (QOS_TX_LOW_VO_DEF * num_blocks) / 100; break; default: wl1251_error("Invalid TX queue id: %d", qid); return -EINVAL; } return 0; } static int wl1251_hw_init_tx_queue_config(struct wl1251 *wl) { struct acx_tx_queue_qos_config *config; struct wl1251_acx_mem_map *wl_mem_map = wl->target_mem_map; int ret, i; wl1251_debug(DEBUG_ACX, "acx tx queue config"); config = kzalloc(sizeof(*config), GFP_KERNEL); if (!config) { ret = -ENOMEM; goto out; } for (i = 0; i < MAX_NUM_OF_AC; i++) { ret = wl1251_hw_init_txq_fill(i, config, wl_mem_map->num_tx_mem_blocks); if (ret < 0) goto out; ret = wl1251_cmd_configure(wl, ACX_TX_QUEUE_CFG, config, sizeof(*config)); if (ret < 0) goto out; } wl1251_acx_ac_cfg(wl, AC_BE, CWMIN_BE, CWMAX_BE, AIFS_DIFS, TXOP_BE); wl1251_acx_ac_cfg(wl, AC_BK, CWMIN_BK, CWMAX_BK, AIFS_DIFS, 
TXOP_BK); wl1251_acx_ac_cfg(wl, AC_VI, CWMIN_VI, CWMAX_VI, AIFS_DIFS, TXOP_VI); wl1251_acx_ac_cfg(wl, AC_VO, CWMIN_VO, CWMAX_VO, AIFS_DIFS, TXOP_VO); out: kfree(config); return ret; } static int wl1251_hw_init_data_path_config(struct wl1251 *wl) { int ret; /* asking for the data path parameters */ wl->data_path = kzalloc(sizeof(struct acx_data_path_params_resp), GFP_KERNEL); if (!wl->data_path) { wl1251_error("Couldnt allocate data path parameters"); return -ENOMEM; } ret = wl1251_acx_data_path_params(wl, wl->data_path); if (ret < 0) { kfree(wl->data_path); wl->data_path = NULL; return ret; } return 0; } int wl1251_hw_init(struct wl1251 *wl) { struct wl1251_acx_mem_map *wl_mem_map; int ret; ret = wl1251_hw_init_hwenc_config(wl); if (ret < 0) return ret; /* Template settings */ ret = wl1251_hw_init_templates_config(wl); if (ret < 0) return ret; /* Default memory configuration */ ret = wl1251_hw_init_mem_config(wl); if (ret < 0) return ret; /* Default data path configuration */ ret = wl1251_hw_init_data_path_config(wl); if (ret < 0) goto out_free_memmap; /* RX config */ ret = wl1251_hw_init_rx_config(wl, RX_CFG_PROMISCUOUS | RX_CFG_TSF, RX_FILTER_OPTION_DEF); /* RX_CONFIG_OPTION_ANY_DST_ANY_BSS, RX_FILTER_OPTION_FILTER_ALL); */ if (ret < 0) goto out_free_data_path; /* TX queues config */ ret = wl1251_hw_init_tx_queue_config(wl); if (ret < 0) goto out_free_data_path; /* PHY layer config */ ret = wl1251_hw_init_phy_config(wl); if (ret < 0) goto out_free_data_path; /* Initialize connection monitoring thresholds */ ret = wl1251_acx_conn_monit_params(wl); if (ret < 0) goto out_free_data_path; /* Beacon filtering */ ret = wl1251_hw_init_beacon_filter(wl); if (ret < 0) goto out_free_data_path; /* Bluetooth WLAN coexistence */ ret = wl1251_hw_init_pta(wl); if (ret < 0) goto out_free_data_path; /* Energy detection */ ret = wl1251_hw_init_energy_detection(wl); if (ret < 0) goto out_free_data_path; /* Beacons and boradcast settings */ ret = wl1251_hw_init_beacon_broadcast(wl); 
if (ret < 0) goto out_free_data_path; /* Enable data path */ ret = wl1251_cmd_data_path(wl, wl->channel, 1); if (ret < 0) goto out_free_data_path; /* Default power state */ ret = wl1251_hw_init_power_auth(wl); if (ret < 0) goto out_free_data_path; wl_mem_map = wl->target_mem_map; wl1251_info("%d tx blocks at 0x%x, %d rx blocks at 0x%x", wl_mem_map->num_tx_mem_blocks, wl->data_path->tx_control_addr, wl_mem_map->num_rx_mem_blocks, wl->data_path->rx_control_addr); return 0; out_free_data_path: kfree(wl->data_path); out_free_memmap: kfree(wl->target_mem_map); return ret; }
gpl-2.0
lostemp/linux-2.6.30.4_analysis
drivers/net/wireless/ar9170/mac.c
27
11439
/* * Atheros AR9170 driver * * MAC programming * * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, see * http://www.gnu.org/licenses/. * * This file incorporates work covered by the following copyright and * permission notice: * Copyright (c) 2007-2008 Atheros Communications, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "ar9170.h" #include "cmd.h" int ar9170_set_qos(struct ar9170 *ar) { ar9170_regwrite_begin(ar); ar9170_regwrite(AR9170_MAC_REG_AC0_CW, ar->edcf[0].cw_min | (ar->edcf[0].cw_max << 16)); ar9170_regwrite(AR9170_MAC_REG_AC1_CW, ar->edcf[1].cw_min | (ar->edcf[1].cw_max << 16)); ar9170_regwrite(AR9170_MAC_REG_AC2_CW, ar->edcf[2].cw_min | (ar->edcf[2].cw_max << 16)); ar9170_regwrite(AR9170_MAC_REG_AC3_CW, ar->edcf[3].cw_min | (ar->edcf[3].cw_max << 16)); ar9170_regwrite(AR9170_MAC_REG_AC4_CW, ar->edcf[4].cw_min | (ar->edcf[4].cw_max << 16)); ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_AIFS, ((ar->edcf[0].aifs * 9 + 10)) | ((ar->edcf[1].aifs * 9 + 10) << 12) | ((ar->edcf[2].aifs * 9 + 10) << 24)); ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_AIFS, ((ar->edcf[2].aifs * 9 + 10) >> 8) | ((ar->edcf[3].aifs * 9 + 10) << 4) | ((ar->edcf[4].aifs * 9 + 10) << 16)); ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP, ar->edcf[0].txop | ar->edcf[1].txop << 16); ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP, ar->edcf[1].txop | ar->edcf[3].txop << 16); ar9170_regwrite_finish(); return ar9170_regwrite_result(); } int ar9170_init_mac(struct ar9170 *ar) { ar9170_regwrite_begin(ar); ar9170_regwrite(AR9170_MAC_REG_ACK_EXTENSION, 0x40); ar9170_regwrite(AR9170_MAC_REG_RETRY_MAX, 0); /* enable MMIC */ ar9170_regwrite(AR9170_MAC_REG_SNIFFER, AR9170_MAC_REG_SNIFFER_DEFAULTS); ar9170_regwrite(AR9170_MAC_REG_RX_THRESHOLD, 0xc1f80); ar9170_regwrite(AR9170_MAC_REG_RX_PE_DELAY, 0x70); ar9170_regwrite(AR9170_MAC_REG_EIFS_AND_SIFS, 0xa144000); ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, 9 << 10); /* CF-END mode */ ar9170_regwrite(0x1c3b2c, 0x19000000); /* NAV protects ACK only (in TXOP) */ ar9170_regwrite(0x1c3b38, 0x201); /* Set Beacon PHY CTRL's TPC to 0x7, TA1=1 */ /* OTUS set AM to 0x1 */ ar9170_regwrite(AR9170_MAC_REG_BCN_HT1, 0x8000170); ar9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105); /* AGG test code*/ /* Aggregation MAX number and timeout */ ar9170_regwrite(0x1c3b9c, 0x10000a); 
ar9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER, AR9170_MAC_REG_FTF_DEFAULTS); /* Enable deaggregator, response in sniffer mode */ ar9170_regwrite(0x1c3c40, 0x1 | 1<<30); /* rate sets */ ar9170_regwrite(AR9170_MAC_REG_BASIC_RATE, 0x150f); ar9170_regwrite(AR9170_MAC_REG_MANDATORY_RATE, 0x150f); ar9170_regwrite(AR9170_MAC_REG_RTS_CTS_RATE, 0x10b01bb); /* MIMO response control */ ar9170_regwrite(0x1c3694, 0x4003C1E);/* bit 26~28 otus-AM */ /* switch MAC to OTUS interface */ ar9170_regwrite(0x1c3600, 0x3); ar9170_regwrite(AR9170_MAC_REG_AMPDU_RX_THRESH, 0xffff); /* set PHY register read timeout (??) */ ar9170_regwrite(AR9170_MAC_REG_MISC_680, 0xf00008); /* Disable Rx TimeOut, workaround for BB. */ ar9170_regwrite(AR9170_MAC_REG_RX_TIMEOUT, 0x0); /* Set CPU clock frequency to 88/80MHz */ ar9170_regwrite(AR9170_PWR_REG_CLOCK_SEL, AR9170_PWR_CLK_AHB_80_88MHZ | AR9170_PWR_CLK_DAC_160_INV_DLY); /* Set WLAN DMA interrupt mode: generate int per packet */ ar9170_regwrite(AR9170_MAC_REG_TXRX_MPI, 0x110011); ar9170_regwrite(AR9170_MAC_REG_FCS_SELECT, AR9170_MAC_FCS_FIFO_PROT); /* Disables the CF_END frame, undocumented register */ ar9170_regwrite(AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND, 0x141E0F48); ar9170_regwrite_finish(); return ar9170_regwrite_result(); } static int ar9170_set_mac_reg(struct ar9170 *ar, const u32 reg, const u8 *mac) { static const u8 zero[ETH_ALEN] = { 0 }; if (!mac) mac = zero; ar9170_regwrite_begin(ar); ar9170_regwrite(reg, (mac[3] << 24) | (mac[2] << 16) | (mac[1] << 8) | mac[0]); ar9170_regwrite(reg + 4, (mac[5] << 8) | mac[4]); ar9170_regwrite_finish(); return ar9170_regwrite_result(); } int ar9170_update_multicast(struct ar9170 *ar) { int err; ar9170_regwrite_begin(ar); ar9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_H, ar->want_mc_hash >> 32); ar9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_L, ar->want_mc_hash); ar9170_regwrite_finish(); err = ar9170_regwrite_result(); if (err) return err; ar->cur_mc_hash = ar->want_mc_hash; return 0; } int 
ar9170_update_frame_filter(struct ar9170 *ar) { int err; err = ar9170_write_reg(ar, AR9170_MAC_REG_FRAMETYPE_FILTER, ar->want_filter); if (err) return err; ar->cur_filter = ar->want_filter; return 0; } static int ar9170_set_promiscouous(struct ar9170 *ar) { u32 encr_mode, sniffer; int err; err = ar9170_read_reg(ar, AR9170_MAC_REG_SNIFFER, &sniffer); if (err) return err; err = ar9170_read_reg(ar, AR9170_MAC_REG_ENCRYPTION, &encr_mode); if (err) return err; if (ar->sniffer_enabled) { sniffer |= AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC; /* * Rx decryption works in place. * * If we don't disable it, the hardware will render all * encrypted frames which are encrypted with an unknown * key useless. */ encr_mode |= AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE; ar->sniffer_enabled = true; } else { sniffer &= ~AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC; if (ar->rx_software_decryption) encr_mode |= AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE; else encr_mode &= ~AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE; } ar9170_regwrite_begin(ar); ar9170_regwrite(AR9170_MAC_REG_ENCRYPTION, encr_mode); ar9170_regwrite(AR9170_MAC_REG_SNIFFER, sniffer); ar9170_regwrite_finish(); return ar9170_regwrite_result(); } int ar9170_set_operating_mode(struct ar9170 *ar) { u32 pm_mode = AR9170_MAC_REG_POWERMGT_DEFAULTS; u8 *mac_addr, *bssid; int err; if (ar->vif) { mac_addr = ar->mac_addr; bssid = ar->bssid; switch (ar->vif->type) { case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_ADHOC: pm_mode |= AR9170_MAC_REG_POWERMGT_IBSS; break; /* case NL80211_IFTYPE_AP: pm_mode |= AR9170_MAC_REG_POWERMGT_AP; break;*/ case NL80211_IFTYPE_WDS: pm_mode |= AR9170_MAC_REG_POWERMGT_AP_WDS; break; case NL80211_IFTYPE_MONITOR: ar->sniffer_enabled = true; ar->rx_software_decryption = true; break; default: pm_mode |= AR9170_MAC_REG_POWERMGT_STA; break; } } else { mac_addr = NULL; bssid = NULL; } err = ar9170_set_mac_reg(ar, AR9170_MAC_REG_MAC_ADDR_L, mac_addr); if (err) return err; err = ar9170_set_mac_reg(ar, AR9170_MAC_REG_BSSID_L, bssid); 
if (err) return err; err = ar9170_set_promiscouous(ar); if (err) return err; ar9170_regwrite_begin(ar); ar9170_regwrite(AR9170_MAC_REG_POWERMANAGEMENT, pm_mode); ar9170_regwrite_finish(); return ar9170_regwrite_result(); } int ar9170_set_hwretry_limit(struct ar9170 *ar, unsigned int max_retry) { u32 tmp = min_t(u32, 0x33333, max_retry * 0x11111); return ar9170_write_reg(ar, AR9170_MAC_REG_RETRY_MAX, tmp); } int ar9170_set_beacon_timers(struct ar9170 *ar) { u32 v = 0; u32 pretbtt = 0; v |= ar->hw->conf.beacon_int; if (ar->vif) { switch (ar->vif->type) { case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_ADHOC: v |= BIT(25); break; case NL80211_IFTYPE_AP: v |= BIT(24); pretbtt = (ar->hw->conf.beacon_int - 6) << 16; break; default: break; } v |= ar->vif->bss_conf.dtim_period << 16; } ar9170_regwrite_begin(ar); ar9170_regwrite(AR9170_MAC_REG_PRETBTT, pretbtt); ar9170_regwrite(AR9170_MAC_REG_BCN_PERIOD, v); ar9170_regwrite_finish(); return ar9170_regwrite_result(); } int ar9170_update_beacon(struct ar9170 *ar) { struct sk_buff *skb; __le32 *data, *old = NULL; u32 word; int i; skb = ieee80211_beacon_get(ar->hw, ar->vif); if (!skb) return -ENOMEM; data = (__le32 *)skb->data; if (ar->beacon) old = (__le32 *)ar->beacon->data; ar9170_regwrite_begin(ar); for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) { /* * XXX: This accesses beyond skb data for up * to the last 3 bytes!! 
*/ if (old && (data[i] == old[i])) continue; word = le32_to_cpu(data[i]); ar9170_regwrite(AR9170_BEACON_BUFFER_ADDRESS + 4 * i, word); } /* XXX: use skb->cb info */ if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP, ((skb->len + 4) << (3+16)) + 0x0400); else ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP, ((skb->len + 4) << (3+16)) + 0x0400); ar9170_regwrite(AR9170_MAC_REG_BCN_LENGTH, skb->len + 4); ar9170_regwrite(AR9170_MAC_REG_BCN_ADDR, AR9170_BEACON_BUFFER_ADDRESS); ar9170_regwrite(AR9170_MAC_REG_BCN_CTRL, 1); ar9170_regwrite_finish(); dev_kfree_skb(ar->beacon); ar->beacon = skb; return ar9170_regwrite_result(); } void ar9170_new_beacon(struct work_struct *work) { struct ar9170 *ar = container_of(work, struct ar9170, beacon_work); struct sk_buff *skb; if (unlikely(!IS_STARTED(ar))) return ; mutex_lock(&ar->mutex); if (!ar->vif) goto out; ar9170_update_beacon(ar); rcu_read_lock(); while ((skb = ieee80211_get_buffered_bc(ar->hw, ar->vif))) ar9170_op_tx(ar->hw, skb); rcu_read_unlock(); out: mutex_unlock(&ar->mutex); } int ar9170_upload_key(struct ar9170 *ar, u8 id, const u8 *mac, u8 ktype, u8 keyidx, u8 *keydata, int keylen) { __le32 vals[7]; static const u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; u8 dummy; mac = mac ? : bcast; vals[0] = cpu_to_le32((keyidx << 16) + id); vals[1] = cpu_to_le32(mac[1] << 24 | mac[0] << 16 | ktype); vals[2] = cpu_to_le32(mac[5] << 24 | mac[4] << 16 | mac[3] << 8 | mac[2]); memset(&vals[3], 0, 16); if (keydata) memcpy(&vals[3], keydata, keylen); return ar->exec_cmd(ar, AR9170_CMD_EKEY, sizeof(vals), (u8 *)vals, 1, &dummy); } int ar9170_disable_key(struct ar9170 *ar, u8 id) { __le32 val = cpu_to_le32(id); u8 dummy; return ar->exec_cmd(ar, AR9170_CMD_EKEY, sizeof(val), (u8 *)&val, 1, &dummy); }
gpl-2.0
skristiansson/eco32-gcc
libgcc/config/lm32/_modsi3.c
27
1999
/* _modsi3 for Lattice Mico32.
   Contributed by Jon Beniston <jon@beniston.com>

   Copyright (C) 2009-2013 Free Software Foundation, Inc.

   This file is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "libgcc_lm32.h"

/* Signed integer modulus.

   Computes a % b by reducing both operands to their magnitudes,
   performing an unsigned modulus, and then re-applying the sign of the
   dividend A (so the result has the same sign as A, matching C's '%').

   b == 0 does not return: it raises the LM32 divide-by-zero exception
   by branching directly to exception vector slot 5 in the handler
   table at EBA (each vector entry is 32 bytes).  */

SItype
__modsi3 (SItype a, SItype b)
{
  int neg = 0;
  SItype res;
  int cfg;

  if (b == 0)
    {
      /* Raise divide by zero exception. */
      int eba, sr;

      /* Save interrupt enable: copy IE bit 0 into bit 1 (EIE), as the
	 hardware would on exception entry, then write it back.  */
      __asm__ __volatile__ ("rcsr %0, IE":"=r" (sr));
      sr = (sr & 1) << 1;
      __asm__ __volatile__ ("wcsr IE, %0"::"r" (sr));

      /* Branch to exception handler: vector 5 (divide-by-zero) lives
	 at EBA + 5 * 32.  'ea' is set to the return address so the
	 handler can resume/report at the caller.  */
      __asm__ __volatile__ ("rcsr %0, EBA":"=r" (eba));
      eba += 32 * 5;
      __asm__ __volatile__ ("mv ea, ra");
      __asm__ __volatile__ ("b %0"::"r" (eba));
      __builtin_unreachable ();
    }

  /* Record the dividend's sign and work with magnitudes.
     NOTE(review): a = -a overflows for a == INT_MIN; presumably
     accepted here as elsewhere in libgcc's two's-complement helpers.  */
  if (a < 0)
    {
      a = -a;
      neg = 1;
    }

  if (b < 0)
    b = -b;

  /* CFG bit 1 advertises a hardware divider: use the 'modu'
     instruction when present, otherwise fall back to the software
     unsigned divide/modulus helper (third argument 1 = modulus).  */
  __asm__ ("rcsr %0, CFG":"=r" (cfg));
  if (cfg & 2)
    __asm__ ("modu %0, %1, %2": "=r" (res):"r" (a), "r" (b));
  else
    res = __udivmodsi4 (a, b, 1);

  /* Restore the dividend's sign.  */
  if (neg)
    res = -res;

  return res;
}
gpl-2.0
commial/python-spidermonkey
js/src/fdlibm/w_log.c
27
2702
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla Communicator client code, released
 * March 31, 1998.
 *
 * The Initial Developer of the Original Code is
 * Sun Microsystems, Inc.
 * Portions created by the Initial Developer are Copyright (C) 1998
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

/* @(#)w_log.c 1.3 95/01/18 */
/*
 * ====================================================
 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
 *
 * Developed at SunSoft, a Sun Microsystems, Inc. business.
 * Permission to use, copy, modify, and distribute this
 * software is freely granted, provided that this notice
 * is preserved.
 * ====================================================
 */

/*
 * wrapper log(x)
 *
 * Public entry point for the natural logarithm.  With _IEEE_LIBM
 * defined it is a thin pass-through to the core __ieee754_log().
 * Otherwise, out-of-domain arguments (x == 0, x < 0) are routed to
 * __kernel_standard() with the fdlibm matherr codes 16 ("log(0)")
 * and 17 ("log(x<0)") so the library-version policy in _LIB_VERSION
 * decides how the error is reported.  NaN and positive arguments
 * return the core result unchanged.
 */

#include "fdlibm.h"

#ifdef __STDC__
	double fd_log(double x)		/* wrapper log */
#else
	double fd_log(x)			/* wrapper log */
	double x;
#endif
{
#ifdef _IEEE_LIBM
	return __ieee754_log(x);
#else
	double z;
	int err;
	z = __ieee754_log(x);
	/* In pure-IEEE mode, or for NaN / in-domain x, no error dispatch. */
	if(_LIB_VERSION == _IEEE_ || fd_isnan(x) || x > 0.0) return z;
	if(x==0.0)
	    return __kernel_standard(x,x,16,&err); /* log(0) */
	else
	    return __kernel_standard(x,x,17,&err); /* log(x<0) */
#endif
}
gpl-2.0
ystk/debian-eglibc
libio/putc_u.c
27
1052
/* Copyright (C) 1991, 1995, 1996, 1997 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ #include "libioP.h" #include "stdio.h" #undef putc_unlocked int putc_unlocked (c, fp) int c; _IO_FILE *fp; { CHECK_FILE (fp, EOF); return _IO_putc_unlocked (c, fp); }
gpl-2.0
ztemt/A476_V1B_5.1_kernel
sound/soc/mediatek/mt_soc_audio_v2/mt_soc_pcm_voice_md1_bt.c
27
15036
/* * Copyright (C) 2007 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /******************************************************************************* * * Filename: * --------- * mt_soc_pcm_voice_md1_bt.c * * Project: * -------- * voice_bt call platform driver * * Description: * ------------ * * * Author: * ------- * Chipeng Chang * *------------------------------------------------------------------------------ * $Revision: #1 $ * $Modtime:$ * $Log:$ * * *******************************************************************************/ /***************************************************************************** * C O M P I L E R F L A G S *****************************************************************************/ /***************************************************************************** * E X T E R N A L R E F E R E N C E S *****************************************************************************/ #include <linux/dma-mapping.h> #include "AudDrv_Common.h" #include "AudDrv_Def.h" #include "AudDrv_Afe.h" #include "AudDrv_Ana.h" #include "AudDrv_Clk.h" #include "AudDrv_Kernel.h" #include "mt_soc_afe_control.h" #include "mt_soc_digital_type.h" #include "mt_soc_pcm_common.h" /* * function implementation */ static int mtk_voice_bt_probe(struct platform_device *pdev); static int mtk_voice_bt_close(struct snd_pcm_substream *substream); static int mtk_soc_voice_bt_new(struct snd_soc_pcm_runtime *rtd); static int 
mtk_voice_bt_platform_probe(struct snd_soc_platform *platform); static bool SetModemSpeechDAIBTAttribute(int sample_rate); static bool voice_bt_Status = false; static AudioDigtalI2S mAudioDigitalI2S; bool get_voice_bt_status(void) { return voice_bt_Status; } EXPORT_SYMBOL(get_voice_bt_status); static AudioDigitalPCM voice_bt1Pcm = { .mTxLchRepeatSel = Soc_Aud_TX_LCH_RPT_TX_LCH_NO_REPEAT, .mVbt16kModeSel = Soc_Aud_VBT_16K_MODE_VBT_16K_MODE_DISABLE, .mExtModemSel = Soc_Aud_EXT_MODEM_MODEM_2_USE_INTERNAL_MODEM, .mExtendBckSyncLength = 0, .mExtendBckSyncTypeSel = Soc_Aud_PCM_SYNC_TYPE_BCK_CYCLE_SYNC, .mSingelMicSel = Soc_Aud_BT_MODE_DUAL_MIC_ON_TX, .mAsyncFifoSel = Soc_Aud_BYPASS_SRC_SLAVE_USE_ASRC, .mSlaveModeSel = Soc_Aud_PCM_CLOCK_SOURCE_SALVE_MODE, .mPcmWordLength = Soc_Aud_PCM_WLEN_LEN_PCM_16BIT, .mPcmModeWidebandSel = false, .mPcmFormat = Soc_Aud_PCM_FMT_PCM_MODE_B, .mModemPcmOn = false, }; static struct snd_pcm_hw_constraint_list constraints_sample_rates = { .count = ARRAY_SIZE(soc_voice_supported_sample_rates), .list = soc_voice_supported_sample_rates, .mask = 0, }; static struct snd_pcm_hardware mtk_pcm_hardware = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID), .formats = SND_SOC_STD_MT_FMTS, .rates = SOC_NORMAL_USE_RATE, .rate_min = SOC_NORMAL_USE_RATE_MIN, .rate_max = SOC_NORMAL_USE_RATE_MAX, .channels_min = SOC_NORMAL_USE_CHANNELS_MIN, .channels_max = SOC_NORMAL_USE_CHANNELS_MAX, .buffer_bytes_max = MAX_BUFFER_SIZE, .period_bytes_max = MAX_PERIOD_SIZE, .periods_min = 1, .periods_max = 4096, .fifo_size = 0, }; static int mtk_voice_bt_pcm_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; int err = 0; int ret = 0; AudDrv_Clk_On(); printk("mtk_voice_bt_pcm_open\n"); if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { printk("%s with SNDRV_PCM_STREAM_CAPTURE \n", __func__); runtime->rate = 16000; return 0; } runtime->hw = mtk_pcm_hardware; 
memcpy((void *)(&(runtime->hw)), (void *)&mtk_pcm_hardware , sizeof(struct snd_pcm_hardware)); ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &constraints_sample_rates); ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) { printk("snd_pcm_hw_constraint_integer failed\n"); } //print for hw pcm information printk("mtk_voice_bt_pcm_open runtime rate = %d channels = %d \n", runtime->rate, runtime->channels); runtime->hw.info |= SNDRV_PCM_INFO_INTERLEAVED; runtime->hw.info |= SNDRV_PCM_INFO_NONINTERLEAVED; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { printk("SNDRV_PCM_STREAM_PLAYBACK mtkalsa_voice_bt_constraints\n"); runtime->rate = 16000; } else { } if (err < 0) { printk("mtk_voice_bt_close\n"); mtk_voice_bt_close(substream); return err; } printk("mtk_voice_bt_pcm_open return\n"); return 0; } #if 0 //not used static void ConfigAdcI2S(struct snd_pcm_substream *substream) { mAudioDigitalI2S.mLR_SWAP = Soc_Aud_LR_SWAP_NO_SWAP; mAudioDigitalI2S.mBuffer_Update_word = 8; mAudioDigitalI2S.mFpga_bit_test = 0; mAudioDigitalI2S.mFpga_bit = 0; mAudioDigitalI2S.mloopback = 0; mAudioDigitalI2S.mINV_LRCK = Soc_Aud_INV_LRCK_NO_INVERSE; mAudioDigitalI2S.mI2S_FMT = Soc_Aud_I2S_FORMAT_I2S; mAudioDigitalI2S.mI2S_WLEN = Soc_Aud_I2S_WLEN_WLEN_16BITS; mAudioDigitalI2S.mI2S_SAMPLERATE = (substream->runtime->rate); } #endif static int mtk_voice_bt_close(struct snd_pcm_substream *substream) { printk("mtk_voice_bt_close \n"); if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { printk("%s with SNDRV_PCM_STREAM_CAPTURE \n", __func__); AudDrv_Clk_Off(); return 0; } // interconnection setting SetConnection(Soc_Aud_InterCon_DisConnect, Soc_Aud_InterConnectionInput_I02, Soc_Aud_InterConnectionOutput_O17); SetConnection(Soc_Aud_InterCon_DisConnect, Soc_Aud_InterConnectionInput_I02, Soc_Aud_InterConnectionOutput_O18); SetConnection(Soc_Aud_InterCon_DisConnect, Soc_Aud_InterConnectionInput_I14, Soc_Aud_InterConnectionOutput_O02); // 
here start digital part SetMemoryPathEnable(Soc_Aud_Digital_Block_DAI_BT, false); SetDaiBtEnable(false); EnableAfe(false); AudDrv_Clk_Off(); voice_bt_Status = false; return 0; } static int mtk_voice_bt_trigger(struct snd_pcm_substream *substream, int cmd) { printk("mtk_voice_bt_trigger cmd = %d\n", cmd); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: break; } return 0; } static int mtk_voice_bt_pcm_copy(struct snd_pcm_substream *substream, int channel, snd_pcm_uframes_t pos, void __user *dst, snd_pcm_uframes_t count) { return 0; } static int mtk_voice_bt_pcm_silence(struct snd_pcm_substream *substream, int channel, snd_pcm_uframes_t pos, snd_pcm_uframes_t count) { printk("mtk_voice_bt_pcm_silence \n"); return 0; /* do nothing */ } static void *dummy_page[2]; static struct page *mtk_pcm_page(struct snd_pcm_substream *substream, unsigned long offset) { return virt_to_page(dummy_page[substream->stream]); /* the same page */ } static bool SetModemSpeechDAIBTAttribute(int sample_rate) { AudioDigitalDAIBT daibt_attribute; memset((void *)&daibt_attribute, 0, sizeof(daibt_attribute)); #if 0 // temp for merge only support daibt_attribute.mUSE_MRGIF_INPUT = Soc_Aud_BT_DAI_INPUT_FROM_BT; #else daibt_attribute.mUSE_MRGIF_INPUT = Soc_Aud_BT_DAI_INPUT_FROM_MGRIF; #endif daibt_attribute.mDAI_BT_MODE = (sample_rate == 8000) ? 
Soc_Aud_DATBT_MODE_Mode8K : Soc_Aud_DATBT_MODE_Mode16K; daibt_attribute.mDAI_DEL = Soc_Aud_DAI_DEL_HighWord; // suggest always HighWord daibt_attribute.mBT_LEN = 0; daibt_attribute.mDATA_RDY = true; daibt_attribute.mBT_SYNC = Soc_Aud_BTSYNC_Short_Sync; daibt_attribute.mBT_ON = true; daibt_attribute.mDAIBT_ON = false; SetDaiBt(&daibt_attribute); return true; } static int mtk_voice_bt1_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtimeStream = substream->runtime; printk("mtk_alsa_prepare rate = %d channels = %d period_size = %lu\n", runtimeStream->rate, runtimeStream->channels, runtimeStream->period_size); if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { printk("%s with SNDRV_PCM_STREAM_CAPTURE \n", __func__); return 0; } AudDrv_Clk_On(); // here start digital part SetConnection(Soc_Aud_InterCon_Connection, Soc_Aud_InterConnectionInput_I02, Soc_Aud_InterConnectionOutput_O17); SetConnection(Soc_Aud_InterCon_Connection, Soc_Aud_InterConnectionInput_I02, Soc_Aud_InterConnectionOutput_O18); SetConnection(Soc_Aud_InterCon_Connection, Soc_Aud_InterConnectionInput_I14, Soc_Aud_InterConnectionOutput_O02); if (GetMemoryPathEnable(Soc_Aud_Digital_Block_DAI_BT) == false) { //set merge interface SetMemoryPathEnable(Soc_Aud_Digital_Block_DAI_BT, true); } else { SetMemoryPathEnable(Soc_Aud_Digital_Block_DAI_BT, true); } // now use samplerate 8000 SetModemSpeechDAIBTAttribute(runtimeStream->rate); SetDaiBtEnable(true); voice_bt1Pcm.mPcmModeWidebandSel = (runtimeStream->rate == 8000) ? 
Soc_Aud_PCM_MODE_PCM_MODE_8K : Soc_Aud_PCM_MODE_PCM_MODE_16K; voice_bt1Pcm.mAsyncFifoSel = Soc_Aud_BYPASS_SRC_SLAVE_USE_ASYNC_FIFO; SetModemPcmConfig(MODEM_1, voice_bt1Pcm); SetModemPcmEnable(MODEM_1, true); EnableAfe(true); voice_bt_Status = true; return 0; } static int mtk_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { int ret = 0; printk("mtk_pcm_hw_params \n"); return ret; } static int mtk_voice_bt_hw_free(struct snd_pcm_substream *substream) { PRINTK_AUDDRV("mtk_voice_bt_hw_free \n"); return snd_pcm_lib_free_pages(substream); } static struct snd_pcm_ops mtk_voice_bt_ops = { .open = mtk_voice_bt_pcm_open, .close = mtk_voice_bt_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = mtk_pcm_hw_params, .hw_free = mtk_voice_bt_hw_free, .prepare = mtk_voice_bt1_prepare, .trigger = mtk_voice_bt_trigger, .copy = mtk_voice_bt_pcm_copy, .silence = mtk_voice_bt_pcm_silence, .page = mtk_pcm_page, }; static struct snd_soc_platform_driver mtk_soc_voice_bt_platform = { .ops = &mtk_voice_bt_ops, .pcm_new = mtk_soc_voice_bt_new, .probe = mtk_voice_bt_platform_probe, }; static int mtk_voice_bt_probe(struct platform_device *pdev) { printk("mtk_voice_bt_probe\n"); pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64); if (!pdev->dev.dma_mask) { pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; } if (pdev->dev.of_node) { dev_set_name(&pdev->dev, "%s", MT_SOC_VOICE_MD1_BT); } printk("%s: dev name %s\n", __func__, dev_name(&pdev->dev)); return snd_soc_register_platform(&pdev->dev, &mtk_soc_voice_bt_platform); } static int mtk_soc_voice_bt_new(struct snd_soc_pcm_runtime *rtd) { int ret = 0; printk("%s\n", __func__); return ret; } static int mtk_voice_bt_platform_probe(struct snd_soc_platform *platform) { printk("mtk_voice_bt_platform_probe\n"); return 0; } static int mtk_voice_bt_remove(struct platform_device *pdev) { pr_debug("%s\n", __func__); snd_soc_unregister_platform(&pdev->dev); return 0; } //supend and resume function static int 
mtk_voice_bt_pm_ops_suspend(struct device *device) { // if now in phone call state, not suspend!! bool b_modem1_speech_on; bool b_modem2_speech_on; AudDrv_Clk_On();//should enable clk for access reg b_modem1_speech_on = (bool)(Afe_Get_Reg(PCM2_INTF_CON) & 0x1); b_modem2_speech_on = (bool)(Afe_Get_Reg(PCM_INTF_CON) & 0x1); AudDrv_Clk_Off(); if (b_modem1_speech_on == true || b_modem2_speech_on == true) { clkmux_sel(MT_MUX_AUDINTBUS, 0, "AUDIO"); //select 26M return 0; } return 0; } static int mtk_voice_bt_pm_ops_resume(struct device *device) { bool b_modem1_speech_on; bool b_modem2_speech_on; AudDrv_Clk_On();//should enable clk for access reg b_modem1_speech_on = (bool)(Afe_Get_Reg(PCM2_INTF_CON) & 0x1); b_modem2_speech_on = (bool)(Afe_Get_Reg(PCM_INTF_CON) & 0x1); AudDrv_Clk_Off(); if (b_modem1_speech_on == true || b_modem2_speech_on == true) { clkmux_sel(MT_MUX_AUDINTBUS, 1, "AUDIO"); //mainpll return 0; } return 0; } struct dev_pm_ops mtk_voice_bt_pm_ops = { .suspend = mtk_voice_bt_pm_ops_suspend, .resume = mtk_voice_bt_pm_ops_resume, .freeze = NULL, .thaw = NULL, .poweroff = NULL, .restore = NULL, .restore_noirq = NULL, }; #ifdef CONFIG_OF static const struct of_device_id mt_soc_pcm_voice_md1_bt_of_ids[] = { { .compatible = "mediatek,mt_soc_pcm_voice_md1_bt", }, {} }; #endif static struct platform_driver mtk_voice_bt_driver = { .driver = { .name = MT_SOC_VOICE_MD1_BT, .owner = THIS_MODULE, #ifdef CONFIG_OF .of_match_table = mt_soc_pcm_voice_md1_bt_of_ids, #endif #ifdef CONFIG_PM .pm = &mtk_voice_bt_pm_ops, #endif }, .probe = mtk_voice_bt_probe, .remove = mtk_voice_bt_remove, }; #ifndef CONFIG_OF static struct platform_device *soc_mtk_voice_bt_dev; #endif static int __init mtk_soc_voice_bt_platform_init(void) { int ret = 0; printk("%s\n", __func__); #ifndef CONFIG_OF soc_mtk_voice_bt_dev = platform_device_alloc(MT_SOC_VOICE_MD1_BT, -1); if (!soc_mtk_voice_bt_dev) { return -ENOMEM; } ret = platform_device_add(soc_mtk_voice_bt_dev); if (ret != 0) { 
platform_device_put(soc_mtk_voice_bt_dev); return ret; } #endif ret = platform_driver_register(&mtk_voice_bt_driver); return ret; } module_init(mtk_soc_voice_bt_platform_init); static void __exit mtk_soc_voice_bt_platform_exit(void) { printk("%s\n", __func__); platform_driver_unregister(&mtk_voice_bt_driver); } module_exit(mtk_soc_voice_bt_platform_exit); MODULE_DESCRIPTION("AFE PCM module platform driver"); MODULE_LICENSE("GPL");
gpl-2.0
CyanideL/android_kernel_asus_grouper
arch/sparc/kernel/signal_64.c
539
18633
/* * arch/sparc64/kernel/signal.c * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 1995, 2008 David S. Miller (davem@davemloft.net) * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #ifdef CONFIG_COMPAT #include <linux/compat.h> /* for compat_old_sigset_t */ #endif #include <linux/sched.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> #include <linux/tracehook.h> #include <linux/unistd.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/bitops.h> #include <asm/uaccess.h> #include <asm/ptrace.h> #include <asm/pgtable.h> #include <asm/fpumacro.h> #include <asm/uctx.h> #include <asm/siginfo.h> #include <asm/visasm.h> #include "entry.h" #include "systbls.h" #include "sigutil.h" #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) /* {set, get}context() needed for 64-bit SparcLinux userland. 
*/ asmlinkage void sparc64_set_context(struct pt_regs *regs) { struct ucontext __user *ucp = (struct ucontext __user *) regs->u_regs[UREG_I0]; mc_gregset_t __user *grp; unsigned long pc, npc, tstate; unsigned long fp, i7; unsigned char fenab; int err; flush_user_windows(); if (get_thread_wsaved() || (((unsigned long)ucp) & (sizeof(unsigned long)-1)) || (!__access_ok(ucp, sizeof(*ucp)))) goto do_sigsegv; grp = &ucp->uc_mcontext.mc_gregs; err = __get_user(pc, &((*grp)[MC_PC])); err |= __get_user(npc, &((*grp)[MC_NPC])); if (err || ((pc | npc) & 3)) goto do_sigsegv; if (regs->u_regs[UREG_I1]) { sigset_t set; if (_NSIG_WORDS == 1) { if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0])) goto do_sigsegv; } else { if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t))) goto do_sigsegv; } sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); } if (test_thread_flag(TIF_32BIT)) { pc &= 0xffffffff; npc &= 0xffffffff; } regs->tpc = pc; regs->tnpc = npc; err |= __get_user(regs->y, &((*grp)[MC_Y])); err |= __get_user(tstate, &((*grp)[MC_TSTATE])); regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC); regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC)); err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1])); err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2])); err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3])); err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4])); err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5])); err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6])); /* Skip %g7 as that's the thread register in userspace. 
*/ err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0])); err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1])); err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2])); err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3])); err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4])); err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5])); err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6])); err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7])); err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp)); err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7)); err |= __put_user(fp, (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6]))); err |= __put_user(i7, (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7]))); err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab)); if (fenab) { unsigned long *fpregs = current_thread_info()->fpregs; unsigned long fprs; fprs_write(0); err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs)); if (fprs & FPRS_DL) err |= copy_from_user(fpregs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs), (sizeof(unsigned int) * 32)); if (fprs & FPRS_DU) err |= copy_from_user(fpregs+16, ((unsigned long __user *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16, (sizeof(unsigned int) * 32)); err |= __get_user(current_thread_info()->xfsr[0], &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr)); err |= __get_user(current_thread_info()->gsr[0], &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr)); regs->tstate &= ~TSTATE_PEF; } if (err) goto do_sigsegv; return; do_sigsegv: force_sig(SIGSEGV, current); } asmlinkage void sparc64_get_context(struct pt_regs *regs) { struct ucontext __user *ucp = (struct ucontext __user *) regs->u_regs[UREG_I0]; mc_gregset_t __user *grp; mcontext_t __user *mcp; unsigned long fp, i7; unsigned char fenab; int err; synchronize_user_stack(); if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp))) goto do_sigsegv; #if 1 fenab = 0; /* IMO get_context is like any other system 
call, thus modifies FPU state -jj */ #else fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF); #endif mcp = &ucp->uc_mcontext; grp = &mcp->mc_gregs; /* Skip over the trap instruction, first. */ if (test_thread_flag(TIF_32BIT)) { regs->tpc = (regs->tnpc & 0xffffffff); regs->tnpc = (regs->tnpc + 4) & 0xffffffff; } else { regs->tpc = regs->tnpc; regs->tnpc += 4; } err = 0; if (_NSIG_WORDS == 1) err |= __put_user(current->blocked.sig[0], (unsigned long __user *)&ucp->uc_sigmask); else err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked, sizeof(sigset_t)); err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE])); err |= __put_user(regs->tpc, &((*grp)[MC_PC])); err |= __put_user(regs->tnpc, &((*grp)[MC_NPC])); err |= __put_user(regs->y, &((*grp)[MC_Y])); err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1])); err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2])); err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3])); err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4])); err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5])); err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6])); err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7])); err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0])); err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1])); err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2])); err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3])); err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4])); err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5])); err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6])); err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7])); err |= __get_user(fp, (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6]))); err |= __get_user(i7, (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7]))); err |= __put_user(fp, &(mcp->mc_fp)); err |= __put_user(i7, &(mcp->mc_i7)); err |= __put_user(fenab, 
&(mcp->mc_fpregs.mcfpu_enab)); if (fenab) { unsigned long *fpregs = current_thread_info()->fpregs; unsigned long fprs; fprs = current_thread_info()->fpsaved[0]; if (fprs & FPRS_DL) err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs, (sizeof(unsigned int) * 32)); if (fprs & FPRS_DU) err |= copy_to_user( ((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16, (sizeof(unsigned int) * 32)); err |= __put_user(current_thread_info()->xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr)); err |= __put_user(current_thread_info()->gsr[0], &(mcp->mc_fpregs.mcfpu_gsr)); err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs)); } if (err) goto do_sigsegv; return; do_sigsegv: force_sig(SIGSEGV, current); } struct rt_signal_frame { struct sparc_stackf ss; siginfo_t info; struct pt_regs regs; __siginfo_fpu_t __user *fpu_save; stack_t stack; sigset_t mask; __siginfo_rwin_t *rwin_save; }; static long _sigpause_common(old_sigset_t set) { sigset_t blocked; current->saved_sigmask = current->blocked; set &= _BLOCKABLE; siginitset(&blocked, set); set_current_blocked(&blocked); current->state = TASK_INTERRUPTIBLE; schedule(); set_restore_sigmask(); return -ERESTARTNOHAND; } asmlinkage long sys_sigpause(unsigned int set) { return _sigpause_common(set); } asmlinkage long sys_sigsuspend(old_sigset_t set) { return _sigpause_common(set); } void do_rt_sigreturn(struct pt_regs *regs) { struct rt_signal_frame __user *sf; unsigned long tpc, tnpc, tstate; __siginfo_fpu_t __user *fpu_save; __siginfo_rwin_t __user *rwin_save; sigset_t set; int err; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; synchronize_user_stack (); sf = (struct rt_signal_frame __user *) (regs->u_regs [UREG_FP] + STACK_BIAS); /* 1. 
Make sure we are not getting garbage from the user */ if (((unsigned long) sf) & 3) goto segv; err = get_user(tpc, &sf->regs.tpc); err |= __get_user(tnpc, &sf->regs.tnpc); if (test_thread_flag(TIF_32BIT)) { tpc &= 0xffffffff; tnpc &= 0xffffffff; } err |= ((tpc | tnpc) & 3); /* 2. Restore the state */ err |= __get_user(regs->y, &sf->regs.y); err |= __get_user(tstate, &sf->regs.tstate); err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs)); /* User can only change condition codes and %asi in %tstate. */ regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC); regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC)); err |= __get_user(fpu_save, &sf->fpu_save); if (!err && fpu_save) err |= restore_fpu_state(regs, fpu_save); err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf); if (err) goto segv; err |= __get_user(rwin_save, &sf->rwin_save); if (!err && rwin_save) { if (restore_rwin_state(rwin_save)) goto segv; } regs->tpc = tpc; regs->tnpc = tnpc; /* Prevent syscall restart. */ pt_regs_clear_syscall(regs); sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); return; segv: force_sig(SIGSEGV, current); } /* Checks if the fp is valid */ static int invalid_frame_pointer(void __user *fp) { if (((unsigned long) fp) & 15) return 1; return 0; } static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize) { unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS; /* * If we are on the alternate signal stack and would overflow it, don't. * Return an always-bogus address instead so we will die with SIGSEGV. */ if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize))) return (void __user *) -1L; /* This is the X/Open sanctioned signal stack switching. */ if (ka->sa.sa_flags & SA_ONSTACK) { if (sas_ss_flags(sp) == 0) sp = current->sas_ss_sp + current->sas_ss_size; } sp -= framesize; /* Always align the stack frame. 
This handles two cases. First, * sigaltstack need not be mindful of platform specific stack * alignment. Second, if we took this signal because the stack * is not aligned properly, we'd like to take the signal cleanly * and report that. */ sp &= ~15UL; return (void __user *) sp; } static inline int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, int signo, sigset_t *oldset, siginfo_t *info) { struct rt_signal_frame __user *sf; int wsaved, err, sf_size; void __user *tail; /* 1. Make sure everything is clean */ synchronize_user_stack(); save_and_clear_fpu(); wsaved = get_thread_wsaved(); sf_size = sizeof(struct rt_signal_frame); if (current_thread_info()->fpsaved[0] & FPRS_FEF) sf_size += sizeof(__siginfo_fpu_t); if (wsaved) sf_size += sizeof(__siginfo_rwin_t); sf = (struct rt_signal_frame __user *) get_sigframe(ka, regs, sf_size); if (invalid_frame_pointer (sf)) goto sigill; tail = (sf + 1); /* 2. Save the current process state */ err = copy_to_user(&sf->regs, regs, sizeof (*regs)); if (current_thread_info()->fpsaved[0] & FPRS_FEF) { __siginfo_fpu_t __user *fpu_save = tail; tail += sizeof(__siginfo_fpu_t); err |= save_fpu_state(regs, fpu_save); err |= __put_user((u64)fpu_save, &sf->fpu_save); } else { err |= __put_user(0, &sf->fpu_save); } if (wsaved) { __siginfo_rwin_t __user *rwin_save = tail; tail += sizeof(__siginfo_rwin_t); err |= save_rwin_state(wsaved, rwin_save); err |= __put_user((u64)rwin_save, &sf->rwin_save); set_thread_wsaved(0); } else { err |= __put_user(0, &sf->rwin_save); } /* Setup sigaltstack */ err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp); err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags); err |= __put_user(current->sas_ss_size, &sf->stack.ss_size); err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t)); if (!wsaved) { err |= copy_in_user((u64 __user *)sf, (u64 __user *)(regs->u_regs[UREG_FP] + STACK_BIAS), sizeof(struct reg_window)); } else { struct reg_window *rp; rp = 
&current_thread_info()->reg_window[wsaved - 1]; err |= copy_to_user(sf, rp, sizeof(struct reg_window)); } if (info) err |= copy_siginfo_to_user(&sf->info, info); else { err |= __put_user(signo, &sf->info.si_signo); err |= __put_user(SI_NOINFO, &sf->info.si_code); } if (err) goto sigsegv; /* 3. signal handler back-trampoline and parameters */ regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS; regs->u_regs[UREG_I0] = signo; regs->u_regs[UREG_I1] = (unsigned long) &sf->info; /* The sigcontext is passed in this way because of how it * is defined in GLIBC's /usr/include/bits/sigcontext.h * for sparc64. It includes the 128 bytes of siginfo_t. */ regs->u_regs[UREG_I2] = (unsigned long) &sf->info; /* 5. signal handler */ regs->tpc = (unsigned long) ka->sa.sa_handler; regs->tnpc = (regs->tpc + 4); if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } /* 4. return to kernel instructions */ regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; return 0; sigill: do_exit(SIGILL); return -EINVAL; sigsegv: force_sigsegv(signo, current); return -EFAULT; } static inline int handle_signal(unsigned long signr, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { sigset_t blocked; int err; err = setup_rt_frame(ka, regs, signr, oldset, (ka->sa.sa_flags & SA_SIGINFO) ? 
info : NULL); if (err) return err; sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NOMASK)) sigaddset(&blocked, signr); set_current_blocked(&blocked); tracehook_signal_handler(signr, info, ka, regs, 0); return 0; } static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, struct sigaction *sa) { switch (regs->u_regs[UREG_I0]) { case ERESTART_RESTARTBLOCK: case ERESTARTNOHAND: no_system_call_restart: regs->u_regs[UREG_I0] = EINTR; regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY); break; case ERESTARTSYS: if (!(sa->sa_flags & SA_RESTART)) goto no_system_call_restart; /* fallthrough */ case ERESTARTNOINTR: regs->u_regs[UREG_I0] = orig_i0; regs->tpc -= 4; regs->tnpc -= 4; } } /* Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) { struct k_sigaction ka; int restart_syscall; sigset_t *oldset; siginfo_t info; int signr; /* It's a lot of work and synchronization to add a new ptrace * register for GDB to save and restore in order to get * orig_i0 correct for syscall restarts when debugging. * * Although it should be the case that most of the global * registers are volatile across a system call, glibc already * depends upon that fact that we preserve them. So we can't * just use any global register to save away the orig_i0 value. * * In particular %g2, %g3, %g4, and %g5 are all assumed to be * preserved across a system call trap by various pieces of * code in glibc. * * %g7 is used as the "thread register". %g6 is not used in * any fixed manner. %g6 is used as a scratch register and * a compiler temporary, but it's value is never used across * a system call. Therefore %g6 is usable for orig_i0 storage. 
*/ if (pt_regs_is_syscall(regs) && (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) regs->u_regs[UREG_G6] = orig_i0; if (current_thread_info()->status & TS_RESTORE_SIGMASK) oldset = &current->saved_sigmask; else oldset = &current->blocked; #ifdef CONFIG_COMPAT if (test_thread_flag(TIF_32BIT)) { extern void do_signal32(sigset_t *, struct pt_regs *); do_signal32(oldset, regs); return; } #endif signr = get_signal_to_deliver(&info, &ka, regs, NULL); restart_syscall = 0; if (pt_regs_is_syscall(regs) && (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) { restart_syscall = 1; orig_i0 = regs->u_regs[UREG_G6]; } if (signr > 0) { if (restart_syscall) syscall_restart(orig_i0, regs, &ka.sa); if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { /* A signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, * and will be restored by sigreturn, so we can simply * clear the TS_RESTORE_SIGMASK flag. */ current_thread_info()->status &= ~TS_RESTORE_SIGMASK; } return; } if (restart_syscall && (regs->u_regs[UREG_I0] == ERESTARTNOHAND || regs->u_regs[UREG_I0] == ERESTARTSYS || regs->u_regs[UREG_I0] == ERESTARTNOINTR)) { /* replay the system call when we are done */ regs->u_regs[UREG_I0] = orig_i0; regs->tpc -= 4; regs->tnpc -= 4; pt_regs_clear_syscall(regs); } if (restart_syscall && regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { regs->u_regs[UREG_G1] = __NR_restart_syscall; regs->tpc -= 4; regs->tnpc -= 4; pt_regs_clear_syscall(regs); } /* If there's no signal to deliver, we just put the saved sigmask * back */ if (current_thread_info()->status & TS_RESTORE_SIGMASK) { current_thread_info()->status &= ~TS_RESTORE_SIGMASK; set_current_blocked(&current->saved_sigmask); } } void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags) { if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs, orig_i0); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); 
tracehook_notify_resume(regs); if (current->replacement_session_keyring) key_replace_session_keyring(); } }
gpl-2.0
herophj/linux_kerner_2_6
arch/x86/mm/kmemcheck/shadow.c
539
3558
#include <linux/kmemcheck.h> #include <linux/module.h> #include <linux/mm.h> #include <asm/page.h> #include <asm/pgtable.h> #include "pte.h" #include "shadow.h" /* * Return the shadow address for the given address. Returns NULL if the * address is not tracked. * * We need to be extremely careful not to follow any invalid pointers, * because this function can be called for *any* possible address. */ void *kmemcheck_shadow_lookup(unsigned long address) { pte_t *pte; struct page *page; if (!virt_addr_valid(address)) return NULL; pte = kmemcheck_pte_lookup(address); if (!pte) return NULL; page = virt_to_page(address); if (!page->shadow) return NULL; return page->shadow + (address & (PAGE_SIZE - 1)); } static void mark_shadow(void *address, unsigned int n, enum kmemcheck_shadow status) { unsigned long addr = (unsigned long) address; unsigned long last_addr = addr + n - 1; unsigned long page = addr & PAGE_MASK; unsigned long last_page = last_addr & PAGE_MASK; unsigned int first_n; void *shadow; /* If the memory range crosses a page boundary, stop there. */ if (page == last_page) first_n = n; else first_n = page + PAGE_SIZE - addr; shadow = kmemcheck_shadow_lookup(addr); if (shadow) memset(shadow, status, first_n); addr += first_n; n -= first_n; /* Do full-page memset()s. */ while (n >= PAGE_SIZE) { shadow = kmemcheck_shadow_lookup(addr); if (shadow) memset(shadow, status, PAGE_SIZE); addr += PAGE_SIZE; n -= PAGE_SIZE; } /* Do the remaining page, if any. */ if (n > 0) { shadow = kmemcheck_shadow_lookup(addr); if (shadow) memset(shadow, status, n); } } void kmemcheck_mark_unallocated(void *address, unsigned int n) { mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED); } void kmemcheck_mark_uninitialized(void *address, unsigned int n) { mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED); } /* * Fill the shadow memory of the given address such that the memory at that * address is marked as being initialized. 
*/ void kmemcheck_mark_initialized(void *address, unsigned int n) { mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED); } EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized); void kmemcheck_mark_freed(void *address, unsigned int n) { mark_shadow(address, n, KMEMCHECK_SHADOW_FREED); } void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n) { unsigned int i; for (i = 0; i < n; ++i) kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE); } void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n) { unsigned int i; for (i = 0; i < n; ++i) kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE); } void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n) { unsigned int i; for (i = 0; i < n; ++i) kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE); } enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size) { uint8_t *x; unsigned int i; x = shadow; #ifdef CONFIG_KMEMCHECK_PARTIAL_OK /* * Make sure _some_ bytes are initialized. Gcc frequently generates * code to access neighboring bytes. */ for (i = 0; i < size; ++i) { if (x[i] == KMEMCHECK_SHADOW_INITIALIZED) return x[i]; } #else /* All bytes must be initialized. */ for (i = 0; i < size; ++i) { if (x[i] != KMEMCHECK_SHADOW_INITIALIZED) return x[i]; } #endif return x[0]; } void kmemcheck_shadow_set(void *shadow, unsigned int size) { uint8_t *x; unsigned int i; x = shadow; for (i = 0; i < size; ++i) x[i] = KMEMCHECK_SHADOW_INITIALIZED; }
gpl-2.0
lukino563/normandy_lulz_kernel
drivers/usb/musb/musb_gadget.c
795
60636
/* * MUSB OTG driver peripheral support * * Copyright 2005 Mentor Graphics Corporation * Copyright (C) 2005-2006 by Texas Instruments * Copyright (C) 2006-2007 Nokia Corporation * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/module.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include "musb_core.h" /* MUSB PERIPHERAL status 3-mar-2006: * * - EP0 seems solid. It passes both USBCV and usbtest control cases. 
* Minor glitches: * * + remote wakeup to Linux hosts work, but saw USBCV failures; * in one test run (operator error?) * + endpoint halt tests -- in both usbtest and usbcv -- seem * to break when dma is enabled ... is something wrongly * clearing SENDSTALL? * * - Mass storage behaved ok when last tested. Network traffic patterns * (with lots of short transfers etc) need retesting; they turn up the * worst cases of the DMA, since short packets are typical but are not * required. * * - TX/IN * + both pio and dma behave in with network and g_zero tests * + no cppi throughput issues other than no-hw-queueing * + failed with FLAT_REG (DaVinci) * + seems to behave with double buffering, PIO -and- CPPI * + with gadgetfs + AIO, requests got lost? * * - RX/OUT * + both pio and dma behave in with network and g_zero tests * + dma is slow in typical case (short_not_ok is clear) * + double buffering ok with PIO * + double buffering *FAILS* with CPPI, wrong data bytes sometimes * + request lossage observed with gadgetfs * * - ISO not tested ... might work, but only weakly isochronous * * - Gadget driver disabling of softconnect during bind() is ignored; so * drivers can't hold off host requests until userspace is ready. * (Workaround: they can turn it off later.) * * - PORTABILITY (assumes PIO works): * + DaVinci, basically works with cppi dma * + OMAP 2430, ditto with mentor dma * + TUSB 6010, platform-specific dma in the works */ /* ----------------------------------------------------------------------- */ #define is_buffer_mapped(req) (is_dma_capable() && \ (req->map_state != UN_MAPPED)) /* Maps the buffer to dma */ static inline void map_dma_buffer(struct musb_request *request, struct musb *musb, struct musb_ep *musb_ep) { int compatible = true; struct dma_controller *dma = musb->dma_controller; request->map_state = UN_MAPPED; if (!is_dma_capable() || !musb_ep->dma) return; /* Check if DMA engine can handle this request. * DMA code must reject the USB request explicitly. 
* Default behaviour is to map the request. */ if (dma->is_compatible) compatible = dma->is_compatible(musb_ep->dma, musb_ep->packet_sz, request->request.buf, request->request.length); if (!compatible) return; if (request->request.dma == DMA_ADDR_INVALID) { request->request.dma = dma_map_single( musb->controller, request->request.buf, request->request.length, request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE); request->map_state = MUSB_MAPPED; } else { dma_sync_single_for_device(musb->controller, request->request.dma, request->request.length, request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE); request->map_state = PRE_MAPPED; } } /* Unmap the buffer from dma and maps it back to cpu */ static inline void unmap_dma_buffer(struct musb_request *request, struct musb *musb) { if (!is_buffer_mapped(request)) return; if (request->request.dma == DMA_ADDR_INVALID) { dev_vdbg(musb->controller, "not unmapping a never mapped buffer\n"); return; } if (request->map_state == MUSB_MAPPED) { dma_unmap_single(musb->controller, request->request.dma, request->request.length, request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE); request->request.dma = DMA_ADDR_INVALID; } else { /* PRE_MAPPED */ dma_sync_single_for_cpu(musb->controller, request->request.dma, request->request.length, request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE); } request->map_state = UN_MAPPED; } /* * Immediately complete a request. * * @param request the request to complete * @param status the status to complete the request with * Context: controller locked, IRQs blocked. 
*/ void musb_g_giveback( struct musb_ep *ep, struct usb_request *request, int status) __releases(ep->musb->lock) __acquires(ep->musb->lock) { struct musb_request *req; struct musb *musb; int busy = ep->busy; req = to_musb_request(request); list_del(&req->list); if (req->request.status == -EINPROGRESS) req->request.status = status; musb = req->musb; ep->busy = 1; spin_unlock(&musb->lock); unmap_dma_buffer(req, musb); if (request->status == 0) dev_dbg(musb->controller, "%s done request %p, %d/%d\n", ep->end_point.name, request, req->request.actual, req->request.length); else dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n", ep->end_point.name, request, req->request.actual, req->request.length, request->status); req->request.complete(&req->ep->end_point, &req->request); spin_lock(&musb->lock); ep->busy = busy; } /* ----------------------------------------------------------------------- */ /* * Abort requests queued to an endpoint using the status. Synchronous. * caller locked controller and blocked irqs, and selected this ep. */ static void nuke(struct musb_ep *ep, const int status) { struct musb *musb = ep->musb; struct musb_request *req = NULL; void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; ep->busy = 1; if (is_dma_capable() && ep->dma) { struct dma_controller *c = ep->musb->dma_controller; int value; if (ep->is_in) { /* * The programming guide says that we must not clear * the DMAMODE bit before DMAENAB, so we only * clear it in the second write... 
*/ musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO); musb_writew(epio, MUSB_TXCSR, 0 | MUSB_TXCSR_FLUSHFIFO); } else { musb_writew(epio, MUSB_RXCSR, 0 | MUSB_RXCSR_FLUSHFIFO); musb_writew(epio, MUSB_RXCSR, 0 | MUSB_RXCSR_FLUSHFIFO); } value = c->channel_abort(ep->dma); dev_dbg(musb->controller, "%s: abort DMA --> %d\n", ep->name, value); c->channel_release(ep->dma); ep->dma = NULL; } while (!list_empty(&ep->req_list)) { req = list_first_entry(&ep->req_list, struct musb_request, list); musb_g_giveback(ep, &req->request, status); } } /* ----------------------------------------------------------------------- */ /* Data transfers - pure PIO, pure DMA, or mixed mode */ /* * This assumes the separate CPPI engine is responding to DMA requests * from the usb core ... sequenced a bit differently from mentor dma. */ static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) { if (can_bulk_split(musb, ep->type)) return ep->hw_ep->max_packet_sz_tx; else return ep->packet_sz; } #ifdef CONFIG_USB_INVENTRA_DMA /* Peripheral tx (IN) using Mentor DMA works as follows: Only mode 0 is used for transfers <= wPktSize, mode 1 is used for larger transfers, One of the following happens: - Host sends IN token which causes an endpoint interrupt -> TxAvail -> if DMA is currently busy, exit. -> if queue is non-empty, txstate(). - Request is queued by the gadget driver. -> if queue was previously empty, txstate() txstate() -> start /\ -> setup DMA | (data is transferred to the FIFO, then sent out when | IN token(s) are recd from Host. | -> DMA interrupt on completion | calls TxAvail. | -> stop DMA, ~DMAENAB, | -> set TxPktRdy for last short pkt or zlp | -> Complete Request | -> Continue next request (call txstate) |___________________________________| * Non-Mentor DMA engines can of course work differently, such as by * upleveling from irq-per-packet to irq-per-buffer. */ #endif /* * An endpoint is transmitting data. 
This can be called either from * the IRQ routine or from ep.queue() to kickstart a request on an * endpoint. * * Context: controller locked, IRQs blocked, endpoint selected */ static void txstate(struct musb *musb, struct musb_request *req) { u8 epnum = req->epnum; struct musb_ep *musb_ep; void __iomem *epio = musb->endpoints[epnum].regs; struct usb_request *request; u16 fifo_count = 0, csr; int use_dma = 0; musb_ep = req->ep; /* we shouldn't get here while DMA is active ... but we do ... */ if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { dev_dbg(musb->controller, "dma pending...\n"); return; } /* read TXCSR before */ csr = musb_readw(epio, MUSB_TXCSR); request = &req->request; fifo_count = min(max_ep_writesize(musb, musb_ep), (int)(request->length - request->actual)); if (csr & MUSB_TXCSR_TXPKTRDY) { dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n", musb_ep->end_point.name, csr); return; } if (csr & MUSB_TXCSR_P_SENDSTALL) { dev_dbg(musb->controller, "%s stalling, txcsr %03x\n", musb_ep->end_point.name, csr); return; } dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n", epnum, musb_ep->packet_sz, fifo_count, csr); #ifndef CONFIG_MUSB_PIO_ONLY if (is_buffer_mapped(req)) { struct dma_controller *c = musb->dma_controller; size_t request_size; /* setup DMA, then program endpoint CSR */ request_size = min_t(size_t, request->length - request->actual, musb_ep->dma->max_len); use_dma = (request->dma != DMA_ADDR_INVALID); /* MUSB_TXCSR_P_ISO is still set correctly */ #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) { if (request_size < musb_ep->packet_sz) musb_ep->dma->desired_mode = 0; else musb_ep->dma->desired_mode = 1; use_dma = use_dma && c->channel_program( musb_ep->dma, musb_ep->packet_sz, musb_ep->dma->desired_mode, request->dma + request->actual, request_size); if (use_dma) { if (musb_ep->dma->desired_mode == 0) { /* * We must not clear the DMAMODE bit * before the DMAENAB bit -- 
and the * latter doesn't always get cleared * before we get here... */ csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB); musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_P_WZC_BITS); csr &= ~MUSB_TXCSR_DMAMODE; csr |= (MUSB_TXCSR_DMAENAB | MUSB_TXCSR_MODE); /* against programming guide */ } else { csr |= (MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE); /* * Enable Autoset according to table * below * bulk_split hb_mult Autoset_Enable * 0 0 Yes(Normal) * 0 >0 No(High BW ISO) * 1 0 Yes(HS bulk) * 1 >0 Yes(FS bulk) */ if (!musb_ep->hb_mult || (musb_ep->hb_mult && can_bulk_split(musb, musb_ep->type))) csr |= MUSB_TXCSR_AUTOSET; } csr &= ~MUSB_TXCSR_P_UNDERRUN; musb_writew(epio, MUSB_TXCSR, csr); } } #elif defined(CONFIG_USB_TI_CPPI_DMA) /* program endpoint CSR first, then setup DMA */ csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE; musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN) | csr); /* ensure writebuffer is empty */ csr = musb_readw(epio, MUSB_TXCSR); /* NOTE host side sets DMAENAB later than this; both are * OK since the transfer dma glue (between CPPI and Mentor * fifos) just tells CPPI it could start. Data only moves * to the USB TX fifo when both fifos are ready. */ /* "mode" is irrelevant here; handle terminating ZLPs like * PIO does, since the hardware RNDIS mode seems unreliable * except for the last-packet-is-already-short case. 
*/ use_dma = use_dma && c->channel_program( musb_ep->dma, musb_ep->packet_sz, 0, request->dma + request->actual, request_size); if (!use_dma) { c->channel_release(musb_ep->dma); musb_ep->dma = NULL; csr &= ~MUSB_TXCSR_DMAENAB; musb_writew(epio, MUSB_TXCSR, csr); /* invariant: prequest->buf is non-null */ } #elif defined(CONFIG_USB_TUSB_OMAP_DMA) use_dma = use_dma && c->channel_program( musb_ep->dma, musb_ep->packet_sz, request->zero, request->dma + request->actual, request_size); #endif } #endif if (!use_dma) { /* * Unmap the dma buffer back to cpu if dma channel * programming fails */ unmap_dma_buffer(req, musb); musb_write_fifo(musb_ep->hw_ep, fifo_count, (u8 *) (request->buf + request->actual)); request->actual += fifo_count; csr |= MUSB_TXCSR_TXPKTRDY; csr &= ~MUSB_TXCSR_P_UNDERRUN; musb_writew(epio, MUSB_TXCSR, csr); } /* host may already have the data when this message shows... */ dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n", musb_ep->end_point.name, use_dma ? "dma" : "pio", request->actual, request->length, musb_readw(epio, MUSB_TXCSR), fifo_count, musb_readw(epio, MUSB_TXMAXP)); } /* * FIFO state update (e.g. data ready). * Called from IRQ, with controller locked. */ void musb_g_tx(struct musb *musb, u8 epnum) { u16 csr; struct musb_request *req; struct usb_request *request; u8 __iomem *mbase = musb->mregs; struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; void __iomem *epio = musb->endpoints[epnum].regs; struct dma_channel *dma; musb_ep_select(mbase, epnum); req = next_request(musb_ep); request = &req->request; csr = musb_readw(epio, MUSB_TXCSR); dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr); dma = is_dma_capable() ? musb_ep->dma : NULL; /* * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX * probably rates reporting as a host error. 
*/ if (csr & MUSB_TXCSR_P_SENTSTALL) { csr |= MUSB_TXCSR_P_WZC_BITS; csr &= ~MUSB_TXCSR_P_SENTSTALL; musb_writew(epio, MUSB_TXCSR, csr); return; } if (csr & MUSB_TXCSR_P_UNDERRUN) { /* We NAKed, no big deal... little reason to care. */ csr |= MUSB_TXCSR_P_WZC_BITS; csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); musb_writew(epio, MUSB_TXCSR, csr); dev_vdbg(musb->controller, "underrun on ep%d, req %p\n", epnum, request); } if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { /* * SHOULD NOT HAPPEN... has with CPPI though, after * changing SENDSTALL (and other cases); harmless? */ dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name); return; } if (request) { u8 is_dma = 0; if (dma && (csr & MUSB_TXCSR_DMAENAB)) { is_dma = 1; csr |= MUSB_TXCSR_P_WZC_BITS; csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); musb_writew(epio, MUSB_TXCSR, csr); /* Ensure writebuffer is empty. */ csr = musb_readw(epio, MUSB_TXCSR); request->actual += musb_ep->dma->actual_len; dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n", epnum, csr, musb_ep->dma->actual_len, request); } /* * First, maybe a terminating short packet. Some DMA * engines might handle this by themselves. */ if ((request->zero && request->length && (request->length % musb_ep->packet_sz == 0) && (request->actual == request->length)) #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || (is_dma && (!dma->desired_mode || (request->actual & (musb_ep->packet_sz - 1)))) #endif ) { /* * On DMA completion, FIFO may not be * available yet... */ if (csr & MUSB_TXCSR_TXPKTRDY) return; dev_dbg(musb->controller, "sending zero pkt\n"); musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY); request->zero = 0; } if (request->actual == request->length) { musb_g_giveback(musb_ep, request, 0); /* * In the giveback function the MUSB lock is * released and acquired after sometime. 
During * this time period the INDEX register could get * changed by the gadget_queue function especially * on SMP systems. Reselect the INDEX to be sure * we are reading/modifying the right registers */ musb_ep_select(mbase, epnum); req = musb_ep->desc ? next_request(musb_ep) : NULL; if (!req) { dev_dbg(musb->controller, "%s idle now\n", musb_ep->end_point.name); return; } } txstate(musb, req); } } /* ------------------------------------------------------------ */ #ifdef CONFIG_USB_INVENTRA_DMA /* Peripheral rx (OUT) using Mentor DMA works as follows: - Only mode 0 is used. - Request is queued by the gadget class driver. -> if queue was previously empty, rxstate() - Host sends OUT token which causes an endpoint interrupt /\ -> RxReady | -> if request queued, call rxstate | /\ -> setup DMA | | -> DMA interrupt on completion | | -> RxReady | | -> stop DMA | | -> ack the read | | -> if data recd = max expected | | by the request, or host | | sent a short packet, | | complete the request, | | and start the next one. | |_____________________________________| | else just wait for the host | to send the next OUT token. |__________________________________________________| * Non-Mentor DMA engines can of course work differently. */ #endif /* * Context: controller locked, IRQs blocked, endpoint selected */ static void rxstate(struct musb *musb, struct musb_request *req) { const u8 epnum = req->epnum; struct usb_request *request = &req->request; struct musb_ep *musb_ep; void __iomem *epio = musb->endpoints[epnum].regs; unsigned fifo_count = 0; u16 len; u16 csr = musb_readw(epio, MUSB_RXCSR); struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; u8 use_mode_1; if (hw_ep->is_shared_fifo) musb_ep = &hw_ep->ep_in; else musb_ep = &hw_ep->ep_out; len = musb_ep->packet_sz; /* We shouldn't get here while DMA is active, but we do... 
*/ if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { dev_dbg(musb->controller, "DMA pending...\n"); return; } if (csr & MUSB_RXCSR_P_SENDSTALL) { dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n", musb_ep->end_point.name, csr); return; } if (is_cppi_enabled() && is_buffer_mapped(req)) { struct dma_controller *c = musb->dma_controller; struct dma_channel *channel = musb_ep->dma; /* NOTE: CPPI won't actually stop advancing the DMA * queue after short packet transfers, so this is almost * always going to run as IRQ-per-packet DMA so that * faults will be handled correctly. */ if (c->channel_program(channel, musb_ep->packet_sz, !request->short_not_ok, request->dma + request->actual, request->length - request->actual)) { /* make sure that if an rxpkt arrived after the irq, * the cppi engine will be ready to take it as soon * as DMA is enabled */ csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE); csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS; musb_writew(epio, MUSB_RXCSR, csr); return; } } if (csr & MUSB_RXCSR_RXPKTRDY) { len = musb_readw(epio, MUSB_RXCOUNT); /* * Enable Mode 1 on RX transfers only when short_not_ok flag * is set. Currently short_not_ok flag is set only from * file_storage and f_mass_storage drivers */ if (request->short_not_ok && len == musb_ep->packet_sz) use_mode_1 = 1; else use_mode_1 = 0; if (request->actual < request->length) { #ifdef CONFIG_USB_INVENTRA_DMA if (is_buffer_mapped(req)) { struct dma_controller *c; struct dma_channel *channel; int use_dma = 0; c = musb->dma_controller; channel = musb_ep->dma; /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in * mode 0 only. So we do not get endpoint interrupts due to DMA * completion. We only get interrupts from DMA controller. * * We could operate in DMA mode 1 if we knew the size of the tranfer * in advance. For mass storage class, request->length = what the host * sends, so that'd work. 
But for pretty much everything else, * request->length is routinely more than what the host sends. For * most these gadgets, end of is signified either by a short packet, * or filling the last byte of the buffer. (Sending extra data in * that last pckate should trigger an overflow fault.) But in mode 1, * we don't get DMA completion interrupt for short packets. * * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), * to get endpoint interrupt on every DMA req, but that didn't seem * to work reliably. * * REVISIT an updated g_file_storage can set req->short_not_ok, which * then becomes usable as a runtime "use mode 1" hint... */ /* Experimental: Mode1 works with mass storage use cases */ if (use_mode_1) { csr |= MUSB_RXCSR_AUTOCLEAR; musb_writew(epio, MUSB_RXCSR, csr); csr |= MUSB_RXCSR_DMAENAB; musb_writew(epio, MUSB_RXCSR, csr); /* * this special sequence (enabling and then * disabling MUSB_RXCSR_DMAMODE) is required * to get DMAReq to activate */ musb_writew(epio, MUSB_RXCSR, csr | MUSB_RXCSR_DMAMODE); musb_writew(epio, MUSB_RXCSR, csr); } else { if (!musb_ep->hb_mult && musb_ep->hw_ep->rx_double_buffered) csr |= MUSB_RXCSR_AUTOCLEAR; csr |= MUSB_RXCSR_DMAENAB; musb_writew(epio, MUSB_RXCSR, csr); } if (request->actual < request->length) { int transfer_size = 0; if (use_mode_1) { transfer_size = min(request->length - request->actual, channel->max_len); musb_ep->dma->desired_mode = 1; } else { transfer_size = min(request->length - request->actual, (unsigned)len); musb_ep->dma->desired_mode = 0; } use_dma = c->channel_program( channel, musb_ep->packet_sz, channel->desired_mode, request->dma + request->actual, transfer_size); } if (use_dma) return; } #elif defined(CONFIG_USB_UX500_DMA) if ((is_buffer_mapped(req)) && (request->actual < request->length)) { struct dma_controller *c; struct dma_channel *channel; int transfer_size = 0; c = musb->dma_controller; channel = musb_ep->dma; /* In case first packet is short */ if (len < musb_ep->packet_sz) 
transfer_size = len; else if (request->short_not_ok) transfer_size = min(request->length - request->actual, channel->max_len); else transfer_size = min(request->length - request->actual, (unsigned)len); csr &= ~MUSB_RXCSR_DMAMODE; csr |= (MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR); musb_writew(epio, MUSB_RXCSR, csr); if (transfer_size <= musb_ep->packet_sz) { musb_ep->dma->desired_mode = 0; } else { musb_ep->dma->desired_mode = 1; /* Mode must be set after DMAENAB */ csr |= MUSB_RXCSR_DMAMODE; musb_writew(epio, MUSB_RXCSR, csr); } if (c->channel_program(channel, musb_ep->packet_sz, channel->desired_mode, request->dma + request->actual, transfer_size)) return; } #endif /* Mentor's DMA */ fifo_count = request->length - request->actual; dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", musb_ep->end_point.name, len, fifo_count, musb_ep->packet_sz); fifo_count = min_t(unsigned, len, fifo_count); #ifdef CONFIG_USB_TUSB_OMAP_DMA if (tusb_dma_omap() && is_buffer_mapped(req)) { struct dma_controller *c = musb->dma_controller; struct dma_channel *channel = musb_ep->dma; u32 dma_addr = request->dma + request->actual; int ret; ret = c->channel_program(channel, musb_ep->packet_sz, channel->desired_mode, dma_addr, fifo_count); if (ret) return; } #endif /* * Unmap the dma buffer back to cpu if dma channel * programming fails. This buffer is mapped if the * channel allocation is successful */ if (is_buffer_mapped(req)) { unmap_dma_buffer(req, musb); /* * Clear DMAENAB and AUTOCLEAR for the * PIO mode transfer */ csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR); musb_writew(epio, MUSB_RXCSR, csr); } musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) (request->buf + request->actual)); request->actual += fifo_count; /* REVISIT if we left anything in the fifo, flush * it and report -EOVERFLOW */ /* ack the read! 
*/ csr |= MUSB_RXCSR_P_WZC_BITS; csr &= ~MUSB_RXCSR_RXPKTRDY; musb_writew(epio, MUSB_RXCSR, csr); } } /* reach the end or short packet detected */ if (request->actual == request->length || len < musb_ep->packet_sz) musb_g_giveback(musb_ep, request, 0); } /* * Data ready for a request; called from IRQ */ void musb_g_rx(struct musb *musb, u8 epnum) { u16 csr; struct musb_request *req; struct usb_request *request; void __iomem *mbase = musb->mregs; struct musb_ep *musb_ep; void __iomem *epio = musb->endpoints[epnum].regs; struct dma_channel *dma; struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; if (hw_ep->is_shared_fifo) musb_ep = &hw_ep->ep_in; else musb_ep = &hw_ep->ep_out; musb_ep_select(mbase, epnum); req = next_request(musb_ep); if (!req) return; request = &req->request; csr = musb_readw(epio, MUSB_RXCSR); dma = is_dma_capable() ? musb_ep->dma : NULL; dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name, csr, dma ? " (dma)" : "", request); if (csr & MUSB_RXCSR_P_SENTSTALL) { csr |= MUSB_RXCSR_P_WZC_BITS; csr &= ~MUSB_RXCSR_P_SENTSTALL; musb_writew(epio, MUSB_RXCSR, csr); return; } if (csr & MUSB_RXCSR_P_OVERRUN) { /* csr |= MUSB_RXCSR_P_WZC_BITS; */ csr &= ~MUSB_RXCSR_P_OVERRUN; musb_writew(epio, MUSB_RXCSR, csr); dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request); if (request->status == -EINPROGRESS) request->status = -EOVERFLOW; } if (csr & MUSB_RXCSR_INCOMPRX) { /* REVISIT not necessarily an error */ dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name); } if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { /* "should not happen"; likely RXPKTRDY pending for DMA */ dev_dbg(musb->controller, "%s busy, csr %04x\n", musb_ep->end_point.name, csr); return; } if (dma && (csr & MUSB_RXCSR_DMAENAB)) { csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB | MUSB_RXCSR_DMAMODE); musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_P_WZC_BITS | csr); request->actual += musb_ep->dma->actual_len; 
/*
 * NOTE(review): this chunk is MUSB peripheral-mode (gadget) controller
 * code — it does not match the ci13xxx_udc.c header at the top of the
 * file and looks like a concatenation artifact; confirm against the
 * repository.  The fragment below is the tail of the RX-completion
 * path (musb_g_rx): it acks short DMA packets, gives the request back,
 * reselects the endpoint INDEX (giveback drops the lock), and restarts
 * I/O if more requests are queued.
 */
dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", epnum, csr, musb_readw(epio, MUSB_RXCSR), musb_ep->dma->actual_len, request); #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ defined(CONFIG_USB_UX500_DMA) /* Autoclear doesn't clear RxPktRdy for short packets */ if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered) || (dma->actual_len & (musb_ep->packet_sz - 1))) { /* ack the read! */ csr &= ~MUSB_RXCSR_RXPKTRDY; musb_writew(epio, MUSB_RXCSR, csr); } /* incomplete, and not short? wait for next IN packet */ if ((request->actual < request->length) && (musb_ep->dma->actual_len == musb_ep->packet_sz)) { /* In double buffer case, continue to unload fifo if * there is Rx packet in FIFO. **/ csr = musb_readw(epio, MUSB_RXCSR); if ((csr & MUSB_RXCSR_RXPKTRDY) && hw_ep->rx_double_buffered) goto exit; return; } #endif musb_g_giveback(musb_ep, request, 0); /* * In the giveback function the MUSB lock is * released and acquired after sometime. During * this time period the INDEX register could get * changed by the gadget_queue function especially * on SMP systems.
Reselect the INDEX to be sure * we are reading/modifying the right registers */ musb_ep_select(mbase, epnum); req = next_request(musb_ep); if (!req) return; } #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ defined(CONFIG_USB_UX500_DMA) exit: #endif /* Analyze request */ rxstate(musb, req); } /* ------------------------------------------------------------ */
/*
 * usb_ep_ops.enable: configure the hardware endpoint from @desc —
 * validates direction/maxpacket, programs TX/RXMAXP and the CSR,
 * unmasks the endpoint interrupt and tries to allocate a DMA channel.
 * Returns 0 or a negative errno (-EBUSY if already enabled, -EINVAL on
 * a bad descriptor or a maxpacket beyond the hardware FIFO).
 */
static int musb_gadget_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) { unsigned long flags; struct musb_ep *musb_ep; struct musb_hw_ep *hw_ep; void __iomem *regs; struct musb *musb; void __iomem *mbase; u8 epnum; u16 csr; unsigned tmp; int status = -EINVAL; if (!ep || !desc) return -EINVAL; musb_ep = to_musb_ep(ep); hw_ep = musb_ep->hw_ep; regs = hw_ep->regs; musb = musb_ep->musb; mbase = musb->mregs; epnum = musb_ep->current_epnum; spin_lock_irqsave(&musb->lock, flags); if (musb_ep->desc) { status = -EBUSY; goto fail; } musb_ep->type = usb_endpoint_type(desc); /* check direction and (later) maxpacket size against endpoint */ if (usb_endpoint_num(desc) != epnum) goto fail; /* REVISIT this rules out high bandwidth periodic transfers */ tmp = usb_endpoint_maxp(desc); if (tmp & ~0x07ff) { int ok; if (usb_endpoint_dir_in(desc)) ok = musb->hb_iso_tx; else ok = musb->hb_iso_rx; if (!ok) { dev_dbg(musb->controller, "no support for high bandwidth ISO\n"); goto fail; } musb_ep->hb_mult = (tmp >> 11) & 3; } else { musb_ep->hb_mult = 0; } musb_ep->packet_sz = tmp & 0x7ff; tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1); /* enable the interrupts for the endpoint, set the endpoint * packet size (or fail), set the mode, clear the fifo */ musb_ep_select(mbase, epnum); if (usb_endpoint_dir_in(desc)) { u16 int_txe = musb_readw(mbase, MUSB_INTRTXE); if (hw_ep->is_shared_fifo) musb_ep->is_in = 1; if (!musb_ep->is_in) goto fail; if (tmp > hw_ep->max_packet_sz_tx) { dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); goto fail; } int_txe |= (1 << epnum);
musb_writew(mbase, MUSB_INTRTXE, int_txe); /* REVISIT if can_bulk_split(), use by updating "tmp"; * likewise high bandwidth periodic tx */ /* Set TXMAXP with the FIFO size of the endpoint * to disable double buffering mode. */ if (musb->double_buffer_not_ok) { musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); } else { if (can_bulk_split(musb, musb_ep->type)) musb_ep->hb_mult = (hw_ep->max_packet_sz_tx / musb_ep->packet_sz) - 1; musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); } csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; if (musb_readw(regs, MUSB_TXCSR) & MUSB_TXCSR_FIFONOTEMPTY) csr |= MUSB_TXCSR_FLUSHFIFO; if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) csr |= MUSB_TXCSR_P_ISO; /* set twice in case of double buffering */ musb_writew(regs, MUSB_TXCSR, csr); /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ musb_writew(regs, MUSB_TXCSR, csr); } else { u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE); if (hw_ep->is_shared_fifo) musb_ep->is_in = 0; if (musb_ep->is_in) goto fail; if (tmp > hw_ep->max_packet_sz_rx) { dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); goto fail; } int_rxe |= (1 << epnum); musb_writew(mbase, MUSB_INTRRXE, int_rxe); /* REVISIT if can_bulk_combine() use by updating "tmp" * likewise high bandwidth periodic rx */ /* Set RXMAXP with the FIFO size of the endpoint * to disable double buffering mode.
*/ if (musb->double_buffer_not_ok) musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx); else musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); /* force shared fifo to OUT-only mode */ if (hw_ep->is_shared_fifo) { csr = musb_readw(regs, MUSB_TXCSR); csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY); musb_writew(regs, MUSB_TXCSR, csr); } csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) csr |= MUSB_RXCSR_P_ISO; else if (musb_ep->type == USB_ENDPOINT_XFER_INT) csr |= MUSB_RXCSR_DISNYET; /* set twice in case of double buffering */ musb_writew(regs, MUSB_RXCSR, csr); musb_writew(regs, MUSB_RXCSR, csr); } /* NOTE: all the I/O code _should_ work fine without DMA, in case * for some reason you run out of channels here. */ if (is_dma_capable() && musb->dma_controller) { struct dma_controller *c = musb->dma_controller; musb_ep->dma = c->channel_alloc(c, hw_ep, (desc->bEndpointAddress & USB_DIR_IN)); } else musb_ep->dma = NULL; musb_ep->desc = desc; musb_ep->busy = 0; musb_ep->wedged = 0; status = 0; pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", musb_driver_name, musb_ep->end_point.name, ({ char *s; switch (musb_ep->type) { case USB_ENDPOINT_XFER_BULK: s = "bulk"; break; case USB_ENDPOINT_XFER_INT: s = "int"; break; default: s = "iso"; break; }; s; }), musb_ep->is_in ? "IN" : "OUT", musb_ep->dma ? "dma, " : "", musb_ep->packet_sz); schedule_work(&musb->irq_work); fail: spin_unlock_irqrestore(&musb->lock, flags); return status; } /* * Disable an endpoint flushing all requests queued.
*/ static int musb_gadget_disable(struct usb_ep *ep) { unsigned long flags; struct musb *musb; u8 epnum; struct musb_ep *musb_ep; void __iomem *epio; int status = 0; musb_ep = to_musb_ep(ep); musb = musb_ep->musb; epnum = musb_ep->current_epnum; epio = musb->endpoints[epnum].regs; spin_lock_irqsave(&musb->lock, flags); musb_ep_select(musb->mregs, epnum); /* zero the endpoint sizes */ if (musb_ep->is_in) { u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE); int_txe &= ~(1 << epnum); musb_writew(musb->mregs, MUSB_INTRTXE, int_txe); musb_writew(epio, MUSB_TXMAXP, 0); } else { u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE); int_rxe &= ~(1 << epnum); musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe); musb_writew(epio, MUSB_RXMAXP, 0); } musb_ep->desc = NULL; musb_ep->end_point.desc = NULL; /* abort all pending DMA and requests */ nuke(musb_ep, -ESHUTDOWN); schedule_work(&musb->irq_work); spin_unlock_irqrestore(&(musb->lock), flags); dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name); return status; } /* * Allocate a request for an endpoint. * Reused by ep0 code. */ struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) { struct musb_ep *musb_ep = to_musb_ep(ep); struct musb *musb = musb_ep->musb; struct musb_request *request = NULL; request = kzalloc(sizeof *request, gfp_flags); if (!request) { dev_dbg(musb->controller, "not enough memory\n"); return NULL; } request->request.dma = DMA_ADDR_INVALID; request->epnum = musb_ep->current_epnum; request->ep = musb_ep; return &request->request; } /* * Free a request * Reused by ep0 code. */ void musb_free_request(struct usb_ep *ep, struct usb_request *req) { kfree(to_musb_request(req)); } static LIST_HEAD(buffers); struct free_record { struct list_head list; struct device *dev; unsigned bytes; dma_addr_t dma; }; /* * Context: controller locked, IRQs blocked.
*/ void musb_ep_restart(struct musb *musb, struct musb_request *req) { dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n", req->tx ? "TX/IN" : "RX/OUT", &req->request, req->request.length, req->epnum); musb_ep_select(musb->mregs, req->epnum); if (req->tx) txstate(musb, req); else rxstate(musb, req); }
/*
 * usb_ep_ops.queue: map the request for DMA, link it on the endpoint's
 * request list and kick the hardware if the list was previously empty.
 * Returns -ESHUTDOWN when the endpoint has been disabled.
 */
static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags) { struct musb_ep *musb_ep; struct musb_request *request; struct musb *musb; int status = 0; unsigned long lockflags; if (!ep || !req) return -EINVAL; if (!req->buf) return -ENODATA; musb_ep = to_musb_ep(ep); musb = musb_ep->musb; request = to_musb_request(req); request->musb = musb; if (request->ep != musb_ep) return -EINVAL; dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req); /* request is mine now... */ request->request.actual = 0; request->request.status = -EINPROGRESS; request->epnum = musb_ep->current_epnum; request->tx = musb_ep->is_in; map_dma_buffer(request, musb, musb_ep); spin_lock_irqsave(&musb->lock, lockflags); /* don't queue if the ep is down */ if (!musb_ep->desc) { dev_dbg(musb->controller, "req %p queued to %s while ep %s\n", req, ep->name, "disabled"); status = -ESHUTDOWN; goto cleanup; } /* add request to the list */ list_add_tail(&request->list, &musb_ep->req_list); /* it this is the head of the queue, start i/o ...
*/ if (!musb_ep->busy && &request->list == musb_ep->req_list.next) musb_ep_restart(musb, request); cleanup: spin_unlock_irqrestore(&musb->lock, lockflags); return status; }
/*
 * usb_ep_ops.dequeue: remove a queued request; aborts the in-flight
 * DMA transfer when the request is already at the head of the hardware
 * queue, then completes it with -ECONNRESET.
 */
static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) { struct musb_ep *musb_ep = to_musb_ep(ep); struct musb_request *req = to_musb_request(request); struct musb_request *r; unsigned long flags; int status = 0; struct musb *musb = musb_ep->musb; if (!ep || !request || to_musb_request(request)->ep != musb_ep) return -EINVAL; spin_lock_irqsave(&musb->lock, flags); list_for_each_entry(r, &musb_ep->req_list, list) { if (r == req) break; } if (r != req) { dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name); status = -EINVAL; goto done; } /* if the hardware doesn't have the request, easy ... */ if (musb_ep->req_list.next != &req->list || musb_ep->busy) musb_g_giveback(musb_ep, request, -ECONNRESET); /* ... else abort the dma transfer ... */ else if (is_dma_capable() && musb_ep->dma) { struct dma_controller *c = musb->dma_controller; musb_ep_select(musb->mregs, musb_ep->current_epnum); if (c->channel_abort) status = c->channel_abort(musb_ep->dma); else status = -EBUSY; if (status == 0) musb_g_giveback(musb_ep, request, -ECONNRESET); } else { /* NOTE: by sticking to easily tested hardware/driver states, * we leave counting of in-flight packets imprecise. */ musb_g_giveback(musb_ep, request, -ECONNRESET); } done: spin_unlock_irqrestore(&musb->lock, flags); return status; } /* * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any * data but will queue requests.
* * exported to ep0 code */ static int musb_gadget_set_halt(struct usb_ep *ep, int value) { struct musb_ep *musb_ep = to_musb_ep(ep); u8 epnum = musb_ep->current_epnum; struct musb *musb = musb_ep->musb; void __iomem *epio = musb->endpoints[epnum].regs; void __iomem *mbase; unsigned long flags; u16 csr; struct musb_request *request; int status = 0; if (!ep) return -EINVAL; mbase = musb->mregs; spin_lock_irqsave(&musb->lock, flags); if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) { status = -EINVAL; goto done; } musb_ep_select(mbase, epnum); request = next_request(musb_ep); if (value) { if (request) { dev_dbg(musb->controller, "request in progress, cannot halt %s\n", ep->name); status = -EAGAIN; goto done; } /* Cannot portably stall with non-empty FIFO */ if (musb_ep->is_in) { csr = musb_readw(epio, MUSB_TXCSR); if (csr & MUSB_TXCSR_FIFONOTEMPTY) { dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name); status = -EAGAIN; goto done; } } } else musb_ep->wedged = 0; /* set/clear the stall and toggle bits */ dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ?
"set" : "clear"); if (musb_ep->is_in) { csr = musb_readw(epio, MUSB_TXCSR); csr |= MUSB_TXCSR_P_WZC_BITS | MUSB_TXCSR_CLRDATATOG; if (value) csr |= MUSB_TXCSR_P_SENDSTALL; else csr &= ~(MUSB_TXCSR_P_SENDSTALL | MUSB_TXCSR_P_SENTSTALL); csr &= ~MUSB_TXCSR_TXPKTRDY; musb_writew(epio, MUSB_TXCSR, csr); } else { csr = musb_readw(epio, MUSB_RXCSR); csr |= MUSB_RXCSR_P_WZC_BITS | MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; if (value) csr |= MUSB_RXCSR_P_SENDSTALL; else csr &= ~(MUSB_RXCSR_P_SENDSTALL | MUSB_RXCSR_P_SENTSTALL); musb_writew(epio, MUSB_RXCSR, csr); } /* maybe start the first request in the queue */ if (!musb_ep->busy && !value && request) { dev_dbg(musb->controller, "restarting the request\n"); musb_ep_restart(musb, request); } done: spin_unlock_irqrestore(&musb->lock, flags); return status; } /* * Sets the halt feature with the clear requests ignored */ static int musb_gadget_set_wedge(struct usb_ep *ep) { struct musb_ep *musb_ep = to_musb_ep(ep); if (!ep) return -EINVAL; musb_ep->wedged = 1; return usb_ep_set_halt(ep); } static int musb_gadget_fifo_status(struct usb_ep *ep) { struct musb_ep *musb_ep = to_musb_ep(ep); void __iomem *epio = musb_ep->hw_ep->regs; int retval = -EINVAL; if (musb_ep->desc && !musb_ep->is_in) { struct musb *musb = musb_ep->musb; int epnum = musb_ep->current_epnum; void __iomem *mbase = musb->mregs; unsigned long flags; spin_lock_irqsave(&musb->lock, flags); musb_ep_select(mbase, epnum); /* FIXME return zero unless RXPKTRDY is set */ retval = musb_readw(epio, MUSB_RXCOUNT); spin_unlock_irqrestore(&musb->lock, flags); } return retval; }
/*
 * usb_ep_ops.fifo_flush: discard any data sitting in the endpoint FIFO,
 * with the endpoint's TX interrupt masked for the duration.
 */
static void musb_gadget_fifo_flush(struct usb_ep *ep) { struct musb_ep *musb_ep = to_musb_ep(ep); struct musb *musb = musb_ep->musb; u8 epnum = musb_ep->current_epnum; void __iomem *epio = musb->endpoints[epnum].regs; void __iomem *mbase; unsigned long flags; u16 csr, int_txe; mbase = musb->mregs; spin_lock_irqsave(&musb->lock, flags); musb_ep_select(mbase, (u8) epnum); /* disable interrupts */
int_txe = musb_readw(mbase, MUSB_INTRTXE); musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); if (musb_ep->is_in) { csr = musb_readw(epio, MUSB_TXCSR); if (csr & MUSB_TXCSR_FIFONOTEMPTY) { csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS; /* * Setting both TXPKTRDY and FLUSHFIFO makes controller * to interrupt current FIFO loading, but not flushing * the already loaded ones. */ csr &= ~MUSB_TXCSR_TXPKTRDY; musb_writew(epio, MUSB_TXCSR, csr); /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ musb_writew(epio, MUSB_TXCSR, csr); } } else { csr = musb_readw(epio, MUSB_RXCSR); csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS; musb_writew(epio, MUSB_RXCSR, csr); musb_writew(epio, MUSB_RXCSR, csr); } /* re-enable interrupt */ musb_writew(mbase, MUSB_INTRTXE, int_txe); spin_unlock_irqrestore(&musb->lock, flags); } static const struct usb_ep_ops musb_ep_ops = { .enable = musb_gadget_enable, .disable = musb_gadget_disable, .alloc_request = musb_alloc_request, .free_request = musb_free_request, .queue = musb_gadget_queue, .dequeue = musb_gadget_dequeue, .set_halt = musb_gadget_set_halt, .set_wedge = musb_gadget_set_wedge, .fifo_status = musb_gadget_fifo_status, .fifo_flush = musb_gadget_fifo_flush }; /* ----------------------------------------------------------------------- */ static int musb_gadget_get_frame(struct usb_gadget *gadget) { struct musb *musb = gadget_to_musb(gadget); return (int)musb_readw(musb->mregs, MUSB_FRAME); }
/*
 * usb_gadget_ops.wakeup: issue remote wakeup (B_PERIPHERAL, suspended)
 * or start an SRP session (B_IDLE); drives MUSB_POWER.RESUME for ~2 ms.
 */
static int musb_gadget_wakeup(struct usb_gadget *gadget) { struct musb *musb = gadget_to_musb(gadget); void __iomem *mregs = musb->mregs; unsigned long flags; int status = -EINVAL; u8 power, devctl; int retries; spin_lock_irqsave(&musb->lock, flags); switch (musb->xceiv->state) { case OTG_STATE_B_PERIPHERAL: /* NOTE: OTG state machine doesn't include B_SUSPENDED; * that's part of the standard usb 1.1 state machine, and * doesn't affect OTG transitions.
*/ if (musb->may_wakeup && musb->is_suspended) break; goto done; case OTG_STATE_B_IDLE: /* Start SRP ... OTG not required. */ devctl = musb_readb(mregs, MUSB_DEVCTL); dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl); devctl |= MUSB_DEVCTL_SESSION; musb_writeb(mregs, MUSB_DEVCTL, devctl); devctl = musb_readb(mregs, MUSB_DEVCTL); retries = 100; while (!(devctl & MUSB_DEVCTL_SESSION)) { devctl = musb_readb(mregs, MUSB_DEVCTL); if (retries-- < 1) break; } retries = 10000; while (devctl & MUSB_DEVCTL_SESSION) { devctl = musb_readb(mregs, MUSB_DEVCTL); if (retries-- < 1) break; } spin_unlock_irqrestore(&musb->lock, flags); otg_start_srp(musb->xceiv->otg); spin_lock_irqsave(&musb->lock, flags); /* Block idling for at least 1s */ musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(1 * HZ)); status = 0; goto done; default: dev_dbg(musb->controller, "Unhandled wake: %s\n", otg_state_string(musb->xceiv->state)); goto done; } status = 0; power = musb_readb(mregs, MUSB_POWER); power |= MUSB_POWER_RESUME; musb_writeb(mregs, MUSB_POWER, power); dev_dbg(musb->controller, "issue wakeup\n"); /* FIXME do this next chunk in a timer callback, no udelay */ mdelay(2); power = musb_readb(mregs, MUSB_POWER); power &= ~MUSB_POWER_RESUME; musb_writeb(mregs, MUSB_POWER, power); done: spin_unlock_irqrestore(&musb->lock, flags); return status; } static int musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered) { struct musb *musb = gadget_to_musb(gadget); musb->is_self_powered = !!is_selfpowered; return 0; } static void musb_pullup(struct musb *musb, int is_on) { u8 power; power = musb_readb(musb->mregs, MUSB_POWER); if (is_on) power |= MUSB_POWER_SOFTCONN; else power &= ~MUSB_POWER_SOFTCONN; /* FIXME if on, HdrcStart; if off, HdrcStop */ dev_dbg(musb->controller, "gadget D+ pullup %s\n", is_on ?
"on" : "off"); musb_writeb(musb->mregs, MUSB_POWER, power); } #if 0 static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active) { dev_dbg(musb->controller, "<= %s =>\n", __func__); /* * FIXME iff driver's softconnect flag is set (as it is during probe, * though that can clear it), just musb_pullup(). */ return -EINVAL; } #endif static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) { struct musb *musb = gadget_to_musb(gadget); if (!musb->xceiv->set_power) return -EOPNOTSUPP; return usb_phy_set_power(musb->xceiv, mA); }
/*
 * usb_gadget_ops.pullup: software connect/disconnect via the SOFTCONN
 * bit; takes a runtime-PM reference around the register access.
 */
static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) { struct musb *musb = gadget_to_musb(gadget); unsigned long flags; is_on = !!is_on; pm_runtime_get_sync(musb->controller); /* NOTE: this assumes we are sensing vbus; we'd rather * not pullup unless the B-session is active. */ spin_lock_irqsave(&musb->lock, flags); if (is_on != musb->softconnect) { musb->softconnect = is_on; musb_pullup(musb, is_on); } spin_unlock_irqrestore(&musb->lock, flags); pm_runtime_put(musb->controller); return 0; } static int musb_gadget_start(struct usb_gadget *g, struct usb_gadget_driver *driver); static int musb_gadget_stop(struct usb_gadget *g, struct usb_gadget_driver *driver); static const struct usb_gadget_ops musb_gadget_operations = { .get_frame = musb_gadget_get_frame, .wakeup = musb_gadget_wakeup, .set_selfpowered = musb_gadget_set_self_powered, /* .vbus_session = musb_gadget_vbus_session, */ .vbus_draw = musb_gadget_vbus_draw, .pullup = musb_gadget_pullup, .udc_start = musb_gadget_start, .udc_stop = musb_gadget_stop, }; /* ----------------------------------------------------------------------- */ /* Registration */ /* Only this registration code "knows" the rule (from USB standards) * about there being only one external upstream port. It assumes * all peripheral ports are external...
*/ static void musb_gadget_release(struct device *dev) { /* kref_put(WHAT) */ dev_dbg(dev, "%s\n", __func__); } static void __devinit init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) { struct musb_hw_ep *hw_ep = musb->endpoints + epnum; memset(ep, 0, sizeof *ep); ep->current_epnum = epnum; ep->musb = musb; ep->hw_ep = hw_ep; ep->is_in = is_in; INIT_LIST_HEAD(&ep->req_list); sprintf(ep->name, "ep%d%s", epnum, (!epnum || hw_ep->is_shared_fifo) ? "" : ( is_in ? "in" : "out")); ep->end_point.name = ep->name; INIT_LIST_HEAD(&ep->end_point.ep_list); if (!epnum) { ep->end_point.maxpacket = 64; ep->end_point.ops = &musb_g_ep0_ops; musb->g.ep0 = &ep->end_point; } else { if (is_in) ep->end_point.maxpacket = hw_ep->max_packet_sz_tx; else ep->end_point.maxpacket = hw_ep->max_packet_sz_rx; ep->end_point.ops = &musb_ep_ops; list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list); } } /* * Initialize the endpoints exposed to peripheral drivers, with backlinks * to the rest of the driver state. */ static inline void __devinit musb_g_init_endpoints(struct musb *musb) { u8 epnum; struct musb_hw_ep *hw_ep; unsigned count = 0; /* initialize endpoint list just once */ INIT_LIST_HEAD(&(musb->g.ep_list)); for (epnum = 0, hw_ep = musb->endpoints; epnum < musb->nr_endpoints; epnum++, hw_ep++) { if (hw_ep->is_shared_fifo /* || !epnum */) { init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0); count++; } else { if (hw_ep->max_packet_sz_tx) { init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 1); count++; } if (hw_ep->max_packet_sz_rx) { init_peripheral_ep(musb, &hw_ep->ep_out, epnum, 0); count++; } } } } /* called once during driver setup to initialize and link into * the driver model; memory is zeroed. */ int __devinit musb_gadget_setup(struct musb *musb) { int status; /* REVISIT minor race: if (erroneously) setting up two * musb peripherals at the same time, only the bus lock * is probably held.
*/ musb->g.ops = &musb_gadget_operations; musb->g.max_speed = USB_SPEED_HIGH; musb->g.speed = USB_SPEED_UNKNOWN; /* this "gadget" abstracts/virtualizes the controller */ dev_set_name(&musb->g.dev, "gadget"); musb->g.dev.parent = musb->controller; musb->g.dev.dma_mask = musb->controller->dma_mask; musb->g.dev.release = musb_gadget_release; musb->g.name = musb_driver_name; if (is_otg_enabled(musb)) musb->g.is_otg = 1; musb_g_init_endpoints(musb); musb->is_active = 0; musb_platform_try_idle(musb, 0); status = device_register(&musb->g.dev); if (status != 0) { put_device(&musb->g.dev); return status; } status = usb_add_gadget_udc(musb->controller, &musb->g); if (status) goto err; return 0; err: musb->g.dev.parent = NULL; device_unregister(&musb->g.dev); return status; } void musb_gadget_cleanup(struct musb *musb) { usb_del_gadget_udc(&musb->g); if (musb->g.dev.parent) device_unregister(&musb->g.dev); } /* * Register the gadget driver. Used by gadget drivers when * registering themselves with the controller. * * -EINVAL something went wrong (not driver) * -EBUSY another gadget is already using the controller * -ENOMEM no memory to perform the operation * * @param driver the gadget driver * @return <0 if error, 0 if everything is fine */ static int musb_gadget_start(struct usb_gadget *g, struct usb_gadget_driver *driver) { struct musb *musb = gadget_to_musb(g); struct usb_otg *otg = musb->xceiv->otg; unsigned long flags; int retval = -EINVAL; if (driver->max_speed < USB_SPEED_HIGH) goto err0; pm_runtime_get_sync(musb->controller); dev_dbg(musb->controller, "registering driver %s\n", driver->function); musb->softconnect = 0; musb->gadget_driver = driver; spin_lock_irqsave(&musb->lock, flags); musb->is_active = 1; otg_set_peripheral(otg, &musb->g); musb->xceiv->state = OTG_STATE_B_IDLE; /* * FIXME this ignores the softconnect flag.
Drivers are * allowed hold the peripheral inactive until for example * userspace hooks up printer hardware or DSP codecs, so * hosts only see fully functional devices. */ if (!is_otg_enabled(musb)) musb_start(musb); spin_unlock_irqrestore(&musb->lock, flags); if (is_otg_enabled(musb)) { struct usb_hcd *hcd = musb_to_hcd(musb); dev_dbg(musb->controller, "OTG startup...\n"); /* REVISIT: funcall to other code, which also * handles power budgeting ... this way also * ensures HdrcStart is indirectly called. */ retval = usb_add_hcd(musb_to_hcd(musb), 0, 0); if (retval < 0) { dev_dbg(musb->controller, "add_hcd failed, %d\n", retval); goto err2; } if ((musb->xceiv->last_event == USB_EVENT_ID) && otg->set_vbus) otg_set_vbus(otg, 1); hcd->self.uses_pio_for_control = 1; } if (musb->xceiv->last_event == USB_EVENT_NONE) pm_runtime_put(musb->controller); return 0; err2: if (!is_otg_enabled(musb)) musb_stop(musb); err0: return retval; }
/*
 * Quiesce the controller: drop the pullup, stop the core, and nuke every
 * endpoint's pending requests so the (possibly unbinding) gadget driver
 * sees its requests completed with -ESHUTDOWN.
 */
static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) { int i; struct musb_hw_ep *hw_ep; /* don't disconnect if it's not connected */ if (musb->g.speed == USB_SPEED_UNKNOWN) driver = NULL; else musb->g.speed = USB_SPEED_UNKNOWN; /* deactivate the hardware */ if (musb->softconnect) { musb->softconnect = 0; musb_pullup(musb, 0); } musb_stop(musb); /* killing any outstanding requests will quiesce the driver; * then report disconnect */ if (driver) { for (i = 0, hw_ep = musb->endpoints; i < musb->nr_endpoints; i++, hw_ep++) { musb_ep_select(musb->mregs, i); if (hw_ep->is_shared_fifo /* || !epnum */) { nuke(&hw_ep->ep_in, -ESHUTDOWN); } else { if (hw_ep->max_packet_sz_tx) nuke(&hw_ep->ep_in, -ESHUTDOWN); if (hw_ep->max_packet_sz_rx) nuke(&hw_ep->ep_out, -ESHUTDOWN); } } } } /* * Unregister the gadget driver. Used by gadget drivers when * unregistering themselves from the controller.
* * @param driver the gadget driver to unregister */ static int musb_gadget_stop(struct usb_gadget *g, struct usb_gadget_driver *driver) { struct musb *musb = gadget_to_musb(g); unsigned long flags; if (musb->xceiv->last_event == USB_EVENT_NONE) pm_runtime_get_sync(musb->controller); /* * REVISIT always use otg_set_peripheral() here too; * this needs to shut down the OTG engine. */ spin_lock_irqsave(&musb->lock, flags); musb_hnp_stop(musb); (void) musb_gadget_vbus_draw(&musb->g, 0); musb->xceiv->state = OTG_STATE_UNDEFINED; stop_activity(musb, driver); otg_set_peripheral(musb->xceiv->otg, NULL); dev_dbg(musb->controller, "unregistering driver %s\n", driver->function); musb->is_active = 0; musb_platform_try_idle(musb, 0); spin_unlock_irqrestore(&musb->lock, flags); if (is_otg_enabled(musb)) { usb_remove_hcd(musb_to_hcd(musb)); /* FIXME we need to be able to register another * gadget driver here and have everything work; * that currently misbehaves. */ } if (!is_otg_enabled(musb)) musb_stop(musb); pm_runtime_put(musb->controller); return 0; } /* ----------------------------------------------------------------------- */ /* lifecycle operations called through plat_uds.c */ void musb_g_resume(struct musb *musb) { musb->is_suspended = 0; switch (musb->xceiv->state) { case OTG_STATE_B_IDLE: break; case OTG_STATE_B_WAIT_ACON: case OTG_STATE_B_PERIPHERAL: musb->is_active = 1; if (musb->gadget_driver && musb->gadget_driver->resume) { spin_unlock(&musb->lock); musb->gadget_driver->resume(&musb->g); spin_lock(&musb->lock); } break; default: WARNING("unhandled RESUME transition (%s)\n", otg_state_string(musb->xceiv->state)); } } /* called when SOF packets stop for 3+ msec */ void musb_g_suspend(struct musb *musb) { u8 devctl; devctl = musb_readb(musb->mregs, MUSB_DEVCTL); dev_dbg(musb->controller, "devctl %02x\n", devctl); switch (musb->xceiv->state) { case OTG_STATE_B_IDLE: if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) musb->xceiv->state = OTG_STATE_B_PERIPHERAL; break;
case OTG_STATE_B_PERIPHERAL: musb->is_suspended = 1; if (musb->gadget_driver && musb->gadget_driver->suspend) { spin_unlock(&musb->lock); musb->gadget_driver->suspend(&musb->g); spin_lock(&musb->lock); } break; default: /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; * A_PERIPHERAL may need care too */ WARNING("unhandled SUSPEND transition (%s)\n", otg_state_string(musb->xceiv->state)); } } /* Called during SRP */ void musb_g_wakeup(struct musb *musb) { musb_gadget_wakeup(&musb->g); } /* called when VBUS drops below session threshold, and in other cases */ void musb_g_disconnect(struct musb *musb) { void __iomem *mregs = musb->mregs; u8 devctl = musb_readb(mregs, MUSB_DEVCTL); dev_dbg(musb->controller, "devctl %02x\n", devctl); /* clear HR */ musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); /* don't draw vbus until new b-default session */ (void) musb_gadget_vbus_draw(&musb->g, 0); musb->g.speed = USB_SPEED_UNKNOWN; if (musb->gadget_driver && musb->gadget_driver->disconnect) { spin_unlock(&musb->lock); musb->gadget_driver->disconnect(&musb->g); spin_lock(&musb->lock); } switch (musb->xceiv->state) { default: dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n", otg_state_string(musb->xceiv->state)); musb->xceiv->state = OTG_STATE_A_IDLE; MUSB_HST_MODE(musb); break; case OTG_STATE_A_PERIPHERAL: musb->xceiv->state = OTG_STATE_A_WAIT_BCON; MUSB_HST_MODE(musb); break; case OTG_STATE_B_WAIT_ACON: case OTG_STATE_B_HOST: case OTG_STATE_B_PERIPHERAL: case OTG_STATE_B_IDLE: musb->xceiv->state = OTG_STATE_B_IDLE; break; case OTG_STATE_B_SRP_INIT: break; } musb->is_active = 0; }
/*
 * USB bus reset seen while in peripheral mode: report any prior
 * disconnect, latch the negotiated speed from MUSB_POWER.HSMODE and
 * return the state machine to USB_STATE_DEFAULT as B- or A-peripheral.
 */
void musb_g_reset(struct musb *musb) __releases(musb->lock) __acquires(musb->lock) { void __iomem *mbase = musb->mregs; u8 devctl = musb_readb(mbase, MUSB_DEVCTL); u8 power; dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n", (devctl & MUSB_DEVCTL_BDEVICE) ? "B-Device" : "A-Device", musb_readb(mbase, MUSB_FADDR), musb->gadget_driver ?
musb->gadget_driver->driver.name : NULL ); /* report disconnect, if we didn't already (flushing EP state) */ if (musb->g.speed != USB_SPEED_UNKNOWN) musb_g_disconnect(musb); /* clear HR */ else if (devctl & MUSB_DEVCTL_HR) musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); /* what speed did we negotiate? */ power = musb_readb(mbase, MUSB_POWER); musb->g.speed = (power & MUSB_POWER_HSMODE) ? USB_SPEED_HIGH : USB_SPEED_FULL; /* start in USB_STATE_DEFAULT */ musb->is_active = 1; musb->is_suspended = 0; MUSB_DEV_MODE(musb); musb->address = 0; musb->ep0_state = MUSB_EP0_STAGE_SETUP; musb->may_wakeup = 0; musb->g.b_hnp_enable = 0; musb->g.a_alt_hnp_support = 0; musb->g.a_hnp_support = 0; /* Normal reset, as B-Device; * or else after HNP, as A-Device */ if (devctl & MUSB_DEVCTL_BDEVICE) { musb->xceiv->state = OTG_STATE_B_PERIPHERAL; musb->g.is_a_peripheral = 0; } else if (is_otg_enabled(musb)) { musb->xceiv->state = OTG_STATE_A_PERIPHERAL; musb->g.is_a_peripheral = 1; } else WARN_ON(1); /* start with default limits on VBUS power draw */ (void) musb_gadget_vbus_draw(&musb->g, is_otg_enabled(musb) ? 8 : 100); }
gpl-2.0
ktd2004/linux-stable
drivers/pci/hotplug/cpcihp_generic.c
1307
6462
/* * cpcihp_generic.c * * Generic port I/O CompactPCI driver * * Copyright 2002 SOMA Networks, Inc. * Copyright 2001 Intel San Luis Obispo * Copyright 2000,2001 MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * * This generic CompactPCI hotplug driver should allow using the PCI hotplug * mechanism on any CompactPCI board that exposes the #ENUM signal as a bit * in a system register that can be read through standard port I/O. 
* * Send feedback to <scottm@somanetworks.com> */ #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/string.h> #include "cpci_hotplug.h" #define DRIVER_VERSION "0.1" #define DRIVER_AUTHOR "Scott Murray <scottm@somanetworks.com>" #define DRIVER_DESC "Generic port I/O CompactPCI Hot Plug Driver" #if !defined(MODULE) #define MY_NAME "cpcihp_generic" #else #define MY_NAME THIS_MODULE->name #endif #define dbg(format, arg...) \ do { \ if (debug) \ printk (KERN_DEBUG "%s: " format "\n", \ MY_NAME , ## arg); \ } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) #define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg) /* local variables */ static bool debug; static char *bridge; static u8 bridge_busnr; static u8 bridge_slot; static struct pci_bus *bus; static u8 first_slot; static u8 last_slot; static u16 port; static unsigned int enum_bit; static u8 enum_mask; static struct cpci_hp_controller_ops generic_hpc_ops; static struct cpci_hp_controller generic_hpc; static int __init validate_parameters(void) { char *str; char *p; unsigned long tmp; if (!bridge) { info("not configured, disabling."); return -EINVAL; } str = bridge; if (!*str) return -EINVAL; tmp = simple_strtoul(str, &p, 16); if (p == str || tmp > 0xff) { err("Invalid hotplug bus bridge device bus number"); return -EINVAL; } bridge_busnr = (u8) tmp; dbg("bridge_busnr = 0x%02x", bridge_busnr); if (*p != ':') { err("Invalid hotplug bus bridge device"); return -EINVAL; } str = p + 1; tmp = simple_strtoul(str, &p, 16); if (p == str || tmp > 0x1f) { err("Invalid hotplug bus bridge device slot number"); return -EINVAL; } bridge_slot = (u8) tmp; dbg("bridge_slot = 0x%02x", bridge_slot); dbg("first_slot = 0x%02x", first_slot); dbg("last_slot = 0x%02x", last_slot); if (!(first_slot && last_slot)) { err("Need 
to specify first_slot and last_slot"); return -EINVAL; } if (last_slot < first_slot) { err("first_slot must be less than last_slot"); return -EINVAL; } dbg("port = 0x%04x", port); dbg("enum_bit = 0x%02x", enum_bit); if (enum_bit > 7) { err("Invalid #ENUM bit"); return -EINVAL; } enum_mask = 1 << enum_bit; return 0; } static int query_enum(void) { u8 value; value = inb_p(port); return ((value & enum_mask) == enum_mask); } static int __init cpcihp_generic_init(void) { int status; struct resource *r; struct pci_dev *dev; info(DRIVER_DESC " version: " DRIVER_VERSION); status = validate_parameters(); if (status) return status; r = request_region(port, 1, "#ENUM hotswap signal register"); if (!r) return -EBUSY; dev = pci_get_domain_bus_and_slot(0, bridge_busnr, PCI_DEVFN(bridge_slot, 0)); if (!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { err("Invalid bridge device %s", bridge); pci_dev_put(dev); return -EINVAL; } bus = dev->subordinate; pci_dev_put(dev); memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller)); generic_hpc_ops.query_enum = query_enum; generic_hpc.ops = &generic_hpc_ops; status = cpci_hp_register_controller(&generic_hpc); if (status != 0) { err("Could not register cPCI hotplug controller"); return -ENODEV; } dbg("registered controller"); status = cpci_hp_register_bus(bus, first_slot, last_slot); if (status != 0) { err("Could not register cPCI hotplug bus"); goto init_bus_register_error; } dbg("registered bus"); status = cpci_hp_start(); if (status != 0) { err("Could not started cPCI hotplug system"); goto init_start_error; } dbg("started cpci hp system"); return 0; init_start_error: cpci_hp_unregister_bus(bus); init_bus_register_error: cpci_hp_unregister_controller(&generic_hpc); err("status = %d", status); return status; } static void __exit cpcihp_generic_exit(void) { cpci_hp_stop(); cpci_hp_unregister_bus(bus); cpci_hp_unregister_controller(&generic_hpc); release_region(port, 1); } module_init(cpcihp_generic_init); 
module_exit(cpcihp_generic_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); module_param(bridge, charp, 0); MODULE_PARM_DESC(bridge, "Hotswap bus bridge device, <bus>:<slot> (bus and slot are in hexadecimal)"); module_param(first_slot, byte, 0); MODULE_PARM_DESC(first_slot, "Hotswap bus first slot number"); module_param(last_slot, byte, 0); MODULE_PARM_DESC(last_slot, "Hotswap bus last slot number"); module_param(port, ushort, 0); MODULE_PARM_DESC(port, "#ENUM signal I/O port"); module_param(enum_bit, uint, 0); MODULE_PARM_DESC(enum_bit, "#ENUM signal bit (0-7)");
gpl-2.0
IndraVikas/linux
arch/sparc/crypto/crc32c_glue.c
2331
4138
/* Glue code for CRC32C optimized for sparc64 crypto opcodes. * * This is based largely upon arch/x86/crypto/crc32c-intel.c * * Copyright (C) 2008 Intel Corporation * Authors: Austin Zhang <austin_zhang@linux.intel.com> * Kent Liu <kent.liu@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/crc32.h> #include <crypto/internal/hash.h> #include <asm/pstate.h> #include <asm/elf.h> #include "opcodes.h" /* * Setting the seed allows arbitrary accumulators and flexible XOR policy * If your algorithm starts with ~0, then XOR with ~0 before you set * the seed. */ static int crc32c_sparc64_setkey(struct crypto_shash *hash, const u8 *key, unsigned int keylen) { u32 *mctx = crypto_shash_ctx(hash); if (keylen != sizeof(u32)) { crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } *(__le32 *)mctx = le32_to_cpup((__le32 *)key); return 0; } static int crc32c_sparc64_init(struct shash_desc *desc) { u32 *mctx = crypto_shash_ctx(desc->tfm); u32 *crcp = shash_desc_ctx(desc); *crcp = *mctx; return 0; } extern void crc32c_sparc64(u32 *crcp, const u64 *data, unsigned int len); static void crc32c_compute(u32 *crcp, const u64 *data, unsigned int len) { unsigned int asm_len; asm_len = len & ~7U; if (asm_len) { crc32c_sparc64(crcp, data, asm_len); data += asm_len / 8; len -= asm_len; } if (len) *crcp = __crc32c_le(*crcp, (const unsigned char *) data, len); } static int crc32c_sparc64_update(struct shash_desc *desc, const u8 *data, unsigned int len) { u32 *crcp = shash_desc_ctx(desc); crc32c_compute(crcp, (const u64 *) data, len); return 0; } static int __crc32c_sparc64_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) { u32 tmp = *crcp; crc32c_compute(&tmp, (const u64 *) data, len); *(__le32 *) out = ~cpu_to_le32(tmp); return 0; } static int crc32c_sparc64_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return 
__crc32c_sparc64_finup(shash_desc_ctx(desc), data, len, out); } static int crc32c_sparc64_final(struct shash_desc *desc, u8 *out) { u32 *crcp = shash_desc_ctx(desc); *(__le32 *) out = ~cpu_to_le32p(crcp); return 0; } static int crc32c_sparc64_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return __crc32c_sparc64_finup(crypto_shash_ctx(desc->tfm), data, len, out); } static int crc32c_sparc64_cra_init(struct crypto_tfm *tfm) { u32 *key = crypto_tfm_ctx(tfm); *key = ~0; return 0; } #define CHKSUM_BLOCK_SIZE 1 #define CHKSUM_DIGEST_SIZE 4 static struct shash_alg alg = { .setkey = crc32c_sparc64_setkey, .init = crc32c_sparc64_init, .update = crc32c_sparc64_update, .final = crc32c_sparc64_final, .finup = crc32c_sparc64_finup, .digest = crc32c_sparc64_digest, .descsize = sizeof(u32), .digestsize = CHKSUM_DIGEST_SIZE, .base = { .cra_name = "crc32c", .cra_driver_name = "crc32c-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_alignmask = 7, .cra_module = THIS_MODULE, .cra_init = crc32c_sparc64_cra_init, } }; static bool __init sparc64_has_crc32c_opcode(void) { unsigned long cfr; if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) return false; __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); if (!(cfr & CFR_CRC32C)) return false; return true; } static int __init crc32c_sparc64_mod_init(void) { if (sparc64_has_crc32c_opcode()) { pr_info("Using sparc64 crc32c opcode optimized CRC32C implementation\n"); return crypto_register_shash(&alg); } pr_info("sparc64 crc32c opcode not available.\n"); return -ENODEV; } static void __exit crc32c_sparc64_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(crc32c_sparc64_mod_init); module_exit(crc32c_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated"); MODULE_ALIAS_CRYPTO("crc32c"); #include "crop_devid.c"
gpl-2.0
Neves4/DatKernel
arch/s390/kernel/jump_label.c
2587
1115
/* * Jump label s390 support * * Copyright IBM Corp. 2011 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com> */ #include <linux/module.h> #include <linux/uaccess.h> #include <linux/stop_machine.h> #include <linux/jump_label.h> #include <asm/ipl.h> #ifdef HAVE_JUMP_LABEL struct insn { u16 opcode; s32 offset; } __packed; struct insn_args { unsigned long *target; struct insn *insn; ssize_t size; }; static int __arch_jump_label_transform(void *data) { struct insn_args *args = data; int rc; rc = probe_kernel_write(args->target, args->insn, args->size); WARN_ON_ONCE(rc < 0); return 0; } void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { struct insn_args args; struct insn insn; if (type == JUMP_LABEL_ENABLE) { /* brcl 15,offset */ insn.opcode = 0xc0f4; insn.offset = (entry->target - entry->code) >> 1; } else { /* brcl 0,0 */ insn.opcode = 0xc004; insn.offset = 0; } args.target = (void *) entry->code; args.insn = &insn; args.size = JUMP_LABEL_NOP_SIZE; stop_machine(__arch_jump_label_transform, &args, NULL); } #endif
gpl-2.0
AndroidOpenDevelopment-Devices/android_kernel_moto_shamu
drivers/net/irda/ksdazzle-sir.c
2587
23428
/***************************************************************************** * * Filename: ksdazzle.c * Version: 0.1.2 * Description: Irda KingSun Dazzle USB Dongle * Status: Experimental * Author: Alex Villacís Lasso <a_villacis@palosanto.com> * * Based on stir4200, mcs7780, kingsun-sir drivers. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * *****************************************************************************/ /* * Following is my most current (2007-07-26) understanding of how the Kingsun * 07D0:4100 dongle (sometimes known as the MA-660) is supposed to work. This * information was deduced by examining the USB traffic captured with USBSnoopy * from the WinXP driver. Feel free to update here as more of the dongle is * known. * * General: This dongle exposes one interface with two interrupt endpoints, one * IN and one OUT. In this regard, it is similar to what the Kingsun/Donshine * dongle (07c0:4200) exposes. Traffic is raw and needs to be wrapped and * unwrapped manually as in stir4200, kingsun-sir, and ks959-sir. * * Transmission: To transmit an IrDA frame, it is necessary to wrap it, then * split it into multiple segments of up to 7 bytes each, and transmit each in * sequence. It seems that sending a single big block (like kingsun-sir does) * won't work with this dongle. 
Each segment needs to be prefixed with a value * equal to (unsigned char)0xF8 + <number of bytes in segment>, inside a payload * of exactly 8 bytes. For example, a segment of 1 byte gets prefixed by 0xF9, * and one of 7 bytes gets prefixed by 0xFF. The bytes at the end of the * payload, not considered by the prefix, are ignored (set to 0 by this * implementation). * * Reception: To receive data, the driver must poll the dongle regularly (like * kingsun-sir.c) with interrupt URBs. If data is available, it will be returned * in payloads from 0 to 8 bytes long. When concatenated, these payloads form * a raw IrDA stream that needs to be unwrapped as in stir4200 and kingsun-sir * * Speed change: To change the speed of the dongle, the driver prepares a * control URB with the following as a setup packet: * bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE * bRequest 0x09 * wValue 0x0200 * wIndex 0x0001 * wLength 0x0008 (length of the payload) * The payload is a 8-byte record, apparently identical to the one used in * drivers/usb/serial/cypress_m8.c to change speed: * __u32 baudSpeed; * unsigned int dataBits : 2; // 0 - 5 bits 3 - 8 bits * unsigned int : 1; * unsigned int stopBits : 1; * unsigned int parityEnable : 1; * unsigned int parityType : 1; * unsigned int : 1; * unsigned int reset : 1; * unsigned char reserved[3]; // set to 0 * * For now only SIR speeds have been observed with this dongle. Therefore, * nothing is known on what changes (if any) must be done to frame wrapping / * unwrapping for higher than SIR speeds. This driver assumes no change is * necessary and announces support for all the way to 115200 bps. 
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/device.h> #include <linux/crc32.h> #include <asm/unaligned.h> #include <asm/byteorder.h> #include <asm/uaccess.h> #include <net/irda/irda.h> #include <net/irda/wrapper.h> #include <net/irda/crc.h> #define KSDAZZLE_VENDOR_ID 0x07d0 #define KSDAZZLE_PRODUCT_ID 0x4100 /* These are the currently known USB ids */ static struct usb_device_id dongles[] = { /* KingSun Co,Ltd IrDA/USB Bridge */ {USB_DEVICE(KSDAZZLE_VENDOR_ID, KSDAZZLE_PRODUCT_ID)}, {} }; MODULE_DEVICE_TABLE(usb, dongles); #define KINGSUN_MTT 0x07 #define KINGSUN_REQ_RECV 0x01 #define KINGSUN_REQ_SEND 0x09 #define KINGSUN_SND_FIFO_SIZE 2048 /* Max packet we can send */ #define KINGSUN_RCV_MAX 2048 /* Max transfer we can receive */ struct ksdazzle_speedparams { __le32 baudrate; /* baud rate, little endian */ __u8 flags; __u8 reserved[3]; } __packed; #define KS_DATA_5_BITS 0x00 #define KS_DATA_6_BITS 0x01 #define KS_DATA_7_BITS 0x02 #define KS_DATA_8_BITS 0x03 #define KS_STOP_BITS_1 0x00 #define KS_STOP_BITS_2 0x08 #define KS_PAR_DISABLE 0x00 #define KS_PAR_EVEN 0x10 #define KS_PAR_ODD 0x30 #define KS_RESET 0x80 #define KINGSUN_EP_IN 0 #define KINGSUN_EP_OUT 1 struct ksdazzle_cb { struct usb_device *usbdev; /* init: probe_irda */ struct net_device *netdev; /* network layer */ struct irlap_cb *irlap; /* The link layer we are binded to */ struct qos_info qos; struct urb *tx_urb; __u8 *tx_buf_clear; unsigned int tx_buf_clear_used; unsigned int tx_buf_clear_sent; __u8 tx_payload[8]; struct urb *rx_urb; __u8 *rx_buf; iobuff_t rx_unwrap_buff; struct usb_ctrlrequest *speed_setuprequest; struct urb *speed_urb; struct ksdazzle_speedparams speedparams; unsigned int new_speed; __u8 ep_in; __u8 ep_out; spinlock_t lock; int receiving; }; /* Callback transmission routine */ static void 
ksdazzle_speed_irq(struct urb *urb) { /* unlink, shutdown, unplug, other nasties */ if (urb->status != 0) dev_err(&urb->dev->dev, "ksdazzle_speed_irq: urb asynchronously failed - %d\n", urb->status); } /* Send a control request to change speed of the dongle */ static int ksdazzle_change_speed(struct ksdazzle_cb *kingsun, unsigned speed) { static unsigned int supported_speeds[] = { 2400, 9600, 19200, 38400, 57600, 115200, 576000, 1152000, 4000000, 0 }; int err; unsigned int i; if (kingsun->speed_setuprequest == NULL || kingsun->speed_urb == NULL) return -ENOMEM; /* Check that requested speed is among the supported ones */ for (i = 0; supported_speeds[i] && supported_speeds[i] != speed; i++) ; if (supported_speeds[i] == 0) return -EOPNOTSUPP; memset(&(kingsun->speedparams), 0, sizeof(struct ksdazzle_speedparams)); kingsun->speedparams.baudrate = cpu_to_le32(speed); kingsun->speedparams.flags = KS_DATA_8_BITS; /* speed_setuprequest pre-filled in ksdazzle_probe */ usb_fill_control_urb(kingsun->speed_urb, kingsun->usbdev, usb_sndctrlpipe(kingsun->usbdev, 0), (unsigned char *)kingsun->speed_setuprequest, &(kingsun->speedparams), sizeof(struct ksdazzle_speedparams), ksdazzle_speed_irq, kingsun); kingsun->speed_urb->status = 0; err = usb_submit_urb(kingsun->speed_urb, GFP_ATOMIC); return err; } /* Submit one fragment of an IrDA frame to the dongle */ static void ksdazzle_send_irq(struct urb *urb); static int ksdazzle_submit_tx_fragment(struct ksdazzle_cb *kingsun) { unsigned int wraplen; int ret; /* We can send at most 7 bytes of payload at a time */ wraplen = 7; if (wraplen > kingsun->tx_buf_clear_used) wraplen = kingsun->tx_buf_clear_used; /* Prepare payload prefix with used length */ memset(kingsun->tx_payload, 0, 8); kingsun->tx_payload[0] = (unsigned char)0xf8 + wraplen; memcpy(kingsun->tx_payload + 1, kingsun->tx_buf_clear, wraplen); usb_fill_int_urb(kingsun->tx_urb, kingsun->usbdev, usb_sndintpipe(kingsun->usbdev, kingsun->ep_out), kingsun->tx_payload, 8, 
ksdazzle_send_irq, kingsun, 1); kingsun->tx_urb->status = 0; ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC); /* Remember how much data was sent, in order to update at callback */ kingsun->tx_buf_clear_sent = (ret == 0) ? wraplen : 0; return ret; } /* Callback transmission routine */ static void ksdazzle_send_irq(struct urb *urb) { struct ksdazzle_cb *kingsun = urb->context; struct net_device *netdev = kingsun->netdev; int ret = 0; /* in process of stopping, just drop data */ if (!netif_running(kingsun->netdev)) { dev_err(&kingsun->usbdev->dev, "ksdazzle_send_irq: Network not running!\n"); return; } /* unlink, shutdown, unplug, other nasties */ if (urb->status != 0) { dev_err(&kingsun->usbdev->dev, "ksdazzle_send_irq: urb asynchronously failed - %d\n", urb->status); return; } if (kingsun->tx_buf_clear_used > 0) { /* Update data remaining to be sent */ if (kingsun->tx_buf_clear_sent < kingsun->tx_buf_clear_used) { memmove(kingsun->tx_buf_clear, kingsun->tx_buf_clear + kingsun->tx_buf_clear_sent, kingsun->tx_buf_clear_used - kingsun->tx_buf_clear_sent); } kingsun->tx_buf_clear_used -= kingsun->tx_buf_clear_sent; kingsun->tx_buf_clear_sent = 0; if (kingsun->tx_buf_clear_used > 0) { /* There is more data to be sent */ if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) { dev_err(&kingsun->usbdev->dev, "ksdazzle_send_irq: failed tx_urb submit: %d\n", ret); switch (ret) { case -ENODEV: case -EPIPE: break; default: netdev->stats.tx_errors++; netif_start_queue(netdev); } } } else { /* All data sent, send next speed && wake network queue */ if (kingsun->new_speed != -1 && cpu_to_le32(kingsun->new_speed) != kingsun->speedparams.baudrate) ksdazzle_change_speed(kingsun, kingsun->new_speed); netif_wake_queue(netdev); } } } /* * Called from net/core when new frame is available. 
*/ static netdev_tx_t ksdazzle_hard_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ksdazzle_cb *kingsun; unsigned int wraplen; int ret = 0; netif_stop_queue(netdev); /* the IRDA wrapping routines don't deal with non linear skb */ SKB_LINEAR_ASSERT(skb); kingsun = netdev_priv(netdev); spin_lock(&kingsun->lock); kingsun->new_speed = irda_get_next_speed(skb); /* Append data to the end of whatever data remains to be transmitted */ wraplen = async_wrap_skb(skb, kingsun->tx_buf_clear, KINGSUN_SND_FIFO_SIZE); kingsun->tx_buf_clear_used = wraplen; if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) { dev_err(&kingsun->usbdev->dev, "ksdazzle_hard_xmit: failed tx_urb submit: %d\n", ret); switch (ret) { case -ENODEV: case -EPIPE: break; default: netdev->stats.tx_errors++; netif_start_queue(netdev); } } else { netdev->stats.tx_packets++; netdev->stats.tx_bytes += skb->len; } dev_kfree_skb(skb); spin_unlock(&kingsun->lock); return NETDEV_TX_OK; } /* Receive callback function */ static void ksdazzle_rcv_irq(struct urb *urb) { struct ksdazzle_cb *kingsun = urb->context; struct net_device *netdev = kingsun->netdev; /* in process of stopping, just drop data */ if (!netif_running(netdev)) { kingsun->receiving = 0; return; } /* unlink, shutdown, unplug, other nasties */ if (urb->status != 0) { dev_err(&kingsun->usbdev->dev, "ksdazzle_rcv_irq: urb asynchronously failed - %d\n", urb->status); kingsun->receiving = 0; return; } if (urb->actual_length > 0) { __u8 *bytes = urb->transfer_buffer; unsigned int i; for (i = 0; i < urb->actual_length; i++) { async_unwrap_char(netdev, &netdev->stats, &kingsun->rx_unwrap_buff, bytes[i]); } kingsun->receiving = (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0; } /* This urb has already been filled in ksdazzle_net_open. It is assumed that urb keeps the pointer to the payload buffer. */ urb->status = 0; usb_submit_urb(urb, GFP_ATOMIC); } /* * Function ksdazzle_net_open (dev) * * Network device is taken up. 
Usually this is done by "ifconfig irda0 up" */ static int ksdazzle_net_open(struct net_device *netdev) { struct ksdazzle_cb *kingsun = netdev_priv(netdev); int err = -ENOMEM; char hwname[16]; /* At this point, urbs are NULL, and skb is NULL (see ksdazzle_probe) */ kingsun->receiving = 0; /* Initialize for SIR to copy data directly into skb. */ kingsun->rx_unwrap_buff.in_frame = FALSE; kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; kingsun->rx_unwrap_buff.truesize = IRDA_SKB_MAX_MTU; kingsun->rx_unwrap_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU); if (!kingsun->rx_unwrap_buff.skb) goto free_mem; skb_reserve(kingsun->rx_unwrap_buff.skb, 1); kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data; kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kingsun->rx_urb) goto free_mem; kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kingsun->tx_urb) goto free_mem; kingsun->speed_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kingsun->speed_urb) goto free_mem; /* Initialize speed for dongle */ kingsun->new_speed = 9600; err = ksdazzle_change_speed(kingsun, 9600); if (err < 0) goto free_mem; /* * Now that everything should be initialized properly, * Open new IrLAP layer instance to take care of us... */ sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); if (!kingsun->irlap) { err = -ENOMEM; dev_err(&kingsun->usbdev->dev, "irlap_open failed\n"); goto free_mem; } /* Start reception. 
*/ usb_fill_int_urb(kingsun->rx_urb, kingsun->usbdev, usb_rcvintpipe(kingsun->usbdev, kingsun->ep_in), kingsun->rx_buf, KINGSUN_RCV_MAX, ksdazzle_rcv_irq, kingsun, 1); kingsun->rx_urb->status = 0; err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL); if (err) { dev_err(&kingsun->usbdev->dev, "first urb-submit failed: %d\n", err); goto close_irlap; } netif_start_queue(netdev); /* Situation at this point: - all work buffers allocated - urbs allocated and ready to fill - max rx packet known (in max_rx) - unwrap state machine initialized, in state outside of any frame - receive request in progress - IrLAP layer started, about to hand over packets to send */ return 0; close_irlap: irlap_close(kingsun->irlap); free_mem: usb_free_urb(kingsun->speed_urb); kingsun->speed_urb = NULL; usb_free_urb(kingsun->tx_urb); kingsun->tx_urb = NULL; usb_free_urb(kingsun->rx_urb); kingsun->rx_urb = NULL; if (kingsun->rx_unwrap_buff.skb) { kfree_skb(kingsun->rx_unwrap_buff.skb); kingsun->rx_unwrap_buff.skb = NULL; kingsun->rx_unwrap_buff.head = NULL; } return err; } /* * Function ksdazzle_net_close (dev) * * Network device is taken down. 
Usually this is done by * "ifconfig irda0 down" */ static int ksdazzle_net_close(struct net_device *netdev) { struct ksdazzle_cb *kingsun = netdev_priv(netdev); /* Stop transmit processing */ netif_stop_queue(netdev); /* Mop up receive && transmit urb's */ usb_kill_urb(kingsun->tx_urb); usb_free_urb(kingsun->tx_urb); kingsun->tx_urb = NULL; usb_kill_urb(kingsun->speed_urb); usb_free_urb(kingsun->speed_urb); kingsun->speed_urb = NULL; usb_kill_urb(kingsun->rx_urb); usb_free_urb(kingsun->rx_urb); kingsun->rx_urb = NULL; kfree_skb(kingsun->rx_unwrap_buff.skb); kingsun->rx_unwrap_buff.skb = NULL; kingsun->rx_unwrap_buff.head = NULL; kingsun->rx_unwrap_buff.in_frame = FALSE; kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; kingsun->receiving = 0; /* Stop and remove instance of IrLAP */ irlap_close(kingsun->irlap); kingsun->irlap = NULL; return 0; } /* * IOCTLs : Extra out-of-band network commands... */ static int ksdazzle_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { struct if_irda_req *irq = (struct if_irda_req *)rq; struct ksdazzle_cb *kingsun = netdev_priv(netdev); int ret = 0; switch (cmd) { case SIOCSBANDWIDTH: /* Set bandwidth */ if (!capable(CAP_NET_ADMIN)) return -EPERM; /* Check if the device is still there */ if (netif_device_present(kingsun->netdev)) return ksdazzle_change_speed(kingsun, irq->ifr_baudrate); break; case SIOCSMEDIABUSY: /* Set media busy */ if (!capable(CAP_NET_ADMIN)) return -EPERM; /* Check if the IrDA stack is still there */ if (netif_running(kingsun->netdev)) irda_device_set_media_busy(kingsun->netdev, TRUE); break; case SIOCGRECEIVING: /* Only approximately true */ irq->ifr_receiving = kingsun->receiving; break; default: ret = -EOPNOTSUPP; } return ret; } static const struct net_device_ops ksdazzle_ops = { .ndo_start_xmit = ksdazzle_hard_xmit, .ndo_open = ksdazzle_net_open, .ndo_stop = ksdazzle_net_close, .ndo_do_ioctl = ksdazzle_net_ioctl, }; /* * This routine is called by the USB subsystem for each new device * in the 
system. We need to check if the device is ours, and in * this case start handling it. */ static int ksdazzle_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_host_interface *interface; struct usb_endpoint_descriptor *endpoint; struct usb_device *dev = interface_to_usbdev(intf); struct ksdazzle_cb *kingsun = NULL; struct net_device *net = NULL; int ret = -ENOMEM; int pipe, maxp_in, maxp_out; __u8 ep_in; __u8 ep_out; /* Check that there really are two interrupt endpoints. Check based on the one in drivers/usb/input/usbmouse.c */ interface = intf->cur_altsetting; if (interface->desc.bNumEndpoints != 2) { dev_err(&intf->dev, "ksdazzle: expected 2 endpoints, found %d\n", interface->desc.bNumEndpoints); return -ENODEV; } endpoint = &interface->endpoint[KINGSUN_EP_IN].desc; if (!usb_endpoint_is_int_in(endpoint)) { dev_err(&intf->dev, "ksdazzle: endpoint 0 is not interrupt IN\n"); return -ENODEV; } ep_in = endpoint->bEndpointAddress; pipe = usb_rcvintpipe(dev, ep_in); maxp_in = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); if (maxp_in > 255 || maxp_in <= 1) { dev_err(&intf->dev, "ksdazzle: endpoint 0 has max packet size %d not in range [2..255]\n", maxp_in); return -ENODEV; } endpoint = &interface->endpoint[KINGSUN_EP_OUT].desc; if (!usb_endpoint_is_int_out(endpoint)) { dev_err(&intf->dev, "ksdazzle: endpoint 1 is not interrupt OUT\n"); return -ENODEV; } ep_out = endpoint->bEndpointAddress; pipe = usb_sndintpipe(dev, ep_out); maxp_out = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); /* Allocate network device container. 
*/ net = alloc_irdadev(sizeof(*kingsun)); if (!net) goto err_out1; SET_NETDEV_DEV(net, &intf->dev); kingsun = netdev_priv(net); kingsun->netdev = net; kingsun->usbdev = dev; kingsun->ep_in = ep_in; kingsun->ep_out = ep_out; kingsun->irlap = NULL; kingsun->tx_urb = NULL; kingsun->tx_buf_clear = NULL; kingsun->tx_buf_clear_used = 0; kingsun->tx_buf_clear_sent = 0; kingsun->rx_urb = NULL; kingsun->rx_buf = NULL; kingsun->rx_unwrap_buff.in_frame = FALSE; kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; kingsun->rx_unwrap_buff.skb = NULL; kingsun->receiving = 0; spin_lock_init(&kingsun->lock); kingsun->speed_setuprequest = NULL; kingsun->speed_urb = NULL; kingsun->speedparams.baudrate = 0; /* Allocate input buffer */ kingsun->rx_buf = kmalloc(KINGSUN_RCV_MAX, GFP_KERNEL); if (!kingsun->rx_buf) goto free_mem; /* Allocate output buffer */ kingsun->tx_buf_clear = kmalloc(KINGSUN_SND_FIFO_SIZE, GFP_KERNEL); if (!kingsun->tx_buf_clear) goto free_mem; /* Allocate and initialize speed setup packet */ kingsun->speed_setuprequest = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); if (!kingsun->speed_setuprequest) goto free_mem; kingsun->speed_setuprequest->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE; kingsun->speed_setuprequest->bRequest = KINGSUN_REQ_SEND; kingsun->speed_setuprequest->wValue = cpu_to_le16(0x0200); kingsun->speed_setuprequest->wIndex = cpu_to_le16(0x0001); kingsun->speed_setuprequest->wLength = cpu_to_le16(sizeof(struct ksdazzle_speedparams)); printk(KERN_INFO "KingSun/Dazzle IRDA/USB found at address %d, " "Vendor: %x, Product: %x\n", dev->devnum, le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); /* Initialize QoS for this device */ irda_init_max_qos_capabilies(&kingsun->qos); /* Baud rates known to be supported. Please uncomment if devices (other than a SonyEriccson K300 phone) can be shown to support higher speeds with this dongle. 
*/ kingsun->qos.baud_rate.bits = IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200; kingsun->qos.min_turn_time.bits &= KINGSUN_MTT; irda_qos_bits_to_value(&kingsun->qos); /* Override the network functions we need to use */ net->netdev_ops = &ksdazzle_ops; ret = register_netdev(net); if (ret != 0) goto free_mem; dev_info(&net->dev, "IrDA: Registered KingSun/Dazzle device %s\n", net->name); usb_set_intfdata(intf, kingsun); /* Situation at this point: - all work buffers allocated - setup requests pre-filled - urbs not allocated, set to NULL - max rx packet known (is KINGSUN_FIFO_SIZE) - unwrap state machine (partially) initialized, but skb == NULL */ return 0; free_mem: kfree(kingsun->speed_setuprequest); kfree(kingsun->tx_buf_clear); kfree(kingsun->rx_buf); free_netdev(net); err_out1: return ret; } /* * The current device is removed, the USB layer tell us to shut it down... */ static void ksdazzle_disconnect(struct usb_interface *intf) { struct ksdazzle_cb *kingsun = usb_get_intfdata(intf); if (!kingsun) return; unregister_netdev(kingsun->netdev); /* Mop up receive && transmit urb's */ usb_kill_urb(kingsun->speed_urb); usb_free_urb(kingsun->speed_urb); kingsun->speed_urb = NULL; usb_kill_urb(kingsun->tx_urb); usb_free_urb(kingsun->tx_urb); kingsun->tx_urb = NULL; usb_kill_urb(kingsun->rx_urb); usb_free_urb(kingsun->rx_urb); kingsun->rx_urb = NULL; kfree(kingsun->speed_setuprequest); kfree(kingsun->tx_buf_clear); kfree(kingsun->rx_buf); free_netdev(kingsun->netdev); usb_set_intfdata(intf, NULL); } #ifdef CONFIG_PM /* USB suspend, so power off the transmitter/receiver */ static int ksdazzle_suspend(struct usb_interface *intf, pm_message_t message) { struct ksdazzle_cb *kingsun = usb_get_intfdata(intf); netif_device_detach(kingsun->netdev); if (kingsun->speed_urb != NULL) usb_kill_urb(kingsun->speed_urb); if (kingsun->tx_urb != NULL) usb_kill_urb(kingsun->tx_urb); if (kingsun->rx_urb != NULL) usb_kill_urb(kingsun->rx_urb); return 0; } /* Coming out of 
suspend, so reset hardware */ static int ksdazzle_resume(struct usb_interface *intf) { struct ksdazzle_cb *kingsun = usb_get_intfdata(intf); if (kingsun->rx_urb != NULL) { /* Setup request already filled in ksdazzle_probe */ usb_submit_urb(kingsun->rx_urb, GFP_KERNEL); } netif_device_attach(kingsun->netdev); return 0; } #endif /* * USB device callbacks */ static struct usb_driver irda_driver = { .name = "ksdazzle-sir", .probe = ksdazzle_probe, .disconnect = ksdazzle_disconnect, .id_table = dongles, #ifdef CONFIG_PM .suspend = ksdazzle_suspend, .resume = ksdazzle_resume, #endif }; module_usb_driver(irda_driver); MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>"); MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun Dazzle"); MODULE_LICENSE("GPL");
gpl-2.0
matianfu/barcelona_kernel
crypto/async_tx/async_memcpy.c
3099
3028
/* * copy offload engine support * * Copyright © 2006, Intel Corporation. * * Dan Williams <dan.j.williams@intel.com> * * with architecture considerations by: * Neil Brown <neilb@suse.de> * Jeff Garzik <jeff@garzik.org> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <linux/kernel.h> #include <linux/highmem.h> #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/async_tx.h> /** * async_memcpy - attempt to copy memory with a dma engine. * @dest: destination page * @src: src page * @dest_offset: offset into 'dest' to start transaction * @src_offset: offset into 'src' to start transaction * @len: length in bytes * @submit: submission / completion modifiers * * honored flags: ASYNC_TX_ACK */ struct dma_async_tx_descriptor * async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, unsigned int src_offset, size_t len, struct async_submit_ctl *submit) { struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY, &dest, 1, &src, 1, len); struct dma_device *device = chan ? 
chan->device : NULL; struct dma_async_tx_descriptor *tx = NULL; if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { dma_addr_t dma_dest, dma_src; unsigned long dma_prep_flags = 0; if (submit->cb_fn) dma_prep_flags |= DMA_PREP_INTERRUPT; if (submit->flags & ASYNC_TX_FENCE) dma_prep_flags |= DMA_PREP_FENCE; dma_dest = dma_map_page(device->dev, dest, dest_offset, len, DMA_FROM_DEVICE); dma_src = dma_map_page(device->dev, src, src_offset, len, DMA_TO_DEVICE); tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, dma_prep_flags); } if (tx) { pr_debug("%s: (async) len: %zu\n", __func__, len); async_tx_submit(chan, tx, submit); } else { void *dest_buf, *src_buf; pr_debug("%s: (sync) len: %zu\n", __func__, len); /* wait for any prerequisite operations */ async_tx_quiesce(&submit->depend_tx); dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset; src_buf = kmap_atomic(src, KM_USER1) + src_offset; memcpy(dest_buf, src_buf, len); kunmap_atomic(src_buf, KM_USER1); kunmap_atomic(dest_buf, KM_USER0); async_tx_sync_epilog(submit); } return tx; } EXPORT_SYMBOL_GPL(async_memcpy); MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("asynchronous memcpy api"); MODULE_LICENSE("GPL");
gpl-2.0
ibuddler/M2ATT
drivers/usb/storage/freecom.c
4635
15599
/* Driver for Freecom USB/IDE adaptor * * Freecom v0.1: * * First release * * Current development and maintenance by: * (C) 2000 David Brown <usb-storage@davidb.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * * This driver was developed with information provided in FREECOM's USB * Programmers Reference Guide. For further information contact Freecom * (http://www.freecom.de/) */ #include <linux/module.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "debug.h" MODULE_DESCRIPTION("Driver for Freecom USB/IDE adaptor"); MODULE_AUTHOR("David Brown <usb-storage@davidb.org>"); MODULE_LICENSE("GPL"); #ifdef CONFIG_USB_STORAGE_DEBUG static void pdump (void *, int); #endif /* Bits of HD_STATUS */ #define ERR_STAT 0x01 #define DRQ_STAT 0x08 /* All of the outgoing packets are 64 bytes long. */ struct freecom_cb_wrap { u8 Type; /* Command type. */ u8 Timeout; /* Timeout in seconds. */ u8 Atapi[12]; /* An ATAPI packet. */ u8 Filler[50]; /* Padding Data. */ }; struct freecom_xfer_wrap { u8 Type; /* Command type. */ u8 Timeout; /* Timeout in seconds. */ __le32 Count; /* Number of bytes to transfer. */ u8 Pad[58]; } __attribute__ ((packed)); struct freecom_ide_out { u8 Type; /* Type + IDE register. */ u8 Pad; __le16 Value; /* Value to write. 
*/ u8 Pad2[60]; }; struct freecom_ide_in { u8 Type; /* Type | IDE register. */ u8 Pad[63]; }; struct freecom_status { u8 Status; u8 Reason; __le16 Count; u8 Pad[60]; }; /* Freecom stuffs the interrupt status in the INDEX_STAT bit of the ide * register. */ #define FCM_INT_STATUS 0x02 /* INDEX_STAT */ #define FCM_STATUS_BUSY 0x80 /* These are the packet types. The low bit indicates that this command * should wait for an interrupt. */ #define FCM_PACKET_ATAPI 0x21 #define FCM_PACKET_STATUS 0x20 /* Receive data from the IDE interface. The ATAPI packet has already * waited, so the data should be immediately available. */ #define FCM_PACKET_INPUT 0x81 /* Send data to the IDE interface. */ #define FCM_PACKET_OUTPUT 0x01 /* Write a value to an ide register. Or the ide register to write after * munging the address a bit. */ #define FCM_PACKET_IDE_WRITE 0x40 #define FCM_PACKET_IDE_READ 0xC0 /* All packets (except for status) are 64 bytes long. */ #define FCM_PACKET_LENGTH 64 #define FCM_STATUS_PACKET_LENGTH 4 static int init_freecom(struct us_data *us); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags)|(USB_US_TYPE_STOR<<24) } static struct usb_device_id freecom_usb_ids[] = { # include "unusual_freecom.h" { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, freecom_usb_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static struct us_unusual_dev freecom_unusual_dev_list[] = { # include "unusual_freecom.h" { } /* Terminating entry */ }; #undef 
UNUSUAL_DEV static int freecom_readdata (struct scsi_cmnd *srb, struct us_data *us, unsigned int ipipe, unsigned int opipe, int count) { struct freecom_xfer_wrap *fxfr = (struct freecom_xfer_wrap *) us->iobuf; int result; fxfr->Type = FCM_PACKET_INPUT | 0x00; fxfr->Timeout = 0; /* Short timeout for debugging. */ fxfr->Count = cpu_to_le32 (count); memset (fxfr->Pad, 0, sizeof (fxfr->Pad)); US_DEBUGP("Read data Freecom! (c=%d)\n", count); /* Issue the transfer command. */ result = usb_stor_bulk_transfer_buf (us, opipe, fxfr, FCM_PACKET_LENGTH, NULL); if (result != USB_STOR_XFER_GOOD) { US_DEBUGP ("Freecom readdata transport error\n"); return USB_STOR_TRANSPORT_ERROR; } /* Now transfer all of our blocks. */ US_DEBUGP("Start of read\n"); result = usb_stor_bulk_srb(us, ipipe, srb); US_DEBUGP("freecom_readdata done!\n"); if (result > USB_STOR_XFER_SHORT) return USB_STOR_TRANSPORT_ERROR; return USB_STOR_TRANSPORT_GOOD; } static int freecom_writedata (struct scsi_cmnd *srb, struct us_data *us, int unsigned ipipe, unsigned int opipe, int count) { struct freecom_xfer_wrap *fxfr = (struct freecom_xfer_wrap *) us->iobuf; int result; fxfr->Type = FCM_PACKET_OUTPUT | 0x00; fxfr->Timeout = 0; /* Short timeout for debugging. */ fxfr->Count = cpu_to_le32 (count); memset (fxfr->Pad, 0, sizeof (fxfr->Pad)); US_DEBUGP("Write data Freecom! (c=%d)\n", count); /* Issue the transfer command. */ result = usb_stor_bulk_transfer_buf (us, opipe, fxfr, FCM_PACKET_LENGTH, NULL); if (result != USB_STOR_XFER_GOOD) { US_DEBUGP ("Freecom writedata transport error\n"); return USB_STOR_TRANSPORT_ERROR; } /* Now transfer all of our blocks. */ US_DEBUGP("Start of write\n"); result = usb_stor_bulk_srb(us, opipe, srb); US_DEBUGP("freecom_writedata done!\n"); if (result > USB_STOR_XFER_SHORT) return USB_STOR_TRANSPORT_ERROR; return USB_STOR_TRANSPORT_GOOD; } /* * Transport for the Freecom USB/IDE adaptor. 
* */ static int freecom_transport(struct scsi_cmnd *srb, struct us_data *us) { struct freecom_cb_wrap *fcb; struct freecom_status *fst; unsigned int ipipe, opipe; /* We need both pipes. */ int result; unsigned int partial; int length; fcb = (struct freecom_cb_wrap *) us->iobuf; fst = (struct freecom_status *) us->iobuf; US_DEBUGP("Freecom TRANSPORT STARTED\n"); /* Get handles for both transports. */ opipe = us->send_bulk_pipe; ipipe = us->recv_bulk_pipe; /* The ATAPI Command always goes out first. */ fcb->Type = FCM_PACKET_ATAPI | 0x00; fcb->Timeout = 0; memcpy (fcb->Atapi, srb->cmnd, 12); memset (fcb->Filler, 0, sizeof (fcb->Filler)); US_DEBUG(pdump (srb->cmnd, 12)); /* Send it out. */ result = usb_stor_bulk_transfer_buf (us, opipe, fcb, FCM_PACKET_LENGTH, NULL); /* The Freecom device will only fail if there is something wrong in * USB land. It returns the status in its own registers, which * come back in the bulk pipe. */ if (result != USB_STOR_XFER_GOOD) { US_DEBUGP ("freecom transport error\n"); return USB_STOR_TRANSPORT_ERROR; } /* There are times we can optimize out this status read, but it * doesn't hurt us to always do it now. */ result = usb_stor_bulk_transfer_buf (us, ipipe, fst, FCM_STATUS_PACKET_LENGTH, &partial); US_DEBUGP("foo Status result %d %u\n", result, partial); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; US_DEBUG(pdump ((void *) fst, partial)); /* The firmware will time-out commands after 20 seconds. Some commands * can legitimately take longer than this, so we use a different * command that only waits for the interrupt and then sends status, * without having to send a new ATAPI command to the device. * * NOTE: There is some indication that a data transfer after a timeout * may not work, but that is a condition that should never happen. 
*/ while (fst->Status & FCM_STATUS_BUSY) { US_DEBUGP("20 second USB/ATAPI bridge TIMEOUT occurred!\n"); US_DEBUGP("fst->Status is %x\n", fst->Status); /* Get the status again */ fcb->Type = FCM_PACKET_STATUS; fcb->Timeout = 0; memset (fcb->Atapi, 0, sizeof(fcb->Atapi)); memset (fcb->Filler, 0, sizeof (fcb->Filler)); /* Send it out. */ result = usb_stor_bulk_transfer_buf (us, opipe, fcb, FCM_PACKET_LENGTH, NULL); /* The Freecom device will only fail if there is something * wrong in USB land. It returns the status in its own * registers, which come back in the bulk pipe. */ if (result != USB_STOR_XFER_GOOD) { US_DEBUGP ("freecom transport error\n"); return USB_STOR_TRANSPORT_ERROR; } /* get the data */ result = usb_stor_bulk_transfer_buf (us, ipipe, fst, FCM_STATUS_PACKET_LENGTH, &partial); US_DEBUGP("bar Status result %d %u\n", result, partial); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; US_DEBUG(pdump ((void *) fst, partial)); } if (partial != 4) return USB_STOR_TRANSPORT_ERROR; if ((fst->Status & 1) != 0) { US_DEBUGP("operation failed\n"); return USB_STOR_TRANSPORT_FAILED; } /* The device might not have as much data available as we * requested. If you ask for more than the device has, this reads * and such will hang. */ US_DEBUGP("Device indicates that it has %d bytes available\n", le16_to_cpu (fst->Count)); US_DEBUGP("SCSI requested %d\n", scsi_bufflen(srb)); /* Find the length we desire to read. */ switch (srb->cmnd[0]) { case INQUIRY: case REQUEST_SENSE: /* 16 or 18 bytes? spec says 18, lots of devices only have 16 */ case MODE_SENSE: case MODE_SENSE_10: length = le16_to_cpu(fst->Count); break; default: length = scsi_bufflen(srb); } /* verify that this amount is legal */ if (length > scsi_bufflen(srb)) { length = scsi_bufflen(srb); US_DEBUGP("Truncating request to match buffer length: %d\n", length); } /* What we do now depends on what direction the data is supposed to * move in. 
*/ switch (us->srb->sc_data_direction) { case DMA_FROM_DEVICE: /* catch bogus "read 0 length" case */ if (!length) break; /* Make sure that the status indicates that the device * wants data as well. */ if ((fst->Status & DRQ_STAT) == 0 || (fst->Reason & 3) != 2) { US_DEBUGP("SCSI wants data, drive doesn't have any\n"); return USB_STOR_TRANSPORT_FAILED; } result = freecom_readdata (srb, us, ipipe, opipe, length); if (result != USB_STOR_TRANSPORT_GOOD) return result; US_DEBUGP("FCM: Waiting for status\n"); result = usb_stor_bulk_transfer_buf (us, ipipe, fst, FCM_PACKET_LENGTH, &partial); US_DEBUG(pdump ((void *) fst, partial)); if (partial != 4 || result > USB_STOR_XFER_SHORT) return USB_STOR_TRANSPORT_ERROR; if ((fst->Status & ERR_STAT) != 0) { US_DEBUGP("operation failed\n"); return USB_STOR_TRANSPORT_FAILED; } if ((fst->Reason & 3) != 3) { US_DEBUGP("Drive seems still hungry\n"); return USB_STOR_TRANSPORT_FAILED; } US_DEBUGP("Transfer happy\n"); break; case DMA_TO_DEVICE: /* catch bogus "write 0 length" case */ if (!length) break; /* Make sure the status indicates that the device wants to * send us data. */ /* !!IMPLEMENT!! */ result = freecom_writedata (srb, us, ipipe, opipe, length); if (result != USB_STOR_TRANSPORT_GOOD) return result; US_DEBUGP("FCM: Waiting for status\n"); result = usb_stor_bulk_transfer_buf (us, ipipe, fst, FCM_PACKET_LENGTH, &partial); if (partial != 4 || result > USB_STOR_XFER_SHORT) return USB_STOR_TRANSPORT_ERROR; if ((fst->Status & ERR_STAT) != 0) { US_DEBUGP("operation failed\n"); return USB_STOR_TRANSPORT_FAILED; } if ((fst->Reason & 3) != 3) { US_DEBUGP("Drive seems still hungry\n"); return USB_STOR_TRANSPORT_FAILED; } US_DEBUGP("Transfer happy\n"); break; case DMA_NONE: /* Easy, do nothing. */ break; default: /* should never hit here -- filtered in usb.c */ US_DEBUGP ("freecom unimplemented direction: %d\n", us->srb->sc_data_direction); /* Return fail, SCSI seems to handle this better. 
*/ return USB_STOR_TRANSPORT_FAILED; break; } return USB_STOR_TRANSPORT_GOOD; } static int init_freecom(struct us_data *us) { int result; char *buffer = us->iobuf; /* The DMA-mapped I/O buffer is 64 bytes long, just right for * all our packets. No need to allocate any extra buffer space. */ result = usb_stor_control_msg(us, us->recv_ctrl_pipe, 0x4c, 0xc0, 0x4346, 0x0, buffer, 0x20, 3*HZ); buffer[32] = '\0'; US_DEBUGP("String returned from FC init is: %s\n", buffer); /* Special thanks to the people at Freecom for providing me with * this "magic sequence", which they use in their Windows and MacOS * drivers to make sure that all the attached perhiperals are * properly reset. */ /* send reset */ result = usb_stor_control_msg(us, us->send_ctrl_pipe, 0x4d, 0x40, 0x24d8, 0x0, NULL, 0x0, 3*HZ); US_DEBUGP("result from activate reset is %d\n", result); /* wait 250ms */ mdelay(250); /* clear reset */ result = usb_stor_control_msg(us, us->send_ctrl_pipe, 0x4d, 0x40, 0x24f8, 0x0, NULL, 0x0, 3*HZ); US_DEBUGP("result from clear reset is %d\n", result); /* wait 3 seconds */ mdelay(3 * 1000); return USB_STOR_TRANSPORT_GOOD; } static int usb_stor_freecom_reset(struct us_data *us) { printk (KERN_CRIT "freecom reset called\n"); /* We don't really have this feature. */ return FAILED; } #ifdef CONFIG_USB_STORAGE_DEBUG static void pdump (void *ibuffer, int length) { static char line[80]; int offset = 0; unsigned char *buffer = (unsigned char *) ibuffer; int i, j; int from, base; offset = 0; for (i = 0; i < length; i++) { if ((i & 15) == 0) { if (i > 0) { offset += sprintf (line+offset, " - "); for (j = i - 16; j < i; j++) { if (buffer[j] >= 32 && buffer[j] <= 126) line[offset++] = buffer[j]; else line[offset++] = '.'; } line[offset] = 0; US_DEBUGP("%s\n", line); offset = 0; } offset += sprintf (line+offset, "%08x:", i); } else if ((i & 7) == 0) { offset += sprintf (line+offset, " -"); } offset += sprintf (line+offset, " %02x", buffer[i] & 0xff); } /* Add the last "chunk" of data. 
*/ from = (length - 1) % 16; base = ((length - 1) / 16) * 16; for (i = from + 1; i < 16; i++) offset += sprintf (line+offset, " "); if (from < 8) offset += sprintf (line+offset, " "); offset += sprintf (line+offset, " - "); for (i = 0; i <= from; i++) { if (buffer[base+i] >= 32 && buffer[base+i] <= 126) line[offset++] = buffer[base+i]; else line[offset++] = '.'; } line[offset] = 0; US_DEBUGP("%s\n", line); offset = 0; } #endif static int freecom_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; result = usb_stor_probe1(&us, intf, id, (id - freecom_usb_ids) + freecom_unusual_dev_list); if (result) return result; us->transport_name = "Freecom"; us->transport = freecom_transport; us->transport_reset = usb_stor_freecom_reset; us->max_lun = 0; result = usb_stor_probe2(us); return result; } static struct usb_driver freecom_driver = { .name = "ums-freecom", .probe = freecom_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = freecom_usb_ids, .soft_unbind = 1, .no_dynamic_id = 1, }; module_usb_driver(freecom_driver);
gpl-2.0
ZdrowyGosciu/kernel_lge_d802_v30d
drivers/net/ethernet/seeq/sgiseeq.c
4891
22540
/* * sgiseeq.c: Seeq8003 ethernet driver for SGI machines. * * Copyright (C) 1996 David S. Miller (davem@davemloft.net) */ #undef DEBUG #include <linux/dma-mapping.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/platform_device.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <asm/sgi/hpc3.h> #include <asm/sgi/ip22.h> #include <asm/sgi/seeq.h> #include "sgiseeq.h" static char *sgiseeqstr = "SGI Seeq8003"; /* * If you want speed, you do something silly, it always has worked for me. So, * with that in mind, I've decided to make this driver look completely like a * stupid Lance from a driver architecture perspective. Only difference is that * here our "ring buffer" looks and acts like a real Lance one does but is * laid out like how the HPC DMA and the Seeq want it to. You'd be surprised * how a stupid idea like this can pay off in performance, not to mention * making this driver 2,000 times easier to write. ;-) */ /* Tune these if we tend to run out often etc. */ #define SEEQ_RX_BUFFERS 16 #define SEEQ_TX_BUFFERS 16 #define PKT_BUF_SZ 1584 #define NEXT_RX(i) (((i) + 1) & (SEEQ_RX_BUFFERS - 1)) #define NEXT_TX(i) (((i) + 1) & (SEEQ_TX_BUFFERS - 1)) #define PREV_RX(i) (((i) - 1) & (SEEQ_RX_BUFFERS - 1)) #define PREV_TX(i) (((i) - 1) & (SEEQ_TX_BUFFERS - 1)) #define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \ sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \ sp->tx_old - sp->tx_new - 1) #define VIRT_TO_DMA(sp, v) ((sp)->srings_dma + \ (dma_addr_t)((unsigned long)(v) - \ (unsigned long)((sp)->rx_desc))) /* Copy frames shorter than rx_copybreak, otherwise pass on up in * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha). 
*/ static int rx_copybreak = 100; #define PAD_SIZE (128 - sizeof(struct hpc_dma_desc) - sizeof(void *)) struct sgiseeq_rx_desc { volatile struct hpc_dma_desc rdma; u8 padding[PAD_SIZE]; struct sk_buff *skb; }; struct sgiseeq_tx_desc { volatile struct hpc_dma_desc tdma; u8 padding[PAD_SIZE]; struct sk_buff *skb; }; /* * Warning: This structure is laid out in a certain way because HPC dma * descriptors must be 8-byte aligned. So don't touch this without * some care. */ struct sgiseeq_init_block { /* Note the name ;-) */ struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS]; struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS]; }; struct sgiseeq_private { struct sgiseeq_init_block *srings; dma_addr_t srings_dma; /* Ptrs to the descriptors in uncached space. */ struct sgiseeq_rx_desc *rx_desc; struct sgiseeq_tx_desc *tx_desc; char *name; struct hpc3_ethregs *hregs; struct sgiseeq_regs *sregs; /* Ring entry counters. */ unsigned int rx_new, tx_new; unsigned int rx_old, tx_old; int is_edlc; unsigned char control; unsigned char mode; spinlock_t tx_lock; }; static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr) { dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), DMA_FROM_DEVICE); } static inline void dma_sync_desc_dev(struct net_device *dev, void *addr) { dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), DMA_TO_DEVICE); } static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs) { hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ; udelay(20); hregs->reset = 0; } static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs, struct sgiseeq_regs *sregs) { hregs->rx_ctrl = hregs->tx_ctrl = 0; hpc3_eth_reset(hregs); } #define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \ SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC) static inline void seeq_go(struct sgiseeq_private *sp, struct hpc3_ethregs *hregs, struct sgiseeq_regs *sregs) { sregs->rstat = sp->mode | RSTAT_GO_BITS; hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE; } 
static inline void __sgiseeq_set_mac_address(struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); struct sgiseeq_regs *sregs = sp->sregs; int i; sregs->tstat = SEEQ_TCMD_RB0; for (i = 0; i < 6; i++) sregs->rw.eth_addr[i] = dev->dev_addr[i]; } static int sgiseeq_set_mac_address(struct net_device *dev, void *addr) { struct sgiseeq_private *sp = netdev_priv(dev); struct sockaddr *sa = addr; memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); spin_lock_irq(&sp->tx_lock); __sgiseeq_set_mac_address(dev); spin_unlock_irq(&sp->tx_lock); return 0; } #define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD) #define RCNTCFG_INIT (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE) #define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT)) static int seeq_init_ring(struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); int i; netif_stop_queue(dev); sp->rx_new = sp->tx_new = 0; sp->rx_old = sp->tx_old = 0; __sgiseeq_set_mac_address(dev); /* Setup tx ring. */ for(i = 0; i < SEEQ_TX_BUFFERS; i++) { sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT; dma_sync_desc_dev(dev, &sp->tx_desc[i]); } /* And now the rx ring. */ for (i = 0; i < SEEQ_RX_BUFFERS; i++) { if (!sp->rx_desc[i].skb) { dma_addr_t dma_addr; struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ); if (skb == NULL) return -ENOMEM; skb_reserve(skb, 2); dma_addr = dma_map_single(dev->dev.parent, skb->data - 2, PKT_BUF_SZ, DMA_FROM_DEVICE); sp->rx_desc[i].skb = skb; sp->rx_desc[i].rdma.pbuf = dma_addr; } sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT; dma_sync_desc_dev(dev, &sp->rx_desc[i]); } sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR; dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]); return 0; } static void seeq_purge_ring(struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); int i; /* clear tx ring. */ for (i = 0; i < SEEQ_TX_BUFFERS; i++) { if (sp->tx_desc[i].skb) { dev_kfree_skb(sp->tx_desc[i].skb); sp->tx_desc[i].skb = NULL; } } /* And now the rx ring. 
*/ for (i = 0; i < SEEQ_RX_BUFFERS; i++) { if (sp->rx_desc[i].skb) { dev_kfree_skb(sp->rx_desc[i].skb); sp->rx_desc[i].skb = NULL; } } } #ifdef DEBUG static struct sgiseeq_private *gpriv; static struct net_device *gdev; static void sgiseeq_dump_rings(void) { static int once; struct sgiseeq_rx_desc *r = gpriv->rx_desc; struct sgiseeq_tx_desc *t = gpriv->tx_desc; struct hpc3_ethregs *hregs = gpriv->hregs; int i; if (once) return; once++; printk("RING DUMP:\n"); for (i = 0; i < SEEQ_RX_BUFFERS; i++) { printk("RX [%d]: @(%p) [%08x,%08x,%08x] ", i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo, r[i].rdma.pnext); i += 1; printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n", i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo, r[i].rdma.pnext); } for (i = 0; i < SEEQ_TX_BUFFERS; i++) { printk("TX [%d]: @(%p) [%08x,%08x,%08x] ", i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo, t[i].tdma.pnext); i += 1; printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n", i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo, t[i].tdma.pnext); } printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n", gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old); printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n", hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl); printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n", hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl); } #endif #define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF) #define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2) static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp, struct sgiseeq_regs *sregs) { struct hpc3_ethregs *hregs = sp->hregs; int err; reset_hpc3_and_seeq(hregs, sregs); err = seeq_init_ring(dev); if (err) return err; /* Setup to field the proper interrupt types. 
*/ if (sp->is_edlc) { sregs->tstat = TSTAT_INIT_EDLC; sregs->rw.wregs.control = sp->control; sregs->rw.wregs.frame_gap = 0; } else { sregs->tstat = TSTAT_INIT_SEEQ; } hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc); hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc); seeq_go(sp, hregs, sregs); return 0; } static void record_rx_errors(struct net_device *dev, unsigned char status) { if (status & SEEQ_RSTAT_OVERF || status & SEEQ_RSTAT_SFRAME) dev->stats.rx_over_errors++; if (status & SEEQ_RSTAT_CERROR) dev->stats.rx_crc_errors++; if (status & SEEQ_RSTAT_DERROR) dev->stats.rx_frame_errors++; if (status & SEEQ_RSTAT_REOF) dev->stats.rx_errors++; } static inline void rx_maybe_restart(struct sgiseeq_private *sp, struct hpc3_ethregs *hregs, struct sgiseeq_regs *sregs) { if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) { hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new); seeq_go(sp, hregs, sregs); } } static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp, struct hpc3_ethregs *hregs, struct sgiseeq_regs *sregs) { struct sgiseeq_rx_desc *rd; struct sk_buff *skb = NULL; struct sk_buff *newskb; unsigned char pkt_status; int len = 0; unsigned int orig_end = PREV_RX(sp->rx_new); /* Service every received packet. */ rd = &sp->rx_desc[sp->rx_new]; dma_sync_desc_cpu(dev, rd); while (!(rd->rdma.cntinfo & HPCDMA_OWN)) { len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3; dma_unmap_single(dev->dev.parent, rd->rdma.pbuf, PKT_BUF_SZ, DMA_FROM_DEVICE); pkt_status = rd->skb->data[len]; if (pkt_status & SEEQ_RSTAT_FIG) { /* Packet is OK. 
*/ /* We don't want to receive our own packets */ if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) { if (len > rx_copybreak) { skb = rd->skb; newskb = netdev_alloc_skb(dev, PKT_BUF_SZ); if (!newskb) { newskb = skb; skb = NULL; goto memory_squeeze; } skb_reserve(newskb, 2); } else { skb = netdev_alloc_skb_ip_align(dev, len); if (skb) skb_copy_to_linear_data(skb, rd->skb->data, len); newskb = rd->skb; } memory_squeeze: if (skb) { skb_put(skb, len); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; } else { printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", dev->name); dev->stats.rx_dropped++; } } else { /* Silently drop my own packets */ newskb = rd->skb; } } else { record_rx_errors(dev, pkt_status); newskb = rd->skb; } rd->skb = newskb; rd->rdma.pbuf = dma_map_single(dev->dev.parent, newskb->data - 2, PKT_BUF_SZ, DMA_FROM_DEVICE); /* Return the entry to the ring pool. */ rd->rdma.cntinfo = RCNTINFO_INIT; sp->rx_new = NEXT_RX(sp->rx_new); dma_sync_desc_dev(dev, rd); rd = &sp->rx_desc[sp->rx_new]; dma_sync_desc_cpu(dev, rd); } dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]); sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR); dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]); dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]); sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR; dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]); rx_maybe_restart(sp, hregs, sregs); } static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp, struct sgiseeq_regs *sregs) { if (sp->is_edlc) { sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT); sregs->rw.wregs.control = sp->control; } } static inline void kick_tx(struct net_device *dev, struct sgiseeq_private *sp, struct hpc3_ethregs *hregs) { struct sgiseeq_tx_desc *td; int i = sp->tx_old; /* If the HPC aint doin nothin, and there are more packets * with ETXD cleared and XIU set we must make very certain * that we restart 
the HPC else we risk locking up the * adapter. The following code is only safe iff the HPCDMA * is not active! */ td = &sp->tx_desc[i]; dma_sync_desc_cpu(dev, td); while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) == (HPCDMA_XIU | HPCDMA_ETXD)) { i = NEXT_TX(i); td = &sp->tx_desc[i]; dma_sync_desc_cpu(dev, td); } if (td->tdma.cntinfo & HPCDMA_XIU) { hregs->tx_ndptr = VIRT_TO_DMA(sp, td); hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE; } } static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp, struct hpc3_ethregs *hregs, struct sgiseeq_regs *sregs) { struct sgiseeq_tx_desc *td; unsigned long status = hregs->tx_ctrl; int j; tx_maybe_reset_collisions(sp, sregs); if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) { /* Oops, HPC detected some sort of error. */ if (status & SEEQ_TSTAT_R16) dev->stats.tx_aborted_errors++; if (status & SEEQ_TSTAT_UFLOW) dev->stats.tx_fifo_errors++; if (status & SEEQ_TSTAT_LCLS) dev->stats.collisions++; } /* Ack 'em... */ for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) { td = &sp->tx_desc[j]; dma_sync_desc_cpu(dev, td); if (!(td->tdma.cntinfo & (HPCDMA_XIU))) break; if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) { if (!(status & HPC3_ETXCTRL_ACTIVE)) { hregs->tx_ndptr = VIRT_TO_DMA(sp, td); hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE; } break; } dev->stats.tx_packets++; sp->tx_old = NEXT_TX(sp->tx_old); td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE); td->tdma.cntinfo |= HPCDMA_EOX; if (td->skb) { dev_kfree_skb_any(td->skb); td->skb = NULL; } dma_sync_desc_dev(dev, td); } } static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *) dev_id; struct sgiseeq_private *sp = netdev_priv(dev); struct hpc3_ethregs *hregs = sp->hregs; struct sgiseeq_regs *sregs = sp->sregs; spin_lock(&sp->tx_lock); /* Ack the IRQ and set software state. */ hregs->reset = HPC3_ERST_CLRIRQ; /* Always check for received packets. 
*/ sgiseeq_rx(dev, sp, hregs, sregs); /* Only check for tx acks if we have something queued. */ if (sp->tx_old != sp->tx_new) sgiseeq_tx(dev, sp, hregs, sregs); if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) { netif_wake_queue(dev); } spin_unlock(&sp->tx_lock); return IRQ_HANDLED; } static int sgiseeq_open(struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); struct sgiseeq_regs *sregs = sp->sregs; unsigned int irq = dev->irq; int err; if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) { printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq); return -EAGAIN; } err = init_seeq(dev, sp, sregs); if (err) goto out_free_irq; netif_start_queue(dev); return 0; out_free_irq: free_irq(irq, dev); return err; } static int sgiseeq_close(struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); struct sgiseeq_regs *sregs = sp->sregs; unsigned int irq = dev->irq; netif_stop_queue(dev); /* Shutdown the Seeq. */ reset_hpc3_and_seeq(sp->hregs, sregs); free_irq(irq, dev); seeq_purge_ring(dev); return 0; } static inline int sgiseeq_reset(struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); struct sgiseeq_regs *sregs = sp->sregs; int err; err = init_seeq(dev, sp, sregs); if (err) return err; dev->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue(dev); return 0; } static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); struct hpc3_ethregs *hregs = sp->hregs; unsigned long flags; struct sgiseeq_tx_desc *td; int len, entry; spin_lock_irqsave(&sp->tx_lock, flags); /* Setup... */ len = skb->len; if (len < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN)) { spin_unlock_irqrestore(&sp->tx_lock, flags); return NETDEV_TX_OK; } len = ETH_ZLEN; } dev->stats.tx_bytes += len; entry = sp->tx_new; td = &sp->tx_desc[entry]; dma_sync_desc_cpu(dev, td); /* Create entry. 
There are so many races with adding a new * descriptor to the chain: * 1) Assume that the HPC is off processing a DMA chain while * we are changing all of the following. * 2) Do no allow the HPC to look at a new descriptor until * we have completely set up it's state. This means, do * not clear HPCDMA_EOX in the current last descritptor * until the one we are adding looks consistent and could * be processes right now. * 3) The tx interrupt code must notice when we've added a new * entry and the HPC got to the end of the chain before we * added this new entry and restarted it. */ td->skb = skb; td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data, len, DMA_TO_DEVICE); td->tdma.cntinfo = (len & HPCDMA_BCNT) | HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX; dma_sync_desc_dev(dev, td); if (sp->tx_old != sp->tx_new) { struct sgiseeq_tx_desc *backend; backend = &sp->tx_desc[PREV_TX(sp->tx_new)]; dma_sync_desc_cpu(dev, backend); backend->tdma.cntinfo &= ~HPCDMA_EOX; dma_sync_desc_dev(dev, backend); } sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */ /* Maybe kick the HPC back into motion. */ if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE)) kick_tx(dev, sp, hregs); if (!TX_BUFFS_AVAIL(sp)) netif_stop_queue(dev); spin_unlock_irqrestore(&sp->tx_lock, flags); return NETDEV_TX_OK; } static void timeout(struct net_device *dev) { printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name); sgiseeq_reset(dev); dev->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue(dev); } static void sgiseeq_set_multicast(struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); unsigned char oldmode = sp->mode; if(dev->flags & IFF_PROMISC) sp->mode = SEEQ_RCMD_RANY; else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) sp->mode = SEEQ_RCMD_RBMCAST; else sp->mode = SEEQ_RCMD_RBCAST; /* XXX I know this sucks, but is there a better way to reprogram * XXX the receiver? At least, this shouldn't happen too often. 
*/ if (oldmode != sp->mode) sgiseeq_reset(dev); } static inline void setup_tx_ring(struct net_device *dev, struct sgiseeq_tx_desc *buf, int nbufs) { struct sgiseeq_private *sp = netdev_priv(dev); int i = 0; while (i < (nbufs - 1)) { buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1); buf[i].tdma.pbuf = 0; dma_sync_desc_dev(dev, &buf[i]); i++; } buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf); dma_sync_desc_dev(dev, &buf[i]); } static inline void setup_rx_ring(struct net_device *dev, struct sgiseeq_rx_desc *buf, int nbufs) { struct sgiseeq_private *sp = netdev_priv(dev); int i = 0; while (i < (nbufs - 1)) { buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1); buf[i].rdma.pbuf = 0; dma_sync_desc_dev(dev, &buf[i]); i++; } buf[i].rdma.pbuf = 0; buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf); dma_sync_desc_dev(dev, &buf[i]); } static const struct net_device_ops sgiseeq_netdev_ops = { .ndo_open = sgiseeq_open, .ndo_stop = sgiseeq_close, .ndo_start_xmit = sgiseeq_start_xmit, .ndo_tx_timeout = timeout, .ndo_set_rx_mode = sgiseeq_set_multicast, .ndo_set_mac_address = sgiseeq_set_mac_address, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, }; static int __devinit sgiseeq_probe(struct platform_device *pdev) { struct sgiseeq_platform_data *pd = pdev->dev.platform_data; struct hpc3_regs *hpcregs = pd->hpc; struct sgiseeq_init_block *sr; unsigned int irq = pd->irq; struct sgiseeq_private *sp; struct net_device *dev; int err; dev = alloc_etherdev(sizeof (struct sgiseeq_private)); if (!dev) { err = -ENOMEM; goto err_out; } platform_set_drvdata(pdev, dev); sp = netdev_priv(dev); /* Make private data page aligned */ sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings), &sp->srings_dma, GFP_KERNEL); if (!sr) { printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n"); err = -ENOMEM; goto err_out_free_dev; } sp->srings = sr; sp->rx_desc = sp->srings->rxvector; sp->tx_desc = sp->srings->txvector; /* A couple calculations now, saves many cycles later. 
*/ setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS); setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS); memcpy(dev->dev_addr, pd->mac, ETH_ALEN); #ifdef DEBUG gpriv = sp; gdev = dev; #endif sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0]; sp->hregs = &hpcregs->ethregs; sp->name = sgiseeqstr; sp->mode = SEEQ_RCMD_RBCAST; /* Setup PIO and DMA transfer timing */ sp->hregs->pconfig = 0x161; sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP | HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026; /* Setup PIO and DMA transfer timing */ sp->hregs->pconfig = 0x161; sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP | HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026; /* Reset the chip. */ hpc3_eth_reset(sp->hregs); sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff); if (sp->is_edlc) sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT | SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT | SEEQ_CTRL_ENCARR; dev->netdev_ops = &sgiseeq_netdev_ops; dev->watchdog_timeo = (200 * HZ) / 1000; dev->irq = irq; if (register_netdev(dev)) { printk(KERN_ERR "Sgiseeq: Cannot register net device, " "aborting.\n"); err = -ENODEV; goto err_out_free_page; } printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr); return 0; err_out_free_page: free_page((unsigned long) sp->srings); err_out_free_dev: free_netdev(dev); err_out: return err; } static int __exit sgiseeq_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sgiseeq_private *sp = netdev_priv(dev); unregister_netdev(dev); dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings, sp->srings_dma); free_netdev(dev); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver sgiseeq_driver = { .probe = sgiseeq_probe, .remove = __exit_p(sgiseeq_remove), .driver = { .name = "sgiseeq", .owner = THIS_MODULE, } }; module_platform_driver(sgiseeq_driver); MODULE_DESCRIPTION("SGI Seeq 8003 driver"); MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>"); 
/* Module metadata: GPL license and platform-bus alias for autoloading. */
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sgiseeq");
gpl-2.0
sony-msm8960/android_kernel_sony_apq8064
drivers/staging/iio/imu/adis16400_ring.c
4891
5632
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/export.h>

#include "../iio.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "adis16400.h"

/**
 * adis16400_spi_read_burst() - read all data registers
 * @dev: device associated with child of actual device (iio_dev or iio_trig)
 * @rx: somewhere to pass back the value read (min size is 24 bytes)
 *
 * Issues a single GLOB_CMD burst read (2-byte command, 24-byte response)
 * under buf_lock.  The bus speed is temporarily capped at
 * ADIS16400_SPI_BURST and restored afterwards; spi_setup() is called
 * around both changes to apply them.
 **/
static int adis16400_spi_read_burst(struct device *dev, u8 *rx)
{
	struct spi_message msg;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct adis16400_state *st = iio_priv(indio_dev);
	u32 old_speed_hz = st->us->max_speed_hz;
	int ret;

	struct spi_transfer xfers[] = {
		{
			.tx_buf = st->tx,
			.bits_per_word = 8,
			.len = 2,
		}, {
			.rx_buf = rx,
			.bits_per_word = 8,
			.len = 24,
		},
	};

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADIS16400_READ_REG(ADIS16400_GLOB_CMD);
	st->tx[1] = 0;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);

	/* Burst mode has a lower maximum clock than normal register access. */
	st->us->max_speed_hz = min(ADIS16400_SPI_BURST, old_speed_hz);
	spi_setup(st->us);

	ret = spi_sync(st->us, &msg);
	if (ret)
		dev_err(&st->us->dev, "problem when burst reading");

	/* Restore the caller-visible bus speed before releasing the lock. */
	st->us->max_speed_hz = old_speed_hz;
	spi_setup(st->us);
	mutex_unlock(&st->buf_lock);
	return ret;
}

/*
 * Pre-built big-endian read commands for every scannable data register,
 * indexed to match the scan-mask bit positions used below.
 * NOTE(review): elements are cpu_to_be16() values stored in a u16 array —
 * sparse would prefer __be16 here; the bytes on the wire are correct
 * either way.
 */
static const u16 read_all_tx_array[] = {
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_SUPPLY_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_XGYRO_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_YGYRO_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_ZGYRO_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_XACCL_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_YACCL_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_ZACCL_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16350_XTEMP_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16350_YTEMP_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16350_ZTEMP_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_AUX_ADC)),
};

static int
adis16350_spi_read_all(struct device *dev, u8 *rx) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct adis16400_state *st = iio_priv(indio_dev); struct spi_message msg; int i, j = 0, ret; struct spi_transfer *xfers; int scan_count = bitmap_weight(indio_dev->active_scan_mask, indio_dev->masklength); xfers = kzalloc(sizeof(*xfers)*(scan_count + 1), GFP_KERNEL); if (xfers == NULL) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(read_all_tx_array); i++) if (test_bit(i, indio_dev->active_scan_mask)) { xfers[j].tx_buf = &read_all_tx_array[i]; xfers[j].bits_per_word = 16; xfers[j].len = 2; xfers[j + 1].rx_buf = rx + j*2; j++; } xfers[j].bits_per_word = 16; xfers[j].len = 2; spi_message_init(&msg); for (j = 0; j < scan_count + 1; j++) spi_message_add_tail(&xfers[j], &msg); ret = spi_sync(st->us, &msg); kfree(xfers); return ret; } /* Whilst this makes a lot of calls to iio_sw_ring functions - it is to device * specific to be rolled into the core. */ static irqreturn_t adis16400_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct adis16400_state *st = iio_priv(indio_dev); struct iio_buffer *ring = indio_dev->buffer; int i = 0, j, ret = 0; s16 *data; size_t datasize = ring->access->get_bytes_per_datum(ring); /* Asumption that long is enough for maximum channels */ unsigned long mask = *indio_dev->active_scan_mask; int scan_count = bitmap_weight(indio_dev->active_scan_mask, indio_dev->masklength); data = kmalloc(datasize , GFP_KERNEL); if (data == NULL) { dev_err(&st->us->dev, "memory alloc failed in ring bh"); return -ENOMEM; } if (scan_count) { if (st->variant->flags & ADIS16400_NO_BURST) { ret = adis16350_spi_read_all(&indio_dev->dev, st->rx); if (ret < 0) goto err; for (; i < scan_count; i++) data[i] = *(s16 *)(st->rx + i*2); } else { ret = adis16400_spi_read_burst(&indio_dev->dev, st->rx); if (ret < 0) goto err; for (; i < scan_count; i++) { j = __ffs(mask); mask &= ~(1 << j); data[i] = be16_to_cpup( 
(__be16 *)&(st->rx[j*2])); } } } /* Guaranteed to be aligned with 8 byte boundary */ if (ring->scan_timestamp) *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; ring->access->store_to(indio_dev->buffer, (u8 *) data, pf->timestamp); iio_trigger_notify_done(indio_dev->trig); kfree(data); return IRQ_HANDLED; err: kfree(data); return ret; } void adis16400_unconfigure_ring(struct iio_dev *indio_dev) { iio_dealloc_pollfunc(indio_dev->pollfunc); iio_sw_rb_free(indio_dev->buffer); } static const struct iio_buffer_setup_ops adis16400_ring_setup_ops = { .preenable = &iio_sw_buffer_preenable, .postenable = &iio_triggered_buffer_postenable, .predisable = &iio_triggered_buffer_predisable, }; int adis16400_configure_ring(struct iio_dev *indio_dev) { int ret = 0; struct iio_buffer *ring; ring = iio_sw_rb_allocate(indio_dev); if (!ring) { ret = -ENOMEM; return ret; } indio_dev->buffer = ring; ring->scan_timestamp = true; indio_dev->setup_ops = &adis16400_ring_setup_ops; indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time, &adis16400_trigger_handler, IRQF_ONESHOT, indio_dev, "%s_consumer%d", indio_dev->name, indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_iio_sw_rb_free; } indio_dev->modes |= INDIO_BUFFER_TRIGGERED; return 0; error_iio_sw_rb_free: iio_sw_rb_free(indio_dev->buffer); return ret; }
gpl-2.0