repo_name
string
path
string
copies
string
size
string
content
string
license
string
gromaudio/linux-imx6-31053
arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
5160
4186
/***********************license start*************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2008 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information ***********************license end**************************************/ /** * * Helper utilities for qlm_jtag. * */ #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-helper-jtag.h> /** * Initialize the internal QLM JTAG logic to allow programming * of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions. * These functions should only be used at the direction of Cavium * Networks. Programming incorrect values into the JTAG chain * can cause chip damage. */ void cvmx_helper_qlm_jtag_init(void) { union cvmx_ciu_qlm_jtgc jtgc; uint32_t clock_div = 0; uint32_t divisor = cvmx_sysinfo_get()->cpu_clock_hz / (25 * 1000000); divisor = (divisor - 1) >> 2; /* Convert the divisor into a power of 2 shift */ while (divisor) { clock_div++; divisor = divisor >> 1; } /* * Clock divider for QLM JTAG operations. 
eclk is divided by * 2^(CLK_DIV + 2) */ jtgc.u64 = 0; jtgc.s.clk_div = clock_div; jtgc.s.mux_sel = 0; if (OCTEON_IS_MODEL(OCTEON_CN52XX)) jtgc.s.bypass = 0x3; else jtgc.s.bypass = 0xf; cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64); cvmx_read_csr(CVMX_CIU_QLM_JTGC); } /** * Write up to 32bits into the QLM jtag chain. Bits are shifted * into the MSB and out the LSB, so you should shift in the low * order bits followed by the high order bits. The JTAG chain is * 4 * 268 bits long, or 1072. * * @qlm: QLM to shift value into * @bits: Number of bits to shift in (1-32). * @data: Data to shift in. Bit 0 enters the chain first, followed by * bit 1, etc. * * Returns The low order bits of the JTAG chain that shifted out of the * circle. */ uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data) { union cvmx_ciu_qlm_jtgd jtgd; jtgd.u64 = 0; jtgd.s.shift = 1; jtgd.s.shft_cnt = bits - 1; jtgd.s.shft_reg = data; if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) jtgd.s.select = 1 << qlm; cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64); do { jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD); } while (jtgd.s.shift); return jtgd.s.shft_reg >> (32 - bits); } /** * Shift long sequences of zeros into the QLM JTAG chain. It is * common to need to shift more than 32 bits of zeros into the * chain. This function is a convience wrapper around * cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of * zeros at a time. * * @qlm: QLM to shift zeros into * @bits: */ void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits) { while (bits > 0) { int n = bits; if (n > 32) n = 32; cvmx_helper_qlm_jtag_shift(qlm, n, 0); bits -= n; } } /** * Program the QLM JTAG chain into all lanes of the QLM. You must * have already shifted in 268*4, or 1072 bits into the JTAG * chain. Updating invalid values can possibly cause chip damage. 
* * @qlm: QLM to program */ void cvmx_helper_qlm_jtag_update(int qlm) { union cvmx_ciu_qlm_jtgd jtgd; /* Update the new data */ jtgd.u64 = 0; jtgd.s.update = 1; if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) jtgd.s.select = 1 << qlm; cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64); do { jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD); } while (jtgd.s.update); }
gpl-2.0
snandlal/samsung_kernel
drivers/acpi/pci_link.c
7976
23676
/* * pci_link.c - ACPI PCI Interrupt Link Device Driver ($Revision: 34 $) * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * TBD: * 1. Support more than one IRQ resource entry per link device (index). * 2. Implement start/stop mechanism and use ACPI Bus Driver facilities * for IRQ management (e.g. start()->_SRS). 
*/ #include <linux/syscore_ops.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/pm.h> #include <linux/pci.h> #include <linux/mutex.h> #include <linux/slab.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #define PREFIX "ACPI: " #define _COMPONENT ACPI_PCI_COMPONENT ACPI_MODULE_NAME("pci_link"); #define ACPI_PCI_LINK_CLASS "pci_irq_routing" #define ACPI_PCI_LINK_DEVICE_NAME "PCI Interrupt Link" #define ACPI_PCI_LINK_FILE_INFO "info" #define ACPI_PCI_LINK_FILE_STATUS "state" #define ACPI_PCI_LINK_MAX_POSSIBLE 16 static int acpi_pci_link_add(struct acpi_device *device); static int acpi_pci_link_remove(struct acpi_device *device, int type); static const struct acpi_device_id link_device_ids[] = { {"PNP0C0F", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, link_device_ids); static struct acpi_driver acpi_pci_link_driver = { .name = "pci_link", .class = ACPI_PCI_LINK_CLASS, .ids = link_device_ids, .ops = { .add = acpi_pci_link_add, .remove = acpi_pci_link_remove, }, }; /* * If a link is initialized, we never change its active and initialized * later even the link is disable. 
Instead, we just repick the active irq */ struct acpi_pci_link_irq { u8 active; /* Current IRQ */ u8 triggering; /* All IRQs */ u8 polarity; /* All IRQs */ u8 resource_type; u8 possible_count; u8 possible[ACPI_PCI_LINK_MAX_POSSIBLE]; u8 initialized:1; u8 reserved:7; }; struct acpi_pci_link { struct list_head list; struct acpi_device *device; struct acpi_pci_link_irq irq; int refcnt; }; static LIST_HEAD(acpi_link_list); static DEFINE_MUTEX(acpi_link_lock); /* -------------------------------------------------------------------------- PCI Link Device Management -------------------------------------------------------------------------- */ /* * set context (link) possible list from resource list */ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource, void *context) { struct acpi_pci_link *link = context; u32 i; switch (resource->type) { case ACPI_RESOURCE_TYPE_START_DEPENDENT: case ACPI_RESOURCE_TYPE_END_TAG: return AE_OK; case ACPI_RESOURCE_TYPE_IRQ: { struct acpi_resource_irq *p = &resource->data.irq; if (!p || !p->interrupt_count) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Blank _PRS IRQ resource\n")); return AE_OK; } for (i = 0; (i < p->interrupt_count && i < ACPI_PCI_LINK_MAX_POSSIBLE); i++) { if (!p->interrupts[i]) { printk(KERN_WARNING PREFIX "Invalid _PRS IRQ %d\n", p->interrupts[i]); continue; } link->irq.possible[i] = p->interrupts[i]; link->irq.possible_count++; } link->irq.triggering = p->triggering; link->irq.polarity = p->polarity; link->irq.resource_type = ACPI_RESOURCE_TYPE_IRQ; break; } case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: { struct acpi_resource_extended_irq *p = &resource->data.extended_irq; if (!p || !p->interrupt_count) { printk(KERN_WARNING PREFIX "Blank _PRS EXT IRQ resource\n"); return AE_OK; } for (i = 0; (i < p->interrupt_count && i < ACPI_PCI_LINK_MAX_POSSIBLE); i++) { if (!p->interrupts[i]) { printk(KERN_WARNING PREFIX "Invalid _PRS IRQ %d\n", p->interrupts[i]); continue; } link->irq.possible[i] = p->interrupts[i]; 
link->irq.possible_count++; } link->irq.triggering = p->triggering; link->irq.polarity = p->polarity; link->irq.resource_type = ACPI_RESOURCE_TYPE_EXTENDED_IRQ; break; } default: printk(KERN_ERR PREFIX "_PRS resource type 0x%x isn't an IRQ\n", resource->type); return AE_OK; } return AE_CTRL_TERMINATE; } static int acpi_pci_link_get_possible(struct acpi_pci_link *link) { acpi_status status; status = acpi_walk_resources(link->device->handle, METHOD_NAME__PRS, acpi_pci_link_check_possible, link); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRS")); return -ENODEV; } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d possible IRQs\n", link->irq.possible_count)); return 0; } static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource, void *context) { int *irq = context; switch (resource->type) { case ACPI_RESOURCE_TYPE_START_DEPENDENT: case ACPI_RESOURCE_TYPE_END_TAG: return AE_OK; case ACPI_RESOURCE_TYPE_IRQ: { struct acpi_resource_irq *p = &resource->data.irq; if (!p || !p->interrupt_count) { /* * IRQ descriptors may have no IRQ# bits set, * particularly those those w/ _STA disabled */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Blank _CRS IRQ resource\n")); return AE_OK; } *irq = p->interrupts[0]; break; } case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: { struct acpi_resource_extended_irq *p = &resource->data.extended_irq; if (!p || !p->interrupt_count) { /* * extended IRQ descriptors must * return at least 1 IRQ */ printk(KERN_WARNING PREFIX "Blank _CRS EXT IRQ resource\n"); return AE_OK; } *irq = p->interrupts[0]; break; } break; default: printk(KERN_ERR PREFIX "_CRS resource type 0x%x isn't an IRQ\n", resource->type); return AE_OK; } return AE_CTRL_TERMINATE; } /* * Run _CRS and set link->irq.active * * return value: * 0 - success * !0 - failure */ static int acpi_pci_link_get_current(struct acpi_pci_link *link) { int result = 0; acpi_status status; int irq = 0; link->irq.active = 0; /* in practice, status disabled is meaningless, ignore it */ if 
(acpi_strict) { /* Query _STA, set link->device->status */ result = acpi_bus_get_status(link->device); if (result) { printk(KERN_ERR PREFIX "Unable to read status\n"); goto end; } if (!link->device->status.enabled) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Link disabled\n")); return 0; } } /* * Query and parse _CRS to get the current IRQ assignment. */ status = acpi_walk_resources(link->device->handle, METHOD_NAME__CRS, acpi_pci_link_check_current, &irq); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _CRS")); result = -ENODEV; goto end; } if (acpi_strict && !irq) { printk(KERN_ERR PREFIX "_CRS returned 0\n"); result = -ENODEV; } link->irq.active = irq; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Link at IRQ %d \n", link->irq.active)); end: return result; } static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) { int result; acpi_status status; struct { struct acpi_resource res; struct acpi_resource end; } *resource; struct acpi_buffer buffer = { 0, NULL }; if (!irq) return -EINVAL; resource = kzalloc(sizeof(*resource) + 1, irqs_disabled() ? 
GFP_ATOMIC: GFP_KERNEL); if (!resource) return -ENOMEM; buffer.length = sizeof(*resource) + 1; buffer.pointer = resource; switch (link->irq.resource_type) { case ACPI_RESOURCE_TYPE_IRQ: resource->res.type = ACPI_RESOURCE_TYPE_IRQ; resource->res.length = sizeof(struct acpi_resource); resource->res.data.irq.triggering = link->irq.triggering; resource->res.data.irq.polarity = link->irq.polarity; if (link->irq.triggering == ACPI_EDGE_SENSITIVE) resource->res.data.irq.sharable = ACPI_EXCLUSIVE; else resource->res.data.irq.sharable = ACPI_SHARED; resource->res.data.irq.interrupt_count = 1; resource->res.data.irq.interrupts[0] = irq; break; case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: resource->res.type = ACPI_RESOURCE_TYPE_EXTENDED_IRQ; resource->res.length = sizeof(struct acpi_resource); resource->res.data.extended_irq.producer_consumer = ACPI_CONSUMER; resource->res.data.extended_irq.triggering = link->irq.triggering; resource->res.data.extended_irq.polarity = link->irq.polarity; if (link->irq.triggering == ACPI_EDGE_SENSITIVE) resource->res.data.irq.sharable = ACPI_EXCLUSIVE; else resource->res.data.irq.sharable = ACPI_SHARED; resource->res.data.extended_irq.interrupt_count = 1; resource->res.data.extended_irq.interrupts[0] = irq; /* ignore resource_source, it's optional */ break; default: printk(KERN_ERR PREFIX "Invalid Resource_type %d\n", link->irq.resource_type); result = -EINVAL; goto end; } resource->end.type = ACPI_RESOURCE_TYPE_END_TAG; /* Attempt to set the resource */ status = acpi_set_current_resources(link->device->handle, &buffer); /* check for total failure */ if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _SRS")); result = -ENODEV; goto end; } /* Query _STA, set device->status */ result = acpi_bus_get_status(link->device); if (result) { printk(KERN_ERR PREFIX "Unable to read status\n"); goto end; } if (!link->device->status.enabled) { printk(KERN_WARNING PREFIX "%s [%s] disabled and referenced, BIOS bug\n", 
acpi_device_name(link->device), acpi_device_bid(link->device)); } /* Query _CRS, set link->irq.active */ result = acpi_pci_link_get_current(link); if (result) { goto end; } /* * Is current setting not what we set? * set link->irq.active */ if (link->irq.active != irq) { /* * policy: when _CRS doesn't return what we just _SRS * assume _SRS worked and override _CRS value. */ printk(KERN_WARNING PREFIX "%s [%s] BIOS reported IRQ %d, using IRQ %d\n", acpi_device_name(link->device), acpi_device_bid(link->device), link->irq.active, irq); link->irq.active = irq; } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Set IRQ %d\n", link->irq.active)); end: kfree(resource); return result; } /* -------------------------------------------------------------------------- PCI Link IRQ Management -------------------------------------------------------------------------- */ /* * "acpi_irq_balance" (default in APIC mode) enables ACPI to use PIC Interrupt * Link Devices to move the PIRQs around to minimize sharing. * * "acpi_irq_nobalance" (default in PIC mode) tells ACPI not to move any PIC IRQs * that the BIOS has already set to active. This is necessary because * ACPI has no automatic means of knowing what ISA IRQs are used. Note that * if the BIOS doesn't set a Link Device active, ACPI needs to program it * even if acpi_irq_nobalance is set. * * A tables of penalties avoids directing PCI interrupts to well known * ISA IRQs. Boot params are available to over-ride the default table: * * List interrupts that are free for PCI use. * acpi_irq_pci=n[,m] * * List interrupts that should not be used for PCI: * acpi_irq_isa=n[,m] * * Note that PCI IRQ routers have a list of possible IRQs, * which may not include the IRQs this table says are available. * * Since this heuristic can't tell the difference between a link * that no device will attach to, vs. a link which may be shared * by multiple active devices -- it is not optimal. 
* * If interrupt performance is that important, get an IO-APIC system * with a pin dedicated to each device. Or for that matter, an MSI * enabled system. */ #define ACPI_MAX_IRQS 256 #define ACPI_MAX_ISA_IRQ 16 #define PIRQ_PENALTY_PCI_AVAILABLE (0) #define PIRQ_PENALTY_PCI_POSSIBLE (16*16) #define PIRQ_PENALTY_PCI_USING (16*16*16) #define PIRQ_PENALTY_ISA_TYPICAL (16*16*16*16) #define PIRQ_PENALTY_ISA_USED (16*16*16*16*16) #define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16) static int acpi_irq_penalty[ACPI_MAX_IRQS] = { PIRQ_PENALTY_ISA_ALWAYS, /* IRQ0 timer */ PIRQ_PENALTY_ISA_ALWAYS, /* IRQ1 keyboard */ PIRQ_PENALTY_ISA_ALWAYS, /* IRQ2 cascade */ PIRQ_PENALTY_ISA_TYPICAL, /* IRQ3 serial */ PIRQ_PENALTY_ISA_TYPICAL, /* IRQ4 serial */ PIRQ_PENALTY_ISA_TYPICAL, /* IRQ5 sometimes SoundBlaster */ PIRQ_PENALTY_ISA_TYPICAL, /* IRQ6 */ PIRQ_PENALTY_ISA_TYPICAL, /* IRQ7 parallel, spurious */ PIRQ_PENALTY_ISA_TYPICAL, /* IRQ8 rtc, sometimes */ PIRQ_PENALTY_PCI_AVAILABLE, /* IRQ9 PCI, often acpi */ PIRQ_PENALTY_PCI_AVAILABLE, /* IRQ10 PCI */ PIRQ_PENALTY_PCI_AVAILABLE, /* IRQ11 PCI */ PIRQ_PENALTY_ISA_USED, /* IRQ12 mouse */ PIRQ_PENALTY_ISA_USED, /* IRQ13 fpe, sometimes */ PIRQ_PENALTY_ISA_USED, /* IRQ14 ide0 */ PIRQ_PENALTY_ISA_USED, /* IRQ15 ide1 */ /* >IRQ15 */ }; int __init acpi_irq_penalty_init(void) { struct acpi_pci_link *link; int i; /* * Update penalties to facilitate IRQ balancing. */ list_for_each_entry(link, &acpi_link_list, list) { /* * reflect the possible and active irqs in the penalty table -- * useful for breaking ties. */ if (link->irq.possible_count) { int penalty = PIRQ_PENALTY_PCI_POSSIBLE / link->irq.possible_count; for (i = 0; i < link->irq.possible_count; i++) { if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ) acpi_irq_penalty[link->irq. 
possible[i]] += penalty; } } else if (link->irq.active) { acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_POSSIBLE; } } /* Add a penalty for the SCI */ acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING; return 0; } static int acpi_irq_balance = -1; /* 0: static, 1: balance */ static int acpi_pci_link_allocate(struct acpi_pci_link *link) { int irq; int i; if (link->irq.initialized) { if (link->refcnt == 0) /* This means the link is disabled but initialized */ acpi_pci_link_set(link, link->irq.active); return 0; } /* * search for active IRQ in list of possible IRQs. */ for (i = 0; i < link->irq.possible_count; ++i) { if (link->irq.active == link->irq.possible[i]) break; } /* * forget active IRQ that is not in possible list */ if (i == link->irq.possible_count) { if (acpi_strict) printk(KERN_WARNING PREFIX "_CRS %d not found" " in _PRS\n", link->irq.active); link->irq.active = 0; } /* * if active found, use it; else pick entry from end of possible list. */ if (link->irq.active) irq = link->irq.active; else irq = link->irq.possible[link->irq.possible_count - 1]; if (acpi_irq_balance || !link->irq.active) { /* * Select the best IRQ. This is done in reverse to promote * the use of IRQs 9, 10, 11, and >15. */ for (i = (link->irq.possible_count - 1); i >= 0; i--) { if (acpi_irq_penalty[irq] > acpi_irq_penalty[link->irq.possible[i]]) irq = link->irq.possible[i]; } } /* Attempt to enable the link device at this IRQ. */ if (acpi_pci_link_set(link, irq)) { printk(KERN_ERR PREFIX "Unable to set IRQ for %s [%s]. 
" "Try pci=noacpi or acpi=off\n", acpi_device_name(link->device), acpi_device_bid(link->device)); return -ENODEV; } else { acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING; printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n", acpi_device_name(link->device), acpi_device_bid(link->device), link->irq.active); } link->irq.initialized = 1; return 0; } /* * acpi_pci_link_allocate_irq * success: return IRQ >= 0 * failure: return -1 */ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, int *polarity, char **name) { int result; struct acpi_device *device; struct acpi_pci_link *link; result = acpi_bus_get_device(handle, &device); if (result) { printk(KERN_ERR PREFIX "Invalid link device\n"); return -1; } link = acpi_driver_data(device); if (!link) { printk(KERN_ERR PREFIX "Invalid link context\n"); return -1; } /* TBD: Support multiple index (IRQ) entries per Link Device */ if (index) { printk(KERN_ERR PREFIX "Invalid index %d\n", index); return -1; } mutex_lock(&acpi_link_lock); if (acpi_pci_link_allocate(link)) { mutex_unlock(&acpi_link_lock); return -1; } if (!link->irq.active) { mutex_unlock(&acpi_link_lock); printk(KERN_ERR PREFIX "Link active IRQ is 0!\n"); return -1; } link->refcnt++; mutex_unlock(&acpi_link_lock); if (triggering) *triggering = link->irq.triggering; if (polarity) *polarity = link->irq.polarity; if (name) *name = acpi_device_bid(link->device); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Link %s is referenced\n", acpi_device_bid(link->device))); return (link->irq.active); } /* * We don't change link's irq information here. 
After it is reenabled, we * continue use the info */ int acpi_pci_link_free_irq(acpi_handle handle) { struct acpi_device *device; struct acpi_pci_link *link; acpi_status result; result = acpi_bus_get_device(handle, &device); if (result) { printk(KERN_ERR PREFIX "Invalid link device\n"); return -1; } link = acpi_driver_data(device); if (!link) { printk(KERN_ERR PREFIX "Invalid link context\n"); return -1; } mutex_lock(&acpi_link_lock); if (!link->irq.initialized) { mutex_unlock(&acpi_link_lock); printk(KERN_ERR PREFIX "Link isn't initialized\n"); return -1; } #ifdef FUTURE_USE /* * The Link reference count allows us to _DISable an unused link * and suspend time, and set it again on resume. * However, 2.6.12 still has irq_router.resume * which blindly restores the link state. * So we disable the reference count method * to prevent duplicate acpi_pci_link_set() * which would harm some systems */ link->refcnt--; #endif ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Link %s is dereferenced\n", acpi_device_bid(link->device))); if (link->refcnt == 0) acpi_evaluate_object(link->device->handle, "_DIS", NULL, NULL); mutex_unlock(&acpi_link_lock); return (link->irq.active); } /* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ static int acpi_pci_link_add(struct acpi_device *device) { int result; struct acpi_pci_link *link; int i; int found = 0; link = kzalloc(sizeof(struct acpi_pci_link), GFP_KERNEL); if (!link) return -ENOMEM; link->device = device; strcpy(acpi_device_name(device), ACPI_PCI_LINK_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS); device->driver_data = link; mutex_lock(&acpi_link_lock); result = acpi_pci_link_get_possible(link); if (result) goto end; /* query and set link->irq.active */ acpi_pci_link_get_current(link); printk(KERN_INFO PREFIX "%s [%s] (IRQs", acpi_device_name(device), acpi_device_bid(device)); for (i = 0; i < 
link->irq.possible_count; i++) { if (link->irq.active == link->irq.possible[i]) { printk(" *%d", link->irq.possible[i]); found = 1; } else printk(" %d", link->irq.possible[i]); } printk(")"); if (!found) printk(" *%d", link->irq.active); if (!link->device->status.enabled) printk(", disabled."); printk("\n"); list_add_tail(&link->list, &acpi_link_list); end: /* disable all links -- to be activated on use */ acpi_evaluate_object(device->handle, "_DIS", NULL, NULL); mutex_unlock(&acpi_link_lock); if (result) kfree(link); return result; } static int acpi_pci_link_resume(struct acpi_pci_link *link) { if (link->refcnt && link->irq.active && link->irq.initialized) return (acpi_pci_link_set(link, link->irq.active)); return 0; } static void irqrouter_resume(void) { struct acpi_pci_link *link; list_for_each_entry(link, &acpi_link_list, list) { acpi_pci_link_resume(link); } } static int acpi_pci_link_remove(struct acpi_device *device, int type) { struct acpi_pci_link *link; link = acpi_driver_data(device); mutex_lock(&acpi_link_lock); list_del(&link->list); mutex_unlock(&acpi_link_lock); kfree(link); return 0; } /* * modify acpi_irq_penalty[] from cmdline */ static int __init acpi_irq_penalty_update(char *str, int used) { int i; for (i = 0; i < 16; i++) { int retval; int irq; retval = get_option(&str, &irq); if (!retval) break; /* no number found */ if (irq < 0) continue; if (irq >= ARRAY_SIZE(acpi_irq_penalty)) continue; if (used) acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED; else acpi_irq_penalty[irq] = PIRQ_PENALTY_PCI_AVAILABLE; if (retval != 2) /* no next number */ break; } return 1; } /* * We'd like PNP to call this routine for the * single ISA_USED value for each legacy device. * But instead it calls us with each POSSIBLE setting. * There is no ISA_POSSIBLE weight, so we simply use * the (small) PCI_USING penalty. 
*/ void acpi_penalize_isa_irq(int irq, int active) { if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) { if (active) acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED; else acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING; } } /* * Over-ride default table to reserve additional IRQs for use by ISA * e.g. acpi_irq_isa=5 * Useful for telling ACPI how not to interfere with your ISA sound card. */ static int __init acpi_irq_isa(char *str) { return acpi_irq_penalty_update(str, 1); } __setup("acpi_irq_isa=", acpi_irq_isa); /* * Over-ride default table to free additional IRQs for use by PCI * e.g. acpi_irq_pci=7,15 * Used for acpi_irq_balance to free up IRQs to reduce PCI IRQ sharing. */ static int __init acpi_irq_pci(char *str) { return acpi_irq_penalty_update(str, 0); } __setup("acpi_irq_pci=", acpi_irq_pci); static int __init acpi_irq_nobalance_set(char *str) { acpi_irq_balance = 0; return 1; } __setup("acpi_irq_nobalance", acpi_irq_nobalance_set); static int __init acpi_irq_balance_set(char *str) { acpi_irq_balance = 1; return 1; } __setup("acpi_irq_balance", acpi_irq_balance_set); static struct syscore_ops irqrouter_syscore_ops = { .resume = irqrouter_resume, }; static int __init irqrouter_init_ops(void) { if (!acpi_disabled && !acpi_noirq) register_syscore_ops(&irqrouter_syscore_ops); return 0; } device_initcall(irqrouter_init_ops); static int __init acpi_pci_link_init(void) { if (acpi_noirq) return 0; if (acpi_irq_balance == -1) { /* no command line switch: enable balancing in IOAPIC mode */ if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) acpi_irq_balance = 1; else acpi_irq_balance = 0; } if (acpi_bus_register_driver(&acpi_pci_link_driver) < 0) return -ENODEV; return 0; } subsys_initcall(acpi_pci_link_init);
gpl-2.0
invisiblek/android_kernel_oneplus_msm8974
net/netfilter/nf_conntrack_tftp.c
8744
4200
/* (C) 2001-2002 Magnus Boden <mb@ozaba.mine.nu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/in.h> #include <linux/udp.h> #include <linux/netfilter.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_tuple.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_conntrack_helper.h> #include <linux/netfilter/nf_conntrack_tftp.h> MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>"); MODULE_DESCRIPTION("TFTP connection tracking helper"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ip_conntrack_tftp"); MODULE_ALIAS_NFCT_HELPER("tftp"); #define MAX_PORTS 8 static unsigned short ports[MAX_PORTS]; static unsigned int ports_c; module_param_array(ports, ushort, &ports_c, 0400); MODULE_PARM_DESC(ports, "Port numbers of TFTP servers"); unsigned int (*nf_nat_tftp_hook)(struct sk_buff *skb, enum ip_conntrack_info ctinfo, struct nf_conntrack_expect *exp) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_tftp_hook); static int tftp_help(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { const struct tftphdr *tfh; struct tftphdr _tftph; struct nf_conntrack_expect *exp; struct nf_conntrack_tuple *tuple; unsigned int ret = NF_ACCEPT; typeof(nf_nat_tftp_hook) nf_nat_tftp; tfh = skb_header_pointer(skb, protoff + sizeof(struct udphdr), sizeof(_tftph), &_tftph); if (tfh == NULL) return NF_ACCEPT; switch (ntohs(tfh->opcode)) { case TFTP_OPCODE_READ: case TFTP_OPCODE_WRITE: /* RRQ and WRQ works the same way */ nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); exp = nf_ct_expect_alloc(ct); if (exp == NULL) return NF_DROP; tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; nf_ct_expect_init(exp, 
NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), &tuple->src.u3, &tuple->dst.u3, IPPROTO_UDP, NULL, &tuple->dst.u.udp.port); pr_debug("expect: "); nf_ct_dump_tuple(&exp->tuple); nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook); if (nf_nat_tftp && ct->status & IPS_NAT_MASK) ret = nf_nat_tftp(skb, ctinfo, exp); else if (nf_ct_expect_related(exp) != 0) ret = NF_DROP; nf_ct_expect_put(exp); break; case TFTP_OPCODE_DATA: case TFTP_OPCODE_ACK: pr_debug("Data/ACK opcode\n"); break; case TFTP_OPCODE_ERROR: pr_debug("Error opcode\n"); break; default: pr_debug("Unknown opcode\n"); } return ret; } static struct nf_conntrack_helper tftp[MAX_PORTS][2] __read_mostly; static char tftp_names[MAX_PORTS][2][sizeof("tftp-65535")] __read_mostly; static const struct nf_conntrack_expect_policy tftp_exp_policy = { .max_expected = 1, .timeout = 5 * 60, }; static void nf_conntrack_tftp_fini(void) { int i, j; for (i = 0; i < ports_c; i++) { for (j = 0; j < 2; j++) nf_conntrack_helper_unregister(&tftp[i][j]); } } static int __init nf_conntrack_tftp_init(void) { int i, j, ret; char *tmpname; if (ports_c == 0) ports[ports_c++] = TFTP_PORT; for (i = 0; i < ports_c; i++) { memset(&tftp[i], 0, sizeof(tftp[i])); tftp[i][0].tuple.src.l3num = AF_INET; tftp[i][1].tuple.src.l3num = AF_INET6; for (j = 0; j < 2; j++) { tftp[i][j].tuple.dst.protonum = IPPROTO_UDP; tftp[i][j].tuple.src.u.udp.port = htons(ports[i]); tftp[i][j].expect_policy = &tftp_exp_policy; tftp[i][j].me = THIS_MODULE; tftp[i][j].help = tftp_help; tmpname = &tftp_names[i][j][0]; if (ports[i] == TFTP_PORT) sprintf(tmpname, "tftp"); else sprintf(tmpname, "tftp-%u", i); tftp[i][j].name = tmpname; ret = nf_conntrack_helper_register(&tftp[i][j]); if (ret) { printk(KERN_ERR "nf_ct_tftp: failed to register" " helper for pf: %u port: %u\n", tftp[i][j].tuple.src.l3num, ports[i]); nf_conntrack_tftp_fini(); return ret; } } } return 0; } module_init(nf_conntrack_tftp_init); module_exit(nf_conntrack_tftp_fini);
gpl-2.0
VanirAOSP/kernel_oppo_n1
crypto/rmd256.c
10536
10795
/* * Cryptographic API. * * RIPEMD-256 - RACE Integrity Primitives Evaluation Message Digest. * * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC * * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <asm/byteorder.h> #include "ripemd.h" struct rmd256_ctx { u64 byte_count; u32 state[8]; __le32 buffer[16]; }; #define K1 RMD_K1 #define K2 RMD_K2 #define K3 RMD_K3 #define K4 RMD_K4 #define KK1 RMD_K6 #define KK2 RMD_K7 #define KK3 RMD_K8 #define KK4 RMD_K1 #define F1(x, y, z) (x ^ y ^ z) /* XOR */ #define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */ #define F3(x, y, z) ((x | ~y) ^ z) #define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? 
x : y */

/*
 * One RIPEMD round on register 'a':
 *   a += f(b, c, d) + X[i] + K;  a = rol32(a, s);
 * 'x' is a little-endian 32-bit message word; le32_to_cpup() makes the
 * transform endian-safe on big-endian hosts.
 */
#define ROUND(a, b, c, d, f, k, x, s) { \
	(a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
	(a) = rol32((a), (s)); \
}

/*
 * RIPEMD-256 compression function: process one 64-byte block ('in',
 * sixteen little-endian words) into the 8-word chaining state.
 * Two independent 4-register lanes run in parallel; after each of the
 * four rounds one register pair is exchanged between the lanes (this
 * register swap is what distinguishes RIPEMD-256 from RIPEMD-128).
 */
static void rmd256_transform(u32 *state, const __le32 *in)
{
	u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd, tmp;

	/* Initialize left lane */
	aa = state[0];
	bb = state[1];
	cc = state[2];
	dd = state[3];

	/* Initialize right lane */
	aaa = state[4];
	bbb = state[5];
	ccc = state[6];
	ddd = state[7];

	/* round 1: left lane */
	ROUND(aa, bb, cc, dd, F1, K1, in[0],  11);
	ROUND(dd, aa, bb, cc, F1, K1, in[1],  14);
	ROUND(cc, dd, aa, bb, F1, K1, in[2],  15);
	ROUND(bb, cc, dd, aa, F1, K1, in[3],  12);
	ROUND(aa, bb, cc, dd, F1, K1, in[4],   5);
	ROUND(dd, aa, bb, cc, F1, K1, in[5],   8);
	ROUND(cc, dd, aa, bb, F1, K1, in[6],   7);
	ROUND(bb, cc, dd, aa, F1, K1, in[7],   9);
	ROUND(aa, bb, cc, dd, F1, K1, in[8],  11);
	ROUND(dd, aa, bb, cc, F1, K1, in[9],  13);
	ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
	ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
	ROUND(aa, bb, cc, dd, F1, K1, in[12],  6);
	ROUND(dd, aa, bb, cc, F1, K1, in[13],  7);
	ROUND(cc, dd, aa, bb, F1, K1, in[14],  9);
	ROUND(bb, cc, dd, aa, F1, K1, in[15],  8);

	/* round 1: right lane */
	ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5],   8);
	ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14],  9);
	ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7],   9);
	ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0],  11);
	ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9],  13);
	ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2],  15);
	ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15);
	ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4],   5);
	ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13],  7);
	ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6],   7);
	ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15],  8);
	ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8],  11);
	ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1],  14);
	ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14);
	ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3],  12);
	ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12],  6);

	/* Swap contents of "a" registers */
	tmp = aa; aa = aaa; aaa = tmp;

	/* round 2: left lane */
	ROUND(aa, bb, cc, dd, F2, K2, in[7],   7);
	ROUND(dd, aa, bb, cc, F2, K2, in[4],   6);
	ROUND(cc, dd, aa, bb, F2, K2, in[13],  8);
	ROUND(bb, cc, dd, aa, F2, K2, in[1],  13);
	ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
	ROUND(dd, aa, bb, cc, F2, K2, in[6],   9);
	ROUND(cc, dd, aa, bb, F2, K2, in[15],  7);
	ROUND(bb, cc, dd, aa, F2, K2, in[3],  15);
	ROUND(aa, bb, cc, dd, F2, K2, in[12],  7);
	ROUND(dd, aa, bb, cc, F2, K2, in[0],  12);
	ROUND(cc, dd, aa, bb, F2, K2, in[9],  15);
	ROUND(bb, cc, dd, aa, F2, K2, in[5],   9);
	ROUND(aa, bb, cc, dd, F2, K2, in[2],  11);
	ROUND(dd, aa, bb, cc, F2, K2, in[14],  7);
	ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
	ROUND(bb, cc, dd, aa, F2, K2, in[8],  12);

	/* round 2: right lane */
	ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6],   9);
	ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13);
	ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3],  15);
	ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7],   7);
	ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0],  12);
	ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13],  8);
	ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5],   9);
	ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11);
	ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14],  7);
	ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15],  7);
	ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8],  12);
	ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12],  7);
	ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4],   6);
	ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9],  15);
	ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1],  13);
	ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2],  11);

	/* Swap contents of "b" registers */
	tmp = bb; bb = bbb; bbb = tmp;

	/* round 3: left lane */
	ROUND(aa, bb, cc, dd, F3, K3, in[3],  11);
	ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
	ROUND(cc, dd, aa, bb, F3, K3, in[14],  6);
	ROUND(bb, cc, dd, aa, F3, K3, in[4],   7);
	ROUND(aa, bb, cc, dd, F3, K3, in[9],  14);
	ROUND(dd, aa, bb, cc, F3, K3, in[15],  9);
	ROUND(cc, dd, aa, bb, F3, K3, in[8],  13);
	ROUND(bb, cc, dd, aa, F3, K3, in[1],  15);
	ROUND(aa, bb, cc, dd, F3, K3, in[2],  14);
	ROUND(dd, aa, bb, cc, F3, K3, in[7],   8);
	ROUND(cc, dd, aa, bb, F3, K3, in[0],  13);
	ROUND(bb, cc, dd, aa, F3, K3, in[6],   6);
	ROUND(aa, bb, cc, dd, F3, K3, in[13],  5);
	ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
	ROUND(cc, dd, aa, bb, F3, K3, in[5],   7);
	ROUND(bb, cc, dd, aa, F3, K3, in[12],  5);

	/* round 3: right lane */
	ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15],  9);
	ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5],   7);
	ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1],  15);
	ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3],  11);
	ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7],   8);
	ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14],  6);
	ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6],   6);
	ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9],  14);
	ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12);
	ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8],  13);
	ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12],  5);
	ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2],  14);
	ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13);
	ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0],  13);
	ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4],   7);
	ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13],  5);

	/* Swap contents of "c" registers */
	tmp = cc; cc = ccc; ccc = tmp;

	/* round 4: left lane */
	ROUND(aa, bb, cc, dd, F4, K4, in[1],  11);
	ROUND(dd, aa, bb, cc, F4, K4, in[9],  12);
	ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
	ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
	ROUND(aa, bb, cc, dd, F4, K4, in[0],  14);
	ROUND(dd, aa, bb, cc, F4, K4, in[8],  15);
	ROUND(cc, dd, aa, bb, F4, K4, in[12],  9);
	ROUND(bb, cc, dd, aa, F4, K4, in[4],   8);
	ROUND(aa, bb, cc, dd, F4, K4, in[13],  9);
	ROUND(dd, aa, bb, cc, F4, K4, in[3],  14);
	ROUND(cc, dd, aa, bb, F4, K4, in[7],   5);
	ROUND(bb, cc, dd, aa, F4, K4, in[15],  6);
	ROUND(aa, bb, cc, dd, F4, K4, in[14],  8);
	ROUND(dd, aa, bb, cc, F4, K4, in[5],   6);
	ROUND(cc, dd, aa, bb, F4, K4, in[6],   5);
	ROUND(bb, cc, dd, aa, F4, K4, in[2],  12);

	/* round 4: right lane */
	ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8],  15);
	ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6],   5);
	ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4],   8);
	ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1],  11);
	ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3],  14);
	ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14);
	ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15],  6);
	ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0],  14);
	ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5],   6);
	ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12],  9);
	ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2],  12);
	ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13],  9);
	ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9],  12);
	ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7],   5);
	ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15);
	ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14],  8);

	/* Swap contents of "d" registers */
	tmp = dd; dd = ddd; ddd = tmp;

	/* combine results: Davies-Meyer style feed-forward into the state */
	state[0] += aa;
	state[1] += bb;
	state[2] += cc;
	state[3] += dd;
	state[4] += aaa;
	state[5] += bbb;
	state[6] += ccc;
	state[7] += ddd;

	return;
}

/*
 * Reset the hash state to the RIPEMD-256 initial values.  The left lane
 * uses H0-H3 and the right lane H5-H8, per the specification.
 */
static int rmd256_init(struct shash_desc *desc)
{
	struct rmd256_ctx *rctx = shash_desc_ctx(desc);

	rctx->byte_count = 0;

	rctx->state[0] = RMD_H0;
	rctx->state[1] = RMD_H1;
	rctx->state[2] = RMD_H2;
	rctx->state[3] = RMD_H3;
	rctx->state[4] = RMD_H5;
	rctx->state[5] = RMD_H6;
	rctx->state[6] = RMD_H7;
	rctx->state[7] = RMD_H8;

	memset(rctx->buffer, 0, sizeof(rctx->buffer));

	return 0;
}

/*
 * Absorb 'len' bytes of 'data': fill the partial 64-byte block buffer,
 * run the transform on every full block, and stash the remainder.
 */
static int rmd256_update(struct shash_desc *desc, const u8 *data,
			 unsigned int len)
{
	struct rmd256_ctx *rctx = shash_desc_ctx(desc);
	/* bytes of free space left in the block buffer */
	const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);

	rctx->byte_count += len;

	/* Enough space in buffer? If so copy and we're done */
	if (avail > len) {
		memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
		       data, len);
		goto out;
	}

	/* top up the buffered partial block and consume it */
	memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
	       data, avail);
	rmd256_transform(rctx->state, rctx->buffer);
	data += avail;
	len -= avail;

	/* process remaining full blocks directly */
	while (len >= sizeof(rctx->buffer)) {
		memcpy(rctx->buffer, data, sizeof(rctx->buffer));
		rmd256_transform(rctx->state, rctx->buffer);
		data += sizeof(rctx->buffer);
		len -= sizeof(rctx->buffer);
	}

	/* keep the tail (possibly zero bytes) for the next call */
	memcpy(rctx->buffer, data, len);

out:
	return 0;
}

/*
 * Finalize: append the 0x80 pad byte plus zeros up to 56 mod 64, then
 * the 64-bit little-endian bit count, emit the digest, wipe the context.
 */
static int rmd256_final(struct shash_desc *desc, u8 *out)
{
	struct rmd256_ctx *rctx = shash_desc_ctx(desc);
	u32 i, index, padlen;
	__le64 bits;
	__le32 *dst = (__le32 *)out;
	static const u8 padding[64] = { 0x80, };

	bits = cpu_to_le64(rctx->byte_count << 3);

	/* Pad out to 56 mod 64 */
	index = rctx->byte_count & 0x3f;
	padlen = (index < 56) ? (56 - index) : ((64+56) - index);
	rmd256_update(desc, padding, padlen);

	/* Append length */
	rmd256_update(desc, (const u8 *)&bits, sizeof(bits));

	/* Store state in digest */
	for (i = 0; i < 8; i++)
		dst[i] = cpu_to_le32p(&rctx->state[i]);

	/* Wipe context */
	memset(rctx, 0, sizeof(*rctx));

	return 0;
}

static struct shash_alg alg = {
	.digestsize	= RMD256_DIGEST_SIZE,
	.init		= rmd256_init,
	.update		= rmd256_update,
	.final		= rmd256_final,
	.descsize	= sizeof(struct rmd256_ctx),
	.base		= {
		.cra_name	= "rmd256",
		.cra_flags	= CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	= RMD256_BLOCK_SIZE,
		.cra_module	= THIS_MODULE,
	}
};

static int __init rmd256_mod_init(void)
{
	return crypto_register_shash(&alg);
}

static void __exit rmd256_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(rmd256_mod_init);
module_exit(rmd256_mod_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
gpl-2.0
shskyinfo/android_kernel_lge_vee7
arch/cris/arch-v10/lib/old_checksum.c
12328
2160
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Tom May, <ftom@netcom.com>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <net/checksum.h>
/*
 * FIX: was <net/module.h>, which is not a kernel header; EXPORT_SYMBOL()
 * below requires <linux/module.h>.
 */
#include <linux/module.h>

#undef PROFILE_CHECKSUM

#ifdef PROFILE_CHECKSUM
/* these are just for profiling the checksum code with an oscillioscope.. uh */
#if 0
#define BITOFF *((unsigned char *)0xb0000030) = 0xff
#define BITON *((unsigned char *)0xb0000030) = 0x0
#endif
#include <asm/io.h>
#define CBITON LED_ACTIVE_SET(1)
#define CBITOFF LED_ACTIVE_SET(0)
#define BITOFF
#define BITON
#else
#define BITOFF
#define BITON
#define CBITOFF
#define CBITON
#endif

/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 */

#include <asm/delay.h>	/* only used by the #if 0 profiling delay below */

/**
 * csum_partial - fold 'len' bytes at 'p' into a running ones'-complement sum
 * @p:     buffer to checksum (observed 2- or 4-byte aligned; see comment)
 * @len:   number of bytes to sum
 * @__sum: 32-bit partial sum to continue from
 *
 * Sums the buffer as 16-bit words, unrolled 8x (16 bytes per iteration)
 * for the bulk, then word-by-word, then a final odd byte if any.  Carries
 * are accumulated in the upper half of the 32-bit sum; folding to 16 bits
 * is left to the caller, as is conventional for csum_partial().
 * The trailing odd byte is added as-is, which matches the little-endian
 * byte order of CRIS.
 */
__wsum csum_partial(const void *p, int len, __wsum __sum)
{
	u32 sum = (__force u32)__sum;
	const u16 *buff = p;
	/*
	 * Experiments with ethernet and slip connections show that buff
	 * is aligned on either a 2-byte or 4-byte boundary.
	 */
	const void *endMarker = p + len;
	/* end of the 16-byte-unrolled region */
	const void *marker = endMarker - (len % 16);
#if 0
	if((int)buff & 0x3)
		printk("unaligned buff %p\n", buff);
	__delay(900); /* extra delay of 90 us to test performance hit */
#endif
	BITON;
	while (buff < marker) {
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
	}
	/* remaining whole 16-bit words */
	marker = endMarker - (len % 2);
	while (buff < marker)
		sum += *buff++;

	if (endMarker > buff)
		sum += *(const u8 *)buff;	/* add extra byte separately */

	BITOFF;
	return (__force __wsum)sum;
}

EXPORT_SYMBOL(csum_partial);
gpl-2.0
jfdsmabalot/kernel_mako
arch/powerpc/boot/ns16550.c
13864
1983
/* * 16550 serial console support. * * Original copied from <file:arch/ppc/boot/common/ns16550.c> * (which had no copyright) * Modifications: 2006 (c) MontaVista Software, Inc. * * Modified by: Mark A. Greer <mgreer@mvista.com> */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "string.h" #include "stdio.h" #include "io.h" #include "ops.h" #define UART_DLL 0 /* Out: Divisor Latch Low */ #define UART_DLM 1 /* Out: Divisor Latch High */ #define UART_FCR 2 /* Out: FIFO Control Register */ #define UART_LCR 3 /* Out: Line Control Register */ #define UART_MCR 4 /* Out: Modem Control Register */ #define UART_LSR 5 /* In: Line Status Register */ #define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */ #define UART_LSR_DR 0x01 /* Receiver data ready */ #define UART_MSR 6 /* In: Modem Status Register */ #define UART_SCR 7 /* I/O: Scratch Register */ static unsigned char *reg_base; static u32 reg_shift; static int ns16550_open(void) { out_8(reg_base + (UART_FCR << reg_shift), 0x06); return 0; } static void ns16550_putc(unsigned char c) { while ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_THRE) == 0); out_8(reg_base, c); } static unsigned char ns16550_getc(void) { while ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_DR) == 0); return in_8(reg_base); } static u8 ns16550_tstc(void) { return ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_DR) != 0); } int ns16550_console_init(void *devp, struct serial_console_data *scdp) { int n; u32 reg_offset; if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1) return -1; n = getprop(devp, "reg-offset", &reg_offset, sizeof(reg_offset)); if (n == sizeof(reg_offset)) reg_base += reg_offset; n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift)); if (n != sizeof(reg_shift)) reg_shift = 0; scdp->open = ns16550_open; scdp->putc = ns16550_putc; scdp->getc = ns16550_getc; scdp->tstc = ns16550_tstc; scdp->close = NULL; return 0; }
gpl-2.0
PenguPilot/overo-kernel
arch/arm/mach-omap2/prm2xxx_3xxx.c
41
9376
/* * OMAP2/3 PRM module functions * * Copyright (C) 2010-2011 Texas Instruments, Inc. * Copyright (C) 2010 Nokia Corporation * Benoît Cousson * Paul Walmsley * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/io.h> #include <linux/irq.h> #include "common.h" #include <plat/cpu.h> #include <plat/prcm.h> #include <plat/irqs.h> #include "vp.h" #include "prm2xxx_3xxx.h" #include "cm2xxx_3xxx.h" #include "prm-regbits-24xx.h" #include "prm-regbits-34xx.h" static const struct omap_prcm_irq omap3_prcm_irqs[] = { OMAP_PRCM_IRQ("wkup", 0, 0), OMAP_PRCM_IRQ("io", 9, 1), }; static struct omap_prcm_irq_setup omap3_prcm_irq_setup = { .ack = OMAP3_PRM_IRQSTATUS_MPU_OFFSET, .mask = OMAP3_PRM_IRQENABLE_MPU_OFFSET, .nr_regs = 1, .irqs = omap3_prcm_irqs, .nr_irqs = ARRAY_SIZE(omap3_prcm_irqs), .irq = INT_34XX_PRCM_MPU_IRQ, .read_pending_irqs = &omap3xxx_prm_read_pending_irqs, .ocp_barrier = &omap3xxx_prm_ocp_barrier, .save_and_clear_irqen = &omap3xxx_prm_save_and_clear_irqen, .restore_irqen = &omap3xxx_prm_restore_irqen, }; u32 omap2_prm_read_mod_reg(s16 module, u16 idx) { return __raw_readl(prm_base + module + idx); } void omap2_prm_write_mod_reg(u32 val, s16 module, u16 idx) { __raw_writel(val, prm_base + module + idx); } /* Read-modify-write a register in a PRM module. 
Caller must lock */ u32 omap2_prm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx) { u32 v; v = omap2_prm_read_mod_reg(module, idx); v &= ~mask; v |= bits; omap2_prm_write_mod_reg(v, module, idx); return v; } /* Read a PRM register, AND it, and shift the result down to bit 0 */ u32 omap2_prm_read_mod_bits_shift(s16 domain, s16 idx, u32 mask) { u32 v; v = omap2_prm_read_mod_reg(domain, idx); v &= mask; v >>= __ffs(mask); return v; } u32 omap2_prm_set_mod_reg_bits(u32 bits, s16 module, s16 idx) { return omap2_prm_rmw_mod_reg_bits(bits, bits, module, idx); } u32 omap2_prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx) { return omap2_prm_rmw_mod_reg_bits(bits, 0x0, module, idx); } /** * omap2_prm_is_hardreset_asserted - read the HW reset line state of * submodules contained in the hwmod module * @prm_mod: PRM submodule base (e.g. CORE_MOD) * @shift: register bit shift corresponding to the reset line to check * * Returns 1 if the (sub)module hardreset line is currently asserted, * 0 if the (sub)module hardreset line is not currently asserted, or * -EINVAL if called while running on a non-OMAP2/3 chip. */ int omap2_prm_is_hardreset_asserted(s16 prm_mod, u8 shift) { if (!(cpu_is_omap24xx() || cpu_is_omap34xx())) return -EINVAL; return omap2_prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTCTRL, (1 << shift)); } /** * omap2_prm_assert_hardreset - assert the HW reset line of a submodule * @prm_mod: PRM submodule base (e.g. CORE_MOD) * @shift: register bit shift corresponding to the reset line to assert * * Some IPs like dsp or iva contain processors that require an HW * reset line to be asserted / deasserted in order to fully enable the * IP. These modules may have multiple hard-reset lines that reset * different 'submodules' inside the IP block. This function will * place the submodule into reset. Returns 0 upon success or -EINVAL * upon an argument error. 
*/ int omap2_prm_assert_hardreset(s16 prm_mod, u8 shift) { u32 mask; if (!(cpu_is_omap24xx() || cpu_is_omap34xx())) return -EINVAL; mask = 1 << shift; omap2_prm_rmw_mod_reg_bits(mask, mask, prm_mod, OMAP2_RM_RSTCTRL); return 0; } /** * omap2_prm_deassert_hardreset - deassert a submodule hardreset line and wait * @prm_mod: PRM submodule base (e.g. CORE_MOD) * @rst_shift: register bit shift corresponding to the reset line to deassert * @st_shift: register bit shift for the status of the deasserted submodule * * Some IPs like dsp or iva contain processors that require an HW * reset line to be asserted / deasserted in order to fully enable the * IP. These modules may have multiple hard-reset lines that reset * different 'submodules' inside the IP block. This function will * take the submodule out of reset and wait until the PRCM indicates * that the reset has completed before returning. Returns 0 upon success or * -EINVAL upon an argument error, -EEXIST if the submodule was already out * of reset, or -EBUSY if the submodule did not exit reset promptly. */ int omap2_prm_deassert_hardreset(s16 prm_mod, u8 rst_shift, u8 st_shift) { u32 rst, st; int c; if (!(cpu_is_omap24xx() || cpu_is_omap34xx())) return -EINVAL; rst = 1 << rst_shift; st = 1 << st_shift; /* Check the current status to avoid de-asserting the line twice */ if (omap2_prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTCTRL, rst) == 0) return -EEXIST; /* Clear the reset status by writing 1 to the status bit */ omap2_prm_rmw_mod_reg_bits(0xffffffff, st, prm_mod, OMAP2_RM_RSTST); /* de-assert the reset control line */ omap2_prm_rmw_mod_reg_bits(rst, 0, prm_mod, OMAP2_RM_RSTCTRL); /* wait the status to be set */ omap_test_timeout(omap2_prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTST, st), MAX_MODULE_HARDRESET_WAIT, c); return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0; } /* PRM VP */ /* * struct omap3_vp - OMAP3 VP register access description. 
* @tranxdone_status: VP_TRANXDONE_ST bitmask in PRM_IRQSTATUS_MPU reg */ struct omap3_vp { u32 tranxdone_status; }; static struct omap3_vp omap3_vp[] = { [OMAP3_VP_VDD_MPU_ID] = { .tranxdone_status = OMAP3430_VP1_TRANXDONE_ST_MASK, }, [OMAP3_VP_VDD_CORE_ID] = { .tranxdone_status = OMAP3430_VP2_TRANXDONE_ST_MASK, }, }; #define MAX_VP_ID ARRAY_SIZE(omap3_vp); u32 omap3_prm_vp_check_txdone(u8 vp_id) { struct omap3_vp *vp = &omap3_vp[vp_id]; u32 irqstatus; irqstatus = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); return irqstatus & vp->tranxdone_status; } void omap3_prm_vp_clear_txdone(u8 vp_id) { struct omap3_vp *vp = &omap3_vp[vp_id]; omap2_prm_write_mod_reg(vp->tranxdone_status, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); } u32 omap3_prm_vcvp_read(u8 offset) { return omap2_prm_read_mod_reg(OMAP3430_GR_MOD, offset); } void omap3_prm_vcvp_write(u32 val, u8 offset) { omap2_prm_write_mod_reg(val, OMAP3430_GR_MOD, offset); } u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset) { return omap2_prm_rmw_mod_reg_bits(mask, bits, OMAP3430_GR_MOD, offset); } /** * omap3xxx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events * @events: ptr to a u32, preallocated by caller * * Read PRM_IRQSTATUS_MPU bits, AND'ed with the currently-enabled PRM * MPU IRQs, and store the result into the u32 pointed to by @events. * No return value. */ void omap3xxx_prm_read_pending_irqs(unsigned long *events) { u32 mask, st; /* XXX Can the mask read be avoided (e.g., can it come from RAM?) */ mask = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); st = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); events[0] = mask & st; } /** * omap3xxx_prm_ocp_barrier - force buffered MPU writes to the PRM to complete * * Force any buffered writes to the PRM IP block to complete. Needed * by the PRM IRQ handler, which reads and writes directly to the IP * block, to avoid race conditions after acknowledging or clearing IRQ * bits. No return value. 
*/ void omap3xxx_prm_ocp_barrier(void) { omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_REVISION_OFFSET); } /** * omap3xxx_prm_save_and_clear_irqen - save/clear PRM_IRQENABLE_MPU reg * @saved_mask: ptr to a u32 array to save IRQENABLE bits * * Save the PRM_IRQENABLE_MPU register to @saved_mask. @saved_mask * must be allocated by the caller. Intended to be used in the PRM * interrupt handler suspend callback. The OCP barrier is needed to * ensure the write to disable PRM interrupts reaches the PRM before * returning; otherwise, spurious interrupts might occur. No return * value. */ void omap3xxx_prm_save_and_clear_irqen(u32 *saved_mask) { saved_mask[0] = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); /* OCP barrier */ omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_REVISION_OFFSET); } /** * omap3xxx_prm_restore_irqen - set PRM_IRQENABLE_MPU register from args * @saved_mask: ptr to a u32 array of IRQENABLE bits saved previously * * Restore the PRM_IRQENABLE_MPU register from @saved_mask. Intended * to be used in the PRM interrupt handler resume callback to restore * values saved by omap3xxx_prm_save_and_clear_irqen(). No OCP * barrier should be needed here; any pending PRM interrupts will fire * once the writes reach the PRM. No return value. */ void omap3xxx_prm_restore_irqen(u32 *saved_mask) { omap2_prm_write_mod_reg(saved_mask[0], OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); } static int __init omap3xxx_prcm_init(void) { int ret = 0; if (cpu_is_omap34xx()) { ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup); if (!ret) irq_set_status_flags(omap_prcm_event_to_irq("io"), IRQ_NOAUTOEN); } return ret; } subsys_initcall(omap3xxx_prcm_init);
gpl-2.0
wkritzinger/asuswrt-merlin
release/src-rt-7.14.114.x/src/linux/linux-2.6.36/sound/soc/blackfin/bf5xx-tdm.c
41
9064
/*
 * File:         sound/soc/blackfin/bf5xx-tdm.c
 * Author:       Barry Song <Barry.Song@analog.com>
 *
 * Created:      Thurs June 04 2009
 * Description:  Blackfin I2S(TDM) CPU DAI driver
 *               Even though TDM mode can be as part of I2S DAI, but there
 *               are so much difference in configuration and data flow,
 *               it's very ugly to integrate I2S and TDM into a module
 *
 * Modified:
 *               Copyright 2009 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>

#include <asm/irq.h>
#include <asm/portmux.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
#include "bf5xx-sport.h"
#include "bf5xx-tdm.h"

/* Driver-wide state; this driver supports a single TDM port instance. */
static struct bf5xx_tdm_port bf5xx_tdm;
/* Which SPORT (0 or 1) to use, fixed at Kconfig time. */
static int sport_num = CONFIG_SND_BF5XX_SPORT_NUM;

/* DMA channels, error IRQ and register base per SPORT instance */
static struct sport_param sport_params[2] = {
	{
		.dma_rx_chan	= CH_SPORT0_RX,
		.dma_tx_chan	= CH_SPORT0_TX,
		.err_irq	= IRQ_SPORT0_ERROR,
		.regs		= (struct sport_register *)SPORT0_TCR1,
	},
	{
		.dma_rx_chan	= CH_SPORT1_RX,
		.dma_tx_chan	= CH_SPORT1_TX,
		.err_irq	= IRQ_SPORT1_ERROR,
		.regs		= (struct sport_register *)SPORT1_TCR1,
	}
};

/*
 * Setting the TFS pin selector for SPORT 0 based on whether the selected
 * port is F or G.  If the port is F then no conflict should exist for the
 * TFS.  When Port G is selected and EMAC then there is a conflict between
 * the PHY interrupt line and TFS.  Current settings prevent the conflict
 * by ignoring the TFS pin when Port G is selected.  This allows both
 * codecs and EMAC using Port G concurrently.
 */
#ifdef CONFIG_BF527_SPORT0_PORTG
#define LOCAL_SPORT0_TFS (0)
#else
#define LOCAL_SPORT0_TFS (P_SPORT0_TFS)
#endif

/* Zero-terminated peripheral pin lists requested at probe time */
static u16 sport_req[][7] = {
	{P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
	 P_SPORT0_DRPRI, P_SPORT0_RSCLK, LOCAL_SPORT0_TFS, 0},
	{P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
	 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_TFS, 0}
};

/*
 * Validate the requested DAI format.  Only DSP_A (TDM) with the codec as
 * clock and frame master (CPU DAI as slave) is supported.
 */
static int bf5xx_tdm_set_dai_fmt(struct snd_soc_dai *cpu_dai,
		unsigned int fmt)
{
	int ret = 0;

	/* interface format: support TDM, slave mode */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_DSP_A:
		break;
	default:
		printk(KERN_ERR "%s: Unknown DAI format type\n", __func__);
		ret = -EINVAL;
		break;
	}

	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		break;
	/* any mode with the CPU as master is rejected (silently, no log) */
	case SND_SOC_DAIFMT_CBS_CFS:
	case SND_SOC_DAIFMT_CBM_CFS:
	case SND_SOC_DAIFMT_CBS_CFM:
		ret = -EINVAL;
		break;
	default:
		printk(KERN_ERR "%s: Unknown DAI master type\n", __func__);
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * Configure the SPORT word size for the stream.  Only S32_LE is accepted.
 * RX and TX are configured together on first use because the hardware
 * enables both directions at once; 'configured' latches that state.
 */
static int bf5xx_tdm_hw_params(struct snd_pcm_substream *substream,
		struct snd_pcm_hw_params *params,
		struct snd_soc_dai *dai)
{
	int ret = 0;

	bf5xx_tdm.tcr2 &= ~0x1f;
	bf5xx_tdm.rcr2 &= ~0x1f;
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S32_LE:
		bf5xx_tdm.tcr2 |= 31;	/* 32-bit word length (N-1 encoding) */
		bf5xx_tdm.rcr2 |= 31;
		sport_handle->wdsize = 4;
		break;
		/* at present, we only support 32bit transfer */
	default:
		pr_err("not supported PCM format yet\n");
		return -EINVAL;
		break;	/* NOTE(review): unreachable after return */
	}

	if (!bf5xx_tdm.configured) {
		/*
		 * TX and RX are not independent, they are enabled at the
		 * same time, even if only one side is running.  So, we
		 * need to configure both of them at the time when the first
		 * stream is opened.
		 *
		 * CPU DAI: slave mode.
		 */
		ret = sport_config_rx(sport_handle, bf5xx_tdm.rcr1,
			bf5xx_tdm.rcr2, 0, 0);
		if (ret) {
			pr_err("SPORT is busy!\n");
			return -EBUSY;
		}

		ret = sport_config_tx(sport_handle, bf5xx_tdm.tcr1,
			bf5xx_tdm.tcr2, 0, 0);
		if (ret) {
			pr_err("SPORT is busy!\n");
			return -EBUSY;
		}

		bf5xx_tdm.configured = 1;
	}

	return 0;
}

/* Allow the SPORT to be reconfigured once the last stream closes. */
static void bf5xx_tdm_shutdown(struct snd_pcm_substream *substream,
		struct snd_soc_dai *dai)
{
	/* No active stream, SPORT is allowed to be configured again. */
	if (!dai->active)
		bf5xx_tdm.configured = 0;
}

/*
 * Map up to BFIN_TDM_DAI_MAX_SLOTS logical channels onto TDM slots.
 * Rejects out-of-range slot numbers and duplicate slot assignments
 * (tracked via the tx_mapped/rx_mapped bitmasks).
 */
static int bf5xx_tdm_set_channel_map(struct snd_soc_dai *dai,
		unsigned int tx_num, unsigned int *tx_slot,
		unsigned int rx_num, unsigned int *rx_slot)
{
	int i;
	unsigned int slot;
	unsigned int tx_mapped = 0, rx_mapped = 0;

	if ((tx_num > BFIN_TDM_DAI_MAX_SLOTS) ||
			(rx_num > BFIN_TDM_DAI_MAX_SLOTS))
		return -EINVAL;

	for (i = 0; i < tx_num; i++) {
		slot = tx_slot[i];
		if ((slot < BFIN_TDM_DAI_MAX_SLOTS) &&
				(!(tx_mapped & (1 << slot)))) {
			bf5xx_tdm.tx_map[i] = slot;
			tx_mapped |= 1 << slot;
		} else
			return -EINVAL;
	}
	for (i = 0; i < rx_num; i++) {
		slot = rx_slot[i];
		if ((slot < BFIN_TDM_DAI_MAX_SLOTS) &&
				(!(rx_mapped & (1 << slot)))) {
			bf5xx_tdm.rx_map[i] = slot;
			rx_mapped |= 1 << slot;
		} else
			return -EINVAL;
	}

	return 0;
}

#ifdef CONFIG_PM
/* Stop any running SPORT DMA before suspend. */
static int bf5xx_tdm_suspend(struct snd_soc_dai *dai)
{
	struct sport_device *sport = snd_soc_dai_get_drvdata(dai);

	if (!dai->active)
		return 0;
	if (dai->capture.active)
		sport_rx_stop(sport);
	if (dai->playback.active)
		sport_tx_stop(sport);
	return 0;
}

/*
 * Re-apply multichannel and RX/TX SPORT configuration on resume.
 *
 * NOTE(review): suspend obtains the sport via snd_soc_dai_get_drvdata()
 * while resume reads dai->private_data; probe only sets
 * sport_handle->private_data, so one of these accessors is likely wrong
 * for this tree — confirm against the snd_soc_dai definition in use.
 * NOTE(review): failures below set ret = -EBUSY but the function still
 * returns 0; errors are logged and otherwise ignored.
 */
static int bf5xx_tdm_resume(struct snd_soc_dai *dai)
{
	int ret;
	struct sport_device *sport = dai->private_data;

	if (!dai->active)
		return 0;

	ret = sport_set_multichannel(sport, 8, 0xFF, 1);
	if (ret) {
		pr_err("SPORT is busy!\n");
		ret = -EBUSY;
	}

	ret = sport_config_rx(sport, IRFS, 0x1F, 0, 0);
	if (ret) {
		pr_err("SPORT is busy!\n");
		ret = -EBUSY;
	}

	ret = sport_config_tx(sport, ITFS, 0x1F, 0, 0);
	if (ret) {
		pr_err("SPORT is busy!\n");
		ret = -EBUSY;
	}

	return 0;
}

#else
#define bf5xx_tdm_suspend NULL
#define bf5xx_tdm_resume NULL
#endif

static struct snd_soc_dai_ops bf5xx_tdm_dai_ops = {
	.hw_params      = bf5xx_tdm_hw_params,
	.set_fmt        = bf5xx_tdm_set_dai_fmt,
	.shutdown       = bf5xx_tdm_shutdown,
	.set_channel_map = bf5xx_tdm_set_channel_map,
};

/* CPU DAI definition: 2-8 channels, 48 kHz, 32-bit only, both directions */
struct snd_soc_dai bf5xx_tdm_dai = {
	.name = "bf5xx-tdm",
	.id = 0,
	.suspend = bf5xx_tdm_suspend,
	.resume = bf5xx_tdm_resume,
	.playback = {
		.channels_min = 2,
		.channels_max = 8,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S32_LE,},
	.capture = {
		.channels_min = 2,
		.channels_max = 8,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S32_LE,},
	.ops = &bf5xx_tdm_dai_ops,
};
EXPORT_SYMBOL_GPL(bf5xx_tdm_dai);

/*
 * Claim the SPORT pins, initialize the SPORT for 8-slot TDM and register
 * the DAI.  On any failure the peripheral pin list is released.
 * NOTE(review): error paths do not undo sport_init(); presumably freed
 * elsewhere — verify against bf5xx-sport.c.
 */
static int __devinit bfin_tdm_probe(struct platform_device *pdev)
{
	int ret = 0;

	if (peripheral_request_list(&sport_req[sport_num][0], "soc-audio")) {
		pr_err("Requesting Peripherals failed\n");
		return -EFAULT;
	}

	/* request DMA for SPORT */
	sport_handle = sport_init(&sport_params[sport_num], 4,
		8 * sizeof(u32), NULL);
	if (!sport_handle) {
		peripheral_free_list(&sport_req[sport_num][0]);
		return -ENODEV;
	}

	/* SPORT works in TDM mode */
	ret = sport_set_multichannel(sport_handle, 8, 0xFF, 1);
	if (ret) {
		pr_err("SPORT is busy!\n");
		ret = -EBUSY;
		goto sport_config_err;
	}

	ret = sport_config_rx(sport_handle, IRFS, 0x1F, 0, 0);
	if (ret) {
		pr_err("SPORT is busy!\n");
		ret = -EBUSY;
		goto sport_config_err;
	}

	ret = sport_config_tx(sport_handle, ITFS, 0x1F, 0, 0);
	if (ret) {
		pr_err("SPORT is busy!\n");
		ret = -EBUSY;
		goto sport_config_err;
	}

	ret = snd_soc_register_dai(&bf5xx_tdm_dai);
	if (ret) {
		pr_err("Failed to register DAI: %d\n", ret);
		goto sport_config_err;
	}
	sport_handle->private_data = &bf5xx_tdm;
	return 0;

sport_config_err:
	peripheral_free_list(&sport_req[sport_num][0]);
	return ret;
}

/* Release pins and unregister the DAI. */
static int __devexit bfin_tdm_remove(struct platform_device *pdev)
{
	peripheral_free_list(&sport_req[sport_num][0]);
	snd_soc_unregister_dai(&bf5xx_tdm_dai);

	return 0;
}

static struct platform_driver bfin_tdm_driver = {
	.probe  = bfin_tdm_probe,
	.remove = __devexit_p(bfin_tdm_remove),
	.driver = {
		.name   = "bfin-tdm",
		.owner  = THIS_MODULE,
	},
};

static int __init bfin_tdm_init(void)
{
	return platform_driver_register(&bfin_tdm_driver);
}
module_init(bfin_tdm_init);

static void __exit bfin_tdm_exit(void)
{
	platform_driver_unregister(&bfin_tdm_driver);
}
module_exit(bfin_tdm_exit);

/* Module information */
MODULE_AUTHOR("Barry Song");
MODULE_DESCRIPTION("TDM driver for ADI Blackfin");
MODULE_LICENSE("GPL");
gpl-2.0
megraf/asuswrt-merlin
release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/staging/octeon/ethernet-sgmii.c
41
3612
/********************************************************************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2007 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. 
* Contact Cavium Networks for more information **********************************************************************/ #include <linux/kernel.h> #include <linux/netdevice.h> #include <net/dst.h> #include <asm/octeon/octeon.h> #include "ethernet-defines.h" #include "octeon-ethernet.h" #include "ethernet-util.h" #include "cvmx-helper.h" #include "cvmx-gmxx-defs.h" int cvm_oct_sgmii_open(struct net_device *dev) { union cvmx_gmxx_prtx_cfg gmx_cfg; struct octeon_ethernet *priv = netdev_priv(dev); int interface = INTERFACE(priv->port); int index = INDEX(priv->port); cvmx_helper_link_info_t link_info; gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); gmx_cfg.s.en = 1; cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); if (!octeon_is_simulation()) { link_info = cvmx_helper_link_get(priv->port); if (!link_info.s.link_up) netif_carrier_off(dev); } return 0; } int cvm_oct_sgmii_stop(struct net_device *dev) { union cvmx_gmxx_prtx_cfg gmx_cfg; struct octeon_ethernet *priv = netdev_priv(dev); int interface = INTERFACE(priv->port); int index = INDEX(priv->port); gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); gmx_cfg.s.en = 0; cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); return 0; } static void cvm_oct_sgmii_poll(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); cvmx_helper_link_info_t link_info; link_info = cvmx_helper_link_get(priv->port); if (link_info.u64 == priv->link_info) return; link_info = cvmx_helper_link_autoconf(priv->port); priv->link_info = link_info.u64; /* Tell Linux */ if (link_info.s.link_up) { if (!netif_carrier_ok(dev)) netif_carrier_on(dev); if (priv->queue != -1) DEBUGPRINT ("%s: %u Mbps %s duplex, port %2d, queue %2d\n", dev->name, link_info.s.speed, (link_info.s.full_duplex) ? "Full" : "Half", priv->port, priv->queue); else DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n", dev->name, link_info.s.speed, (link_info.s.full_duplex) ? 
"Full" : "Half", priv->port); } else { if (netif_carrier_ok(dev)) netif_carrier_off(dev); DEBUGPRINT("%s: Link down\n", dev->name); } } int cvm_oct_sgmii_init(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); cvm_oct_common_init(dev); dev->netdev_ops->ndo_stop(dev); if (!octeon_is_simulation() && priv->phydev == NULL) priv->poll = cvm_oct_sgmii_poll; return 0; } void cvm_oct_sgmii_uninit(struct net_device *dev) { cvm_oct_common_uninit(dev); }
gpl-2.0
alcobar/asuswrt-merlin
release/src-rt-7.x.main/src/linux/linux-2.6.36/drivers/char/dsp56k.c
41
12003
/* * The DSP56001 Device Driver, saviour of the Free World(tm) * * Authors: Fredrik Noring <noring@nocrew.org> * lars brinkhoff <lars@nocrew.org> * Tomas Berndtsson <tomas@nocrew.org> * * First version May 1996 * * History: * 97-01-29 Tomas Berndtsson, * Integrated with Linux 2.1.21 kernel sources. * 97-02-15 Tomas Berndtsson, * Fixed for kernel 2.1.26 * * BUGS: * Hmm... there must be something here :) * * Copyright (C) 1996,1997 Fredrik Noring, lars brinkhoff & Tomas Berndtsson * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/major.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/delay.h> /* guess what */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/device.h> #include <linux/smp_lock.h> #include <linux/firmware.h> #include <linux/platform_device.h> #include <linux/uaccess.h> /* For put_user and get_user */ #include <asm/atarihw.h> #include <asm/traps.h> #include <asm/dsp56k.h> /* minor devices */ #define DSP56K_DEV_56001 0 /* The only device so far */ #define TIMEOUT 10 /* Host port timeout in number of tries */ #define MAXIO 2048 /* Maximum number of words before sleep */ #define DSP56K_MAX_BINARY_LENGTH (3*64*1024) #define DSP56K_TX_INT_ON dsp56k_host_interface.icr |= DSP56K_ICR_TREQ #define DSP56K_RX_INT_ON dsp56k_host_interface.icr |= DSP56K_ICR_RREQ #define DSP56K_TX_INT_OFF dsp56k_host_interface.icr &= ~DSP56K_ICR_TREQ #define DSP56K_RX_INT_OFF dsp56k_host_interface.icr &= ~DSP56K_ICR_RREQ #define DSP56K_TRANSMIT (dsp56k_host_interface.isr & DSP56K_ISR_TXDE) #define DSP56K_RECEIVE (dsp56k_host_interface.isr & DSP56K_ISR_RXDF) #define handshake(count, maxio, timeout, ENABLE, f) \ { \ long i, t, m; \ while (count > 0) { \ m = min_t(unsigned long, count, maxio); \ for (i = 0; i < m; i++) { \ for (t = 0; t < timeout && !ENABLE; t++) \ 
msleep(20); \ if(!ENABLE) \ return -EIO; \ f; \ } \ count -= m; \ if (m == maxio) msleep(20); \ } \ } #define tx_wait(n) \ { \ int t; \ for(t = 0; t < n && !DSP56K_TRANSMIT; t++) \ msleep(10); \ if(!DSP56K_TRANSMIT) { \ return -EIO; \ } \ } #define rx_wait(n) \ { \ int t; \ for(t = 0; t < n && !DSP56K_RECEIVE; t++) \ msleep(10); \ if(!DSP56K_RECEIVE) { \ return -EIO; \ } \ } static struct dsp56k_device { unsigned long in_use; long maxio, timeout; int tx_wsize, rx_wsize; } dsp56k; static struct class *dsp56k_class; static int dsp56k_reset(void) { u_char status; /* Power down the DSP */ sound_ym.rd_data_reg_sel = 14; status = sound_ym.rd_data_reg_sel & 0xef; sound_ym.wd_data = status; sound_ym.wd_data = status | 0x10; udelay(10); /* Power up the DSP */ sound_ym.rd_data_reg_sel = 14; sound_ym.wd_data = sound_ym.rd_data_reg_sel & 0xef; return 0; } static int dsp56k_upload(u_char __user *bin, int len) { struct platform_device *pdev; const struct firmware *fw; const char fw_name[] = "dsp56k/bootstrap.bin"; int err; int i; dsp56k_reset(); pdev = platform_device_register_simple("dsp56k", 0, NULL, 0); if (IS_ERR(pdev)) { printk(KERN_ERR "Failed to register device for \"%s\"\n", fw_name); return -EINVAL; } err = request_firmware(&fw, fw_name, &pdev->dev); platform_device_unregister(pdev); if (err) { printk(KERN_ERR "Failed to load image \"%s\" err %d\n", fw_name, err); return err; } if (fw->size % 3) { printk(KERN_ERR "Bogus length %d in image \"%s\"\n", fw->size, fw_name); release_firmware(fw); return -EINVAL; } for (i = 0; i < fw->size; i = i + 3) { /* tx_wait(10); */ dsp56k_host_interface.data.b[1] = fw->data[i]; dsp56k_host_interface.data.b[2] = fw->data[i + 1]; dsp56k_host_interface.data.b[3] = fw->data[i + 2]; } release_firmware(fw); for (; i < 512; i++) { /* tx_wait(10); */ dsp56k_host_interface.data.b[1] = 0; dsp56k_host_interface.data.b[2] = 0; dsp56k_host_interface.data.b[3] = 0; } for (i = 0; i < len; i++) { tx_wait(10); get_user(dsp56k_host_interface.data.b[1], 
bin++); get_user(dsp56k_host_interface.data.b[2], bin++); get_user(dsp56k_host_interface.data.b[3], bin++); } tx_wait(10); dsp56k_host_interface.data.l = 3; /* Magic execute */ return 0; } static ssize_t dsp56k_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct inode *inode = file->f_path.dentry->d_inode; int dev = iminor(inode) & 0x0f; switch(dev) { case DSP56K_DEV_56001: { long n; /* Don't do anything if nothing is to be done */ if (!count) return 0; n = 0; switch (dsp56k.rx_wsize) { case 1: /* 8 bit */ { handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE, put_user(dsp56k_host_interface.data.b[3], buf+n++)); return n; } case 2: /* 16 bit */ { short __user *data; count /= 2; data = (short __user *) buf; handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE, put_user(dsp56k_host_interface.data.w[1], data+n++)); return 2*n; } case 3: /* 24 bit */ { count /= 3; handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE, put_user(dsp56k_host_interface.data.b[1], buf+n++); put_user(dsp56k_host_interface.data.b[2], buf+n++); put_user(dsp56k_host_interface.data.b[3], buf+n++)); return 3*n; } case 4: /* 32 bit */ { long __user *data; count /= 4; data = (long __user *) buf; handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE, put_user(dsp56k_host_interface.data.l, data+n++)); return 4*n; } } return -EFAULT; } default: printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev); return -ENXIO; } } static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct inode *inode = file->f_path.dentry->d_inode; int dev = iminor(inode) & 0x0f; switch(dev) { case DSP56K_DEV_56001: { long n; /* Don't do anything if nothing is to be done */ if (!count) return 0; n = 0; switch (dsp56k.tx_wsize) { case 1: /* 8 bit */ { handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT, get_user(dsp56k_host_interface.data.b[3], buf+n++)); return n; } case 2: /* 16 bit */ { const 
short __user *data; count /= 2; data = (const short __user *)buf; handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT, get_user(dsp56k_host_interface.data.w[1], data+n++)); return 2*n; } case 3: /* 24 bit */ { count /= 3; handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT, get_user(dsp56k_host_interface.data.b[1], buf+n++); get_user(dsp56k_host_interface.data.b[2], buf+n++); get_user(dsp56k_host_interface.data.b[3], buf+n++)); return 3*n; } case 4: /* 32 bit */ { const long __user *data; count /= 4; data = (const long __user *)buf; handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT, get_user(dsp56k_host_interface.data.l, data+n++)); return 4*n; } } return -EFAULT; } default: printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev); return -ENXIO; } } static long dsp56k_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int dev = iminor(file->f_path.dentry->d_inode) & 0x0f; void __user *argp = (void __user *)arg; switch(dev) { case DSP56K_DEV_56001: switch(cmd) { case DSP56K_UPLOAD: { char __user *bin; int r, len; struct dsp56k_upload __user *binary = argp; if(get_user(len, &binary->len) < 0) return -EFAULT; if(get_user(bin, &binary->bin) < 0) return -EFAULT; if (len == 0) { return -EINVAL; /* nothing to upload?!? 
*/ } if (len > DSP56K_MAX_BINARY_LENGTH) { return -EINVAL; } lock_kernel(); r = dsp56k_upload(bin, len); unlock_kernel(); if (r < 0) { return r; } break; } case DSP56K_SET_TX_WSIZE: if (arg > 4 || arg < 1) return -EINVAL; lock_kernel(); dsp56k.tx_wsize = (int) arg; unlock_kernel(); break; case DSP56K_SET_RX_WSIZE: if (arg > 4 || arg < 1) return -EINVAL; lock_kernel(); dsp56k.rx_wsize = (int) arg; unlock_kernel(); break; case DSP56K_HOST_FLAGS: { int dir, out, status; struct dsp56k_host_flags __user *hf = argp; if(get_user(dir, &hf->dir) < 0) return -EFAULT; if(get_user(out, &hf->out) < 0) return -EFAULT; lock_kernel(); if ((dir & 0x1) && (out & 0x1)) dsp56k_host_interface.icr |= DSP56K_ICR_HF0; else if (dir & 0x1) dsp56k_host_interface.icr &= ~DSP56K_ICR_HF0; if ((dir & 0x2) && (out & 0x2)) dsp56k_host_interface.icr |= DSP56K_ICR_HF1; else if (dir & 0x2) dsp56k_host_interface.icr &= ~DSP56K_ICR_HF1; status = 0; if (dsp56k_host_interface.icr & DSP56K_ICR_HF0) status |= 0x1; if (dsp56k_host_interface.icr & DSP56K_ICR_HF1) status |= 0x2; if (dsp56k_host_interface.isr & DSP56K_ISR_HF2) status |= 0x4; if (dsp56k_host_interface.isr & DSP56K_ISR_HF3) status |= 0x8; unlock_kernel(); return put_user(status, &hf->status); } case DSP56K_HOST_CMD: if (arg > 31 || arg < 0) return -EINVAL; lock_kernel(); dsp56k_host_interface.cvr = (u_char)((arg & DSP56K_CVR_HV_MASK) | DSP56K_CVR_HC); unlock_kernel(); break; default: return -EINVAL; } return 0; default: printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev); return -ENXIO; } } /* As of 2.1.26 this should be dsp56k_poll, * but how do I then check device minor number? * Do I need this function at all??? 
*/ static int dsp56k_open(struct inode *inode, struct file *file) { int dev = iminor(inode) & 0x0f; int ret = 0; lock_kernel(); switch(dev) { case DSP56K_DEV_56001: if (test_and_set_bit(0, &dsp56k.in_use)) { ret = -EBUSY; goto out; } dsp56k.timeout = TIMEOUT; dsp56k.maxio = MAXIO; dsp56k.rx_wsize = dsp56k.tx_wsize = 4; DSP56K_TX_INT_OFF; DSP56K_RX_INT_OFF; /* Zero host flags */ dsp56k_host_interface.icr &= ~DSP56K_ICR_HF0; dsp56k_host_interface.icr &= ~DSP56K_ICR_HF1; break; default: ret = -ENODEV; } out: unlock_kernel(); return ret; } static int dsp56k_release(struct inode *inode, struct file *file) { int dev = iminor(inode) & 0x0f; switch(dev) { case DSP56K_DEV_56001: clear_bit(0, &dsp56k.in_use); break; default: printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev); return -ENXIO; } return 0; } static const struct file_operations dsp56k_fops = { .owner = THIS_MODULE, .read = dsp56k_read, .write = dsp56k_write, .unlocked_ioctl = dsp56k_ioctl, .open = dsp56k_open, .release = dsp56k_release, }; /****** Init and module functions ******/ static char banner[] __initdata = KERN_INFO "DSP56k driver installed\n"; static int __init dsp56k_init_driver(void) { int err = 0; if(!MACH_IS_ATARI || !ATARIHW_PRESENT(DSP56K)) { printk("DSP56k driver: Hardware not present\n"); return -ENODEV; } if(register_chrdev(DSP56K_MAJOR, "dsp56k", &dsp56k_fops)) { printk("DSP56k driver: Unable to register driver\n"); return -ENODEV; } dsp56k_class = class_create(THIS_MODULE, "dsp56k"); if (IS_ERR(dsp56k_class)) { err = PTR_ERR(dsp56k_class); goto out_chrdev; } device_create(dsp56k_class, NULL, MKDEV(DSP56K_MAJOR, 0), NULL, "dsp56k"); printk(banner); goto out; out_chrdev: unregister_chrdev(DSP56K_MAJOR, "dsp56k"); out: return err; } module_init(dsp56k_init_driver); static void __exit dsp56k_cleanup_driver(void) { device_destroy(dsp56k_class, MKDEV(DSP56K_MAJOR, 0)); class_destroy(dsp56k_class); unregister_chrdev(DSP56K_MAJOR, "dsp56k"); } module_exit(dsp56k_cleanup_driver); 
MODULE_LICENSE("GPL"); MODULE_FIRMWARE("dsp56k/bootstrap.bin");
gpl-2.0
AndreyPopovNew/asuswrt-merlin-rt-n
release/src-rt-7.x.main/src/linux/linux-2.6.36/drivers/media/video/uvc/uvc_status.c
41
5565
/* * uvc_status.c -- USB Video Class driver - Status endpoint * * Copyright (C) 2007-2009 * Laurent Pinchart (laurent.pinchart@skynet.be) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/kernel.h> #include <linux/input.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/input.h> #include "uvcvideo.h" /* -------------------------------------------------------------------------- * Input device */ #ifdef CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV static int uvc_input_init(struct uvc_device *dev) { struct input_dev *input; int ret; input = input_allocate_device(); if (input == NULL) return -ENOMEM; usb_make_path(dev->udev, dev->input_phys, sizeof(dev->input_phys)); strlcat(dev->input_phys, "/button", sizeof(dev->input_phys)); input->name = dev->name; input->phys = dev->input_phys; usb_to_input_id(dev->udev, &input->id); input->dev.parent = &dev->intf->dev; __set_bit(EV_KEY, input->evbit); __set_bit(KEY_CAMERA, input->keybit); if ((ret = input_register_device(input)) < 0) goto error; dev->input = input; return 0; error: input_free_device(input); return ret; } static void uvc_input_cleanup(struct uvc_device *dev) { if (dev->input) input_unregister_device(dev->input); } static void uvc_input_report_key(struct uvc_device *dev, unsigned int code, int value) { if (dev->input) { input_report_key(dev->input, code, value); input_sync(dev->input); } } #else #define uvc_input_init(dev) #define uvc_input_cleanup(dev) #define uvc_input_report_key(dev, code, value) #endif /* CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV */ /* -------------------------------------------------------------------------- * Status interrupt endpoint */ static void uvc_event_streaming(struct uvc_device *dev, __u8 *data, int len) { if (len < 3) { uvc_trace(UVC_TRACE_STATUS, "Invalid 
streaming status event " "received.\n"); return; } if (data[2] == 0) { if (len < 4) return; uvc_trace(UVC_TRACE_STATUS, "Button (intf %u) %s len %d\n", data[1], data[3] ? "pressed" : "released", len); uvc_input_report_key(dev, KEY_CAMERA, data[3]); } else { uvc_trace(UVC_TRACE_STATUS, "Stream %u error event %02x %02x " "len %d.\n", data[1], data[2], data[3], len); } } static void uvc_event_control(struct uvc_device *dev, __u8 *data, int len) { char *attrs[3] = { "value", "info", "failure" }; if (len < 6 || data[2] != 0 || data[4] > 2) { uvc_trace(UVC_TRACE_STATUS, "Invalid control status event " "received.\n"); return; } uvc_trace(UVC_TRACE_STATUS, "Control %u/%u %s change len %d.\n", data[1], data[3], attrs[data[4]], len); } static void uvc_status_complete(struct urb *urb) { struct uvc_device *dev = urb->context; int len, ret; switch (urb->status) { case 0: break; case -ENOENT: /* usb_kill_urb() called. */ case -ECONNRESET: /* usb_unlink_urb() called. */ case -ESHUTDOWN: /* The endpoint is being disabled. */ case -EPROTO: /* Device is disconnected (reported by some * host controller). */ return; default: uvc_printk(KERN_WARNING, "Non-zero status (%d) in status " "completion handler.\n", urb->status); return; } len = urb->actual_length; if (len > 0) { switch (dev->status[0] & 0x0f) { case UVC_STATUS_TYPE_CONTROL: uvc_event_control(dev, dev->status, len); break; case UVC_STATUS_TYPE_STREAMING: uvc_event_streaming(dev, dev->status, len); break; default: uvc_trace(UVC_TRACE_STATUS, "Unknown status event " "type %u.\n", dev->status[0]); break; } } /* Resubmit the URB. 
*/ urb->interval = dev->int_ep->desc.bInterval; if ((ret = usb_submit_urb(urb, GFP_ATOMIC)) < 0) { uvc_printk(KERN_ERR, "Failed to resubmit status URB (%d).\n", ret); } } int uvc_status_init(struct uvc_device *dev) { struct usb_host_endpoint *ep = dev->int_ep; unsigned int pipe; int interval; if (ep == NULL) return 0; uvc_input_init(dev); dev->status = kzalloc(UVC_MAX_STATUS_SIZE, GFP_KERNEL); if (dev->status == NULL) return -ENOMEM; dev->int_urb = usb_alloc_urb(0, GFP_KERNEL); if (dev->int_urb == NULL) { kfree(dev->status); return -ENOMEM; } pipe = usb_rcvintpipe(dev->udev, ep->desc.bEndpointAddress); /* For high-speed interrupt endpoints, the bInterval value is used as * an exponent of two. Some developers forgot about it. */ interval = ep->desc.bInterval; if (interval > 16 && dev->udev->speed == USB_SPEED_HIGH && (dev->quirks & UVC_QUIRK_STATUS_INTERVAL)) interval = fls(interval) - 1; usb_fill_int_urb(dev->int_urb, dev->udev, pipe, dev->status, UVC_MAX_STATUS_SIZE, uvc_status_complete, dev, interval); return 0; } void uvc_status_cleanup(struct uvc_device *dev) { usb_kill_urb(dev->int_urb); usb_free_urb(dev->int_urb); kfree(dev->status); uvc_input_cleanup(dev); } int uvc_status_start(struct uvc_device *dev) { if (dev->int_urb == NULL) return 0; return usb_submit_urb(dev->int_urb, GFP_KERNEL); } void uvc_status_stop(struct uvc_device *dev) { usb_kill_urb(dev->int_urb); } int uvc_status_suspend(struct uvc_device *dev) { if (atomic_read(&dev->users)) usb_kill_urb(dev->int_urb); return 0; } int uvc_status_resume(struct uvc_device *dev) { if (dev->int_urb == NULL || atomic_read(&dev->users) == 0) return 0; return usb_submit_urb(dev->int_urb, GFP_NOIO); }
gpl-2.0
omegamoon/Rockchip-GPL-Kernel
arch/arm/plat-rk/mem_reserve.c
41
1219
#include <plat/board.h> #include <linux/memblock.h> #include <asm/setup.h> /* Macros for Data Alignment : size */ #define ALIGN_SZ(p, a) \ (((p) + ((a) - 1)) & ~((a) - 1)) static size_t reserved_size = 0; static phys_addr_t reserved_base_end = 0; phys_addr_t __init board_mem_reserve_add(char *name, size_t size) { phys_addr_t base = 0; size_t align_size = ALIGN_SZ(size, SZ_1M); if (reserved_base_end == 0) { reserved_base_end = meminfo.bank[meminfo.nr_banks - 1].start + meminfo.bank[meminfo.nr_banks - 1].size; /* Workaround for RGA driver, which may overflow on physical memory address parameter */ if (reserved_base_end > 0xA0000000) reserved_base_end = 0xA0000000; } reserved_size += align_size; base = reserved_base_end - reserved_size; pr_info("memory reserve: Memory(base:0x%x size:%dM) reserved for <%s>\n", base, align_size/SZ_1M, name); return base; } void __init board_mem_reserved(void) { phys_addr_t base = reserved_base_end - reserved_size; if(reserved_size){ memblock_remove(base, reserved_size); pr_info("memory reserve: Total reserved %dM\n", reserved_size/SZ_1M); } }
gpl-2.0
jshafer817/sailfish_kernel_hp_tenderloin30
drivers/usb/host/ehci-msm72k.c
41
20896
/* ehci-msm.c - HSUSB Host Controller Driver Implementation * * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved. * * Partly derived from ehci-fsl.c and ehci-hcd.c * Copyright (c) 2000-2004 by David Brownell * Copyright (c) 2005 MontaVista Software * * All source code in this file is licensed under the following license except * where indicated. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * See the GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, you can find it at http://www.fsf.org */ #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/clk.h> #include <linux/spinlock.h> #include <mach/board.h> #include <mach/rpc_hsusb.h> #include <mach/msm_hsusb.h> #include <mach/msm_hsusb_hw.h> #include <mach/msm_otg.h> #include <mach/clk.h> #include <linux/wakelock.h> #include <linux/pm_runtime.h> #include <mach/msm72k_otg.h> #ifdef CONFIG_USB_HOST_NOTIFY #include <linux/host_notify.h> #endif #define MSM_USB_BASE (hcd->regs) struct msmusb_hcd { struct ehci_hcd ehci; struct clk *alt_core_clk; struct clk *iface_clk; unsigned in_lpm; struct work_struct lpm_exit_work; spinlock_t lock; struct wake_lock wlock; unsigned int clk_enabled; struct msm_usb_host_platform_data *pdata; unsigned running; struct otg_transceiver *xceiv; struct work_struct otg_work; unsigned flags; struct msm_otg_ops otg_ops; }; static inline struct msmusb_hcd *hcd_to_mhcd(struct usb_hcd *hcd) { return (struct msmusb_hcd *) (hcd->hcd_priv); } static inline struct usb_hcd *mhcd_to_hcd(struct msmusb_hcd *mhcd) { return 
container_of((void *) mhcd, struct usb_hcd, hcd_priv); } static void msm_xusb_pm_qos_update(struct msmusb_hcd *mhcd, int vote) { struct msm_usb_host_platform_data *pdata = mhcd->pdata; /* if otg driver is available, it would take * care of voting for appropriate pclk source */ if (mhcd->xceiv) return; if (vote) clk_enable(pdata->ebi1_clk); else clk_disable(pdata->ebi1_clk); } static void msm_xusb_enable_clks(struct msmusb_hcd *mhcd) { struct msm_usb_host_platform_data *pdata = mhcd->pdata; if (mhcd->clk_enabled) return; switch (PHY_TYPE(pdata->phy_info)) { case USB_PHY_INTEGRATED: /* OTG driver takes care of clock management */ break; case USB_PHY_SERIAL_PMIC: clk_enable(mhcd->alt_core_clk); clk_enable(mhcd->iface_clk); break; default: pr_err("%s: undefined phy type ( %X )\n", __func__, pdata->phy_info); return; } mhcd->clk_enabled = 1; } static void msm_xusb_disable_clks(struct msmusb_hcd *mhcd) { struct msm_usb_host_platform_data *pdata = mhcd->pdata; if (!mhcd->clk_enabled) return; switch (PHY_TYPE(pdata->phy_info)) { case USB_PHY_INTEGRATED: /* OTG driver takes care of clock management */ break; case USB_PHY_SERIAL_PMIC: clk_disable(mhcd->alt_core_clk); clk_disable(mhcd->iface_clk); break; default: pr_err("%s: undefined phy type ( %X )\n", __func__, pdata->phy_info); return; } mhcd->clk_enabled = 0; } static int usb_wakeup_phy(struct usb_hcd *hcd) { struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd); struct msm_usb_host_platform_data *pdata = mhcd->pdata; int ret = -ENODEV; switch (PHY_TYPE(pdata->phy_info)) { case USB_PHY_INTEGRATED: break; case USB_PHY_SERIAL_PMIC: ret = msm_fsusb_resume_phy(); break; default: pr_err("%s: undefined phy type ( %X ) \n", __func__, pdata->phy_info); } return ret; } #ifdef CONFIG_PM static int usb_suspend_phy(struct usb_hcd *hcd) { int ret = 0; struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd); struct msm_usb_host_platform_data *pdata = mhcd->pdata; switch (PHY_TYPE(pdata->phy_info)) { case USB_PHY_INTEGRATED: break; case USB_PHY_SERIAL_PMIC: 
ret = msm_fsusb_set_remote_wakeup(); ret = msm_fsusb_suspend_phy(); break; default: pr_err("%s: undefined phy type ( %X ) \n", __func__, pdata->phy_info); ret = -ENODEV; break; } return ret; } static int usb_lpm_enter(struct usb_hcd *hcd) { struct device *dev = container_of((void *)hcd, struct device, platform_data); struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd); disable_irq(hcd->irq); if (mhcd->in_lpm) { pr_info("%s: already in lpm. nothing to do\n", __func__); enable_irq(hcd->irq); return 0; } if (HC_IS_RUNNING(hcd->state)) { pr_info("%s: can't enter into lpm. controller is runnning\n", __func__); enable_irq(hcd->irq); return -1; } pr_info("%s: lpm enter procedure started\n", __func__); mhcd->in_lpm = 1; if (usb_suspend_phy(hcd)) { mhcd->in_lpm = 0; enable_irq(hcd->irq); pr_info("phy suspend failed\n"); pr_info("%s: lpm enter procedure end\n", __func__); return -1; } msm_xusb_disable_clks(mhcd); if (mhcd->xceiv && mhcd->xceiv->set_suspend) mhcd->xceiv->set_suspend(mhcd->xceiv, 1); if (device_may_wakeup(dev)) enable_irq_wake(hcd->irq); enable_irq(hcd->irq); pr_info("%s: lpm enter procedure end\n", __func__); return 0; } #endif void usb_lpm_exit_w(struct work_struct *work) { struct msmusb_hcd *mhcd = container_of((void *) work, struct msmusb_hcd, lpm_exit_work); struct usb_hcd *hcd = mhcd_to_hcd(mhcd); struct device *dev = container_of((void *)hcd, struct device, platform_data); msm_xusb_enable_clks(mhcd); if (usb_wakeup_phy(hcd)) { pr_err("fatal error: cannot bring phy out of lpm\n"); return; } /* If resume signalling finishes before lpm exit, PCD is not set in * USBSTS register. 
Drive resume signal to the downstream device now * so that EHCI can process the upcoming port change interrupt.*/ writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC); if (mhcd->xceiv && mhcd->xceiv->set_suspend) mhcd->xceiv->set_suspend(mhcd->xceiv, 0); if (device_may_wakeup(dev)) disable_irq_wake(hcd->irq); enable_irq(hcd->irq); } static void usb_lpm_exit(struct usb_hcd *hcd) { unsigned long flags; struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd); spin_lock_irqsave(&mhcd->lock, flags); if (!mhcd->in_lpm) { spin_unlock_irqrestore(&mhcd->lock, flags); return; } mhcd->in_lpm = 0; disable_irq_nosync(hcd->irq); schedule_work(&mhcd->lpm_exit_work); spin_unlock_irqrestore(&mhcd->lock, flags); } static irqreturn_t ehci_msm_irq(struct usb_hcd *hcd) { struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd); struct msm_otg *otg = container_of(mhcd->xceiv, struct msm_otg, otg); /* * OTG scheduled a work to get Integrated PHY out of LPM, * WAIT till then */ if (PHY_TYPE(mhcd->pdata->phy_info) == USB_PHY_INTEGRATED) if (atomic_read(&otg->in_lpm)) return IRQ_HANDLED; return ehci_irq(hcd); } #ifdef CONFIG_PM static int ehci_msm_bus_suspend(struct usb_hcd *hcd) { int ret; struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd); struct device *dev = hcd->self.controller; ret = ehci_bus_suspend(hcd); if (ret) { pr_err("ehci_bus suspend faield\n"); return ret; } if (PHY_TYPE(mhcd->pdata->phy_info) == USB_PHY_INTEGRATED) ret = otg_set_suspend(mhcd->xceiv, 1); else ret = usb_lpm_enter(hcd); pm_runtime_put_noidle(dev); pm_runtime_suspend(dev); wake_unlock(&mhcd->wlock); return ret; } static int ehci_msm_bus_resume(struct usb_hcd *hcd) { struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd); struct device *dev = hcd->self.controller; wake_lock(&mhcd->wlock); pm_runtime_get_noresume(dev); pm_runtime_resume(dev); if (PHY_TYPE(mhcd->pdata->phy_info) == USB_PHY_INTEGRATED) { otg_set_suspend(mhcd->xceiv, 0); } else { /* PMIC serial phy */ usb_lpm_exit(hcd); if (cancel_work_sync(&(mhcd->lpm_exit_work))) 
usb_lpm_exit_w(&mhcd->lpm_exit_work); } return ehci_bus_resume(hcd); } #else #define ehci_msm_bus_suspend NULL #define ehci_msm_bus_resume NULL #endif /* CONFIG_PM */ static int ehci_msm_reset(struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); int retval; ehci->caps = USB_CAPLENGTH; ehci->regs = USB_CAPLENGTH + HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); /* cache the data to minimize the chip reads*/ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); retval = ehci_init(hcd); if (retval) return retval; hcd->has_tt = 1; ehci->sbrn = HCD_USB2; retval = ehci_reset(ehci); /* SW workaround for USB stability issues*/ writel(0x0, USB_AHB_MODE); writel(0x0, USB_AHB_BURST); return retval; } #define PTS_VAL(x) (PHY_TYPE(x) == USB_PHY_SERIAL_PMIC) ? PORTSC_PTS_SERIAL : \ PORTSC_PTS_ULPI static int ehci_msm_run(struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd); int retval = 0; int port = HCS_N_PORTS(ehci->hcs_params); u32 __iomem *reg_ptr; u32 hcc_params; struct msm_usb_host_platform_data *pdata = mhcd->pdata; hcd->uses_new_polling = 1; set_bit(HCD_FLAG_POLL_RH, &hcd->flags); /* set hostmode */ reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE); ehci_writel(ehci, (USBMODE_VBUS | USBMODE_SDIS), reg_ptr); /* port configuration - phy, port speed, port power, port enable */ while (port--) ehci_writel(ehci, (PTS_VAL(pdata->phy_info) | PORT_POWER | PORT_PE), &ehci->regs->port_status[port]); ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list); ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next); hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params); if (HCC_64BIT_ADDR(hcc_params)) ehci_writel(ehci, 0, &ehci->regs->segment); ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET); ehci->command |= CMD_RUN; ehci_writel(ehci, ehci->command, &ehci->regs->command); ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ 
hcd->state = HC_STATE_RUNNING; /*Enable appropriate Interrupts*/ ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable); return retval; } static struct hc_driver msm_hc_driver = { .description = hcd_name, .product_desc = "Qualcomm On-Chip EHCI Host Controller", .hcd_priv_size = sizeof(struct msmusb_hcd), /* * generic hardware linkage */ .irq = ehci_msm_irq, .flags = HCD_USB2, .reset = ehci_msm_reset, .start = ehci_msm_run, .stop = ehci_stop, .shutdown = ehci_shutdown, /* * managing i/o requests and associated device resources */ .urb_enqueue = ehci_urb_enqueue, .urb_dequeue = ehci_urb_dequeue, .endpoint_disable = ehci_endpoint_disable, /* * scheduling support */ .get_frame_number = ehci_get_frame, /* * root hub support */ .hub_status_data = ehci_hub_status_data, .hub_control = ehci_hub_control, .bus_suspend = ehci_msm_bus_suspend, .bus_resume = ehci_msm_bus_resume, .relinquish_port = ehci_relinquish_port, .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; static void msm_hsusb_request_host(void *handle, int request) { struct msmusb_hcd *mhcd = handle; struct usb_hcd *hcd = mhcd_to_hcd(mhcd); struct msm_usb_host_platform_data *pdata = mhcd->pdata; struct msm_otg *otg = container_of(mhcd->xceiv, struct msm_otg, otg); #ifdef CONFIG_USB_OTG struct usb_device *udev = hcd->self.root_hub; #endif struct device *dev = hcd->self.controller; switch (request) { #ifdef CONFIG_USB_OTG case REQUEST_HNP_SUSPEND: /* disable Root hub auto suspend. As hardware is configured * for peripheral mode, mark hardware is not available. */ if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED) { pm_runtime_disable(&udev->dev); /* Mark root hub as disconnected. This would * protect suspend/resume via sysfs. 
*/ udev->state = USB_STATE_NOTATTACHED; clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); hcd->state = HC_STATE_HALT; pm_runtime_put_noidle(dev); pm_runtime_suspend(dev); } break; case REQUEST_HNP_RESUME: if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED) { pm_runtime_get_noresume(dev); pm_runtime_resume(dev); disable_irq(hcd->irq); ehci_msm_reset(hcd); ehci_msm_run(hcd); set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); pm_runtime_enable(&udev->dev); udev->state = USB_STATE_CONFIGURED; enable_irq(hcd->irq); } break; #endif case REQUEST_RESUME: usb_hcd_resume_root_hub(hcd); break; case REQUEST_START: if (mhcd->running) break; pm_runtime_get_noresume(dev); pm_runtime_resume(dev); wake_lock(&mhcd->wlock); msm_xusb_pm_qos_update(mhcd, 1); msm_xusb_enable_clks(mhcd); if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED) if (otg->set_clk) otg->set_clk(mhcd->xceiv, 1); if (pdata->vbus_power) pdata->vbus_power(pdata->phy_info, 1); if (pdata->config_gpio) pdata->config_gpio(1); usb_add_hcd(hcd, hcd->irq, IRQF_SHARED); mhcd->running = 1; if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED) if (otg->set_clk) otg->set_clk(mhcd->xceiv, 0); break; case REQUEST_STOP: if (!mhcd->running) break; mhcd->running = 0; /* come out of lpm before deregistration */ if (PHY_TYPE(pdata->phy_info) == USB_PHY_SERIAL_PMIC) { usb_lpm_exit(hcd); if (cancel_work_sync(&(mhcd->lpm_exit_work))) usb_lpm_exit_w(&mhcd->lpm_exit_work); } usb_remove_hcd(hcd); if (pdata->config_gpio) pdata->config_gpio(0); if (pdata->vbus_power) pdata->vbus_power(pdata->phy_info, 0); msm_xusb_disable_clks(mhcd); wake_lock_timeout(&mhcd->wlock, HZ/2); msm_xusb_pm_qos_update(mhcd, 0); pm_runtime_put_noidle(dev); pm_runtime_suspend(dev); break; } } static void msm_hsusb_otg_work(struct work_struct *work) { struct msmusb_hcd *mhcd; mhcd = container_of(work, struct msmusb_hcd, otg_work); msm_hsusb_request_host((void *)mhcd, mhcd->flags); } static void msm_hsusb_start_host(struct usb_bus *bus, int start) { struct usb_hcd *hcd = 
bus_to_hcd(bus); struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd); mhcd->flags = start; if (in_interrupt()) schedule_work(&mhcd->otg_work); else msm_hsusb_request_host((void *)mhcd, mhcd->flags); } static int msm_xusb_init_phy(struct msmusb_hcd *mhcd) { int ret = -ENODEV; struct usb_hcd *hcd = mhcd_to_hcd(mhcd); struct msm_usb_host_platform_data *pdata = mhcd->pdata; switch (PHY_TYPE(pdata->phy_info)) { case USB_PHY_INTEGRATED: ret = 0; case USB_PHY_SERIAL_PMIC: msm_xusb_enable_clks(mhcd); writel(0, USB_USBINTR); ret = msm_fsusb_rpc_init(&mhcd->otg_ops); if (!ret) msm_fsusb_init_phy(); msm_xusb_disable_clks(mhcd); break; default: pr_err("%s: undefined phy type ( %X ) \n", __func__, pdata->phy_info); } return ret; } static int msm_xusb_rpc_close(struct msmusb_hcd *mhcd) { int retval = -ENODEV; struct msm_usb_host_platform_data *pdata = mhcd->pdata; switch (PHY_TYPE(pdata->phy_info)) { case USB_PHY_INTEGRATED: if (!mhcd->xceiv) retval = msm_hsusb_rpc_close(); break; case USB_PHY_SERIAL_PMIC: retval = msm_fsusb_reset_phy(); msm_fsusb_rpc_deinit(); break; default: pr_err("%s: undefined phy type ( %X ) \n", __func__, pdata->phy_info); } return retval; } #ifdef CONFIG_USB_OTG static void ehci_msm_start_hnp(struct ehci_hcd *ehci) { struct usb_hcd *hcd = ehci_to_hcd(ehci); struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd); /* OTG driver handles HNP */ otg_start_hnp(mhcd->xceiv); } #else #define ehci_msm_start_hnp NULL #endif static int msm_xusb_init_host(struct platform_device *pdev, struct msmusb_hcd *mhcd) { int ret = 0; struct msm_otg *otg; struct usb_hcd *hcd = mhcd_to_hcd(mhcd); struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct msm_usb_host_platform_data *pdata = mhcd->pdata; switch (PHY_TYPE(pdata->phy_info)) { case USB_PHY_INTEGRATED: msm_hsusb_rpc_connect(); if (pdata->vbus_init) pdata->vbus_init(1); /* VBUS might be present. 
Turn off vbus */ if (pdata->vbus_power) pdata->vbus_power(pdata->phy_info, 0); INIT_WORK(&mhcd->otg_work, msm_hsusb_otg_work); mhcd->xceiv = otg_get_transceiver(); if (!mhcd->xceiv) return -ENODEV; otg = container_of(mhcd->xceiv, struct msm_otg, otg); hcd->regs = otg->regs; otg->start_host = msm_hsusb_start_host; ehci->start_hnp = ehci_msm_start_hnp; ret = otg_set_host(mhcd->xceiv, &hcd->self); break; case USB_PHY_SERIAL_PMIC: hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) return -EFAULT; /* get usb clocks */ mhcd->alt_core_clk = clk_get(&pdev->dev, "alt_core_clk"); if (IS_ERR(mhcd->alt_core_clk)) { iounmap(hcd->regs); return PTR_ERR(mhcd->alt_core_clk); } mhcd->iface_clk = clk_get(&pdev->dev, "iface_clk"); if (IS_ERR(mhcd->iface_clk)) { iounmap(hcd->regs); clk_put(mhcd->alt_core_clk); return PTR_ERR(mhcd->iface_clk); } mhcd->otg_ops.request = msm_hsusb_request_host; mhcd->otg_ops.handle = (void *) mhcd; ret = msm_xusb_init_phy(mhcd); if (ret < 0) { iounmap(hcd->regs); clk_put(mhcd->alt_core_clk); clk_put(mhcd->iface_clk); } break; default: pr_err("phy type is bad\n"); } return ret; } static int __devinit ehci_msm_probe(struct platform_device *pdev) { struct usb_hcd *hcd; struct resource *res; struct msm_usb_host_platform_data *pdata; int retval; struct msmusb_hcd *mhcd; hcd = usb_create_hcd(&msm_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) return -ENOMEM; hcd->irq = platform_get_irq(pdev, 0); if (hcd->irq < 0) { usb_put_hcd(hcd); return hcd->irq; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { usb_put_hcd(hcd); return -ENODEV; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); mhcd = hcd_to_mhcd(hcd); spin_lock_init(&mhcd->lock); mhcd->in_lpm = 0; mhcd->running = 0; device_init_wakeup(&pdev->dev, 1); pdata = pdev->dev.platform_data; if (PHY_TYPE(pdata->phy_info) == USB_PHY_UNDEFINED) { usb_put_hcd(hcd); return -ENODEV; } hcd->power_budget = pdata->power_budget; mhcd->pdata = pdata; 
INIT_WORK(&mhcd->lpm_exit_work, usb_lpm_exit_w); wake_lock_init(&mhcd->wlock, WAKE_LOCK_SUSPEND, dev_name(&pdev->dev)); pdata->ebi1_clk = clk_get(&pdev->dev, "core_clk"); if (IS_ERR(pdata->ebi1_clk)) pdata->ebi1_clk = NULL; else clk_set_rate(pdata->ebi1_clk, INT_MAX); #ifdef CONFIG_USB_HOST_NOTIFY if (pdata->host_notify) { hcd->host_notify = pdata->host_notify; hcd->ndev.name = dev_name(&pdev->dev); retval = host_notify_dev_register(&hcd->ndev); if (retval) { dev_err(&pdev->dev, "Failed to host_notify_dev_register\n"); return -ENODEV; } } #endif retval = msm_xusb_init_host(pdev, mhcd); if (retval < 0) { wake_lock_destroy(&mhcd->wlock); usb_put_hcd(hcd); clk_put(pdata->ebi1_clk); } pm_runtime_enable(&pdev->dev); return retval; } static void msm_xusb_uninit_host(struct msmusb_hcd *mhcd) { struct usb_hcd *hcd = mhcd_to_hcd(mhcd); struct msm_usb_host_platform_data *pdata = mhcd->pdata; switch (PHY_TYPE(pdata->phy_info)) { case USB_PHY_INTEGRATED: if (pdata->vbus_init) pdata->vbus_init(0); otg_set_host(mhcd->xceiv, NULL); otg_put_transceiver(mhcd->xceiv); cancel_work_sync(&mhcd->otg_work); break; case USB_PHY_SERIAL_PMIC: iounmap(hcd->regs); clk_put(mhcd->alt_core_clk); clk_put(mhcd->iface_clk); msm_fsusb_reset_phy(); msm_fsusb_rpc_deinit(); break; default: pr_err("phy type is bad\n"); } } static int __exit ehci_msm_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd); struct msm_usb_host_platform_data *pdata; int retval = 0; pdata = pdev->dev.platform_data; device_init_wakeup(&pdev->dev, 0); #ifdef CONFIG_USB_HOST_NOTIFY host_notify_dev_unregister(&hcd->ndev); #endif msm_hsusb_request_host((void *)mhcd, REQUEST_STOP); msm_xusb_uninit_host(mhcd); retval = msm_xusb_rpc_close(mhcd); wake_lock_destroy(&mhcd->wlock); usb_put_hcd(hcd); clk_put(pdata->ebi1_clk); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); return retval; } static int ehci_msm_runtime_suspend(struct device 
*dev) { dev_dbg(dev, "pm_runtime: suspending...\n"); return 0; } static int ehci_msm_runtime_resume(struct device *dev) { dev_dbg(dev, "pm_runtime: resuming...\n"); return 0; } static int ehci_msm_runtime_idle(struct device *dev) { dev_dbg(dev, "pm_runtime: idling...\n"); return 0; } static const struct dev_pm_ops ehci_msm_dev_pm_ops = { .runtime_suspend = ehci_msm_runtime_suspend, .runtime_resume = ehci_msm_runtime_resume, .runtime_idle = ehci_msm_runtime_idle }; static struct platform_driver ehci_msm_driver = { .probe = ehci_msm_probe, .remove = __exit_p(ehci_msm_remove), .driver = {.name = "msm_hsusb_host", .pm = &ehci_msm_dev_pm_ops, }, };
gpl-2.0
pacificIT/linux-2.6.36
drivers/usb/gadget/f_mtp.c
41
32275
/* * Gadget Function Driver for MTP * * Copyright (C) 2010 Google, Inc. * Author: Mike Lockwood <lockwood@android.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* #define DEBUG */ /* #define VERBOSE_DEBUG */ #include <linux/module.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/delay.h> #include <linux/wait.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/file.h> #include <linux/device.h> #include <linux/miscdevice.h> #include <linux/usb.h> #include <linux/usb_usual.h> #include <linux/usb/ch9.h> #include <linux/usb/android_composite.h> #include <linux/usb/f_mtp.h> #define BULK_BUFFER_SIZE 16384 #define INTR_BUFFER_SIZE 28 /* String IDs */ #define INTERFACE_STRING_INDEX 0 /* values for mtp_dev.state */ #define STATE_OFFLINE 0 /* initial state, disconnected */ #define STATE_READY 1 /* ready for userspace calls */ #define STATE_BUSY 2 /* processing userspace calls */ #define STATE_CANCELED 3 /* transaction canceled by host */ #define STATE_ERROR 4 /* error from completion routine */ /* number of tx and rx requests to allocate */ #define TX_REQ_MAX 4 #define RX_REQ_MAX 2 /* ID for Microsoft MTP OS String */ #define MTP_OS_STRING_ID 0xEE /* MTP class reqeusts */ #define MTP_REQ_CANCEL 0x64 #define MTP_REQ_GET_EXT_EVENT_DATA 0x65 #define MTP_REQ_RESET 0x66 #define MTP_REQ_GET_DEVICE_STATUS 0x67 /* constants for device status */ #define MTP_RESPONSE_OK 0x2001 #define MTP_RESPONSE_DEVICE_BUSY 0x2019 static const char shortname[] = "mtp_usb"; struct mtp_dev { struct usb_function function; struct 
usb_composite_dev *cdev; spinlock_t lock; /* appear as MTP or PTP when enumerating */ int interface_mode; struct usb_ep *ep_in; struct usb_ep *ep_out; struct usb_ep *ep_intr; int state; /* synchronize access to our device file */ atomic_t open_excl; /* to enforce only one ioctl at a time */ atomic_t ioctl_excl; struct list_head tx_idle; wait_queue_head_t read_wq; wait_queue_head_t write_wq; struct usb_request *rx_req[RX_REQ_MAX]; struct usb_request *intr_req; int rx_done; /* true if interrupt endpoint is busy */ int intr_busy; /* for processing MTP_SEND_FILE and MTP_RECEIVE_FILE * ioctls on a work queue */ struct workqueue_struct *wq; struct work_struct send_file_work; struct work_struct receive_file_work; struct file *xfer_file; loff_t xfer_file_offset; int64_t xfer_file_length; int xfer_result; }; static struct usb_interface_descriptor mtp_interface_desc = { .bLength = USB_DT_INTERFACE_SIZE, .bDescriptorType = USB_DT_INTERFACE, .bInterfaceNumber = 0, .bNumEndpoints = 3, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC, .bInterfaceProtocol = 0, }; static struct usb_interface_descriptor ptp_interface_desc = { .bLength = USB_DT_INTERFACE_SIZE, .bDescriptorType = USB_DT_INTERFACE, .bInterfaceNumber = 0, .bNumEndpoints = 3, .bInterfaceClass = USB_CLASS_STILL_IMAGE, .bInterfaceSubClass = 1, .bInterfaceProtocol = 1, }; static struct usb_endpoint_descriptor mtp_highspeed_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(512), }; static struct usb_endpoint_descriptor mtp_highspeed_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(512), }; static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, 
.bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_endpoint_descriptor mtp_intr_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE), .bInterval = 6, }; static struct usb_descriptor_header *fs_mtp_descs[] = { (struct usb_descriptor_header *) &mtp_interface_desc, (struct usb_descriptor_header *) &mtp_fullspeed_in_desc, (struct usb_descriptor_header *) &mtp_fullspeed_out_desc, (struct usb_descriptor_header *) &mtp_intr_desc, NULL, }; static struct usb_descriptor_header *hs_mtp_descs[] = { (struct usb_descriptor_header *) &mtp_interface_desc, (struct usb_descriptor_header *) &mtp_highspeed_in_desc, (struct usb_descriptor_header *) &mtp_highspeed_out_desc, (struct usb_descriptor_header *) &mtp_intr_desc, NULL, }; static struct usb_descriptor_header *fs_ptp_descs[] = { (struct usb_descriptor_header *) &ptp_interface_desc, (struct usb_descriptor_header *) &mtp_fullspeed_in_desc, (struct usb_descriptor_header *) &mtp_fullspeed_out_desc, (struct usb_descriptor_header *) &mtp_intr_desc, NULL, }; static struct usb_descriptor_header *hs_ptp_descs[] = { (struct usb_descriptor_header *) &ptp_interface_desc, (struct usb_descriptor_header *) &mtp_highspeed_in_desc, (struct usb_descriptor_header *) &mtp_highspeed_out_desc, (struct usb_descriptor_header *) &mtp_intr_desc, NULL, }; static struct usb_string mtp_string_defs[] = { /* Naming interface "MTP" so libmtp will recognize us */ [INTERFACE_STRING_INDEX].s = "MTP", { }, /* end of list */ }; static struct usb_gadget_strings mtp_string_table = { .language = 0x0409, /* en-US */ .strings = 
mtp_string_defs, }; static struct usb_gadget_strings *mtp_strings[] = { &mtp_string_table, NULL, }; /* Microsoft MTP OS String */ static u8 mtp_os_string[] = { 18, /* sizeof(mtp_os_string) */ USB_DT_STRING, /* Signature field: "MSFT100" */ 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0, /* vendor code */ 1, /* padding */ 0 }; /* Microsoft Extended Configuration Descriptor Header Section */ struct mtp_ext_config_desc_header { __le32 dwLength; __u16 bcdVersion; __le16 wIndex; __u8 bCount; __u8 reserved[7]; }; /* Microsoft Extended Configuration Descriptor Function Section */ struct mtp_ext_config_desc_function { __u8 bFirstInterfaceNumber; __u8 bInterfaceCount; __u8 compatibleID[8]; __u8 subCompatibleID[8]; __u8 reserved[6]; }; /* MTP Extended Configuration Descriptor */ struct { struct mtp_ext_config_desc_header header; struct mtp_ext_config_desc_function function; } mtp_ext_config_desc = { .header = { .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)), .bcdVersion = __constant_cpu_to_le16(0x0100), .wIndex = __constant_cpu_to_le16(4), .bCount = __constant_cpu_to_le16(1), }, .function = { .bFirstInterfaceNumber = 0, .bInterfaceCount = 1, .compatibleID = { 'M', 'T', 'P' }, }, }; struct mtp_device_status { __le16 wLength; __le16 wCode; }; /* temporary variable used between mtp_open() and mtp_gadget_bind() */ static struct mtp_dev *_mtp_dev; static inline struct mtp_dev *func_to_dev(struct usb_function *f) { return container_of(f, struct mtp_dev, function); } static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size) { struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); if (!req) return NULL; /* now allocate buffers for the requests */ req->buf = kmalloc(buffer_size, GFP_KERNEL); if (!req->buf) { usb_ep_free_request(ep, req); return NULL; } return req; } static void mtp_request_free(struct usb_request *req, struct usb_ep *ep) { if (req) { kfree(req->buf); usb_ep_free_request(ep, req); } } static inline int 
_lock(atomic_t *excl) { if (atomic_inc_return(excl) == 1) { return 0; } else { atomic_dec(excl); return -1; } } static inline void _unlock(atomic_t *excl) { atomic_dec(excl); } /* add a request to the tail of a list */ static void req_put(struct mtp_dev *dev, struct list_head *head, struct usb_request *req) { unsigned long flags; spin_lock_irqsave(&dev->lock, flags); list_add_tail(&req->list, head); spin_unlock_irqrestore(&dev->lock, flags); } /* remove a request from the head of a list */ static struct usb_request *req_get(struct mtp_dev *dev, struct list_head *head) { unsigned long flags; struct usb_request *req; spin_lock_irqsave(&dev->lock, flags); if (list_empty(head)) { req = 0; } else { req = list_first_entry(head, struct usb_request, list); list_del(&req->list); } spin_unlock_irqrestore(&dev->lock, flags); return req; } static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req) { struct mtp_dev *dev = _mtp_dev; if (req->status != 0) dev->state = STATE_ERROR; req_put(dev, &dev->tx_idle, req); wake_up(&dev->write_wq); } static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req) { struct mtp_dev *dev = _mtp_dev; dev->rx_done = 1; if (req->status != 0) dev->state = STATE_ERROR; wake_up(&dev->read_wq); } static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req) { struct mtp_dev *dev = _mtp_dev; DBG(dev->cdev, "mtp_complete_intr status: %d actual: %d\n", req->status, req->actual); dev->intr_busy = 0; if (req->status != 0) dev->state = STATE_ERROR; } static int __init create_bulk_endpoints(struct mtp_dev *dev, struct usb_endpoint_descriptor *in_desc, struct usb_endpoint_descriptor *out_desc, struct usb_endpoint_descriptor *intr_desc) { struct usb_composite_dev *cdev = dev->cdev; struct usb_request *req; struct usb_ep *ep; int i; DBG(cdev, "create_bulk_endpoints dev: %p\n", dev); ep = usb_ep_autoconfig(cdev->gadget, in_desc); if (!ep) { DBG(cdev, "usb_ep_autoconfig for ep_in failed\n"); return -ENODEV; } DBG(cdev, 
"usb_ep_autoconfig for ep_in got %s\n", ep->name); ep->driver_data = dev; /* claim the endpoint */ dev->ep_in = ep; ep = usb_ep_autoconfig(cdev->gadget, out_desc); if (!ep) { DBG(cdev, "usb_ep_autoconfig for ep_out failed\n"); return -ENODEV; } DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name); ep->driver_data = dev; /* claim the endpoint */ dev->ep_out = ep; ep = usb_ep_autoconfig(cdev->gadget, out_desc); if (!ep) { DBG(cdev, "usb_ep_autoconfig for ep_out failed\n"); return -ENODEV; } DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name); ep->driver_data = dev; /* claim the endpoint */ dev->ep_out = ep; ep = usb_ep_autoconfig(cdev->gadget, intr_desc); if (!ep) { DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n"); return -ENODEV; } DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name); ep->driver_data = dev; /* claim the endpoint */ dev->ep_intr = ep; /* now allocate requests for our endpoints */ for (i = 0; i < TX_REQ_MAX; i++) { req = mtp_request_new(dev->ep_in, BULK_BUFFER_SIZE); if (!req) goto fail; req->complete = mtp_complete_in; req_put(dev, &dev->tx_idle, req); } for (i = 0; i < RX_REQ_MAX; i++) { req = mtp_request_new(dev->ep_out, BULK_BUFFER_SIZE); if (!req) goto fail; req->complete = mtp_complete_out; dev->rx_req[i] = req; } req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE); if (!req) goto fail; req->complete = mtp_complete_intr; dev->intr_req = req; return 0; fail: printk(KERN_ERR "mtp_bind() could not allocate requests\n"); return -1; } static ssize_t mtp_read(struct file *fp, char __user *buf, size_t count, loff_t *pos) { struct mtp_dev *dev = fp->private_data; struct usb_composite_dev *cdev = dev->cdev; struct usb_request *req; int r = count, xfer; int ret = 0; DBG(cdev, "mtp_read(%d)\n", count); if (count > BULK_BUFFER_SIZE) return -EINVAL; /* we will block until we're online */ DBG(cdev, "mtp_read: waiting for online state\n"); ret = wait_event_interruptible(dev->read_wq, dev->state != STATE_OFFLINE); if 
(ret < 0) { r = ret; goto done; } spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) { /* report cancelation to userspace */ dev->state = STATE_READY; spin_unlock_irq(&dev->lock); return -ECANCELED; } dev->state = STATE_BUSY; spin_unlock_irq(&dev->lock); requeue_req: /* queue a request */ req = dev->rx_req[0]; req->length = count; dev->rx_done = 0; ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL); if (ret < 0) { r = -EIO; goto done; } else { DBG(cdev, "rx %p queue\n", req); } /* wait for a request to complete */ ret = wait_event_interruptible(dev->read_wq, dev->rx_done); if (ret < 0) { r = ret; usb_ep_dequeue(dev->ep_out, req); goto done; } if (dev->state == STATE_BUSY) { /* If we got a 0-len packet, throw it back and try again. */ if (req->actual == 0) goto requeue_req; DBG(cdev, "rx %p %d\n", req, req->actual); xfer = (req->actual < count) ? req->actual : count; r = xfer; if (copy_to_user(buf, req->buf, xfer)) r = -EFAULT; } else r = -EIO; done: spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) r = -ECANCELED; else if (dev->state != STATE_OFFLINE) dev->state = STATE_READY; spin_unlock_irq(&dev->lock); DBG(cdev, "mtp_read returning %d\n", r); return r; } static ssize_t mtp_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos) { struct mtp_dev *dev = fp->private_data; struct usb_composite_dev *cdev = dev->cdev; struct usb_request *req = 0; int r = count, xfer; int sendZLP = 0; int ret; DBG(cdev, "mtp_write(%d)\n", count); spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) { /* report cancelation to userspace */ dev->state = STATE_READY; spin_unlock_irq(&dev->lock); return -ECANCELED; } if (dev->state == STATE_OFFLINE) { spin_unlock_irq(&dev->lock); return -ENODEV; } dev->state = STATE_BUSY; spin_unlock_irq(&dev->lock); /* we need to send a zero length packet to signal the end of transfer * if the transfer size is aligned to a packet boundary. 
*/ if ((count & (dev->ep_in->maxpacket - 1)) == 0) { sendZLP = 1; } while (count > 0 || sendZLP) { /* so we exit after sending ZLP */ if (count == 0) sendZLP = 0; if (dev->state != STATE_BUSY) { DBG(cdev, "mtp_write dev->error\n"); r = -EIO; break; } /* get an idle tx request to use */ req = 0; ret = wait_event_interruptible(dev->write_wq, ((req = req_get(dev, &dev->tx_idle)) || dev->state != STATE_BUSY)); if (!req) { r = ret; break; } if (count > BULK_BUFFER_SIZE) xfer = BULK_BUFFER_SIZE; else xfer = count; if (xfer && copy_from_user(req->buf, buf, xfer)) { r = -EFAULT; break; } req->length = xfer; ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); if (ret < 0) { DBG(cdev, "mtp_write: xfer error %d\n", ret); r = -EIO; break; } buf += xfer; count -= xfer; /* zero this so we don't try to free it on error exit */ req = 0; } if (req) req_put(dev, &dev->tx_idle, req); spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) r = -ECANCELED; else if (dev->state != STATE_OFFLINE) dev->state = STATE_READY; spin_unlock_irq(&dev->lock); DBG(cdev, "mtp_write returning %d\n", r); return r; } /* read from a local file and write to USB */ static void send_file_work(struct work_struct *data) { struct mtp_dev *dev = container_of(data, struct mtp_dev, send_file_work); struct usb_composite_dev *cdev = dev->cdev; struct usb_request *req = 0; struct file *filp; loff_t offset; int64_t count; int xfer, ret; int r = 0; int sendZLP = 0; /* read our parameters */ smp_rmb(); filp = dev->xfer_file; offset = dev->xfer_file_offset; count = dev->xfer_file_length; DBG(cdev, "send_file_work(%lld %lld)\n", offset, count); /* we need to send a zero length packet to signal the end of transfer * if the transfer size is aligned to a packet boundary. 
*/ if ((dev->xfer_file_length & (dev->ep_in->maxpacket - 1)) == 0) { sendZLP = 1; } while (count > 0 || sendZLP) { /* so we exit after sending ZLP */ if (count == 0) sendZLP = 0; /* get an idle tx request to use */ req = 0; ret = wait_event_interruptible(dev->write_wq, (req = req_get(dev, &dev->tx_idle)) || dev->state != STATE_BUSY); if (dev->state == STATE_CANCELED) { r = -ECANCELED; break; } if (!req) { r = ret; break; } if (count > BULK_BUFFER_SIZE) xfer = BULK_BUFFER_SIZE; else xfer = count; ret = vfs_read(filp, req->buf, xfer, &offset); if (ret < 0) { r = ret; break; } xfer = ret; req->length = xfer; ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); if (ret < 0) { DBG(cdev, "send_file_work: xfer error %d\n", ret); dev->state = STATE_ERROR; r = -EIO; break; } count -= xfer; /* zero this so we don't try to free it on error exit */ req = 0; } if (req) req_put(dev, &dev->tx_idle, req); DBG(cdev, "send_file_work returning %d\n", r); /* write the result */ dev->xfer_result = r; smp_wmb(); } /* read from USB and write to a local file */ static void receive_file_work(struct work_struct *data) { struct mtp_dev *dev = container_of(data, struct mtp_dev, receive_file_work); struct usb_composite_dev *cdev = dev->cdev; struct usb_request *read_req = NULL, *write_req = NULL; struct file *filp; loff_t offset; int64_t count; int ret, cur_buf = 0; int r = 0; /* read our parameters */ smp_rmb(); filp = dev->xfer_file; offset = dev->xfer_file_offset; count = dev->xfer_file_length; DBG(cdev, "receive_file_work(%lld)\n", count); while (count > 0 || write_req) { if (count > 0) { /* queue a request */ read_req = dev->rx_req[cur_buf]; cur_buf = (cur_buf + 1) % RX_REQ_MAX; read_req->length = (count > BULK_BUFFER_SIZE ? 
BULK_BUFFER_SIZE : count); dev->rx_done = 0; ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL); if (ret < 0) { r = -EIO; dev->state = STATE_ERROR; break; } } if (write_req) { DBG(cdev, "rx %p %d\n", write_req, write_req->actual); ret = vfs_write(filp, write_req->buf, write_req->actual, &offset); DBG(cdev, "vfs_write %d\n", ret); if (ret != write_req->actual) { r = -EIO; dev->state = STATE_ERROR; break; } write_req = NULL; } if (read_req) { /* wait for our last read to complete */ ret = wait_event_interruptible(dev->read_wq, dev->rx_done || dev->state != STATE_BUSY); if (dev->state == STATE_CANCELED) { r = -ECANCELED; if (!dev->rx_done) usb_ep_dequeue(dev->ep_out, read_req); break; } /* if xfer_file_length is 0xFFFFFFFF, then we read until * we get a zero length packet */ if (count != 0xFFFFFFFF) count -= read_req->actual; if (read_req->actual < read_req->length) { /* short packet is used to signal EOF for sizes > 4 gig */ DBG(cdev, "got short packet\n"); count = 0; } write_req = read_req; read_req = NULL; } } DBG(cdev, "receive_file_work returning %d\n", r); /* write the result */ dev->xfer_result = r; smp_wmb(); } static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event) { struct usb_request *req; int ret; int length = event->length; DBG(dev->cdev, "mtp_send_event(%d)\n", event->length); if (length < 0 || length > INTR_BUFFER_SIZE) return -EINVAL; if (dev->state == STATE_OFFLINE) return -ENODEV; /* unfortunately an interrupt request might hang indefinitely if the host * is not listening on the interrupt endpoint, so instead of waiting, * we just fail if the endpoint is busy. 
*/ if (dev->intr_busy) return -EBUSY; req = dev->intr_req; if (copy_from_user(req->buf, (void __user *)event->data, length)) return -EFAULT; req->length = length; dev->intr_busy = 1; ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL); if (ret) dev->intr_busy = 0; return ret; } static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value) { struct mtp_dev *dev = fp->private_data; struct file *filp = NULL; int ret = -EINVAL; if (_lock(&dev->ioctl_excl)) return -EBUSY; switch (code) { case MTP_SEND_FILE: case MTP_RECEIVE_FILE: { struct mtp_file_range mfr; struct work_struct *work; spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) { /* report cancelation to userspace */ dev->state = STATE_READY; spin_unlock_irq(&dev->lock); ret = -ECANCELED; goto out; } if (dev->state == STATE_OFFLINE) { spin_unlock_irq(&dev->lock); ret = -ENODEV; goto out; } dev->state = STATE_BUSY; spin_unlock_irq(&dev->lock); if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) { ret = -EFAULT; goto fail; } /* hold a reference to the file while we are working with it */ filp = fget(mfr.fd); if (!filp) { ret = -EBADF; goto fail; } /* write the parameters */ dev->xfer_file = filp; dev->xfer_file_offset = mfr.offset; dev->xfer_file_length = mfr.length; smp_wmb(); if (code == MTP_SEND_FILE) work = &dev->send_file_work; else work = &dev->receive_file_work; /* We do the file transfer on a work queue so it will run * in kernel context, which is necessary for vfs_read and * vfs_write to use our buffers in the kernel address space. 
*/ queue_work(dev->wq, work); /* wait for operation to complete */ flush_workqueue(dev->wq); fput(filp); /* read the result */ smp_rmb(); ret = dev->xfer_result; break; } case MTP_SET_INTERFACE_MODE: if (value == MTP_INTERFACE_MODE_MTP || value == MTP_INTERFACE_MODE_PTP) { dev->interface_mode = value; if (value == MTP_INTERFACE_MODE_PTP) { dev->function.descriptors = fs_ptp_descs; dev->function.hs_descriptors = hs_ptp_descs; } else { dev->function.descriptors = fs_mtp_descs; dev->function.hs_descriptors = hs_mtp_descs; } ret = 0; } break; case MTP_SEND_EVENT: { struct mtp_event event; /* return here so we don't change dev->state below, * which would interfere with bulk transfer state. */ if (copy_from_user(&event, (void __user *)value, sizeof(event))) ret = -EFAULT; else ret = mtp_send_event(dev, &event); goto out; } } fail: spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) ret = -ECANCELED; else if (dev->state != STATE_OFFLINE) dev->state = STATE_READY; spin_unlock_irq(&dev->lock); out: _unlock(&dev->ioctl_excl); DBG(dev->cdev, "ioctl returning %d\n", ret); return ret; } static int mtp_open(struct inode *ip, struct file *fp) { printk(KERN_INFO "mtp_open\n"); if (_lock(&_mtp_dev->open_excl)) return -EBUSY; /* clear any error condition */ if (_mtp_dev->state != STATE_OFFLINE) _mtp_dev->state = STATE_READY; fp->private_data = _mtp_dev; return 0; } static int mtp_release(struct inode *ip, struct file *fp) { printk(KERN_INFO "mtp_release\n"); _unlock(&_mtp_dev->open_excl); return 0; } /* file operations for /dev/mtp_usb */ static const struct file_operations mtp_fops = { .owner = THIS_MODULE, .read = mtp_read, .write = mtp_write, .unlocked_ioctl = mtp_ioctl, .open = mtp_open, .release = mtp_release, }; static struct miscdevice mtp_device = { .minor = MISC_DYNAMIC_MINOR, .name = shortname, .fops = &mtp_fops, }; static int mtp_function_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct mtp_dev *dev = 
func_to_dev(f); int id; int ret; dev->cdev = cdev; DBG(cdev, "mtp_function_bind dev: %p\n", dev); /* allocate interface ID(s) */ id = usb_interface_id(c, f); if (id < 0) return id; mtp_interface_desc.bInterfaceNumber = id; /* allocate endpoints */ ret = create_bulk_endpoints(dev, &mtp_fullspeed_in_desc, &mtp_fullspeed_out_desc, &mtp_intr_desc); if (ret) return ret; /* support high speed hardware */ if (gadget_is_dualspeed(c->cdev->gadget)) { mtp_highspeed_in_desc.bEndpointAddress = mtp_fullspeed_in_desc.bEndpointAddress; mtp_highspeed_out_desc.bEndpointAddress = mtp_fullspeed_out_desc.bEndpointAddress; } DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n", gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", f->name, dev->ep_in->name, dev->ep_out->name); return 0; } static void mtp_function_unbind(struct usb_configuration *c, struct usb_function *f) { struct mtp_dev *dev = func_to_dev(f); struct usb_request *req; int i; spin_lock_irq(&dev->lock); while ((req = req_get(dev, &dev->tx_idle))) mtp_request_free(req, dev->ep_in); for (i = 0; i < RX_REQ_MAX; i++) mtp_request_free(dev->rx_req[i], dev->ep_out); mtp_request_free(dev->intr_req, dev->ep_intr); dev->state = STATE_OFFLINE; spin_unlock_irq(&dev->lock); misc_deregister(&mtp_device); kfree(_mtp_dev); _mtp_dev = NULL; } static int mtp_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct mtp_dev *dev = func_to_dev(f); struct usb_composite_dev *cdev = dev->cdev; int value = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); unsigned long flags; /* do nothing if we are disabled */ if (dev->function.disabled) return value; VDBG(cdev, "mtp_function_setup " "%02x.%02x v%04x i%04x l%u\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); /* Handle MTP OS string */ if (dev->interface_mode == MTP_INTERFACE_MODE_MTP && ctrl->bRequestType == (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE) && 
ctrl->bRequest == USB_REQ_GET_DESCRIPTOR && (w_value >> 8) == USB_DT_STRING && (w_value & 0xFF) == MTP_OS_STRING_ID) { value = (w_length < sizeof(mtp_os_string) ? w_length : sizeof(mtp_os_string)); memcpy(cdev->req->buf, mtp_os_string, value); /* return here since composite.c will send for us */ return value; } if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) { /* Handle MTP OS descriptor */ DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n", ctrl->bRequest, w_index, w_value, w_length); if (dev->interface_mode == MTP_INTERFACE_MODE_MTP && ctrl->bRequest == 1 && (ctrl->bRequestType & USB_DIR_IN) && (w_index == 4 || w_index == 5)) { value = (w_length < sizeof(mtp_ext_config_desc) ? w_length : sizeof(mtp_ext_config_desc)); memcpy(cdev->req->buf, &mtp_ext_config_desc, value); } } if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) { DBG(cdev, "class request: %d index: %d value: %d length: %d\n", ctrl->bRequest, w_index, w_value, w_length); if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0 && w_value == 0) { DBG(cdev, "MTP_REQ_CANCEL\n"); spin_lock_irqsave(&dev->lock, flags); if (dev->state == STATE_BUSY) { dev->state = STATE_CANCELED; wake_up(&dev->read_wq); wake_up(&dev->write_wq); } spin_unlock_irqrestore(&dev->lock, flags); /* We need to queue a request to read the remaining * bytes, but we don't actually need to look at * the contents. 
*/ value = w_length; } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS && w_index == 0 && w_value == 0) { struct mtp_device_status *status = cdev->req->buf; status->wLength = __constant_cpu_to_le16(sizeof(*status)); DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n"); spin_lock_irqsave(&dev->lock, flags); /* device status is "busy" until we report * the cancelation to userspace */ if (dev->state == STATE_CANCELED) status->wCode = __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY); else status->wCode = __cpu_to_le16(MTP_RESPONSE_OK); spin_unlock_irqrestore(&dev->lock, flags); value = sizeof(*status); } } /* respond with data transfer or status phase? */ if (value >= 0) { int rc; cdev->req->zero = value < w_length; cdev->req->length = value; rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC); if (rc < 0) ERROR(cdev, "%s setup response queue error\n", __func__); } if (value == -EOPNOTSUPP) VDBG(cdev, "unknown class-specific control req " "%02x.%02x v%04x i%04x l%u\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); return value; } static int mtp_function_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct mtp_dev *dev = func_to_dev(f); struct usb_composite_dev *cdev = f->config->cdev; int ret; DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt); ret = usb_ep_enable(dev->ep_in, ep_choose(cdev->gadget, &mtp_highspeed_in_desc, &mtp_fullspeed_in_desc)); if (ret) return ret; ret = usb_ep_enable(dev->ep_out, ep_choose(cdev->gadget, &mtp_highspeed_out_desc, &mtp_fullspeed_out_desc)); if (ret) { usb_ep_disable(dev->ep_in); return ret; } ret = usb_ep_enable(dev->ep_intr, &mtp_intr_desc); if (ret) { usb_ep_disable(dev->ep_out); usb_ep_disable(dev->ep_in); return ret; } dev->state = STATE_READY; /* readers may be blocked waiting for us to go online */ wake_up(&dev->read_wq); return 0; } static void mtp_function_disable(struct usb_function *f) { struct mtp_dev *dev = func_to_dev(f); struct usb_composite_dev *cdev = dev->cdev; DBG(cdev, 
"mtp_function_disable\n"); dev->state = STATE_OFFLINE; usb_ep_disable(dev->ep_in); usb_ep_disable(dev->ep_out); usb_ep_disable(dev->ep_intr); /* readers may be blocked waiting for us to go online */ wake_up(&dev->read_wq); VDBG(cdev, "%s disabled\n", dev->function.name); } static int mtp_bind_config(struct usb_configuration *c) { struct mtp_dev *dev; int ret = 0; printk(KERN_INFO "mtp_bind_config\n"); dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; /* allocate a string ID for our interface */ if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) { ret = usb_string_id(c->cdev); if (ret < 0) return ret; mtp_string_defs[INTERFACE_STRING_INDEX].id = ret; mtp_interface_desc.iInterface = ret; } spin_lock_init(&dev->lock); init_waitqueue_head(&dev->read_wq); init_waitqueue_head(&dev->write_wq); atomic_set(&dev->open_excl, 0); atomic_set(&dev->ioctl_excl, 0); INIT_LIST_HEAD(&dev->tx_idle); dev->wq = create_singlethread_workqueue("f_mtp"); if (!dev->wq) goto err1; INIT_WORK(&dev->send_file_work, send_file_work); INIT_WORK(&dev->receive_file_work, receive_file_work); dev->cdev = c->cdev; dev->function.name = "mtp"; dev->function.strings = mtp_strings, dev->function.descriptors = fs_mtp_descs; dev->function.hs_descriptors = hs_mtp_descs; dev->function.bind = mtp_function_bind; dev->function.unbind = mtp_function_unbind; dev->function.setup = mtp_function_setup; dev->function.set_alt = mtp_function_set_alt; dev->function.disable = mtp_function_disable; /* MTP mode by default */ dev->interface_mode = MTP_INTERFACE_MODE_MTP; /* _mtp_dev must be set before calling usb_gadget_register_driver */ _mtp_dev = dev; ret = misc_register(&mtp_device); if (ret) goto err1; ret = usb_add_function(c, &dev->function); if (ret) goto err2; return 0; err2: misc_deregister(&mtp_device); err1: if (dev->wq) destroy_workqueue(dev->wq); kfree(dev); printk(KERN_ERR "mtp gadget driver failed to initialize\n"); return ret; } static struct android_usb_function mtp_function = { .name = 
"mtp", .bind_config = mtp_bind_config, }; static int __init init(void) { printk(KERN_INFO "f_mtp init\n"); android_register_function(&mtp_function); return 0; } module_init(init);
gpl-2.0
NicholasPace/android_kernel_asus_moorefield-stock
drivers/power/max17042_battery.c
41
70936
/* * max17042_battery.c - Fuel gauge driver for Maxim 17042 / 8966 / 8997 * Note that Maxim 8966 and 8997 are mfd and this is its subdevice. * * Copyright (C) 2011 Samsung Electronics * MyungJoo Ham <myungjoo.ham@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * This driver is based on max17040_battery.c */ #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/pm_runtime.h> #include <linux/power_supply.h> #include <linux/power/max17042_battery.h> #include <linux/reboot.h> #include <linux/delay.h> #include <linux/notifier.h> #include <linux/miscdevice.h> #include <linux/atomic.h> #include <linux/acpi.h> #include <linux/acpi_gpio.h> /* Status register bits */ #define STATUS_MASK 0xFF0A #define STATUS_POR_BIT (1 << 1) #define STATUS_BST_BIT (1 << 3) #define STATUS_VMN_BIT (1 << 8) #define STATUS_TMN_BIT (1 << 9) #define STATUS_SMN_BIT (1 << 10) #define STATUS_BI_BIT (1 << 11) #define STATUS_VMX_BIT (1 << 12) #define STATUS_TMX_BIT (1 << 13) #define STATUS_SMX_BIT (1 << 14) #define STATUS_BR_BIT (1 << 15) #define MAX17042_IC_VERSION 0x0092 #define MAX17050_IC_VERSION 0x00AC /* Vmax disabled, Vmin disabled */ #define 
VOLT_DEF_MAX_MIN_THRLD 0xFF00 /* Vmax disabled, Vmin set to 3300mV */ #define VOLT_MIN_THRLD_ENBL 0xFFA5 /* Tmax disabled, Tmin disabled */ #define TEMP_DEF_MAX_MIN_THRLD 0x7F80 /* SoCmax disabled, SoCmin can be set to 15%, 10%, 5% and 1%. * INT will trigger when the thresholds are voilated. */ #define SOC_DEF_MAX_MIN1_THRLD 0xFF0F #define SOC_DEF_MAX_MIN2_THRLD 0xFF0A #define SOC_DEF_MAX_MIN3_THRLD 0xFF05 #define SOC_DEF_MAX_MIN4_THRLD 0xFF01 /* SOC threshold for 1% interrupt */ #define SOC_INTR_S0_THR 1 #define MISCCFG_CONFIG_REPSOC 0x0000 #define MISCCFG_CONFIG_VFSOC 0x0003 /* low battery notification warning level */ #define SOC_WARNING_LEVEL1 15 #define SOC_WARNING_LEVEL2 10 #define SOC_WARNING_LEVEL3 5 #define SOC_SHUTDOWN_LEVEL 1 #define CONFIG_BER_BIT_ENBL (1 << 0) #define CONFIG_BEI_BIT_ENBL (1 << 1) #define CONFIG_ALRT_BIT_ENBL (1 << 2) #define CONFIG_VSTICKY_BIT_SET (1 << 12) #define CONFIG_TSTICKY_BIT_SET (1 << 13) #define CONFIG_SSTICKY_BIT_SET (1 << 14) #define CONFIG_ALP_BIT_ENBL (1 << 11) #define CONFIG_TEX_BIT_ENBL (1 << 8) #define VFSOC0_LOCK 0x0000 #define VFSOC0_UNLOCK 0x0080 #define FG_MODEL_UNLOCK1 0X0059 #define FG_MODEL_UNLOCK2 0X00C4 #define FG_MODEL_LOCK1 0X0000 #define FG_MODEL_LOCK2 0X0000 #define dQ_ACC_DIV 0x4 #define dP_ACC_100 0x1900 #define dP_ACC_200 0x3200 #define NTC_47K_TGAIN 0xE4E4 #define NTC_47K_TOFF 0x2F1D #define BATT_CHRG_FULL_DES 1550000 #define MAX17042_VOLT_CONV_FCTR 625 #define MAX17042_CURR_CONV_FCTR 156 #define MAX17042_CHRG_CONV_FCTR 500 #define MAX17042_TEMP_SIGN_MASK 0x8000 #define MAX17042_MAX_MEM (0xFF + 1) #define MAX17042_MODEL_MUL_FACTOR(a, b) ((a * 100) / b) #define MAX17042_MODEL_DIV_FACTOR(a, b) ((a * b) / 100) #define CONSTANT_TEMP_IN_POWER_SUPPLY 350 #define POWER_SUPPLY_VOLT_MIN_THRESHOLD 3500000 #define BATTERY_VOLT_MIN_THRESHOLD 3400000 #define CYCLES_ROLLOVER_CUTOFF 0x00FF #define MAX17042_DEF_RO_LRNCFG 0x0076 #define MAX17042_CGAIN_DISABLE 0x0000 #define MAX17042_EN_VOLT_FG 0x0007 #define 
MAX17042_CFG_INTR_SOCVF 0x0003 /* Vempty value set to 2500mV */ #define MAX17042_DEF_VEMPTY_VAL 0x7D5A #define MAX17042_SIGN_INDICATOR 0x8000 #define SHUTDOWN_DEF_FG_MASK_BIT (1 << 0) #define SHUTDOWN_OCV_MASK_BIT (1 << 1) #define SHUTDOWN_LOWBATT_MASK_BIT (1 << 2) #define BYTE_VALUE 1 #define WORD_VALUE 0 /* Time interval to write temperature values from host, if needed (in milliseconds) */ #define TEMP_WRITE_INTERVAL 120000 enum max17042_register { MAX17042_STATUS = 0x00, MAX17042_VALRT_Th = 0x01, MAX17042_TALRT_Th = 0x02, MAX17042_SALRT_Th = 0x03, MAX17042_AtRate = 0x04, MAX17042_RepCap = 0x05, MAX17042_RepSOC = 0x06, MAX17042_Age = 0x07, MAX17042_TEMP = 0x08, MAX17042_VCELL = 0x09, MAX17042_Current = 0x0A, MAX17042_AvgCurrent = 0x0B, MAX17042_Qresidual = 0x0C, MAX17042_SOC = 0x0D, MAX17042_AvSOC = 0x0E, MAX17042_RemCap = 0x0F, MAX17042_FullCAP = 0x10, MAX17042_TTE = 0x11, MAX17042_V_empty = 0x12, MAX17042_RSLOW = 0x14, MAX17042_AvgTA = 0x16, MAX17042_Cycles = 0x17, MAX17042_DesignCap = 0x18, MAX17042_AvgVCELL = 0x19, MAX17042_MinMaxTemp = 0x1A, MAX17042_MinMaxVolt = 0x1B, MAX17042_MinMaxCurr = 0x1C, MAX17042_CONFIG = 0x1D, MAX17042_ICHGTerm = 0x1E, MAX17042_AvCap = 0x1F, MAX17042_ManName = 0x20, MAX17042_DevName = 0x21, MAX17042_DevChem = 0x22, MAX17042_FullCAPNom = 0x23, MAX17042_TempNom = 0x24, MAX17042_TempCold = 0x25, MAX17042_TempHot = 0x26, MAX17042_AIN = 0x27, MAX17042_LearnCFG = 0x28, MAX17042_SHFTCFG = 0x29, MAX17042_RelaxCFG = 0x2A, MAX17042_MiscCFG = 0x2B, MAX17042_TGAIN = 0x2C, MAx17042_TOFF = 0x2D, MAX17042_CGAIN = 0x2E, MAX17042_COFF = 0x2F, MAX17042_SOCempty = 0x33, MAX17042_T_empty = 0x34, MAX17042_FullCAP0 = 0x35, MAX17042_LAvg_empty = 0x36, MAX17042_FCTC = 0x37, MAX17042_RCOMP0 = 0x38, MAX17042_TempCo = 0x39, MAX17042_ETC = 0x3A, MAX17042_K_empty0 = 0x3B, MAX17042_TaskPeriod = 0x3C, MAX17042_FSTAT = 0x3D, MAX17042_SHDNTIMER = 0x3F, MAX17042_dQacc = 0x45, MAX17042_dPacc = 0x46, MAX17042_VFSOC0 = 0x48, MAX17042_VFRemCap = 0x4A, MAX17042_QH = 
0x4D, MAX17042_QL = 0x4E, MAX17042_VFSOC0Enable = 0x60, MAX17042_MLOCKReg1 = 0x62, MAX17042_MLOCKReg2 = 0x63, MAX17042_MODELChrTbl = 0x80, MAX17042_OCV = 0xEE, MAX17042_OCVInternal = 0xFB, MAX17042_VFSOC = 0xFF, }; /* Registers specific to max17047/50 */ enum max17050_register { MAX17050_QRTbl00 = 0x12, MAX17050_FullSOCThr = 0x13, MAX17050_QRTbl10 = 0x22, MAX17050_QRTbl20 = 0x32, MAX17050_V_empty = 0x3A, MAX17050_QRTbl30 = 0x42, }; #define DRV_NAME "max170xx_battery" enum max170xx_chip_type {MAX17042, MAX17050}; /* No of times we should retry on -EAGAIN error */ #define NR_RETRY_CNT 3 /* No of times we should process interrupt reasons @irq handler */ /* Probably all values >1 are ok, Normally It just goes once thought * all bits and everything is handled. Also chips seems to limit * interrupts to ~3/s, so we have ~300ms to process, until we will * miss interrupt. What ever value it's, it doesn't have any * performance impact. */ #define NR_RETRY_INT 3 /* No of times we should reset I2C lines */ #define NR_I2C_RESET_CNT 8 #define VBATT_MAX 4200000 /* 4200mV */ #define VBATT_MIN 3400000 /* 3400mV */ #define VBATT_MIN_OFFSET 100 /* 100mV from VMMIN */ #define VBATT_MAX_OFFSET 50 /* 50mV from VMAX */ #define VALERT_VOLT_OFFSET 20 /* each bit corresponds to 20mV */ /* default fuel gauge cell data for debug purpose only */ static uint16_t cell_char_tbl[] = { /* Data to be written from 0x80h */ 0xA250, 0xB720, 0xB800, 0xB880, 0xB920, 0xBA00, 0xBA60, 0xBBF0, 0xBCF0, 0xBE50, 0xC060, 0xC2D0, 0xC520, 0xC750, 0xCA00, 0xD090, /* Data to be written from 0x90h */ 0x0120, 0x1C80, 0x0470, 0x0440, 0x0100, 0x5500, 0x0960, 0x2410, 0x2250, 0x15F0, 0x0BD0, 0x0D00, 0x0B00, 0x0BB0, 0x08A0, 0x08A0, /* Data to be written from 0xA0h */ 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, }; struct max17042_chip { struct i2c_client *client; enum max170xx_chip_type chip_type; struct power_supply battery; struct 
max17042_platform_data *pdata; struct mutex batt_lock; struct mutex init_lock; int present; int status; int health; int technology; int charge_full_des; int ext_set_cap; struct work_struct init_worker; struct work_struct evt_worker; struct delayed_work temp_worker; bool plat_rebooting; /* * user space can disable default shutdown * methods set by platform. */ int disable_shdwn_methods; /* * user space can set this variable to report constant * batery temperature for conformence testing. */ bool enable_fake_temp; int extra_resv_cap; int voltage_max; int model_algo_factor; }; /* Sysfs entry for disable shutdown methods from user space */ static ssize_t override_shutdown_methods(struct device *device, struct device_attribute *attr, const char *buf, size_t count); static ssize_t get_shutdown_methods(struct device *device, struct device_attribute *attr, char *buf); static DEVICE_ATTR(disable_shutdown_methods, S_IRUGO | S_IWUSR, get_shutdown_methods, override_shutdown_methods); /* Sysfs entry to enter shutdown voltage from user space */ static int shutdown_volt; static ssize_t set_shutdown_voltage(struct device *device, struct device_attribute *attr, const char *buf, size_t count); static ssize_t get_shutdown_voltage_set_by_user(struct device *device, struct device_attribute *attr, char *buf); static DEVICE_ATTR(shutdown_voltage, S_IRUGO | S_IWUSR, get_shutdown_voltage_set_by_user, set_shutdown_voltage); /* * Sysfs entry to report fake battery temperature. 
This * interface is needed to support conformence testing */ static ssize_t set_fake_temp_enable(struct device *device, struct device_attribute *attr, const char *buf, size_t count); static ssize_t get_fake_temp_enable(struct device *device, struct device_attribute *attr, char *buf); static DEVICE_ATTR(enable_fake_temp, S_IRUGO | S_IWUSR, get_fake_temp_enable, set_fake_temp_enable); #ifdef CONFIG_DEBUG_FS static struct dentry *max17042_dbgfs_root; static char max17042_dbg_regs[MAX17042_MAX_MEM][4]; #endif static int max17042_reboot_callback(struct notifier_block *nfb, unsigned long event, void *data); static struct notifier_block max17042_reboot_notifier_block = { .notifier_call = max17042_reboot_callback, .priority = 0, }; static bool is_battery_online(struct max17042_chip *chip); static void configure_interrupts(struct max17042_chip *chip); /* Set SOC threshold in S3 state */ static void set_soc_intr_thresholds_s3(struct max17042_chip *chip); /* Set SOC threshold to offset percentage in S0 state */ static void set_soc_intr_thresholds_s0(struct max17042_chip *chip, int offset); static void save_runtime_params(struct max17042_chip *chip); static void set_chip_config(struct max17042_chip *chip); static u16 fg_vfSoc; static bool fake_batt_full; static struct max17042_config_data *fg_conf_data; static struct i2c_client *max17042_client; atomic_t fopen_count; static void update_runtime_params(struct max17042_chip *chip); static int read_batt_pack_temp(struct max17042_chip *chip, int *temp); /* Voltage-Capacity lookup function to get * capacity value against a given voltage */ static unsigned int voltage_capacity_lookup(unsigned int val) { unsigned int max = VBATT_MAX / 1000; unsigned int min = VBATT_MIN / 1000; unsigned int capacity; unsigned int total_diff; unsigned int val_diff; if (val > max) return 100; if (val < min) return 0; total_diff = max - min; val_diff = max - val; capacity = (total_diff - val_diff) * 100 / total_diff; return capacity; } static int 
max17042_property_is_privileged_read(struct power_supply *psy, enum power_supply_property psp) { switch (psp) { case POWER_SUPPLY_PROP_MODEL_NAME: case POWER_SUPPLY_PROP_SERIAL_NUMBER: return 1; default: break; } return 0; } static int dev_file_open(struct inode *i, struct file *f) { if (atomic_read(&fopen_count)) return -EBUSY; atomic_inc(&fopen_count); return 0; } static int dev_file_close(struct inode *i, struct file *f) { atomic_dec(&fopen_count); return 0; } static ssize_t dev_file_read(struct file *f, char __user *buf, size_t len, loff_t *off) { struct max17042_chip *chip = i2c_get_clientdata(max17042_client); int ret; if (!chip->pdata->is_init_done) { dev_err(&max17042_client->dev, "MAX17042 is not initialized.\n"); return -ECANCELED; } update_runtime_params(chip); if (sizeof(*fg_conf_data) > len) return -EINVAL; ret = copy_to_user(buf, fg_conf_data, sizeof(*fg_conf_data)); if (!ret) return sizeof(*fg_conf_data); return -EINVAL; } static ssize_t dev_file_write(struct file *f, const char __user *buf, size_t len, loff_t *off) { struct max17042_chip *chip = i2c_get_clientdata(max17042_client); if (chip->pdata->is_init_done) { dev_err(&max17042_client->dev, "Already initialized.So ignoring new set of data\n"); return -ECANCELED; } if (len > sizeof(*fg_conf_data)) return -EINVAL; if (copy_from_user(fg_conf_data, buf, len)) return -EINVAL; set_chip_config(chip); if (chip->pdata->is_init_done) { dev_info(&max17042_client->dev, "MAX17042 initialized successfully\n"); fg_conf_data->config_init = 0x1; } /* Return no. 
of bytes written */ return len; } static const struct file_operations helper_fops = { .owner = THIS_MODULE, .open = &dev_file_open, .release = &dev_file_close, .read = &dev_file_read, .write = &dev_file_write, }; static struct miscdevice fg_helper = { .minor = MISC_DYNAMIC_MINOR, .name = "max170xx", .fops = &helper_fops, }; static enum power_supply_property max17042_battery_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_VOLTAGE_AVG, POWER_SUPPLY_PROP_VOLTAGE_OCV, POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CURRENT_AVG, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_ALERT_MIN, POWER_SUPPLY_PROP_TEMP_ALERT_MAX, POWER_SUPPLY_PROP_CHARGE_NOW, POWER_SUPPLY_PROP_CHARGE_FULL, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, POWER_SUPPLY_PROP_CHARGE_COUNTER, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_SERIAL_NUMBER, }; static int max17042_write_reg(struct i2c_client *client, u8 reg, u16 value) { int ret, i; struct max17042_chip *chip = i2c_get_clientdata(client); /* if the shutdown or reboot sequence started * then block the access to maxim registers as chip * cannot be recovered from broken i2c transactions */ if (chip->plat_rebooting) { dev_warn(&client->dev, "rebooting is in progress\n"); return -EINVAL; } for (i = 0; i < NR_RETRY_CNT; i++) { ret = i2c_smbus_write_word_data(client, reg, value); if (ret == -EAGAIN || ret == -ETIMEDOUT) continue; else break; } if (ret < 0) dev_err(&client->dev, "I2C SMbus Write error:%d\n", ret); return ret; } static int max17042_read_reg(struct i2c_client *client, u8 reg) { int ret, i; struct max17042_chip *chip = i2c_get_clientdata(client); /* if the shutdown or reboot sequence started * then block the access to maxim registers as chip * cannot be recovered from broken i2c transactions */ if (chip->plat_rebooting) { 
dev_warn(&client->dev, "rebooting is in progress\n"); return -EINVAL; } for (i = 0; i < NR_RETRY_CNT; i++) { ret = i2c_smbus_read_word_data(client, reg); if (ret == -EAGAIN || ret == -ETIMEDOUT) continue; else break; } if (ret < 0) dev_err(&client->dev, "I2C SMbus Read error:%d\n", ret); return ret; } /* * max17042 chip has few registers which could get modified by the * chip as well during its fuel gauge learning process. So we need * to do a write verify on those registers and if the write fails * then we have to retry. */ static int max17042_write_verify_reg(struct i2c_client *client, u8 reg, u16 value) { int ret, i; for (i = 0; i < NR_RETRY_CNT; i++) { /* Write the value to register */ ret = max17042_write_reg(client, reg, value); if (ret < 0) continue; /* Read the value from register */ ret = max17042_read_reg(client, reg); if (ret < 0) continue; /* compare the both the values */ if (value != ret) dev_err(&client->dev, "write verify failed on Register:0x%x\n", reg); else break; } return ret; } static int max17042_reg_read_modify(struct i2c_client *client, u8 reg, u16 val, int bit_set) { int ret; ret = max17042_read_reg(client, reg); if (ret < 0) return ret; if (bit_set) ret |= val; else ret &= (~val); ret = max17042_write_reg(client, reg, ret); return ret; } static irqreturn_t max17042_intr_handler(int id, void *dev) { return IRQ_WAKE_THREAD; } static irqreturn_t max17042_thread_handler(int id, void *dev) { struct max17042_chip *chip = dev; struct device *device = &chip->client->dev; int stat, temp, val, count = 0; u16 processed, ignored, config; pm_runtime_get_sync(device); /* read current configuration */ val = max17042_read_reg(chip->client, MAX17042_CONFIG); if (val < 0) config = fg_conf_data->cfg; else config = val; stat = max17042_read_reg(chip->client, MAX17042_STATUS); do { dev_dbg(device, "%s: Status-val: 0x%x\n", __func__, stat); if (stat < 0) { dev_err(device, "max17042-INTR: status read failed:%d\n", stat); pm_runtime_put_sync(device); return 
IRQ_HANDLED; } processed = 0; ignored = 0; if ((stat & STATUS_VMN_BIT) || (stat & STATUS_VMX_BIT)) { dev_info(device, "VOLT threshold INTR\n"); /* nothing yet */ if (stat & STATUS_VMN_BIT) { if (config & CONFIG_VSTICKY_BIT_SET) processed |= STATUS_VMN_BIT; else ignored |= STATUS_VMN_BIT; } if (stat & STATUS_VMX_BIT) { if (config & CONFIG_VSTICKY_BIT_SET) processed |= STATUS_VMX_BIT; else ignored |= STATUS_VMX_BIT; } } if ((stat & STATUS_SMN_BIT) || (stat & STATUS_SMX_BIT)) { dev_info(device, "SOC threshold INTR\n"); /* Actual processing is done in evt_worker */ /* so we might get interrupt again or miss */ if (stat & STATUS_SMN_BIT) { if (config & CONFIG_SSTICKY_BIT_SET) processed |= STATUS_SMN_BIT; else ignored |= STATUS_SMN_BIT; } if (stat & STATUS_SMX_BIT) { if (config & CONFIG_SSTICKY_BIT_SET) processed |= STATUS_SMX_BIT; else ignored |= STATUS_SMX_BIT; } } if (stat & STATUS_BR_BIT) { dev_info(device, "Battery removed INTR\n"); if ((config & CONFIG_BER_BIT_ENBL) && (stat & STATUS_BST_BIT)) { dev_warn(device, "battery unplugged\n"); mutex_lock(&chip->batt_lock); chip->present = 0; mutex_unlock(&chip->batt_lock); kernel_power_off(); } processed |= STATUS_BR_BIT; } if ((stat & STATUS_TMN_BIT) || (stat & STATUS_TMX_BIT)) { val = read_batt_pack_temp(chip, &temp); if (val) { dev_warn(device, "Can't read temp: %d\n", val); } else { val = max17042_read_reg(chip->client, MAX17042_TALRT_Th); dev_info(device, "Thermal threshold INTR: %d (%d, %d)\n", temp, (int8_t)(val & 0xff), (int8_t)(val >> 8)); } if (stat & STATUS_TMN_BIT) { if (config & CONFIG_TSTICKY_BIT_SET) processed |= STATUS_TMN_BIT; else ignored |= STATUS_TMN_BIT; } if (stat & STATUS_TMX_BIT) { if (config & CONFIG_TSTICKY_BIT_SET) processed |= STATUS_TMX_BIT; else ignored |= STATUS_TMX_BIT; } } if (stat & STATUS_POR_BIT) { dev_info(device, "Power On Reset event\n"); ignored |= STATUS_POR_BIT; } if (stat & STATUS_BST_BIT) ignored |= STATUS_BST_BIT; if (stat & STATUS_BI_BIT) { dev_info(device, "Battery Insert 
INTR\n"); /* nothing yet */ processed |= STATUS_BI_BIT; } /* clear int */ max17042_reg_read_modify(chip->client, MAX17042_STATUS, processed, 0); stat = max17042_read_reg(chip->client, MAX17042_STATUS); } while ((stat & STATUS_MASK & ~ignored) && (count++ < NR_RETRY_INT)); /* update battery status and health */ schedule_work(&chip->evt_worker); pm_runtime_put_sync(device); if (count >= NR_RETRY_INT) { dev_err(device, "%s: can't process all IRQ reasons: 0x%x\n", __func__, stat); /* desperate */ max17042_write_reg(max17042_client, MAX17042_STATUS, 0x0000); } return IRQ_HANDLED; } static short adjust_sign_value(int value, int is_byte) { short result, temp = (short)value; if (temp & MAX17042_SIGN_INDICATOR) { if (is_byte) { result = (~temp) >> 8; result &= 0xff; } else { result = ~temp; } result++; result *= -1; } else { if (is_byte) result = temp >> 8; else result = temp; } return result; } static int read_batt_pack_temp(struct max17042_chip *chip, int *temp) { int ret; u16 val; /* Read battery pack temperature */ if (chip->pdata->battery_pack_temp) { ret = chip->pdata->battery_pack_temp(temp); if (ret < 0) goto temp_read_err; /* Convert the temperature to 2's complement form. 
* Most significant byte contains the decimal * equivalent of the data */ if (fg_conf_data->cfg & CONFIG_TEX_BIT_ENBL) { if (*temp < 0) { val = (*temp + 0xff + 1); val <<= 8; } else { val = *temp; val <<= 8; } ret = max17042_write_reg(chip->client, MAX17042_TEMP, val); if (ret < 0) dev_err(&chip->client->dev, "Temp write to maxim failed:%d", ret); } } else { ret = max17042_read_reg(chip->client, MAX17042_TEMP); if (ret < 0) goto temp_read_err; /* MAX17042_TEMP register gives the signed * value and we are ignoring the lower byte * which represents the decimal point */ *temp = adjust_sign_value(ret, BYTE_VALUE); } return 0; temp_read_err: dev_err(&chip->client->dev, "BP Temp read error:%d", ret); return ret; } static int max17042_set_property(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *val) { struct max17042_chip *chip = container_of(psy, struct max17042_chip, battery); int ret = 0; int8_t temp; mutex_lock(&chip->batt_lock); switch (psp) { case POWER_SUPPLY_PROP_STATUS: chip->status = val->intval; break; case POWER_SUPPLY_PROP_CAPACITY: if ((val->intval >= 0) && (val->intval <= 100)) chip->ext_set_cap = val->intval; break; case POWER_SUPPLY_PROP_TEMP_ALERT_MIN: ret = max17042_read_reg(chip->client, MAX17042_TALRT_Th); if (ret < 0) break; temp = val->intval / 10; /* 0.1C prop to 1C reg */ /* Force that min is under max */ if (temp >= (int8_t)(ret >> 8)) temp = (int8_t)(ret >> 8) - 1; ret = (ret & 0xff00) + (uint8_t)temp; ret = max17042_write_reg(chip->client, MAX17042_TALRT_Th, ret); break; case POWER_SUPPLY_PROP_TEMP_ALERT_MAX: ret = max17042_read_reg(chip->client, MAX17042_TALRT_Th); if (ret < 0) break; temp = val->intval / 10; /* 0.1C prop to 1C reg */ /* Force that max is over min */ if (temp <= (int8_t)(ret & 0xff)) temp = (int8_t)(ret & 0xff) + 1; ret = (temp << 8) + (ret & 0xff); ret = max17042_write_reg(chip->client, MAX17042_TALRT_Th, ret); break; default: ret = -EINVAL; break; } mutex_unlock(&chip->batt_lock); 
return ret; } static int max17042_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct max17042_chip *chip = container_of(psy, struct max17042_chip, battery); short int cur; int volt_ocv, ret, batt_temp, batt_vmin; mutex_lock(&chip->batt_lock); switch (psp) { case POWER_SUPPLY_PROP_STATUS: /* * status is being read from external * module so check for error case before * assigning to intval. */ if (chip->status < 0) { ret = chip->status; goto ps_prop_read_err; } else { val->intval = chip->status; } break; case POWER_SUPPLY_PROP_HEALTH: /* * health is being read from external * module so check for error case before * assigning to intval. */ if (chip->health < 0) { ret = chip->health; goto ps_prop_read_err; } else { val->intval = chip->health; } break; case POWER_SUPPLY_PROP_PRESENT: val->intval = chip->present; break; case POWER_SUPPLY_PROP_TECHNOLOGY: val->intval = chip->technology; break; case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: val->intval = chip->charge_full_des; break; case POWER_SUPPLY_PROP_CHARGE_NOW: ret = max17042_read_reg(chip->client, MAX17042_RepCap); if (ret < 0) goto ps_prop_read_err; val->intval = ret * MAX17042_CHRG_CONV_FCTR; break; case POWER_SUPPLY_PROP_CHARGE_FULL: ret = max17042_read_reg(chip->client, MAX17042_FullCAP); if (ret < 0) goto ps_prop_read_err; val->intval = ret * MAX17042_CHRG_CONV_FCTR; break; case POWER_SUPPLY_PROP_CHARGE_COUNTER: ret = max17042_read_reg(chip->client, MAX17042_QH); if (ret < 0) goto ps_prop_read_err; val->intval = ret * MAX17042_CHRG_CONV_FCTR; break; case POWER_SUPPLY_PROP_CURRENT_NOW: ret = max17042_read_reg(chip->client, MAX17042_Current); if (ret < 0) goto ps_prop_read_err; cur = adjust_sign_value(ret, WORD_VALUE); if (fg_conf_data->rsense) val->intval = (cur * MAX17042_CURR_CONV_FCTR) / fg_conf_data->rsense; else val->intval = cur * MAX17042_CURR_CONV_FCTR; break; case POWER_SUPPLY_PROP_CURRENT_AVG: ret = max17042_read_reg(chip->client, 
MAX17042_AvgCurrent); if (ret < 0) goto ps_prop_read_err; cur = adjust_sign_value(ret, WORD_VALUE); if (fg_conf_data->rsense) val->intval = (cur * MAX17042_CURR_CONV_FCTR) / fg_conf_data->rsense; else val->intval = cur * MAX17042_CURR_CONV_FCTR; break; case POWER_SUPPLY_PROP_TEMP: if (!chip->pdata->enable_current_sense || chip->enable_fake_temp) { val->intval = CONSTANT_TEMP_IN_POWER_SUPPLY; break; } ret = read_batt_pack_temp(chip, &batt_temp); if (ret < 0) goto ps_prop_read_err; /* * Temperature is measured in units of degrees celcius, the * power_supply class measures temperature in tenths of degrees * celsius. */ val->intval = batt_temp * 10; break; case POWER_SUPPLY_PROP_TEMP_ALERT_MIN: ret = max17042_read_reg(chip->client, MAX17042_TALRT_Th); if (ret < 0) goto ps_prop_read_err; val->intval = ((int8_t)(ret & 0xff)) * 10; /* 0.1C */ break; case POWER_SUPPLY_PROP_TEMP_ALERT_MAX: ret = max17042_read_reg(chip->client, MAX17042_TALRT_Th); if (ret < 0) goto ps_prop_read_err; val->intval = ((int8_t)(ret >> 8)) * 10; /* 0.1C */ break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: ret = max17042_read_reg(chip->client, MAX17042_VCELL); if (ret < 0) goto ps_prop_read_err; val->intval = (ret >> 3) * MAX17042_VOLT_CONV_FCTR; break; case POWER_SUPPLY_PROP_VOLTAGE_AVG: ret = max17042_read_reg(chip->client, MAX17042_AvgVCELL); if (ret < 0) goto ps_prop_read_err; val->intval = (ret >> 3) * MAX17042_VOLT_CONV_FCTR; break; case POWER_SUPPLY_PROP_VOLTAGE_OCV: ret = max17042_read_reg(chip->client, MAX17042_OCVInternal); if (ret < 0) goto ps_prop_read_err; val->intval = (ret >> 3) * MAX17042_VOLT_CONV_FCTR; break; case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: if (chip->chip_type == MAX17042) ret = max17042_read_reg(chip->client, MAX17042_V_empty); else ret = max17042_read_reg(chip->client, MAX17050_V_empty); if (ret < 0) goto ps_prop_read_err; val->intval = (ret >> 7) * 10000; /* Units of LSB = 10mV */ break; case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: val->intval = chip->voltage_max; break; case 
POWER_SUPPLY_PROP_CAPACITY: /* * Check whether the capacity is set externally or not. If the * capacity value is set externally, use same as the SOC value * for the battery level usage. */ if ((chip->ext_set_cap) >= 0 && (chip->ext_set_cap <= 100)) { val->intval = chip->ext_set_cap; break; } /* * WA added to support power supply voltage * variations b/w supply and FG readings. */ if (fake_batt_full) { val->intval = 100; break; } /* Voltage Based shutdown method to avoid modem crash */ if (chip->pdata->is_volt_shutdown) { ret = max17042_read_reg(chip->client, MAX17042_OCVInternal); if (ret < 0) goto ps_prop_read_err; volt_ocv = (ret >> 3) * MAX17042_VOLT_CONV_FCTR; /* Get the minimum voltage thereshold */ if (shutdown_volt) batt_vmin = shutdown_volt; else if (chip->pdata->get_vmin_threshold) batt_vmin = chip->pdata->get_vmin_threshold(); else batt_vmin = BATTERY_VOLT_MIN_THRESHOLD; if (volt_ocv <= batt_vmin) { /* if user disables OCV shutdown method * report 1% capcity so that platform * will not get shutdown. */ if (chip->disable_shdwn_methods & SHUTDOWN_OCV_MASK_BIT) val->intval = 1; else val->intval = 0; break; } } /* Check for LOW Battery Shutdown mechanism is enabled */ if (chip->pdata->is_lowbatt_shutdown && (chip->health == POWER_SUPPLY_HEALTH_DEAD)) { /* if user disables LOWBATT INT shutdown method * report 1% capcity so that platform * will not get shutdown. 
 */
		if (chip->disable_shdwn_methods &
				SHUTDOWN_LOWBATT_MASK_BIT)
			val->intval = 1;
		else
			val->intval = 0;
		break;
		}
		/* If current sensing is not enabled then read the
		 * voltage based fuel gauge register for SOC */
		if (chip->pdata->enable_current_sense) {
			ret = max17042_read_reg(chip->client,
					MAX17042_RepSOC);
			if (ret < 0)
				goto ps_prop_read_err;
			/* RepSOC: integer percentage is in the high byte */
			val->intval = ret >> 8;
			/* Check if MSB of lower byte is set
			 * then round off the SOC to higher digit
			 */
			if ((ret & 0x80) && val->intval)
				val->intval += 1;
		} else {
			/* no sense resistor: derive SOC from VCELL via
			 * the voltage/capacity lookup table */
			ret = max17042_read_reg(chip->client,
					MAX17042_VCELL);
			if (ret < 0)
				goto ps_prop_read_err;
			ret = (ret >> 3) * MAX17042_VOLT_CONV_FCTR / 1000;
			val->intval = voltage_capacity_lookup(ret);
		}
		/* clamp reported capacity to 100% */
		if (val->intval > 100)
			val->intval = 100;
		/* if user disables default FG shutdown method
		 * report 1% capacity so that platform
		 * will not get shutdown.
		 */
		if ((val->intval == 0) &&
		    (chip->disable_shdwn_methods & SHUTDOWN_DEF_FG_MASK_BIT))
			val->intval = 1;
		break;
	case POWER_SUPPLY_PROP_MODEL_NAME:
		/* "UNKNOWNB" battid means the pack was not identified;
		 * report the raw id instead of a model name */
		if (!strncmp(chip->pdata->battid, "UNKNOWNB", 8))
			val->strval = chip->pdata->battid;
		else
			val->strval = chip->pdata->model_name;
		break;
	case POWER_SUPPLY_PROP_SERIAL_NUMBER:
		val->strval = chip->pdata->serial_num;
		break;
	default:
		mutex_unlock(&chip->batt_lock);
		return -EINVAL;
	}
	mutex_unlock(&chip->batt_lock);
	return 0;

ps_prop_read_err:
	/* i2c read failure path: drop the lock and propagate the error */
	mutex_unlock(&chip->batt_lock);
	return ret;
}

/*
 * dump_fg_conf_data - log the whole restored/saved fuel-gauge
 * configuration blob (scalar registers plus the 48-word cell
 * characterization table) for debugging.
 */
static void dump_fg_conf_data(struct max17042_chip *chip)
{
	int i;

	dev_info(&chip->client->dev, "size:%x\n", fg_conf_data->size);
	dev_info(&chip->client->dev, "table_type:%x\n",
			fg_conf_data->table_type);
	dev_info(&chip->client->dev, "config_init:%x\n",
			fg_conf_data->config_init);
	dev_info(&chip->client->dev, "rcomp0:%x\n", fg_conf_data->rcomp0);
	dev_info(&chip->client->dev, "tempCo:%x\n", fg_conf_data->tempCo);
	dev_info(&chip->client->dev, "kempty0:%x\n", fg_conf_data->kempty0);
	dev_info(&chip->client->dev, "full_cap:%x\n", fg_conf_data->full_cap);
	dev_info(&chip->client->dev, "cycles:%x\n", fg_conf_data->cycles);
	dev_info(&chip->client->dev, "full_capnom:%x\n",
			fg_conf_data->full_capnom);
	dev_info(&chip->client->dev, "qrtbl00:%x\n", fg_conf_data->qrtbl00);
	dev_info(&chip->client->dev, "qrtbl10:%x\n", fg_conf_data->qrtbl10);
	dev_info(&chip->client->dev, "qrtbl20:%x\n", fg_conf_data->qrtbl20);
	dev_info(&chip->client->dev, "qrtbl30:%x\n", fg_conf_data->qrtbl30);
	dev_info(&chip->client->dev, "full_soc_thr:%x\n",
			fg_conf_data->full_soc_thr);
	dev_info(&chip->client->dev, "vempty:%x\n", fg_conf_data->vempty);
	dev_info(&chip->client->dev, "soc_empty:%x\n",
			fg_conf_data->soc_empty);
	dev_info(&chip->client->dev, "ichgt_term:%x\n",
			fg_conf_data->ichgt_term);
	dev_info(&chip->client->dev, "design_cap:%x\n",
			fg_conf_data->design_cap);
	dev_info(&chip->client->dev, "etc:%x\n", fg_conf_data->etc);
	dev_info(&chip->client->dev, "rsense:%x\n", fg_conf_data->rsense);
	dev_info(&chip->client->dev, "cfg:%x\n", fg_conf_data->cfg);
	dev_info(&chip->client->dev, "learn_cfg:%x\n",
			fg_conf_data->learn_cfg);
	dev_info(&chip->client->dev, "filter_cfg:%x\n",
			fg_conf_data->filter_cfg);
	dev_info(&chip->client->dev, "relax_cfg:%x\n",
			fg_conf_data->relax_cfg);
	for (i = 0; i < CELL_CHAR_TBL_SAMPLES; i++)
		dev_info(&chip->client->dev, "%x, ",
				fg_conf_data->cell_char_tbl[i]);
	dev_info(&chip->client->dev, "\n");
}

/*
 * enable_soft_POR - issue a software power-on-reset to the fuel gauge.
 *
 * The sequence writes zero to both model-lock registers and STATUS,
 * reads them back (a non-zero readback is reported as an error), then
 * writes the POR command (0x000F) to the VFSOC0 enable register and
 * checks that STATUS reports the POR bit after a short delay.
 */
static void enable_soft_POR(struct max17042_chip *chip)
{
	u16 val = 0x0000;

	max17042_write_reg(chip->client, MAX17042_MLOCKReg1, val);
	max17042_write_reg(chip->client, MAX17042_MLOCKReg2, val);
	max17042_write_reg(chip->client, MAX17042_STATUS, val);

	/* after writing zero these registers are expected to read back
	 * zero; anything else indicates the write did not take */
	val = max17042_read_reg(chip->client, MAX17042_MLOCKReg1);
	if (val)
		dev_err(&chip->client->dev, "MLOCKReg1 read failed\n");

	val = max17042_read_reg(chip->client, MAX17042_MLOCKReg2);
	if (val)
		dev_err(&chip->client->dev, "MLOCKReg2 read failed\n");

	val = max17042_read_reg(chip->client, MAX17042_STATUS);
	if (val)
		dev_err(&chip->client->dev, "STATUS read failed\n");

	/* send POR command */
	max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, 0x000F);
	mdelay(2);

	/* POR bit set in STATUS confirms the reset happened */
	val = max17042_read_reg(chip->client, MAX17042_STATUS);
	if (val & STATUS_POR_BIT)
		dev_info(&chip->client->dev, "SoftPOR done!\n");
	else
		dev_err(&chip->client->dev, "SoftPOR failed\n");
}

/*
 * write_characterization_data - program the 48-word cell
 * characterization table into the model region.
 *
 * Flow: unlock model access, write the table, read it back and compare
 * (write verification), relock, then read again and require all-zero
 * data (a locked model region must not expose the table).
 *
 * Returns 0 on success, -EIO if either the write-back compare or the
 * post-lock verification fails.
 */
static int write_characterization_data(struct max17042_chip *chip)
{
	uint16_t cell_data[CELL_CHAR_TBL_SAMPLES];
	uint16_t temp_data[CELL_CHAR_TBL_SAMPLES];
	int i;
	u8 addr;

	memset(cell_data, 0x0, sizeof(cell_data));

	/* Unlock model access */
	max17042_write_reg(chip->client, MAX17042_MLOCKReg1,
			FG_MODEL_UNLOCK1);
	max17042_write_reg(chip->client, MAX17042_MLOCKReg2,
			FG_MODEL_UNLOCK2);

	addr = MAX17042_MODELChrTbl;
	/* write the 48 words */
	for (i = 0; i < CELL_CHAR_TBL_SAMPLES; i++)
		max17042_write_reg(chip->client, addr + i,
				fg_conf_data->cell_char_tbl[i]);

	/* read the 48 words */
	for (i = 0; i < CELL_CHAR_TBL_SAMPLES; i++)
		cell_data[i] = max17042_read_reg(chip->client, addr + i);

	/* compare the data */
	if (memcmp(cell_data, fg_conf_data->cell_char_tbl,
				sizeof(cell_data))) {
		dev_err(&chip->client->dev, "%s write failed\n", __func__);
		for (i = 0; i < CELL_CHAR_TBL_SAMPLES; i++)
			dev_err(&chip->client->dev, "0x%x,0x%x\n",
				cell_data[i],
				fg_conf_data->cell_char_tbl[i]);
		/* Lock Model access regs */
		max17042_write_reg(chip->client, MAX17042_MLOCKReg1,
				FG_MODEL_LOCK1);
		max17042_write_reg(chip->client, MAX17042_MLOCKReg2,
				FG_MODEL_LOCK2);
		return -EIO;
	}

	memset(temp_data, 0x0, sizeof(temp_data));

	/* Lock Model access regs */
	max17042_write_reg(chip->client, MAX17042_MLOCKReg1, FG_MODEL_LOCK1);
	max17042_write_reg(chip->client, MAX17042_MLOCKReg2, FG_MODEL_LOCK2);

	/* read the 48 words: while locked the model region should read
	 * as zeros, so compare against the zeroed temp_data */
	for (i = 0; i < CELL_CHAR_TBL_SAMPLES; i++)
		cell_data[i] = max17042_read_reg(chip->client, addr + i);

	/* compare the data */
	if (memcmp(cell_data, temp_data, sizeof(temp_data))) {
		dev_err(&chip->client->dev, "%s verify failed\n", __func__);
		for (i = 0; i < CELL_CHAR_TBL_SAMPLES; i++)
			dev_err(&chip->client->dev, "0x%x, ", cell_data[i]);
		dev_err(&chip->client->dev, "\n");
		return -EIO;
	}
	return 0;
}

/*
 * configure_learncfg - restore the LearnCFG register.
 *
 * On MAX17042 parts whose cycle count has reached the rollover cutoff
 * a fixed rollover LearnCFG value is written (with verification);
 * otherwise the value saved in the configuration blob is restored.
 */
static void configure_learncfg(struct max17042_chip *chip)
{
	u16 cycles;

	/*assigning cycles value from restored data*/
	cycles = fg_conf_data->cycles;
	if ((cycles >= CYCLES_ROLLOVER_CUTOFF) &&
			(chip->chip_type == MAX17042))
		max17042_write_verify_reg(chip->client, MAX17042_LearnCFG,
				MAX17042_DEF_RO_LRNCFG);
	else
		max17042_write_reg(chip->client, MAX17042_LearnCFG,
				fg_conf_data->learn_cfg);
}

/*
 * write_config_regs - restore the basic configuration registers
 * (CONFIG, LearnCFG, filter/relax settings; FullSOCThr on MAX17050).
 */
static void write_config_regs(struct max17042_chip *chip)
{
	max17042_write_reg(chip->client, MAX17042_CONFIG,
			fg_conf_data->cfg);
	configure_learncfg(chip);

	/* NOTE(review): the saved filter_cfg value is written to the
	 * SHFTCFG register here - confirm against the register map that
	 * this is the intended destination */
	max17042_write_reg(chip->client, MAX17042_SHFTCFG,
			fg_conf_data->filter_cfg);
	max17042_write_reg(chip->client, MAX17042_RelaxCFG,
			fg_conf_data->relax_cfg);
	if (chip->chip_type == MAX17050)
		max17042_write_reg(chip->client, MAX17050_FullSOCThr,
				fg_conf_data->full_soc_thr);
}

/*
 * write_custom_regs - restore battery-model tuning registers.
 *
 * Common registers (RCOMP0, TempCo, ICHGTerm) are written with
 * verification; temperature gain/offset come from platform data.
 * The remaining registers differ between MAX17042 and MAX17050; on
 * MAX17050 the QRTbl values are inflated by the extra reserved
 * capacity configured for the platform.
 */
static void write_custom_regs(struct max17042_chip *chip)
{
	max17042_write_verify_reg(chip->client, MAX17042_RCOMP0,
			fg_conf_data->rcomp0);
	max17042_write_verify_reg(chip->client, MAX17042_TempCo,
			fg_conf_data->tempCo);
	max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm,
			fg_conf_data->ichgt_term);

	/* adjust Temperature gain and offset */
	max17042_write_reg(chip->client, MAX17042_TGAIN,
			chip->pdata->tgain);
	max17042_write_reg(chip->client, MAx17042_TOFF,
			chip->pdata->toff);

	if (chip->chip_type == MAX17042) {
		max17042_write_reg(chip->client, MAX17042_ETC,
				fg_conf_data->etc);
		max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
				fg_conf_data->kempty0);
		max17042_write_verify_reg(chip->client, MAX17042_SOCempty,
				fg_conf_data->soc_empty);
		max17042_write_verify_reg(chip->client, MAX17042_V_empty,
				MAX17042_DEF_VEMPTY_VAL);
	} else {	/* chip type max17050 */
		max17042_write_verify_reg(chip->client, MAX17050_V_empty,
				fg_conf_data->vempty);
		max17042_write_verify_reg(chip->client, MAX17050_QRTbl00,
				fg_conf_data->qrtbl00 +
				chip->extra_resv_cap);
		max17042_write_verify_reg(chip->client, MAX17050_QRTbl10,
				fg_conf_data->qrtbl10 +
				chip->extra_resv_cap);
		max17042_write_verify_reg(chip->client, MAX17050_QRTbl20,
				fg_conf_data->qrtbl20 +
				chip->extra_resv_cap);
		max17042_write_verify_reg(chip->client, MAX17050_QRTbl30,
				fg_conf_data->qrtbl30 +
				chip->extra_resv_cap);
	}
}

/*
 * update_capacity_regs - restore FullCAP, FullCAPNom and DesignCap.
 *
 * All three are derived from the stored full_cap, scaled by the
 * model algorithm factor and the sense-resistor value.
 */
static void update_capacity_regs(struct max17042_chip *chip)
{
	max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
		MAX17042_MODEL_MUL_FACTOR(fg_conf_data->full_cap,
		chip->model_algo_factor) * fg_conf_data->rsense);
	max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
		MAX17042_MODEL_MUL_FACTOR(fg_conf_data->full_cap,
		chip->model_algo_factor) * fg_conf_data->rsense);
	max17042_write_reg(chip->client, MAX17042_DesignCap,
		MAX17042_MODEL_MUL_FACTOR(fg_conf_data->full_cap,
		chip->model_algo_factor) * fg_conf_data->rsense);
}

/*
 * reset_vfsoc0_reg - latch the current VFSOC into VFSOC0.
 *
 * Saves the current VFSOC reading in the file-scope fg_vfSoc (used
 * later by load_new_capacity_params), then writes it to VFSOC0 while
 * the register is unlocked.
 */
static void reset_vfsoc0_reg(struct max17042_chip *chip)
{
	fg_vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC);
	max17042_write_reg(chip->client, MAX17042_VFSOC0Enable,
			VFSOC0_UNLOCK);
	max17042_write_verify_reg(chip->client, MAX17042_VFSOC0, fg_vfSoc);
	max17042_write_reg(chip->client, MAX17042_VFSOC0Enable,
			VFSOC0_LOCK);
}

/*
 * load_new_capacity_params - program remaining/full capacity and the
 * dQ/dP accumulators after a model (re)load.
 *
 * @is_por: when true (power-on reset) the remaining capacity is
 * recomputed from the latched VFSOC percentage; otherwise RemCap and
 * RepCap are left alone.
 */
static void load_new_capacity_params(struct max17042_chip *chip,
		bool is_por)
{
	u16 rem_cap, rep_cap, dq_acc;

	if (is_por) {
		/* fg_vfSoc needs to shifted by 8 bits to get the
		 * perc in 1% accuracy, to get the right rem_cap multiply
		 * full_cap by model multiplication factor,fg_vfSoc
		 * and divide by 100
		 */
		rem_cap = ((fg_vfSoc >> 8) *
			(u32)(MAX17042_MODEL_MUL_FACTOR
			(fg_conf_data->full_cap,
			chip->model_algo_factor))) / 100;
		max17042_write_verify_reg(chip->client,
				MAX17042_RemCap, rem_cap);

		rep_cap = rem_cap;
		max17042_write_verify_reg(chip->client,
				MAX17042_RepCap, rep_cap);
	}

	/* Write dQ_acc to 200% of Capacity and dP_acc to 200% */
	dq_acc = MAX17042_MODEL_MUL_FACTOR(fg_conf_data->full_cap,
			chip->model_algo_factor) / dQ_ACC_DIV;
	max17042_write_verify_reg(chip->client, MAX17042_dQacc, dq_acc);
	max17042_write_verify_reg(chip->client, MAX17042_dPacc, dP_ACC_200);

	max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
			fg_conf_data->full_cap * fg_conf_data->rsense);
	max17042_write_reg(chip->client, MAX17042_DesignCap,
			MAX17042_MODEL_MUL_FACTOR(fg_conf_data->full_cap,
			chip->model_algo_factor) * fg_conf_data->rsense);
	max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
			MAX17042_MODEL_MUL_FACTOR(fg_conf_data->full_cap,
			chip->model_algo_factor) * fg_conf_data->rsense);

	/* Update SOC register with new SOC */
	max17042_write_reg(chip->client, MAX17042_RepSOC, fg_vfSoc);
}

/*
 * update_runtime_params - read back the learned fuel-gauge state into
 * the configuration blob before it is persisted.
 */
static void update_runtime_params(struct max17042_chip *chip)
{
	fg_conf_data->rcomp0 = max17042_read_reg(chip->client,
				MAX17042_RCOMP0);
	fg_conf_data->tempCo = max17042_read_reg(chip->client,
				MAX17042_TempCo);

	/*
	 * Save only the original qrtbl register values ignoring the
	 * additionally reserved capacity. We deal with reserved
	 * capacity while restoring.
	 */
	if (chip->chip_type == MAX17050) {
		fg_conf_data->qrtbl00 = max17042_read_reg(chip->client,
				MAX17050_QRTbl00) - chip->extra_resv_cap;
		fg_conf_data->qrtbl10 = max17042_read_reg(chip->client,
				MAX17050_QRTbl10) - chip->extra_resv_cap;
		fg_conf_data->qrtbl20 = max17042_read_reg(chip->client,
				MAX17050_QRTbl20) - chip->extra_resv_cap;
		fg_conf_data->qrtbl30 = max17042_read_reg(chip->client,
				MAX17050_QRTbl30) - chip->extra_resv_cap;
	}

	fg_conf_data->full_capnom = max17042_read_reg(chip->client,
				MAX17042_FullCAPNom);
	fg_conf_data->full_cap = max17042_read_reg(chip->client,
				MAX17042_FullCAP);
	/* undo the rsense/model-factor scaling applied when the values
	 * were programmed, so the blob stores unscaled capacities */
	if (fg_conf_data->rsense) {
		fg_conf_data->full_capnom = MAX17042_MODEL_DIV_FACTOR(
					fg_conf_data->full_capnom,
					chip->model_algo_factor) /
					fg_conf_data->rsense;
		fg_conf_data->full_cap /= fg_conf_data->rsense;
	}
	fg_conf_data->cycles = max17042_read_reg(chip->client,
				MAX17042_Cycles);

	/* Dump data before saving */
	dump_fg_conf_data(chip);
}

/*
 * save_runtime_params - persist the learned parameters via the
 * platform save_config_data hook. No-op until chip init has finished
 * or when no save hook is provided. The cell characterization table
 * is excluded from the saved size (it never changes at runtime).
 */
static void save_runtime_params(struct max17042_chip *chip)
{
	int size, retval;

	dev_dbg(&chip->client->dev, "%s\n", __func__);

	if (!chip->pdata->save_config_data ||
			!chip->pdata->is_init_done)
		return ;

	update_runtime_params(chip);

	size = sizeof(*fg_conf_data) - sizeof(fg_conf_data->cell_char_tbl);
	retval = chip->pdata->save_config_data(DRV_NAME, fg_conf_data, size);
	if (retval < 0) {
		dev_err(&chip->client->dev, "%s failed\n", __func__);
		return ;
	}
}

/*
 * init_max17042_chip - full chip initialization from the restored
 * configuration blob: config registers, characterization table,
 * custom/capacity registers, VFSOC0 latch, coulomb-counter mode and
 * capacity parameters. Clears the POR status bit when one was
 * pending on entry.
 *
 * Returns 0 on success or a negative errno if the characterization
 * table could not be programmed.
 */
static int init_max17042_chip(struct max17042_chip *chip)
{
	int ret = 0, val;
	bool is_por;

	val = max17042_read_reg(chip->client, MAX17042_STATUS);
	dev_info(&chip->client->dev, "Status reg: %x\n", val);
	if (val & STATUS_POR_BIT)
		is_por = true;
	else
		is_por = false;

	/* Initialize configuration */
	write_config_regs(chip);

	/* write cell characterization data */
	ret = write_characterization_data(chip);
	if (ret < 0)
		return ret;

	/* write custom parameters */
	write_custom_regs(chip);

	/* update capacity params */
	update_capacity_regs(chip);

	/* delay must be at least 350mS to allow VFSOC
	 * to be calculated from the new configuration
	 */
	msleep(350);

	/* reset vfsoc0 reg */
	reset_vfsoc0_reg(chip);

	/* advance to coulomb counter mode */
	max17042_write_verify_reg(chip->client, MAX17042_Cycles,
					fg_conf_data->cycles);

	/* load new capacity params */
	load_new_capacity_params(chip, is_por);

	if (is_por) {
		/* Init complete, Clear the POR bit */
		val = max17042_read_reg(chip->client, MAX17042_STATUS);
		max17042_write_reg(chip->client, MAX17042_STATUS,
					val & (~STATUS_POR_BIT));
	}

	/* reset FullCap to non inflated value */
	max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
			fg_conf_data->full_cap * fg_conf_data->rsense);

	return ret;
}

/*
 * reset_max17042 - soft-POR the chip and bring it back to a sane
 * default configuration (CONFIG = 0x2210, 47k NTC gain/offset).
 */
static void reset_max17042(struct max17042_chip *chip)
{
	/* do soft power reset */
	enable_soft_POR(chip);

	/* After Power up, the MAX17042 requires 500mS in order
	 * to perform signal debouncing and initial SOC reporting
	 */
	msleep(500);

	max17042_write_reg(chip->client, MAX17042_CONFIG, 0x2210);

	/* adjust Temperature gain and offset */
	max17042_write_reg(chip->client, MAX17042_TGAIN, NTC_47K_TGAIN);
	max17042_write_reg(chip->client, MAx17042_TOFF, NTC_47K_TOFF);
}

static void
max17042_restore_conf_data(struct max17042_chip *chip)
{
	int retval = 0, size;

	/* return if lock already acquired: another path is already
	 * performing the (one-shot) restore */
	if (!mutex_trylock(&chip->init_lock))
		return;

	if (!chip->pdata->is_init_done && chip->pdata->restore_config_data) {
		retval = chip->pdata->restore_config_data(DRV_NAME,
				fg_conf_data, sizeof(*fg_conf_data));
		if (retval == -ENXIO) {	/* no device found */
			dev_err(&chip->client->dev, "device not found\n");
			chip->pdata->is_init_done = 1;
			chip->pdata->save_config_data = NULL;
		} else if (retval < 0) {	/* device not ready */
			dev_warn(&chip->client->dev, "device not ready\n");
		} else {	/* device ready */
			set_chip_config(chip);
			/* mark the dirty byte in non-volatile memory */
			if (!fg_conf_data->config_init && retval >= 0) {
				fg_conf_data->config_init = 0x1;
				size = sizeof(*fg_conf_data) -
					sizeof(fg_conf_data->cell_char_tbl);
				retval = chip->pdata->save_config_data(
					DRV_NAME, fg_conf_data, size);
				if (retval < 0)
					dev_err(&chip->client->dev,
						"%s failed\n", __func__);
			}
		}
	}

	/* query platform hooks for which shutdown methods are active */
	if (chip->pdata->is_volt_shutdown_enabled)
		chip->pdata->is_volt_shutdown =
			chip->pdata->is_volt_shutdown_enabled();
	if (chip->pdata->is_lowbatt_shutdown_enabled)
		chip->pdata->is_lowbatt_shutdown =
			chip->pdata->is_lowbatt_shutdown_enabled();

	mutex_unlock(&chip->init_lock);
}

/*
 * set_chip_config - apply the restored configuration to the chip.
 *
 * Reinitializes the chip when the blob was never applied
 * (config_init clear) or a power-on reset is pending; on init
 * failure the chip is reset and further saves are disabled. Finishes
 * by enabling interrupts and computing charge_full_des.
 */
static void set_chip_config(struct max17042_chip *chip)
{
	int val, retval;

	/* Dump data after restoring */
	dump_fg_conf_data(chip);

	val = max17042_read_reg(chip->client, MAX17042_STATUS);
	dev_info(&chip->client->dev, "Status reg: %x\n", val);
	if (!fg_conf_data->config_init || (val & STATUS_POR_BIT)) {
		dev_info(&chip->client->dev, "Config data should be loaded\n");
		if (chip->pdata->reset_chip)
			reset_max17042(chip);
		retval = init_max17042_chip(chip);
		if (retval < 0) {
			dev_err(&chip->client->dev,
					"maxim chip init failed\n");
			reset_max17042(chip);
			chip->pdata->save_config_data = NULL;
		}
	}

	/* external-temperature mode: keep feeding the temperature
	 * register from the deferred worker */
	if (fg_conf_data->cfg & CONFIG_TEX_BIT_ENBL)
		schedule_delayed_work(&chip->temp_worker, 0);

	chip->pdata->is_init_done = 1;
	configure_interrupts(chip);

	/* multiply with 1000 to align with linux power supply sub system */
	chip->charge_full_des = (fg_conf_data->design_cap / 2) * 1000;
}

/* one-shot worker: restore config data outside of probe context */
static void max17042_init_worker(struct work_struct *work)
{
	struct max17042_chip *chip = container_of(work,
				struct max17042_chip, init_worker);

	dev_info(&chip->client->dev, "%s\n", __func__);
	max17042_restore_conf_data(chip);
}

/* periodic worker: refresh the battery pack temperature reading */
static void max17042_temp_worker(struct work_struct *w)
{
	struct delayed_work *work = to_delayed_work(w);
	struct max17042_chip *chip = container_of(work,
				struct max17042_chip, temp_worker);
	int temp;

	read_batt_pack_temp(chip, &temp);
	schedule_delayed_work(&chip->temp_worker, TEMP_WRITE_INTERVAL);
}

/* Set the SOC threshold interrupt to offset percentage in S0 state */
static void set_soc_intr_thresholds_s0(struct max17042_chip *chip,
		int offset)
{
	u16 soc_tr;
	int soc, ret;

	/* program interrupt thresholds such that we should
	 * get interrupt for every 'offset' perc change in the soc
	 */
	ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
	if (ret < 0) {
		dev_err(&chip->client->dev,
			"maxim RepSOC read failed:%d\n", ret);
		return ;
	}
	soc = ret >> 8;

	/* if upper threshold exceeds 100% then stop
	 * the interrupt for upper thresholds
	 */
	if ((soc + offset) > 100)
		soc_tr = 0xff << 8;
	else
		soc_tr = (soc + offset) << 8;

	/* if lower threshold falls
	 * below 1% limit it to 1%
	 *
	 * NOTE(review): the else branch programs the lower threshold
	 * as 'soc' rather than 'soc - offset'; confirm against the
	 * alert-threshold semantics whether that is intended.
	 */
	if ((soc - offset) < 1)
		soc_tr |= 1;
	else
		soc_tr |= soc;
	dev_info(&chip->client->dev,
			"soc perc: soc: %d, offset: %d\n", soc, offset);
	ret = max17042_write_reg(chip->client, MAX17042_SALRT_Th, soc_tr);
	if (ret < 0)
		dev_err(&chip->client->dev,
			"SOC threshold write to maxim fail:%d", ret);
}

/*
 * set_soc_intr_thresholds_s3 - pick the S3 (suspend) wake threshold
 * from the current SOC so the platform wakes before the battery runs
 * out: >15% -> 15% alert, >4% -> 4% alert, >1% -> 1% alert, else the
 * lowest threshold.
 */
static void set_soc_intr_thresholds_s3(struct max17042_chip *chip)
{
	int ret, val, soc;

	if (chip->pdata->enable_current_sense)
		ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
	else
		ret = max17042_read_reg(chip->client, MAX17042_VFSOC);
	if (ret < 0) {
		dev_err(&chip->client->dev,
			"maxim RepSOC read failed:%d\n", ret);
		return ;
	}
	val = ret;
	soc = val >> 8;

	/* Check if MSB of lower byte is set
	 * then round off the SOC to higher digit
	 */
	if (val & 0x80)
		soc += 1;

	/* If soc > 15% set the alert threshold to 15%
	 * else if soc > 4% set the threshold to 4%
	 * else set it to 1%
	 */
	if (soc > SOC_WARNING_LEVEL1)
		val = SOC_DEF_MAX_MIN1_THRLD;
	else if (soc > SOC_WARNING_LEVEL2)
		val = SOC_DEF_MAX_MIN2_THRLD;
	else if (soc > SOC_WARNING_LEVEL3)
		val = SOC_DEF_MAX_MIN3_THRLD;
	else
		val = SOC_DEF_MAX_MIN4_THRLD;

	max17042_write_reg(chip->client, MAX17042_SALRT_Th, val);
}

/*
 * max17042_get_batt_health - derive a power_supply health value from
 * pack temperature, average cell voltage and the STATUS register.
 */
static int max17042_get_batt_health(void)
{
	struct max17042_chip *chip = i2c_get_clientdata(max17042_client);
	int vavg, temp, ret;
	int stat;

	if (!chip->pdata->valid_battery) {
		dev_err(&chip->client->dev, "Invalid battery detected");
		return POWER_SUPPLY_HEALTH_UNKNOWN;
	}

	ret = read_batt_pack_temp(chip, &temp);
	if (ret < 0) {
		dev_err(&chip->client->dev,
			"battery pack temp read fail:%d", ret);
		return POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
	}
	if ((temp <= chip->pdata->temp_min_lim) ||
		(temp >= chip->pdata->temp_max_lim)) {
		dev_info(&chip->client->dev,
			"Battery Over Temp condition Detected:%d\n", temp);
		return POWER_SUPPLY_HEALTH_OVERHEAT;
	}

	stat = max17042_read_reg(chip->client, MAX17042_STATUS);
	if (stat < 0) {
		dev_err(&chip->client->dev,
			"error reading status register");
		return POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
	}

	ret = max17042_read_reg(chip->client, MAX17042_AvgVCELL);
	if (ret < 0) {
		dev_err(&chip->client->dev, "Vavg read fail:%d", ret);
		return POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
	}
	/* get the voltage to milli volts */
	vavg = ((ret >> 3) * MAX17042_VOLT_CONV_FCTR) / 1000;
	if (vavg < chip->pdata->volt_min_lim) {
		dev_info(&chip->client->dev,
			"Low Battery condition Detected:%d\n", vavg);
		return POWER_SUPPLY_HEALTH_DEAD;
	}

	if (vavg > chip->pdata->volt_max_lim + VBATT_MAX_OFFSET) {
		dev_info(&chip->client->dev,
			"Battery Over Voltage condition Detected:%d\n", vavg);
		return POWER_SUPPLY_HEALTH_OVERVOLTAGE;
	}

	/* VMX status bit also indicates an over-voltage condition */
	if (stat & STATUS_VMX_BIT) {
		dev_info(&chip->client->dev,
			"Battery Over Voltage condition Detected:%d\n", vavg);
		return POWER_SUPPLY_HEALTH_OVERVOLTAGE;
	}

	return POWER_SUPPLY_HEALTH_GOOD;
}

/*
 * max17042_evt_worker - refresh battery status/health, kick chip init
 * if still pending, and notify the power supply core.
 */
static void max17042_evt_worker(struct work_struct *work)
{
	struct max17042_chip *chip = container_of(work,
				struct max17042_chip, evt_worker);
	int status = 0, health;

	pm_runtime_get_sync(&chip->client->dev);

	/* get the battery status */
	if (chip->pdata->battery_status)
		status = chip->pdata->battery_status();

	/* get the battery health */
	if (chip->pdata->battery_health)
		health = chip->pdata->battery_health();
	else
		health = max17042_get_batt_health();

	mutex_lock(&chip->batt_lock);
	if (chip->pdata->battery_status)
		chip->status = status;
	chip->health = health;
	mutex_unlock(&chip->batt_lock);

	/* Init maxim chip if it is not already initialized */
	if (!chip->pdata->is_init_done &&
		!chip->pdata->file_sys_storage_enabled)
		schedule_work(&chip->init_worker);

	power_supply_changed(&chip->battery);
	/* If charging is stopped and there is a sudden drop in SOC below
	 * minimum threshold currently set, we'll not get further interrupts.
	 * This call to set thresholds, will take care of this scenario.
	 */
	if (chip->pdata->soc_intr_mode_enabled)
		set_soc_intr_thresholds_s0(chip, SOC_INTR_S0_THR);

	pm_runtime_put_sync(&chip->client->dev);
}

/* power_supply callback: external supply changed, re-evaluate state */
static void max17042_external_power_changed(struct power_supply *psy)
{
	struct max17042_chip *chip = container_of(psy,
				struct max17042_chip, battery);

	schedule_work(&chip->evt_worker);
}

/*
 * is_battery_online - read the battery-present status from the STATUS
 * register. Returns false on i2c error or when the battery-status
 * (absent) bit is set.
 */
static bool is_battery_online(struct max17042_chip *chip)
{
	int val;
	bool online = false;

	val = max17042_read_reg(chip->client, MAX17042_STATUS);
	if (val < 0) {
		dev_info(&chip->client->dev, "i2c read error\n");
		return online;
	}

	/* check battery present bit */
	if (val & STATUS_BST_BIT)
		online = false;
	else
		online = true;

	return online;
}

/* initialize the software-side battery properties to sane defaults */
static void init_battery_props(struct max17042_chip *chip)
{
	chip->present = 1;
	chip->ext_set_cap = -EINVAL;
	chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
	chip->health = POWER_SUPPLY_HEALTH_UNKNOWN;
	chip->technology = chip->pdata->technology;
	chip->charge_full_des = BATT_CHRG_FULL_DES;
}

#ifdef CONFIG_DEBUG_FS
/**
 * max17042_show - debugfs: show the state of an endpoint.
 * @seq: The seq_file to write data to.
 * @unused: not used
 *
 * This debugfs entry shows the content of the register
 * given in the data parameter.
 */
static int max17042_show(struct seq_file *seq, void *unused)
{
	u16 val;
	long addr;

	/* the file's private data is the register address as a hex string */
	if (kstrtol((char *)seq->private, 16, &addr))
		return -EINVAL;
	val = max17042_read_reg(max17042_client, addr);
	seq_printf(seq, "%x\n", val);
	return 0;
}

static int max17042_dbgfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, max17042_show, inode->i_private);
}

static const struct file_operations max17042_dbgfs_fops = {
	.owner		= THIS_MODULE,
	.open		= max17042_dbgfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * max17042_create_debugfs - create one read-only debugfs file per
 * chip register (named by hex address) under the driver directory.
 * On any failure the whole tree is torn down.
 *
 * NOTE(review): debugfs_create_dir()/debugfs_create_file() may also
 * return NULL depending on kernel version; the IS_ERR() checks here
 * would miss that case - verify against the target kernel.
 */
static void max17042_create_debugfs(struct max17042_chip *chip)
{
	int i;
	struct dentry *entry;

	max17042_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (IS_ERR(max17042_dbgfs_root)) {
		dev_warn(&chip->client->dev, "DEBUGFS DIR create failed\n");
		return ;
	}

	for (i = 0; i < MAX17042_MAX_MEM; i++) {
		sprintf((char *)&max17042_dbg_regs[i], "%x", i);
		entry = debugfs_create_file(
					(const char *)&max17042_dbg_regs[i],
					S_IRUGO,
					max17042_dbgfs_root,
					&max17042_dbg_regs[i],
					&max17042_dbgfs_fops);
		if (IS_ERR(entry)) {
			debugfs_remove_recursive(max17042_dbgfs_root);
			max17042_dbgfs_root = NULL;
			dev_warn(&chip->client->dev,
					"DEBUGFS entry Create failed\n");
			return ;
		}
	}
}

static inline void max17042_remove_debugfs(struct max17042_chip *chip)
{
	if (max17042_dbgfs_root)
		debugfs_remove_recursive(max17042_dbgfs_root);
}
#else
static inline void max17042_create_debugfs(struct max17042_chip *chip)
{
}
static inline void max17042_remove_debugfs(struct max17042_chip *chip)
{
}
#endif

/**
 * override_shutdown_methods - sysfs to set disable_shdwn_methods
 * Parameter as define by sysfs interface
 * Context: can sleep
 *
 * Accepts a bitmask of the three SHUTDOWN_*_MASK_BIT flags; any other
 * value is rejected with -EINVAL.
 */
static ssize_t override_shutdown_methods(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct max17042_chip *chip = dev_get_drvdata(dev);
	unsigned long value;

	if (kstrtoul(buf, 10, &value))
		return -EINVAL;

	if (value > (SHUTDOWN_DEF_FG_MASK_BIT |
			SHUTDOWN_OCV_MASK_BIT |
			SHUTDOWN_LOWBATT_MASK_BIT))
		return -EINVAL;

	chip->disable_shdwn_methods = value;
	return count;
}

/**
 * get_shutdown_methods - sysfs get disable_shdwn_methods
 * Parameter as define by sysfs interface
 * Context: can sleep
 */
static ssize_t get_shutdown_methods(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct max17042_chip *chip = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", chip->disable_shdwn_methods);
}

/**
 * get_shutdown_voltage_set_by_user - get function for sysfs shutdown_voltage
 * Parameters as defined by sysfs interface
 */
static ssize_t get_shutdown_voltage_set_by_user(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", shutdown_volt);
}

/**
 * set_shutdown_voltage - set function for sysfs shutdown_voltage
 * Parameters as defined by sysfs interface
 * shutdown_volt can take the values between 3.4V to 4.2V
 */
static ssize_t set_shutdown_voltage(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	unsigned long value;

	if (kstrtoul(buf, 10, &value))
		return -EINVAL;

	/* reject values outside the [VBATT_MIN, VBATT_MAX] window */
	if ((value < VBATT_MIN) || (value > VBATT_MAX))
		return -EINVAL;

	shutdown_volt = value;
	return count;
}

/**
 * set_fake_temp_enable - sysfs to set enable_fake_temp
 * Parameter as define by sysfs interface
 */
static ssize_t set_fake_temp_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct max17042_chip *chip = dev_get_drvdata(dev);
	unsigned long value;

	if (kstrtoul(buf, 10, &value))
		return -EINVAL;

	/* allow only 0 or 1 */
	if (value > 1)
		return -EINVAL;

	if (value)
		chip->enable_fake_temp = true;
	else
		chip->enable_fake_temp = false;

	return count;
}

/**
 * get_fake_temp_enable - sysfs get enable_fake_temp
 * Parameter as define by sysfs interface
 * Context: can sleep
 */
static ssize_t get_fake_temp_enable(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct max17042_chip *chip = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", chip->enable_fake_temp);
}

static void
configure_interrupts(struct max17042_chip *chip)
{
	int ret;
	unsigned int edge_type;
	int vmax, vmin, reg_val;

	/* set SOC-alert thresholds to lowest value */
	max17042_write_reg(chip->client, MAX17042_SALRT_Th,
					SOC_DEF_MAX_MIN4_THRLD);

	/* enable Alerts for SOCRep */
	if (chip->pdata->enable_current_sense)
		max17042_write_reg(chip->client, MAX17042_MiscCFG,
						MISCCFG_CONFIG_REPSOC);
	else
		max17042_write_reg(chip->client, MAX17042_MiscCFG,
						MISCCFG_CONFIG_VFSOC);

	/* disable the T-alert sticky bit */
	max17042_reg_read_modify(chip->client, MAX17042_CONFIG,
					CONFIG_TSTICKY_BIT_SET, 0);

	/* Setting V-alrt threshold register to default values */
	if (chip->pdata->en_vmax_intr) {
		vmax = chip->pdata->volt_max_lim + VBATT_MAX_OFFSET;
		vmin = chip->pdata->volt_min_lim - VBATT_MIN_OFFSET;
		/* upper threshold in high byte, lower in low byte */
		reg_val = ((vmax / VALERT_VOLT_OFFSET) << 8) |
				(vmin / VALERT_VOLT_OFFSET);
		max17042_write_reg(chip->client,
					MAX17042_VALRT_Th, reg_val);
	} else {
		max17042_write_reg(chip->client, MAX17042_VALRT_Th,
					VOLT_DEF_MAX_MIN_THRLD);
	}

	/* Setting T-alrt threshold register to default values */
	max17042_write_reg(chip->client, MAX17042_TALRT_Th,
					TEMP_DEF_MAX_MIN_THRLD);

	/* clear BI bit */
	max17042_reg_read_modify(chip->client, MAX17042_STATUS,
					STATUS_BI_BIT, 0);

	/* clear BR bit */
	max17042_reg_read_modify(chip->client, MAX17042_STATUS,
					STATUS_BR_BIT, 0);

	/* get interrupt edge type from ALP pin */
	if (fg_conf_data->cfg & CONFIG_ALP_BIT_ENBL)
		edge_type = IRQF_TRIGGER_RISING;
	else
		edge_type = IRQF_TRIGGER_FALLING;

	/* register interrupt; on failure the driver keeps running in
	 * polled mode with irq marked invalid */
	ret = request_threaded_irq(chip->client->irq,
					max17042_intr_handler,
					max17042_thread_handler,
					edge_type, DRV_NAME, chip);
	if (ret) {
		dev_warn(&chip->client->dev, "cannot get IRQ:%d\n",
			chip->client->irq);
		chip->client->irq = -1;
	} else {
		dev_info(&chip->client->dev, "IRQ No:%d\n",
			chip->client->irq);
	}

	/* enable interrupts */
	max17042_reg_read_modify(chip->client, MAX17042_CONFIG,
					CONFIG_ALRT_BIT_ENBL, 1);

	/* set the Interrupt threshold register for soc */
	if (chip->pdata->soc_intr_mode_enabled)
		set_soc_intr_thresholds_s0(chip, SOC_INTR_S0_THR);

	/*
	 * recheck the battery present status to
	 * make sure we didn't miss any battery
	 * removal event and power off if battery
	 * is removed/unplugged.
	 */
	if ((fg_conf_data->cfg & CONFIG_BER_BIT_ENBL) &&
		!is_battery_online(chip)) {
		dev_warn(&chip->client->dev, "battery NOT present\n");
		mutex_lock(&chip->batt_lock);
		chip->present = 0;
		mutex_unlock(&chip->batt_lock);
		kernel_power_off();
	}
}

#ifdef CONFIG_ACPI
extern void *max17042_platform_data(void *info);
#endif

/*
 * max17042_probe - i2c probe: identify the chip (MAX17042 vs
 * MAX17047/50), allocate driver state, initialize the fuel gauge
 * (coulomb-counter or voltage-based mode) and register the power
 * supply.
 */
static int max17042_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
	struct max17042_chip *chip;
	int ret, i, gpio;
	struct acpi_gpio_info gpio_info;

#ifdef CONFIG_XEN
	/* not supported under Xen */
	return -ENODEV;
#endif

#ifdef CONFIG_ACPI
	/* on ACPI platforms the platform data and alert GPIO/IRQ come
	 * from the ACPI tables rather than board files */
	client->dev.platform_data = max17042_platform_data(NULL);
	gpio = acpi_get_gpio_by_index(&client->dev, 0, &gpio_info);
	client->irq = gpio_to_irq(gpio);
	ret = gpio_request_one(gpio, GPIOF_IN, client->name);
	if (ret < 0) {
		dev_warn(&client->dev, "gpio request failed.");
		return -EIO;
	}
#endif

	if (!client->dev.platform_data) {
		dev_err(&client->dev, "Platform Data is NULL");
		return -EFAULT;
	}

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
		dev_err(&client->dev,
			"SM bus doesn't support DWORD transactions\n");
		return -EIO;
	}

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (!chip) {
		dev_err(&client->dev, "mem alloc failed\n");
		return -ENOMEM;
	}
	fg_conf_data = kzalloc(sizeof(*fg_conf_data), GFP_KERNEL);
	if (!fg_conf_data) {
		dev_err(&client->dev, "mem alloc failed\n");
		kfree(chip);
		return -ENOMEM;
	}

	chip->client = client;
	chip->pdata = client->dev.platform_data;

	/* LSB offset for qrtbl registers is 0.25%
	 * ie, 0x04 = 1% reserved capacity
	 */
	chip->extra_resv_cap = 4 * chip->pdata->resv_cap;

	if (chip->pdata->get_vmax_threshold)
		chip->voltage_max = chip->pdata->get_vmax_threshold();
	else
		chip->voltage_max = VBATT_MAX;

	if (chip->pdata->fg_algo_model)
		chip->model_algo_factor = chip->pdata->fg_algo_model;
	else
		chip->model_algo_factor = 100;

	i2c_set_clientdata(client, chip);
	max17042_client = client;

	/* read the chip id; if the bus is wedged, let the platform
	 * reset the i2c lines and retry a few times */
	ret = max17042_read_reg(chip->client, MAX17042_DevName);
	if (ret < 0 && chip->pdata->reset_i2c_lines) {
		dev_warn(&client->dev, "reset i2c device:%d\n", ret);
		for (i = 0; i < NR_I2C_RESET_CNT; i++) {
			chip->pdata->reset_i2c_lines();
			ret = max17042_read_reg(chip->client,
					MAX17042_DevName);
			if (ret < 0)
				dev_warn(&client->dev,
					"reset i2c device:%d\n", ret);
			else
				break;
		}
	}

	if (ret == MAX17042_IC_VERSION) {
		dev_info(&client->dev, "chip type max17042 detected\n");
		chip->chip_type = MAX17042;
	} else if (ret == MAX17050_IC_VERSION) {
		dev_info(&client->dev, "chip type max17047/50 detected\n");
		chip->chip_type = MAX17050;
	} else {
		dev_err(&client->dev, "device version mismatch: %x\n", ret);
		kfree(chip);
		kfree(fg_conf_data);
		return -EIO;
	}

	/* init battery properties */
	init_battery_props(chip);
	INIT_WORK(&chip->init_worker, max17042_init_worker);
	INIT_WORK(&chip->evt_worker, max17042_evt_worker);
	INIT_DEFERRABLE_WORK(&chip->temp_worker, max17042_temp_worker);
	mutex_init(&chip->batt_lock);
	mutex_init(&chip->init_lock);

	/* disable the Alert pin before setting thresholds */
	max17042_reg_read_modify(client, MAX17042_CONFIG,
					CONFIG_ALRT_BIT_ENBL, 0);

	if (chip->pdata->enable_current_sense) {
		dev_info(&chip->client->dev, "current sensing enabled\n");
		/* Initialize the chip with battery config data */
		max17042_restore_conf_data(chip);
	} else {
		dev_info(&chip->client->dev, "current sensing NOT enabled\n");
		/* in case of invalid battery no need to init the FG chip */
		chip->pdata->is_init_done = 1;
		/* disable coulomb counter based fuel gauging */
		max17042_write_reg(chip->client, MAX17042_CGAIN,
					MAX17042_CGAIN_DISABLE);
		/* Enable voltage based Fuel Gauging */
		max17042_write_reg(chip->client, MAX17042_LearnCFG,
					MAX17042_EN_VOLT_FG);
		/* configure interrupts for SOCvf */
		max17042_write_reg(chip->client, MAX17042_MiscCFG,
					MAX17042_CFG_INTR_SOCVF);
	}

	chip->technology = chip->pdata->technology;

	if (chip->chip_type == MAX17042)
		chip->battery.name = "max17042_battery";
	else
		chip->battery.name = "max17047_battery";

	chip->battery.type = POWER_SUPPLY_TYPE_BATTERY;
	chip->battery.get_property = max17042_get_property;
	chip->battery.set_property = max17042_set_property;
	chip->battery.property_is_privileged_read =
			max17042_property_is_privileged_read;
	chip->battery.external_power_changed =
			max17042_external_power_changed;
	chip->battery.properties = max17042_battery_props;
	chip->battery.num_properties = ARRAY_SIZE(max17042_battery_props);
	chip->status = POWER_SUPPLY_STATUS_DISCHARGING;

	ret = power_supply_register(&client->dev, &chip->battery);
	if (ret) {
		dev_err(&client->dev, "failed: power supply register\n");
		kfree(chip);
		kfree(fg_conf_data);
		return ret;
	}

	/* Init Runtime PM State */
	pm_runtime_put_noidle(&chip->client->dev);
	pm_schedule_suspend(&chip->client->dev, MSEC_PER_SEC);

	/* In case of power supply register INT now
	 * else the INT will registered after chip init.
	 */
	if (!chip->pdata->enable_current_sense)
		configure_interrupts(chip);

	/* user-space fuel-gauge helper interface */
	if (chip->pdata->file_sys_storage_enabled)
		misc_register(&fg_helper);

	/* Create debugfs for maxim registers */
	max17042_create_debugfs(chip);

	/* create sysfs file to disable shutdown methods */
	ret = device_create_file(&client->dev,
			&dev_attr_disable_shutdown_methods);
	if (ret)
		dev_warn(&client->dev, "cannot create sysfs entry\n");

	/* create sysfs file to enter shutdown voltage */
	ret = device_create_file(&client->dev,
			&dev_attr_shutdown_voltage);
	if (ret)
		dev_warn(&client->dev, "cannot create sysfs entry\n");

	/* create sysfs file to enable fake battery temperature */
	ret = device_create_file(&client->dev,
			&dev_attr_enable_fake_temp);
	if (ret)
		dev_warn(&client->dev, "cannot create sysfs entry\n");

	/* Register reboot notifier callback, to persist learned
	 * parameters on shutdown when the kernel owns the storage */
	if (!chip->pdata->file_sys_storage_enabled)
		register_reboot_notifier(&max17042_reboot_notifier_block);

	schedule_work(&chip->evt_worker);
	return 0;
}

/* i2c remove: undo everything probe registered */
static int max17042_remove(struct i2c_client *client)
{
	struct max17042_chip *chip = i2c_get_clientdata(client);

	if (chip->pdata->file_sys_storage_enabled)
		misc_deregister(&fg_helper);
	else
		unregister_reboot_notifier(&max17042_reboot_notifier_block);
	device_remove_file(&client->dev,
			&dev_attr_disable_shutdown_methods);
	device_remove_file(&client->dev, &dev_attr_shutdown_voltage);
	device_remove_file(&client->dev, &dev_attr_enable_fake_temp);
	max17042_remove_debugfs(chip);
	if (client->irq > 0)
		free_irq(client->irq, chip);
	power_supply_unregister(&chip->battery);
	pm_runtime_get_noresume(&chip->client->dev);
	kfree(chip);
	kfree(fg_conf_data);
	return 0;
}

#ifdef CONFIG_PM
/*
 * max17042_suspend - arm the chip as a wakeup source for S3.
 *
 * Programs conservative SOC and Vmin alert thresholds, then disables
 * the irq while keeping it wake-enabled.
 */
static int max17042_suspend(struct device *dev)
{
	struct max17042_chip *chip = dev_get_drvdata(dev);

	/*
	 * disable irq here doesn't mean max17042 interrupt
	 * can't wake up system. max17042 interrupt is triggered
	 * by GPIO pin, which is always active.
	 * When resume callback calls enable_irq, kernel
	 * would deliver the buffered interrupt (if it has) to
	 * driver.
	 */
	if (chip->client->irq > 0) {
		/* set SOC alert thresholds */
		set_soc_intr_thresholds_s3(chip);
		/* setting Vmin(3300mV) threshold to wake the
		 * platform in under low battery conditions
		 */
		max17042_write_reg(chip->client, MAX17042_VALRT_Th,
					VOLT_MIN_THRLD_ENBL);
		disable_irq(chip->client->irq);
		enable_irq_wake(chip->client->irq);
	}

	if (fg_conf_data->cfg & CONFIG_TEX_BIT_ENBL)
		cancel_delayed_work_sync(&chip->temp_worker);

	/* max17042 IC automatically goes into shutdown mode
	 * if the SCL and SDA were held low for more than
	 * timeout of SHDNTIMER register value
	 */
	dev_dbg(&chip->client->dev, "max17042 suspend\n");

	return 0;
}

/*
 * max17042_resume - restore runtime alert thresholds, re-enable the
 * irq and refresh battery status.
 */
static int max17042_resume(struct device *dev)
{
	struct max17042_chip *chip = dev_get_drvdata(dev);
	int vmax, vmin, reg_val;

	if (chip->client->irq > 0) {
		/* Setting V-alrt threshold register to default values */
		if (chip->pdata->en_vmax_intr) {
			vmax = chip->pdata->volt_max_lim +
					VBATT_MAX_OFFSET;
			vmin = chip->pdata->volt_min_lim -
					VBATT_MIN_OFFSET;
			reg_val = ((vmax / VALERT_VOLT_OFFSET) << 8) |
					(vmin / VALERT_VOLT_OFFSET);
			max17042_write_reg(chip->client,
						MAX17042_VALRT_Th, reg_val);
		} else {
			max17042_write_reg(chip->client, MAX17042_VALRT_Th,
						VOLT_DEF_MAX_MIN_THRLD);
		}
		/* set SOC-alert thresholds to lowest value */
		max17042_write_reg(chip->client, MAX17042_SALRT_Th,
						SOC_DEF_MAX_MIN4_THRLD);
		enable_irq(chip->client->irq);
		disable_irq_wake(chip->client->irq);
	}

	/* update battery status and health */
	schedule_work(&chip->evt_worker);

	if (fg_conf_data->cfg & CONFIG_TEX_BIT_ENBL)
		schedule_delayed_work(&chip->temp_worker, 0);

	/* max17042 IC automatically wakes up if any edge
	 * on SCL or SDA if we set I2CSH of CONFG reg
	 */
	dev_dbg(&chip->client->dev, "max17042 resume\n");

	return 0;
}
#else
#define max17042_suspend	NULL
#define max17042_resume		NULL
#endif

#ifdef CONFIG_PM_RUNTIME
/* runtime PM hooks: no device work needed, just trace the calls */
static int max17042_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
	return 0;
}

static int max17042_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
	return 0;
}

static int max17042_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
	return 0;
}
#else
#define max17042_runtime_suspend	NULL
#define max17042_runtime_resume		NULL
#define max17042_runtime_idle		NULL
#endif

/* both lower- and upper-case ids are matched for firmware quirks */
static const struct i2c_device_id max17042_id[] = {
	{ "max17042", 0 },
	{ "max17047", 1 },
	{ "max17050", 2 },
	{ "MAX17042", 0 },
	{ "MAX17047", 1 },
	{ "MAX17050", 2 },
	{ },
};
MODULE_DEVICE_TABLE(i2c, max17042_id);

static const struct dev_pm_ops max17042_pm_ops = {
	.suspend		= max17042_suspend,
	.resume			= max17042_resume,
	.runtime_suspend	= max17042_runtime_suspend,
	.runtime_resume		= max17042_runtime_resume,
	.runtime_idle		= max17042_runtime_idle,
};

#ifdef CONFIG_ACPI
static struct acpi_device_id max17042_acpi_match[] = {
	{"MAX17047", 0},
	{}
};
MODULE_DEVICE_TABLE(acpi, max17042_acpi_match);
#endif

static struct i2c_driver max17042_i2c_driver = {
	.driver	= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
		.pm	= &max17042_pm_ops,
#ifdef CONFIG_ACPI
		.acpi_match_table = ACPI_PTR(max17042_acpi_match),
#endif
	},
	.probe		= max17042_probe,
	.remove		= max17042_remove,
	.id_table	= max17042_id,
};

/*
 * max17042_reboot_callback - on reboot/shutdown, persist learned
 * parameters and block further register access (a broken i2c
 * transaction during shutdown can leave the chip unrecoverable).
 */
static int max17042_reboot_callback(struct notifier_block *nfb,
					unsigned long event, void *data)
{
	struct max17042_chip *chip = i2c_get_clientdata(max17042_client);

	if (chip->pdata->enable_current_sense)
		save_runtime_params(chip);

	/* if the shutdown or reboot sequence started
	 * then block the access to maxim registers as chip
	 * cannot be recovered from broken i2c transactions
	 */
	mutex_lock(&chip->batt_lock);
	chip->plat_rebooting = true;
	mutex_unlock(&chip->batt_lock);

	return NOTIFY_OK;
}

static int __init max17042_init(void)
{
	return i2c_add_driver(&max17042_i2c_driver);
}
#ifdef CONFIG_ACPI
/* on ACPI platforms wait for ACPI infrastructure before probing */
late_initcall(max17042_init);
#else
module_init(max17042_init);
#endif

static void __exit max17042_exit(void)
{
	i2c_del_driver(&max17042_i2c_driver);
}
module_exit(max17042_exit);

/* "fake_batt_full" on the kernel command line forces a full battery */
int __init set_fake_batt_full(char *p)
{
	fake_batt_full = true;
	return 0;
}
early_param("fake_batt_full", set_fake_batt_full);

MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("MAX17042 Fuel Gauge");
MODULE_LICENSE("GPL");
gpl-2.0
AnurupDey/UNDONE_Engine
Dependencies/Includes/glm/glm/detail/glm.cpp
297
9664
/////////////////////////////////////////////////////////////////////////////////// /// OpenGL Mathematics (glm.g-truc.net) /// /// Copyright (c) 2005 - 2014 G-Truc Creation (www.g-truc.net) /// Permission is hereby granted, free of charge, to any person obtaining a copy /// of this software and associated documentation files (the "Software"), to deal /// in the Software without restriction, including without limitation the rights /// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell /// copies of the Software, and to permit persons to whom the Software is /// furnished to do so, subject to the following conditions: /// /// The above copyright notice and this permission notice shall be included in /// all copies or substantial portions of the Software. /// /// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR /// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, /// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE /// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER /// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, /// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN /// THE SOFTWARE. 
/// /// @ref core /// @file glm/glm.cpp /// @date 2013-04-22 / 2013-04-22 /// @author Christophe Riccio /////////////////////////////////////////////////////////////////////////////////// #include <glm/glm.hpp> #include <glm/gtc/quaternion.hpp> #include <glm/gtx/dual_quaternion.hpp> namespace glm{ namespace detail { // tvec1 type explicit instantiation /* template struct tvec1<uint8, lowp>; template struct tvec1<uint16, lowp>; template struct tvec1<uint32, lowp>; template struct tvec1<uint64, lowp>; template struct tvec1<int8, lowp>; template struct tvec1<int16, lowp>; template struct tvec1<int32, lowp>; template struct tvec1<int64, lowp>; template struct tvec1<float16, lowp>; template struct tvec1<float32, lowp>; template struct tvec1<float64, lowp>; template struct tvec1<uint8, mediump>; template struct tvec1<uint16, mediump>; template struct tvec1<uint32, mediump>; template struct tvec1<uint64, mediump>; template struct tvec1<int8, mediump>; template struct tvec1<int16, mediump>; template struct tvec1<int32, mediump>; template struct tvec1<int64, mediump>; template struct tvec1<float16, mediump>; template struct tvec1<float32, mediump>; template struct tvec1<float64, mediump>; template struct tvec1<uint8, highp>; template struct tvec1<uint16, highp>; template struct tvec1<uint32, highp>; template struct tvec1<uint64, highp>; template struct tvec1<int8, highp>; template struct tvec1<int16, highp>; template struct tvec1<int32, highp>; template struct tvec1<int64, highp>; template struct tvec1<float16, highp>; template struct tvec1<float32, highp>; template struct tvec1<float64, highp>; */ // tvec2 type explicit instantiation template struct tvec2<uint8, lowp>; template struct tvec2<uint16, lowp>; template struct tvec2<uint32, lowp>; template struct tvec2<uint64, lowp>; template struct tvec2<int8, lowp>; template struct tvec2<int16, lowp>; template struct tvec2<int32, lowp>; template struct tvec2<int64, lowp>; template struct tvec2<float32, lowp>; template struct 
tvec2<float64, lowp>; template struct tvec2<uint8, mediump>; template struct tvec2<uint16, mediump>; template struct tvec2<uint32, mediump>; template struct tvec2<uint64, mediump>; template struct tvec2<int8, mediump>; template struct tvec2<int16, mediump>; template struct tvec2<int32, mediump>; template struct tvec2<int64, mediump>; template struct tvec2<float32, mediump>; template struct tvec2<float64, mediump>; template struct tvec2<uint8, highp>; template struct tvec2<uint16, highp>; template struct tvec2<uint32, highp>; template struct tvec2<uint64, highp>; template struct tvec2<int8, highp>; template struct tvec2<int16, highp>; template struct tvec2<int32, highp>; template struct tvec2<int64, highp>; template struct tvec2<float32, highp>; template struct tvec2<float64, highp>; // tvec3 type explicit instantiation template struct tvec3<uint8, lowp>; template struct tvec3<uint16, lowp>; template struct tvec3<uint32, lowp>; template struct tvec3<uint64, lowp>; template struct tvec3<int8, lowp>; template struct tvec3<int16, lowp>; template struct tvec3<int32, lowp>; template struct tvec3<int64, lowp>; template struct tvec3<float32, lowp>; template struct tvec3<float64, lowp>; template struct tvec3<uint8, mediump>; template struct tvec3<uint16, mediump>; template struct tvec3<uint32, mediump>; template struct tvec3<uint64, mediump>; template struct tvec3<int8, mediump>; template struct tvec3<int16, mediump>; template struct tvec3<int32, mediump>; template struct tvec3<int64, mediump>; template struct tvec3<float32, mediump>; template struct tvec3<float64, mediump>; template struct tvec3<uint8, highp>; template struct tvec3<uint16, highp>; template struct tvec3<uint32, highp>; template struct tvec3<uint64, highp>; template struct tvec3<int8, highp>; template struct tvec3<int16, highp>; template struct tvec3<int32, highp>; template struct tvec3<int64, highp>; template struct tvec3<float32, highp>; template struct tvec3<float64, highp>; // tvec4 type explicit 
instantiation template struct tvec4<uint8, lowp>; template struct tvec4<uint16, lowp>; template struct tvec4<uint32, lowp>; template struct tvec4<uint64, lowp>; template struct tvec4<int8, lowp>; template struct tvec4<int16, lowp>; template struct tvec4<int32, lowp>; template struct tvec4<int64, lowp>; template struct tvec4<float32, lowp>; template struct tvec4<float64, lowp>; template struct tvec4<uint8, mediump>; template struct tvec4<uint16, mediump>; template struct tvec4<uint32, mediump>; template struct tvec4<uint64, mediump>; template struct tvec4<int8, mediump>; template struct tvec4<int16, mediump>; template struct tvec4<int32, mediump>; template struct tvec4<int64, mediump>; template struct tvec4<float32, mediump>; template struct tvec4<float64, mediump>; template struct tvec4<uint8, highp>; template struct tvec4<uint16, highp>; template struct tvec4<uint32, highp>; template struct tvec4<uint64, highp>; template struct tvec4<int8, highp>; template struct tvec4<int16, highp>; template struct tvec4<int32, highp>; template struct tvec4<int64, highp>; template struct tvec4<float32, highp>; template struct tvec4<float64, highp>; // tmat2x2 type explicit instantiation template struct tmat2x2<float32, lowp>; template struct tmat2x2<float64, lowp>; template struct tmat2x2<float32, mediump>; template struct tmat2x2<float64, mediump>; template struct tmat2x2<float32, highp>; template struct tmat2x2<float64, highp>; // tmat2x3 type explicit instantiation template struct tmat2x3<float32, lowp>; template struct tmat2x3<float64, lowp>; template struct tmat2x3<float32, mediump>; template struct tmat2x3<float64, mediump>; template struct tmat2x3<float32, highp>; template struct tmat2x3<float64, highp>; // tmat2x4 type explicit instantiation template struct tmat2x4<float32, lowp>; template struct tmat2x4<float64, lowp>; template struct tmat2x4<float32, mediump>; template struct tmat2x4<float64, mediump>; template struct tmat2x4<float32, highp>; template struct 
tmat2x4<float64, highp>; // tmat3x2 type explicit instantiation template struct tmat3x2<float32, lowp>; template struct tmat3x2<float64, lowp>; template struct tmat3x2<float32, mediump>; template struct tmat3x2<float64, mediump>; template struct tmat3x2<float32, highp>; template struct tmat3x2<float64, highp>; // tmat3x3 type explicit instantiation template struct tmat3x3<float32, lowp>; template struct tmat3x3<float64, lowp>; template struct tmat3x3<float32, mediump>; template struct tmat3x3<float64, mediump>; template struct tmat3x3<float32, highp>; template struct tmat3x3<float64, highp>; // tmat3x4 type explicit instantiation template struct tmat3x4<float32, lowp>; template struct tmat3x4<float64, lowp>; template struct tmat3x4<float32, mediump>; template struct tmat3x4<float64, mediump>; template struct tmat3x4<float32, highp>; template struct tmat3x4<float64, highp>; // tmat4x2 type explicit instantiation template struct tmat4x2<float32, lowp>; template struct tmat4x2<float64, lowp>; template struct tmat4x2<float32, mediump>; template struct tmat4x2<float64, mediump>; template struct tmat4x2<float32, highp>; template struct tmat4x2<float64, highp>; // tmat4x3 type explicit instantiation template struct tmat4x3<float32, lowp>; template struct tmat4x3<float64, lowp>; template struct tmat4x3<float32, mediump>; template struct tmat4x3<float64, mediump>; template struct tmat4x3<float32, highp>; template struct tmat4x3<float64, highp>; // tmat4x4 type explicit instantiation template struct tmat4x4<float32, lowp>; template struct tmat4x4<float64, lowp>; template struct tmat4x4<float32, mediump>; template struct tmat4x4<float64, mediump>; template struct tmat4x4<float32, highp>; template struct tmat4x4<float64, highp>; // tquat type explicit instantiation template struct tquat<float32, lowp>; template struct tquat<float64, lowp>; template struct tquat<float32, mediump>; template struct tquat<float64, mediump>; template struct tquat<float32, highp>; template struct 
tquat<float64, highp>; //tdualquat type explicit instantiation template struct tdualquat<float32, lowp>; template struct tdualquat<float64, lowp>; template struct tdualquat<float32, mediump>; template struct tdualquat<float64, mediump>; template struct tdualquat<float32, highp>; template struct tdualquat<float64, highp>; }//namespace detail }//namespace glm
gpl-2.0
prakhya/linux_sai
drivers/media/i2c/as3645a.c
297
23454
/* * drivers/media/i2c/as3645a.c - AS3645A and LM3555 flash controllers driver * * Copyright (C) 2008-2011 Nokia Corporation * Copyright (c) 2011, Intel Corporation. * * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * * TODO: * - Check hardware FSTROBE control when sensor driver add support for this * */ #include <linux/delay.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> #include <media/i2c/as3645a.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #define AS_TIMER_MS_TO_CODE(t) (((t) - 100) / 50) #define AS_TIMER_CODE_TO_MS(c) (50 * (c) + 100) /* Register definitions */ /* Read-only Design info register: Reset state: xxxx 0001 */ #define AS_DESIGN_INFO_REG 0x00 #define AS_DESIGN_INFO_FACTORY(x) (((x) >> 4)) #define AS_DESIGN_INFO_MODEL(x) ((x) & 0x0f) /* Read-only Version control register: Reset state: 0000 0000 * for first engineering samples */ #define AS_VERSION_CONTROL_REG 0x01 #define AS_VERSION_CONTROL_RFU(x) (((x) >> 4)) #define AS_VERSION_CONTROL_VERSION(x) ((x) & 0x0f) /* Read / Write (Indicator and timer register): Reset state: 0000 1111 */ #define AS_INDICATOR_AND_TIMER_REG 0x02 #define AS_INDICATOR_AND_TIMER_TIMEOUT_SHIFT 0 #define AS_INDICATOR_AND_TIMER_VREF_SHIFT 4 #define AS_INDICATOR_AND_TIMER_INDICATOR_SHIFT 6 /* Read / Write (Current set 
register): Reset state: 0110 1001 */ #define AS_CURRENT_SET_REG 0x03 #define AS_CURRENT_ASSIST_LIGHT_SHIFT 0 #define AS_CURRENT_LED_DET_ON (1 << 3) #define AS_CURRENT_FLASH_CURRENT_SHIFT 4 /* Read / Write (Control register): Reset state: 1011 0100 */ #define AS_CONTROL_REG 0x04 #define AS_CONTROL_MODE_SETTING_SHIFT 0 #define AS_CONTROL_STROBE_ON (1 << 2) #define AS_CONTROL_OUT_ON (1 << 3) #define AS_CONTROL_EXT_TORCH_ON (1 << 4) #define AS_CONTROL_STROBE_TYPE_EDGE (0 << 5) #define AS_CONTROL_STROBE_TYPE_LEVEL (1 << 5) #define AS_CONTROL_COIL_PEAK_SHIFT 6 /* Read only (D3 is read / write) (Fault and info): Reset state: 0000 x000 */ #define AS_FAULT_INFO_REG 0x05 #define AS_FAULT_INFO_INDUCTOR_PEAK_LIMIT (1 << 1) #define AS_FAULT_INFO_INDICATOR_LED (1 << 2) #define AS_FAULT_INFO_LED_AMOUNT (1 << 3) #define AS_FAULT_INFO_TIMEOUT (1 << 4) #define AS_FAULT_INFO_OVER_TEMPERATURE (1 << 5) #define AS_FAULT_INFO_SHORT_CIRCUIT (1 << 6) #define AS_FAULT_INFO_OVER_VOLTAGE (1 << 7) /* Boost register */ #define AS_BOOST_REG 0x0d #define AS_BOOST_CURRENT_DISABLE (0 << 0) #define AS_BOOST_CURRENT_ENABLE (1 << 0) /* Password register is used to unlock boost register writing */ #define AS_PASSWORD_REG 0x0f #define AS_PASSWORD_UNLOCK_VALUE 0x55 enum as_mode { AS_MODE_EXT_TORCH = 0 << AS_CONTROL_MODE_SETTING_SHIFT, AS_MODE_INDICATOR = 1 << AS_CONTROL_MODE_SETTING_SHIFT, AS_MODE_ASSIST = 2 << AS_CONTROL_MODE_SETTING_SHIFT, AS_MODE_FLASH = 3 << AS_CONTROL_MODE_SETTING_SHIFT, }; /* * struct as3645a * * @subdev: V4L2 subdev * @pdata: Flash platform data * @power_lock: Protects power_count * @power_count: Power reference count * @led_mode: V4L2 flash LED mode * @timeout: Flash timeout in microseconds * @flash_current: Flash current (0=200mA ... 15=500mA). Maximum * values are 400mA for two LEDs and 500mA for one LED. * @assist_current: Torch/Assist light current (0=20mA, 1=40mA ... 7=160mA) * @indicator_current: Indicator LED current (0=0mA, 1=2.5mA ... 
4=10mA) * @strobe_source: Flash strobe source (software or external) */ struct as3645a { struct v4l2_subdev subdev; const struct as3645a_platform_data *pdata; struct mutex power_lock; int power_count; /* Controls */ struct v4l2_ctrl_handler ctrls; enum v4l2_flash_led_mode led_mode; unsigned int timeout; u8 flash_current; u8 assist_current; u8 indicator_current; enum v4l2_flash_strobe_source strobe_source; }; #define to_as3645a(sd) container_of(sd, struct as3645a, subdev) /* Return negative errno else zero on success */ static int as3645a_write(struct as3645a *flash, u8 addr, u8 val) { struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev); int rval; rval = i2c_smbus_write_byte_data(client, addr, val); dev_dbg(&client->dev, "Write Addr:%02X Val:%02X %s\n", addr, val, rval < 0 ? "fail" : "ok"); return rval; } /* Return negative errno else a data byte received from the device. */ static int as3645a_read(struct as3645a *flash, u8 addr) { struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev); int rval; rval = i2c_smbus_read_byte_data(client, addr); dev_dbg(&client->dev, "Read Addr:%02X Val:%02X %s\n", addr, rval, rval < 0 ? "fail" : "ok"); return rval; } /* ----------------------------------------------------------------------------- * Hardware configuration and trigger */ /* * as3645a_set_config - Set flash configuration registers * @flash: The flash * * Configure the hardware with flash, assist and indicator currents, as well as * flash timeout. * * Return 0 on success, or a negative error code if an I2C communication error * occurred. 
*/ static int as3645a_set_config(struct as3645a *flash) { int ret; u8 val; val = (flash->flash_current << AS_CURRENT_FLASH_CURRENT_SHIFT) | (flash->assist_current << AS_CURRENT_ASSIST_LIGHT_SHIFT) | AS_CURRENT_LED_DET_ON; ret = as3645a_write(flash, AS_CURRENT_SET_REG, val); if (ret < 0) return ret; val = AS_TIMER_MS_TO_CODE(flash->timeout / 1000) << AS_INDICATOR_AND_TIMER_TIMEOUT_SHIFT; val |= (flash->pdata->vref << AS_INDICATOR_AND_TIMER_VREF_SHIFT) | ((flash->indicator_current ? flash->indicator_current - 1 : 0) << AS_INDICATOR_AND_TIMER_INDICATOR_SHIFT); return as3645a_write(flash, AS_INDICATOR_AND_TIMER_REG, val); } /* * as3645a_set_control - Set flash control register * @flash: The flash * @mode: Desired output mode * @on: Desired output state * * Configure the hardware with output mode and state. * * Return 0 on success, or a negative error code if an I2C communication error * occurred. */ static int as3645a_set_control(struct as3645a *flash, enum as_mode mode, bool on) { u8 reg; /* Configure output parameters and operation mode. */ reg = (flash->pdata->peak << AS_CONTROL_COIL_PEAK_SHIFT) | (on ? AS_CONTROL_OUT_ON : 0) | mode; if (flash->led_mode == V4L2_FLASH_LED_MODE_FLASH && flash->strobe_source == V4L2_FLASH_STROBE_SOURCE_EXTERNAL) { reg |= AS_CONTROL_STROBE_TYPE_LEVEL | AS_CONTROL_STROBE_ON; } return as3645a_write(flash, AS_CONTROL_REG, reg); } /* * as3645a_set_output - Configure output and operation mode * @flash: Flash controller * @strobe: Strobe the flash (only valid in flash mode) * * Turn the LEDs output on/off and set the operation mode based on the current * parameters. * * The AS3645A can't control the indicator LED independently of the flash/torch * LED. If the flash controller is in V4L2_FLASH_LED_MODE_NONE mode, set the * chip to indicator mode. Otherwise set it to assist light (torch) or flash * mode. * * In indicator and assist modes, turn the output on/off based on the indicator * and torch currents. 
In software strobe flash mode, turn the output on/off * based on the strobe parameter. */ static int as3645a_set_output(struct as3645a *flash, bool strobe) { enum as_mode mode; bool on; switch (flash->led_mode) { case V4L2_FLASH_LED_MODE_NONE: on = flash->indicator_current != 0; mode = AS_MODE_INDICATOR; break; case V4L2_FLASH_LED_MODE_TORCH: on = true; mode = AS_MODE_ASSIST; break; case V4L2_FLASH_LED_MODE_FLASH: on = strobe; mode = AS_MODE_FLASH; break; default: BUG(); } /* Configure output parameters and operation mode. */ return as3645a_set_control(flash, mode, on); } /* ----------------------------------------------------------------------------- * V4L2 controls */ static int as3645a_is_active(struct as3645a *flash) { int ret; ret = as3645a_read(flash, AS_CONTROL_REG); return ret < 0 ? ret : !!(ret & AS_CONTROL_OUT_ON); } static int as3645a_read_fault(struct as3645a *flash) { struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev); int rval; /* NOTE: reading register clear fault status */ rval = as3645a_read(flash, AS_FAULT_INFO_REG); if (rval < 0) return rval; if (rval & AS_FAULT_INFO_INDUCTOR_PEAK_LIMIT) dev_dbg(&client->dev, "Inductor Peak limit fault\n"); if (rval & AS_FAULT_INFO_INDICATOR_LED) dev_dbg(&client->dev, "Indicator LED fault: " "Short circuit or open loop\n"); dev_dbg(&client->dev, "%u connected LEDs\n", rval & AS_FAULT_INFO_LED_AMOUNT ? 
2 : 1); if (rval & AS_FAULT_INFO_TIMEOUT) dev_dbg(&client->dev, "Timeout fault\n"); if (rval & AS_FAULT_INFO_OVER_TEMPERATURE) dev_dbg(&client->dev, "Over temperature fault\n"); if (rval & AS_FAULT_INFO_SHORT_CIRCUIT) dev_dbg(&client->dev, "Short circuit fault\n"); if (rval & AS_FAULT_INFO_OVER_VOLTAGE) dev_dbg(&client->dev, "Over voltage fault: " "Indicates missing capacitor or open connection\n"); return rval; } static int as3645a_get_ctrl(struct v4l2_ctrl *ctrl) { struct as3645a *flash = container_of(ctrl->handler, struct as3645a, ctrls); struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev); int value; switch (ctrl->id) { case V4L2_CID_FLASH_FAULT: value = as3645a_read_fault(flash); if (value < 0) return value; ctrl->cur.val = 0; if (value & AS_FAULT_INFO_SHORT_CIRCUIT) ctrl->cur.val |= V4L2_FLASH_FAULT_SHORT_CIRCUIT; if (value & AS_FAULT_INFO_OVER_TEMPERATURE) ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_TEMPERATURE; if (value & AS_FAULT_INFO_TIMEOUT) ctrl->cur.val |= V4L2_FLASH_FAULT_TIMEOUT; if (value & AS_FAULT_INFO_OVER_VOLTAGE) ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_VOLTAGE; if (value & AS_FAULT_INFO_INDUCTOR_PEAK_LIMIT) ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_CURRENT; if (value & AS_FAULT_INFO_INDICATOR_LED) ctrl->cur.val |= V4L2_FLASH_FAULT_INDICATOR; break; case V4L2_CID_FLASH_STROBE_STATUS: if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) { ctrl->cur.val = 0; break; } value = as3645a_is_active(flash); if (value < 0) return value; ctrl->cur.val = value; break; } dev_dbg(&client->dev, "G_CTRL %08x:%d\n", ctrl->id, ctrl->cur.val); return 0; } static int as3645a_set_ctrl(struct v4l2_ctrl *ctrl) { struct as3645a *flash = container_of(ctrl->handler, struct as3645a, ctrls); struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev); int ret; dev_dbg(&client->dev, "S_CTRL %08x:%d\n", ctrl->id, ctrl->val); /* If a control that doesn't apply to the current mode is modified, * we store the value and return immediately. 
The setting will be * applied when the LED mode is changed. Otherwise we apply the setting * immediately. */ switch (ctrl->id) { case V4L2_CID_FLASH_LED_MODE: if (flash->indicator_current) return -EBUSY; ret = as3645a_set_config(flash); if (ret < 0) return ret; flash->led_mode = ctrl->val; return as3645a_set_output(flash, false); case V4L2_CID_FLASH_STROBE_SOURCE: flash->strobe_source = ctrl->val; /* Applies to flash mode only. */ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) break; return as3645a_set_output(flash, false); case V4L2_CID_FLASH_STROBE: if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) return -EBUSY; return as3645a_set_output(flash, true); case V4L2_CID_FLASH_STROBE_STOP: if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) return -EBUSY; return as3645a_set_output(flash, false); case V4L2_CID_FLASH_TIMEOUT: flash->timeout = ctrl->val; /* Applies to flash mode only. */ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) break; return as3645a_set_config(flash); case V4L2_CID_FLASH_INTENSITY: flash->flash_current = (ctrl->val - AS3645A_FLASH_INTENSITY_MIN) / AS3645A_FLASH_INTENSITY_STEP; /* Applies to flash mode only. */ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) break; return as3645a_set_config(flash); case V4L2_CID_FLASH_TORCH_INTENSITY: flash->assist_current = (ctrl->val - AS3645A_TORCH_INTENSITY_MIN) / AS3645A_TORCH_INTENSITY_STEP; /* Applies to torch mode only. 
*/ if (flash->led_mode != V4L2_FLASH_LED_MODE_TORCH) break; return as3645a_set_config(flash); case V4L2_CID_FLASH_INDICATOR_INTENSITY: if (flash->led_mode != V4L2_FLASH_LED_MODE_NONE) return -EBUSY; flash->indicator_current = (ctrl->val - AS3645A_INDICATOR_INTENSITY_MIN) / AS3645A_INDICATOR_INTENSITY_STEP; ret = as3645a_set_config(flash); if (ret < 0) return ret; if ((ctrl->val == 0) == (ctrl->cur.val == 0)) break; return as3645a_set_output(flash, false); } return 0; } static const struct v4l2_ctrl_ops as3645a_ctrl_ops = { .g_volatile_ctrl = as3645a_get_ctrl, .s_ctrl = as3645a_set_ctrl, }; /* ----------------------------------------------------------------------------- * V4L2 subdev core operations */ /* Put device into know state. */ static int as3645a_setup(struct as3645a *flash) { struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev); int ret; /* clear errors */ ret = as3645a_read(flash, AS_FAULT_INFO_REG); if (ret < 0) return ret; dev_dbg(&client->dev, "Fault info: %02x\n", ret); ret = as3645a_set_config(flash); if (ret < 0) return ret; ret = as3645a_set_output(flash, false); if (ret < 0) return ret; /* read status */ ret = as3645a_read_fault(flash); if (ret < 0) return ret; dev_dbg(&client->dev, "AS_INDICATOR_AND_TIMER_REG: %02x\n", as3645a_read(flash, AS_INDICATOR_AND_TIMER_REG)); dev_dbg(&client->dev, "AS_CURRENT_SET_REG: %02x\n", as3645a_read(flash, AS_CURRENT_SET_REG)); dev_dbg(&client->dev, "AS_CONTROL_REG: %02x\n", as3645a_read(flash, AS_CONTROL_REG)); return ret & ~AS_FAULT_INFO_LED_AMOUNT ? 
-EIO : 0; } static int __as3645a_set_power(struct as3645a *flash, int on) { int ret; if (!on) as3645a_set_control(flash, AS_MODE_EXT_TORCH, false); if (flash->pdata->set_power) { ret = flash->pdata->set_power(&flash->subdev, on); if (ret < 0) return ret; } if (!on) return 0; ret = as3645a_setup(flash); if (ret < 0) { if (flash->pdata->set_power) flash->pdata->set_power(&flash->subdev, 0); } return ret; } static int as3645a_set_power(struct v4l2_subdev *sd, int on) { struct as3645a *flash = to_as3645a(sd); int ret = 0; mutex_lock(&flash->power_lock); if (flash->power_count == !on) { ret = __as3645a_set_power(flash, !!on); if (ret < 0) goto done; } flash->power_count += on ? 1 : -1; WARN_ON(flash->power_count < 0); done: mutex_unlock(&flash->power_lock); return ret; } static int as3645a_registered(struct v4l2_subdev *sd) { struct as3645a *flash = to_as3645a(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int rval, man, model, rfu, version; const char *vendor; /* Power up the flash driver and read manufacturer ID, model ID, RFU * and version. */ rval = as3645a_set_power(&flash->subdev, 1); if (rval < 0) return rval; rval = as3645a_read(flash, AS_DESIGN_INFO_REG); if (rval < 0) goto power_off; man = AS_DESIGN_INFO_FACTORY(rval); model = AS_DESIGN_INFO_MODEL(rval); rval = as3645a_read(flash, AS_VERSION_CONTROL_REG); if (rval < 0) goto power_off; rfu = AS_VERSION_CONTROL_RFU(rval); version = AS_VERSION_CONTROL_VERSION(rval); /* Verify the chip model and version. 
*/ if (model != 0x01 || rfu != 0x00) { dev_err(&client->dev, "AS3645A not detected " "(model %d rfu %d)\n", model, rfu); rval = -ENODEV; goto power_off; } switch (man) { case 1: vendor = "AMS, Austria Micro Systems"; break; case 2: vendor = "ADI, Analog Devices Inc."; break; case 3: vendor = "NSC, National Semiconductor"; break; case 4: vendor = "NXP"; break; case 5: vendor = "TI, Texas Instrument"; break; default: vendor = "Unknown"; } dev_info(&client->dev, "Chip vendor: %s (%d) Version: %d\n", vendor, man, version); rval = as3645a_write(flash, AS_PASSWORD_REG, AS_PASSWORD_UNLOCK_VALUE); if (rval < 0) goto power_off; rval = as3645a_write(flash, AS_BOOST_REG, AS_BOOST_CURRENT_DISABLE); if (rval < 0) goto power_off; /* Setup default values. This makes sure that the chip is in a known * state, in case the power rail can't be controlled. */ rval = as3645a_setup(flash); power_off: as3645a_set_power(&flash->subdev, 0); return rval; } static int as3645a_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { return as3645a_set_power(sd, 1); } static int as3645a_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { return as3645a_set_power(sd, 0); } static const struct v4l2_subdev_core_ops as3645a_core_ops = { .s_power = as3645a_set_power, }; static const struct v4l2_subdev_ops as3645a_ops = { .core = &as3645a_core_ops, }; static const struct v4l2_subdev_internal_ops as3645a_internal_ops = { .registered = as3645a_registered, .open = as3645a_open, .close = as3645a_close, }; /* ----------------------------------------------------------------------------- * I2C driver */ #ifdef CONFIG_PM static int as3645a_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *subdev = i2c_get_clientdata(client); struct as3645a *flash = to_as3645a(subdev); int rval; if (flash->power_count == 0) return 0; rval = __as3645a_set_power(flash, 0); dev_dbg(&client->dev, "Suspend %s\n", rval < 0 ? 
"failed" : "ok"); return rval; } static int as3645a_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *subdev = i2c_get_clientdata(client); struct as3645a *flash = to_as3645a(subdev); int rval; if (flash->power_count == 0) return 0; rval = __as3645a_set_power(flash, 1); dev_dbg(&client->dev, "Resume %s\n", rval < 0 ? "fail" : "ok"); return rval; } #else #define as3645a_suspend NULL #define as3645a_resume NULL #endif /* CONFIG_PM */ /* * as3645a_init_controls - Create controls * @flash: The flash * * The number of LEDs reported in platform data is used to compute default * limits. Parameters passed through platform data can override those limits. */ static int as3645a_init_controls(struct as3645a *flash) { const struct as3645a_platform_data *pdata = flash->pdata; struct v4l2_ctrl *ctrl; int maximum; v4l2_ctrl_handler_init(&flash->ctrls, 10); /* V4L2_CID_FLASH_LED_MODE */ v4l2_ctrl_new_std_menu(&flash->ctrls, &as3645a_ctrl_ops, V4L2_CID_FLASH_LED_MODE, 2, ~7, V4L2_FLASH_LED_MODE_NONE); /* V4L2_CID_FLASH_STROBE_SOURCE */ v4l2_ctrl_new_std_menu(&flash->ctrls, &as3645a_ctrl_ops, V4L2_CID_FLASH_STROBE_SOURCE, pdata->ext_strobe ? 1 : 0, pdata->ext_strobe ? 
~3 : ~1, V4L2_FLASH_STROBE_SOURCE_SOFTWARE); flash->strobe_source = V4L2_FLASH_STROBE_SOURCE_SOFTWARE; /* V4L2_CID_FLASH_STROBE */ v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops, V4L2_CID_FLASH_STROBE, 0, 0, 0, 0); /* V4L2_CID_FLASH_STROBE_STOP */ v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops, V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0); /* V4L2_CID_FLASH_STROBE_STATUS */ ctrl = v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops, V4L2_CID_FLASH_STROBE_STATUS, 0, 1, 1, 1); if (ctrl != NULL) ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; /* V4L2_CID_FLASH_TIMEOUT */ maximum = pdata->timeout_max; v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops, V4L2_CID_FLASH_TIMEOUT, AS3645A_FLASH_TIMEOUT_MIN, maximum, AS3645A_FLASH_TIMEOUT_STEP, maximum); flash->timeout = maximum; /* V4L2_CID_FLASH_INTENSITY */ maximum = pdata->flash_max_current; v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops, V4L2_CID_FLASH_INTENSITY, AS3645A_FLASH_INTENSITY_MIN, maximum, AS3645A_FLASH_INTENSITY_STEP, maximum); flash->flash_current = (maximum - AS3645A_FLASH_INTENSITY_MIN) / AS3645A_FLASH_INTENSITY_STEP; /* V4L2_CID_FLASH_TORCH_INTENSITY */ maximum = pdata->torch_max_current; v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops, V4L2_CID_FLASH_TORCH_INTENSITY, AS3645A_TORCH_INTENSITY_MIN, maximum, AS3645A_TORCH_INTENSITY_STEP, AS3645A_TORCH_INTENSITY_MIN); flash->assist_current = 0; /* V4L2_CID_FLASH_INDICATOR_INTENSITY */ v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops, V4L2_CID_FLASH_INDICATOR_INTENSITY, AS3645A_INDICATOR_INTENSITY_MIN, AS3645A_INDICATOR_INTENSITY_MAX, AS3645A_INDICATOR_INTENSITY_STEP, AS3645A_INDICATOR_INTENSITY_MIN); flash->indicator_current = 0; /* V4L2_CID_FLASH_FAULT */ ctrl = v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops, V4L2_CID_FLASH_FAULT, 0, V4L2_FLASH_FAULT_OVER_VOLTAGE | V4L2_FLASH_FAULT_TIMEOUT | V4L2_FLASH_FAULT_OVER_TEMPERATURE | V4L2_FLASH_FAULT_SHORT_CIRCUIT, 0, 0); if (ctrl != NULL) ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; flash->subdev.ctrl_handler = 
&flash->ctrls; return flash->ctrls.error; } static int as3645a_probe(struct i2c_client *client, const struct i2c_device_id *devid) { struct as3645a *flash; int ret; if (client->dev.platform_data == NULL) return -ENODEV; flash = devm_kzalloc(&client->dev, sizeof(*flash), GFP_KERNEL); if (flash == NULL) return -ENOMEM; flash->pdata = client->dev.platform_data; v4l2_i2c_subdev_init(&flash->subdev, client, &as3645a_ops); flash->subdev.internal_ops = &as3645a_internal_ops; flash->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; ret = as3645a_init_controls(flash); if (ret < 0) goto done; ret = media_entity_pads_init(&flash->subdev.entity, 0, NULL); if (ret < 0) goto done; flash->subdev.entity.function = MEDIA_ENT_F_FLASH; mutex_init(&flash->power_lock); flash->led_mode = V4L2_FLASH_LED_MODE_NONE; done: if (ret < 0) v4l2_ctrl_handler_free(&flash->ctrls); return ret; } static int as3645a_remove(struct i2c_client *client) { struct v4l2_subdev *subdev = i2c_get_clientdata(client); struct as3645a *flash = to_as3645a(subdev); v4l2_device_unregister_subdev(subdev); v4l2_ctrl_handler_free(&flash->ctrls); media_entity_cleanup(&flash->subdev.entity); mutex_destroy(&flash->power_lock); return 0; } static const struct i2c_device_id as3645a_id_table[] = { { AS3645A_NAME, 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, as3645a_id_table); static const struct dev_pm_ops as3645a_pm_ops = { .suspend = as3645a_suspend, .resume = as3645a_resume, }; static struct i2c_driver as3645a_i2c_driver = { .driver = { .name = AS3645A_NAME, .pm = &as3645a_pm_ops, }, .probe = as3645a_probe, .remove = as3645a_remove, .id_table = as3645a_id_table, }; module_i2c_driver(as3645a_i2c_driver); MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); MODULE_DESCRIPTION("LED flash driver for AS3645A, LM3555 and their clones"); MODULE_LICENSE("GPL");
gpl-2.0
ByteInternet/linux-grsec
arch/powerpc/sysdev/fsl_pci.c
553
18909
/* * MPC83xx/85xx/86xx PCI/PCIE support routing. * * Copyright 2007-2011 Freescale Semiconductor, Inc. * Copyright 2008-2009 MontaVista Software, Inc. * * Initial author: Xianghua Xiao <x.xiao@freescale.com> * Recode: ZHANG WEI <wei.zhang@freescale.com> * Rewrite the routing for Frescale PCI and PCI Express * Roy Zang <tie-fei.zang@freescale.com> * MPC83xx PCI-Express support: * Tony Li <tony.li@freescale.com> * Anton Vorontsov <avorontsov@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/log2.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> static int fsl_pcie_bus_fixup, is_mpc83xx_pci; static void __init quirk_fsl_pcie_header(struct pci_dev *dev) { u8 progif; /* if we aren't a PCIe don't bother */ if (!pci_find_capability(dev, PCI_CAP_ID_EXP)) return; /* if we aren't in host mode don't bother */ pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); if (progif & 0x1) return; dev->class = PCI_CLASS_BRIDGE_PCI << 8; fsl_pcie_bus_fixup = 1; return; } static int __init fsl_pcie_check_link(struct pci_controller *hose) { u32 val; early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val); if (val < PCIE_LTSSM_L0) return 1; return 0; } #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) static int __init setup_one_atmu(struct ccsr_pci __iomem *pci, unsigned int index, const struct resource *res, resource_size_t offset) { resource_size_t pci_addr = res->start - offset; resource_size_t phys_addr = res->start; resource_size_t size = 
resource_size(res); u32 flags = 0x80044000; /* enable & mem R/W */ unsigned int i; pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n", (u64)res->start, (u64)size); if (res->flags & IORESOURCE_PREFETCH) flags |= 0x10000000; /* enable relaxed ordering */ for (i = 0; size > 0; i++) { unsigned int bits = min(__ilog2(size), __ffs(pci_addr | phys_addr)); if (index + i >= 5) return -1; out_be32(&pci->pow[index + i].potar, pci_addr >> 12); out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44); out_be32(&pci->pow[index + i].powbar, phys_addr >> 12); out_be32(&pci->pow[index + i].powar, flags | (bits - 1)); pci_addr += (resource_size_t)1U << bits; phys_addr += (resource_size_t)1U << bits; size -= (resource_size_t)1U << bits; } return i; } /* atmu setup for fsl pci/pcie controller */ static void __init setup_pci_atmu(struct pci_controller *hose, struct resource *rsrc) { struct ccsr_pci __iomem *pci; int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4; u64 mem, sz, paddr_hi = 0; u64 paddr_lo = ULLONG_MAX; u32 pcicsrbar = 0, pcicsrbar_sz; u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL | PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP; char *name = hose->dn->full_name; pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n", (u64)rsrc->start, (u64)resource_size(rsrc)); if (of_device_is_compatible(hose->dn, "fsl,qoriq-pcie-v2.2")) { win_idx = 2; start_idx = 0; end_idx = 3; } pci = ioremap(rsrc->start, resource_size(rsrc)); if (!pci) { dev_err(hose->parent, "Unable to map ATMU registers\n"); return; } /* Disable all windows (except powar0 since it's ignored) */ for(i = 1; i < 5; i++) out_be32(&pci->pow[i].powar, 0); for (i = start_idx; i < end_idx; i++) out_be32(&pci->piw[i].piwar, 0); /* Setup outbound MEM window */ for(i = 0, j = 1; i < 3; i++) { if (!(hose->mem_resources[i].flags & IORESOURCE_MEM)) continue; paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start); paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end); n = setup_one_atmu(pci, j, 
&hose->mem_resources[i], hose->pci_mem_offset); if (n < 0 || j >= 5) { pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i); hose->mem_resources[i].flags |= IORESOURCE_DISABLED; } else j += n; } /* Setup outbound IO window */ if (hose->io_resource.flags & IORESOURCE_IO) { if (j >= 5) { pr_err("Ran out of outbound PCI ATMUs for IO resource\n"); } else { pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, " "phy base 0x%016llx.\n", (u64)hose->io_resource.start, (u64)resource_size(&hose->io_resource), (u64)hose->io_base_phys); out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12)); out_be32(&pci->pow[j].potear, 0); out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12)); /* Enable, IO R/W */ out_be32(&pci->pow[j].powar, 0x80088000 | (__ilog2(hose->io_resource.end - hose->io_resource.start + 1) - 1)); } } /* convert to pci address space */ paddr_hi -= hose->pci_mem_offset; paddr_lo -= hose->pci_mem_offset; if (paddr_hi == paddr_lo) { pr_err("%s: No outbound window space\n", name); return ; } if (paddr_lo == 0) { pr_err("%s: No space for inbound window\n", name); return ; } /* setup PCSRBAR/PEXCSRBAR */ early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff); early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz); pcicsrbar_sz = ~pcicsrbar_sz + 1; if (paddr_hi < (0x100000000ull - pcicsrbar_sz) || (paddr_lo > 0x100000000ull)) pcicsrbar = 0x100000000ull - pcicsrbar_sz; else pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz; early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar); paddr_lo = min(paddr_lo, (u64)pcicsrbar); pr_info("%s: PCICSRBAR @ 0x%x\n", name, pcicsrbar); /* Setup inbound mem window */ mem = memblock_end_of_DRAM(); sz = min(mem, paddr_lo); mem_log = __ilog2_u64(sz); /* PCIe can overmap inbound & outbound since RX & TX are separated */ if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) { /* Size window to exact size if power-of-two or one size up */ if ((1ull << mem_log) != mem) { if 
((1ull << mem_log) > mem) pr_info("%s: Setting PCI inbound window " "greater than memory size\n", name); mem_log++; } piwar |= ((mem_log - 1) & PIWAR_SZ_MASK); /* Setup inbound memory window */ out_be32(&pci->piw[win_idx].pitar, 0x00000000); out_be32(&pci->piw[win_idx].piwbar, 0x00000000); out_be32(&pci->piw[win_idx].piwar, piwar); win_idx--; hose->dma_window_base_cur = 0x00000000; hose->dma_window_size = (resource_size_t)sz; } else { u64 paddr = 0; /* Setup inbound memory window */ out_be32(&pci->piw[win_idx].pitar, paddr >> 12); out_be32(&pci->piw[win_idx].piwbar, paddr >> 12); out_be32(&pci->piw[win_idx].piwar, (piwar | (mem_log - 1))); win_idx--; paddr += 1ull << mem_log; sz -= 1ull << mem_log; if (sz) { mem_log = __ilog2_u64(sz); piwar |= (mem_log - 1); out_be32(&pci->piw[win_idx].pitar, paddr >> 12); out_be32(&pci->piw[win_idx].piwbar, paddr >> 12); out_be32(&pci->piw[win_idx].piwar, piwar); win_idx--; paddr += 1ull << mem_log; } hose->dma_window_base_cur = 0x00000000; hose->dma_window_size = (resource_size_t)paddr; } if (hose->dma_window_size < mem) { #ifndef CONFIG_SWIOTLB pr_err("%s: ERROR: Memory size exceeds PCI ATMU ability to " "map - enable CONFIG_SWIOTLB to avoid dma errors.\n", name); #endif /* adjusting outbound windows could reclaim space in mem map */ if (paddr_hi < 0xffffffffull) pr_warning("%s: WARNING: Outbound window cfg leaves " "gaps in memory map. 
Adjusting the memory map " "could reduce unnecessary bounce buffering.\n", name); pr_info("%s: DMA window size is 0x%llx\n", name, (u64)hose->dma_window_size); } iounmap(pci); } static void __init setup_pci_cmd(struct pci_controller *hose) { u16 cmd; int cap_x; early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd); cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO; early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd); cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX); if (cap_x) { int pci_x_cmd = cap_x + PCI_X_CMD; cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ | PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E; early_write_config_word(hose, 0, 0, pci_x_cmd, cmd); } else { early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80); } } void fsl_pcibios_fixup_bus(struct pci_bus *bus) { struct pci_controller *hose = pci_bus_to_host(bus); int i; if ((bus->parent == hose->bus) && ((fsl_pcie_bus_fixup && early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) || (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK))) { for (i = 0; i < 4; ++i) { struct resource *res = bus->resource[i]; struct resource *par = bus->parent->resource[i]; if (res) { res->start = 0; res->end = 0; res->flags = 0; } if (res && par) { res->start = par->start; res->end = par->end; res->flags = par->flags; } } } } int __init fsl_add_bridge(struct device_node *dev, int is_primary) { int len; struct pci_controller *hose; struct resource rsrc; const int *bus_range; u8 progif; if (!of_device_is_available(dev)) { pr_warning("%s: disabled\n", dev->full_name); return -ENODEV; } pr_debug("Adding PCI host bridge %s\n", dev->full_name); /* Fetch host bridge registers address */ if (of_address_to_resource(dev, 0, &rsrc)) { printk(KERN_WARNING "Can't get pci register base!"); return -ENOMEM; } /* Get bus range if any */ bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) printk(KERN_WARNING "Can't get bus-range for %s, assume" " bus 
0\n", dev->full_name); pci_add_flags(PCI_REASSIGN_ALL_BUS); hose = pcibios_alloc_controller(dev); if (!hose) return -ENOMEM; hose->first_busno = bus_range ? bus_range[0] : 0x0; hose->last_busno = bus_range ? bus_range[1] : 0xff; setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4, PPC_INDIRECT_TYPE_BIG_ENDIAN); early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif); if ((progif & 1) == 1) { /* unmap cfg_data & cfg_addr separately if not on same page */ if (((unsigned long)hose->cfg_data & PAGE_MASK) != ((unsigned long)hose->cfg_addr & PAGE_MASK)) iounmap(hose->cfg_data); iounmap(hose->cfg_addr); pcibios_free_controller(hose); return 0; } setup_pci_cmd(hose); /* check PCI express link status */ if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) { hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG | PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS; if (fsl_pcie_check_link(hose)) hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK; } printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. " "Firmware bus number: %d->%d\n", (unsigned long long)rsrc.start, hose->first_busno, hose->last_busno); pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n", hose, hose->cfg_addr, hose->cfg_data); /* Interpret the "ranges" property */ /* This also maps the I/O region and sets isa_io/mem_base */ pci_process_bridge_OF_ranges(hose, dev, is_primary); /* Setup PEX window registers */ setup_pci_atmu(hose, &rsrc); return 0; } #endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_pcie_header); #if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x) struct mpc83xx_pcie_priv { void __iomem *cfg_type0; void __iomem *cfg_type1; u32 dev_base; }; struct pex_inbound_window { u32 ar; u32 tar; u32 barl; u32 barh; }; /* * With the convention of u-boot, the PCIE outbound window 0 serves * as configuration transactions outbound. 
*/ #define PEX_OUTWIN0_BAR 0xCA4 #define PEX_OUTWIN0_TAL 0xCA8 #define PEX_OUTWIN0_TAH 0xCAC #define PEX_RC_INWIN_BASE 0xE60 #define PEX_RCIWARn_EN 0x1 static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn) { struct pci_controller *hose = pci_bus_to_host(bus); if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK) return PCIBIOS_DEVICE_NOT_FOUND; /* * Workaround for the HW bug: for Type 0 configure transactions the * PCI-E controller does not check the device number bits and just * assumes that the device number bits are 0. */ if (bus->number == hose->first_busno || bus->primary == hose->first_busno) { if (devfn & 0xf8) return PCIBIOS_DEVICE_NOT_FOUND; } if (ppc_md.pci_exclude_device) { if (ppc_md.pci_exclude_device(hose, bus->number, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; } return PCIBIOS_SUCCESSFUL; } static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus, unsigned int devfn, int offset) { struct pci_controller *hose = pci_bus_to_host(bus); struct mpc83xx_pcie_priv *pcie = hose->dn->data; u32 dev_base = bus->number << 24 | devfn << 16; int ret; ret = mpc83xx_pcie_exclude_device(bus, devfn); if (ret) return NULL; offset &= 0xfff; /* Type 0 */ if (bus->number == hose->first_busno) return pcie->cfg_type0 + offset; if (pcie->dev_base == dev_base) goto mapped; out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base); pcie->dev_base = dev_base; mapped: return pcie->cfg_type1 + offset; } static int mpc83xx_pcie_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 *val) { void __iomem *cfg_addr; cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset); if (!cfg_addr) return PCIBIOS_DEVICE_NOT_FOUND; switch (len) { case 1: *val = in_8(cfg_addr); break; case 2: *val = in_le16(cfg_addr); break; default: *val = in_le32(cfg_addr); break; } return PCIBIOS_SUCCESSFUL; } static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 val) { struct pci_controller *hose = 
pci_bus_to_host(bus); void __iomem *cfg_addr; cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset); if (!cfg_addr) return PCIBIOS_DEVICE_NOT_FOUND; /* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */ if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno) val &= 0xffffff00; switch (len) { case 1: out_8(cfg_addr, val); break; case 2: out_le16(cfg_addr, val); break; default: out_le32(cfg_addr, val); break; } return PCIBIOS_SUCCESSFUL; } static struct pci_ops mpc83xx_pcie_ops = { .read = mpc83xx_pcie_read_config, .write = mpc83xx_pcie_write_config, }; static int __init mpc83xx_pcie_setup(struct pci_controller *hose, struct resource *reg) { struct mpc83xx_pcie_priv *pcie; u32 cfg_bar; int ret = -ENOMEM; pcie = zalloc_maybe_bootmem(sizeof(*pcie), GFP_KERNEL); if (!pcie) return ret; pcie->cfg_type0 = ioremap(reg->start, resource_size(reg)); if (!pcie->cfg_type0) goto err0; cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR); if (!cfg_bar) { /* PCI-E isn't configured. */ ret = -ENODEV; goto err1; } pcie->cfg_type1 = ioremap(cfg_bar, 0x1000); if (!pcie->cfg_type1) goto err1; WARN_ON(hose->dn->data); hose->dn->data = pcie; hose->ops = &mpc83xx_pcie_ops; out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0); out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0); if (fsl_pcie_check_link(hose)) hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK; return 0; err1: iounmap(pcie->cfg_type0); err0: kfree(pcie); return ret; } int __init mpc83xx_add_bridge(struct device_node *dev) { int ret; int len; struct pci_controller *hose; struct resource rsrc_reg; struct resource rsrc_cfg; const int *bus_range; int primary; is_mpc83xx_pci = 1; if (!of_device_is_available(dev)) { pr_warning("%s: disabled by the firmware.\n", dev->full_name); return -ENODEV; } pr_debug("Adding PCI host bridge %s\n", dev->full_name); /* Fetch host bridge registers address */ if (of_address_to_resource(dev, 0, &rsrc_reg)) { printk(KERN_WARNING "Can't get pci register base!\n"); return -ENOMEM; } memset(&rsrc_cfg, 0, 
sizeof(rsrc_cfg)); if (of_address_to_resource(dev, 1, &rsrc_cfg)) { printk(KERN_WARNING "No pci config register base in dev tree, " "using default\n"); /* * MPC83xx supports up to two host controllers * one at 0x8500 has config space registers at 0x8300 * one at 0x8600 has config space registers at 0x8380 */ if ((rsrc_reg.start & 0xfffff) == 0x8500) rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300; else if ((rsrc_reg.start & 0xfffff) == 0x8600) rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380; } /* * Controller at offset 0x8500 is primary */ if ((rsrc_reg.start & 0xfffff) == 0x8500) primary = 1; else primary = 0; /* Get bus range if any */ bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING "Can't get bus-range for %s, assume" " bus 0\n", dev->full_name); } pci_add_flags(PCI_REASSIGN_ALL_BUS); hose = pcibios_alloc_controller(dev); if (!hose) return -ENOMEM; hose->first_busno = bus_range ? bus_range[0] : 0; hose->last_busno = bus_range ? bus_range[1] : 0xff; if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) { ret = mpc83xx_pcie_setup(hose, &rsrc_reg); if (ret) goto err0; } else { setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 4, 0); } printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. 
" "Firmware bus number: %d->%d\n", (unsigned long long)rsrc_reg.start, hose->first_busno, hose->last_busno); pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n", hose, hose->cfg_addr, hose->cfg_data); /* Interpret the "ranges" property */ /* This also maps the I/O region and sets isa_io/mem_base */ pci_process_bridge_OF_ranges(hose, dev, primary); return 0; err0: pcibios_free_controller(hose); return ret; } #endif /* CONFIG_PPC_83xx */ u64 fsl_pci_immrbar_base(struct pci_controller *hose) { #ifdef CONFIG_PPC_83xx if (is_mpc83xx_pci) { struct mpc83xx_pcie_priv *pcie = hose->dn->data; struct pex_inbound_window *in; int i; /* Walk the Root Complex Inbound windows to match IMMR base */ in = pcie->cfg_type0 + PEX_RC_INWIN_BASE; for (i = 0; i < 4; i++) { /* not enabled, skip */ if (!in_le32(&in[i].ar) & PEX_RCIWARn_EN) continue; if (get_immrbase() == in_le32(&in[i].tar)) return (u64)in_le32(&in[i].barh) << 32 | in_le32(&in[i].barl); } printk(KERN_WARNING "could not find PCI BAR matching IMMR\n"); } #endif #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) if (!is_mpc83xx_pci) { u32 base; pci_bus_read_config_dword(hose->bus, PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base); return base; } #endif return 0; }
gpl-2.0
hazard209/Charge_Kernel
kernel/params.c
809
18804
/* Helpers for initial module or kernel cmdline parsing Copyright (C) 2001 Rusty Russell. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/device.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/ctype.h> #if 0 #define DEBUGP printk #else #define DEBUGP(fmt, a...) #endif static inline char dash2underscore(char c) { if (c == '-') return '_'; return c; } static inline int parameq(const char *input, const char *paramname) { unsigned int i; for (i = 0; dash2underscore(input[i]) == paramname[i]; i++) if (input[i] == '\0') return 1; return 0; } static int parse_one(char *param, char *val, struct kernel_param *params, unsigned num_params, int (*handle_unknown)(char *param, char *val)) { unsigned int i; /* Find parameter */ for (i = 0; i < num_params; i++) { if (parameq(param, params[i].name)) { DEBUGP("They are equal! Calling %p\n", params[i].set); return params[i].set(val, &params[i]); } } if (handle_unknown) { DEBUGP("Unknown argument: calling %p\n", handle_unknown); return handle_unknown(param, val); } DEBUGP("Unknown argument `%s'\n", param); return -ENOENT; } /* You can use " around spaces, but can't escape ". */ /* Hyphens and underscores equivalent in parameter names. 
*/ static char *next_arg(char *args, char **param, char **val) { unsigned int i, equals = 0; int in_quote = 0, quoted = 0; char *next; if (*args == '"') { args++; in_quote = 1; quoted = 1; } for (i = 0; args[i]; i++) { if (isspace(args[i]) && !in_quote) break; if (equals == 0) { if (args[i] == '=') equals = i; } if (args[i] == '"') in_quote = !in_quote; } *param = args; if (!equals) *val = NULL; else { args[equals] = '\0'; *val = args + equals + 1; /* Don't include quotes in value. */ if (**val == '"') { (*val)++; if (args[i-1] == '"') args[i-1] = '\0'; } if (quoted && args[i-1] == '"') args[i-1] = '\0'; } if (args[i]) { args[i] = '\0'; next = args + i + 1; } else next = args + i; /* Chew up trailing spaces. */ return skip_spaces(next); } /* Args looks like "foo=bar,bar2 baz=fuz wiz". */ int parse_args(const char *name, char *args, struct kernel_param *params, unsigned num, int (*unknown)(char *param, char *val)) { char *param, *val; DEBUGP("Parsing ARGS: %s\n", args); /* Chew leading spaces */ args = skip_spaces(args); while (*args) { int ret; int irq_was_disabled; args = next_arg(args, &param, &val); irq_was_disabled = irqs_disabled(); ret = parse_one(param, val, params, num, unknown); if (irq_was_disabled && !irqs_disabled()) { printk(KERN_WARNING "parse_args(): option '%s' enabled " "irq's!\n", param); } switch (ret) { case -ENOENT: printk(KERN_ERR "%s: Unknown parameter `%s'\n", name, param); return ret; case -ENOSPC: printk(KERN_ERR "%s: `%s' too large for parameter `%s'\n", name, val ?: "", param); return ret; case 0: break; default: printk(KERN_ERR "%s: `%s' invalid for parameter `%s'\n", name, val ?: "", param); return ret; } } /* All parsed OK. */ return 0; } /* Lazy bastard, eh? 
*/ #define STANDARD_PARAM_DEF(name, type, format, tmptype, strtolfn) \ int param_set_##name(const char *val, struct kernel_param *kp) \ { \ tmptype l; \ int ret; \ \ if (!val) return -EINVAL; \ ret = strtolfn(val, 0, &l); \ if (ret == -EINVAL || ((type)l != l)) \ return -EINVAL; \ *((type *)kp->arg) = l; \ return 0; \ } \ int param_get_##name(char *buffer, struct kernel_param *kp) \ { \ return sprintf(buffer, format, *((type *)kp->arg)); \ } STANDARD_PARAM_DEF(byte, unsigned char, "%c", unsigned long, strict_strtoul); STANDARD_PARAM_DEF(short, short, "%hi", long, strict_strtol); STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, strict_strtoul); STANDARD_PARAM_DEF(int, int, "%i", long, strict_strtol); STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, strict_strtoul); STANDARD_PARAM_DEF(long, long, "%li", long, strict_strtol); STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, strict_strtoul); int param_set_charp(const char *val, struct kernel_param *kp) { if (!val) { printk(KERN_ERR "%s: string parameter expected\n", kp->name); return -EINVAL; } if (strlen(val) > 1024) { printk(KERN_ERR "%s: string parameter too long\n", kp->name); return -ENOSPC; } /* This is a hack. We can't need to strdup in early boot, and we * don't need to; this mangled commandline is preserved. */ if (slab_is_available()) { *(char **)kp->arg = kstrdup(val, GFP_KERNEL); if (!*(char **)kp->arg) return -ENOMEM; } else *(const char **)kp->arg = val; return 0; } int param_get_charp(char *buffer, struct kernel_param *kp) { return sprintf(buffer, "%s", *((char **)kp->arg)); } /* Actually could be a bool or an int, for historical reasons. */ int param_set_bool(const char *val, struct kernel_param *kp) { bool v; /* No equals means "set"... 
*/ if (!val) val = "1"; /* One of =[yYnN01] */ switch (val[0]) { case 'y': case 'Y': case '1': v = true; break; case 'n': case 'N': case '0': v = false; break; default: return -EINVAL; } if (kp->flags & KPARAM_ISBOOL) *(bool *)kp->arg = v; else *(int *)kp->arg = v; return 0; } int param_get_bool(char *buffer, struct kernel_param *kp) { bool val; if (kp->flags & KPARAM_ISBOOL) val = *(bool *)kp->arg; else val = *(int *)kp->arg; /* Y and N chosen as being relatively non-coder friendly */ return sprintf(buffer, "%c", val ? 'Y' : 'N'); } /* This one must be bool. */ int param_set_invbool(const char *val, struct kernel_param *kp) { int ret; bool boolval; struct kernel_param dummy; dummy.arg = &boolval; dummy.flags = KPARAM_ISBOOL; ret = param_set_bool(val, &dummy); if (ret == 0) *(bool *)kp->arg = !boolval; return ret; } int param_get_invbool(char *buffer, struct kernel_param *kp) { return sprintf(buffer, "%c", (*(bool *)kp->arg) ? 'N' : 'Y'); } /* We break the rule and mangle the string. */ static int param_array(const char *name, const char *val, unsigned int min, unsigned int max, void *elem, int elemsize, int (*set)(const char *, struct kernel_param *kp), u16 flags, unsigned int *num) { int ret; struct kernel_param kp; char save; /* Get the name right for errors. */ kp.name = name; kp.arg = elem; kp.flags = flags; /* No equals sign? */ if (!val) { printk(KERN_ERR "%s: expects arguments\n", name); return -EINVAL; } *num = 0; /* We expect a comma-separated list of values. 
*/ do { int len; if (*num == max) { printk(KERN_ERR "%s: can only take %i arguments\n", name, max); return -EINVAL; } len = strcspn(val, ","); /* nul-terminate and parse */ save = val[len]; ((char *)val)[len] = '\0'; ret = set(val, &kp); if (ret != 0) return ret; kp.arg += elemsize; val += len+1; (*num)++; } while (save == ','); if (*num < min) { printk(KERN_ERR "%s: needs at least %i arguments\n", name, min); return -EINVAL; } return 0; } int param_array_set(const char *val, struct kernel_param *kp) { const struct kparam_array *arr = kp->arr; unsigned int temp_num; return param_array(kp->name, val, 1, arr->max, arr->elem, arr->elemsize, arr->set, kp->flags, arr->num ?: &temp_num); } int param_array_get(char *buffer, struct kernel_param *kp) { int i, off, ret; const struct kparam_array *arr = kp->arr; struct kernel_param p; p = *kp; for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) { if (i) buffer[off++] = ','; p.arg = arr->elem + arr->elemsize * i; ret = arr->get(buffer + off, &p); if (ret < 0) return ret; off += ret; } buffer[off] = '\0'; return off; } int param_set_copystring(const char *val, struct kernel_param *kp) { const struct kparam_string *kps = kp->str; if (!val) { printk(KERN_ERR "%s: missing param set value\n", kp->name); return -EINVAL; } if (strlen(val)+1 > kps->maxlen) { printk(KERN_ERR "%s: string doesn't fit in %u chars.\n", kp->name, kps->maxlen-1); return -ENOSPC; } strcpy(kps->string, val); return 0; } int param_get_string(char *buffer, struct kernel_param *kp) { const struct kparam_string *kps = kp->str; return strlcpy(buffer, kps->string, kps->maxlen); } /* sysfs output in /sys/modules/XYZ/parameters/ */ #define to_module_attr(n) container_of(n, struct module_attribute, attr) #define to_module_kobject(n) container_of(n, struct module_kobject, kobj) extern struct kernel_param __start___param[], __stop___param[]; struct param_attribute { struct module_attribute mattr; struct kernel_param *param; }; struct module_param_attrs { 
	unsigned int num;		/* entries used in attrs[] */
	struct attribute_group grp;	/* the sysfs "parameters" group */
	struct param_attribute attrs[0];
};

#ifdef CONFIG_SYSFS
#define to_param_attr(n) container_of(n, struct param_attribute, mattr)

/* sysfs show: delegate to the parameter's get handler, then append '\n'. */
static ssize_t param_attr_show(struct module_attribute *mattr,
			       struct module *mod, char *buf)
{
	int count;
	struct param_attribute *attribute = to_param_attr(mattr);

	if (!attribute->param->get)
		return -EPERM;

	count = attribute->param->get(buf, attribute->param);
	if (count > 0) {
		strcat(buf, "\n");
		++count;
	}
	return count;
}

/* sysfs always hands a nul-terminated string in buf.  We rely on that. */
static ssize_t param_attr_store(struct module_attribute *mattr,
				struct module *owner,
				const char *buf, size_t len)
{
	int err;
	struct param_attribute *attribute = to_param_attr(mattr);

	if (!attribute->param->set)
		return -EPERM;

	err = attribute->param->set(buf, attribute->param);
	if (!err)
		return len;
	return err;
}
#endif

/* Without loadable modules this code is only needed at boot. */
#ifdef CONFIG_MODULES
#define __modinit
#else
#define __modinit __init
#endif

#ifdef CONFIG_SYSFS
/*
 * add_sysfs_param - add a parameter to sysfs
 * @mk: struct module_kobject
 * @kparam: the actual parameter definition to add to sysfs
 * @name: name of parameter
 *
 * Create a kobject if for a (per-module) parameter if mp NULL, and
 * create file in sysfs. Returns an error on out of memory. Always cleans up
 * if there's an error.
 */
static __modinit int add_sysfs_param(struct module_kobject *mk,
				     struct kernel_param *kp,
				     const char *name)
{
	struct module_param_attrs *new;
	struct attribute **attrs;
	int err, num;

	/* We don't bother calling this with invisible parameters. */
	BUG_ON(!kp->perm);

	if (!mk->mp) {
		num = 0;
		attrs = NULL;
	} else {
		num = mk->mp->num;
		attrs = mk->mp->grp.attrs;
	}

	/* Enlarge.
*/
	/* Grow the per-module attrs block by one param_attribute. */
	new = krealloc(mk->mp,
		       sizeof(*mk->mp) +
		       sizeof(mk->mp->attrs[0]) * (num+1),
		       GFP_KERNEL);
	if (!new) {
		kfree(mk->mp);
		err = -ENOMEM;
		goto fail;
	}
	/* num+2: one slot for the new attribute, one for the NULL sentinel. */
	attrs = krealloc(attrs,
			 sizeof(new->grp.attrs[0])*(num+2),
			 GFP_KERNEL);
	if (!attrs) {
		err = -ENOMEM;
		goto fail_free_new;
	}

	/* Sysfs wants everything zeroed. */
	memset(new, 0, sizeof(*new));
	memset(&new->attrs[num], 0, sizeof(new->attrs[num]));
	memset(&attrs[num], 0, sizeof(attrs[num]));
	new->grp.name = "parameters";
	new->grp.attrs = attrs;

	/* Tack new one on the end. */
	sysfs_attr_init(&new->attrs[num].mattr.attr);
	new->attrs[num].param = kp;
	new->attrs[num].mattr.show = param_attr_show;
	new->attrs[num].mattr.store = param_attr_store;
	new->attrs[num].mattr.attr.name = (char *)name;
	new->attrs[num].mattr.attr.mode = kp->perm;
	new->num = num+1;

	/* Fix up all the pointers, since krealloc can move us */
	for (num = 0; num < new->num; num++)
		new->grp.attrs[num] = &new->attrs[num].mattr.attr;
	new->grp.attrs[num] = NULL;

	mk->mp = new;
	return 0;

fail_free_new:
	kfree(new);
fail:
	mk->mp = NULL;
	return err;
}

#ifdef CONFIG_MODULES
/* Free the attrs block built up by add_sysfs_param(). */
static void free_module_param_attrs(struct module_kobject *mk)
{
	kfree(mk->mp->grp.attrs);
	kfree(mk->mp);
	mk->mp = NULL;
}

/*
 * module_param_sysfs_setup - setup sysfs support for one module
 * @mod: module
 * @kparam: module parameters (array)
 * @num_params: number of module parameters
 *
 * Adds sysfs entries for module parameters under
 * /sys/module/[mod->name]/parameters/
 */
int module_param_sysfs_setup(struct module *mod,
			     struct kernel_param *kparam,
			     unsigned int num_params)
{
	int i, err;
	bool params = false;

	for (i = 0; i < num_params; i++) {
		/* perm == 0 means "not visible in sysfs". */
		if (kparam[i].perm == 0)
			continue;
		err = add_sysfs_param(&mod->mkobj, &kparam[i],
				      kparam[i].name);
		if (err)
			return err;
		params = true;
	}

	if (!params)
		return 0;

	/* Create the param group.
*/
	err = sysfs_create_group(&mod->mkobj.kobj, &mod->mkobj.mp->grp);
	if (err)
		free_module_param_attrs(&mod->mkobj);
	return err;
}

/*
 * module_param_sysfs_remove - remove sysfs support for one module
 * @mod: module
 *
 * Remove sysfs entries for module parameters and the corresponding
 * kobject.
 */
void module_param_sysfs_remove(struct module *mod)
{
	if (mod->mkobj.mp) {
		sysfs_remove_group(&mod->mkobj.kobj, &mod->mkobj.mp->grp);
		/* We are positive that no one is using any param
		 * attrs at this point.  Deallocate immediately. */
		free_module_param_attrs(&mod->mkobj);
	}
}
#endif

void destroy_params(const struct kernel_param *params, unsigned num)
{
	/* FIXME: This should free kmalloced charp parameters.  It doesn't. */
}

/*
 * Register one built-in "module"'s parameter in sysfs, creating the
 * module kobject on first use.  @name_skip strips the "modname." prefix
 * from kparam->name to form the sysfs file name.
 */
static void __init kernel_add_sysfs_param(const char *name,
					  struct kernel_param *kparam,
					  unsigned int name_skip)
{
	struct module_kobject *mk;
	struct kobject *kobj;
	int err;

	kobj = kset_find_obj(module_kset, name);
	if (kobj) {
		/* We already have one.  Remove params so we can add more. */
		mk = to_module_kobject(kobj);
		/* We need to remove it before adding parameters. */
		sysfs_remove_group(&mk->kobj, &mk->mp->grp);
	} else {
		mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL);
		BUG_ON(!mk);

		mk->mod = THIS_MODULE;
		mk->kobj.kset = module_kset;
		err = kobject_init_and_add(&mk->kobj, &module_ktype, NULL,
					   "%s", name);
		if (err) {
			kobject_put(&mk->kobj);
			printk(KERN_ERR "Module '%s' failed add to sysfs, "
			       "error number %d\n", name, err);
			printk(KERN_ERR "The system will be unstable now.\n");
			return;
		}
		/* So that exit path is even. */
		kobject_get(&mk->kobj);
	}

	/* These should not fail at boot. */
	err = add_sysfs_param(mk, kparam, kparam->name + name_skip);
	BUG_ON(err);
	err = sysfs_create_group(&mk->kobj, &mk->mp->grp);
	BUG_ON(err);
	kobject_uevent(&mk->kobj, KOBJ_ADD);
	kobject_put(&mk->kobj);
}

/*
 * param_sysfs_builtin - add contents in /sys/parameters for built-in modules
 *
 * Add module_parameters to sysfs for "modules" built into the kernel.
 *
 * The "module" name (KBUILD_MODNAME) is stored before a dot, the
 * "parameter" name is stored behind a dot in kernel_param->name. So,
 * extract the "module" name for all built-in kernel_param-eters,
 * and for all who have the same, call kernel_add_sysfs_param.
 */
static void __init param_sysfs_builtin(void)
{
	struct kernel_param *kp;
	unsigned int name_len;
	char modname[MODULE_NAME_LEN];

	for (kp = __start___param; kp < __stop___param; kp++) {
		char *dot;

		if (kp->perm == 0)
			continue;

		dot = strchr(kp->name, '.');
		if (!dot) {
			/* This happens for core_param() */
			strcpy(modname, "kernel");
			name_len = 0;
		} else {
			/* strlcpy(size) copies size-1 bytes, so modname
			 * ends up without the trailing dot. */
			name_len = dot - kp->name + 1;
			strlcpy(modname, kp->name, name_len);
		}
		kernel_add_sysfs_param(modname, kp, name_len);
	}
}

/* module-related sysfs stuff */

/* Dispatch a kobject attribute show to the underlying module_attribute. */
static ssize_t module_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct module_attribute *attribute;
	struct module_kobject *mk;
	int ret;

	attribute = to_module_attr(attr);
	mk = to_module_kobject(kobj);

	if (!attribute->show)
		return -EIO;

	ret = attribute->show(attribute, mk->mod, buf);

	return ret;
}

/* Store-side twin of module_attr_show(). */
static ssize_t module_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct module_attribute *attribute;
	struct module_kobject *mk;
	int ret;

	attribute = to_module_attr(attr);
	mk = to_module_kobject(kobj);

	if (!attribute->store)
		return -EIO;

	ret = attribute->store(attribute, mk->mod, buf, len);

	return ret;
}

static const struct sysfs_ops module_sysfs_ops = {
	.show = module_attr_show,
	.store = module_attr_store,
};

/* Only kobjects of module_ktype generate uevents. */
static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &module_ktype)
		return 1;
	return 0;
}

static const struct kset_uevent_ops module_uevent_ops = {
	.filter = uevent_filter,
};

struct kset *module_kset;
int module_sysfs_initialized;

struct kobj_type module_ktype = {
	.sysfs_ops =	&module_sysfs_ops,
};

/*
 * param_sysfs_init - wrapper for built-in params support
 */
static int
/* Create the "module" kset and populate built-in parameters at boot. */
__init param_sysfs_init(void)
{
	module_kset = kset_create_and_add("module", &module_uevent_ops, NULL);
	if (!module_kset) {
		printk(KERN_WARNING "%s (%d): error creating kset\n",
			__FILE__, __LINE__);
		return -ENOMEM;
	}
	module_sysfs_initialized = 1;

	param_sysfs_builtin();

	return 0;
}
subsys_initcall(param_sysfs_init);

#endif /* CONFIG_SYSFS */

EXPORT_SYMBOL(param_set_byte);
EXPORT_SYMBOL(param_get_byte);
EXPORT_SYMBOL(param_set_short);
EXPORT_SYMBOL(param_get_short);
EXPORT_SYMBOL(param_set_ushort);
EXPORT_SYMBOL(param_get_ushort);
EXPORT_SYMBOL(param_set_int);
EXPORT_SYMBOL(param_get_int);
EXPORT_SYMBOL(param_set_uint);
EXPORT_SYMBOL(param_get_uint);
EXPORT_SYMBOL(param_set_long);
EXPORT_SYMBOL(param_get_long);
EXPORT_SYMBOL(param_set_ulong);
EXPORT_SYMBOL(param_get_ulong);
EXPORT_SYMBOL(param_set_charp);
EXPORT_SYMBOL(param_get_charp);
EXPORT_SYMBOL(param_set_bool);
EXPORT_SYMBOL(param_get_bool);
EXPORT_SYMBOL(param_set_invbool);
EXPORT_SYMBOL(param_get_invbool);
EXPORT_SYMBOL(param_array_set);
EXPORT_SYMBOL(param_array_get);
EXPORT_SYMBOL(param_set_copystring);
EXPORT_SYMBOL(param_get_string);
gpl-2.0
Kali-/htc-kernel-msm7x30-exp
drivers/gpu/drm/radeon/radeon_cp.c
809
66501
/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */ /* * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Fremont, California. * Copyright 2007 Advanced Micro Devices, Inc. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Kevin E. 
Martin <martin@valinux.com> * Gareth Hughes <gareth@valinux.com> */ #include "drmP.h" #include "drm.h" #include "drm_sarea.h" #include "radeon_drm.h" #include "radeon_drv.h" #include "r300_reg.h" #define RADEON_FIFO_DEBUG 0 /* Firmware Names */ #define FIRMWARE_R100 "radeon/R100_cp.bin" #define FIRMWARE_R200 "radeon/R200_cp.bin" #define FIRMWARE_R300 "radeon/R300_cp.bin" #define FIRMWARE_R420 "radeon/R420_cp.bin" #define FIRMWARE_RS690 "radeon/RS690_cp.bin" #define FIRMWARE_RS600 "radeon/RS600_cp.bin" #define FIRMWARE_R520 "radeon/R520_cp.bin" MODULE_FIRMWARE(FIRMWARE_R100); MODULE_FIRMWARE(FIRMWARE_R200); MODULE_FIRMWARE(FIRMWARE_R300); MODULE_FIRMWARE(FIRMWARE_R420); MODULE_FIRMWARE(FIRMWARE_RS690); MODULE_FIRMWARE(FIRMWARE_RS600); MODULE_FIRMWARE(FIRMWARE_R520); static int radeon_do_cleanup_cp(struct drm_device * dev); static void radeon_do_cp_start(drm_radeon_private_t * dev_priv); u32 radeon_read_ring_rptr(drm_radeon_private_t *dev_priv, u32 off) { u32 val; if (dev_priv->flags & RADEON_IS_AGP) { val = DRM_READ32(dev_priv->ring_rptr, off); } else { val = *(((volatile u32 *) dev_priv->ring_rptr->handle) + (off / sizeof(u32))); val = le32_to_cpu(val); } return val; } u32 radeon_get_ring_head(drm_radeon_private_t *dev_priv) { if (dev_priv->writeback_works) return radeon_read_ring_rptr(dev_priv, 0); else { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return RADEON_READ(R600_CP_RB_RPTR); else return RADEON_READ(RADEON_CP_RB_RPTR); } } void radeon_write_ring_rptr(drm_radeon_private_t *dev_priv, u32 off, u32 val) { if (dev_priv->flags & RADEON_IS_AGP) DRM_WRITE32(dev_priv->ring_rptr, off, val); else *(((volatile u32 *) dev_priv->ring_rptr->handle) + (off / sizeof(u32))) = cpu_to_le32(val); } void radeon_set_ring_head(drm_radeon_private_t *dev_priv, u32 val) { radeon_write_ring_rptr(dev_priv, 0, val); } u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index) { if (dev_priv->writeback_works) { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= 
CHIP_R600) return radeon_read_ring_rptr(dev_priv, R600_SCRATCHOFF(index)); else return radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(index)); } else { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return RADEON_READ(R600_SCRATCH_REG0 + 4*index); else return RADEON_READ(RADEON_SCRATCH_REG0 + 4*index); } } u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr) { u32 ret; if (addr < 0x10000) ret = DRM_READ32(dev_priv->mmio, addr); else { DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, addr); ret = DRM_READ32(dev_priv->mmio, RADEON_MM_DATA); } return ret; } static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) { u32 ret; RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff)); ret = RADEON_READ(R520_MC_IND_DATA); RADEON_WRITE(R520_MC_IND_INDEX, 0); return ret; } static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) { u32 ret; RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff); ret = RADEON_READ(RS480_NB_MC_DATA); RADEON_WRITE(RS480_NB_MC_INDEX, 0xff); return ret; } static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) { u32 ret; RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK)); ret = RADEON_READ(RS690_MC_DATA); RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK); return ret; } static u32 RS600_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) { u32 ret; RADEON_WRITE(RS600_MC_INDEX, ((addr & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0)); ret = RADEON_READ(RS600_MC_DATA); return ret; } static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) { if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) return RS690_READ_MCIND(dev_priv, addr); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) return RS600_READ_MCIND(dev_priv, addr); else return RS480_READ_MCIND(dev_priv, addr); } u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv) { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) return 
RADEON_READ(R700_MC_VM_FB_LOCATION); else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) return RADEON_READ(R600_MC_VM_FB_LOCATION); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION); else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) return RS600_READ_MCIND(dev_priv, RS600_MC_FB_LOCATION); else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION); else return RADEON_READ(RADEON_MC_FB_LOCATION); } static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc) { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) RADEON_WRITE(R700_MC_VM_FB_LOCATION, fb_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) RADEON_WRITE(R600_MC_VM_FB_LOCATION, fb_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc); else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) RS600_WRITE_MCIND(RS600_MC_FB_LOCATION, fb_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc); else RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc); } void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc) { /*R6xx/R7xx: AGP_TOP and BOT are actually 18 bits each */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) { RADEON_WRITE(R700_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */ RADEON_WRITE(R700_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff); } else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { RADEON_WRITE(R600_MC_VM_AGP_BOT, 
agp_loc & 0xffff); /* FIX ME */ RADEON_WRITE(R600_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff); } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc); else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) RS600_WRITE_MCIND(RS600_MC_AGP_LOCATION, agp_loc); else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc); else RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc); } void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base) { u32 agp_base_hi = upper_32_bits(agp_base); u32 agp_base_lo = agp_base & 0xffffffff; u32 r6xx_agp_base = (agp_base >> 22) & 0x3ffff; /* R6xx/R7xx must be aligned to a 4MB boundry */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) RADEON_WRITE(R700_MC_VM_AGP_BASE, r6xx_agp_base); else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) RADEON_WRITE(R600_MC_VM_AGP_BASE, r6xx_agp_base); else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) { R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo); R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi); } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo); RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi); } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) { RS600_WRITE_MCIND(RS600_AGP_BASE, agp_base_lo); RS600_WRITE_MCIND(RS600_AGP_BASE_2, agp_base_hi); } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) { R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo); R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi); } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { 
RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi); } else { RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200) RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi); } } void radeon_enable_bm(struct drm_radeon_private *dev_priv) { u32 tmp; /* Turn on bus mastering */ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { /* rs600/rs690/rs740 */ tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; RADEON_WRITE(RADEON_BUS_CNTL, tmp); } else if (((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV350) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { /* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; RADEON_WRITE(RADEON_BUS_CNTL, tmp); } /* PCIE cards appears to not need this */ } static int RADEON_READ_PLL(struct drm_device * dev, int addr) { drm_radeon_private_t *dev_priv = dev->dev_private; RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f); return RADEON_READ(RADEON_CLOCK_CNTL_DATA); } static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr) { RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff); return RADEON_READ(RADEON_PCIE_DATA); } #if RADEON_FIFO_DEBUG static void radeon_status(drm_radeon_private_t * dev_priv) { printk("%s:\n", __func__); printk("RBBM_STATUS = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_RBBM_STATUS)); printk("CP_RB_RTPR = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR)); printk("CP_RB_WTPR = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR)); printk("AIC_CNTL = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_AIC_CNTL)); printk("AIC_STAT = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_AIC_STAT)); printk("AIC_PT_BASE = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE)); printk("TLB_ADDR = 
0x%08x\n", (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR)); printk("TLB_DATA = 0x%08x\n", (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA)); } #endif /* ================================================================ * Engine, FIFO control */ static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv) { u32 tmp; int i; dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT); tmp |= RADEON_RB3D_DC_FLUSH_ALL; RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp); for (i = 0; i < dev_priv->usec_timeout; i++) { if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT) & RADEON_RB3D_DC_BUSY)) { return 0; } DRM_UDELAY(1); } } else { /* don't flush or purge cache here or lockup */ return 0; } #if RADEON_FIFO_DEBUG DRM_ERROR("failed!\n"); radeon_status(dev_priv); #endif return -EBUSY; } static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) { int i; dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; for (i = 0; i < dev_priv->usec_timeout; i++) { int slots = (RADEON_READ(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK); if (slots >= entries) return 0; DRM_UDELAY(1); } DRM_DEBUG("wait for fifo failed status : 0x%08X 0x%08X\n", RADEON_READ(RADEON_RBBM_STATUS), RADEON_READ(R300_VAP_CNTL_STATUS)); #if RADEON_FIFO_DEBUG DRM_ERROR("failed!\n"); radeon_status(dev_priv); #endif return -EBUSY; } static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) { int i, ret; dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; ret = radeon_do_wait_for_fifo(dev_priv, 64); if (ret) return ret; for (i = 0; i < dev_priv->usec_timeout; i++) { if (!(RADEON_READ(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)) { radeon_do_pixcache_flush(dev_priv); return 0; } DRM_UDELAY(1); } DRM_DEBUG("wait idle failed status : 0x%08X 0x%08X\n", RADEON_READ(RADEON_RBBM_STATUS), RADEON_READ(R300_VAP_CNTL_STATUS)); #if RADEON_FIFO_DEBUG DRM_ERROR("failed!\n"); radeon_status(dev_priv); #endif return -EBUSY; } 
/* Detect and program the GB (raster) and Z pipe configuration on R3xx+. */
static void radeon_init_pipes(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	uint32_t gb_tile_config, gb_pipe_sel = 0;

	/* RV530 reports its Z-pipe count in GB_PIPE_SELECT2. */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
		uint32_t z_pipe_sel = RADEON_READ(RV530_GB_PIPE_SELECT2);
		if ((z_pipe_sel & 3) == 3)
			dev_priv->num_z_pipes = 2;
		else
			dev_priv->num_z_pipes = 1;
	} else
		dev_priv->num_z_pipes = 1;

	/* RS4xx/RS6xx/R4xx/R5xx */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
		gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
		dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
		/* SE cards have 1 pipe */
		if ((dev->pdev->device == 0x5e4c) ||
		    (dev->pdev->device == 0x5e4f))
			dev_priv->num_gb_pipes = 1;
	} else {
		/* R3xx */
		if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
		     dev->pdev->device != 0x4144) ||
		    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 &&
		     dev->pdev->device != 0x4148)) {
			dev_priv->num_gb_pipes = 2;
		} else {
			/* RV3xx/R300 AD/R350 AH */
			dev_priv->num_gb_pipes = 1;
		}
	}
	DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes);

	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/);

	switch (dev_priv->num_gb_pipes) {
	case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break;
	case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break;
	case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break;
	default:
	case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break;
	}

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
		RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE,
				 (1 | ((gb_pipe_sel >> 8) & 0xf) << 4));
		RADEON_WRITE(R300_SU_REG_DEST,
			     ((1 << dev_priv->num_gb_pipes) - 1));
	}
	RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config);
	radeon_do_wait_for_idle(dev_priv);
	RADEON_WRITE(R300_DST_PIPE_CONFIG,
		     RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG);
	RADEON_WRITE(R300_RB2D_DSTCACHE_MODE,
		     (RADEON_READ(R300_RB2D_DSTCACHE_MODE) |
		      R300_DC_AUTOFLUSH_ENABLE |
		      R300_DC_DC_DISABLE_IGNORE_PE));
}

/*
================================================================ * CP control, initialization */ /* Load the microcode for the CP */ static int radeon_cp_init_microcode(drm_radeon_private_t *dev_priv) { struct platform_device *pdev; const char *fw_name = NULL; int err; DRM_DEBUG("\n"); pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); err = IS_ERR(pdev); if (err) { printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); return -EINVAL; } if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) { DRM_INFO("Loading R100 Microcode\n"); fw_name = FIRMWARE_R100; } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) { DRM_INFO("Loading R200 Microcode\n"); fw_name = FIRMWARE_R200; } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { DRM_INFO("Loading R300 Microcode\n"); fw_name = FIRMWARE_R300; } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R423) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) { DRM_INFO("Loading R400 Microcode\n"); fw_name = FIRMWARE_R420; } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { DRM_INFO("Loading RS690/RS740 Microcode\n"); fw_name = FIRMWARE_RS690; } else if 
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) { DRM_INFO("Loading RS600 Microcode\n"); fw_name = FIRMWARE_RS600; } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) { DRM_INFO("Loading R500 Microcode\n"); fw_name = FIRMWARE_R520; } err = request_firmware(&dev_priv->me_fw, fw_name, &pdev->dev); platform_device_unregister(pdev); if (err) { printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n", fw_name); } else if (dev_priv->me_fw->size % 8) { printk(KERN_ERR "radeon_cp: Bogus length %zu in firmware \"%s\"\n", dev_priv->me_fw->size, fw_name); err = -EINVAL; release_firmware(dev_priv->me_fw); dev_priv->me_fw = NULL; } return err; } static void radeon_cp_load_microcode(drm_radeon_private_t *dev_priv) { const __be32 *fw_data; int i, size; radeon_do_wait_for_idle(dev_priv); if (dev_priv->me_fw) { size = dev_priv->me_fw->size / 4; fw_data = (const __be32 *)&dev_priv->me_fw->data[0]; RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0); for (i = 0; i < size; i += 2) { RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, be32_to_cpup(&fw_data[i])); RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, be32_to_cpup(&fw_data[i + 1])); } } } /* Flush any pending commands to the CP. This should only be used just * prior to a wait for idle, as it informs the engine that the command * stream is ending. */ static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv) { DRM_DEBUG("\n"); #if 0 u32 tmp; tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31); RADEON_WRITE(RADEON_CP_RB_WPTR, tmp); #endif } /* Wait for the CP to go idle. 
*/
/* Emit cache purges plus a wait-until-idle, then poll the engine idle. */
int radeon_do_cp_idle(drm_radeon_private_t * dev_priv)
{
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(6);

	RADEON_PURGE_CACHE();
	RADEON_PURGE_ZCACHE();
	RADEON_WAIT_UNTIL_IDLE();

	ADVANCE_RING();
	COMMIT_RING();

	return radeon_do_wait_for_idle(dev_priv);
}

/* Start the Command Processor.
 */
static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
{
	RING_LOCALS;
	DRM_DEBUG("\n");

	radeon_do_wait_for_idle(dev_priv);

	RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode);

	dev_priv->cp_running = 1;

	/* on r420, any DMA from CP to system memory while 2D is active
	 * can cause a hang. workaround is to queue a CP RESYNC token
	 */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) {
		BEGIN_RING(3);
		OUT_RING(CP_PACKET0(R300_CP_RESYNC_ADDR, 1));
		OUT_RING(5); /* scratch reg 5 */
		OUT_RING(0xdeadbeef);
		ADVANCE_RING();
		COMMIT_RING();
	}

	BEGIN_RING(8);
	/* isync can only be written through cp on r5xx write it here */
	OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
	OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
		 RADEON_ISYNC_ANY3D_IDLE2D |
		 RADEON_ISYNC_WAIT_IDLEGUI |
		 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	RADEON_PURGE_CACHE();
	RADEON_PURGE_ZCACHE();
	RADEON_WAIT_UNTIL_IDLE();
	ADVANCE_RING();
	COMMIT_RING();

	dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
}

/* Reset the Command Processor.  This will not flush any pending
 * commands, so you must wait for the CP command stream to complete
 * before calling this routine.
 */
static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv)
{
	u32 cur_read_ptr;
	DRM_DEBUG("\n");

	/* Park the write pointer at the current read pointer so the ring
	 * is empty from the CP's point of view. */
	cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
	RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
	SET_RING_HEAD(dev_priv, cur_read_ptr);
	dev_priv->ring.tail = cur_read_ptr;
}

/* Stop the Command Processor.  This will not flush any pending
 * commands, so you must flush the command stream and wait for the CP
 * to go idle before calling this routine.
/* Stop the Command Processor.
 *
 * On R420 a pending CP_RESYNC token must be flushed first (writes a
 * RB3D_DC_FINISH through the ring and waits for idle) before the CP
 * queues are disabled; finally marks the CP as not running.
 */
static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv)
{
	RING_LOCALS;
	DRM_DEBUG("\n");

	/* finish the pending CP_RESYNC token */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) {
		BEGIN_RING(2);
		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
		OUT_RING(R300_RB3D_DC_FINISH);
		ADVANCE_RING();
		COMMIT_RING();
		radeon_do_wait_for_idle(dev_priv);
	}

	/* Disable both the primary and indirect CP command queues. */
	RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS);

	dev_priv->cp_running = 0;
}

/* Reset the engine.  This will stop the CP if it is running.
 *
 * Pulses the RBBM soft-reset bits for all engine sub-blocks (set then
 * clear, with register read-backs to post the writes).  On <= RV410
 * parts the memory clocks are forced on around the reset and the saved
 * clock state is restored afterwards.  Ends by resetting the CP ring
 * and the DMA buffer freelist.
 */
static int radeon_do_engine_reset(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset;
	DRM_DEBUG("\n");

	radeon_do_pixcache_flush(dev_priv);

	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
		/* may need something similar for newer chips */
		clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
		mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);

		/* Force all memory clocks on so the reset propagates. */
		RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
						    RADEON_FORCEON_MCLKA |
						    RADEON_FORCEON_MCLKB |
						    RADEON_FORCEON_YCLKA |
						    RADEON_FORCEON_YCLKB |
						    RADEON_FORCEON_MC |
						    RADEON_FORCEON_AIC));
	}

	rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);

	/* Assert soft reset on every engine sub-block... */
	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
					      RADEON_SOFT_RESET_CP |
					      RADEON_SOFT_RESET_HI |
					      RADEON_SOFT_RESET_SE |
					      RADEON_SOFT_RESET_RE |
					      RADEON_SOFT_RESET_PP |
					      RADEON_SOFT_RESET_E2 |
					      RADEON_SOFT_RESET_RB));
	RADEON_READ(RADEON_RBBM_SOFT_RESET);	/* read back to post the write */
	/* ...then deassert it again. */
	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
					      ~(RADEON_SOFT_RESET_CP |
						RADEON_SOFT_RESET_HI |
						RADEON_SOFT_RESET_SE |
						RADEON_SOFT_RESET_RE |
						RADEON_SOFT_RESET_PP |
						RADEON_SOFT_RESET_E2 |
						RADEON_SOFT_RESET_RB)));
	RADEON_READ(RADEON_RBBM_SOFT_RESET);	/* read back to post the write */

	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
		/* Restore the clock state saved before the reset. */
		RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
		RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
		RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
	}

	/* setup the raster pipes */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
		radeon_init_pipes(dev);

	/* Reset the CP ring */
	radeon_do_cp_reset(dev_priv);

	/* The CP is no longer running after an engine reset */
	dev_priv->cp_running = 0;

	/* Reset any pending vertex, indirect buffers */
	radeon_freelist_reset(dev);

	return 0;
}

/* Program the CP ring buffer registers: base address (AGP- or
 * SG-relative depending on bus type), read/write pointers, ring size,
 * scratch-register writeback area, then clear the age/frame scratch
 * values and their sarea mirrors, and finally the GUI sync control.
 * The register write order follows the hardware bring-up sequence.
 */
static void radeon_cp_init_ring_buffer(struct drm_device * dev,
				       drm_radeon_private_t *dev_priv,
				       struct drm_file *file_priv)
{
	struct drm_radeon_master_private *master_priv;
	u32 ring_start, cur_read_ptr;

	/* Initialize the memory controller. With new memory map, the fb location
	 * is not changed, it should have been properly initialized already. Part
	 * of the problem is that the code below is bogus, assuming the GART is
	 * always appended to the fb which is not necessarily the case
	 */
	if (!dev_priv->new_memmap)
		radeon_write_fb_location(dev_priv,
					 ((dev_priv->gart_vm_start - 1) & 0xffff0000)
					 | (dev_priv->fb_location >> 16));

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		radeon_write_agp_base(dev_priv, dev->agp->base);

		radeon_write_agp_location(dev_priv,
					  (((dev_priv->gart_vm_start - 1 +
					     dev_priv->gart_size) & 0xffff0000) |
					   (dev_priv->gart_vm_start >> 16)));

		/* Ring base is a GART address: card offset relative to the
		 * AGP aperture base. */
		ring_start = (dev_priv->cp_ring->offset
			      - dev->agp->base
			      + dev_priv->gart_vm_start);
	} else
#endif
		/* PCI(E): relative to the scatter/gather area instead. */
		ring_start = (dev_priv->cp_ring->offset
			      - (unsigned long)dev->sg->virtual
			      + dev_priv->gart_vm_start);

	RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);

	/* Set the write pointer delay */
	RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
	RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
	SET_RING_HEAD(dev_priv, cur_read_ptr);
	dev_priv->ring.tail = cur_read_ptr;

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
			     dev_priv->ring_rptr->offset
			     - dev->agp->base + dev_priv->gart_vm_start);
	} else
#endif
	{
		RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
			     dev_priv->ring_rptr->offset
			     - ((unsigned long) dev->sg->virtual)
			     + dev_priv->gart_vm_start);
	}

	/* Set ring buffer size (log2 fields packed into CP_RB_CNTL) */
#ifdef __BIG_ENDIAN
	RADEON_WRITE(RADEON_CP_RB_CNTL,
		     RADEON_BUF_SWAP_32BIT |
		     (dev_priv->ring.fetch_size_l2ow << 18) |
		     (dev_priv->ring.rptr_update_l2qw << 8) |
		     dev_priv->ring.size_l2qw);
#else
	RADEON_WRITE(RADEON_CP_RB_CNTL,
		     (dev_priv->ring.fetch_size_l2ow << 18) |
		     (dev_priv->ring.rptr_update_l2qw << 8) |
		     dev_priv->ring.size_l2qw);
#endif

	/* Initialize the scratch register pointer.  This will cause
	 * the scratch register values to be written out to memory
	 * whenever they are updated.
	 *
	 * We simply put this behind the ring read pointer, this works
	 * with PCI GART as well as (whatever kind of) AGP GART
	 */
	RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
		     + RADEON_SCRATCH_REG_OFFSET);

	RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);

	radeon_enable_bm(dev_priv);

	/* Zero the age scratch values and their register mirrors. */
	radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(0), 0);
	RADEON_WRITE(RADEON_LAST_FRAME_REG, 0);

	radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0);
	RADEON_WRITE(RADEON_LAST_DISPATCH_REG, 0);

	radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(2), 0);
	RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0);

	/* reset sarea copies of these */
	master_priv = file_priv->master->driver_priv;
	if (master_priv->sarea_priv) {
		master_priv->sarea_priv->last_frame = 0;
		master_priv->sarea_priv->last_dispatch = 0;
		master_priv->sarea_priv->last_clear = 0;
	}

	radeon_do_wait_for_idle(dev_priv);

	/* Sync everything up */
	RADEON_WRITE(RADEON_ISYNC_CNTL,
		     (RADEON_ISYNC_ANY2D_IDLE3D |
		      RADEON_ISYNC_ANY3D_IDLE2D |
		      RADEON_ISYNC_WAIT_IDLEGUI |
		      RADEON_ISYNC_CPSCRATCH_IDLEGUI));
}
/* Probe whether scratch-register writeback to memory actually works:
 * write a magic value to SCRATCH_REG1 and poll the memory copy for up
 * to usec_timeout microseconds.  Sets dev_priv->writeback_works
 * accordingly (the radeon_no_wb module parameter forces it off) and,
 * when writeback is unusable, disables it in hardware to avoid
 * pointless bus-master traffic.
 */
static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
{
	u32 tmp;

	/* Start with assuming that writeback doesn't work */
	dev_priv->writeback_works = 0;

	/* Writeback doesn't seem to work everywhere, test it here and possibly
	 * enable it if it appears to work */
	radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0);

	RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);

	for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
		u32 val;

		val = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1));
		if (val == 0xdeadbeef)
			break;
		DRM_UDELAY(1);
	}

	if (tmp < dev_priv->usec_timeout) {
		dev_priv->writeback_works = 1;
		DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
	} else {
		dev_priv->writeback_works = 0;
		DRM_INFO("writeback test failed\n");
	}

	/* Module parameter override: user asked for no writeback. */
	if (radeon_no_wb == 1) {
		dev_priv->writeback_works = 0;
		DRM_INFO("writeback forced off\n");
	}

	if (!dev_priv->writeback_works) {
		/* Disable writeback to avoid unnecessary bus master transfer */
		RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) |
			     RADEON_RB_NO_UPDATE);
		RADEON_WRITE(RADEON_SCRATCH_UMSK, 0);
	}
}

/* Enable or disable IGP GART on the chip (RS480-family register
 * interface, also used by RS690/RS740 with an extra enable bit).
 * Programs the GART base/aperture, snoop mode, then performs the
 * wait / invalidate / wait cache-flush handshake required by the
 * hardware.  Note: forces gart_size to the fixed 32MB VA window.
 */
static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
{
	u32 temp;

	if (on) {
		DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
			  dev_priv->gart_vm_start,
			  (long)dev_priv->gart_info.bus_addr,
			  dev_priv->gart_size);

		temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
		if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
		    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
			IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
							     RS690_BLOCK_GFX_D3_EN));
		else
			IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);

		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
							       RS480_VA_SIZE_32MB));

		temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID);
		IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN |
							RS480_TLB_ENABLE |
							RS480_GTW_LAC_EN |
							RS480_1LEVEL_GART));

		/* GART base: low bits page-aligned, bits 39:32 packed at [11:4]. */
		temp = dev_priv->gart_info.bus_addr & 0xfffff000;
		temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4;
		IGP_WRITE_MCIND(RS480_GART_BASE, temp);

		temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL);
		IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) |
						      RS480_REQ_TYPE_SNOOP_DIS));

		radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start);

		/* Hardware VA window is fixed at 32MB on this interface. */
		dev_priv->gart_size = 32*1024*1024;
		temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) &
			 0xffff0000) | (dev_priv->gart_vm_start >> 16));

		radeon_write_agp_location(dev_priv, temp);

		temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE);
		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
							       RS480_VA_SIZE_32MB));

		/* Wait for any in-flight invalidate to finish... */
		do {
			temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
			if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
				break;
			DRM_UDELAY(1);
		} while (1);

		/* ...kick a cache invalidate... */
		IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL,
				RS480_GART_CACHE_INVALIDATE);

		/* ...and wait for it to complete. */
		do {
			temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
			if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
				break;
			DRM_UDELAY(1);
		} while (1);

		IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0);
	} else {
		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
	}
}
/* Enable or disable IGP GART on the chip (RS600 register interface).
 * Configures the page-table client controls, a single flat-format page
 * table context (contexts 1-7 disabled), the page-table and system
 * apertures, then enables translation and pulses the L1-TLB/L2-cache
 * invalidate bits to flush stale translations.
 */
static void rs600_set_igpgart(drm_radeon_private_t *dev_priv, int on)
{
	u32 temp;
	int i;

	if (on) {
		DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
			  dev_priv->gart_vm_start,
			  (long)dev_priv->gart_info.bus_addr,
			  dev_priv->gart_size);

		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
						    RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));

		/* Program all 19 per-client translation controls identically. */
		for (i = 0; i < 19; i++)
			IGP_WRITE_MCIND(RS600_MC_PT0_CLIENT0_CNTL + i,
					(RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
					 RS600_SYSTEM_ACCESS_MODE_IN_SYS |
					 RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH |
					 RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
					 RS600_ENABLE_FRAGMENT_PROCESSING |
					 RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));

		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL, (RS600_ENABLE_PAGE_TABLE |
							     RS600_PAGE_TABLE_TYPE_FLAT));

		/* disable all other contexts */
		for (i = 1; i < 8; i++)
			IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);

		/* setup the page table aperture */
		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
				dev_priv->gart_info.bus_addr);
		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR,
				dev_priv->gart_vm_start);
		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR,
				(dev_priv->gart_vm_start + dev_priv->gart_size - 1));
		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

		/* setup the system aperture */
		IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR,
				dev_priv->gart_vm_start);
		IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR,
				(dev_priv->gart_vm_start + dev_priv->gart_size - 1));

		/* enable page tables */
		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (temp | RS600_ENABLE_PT));

		temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1);
		IGP_WRITE_MCIND(RS600_MC_CNTL1, (temp | RS600_ENABLE_PAGE_TABLES));

		/* invalidate the cache: clear, set, then clear the
		 * invalidate bits to pulse the flush. */
		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
		temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);

		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
		temp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);

		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
		temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);

		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
	} else {
		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, 0);
		temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1);
		temp &= ~RS600_ENABLE_PAGE_TABLES;
		IGP_WRITE_MCIND(RS600_MC_CNTL1, temp);
	}
}

/* Enable or disable the PCIE TX GART: programs the discard-read
 * address, table base and start/end window, then sets or clears the
 * enable bit (the disable path restores the previously-read control
 * value minus the enable bit).
 */
static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
{
	u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
	if (on) {

		DRM_DEBUG("programming pcie %08X %08lX %08X\n",
			  dev_priv->gart_vm_start,
			  (long)dev_priv->gart_info.bus_addr,
			  dev_priv->gart_size);
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO,
				  dev_priv->gart_vm_start);
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE,
				  dev_priv->gart_info.bus_addr);
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO,
				  dev_priv->gart_vm_start);
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO,
				  dev_priv->gart_vm_start +
				  dev_priv->gart_size - 1);

		radeon_write_agp_location(dev_priv, 0xffffffc0);	/* ?? */

		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
				  RADEON_PCIE_TX_GART_EN);
	} else {
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
				  tmp & ~RADEON_PCIE_TX_GART_EN);
	}
}

/* Enable or disable PCI GART on the chip.
 *
 * Dispatches to the bus-specific implementation (RS480/RS690/RS740
 * IGP, RS600 IGP, or PCIE); the fallback is the classic AIC-based PCI
 * GART programmed directly here.
 */
static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
{
	u32 tmp;

	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) ||
	    (dev_priv->flags & RADEON_IS_IGPGART)) {
		radeon_set_igpgart(dev_priv, on);
		return;
	}

	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
		rs600_set_igpgart(dev_priv, on);
		return;
	}

	if (dev_priv->flags & RADEON_IS_PCIE) {
		radeon_set_pciegart(dev_priv, on);
		return;
	}

	tmp = RADEON_READ(RADEON_AIC_CNTL);

	if (on) {
		RADEON_WRITE(RADEON_AIC_CNTL,
			     tmp | RADEON_PCIGART_TRANSLATE_EN);

		/* set PCI GART page-table base address */
		RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr);

		/* set address range for PCI address translate */
		RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start);
		RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start
			     + dev_priv->gart_size - 1);

		/* Turn off AGP aperture -- is this required for PCI GART? */
		radeon_write_agp_location(dev_priv, 0xffffffc0);
		RADEON_WRITE(RADEON_AGP_COMMAND, 0);	/* clear AGP_COMMAND */
	} else {
		RADEON_WRITE(RADEON_AIC_CNTL,
			     tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	}
}
*/ radeon_write_agp_location(dev_priv, 0xffffffc0); RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */ } else { RADEON_WRITE(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN); } } static int radeon_setup_pcigart_surface(drm_radeon_private_t *dev_priv) { struct drm_ati_pcigart_info *gart_info = &dev_priv->gart_info; struct radeon_virt_surface *vp; int i; for (i = 0; i < RADEON_MAX_SURFACES * 2; i++) { if (!dev_priv->virt_surfaces[i].file_priv || dev_priv->virt_surfaces[i].file_priv == PCIGART_FILE_PRIV) break; } if (i >= 2 * RADEON_MAX_SURFACES) return -ENOMEM; vp = &dev_priv->virt_surfaces[i]; for (i = 0; i < RADEON_MAX_SURFACES; i++) { struct radeon_surface *sp = &dev_priv->surfaces[i]; if (sp->refcount) continue; vp->surface_index = i; vp->lower = gart_info->bus_addr; vp->upper = vp->lower + gart_info->table_size; vp->flags = 0; vp->file_priv = PCIGART_FILE_PRIV; sp->refcount = 1; sp->lower = vp->lower; sp->upper = vp->upper; sp->flags = 0; RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, sp->flags); RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * i, sp->lower); RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * i, sp->upper); return 0; } return -ENOMEM; } static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; DRM_DEBUG("\n"); /* if we require new memory map but we don't have it fail */ if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) { DRM_DEBUG("Forcing AGP card to PCI mode\n"); dev_priv->flags &= ~RADEON_IS_AGP; } else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE)) && !init->is_pci) { DRM_DEBUG("Restoring AGP flag\n"); dev_priv->flags |= 
/* Main CP initialization, driven by the RADEON_CP_INIT ioctl.
 *
 * Validates the init parameters, records framebuffer/ring/GART layout
 * in dev_priv, locates the DRM maps created by userspace, computes the
 * GART placement (new or old memory map), sets up the PCI/PCIE/IGP
 * GART when not on AGP, loads the CP microcode and programs the ring.
 * Every error path calls radeon_do_cleanup_cp() before returning a
 * negative errno.
 */
static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
			     struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;

	DRM_DEBUG("\n");

	/* if we require new memory map but we don't have it fail */
	if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
		DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
		DRM_DEBUG("Forcing AGP card to PCI mode\n");
		dev_priv->flags &= ~RADEON_IS_AGP;
	} else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
		   && !init->is_pci) {
		DRM_DEBUG("Restoring AGP flag\n");
		dev_priv->flags |= RADEON_IS_AGP;
	}

	if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
		DRM_ERROR("PCI GART memory not allocated!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	dev_priv->usec_timeout = init->usec_timeout;
	if (dev_priv->usec_timeout < 1 ||
	    dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
		DRM_DEBUG("TIMEOUT problem!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	/* Enable vblank on CRTC1 for older X servers */
	dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;

	switch(init->func) {
	case RADEON_INIT_R200_CP:
		dev_priv->microcode_version = UCODE_R200;
		break;
	case RADEON_INIT_R300_CP:
		dev_priv->microcode_version = UCODE_R300;
		break;
	default:
		dev_priv->microcode_version = UCODE_R100;
	}

	dev_priv->do_boxes = 0;
	dev_priv->cp_mode = init->cp_mode;

	/* We don't support anything other than bus-mastering ring mode,
	 * but the ring can be in either AGP or PCI space for the ring
	 * read pointer.
	 */
	if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
	    (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
		DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	switch (init->fb_bpp) {
	case 16:
		dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
		break;
	case 32:
	default:
		dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
		break;
	}
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;

	switch (init->depth_bpp) {
	case 16:
		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
		break;
	case 32:
	default:
		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
		break;
	}
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	/* Hardware state for depth clears.  Remove this if/when we no
	 * longer clear the depth buffer with a 3D rectangle.  Hard-code
	 * all values to prevent unwanted 3D state from slipping through
	 * and screwing with the clear operation.
	 */
	dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
					   (dev_priv->color_fmt << 10) |
					   (dev_priv->microcode_version ==
					    UCODE_R100 ? RADEON_ZBLOCK16 : 0));

	dev_priv->depth_clear.rb3d_zstencilcntl =
	    (dev_priv->depth_fmt |
	     RADEON_Z_TEST_ALWAYS |
	     RADEON_STENCIL_TEST_ALWAYS |
	     RADEON_STENCIL_S_FAIL_REPLACE |
	     RADEON_STENCIL_ZPASS_REPLACE |
	     RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE);

	dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
					 RADEON_BFACE_SOLID |
					 RADEON_FFACE_SOLID |
					 RADEON_FLAT_SHADE_VTX_LAST |
					 RADEON_DIFFUSE_SHADE_FLAT |
					 RADEON_ALPHA_SHADE_FLAT |
					 RADEON_SPECULAR_SHADE_FLAT |
					 RADEON_FOG_SHADE_FLAT |
					 RADEON_VTX_PIX_CENTER_OGL |
					 RADEON_ROUND_MODE_TRUNC |
					 RADEON_ROUND_PREC_8TH_PIX);

	dev_priv->ring_offset = init->ring_offset;
	dev_priv->ring_rptr_offset = init->ring_rptr_offset;
	dev_priv->buffers_offset = init->buffers_offset;
	dev_priv->gart_textures_offset = init->gart_textures_offset;

	/* Locate the DRM maps that userspace created for us. */
	master_priv->sarea = drm_getsarea(dev);
	if (!master_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
	if (!dev_priv->cp_ring) {
		DRM_ERROR("could not find cp ring region!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}
	dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
	if (!dev_priv->ring_rptr) {
		DRM_ERROR("could not find ring read pointer!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}
	dev->agp_buffer_token = init->buffers_offset;
	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
	if (!dev->agp_buffer_map) {
		DRM_ERROR("could not find dma buffer region!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	if (init->gart_textures_offset) {
		dev_priv->gart_textures =
		    drm_core_findmap(dev, init->gart_textures_offset);
		if (!dev_priv->gart_textures) {
			DRM_ERROR("could not find GART texture region!\n");
			radeon_do_cleanup_cp(dev);
			return -EINVAL;
		}
	}

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* AGP maps must be ioremapped (write-combined). */
		drm_core_ioremap_wc(dev_priv->cp_ring, dev);
		drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
		drm_core_ioremap_wc(dev->agp_buffer_map, dev);
		if (!dev_priv->cp_ring->handle ||
		    !dev_priv->ring_rptr->handle ||
		    !dev->agp_buffer_map->handle) {
			DRM_ERROR("could not find ioremap agp regions!\n");
			radeon_do_cleanup_cp(dev);
			return -EINVAL;
		}
	} else
#endif
	{
		dev_priv->cp_ring->handle =
			(void *)(unsigned long)dev_priv->cp_ring->offset;
		dev_priv->ring_rptr->handle =
			(void *)(unsigned long)dev_priv->ring_rptr->offset;
		dev->agp_buffer_map->handle =
			(void *)(unsigned long)dev->agp_buffer_map->offset;

		DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
			  dev_priv->cp_ring->handle);
		DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
			  dev_priv->ring_rptr->handle);
		DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
			  dev->agp_buffer_map->handle);
	}

	/* fb location/size come from the MC register (64K granularity). */
	dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
	dev_priv->fb_size =
		((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
		- dev_priv->fb_location;

	dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
					((dev_priv->front_offset
					  + dev_priv->fb_location) >> 10));

	dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
				       ((dev_priv->back_offset
					 + dev_priv->fb_location) >> 10));

	dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
					((dev_priv->depth_offset
					  + dev_priv->fb_location) >> 10));

	dev_priv->gart_size = init->gart_size;

	/* New let's set the memory map ... */
	if (dev_priv->new_memmap) {
		u32 base = 0;

		DRM_INFO("Setting GART location based on new memory map\n");

		/* If using AGP, try to locate the AGP aperture at the same
		 * location in the card and on the bus, though we have to
		 * align it down.
		 */
#if __OS_HAS_AGP
		if (dev_priv->flags & RADEON_IS_AGP) {
			base = dev->agp->base;
			/* Check if valid */
			if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
			    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
				DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
					 dev->agp->base);
				base = 0;
			}
		}
#endif
		/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
		if (base == 0) {
			base = dev_priv->fb_location + dev_priv->fb_size;
			if (base < dev_priv->fb_location ||
			    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
				base = dev_priv->fb_location
					- dev_priv->gart_size;
		}
		dev_priv->gart_vm_start = base & 0xffc00000u;
		if (dev_priv->gart_vm_start != base)
			DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
				 base, dev_priv->gart_vm_start);
	} else {
		DRM_INFO("Setting GART location based on old memory map\n");
		dev_priv->gart_vm_start = dev_priv->fb_location +
			RADEON_READ(RADEON_CONFIG_APER_SIZE);
	}

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP)
		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
						 - dev->agp->base
						 + dev_priv->gart_vm_start);
	else
#endif
		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
					- (unsigned long)dev->sg->virtual
					+ dev_priv->gart_vm_start);

	DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
	DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
	DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
		  dev_priv->gart_buffers_offset);

	dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
	dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
			      + init->ring_size / sizeof(u32));
	dev_priv->ring.size = init->ring_size;
	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);

	dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
	dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);

	dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
	dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;

	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* Turn off PCI GART */
		radeon_set_pcigart(dev_priv, 0);
	} else
#endif
	{
		u32 sctrl;
		int ret;

		dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
		/* if we have an offset set from userspace */
		if (dev_priv->pcigart_offset_set) {
			/* GART table lives in framebuffer memory. */
			dev_priv->gart_info.bus_addr =
				(resource_size_t)dev_priv->pcigart_offset + dev_priv->fb_location;
			dev_priv->gart_info.mapping.offset =
				dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
			dev_priv->gart_info.mapping.size =
				dev_priv->gart_info.table_size;

			drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
			dev_priv->gart_info.addr =
				dev_priv->gart_info.mapping.handle;

			if (dev_priv->flags & RADEON_IS_PCIE)
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
			else
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
			dev_priv->gart_info.gart_table_location =
				DRM_ATI_GART_FB;

			DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
				  dev_priv->gart_info.addr,
				  dev_priv->pcigart_offset);
		} else {
			/* GART table in main (system) memory. */
			if (dev_priv->flags & RADEON_IS_IGPGART)
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
			else
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
			dev_priv->gart_info.gart_table_location =
				DRM_ATI_GART_MAIN;
			dev_priv->gart_info.addr = NULL;
			dev_priv->gart_info.bus_addr = 0;
			if (dev_priv->flags & RADEON_IS_PCIE) {
				DRM_ERROR
				    ("Cannot use PCI Express without GART in FB memory\n");
				radeon_do_cleanup_cp(dev);
				return -EINVAL;
			}
		}

		/* Temporarily disable surfaces while building the table;
		 * drm_ati_pcigart_init() returns nonzero on SUCCESS. */
		sctrl = RADEON_READ(RADEON_SURFACE_CNTL);
		RADEON_WRITE(RADEON_SURFACE_CNTL, 0);
		if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
			ret = r600_page_table_init(dev);
		else
			ret = drm_ati_pcigart_init(dev, &dev_priv->gart_info);
		RADEON_WRITE(RADEON_SURFACE_CNTL, sctrl);

		if (!ret) {
			DRM_ERROR("failed to init PCI GART!\n");
			radeon_do_cleanup_cp(dev);
			return -ENOMEM;
		}

		ret = radeon_setup_pcigart_surface(dev_priv);
		if (ret) {
			DRM_ERROR("failed to setup GART surface!\n");
			if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
				r600_page_table_cleanup(dev, &dev_priv->gart_info);
			else
				drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info);
			radeon_do_cleanup_cp(dev);
			return ret;
		}

		/* Turn on PCI GART */
		radeon_set_pcigart(dev_priv, 1);
	}

	if (!dev_priv->me_fw) {
		int err = radeon_cp_init_microcode(dev_priv);
		if (err) {
			DRM_ERROR("Failed to load firmware!\n");
			radeon_do_cleanup_cp(dev);
			return err;
		}
	}
	radeon_cp_load_microcode(dev_priv);
	radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);

	dev_priv->last_buf = 0;

	radeon_do_engine_reset(dev);
	radeon_test_writeback(dev_priv);

	return 0;
}

/* Undo radeon_do_init_cp(): disable interrupts, release the AGP
 * ioremaps or tear down the PCI GART (including an FB-resident table
 * mapping), and zero dev_priv up to (but not including) the flags
 * field so bus-type flags survive for a later re-init.
 */
static int radeon_do_cleanup_cp(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		if (dev_priv->cp_ring != NULL) {
			drm_core_ioremapfree(dev_priv->cp_ring, dev);
			dev_priv->cp_ring = NULL;
		}
		if (dev_priv->ring_rptr != NULL) {
			drm_core_ioremapfree(dev_priv->ring_rptr, dev);
			dev_priv->ring_rptr = NULL;
		}
		if (dev->agp_buffer_map != NULL) {
			drm_core_ioremapfree(dev->agp_buffer_map, dev);
			dev->agp_buffer_map = NULL;
		}
	} else
#endif
	{
		if (dev_priv->gart_info.bus_addr) {
			/* Turn off PCI GART */
			radeon_set_pcigart(dev_priv, 0);
			if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
				r600_page_table_cleanup(dev, &dev_priv->gart_info);
			else {
				if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
					DRM_ERROR("failed to cleanup PCI GART!\n");
			}
		}

		if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) {
			drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
			dev_priv->gart_info.addr = NULL;
		}
	}
	/* only clear to the start of flags */
	memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));

	return 0;
}
/* This code will reinit the Radeon CP hardware after a resume from disc.
 * AFAIK, it would be very difficult to pickle the state at suspend time, so
 * here we make sure that all Radeon hardware initialisation is re-done without
 * affecting running applications.
 *
 * Charl P. Botha <http://cpbotha.net>
 */
static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("Called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("Starting radeon_do_resume_cp()\n");

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* Turn off PCI GART */
		radeon_set_pcigart(dev_priv, 0);
	} else
#endif
	{
		/* Turn on PCI GART */
		radeon_set_pcigart(dev_priv, 1);
	}

	radeon_cp_load_microcode(dev_priv);
	radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);

	dev_priv->have_z_offset = 0;
	radeon_do_engine_reset(dev);
	radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);

	DRM_DEBUG("radeon_do_resume_cp() complete\n");

	return 0;
}

/* RADEON_CP_INIT ioctl: dispatch init/cleanup to the R100-R500 path
 * here or the R600+ path in r600_cp.c based on the requested function
 * and chip family.
 */
int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_init_t *init = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* R300+ needs its register write-check tables prepared. */
	if (init->func == RADEON_INIT_R300_CP)
		r300_init_reg_flags(dev);

	switch (init->func) {
	case RADEON_INIT_CP:
	case RADEON_INIT_R200_CP:
	case RADEON_INIT_R300_CP:
		return radeon_do_init_cp(dev, init, file_priv);
	case RADEON_INIT_R600_CP:
		return r600_do_init_cp(dev, init, file_priv);
	case RADEON_CLEANUP_CP:
		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
			return r600_do_cleanup_cp(dev);
		else
			return radeon_do_cleanup_cp(dev);
	}

	return -EINVAL;
}

/* RADEON_CP_START ioctl: start the CP unless it is already running or
 * the CP mode recorded at init time is the (unsupported) fully
 * disabled one; dispatches by chip family.
 */
int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (dev_priv->cp_running) {
		DRM_DEBUG("while CP running\n");
		return 0;
	}
	if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
		DRM_DEBUG("called with bogus CP mode (%d)\n",
			  dev_priv->cp_mode);
		return 0;
	}

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		r600_do_cp_start(dev_priv);
	else
		radeon_do_cp_start(dev_priv);

	return 0;
}

/* Stop the CP.  The engine must have been idled before calling this
 * routine.
 *
 * Optionally flushes pending commands and idles the engine first (an
 * idle failure is returned so the ioctl wrapper can retry), then stops
 * the CP; family-dispatched throughout.
 */
int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_cp_stop_t *stop = data;
	int ret;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv->cp_running)
		return 0;

	/* Flush any pending CP commands.  This ensures any outstanding
	 * commands are exectuted by the engine before we turn it off.
	 */
	if (stop->flush) {
		radeon_do_cp_flush(dev_priv);
	}

	/* If we fail to make the engine go idle, we return an error
	 * code so that the DRM ioctl wrapper can try again.
	 */
	if (stop->idle) {
		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
			ret = r600_do_cp_idle(dev_priv);
		else
			ret = radeon_do_cp_idle(dev_priv);
		if (ret)
			return ret;
	}

	/* Finally, we can turn off the CP.  If the engine isn't idle,
	 * we will get some dropped triangles as they won't be fully
	 * rendered before the CP is shut down.
	 */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		r600_do_cp_stop(dev_priv);
	else
		radeon_do_cp_stop(dev_priv);

	/* Reset the engine */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		r600_do_engine_reset(dev);
	else
		radeon_do_engine_reset(dev);

	return 0;
}
/* Final teardown when the device is released: spin (yielding) until
 * the CP goes idle, stop it and reset the engine, mask all interrupts
 * and clear the hardware surface registers (pre-R600 only), free the
 * GART/FB memory heaps, run the family-specific cleanup, and release
 * any loaded microcode firmware.
 */
void radeon_do_release(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if (dev_priv) {
		if (dev_priv->cp_running) {
			/* Stop the cp */
			if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
				while ((ret = r600_do_cp_idle(dev_priv)) != 0) {
					DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
#ifdef __linux__
					schedule();
#else
					tsleep(&ret, PZERO, "rdnrel", 1);
#endif
				}
			} else {
				while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
					DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
#ifdef __linux__
					schedule();
#else
					tsleep(&ret, PZERO, "rdnrel", 1);
#endif
				}
			}
			if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
				r600_do_cp_stop(dev_priv);
				r600_do_engine_reset(dev);
			} else {
				radeon_do_cp_stop(dev_priv);
				radeon_do_engine_reset(dev);
			}
		}

		if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_R600) {
			/* Disable *all* interrupts */
			if (dev_priv->mmio)	/* remove this after permanent addmaps */
				RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);

			if (dev_priv->mmio) {	/* remove all surfaces */
				for (i = 0; i < RADEON_MAX_SURFACES; i++) {
					RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
					RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
						     16 * i, 0);
					RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
						     16 * i, 0);
				}
			}
		}

		/* Free memory heap structures */
		radeon_mem_takedown(&(dev_priv->gart_heap));
		radeon_mem_takedown(&(dev_priv->fb_heap));

		/* deallocate kernel resources */
		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
			r600_do_cleanup_cp(dev);
		else
			radeon_do_cleanup_cp(dev);

		if (dev_priv->me_fw) {
			release_firmware(dev_priv->me_fw);
			dev_priv->me_fw = NULL;
		}
		if (dev_priv->pfp_fw) {
			release_firmware(dev_priv->pfp_fw);
			dev_priv->pfp_fw = NULL;
		}
	}
}

/* Just reset the CP ring.  Called as part of an X Server engine reset. */
int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_DEBUG("called before init done\n");
		return -EINVAL;
	}

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		r600_do_cp_reset(dev_priv);
	else
		radeon_do_cp_reset(dev_priv);

	/* The CP is no longer running after an engine reset */
	dev_priv->cp_running = 0;

	return 0;
}

/* RADEON_CP_IDLE ioctl: wait for the CP to drain, family-dispatched. */
int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		return r600_do_cp_idle(dev_priv);
	else
		return radeon_do_cp_idle(dev_priv);
}

/* Added by Charl P. Botha to call radeon_do_resume_cp().
 */
int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		return r600_do_resume_cp(dev, file_priv);
	else
		return radeon_do_resume_cp(dev, file_priv);
}

/* RADEON_ENGINE_RESET ioctl: full engine reset, family-dispatched. */
int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		return r600_do_engine_reset(dev);
	else
		return radeon_do_engine_reset(dev);
}

/* ================================================================
 * Fullscreen mode
 */

/* KW: Deprecated to say the least:
 */
int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return 0;
}
/* ================================================================
 * Freelist management
 */

/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
 * bufs until freelist code is used.  Note this hides a problem with
 * the scratch register * (used to keep track of last buffer
 * completed) being written to before * the last buffer has actually
 * completed rendering.
 *
 * KW:  It's also a good way to find free buffers quickly.
 *
 * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't
 * sleep.  However, bugs in older versions of radeon_accel.c mean that
 * we essentially have to do this, else old clients will break.
 *
 * However, it does leave open a potential deadlock where all the
 * buffers are held by other clients, which can't release them because
 * they can't get the lock.
 */

/* Scan the DMA buffer list (round-robin from last_buf) for a buffer
 * that is either unowned or whose GPU age shows it has completed,
 * retrying with 1us delays up to usec_timeout.  Returns NULL if no
 * buffer frees up in time.
 */
struct drm_buf *radeon_freelist_get(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv;
	struct drm_buf *buf;
	int i, t;
	int start;

	if (++dev_priv->last_buf >= dma->buf_count)
		dev_priv->last_buf = 0;

	start = dev_priv->last_buf;
	for (t = 0; t < dev_priv->usec_timeout; t++) {
		/* Age the GPU last wrote to the dispatch scratch slot. */
		u32 done_age = GET_SCRATCH(dev_priv, 1);
		DRM_DEBUG("done_age = %d\n", done_age);
		for (i = 0; i < dma->buf_count; i++) {
			buf = dma->buflist[start];
			buf_priv = buf->dev_private;
			if (buf->file_priv == NULL || (buf->pending &&
						       buf_priv->age <=
						       done_age)) {
				dev_priv->stats.requested_bufs++;
				buf->pending = 0;
				return buf;
			}
			if (++start >= dma->buf_count)
				start = 0;
		}

		if (t) {
			DRM_UDELAY(1);
			dev_priv->stats.freelist_loops++;
		}
	}

	return NULL;
}

/* Mark every DMA buffer's age as 0 (completed) and restart the
 * round-robin scan position. */
void radeon_freelist_reset(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;

	dev_priv->last_buf = 0;
	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
		buf_priv->age = 0;
	}
}

/* ================================================================
 * CP command submission
 */

/* Wait until at least n bytes of ring space are free, polling the ring
 * head for up to usec_timeout microseconds.  The timeout counter is
 * restarted whenever the head moves (i.e. the CP is making progress).
 * Returns 0 on success, -EBUSY on timeout.
 */
int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
{
	drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
	int i;
	u32 last_head = GET_RING_HEAD(dev_priv);

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		u32 head = GET_RING_HEAD(dev_priv);

		ring->space = (head - ring->tail) * sizeof(u32);
		if (ring->space <= 0)
			ring->space += ring->size;
		if (ring->space > n)
			return 0;

		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

		if (head != last_head)
			i = 0;	/* CP advanced; restart the timeout */
		last_head = head;

		DRM_UDELAY(1);
	}

	/* FIXME: This return value is ignored in the BEGIN_RING macro! */
#if RADEON_FIFO_DEBUG
	radeon_status(dev_priv);
	DRM_ERROR("failed!\n");
#endif
	return -EBUSY;
}

/* Hand out up to request_count free DMA buffers to the caller, copying
 * each granted buffer's index and size to the userspace arrays.
 * Returns -EBUSY when the freelist runs dry (broken client behaviour),
 * -EFAULT on copy-out failure.
 */
static int radeon_cp_get_buffers(struct drm_device *dev,
				 struct drm_file *file_priv,
				 struct drm_dma * d)
{
	int i;
	struct drm_buf *buf;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = radeon_freelist_get(dev);
		if (!buf)
			return -EBUSY;	/* NOTE: broken client */

		buf->file_priv = file_priv;

		if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
				     sizeof(buf->idx)))
			return -EFAULT;
		if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
				     sizeof(buf->total)))
			return -EFAULT;

		d->granted_count++;
	}
	return 0;
}
*/ if (d->request_count < 0 || d->request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", DRM_CURRENTPID, d->request_count, dma->buf_count); return -EINVAL; } d->granted_count = 0; if (d->request_count) { ret = radeon_cp_get_buffers(dev, file_priv, d); } return ret; } int radeon_driver_load(struct drm_device *dev, unsigned long flags) { drm_radeon_private_t *dev_priv; int ret = 0; dev_priv = kzalloc(sizeof(drm_radeon_private_t), GFP_KERNEL); if (dev_priv == NULL) return -ENOMEM; dev->dev_private = (void *)dev_priv; dev_priv->flags = flags; switch (flags & RADEON_FAMILY_MASK) { case CHIP_R100: case CHIP_RV200: case CHIP_R200: case CHIP_R300: case CHIP_R350: case CHIP_R420: case CHIP_R423: case CHIP_RV410: case CHIP_RV515: case CHIP_R520: case CHIP_RV570: case CHIP_R580: dev_priv->flags |= RADEON_HAS_HIERZ; break; default: /* all other chips have no hierarchical z buffer */ break; } if (drm_device_is_agp(dev)) dev_priv->flags |= RADEON_IS_AGP; else if (drm_device_is_pcie(dev)) dev_priv->flags |= RADEON_IS_PCIE; else dev_priv->flags |= RADEON_IS_PCI; ret = drm_addmap(dev, drm_get_resource_start(dev, 2), drm_get_resource_len(dev, 2), _DRM_REGISTERS, _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio); if (ret != 0) return ret; ret = drm_vblank_init(dev, 2); if (ret) { radeon_driver_unload(dev); return ret; } DRM_DEBUG("%s card detected\n", ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? 
"PCIE" : "PCI")))); return ret; } int radeon_master_create(struct drm_device *dev, struct drm_master *master) { struct drm_radeon_master_private *master_priv; unsigned long sareapage; int ret; master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); if (!master_priv) return -ENOMEM; /* prebuild the SAREA */ sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE); ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &master_priv->sarea); if (ret) { DRM_ERROR("SAREA setup failed\n"); kfree(master_priv); return ret; } master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea); master_priv->sarea_priv->pfCurrentPage = 0; master->driver_priv = master_priv; return 0; } void radeon_master_destroy(struct drm_device *dev, struct drm_master *master) { struct drm_radeon_master_private *master_priv = master->driver_priv; if (!master_priv) return; if (master_priv->sarea_priv && master_priv->sarea_priv->pfCurrentPage != 0) radeon_cp_dispatch_flip(dev, master); master_priv->sarea_priv = NULL; if (master_priv->sarea) drm_rmmap_locked(dev, master_priv->sarea); kfree(master_priv); master->driver_priv = NULL; } /* Create mappings for registers and framebuffer so userland doesn't necessarily * have to find them. 
*/ int radeon_driver_firstopen(struct drm_device *dev) { int ret; drm_local_map_t *map; drm_radeon_private_t *dev_priv = dev->dev_private; dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0); ret = drm_addmap(dev, dev_priv->fb_aper_offset, drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, &map); if (ret != 0) return ret; return 0; } int radeon_driver_unload(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); drm_rmmap(dev, dev_priv->mmio); kfree(dev_priv); dev->dev_private = NULL; return 0; } void radeon_commit_ring(drm_radeon_private_t *dev_priv) { int i; u32 *ring; int tail_aligned; /* check if the ring is padded out to 16-dword alignment */ tail_aligned = dev_priv->ring.tail & (RADEON_RING_ALIGN-1); if (tail_aligned) { int num_p2 = RADEON_RING_ALIGN - tail_aligned; ring = dev_priv->ring.start; /* pad with some CP_PACKET2 */ for (i = 0; i < num_p2; i++) ring[dev_priv->ring.tail + i] = CP_PACKET2(); dev_priv->ring.tail += i; dev_priv->ring.space -= num_p2 * sizeof(u32); } dev_priv->ring.tail &= dev_priv->ring.tail_mask; DRM_MEMORYBARRIER(); GET_RING_HEAD( dev_priv ); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { RADEON_WRITE(R600_CP_RB_WPTR, dev_priv->ring.tail); /* read from PCI bus to ensure correct posting */ RADEON_READ(R600_CP_RB_RPTR); } else { RADEON_WRITE(RADEON_CP_RB_WPTR, dev_priv->ring.tail); /* read from PCI bus to ensure correct posting */ RADEON_READ(RADEON_CP_RB_RPTR); } }
gpl-2.0
NieNs/IM-A840S-kernel
drivers/mtd/redboot.c
1065
8718
/* * Parse RedBoot-style Flash Image System (FIS) tables and * produce a Linux partition array to match. * * Copyright © 2001 Red Hat UK Limited * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> struct fis_image_desc { unsigned char name[16]; // Null terminated name uint32_t flash_base; // Address within FLASH of image uint32_t mem_base; // Address in memory where it executes uint32_t size; // Length of image uint32_t entry_point; // Execution entry point uint32_t data_length; // Length of actual data unsigned char _pad[256-(16+7*sizeof(uint32_t))]; uint32_t desc_cksum; // Checksum over image descriptor uint32_t file_cksum; // Checksum over image data }; struct fis_list { struct fis_image_desc *img; struct fis_list *next; }; static int directory = CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK; module_param(directory, int, 0); static inline int redboot_checksum(struct fis_image_desc *img) { /* RedBoot doesn't actually write the desc_cksum field yet AFAICT */ return 1; } static int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **pparts, unsigned long fis_origin) { int nrparts = 0; struct 
fis_image_desc *buf; struct mtd_partition *parts; struct fis_list *fl = NULL, *tmp_fl; int ret, i; size_t retlen; char *names; char *nullname; int namelen = 0; int nulllen = 0; int numslots; unsigned long offset; #ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED static char nullstring[] = "unallocated"; #endif if ( directory < 0 ) { offset = master->size + directory * master->erasesize; while (master->block_isbad && master->block_isbad(master, offset)) { if (!offset) { nogood: printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n"); return -EIO; } offset -= master->erasesize; } } else { offset = directory * master->erasesize; while (master->block_isbad && master->block_isbad(master, offset)) { offset += master->erasesize; if (offset == master->size) goto nogood; } } buf = vmalloc(master->erasesize); if (!buf) return -ENOMEM; printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n", master->name, offset); ret = master->read(master, offset, master->erasesize, &retlen, (void *)buf); if (ret) goto out; if (retlen != master->erasesize) { ret = -EIO; goto out; } numslots = (master->erasesize / sizeof(struct fis_image_desc)); for (i = 0; i < numslots; i++) { if (!memcmp(buf[i].name, "FIS directory", 14)) { /* This is apparently the FIS directory entry for the * FIS directory itself. The FIS directory size is * one erase block; if the buf[i].size field is * swab32(erasesize) then we know we are looking at * a byte swapped FIS directory - swap all the entries! * (NOTE: this is 'size' not 'data_length'; size is * the full size of the entry.) */ /* RedBoot can combine the FIS directory and config partitions into a single eraseblock; we assume wrong-endian if either the swapped 'size' matches the eraseblock size precisely, or if the swapped size actually fits in an eraseblock while the unswapped size doesn't. 
*/ if (swab32(buf[i].size) == master->erasesize || (buf[i].size > master->erasesize && swab32(buf[i].size) < master->erasesize)) { int j; /* Update numslots based on actual FIS directory size */ numslots = swab32(buf[i].size) / sizeof (struct fis_image_desc); for (j = 0; j < numslots; ++j) { /* A single 0xff denotes a deleted entry. * Two of them in a row is the end of the table. */ if (buf[j].name[0] == 0xff) { if (buf[j].name[1] == 0xff) { break; } else { continue; } } /* The unsigned long fields were written with the * wrong byte sex, name and pad have no byte sex. */ swab32s(&buf[j].flash_base); swab32s(&buf[j].mem_base); swab32s(&buf[j].size); swab32s(&buf[j].entry_point); swab32s(&buf[j].data_length); swab32s(&buf[j].desc_cksum); swab32s(&buf[j].file_cksum); } } else if (buf[i].size < master->erasesize) { /* Update numslots based on actual FIS directory size */ numslots = buf[i].size / sizeof(struct fis_image_desc); } break; } } if (i == numslots) { /* Didn't find it */ printk(KERN_NOTICE "No RedBoot partition table detected in %s\n", master->name); ret = 0; goto out; } for (i = 0; i < numslots; i++) { struct fis_list *new_fl, **prev; if (buf[i].name[0] == 0xff) { if (buf[i].name[1] == 0xff) { break; } else { continue; } } if (!redboot_checksum(&buf[i])) break; new_fl = kmalloc(sizeof(struct fis_list), GFP_KERNEL); namelen += strlen(buf[i].name)+1; if (!new_fl) { ret = -ENOMEM; goto out; } new_fl->img = &buf[i]; if (fis_origin) { buf[i].flash_base -= fis_origin; } else { buf[i].flash_base &= master->size-1; } /* I'm sure the JFFS2 code has done me permanent damage. 
* I now think the following is _normal_ */ prev = &fl; while(*prev && (*prev)->img->flash_base < new_fl->img->flash_base) prev = &(*prev)->next; new_fl->next = *prev; *prev = new_fl; nrparts++; } #ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED if (fl->img->flash_base) { nrparts++; nulllen = sizeof(nullstring); } for (tmp_fl = fl; tmp_fl->next; tmp_fl = tmp_fl->next) { if (tmp_fl->img->flash_base + tmp_fl->img->size + master->erasesize <= tmp_fl->next->img->flash_base) { nrparts++; nulllen = sizeof(nullstring); } } #endif parts = kzalloc(sizeof(*parts)*nrparts + nulllen + namelen, GFP_KERNEL); if (!parts) { ret = -ENOMEM; goto out; } nullname = (char *)&parts[nrparts]; #ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED if (nulllen > 0) { strcpy(nullname, nullstring); } #endif names = nullname + nulllen; i=0; #ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED if (fl->img->flash_base) { parts[0].name = nullname; parts[0].size = fl->img->flash_base; parts[0].offset = 0; i++; } #endif for ( ; i<nrparts; i++) { parts[i].size = fl->img->size; parts[i].offset = fl->img->flash_base; parts[i].name = names; strcpy(names, fl->img->name); #ifdef CONFIG_MTD_REDBOOT_PARTS_READONLY if (!memcmp(names, "RedBoot", 8) || !memcmp(names, "RedBoot config", 15) || !memcmp(names, "FIS directory", 14)) { parts[i].mask_flags = MTD_WRITEABLE; } #endif names += strlen(names)+1; #ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) { i++; parts[i].offset = parts[i-1].size + parts[i-1].offset; parts[i].size = fl->next->img->flash_base - parts[i].offset; parts[i].name = nullname; } #endif tmp_fl = fl; fl = fl->next; kfree(tmp_fl); } ret = nrparts; *pparts = parts; out: while (fl) { struct fis_list *old = fl; fl = fl->next; kfree(old); } vfree(buf); return ret; } static struct mtd_part_parser redboot_parser = { .owner = THIS_MODULE, .parse_fn = parse_redboot_partitions, .name = "RedBoot", }; static int __init 
redboot_parser_init(void) { return register_mtd_parser(&redboot_parser); } static void __exit redboot_parser_exit(void) { deregister_mtd_parser(&redboot_parser); } module_init(redboot_parser_init); module_exit(redboot_parser_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_DESCRIPTION("Parsing code for RedBoot Flash Image System (FIS) tables");
gpl-2.0
aatjitra/Note2
drivers/s390/scsi/zfcp_dbf.c
2089
13745
/* * zfcp device driver * * Debug traces for zfcp. * * Copyright IBM Corporation 2002, 2010 */ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/ctype.h> #include <linux/slab.h> #include <asm/debug.h> #include "zfcp_dbf.h" #include "zfcp_ext.h" #include "zfcp_fc.h" static u32 dbfsize = 4; module_param(dbfsize, uint, 0400); MODULE_PARM_DESC(dbfsize, "number of pages for each debug feature area (default 4)"); static inline unsigned int zfcp_dbf_plen(unsigned int offset) { return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC; } static inline void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area, u64 req_id) { struct zfcp_dbf_pay *pl = &dbf->pay_buf; u16 offset = 0, rec_length; spin_lock(&dbf->pay_lock); memset(pl, 0, sizeof(*pl)); pl->fsf_req_id = req_id; memcpy(pl->area, area, ZFCP_DBF_TAG_LEN); while (offset < length) { rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC, (u16) (length - offset)); memcpy(pl->data, data + offset, rec_length); debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length)); offset += rec_length; pl->counter++; } spin_unlock(&dbf->pay_lock); } /** * zfcp_dbf_hba_fsf_res - trace event for fsf responses * @tag: tag indicating which kind of unsolicited status has been received * @req: request for which a response was received */ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req) { struct zfcp_dbf *dbf = req->adapter->dbf; struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix; struct fsf_qtcb_header *q_head = &req->qtcb->header; struct zfcp_dbf_hba *rec = &dbf->hba_buf; unsigned long flags; spin_lock_irqsave(&dbf->hba_lock, flags); memset(rec, 0, sizeof(*rec)); memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); rec->id = ZFCP_DBF_HBA_RES; rec->fsf_req_id = req->req_id; rec->fsf_req_status = req->status; rec->fsf_cmd = req->fsf_command; rec->fsf_seq_no = req->seq_no; rec->u.res.req_issued = req->issued; rec->u.res.prot_status = q_pref->prot_status; rec->u.res.fsf_status = 
q_head->fsf_status; memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual, FSF_PROT_STATUS_QUAL_SIZE); memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE); if (req->fsf_command != FSF_QTCB_FCP_CMND) { rec->pl_len = q_head->log_length; zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start, rec->pl_len, "fsf_res", req->req_id); } debug_event(dbf->hba, 1, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->hba_lock, flags); } /** * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer * @tag: tag indicating which kind of unsolicited status has been received * @req: request providing the unsolicited status */ void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req) { struct zfcp_dbf *dbf = req->adapter->dbf; struct fsf_status_read_buffer *srb = req->data; struct zfcp_dbf_hba *rec = &dbf->hba_buf; unsigned long flags; spin_lock_irqsave(&dbf->hba_lock, flags); memset(rec, 0, sizeof(*rec)); memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); rec->id = ZFCP_DBF_HBA_USS; rec->fsf_req_id = req->req_id; rec->fsf_req_status = req->status; rec->fsf_cmd = req->fsf_command; if (!srb) goto log; rec->u.uss.status_type = srb->status_type; rec->u.uss.status_subtype = srb->status_subtype; rec->u.uss.d_id = ntoh24(srb->d_id); rec->u.uss.lun = srb->fcp_lun; memcpy(&rec->u.uss.queue_designator, &srb->queue_designator, sizeof(rec->u.uss.queue_designator)); /* status read buffer payload length */ rec->pl_len = (!srb->length) ? 
0 : srb->length - offsetof(struct fsf_status_read_buffer, payload); if (rec->pl_len) zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len, "fsf_uss", req->req_id); log: debug_event(dbf->hba, 2, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->hba_lock, flags); } /** * zfcp_dbf_hba_bit_err - trace event for bit error conditions * @tag: tag indicating which kind of unsolicited status has been received * @req: request which caused the bit_error condition */ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req) { struct zfcp_dbf *dbf = req->adapter->dbf; struct zfcp_dbf_hba *rec = &dbf->hba_buf; struct fsf_status_read_buffer *sr_buf = req->data; unsigned long flags; spin_lock_irqsave(&dbf->hba_lock, flags); memset(rec, 0, sizeof(*rec)); memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); rec->id = ZFCP_DBF_HBA_BIT; rec->fsf_req_id = req->req_id; rec->fsf_req_status = req->status; rec->fsf_cmd = req->fsf_command; memcpy(&rec->u.be, &sr_buf->payload.bit_error, sizeof(struct fsf_bit_error_payload)); debug_event(dbf->hba, 1, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->hba_lock, flags); } static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec, struct zfcp_adapter *adapter, struct zfcp_port *port, struct scsi_device *sdev) { rec->adapter_status = atomic_read(&adapter->status); if (port) { rec->port_status = atomic_read(&port->status); rec->wwpn = port->wwpn; rec->d_id = port->d_id; } if (sdev) { rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status); rec->lun = zfcp_scsi_dev_lun(sdev); } } /** * zfcp_dbf_rec_trig - trace event related to triggered recovery * @tag: identifier for event * @adapter: adapter on which the erp_action should run * @port: remote port involved in the erp_action * @sdev: scsi device involved in the erp_action * @want: wanted erp_action * @need: required erp_action * * The adapter->erp_lock has to be held. 
*/ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter, struct zfcp_port *port, struct scsi_device *sdev, u8 want, u8 need) { struct zfcp_dbf *dbf = adapter->dbf; struct zfcp_dbf_rec *rec = &dbf->rec_buf; struct list_head *entry; unsigned long flags; spin_lock_irqsave(&dbf->rec_lock, flags); memset(rec, 0, sizeof(*rec)); rec->id = ZFCP_DBF_REC_TRIG; memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); zfcp_dbf_set_common(rec, adapter, port, sdev); list_for_each(entry, &adapter->erp_ready_head) rec->u.trig.ready++; list_for_each(entry, &adapter->erp_running_head) rec->u.trig.running++; rec->u.trig.want = want; rec->u.trig.need = need; debug_event(dbf->rec, 1, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->rec_lock, flags); } /** * zfcp_dbf_rec_run - trace event related to running recovery * @tag: identifier for event * @erp: erp_action running */ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) { struct zfcp_dbf *dbf = erp->adapter->dbf; struct zfcp_dbf_rec *rec = &dbf->rec_buf; unsigned long flags; spin_lock_irqsave(&dbf->rec_lock, flags); memset(rec, 0, sizeof(*rec)); rec->id = ZFCP_DBF_REC_RUN; memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev); rec->u.run.fsf_req_id = erp->fsf_req_id; rec->u.run.rec_status = erp->status; rec->u.run.rec_step = erp->step; rec->u.run.rec_action = erp->action; if (erp->sdev) rec->u.run.rec_count = atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter); else if (erp->port) rec->u.run.rec_count = atomic_read(&erp->port->erp_counter); else rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter); debug_event(dbf->rec, 1, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->rec_lock, flags); } static inline void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len, u64 req_id, u32 d_id) { struct zfcp_dbf_san *rec = &dbf->san_buf; u16 rec_len; unsigned long flags; spin_lock_irqsave(&dbf->san_lock, flags); memset(rec, 0, sizeof(*rec)); rec->id = id; 
rec->fsf_req_id = req_id; rec->d_id = d_id; rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD); memcpy(rec->payload, data, rec_len); memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); debug_event(dbf->san, 1, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->san_lock, flags); } /** * zfcp_dbf_san_req - trace event for issued SAN request * @tag: indentifier for event * @fsf_req: request containing issued CT data * d_id: destination ID */ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id) { struct zfcp_dbf *dbf = fsf->adapter->dbf; struct zfcp_fsf_ct_els *ct_els = fsf->data; u16 length; length = (u16)(ct_els->req->length + FC_CT_HDR_LEN); zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length, fsf->req_id, d_id); } /** * zfcp_dbf_san_res - trace event for received SAN request * @tag: indentifier for event * @fsf_req: request containing issued CT data */ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf) { struct zfcp_dbf *dbf = fsf->adapter->dbf; struct zfcp_fsf_ct_els *ct_els = fsf->data; u16 length; length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN); zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length, fsf->req_id, 0); } /** * zfcp_dbf_san_in_els - trace event for incoming ELS * @tag: indentifier for event * @fsf_req: request containing issued CT data */ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf) { struct zfcp_dbf *dbf = fsf->adapter->dbf; struct fsf_status_read_buffer *srb = (struct fsf_status_read_buffer *) fsf->data; u16 length; length = (u16)(srb->length - offsetof(struct fsf_status_read_buffer, payload)); zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length, fsf->req_id, ntoh24(srb->d_id)); } /** * zfcp_dbf_scsi - trace event for scsi commands * @tag: identifier for event * @sc: pointer to struct scsi_cmnd * @fsf: pointer to struct zfcp_fsf_req */ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf) { struct zfcp_adapter *adapter = (struct zfcp_adapter 
*) sc->device->host->hostdata[0]; struct zfcp_dbf *dbf = adapter->dbf; struct zfcp_dbf_scsi *rec = &dbf->scsi_buf; struct fcp_resp_with_ext *fcp_rsp; struct fcp_resp_rsp_info *fcp_rsp_info; unsigned long flags; spin_lock_irqsave(&dbf->scsi_lock, flags); memset(rec, 0, sizeof(*rec)); memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); rec->id = ZFCP_DBF_SCSI_CMND; rec->scsi_result = sc->result; rec->scsi_retries = sc->retries; rec->scsi_allowed = sc->allowed; rec->scsi_id = sc->device->id; rec->scsi_lun = sc->device->lun; rec->host_scribble = (unsigned long)sc->host_scribble; memcpy(rec->scsi_opcode, sc->cmnd, min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE)); if (fsf) { rec->fsf_req_id = fsf->req_id; fcp_rsp = (struct fcp_resp_with_ext *) &(fsf->qtcb->bottom.io.fcp_rsp); memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT); if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) { fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; rec->fcp_rsp_info = fcp_rsp_info->rsp_code; } if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) { rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE, (u16)ZFCP_DBF_PAY_MAX_REC); zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len, "fcp_sns", fsf->req_id); } } debug_event(dbf->scsi, 1, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->scsi_lock, flags); } static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size) { struct debug_info *d; d = debug_register(name, size, 1, rec_size); if (!d) return NULL; debug_register_view(d, &debug_hex_ascii_view); debug_set_level(d, 3); return d; } static void zfcp_dbf_unregister(struct zfcp_dbf *dbf) { if (!dbf) return; debug_unregister(dbf->scsi); debug_unregister(dbf->san); debug_unregister(dbf->hba); debug_unregister(dbf->pay); debug_unregister(dbf->rec); kfree(dbf); } /** * zfcp_adapter_debug_register - registers debug feature for an adapter * @adapter: pointer to adapter for which debug features should be registered * return: -ENOMEM on error, 0 otherwise */ int zfcp_dbf_adapter_register(struct zfcp_adapter 
*adapter) { char name[DEBUG_MAX_NAME_LEN]; struct zfcp_dbf *dbf; dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL); if (!dbf) return -ENOMEM; spin_lock_init(&dbf->pay_lock); spin_lock_init(&dbf->hba_lock); spin_lock_init(&dbf->san_lock); spin_lock_init(&dbf->scsi_lock); spin_lock_init(&dbf->rec_lock); /* debug feature area which records recovery activity */ sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev)); dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec)); if (!dbf->rec) goto err_out; /* debug feature area which records HBA (FSF and QDIO) conditions */ sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev)); dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba)); if (!dbf->hba) goto err_out; /* debug feature area which records payload info */ sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev)); dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay)); if (!dbf->pay) goto err_out; /* debug feature area which records SAN command failures and recovery */ sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev)); dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san)); if (!dbf->san) goto err_out; /* debug feature area which records SCSI command failures and recovery */ sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev)); dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi)); if (!dbf->scsi) goto err_out; adapter->dbf = dbf; return 0; err_out: zfcp_dbf_unregister(dbf); return -ENOMEM; } /** * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter * @adapter: pointer to adapter for which debug features should be unregistered */ void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter) { struct zfcp_dbf *dbf = adapter->dbf; adapter->dbf = NULL; zfcp_dbf_unregister(dbf); }
gpl-2.0
Clust3r/P8000-Kernel
drivers/staging/comedi/drivers/ni_6527.c
2089
12156
/* comedi/drivers/ni_6527.c driver for National Instruments PCI-6527 COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1999,2002,2003 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: ni_6527 Description: National Instruments 6527 Author: ds Status: works Devices: [National Instruments] PCI-6527 (ni6527), PXI-6527 Updated: Sat, 25 Jan 2003 13:24:40 -0800 */ /* Manuals (available from ftp://ftp.natinst.com/support/manuals) 370106b.pdf 6527 Register Level Programmer Manual */ #define DEBUG 1 #define DEBUG_FLAGS #include <linux/pci.h> #include <linux/interrupt.h> #include "../comedidev.h" #include "comedi_fc.h" #include "mite.h" #define DRIVER_NAME "ni_6527" #define NI6527_DIO_SIZE 4096 #define NI6527_MITE_SIZE 4096 #define Port_Register(x) (0x00+(x)) #define ID_Register 0x06 #define Clear_Register 0x07 #define ClrEdge 0x08 #define ClrOverflow 0x04 #define ClrFilter 0x02 #define ClrInterval 0x01 #define Filter_Interval(x) (0x08+(x)) #define Filter_Enable(x) (0x0c+(x)) #define Change_Status 0x14 #define MasterInterruptStatus 0x04 #define Overflow 0x02 #define EdgeStatus 0x01 #define Master_Interrupt_Control 0x15 #define FallingEdgeIntEnable 0x10 #define RisingEdgeIntEnable 0x08 #define MasterInterruptEnable 0x04 #define OverflowIntEnable 0x02 #define EdgeIntEnable 0x01 #define 
Rising_Edge_Detection_Enable(x) (0x018+(x)) #define Falling_Edge_Detection_Enable(x) (0x020+(x)) enum ni6527_boardid { BOARD_PCI6527, BOARD_PXI6527, }; struct ni6527_board { const char *name; }; static const struct ni6527_board ni6527_boards[] = { [BOARD_PCI6527] = { .name = "pci-6527", }, [BOARD_PXI6527] = { .name = "pxi-6527", }, }; struct ni6527_private { struct mite_struct *mite; unsigned int filter_interval; unsigned int filter_enable; }; static int ni6527_di_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni6527_private *devpriv = dev->private; int chan = CR_CHAN(insn->chanspec); unsigned int interval; if (insn->n != 2) return -EINVAL; if (data[0] != INSN_CONFIG_FILTER) return -EINVAL; if (data[1]) { interval = (data[1] + 100) / 200; data[1] = interval * 200; if (interval != devpriv->filter_interval) { writeb(interval & 0xff, devpriv->mite->daq_io_addr + Filter_Interval(0)); writeb((interval >> 8) & 0xff, devpriv->mite->daq_io_addr + Filter_Interval(1)); writeb((interval >> 16) & 0x0f, devpriv->mite->daq_io_addr + Filter_Interval(2)); writeb(ClrInterval, devpriv->mite->daq_io_addr + Clear_Register); devpriv->filter_interval = interval; } devpriv->filter_enable |= 1 << chan; } else { devpriv->filter_enable &= ~(1 << chan); } writeb(devpriv->filter_enable, devpriv->mite->daq_io_addr + Filter_Enable(0)); writeb(devpriv->filter_enable >> 8, devpriv->mite->daq_io_addr + Filter_Enable(1)); writeb(devpriv->filter_enable >> 16, devpriv->mite->daq_io_addr + Filter_Enable(2)); return 2; } static int ni6527_di_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni6527_private *devpriv = dev->private; data[1] = readb(devpriv->mite->daq_io_addr + Port_Register(0)); data[1] |= readb(devpriv->mite->daq_io_addr + Port_Register(1)) << 8; data[1] |= readb(devpriv->mite->daq_io_addr + Port_Register(2)) << 16; return insn->n; } static int 
ni6527_do_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni6527_private *devpriv = dev->private; if (data[0]) { s->state &= ~data[0]; s->state |= (data[0] & data[1]); /* The open relay state on the board cooresponds to 1, * but in Comedi, it is represented by 0. */ if (data[0] & 0x0000ff) { writeb((s->state ^ 0xff), devpriv->mite->daq_io_addr + Port_Register(3)); } if (data[0] & 0x00ff00) { writeb((s->state >> 8) ^ 0xff, devpriv->mite->daq_io_addr + Port_Register(4)); } if (data[0] & 0xff0000) { writeb((s->state >> 16) ^ 0xff, devpriv->mite->daq_io_addr + Port_Register(5)); } } data[1] = s->state; return insn->n; } static irqreturn_t ni6527_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct ni6527_private *devpriv = dev->private; struct comedi_subdevice *s = &dev->subdevices[2]; unsigned int status; status = readb(devpriv->mite->daq_io_addr + Change_Status); if ((status & MasterInterruptStatus) == 0) return IRQ_NONE; if ((status & EdgeStatus) == 0) return IRQ_NONE; writeb(ClrEdge | ClrOverflow, devpriv->mite->daq_io_addr + Clear_Register); comedi_buf_put(s->async, 0); s->async->events |= COMEDI_CB_EOS; comedi_event(dev, s); return IRQ_HANDLED; } static int ni6527_intr_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; /* Step 1 : check if triggers are trivially valid */ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW); err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_OTHER); err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW); err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT); err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT); if (err) return 1; /* Step 2a : make sure trigger sources are unique */ /* Step 2b : and mutually compatible */ if (err) return 2; /* Step 3: check if arguments are trivially valid */ err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0); err |= 
cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0); err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0); err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, 1); err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0); if (err) return 3; /* step 4: fix up any arguments */ if (err) return 4; return 0; } static int ni6527_intr_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct ni6527_private *devpriv = dev->private; /* struct comedi_cmd *cmd = &s->async->cmd; */ writeb(ClrEdge | ClrOverflow, devpriv->mite->daq_io_addr + Clear_Register); writeb(FallingEdgeIntEnable | RisingEdgeIntEnable | MasterInterruptEnable | EdgeIntEnable, devpriv->mite->daq_io_addr + Master_Interrupt_Control); return 0; } static int ni6527_intr_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { struct ni6527_private *devpriv = dev->private; writeb(0x00, devpriv->mite->daq_io_addr + Master_Interrupt_Control); return 0; } static int ni6527_intr_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[1] = 0; return insn->n; } static int ni6527_intr_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni6527_private *devpriv = dev->private; if (insn->n < 1) return -EINVAL; if (data[0] != INSN_CONFIG_CHANGE_NOTIFY) return -EINVAL; writeb(data[1], devpriv->mite->daq_io_addr + Rising_Edge_Detection_Enable(0)); writeb(data[1] >> 8, devpriv->mite->daq_io_addr + Rising_Edge_Detection_Enable(1)); writeb(data[1] >> 16, devpriv->mite->daq_io_addr + Rising_Edge_Detection_Enable(2)); writeb(data[2], devpriv->mite->daq_io_addr + Falling_Edge_Detection_Enable(0)); writeb(data[2] >> 8, devpriv->mite->daq_io_addr + Falling_Edge_Detection_Enable(1)); writeb(data[2] >> 16, devpriv->mite->daq_io_addr + Falling_Edge_Detection_Enable(2)); return 2; } static int ni6527_auto_attach(struct comedi_device *dev, unsigned long context) { struct pci_dev *pcidev = 
comedi_to_pci_dev(dev); const struct ni6527_board *board = NULL; struct ni6527_private *devpriv; struct comedi_subdevice *s; int ret; if (context < ARRAY_SIZE(ni6527_boards)) board = &ni6527_boards[context]; if (!board) return -ENODEV; dev->board_ptr = board; dev->board_name = board->name; ret = comedi_pci_enable(dev); if (ret) return ret; devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL); if (!devpriv) return -ENOMEM; dev->private = devpriv; devpriv->mite = mite_alloc(pcidev); if (!devpriv->mite) return -ENOMEM; ret = mite_setup(devpriv->mite); if (ret < 0) { dev_err(dev->class_dev, "error setting up mite\n"); return ret; } dev_info(dev->class_dev, "board: %s, ID=0x%02x\n", dev->board_name, readb(devpriv->mite->daq_io_addr + ID_Register)); ret = comedi_alloc_subdevices(dev, 3); if (ret) return ret; s = &dev->subdevices[0]; s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE; s->n_chan = 24; s->range_table = &range_digital; s->maxdata = 1; s->insn_config = ni6527_di_insn_config; s->insn_bits = ni6527_di_insn_bits; s = &dev->subdevices[1]; s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 24; s->range_table = &range_unknown; /* FIXME: actually conductance */ s->maxdata = 1; s->insn_bits = ni6527_do_insn_bits; s = &dev->subdevices[2]; dev->read_subdev = s; s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE | SDF_CMD_READ; s->n_chan = 1; s->range_table = &range_unknown; s->maxdata = 1; s->do_cmdtest = ni6527_intr_cmdtest; s->do_cmd = ni6527_intr_cmd; s->cancel = ni6527_intr_cancel; s->insn_bits = ni6527_intr_insn_bits; s->insn_config = ni6527_intr_insn_config; writeb(0x00, devpriv->mite->daq_io_addr + Filter_Enable(0)); writeb(0x00, devpriv->mite->daq_io_addr + Filter_Enable(1)); writeb(0x00, devpriv->mite->daq_io_addr + Filter_Enable(2)); writeb(ClrEdge | ClrOverflow | ClrFilter | ClrInterval, devpriv->mite->daq_io_addr + Clear_Register); writeb(0x00, devpriv->mite->daq_io_addr + Master_Interrupt_Control); ret = 
request_irq(mite_irq(devpriv->mite), ni6527_interrupt, IRQF_SHARED, DRIVER_NAME, dev); if (ret < 0) dev_warn(dev->class_dev, "irq not available\n"); else dev->irq = mite_irq(devpriv->mite); return 0; } static void ni6527_detach(struct comedi_device *dev) { struct ni6527_private *devpriv = dev->private; if (devpriv && devpriv->mite && devpriv->mite->daq_io_addr) writeb(0x00, devpriv->mite->daq_io_addr + Master_Interrupt_Control); if (dev->irq) free_irq(dev->irq, dev); if (devpriv && devpriv->mite) { mite_unsetup(devpriv->mite); mite_free(devpriv->mite); } comedi_pci_disable(dev); } static struct comedi_driver ni6527_driver = { .driver_name = DRIVER_NAME, .module = THIS_MODULE, .auto_attach = ni6527_auto_attach, .detach = ni6527_detach, }; static int ni6527_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { return comedi_pci_auto_config(dev, &ni6527_driver, id->driver_data); } static DEFINE_PCI_DEVICE_TABLE(ni6527_pci_table) = { { PCI_VDEVICE(NI, 0x2b10), BOARD_PXI6527 }, { PCI_VDEVICE(NI, 0x2b20), BOARD_PCI6527 }, { 0 } }; MODULE_DEVICE_TABLE(pci, ni6527_pci_table); static struct pci_driver ni6527_pci_driver = { .name = DRIVER_NAME, .id_table = ni6527_pci_table, .probe = ni6527_pci_probe, .remove = comedi_pci_auto_unconfig, }; module_comedi_pci_driver(ni6527_driver, ni6527_pci_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
shambakey1/kernel_sh
arch/powerpc/kernel/smp-tbsync.c
3369
3174
/* * Smp timebase synchronization for ppc. * * Copyright (C) 2003 Samuel Rydh (samuel@ibrium.se) * */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/unistd.h> #include <linux/init.h> #include <linux/slab.h> #include <asm/atomic.h> #include <asm/smp.h> #include <asm/time.h> #define NUM_ITER 300 enum { kExit=0, kSetAndTest, kTest }; static struct { volatile u64 tb; volatile u64 mark; volatile int cmd; volatile int handshake; int filler[2]; volatile int ack; int filler2[7]; volatile int race_result; } *tbsync; static volatile int running; static void __devinit enter_contest(u64 mark, long add) { while (get_tb() < mark) tbsync->race_result = add; } void __devinit smp_generic_take_timebase(void) { int cmd; u64 tb; unsigned long flags; local_irq_save(flags); while (!running) barrier(); rmb(); for (;;) { tbsync->ack = 1; while (!tbsync->handshake) barrier(); rmb(); cmd = tbsync->cmd; tb = tbsync->tb; mb(); tbsync->ack = 0; if (cmd == kExit) break; while (tbsync->handshake) barrier(); if (cmd == kSetAndTest) set_tb(tb >> 32, tb & 0xfffffffful); enter_contest(tbsync->mark, -1); } local_irq_restore(flags); } static int __devinit start_contest(int cmd, long offset, int num) { int i, score=0; u64 tb; u64 mark; tbsync->cmd = cmd; local_irq_disable(); for (i = -3; i < num; ) { tb = get_tb() + 400; tbsync->tb = tb + offset; tbsync->mark = mark = tb + 400; wmb(); tbsync->handshake = 1; while (tbsync->ack) barrier(); while (get_tb() <= tb) barrier(); tbsync->handshake = 0; enter_contest(mark, 1); while (!tbsync->ack) barrier(); if (i++ > 0) score += tbsync->race_result; } local_irq_enable(); return score; } void __devinit smp_generic_give_timebase(void) { int i, score, score2, old, min=0, max=5000, offset=1000; pr_debug("Software timebase sync\n"); /* if this fails then this kernel won't work anyway... 
*/ tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL ); mb(); running = 1; while (!tbsync->ack) barrier(); pr_debug("Got ack\n"); /* binary search */ for (old = -1; old != offset ; offset = (min+max) / 2) { score = start_contest(kSetAndTest, offset, NUM_ITER); pr_debug("score %d, offset %d\n", score, offset ); if( score > 0 ) max = offset; else min = offset; old = offset; } score = start_contest(kSetAndTest, min, NUM_ITER); score2 = start_contest(kSetAndTest, max, NUM_ITER); pr_debug("Min %d (score %d), Max %d (score %d)\n", min, score, max, score2); score = abs(score); score2 = abs(score2); offset = (score < score2) ? min : max; /* guard against inaccurate mttb */ for (i = 0; i < 10; i++) { start_contest(kSetAndTest, offset, NUM_ITER/10); if ((score2 = start_contest(kTest, offset, NUM_ITER)) < 0) score2 = -score2; if (score2 <= score || score2 < 20) break; } pr_debug("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER ); /* exiting */ tbsync->cmd = kExit; wmb(); tbsync->handshake = 1; while (tbsync->ack) barrier(); tbsync->handshake = 0; kfree(tbsync); tbsync = NULL; running = 0; }
gpl-2.0
MI2S/android_kernel_xiaomi_aries
drivers/target/iscsi/iscsi_target_erl0.c
3625
28851
/****************************************************************************** * This file contains error recovery level zero functions used by * the iSCSI Target driver. * * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. * * Licensed to the Linux Foundation under the General Public License (GPL) version 2. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. ******************************************************************************/ #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> #include "iscsi_target_core.h" #include "iscsi_target_seq_pdu_list.h" #include "iscsi_target_tq.h" #include "iscsi_target_erl0.h" #include "iscsi_target_erl1.h" #include "iscsi_target_erl2.h" #include "iscsi_target_util.h" #include "iscsi_target.h" /* * Used to set values in struct iscsi_cmd that iscsit_dataout_check_sequence() * checks against to determine a PDU's Offset+Length is within the current * DataOUT Sequence. Used for DataSequenceInOrder=Yes only. */ void iscsit_set_dataout_sequence_values( struct iscsi_cmd *cmd) { struct iscsi_conn *conn = cmd->conn; /* * Still set seq_start_offset and seq_end_offset for Unsolicited * DataOUT, even if DataSequenceInOrder=No. */ if (cmd->unsolicited_data) { cmd->seq_start_offset = cmd->write_data_done; cmd->seq_end_offset = (cmd->write_data_done + (cmd->data_length > conn->sess->sess_ops->FirstBurstLength) ? 
conn->sess->sess_ops->FirstBurstLength : cmd->data_length); return; } if (!conn->sess->sess_ops->DataSequenceInOrder) return; if (!cmd->seq_start_offset && !cmd->seq_end_offset) { cmd->seq_start_offset = cmd->write_data_done; cmd->seq_end_offset = (cmd->data_length > conn->sess->sess_ops->MaxBurstLength) ? (cmd->write_data_done + conn->sess->sess_ops->MaxBurstLength) : cmd->data_length; } else { cmd->seq_start_offset = cmd->seq_end_offset; cmd->seq_end_offset = ((cmd->seq_end_offset + conn->sess->sess_ops->MaxBurstLength) >= cmd->data_length) ? cmd->data_length : (cmd->seq_end_offset + conn->sess->sess_ops->MaxBurstLength); } } static int iscsit_dataout_within_command_recovery_check( struct iscsi_cmd *cmd, unsigned char *buf) { struct iscsi_conn *conn = cmd->conn; struct iscsi_data *hdr = (struct iscsi_data *) buf; u32 payload_length = ntoh24(hdr->dlength); /* * We do the within-command recovery checks here as it is * the first function called in iscsi_check_pre_dataout(). * Basically, if we are in within-command recovery and * the PDU does not contain the offset the sequence needs, * dump the payload. * * This only applies to DataPDUInOrder=Yes, for * DataPDUInOrder=No we only re-request the failed PDU * and check that all PDUs in a sequence are received * upon end of sequence. */ if (conn->sess->sess_ops->DataSequenceInOrder) { if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) && (cmd->write_data_done != hdr->offset)) goto dump; cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY; } else { struct iscsi_seq *seq; seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length); if (!seq) return DATAOUT_CANNOT_RECOVER; /* * Set the struct iscsi_seq pointer to reuse later. 
*/ cmd->seq_ptr = seq; if (conn->sess->sess_ops->DataPDUInOrder) { if ((seq->status == DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) && ((seq->offset != hdr->offset) || (seq->data_sn != hdr->datasn))) goto dump; } else { if ((seq->status == DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) && (seq->data_sn != hdr->datasn)) goto dump; } if (seq->status == DATAOUT_SEQUENCE_COMPLETE) goto dump; if (seq->status != DATAOUT_SEQUENCE_COMPLETE) seq->status = 0; } return DATAOUT_NORMAL; dump: pr_err("Dumping DataOUT PDU Offset: %u Length: %d DataSN:" " 0x%08x\n", hdr->offset, payload_length, hdr->datasn); return iscsit_dump_data_payload(conn, payload_length, 1); } static int iscsit_dataout_check_unsolicited_sequence( struct iscsi_cmd *cmd, unsigned char *buf) { u32 first_burst_len; struct iscsi_conn *conn = cmd->conn; struct iscsi_data *hdr = (struct iscsi_data *) buf; u32 payload_length = ntoh24(hdr->dlength); if ((hdr->offset < cmd->seq_start_offset) || ((hdr->offset + payload_length) > cmd->seq_end_offset)) { pr_err("Command ITT: 0x%08x with Offset: %u," " Length: %u outside of Unsolicited Sequence %u:%u while" " DataSequenceInOrder=Yes.\n", cmd->init_task_tag, hdr->offset, payload_length, cmd->seq_start_offset, cmd->seq_end_offset); return DATAOUT_CANNOT_RECOVER; } first_burst_len = (cmd->first_burst_len + payload_length); if (first_burst_len > conn->sess->sess_ops->FirstBurstLength) { pr_err("Total %u bytes exceeds FirstBurstLength: %u" " for this Unsolicited DataOut Burst.\n", first_burst_len, conn->sess->sess_ops->FirstBurstLength); transport_send_check_condition_and_sense(&cmd->se_cmd, TCM_INCORRECT_AMOUNT_OF_DATA, 0); return DATAOUT_CANNOT_RECOVER; } /* * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity * checks for the current Unsolicited DataOUT Sequence. */ if (hdr->flags & ISCSI_FLAG_CMD_FINAL) { /* * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of * sequence checks are handled in * iscsit_dataout_datapduinorder_no_fbit(). 
*/ if (!conn->sess->sess_ops->DataPDUInOrder) goto out; if ((first_burst_len != cmd->data_length) && (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) { pr_err("Unsolicited non-immediate data" " received %u does not equal FirstBurstLength: %u, and" " does not equal ExpXferLen %u.\n", first_burst_len, conn->sess->sess_ops->FirstBurstLength, cmd->data_length); transport_send_check_condition_and_sense(&cmd->se_cmd, TCM_INCORRECT_AMOUNT_OF_DATA, 0); return DATAOUT_CANNOT_RECOVER; } } else { if (first_burst_len == conn->sess->sess_ops->FirstBurstLength) { pr_err("Command ITT: 0x%08x reached" " FirstBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol" " error.\n", cmd->init_task_tag, conn->sess->sess_ops->FirstBurstLength); return DATAOUT_CANNOT_RECOVER; } if (first_burst_len == cmd->data_length) { pr_err("Command ITT: 0x%08x reached" " ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol" " error.\n", cmd->init_task_tag, cmd->data_length); return DATAOUT_CANNOT_RECOVER; } } out: return DATAOUT_NORMAL; } static int iscsit_dataout_check_sequence( struct iscsi_cmd *cmd, unsigned char *buf) { u32 next_burst_len; struct iscsi_conn *conn = cmd->conn; struct iscsi_seq *seq = NULL; struct iscsi_data *hdr = (struct iscsi_data *) buf; u32 payload_length = ntoh24(hdr->dlength); /* * For DataSequenceInOrder=Yes: Check that the offset and offset+length * is within range as defined by iscsi_set_dataout_sequence_values(). * * For DataSequenceInOrder=No: Check that an struct iscsi_seq exists for * offset+length tuple. */ if (conn->sess->sess_ops->DataSequenceInOrder) { /* * Due to possibility of recovery DataOUT sent by the initiator * fullfilling an Recovery R2T, it's best to just dump the * payload here, instead of erroring out. 
*/ if ((hdr->offset < cmd->seq_start_offset) || ((hdr->offset + payload_length) > cmd->seq_end_offset)) { pr_err("Command ITT: 0x%08x with Offset: %u," " Length: %u outside of Sequence %u:%u while" " DataSequenceInOrder=Yes.\n", cmd->init_task_tag, hdr->offset, payload_length, cmd->seq_start_offset, cmd->seq_end_offset); if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) return DATAOUT_CANNOT_RECOVER; return DATAOUT_WITHIN_COMMAND_RECOVERY; } next_burst_len = (cmd->next_burst_len + payload_length); } else { seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length); if (!seq) return DATAOUT_CANNOT_RECOVER; /* * Set the struct iscsi_seq pointer to reuse later. */ cmd->seq_ptr = seq; if (seq->status == DATAOUT_SEQUENCE_COMPLETE) { if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) return DATAOUT_CANNOT_RECOVER; return DATAOUT_WITHIN_COMMAND_RECOVERY; } next_burst_len = (seq->next_burst_len + payload_length); } if (next_burst_len > conn->sess->sess_ops->MaxBurstLength) { pr_err("Command ITT: 0x%08x, NextBurstLength: %u and" " Length: %u exceeds MaxBurstLength: %u. protocol" " error.\n", cmd->init_task_tag, (next_burst_len - payload_length), payload_length, conn->sess->sess_ops->MaxBurstLength); return DATAOUT_CANNOT_RECOVER; } /* * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity * checks for the current DataOUT Sequence. */ if (hdr->flags & ISCSI_FLAG_CMD_FINAL) { /* * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of * sequence checks are handled in * iscsit_dataout_datapduinorder_no_fbit(). 
*/ if (!conn->sess->sess_ops->DataPDUInOrder) goto out; if (conn->sess->sess_ops->DataSequenceInOrder) { if ((next_burst_len < conn->sess->sess_ops->MaxBurstLength) && ((cmd->write_data_done + payload_length) < cmd->data_length)) { pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL" " before end of DataOUT sequence, protocol" " error.\n", cmd->init_task_tag); return DATAOUT_CANNOT_RECOVER; } } else { if (next_burst_len < seq->xfer_len) { pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL" " before end of DataOUT sequence, protocol" " error.\n", cmd->init_task_tag); return DATAOUT_CANNOT_RECOVER; } } } else { if (conn->sess->sess_ops->DataSequenceInOrder) { if (next_burst_len == conn->sess->sess_ops->MaxBurstLength) { pr_err("Command ITT: 0x%08x reached" " MaxBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is" " not set, protocol error.", cmd->init_task_tag, conn->sess->sess_ops->MaxBurstLength); return DATAOUT_CANNOT_RECOVER; } if ((cmd->write_data_done + payload_length) == cmd->data_length) { pr_err("Command ITT: 0x%08x reached" " last DataOUT PDU in sequence but ISCSI_FLAG_" "CMD_FINAL is not set, protocol error.\n", cmd->init_task_tag); return DATAOUT_CANNOT_RECOVER; } } else { if (next_burst_len == seq->xfer_len) { pr_err("Command ITT: 0x%08x reached" " last DataOUT PDU in sequence but ISCSI_FLAG_" "CMD_FINAL is not set, protocol error.\n", cmd->init_task_tag); return DATAOUT_CANNOT_RECOVER; } } } out: return DATAOUT_NORMAL; } static int iscsit_dataout_check_datasn( struct iscsi_cmd *cmd, unsigned char *buf) { int dump = 0, recovery = 0; u32 data_sn = 0; struct iscsi_conn *conn = cmd->conn; struct iscsi_data *hdr = (struct iscsi_data *) buf; u32 payload_length = ntoh24(hdr->dlength); /* * Considering the target has no method of re-requesting DataOUT * by DataSN, if we receieve a greater DataSN than expected we * assume the functions for DataPDUInOrder=[Yes,No] below will * handle it. * * If the DataSN is less than expected, dump the payload. 
*/ if (conn->sess->sess_ops->DataSequenceInOrder) data_sn = cmd->data_sn; else { struct iscsi_seq *seq = cmd->seq_ptr; data_sn = seq->data_sn; } if (hdr->datasn > data_sn) { pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x" " higher than expected 0x%08x.\n", cmd->init_task_tag, hdr->datasn, data_sn); recovery = 1; goto recover; } else if (hdr->datasn < data_sn) { pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x" " lower than expected 0x%08x, discarding payload.\n", cmd->init_task_tag, hdr->datasn, data_sn); dump = 1; goto dump; } return DATAOUT_NORMAL; recover: if (!conn->sess->sess_ops->ErrorRecoveryLevel) { pr_err("Unable to perform within-command recovery" " while ERL=0.\n"); return DATAOUT_CANNOT_RECOVER; } dump: if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) return DATAOUT_CANNOT_RECOVER; return (recovery || dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL; } static int iscsit_dataout_pre_datapduinorder_yes( struct iscsi_cmd *cmd, unsigned char *buf) { int dump = 0, recovery = 0; struct iscsi_conn *conn = cmd->conn; struct iscsi_data *hdr = (struct iscsi_data *) buf; u32 payload_length = ntoh24(hdr->dlength); /* * For DataSequenceInOrder=Yes: If the offset is greater than the global * DataPDUInOrder=Yes offset counter in struct iscsi_cmd a protcol error has * occured and fail the connection. * * For DataSequenceInOrder=No: If the offset is greater than the per * sequence DataPDUInOrder=Yes offset counter in struct iscsi_seq a protocol * error has occured and fail the connection. 
*/ if (conn->sess->sess_ops->DataSequenceInOrder) { if (hdr->offset != cmd->write_data_done) { pr_err("Command ITT: 0x%08x, received offset" " %u different than expected %u.\n", cmd->init_task_tag, hdr->offset, cmd->write_data_done); recovery = 1; goto recover; } } else { struct iscsi_seq *seq = cmd->seq_ptr; if (hdr->offset > seq->offset) { pr_err("Command ITT: 0x%08x, received offset" " %u greater than expected %u.\n", cmd->init_task_tag, hdr->offset, seq->offset); recovery = 1; goto recover; } else if (hdr->offset < seq->offset) { pr_err("Command ITT: 0x%08x, received offset" " %u less than expected %u, discarding payload.\n", cmd->init_task_tag, hdr->offset, seq->offset); dump = 1; goto dump; } } return DATAOUT_NORMAL; recover: if (!conn->sess->sess_ops->ErrorRecoveryLevel) { pr_err("Unable to perform within-command recovery" " while ERL=0.\n"); return DATAOUT_CANNOT_RECOVER; } dump: if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) return DATAOUT_CANNOT_RECOVER; return (recovery) ? iscsit_recover_dataout_sequence(cmd, hdr->offset, payload_length) : (dump) ? 
DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL; } static int iscsit_dataout_pre_datapduinorder_no( struct iscsi_cmd *cmd, unsigned char *buf) { struct iscsi_pdu *pdu; struct iscsi_data *hdr = (struct iscsi_data *) buf; u32 payload_length = ntoh24(hdr->dlength); pdu = iscsit_get_pdu_holder(cmd, hdr->offset, payload_length); if (!pdu) return DATAOUT_CANNOT_RECOVER; cmd->pdu_ptr = pdu; switch (pdu->status) { case ISCSI_PDU_NOT_RECEIVED: case ISCSI_PDU_CRC_FAILED: case ISCSI_PDU_TIMED_OUT: break; case ISCSI_PDU_RECEIVED_OK: pr_err("Command ITT: 0x%08x received already gotten" " Offset: %u, Length: %u\n", cmd->init_task_tag, hdr->offset, payload_length); return iscsit_dump_data_payload(cmd->conn, payload_length, 1); default: return DATAOUT_CANNOT_RECOVER; } return DATAOUT_NORMAL; } static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset, u32 length) { struct iscsi_r2t *r2t; if (cmd->unsolicited_data) return 0; r2t = iscsit_get_r2t_for_eos(cmd, offset, length); if (!r2t) return -1; spin_lock_bh(&cmd->r2t_lock); r2t->seq_complete = 1; cmd->outstanding_r2ts--; spin_unlock_bh(&cmd->r2t_lock); return 0; } static int iscsit_dataout_update_datapduinorder_no( struct iscsi_cmd *cmd, u32 data_sn, int f_bit) { int ret = 0; struct iscsi_pdu *pdu = cmd->pdu_ptr; pdu->data_sn = data_sn; switch (pdu->status) { case ISCSI_PDU_NOT_RECEIVED: pdu->status = ISCSI_PDU_RECEIVED_OK; break; case ISCSI_PDU_CRC_FAILED: pdu->status = ISCSI_PDU_RECEIVED_OK; break; case ISCSI_PDU_TIMED_OUT: pdu->status = ISCSI_PDU_RECEIVED_OK; break; default: return DATAOUT_CANNOT_RECOVER; } if (f_bit) { ret = iscsit_dataout_datapduinorder_no_fbit(cmd, pdu); if (ret == DATAOUT_CANNOT_RECOVER) return ret; } return DATAOUT_NORMAL; } static int iscsit_dataout_post_crc_passed( struct iscsi_cmd *cmd, unsigned char *buf) { int ret, send_r2t = 0; struct iscsi_conn *conn = cmd->conn; struct iscsi_seq *seq = NULL; struct iscsi_data *hdr = (struct iscsi_data *) buf; u32 payload_length = ntoh24(hdr->dlength); 
if (cmd->unsolicited_data) { if ((cmd->first_burst_len + payload_length) == conn->sess->sess_ops->FirstBurstLength) { if (iscsit_dataout_update_r2t(cmd, hdr->offset, payload_length) < 0) return DATAOUT_CANNOT_RECOVER; send_r2t = 1; } if (!conn->sess->sess_ops->DataPDUInOrder) { ret = iscsit_dataout_update_datapduinorder_no(cmd, hdr->datasn, (hdr->flags & ISCSI_FLAG_CMD_FINAL)); if (ret == DATAOUT_CANNOT_RECOVER) return ret; } cmd->first_burst_len += payload_length; if (conn->sess->sess_ops->DataSequenceInOrder) cmd->data_sn++; else { seq = cmd->seq_ptr; seq->data_sn++; seq->offset += payload_length; } if (send_r2t) { if (seq) seq->status = DATAOUT_SEQUENCE_COMPLETE; cmd->first_burst_len = 0; cmd->unsolicited_data = 0; } } else { if (conn->sess->sess_ops->DataSequenceInOrder) { if ((cmd->next_burst_len + payload_length) == conn->sess->sess_ops->MaxBurstLength) { if (iscsit_dataout_update_r2t(cmd, hdr->offset, payload_length) < 0) return DATAOUT_CANNOT_RECOVER; send_r2t = 1; } if (!conn->sess->sess_ops->DataPDUInOrder) { ret = iscsit_dataout_update_datapduinorder_no( cmd, hdr->datasn, (hdr->flags & ISCSI_FLAG_CMD_FINAL)); if (ret == DATAOUT_CANNOT_RECOVER) return ret; } cmd->next_burst_len += payload_length; cmd->data_sn++; if (send_r2t) cmd->next_burst_len = 0; } else { seq = cmd->seq_ptr; if ((seq->next_burst_len + payload_length) == seq->xfer_len) { if (iscsit_dataout_update_r2t(cmd, hdr->offset, payload_length) < 0) return DATAOUT_CANNOT_RECOVER; send_r2t = 1; } if (!conn->sess->sess_ops->DataPDUInOrder) { ret = iscsit_dataout_update_datapduinorder_no( cmd, hdr->datasn, (hdr->flags & ISCSI_FLAG_CMD_FINAL)); if (ret == DATAOUT_CANNOT_RECOVER) return ret; } seq->data_sn++; seq->offset += payload_length; seq->next_burst_len += payload_length; if (send_r2t) { seq->next_burst_len = 0; seq->status = DATAOUT_SEQUENCE_COMPLETE; } } } if (send_r2t && conn->sess->sess_ops->DataSequenceInOrder) cmd->data_sn = 0; cmd->write_data_done += payload_length; return 
(cmd->write_data_done == cmd->data_length) ? DATAOUT_SEND_TO_TRANSPORT : (send_r2t) ? DATAOUT_SEND_R2T : DATAOUT_NORMAL; } static int iscsit_dataout_post_crc_failed( struct iscsi_cmd *cmd, unsigned char *buf) { struct iscsi_conn *conn = cmd->conn; struct iscsi_pdu *pdu; struct iscsi_data *hdr = (struct iscsi_data *) buf; u32 payload_length = ntoh24(hdr->dlength); if (conn->sess->sess_ops->DataPDUInOrder) goto recover; /* * The rest of this function is only called when DataPDUInOrder=No. */ pdu = cmd->pdu_ptr; switch (pdu->status) { case ISCSI_PDU_NOT_RECEIVED: pdu->status = ISCSI_PDU_CRC_FAILED; break; case ISCSI_PDU_CRC_FAILED: break; case ISCSI_PDU_TIMED_OUT: pdu->status = ISCSI_PDU_CRC_FAILED; break; default: return DATAOUT_CANNOT_RECOVER; } recover: return iscsit_recover_dataout_sequence(cmd, hdr->offset, payload_length); } /* * Called from iscsit_handle_data_out() before DataOUT Payload is received * and CRC computed. */ extern int iscsit_check_pre_dataout( struct iscsi_cmd *cmd, unsigned char *buf) { int ret; struct iscsi_conn *conn = cmd->conn; ret = iscsit_dataout_within_command_recovery_check(cmd, buf); if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) || (ret == DATAOUT_CANNOT_RECOVER)) return ret; ret = iscsit_dataout_check_datasn(cmd, buf); if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) || (ret == DATAOUT_CANNOT_RECOVER)) return ret; if (cmd->unsolicited_data) { ret = iscsit_dataout_check_unsolicited_sequence(cmd, buf); if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) || (ret == DATAOUT_CANNOT_RECOVER)) return ret; } else { ret = iscsit_dataout_check_sequence(cmd, buf); if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) || (ret == DATAOUT_CANNOT_RECOVER)) return ret; } return (conn->sess->sess_ops->DataPDUInOrder) ? iscsit_dataout_pre_datapduinorder_yes(cmd, buf) : iscsit_dataout_pre_datapduinorder_no(cmd, buf); } /* * Called from iscsit_handle_data_out() after DataOUT Payload is received * and CRC computed. 
*/ int iscsit_check_post_dataout( struct iscsi_cmd *cmd, unsigned char *buf, u8 data_crc_failed) { struct iscsi_conn *conn = cmd->conn; cmd->dataout_timeout_retries = 0; if (!data_crc_failed) return iscsit_dataout_post_crc_passed(cmd, buf); else { if (!conn->sess->sess_ops->ErrorRecoveryLevel) { pr_err("Unable to recover from DataOUT CRC" " failure while ERL=0, closing session.\n"); iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR, 1, 0, buf, cmd); return DATAOUT_CANNOT_RECOVER; } iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR, 0, 0, buf, cmd); return iscsit_dataout_post_crc_failed(cmd, buf); } } static void iscsit_handle_time2retain_timeout(unsigned long data) { struct iscsi_session *sess = (struct iscsi_session *) data; struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; spin_lock_bh(&se_tpg->session_lock); if (sess->time2retain_timer_flags & ISCSI_TF_STOP) { spin_unlock_bh(&se_tpg->session_lock); return; } if (atomic_read(&sess->session_reinstatement)) { pr_err("Exiting Time2Retain handler because" " session_reinstatement=1\n"); spin_unlock_bh(&se_tpg->session_lock); return; } sess->time2retain_timer_flags |= ISCSI_TF_EXPIRED; pr_err("Time2Retain timer expired for SID: %u, cleaning up" " iSCSI session.\n", sess->sid); { struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; if (tiqn) { spin_lock(&tiqn->sess_err_stats.lock); strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name, (void *)sess->sess_ops->InitiatorName); tiqn->sess_err_stats.last_sess_failure_type = ISCSI_SESS_ERR_CXN_TIMEOUT; tiqn->sess_err_stats.cxn_timeout_errors++; sess->conn_timeout_errors++; spin_unlock(&tiqn->sess_err_stats.lock); } } spin_unlock_bh(&se_tpg->session_lock); target_put_session(sess->se_sess); } extern void iscsit_start_time2retain_handler(struct iscsi_session *sess) { int tpg_active; /* * Only start Time2Retain timer when the assoicated TPG is still in * an ACTIVE (eg: not disabled or shutdown) state. 
*/ spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock); tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE); spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock); if (!tpg_active) return; if (sess->time2retain_timer_flags & ISCSI_TF_RUNNING) return; pr_debug("Starting Time2Retain timer for %u seconds on" " SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid); init_timer(&sess->time2retain_timer); sess->time2retain_timer.expires = (get_jiffies_64() + sess->sess_ops->DefaultTime2Retain * HZ); sess->time2retain_timer.data = (unsigned long)sess; sess->time2retain_timer.function = iscsit_handle_time2retain_timeout; sess->time2retain_timer_flags &= ~ISCSI_TF_STOP; sess->time2retain_timer_flags |= ISCSI_TF_RUNNING; add_timer(&sess->time2retain_timer); } /* * Called with spin_lock_bh(&struct se_portal_group->session_lock) held */ extern int iscsit_stop_time2retain_timer(struct iscsi_session *sess) { struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED) return -1; if (!(sess->time2retain_timer_flags & ISCSI_TF_RUNNING)) return 0; sess->time2retain_timer_flags |= ISCSI_TF_STOP; spin_unlock_bh(&se_tpg->session_lock); del_timer_sync(&sess->time2retain_timer); spin_lock_bh(&se_tpg->session_lock); sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING; pr_debug("Stopped Time2Retain Timer for SID: %u\n", sess->sid); return 0; } void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn) { spin_lock_bh(&conn->state_lock); if (atomic_read(&conn->connection_exit)) { spin_unlock_bh(&conn->state_lock); goto sleep; } if (atomic_read(&conn->transport_failed)) { spin_unlock_bh(&conn->state_lock); goto sleep; } spin_unlock_bh(&conn->state_lock); iscsi_thread_set_force_reinstatement(conn); sleep: wait_for_completion(&conn->conn_wait_rcfr_comp); complete(&conn->conn_post_wait_comp); } void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep) { 
spin_lock_bh(&conn->state_lock); if (atomic_read(&conn->connection_exit)) { spin_unlock_bh(&conn->state_lock); return; } if (atomic_read(&conn->transport_failed)) { spin_unlock_bh(&conn->state_lock); return; } if (atomic_read(&conn->connection_reinstatement)) { spin_unlock_bh(&conn->state_lock); return; } if (iscsi_thread_set_force_reinstatement(conn) < 0) { spin_unlock_bh(&conn->state_lock); return; } atomic_set(&conn->connection_reinstatement, 1); if (!sleep) { spin_unlock_bh(&conn->state_lock); return; } atomic_set(&conn->sleep_on_conn_wait_comp, 1); spin_unlock_bh(&conn->state_lock); wait_for_completion(&conn->conn_wait_comp); complete(&conn->conn_post_wait_comp); } void iscsit_fall_back_to_erl0(struct iscsi_session *sess) { pr_debug("Falling back to ErrorRecoveryLevel=0 for SID:" " %u\n", sess->sid); atomic_set(&sess->session_fall_back_to_erl0, 1); } static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn) { struct iscsi_session *sess = conn->sess; if ((sess->sess_ops->ErrorRecoveryLevel == 2) && !atomic_read(&sess->session_reinstatement) && !atomic_read(&sess->session_fall_back_to_erl0)) iscsit_connection_recovery_transport_reset(conn); else { pr_debug("Performing cleanup for failed iSCSI" " Connection ID: %hu from %s\n", conn->cid, sess->sess_ops->InitiatorName); iscsit_close_connection(conn); } } extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) { spin_lock_bh(&conn->state_lock); if (atomic_read(&conn->connection_exit)) { spin_unlock_bh(&conn->state_lock); return; } atomic_set(&conn->connection_exit, 1); if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { spin_unlock_bh(&conn->state_lock); iscsit_close_connection(conn); return; } if (conn->conn_state == TARG_CONN_STATE_CLEANUP_WAIT) { spin_unlock_bh(&conn->state_lock); return; } pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n"); conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT; spin_unlock_bh(&conn->state_lock); iscsit_handle_connection_cleanup(conn); } /* * This 
is the simple function that makes the magic of * sync and steering happen in the follow paradoxical order: * * 0) Receive conn->of_marker (bytes left until next OFMarker) * bytes into an offload buffer. When we pass the exact number * of bytes in conn->of_marker, iscsit_dump_data_payload() and hence * rx_data() will automatically receive the identical u32 marker * values and store it in conn->of_marker_offset; * 1) Now conn->of_marker_offset will contain the offset to the start * of the next iSCSI PDU. Dump these remaining bytes into another * offload buffer. * 2) We are done! * Next byte in the TCP stream will contain the next iSCSI PDU! * Cool Huh?! */ int iscsit_recover_from_unknown_opcode(struct iscsi_conn *conn) { /* * Make sure the remaining bytes to next maker is a sane value. */ if (conn->of_marker > (conn->conn_ops->OFMarkInt * 4)) { pr_err("Remaining bytes to OFMarker: %u exceeds" " OFMarkInt bytes: %u.\n", conn->of_marker, conn->conn_ops->OFMarkInt * 4); return -1; } pr_debug("Advancing %u bytes in TCP stream to get to the" " next OFMarker.\n", conn->of_marker); if (iscsit_dump_data_payload(conn, conn->of_marker, 0) < 0) return -1; /* * Make sure the offset marker we retrived is a valid value. */ if (conn->of_marker_offset > (ISCSI_HDR_LEN + (ISCSI_CRC_LEN * 2) + conn->conn_ops->MaxRecvDataSegmentLength)) { pr_err("OfMarker offset value: %u exceeds limit.\n", conn->of_marker_offset); return -1; } pr_debug("Discarding %u bytes of TCP stream to get to the" " next iSCSI Opcode.\n", conn->of_marker_offset); if (iscsit_dump_data_payload(conn, conn->of_marker_offset, 0) < 0) return -1; return 0; }
gpl-2.0
Snakefreak/i9100kerneljbhk
arch/powerpc/platforms/cell/spufs/context.c
3881
4879
/* * SPU file system -- SPU context management * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * * Author: Arnd Bergmann <arndb@de.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/slab.h> #include <asm/atomic.h> #include <asm/spu.h> #include <asm/spu_csa.h> #include "spufs.h" #include "sputrace.h" atomic_t nr_spu_contexts = ATOMIC_INIT(0); struct spu_context *alloc_spu_context(struct spu_gang *gang) { struct spu_context *ctx; struct timespec ts; ctx = kzalloc(sizeof *ctx, GFP_KERNEL); if (!ctx) goto out; /* Binding to physical processor deferred * until spu_activate(). 
*/ if (spu_init_csa(&ctx->csa)) goto out_free; spin_lock_init(&ctx->mmio_lock); mutex_init(&ctx->mapping_lock); kref_init(&ctx->kref); mutex_init(&ctx->state_mutex); mutex_init(&ctx->run_mutex); init_waitqueue_head(&ctx->ibox_wq); init_waitqueue_head(&ctx->wbox_wq); init_waitqueue_head(&ctx->stop_wq); init_waitqueue_head(&ctx->mfc_wq); init_waitqueue_head(&ctx->run_wq); ctx->state = SPU_STATE_SAVED; ctx->ops = &spu_backing_ops; ctx->owner = get_task_mm(current); INIT_LIST_HEAD(&ctx->rq); INIT_LIST_HEAD(&ctx->aff_list); if (gang) spu_gang_add_ctx(gang, ctx); __spu_update_sched_info(ctx); spu_set_timeslice(ctx); ctx->stats.util_state = SPU_UTIL_IDLE_LOADED; ktime_get_ts(&ts); ctx->stats.tstamp = timespec_to_ns(&ts); atomic_inc(&nr_spu_contexts); goto out; out_free: kfree(ctx); ctx = NULL; out: return ctx; } void destroy_spu_context(struct kref *kref) { struct spu_context *ctx; ctx = container_of(kref, struct spu_context, kref); spu_context_nospu_trace(destroy_spu_context__enter, ctx); mutex_lock(&ctx->state_mutex); spu_deactivate(ctx); mutex_unlock(&ctx->state_mutex); spu_fini_csa(&ctx->csa); if (ctx->gang) spu_gang_remove_ctx(ctx->gang, ctx); if (ctx->prof_priv_kref) kref_put(ctx->prof_priv_kref, ctx->prof_priv_release); BUG_ON(!list_empty(&ctx->rq)); atomic_dec(&nr_spu_contexts); kfree(ctx->switch_log); kfree(ctx); } struct spu_context * get_spu_context(struct spu_context *ctx) { kref_get(&ctx->kref); return ctx; } int put_spu_context(struct spu_context *ctx) { return kref_put(&ctx->kref, &destroy_spu_context); } /* give up the mm reference when the context is about to be destroyed */ void spu_forget(struct spu_context *ctx) { struct mm_struct *mm; /* * This is basically an open-coded spu_acquire_saved, except that * we don't acquire the state mutex interruptible, and we don't * want this context to be rescheduled on release. 
*/ mutex_lock(&ctx->state_mutex); if (ctx->state != SPU_STATE_SAVED) spu_deactivate(ctx); mm = ctx->owner; ctx->owner = NULL; mmput(mm); spu_release(ctx); } void spu_unmap_mappings(struct spu_context *ctx) { mutex_lock(&ctx->mapping_lock); if (ctx->local_store) unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1); if (ctx->mfc) unmap_mapping_range(ctx->mfc, 0, SPUFS_MFC_MAP_SIZE, 1); if (ctx->cntl) unmap_mapping_range(ctx->cntl, 0, SPUFS_CNTL_MAP_SIZE, 1); if (ctx->signal1) unmap_mapping_range(ctx->signal1, 0, SPUFS_SIGNAL_MAP_SIZE, 1); if (ctx->signal2) unmap_mapping_range(ctx->signal2, 0, SPUFS_SIGNAL_MAP_SIZE, 1); if (ctx->mss) unmap_mapping_range(ctx->mss, 0, SPUFS_MSS_MAP_SIZE, 1); if (ctx->psmap) unmap_mapping_range(ctx->psmap, 0, SPUFS_PS_MAP_SIZE, 1); mutex_unlock(&ctx->mapping_lock); } /** * spu_acquire_saved - lock spu contex and make sure it is in saved state * @ctx: spu contex to lock */ int spu_acquire_saved(struct spu_context *ctx) { int ret; spu_context_nospu_trace(spu_acquire_saved__enter, ctx); ret = spu_acquire(ctx); if (ret) return ret; if (ctx->state != SPU_STATE_SAVED) { set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags); spu_deactivate(ctx); } return 0; } /** * spu_release_saved - unlock spu context and return it to the runqueue * @ctx: context to unlock */ void spu_release_saved(struct spu_context *ctx) { BUG_ON(ctx->state != SPU_STATE_SAVED); if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags) && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) spu_activate(ctx, 0); spu_release(ctx); }
gpl-2.0
noobnl/msm-jf-kernel
arch/arm/mach-msm/board-mahimahi-rfkill.c
4649
2972
/* * Copyright (C) 2009 Google, Inc. * Copyright (C) 2009 HTC Corporation. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/delay.h> #include <linux/device.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/rfkill.h> #include <asm/gpio.h> #include <asm/mach-types.h> #include "board-mahimahi.h" static struct rfkill *bt_rfk; static const char bt_name[] = "bcm4329"; static int bluetooth_set_power(void *data, bool blocked) { if (!blocked) { gpio_direction_output(MAHIMAHI_GPIO_BT_RESET_N, 1); gpio_direction_output(MAHIMAHI_GPIO_BT_SHUTDOWN_N, 1); } else { gpio_direction_output(MAHIMAHI_GPIO_BT_SHUTDOWN_N, 0); gpio_direction_output(MAHIMAHI_GPIO_BT_RESET_N, 0); } return 0; } static struct rfkill_ops mahimahi_rfkill_ops = { .set_block = bluetooth_set_power, }; static int mahimahi_rfkill_probe(struct platform_device *pdev) { int rc = 0; bool default_state = true; /* off */ rc = gpio_request(MAHIMAHI_GPIO_BT_RESET_N, "bt_reset"); if (rc) goto err_gpio_reset; rc = gpio_request(MAHIMAHI_GPIO_BT_SHUTDOWN_N, "bt_shutdown"); if (rc) goto err_gpio_shutdown; bluetooth_set_power(NULL, default_state); bt_rfk = rfkill_alloc(bt_name, &pdev->dev, RFKILL_TYPE_BLUETOOTH, &mahimahi_rfkill_ops, NULL); if (!bt_rfk) { rc = -ENOMEM; goto err_rfkill_alloc; } rfkill_set_states(bt_rfk, default_state, false); /* userspace cannot take exclusive control */ rc = rfkill_register(bt_rfk); if (rc) goto err_rfkill_reg; return 0; err_rfkill_reg: rfkill_destroy(bt_rfk); err_rfkill_alloc: gpio_free(MAHIMAHI_GPIO_BT_SHUTDOWN_N); err_gpio_shutdown: 
gpio_free(MAHIMAHI_GPIO_BT_RESET_N); err_gpio_reset: return rc; } static int mahimahi_rfkill_remove(struct platform_device *dev) { rfkill_unregister(bt_rfk); rfkill_destroy(bt_rfk); gpio_free(MAHIMAHI_GPIO_BT_SHUTDOWN_N); gpio_free(MAHIMAHI_GPIO_BT_RESET_N); return 0; } static struct platform_driver mahimahi_rfkill_driver = { .probe = mahimahi_rfkill_probe, .remove = mahimahi_rfkill_remove, .driver = { .name = "mahimahi_rfkill", .owner = THIS_MODULE, }, }; static int __init mahimahi_rfkill_init(void) { if (!machine_is_mahimahi()) return 0; return platform_driver_register(&mahimahi_rfkill_driver); } static void __exit mahimahi_rfkill_exit(void) { platform_driver_unregister(&mahimahi_rfkill_driver); } module_init(mahimahi_rfkill_init); module_exit(mahimahi_rfkill_exit); MODULE_DESCRIPTION("mahimahi rfkill"); MODULE_AUTHOR("Nick Pelly <npelly@google.com>"); MODULE_LICENSE("GPL");
gpl-2.0
diszell2008/A830
drivers/media/video/gspca/pac7311.c
4905
23000
/* * Pixart PAC7311 library * Copyright (C) 2005 Thomas Kaiser thomas@kaiser-linux.li * * V4L2 by Jean-Francois Moine <http://moinejf.free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Some documentation about various registers as determined by trial and error. When the register addresses differ between the 7202 and the 7311 the 2 different addresses are written as 7302addr/7311addr, when one of the 2 addresses is a - sign that register description is not valid for the matching IC. Register page 1: Address Description -/0x08 Unknown compressor related, must always be 8 except when not in 640x480 resolution and page 4 reg 2 <= 3 then set it to 9 ! -/0x1b Auto white balance related, bit 0 is AWB enable (inverted) bits 345 seem to toggle per color gains on/off (inverted) 0x78 Global control, bit 6 controls the LED (inverted) -/0x80 JPEG compression ratio ? Best not touched Register page 3/4: Address Description 0x02 Clock divider 2-63, fps =~ 60 / val. Must be a multiple of 3 on the 7302, so one of 3, 6, 9, ..., except when between 6 and 12? 
-/0x0f Master gain 1-245, low value = high gain 0x10/- Master gain 0-31 -/0x10 Another gain 0-15, limited influence (1-2x gain I guess) 0x21 Bitfield: 0-1 unused, 2-3 vflip/hflip, 4-5 unknown, 6-7 unused -/0x27 Seems to toggle various gains on / off, Setting bit 7 seems to completely disable the analog amplification block. Set to 0x68 for max gain, 0x14 for minimal gain. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "pac7311" #include <linux/input.h> #include "gspca.h" MODULE_AUTHOR("Thomas Kaiser thomas@kaiser-linux.li"); MODULE_DESCRIPTION("Pixart PAC7311"); MODULE_LICENSE("GPL"); /* specific webcam descriptor for pac7311 */ struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ unsigned char contrast; unsigned char gain; unsigned char exposure; unsigned char autogain; __u8 hflip; __u8 vflip; u8 sof_read; u8 autogain_ignore_frames; atomic_t avg_lum; }; /* V4L2 controls supported by the driver */ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val); static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val); static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val); static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val); static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val); static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val); static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val); static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val); static const struct ctrl sd_ctrls[] = { /* This control is for both the 7302 and the 7311 */ { { .id = V4L2_CID_CONTRAST, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Contrast", .minimum = 0, #define CONTRAST_MAX 255 .maximum = CONTRAST_MAX, .step = 1, #define 
CONTRAST_DEF 127 .default_value = CONTRAST_DEF, }, .set = sd_setcontrast, .get = sd_getcontrast, }, /* All controls below are for both the 7302 and the 7311 */ { { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gain", .minimum = 0, #define GAIN_MAX 255 .maximum = GAIN_MAX, .step = 1, #define GAIN_DEF 127 #define GAIN_KNEE 255 /* Gain seems to cause little noise on the pac73xx */ .default_value = GAIN_DEF, }, .set = sd_setgain, .get = sd_getgain, }, { { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Exposure", .minimum = 0, #define EXPOSURE_MAX 255 .maximum = EXPOSURE_MAX, .step = 1, #define EXPOSURE_DEF 16 /* 32 ms / 30 fps */ #define EXPOSURE_KNEE 50 /* 100 ms / 10 fps */ .default_value = EXPOSURE_DEF, }, .set = sd_setexposure, .get = sd_getexposure, }, { { .id = V4L2_CID_AUTOGAIN, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Auto Gain", .minimum = 0, .maximum = 1, .step = 1, #define AUTOGAIN_DEF 1 .default_value = AUTOGAIN_DEF, }, .set = sd_setautogain, .get = sd_getautogain, }, { { .id = V4L2_CID_HFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Mirror", .minimum = 0, .maximum = 1, .step = 1, #define HFLIP_DEF 0 .default_value = HFLIP_DEF, }, .set = sd_sethflip, .get = sd_gethflip, }, { { .id = V4L2_CID_VFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Vflip", .minimum = 0, .maximum = 1, .step = 1, #define VFLIP_DEF 0 .default_value = VFLIP_DEF, }, .set = sd_setvflip, .get = sd_getvflip, }, }; static const struct v4l2_pix_format vga_mode[] = { {160, 120, V4L2_PIX_FMT_PJPG, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 2}, {320, 240, V4L2_PIX_FMT_PJPG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1}, {640, 480, V4L2_PIX_FMT_PJPG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0}, }; #define LOAD_PAGE4 254 #define 
END_OF_SEQUENCE 0 /* pac 7311 */ static const __u8 init_7311[] = { 0x78, 0x40, /* Bit_0=start stream, Bit_6=LED */ 0x78, 0x40, /* Bit_0=start stream, Bit_6=LED */ 0x78, 0x44, /* Bit_0=start stream, Bit_6=LED */ 0xff, 0x04, 0x27, 0x80, 0x28, 0xca, 0x29, 0x53, 0x2a, 0x0e, 0xff, 0x01, 0x3e, 0x20, }; static const __u8 start_7311[] = { /* index, len, [value]* */ 0xff, 1, 0x01, /* page 1 */ 0x02, 43, 0x48, 0x0a, 0x40, 0x08, 0x00, 0x00, 0x08, 0x00, 0x06, 0xff, 0x11, 0xff, 0x5a, 0x30, 0x90, 0x4c, 0x00, 0x07, 0x00, 0x0a, 0x10, 0x00, 0xa0, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 42, 0x00, 0x00, 0x78, 0x52, 0x4a, 0x52, 0x78, 0x6e, 0x48, 0x46, 0x48, 0x6e, 0x5f, 0x49, 0x42, 0x49, 0x5f, 0x5f, 0x49, 0x42, 0x49, 0x5f, 0x6e, 0x48, 0x46, 0x48, 0x6e, 0x78, 0x52, 0x4a, 0x52, 0x78, 0x00, 0x00, 0x09, 0x1b, 0x34, 0x49, 0x5c, 0x9b, 0xd0, 0xff, 0x78, 6, 0x44, 0x00, 0xf2, 0x01, 0x01, 0x80, 0x7f, 18, 0x2a, 0x1c, 0x00, 0xc8, 0x02, 0x58, 0x03, 0x84, 0x12, 0x00, 0x1a, 0x04, 0x08, 0x0c, 0x10, 0x14, 0x18, 0x20, 0x96, 3, 0x01, 0x08, 0x04, 0xa0, 4, 0x44, 0x44, 0x44, 0x04, 0xf0, 13, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x20, 0x00, 0x3f, 0x00, 0x0a, 0x01, 0x00, 0xff, 1, 0x04, /* page 4 */ 0, LOAD_PAGE4, /* load the page 4 */ 0x11, 1, 0x01, 0, END_OF_SEQUENCE /* end of sequence */ }; #define SKIP 0xaa /* page 4 - the value SKIP says skip the index - see reg_w_page() */ static const __u8 page4_7311[] = { SKIP, SKIP, 0x04, 0x54, 0x07, 0x2b, 0x09, 0x0f, 0x09, 0x00, SKIP, SKIP, 0x07, 0x00, 0x00, 0x62, 0x08, SKIP, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xa0, 0x01, 0xf4, SKIP, SKIP, 0x00, 0x08, SKIP, 0x03, SKIP, 0x00, 0x68, 0xca, 0x10, 0x06, 0x78, 0x00, 0x00, 0x00, 0x00, 0x23, 0x28, 0x04, 0x11, 0x00, 0x00 }; static void reg_w_buf(struct gspca_dev *gspca_dev, __u8 index, const u8 *buffer, int len) { int ret; if (gspca_dev->usb_err < 0) return; memcpy(gspca_dev->usb_buf, buffer, len); ret = 
usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, /* value */ index, gspca_dev->usb_buf, len, 500); if (ret < 0) { pr_err("reg_w_buf() failed index 0x%02x, error %d\n", index, ret); gspca_dev->usb_err = ret; } } static void reg_w(struct gspca_dev *gspca_dev, __u8 index, __u8 value) { int ret; if (gspca_dev->usb_err < 0) return; gspca_dev->usb_buf[0] = value; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, gspca_dev->usb_buf, 1, 500); if (ret < 0) { pr_err("reg_w() failed index 0x%02x, value 0x%02x, error %d\n", index, value, ret); gspca_dev->usb_err = ret; } } static void reg_w_seq(struct gspca_dev *gspca_dev, const __u8 *seq, int len) { while (--len >= 0) { reg_w(gspca_dev, seq[0], seq[1]); seq += 2; } } /* load the beginning of a page */ static void reg_w_page(struct gspca_dev *gspca_dev, const __u8 *page, int len) { int index; int ret = 0; if (gspca_dev->usb_err < 0) return; for (index = 0; index < len; index++) { if (page[index] == SKIP) /* skip this index */ continue; gspca_dev->usb_buf[0] = page[index]; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, gspca_dev->usb_buf, 1, 500); if (ret < 0) { pr_err("reg_w_page() failed index 0x%02x, value 0x%02x, error %d\n", index, page[index], ret); gspca_dev->usb_err = ret; break; } } } /* output a variable sequence */ static void reg_w_var(struct gspca_dev *gspca_dev, const __u8 *seq, const __u8 *page4, unsigned int page4_len) { int index, len; for (;;) { index = *seq++; len = *seq++; switch (len) { case END_OF_SEQUENCE: return; case LOAD_PAGE4: reg_w_page(gspca_dev, page4, page4_len); break; default: if (len > USB_BUF_SZ) { PDEBUG(D_ERR|D_STREAM, "Incorrect variable sequence"); return; } while (len > 0) { if (len < 8) { 
reg_w_buf(gspca_dev, index, seq, len); seq += len; break; } reg_w_buf(gspca_dev, index, seq, 8); seq += 8; index += 8; len -= 8; } } } /* not reached */ } /* this function is called at probe time for pac7311 */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; cam = &gspca_dev->cam; PDEBUG(D_CONF, "Find Sensor PAC7311"); cam->cam_mode = vga_mode; cam->nmodes = ARRAY_SIZE(vga_mode); sd->contrast = CONTRAST_DEF; sd->gain = GAIN_DEF; sd->exposure = EXPOSURE_DEF; sd->autogain = AUTOGAIN_DEF; sd->hflip = HFLIP_DEF; sd->vflip = VFLIP_DEF; return 0; } /* This function is used by pac7311 only */ static void setcontrast(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w(gspca_dev, 0xff, 0x04); reg_w(gspca_dev, 0x10, sd->contrast >> 4); /* load registers to sensor (Bit 0, auto clear) */ reg_w(gspca_dev, 0x11, 0x01); } static void setgain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int gain = GAIN_MAX - sd->gain; if (gain < 1) gain = 1; else if (gain > 245) gain = 245; reg_w(gspca_dev, 0xff, 0x04); /* page 4 */ reg_w(gspca_dev, 0x0e, 0x00); reg_w(gspca_dev, 0x0f, gain); /* load registers to sensor (Bit 0, auto clear) */ reg_w(gspca_dev, 0x11, 0x01); } static void setexposure(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; __u8 reg; /* register 2 of frame 3/4 contains the clock divider configuring the no fps according to the formula: 60 / reg. sd->exposure is the desired exposure time in ms. 
*/ reg = 120 * sd->exposure / 1000; if (reg < 2) reg = 2; else if (reg > 63) reg = 63; reg_w(gspca_dev, 0xff, 0x04); /* page 4 */ reg_w(gspca_dev, 0x02, reg); /* Page 1 register 8 must always be 0x08 except when not in 640x480 mode and Page3/4 reg 2 <= 3 then it must be 9 */ reg_w(gspca_dev, 0xff, 0x01); if (gspca_dev->cam.cam_mode[(int)gspca_dev->curr_mode].priv && reg <= 3) { reg_w(gspca_dev, 0x08, 0x09); } else { reg_w(gspca_dev, 0x08, 0x08); } /* load registers to sensor (Bit 0, auto clear) */ reg_w(gspca_dev, 0x11, 0x01); } static void sethvflip(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; __u8 data; reg_w(gspca_dev, 0xff, 0x04); /* page 4 */ data = (sd->hflip ? 0x04 : 0x00) | (sd->vflip ? 0x08 : 0x00); reg_w(gspca_dev, 0x21, data); /* load registers to sensor (Bit 0, auto clear) */ reg_w(gspca_dev, 0x11, 0x01); } /* this function is called at probe and resume time for pac7311 */ static int sd_init(struct gspca_dev *gspca_dev) { reg_w_seq(gspca_dev, init_7311, sizeof(init_7311)/2); return gspca_dev->usb_err; } static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; sd->sof_read = 0; reg_w_var(gspca_dev, start_7311, page4_7311, sizeof(page4_7311)); setcontrast(gspca_dev); setgain(gspca_dev); setexposure(gspca_dev); sethvflip(gspca_dev); /* set correct resolution */ switch (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) { case 2: /* 160x120 pac7311 */ reg_w(gspca_dev, 0xff, 0x01); reg_w(gspca_dev, 0x17, 0x20); reg_w(gspca_dev, 0x87, 0x10); break; case 1: /* 320x240 pac7311 */ reg_w(gspca_dev, 0xff, 0x01); reg_w(gspca_dev, 0x17, 0x30); reg_w(gspca_dev, 0x87, 0x11); break; case 0: /* 640x480 */ reg_w(gspca_dev, 0xff, 0x01); reg_w(gspca_dev, 0x17, 0x00); reg_w(gspca_dev, 0x87, 0x12); break; } sd->sof_read = 0; sd->autogain_ignore_frames = 0; atomic_set(&sd->avg_lum, -1); /* start stream */ reg_w(gspca_dev, 0xff, 0x01); reg_w(gspca_dev, 0x78, 0x05); return gspca_dev->usb_err; } static void 
sd_stopN(struct gspca_dev *gspca_dev) { reg_w(gspca_dev, 0xff, 0x04); reg_w(gspca_dev, 0x27, 0x80); reg_w(gspca_dev, 0x28, 0xca); reg_w(gspca_dev, 0x29, 0x53); reg_w(gspca_dev, 0x2a, 0x0e); reg_w(gspca_dev, 0xff, 0x01); reg_w(gspca_dev, 0x3e, 0x20); reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */ reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */ reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */ } /* called on streamoff with alt 0 and on disconnect for 7311 */ static void sd_stop0(struct gspca_dev *gspca_dev) { } /* Include pac common sof detection functions */ #include "pac_common.h" static void do_autogain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int avg_lum = atomic_read(&sd->avg_lum); int desired_lum, deadzone; if (avg_lum == -1) return; desired_lum = 200; deadzone = 20; if (sd->autogain_ignore_frames > 0) sd->autogain_ignore_frames--; else if (gspca_auto_gain_n_exposure(gspca_dev, avg_lum, desired_lum, deadzone, GAIN_KNEE, EXPOSURE_KNEE)) sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES; } /* JPEG header, part 1 */ static const unsigned char pac_jpeg_header1[] = { 0xff, 0xd8, /* SOI: Start of Image */ 0xff, 0xc0, /* SOF0: Start of Frame (Baseline DCT) */ 0x00, 0x11, /* length = 17 bytes (including this length field) */ 0x08 /* Precision: 8 */ /* 2 bytes is placed here: number of image lines */ /* 2 bytes is placed here: samples per line */ }; /* JPEG header, continued */ static const unsigned char pac_jpeg_header2[] = { 0x03, /* Number of image components: 3 */ 0x01, 0x21, 0x00, /* ID=1, Subsampling 1x1, Quantization table: 0 */ 0x02, 0x11, 0x01, /* ID=2, Subsampling 2x1, Quantization table: 1 */ 0x03, 0x11, 0x01, /* ID=3, Subsampling 2x1, Quantization table: 1 */ 0xff, 0xda, /* SOS: Start Of Scan */ 0x00, 0x0c, /* length = 12 bytes (including this length field) */ 0x03, /* number of components: 3 */ 0x01, 0x00, /* selector 1, table 0x00 */ 0x02, 0x11, /* selector 2, table 
0x11 */ 0x03, 0x11, /* selector 3, table 0x11 */ 0x00, 0x3f, /* Spectral selection: 0 .. 63 */ 0x00 /* Successive approximation: 0 */ }; static void pac_start_frame(struct gspca_dev *gspca_dev, __u16 lines, __u16 samples_per_line) { unsigned char tmpbuf[4]; gspca_frame_add(gspca_dev, FIRST_PACKET, pac_jpeg_header1, sizeof(pac_jpeg_header1)); tmpbuf[0] = lines >> 8; tmpbuf[1] = lines & 0xff; tmpbuf[2] = samples_per_line >> 8; tmpbuf[3] = samples_per_line & 0xff; gspca_frame_add(gspca_dev, INTER_PACKET, tmpbuf, sizeof(tmpbuf)); gspca_frame_add(gspca_dev, INTER_PACKET, pac_jpeg_header2, sizeof(pac_jpeg_header2)); } /* this function is run at interrupt level */ static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; u8 *image; unsigned char *sof; sof = pac_find_sof(&sd->sof_read, data, len); if (sof) { int n, lum_offset, footer_length; /* 6 bytes after the FF D9 EOF marker a number of lumination bytes are send corresponding to different parts of the image, the 14th and 15th byte after the EOF seem to correspond to the center of the image */ lum_offset = 24 + sizeof pac_sof_marker; footer_length = 26; /* Finish decoding current frame */ n = (sof - data) - (footer_length + sizeof pac_sof_marker); if (n < 0) { gspca_dev->image_len += n; n = 0; } else { gspca_frame_add(gspca_dev, INTER_PACKET, data, n); } image = gspca_dev->image; if (image != NULL && image[gspca_dev->image_len - 2] == 0xff && image[gspca_dev->image_len - 1] == 0xd9) gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); n = sof - data; len -= n; data = sof; /* Get average lumination */ if (gspca_dev->last_packet_type == LAST_PACKET && n >= lum_offset) atomic_set(&sd->avg_lum, data[-lum_offset] + data[-lum_offset + 1]); else atomic_set(&sd->avg_lum, -1); /* Start the new frame with the jpeg header */ pac_start_frame(gspca_dev, gspca_dev->height, gspca_dev->width); } gspca_frame_add(gspca_dev, INTER_PACKET, data, 
len); } static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->contrast = val; if (gspca_dev->streaming) setcontrast(gspca_dev); return gspca_dev->usb_err; } static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->contrast; return 0; } static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->gain = val; if (gspca_dev->streaming) setgain(gspca_dev); return gspca_dev->usb_err; } static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->gain; return 0; } static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->exposure = val; if (gspca_dev->streaming) setexposure(gspca_dev); return gspca_dev->usb_err; } static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->exposure; return 0; } static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->autogain = val; /* when switching to autogain set defaults to make sure we are on a valid point of the autogain gain / exposure knee graph, and give this change time to take effect before doing autogain. 
*/ if (sd->autogain) { sd->exposure = EXPOSURE_DEF; sd->gain = GAIN_DEF; if (gspca_dev->streaming) { sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES; setexposure(gspca_dev); setgain(gspca_dev); } } return gspca_dev->usb_err; } static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->autogain; return 0; } static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->hflip = val; if (gspca_dev->streaming) sethvflip(gspca_dev); return gspca_dev->usb_err; } static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->hflip; return 0; } static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->vflip = val; if (gspca_dev->streaming) sethvflip(gspca_dev); return gspca_dev->usb_err; } static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->vflip; return 0; } #if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet data */ int len) /* interrupt packet length */ { int ret = -EINVAL; u8 data0, data1; if (len == 2) { data0 = data[0]; data1 = data[1]; if ((data0 == 0x00 && data1 == 0x11) || (data0 == 0x22 && data1 == 0x33) || (data0 == 0x44 && data1 == 0x55) || (data0 == 0x66 && data1 == 0x77) || (data0 == 0x88 && data1 == 0x99) || (data0 == 0xaa && data1 == 0xbb) || (data0 == 0xcc && data1 == 0xdd) || (data0 == 0xee && data1 == 0xff)) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); input_sync(gspca_dev->input_dev); input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); ret = 0; } } return ret; } #endif /* sub-driver description for pac7311 */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .ctrls = sd_ctrls, .nctrls = ARRAY_SIZE(sd_ctrls), .config = 
sd_config, .init = sd_init, .start = sd_start, .stopN = sd_stopN, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, .dq_callback = do_autogain, #if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE) .int_pkt_scan = sd_int_pkt_scan, #endif }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x093a, 0x2600)}, {USB_DEVICE(0x093a, 0x2601)}, {USB_DEVICE(0x093a, 0x2603)}, {USB_DEVICE(0x093a, 0x2608)}, {USB_DEVICE(0x093a, 0x260e)}, {USB_DEVICE(0x093a, 0x260f)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
gpl-2.0
ngxson/android_kernel_sony_msm8x27
drivers/media/video/gspca/sq905.c
4905
12995
/* * SQ905 subdriver * * Copyright (C) 2008, 2009 Adam Baker and Theodore Kilgore * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * History and Acknowledgments * * The original Linux driver for SQ905 based cameras was written by * Marcell Lengyel and furter developed by many other contributors * and is available from http://sourceforge.net/projects/sqcam/ * * This driver takes advantage of the reverse engineering work done for * that driver and for libgphoto2 but shares no code with them. * * This driver has used as a base the finepix driver and other gspca * based drivers and may still contain code fragments taken from those * drivers. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "sq905" #include <linux/workqueue.h> #include <linux/slab.h> #include "gspca.h" MODULE_AUTHOR("Adam Baker <linux@baker-net.org.uk>, " "Theodore Kilgore <kilgota@auburn.edu>"); MODULE_DESCRIPTION("GSPCA/SQ905 USB Camera Driver"); MODULE_LICENSE("GPL"); /* Default timeouts, in ms */ #define SQ905_CMD_TIMEOUT 500 #define SQ905_DATA_TIMEOUT 1000 /* Maximum transfer size to use. */ #define SQ905_MAX_TRANSFER 0x8000 #define FRAME_HEADER_LEN 64 /* The known modes, or registers. These go in the "value" slot. 
*/ /* 00 is "none" obviously */ #define SQ905_BULK_READ 0x03 /* precedes any bulk read */ #define SQ905_COMMAND 0x06 /* precedes the command codes below */ #define SQ905_PING 0x07 /* when reading an "idling" command */ #define SQ905_READ_DONE 0xc0 /* ack bulk read completed */ /* Any non-zero value in the bottom 2 bits of the 2nd byte of * the ID appears to indicate the camera can do 640*480. If the * LSB of that byte is set the image is just upside down, otherwise * it is rotated 180 degrees. */ #define SQ905_HIRES_MASK 0x00000300 #define SQ905_ORIENTATION_MASK 0x00000100 /* Some command codes. These go in the "index" slot. */ #define SQ905_ID 0xf0 /* asks for model string */ #define SQ905_CONFIG 0x20 /* gets photo alloc. table, not used here */ #define SQ905_DATA 0x30 /* accesses photo data, not used here */ #define SQ905_CLEAR 0xa0 /* clear everything */ #define SQ905_CAPTURE_LOW 0x60 /* Starts capture at 160x120 */ #define SQ905_CAPTURE_MED 0x61 /* Starts capture at 320x240 */ #define SQ905_CAPTURE_HIGH 0x62 /* Starts capture at 640x480 (some cams only) */ /* note that the capture command also controls the output dimensions */ /* Structure to hold all of our device specific stuff */ struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ /* * Driver stuff */ struct work_struct work_struct; struct workqueue_struct *work_thread; }; static struct v4l2_pix_format sq905_mode[] = { { 160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, { 320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, { 640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0} }; /* * Send a command to the camera. 
*/ static int sq905_command(struct gspca_dev *gspca_dev, u16 index) { int ret; gspca_dev->usb_buf[0] = '\0'; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_COMMAND, index, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_PING, 0, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed 2 (%d)\n", __func__, ret); return ret; } return 0; } /* * Acknowledge the end of a frame - see warning on sq905_command. */ static int sq905_ack_frame(struct gspca_dev *gspca_dev) { int ret; gspca_dev->usb_buf[0] = '\0'; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_READ_DONE, 0, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } return 0; } /* * request and read a block of data - see warning on sq905_command. 
*/ static int sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock) { int ret; int act_len; gspca_dev->usb_buf[0] = '\0'; if (need_lock) mutex_lock(&gspca_dev->usb_lock); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_BULK_READ, size, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (need_lock) mutex_unlock(&gspca_dev->usb_lock); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x81), data, size, &act_len, SQ905_DATA_TIMEOUT); /* successful, it returns 0, otherwise negative */ if (ret < 0 || act_len != size) { pr_err("bulk read fail (%d) len %d/%d\n", ret, act_len, size); return -EIO; } return 0; } /* This function is called as a workqueue function and runs whenever the camera * is streaming data. Because it is a workqueue function it is allowed to sleep * so we can use synchronous USB calls. To avoid possible collisions with other * threads attempting to use the camera's USB interface we take the gspca * usb_lock when performing USB operations. In practice the only thing we need * to protect against is the usb_set_interface call that gspca makes during * stream_off as the camera doesn't provide any controls that the user could try * to change. */ static void sq905_dostream(struct work_struct *work) { struct sd *dev = container_of(work, struct sd, work_struct); struct gspca_dev *gspca_dev = &dev->gspca_dev; int bytes_left; /* bytes remaining in current frame. */ int data_len; /* size to use for the next read. */ int header_read; /* true if we have already read the frame header. 
*/ int packet_type; int frame_sz; int ret; u8 *data; u8 *buffer; buffer = kmalloc(SQ905_MAX_TRANSFER, GFP_KERNEL | GFP_DMA); if (!buffer) { pr_err("Couldn't allocate USB buffer\n"); goto quit_stream; } frame_sz = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].sizeimage + FRAME_HEADER_LEN; while (gspca_dev->present && gspca_dev->streaming) { /* request some data and then read it until we have * a complete frame. */ bytes_left = frame_sz; header_read = 0; /* Note we do not check for gspca_dev->streaming here, as we must finish reading an entire frame, otherwise the next time we stream we start reading in the middle of a frame. */ while (bytes_left > 0 && gspca_dev->present) { data_len = bytes_left > SQ905_MAX_TRANSFER ? SQ905_MAX_TRANSFER : bytes_left; ret = sq905_read_data(gspca_dev, buffer, data_len, 1); if (ret < 0) goto quit_stream; PDEBUG(D_PACK, "Got %d bytes out of %d for frame", data_len, bytes_left); bytes_left -= data_len; data = buffer; if (!header_read) { packet_type = FIRST_PACKET; /* The first 64 bytes of each frame are * a header full of FF 00 bytes */ data += FRAME_HEADER_LEN; data_len -= FRAME_HEADER_LEN; header_read = 1; } else if (bytes_left == 0) { packet_type = LAST_PACKET; } else { packet_type = INTER_PACKET; } gspca_frame_add(gspca_dev, packet_type, data, data_len); /* If entire frame fits in one packet we still need to add a LAST_PACKET */ if (packet_type == FIRST_PACKET && bytes_left == 0) gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); } if (gspca_dev->present) { /* acknowledge the frame */ mutex_lock(&gspca_dev->usb_lock); ret = sq905_ack_frame(gspca_dev); mutex_unlock(&gspca_dev->usb_lock); if (ret < 0) goto quit_stream; } } quit_stream: if (gspca_dev->present) { mutex_lock(&gspca_dev->usb_lock); sq905_command(gspca_dev, SQ905_CLEAR); mutex_unlock(&gspca_dev->usb_lock); } kfree(buffer); } /* This function is called at probe time just before sd_init */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { 
struct cam *cam = &gspca_dev->cam; struct sd *dev = (struct sd *) gspca_dev; /* We don't use the buffer gspca allocates so make it small. */ cam->bulk = 1; cam->bulk_size = 64; INIT_WORK(&dev->work_struct, sq905_dostream); return 0; } /* called on streamoff with alt==0 and on disconnect */ /* the usb_lock is held at entry - restore on exit */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; /* wait for the work queue to terminate */ mutex_unlock(&gspca_dev->usb_lock); /* This waits for sq905_dostream to finish */ destroy_workqueue(dev->work_thread); dev->work_thread = NULL; mutex_lock(&gspca_dev->usb_lock); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { u32 ident; int ret; /* connect to the camera and read * the model ID and process that and put it away. */ ret = sq905_command(gspca_dev, SQ905_CLEAR); if (ret < 0) return ret; ret = sq905_command(gspca_dev, SQ905_ID); if (ret < 0) return ret; ret = sq905_read_data(gspca_dev, gspca_dev->usb_buf, 4, 0); if (ret < 0) return ret; /* usb_buf is allocated with kmalloc so is aligned. * Camera model number is the right way round if we assume this * reverse engineered ID is supposed to be big endian. */ ident = be32_to_cpup((__be32 *)gspca_dev->usb_buf); ret = sq905_command(gspca_dev, SQ905_CLEAR); if (ret < 0) return ret; PDEBUG(D_CONF, "SQ905 camera ID %08x detected", ident); gspca_dev->cam.cam_mode = sq905_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(sq905_mode); if (!(ident & SQ905_HIRES_MASK)) gspca_dev->cam.nmodes--; if (ident & SQ905_ORIENTATION_MASK) gspca_dev->cam.input_flags = V4L2_IN_ST_VFLIP; else gspca_dev->cam.input_flags = V4L2_IN_ST_VFLIP | V4L2_IN_ST_HFLIP; return 0; } /* Set up for getting frames. 
*/ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; int ret; /* "Open the shutter" and set size, to start capture */ switch (gspca_dev->curr_mode) { default: /* case 2: */ PDEBUG(D_STREAM, "Start streaming at high resolution"); ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_HIGH); break; case 1: PDEBUG(D_STREAM, "Start streaming at medium resolution"); ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_MED); break; case 0: PDEBUG(D_STREAM, "Start streaming at low resolution"); ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_LOW); } if (ret < 0) { PDEBUG(D_ERR, "Start streaming command failed"); return ret; } /* Start the workqueue function to do the streaming */ dev->work_thread = create_singlethread_workqueue(MODULE_NAME); queue_work(dev->work_thread, &dev->work_struct); return 0; } /* Table of supported USB devices */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x2770, 0x9120)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stop0 = sd_stop0, }; /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
gpl-2.0
CyanogenMod/android_kernel_huawei_msm8226
drivers/staging/rtl8192u/ieee80211/compress.c
7977
1591
/* * Cryptographic API. * * Compression operations. * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <linux/types.h> /*#include <linux/crypto.h>*/ #include "rtl_crypto.h" #include <linux/errno.h> #include <linux/scatterlist.h> #include <linux/string.h> #include "internal.h" static int crypto_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { return tfm->__crt_alg->cra_compress.coa_compress(crypto_tfm_ctx(tfm), src, slen, dst, dlen); } static int crypto_decompress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { return tfm->__crt_alg->cra_compress.coa_decompress(crypto_tfm_ctx(tfm), src, slen, dst, dlen); } int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags) { return flags ? -EINVAL : 0; } int crypto_init_compress_ops(struct crypto_tfm *tfm) { int ret = 0; struct compress_tfm *ops = &tfm->crt_compress; ret = tfm->__crt_alg->cra_compress.coa_init(crypto_tfm_ctx(tfm)); if (ret) goto out; ops->cot_compress = crypto_compress; ops->cot_decompress = crypto_decompress; out: return ret; } void crypto_exit_compress_ops(struct crypto_tfm *tfm) { tfm->__crt_alg->cra_compress.coa_exit(crypto_tfm_ctx(tfm)); }
gpl-2.0
CyanogenMod/android_kernel_lge_msm8974
arch/m68k/platform/coldfire/dma_timer.c
9001
2187
/* * dma_timer.c -- Freescale ColdFire DMA Timer. * * Copyright (C) 2007, Benedikt Spranger <b.spranger@linutronix.de> * Copyright (C) 2008. Sebastian Siewior, Linutronix * */ #include <linux/clocksource.h> #include <linux/io.h> #include <asm/machdep.h> #include <asm/coldfire.h> #include <asm/mcfpit.h> #include <asm/mcfsim.h> #define DMA_TIMER_0 (0x00) #define DMA_TIMER_1 (0x40) #define DMA_TIMER_2 (0x80) #define DMA_TIMER_3 (0xc0) #define DTMR0 (MCF_IPSBAR + DMA_TIMER_0 + 0x400) #define DTXMR0 (MCF_IPSBAR + DMA_TIMER_0 + 0x402) #define DTER0 (MCF_IPSBAR + DMA_TIMER_0 + 0x403) #define DTRR0 (MCF_IPSBAR + DMA_TIMER_0 + 0x404) #define DTCR0 (MCF_IPSBAR + DMA_TIMER_0 + 0x408) #define DTCN0 (MCF_IPSBAR + DMA_TIMER_0 + 0x40c) #define DMA_FREQ ((MCF_CLK / 2) / 16) /* DTMR */ #define DMA_DTMR_RESTART (1 << 3) #define DMA_DTMR_CLK_DIV_1 (1 << 1) #define DMA_DTMR_CLK_DIV_16 (2 << 1) #define DMA_DTMR_ENABLE (1 << 0) static cycle_t cf_dt_get_cycles(struct clocksource *cs) { return __raw_readl(DTCN0); } static struct clocksource clocksource_cf_dt = { .name = "coldfire_dma_timer", .rating = 200, .read = cf_dt_get_cycles, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static int __init init_cf_dt_clocksource(void) { /* * We setup DMA timer 0 in free run mode. This incrementing counter is * used as a highly precious clock source. With MCF_CLOCK = 150 MHz we * get a ~213 ns resolution and the 32bit register will overflow almost * every 15 minutes. 
*/ __raw_writeb(0x00, DTXMR0); __raw_writeb(0x00, DTER0); __raw_writel(0x00000000, DTRR0); __raw_writew(DMA_DTMR_CLK_DIV_16 | DMA_DTMR_ENABLE, DTMR0); return clocksource_register_hz(&clocksource_cf_dt, DMA_FREQ); } arch_initcall(init_cf_dt_clocksource); #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ #define CYC2NS_SCALE ((1000000 << CYC2NS_SCALE_FACTOR) / (DMA_FREQ / 1000)) static unsigned long long cycles2ns(unsigned long cycl) { return (unsigned long long) ((unsigned long long)cycl * CYC2NS_SCALE) >> CYC2NS_SCALE_FACTOR; } unsigned long long sched_clock(void) { unsigned long cycl = __raw_readl(DTCN0); return cycles2ns(cycl); }
gpl-2.0
FenomenalSabderMOD/MOTOE
arch/sh/drivers/pci/common.c
13097
3996
#include <linux/pci.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/kernel.h> /* * These functions are used early on before PCI scanning is done * and all of the pci_dev and pci_bus structures have been created. */ static struct pci_dev *fake_pci_dev(struct pci_channel *hose, int top_bus, int busnr, int devfn) { static struct pci_dev dev; static struct pci_bus bus; dev.bus = &bus; dev.sysdata = hose; dev.devfn = devfn; bus.number = busnr; bus.sysdata = hose; bus.ops = hose->pci_ops; if(busnr != top_bus) /* Fake a parent bus structure. */ bus.parent = &bus; else bus.parent = NULL; return &dev; } #define EARLY_PCI_OP(rw, size, type) \ int __init early_##rw##_config_##size(struct pci_channel *hose, \ int top_bus, int bus, int devfn, int offset, type value) \ { \ return pci_##rw##_config_##size( \ fake_pci_dev(hose, top_bus, bus, devfn), \ offset, value); \ } EARLY_PCI_OP(read, byte, u8 *) EARLY_PCI_OP(read, word, u16 *) EARLY_PCI_OP(read, dword, u32 *) EARLY_PCI_OP(write, byte, u8) EARLY_PCI_OP(write, word, u16) EARLY_PCI_OP(write, dword, u32) int __init pci_is_66mhz_capable(struct pci_channel *hose, int top_bus, int current_bus) { u32 pci_devfn; unsigned short vid; int cap66 = -1; u16 stat; printk(KERN_INFO "PCI: Checking 66MHz capabilities...\n"); for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) { if (PCI_FUNC(pci_devfn)) continue; if (early_read_config_word(hose, top_bus, current_bus, pci_devfn, PCI_VENDOR_ID, &vid) != PCIBIOS_SUCCESSFUL) continue; if (vid == 0xffff) continue; /* check 66MHz capability */ if (cap66 < 0) cap66 = 1; if (cap66) { early_read_config_word(hose, top_bus, current_bus, pci_devfn, PCI_STATUS, &stat); if (!(stat & PCI_STATUS_66MHZ)) { printk(KERN_DEBUG "PCI: %02x:%02x not 66MHz capable.\n", current_bus, pci_devfn); cap66 = 0; break; } } } return cap66 > 0; } static void pcibios_enable_err(unsigned long __data) { struct pci_channel *hose = (struct pci_channel *)__data; del_timer(&hose->err_timer); printk(KERN_DEBUG 
"PCI: re-enabling error IRQ.\n"); enable_irq(hose->err_irq); } static void pcibios_enable_serr(unsigned long __data) { struct pci_channel *hose = (struct pci_channel *)__data; del_timer(&hose->serr_timer); printk(KERN_DEBUG "PCI: re-enabling system error IRQ.\n"); enable_irq(hose->serr_irq); } void pcibios_enable_timers(struct pci_channel *hose) { if (hose->err_irq) { init_timer(&hose->err_timer); hose->err_timer.data = (unsigned long)hose; hose->err_timer.function = pcibios_enable_err; } if (hose->serr_irq) { init_timer(&hose->serr_timer); hose->serr_timer.data = (unsigned long)hose; hose->serr_timer.function = pcibios_enable_serr; } } /* * A simple handler for the regular PCI status errors, called from IRQ * context. */ unsigned int pcibios_handle_status_errors(unsigned long addr, unsigned int status, struct pci_channel *hose) { unsigned int cmd = 0; if (status & PCI_STATUS_REC_MASTER_ABORT) { printk(KERN_DEBUG "PCI: master abort, pc=0x%08lx\n", addr); cmd |= PCI_STATUS_REC_MASTER_ABORT; } if (status & PCI_STATUS_REC_TARGET_ABORT) { printk(KERN_DEBUG "PCI: target abort: "); pcibios_report_status(PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT, 1); printk("\n"); cmd |= PCI_STATUS_REC_TARGET_ABORT; } if (status & (PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY)) { printk(KERN_DEBUG "PCI: parity error detected: "); pcibios_report_status(PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY, 1); printk("\n"); cmd |= PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY; /* Now back off of the IRQ for awhile */ if (hose->err_irq) { disable_irq_nosync(hose->err_irq); hose->err_timer.expires = jiffies + HZ; add_timer(&hose->err_timer); } } return cmd; }
gpl-2.0
LightningZap/sgs4g_lz_kernel
arch/sh/drivers/pci/common.c
13097
3996
#include <linux/pci.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/kernel.h> /* * These functions are used early on before PCI scanning is done * and all of the pci_dev and pci_bus structures have been created. */ static struct pci_dev *fake_pci_dev(struct pci_channel *hose, int top_bus, int busnr, int devfn) { static struct pci_dev dev; static struct pci_bus bus; dev.bus = &bus; dev.sysdata = hose; dev.devfn = devfn; bus.number = busnr; bus.sysdata = hose; bus.ops = hose->pci_ops; if(busnr != top_bus) /* Fake a parent bus structure. */ bus.parent = &bus; else bus.parent = NULL; return &dev; } #define EARLY_PCI_OP(rw, size, type) \ int __init early_##rw##_config_##size(struct pci_channel *hose, \ int top_bus, int bus, int devfn, int offset, type value) \ { \ return pci_##rw##_config_##size( \ fake_pci_dev(hose, top_bus, bus, devfn), \ offset, value); \ } EARLY_PCI_OP(read, byte, u8 *) EARLY_PCI_OP(read, word, u16 *) EARLY_PCI_OP(read, dword, u32 *) EARLY_PCI_OP(write, byte, u8) EARLY_PCI_OP(write, word, u16) EARLY_PCI_OP(write, dword, u32) int __init pci_is_66mhz_capable(struct pci_channel *hose, int top_bus, int current_bus) { u32 pci_devfn; unsigned short vid; int cap66 = -1; u16 stat; printk(KERN_INFO "PCI: Checking 66MHz capabilities...\n"); for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) { if (PCI_FUNC(pci_devfn)) continue; if (early_read_config_word(hose, top_bus, current_bus, pci_devfn, PCI_VENDOR_ID, &vid) != PCIBIOS_SUCCESSFUL) continue; if (vid == 0xffff) continue; /* check 66MHz capability */ if (cap66 < 0) cap66 = 1; if (cap66) { early_read_config_word(hose, top_bus, current_bus, pci_devfn, PCI_STATUS, &stat); if (!(stat & PCI_STATUS_66MHZ)) { printk(KERN_DEBUG "PCI: %02x:%02x not 66MHz capable.\n", current_bus, pci_devfn); cap66 = 0; break; } } } return cap66 > 0; } static void pcibios_enable_err(unsigned long __data) { struct pci_channel *hose = (struct pci_channel *)__data; del_timer(&hose->err_timer); printk(KERN_DEBUG 
"PCI: re-enabling error IRQ.\n"); enable_irq(hose->err_irq); } static void pcibios_enable_serr(unsigned long __data) { struct pci_channel *hose = (struct pci_channel *)__data; del_timer(&hose->serr_timer); printk(KERN_DEBUG "PCI: re-enabling system error IRQ.\n"); enable_irq(hose->serr_irq); } void pcibios_enable_timers(struct pci_channel *hose) { if (hose->err_irq) { init_timer(&hose->err_timer); hose->err_timer.data = (unsigned long)hose; hose->err_timer.function = pcibios_enable_err; } if (hose->serr_irq) { init_timer(&hose->serr_timer); hose->serr_timer.data = (unsigned long)hose; hose->serr_timer.function = pcibios_enable_serr; } } /* * A simple handler for the regular PCI status errors, called from IRQ * context. */ unsigned int pcibios_handle_status_errors(unsigned long addr, unsigned int status, struct pci_channel *hose) { unsigned int cmd = 0; if (status & PCI_STATUS_REC_MASTER_ABORT) { printk(KERN_DEBUG "PCI: master abort, pc=0x%08lx\n", addr); cmd |= PCI_STATUS_REC_MASTER_ABORT; } if (status & PCI_STATUS_REC_TARGET_ABORT) { printk(KERN_DEBUG "PCI: target abort: "); pcibios_report_status(PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT, 1); printk("\n"); cmd |= PCI_STATUS_REC_TARGET_ABORT; } if (status & (PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY)) { printk(KERN_DEBUG "PCI: parity error detected: "); pcibios_report_status(PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY, 1); printk("\n"); cmd |= PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY; /* Now back off of the IRQ for awhile */ if (hose->err_irq) { disable_irq_nosync(hose->err_irq); hose->err_timer.expires = jiffies + HZ; add_timer(&hose->err_timer); } } return cmd; }
gpl-2.0
Stane1983/amlogic-m1
drivers/gpu/drm/drm_bufs.c
42
44347
/** * \file drm_bufs.c * Generic buffer template * * \author Rickard E. (Rik) Faith <faith@valinux.com> * \author Gareth Hughes <gareth@valinux.com> */ /* * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com * * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/log2.h> #include <asm/shmparam.h> #include "drmP.h" resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource) { return pci_resource_start(dev->pdev, resource); } EXPORT_SYMBOL(drm_get_resource_start); resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource) { return pci_resource_len(dev->pdev, resource); } EXPORT_SYMBOL(drm_get_resource_len); static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, struct drm_local_map *map) { struct drm_map_list *entry; list_for_each_entry(entry, &dev->maplist, head) { /* * Because the kernel-userspace ABI is fixed at a 32-bit offset * while PCI resources may live above that, we ignore the map * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS. * It is assumed that each driver will have only one resource of * each type. */ if (!entry->map || map->type != entry->map->type || entry->master != dev->primary->master) continue; switch (map->type) { case _DRM_SHM: if (map->flags != _DRM_CONTAINS_LOCK) break; case _DRM_REGISTERS: case _DRM_FRAME_BUFFER: return entry; default: /* Make gcc happy */ ; } if (entry->map->offset == map->offset) return entry; } return NULL; } static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash, unsigned long user_token, int hashed_handle, int shm) { int use_hashed_handle, shift; unsigned long add; #if (BITS_PER_LONG == 64) use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle); #elif (BITS_PER_LONG == 32) use_hashed_handle = hashed_handle; #else #error Unsupported long size. Neither 64 nor 32 bits. 
#endif if (!use_hashed_handle) { int ret; hash->key = user_token >> PAGE_SHIFT; ret = drm_ht_insert_item(&dev->map_hash, hash); if (ret != -EINVAL) return ret; } shift = 0; add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT; if (shm && (SHMLBA > PAGE_SIZE)) { int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1; /* For shared memory, we have to preserve the SHMLBA * bits of the eventual vma->vm_pgoff value during * mmap(). Otherwise we run into cache aliasing problems * on some platforms. On these platforms, the pgoff of * a mmap() request is used to pick a suitable virtual * address for the mmap() region such that it will not * cause cache aliasing problems. * * Therefore, make sure the SHMLBA relevant bits of the * hash value we use are equal to those in the original * kernel virtual address. */ shift = bits; add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL)); } return drm_ht_just_insert_please(&dev->map_hash, hash, user_token, 32 - PAGE_SHIFT - 3, shift, add); } /** * Core function to create a range of memory available for mapping by a * non-root process. * * Adjusts the memory offset to its absolute value according to the mapping * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where * applicable and if supported by the kernel. */ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, unsigned int size, enum drm_map_type type, enum drm_map_flags flags, struct drm_map_list ** maplist) { struct drm_local_map *map; struct drm_map_list *list; drm_dma_handle_t *dmah; unsigned long user_token; int ret; map = kmalloc(sizeof(*map), GFP_KERNEL); if (!map) return -ENOMEM; map->offset = offset; map->size = size; map->flags = flags; map->type = type; /* Only allow shared memory to be removable since we only keep enough * book keeping information about shared memory to allow for removal * when processes fork. 
*/ if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) { kfree(map); return -EINVAL; } DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n", (unsigned long long)map->offset, map->size, map->type); /* page-align _DRM_SHM maps. They are allocated here so there is no security * hole created by that and it works around various broken drivers that use * a non-aligned quantity to map the SAREA. --BenH */ if (map->type == _DRM_SHM) map->size = PAGE_ALIGN(map->size); if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) { kfree(map); return -EINVAL; } map->mtrr = -1; map->handle = NULL; switch (map->type) { case _DRM_REGISTERS: case _DRM_FRAME_BUFFER: #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) if (map->offset + (map->size-1) < map->offset || map->offset < virt_to_phys(high_memory)) { kfree(map); return -EINVAL; } #endif #ifdef __alpha__ map->offset += dev->hose->mem_space->start; #endif /* Some drivers preinitialize some maps, without the X Server * needing to be aware of it. Therefore, we just return success * when the server tries to create a duplicate map. 
*/ list = drm_find_matching_map(dev, map); if (list != NULL) { if (list->map->size != map->size) { DRM_DEBUG("Matching maps of type %d with " "mismatched sizes, (%ld vs %ld)\n", map->type, map->size, list->map->size); list->map->size = map->size; } kfree(map); *maplist = list; return 0; } if (drm_core_has_MTRR(dev)) { if (map->type == _DRM_FRAME_BUFFER || (map->flags & _DRM_WRITE_COMBINING)) { map->mtrr = mtrr_add(map->offset, map->size, MTRR_TYPE_WRCOMB, 1); } } if (map->type == _DRM_REGISTERS) { map->handle = ioremap(map->offset, map->size); if (!map->handle) { kfree(map); return -ENOMEM; } } break; case _DRM_SHM: list = drm_find_matching_map(dev, map); if (list != NULL) { if(list->map->size != map->size) { DRM_DEBUG("Matching maps of type %d with " "mismatched sizes, (%ld vs %ld)\n", map->type, map->size, list->map->size); list->map->size = map->size; } kfree(map); *maplist = list; return 0; } map->handle = vmalloc_user(map->size); DRM_DEBUG("%lu %d %p\n", map->size, drm_order(map->size), map->handle); if (!map->handle) { kfree(map); return -ENOMEM; } map->offset = (unsigned long)map->handle; if (map->flags & _DRM_CONTAINS_LOCK) { /* Prevent a 2nd X Server from creating a 2nd lock */ if (dev->primary->master->lock.hw_lock != NULL) { vfree(map->handle); kfree(map); return -EBUSY; } dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */ } break; case _DRM_AGP: { struct drm_agp_mem *entry; int valid = 0; if (!drm_core_has_AGP(dev)) { kfree(map); return -EINVAL; } #ifdef __alpha__ map->offset += dev->hose->mem_space->start; #endif /* In some cases (i810 driver), user space may have already * added the AGP base itself, because dev->agp->base previously * only got set during AGP enable. So, only add the base * address if the map's offset isn't already within the * aperture. 
*/ if (map->offset < dev->agp->base || map->offset > dev->agp->base + dev->agp->agp_info.aper_size * 1024 * 1024 - 1) { map->offset += dev->agp->base; } map->mtrr = dev->agp->agp_mtrr; /* for getmap */ /* This assumes the DRM is in total control of AGP space. * It's not always the case as AGP can be in the control * of user space (i.e. i810 driver). So this loop will get * skipped and we double check that dev->agp->memory is * actually set as well as being invalid before EPERM'ing */ list_for_each_entry(entry, &dev->agp->memory, head) { if ((map->offset >= entry->bound) && (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) { valid = 1; break; } } if (!list_empty(&dev->agp->memory) && !valid) { kfree(map); return -EPERM; } DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n", (unsigned long long)map->offset, map->size); break; } case _DRM_GEM: DRM_ERROR("tried to addmap GEM object\n"); break; case _DRM_SCATTER_GATHER: if (!dev->sg) { kfree(map); return -EINVAL; } map->offset += (unsigned long)dev->sg->virtual; break; case _DRM_CONSISTENT: /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G, * As we're limiting the address to 2^32-1 (or less), * casting it down to 32 bits is no problem, but we * need to point to a 64bit variable first. */ dmah = drm_pci_alloc(dev, map->size, map->size); if (!dmah) { kfree(map); return -ENOMEM; } map->handle = dmah->vaddr; map->offset = (unsigned long)dmah->busaddr; kfree(dmah); break; default: kfree(map); return -EINVAL; } list = kmalloc(sizeof(*list), GFP_KERNEL); if (!list) { if (map->type == _DRM_REGISTERS) iounmap(map->handle); kfree(map); return -EINVAL; } memset(list, 0, sizeof(*list)); list->map = map; mutex_lock(&dev->struct_mutex); list_add(&list->head, &dev->maplist); /* Assign a 32-bit handle */ /* We do it here so that dev->struct_mutex protects the increment */ user_token = (map->type == _DRM_SHM) ? 
(unsigned long)map->handle : map->offset; ret = drm_map_handle(dev, &list->hash, user_token, 0, (map->type == _DRM_SHM)); if (ret) { if (map->type == _DRM_REGISTERS) iounmap(map->handle); kfree(map); kfree(list); mutex_unlock(&dev->struct_mutex); return ret; } list->user_token = list->hash.key << PAGE_SHIFT; mutex_unlock(&dev->struct_mutex); if (!(map->flags & _DRM_DRIVER)) list->master = dev->primary->master; *maplist = list; return 0; } int drm_addmap(struct drm_device * dev, resource_size_t offset, unsigned int size, enum drm_map_type type, enum drm_map_flags flags, struct drm_local_map ** map_ptr) { struct drm_map_list *list; int rc; rc = drm_addmap_core(dev, offset, size, type, flags, &list); if (!rc) *map_ptr = list->map; return rc; } EXPORT_SYMBOL(drm_addmap); /** * Ioctl to specify a range of memory that is available for mapping by a * non-root process. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_map structure. * \return zero on success or a negative value on error. * */ int drm_addmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_map *map = data; struct drm_map_list *maplist; int err; if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM)) return -EPERM; err = drm_addmap_core(dev, map->offset, map->size, map->type, map->flags, &maplist); if (err) return err; /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */ map->handle = (void *)(unsigned long)maplist->user_token; return 0; } /** * Remove a map private from list and deallocate resources if the mapping * isn't in use. * * Searches the map on drm_device::maplist, removes it from the list, see if * its being used, and free any associate resource (such as MTRR's) if it's not * being on use. 
* * \sa drm_addmap */ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) { struct drm_map_list *r_list = NULL, *list_t; drm_dma_handle_t dmah; int found = 0; struct drm_master *master; /* Find the list entry for the map and remove it */ list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { if (r_list->map == map) { master = r_list->master; list_del(&r_list->head); drm_ht_remove_key(&dev->map_hash, r_list->user_token >> PAGE_SHIFT); kfree(r_list); found = 1; break; } } if (!found) return -EINVAL; switch (map->type) { case _DRM_REGISTERS: iounmap(map->handle); /* FALLTHROUGH */ case _DRM_FRAME_BUFFER: if (drm_core_has_MTRR(dev) && map->mtrr >= 0) { int retcode; retcode = mtrr_del(map->mtrr, map->offset, map->size); DRM_DEBUG("mtrr_del=%d\n", retcode); } break; case _DRM_SHM: vfree(map->handle); if (master) { if (dev->sigdata.lock == master->lock.hw_lock) dev->sigdata.lock = NULL; master->lock.hw_lock = NULL; /* SHM removed */ master->lock.file_priv = NULL; wake_up_interruptible_all(&master->lock.lock_queue); } break; case _DRM_AGP: case _DRM_SCATTER_GATHER: break; case _DRM_CONSISTENT: dmah.vaddr = map->handle; dmah.busaddr = map->offset; dmah.size = map->size; __drm_pci_free(dev, &dmah); break; case _DRM_GEM: DRM_ERROR("tried to rmmap GEM object\n"); break; } kfree(map); return 0; } EXPORT_SYMBOL(drm_rmmap_locked); int drm_rmmap(struct drm_device *dev, struct drm_local_map *map) { int ret; mutex_lock(&dev->struct_mutex); ret = drm_rmmap_locked(dev, map); mutex_unlock(&dev->struct_mutex); return ret; } EXPORT_SYMBOL(drm_rmmap); /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on * the last close of the device, and this is necessary for cleanup when things * exit uncleanly. Therefore, having userland manually remove mappings seems * like a pointless exercise since they're going away anyway. 
* * One use case might be after addmap is allowed for normal users for SHM and * gets used by drivers that the server doesn't need to care about. This seems * unlikely. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a struct drm_map structure. * \return zero on success or a negative value on error. */ int drm_rmmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_map *request = data; struct drm_local_map *map = NULL; struct drm_map_list *r_list; int ret; mutex_lock(&dev->struct_mutex); list_for_each_entry(r_list, &dev->maplist, head) { if (r_list->map && r_list->user_token == (unsigned long)request->handle && r_list->map->flags & _DRM_REMOVABLE) { map = r_list->map; break; } } /* List has wrapped around to the head pointer, or its empty we didn't * find anything. */ if (list_empty(&dev->maplist) || !map) { mutex_unlock(&dev->struct_mutex); return -EINVAL; } /* Register and framebuffer maps are permanent */ if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) { mutex_unlock(&dev->struct_mutex); return 0; } ret = drm_rmmap_locked(dev, map); mutex_unlock(&dev->struct_mutex); return ret; } /** * Cleanup after an error on one of the addbufs() functions. * * \param dev DRM device. * \param entry buffer entry where the error occurred. * * Frees any pages and buffers associated with the given entry. */ static void drm_cleanup_buf_error(struct drm_device * dev, struct drm_buf_entry * entry) { int i; if (entry->seg_count) { for (i = 0; i < entry->seg_count; i++) { if (entry->seglist[i]) { drm_pci_free(dev, entry->seglist[i]); } } kfree(entry->seglist); entry->seg_count = 0; } if (entry->buf_count) { for (i = 0; i < entry->buf_count; i++) { kfree(entry->buflist[i].dev_private); } kfree(entry->buflist); entry->buf_count = 0; } } #if __OS_HAS_AGP /** * Add AGP buffers for DMA transfers. * * \param dev struct drm_device to which the buffers are to be added. 
* \param request pointer to a struct drm_buf_desc describing the request. * \return zero on success or a negative number on failure. * * After some sanity checks creates a drm_buf structure for each buffer and * reallocates the buffer list of the same size order to accommodate the new * buffers. */ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request) { struct drm_device_dma *dma = dev->dma; struct drm_buf_entry *entry; struct drm_agp_mem *agp_entry; struct drm_buf *buf; unsigned long offset; unsigned long agp_offset; int count; int order; int size; int alignment; int page_order; int total; int byte_count; int i, valid; struct drm_buf **temp_buflist; if (!dma) return -EINVAL; count = request->count; order = drm_order(request->size); size = 1 << order; alignment = (request->flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size; page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; byte_count = 0; agp_offset = dev->agp->base + request->agp_start; DRM_DEBUG("count: %d\n", count); DRM_DEBUG("order: %d\n", order); DRM_DEBUG("size: %d\n", size); DRM_DEBUG("agp_offset: %lx\n", agp_offset); DRM_DEBUG("alignment: %d\n", alignment); DRM_DEBUG("page_order: %d\n", page_order); DRM_DEBUG("total: %d\n", total); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; if (dev->queue_count) return -EBUSY; /* Not while in use */ /* Make sure buffers are located in AGP memory that we own */ valid = 0; list_for_each_entry(agp_entry, &dev->agp->memory, head) { if ((agp_offset >= agp_entry->bound) && (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) { valid = 1; break; } } if (!list_empty(&dev->agp->memory) && !valid) { DRM_DEBUG("zone invalid\n"); return -EINVAL; } spin_lock(&dev->count_lock); if (dev->buf_use) { spin_unlock(&dev->count_lock); return -EBUSY; } atomic_inc(&dev->buf_alloc); spin_unlock(&dev->count_lock); mutex_lock(&dev->struct_mutex); entry = &dma->bufs[order]; if 
(entry->buf_count) { mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; /* May only call once for each order */ } if (count < 0 || count > 4096) { mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -EINVAL; } entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL); if (!entry->buflist) { mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; } memset(entry->buflist, 0, count * sizeof(*entry->buflist)); entry->buf_size = size; entry->page_order = page_order; offset = 0; while (entry->buf_count < count) { buf = &entry->buflist[entry->buf_count]; buf->idx = dma->buf_count + entry->buf_count; buf->total = alignment; buf->order = order; buf->used = 0; buf->offset = (dma->byte_count + offset); buf->bus_address = agp_offset + offset; buf->address = (void *)(agp_offset + offset); buf->next = NULL; buf->waiting = 0; buf->pending = 0; init_waitqueue_head(&buf->dma_wait); buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL); if (!buf->dev_private) { /* Set count correctly so we free the proper amount. 
*/ entry->buf_count = count; drm_cleanup_buf_error(dev, entry); mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; } memset(buf->dev_private, 0, buf->dev_priv_size); DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); offset += alignment; entry->buf_count++; byte_count += PAGE_SIZE << page_order; } DRM_DEBUG("byte_count: %d\n", byte_count); temp_buflist = krealloc(dma->buflist, (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), GFP_KERNEL); if (!temp_buflist) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; } dma->buflist = temp_buflist; for (i = 0; i < entry->buf_count; i++) { dma->buflist[i + dma->buf_count] = &entry->buflist[i]; } dma->buf_count += entry->buf_count; dma->seg_count += entry->seg_count; dma->page_count += byte_count >> PAGE_SHIFT; dma->byte_count += byte_count; DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); mutex_unlock(&dev->struct_mutex); request->count = entry->buf_count; request->size = size; dma->flags = _DRM_DMA_USE_AGP; atomic_dec(&dev->buf_alloc); return 0; } EXPORT_SYMBOL(drm_addbufs_agp); #endif /* __OS_HAS_AGP */ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) { struct drm_device_dma *dma = dev->dma; int count; int order; int size; int total; int page_order; struct drm_buf_entry *entry; drm_dma_handle_t *dmah; struct drm_buf *buf; int alignment; unsigned long offset; int i; int byte_count; int page_count; unsigned long *temp_pagelist; struct drm_buf **temp_buflist; if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL; if (!dma) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; count = request->count; order = drm_order(request->size); size = 1 << order; DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n", request->count, request->size, size, order, 
dev->queue_count); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; if (dev->queue_count) return -EBUSY; /* Not while in use */ alignment = (request->flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size; page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; spin_lock(&dev->count_lock); if (dev->buf_use) { spin_unlock(&dev->count_lock); return -EBUSY; } atomic_inc(&dev->buf_alloc); spin_unlock(&dev->count_lock); mutex_lock(&dev->struct_mutex); entry = &dma->bufs[order]; if (entry->buf_count) { mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; /* May only call once for each order */ } if (count < 0 || count > 4096) { mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -EINVAL; } entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL); if (!entry->buflist) { mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; } memset(entry->buflist, 0, count * sizeof(*entry->buflist)); entry->seglist = kmalloc(count * sizeof(*entry->seglist), GFP_KERNEL); if (!entry->seglist) { kfree(entry->buflist); mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; } memset(entry->seglist, 0, count * sizeof(*entry->seglist)); /* Keep the original pagelist until we know all the allocations * have succeeded */ temp_pagelist = kmalloc((dma->page_count + (count << page_order)) * sizeof(*dma->pagelist), GFP_KERNEL); if (!temp_pagelist) { kfree(entry->buflist); kfree(entry->seglist); mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; } memcpy(temp_pagelist, dma->pagelist, dma->page_count * sizeof(*dma->pagelist)); DRM_DEBUG("pagelist: %d entries\n", dma->page_count + (count << page_order)); entry->buf_size = size; entry->page_order = page_order; byte_count = 0; page_count = 0; while (entry->buf_count < count) { dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000); if (!dmah) { /* Set count correctly 
so we free the proper amount. */ entry->buf_count = count; entry->seg_count = count; drm_cleanup_buf_error(dev, entry); kfree(temp_pagelist); mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; } entry->seglist[entry->seg_count++] = dmah; for (i = 0; i < (1 << page_order); i++) { DRM_DEBUG("page %d @ 0x%08lx\n", dma->page_count + page_count, (unsigned long)dmah->vaddr + PAGE_SIZE * i); temp_pagelist[dma->page_count + page_count++] = (unsigned long)dmah->vaddr + PAGE_SIZE * i; } for (offset = 0; offset + size <= total && entry->buf_count < count; offset += alignment, ++entry->buf_count) { buf = &entry->buflist[entry->buf_count]; buf->idx = dma->buf_count + entry->buf_count; buf->total = alignment; buf->order = order; buf->used = 0; buf->offset = (dma->byte_count + byte_count + offset); buf->address = (void *)(dmah->vaddr + offset); buf->bus_address = dmah->busaddr + offset; buf->next = NULL; buf->waiting = 0; buf->pending = 0; init_waitqueue_head(&buf->dma_wait); buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL); if (!buf->dev_private) { /* Set count correctly so we free the proper amount. 
*/ entry->buf_count = count; entry->seg_count = count; drm_cleanup_buf_error(dev, entry); kfree(temp_pagelist); mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; } memset(buf->dev_private, 0, buf->dev_priv_size); DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); } byte_count += PAGE_SIZE << page_order; } temp_buflist = krealloc(dma->buflist, (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), GFP_KERNEL); if (!temp_buflist) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); kfree(temp_pagelist); mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; } dma->buflist = temp_buflist; for (i = 0; i < entry->buf_count; i++) { dma->buflist[i + dma->buf_count] = &entry->buflist[i]; } /* No allocations failed, so now we can replace the orginal pagelist * with the new one. */ if (dma->page_count) { kfree(dma->pagelist); } dma->pagelist = temp_pagelist; dma->buf_count += entry->buf_count; dma->seg_count += entry->seg_count; dma->page_count += entry->seg_count << page_order; dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); mutex_unlock(&dev->struct_mutex); request->count = entry->buf_count; request->size = size; if (request->flags & _DRM_PCI_BUFFER_RO) dma->flags = _DRM_DMA_USE_PCI_RO; atomic_dec(&dev->buf_alloc); return 0; } EXPORT_SYMBOL(drm_addbufs_pci); static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request) { struct drm_device_dma *dma = dev->dma; struct drm_buf_entry *entry; struct drm_buf *buf; unsigned long offset; unsigned long agp_offset; int count; int order; int size; int alignment; int page_order; int total; int byte_count; int i; struct drm_buf **temp_buflist; if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; if (!dma) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; count = request->count; order = drm_order(request->size); size = 1 << order; alignment = (request->flags & _DRM_PAGE_ALIGN) ? 
PAGE_ALIGN(size) : size; page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; byte_count = 0; agp_offset = request->agp_start; DRM_DEBUG("count: %d\n", count); DRM_DEBUG("order: %d\n", order); DRM_DEBUG("size: %d\n", size); DRM_DEBUG("agp_offset: %lu\n", agp_offset); DRM_DEBUG("alignment: %d\n", alignment); DRM_DEBUG("page_order: %d\n", page_order); DRM_DEBUG("total: %d\n", total); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; if (dev->queue_count) return -EBUSY; /* Not while in use */ spin_lock(&dev->count_lock); if (dev->buf_use) { spin_unlock(&dev->count_lock); return -EBUSY; } atomic_inc(&dev->buf_alloc); spin_unlock(&dev->count_lock); mutex_lock(&dev->struct_mutex); entry = &dma->bufs[order]; if (entry->buf_count) { mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; /* May only call once for each order */ } if (count < 0 || count > 4096) { mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -EINVAL; } entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL); if (!entry->buflist) { mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; } memset(entry->buflist, 0, count * sizeof(*entry->buflist)); entry->buf_size = size; entry->page_order = page_order; offset = 0; while (entry->buf_count < count) { buf = &entry->buflist[entry->buf_count]; buf->idx = dma->buf_count + entry->buf_count; buf->total = alignment; buf->order = order; buf->used = 0; buf->offset = (dma->byte_count + offset); buf->bus_address = agp_offset + offset; buf->address = (void *)(agp_offset + offset + (unsigned long)dev->sg->virtual); buf->next = NULL; buf->waiting = 0; buf->pending = 0; init_waitqueue_head(&buf->dma_wait); buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL); if (!buf->dev_private) { /* Set count correctly so we free the proper amount. 
*/ entry->buf_count = count; drm_cleanup_buf_error(dev, entry); mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; } memset(buf->dev_private, 0, buf->dev_priv_size); DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); offset += alignment; entry->buf_count++; byte_count += PAGE_SIZE << page_order; } DRM_DEBUG("byte_count: %d\n", byte_count); temp_buflist = krealloc(dma->buflist, (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), GFP_KERNEL); if (!temp_buflist) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; } dma->buflist = temp_buflist; for (i = 0; i < entry->buf_count; i++) { dma->buflist[i + dma->buf_count] = &entry->buflist[i]; } dma->buf_count += entry->buf_count; dma->seg_count += entry->seg_count; dma->page_count += byte_count >> PAGE_SHIFT; dma->byte_count += byte_count; DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); mutex_unlock(&dev->struct_mutex); request->count = entry->buf_count; request->size = size; dma->flags = _DRM_DMA_USE_SG; atomic_dec(&dev->buf_alloc); return 0; } static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request) { struct drm_device_dma *dma = dev->dma; struct drm_buf_entry *entry; struct drm_buf *buf; unsigned long offset; unsigned long agp_offset; int count; int order; int size; int alignment; int page_order; int total; int byte_count; int i; struct drm_buf **temp_buflist; if (!drm_core_check_feature(dev, DRIVER_FB_DMA)) return -EINVAL; if (!dma) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; count = request->count; order = drm_order(request->size); size = 1 << order; alignment = (request->flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size; page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; byte_count = 0; agp_offset = request->agp_start; DRM_DEBUG("count: %d\n", count); DRM_DEBUG("order: %d\n", order); DRM_DEBUG("size: %d\n", size); DRM_DEBUG("agp_offset: %lu\n", agp_offset); DRM_DEBUG("alignment: %d\n", alignment); DRM_DEBUG("page_order: %d\n", page_order); DRM_DEBUG("total: %d\n", total); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; if (dev->queue_count) return -EBUSY; /* Not while in use */ spin_lock(&dev->count_lock); if (dev->buf_use) { spin_unlock(&dev->count_lock); return -EBUSY; } atomic_inc(&dev->buf_alloc); spin_unlock(&dev->count_lock); mutex_lock(&dev->struct_mutex); entry = &dma->bufs[order]; if (entry->buf_count) { mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; /* May only call once for each order */ } if (count < 0 || count > 4096) { mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -EINVAL; } entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL); if (!entry->buflist) { mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; } memset(entry->buflist, 0, count * sizeof(*entry->buflist)); entry->buf_size = size; entry->page_order = page_order; offset = 0; while (entry->buf_count < count) { buf = &entry->buflist[entry->buf_count]; buf->idx = dma->buf_count + entry->buf_count; buf->total = alignment; buf->order = order; buf->used = 0; buf->offset = (dma->byte_count + offset); buf->bus_address = agp_offset + offset; buf->address = (void *)(agp_offset + offset); buf->next = NULL; buf->waiting = 0; buf->pending = 0; init_waitqueue_head(&buf->dma_wait); buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL); if (!buf->dev_private) { /* Set count correctly so we free the proper amount. 
*/ entry->buf_count = count; drm_cleanup_buf_error(dev, entry); mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; } memset(buf->dev_private, 0, buf->dev_priv_size); DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); offset += alignment; entry->buf_count++; byte_count += PAGE_SIZE << page_order; } DRM_DEBUG("byte_count: %d\n", byte_count); temp_buflist = krealloc(dma->buflist, (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), GFP_KERNEL); if (!temp_buflist) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; } dma->buflist = temp_buflist; for (i = 0; i < entry->buf_count; i++) { dma->buflist[i + dma->buf_count] = &entry->buflist[i]; } dma->buf_count += entry->buf_count; dma->seg_count += entry->seg_count; dma->page_count += byte_count >> PAGE_SHIFT; dma->byte_count += byte_count; DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); mutex_unlock(&dev->struct_mutex); request->count = entry->buf_count; request->size = size; dma->flags = _DRM_DMA_USE_FB; atomic_dec(&dev->buf_alloc); return 0; } /** * Add buffers for DMA transfers (ioctl). * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a struct drm_buf_desc request. * \return zero on success or a negative number on failure. * * According with the memory type specified in drm_buf_desc::flags and the * build options, it dispatches the call either to addbufs_agp(), * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent * PCI memory respectively. 
*/ int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_buf_desc *request = data; int ret; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; #if __OS_HAS_AGP if (request->flags & _DRM_AGP_BUFFER) ret = drm_addbufs_agp(dev, request); else #endif if (request->flags & _DRM_SG_BUFFER) ret = drm_addbufs_sg(dev, request); else if (request->flags & _DRM_FB_BUFFER) ret = drm_addbufs_fb(dev, request); else ret = drm_addbufs_pci(dev, request); return ret; } /** * Get information about the buffer mappings. * * This was originally mean for debugging purposes, or by a sophisticated * client library to determine how best to use the available buffers (e.g., * large buffers can be used for image transfer). * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_buf_info structure. * \return zero on success or a negative number on failure. * * Increments drm_device::buf_use while holding the drm_device::count_lock * lock, preventing of allocating more buffers after this call. Information * about each requested buffer is then copied into user space. 
*/ int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; struct drm_buf_info *request = data; int i; int count; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; if (!dma) return -EINVAL; spin_lock(&dev->count_lock); if (atomic_read(&dev->buf_alloc)) { spin_unlock(&dev->count_lock); return -EBUSY; } ++dev->buf_use; /* Can't allocate more after this call */ spin_unlock(&dev->count_lock); for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) ++count; } DRM_DEBUG("count = %d\n", count); if (request->count >= count) { for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) { struct drm_buf_desc __user *to = &request->list[count]; struct drm_buf_entry *from = &dma->bufs[i]; struct drm_freelist *list = &dma->bufs[i].freelist; if (copy_to_user(&to->count, &from->buf_count, sizeof(from->buf_count)) || copy_to_user(&to->size, &from->buf_size, sizeof(from->buf_size)) || copy_to_user(&to->low_mark, &list->low_mark, sizeof(list->low_mark)) || copy_to_user(&to->high_mark, &list->high_mark, sizeof(list->high_mark))) return -EFAULT; DRM_DEBUG("%d %d %d %d %d\n", i, dma->bufs[i].buf_count, dma->bufs[i].buf_size, dma->bufs[i].freelist.low_mark, dma->bufs[i].freelist.high_mark); ++count; } } } request->count = count; return 0; } /** * Specifies a low and high water mark for buffer allocation * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg a pointer to a drm_buf_desc structure. * \return zero on success or a negative number on failure. * * Verifies that the size order is bounded between the admissible orders and * updates the respective drm_device_dma::bufs entry low and high water mark. * * \note This ioctl is deprecated and mostly never used. 
*/ int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; struct drm_buf_desc *request = data; int order; struct drm_buf_entry *entry; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; if (!dma) return -EINVAL; DRM_DEBUG("%d, %d, %d\n", request->size, request->low_mark, request->high_mark); order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; entry = &dma->bufs[order]; if (request->low_mark < 0 || request->low_mark > entry->buf_count) return -EINVAL; if (request->high_mark < 0 || request->high_mark > entry->buf_count) return -EINVAL; entry->freelist.low_mark = request->low_mark; entry->freelist.high_mark = request->high_mark; return 0; } /** * Unreserve the buffers in list, previously reserved using drmDMA. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_buf_free structure. * \return zero on success or a negative number on failure. * * Calls free_buffer() for each used buffer. * This function is primarily used for debugging. */ int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; struct drm_buf_free *request = data; int i; int idx; struct drm_buf *buf; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; if (!dma) return -EINVAL; DRM_DEBUG("%d\n", request->count); for (i = 0; i < request->count; i++) { if (copy_from_user(&idx, &request->list[i], sizeof(idx))) return -EFAULT; if (idx < 0 || idx >= dma->buf_count) { DRM_ERROR("Index %d (of %d max)\n", idx, dma->buf_count - 1); return -EINVAL; } buf = dma->buflist[idx]; if (buf->file_priv != file_priv) { DRM_ERROR("Process %d freeing buffer not owned\n", task_pid_nr(current)); return -EINVAL; } drm_free_buffer(dev, buf); } return 0; } /** * Maps all of the DMA buffers into client-virtual space (ioctl). * * \param inode device inode. 
* \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_buf_map structure. * \return zero on success or a negative number on failure. * * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information * about each buffer into user space. For PCI buffers, it calls do_mmap() with * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls * drm_mmap_dma(). */ int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; int retcode = 0; const int zero = 0; unsigned long virtual; unsigned long address; struct drm_buf_map *request = data; int i; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; if (!dma) return -EINVAL; spin_lock(&dev->count_lock); if (atomic_read(&dev->buf_alloc)) { spin_unlock(&dev->count_lock); return -EBUSY; } dev->buf_use++; /* Can't allocate more after this call */ spin_unlock(&dev->count_lock); if (request->count >= dma->buf_count) { if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) || (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) || (drm_core_check_feature(dev, DRIVER_FB_DMA) && (dma->flags & _DRM_DMA_USE_FB))) { struct drm_local_map *map = dev->agp_buffer_map; unsigned long token = dev->agp_buffer_token; if (!map) { retcode = -EINVAL; goto done; } down_write(&current->mm->mmap_sem); virtual = do_mmap(file_priv->filp, 0, map->size, PROT_READ | PROT_WRITE, MAP_SHARED, token); up_write(&current->mm->mmap_sem); } else { down_write(&current->mm->mmap_sem); virtual = do_mmap(file_priv->filp, 0, dma->byte_count, PROT_READ | PROT_WRITE, MAP_SHARED, 0); up_write(&current->mm->mmap_sem); } if (virtual > -1024UL) { /* Real error */ retcode = (signed long)virtual; goto done; } request->virtual = (void __user *)virtual; for (i = 0; i < dma->buf_count; i++) { if (copy_to_user(&request->list[i].idx, &dma->buflist[i]->idx, sizeof(request->list[0].idx))) { retcode = -EFAULT; goto 
done; } if (copy_to_user(&request->list[i].total, &dma->buflist[i]->total, sizeof(request->list[0].total))) { retcode = -EFAULT; goto done; } if (copy_to_user(&request->list[i].used, &zero, sizeof(zero))) { retcode = -EFAULT; goto done; } address = virtual + dma->buflist[i]->offset; /* *** */ if (copy_to_user(&request->list[i].address, &address, sizeof(address))) { retcode = -EFAULT; goto done; } } } done: request->count = dma->buf_count; DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode); return retcode; } /** * Compute size order. Returns the exponent of the smaller power of two which * is greater or equal to given number. * * \param size size. * \return order. * * \todo Can be made faster. */ int drm_order(unsigned long size) { int order; unsigned long tmp; for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ; if (size & (size - 1)) ++order; return order; } EXPORT_SYMBOL(drm_order);
gpl-2.0
Stane1983/amlogic-m1
security/integrity/ima/ima_iint.c
42
3534
/*
 * Copyright (C) 2008 IBM Corporation
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * File: ima_iint.c
 *	- implements the IMA hooks: ima_inode_alloc, ima_inode_free
 *	- cache integrity information associated with an inode
 *	  using a radix tree.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/radix-tree.h>
#include "ima.h"

RADIX_TREE(ima_iint_store, GFP_ATOMIC);
DEFINE_SPINLOCK(ima_iint_lock);

static struct kmem_cache *iint_cache __read_mostly;

/* ima_iint_find_get - return the iint associated with an inode
 *
 * ima_iint_find_get gets a reference to the iint. Caller must
 * remember to put the iint reference.
 */
struct ima_iint_cache *ima_iint_find_get(struct inode *inode)
{
	struct ima_iint_cache *iint;

	rcu_read_lock();
	iint = radix_tree_lookup(&ima_iint_store, (unsigned long)inode);
	if (!iint)
		goto out;
	kref_get(&iint->refcount);
out:
	rcu_read_unlock();
	return iint;
}

/**
 * ima_inode_alloc - allocate an iint associated with an inode
 * @inode: pointer to the inode
 *
 * Returns 0 on success, a negative errno if the cache allocation or the
 * radix-tree insertion fails (the iint is freed on failure).
 */
int ima_inode_alloc(struct inode *inode)
{
	struct ima_iint_cache *iint = NULL;
	int rc = 0;

	iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
	if (!iint)
		return -ENOMEM;

	rc = radix_tree_preload(GFP_NOFS);
	if (rc < 0)
		goto out;

	spin_lock(&ima_iint_lock);
	rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
	spin_unlock(&ima_iint_lock);
	radix_tree_preload_end();
out:
	if (rc < 0)
		kmem_cache_free(iint_cache, iint);

	return rc;
}

/* iint_free - called when the iint refcount goes to zero */
void iint_free(struct kref *kref)
{
	struct ima_iint_cache *iint = container_of(kref, struct ima_iint_cache,
						   refcount);
	iint->version = 0;
	iint->flags = 0UL;
	/* Non-zero counters here indicate an accounting bug; log and reset
	 * so the object can safely be returned to the slab cache. */
	if (iint->readcount != 0) {
		printk(KERN_INFO "%s: readcount: %ld\n", __FUNCTION__,
		       iint->readcount);
		iint->readcount = 0;
	}
	if (iint->writecount != 0) {
		printk(KERN_INFO "%s: writecount: %ld\n", __FUNCTION__,
		       iint->writecount);
		iint->writecount = 0;
	}
	if (iint->opencount != 0) {
		printk(KERN_INFO "%s: opencount: %ld\n", __FUNCTION__,
		       iint->opencount);
		iint->opencount = 0;
	}
	kref_set(&iint->refcount, 1);
	kmem_cache_free(iint_cache, iint);
}

/* iint_rcu_free - RCU callback that drops the final tree reference */
void iint_rcu_free(struct rcu_head *rcu_head)
{
	struct ima_iint_cache *iint = container_of(rcu_head,
						   struct ima_iint_cache, rcu);
	kref_put(&iint->refcount, iint_free);
}

/**
 * ima_inode_free - called on security_inode_free
 * @inode: pointer to the inode
 *
 * Free the integrity information(iint) associated with an inode.
 */
void ima_inode_free(struct inode *inode)
{
	struct ima_iint_cache *iint;

	spin_lock(&ima_iint_lock);
	iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode);
	spin_unlock(&ima_iint_lock);
	if (iint)
		call_rcu(&iint->rcu, iint_rcu_free);
}

/* Slab constructor: reset a cache object to its pristine state. */
static void init_once(void *foo)
{
	struct ima_iint_cache *iint = foo;

	memset(iint, 0, sizeof *iint);
	iint->version = 0;
	iint->flags = 0UL;
	mutex_init(&iint->mutex);
	iint->readcount = 0;
	iint->writecount = 0;
	iint->opencount = 0;
	kref_set(&iint->refcount, 1);
}

static int __init ima_iintcache_init(void)
{
	iint_cache =
	    kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
			      SLAB_PANIC, init_once);
	return 0;
}
security_initcall(ima_iintcache_init);
gpl-2.0
wtbdaaaa/i8320kernel
drivers/dsp/bridge-i9003/rmgr/dspdrv.c
42
4148
/*
 * dspdrv.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Interface to allocate and free bridge resources.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/*  ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/*  ----------------------------------- OS Adaptation Layer */
#include <dspbridge/cfg.h>

/*  ----------------------------------- Platform Manager */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>
#include <dspbridge/_dcd.h>

/*  ----------------------------------- Resource Manager */
#include <dspbridge/mgr.h>

/*  ----------------------------------- This */
#include <dspbridge/dspdrv.h>

/*
 *  ======== dsp_init ========
 *  Allocates bridge resources. Loads a base image onto DSP, if specified.
 */
u32 dsp_init(OUT u32 *init_status)
{
	char dev_node[MAXREGPATHLENGTH] = "TIOMAP1510";
	int status = -EPERM;
	struct drv_object *drv_obj = NULL;
	u32 device_node;
	u32 device_node_string;

	if (!wcd_init())
		goto func_cont;

	status = drv_create(&drv_obj);
	if (DSP_FAILED(status)) {
		wcd_exit();
		goto func_cont;
	}			/* End drv_create */

	/* Request Resources */
	status = drv_request_resources((u32) &dev_node, &device_node_string);
	if (DSP_SUCCEEDED(status)) {
		/* Attempt to Start the Device */
		status = dev_start_device((struct cfg_devnode *)
					  device_node_string);
		if (DSP_FAILED(status))
			(void)drv_release_resources
			    ((u32) device_node_string, drv_obj);
	} else {
		dev_dbg(bridge, "%s: drv_request_resources Failed\n",
			__func__);
		status = -EPERM;
	}

	/* Unwind whatever was loaded */
	if (DSP_FAILED(status)) {
		/* irrespective of the status of dev_remove_device we conitinue
		 * unloading. Get the Driver Object iterate through and remove.
		 * Reset the status to E_FAIL to avoid going through
		 * wcd_init_complete2. */
		for (device_node = drv_get_first_dev_extension();
		     device_node != 0;
		     device_node = drv_get_next_dev_extension(device_node)) {
			(void)dev_remove_device((struct cfg_devnode *)
						device_node);
			(void)drv_release_resources((u32) device_node,
						    drv_obj);
		}
		/* Remove the Driver Object */
		(void)drv_destroy(drv_obj);
		drv_obj = NULL;
		wcd_exit();
		dev_dbg(bridge, "%s: Logical device failed init\n", __func__);
	}			/* Unwinding the loaded drivers */
func_cont:
	/* Attempt to Start the Board */
	if (DSP_SUCCEEDED(status)) {
		/* BRD_AutoStart could fail if the dsp execuetable is not the
		 * correct one. We should not propagate that error
		 * into the device loader. */
		(void)wcd_init_complete2();
	} else {
		dev_dbg(bridge, "%s: Failed\n", __func__);
	}			/* End wcd_init_complete2 */

	DBC_ENSURE((DSP_SUCCEEDED(status) && drv_obj != NULL) ||
		   (DSP_FAILED(status) && drv_obj == NULL));

	*init_status = status;
	/* Return the Driver Object */
	return (u32) drv_obj;
}

/*
 *  ======== dsp_deinit ========
 *  Frees the resources allocated for bridge.
 */
bool dsp_deinit(u32 deviceContext)
{
	bool ret = true;
	u32 device_node;
	struct mgr_object *mgr_obj = NULL;

	/* Tear down every registered device extension. */
	while ((device_node = drv_get_first_dev_extension()) != 0) {
		(void)dev_remove_device((struct cfg_devnode *)device_node);
		(void)drv_release_resources((u32) device_node,
					    (struct drv_object *)
					    deviceContext);
	}

	(void)drv_destroy((struct drv_object *)deviceContext);

	/* Get the Manager Object from Registry
	 * MGR Destroy will unload the DCD dll */
	if (DSP_SUCCEEDED(cfg_get_object((u32 *) &mgr_obj, REG_MGR_OBJECT)))
		(void)mgr_destroy(mgr_obj);

	wcd_exit();

	return ret;
}
gpl-2.0
Stane1983/amlogic-m3
drivers/edac/mpc85xx_edac.c
42
32543
/* * Freescale MPC85xx Memory Controller kenel module * * Author: Dave Jiang <djiang@mvista.com> * * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ctype.h> #include <linux/io.h> #include <linux/mod_devicetable.h> #include <linux/edac.h> #include <linux/smp.h> #include <linux/gfp.h> #include <linux/of_platform.h> #include <linux/of_device.h> #include "edac_module.h" #include "edac_core.h" #include "mpc85xx_edac.h" static int edac_dev_idx; #ifdef CONFIG_PCI static int edac_pci_idx; #endif static int edac_mc_idx; static u32 orig_ddr_err_disable; static u32 orig_ddr_err_sbe; /* * PCI Err defines */ #ifdef CONFIG_PCI static u32 orig_pci_err_cap_dr; static u32 orig_pci_err_en; #endif static u32 orig_l2_err_disable; #ifdef CONFIG_MPC85xx static u32 orig_hid1[2]; #endif /************************ MC SYSFS parts ***********************************/ static ssize_t mpc85xx_mc_inject_data_hi_show(struct mem_ctl_info *mci, char *data) { struct mpc85xx_mc_pdata *pdata = mci->pvt_info; return sprintf(data, "0x%08x", in_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI)); } static ssize_t mpc85xx_mc_inject_data_lo_show(struct mem_ctl_info *mci, char *data) { struct mpc85xx_mc_pdata *pdata = mci->pvt_info; return sprintf(data, "0x%08x", in_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO)); } static ssize_t mpc85xx_mc_inject_ctrl_show(struct mem_ctl_info *mci, char *data) { struct mpc85xx_mc_pdata *pdata = mci->pvt_info; return sprintf(data, "0x%08x", in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT)); } static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci, const char *data, size_t count) { struct mpc85xx_mc_pdata *pdata = mci->pvt_info; if (isdigit(*data)) { 
out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI, simple_strtoul(data, NULL, 0)); return count; } return 0; } static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci, const char *data, size_t count) { struct mpc85xx_mc_pdata *pdata = mci->pvt_info; if (isdigit(*data)) { out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO, simple_strtoul(data, NULL, 0)); return count; } return 0; } static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci, const char *data, size_t count) { struct mpc85xx_mc_pdata *pdata = mci->pvt_info; if (isdigit(*data)) { out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT, simple_strtoul(data, NULL, 0)); return count; } return 0; } static struct mcidev_sysfs_attribute mpc85xx_mc_sysfs_attributes[] = { { .attr = { .name = "inject_data_hi", .mode = (S_IRUGO | S_IWUSR) }, .show = mpc85xx_mc_inject_data_hi_show, .store = mpc85xx_mc_inject_data_hi_store}, { .attr = { .name = "inject_data_lo", .mode = (S_IRUGO | S_IWUSR) }, .show = mpc85xx_mc_inject_data_lo_show, .store = mpc85xx_mc_inject_data_lo_store}, { .attr = { .name = "inject_ctrl", .mode = (S_IRUGO | S_IWUSR) }, .show = mpc85xx_mc_inject_ctrl_show, .store = mpc85xx_mc_inject_ctrl_store}, /* End of list */ { .attr = {.name = NULL} } }; static void mpc85xx_set_mc_sysfs_attributes(struct mem_ctl_info *mci) { mci->mc_driver_sysfs_attributes = mpc85xx_mc_sysfs_attributes; } /**************************** PCI Err device ***************************/ #ifdef CONFIG_PCI static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci) { struct mpc85xx_pci_pdata *pdata = pci->pvt_info; u32 err_detect; err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR); /* master aborts can happen during PCI config cycles */ if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) { out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect); return; } printk(KERN_ERR "PCI error(s) detected\n"); printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect); printk(KERN_ERR 
"PCI/X ERR_ATTRIB register: %#08x\n", in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB)); printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n", in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR)); printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n", in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR)); printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n", in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL)); printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n", in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH)); /* clear error bits */ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect); if (err_detect & PCI_EDE_PERR_MASK) edac_pci_handle_pe(pci, pci->ctl_name); if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK) edac_pci_handle_npe(pci, pci->ctl_name); } static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id) { struct edac_pci_ctl_info *pci = dev_id; struct mpc85xx_pci_pdata *pdata = pci->pvt_info; u32 err_detect; err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR); if (!err_detect) return IRQ_NONE; mpc85xx_pci_check(pci); return IRQ_HANDLED; } static int __devinit mpc85xx_pci_err_probe(struct of_device *op, const struct of_device_id *match) { struct edac_pci_ctl_info *pci; struct mpc85xx_pci_pdata *pdata; struct resource r; int res = 0; if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL)) return -ENOMEM; pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err"); if (!pci) return -ENOMEM; pdata = pci->pvt_info; pdata->name = "mpc85xx_pci_err"; pdata->irq = NO_IRQ; dev_set_drvdata(&op->dev, pci); pci->dev = &op->dev; pci->mod_name = EDAC_MOD_STR; pci->ctl_name = pdata->name; pci->dev_name = dev_name(&op->dev); if (edac_op_state == EDAC_OPSTATE_POLL) pci->edac_check = mpc85xx_pci_check; pdata->edac_idx = edac_pci_idx++; res = of_address_to_resource(op->node, 0, &r); if (res) { printk(KERN_ERR "%s: Unable to get resource for " "PCI err regs\n", __func__); goto err; } /* we only need the error registers */ r.start += 0xe00; if 
(!devm_request_mem_region(&op->dev, r.start, resource_size(&r), pdata->name)) { printk(KERN_ERR "%s: Error while requesting mem region\n", __func__); res = -EBUSY; goto err; } pdata->pci_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r)); if (!pdata->pci_vbase) { printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__); res = -ENOMEM; goto err; } orig_pci_err_cap_dr = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR); /* PCI master abort is expected during config cycles */ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40); orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN); /* disable master abort reporting */ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40); /* clear error bits */ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0); if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { debugf3("%s(): failed edac_pci_add_device()\n", __func__); goto err; } if (edac_op_state == EDAC_OPSTATE_INT) { pdata->irq = irq_of_parse_and_map(op->node, 0); res = devm_request_irq(&op->dev, pdata->irq, mpc85xx_pci_isr, IRQF_DISABLED, "[EDAC] PCI err", pci); if (res < 0) { printk(KERN_ERR "%s: Unable to requiest irq %d for " "MPC85xx PCI err\n", __func__, pdata->irq); irq_dispose_mapping(pdata->irq); res = -ENODEV; goto err2; } printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n", pdata->irq); } devres_remove_group(&op->dev, mpc85xx_pci_err_probe); debugf3("%s(): success\n", __func__); printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n"); return 0; err2: edac_pci_del_device(&op->dev); err: edac_pci_free_ctl_info(pci); devres_release_group(&op->dev, mpc85xx_pci_err_probe); return res; } static int mpc85xx_pci_err_remove(struct of_device *op) { struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev); struct mpc85xx_pci_pdata *pdata = pci->pvt_info; debugf0("%s()\n", __func__); out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, orig_pci_err_cap_dr); out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en); 
edac_pci_del_device(pci->dev); if (edac_op_state == EDAC_OPSTATE_INT) irq_dispose_mapping(pdata->irq); edac_pci_free_ctl_info(pci); return 0; } static struct of_device_id mpc85xx_pci_err_of_match[] = { { .compatible = "fsl,mpc8540-pcix", }, { .compatible = "fsl,mpc8540-pci", }, {}, }; static struct of_platform_driver mpc85xx_pci_err_driver = { .owner = THIS_MODULE, .name = "mpc85xx_pci_err", .match_table = mpc85xx_pci_err_of_match, .probe = mpc85xx_pci_err_probe, .remove = __devexit_p(mpc85xx_pci_err_remove), .driver = { .name = "mpc85xx_pci_err", .owner = THIS_MODULE, }, }; #endif /* CONFIG_PCI */ /**************************** L2 Err device ***************************/ /************************ L2 SYSFS parts ***********************************/ static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info *edac_dev, char *data) { struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; return sprintf(data, "0x%08x", in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI)); } static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info *edac_dev, char *data) { struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; return sprintf(data, "0x%08x", in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO)); } static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info *edac_dev, char *data) { struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; return sprintf(data, "0x%08x", in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL)); } static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info *edac_dev, const char *data, size_t count) { struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; if (isdigit(*data)) { out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI, simple_strtoul(data, NULL, 0)); return count; } return 0; } static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info *edac_dev, const char *data, size_t count) { struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; if (isdigit(*data)) { out_be32(pdata->l2_vbase + 
MPC85XX_L2_ERRINJLO, simple_strtoul(data, NULL, 0)); return count; } return 0; } static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info *edac_dev, const char *data, size_t count) { struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; if (isdigit(*data)) { out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL, simple_strtoul(data, NULL, 0)); return count; } return 0; } static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = { { .attr = { .name = "inject_data_hi", .mode = (S_IRUGO | S_IWUSR) }, .show = mpc85xx_l2_inject_data_hi_show, .store = mpc85xx_l2_inject_data_hi_store}, { .attr = { .name = "inject_data_lo", .mode = (S_IRUGO | S_IWUSR) }, .show = mpc85xx_l2_inject_data_lo_show, .store = mpc85xx_l2_inject_data_lo_store}, { .attr = { .name = "inject_ctrl", .mode = (S_IRUGO | S_IWUSR) }, .show = mpc85xx_l2_inject_ctrl_show, .store = mpc85xx_l2_inject_ctrl_store}, /* End of list */ { .attr = {.name = NULL} } }; static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info *edac_dev) { edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes; } /***************************** L2 ops ***********************************/ static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev) { struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; u32 err_detect; err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET); if (!(err_detect & L2_EDE_MASK)) return; printk(KERN_ERR "ECC Error in CPU L2 cache\n"); printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect); printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n", in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI)); printk(KERN_ERR "L2 Error Capture Data Lo Register: 0x%08x\n", in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO)); printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n", in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC)); printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n", in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR)); printk(KERN_ERR "L2 Error 
Address Capture Register: 0x%08x\n", in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR)); /* clear error detect register */ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect); if (err_detect & L2_EDE_CE_MASK) edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name); if (err_detect & L2_EDE_UE_MASK) edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); } static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id) { struct edac_device_ctl_info *edac_dev = dev_id; struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; u32 err_detect; err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET); if (!(err_detect & L2_EDE_MASK)) return IRQ_NONE; mpc85xx_l2_check(edac_dev); return IRQ_HANDLED; } static int __devinit mpc85xx_l2_err_probe(struct of_device *op, const struct of_device_id *match) { struct edac_device_ctl_info *edac_dev; struct mpc85xx_l2_pdata *pdata; struct resource r; int res; if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL)) return -ENOMEM; edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata), "cpu", 1, "L", 1, 2, NULL, 0, edac_dev_idx); if (!edac_dev) { devres_release_group(&op->dev, mpc85xx_l2_err_probe); return -ENOMEM; } pdata = edac_dev->pvt_info; pdata->name = "mpc85xx_l2_err"; pdata->irq = NO_IRQ; edac_dev->dev = &op->dev; dev_set_drvdata(edac_dev->dev, edac_dev); edac_dev->ctl_name = pdata->name; edac_dev->dev_name = pdata->name; res = of_address_to_resource(op->node, 0, &r); if (res) { printk(KERN_ERR "%s: Unable to get resource for " "L2 err regs\n", __func__); goto err; } /* we only need the error registers */ r.start += 0xe00; if (!devm_request_mem_region(&op->dev, r.start, r.end - r.start + 1, pdata->name)) { printk(KERN_ERR "%s: Error while requesting mem region\n", __func__); res = -EBUSY; goto err; } pdata->l2_vbase = devm_ioremap(&op->dev, r.start, r.end - r.start + 1); if (!pdata->l2_vbase) { printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__); res = -ENOMEM; goto err; } out_be32(pdata->l2_vbase + 
MPC85XX_L2_ERRDET, ~0); orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS); /* clear the err_dis */ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0); edac_dev->mod_name = EDAC_MOD_STR; if (edac_op_state == EDAC_OPSTATE_POLL) edac_dev->edac_check = mpc85xx_l2_check; mpc85xx_set_l2_sysfs_attributes(edac_dev); pdata->edac_idx = edac_dev_idx++; if (edac_device_add_device(edac_dev) > 0) { debugf3("%s(): failed edac_device_add_device()\n", __func__); goto err; } if (edac_op_state == EDAC_OPSTATE_INT) { pdata->irq = irq_of_parse_and_map(op->node, 0); res = devm_request_irq(&op->dev, pdata->irq, mpc85xx_l2_isr, IRQF_DISABLED, "[EDAC] L2 err", edac_dev); if (res < 0) { printk(KERN_ERR "%s: Unable to requiest irq %d for " "MPC85xx L2 err\n", __func__, pdata->irq); irq_dispose_mapping(pdata->irq); res = -ENODEV; goto err2; } printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n", pdata->irq); edac_dev->op_state = OP_RUNNING_INTERRUPT; out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK); } devres_remove_group(&op->dev, mpc85xx_l2_err_probe); debugf3("%s(): success\n", __func__); printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n"); return 0; err2: edac_device_del_device(&op->dev); err: devres_release_group(&op->dev, mpc85xx_l2_err_probe); edac_device_free_ctl_info(edac_dev); return res; } static int mpc85xx_l2_err_remove(struct of_device *op) { struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev); struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; debugf0("%s()\n", __func__); if (edac_op_state == EDAC_OPSTATE_INT) { out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0); irq_dispose_mapping(pdata->irq); } out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable); edac_device_del_device(&op->dev); edac_device_free_ctl_info(edac_dev); return 0; } static struct of_device_id mpc85xx_l2_err_of_match[] = { /* deprecate the fsl,85.. forms in the future, 2.6.30? 
*/ { .compatible = "fsl,8540-l2-cache-controller", }, { .compatible = "fsl,8541-l2-cache-controller", }, { .compatible = "fsl,8544-l2-cache-controller", }, { .compatible = "fsl,8548-l2-cache-controller", }, { .compatible = "fsl,8555-l2-cache-controller", }, { .compatible = "fsl,8568-l2-cache-controller", }, { .compatible = "fsl,mpc8536-l2-cache-controller", }, { .compatible = "fsl,mpc8540-l2-cache-controller", }, { .compatible = "fsl,mpc8541-l2-cache-controller", }, { .compatible = "fsl,mpc8544-l2-cache-controller", }, { .compatible = "fsl,mpc8548-l2-cache-controller", }, { .compatible = "fsl,mpc8555-l2-cache-controller", }, { .compatible = "fsl,mpc8560-l2-cache-controller", }, { .compatible = "fsl,mpc8568-l2-cache-controller", }, { .compatible = "fsl,mpc8572-l2-cache-controller", }, { .compatible = "fsl,p2020-l2-cache-controller", }, {}, }; static struct of_platform_driver mpc85xx_l2_err_driver = { .owner = THIS_MODULE, .name = "mpc85xx_l2_err", .match_table = mpc85xx_l2_err_of_match, .probe = mpc85xx_l2_err_probe, .remove = mpc85xx_l2_err_remove, .driver = { .name = "mpc85xx_l2_err", .owner = THIS_MODULE, }, }; /**************************** MC Err device ***************************/ /* * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the * MPC8572 User's Manual. Each line represents a syndrome bit column as a * 64-bit value, but split into an upper and lower 32-bit chunk. The labels * below correspond to Freescale's manuals. 
*/ static unsigned int ecc_table[16] = { /* MSB LSB */ /* [0:31] [32:63] */ 0xf00fe11e, 0xc33c0ff7, /* Syndrome bit 7 */ 0x00ff00ff, 0x00fff0ff, 0x0f0f0f0f, 0x0f0fff00, 0x11113333, 0x7777000f, 0x22224444, 0x8888222f, 0x44448888, 0xffff4441, 0x8888ffff, 0x11118882, 0xffff1111, 0x22221114, /* Syndrome bit 0 */ }; /* * Calculate the correct ECC value for a 64-bit value specified by high:low */ static u8 calculate_ecc(u32 high, u32 low) { u32 mask_low; u32 mask_high; int bit_cnt; u8 ecc = 0; int i; int j; for (i = 0; i < 8; i++) { mask_high = ecc_table[i * 2]; mask_low = ecc_table[i * 2 + 1]; bit_cnt = 0; for (j = 0; j < 32; j++) { if ((mask_high >> j) & 1) bit_cnt ^= (high >> j) & 1; if ((mask_low >> j) & 1) bit_cnt ^= (low >> j) & 1; } ecc |= bit_cnt << i; } return ecc; } /* * Create the syndrome code which is generated if the data line specified by * 'bit' failed. Eg generate an 8-bit codes seen in Table 8-55 in the MPC8641 * User's Manual and 9-61 in the MPC8572 User's Manual. */ static u8 syndrome_from_bit(unsigned int bit) { int i; u8 syndrome = 0; /* * Cycle through the upper or lower 32-bit portion of each value in * ecc_table depending on if 'bit' is in the upper or lower half of * 64-bit data. */ for (i = bit < 32; i < 16; i += 2) syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2); return syndrome; } /* * Decode data and ecc syndrome to determine what went wrong * Note: This can only decode single-bit errors */ static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc, int *bad_data_bit, int *bad_ecc_bit) { int i; u8 syndrome; *bad_data_bit = -1; *bad_ecc_bit = -1; /* * Calculate the ECC of the captured data and XOR it with the captured * ECC to find an ECC syndrome value we can search for */ syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc; /* Check if a data line is stuck... */ for (i = 0; i < 64; i++) { if (syndrome == syndrome_from_bit(i)) { *bad_data_bit = i; return; } } /* If data is correct, check ECC bits for errors... 
*/ for (i = 0; i < 8; i++) { if ((syndrome >> i) & 0x1) { *bad_ecc_bit = i; return; } } } static void mpc85xx_mc_check(struct mem_ctl_info *mci) { struct mpc85xx_mc_pdata *pdata = mci->pvt_info; struct csrow_info *csrow; u32 bus_width; u32 err_detect; u32 syndrome; u32 err_addr; u32 pfn; int row_index; u32 cap_high; u32 cap_low; int bad_data_bit; int bad_ecc_bit; err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT); if (!err_detect) return; mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n", err_detect); /* no more processing if not ECC bit errors */ if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) { out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect); return; } syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC); /* Mask off appropriate bits of syndrome based on bus width */ bus_width = (in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG) & DSC_DBW_MASK) ? 32 : 64; if (bus_width == 64) syndrome &= 0xff; else syndrome &= 0xffff; err_addr = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS); pfn = err_addr >> PAGE_SHIFT; for (row_index = 0; row_index < mci->nr_csrows; row_index++) { csrow = &mci->csrows[row_index]; if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page)) break; } cap_high = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_HI); cap_low = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_LO); /* * Analyze single-bit errors on 64-bit wide buses * TODO: Add support for 32-bit wide buses */ if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) { sbe_ecc_decode(cap_high, cap_low, syndrome, &bad_data_bit, &bad_ecc_bit); if (bad_data_bit != -1) mpc85xx_mc_printk(mci, KERN_ERR, "Faulty Data bit: %d\n", bad_data_bit); if (bad_ecc_bit != -1) mpc85xx_mc_printk(mci, KERN_ERR, "Faulty ECC bit: %d\n", bad_ecc_bit); mpc85xx_mc_printk(mci, KERN_ERR, "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n", cap_high ^ (1 << (bad_data_bit - 32)), cap_low ^ (1 << bad_data_bit), syndrome ^ (1 << bad_ecc_bit)); } 
mpc85xx_mc_printk(mci, KERN_ERR, "Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n", cap_high, cap_low, syndrome); mpc85xx_mc_printk(mci, KERN_ERR, "Err addr: %#8.8x\n", err_addr); mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn); /* we are out of range */ if (row_index == mci->nr_csrows) mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n"); if (err_detect & DDR_EDE_SBE) edac_mc_handle_ce(mci, pfn, err_addr & PAGE_MASK, syndrome, row_index, 0, mci->ctl_name); if (err_detect & DDR_EDE_MBE) edac_mc_handle_ue(mci, pfn, err_addr & PAGE_MASK, row_index, mci->ctl_name); out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect); } static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id) { struct mem_ctl_info *mci = dev_id; struct mpc85xx_mc_pdata *pdata = mci->pvt_info; u32 err_detect; err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT); if (!err_detect) return IRQ_NONE; mpc85xx_mc_check(mci); return IRQ_HANDLED; } static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci) { struct mpc85xx_mc_pdata *pdata = mci->pvt_info; struct csrow_info *csrow; u32 sdram_ctl; u32 sdtype; enum mem_type mtype; u32 cs_bnds; int index; sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG); sdtype = sdram_ctl & DSC_SDTYPE_MASK; if (sdram_ctl & DSC_RD_EN) { switch (sdtype) { case DSC_SDTYPE_DDR: mtype = MEM_RDDR; break; case DSC_SDTYPE_DDR2: mtype = MEM_RDDR2; break; case DSC_SDTYPE_DDR3: mtype = MEM_RDDR3; break; default: mtype = MEM_UNKNOWN; break; } } else { switch (sdtype) { case DSC_SDTYPE_DDR: mtype = MEM_DDR; break; case DSC_SDTYPE_DDR2: mtype = MEM_DDR2; break; case DSC_SDTYPE_DDR3: mtype = MEM_DDR3; break; default: mtype = MEM_UNKNOWN; break; } } for (index = 0; index < mci->nr_csrows; index++) { u32 start; u32 end; csrow = &mci->csrows[index]; cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 + (index * MPC85XX_MC_CS_BNDS_OFS)); start = (cs_bnds & 0xffff0000) >> 16; end = (cs_bnds & 0x0000ffff); if (start == end) continue; /* not 
populated */ start <<= (24 - PAGE_SHIFT); end <<= (24 - PAGE_SHIFT); end |= (1 << (24 - PAGE_SHIFT)) - 1; csrow->first_page = start; csrow->last_page = end; csrow->nr_pages = end + 1 - start; csrow->grain = 8; csrow->mtype = mtype; csrow->dtype = DEV_UNKNOWN; if (sdram_ctl & DSC_X32_EN) csrow->dtype = DEV_X32; csrow->edac_mode = EDAC_SECDED; } } static int __devinit mpc85xx_mc_err_probe(struct of_device *op, const struct of_device_id *match) { struct mem_ctl_info *mci; struct mpc85xx_mc_pdata *pdata; struct resource r; u32 sdram_ctl; int res; if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL)) return -ENOMEM; mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx); if (!mci) { devres_release_group(&op->dev, mpc85xx_mc_err_probe); return -ENOMEM; } pdata = mci->pvt_info; pdata->name = "mpc85xx_mc_err"; pdata->irq = NO_IRQ; mci->dev = &op->dev; pdata->edac_idx = edac_mc_idx++; dev_set_drvdata(mci->dev, mci); mci->ctl_name = pdata->name; mci->dev_name = pdata->name; res = of_address_to_resource(op->node, 0, &r); if (res) { printk(KERN_ERR "%s: Unable to get resource for MC err regs\n", __func__); goto err; } if (!devm_request_mem_region(&op->dev, r.start, r.end - r.start + 1, pdata->name)) { printk(KERN_ERR "%s: Error while requesting mem region\n", __func__); res = -EBUSY; goto err; } pdata->mc_vbase = devm_ioremap(&op->dev, r.start, r.end - r.start + 1); if (!pdata->mc_vbase) { printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__); res = -ENOMEM; goto err; } sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG); if (!(sdram_ctl & DSC_ECC_EN)) { /* no ECC */ printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__); res = -ENODEV; goto err; } debugf3("%s(): init mci\n", __func__); mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 | MEM_FLAG_DDR | MEM_FLAG_DDR2; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_SECDED; mci->mod_name = EDAC_MOD_STR; mci->mod_ver = MPC85XX_REVISION; if (edac_op_state == 
EDAC_OPSTATE_POLL) mci->edac_check = mpc85xx_mc_check; mci->ctl_page_to_phys = NULL; mci->scrub_mode = SCRUB_SW_SRC; mpc85xx_set_mc_sysfs_attributes(mci); mpc85xx_init_csrows(mci); /* store the original error disable bits */ orig_ddr_err_disable = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE); out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0); /* clear all error bits */ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0); if (edac_mc_add_mc(mci)) { debugf3("%s(): failed edac_mc_add_mc()\n", __func__); goto err; } if (edac_op_state == EDAC_OPSTATE_INT) { out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, DDR_EIE_MBEE | DDR_EIE_SBEE); /* store the original error management threshold */ orig_ddr_err_sbe = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE) & 0xff0000; /* set threshold to 1 error per interrupt */ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000); /* register interrupts */ pdata->irq = irq_of_parse_and_map(op->node, 0); res = devm_request_irq(&op->dev, pdata->irq, mpc85xx_mc_isr, IRQF_DISABLED | IRQF_SHARED, "[EDAC] MC err", mci); if (res < 0) { printk(KERN_ERR "%s: Unable to request irq %d for " "MPC85xx DRAM ERR\n", __func__, pdata->irq); irq_dispose_mapping(pdata->irq); res = -ENODEV; goto err2; } printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n", pdata->irq); } devres_remove_group(&op->dev, mpc85xx_mc_err_probe); debugf3("%s(): success\n", __func__); printk(KERN_INFO EDAC_MOD_STR " MC err registered\n"); return 0; err2: edac_mc_del_mc(&op->dev); err: devres_release_group(&op->dev, mpc85xx_mc_err_probe); edac_mc_free(mci); return res; } static int mpc85xx_mc_err_remove(struct of_device *op) { struct mem_ctl_info *mci = dev_get_drvdata(&op->dev); struct mpc85xx_mc_pdata *pdata = mci->pvt_info; debugf0("%s()\n", __func__); if (edac_op_state == EDAC_OPSTATE_INT) { out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0); irq_dispose_mapping(pdata->irq); } out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, orig_ddr_err_disable); 
out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe); edac_mc_del_mc(&op->dev); edac_mc_free(mci); return 0; } static struct of_device_id mpc85xx_mc_err_of_match[] = { /* deprecate the fsl,85.. forms in the future, 2.6.30? */ { .compatible = "fsl,8540-memory-controller", }, { .compatible = "fsl,8541-memory-controller", }, { .compatible = "fsl,8544-memory-controller", }, { .compatible = "fsl,8548-memory-controller", }, { .compatible = "fsl,8555-memory-controller", }, { .compatible = "fsl,8568-memory-controller", }, { .compatible = "fsl,mpc8536-memory-controller", }, { .compatible = "fsl,mpc8540-memory-controller", }, { .compatible = "fsl,mpc8541-memory-controller", }, { .compatible = "fsl,mpc8544-memory-controller", }, { .compatible = "fsl,mpc8548-memory-controller", }, { .compatible = "fsl,mpc8555-memory-controller", }, { .compatible = "fsl,mpc8560-memory-controller", }, { .compatible = "fsl,mpc8568-memory-controller", }, { .compatible = "fsl,mpc8572-memory-controller", }, { .compatible = "fsl,mpc8349-memory-controller", }, { .compatible = "fsl,p2020-memory-controller", }, {}, }; static struct of_platform_driver mpc85xx_mc_err_driver = { .owner = THIS_MODULE, .name = "mpc85xx_mc_err", .match_table = mpc85xx_mc_err_of_match, .probe = mpc85xx_mc_err_probe, .remove = mpc85xx_mc_err_remove, .driver = { .name = "mpc85xx_mc_err", .owner = THIS_MODULE, }, }; #ifdef CONFIG_MPC85xx static void __init mpc85xx_mc_clear_rfxe(void *data) { orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1); mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~0x20000)); } #endif static int __init mpc85xx_mc_init(void) { int res = 0; printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, " "(C) 2006 Montavista Software\n"); /* make sure error reporting method is sane */ switch (edac_op_state) { case EDAC_OPSTATE_POLL: case EDAC_OPSTATE_INT: break; default: edac_op_state = EDAC_OPSTATE_INT; break; } res = of_register_platform_driver(&mpc85xx_mc_err_driver); if (res) printk(KERN_WARNING 
EDAC_MOD_STR "MC fails to register\n"); res = of_register_platform_driver(&mpc85xx_l2_err_driver); if (res) printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n"); #ifdef CONFIG_PCI res = of_register_platform_driver(&mpc85xx_pci_err_driver); if (res) printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n"); #endif #ifdef CONFIG_MPC85xx /* * need to clear HID1[RFXE] to disable machine check int * so we can catch it */ if (edac_op_state == EDAC_OPSTATE_INT) on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0); #endif return 0; } module_init(mpc85xx_mc_init); #ifdef CONFIG_MPC85xx static void __exit mpc85xx_mc_restore_hid1(void *data) { mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]); } #endif static void __exit mpc85xx_mc_exit(void) { #ifdef CONFIG_MPC85xx on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0); #endif #ifdef CONFIG_PCI of_unregister_platform_driver(&mpc85xx_pci_err_driver); #endif of_unregister_platform_driver(&mpc85xx_l2_err_driver); of_unregister_platform_driver(&mpc85xx_mc_err_driver); } module_exit(mpc85xx_mc_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Montavista Software, Inc."); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
gpl-2.0
BPI-SINOVOIP/BPI-Mainline-kernel
linux-4.19/arch/mips/pci/pci-lasat.c
298
2141
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2000, 2001, 04 Keith M Wesolowski */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/types.h> #include <asm/lasat/lasat.h> #include <irq.h> extern struct pci_ops nile4_pci_ops; extern struct pci_ops gt64xxx_pci0_ops; static struct resource lasat_pci_mem_resource = { .name = "LASAT PCI MEM", .start = 0x18000000, .end = 0x19ffffff, .flags = IORESOURCE_MEM, }; static struct resource lasat_pci_io_resource = { .name = "LASAT PCI IO", .start = 0x1a000000, .end = 0x1bffffff, .flags = IORESOURCE_IO, }; static struct pci_controller lasat_pci_controller = { .mem_resource = &lasat_pci_mem_resource, .io_resource = &lasat_pci_io_resource, }; static int __init lasat_pci_setup(void) { printk(KERN_DEBUG "PCI: starting\n"); if (IS_LASAT_200()) lasat_pci_controller.pci_ops = &nile4_pci_ops; else lasat_pci_controller.pci_ops = &gt64xxx_pci0_ops; register_pci_controller(&lasat_pci_controller); return 0; } arch_initcall(lasat_pci_setup); #define LASAT_IRQ_ETH1 (LASAT_IRQ_BASE + 0) #define LASAT_IRQ_ETH0 (LASAT_IRQ_BASE + 1) #define LASAT_IRQ_HDC (LASAT_IRQ_BASE + 2) #define LASAT_IRQ_COMP (LASAT_IRQ_BASE + 3) #define LASAT_IRQ_HDLC (LASAT_IRQ_BASE + 4) #define LASAT_IRQ_PCIA (LASAT_IRQ_BASE + 5) #define LASAT_IRQ_PCIB (LASAT_IRQ_BASE + 6) #define LASAT_IRQ_PCIC (LASAT_IRQ_BASE + 7) #define LASAT_IRQ_PCID (LASAT_IRQ_BASE + 8) int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { switch (slot) { case 1: case 2: case 3: return LASAT_IRQ_PCIA + (((slot-1) + (pin-1)) % 4); case 4: return LASAT_IRQ_ETH1; /* Ethernet 1 (LAN 2) */ case 5: return LASAT_IRQ_ETH0; /* Ethernet 0 (LAN 1) */ case 6: return LASAT_IRQ_HDC; /* IDE controller */ default: return 0xff; /* Illegal */ } return -1; } /* Do platform specific device initialization at pci_enable_device() 
time */ int pcibios_plat_dev_init(struct pci_dev *dev) { return 0; }
gpl-2.0
Elite-Kernels/HTC-10
kernel/pid.c
298
15238
/* * Generic pidhash and scalable, time-bounded PID allocator * * (C) 2002-2003 Nadia Yvette Chambers, IBM * (C) 2004 Nadia Yvette Chambers, Oracle * (C) 2002-2004 Ingo Molnar, Red Hat * * pid-structures are backing objects for tasks sharing a given ID to chain * against. There is very little to them aside from hashing them and * parking tasks using given ID's on a list. * * The hash is always changed with the tasklist_lock write-acquired, * and the hash is only accessed with the tasklist_lock at least * read-acquired, so there's no additional SMP locking needed here. * * We have a list of bitmap pages, which bitmaps represent the PID space. * Allocating and freeing PIDs is completely lockless. The worst-case * allocation scenario when all but one out of 1 million PIDs possible are * allocated already: the scanning of 32 list entries and at most PAGE_SIZE * bytes. The typical fastpath is a single successful setbit. Freeing is O(1). * * Pid namespaces: * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc. 
* (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM * Many thanks to Oleg Nesterov for comments and help * */ #include <linux/mm.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/rculist.h> #include <linux/bootmem.h> #include <linux/hash.h> #include <linux/pid_namespace.h> #include <linux/init_task.h> #include <linux/syscalls.h> #include <linux/proc_ns.h> #include <linux/proc_fs.h> #define pid_hashfn(nr, ns) \ hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) static struct hlist_head *pid_hash; static unsigned int pidhash_shift = 4; struct pid init_struct_pid = INIT_STRUCT_PID; int pid_max = PID_MAX_DEFAULT; #define RESERVED_PIDS 300 int pid_max_min = RESERVED_PIDS + 1; int pid_max_max = PID_MAX_LIMIT; static inline int mk_pid(struct pid_namespace *pid_ns, struct pidmap *map, int off) { return (map - pid_ns->pidmap)*BITS_PER_PAGE + off; } #define find_next_offset(map, off) \ find_next_zero_bit((map)->page, BITS_PER_PAGE, off) /* * PID-map pages start out as NULL, they get allocated upon * first use and are never deallocated. This way a low pid_max * value does not cause lots of bitmaps to be allocated, but * the scheme scales to up to 4 million PIDs, runtime. */ struct pid_namespace init_pid_ns = { .kref = { .refcount = ATOMIC_INIT(2), }, .pidmap = { [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } }, .last_pid = 0, .nr_hashed = PIDNS_HASH_ADDING, .level = 0, .child_reaper = &init_task, .user_ns = &init_user_ns, .proc_inum = PROC_PID_INIT_INO, }; EXPORT_SYMBOL_GPL(init_pid_ns); /* * Note: disable interrupts while the pidmap_lock is held as an * interrupt might come in and do read_lock(&tasklist_lock). 
* * If we don't disable interrupts there is a nasty deadlock between * detach_pid()->free_pid() and another cpu that does * spin_lock(&pidmap_lock) followed by an interrupt routine that does * read_lock(&tasklist_lock); * * After we clean up the tasklist_lock and know there are no * irq handlers that take it we can leave the interrupts enabled. * For now it is easier to be safe than to prove it can't happen. */ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock); static void free_pidmap(struct upid *upid) { int nr = upid->nr; struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE; int offset = nr & BITS_PER_PAGE_MASK; clear_bit(offset, map->page); atomic_inc(&map->nr_free); } /* * If we started walking pids at 'base', is 'a' seen before 'b'? */ static int pid_before(int base, int a, int b) { /* * This is the same as saying * * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT * and that mapping orders 'a' and 'b' with respect to 'base'. */ return (unsigned)(a - base) < (unsigned)(b - base); } /* * We might be racing with someone else trying to set pid_ns->last_pid * at the pid allocation time (there's also a sysctl for this, but racing * with this one is OK, see comment in kernel/pid_namespace.c about it). * We want the winner to have the "later" value, because if the * "earlier" value prevails, then a pid may get reused immediately. * * Since pids rollover, it is not sufficient to just pick the bigger * value. We have to consider where we started counting from. * * 'base' is the value of pid_ns->last_pid that we observed when * we started looking for a pid. * * 'pid' is the pid that we eventually found. 
*/ static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid) { int prev; int last_write = base; do { prev = last_write; last_write = cmpxchg(&pid_ns->last_pid, prev, pid); } while ((prev != last_write) && (pid_before(base, last_write, pid))); } static int alloc_pidmap(struct pid_namespace *pid_ns) { int i, offset, max_scan, pid, last = pid_ns->last_pid; struct pidmap *map; pid = last + 1; if (pid >= pid_max) pid = RESERVED_PIDS; offset = pid & BITS_PER_PAGE_MASK; map = &pid_ns->pidmap[pid/BITS_PER_PAGE]; /* * If last_pid points into the middle of the map->page we * want to scan this bitmap block twice, the second time * we start with offset == 0 (or RESERVED_PIDS). */ max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset; for (i = 0; i <= max_scan; ++i) { if (unlikely(!map->page)) { void *page = kzalloc(PAGE_SIZE, GFP_KERNEL); /* * Free the page if someone raced with us * installing it: */ spin_lock_irq(&pidmap_lock); if (!map->page) { map->page = page; page = NULL; } spin_unlock_irq(&pidmap_lock); kfree(page); if (unlikely(!map->page)) break; } if (likely(atomic_read(&map->nr_free))) { for ( ; ; ) { if (!test_and_set_bit(offset, map->page)) { atomic_dec(&map->nr_free); set_last_pid(pid_ns, last, pid); return pid; } offset = find_next_offset(map, offset); if (offset >= BITS_PER_PAGE) break; pid = mk_pid(pid_ns, map, offset); if (pid >= pid_max) break; } } if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) { ++map; offset = 0; } else { map = &pid_ns->pidmap[0]; offset = RESERVED_PIDS; if (unlikely(last == offset)) break; } pid = mk_pid(pid_ns, map, offset); } return -1; } int next_pidmap(struct pid_namespace *pid_ns, unsigned int last) { int offset; struct pidmap *map, *end; if (last >= PID_MAX_LIMIT) return -1; offset = (last + 1) & BITS_PER_PAGE_MASK; map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE]; end = &pid_ns->pidmap[PIDMAP_ENTRIES]; for (; map < end; map++, offset = 0) { if (unlikely(!map->page)) continue; offset = 
find_next_bit((map)->page, BITS_PER_PAGE, offset); if (offset < BITS_PER_PAGE) return mk_pid(pid_ns, map, offset); } return -1; } void put_pid(struct pid *pid) { struct pid_namespace *ns; if (!pid) return; ns = pid->numbers[pid->level].ns; if ((atomic_read(&pid->count) == 1) || atomic_dec_and_test(&pid->count)) { kmem_cache_free(ns->pid_cachep, pid); put_pid_ns(ns); } } EXPORT_SYMBOL_GPL(put_pid); static void delayed_put_pid(struct rcu_head *rhp) { struct pid *pid = container_of(rhp, struct pid, rcu); put_pid(pid); } void free_pid(struct pid *pid) { /* We can be called with write_lock_irq(&tasklist_lock) held */ int i; unsigned long flags; spin_lock_irqsave(&pidmap_lock, flags); for (i = 0; i <= pid->level; i++) { struct upid *upid = pid->numbers + i; struct pid_namespace *ns = upid->ns; hlist_del_rcu(&upid->pid_chain); switch(--ns->nr_hashed) { case 2: case 1: /* When all that is left in the pid namespace * is the reaper wake up the reaper. The reaper * may be sleeping in zap_pid_ns_processes(). 
*/ wake_up_process(ns->child_reaper); break; case PIDNS_HASH_ADDING: /* Handle a fork failure of the first process */ WARN_ON(ns->child_reaper); ns->nr_hashed = 0; /* fall through */ case 0: schedule_work(&ns->proc_work); break; } } spin_unlock_irqrestore(&pidmap_lock, flags); for (i = 0; i <= pid->level; i++) free_pidmap(pid->numbers + i); call_rcu(&pid->rcu, delayed_put_pid); } struct pid *alloc_pid(struct pid_namespace *ns) { struct pid *pid; enum pid_type type; int i, nr; struct pid_namespace *tmp; struct upid *upid; pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL); if (!pid) goto out; tmp = ns; pid->level = ns->level; for (i = ns->level; i >= 0; i--) { nr = alloc_pidmap(tmp); if (nr < 0) goto out_free; pid->numbers[i].nr = nr; pid->numbers[i].ns = tmp; tmp = tmp->parent; } if (unlikely(is_child_reaper(pid))) { if (pid_ns_prepare_proc(ns)) goto out_free; } get_pid_ns(ns); atomic_set(&pid->count, 1); for (type = 0; type < PIDTYPE_MAX; ++type) INIT_HLIST_HEAD(&pid->tasks[type]); upid = pid->numbers + ns->level; spin_lock_irq(&pidmap_lock); if (!(ns->nr_hashed & PIDNS_HASH_ADDING)) goto out_unlock; for ( ; upid >= pid->numbers; --upid) { hlist_add_head_rcu(&upid->pid_chain, &pid_hash[pid_hashfn(upid->nr, upid->ns)]); upid->ns->nr_hashed++; } spin_unlock_irq(&pidmap_lock); out: return pid; out_unlock: spin_unlock_irq(&pidmap_lock); put_pid_ns(ns); out_free: while (++i <= ns->level) free_pidmap(pid->numbers + i); kmem_cache_free(ns->pid_cachep, pid); pid = NULL; goto out; } void disable_pid_allocation(struct pid_namespace *ns) { spin_lock_irq(&pidmap_lock); ns->nr_hashed &= ~PIDNS_HASH_ADDING; spin_unlock_irq(&pidmap_lock); } struct pid *find_pid_ns(int nr, struct pid_namespace *ns) { struct upid *pnr; hlist_for_each_entry_rcu(pnr, &pid_hash[pid_hashfn(nr, ns)], pid_chain) if (pnr->nr == nr && pnr->ns == ns) return container_of(pnr, struct pid, numbers[ns->level]); return NULL; } EXPORT_SYMBOL_GPL(find_pid_ns); struct pid *find_vpid(int nr) { return 
find_pid_ns(nr, task_active_pid_ns(current)); } EXPORT_SYMBOL_GPL(find_vpid); /* * attach_pid() must be called with the tasklist_lock write-held. */ void attach_pid(struct task_struct *task, enum pid_type type) { struct pid_link *link = &task->pids[type]; hlist_add_head_rcu(&link->node, &link->pid->tasks[type]); } static void __change_pid(struct task_struct *task, enum pid_type type, struct pid *new) { struct pid_link *link; struct pid *pid; int tmp; link = &task->pids[type]; pid = link->pid; hlist_del_rcu(&link->node); link->pid = new; for (tmp = PIDTYPE_MAX; --tmp >= 0; ) if (!hlist_empty(&pid->tasks[tmp])) return; free_pid(pid); } void detach_pid(struct task_struct *task, enum pid_type type) { __change_pid(task, type, NULL); } void change_pid(struct task_struct *task, enum pid_type type, struct pid *pid) { __change_pid(task, type, pid); attach_pid(task, type); } /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */ void transfer_pid(struct task_struct *old, struct task_struct *new, enum pid_type type) { new->pids[type].pid = old->pids[type].pid; hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node); } struct task_struct *pid_task(struct pid *pid, enum pid_type type) { struct task_struct *result = NULL; if (pid) { struct hlist_node *first; first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]), lockdep_tasklist_lock_is_held()); if (first) result = hlist_entry(first, struct task_struct, pids[(type)].node); } return result; } EXPORT_SYMBOL(pid_task); /* * Must be called under rcu_read_lock(). 
*/ struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) { rcu_lockdep_assert(rcu_read_lock_held(), "find_task_by_pid_ns() needs rcu_read_lock()" " protection"); return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); } struct task_struct *find_task_by_vpid(pid_t vnr) { return find_task_by_pid_ns(vnr, task_active_pid_ns(current)); } struct pid *get_task_pid(struct task_struct *task, enum pid_type type) { struct pid *pid; rcu_read_lock(); if (type != PIDTYPE_PID) task = task->group_leader; pid = get_pid(task->pids[type].pid); rcu_read_unlock(); return pid; } EXPORT_SYMBOL_GPL(get_task_pid); struct task_struct *get_pid_task(struct pid *pid, enum pid_type type) { struct task_struct *result; rcu_read_lock(); result = pid_task(pid, type); if (result) get_task_struct(result); rcu_read_unlock(); return result; } EXPORT_SYMBOL_GPL(get_pid_task); struct pid *find_get_pid(pid_t nr) { struct pid *pid; rcu_read_lock(); pid = get_pid(find_vpid(nr)); rcu_read_unlock(); return pid; } EXPORT_SYMBOL_GPL(find_get_pid); pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) { struct upid *upid; pid_t nr = 0; if (pid && ns->level <= pid->level) { upid = &pid->numbers[ns->level]; if (upid->ns == ns) nr = upid->nr; } return nr; } EXPORT_SYMBOL_GPL(pid_nr_ns); pid_t pid_vnr(struct pid *pid) { return pid_nr_ns(pid, task_active_pid_ns(current)); } EXPORT_SYMBOL_GPL(pid_vnr); pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns) { pid_t nr = 0; rcu_read_lock(); if (!ns) ns = task_active_pid_ns(current); if (likely(pid_alive(task))) { if (type != PIDTYPE_PID) task = task->group_leader; nr = pid_nr_ns(task->pids[type].pid, ns); } rcu_read_unlock(); return nr; } EXPORT_SYMBOL(__task_pid_nr_ns); pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) { return pid_nr_ns(task_tgid(tsk), ns); } EXPORT_SYMBOL(task_tgid_nr_ns); struct pid_namespace *task_active_pid_ns(struct task_struct *tsk) { return 
ns_of_pid(task_pid(tsk)); } EXPORT_SYMBOL_GPL(task_active_pid_ns); /* * Used by proc to find the first pid that is greater than or equal to nr. * * If there is a pid at nr this function is exactly the same as find_pid_ns. */ struct pid *find_ge_pid(int nr, struct pid_namespace *ns) { struct pid *pid; do { pid = find_pid_ns(nr, ns); if (pid) break; nr = next_pidmap(ns, nr); } while (nr > 0); return pid; } /* * The pid hash table is scaled according to the amount of memory in the * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or * more. */ void __init pidhash_init(void) { unsigned int i, pidhash_size; pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18, HASH_EARLY | HASH_SMALL, &pidhash_shift, NULL, 0, 4096); pidhash_size = 1U << pidhash_shift; for (i = 0; i < pidhash_size; i++) INIT_HLIST_HEAD(&pid_hash[i]); } void __init pidmap_init(void) { /* Veryify no one has done anything silly */ BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING); /* bump default and minimum pid_max based on number of cpus */ pid_max = min(pid_max_max, max_t(int, pid_max, PIDS_PER_CPU_DEFAULT * num_possible_cpus())); pid_max_min = max_t(int, pid_max_min, PIDS_PER_CPU_MIN * num_possible_cpus()); pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min); init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL); /* Reserve PID 0. We never call free_pidmap(0) */ set_bit(0, init_pid_ns.pidmap[0].page); atomic_dec(&init_pid_ns.pidmap[0].nr_free); init_pid_ns.pid_cachep = KMEM_CACHE(pid, SLAB_HWCACHE_ALIGN | SLAB_PANIC); }
gpl-2.0
LuckJava/KVMGT-kernel
drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c
298
12850
/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. * * GPL HEADER END */ /* * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. 
* * lustre/ptlrpc/gss/gss_bulk.c * * Author: Eric Mei <eric.mei@sun.com> */ #define DEBUG_SUBSYSTEM S_SEC #include <linux/module.h> #include <linux/slab.h> #include <linux/dcache.h> #include <linux/fs.h> #include <linux/mutex.h> #include <linux/crypto.h> #include <obd.h> #include <obd_class.h> #include <obd_support.h> #include <lustre/lustre_idl.h> #include <lustre_net.h> #include <lustre_import.h> #include <lustre_sec.h> #include "gss_err.h" #include "gss_internal.h" #include "gss_api.h" int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc) { struct gss_cli_ctx *gctx; struct lustre_msg *msg; struct ptlrpc_bulk_sec_desc *bsd; rawobj_t token; __u32 maj; int offset; int rc; LASSERT(req->rq_pack_bulk); LASSERT(req->rq_bulk_read || req->rq_bulk_write); gctx = container_of(ctx, struct gss_cli_ctx, gc_base); LASSERT(gctx->gc_mechctx); switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) { case SPTLRPC_SVC_NULL: LASSERT(req->rq_reqbuf->lm_bufcount >= 3); msg = req->rq_reqbuf; offset = msg->lm_bufcount - 1; break; case SPTLRPC_SVC_AUTH: case SPTLRPC_SVC_INTG: LASSERT(req->rq_reqbuf->lm_bufcount >= 4); msg = req->rq_reqbuf; offset = msg->lm_bufcount - 2; break; case SPTLRPC_SVC_PRIV: LASSERT(req->rq_clrbuf->lm_bufcount >= 2); msg = req->rq_clrbuf; offset = msg->lm_bufcount - 1; break; default: LBUG(); } bsd = lustre_msg_buf(msg, offset, sizeof(*bsd)); bsd->bsd_version = 0; bsd->bsd_flags = 0; bsd->bsd_type = SPTLRPC_BULK_DEFAULT; bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc); if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL) return 0; LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG || bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV); if (req->rq_bulk_read) { /* * bulk read: prepare receiving pages only for privacy mode. */ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV) return gss_cli_prep_bulk(req, desc); } else { /* * bulk write: sign or encrypt bulk pages. 
*/ bsd->bsd_nob = desc->bd_nob; if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) { /* integrity mode */ token.data = bsd->bsd_data; token.len = lustre_msg_buflen(msg, offset) - sizeof(*bsd); maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL, desc->bd_iov_count, desc->bd_iov, &token); if (maj != GSS_S_COMPLETE) { CWARN("failed to sign bulk data: %x\n", maj); return -EACCES; } } else { /* privacy mode */ if (desc->bd_iov_count == 0) return 0; rc = sptlrpc_enc_pool_get_pages(desc); if (rc) { CERROR("bulk write: failed to allocate " "encryption pages: %d\n", rc); return rc; } token.data = bsd->bsd_data; token.len = lustre_msg_buflen(msg, offset) - sizeof(*bsd); maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0); if (maj != GSS_S_COMPLETE) { CWARN("fail to encrypt bulk data: %x\n", maj); return -EACCES; } } } return 0; } int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc) { struct gss_cli_ctx *gctx; struct lustre_msg *rmsg, *vmsg; struct ptlrpc_bulk_sec_desc *bsdr, *bsdv; rawobj_t token; __u32 maj; int roff, voff; LASSERT(req->rq_pack_bulk); LASSERT(req->rq_bulk_read || req->rq_bulk_write); switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) { case SPTLRPC_SVC_NULL: vmsg = req->rq_repdata; voff = vmsg->lm_bufcount - 1; LASSERT(vmsg && vmsg->lm_bufcount >= 3); rmsg = req->rq_reqbuf; roff = rmsg->lm_bufcount - 1; /* last segment */ LASSERT(rmsg && rmsg->lm_bufcount >= 3); break; case SPTLRPC_SVC_AUTH: case SPTLRPC_SVC_INTG: vmsg = req->rq_repdata; voff = vmsg->lm_bufcount - 2; LASSERT(vmsg && vmsg->lm_bufcount >= 4); rmsg = req->rq_reqbuf; roff = rmsg->lm_bufcount - 2; /* second last segment */ LASSERT(rmsg && rmsg->lm_bufcount >= 4); break; case SPTLRPC_SVC_PRIV: vmsg = req->rq_repdata; voff = vmsg->lm_bufcount - 1; LASSERT(vmsg && vmsg->lm_bufcount >= 2); rmsg = req->rq_clrbuf; roff = rmsg->lm_bufcount - 1; /* last segment */ LASSERT(rmsg && rmsg->lm_bufcount >= 2); break; default: LBUG(); } bsdr = lustre_msg_buf(rmsg, 
roff, sizeof(*bsdr)); bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv)); LASSERT(bsdr && bsdv); if (bsdr->bsd_version != bsdv->bsd_version || bsdr->bsd_type != bsdv->bsd_type || bsdr->bsd_svc != bsdv->bsd_svc) { CERROR("bulk security descriptor mismatch: " "(%u,%u,%u) != (%u,%u,%u)\n", bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc, bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc); return -EPROTO; } LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL || bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG || bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV); /* * in privacy mode if return success, make sure bd_nob_transferred * is the actual size of the clear text, otherwise upper layer * may be surprised. */ if (req->rq_bulk_write) { if (bsdv->bsd_flags & BSD_FL_ERR) { CERROR("server reported bulk i/o failure\n"); return -EIO; } if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) desc->bd_nob_transferred = desc->bd_nob; } else { /* * bulk read, upon return success, bd_nob_transferred is * the size of plain text actually received. 
*/ gctx = container_of(ctx, struct gss_cli_ctx, gc_base); LASSERT(gctx->gc_mechctx); if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) { int i, nob; /* fix the actual data size */ for (i = 0, nob = 0; i < desc->bd_iov_count; i++) { if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) { desc->bd_iov[i].kiov_len = desc->bd_nob_transferred - nob; } nob += desc->bd_iov[i].kiov_len; } token.data = bsdv->bsd_data; token.len = lustre_msg_buflen(vmsg, voff) - sizeof(*bsdv); maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL, desc->bd_iov_count, desc->bd_iov, &token); if (maj != GSS_S_COMPLETE) { CERROR("failed to verify bulk read: %x\n", maj); return -EACCES; } } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) { desc->bd_nob = bsdv->bsd_nob; if (desc->bd_nob == 0) return 0; token.data = bsdv->bsd_data; token.len = lustre_msg_buflen(vmsg, voff) - sizeof(*bsdr); maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc, &token, 1); if (maj != GSS_S_COMPLETE) { CERROR("failed to decrypt bulk read: %x\n", maj); return -EACCES; } desc->bd_nob_transferred = desc->bd_nob; } } return 0; } static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc, struct gss_ctx *mechctx) { int rc; if (desc->bd_iov_count == 0) return 0; rc = sptlrpc_enc_pool_get_pages(desc); if (rc) return rc; if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE) return -EACCES; return 0; } int gss_cli_prep_bulk(struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc) { int rc; LASSERT(req->rq_cli_ctx); LASSERT(req->rq_pack_bulk); LASSERT(req->rq_bulk_read); if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV) return 0; rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx); if (rc) CERROR("bulk read: failed to prepare encryption " "pages: %d\n", rc); return rc; } int gss_svc_prep_bulk(struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc) { struct gss_svc_reqctx *grctx; struct ptlrpc_bulk_sec_desc *bsd; int rc; LASSERT(req->rq_svc_ctx); LASSERT(req->rq_pack_bulk); LASSERT(req->rq_bulk_write); 
grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx); LASSERT(grctx->src_reqbsd); LASSERT(grctx->src_repbsd); LASSERT(grctx->src_ctx); LASSERT(grctx->src_ctx->gsc_mechctx); bsd = grctx->src_reqbsd; if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV) return 0; rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx); if (rc) CERROR("bulk write: failed to prepare encryption " "pages: %d\n", rc); return rc; } int gss_svc_unwrap_bulk(struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc) { struct gss_svc_reqctx *grctx; struct ptlrpc_bulk_sec_desc *bsdr, *bsdv; rawobj_t token; __u32 maj; LASSERT(req->rq_svc_ctx); LASSERT(req->rq_pack_bulk); LASSERT(req->rq_bulk_write); grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx); LASSERT(grctx->src_reqbsd); LASSERT(grctx->src_repbsd); LASSERT(grctx->src_ctx); LASSERT(grctx->src_ctx->gsc_mechctx); bsdr = grctx->src_reqbsd; bsdv = grctx->src_repbsd; /* bsdr has been sanity checked during unpacking */ bsdv->bsd_version = 0; bsdv->bsd_type = SPTLRPC_BULK_DEFAULT; bsdv->bsd_svc = bsdr->bsd_svc; bsdv->bsd_flags = 0; switch (bsdv->bsd_svc) { case SPTLRPC_BULK_SVC_INTG: token.data = bsdr->bsd_data; token.len = grctx->src_reqbsd_size - sizeof(*bsdr); maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL, desc->bd_iov_count, desc->bd_iov, &token); if (maj != GSS_S_COMPLETE) { bsdv->bsd_flags |= BSD_FL_ERR; CERROR("failed to verify bulk signature: %x\n", maj); return -EACCES; } break; case SPTLRPC_BULK_SVC_PRIV: if (bsdr->bsd_nob != desc->bd_nob) { bsdv->bsd_flags |= BSD_FL_ERR; CERROR("prepared nob %d doesn't match the actual " "nob %d\n", desc->bd_nob, bsdr->bsd_nob); return -EPROTO; } if (desc->bd_iov_count == 0) { LASSERT(desc->bd_nob == 0); break; } token.data = bsdr->bsd_data; token.len = grctx->src_reqbsd_size - sizeof(*bsdr); maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx, desc, &token, 0); if (maj != GSS_S_COMPLETE) { bsdv->bsd_flags |= BSD_FL_ERR; CERROR("failed decrypt bulk data: %x\n", maj); return -EACCES; } break; } return 0; } int 
gss_svc_wrap_bulk(struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc) { struct gss_svc_reqctx *grctx; struct ptlrpc_bulk_sec_desc *bsdr, *bsdv; rawobj_t token; __u32 maj; int rc; LASSERT(req->rq_svc_ctx); LASSERT(req->rq_pack_bulk); LASSERT(req->rq_bulk_read); grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx); LASSERT(grctx->src_reqbsd); LASSERT(grctx->src_repbsd); LASSERT(grctx->src_ctx); LASSERT(grctx->src_ctx->gsc_mechctx); bsdr = grctx->src_reqbsd; bsdv = grctx->src_repbsd; /* bsdr has been sanity checked during unpacking */ bsdv->bsd_version = 0; bsdv->bsd_type = SPTLRPC_BULK_DEFAULT; bsdv->bsd_svc = bsdr->bsd_svc; bsdv->bsd_flags = 0; switch (bsdv->bsd_svc) { case SPTLRPC_BULK_SVC_INTG: token.data = bsdv->bsd_data; token.len = grctx->src_repbsd_size - sizeof(*bsdv); maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL, desc->bd_iov_count, desc->bd_iov, &token); if (maj != GSS_S_COMPLETE) { bsdv->bsd_flags |= BSD_FL_ERR; CERROR("failed to sign bulk data: %x\n", maj); return -EACCES; } break; case SPTLRPC_BULK_SVC_PRIV: bsdv->bsd_nob = desc->bd_nob; if (desc->bd_iov_count == 0) { LASSERT(desc->bd_nob == 0); break; } rc = sptlrpc_enc_pool_get_pages(desc); if (rc) { bsdv->bsd_flags |= BSD_FL_ERR; CERROR("bulk read: failed to allocate encryption " "pages: %d\n", rc); return rc; } token.data = bsdv->bsd_data; token.len = grctx->src_repbsd_size - sizeof(*bsdv); maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx, desc, &token, 1); if (maj != GSS_S_COMPLETE) { bsdv->bsd_flags |= BSD_FL_ERR; CERROR("failed to encrypt bulk data: %x\n", maj); return -EACCES; } break; } return 0; }
gpl-2.0
gohai/linux-vc4
arch/mips/cavium-octeon/smp.c
298
9470
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks */ #include <linux/cpu.h> #include <linux/delay.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/sched.h> #include <linux/module.h> #include <asm/mmu_context.h> #include <asm/time.h> #include <asm/setup.h> #include <asm/octeon/octeon.h> #include "octeon_boot.h" volatile unsigned long octeon_processor_boot = 0xff; volatile unsigned long octeon_processor_sp; volatile unsigned long octeon_processor_gp; #ifdef CONFIG_HOTPLUG_CPU uint64_t octeon_bootloader_entry_addr; EXPORT_SYMBOL(octeon_bootloader_entry_addr); #endif static irqreturn_t mailbox_interrupt(int irq, void *dev_id) { const int coreid = cvmx_get_core_num(); uint64_t action; /* Load the mailbox register to figure out what we're supposed to do */ action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff; /* Clear the mailbox to clear the interrupt */ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); if (action & SMP_CALL_FUNCTION) smp_call_function_interrupt(); if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); /* Check if we've been told to flush the icache */ if (action & SMP_ICACHE_FLUSH) asm volatile ("synci 0($0)\n"); return IRQ_HANDLED; } /** * Cause the function described by call_data to be executed on the passed * cpu. When the function has finished, increment the finished field of * call_data. 
*/

/*
 * Send an inter-processor interrupt to a single CPU: set the target
 * core's CIU mailbox bits; 'action' encodes the IPI message bits.
 */
void octeon_send_ipi_single(int cpu, unsigned int action)
{
	int coreid = cpu_logical_map(cpu);
	/*
	pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu, coreid, action);
	*/
	cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
}

/* Fan an IPI out to every CPU set in @mask. */
static inline void octeon_send_ipi_mask(const struct cpumask *mask,
					unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		octeon_send_ipi_single(i, action);
}

/**
 * Detect available CPUs, populate cpu_possible_mask
 */
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct linux_app_boot_info *labi;

	/* Nothing to do when booting with maxcpus=0. */
	if (!setup_max_cpus)
		return;

	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
	if (labi->labi_signature != LABI_SIGNATURE) {
		pr_info("The bootloader on this board does not support HOTPLUG_CPU.");
		return;
	}

	/* Remember where the bootloader restarts a released core. */
	octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
#endif
}

/*
 * Build the possible/present CPU masks and the coreid<->cpu-number
 * maps from the coremask the bootloader handed us.
 */
static void octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	int core_mask = octeon_get_boot_coremask();
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs get the lowest CPU numbers. */
	cpus = 1;
	for (id = 0; id < NR_CPUS; id++) {
		if ((id != coreid) && (core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * The possible CPUs are all those present on the chip.  We
	 * will assign CPU numbers for possible cores as well.  Cores
	 * are always consecutively numbered from 0.
	 */
	for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
		     id < num_cores && id < NR_CPUS; id++) {
		if (!(core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
#endif

	octeon_smp_hotplug_setup();
}

/**
 * Firmware CPU startup hook
 */
static void octeon_boot_secondary(int cpu, struct task_struct *idle)
{
	int count;

	pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
		cpu_logical_map(cpu));

	/*
	 * Publish the new task's stack/gp and the target coreid; the
	 * secondary spins on these in its startup path.
	 */
	octeon_processor_sp = __KSTK_TOS(idle);
	octeon_processor_gp = (unsigned long)(task_thread_info(idle));
	octeon_processor_boot = cpu_logical_map(cpu);
	mb();	/* make the writes above visible before polling */

	count = 10000;
	while (octeon_processor_sp && count) {
		/* Waiting for processor to get the SP and GP */
		udelay(1);
		count--;
	}
	if (count == 0)
		pr_err("Secondary boot timeout\n");
}

/**
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed
 */
static void octeon_init_secondary(void)
{
	unsigned int sr;

	/* Point the secondary at the kernel's exception base. */
	sr = set_c0_status(ST0_BEV);
	write_c0_ebase((u32)ebase);
	write_c0_status(sr);

	octeon_check_cpu_bist();
	octeon_init_cvmcount();

	octeon_irq_setup_secondary();
}

/**
 * Callout to firmware before smp_init
 */
void octeon_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * Only the low order mailbox bits are used for IPIs, leave
	 * the other bits alone.
	 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
	if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
			mailbox_interrupt)) {
		panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
	}
}

/**
 * Last chance for the board code to finish SMP initialization before
 * the CPU is "online".
 */
static void octeon_smp_finish(void)
{
	octeon_user_io_init();

	/* to generate the first CPU timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state);

/*
 * Take this CPU offline: CPU 0 may never go down, and offlining
 * requires bootloader hotplug support (entry address discovered in
 * octeon_smp_hotplug_setup()).
 */
static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	if (!octeon_bootloader_entry_addr)
		return -ENOTSUPP;

	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, &cpu_callin_map);
	octeon_fixup_irqs();

	flush_cache_all();
	local_flush_tlb_all();

	return 0;
}

/*
 * Wait for the dying CPU to reach CPU_DEAD, return its core to the
 * bootloader's available-core mask, then pulse its reset line.
 */
static void octeon_cpu_die(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t mask, new_mask;
	const struct cvmx_bootmem_named_block_desc *block_desc;

	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	/*
	 * This is a bit complicated strategics of getting/settig available
	 * cores mask, copied from bootloader
	 */
	mask = 1 << coreid;
	/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		labi->avail_coremask |= mask;
		new_mask = labi->avail_coremask;
	} else {	/* alternative, already initialized */
		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
			AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
		*p |= mask;
		new_mask = *p;
	}

	pr_info("Reset core %d. Available Coremask = 0x%x \n", coreid, new_mask);
	mb();	/* coremask update must land before the reset pulse */
	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}

/* Idle-loop replacement for a CPU going down: park until reset. */
void play_dead(void)
{
	int cpu = cpu_number_map(cvmx_get_core_num());

	idle_task_exit();
	octeon_processor_boot = 0xff;
	per_cpu(cpu_state, cpu) = CPU_DEAD;

	mb();

	while (1)	/* core will be reset here */
		;
}

extern void kernel_entry(unsigned long arg1, ...);

/* Entry vector installed for a re-plugged core. */
static void start_after_reset(void)
{
	kernel_entry(0, 0, 0);	/* set a2 = 0 for secondary core */
}

/*
 * Point the bootloader boot vector of @cpu's core at
 * start_after_reset() and kick the core (reset or NMI, depending on
 * whether it is still in the bootloader's available mask).
 */
static int octeon_update_boot_vector(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t avail_coremask;
	const struct cvmx_bootmem_named_block_desc *block_desc;
	struct boot_init_vector *boot_vect =
		(struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);

	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		avail_coremask = labi->avail_coremask;
		labi->avail_coremask &= ~(1 << coreid);
	} else {	/* alternative, already initialized */
		avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
			block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
	}

	if (!(avail_coremask & (1 << coreid))) {
		/* core not available, assume, that catched by simple-executive */
		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
	}

	boot_vect[coreid].app_start_func_addr =
		(uint32_t) (unsigned long) start_after_reset;
	boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;

	mb();	/* boot vector must be visible before the NMI */

	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

	return 0;
}

/* Hotplug notifier: install the boot vector before a CPU comes up. */
static int octeon_cpu_callback(struct notifier_block *nfb,
	unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		octeon_update_boot_vector(cpu);
		break;
	case CPU_ONLINE:
		pr_info("Cpu %d online\n", cpu);
		break;
	case CPU_DEAD:
		break;
	}

	return NOTIFY_OK;
}

static int register_cavium_notifier(void)
{
	hotcpu_notifier(octeon_cpu_callback, 0);
	return 0;
}
late_initcall(register_cavium_notifier);

#endif	/* CONFIG_HOTPLUG_CPU */

struct plat_smp_ops octeon_smp_ops = {
	.send_ipi_single	= octeon_send_ipi_single,
	.send_ipi_mask		= octeon_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
};
gpl-2.0
tytung/android_kernel_htcleo-2.6.32
arch/mn10300/unit-asb2305/leds.c
554
2968
/* ASB2305 Peripheral 7-segment LEDs x4 support * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/cpu/intctl-regs.h> #include <asm/cpu/rtc-regs.h> #include <unit/leds.h> static const u8 asb2305_led_hex_tbl[16] = { 0x80, 0xf2, 0x48, 0x60, 0x32, 0x24, 0x04, 0xf0, 0x00, 0x20, 0x10, 0x06, 0x8c, 0x42, 0x0c, 0x1c }; static const u32 asb2305_led_chase_tbl[6] = { ~0x02020202, /* top - segA */ ~0x04040404, /* right top - segB */ ~0x08080808, /* right bottom - segC */ ~0x10101010, /* bottom - segD */ ~0x20202020, /* left bottom - segE */ ~0x40404040, /* left top - segF */ }; static unsigned asb2305_led_chase; void peripheral_leds7x4_display_dec(unsigned int val, unsigned int points) { u32 leds; leds = asb2305_led_hex_tbl[(val/1000) % 10]; leds <<= 8; leds |= asb2305_led_hex_tbl[(val/100) % 10]; leds <<= 8; leds |= asb2305_led_hex_tbl[(val/10) % 10]; leds <<= 8; leds |= asb2305_led_hex_tbl[val % 10]; leds |= points^0x01010101; ASB2305_7SEGLEDS = leds; } void peripheral_leds7x4_display_hex(unsigned int val, unsigned int points) { u32 leds; leds = asb2305_led_hex_tbl[(val/1000) % 10]; leds <<= 8; leds |= asb2305_led_hex_tbl[(val/100) % 10]; leds <<= 8; leds |= asb2305_led_hex_tbl[(val/10) % 10]; leds <<= 8; leds |= asb2305_led_hex_tbl[val % 10]; leds |= points^0x01010101; ASB2305_7SEGLEDS = leds; } void peripheral_leds_display_exception(enum exception_code code) { u32 leds; leds = asb2305_led_hex_tbl[(code/0x100) % 0x10]; leds <<= 8; leds |= asb2305_led_hex_tbl[(code/0x10) % 0x10]; leds <<= 8; leds |= asb2305_led_hex_tbl[code % 0x10]; leds 
|= 0x6d010101; ASB2305_7SEGLEDS = leds; } void peripheral_leds7x4_display_minssecs(unsigned int time, unsigned int points) { u32 leds; leds = asb2305_led_hex_tbl[(time/600) % 6]; leds <<= 8; leds |= asb2305_led_hex_tbl[(time/60) % 10]; leds <<= 8; leds |= asb2305_led_hex_tbl[(time/10) % 6]; leds <<= 8; leds |= asb2305_led_hex_tbl[time % 10]; leds |= points^0x01010101; ASB2305_7SEGLEDS = leds; } void peripheral_leds7x4_display_rtc(void) { unsigned int clock; u8 mins, secs; mins = RTMCR; secs = RTSCR; clock = ((mins & 0xf0) >> 4); clock *= 10; clock += (mins & 0x0f); clock *= 6; clock += ((secs & 0xf0) >> 4); clock *= 10; clock += (secs & 0x0f); peripheral_leds7x4_display_minssecs(clock, 0); } void peripheral_leds_led_chase(void) { ASB2305_7SEGLEDS = asb2305_led_chase_tbl[asb2305_led_chase]; asb2305_led_chase++; if (asb2305_led_chase >= 6) asb2305_led_chase = 0; }
gpl-2.0
silver-alx/ac100_kernel
net/netfilter/xt_SECMARK.c
810
3385
/*
 * Module for modifying the secmark field of the skb, for use by
 * security subsystems.
 *
 * Based on the nfmark match by:
 * (C) 1999-2001 Marc Boucher <marc@mbsi.ca>
 *
 * (C) 2006,2008 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/selinux.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_SECMARK.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Morris <jmorris@redhat.com>");
MODULE_DESCRIPTION("Xtables: packet security mark modification");
MODULE_ALIAS("ipt_SECMARK");
MODULE_ALIAS("ip6t_SECMARK");

#define PFX "SECMARK: "

/*
 * Global operating mode, fixed by the first rule loaded (see
 * secmark_tg_check()); all rules must use the same mode.
 * NOTE(review): no explicit lock here — presumably checkentry/destroy
 * are serialized by the xtables core; confirm before relying on it.
 */
static u8 mode;

/*
 * Packet hook: stamp skb->secmark with the SID configured for this
 * rule.  Runs for every matching packet.
 */
static unsigned int
secmark_tg(struct sk_buff *skb, const struct xt_target_param *par)
{
	u32 secmark = 0;
	const struct xt_secmark_target_info *info = par->targinfo;

	BUG_ON(info->mode != mode);

	switch (mode) {
	case SECMARK_MODE_SEL:
		secmark = info->u.sel.selsid;
		break;

	default:
		BUG();
	}

	skb->secmark = secmark;
	return XT_CONTINUE;
}

/*
 * Validate the SELinux context string in @info, resolve it to a SID
 * (stored back into info->u.sel.selsid), check relabel permission and
 * take a secmark refcount.  Returns false on any failure.
 */
static bool checkentry_selinux(struct xt_secmark_target_info *info)
{
	int err;
	struct xt_secmark_target_selinux_info *sel = &info->u.sel;

	/* Force NUL termination of the userspace-supplied context. */
	sel->selctx[SECMARK_SELCTX_MAX - 1] = '\0';

	err = selinux_string_to_sid(sel->selctx, &sel->selsid);
	if (err) {
		if (err == -EINVAL)
			printk(KERN_INFO PFX "invalid SELinux context \'%s\'\n",
			       sel->selctx);
		return false;
	}

	if (!sel->selsid) {
		printk(KERN_INFO PFX "unable to map SELinux context \'%s\'\n",
		       sel->selctx);
		return false;
	}

	err = selinux_secmark_relabel_packet_permission(sel->selsid);
	if (err) {
		printk(KERN_INFO PFX "unable to obtain relabeling permission\n");
		return false;
	}

	selinux_secmark_refcount_inc();
	return true;
}

/*
 * Rule-insertion check: restrict the target to the mangle/security
 * tables, enforce a single global mode, and validate the per-mode
 * configuration.
 */
static bool secmark_tg_check(const struct xt_tgchk_param *par)
{
	struct xt_secmark_target_info *info = par->targinfo;

	if (strcmp(par->table, "mangle") != 0 &&
	    strcmp(par->table, "security") != 0) {
		printk(KERN_INFO PFX "target only valid in the \'mangle\' "
		       "or \'security\' tables, not \'%s\'.\n", par->table);
		return false;
	}

	if (mode && mode != info->mode) {
		printk(KERN_INFO PFX "mode already set to %hu cannot mix with "
		       "rules for mode %hu\n", mode, info->mode);
		return false;
	}

	switch (info->mode) {
	case SECMARK_MODE_SEL:
		if (!checkentry_selinux(info))
			return false;
		break;

	default:
		printk(KERN_INFO PFX "invalid mode: %hu\n", info->mode);
		return false;
	}

	/* First successful rule pins the global mode. */
	if (!mode)
		mode = info->mode;
	return true;
}

/* Rule removal: drop the secmark refcount taken at check time. */
static void secmark_tg_destroy(const struct xt_tgdtor_param *par)
{
	switch (mode) {
	case SECMARK_MODE_SEL:
		selinux_secmark_refcount_dec();
	}
}

static struct xt_target secmark_tg_reg __read_mostly = {
	.name       = "SECMARK",
	.revision   = 0,
	.family     = NFPROTO_UNSPEC,
	.checkentry = secmark_tg_check,
	.destroy    = secmark_tg_destroy,
	.target     = secmark_tg,
	.targetsize = sizeof(struct xt_secmark_target_info),
	.me         = THIS_MODULE,
};

/* Module init: register the SECMARK target with xtables. */
static int __init secmark_tg_init(void)
{
	return xt_register_target(&secmark_tg_reg);
}

/* Module exit: unregister the target. */
static void __exit secmark_tg_exit(void)
{
	xt_unregister_target(&secmark_tg_reg);
}

module_init(secmark_tg_init);
module_exit(secmark_tg_exit);
gpl-2.0
kevinzyuan/ok6410
drivers/scsi/bfa/bfa_ioc.c
810
45697
/* * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <bfa.h> #include <bfa_ioc.h> #include <bfa_fwimg_priv.h> #include <cna/bfa_cna_trcmod.h> #include <cs/bfa_debug.h> #include <bfi/bfi_ioc.h> #include <bfi/bfi_ctreg.h> #include <aen/bfa_aen_ioc.h> #include <aen/bfa_aen.h> #include <log/bfa_log_hal.h> #include <defs/bfa_defs_pci.h> BFA_TRC_FILE(CNA, IOC); /** * IOC local definitions */ #define BFA_IOC_TOV 2000 /* msecs */ #define BFA_IOC_HWSEM_TOV 500 /* msecs */ #define BFA_IOC_HB_TOV 500 /* msecs */ #define BFA_IOC_HWINIT_MAX 2 #define BFA_IOC_FWIMG_MINSZ (16 * 1024) #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV #define bfa_ioc_timer_start(__ioc) \ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ bfa_ioc_timeout, (__ioc), BFA_IOC_TOV) #define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer) #define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS) #define BFA_DBG_FWTRC_LEN \ (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \ (sizeof(struct bfa_trc_mod_s) - \ BFA_TRC_MAX * sizeof(struct bfa_trc_s))) #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) /** * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 
*/ #define bfa_ioc_firmware_lock(__ioc) \ ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) #define bfa_ioc_firmware_unlock(__ioc) \ ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) #define bfa_ioc_fwimg_get_chunk(__ioc, __off) \ ((__ioc)->ioc_hwif->ioc_fwimg_get_chunk(__ioc, __off)) #define bfa_ioc_fwimg_get_size(__ioc) \ ((__ioc)->ioc_hwif->ioc_fwimg_get_size(__ioc)) #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) #define bfa_ioc_notify_hbfail(__ioc) \ ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) bfa_boolean_t bfa_auto_recover = BFA_TRUE; /* * forward declarations */ static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa, enum bfa_ioc_aen_event event); static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc); static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); static void bfa_ioc_timeout(void *ioc); static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc); static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc); static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc); static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc); static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc); static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force); static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); static void bfa_ioc_recover(struct bfa_ioc_s *ioc); static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); /** * bfa_ioc_sm */ /** * IOC state machine events */ enum ioc_event { IOC_E_ENABLE = 1, /* IOC enable request */ IOC_E_DISABLE = 2, /* IOC disable request */ IOC_E_TIMEOUT = 3, /* f/w response timeout */ IOC_E_FWREADY = 4, /* f/w initialization done */ IOC_E_FWRSP_GETATTR = 5, /* IOC get attribute response */ IOC_E_FWRSP_ENABLE = 6, /* enable f/w response */ IOC_E_FWRSP_DISABLE = 7, /* disable 
f/w response */ IOC_E_HBFAIL = 8, /* heartbeat failure */ IOC_E_HWERROR = 9, /* hardware error interrupt */ IOC_E_SEMLOCKED = 10, /* h/w semaphore is locked */ IOC_E_DETACH = 11, /* driver detach cleanup */ }; bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); static struct bfa_sm_table_s ioc_sm_table[] = { {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH}, {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH}, {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT}, {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT}, {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT}, {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL}, {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL}, {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, }; /** * Reset entry actions -- initialize state machine */ static void bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc) { ioc->retry_count = 0; ioc->auto_recover = bfa_auto_recover; } /** * Beginning state. IOC is in reset state. 
*/ static void bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_ENABLE: bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); break; case IOC_E_DISABLE: bfa_ioc_disable_comp(ioc); break; case IOC_E_DETACH: break; default: bfa_sm_fault(ioc, event); } } /** * Semaphore should be acquired for version check. */ static void bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc) { bfa_ioc_hw_sem_get(ioc); } /** * Awaiting h/w semaphore to continue with version check. */ static void bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_SEMLOCKED: if (bfa_ioc_firmware_lock(ioc)) { ioc->retry_count = 0; bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); } else { bfa_ioc_hw_sem_release(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch); } break; case IOC_E_DISABLE: bfa_ioc_disable_comp(ioc); /* * fall through */ case IOC_E_DETACH: bfa_ioc_hw_sem_get_cancel(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); break; case IOC_E_FWREADY: break; default: bfa_sm_fault(ioc, event); } } /** * Notify enable completion callback and generate mismatch AEN. */ static void bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc) { /** * Provide enable completion callback and AEN notification only once. */ if (ioc->retry_count == 0) { ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH); } ioc->retry_count++; bfa_ioc_timer_start(ioc); } /** * Awaiting firmware version match. */ static void bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_TIMEOUT: bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); break; case IOC_E_DISABLE: bfa_ioc_disable_comp(ioc); /* * fall through */ case IOC_E_DETACH: bfa_ioc_timer_stop(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); break; case IOC_E_FWREADY: break; default: bfa_sm_fault(ioc, event); } } /** * Request for semaphore. 
*/ static void bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc) { bfa_ioc_hw_sem_get(ioc); } /** * Awaiting semaphore for h/w initialzation. */ static void bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_SEMLOCKED: ioc->retry_count = 0; bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); break; case IOC_E_DISABLE: bfa_ioc_hw_sem_get_cancel(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; default: bfa_sm_fault(ioc, event); } } static void bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc) { bfa_ioc_timer_start(ioc); bfa_ioc_reset(ioc, BFA_FALSE); } /** * Hardware is being initialized. Interrupts are enabled. * Holding hardware semaphore lock. */ static void bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_FWREADY: bfa_ioc_timer_stop(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); break; case IOC_E_HWERROR: bfa_ioc_timer_stop(ioc); /* * fall through */ case IOC_E_TIMEOUT: ioc->retry_count++; if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { bfa_ioc_timer_start(ioc); bfa_ioc_reset(ioc, BFA_TRUE); break; } bfa_ioc_hw_sem_release(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); break; case IOC_E_DISABLE: bfa_ioc_hw_sem_release(ioc); bfa_ioc_timer_stop(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; default: bfa_sm_fault(ioc, event); } } static void bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc) { bfa_ioc_timer_start(ioc); bfa_ioc_send_enable(ioc); } /** * Host IOC function is being enabled, awaiting response from firmware. * Semaphore is acquired. 
*/ static void bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_FWRSP_ENABLE: bfa_ioc_timer_stop(ioc); bfa_ioc_hw_sem_release(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); break; case IOC_E_HWERROR: bfa_ioc_timer_stop(ioc); /* * fall through */ case IOC_E_TIMEOUT: ioc->retry_count++; if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_UNINIT); bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); break; } bfa_ioc_hw_sem_release(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); break; case IOC_E_DISABLE: bfa_ioc_timer_stop(ioc); bfa_ioc_hw_sem_release(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; case IOC_E_FWREADY: bfa_ioc_send_enable(ioc); break; default: bfa_sm_fault(ioc, event); } } static void bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc) { bfa_ioc_timer_start(ioc); bfa_ioc_send_getattr(ioc); } /** * IOC configuration in progress. Timer is active. */ static void bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_FWRSP_GETATTR: bfa_ioc_timer_stop(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_op); break; case IOC_E_HWERROR: bfa_ioc_timer_stop(ioc); /* * fall through */ case IOC_E_TIMEOUT: bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); break; case IOC_E_DISABLE: bfa_ioc_timer_stop(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; default: bfa_sm_fault(ioc, event); } } static void bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) { ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); bfa_ioc_hb_monitor(ioc); bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE); } static void bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_ENABLE: break; case IOC_E_DISABLE: bfa_ioc_hb_stop(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); break; case IOC_E_HWERROR: case IOC_E_FWREADY: /** * Hard error or IOC recovery by other function. 
* Treat it same as heartbeat failure. */ bfa_ioc_hb_stop(ioc); /* * !!! fall through !!! */ case IOC_E_HBFAIL: bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail); break; default: bfa_sm_fault(ioc, event); } } static void bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc) { bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE); bfa_ioc_timer_start(ioc); bfa_ioc_send_disable(ioc); } /** * IOC is being disabled */ static void bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_FWRSP_DISABLE: bfa_ioc_timer_stop(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; case IOC_E_HWERROR: bfa_ioc_timer_stop(ioc); /* * !!! fall through !!! */ case IOC_E_TIMEOUT: bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; default: bfa_sm_fault(ioc, event); } } /** * IOC disable completion entry. */ static void bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc) { bfa_ioc_disable_comp(ioc); } static void bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_ENABLE: bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); break; case IOC_E_DISABLE: ioc->cbfn->disable_cbfn(ioc->bfa); break; case IOC_E_FWREADY: break; case IOC_E_DETACH: bfa_ioc_firmware_unlock(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); break; default: bfa_sm_fault(ioc, event); } } static void bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc) { ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); bfa_ioc_timer_start(ioc); } /** * Hardware initialization failed. 
*/ static void bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_DISABLE: bfa_ioc_timer_stop(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; case IOC_E_DETACH: bfa_ioc_timer_stop(ioc); bfa_ioc_firmware_unlock(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); break; case IOC_E_TIMEOUT: bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); break; default: bfa_sm_fault(ioc, event); } } static void bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc) { struct list_head *qe; struct bfa_ioc_hbfail_notify_s *notify; /** * Mark IOC as failed in hardware and stop firmware. */ bfa_ioc_lpu_stop(ioc); bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); /** * Notify other functions on HB failure. */ bfa_ioc_notify_hbfail(ioc); /** * Notify driver and common modules registered for notification. */ ioc->cbfn->hbfail_cbfn(ioc->bfa); list_for_each(qe, &ioc->hb_notify_q) { notify = (struct bfa_ioc_hbfail_notify_s *)qe; notify->cbfn(notify->cbarg); } /** * Flush any queued up mailbox requests. */ bfa_ioc_mbox_hbfail(ioc); bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL); /** * Trigger auto-recovery after a delay. */ if (ioc->auto_recover) { bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER); } } /** * IOC heartbeat failure. */ static void bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_ENABLE: ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); break; case IOC_E_DISABLE: if (ioc->auto_recover) bfa_ioc_timer_stop(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; case IOC_E_TIMEOUT: bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); break; case IOC_E_FWREADY: /** * Recovery is already initiated by other function. */ break; case IOC_E_HWERROR: /* * HB failure notification, ignore. 
*/ break; default: bfa_sm_fault(ioc, event); } } /** * bfa_ioc_pvt BFA IOC private functions */ static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) { struct list_head *qe; struct bfa_ioc_hbfail_notify_s *notify; ioc->cbfn->disable_cbfn(ioc->bfa); /** * Notify common modules registered for notification. */ list_for_each(qe, &ioc->hb_notify_q) { notify = (struct bfa_ioc_hbfail_notify_s *)qe; notify->cbfn(notify->cbarg); } } void bfa_ioc_sem_timeout(void *ioc_arg) { struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg; bfa_ioc_hw_sem_get(ioc); } bfa_boolean_t bfa_ioc_sem_get(bfa_os_addr_t sem_reg) { u32 r32; int cnt = 0; #define BFA_SEM_SPINCNT 3000 r32 = bfa_reg_read(sem_reg); while (r32 && (cnt < BFA_SEM_SPINCNT)) { cnt++; bfa_os_udelay(2); r32 = bfa_reg_read(sem_reg); } if (r32 == 0) return BFA_TRUE; bfa_assert(cnt < BFA_SEM_SPINCNT); return BFA_FALSE; } void bfa_ioc_sem_release(bfa_os_addr_t sem_reg) { bfa_reg_write(sem_reg, 1); } static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) { u32 r32; /** * First read to the semaphore register will return 0, subsequent reads * will return 1. 
Semaphore is released by writing 1 to the register */ r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); if (r32 == 0) { bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED); return; } bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout, ioc, BFA_IOC_HWSEM_TOV); } void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc) { bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1); } static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc) { bfa_timer_stop(&ioc->sem_timer); } /** * Initialize LPU local memory (aka secondary memory / SRAM) */ static void bfa_ioc_lmem_init(struct bfa_ioc_s *ioc) { u32 pss_ctl; int i; #define PSS_LMEM_INIT_TIME 10000 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); pss_ctl &= ~__PSS_LMEM_RESET; pss_ctl |= __PSS_LMEM_INIT_EN; pss_ctl |= __PSS_I2C_CLK_DIV(3UL); /* i2c workaround 12.5khz clock */ bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); /** * wait for memory initialization to be complete */ i = 0; do { pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); i++; } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME)); /** * If memory initialization is not successful, IOC timeout will catch * such failures. */ bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE); bfa_trc(ioc, pss_ctl); pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN); bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); } static void bfa_ioc_lpu_start(struct bfa_ioc_s *ioc) { u32 pss_ctl; /** * Take processor out of reset. */ pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); pss_ctl &= ~__PSS_LPU0_RESET; bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); } static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc) { u32 pss_ctl; /** * Put processors in reset. */ pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET); bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); } /** * Get driver and firmware versions. 
*/ void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) { u32 pgnum, pgoff; u32 loff = 0; int i; u32 *fwsig = (u32 *) fwhdr; pgnum = bfa_ioc_smem_pgnum(ioc, loff); pgoff = bfa_ioc_smem_pgoff(ioc, loff); bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32)); i++) { fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); loff += sizeof(u32); } } /** * Returns TRUE if same. */ bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) { struct bfi_ioc_image_hdr_s *drv_fwhdr; int i; drv_fwhdr = (struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0); for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) { bfa_trc(ioc, i); bfa_trc(ioc, fwhdr->md5sum[i]); bfa_trc(ioc, drv_fwhdr->md5sum[i]); return BFA_FALSE; } } bfa_trc(ioc, fwhdr->md5sum[0]); return BFA_TRUE; } /** * Return true if current running version is valid. Firmware signature and * execution context (driver/bios) must match. */ static bfa_boolean_t bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc) { struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; /** * If bios/efi boot (flash based) -- return true */ if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) return BFA_TRUE; bfa_ioc_fwver_get(ioc, &fwhdr); drv_fwhdr = (struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0); if (fwhdr.signature != drv_fwhdr->signature) { bfa_trc(ioc, fwhdr.signature); bfa_trc(ioc, drv_fwhdr->signature); return BFA_FALSE; } if (fwhdr.exec != drv_fwhdr->exec) { bfa_trc(ioc, fwhdr.exec); bfa_trc(ioc, drv_fwhdr->exec); return BFA_FALSE; } return bfa_ioc_fwver_cmp(ioc, &fwhdr); } /** * Conditionally flush any pending message from firmware at start. 
*/ static void bfa_ioc_msgflush(struct bfa_ioc_s *ioc) { u32 r32; r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd); if (r32) bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1); } static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) { enum bfi_ioc_state ioc_fwstate; bfa_boolean_t fwvalid; ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); if (force) ioc_fwstate = BFI_IOC_UNINIT; bfa_trc(ioc, ioc_fwstate); /** * check if firmware is valid */ fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? BFA_FALSE : bfa_ioc_fwver_valid(ioc); if (!fwvalid) { bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); return; } /** * If hardware initialization is in progress (initialized by other IOC), * just wait for an initialization completion interrupt. */ if (ioc_fwstate == BFI_IOC_INITING) { bfa_trc(ioc, ioc_fwstate); ioc->cbfn->reset_cbfn(ioc->bfa); return; } /** * If IOC function is disabled and firmware version is same, * just re-enable IOC. */ if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) { bfa_trc(ioc, ioc_fwstate); /** * When using MSI-X any pending firmware ready event should * be flushed. Otherwise MSI-X interrupts are not delivered. */ bfa_ioc_msgflush(ioc); ioc->cbfn->reset_cbfn(ioc->bfa); bfa_fsm_send_event(ioc, IOC_E_FWREADY); return; } /** * Initialize the h/w for any other states. 
*/ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); } static void bfa_ioc_timeout(void *ioc_arg) { struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg; bfa_trc(ioc, 0); bfa_fsm_send_event(ioc, IOC_E_TIMEOUT); } void bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len) { u32 *msgp = (u32 *) ioc_msg; u32 i; bfa_trc(ioc, msgp[0]); bfa_trc(ioc, len); bfa_assert(len <= BFI_IOC_MSGLEN_MAX); /* * first write msg to mailbox registers */ for (i = 0; i < len / sizeof(u32); i++) bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), bfa_os_wtole(msgp[i])); for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++) bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0); /* * write 1 to mailbox CMD to trigger LPU event */ bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1); (void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); } static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc) { struct bfi_ioc_ctrl_req_s enable_req; bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, bfa_ioc_portid(ioc)); enable_req.ioc_class = ioc->ioc_mc; bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); } static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc) { struct bfi_ioc_ctrl_req_s disable_req; bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ, bfa_ioc_portid(ioc)); bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s)); } static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc) { struct bfi_ioc_getattr_req_s attr_req; bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ, bfa_ioc_portid(ioc)); bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa); bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req)); } static void bfa_ioc_hb_check(void *cbarg) { struct bfa_ioc_s *ioc = cbarg; u32 hb_count; hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); if (ioc->hb_count == hb_count) { bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, hb_count); bfa_ioc_recover(ioc); return; } else { ioc->hb_count = hb_count; } 
bfa_ioc_mbox_poll(ioc); bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, BFA_IOC_HB_TOV); } static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) { ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, BFA_IOC_HB_TOV); } static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc) { bfa_timer_stop(&ioc->ioc_timer); } /** * Initiate a full firmware download. */ static void bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param) { u32 *fwimg; u32 pgnum, pgoff; u32 loff = 0; u32 chunkno = 0; u32 i; /** * Initialize LMEM first before code download */ bfa_ioc_lmem_init(ioc); /** * Flash based firmware boot */ bfa_trc(ioc, bfa_ioc_fwimg_get_size(ioc)); if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) boot_type = BFI_BOOT_TYPE_FLASH; fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno); pgnum = bfa_ioc_smem_pgnum(ioc, loff); pgoff = bfa_ioc_smem_pgoff(ioc, loff); bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) { if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { chunkno = BFA_IOC_FLASH_CHUNK_NO(i); fwimg = bfa_ioc_fwimg_get_chunk(ioc, BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); } /** * write smem */ bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]); loff += sizeof(u32); /** * handle page offset wrap around */ loff = PSS_SMEM_PGOFF(loff); if (loff == 0) { pgnum++; bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); } } bfa_reg_write(ioc->ioc_regs.host_page_num_fn, bfa_ioc_smem_pgnum(ioc, 0)); /* * Set boot type and boot param at the end. */ bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF, bfa_os_swap32(boot_type)); bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF, bfa_os_swap32(boot_param)); } static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force) { bfa_ioc_hwinit(ioc, force); } /** * Update BFA configuration from firmware configuration. 
*/ static void bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc) { struct bfi_ioc_attr_s *attr = ioc->attr; attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop); attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize); bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); } /** * Attach time initialization of mbox logic. */ static void bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc) { struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; int mc; INIT_LIST_HEAD(&mod->cmd_q); for (mc = 0; mc < BFI_MC_MAX; mc++) { mod->mbhdlr[mc].cbfn = NULL; mod->mbhdlr[mc].cbarg = ioc->bfa; } } /** * Mbox poll timer -- restarts any pending mailbox requests. */ static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc) { struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; struct bfa_mbox_cmd_s *cmd; u32 stat; /** * If no command pending, do nothing */ if (list_empty(&mod->cmd_q)) return; /** * If previous command is not yet fetched by firmware, do nothing */ stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); if (stat) return; /** * Enqueue command to firmware. */ bfa_q_deq(&mod->cmd_q, &cmd); bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); } /** * Cleanup any pending requests. */ static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc) { struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; struct bfa_mbox_cmd_s *cmd; while (!list_empty(&mod->cmd_q)) bfa_q_deq(&mod->cmd_q, &cmd); } /** * bfa_ioc_public */ /** * Interface used by diag module to do firmware boot with memory test * as the entry vector. */ void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param) { bfa_os_addr_t rb; bfa_ioc_stats(ioc, ioc_boots); if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) return; /** * Initialize IOC state of all functions on a chip reset. 
*/ rb = ioc->pcidev.pci_bar_kva; if (boot_param == BFI_BOOT_TYPE_MEMTEST) { bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST); bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST); } else { bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING); bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING); } bfa_ioc_download_fw(ioc, boot_type, boot_param); /** * Enable interrupts just before starting LPU */ ioc->cbfn->reset_cbfn(ioc->bfa); bfa_ioc_lpu_start(ioc); } /** * Enable/disable IOC failure auto recovery. */ void bfa_ioc_auto_recover(bfa_boolean_t auto_recover) { bfa_auto_recover = auto_recover; } bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc) { return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); } void bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) { u32 *msgp = mbmsg; u32 r32; int i; /** * read the MBOX msg */ for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32)); i++) { r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox + i * sizeof(u32)); msgp[i] = bfa_os_htonl(r32); } /** * turn off mailbox interrupt by clearing mailbox status */ bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1); bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd); } void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) { union bfi_ioc_i2h_msg_u *msg; msg = (union bfi_ioc_i2h_msg_u *)m; bfa_ioc_stats(ioc, ioc_isrs); switch (msg->mh.msg_id) { case BFI_IOC_I2H_HBEAT: break; case BFI_IOC_I2H_READY_EVENT: bfa_fsm_send_event(ioc, IOC_E_FWREADY); break; case BFI_IOC_I2H_ENABLE_REPLY: bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE); break; case BFI_IOC_I2H_DISABLE_REPLY: bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE); break; case BFI_IOC_I2H_GETATTR_REPLY: bfa_ioc_getattr_reply(ioc); break; default: bfa_trc(ioc, msg->mh.msg_id); bfa_assert(0); } } /** * IOC attach time initialization and setup. 
* * @param[in] ioc memory for IOC * @param[in] bfa driver instance structure * @param[in] trcmod kernel trace module * @param[in] aen kernel aen event module * @param[in] logm kernel logging module */ void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod, struct bfa_aen_s *aen, struct bfa_log_mod_s *logm) { ioc->bfa = bfa; ioc->cbfn = cbfn; ioc->timer_mod = timer_mod; ioc->trcmod = trcmod; ioc->aen = aen; ioc->logm = logm; ioc->fcmode = BFA_FALSE; ioc->pllinit = BFA_FALSE; ioc->dbg_fwsave_once = BFA_TRUE; bfa_ioc_mbox_attach(ioc); INIT_LIST_HEAD(&ioc->hb_notify_q); bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); } /** * Driver detach time IOC cleanup. */ void bfa_ioc_detach(struct bfa_ioc_s *ioc) { bfa_fsm_send_event(ioc, IOC_E_DETACH); } /** * Setup IOC PCI properties. * * @param[in] pcidev PCI device information for this IOC */ void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, enum bfi_mclass mc) { ioc->ioc_mc = mc; ioc->pcidev = *pcidev; ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT); ioc->cna = ioc->ctdev && !ioc->fcmode; /** * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c */ if (ioc->ctdev) bfa_ioc_set_ct_hwif(ioc); else bfa_ioc_set_cb_hwif(ioc); bfa_ioc_map_port(ioc); bfa_ioc_reg_init(ioc); } /** * Initialize IOC dma memory * * @param[in] dm_kva kernel virtual address of IOC dma memory * @param[in] dm_pa physical address of IOC dma memory */ void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) { /** * dma memory for firmware attribute */ ioc->attr_dma.kva = dm_kva; ioc->attr_dma.pa = dm_pa; ioc->attr = (struct bfi_ioc_attr_s *)dm_kva; } /** * Return size of dma memory required. 
*/ u32 bfa_ioc_meminfo(void) { return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ); } void bfa_ioc_enable(struct bfa_ioc_s *ioc) { bfa_ioc_stats(ioc, ioc_enables); ioc->dbg_fwsave_once = BFA_TRUE; bfa_fsm_send_event(ioc, IOC_E_ENABLE); } void bfa_ioc_disable(struct bfa_ioc_s *ioc) { bfa_ioc_stats(ioc, ioc_disables); bfa_fsm_send_event(ioc, IOC_E_DISABLE); } /** * Returns memory required for saving firmware trace in case of crash. * Driver must call this interface to allocate memory required for * automatic saving of firmware trace. Driver should call * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to setup this * trace memory. */ int bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover) { return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0; } /** * Initialize memory for saving firmware trace. Driver must initialize * trace memory before call bfa_ioc_enable(). */ void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave) { ioc->dbg_fwsave = dbg_fwsave; ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover); } u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr) { return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr); } u32 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr) { return PSS_SMEM_PGOFF(fmaddr); } /** * Register mailbox message handler functions * * @param[in] ioc IOC instance * @param[in] mcfuncs message class handler functions */ void bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs) { struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; int mc; for (mc = 0; mc < BFI_MC_MAX; mc++) mod->mbhdlr[mc].cbfn = mcfuncs[mc]; } /** * Register mailbox message handler function, to be called by common modules */ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) { struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; mod->mbhdlr[mc].cbfn = cbfn; mod->mbhdlr[mc].cbarg = cbarg; } /** * Queue a mailbox command request to firmware. Waits if mailbox is busy. 
* Responsibility of caller to serialize * * @param[in] ioc IOC instance * @param[i] cmd Mailbox command */ void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd) { struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; u32 stat; /** * If a previous command is pending, queue new command */ if (!list_empty(&mod->cmd_q)) { list_add_tail(&cmd->qe, &mod->cmd_q); return; } /** * If mailbox is busy, queue command for poll timer */ stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); if (stat) { list_add_tail(&cmd->qe, &mod->cmd_q); return; } /** * mailbox is free -- queue command to firmware */ bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); } /** * Handle mailbox interrupts */ void bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc) { struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; struct bfi_mbmsg_s m; int mc; bfa_ioc_msgget(ioc, &m); /** * Treat IOC message class as special. */ mc = m.mh.msg_class; if (mc == BFI_MC_IOC) { bfa_ioc_isr(ioc, &m); return; } if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) return; mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); } void bfa_ioc_error_isr(struct bfa_ioc_s *ioc) { bfa_fsm_send_event(ioc, IOC_E_HWERROR); } #ifndef BFA_BIOS_BUILD /** * return true if IOC is disabled */ bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) { return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); } /** * return true if IOC firmware is different. */ bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) { return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck) || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch); } #define bfa_ioc_state_disabled(__sm) \ (((__sm) == BFI_IOC_UNINIT) || \ ((__sm) == BFI_IOC_INITING) || \ ((__sm) == BFI_IOC_HWINIT) || \ ((__sm) == BFI_IOC_DISABLED) || \ ((__sm) == BFI_IOC_FAIL) || \ ((__sm) == BFI_IOC_CFG_DISABLED)) /** * Check if adapter is disabled -- both IOCs should be in a disabled * state. 
*/ bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) { u32 ioc_state; bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) return BFA_FALSE; ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG); if (!bfa_ioc_state_disabled(ioc_state)) return BFA_FALSE; ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG); if (!bfa_ioc_state_disabled(ioc_state)) return BFA_FALSE; return BFA_TRUE; } /** * Add to IOC heartbeat failure notification queue. To be used by common * modules such as */ void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, struct bfa_ioc_hbfail_notify_s *notify) { list_add_tail(&notify->qe, &ioc->hb_notify_q); } #define BFA_MFG_NAME "Brocade" void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, struct bfa_adapter_attr_s *ad_attr) { struct bfi_ioc_attr_s *ioc_attr; ioc_attr = ioc->attr; bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd, sizeof(struct bfa_mfg_vpd_s)); ad_attr->nports = bfa_ioc_get_nports(ioc); ad_attr->max_speed = bfa_ioc_speed_sup(ioc); bfa_ioc_get_adapter_model(ioc, ad_attr->model); /* For now, model descr uses same model string */ bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) ad_attr->prototype = 1; else ad_attr->prototype = 0; ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); ad_attr->mac = bfa_ioc_get_mac(ioc); ad_attr->pcie_gen = ioc_attr->pcie_gen; ad_attr->pcie_lanes = ioc_attr->pcie_lanes; ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; ad_attr->asic_rev = ioc_attr->asic_rev; bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); ad_attr->cna_capable = ioc->cna; } enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc) { if (!ioc->ctdev || ioc->fcmode) return BFA_IOC_TYPE_FC; else if (ioc->ioc_mc == BFI_MC_IOCFC) return 
BFA_IOC_TYPE_FCoE; else if (ioc->ioc_mc == BFI_MC_LL) return BFA_IOC_TYPE_LL; else { bfa_assert(ioc->ioc_mc == BFI_MC_LL); return BFA_IOC_TYPE_LL; } } void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num) { bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN); bfa_os_memcpy((void *)serial_num, (void *)ioc->attr->brcd_serialnum, BFA_ADAPTER_SERIAL_NUM_LEN); } void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver) { bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN); bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); } void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev) { bfa_assert(chip_rev); bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN); chip_rev[0] = 'R'; chip_rev[1] = 'e'; chip_rev[2] = 'v'; chip_rev[3] = '-'; chip_rev[4] = ioc->attr->asic_rev; chip_rev[5] = '\0'; } void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver) { bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN); bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version, BFA_VERSION_LEN); } void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer) { bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); } void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) { struct bfi_ioc_attr_s *ioc_attr; u8 nports; u8 max_speed; bfa_assert(model); bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); ioc_attr = ioc->attr; nports = bfa_ioc_get_nports(ioc); max_speed = bfa_ioc_speed_sup(ioc); /** * model name */ if (max_speed == 10) { strcpy(model, "BR-10?0"); model[5] = '0' + nports; } else { strcpy(model, "Brocade-??5"); model[8] = '0' + max_speed; model[9] = '0' + nports; } } enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc) { return bfa_sm_to_state(ioc_sm_table, ioc->fsm); } void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) { bfa_os_memset((void 
*)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s)); ioc_attr->state = bfa_ioc_get_state(ioc); ioc_attr->port_id = ioc->port_id; ioc_attr->ioc_type = bfa_ioc_get_type(ioc); bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); ioc_attr->pci_attr.device_id = ioc->pcidev.device_id; ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func; bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); } /** * hal_wwn_public */ wwn_t bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc) { union { wwn_t wwn; u8 byte[sizeof(wwn_t)]; } w; w.wwn = ioc->attr->mfg_wwn; if (bfa_ioc_portid(ioc) == 1) w.byte[7]++; return w.wwn; } wwn_t bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc) { union { wwn_t wwn; u8 byte[sizeof(wwn_t)]; } w; w.wwn = ioc->attr->mfg_wwn; if (bfa_ioc_portid(ioc) == 1) w.byte[7]++; w.byte[0] = 0x20; return w.wwn; } wwn_t bfa_ioc_get_wwn_naa5(struct bfa_ioc_s *ioc, u16 inst) { union { wwn_t wwn; u8 byte[sizeof(wwn_t)]; } w , w5; bfa_trc(ioc, inst); w.wwn = ioc->attr->mfg_wwn; w5.byte[0] = 0x50 | w.byte[2] >> 4; w5.byte[1] = w.byte[2] << 4 | w.byte[3] >> 4; w5.byte[2] = w.byte[3] << 4 | w.byte[4] >> 4; w5.byte[3] = w.byte[4] << 4 | w.byte[5] >> 4; w5.byte[4] = w.byte[5] << 4 | w.byte[6] >> 4; w5.byte[5] = w.byte[6] << 4 | w.byte[7] >> 4; w5.byte[6] = w.byte[7] << 4 | (inst & 0x0f00) >> 8; w5.byte[7] = (inst & 0xff); return w5.wwn; } u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc) { return ioc->attr->mfg_wwn; } mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc) { mac_t mac; mac = ioc->attr->mfg_mac; mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc); return mac; } void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc) { ioc->fcmode = BFA_TRUE; ioc->port_id = bfa_ioc_pcifn(ioc); } bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc) { return ioc->fcmode || (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT); } /** * Send AEN notification */ static void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event) { union bfa_aen_data_u aen_data; struct bfa_log_mod_s *logmod = ioc->logm; s32 inst_num = 0; enum 
bfa_ioc_type_e ioc_type; bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num); memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn)); memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac)); ioc_type = bfa_ioc_get_type(ioc); switch (ioc_type) { case BFA_IOC_TYPE_FC: aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc); break; case BFA_IOC_TYPE_FCoE: aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc); aen_data.ioc.mac = bfa_ioc_get_mac(ioc); break; case BFA_IOC_TYPE_LL: aen_data.ioc.mac = bfa_ioc_get_mac(ioc); break; default: bfa_assert(ioc_type == BFA_IOC_TYPE_FC); break; } aen_data.ioc.ioc_type = ioc_type; } /** * Retrieve saved firmware trace from a prior IOC failure. */ bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) { int tlen; if (ioc->dbg_fwsave_len == 0) return BFA_STATUS_ENOFSAVE; tlen = *trclen; if (tlen > ioc->dbg_fwsave_len) tlen = ioc->dbg_fwsave_len; bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen); *trclen = tlen; return BFA_STATUS_OK; } /** * Clear saved firmware trace */ void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc) { ioc->dbg_fwsave_once = BFA_TRUE; } /** * Retrieve saved firmware trace from a prior IOC failure. */ bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) { u32 pgnum; u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc)); int i, tlen; u32 *tbuf = trcdata, r32; bfa_trc(ioc, *trclen); pgnum = bfa_ioc_smem_pgnum(ioc, loff); loff = bfa_ioc_smem_pgoff(ioc, loff); /* * Hold semaphore to serialize pll init and fwtrc. 
*/ if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) return BFA_STATUS_FAILED; bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); tlen = *trclen; if (tlen > BFA_DBG_FWTRC_LEN) tlen = BFA_DBG_FWTRC_LEN; tlen /= sizeof(u32); bfa_trc(ioc, tlen); for (i = 0; i < tlen; i++) { r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); tbuf[i] = bfa_os_ntohl(r32); loff += sizeof(u32); /** * handle page offset wrap around */ loff = PSS_SMEM_PGOFF(loff); if (loff == 0) { pgnum++; bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); } } bfa_reg_write(ioc->ioc_regs.host_page_num_fn, bfa_ioc_smem_pgnum(ioc, 0)); /* * release semaphore. */ bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); bfa_trc(ioc, pgnum); *trclen = tlen * sizeof(u32); return BFA_STATUS_OK; } /** * Save firmware trace if configured. */ static void bfa_ioc_debug_save(struct bfa_ioc_s *ioc) { int tlen; if (ioc->dbg_fwsave_len) { tlen = ioc->dbg_fwsave_len; bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen); } } /** * Firmware failure detected. Start recovery actions. */ static void bfa_ioc_recover(struct bfa_ioc_s *ioc) { if (ioc->dbg_fwsave_once) { ioc->dbg_fwsave_once = BFA_FALSE; bfa_ioc_debug_save(ioc); } bfa_ioc_stats(ioc, ioc_hbfails); bfa_fsm_send_event(ioc, IOC_E_HBFAIL); } #else static void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event) { } static void bfa_ioc_recover(struct bfa_ioc_s *ioc) { bfa_assert(0); } #endif
gpl-2.0
fabianbergmark/linux-sctp
sound/pci/rme9652/rme9652.c
1066
74456
/* * ALSA driver for RME Digi9652 audio interfaces * * Copyright (c) 1999 IEM - Winfried Ritsch * Copyright (c) 1999-2001 Paul Davis * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/io.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/info.h> #include <sound/asoundef.h> #include <sound/initval.h> #include <asm/current.h> static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static bool precise_ptr[SNDRV_CARDS]; /* Enable precise pointer */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for RME Digi9652 (Hammerfall) soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for RME Digi9652 (Hammerfall) soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable/disable specific RME96{52,36} soundcards."); module_param_array(precise_ptr, bool, NULL, 0444); MODULE_PARM_DESC(precise_ptr, "Enable precise pointer (doesn't work reliably)."); MODULE_AUTHOR("Paul Davis <pbd@op.net>, 
Winfried Ritsch"); MODULE_DESCRIPTION("RME Digi9652/Digi9636"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{RME,Hammerfall}," "{RME,Hammerfall-Light}}"); /* The Hammerfall has two sets of 24 ADAT + 2 S/PDIF channels, one for capture, one for playback. Both the ADAT and S/PDIF channels appear to the host CPU in the same block of memory. There is no functional difference between them in terms of access. The Hammerfall Light is identical to the Hammerfall, except that it has 2 sets 18 channels (16 ADAT + 2 S/PDIF) for capture and playback. */ #define RME9652_NCHANNELS 26 #define RME9636_NCHANNELS 18 /* Preferred sync source choices - used by "sync_pref" control switch */ #define RME9652_SYNC_FROM_SPDIF 0 #define RME9652_SYNC_FROM_ADAT1 1 #define RME9652_SYNC_FROM_ADAT2 2 #define RME9652_SYNC_FROM_ADAT3 3 /* Possible sources of S/PDIF input */ #define RME9652_SPDIFIN_OPTICAL 0 /* optical (ADAT1) */ #define RME9652_SPDIFIN_COAXIAL 1 /* coaxial (RCA) */ #define RME9652_SPDIFIN_INTERN 2 /* internal (CDROM) */ /* ------------- Status-Register bits --------------------- */ #define RME9652_IRQ (1<<0) /* IRQ is High if not reset by irq_clear */ #define RME9652_lock_2 (1<<1) /* ADAT 3-PLL: 1=locked, 0=unlocked */ #define RME9652_lock_1 (1<<2) /* ADAT 2-PLL: 1=locked, 0=unlocked */ #define RME9652_lock_0 (1<<3) /* ADAT 1-PLL: 1=locked, 0=unlocked */ #define RME9652_fs48 (1<<4) /* sample rate is 0=44.1/88.2,1=48/96 Khz */ #define RME9652_wsel_rd (1<<5) /* if Word-Clock is used and valid then 1 */ /* bits 6-15 encode h/w buffer pointer position */ #define RME9652_sync_2 (1<<16) /* if ADAT-IN 3 in sync to system clock */ #define RME9652_sync_1 (1<<17) /* if ADAT-IN 2 in sync to system clock */ #define RME9652_sync_0 (1<<18) /* if ADAT-IN 1 in sync to system clock */ #define RME9652_DS_rd (1<<19) /* 1=Double Speed Mode, 0=Normal Speed */ #define RME9652_tc_busy (1<<20) /* 1=time-code copy in progress (960ms) */ #define RME9652_tc_out (1<<21) /* time-code out bit */ #define 
RME9652_F_0 (1<<22) /* 000=64kHz, 100=88.2kHz, 011=96kHz */ #define RME9652_F_1 (1<<23) /* 111=32kHz, 110=44.1kHz, 101=48kHz, */ #define RME9652_F_2 (1<<24) /* external Crystal Chip if ERF=1 */ #define RME9652_ERF (1<<25) /* Error-Flag of SDPIF Receiver (1=No Lock) */ #define RME9652_buffer_id (1<<26) /* toggles by each interrupt on rec/play */ #define RME9652_tc_valid (1<<27) /* 1 = a signal is detected on time-code input */ #define RME9652_SPDIF_READ (1<<28) /* byte available from Rev 1.5+ S/PDIF interface */ #define RME9652_sync (RME9652_sync_0|RME9652_sync_1|RME9652_sync_2) #define RME9652_lock (RME9652_lock_0|RME9652_lock_1|RME9652_lock_2) #define RME9652_F (RME9652_F_0|RME9652_F_1|RME9652_F_2) #define rme9652_decode_spdif_rate(x) ((x)>>22) /* Bit 6..15 : h/w buffer pointer */ #define RME9652_buf_pos 0x000FFC0 /* Bits 31,30,29 are bits 5,4,3 of h/w pointer position on later Rev G EEPROMS and Rev 1.5 cards or later. */ #define RME9652_REV15_buf_pos(x) ((((x)&0xE0000000)>>26)|((x)&RME9652_buf_pos)) /* amount of io space we remap for register access. i'm not sure we even need this much, but 1K is nice round number :) */ #define RME9652_IO_EXTENT 1024 #define RME9652_init_buffer 0 #define RME9652_play_buffer 32 /* holds ptr to 26x64kBit host RAM */ #define RME9652_rec_buffer 36 /* holds ptr to 26x64kBit host RAM */ #define RME9652_control_register 64 #define RME9652_irq_clear 96 #define RME9652_time_code 100 /* useful if used with alesis adat */ #define RME9652_thru_base 128 /* 132...228 Thru for 26 channels */ /* Read-only registers */ /* Writing to any of the register locations writes to the status register. We'll use the first location as our point of access. 
*/ #define RME9652_status_register 0 /* --------- Control-Register Bits ---------------- */ #define RME9652_start_bit (1<<0) /* start record/play */ /* bits 1-3 encode buffersize/latency */ #define RME9652_Master (1<<4) /* Clock Mode Master=1,Slave/Auto=0 */ #define RME9652_IE (1<<5) /* Interrupt Enable */ #define RME9652_freq (1<<6) /* samplerate 0=44.1/88.2, 1=48/96 kHz */ #define RME9652_freq1 (1<<7) /* if 0, 32kHz, else always 1 */ #define RME9652_DS (1<<8) /* Doule Speed 0=44.1/48, 1=88.2/96 Khz */ #define RME9652_PRO (1<<9) /* S/PDIF out: 0=consumer, 1=professional */ #define RME9652_EMP (1<<10) /* Emphasis 0=None, 1=ON */ #define RME9652_Dolby (1<<11) /* Non-audio bit 1=set, 0=unset */ #define RME9652_opt_out (1<<12) /* Use 1st optical OUT as SPDIF: 1=yes,0=no */ #define RME9652_wsel (1<<13) /* use Wordclock as sync (overwrites master) */ #define RME9652_inp_0 (1<<14) /* SPDIF-IN: 00=optical (ADAT1), */ #define RME9652_inp_1 (1<<15) /* 01=koaxial (Cinch), 10=Internal CDROM */ #define RME9652_SyncPref_ADAT2 (1<<16) #define RME9652_SyncPref_ADAT3 (1<<17) #define RME9652_SPDIF_RESET (1<<18) /* Rev 1.5+: h/w S/PDIF receiver */ #define RME9652_SPDIF_SELECT (1<<19) #define RME9652_SPDIF_CLOCK (1<<20) #define RME9652_SPDIF_WRITE (1<<21) #define RME9652_ADAT1_INTERNAL (1<<22) /* Rev 1.5+: if set, internal CD connector carries ADAT */ /* buffersize = 512Bytes * 2^n, where n is made from Bit2 ... 
Bit0 */ #define RME9652_latency 0x0e #define rme9652_encode_latency(x) (((x)&0x7)<<1) #define rme9652_decode_latency(x) (((x)>>1)&0x7) #define rme9652_running_double_speed(s) ((s)->control_register & RME9652_DS) #define RME9652_inp (RME9652_inp_0|RME9652_inp_1) #define rme9652_encode_spdif_in(x) (((x)&0x3)<<14) #define rme9652_decode_spdif_in(x) (((x)>>14)&0x3) #define RME9652_SyncPref_Mask (RME9652_SyncPref_ADAT2|RME9652_SyncPref_ADAT3) #define RME9652_SyncPref_ADAT1 0 #define RME9652_SyncPref_SPDIF (RME9652_SyncPref_ADAT2|RME9652_SyncPref_ADAT3) /* the size of a substream (1 mono data stream) */ #define RME9652_CHANNEL_BUFFER_SAMPLES (16*1024) #define RME9652_CHANNEL_BUFFER_BYTES (4*RME9652_CHANNEL_BUFFER_SAMPLES) /* the size of the area we need to allocate for DMA transfers. the size is the same regardless of the number of channels - the 9636 still uses the same memory area. Note that we allocate 1 more channel than is apparently needed because the h/w seems to write 1 byte beyond the end of the last page. Sigh. */ #define RME9652_DMA_AREA_BYTES ((RME9652_NCHANNELS+1) * RME9652_CHANNEL_BUFFER_BYTES) #define RME9652_DMA_AREA_KILOBYTES (RME9652_DMA_AREA_BYTES/1024) struct snd_rme9652 { int dev; spinlock_t lock; int irq; unsigned long port; void __iomem *iobase; int precise_ptr; u32 control_register; /* cached value */ u32 thru_bits; /* thru 1=on, 0=off channel 1=Bit1... 
channel 26= Bit26 */ u32 creg_spdif; u32 creg_spdif_stream; char *card_name; /* hammerfall or hammerfall light names */ size_t hw_offsetmask; /* &-with status register to get real hw_offset */ size_t prev_hw_offset; /* previous hw offset */ size_t max_jitter; /* maximum jitter in frames for hw pointer */ size_t period_bytes; /* guess what this is */ unsigned char ds_channels; unsigned char ss_channels; /* different for hammerfall/hammerfall-light */ struct snd_dma_buffer playback_dma_buf; struct snd_dma_buffer capture_dma_buf; unsigned char *capture_buffer; /* suitably aligned address */ unsigned char *playback_buffer; /* suitably aligned address */ pid_t capture_pid; pid_t playback_pid; struct snd_pcm_substream *capture_substream; struct snd_pcm_substream *playback_substream; int running; int passthru; /* non-zero if doing pass-thru */ int hw_rev; /* h/w rev * 10 (i.e. 1.5 has hw_rev = 15) */ int last_spdif_sample_rate; /* so that we can catch externally ... */ int last_adat_sample_rate; /* ... induced rate changes */ char *channel_map; struct snd_card *card; struct snd_pcm *pcm; struct pci_dev *pci; struct snd_kcontrol *spdif_ctl; }; /* These tables map the ALSA channels 1..N to the channels that we need to use in order to find the relevant channel buffer. RME refer to this kind of mapping as between "the ADAT channel and the DMA channel." We index it using the logical audio channel, and the value is the DMA channel (i.e. channel buffer number) where the data for that channel can be read/written from/to. 
*/ static char channel_map_9652_ss[26] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 }; static char channel_map_9636_ss[26] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* channels 16 and 17 are S/PDIF */ 24, 25, /* channels 18-25 don't exist */ -1, -1, -1, -1, -1, -1, -1, -1 }; static char channel_map_9652_ds[26] = { /* ADAT channels are remapped */ 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, /* channels 12 and 13 are S/PDIF */ 24, 25, /* others don't exist */ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; static char channel_map_9636_ds[26] = { /* ADAT channels are remapped */ 1, 3, 5, 7, 9, 11, 13, 15, /* channels 8 and 9 are S/PDIF */ 24, 25, /* others don't exist */ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; static int snd_hammerfall_get_buffer(struct pci_dev *pci, struct snd_dma_buffer *dmab, size_t size) { dmab->dev.type = SNDRV_DMA_TYPE_DEV; dmab->dev.dev = snd_dma_pci_data(pci); if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), size, dmab) < 0) return -ENOMEM; return 0; } static void snd_hammerfall_free_buffer(struct snd_dma_buffer *dmab, struct pci_dev *pci) { if (dmab->area) snd_dma_free_pages(dmab); } static const struct pci_device_id snd_rme9652_ids[] = { { .vendor = 0x10ee, .device = 0x3fc4, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, /* RME Digi9652 */ { 0, }, }; MODULE_DEVICE_TABLE(pci, snd_rme9652_ids); static inline void rme9652_write(struct snd_rme9652 *rme9652, int reg, int val) { writel(val, rme9652->iobase + reg); } static inline unsigned int rme9652_read(struct snd_rme9652 *rme9652, int reg) { return readl(rme9652->iobase + reg); } static inline int snd_rme9652_use_is_exclusive(struct snd_rme9652 *rme9652) { unsigned long flags; int ret = 1; spin_lock_irqsave(&rme9652->lock, flags); if ((rme9652->playback_pid != rme9652->capture_pid) && (rme9652->playback_pid >= 0) && (rme9652->capture_pid >= 0)) { ret = 0; } 
	spin_unlock_irqrestore(&rme9652->lock, flags);
	return ret;
}

/* Current ADAT sample rate, decoded from the hardware status register. */
static inline int rme9652_adat_sample_rate(struct snd_rme9652 *rme9652)
{
	if (rme9652_running_double_speed(rme9652)) {
		return (rme9652_read(rme9652, RME9652_status_register) &
			RME9652_fs48) ? 96000 : 88200;
	} else {
		return (rme9652_read(rme9652, RME9652_status_register) &
			RME9652_fs48) ? 48000 : 44100;
	}
}

/*
 * Derive period size (bytes), the hardware offset mask and the jitter
 * bound from the latency bits of the cached control register.
 */
static inline void rme9652_compute_period_size(struct snd_rme9652 *rme9652)
{
	unsigned int i;

	i = rme9652->control_register & RME9652_latency;
	rme9652->period_bytes = 1 << ((rme9652_decode_latency(i) + 8));
	rme9652->hw_offsetmask =
		(rme9652->period_bytes * 2 - 1) & RME9652_buf_pos;
	rme9652->max_jitter = 80;
}

/* Current hardware position in frames, compensating for pointer jitter. */
static snd_pcm_uframes_t rme9652_hw_pointer(struct snd_rme9652 *rme9652)
{
	int status;
	unsigned int offset, frag;
	snd_pcm_uframes_t period_size = rme9652->period_bytes / 4;
	snd_pcm_sframes_t delta;

	status = rme9652_read(rme9652, RME9652_status_register);
	/* without precise_ptr, only report period granularity (buffer id) */
	if (!rme9652->precise_ptr)
		return (status & RME9652_buffer_id) ? period_size : 0;
	offset = status & RME9652_buf_pos;

	/* The hardware may give a backward movement for up to 80 frames
	   Martin Kirst <martin.kirst@freenet.de> knows the details.
	 */
	/* ignore small backward movements: keep the previous offset */
	delta = rme9652->prev_hw_offset - offset;
	delta &= 0xffff;
	if (delta <= (snd_pcm_sframes_t)rme9652->max_jitter * 4)
		offset = rme9652->prev_hw_offset;
	else
		rme9652->prev_hw_offset = offset;
	offset &= rme9652->hw_offsetmask;
	offset /= 4;	/* bytes -> frames */
	frag = status & RME9652_buffer_id;

	/* reconcile the offset with the buffer-id bit, within jitter bounds */
	if (offset < period_size) {
		if (offset > rme9652->max_jitter) {
			if (frag)
				dev_err(rme9652->card->dev,
					"Unexpected hw_pointer position (bufid == 0): status: %x offset: %d\n",
					status, offset);
		} else if (!frag)
			return 0;
		offset -= rme9652->max_jitter;
		if ((int)offset < 0)
			offset += period_size * 2;
	} else {
		if (offset > period_size + rme9652->max_jitter) {
			if (!frag)
				dev_err(rme9652->card->dev,
					"Unexpected hw_pointer position (bufid == 1): status: %x offset: %d\n",
					status, offset);
		} else if (frag)
			return period_size;
		offset -= rme9652->max_jitter;
	}

	return offset;
}

static inline void rme9652_reset_hw_pointer(struct snd_rme9652 *rme9652)
{
	int i;

	/* reset the FIFO pointer to zero. We do this by writing to 8
	   registers, each of which is a 32bit wide register, and set
	   them all to zero. Note that s->iobase is a pointer to
	   int32, not pointer to char.
	 */
	for (i = 0; i < 8; i++) {
		rme9652_write(rme9652, i * 4, 0);
		udelay(10);
	}
	rme9652->prev_hw_offset = 0;
}

/* Enable interrupts and set the start bit in the control register. */
static inline void rme9652_start(struct snd_rme9652 *s)
{
	s->control_register |= (RME9652_IE | RME9652_start_bit);
	rme9652_write(s, RME9652_control_register, s->control_register);
}

/* Clear the start bit and disable interrupts. */
static inline void rme9652_stop(struct snd_rme9652 *s)
{
	s->control_register &= ~(RME9652_start_bit | RME9652_IE);
	rme9652_write(s, RME9652_control_register, s->control_register);
}

/*
 * Program the interrupt interval (period size) in frames; the card is
 * stopped and restarted around the latency-bit update if it was running.
 */
static int rme9652_set_interrupt_interval(struct snd_rme9652 *s, unsigned int frames)
{
	int restart = 0;
	int n;

	spin_lock_irq(&s->lock);

	if ((restart = s->running)) {
		rme9652_stop(s);
	}

	/* convert frames into the latency encoding: log2(frames / 128) */
	frames >>= 7;
	n = 0;
	while (frames) {
		n++;
		frames >>= 1;
	}

	s->control_register &= ~RME9652_latency;
	s->control_register |= rme9652_encode_latency(n);

	rme9652_write(s, RME9652_control_register, s->control_register);

	rme9652_compute_period_size(s);

	if (restart)
		rme9652_start(s);

	spin_unlock_irq(&s->lock);

	return 0;
}

/* Set the sample rate; -EBUSY if another process owns the other stream. */
static int rme9652_set_rate(struct snd_rme9652 *rme9652, int rate)
{
	int restart;
	int reject_if_open = 0;
	int xrate;

	if (!snd_rme9652_use_is_exclusive (rme9652)) {
		return -EBUSY;
	}

	/* Changing from a "single speed" to a "double speed" rate is
	   not allowed if any substreams are open. This is because
	   such a change causes a shift in the location of
	   the DMA buffers and a reduction in the number of available
	   buffers.

	   Note that a similar but essentially insoluble problem
	   exists for externally-driven rate changes. All we can do
	   is to flag rate changes in the read/write routines.
	 */
	spin_lock_irq(&rme9652->lock);
	xrate = rme9652_adat_sample_rate(rme9652);

	/* translate the rate into control-register bits; reject a
	   single<->double speed switch while substreams are open */
	switch (rate) {
	case 44100:
		if (xrate > 48000) {
			reject_if_open = 1;
		}
		rate = 0;
		break;
	case 48000:
		if (xrate > 48000) {
			reject_if_open = 1;
		}
		rate = RME9652_freq;
		break;
	case 88200:
		if (xrate < 48000) {
			reject_if_open = 1;
		}
		rate = RME9652_DS;
		break;
	case 96000:
		if (xrate < 48000) {
			reject_if_open = 1;
		}
		rate = RME9652_DS | RME9652_freq;
		break;
	default:
		spin_unlock_irq(&rme9652->lock);
		return -EINVAL;
	}

	if (reject_if_open &&
	    (rme9652->capture_pid >= 0 || rme9652->playback_pid >= 0)) {
		spin_unlock_irq(&rme9652->lock);
		return -EBUSY;
	}

	if ((restart = rme9652->running)) {
		rme9652_stop(rme9652);
	}
	rme9652->control_register &= ~(RME9652_freq | RME9652_DS);
	rme9652->control_register |= rate;
	rme9652_write(rme9652, RME9652_control_register, rme9652->control_register);

	if (restart) {
		rme9652_start(rme9652);
	}

	/* select the channel map matching speed mode and card variant */
	if (rate & RME9652_DS) {
		if (rme9652->ss_channels == RME9652_NCHANNELS) {
			rme9652->channel_map = channel_map_9652_ds;
		} else {
			rme9652->channel_map = channel_map_9636_ds;
		}
	} else {
		if (rme9652->ss_channels == RME9652_NCHANNELS) {
			rme9652->channel_map = channel_map_9652_ss;
		} else {
			rme9652->channel_map = channel_map_9636_ss;
		}
	}

	spin_unlock_irq(&rme9652->lock);
	return 0;
}

/*
 * Enable/disable hardware monitoring (thru) for one mapped channel,
 * or for all channels when @channel < 0. Clears the passthru flag.
 */
static void rme9652_set_thru(struct snd_rme9652 *rme9652, int channel, int enable)
{
	int i;

	rme9652->passthru = 0;

	if (channel < 0) {

		/* set thru for all channels */

		if (enable) {
			for (i = 0; i < RME9652_NCHANNELS; i++) {
				rme9652->thru_bits |= (1 << i);
				rme9652_write(rme9652, RME9652_thru_base + i * 4, 1);
			}
		} else {
			for (i = 0; i < RME9652_NCHANNELS; i++) {
				rme9652->thru_bits &= ~(1 << i);
				rme9652_write(rme9652, RME9652_thru_base + i * 4, 0);
			}
		}

	} else {
		int mapped_channel;

		mapped_channel = rme9652->channel_map[channel];

		if (enable) {
			rme9652->thru_bits |= (1 << mapped_channel);
		} else {
			rme9652->thru_bits &= ~(1 << mapped_channel);
		}

		rme9652_write(rme9652, RME9652_thru_base + mapped_channel * 4,
			      enable ?
			      1 : 0);
	}
}

/* Turn monitoring of all channels on/off; passthru bypasses interrupts. */
static int rme9652_set_passthru(struct snd_rme9652 *rme9652, int onoff)
{
	if (onoff) {
		rme9652_set_thru(rme9652, -1, 1);

		/* we don't want interrupts, so do a
		   custom version of rme9652_start().
		*/
		rme9652->control_register =
			RME9652_inp_0 | rme9652_encode_latency(7) | RME9652_start_bit;

		rme9652_reset_hw_pointer(rme9652);
		rme9652_write(rme9652, RME9652_control_register,
			      rme9652->control_register);
		rme9652->passthru = 1;
	} else {
		rme9652_set_thru(rme9652, -1, 0);
		rme9652_stop(rme9652);
		rme9652->passthru = 0;
	}

	return 0;
}

/*
 * Bit-bang helpers for talking to the S/PDIF codec over control-register
 * lines (select/clock/data).
 */
static void rme9652_spdif_set_bit (struct snd_rme9652 *rme9652, int mask, int onoff)
{
	if (onoff)
		rme9652->control_register |= mask;
	else
		rme9652->control_register &= ~mask;

	rme9652_write(rme9652, RME9652_control_register,
		      rme9652->control_register);
}

/* Shift one byte out MSB-first, clocking each bit. */
static void rme9652_spdif_write_byte (struct snd_rme9652 *rme9652, const int val)
{
	long mask;
	long i;

	for (i = 0, mask = 0x80; i < 8; i++, mask >>= 1) {
		if (val & mask)
			rme9652_spdif_set_bit (rme9652, RME9652_SPDIF_WRITE, 1);
		else
			rme9652_spdif_set_bit (rme9652, RME9652_SPDIF_WRITE, 0);

		rme9652_spdif_set_bit (rme9652, RME9652_SPDIF_CLOCK, 1);
		rme9652_spdif_set_bit (rme9652, RME9652_SPDIF_CLOCK, 0);
	}
}

/* Shift one byte in MSB-first, sampling the read line per clock. */
static int rme9652_spdif_read_byte (struct snd_rme9652 *rme9652)
{
	long mask;
	long val;
	long i;

	val = 0;

	for (i = 0, mask = 0x80; i < 8; i++, mask >>= 1) {
		rme9652_spdif_set_bit (rme9652, RME9652_SPDIF_CLOCK, 1);
		if (rme9652_read (rme9652, RME9652_status_register) & RME9652_SPDIF_READ)
			val |= mask;
		rme9652_spdif_set_bit (rme9652, RME9652_SPDIF_CLOCK, 0);
	}

	return val;
}

/* Write one codec register: select, 0x20 (write op), address, data. */
static void rme9652_write_spdif_codec (struct snd_rme9652 *rme9652, const int address, const int data)
{
	rme9652_spdif_set_bit (rme9652, RME9652_SPDIF_SELECT, 1);
	rme9652_spdif_write_byte (rme9652, 0x20);
	rme9652_spdif_write_byte (rme9652, address);
	rme9652_spdif_write_byte (rme9652, data);
	rme9652_spdif_set_bit (rme9652, RME9652_SPDIF_SELECT, 0);
}

static int rme9652_spdif_read_codec (struct snd_rme9652 *rme9652, const int
				     address)
{
	int ret;

	/* write cycle: opcode 0x20 selects the register address */
	rme9652_spdif_set_bit (rme9652, RME9652_SPDIF_SELECT, 1);
	rme9652_spdif_write_byte (rme9652, 0x20);
	rme9652_spdif_write_byte (rme9652, address);
	rme9652_spdif_set_bit (rme9652, RME9652_SPDIF_SELECT, 0);

	/* read cycle: opcode 0x21 returns the register contents */
	rme9652_spdif_set_bit (rme9652, RME9652_SPDIF_SELECT, 1);
	rme9652_spdif_write_byte (rme9652, 0x21);
	ret = rme9652_spdif_read_byte (rme9652);
	rme9652_spdif_set_bit (rme9652, RME9652_SPDIF_SELECT, 0);

	return ret;
}

/* One-time configuration of the S/PDIF receiver codec registers. */
static void rme9652_initialize_spdif_receiver (struct snd_rme9652 *rme9652)
{
	/* XXX what unsets this ? */
	rme9652->control_register |= RME9652_SPDIF_RESET;

	rme9652_write_spdif_codec (rme9652, 4, 0x40);
	rme9652_write_spdif_codec (rme9652, 17, 0x13);
	rme9652_write_spdif_codec (rme9652, 6, 0x02);
}

/*
 * Detected S/PDIF input rate in Hz; 0 if undetermined, -1 on error flag.
 * hw rev 1.5 derives the rate from codec register 30, older revisions
 * read the rate bits from the status register.
 */
static inline int rme9652_spdif_sample_rate(struct snd_rme9652 *s)
{
	unsigned int rate_bits;

	if (rme9652_read(s, RME9652_status_register) & RME9652_ERF) {
		return -1;	/* error condition */
	}

	if (s->hw_rev == 15) {

		int x, y, ret;

		x = rme9652_spdif_read_codec (s, 30);

		if (x != 0)
			y = 48000 * 64 / x;
		else
			y = 0;

		/* bucket the measured value into a nominal rate */
		if (y > 30400 && y < 33600)
			ret = 32000;
		else if (y > 41900 && y < 46000)
			ret = 44100;
		else if (y > 46000 && y < 50400)
			ret = 48000;
		else if (y > 60800 && y < 67200)
			ret = 64000;
		else if (y > 83700 && y < 92000)
			ret = 88200;
		else if (y > 92000 && y < 100000)
			ret = 96000;
		else
			ret = 0;
		return ret;
	}

	rate_bits = rme9652_read(s, RME9652_status_register) & RME9652_F;

	switch (rme9652_decode_spdif_rate(rate_bits)) {
	case 0x7:
		return 32000;
		break;
	case 0x6:
		return 44100;
		break;
	case 0x5:
		return 48000;
		break;
	case 0x4:
		return 88200;
		break;
	case 0x3:
		return 96000;
		break;
	case 0x0:
		return 64000;
		break;
	default:
		dev_err(s->card->dev,
			"%s: unknown S/PDIF input rate (bits = 0x%x)\n",
			s->card_name, rate_bits);
		return 0;
		break;
	}
}

/*-----------------------------------------------------------------------------
   Control Interface
  ----------------------------------------------------------------------------*/

/* Pack the relevant IEC958 status bits into RME control-register bits. */
static u32 snd_rme9652_convert_from_aes(struct
					snd_aes_iec958 *aes)
{
	u32 val = 0;
	val |= (aes->status[0] & IEC958_AES0_PROFESSIONAL) ? RME9652_PRO : 0;
	val |= (aes->status[0] & IEC958_AES0_NONAUDIO) ? RME9652_Dolby : 0;
	/* emphasis bit position differs between professional and consumer */
	if (val & RME9652_PRO)
		val |= (aes->status[0] & IEC958_AES0_PRO_EMPHASIS_5015) ? RME9652_EMP : 0;
	else
		val |= (aes->status[0] & IEC958_AES0_CON_EMPHASIS_5015) ? RME9652_EMP : 0;
	return val;
}

/* Inverse of snd_rme9652_convert_from_aes(). */
static void snd_rme9652_convert_to_aes(struct snd_aes_iec958 *aes, u32 val)
{
	aes->status[0] = ((val & RME9652_PRO) ? IEC958_AES0_PROFESSIONAL : 0) |
			 ((val & RME9652_Dolby) ? IEC958_AES0_NONAUDIO : 0);
	if (val & RME9652_PRO)
		aes->status[0] |= (val & RME9652_EMP) ? IEC958_AES0_PRO_EMPHASIS_5015 : 0;
	else
		aes->status[0] |= (val & RME9652_EMP) ? IEC958_AES0_CON_EMPHASIS_5015 : 0;
}

/* kcontrol callbacks for the default IEC958 status word. */
static int snd_rme9652_control_spdif_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

static int snd_rme9652_control_spdif_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);

	snd_rme9652_convert_to_aes(&ucontrol->value.iec958, rme9652->creg_spdif);
	return 0;
}

static int snd_rme9652_control_spdif_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);
	int change;
	u32 val;

	val = snd_rme9652_convert_from_aes(&ucontrol->value.iec958);
	spin_lock_irq(&rme9652->lock);
	change = val != rme9652->creg_spdif;
	rme9652->creg_spdif = val;
	spin_unlock_irq(&rme9652->lock);
	return change;
}

/* kcontrol callbacks for the per-stream IEC958 status word. */
static int snd_rme9652_control_spdif_stream_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

static int snd_rme9652_control_spdif_stream_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);

	snd_rme9652_convert_to_aes(&ucontrol->value.iec958,
				   rme9652->creg_spdif_stream);
	return 0;
}

static int snd_rme9652_control_spdif_stream_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);
	int change;
	u32 val;

	val = snd_rme9652_convert_from_aes(&ucontrol->value.iec958);
	spin_lock_irq(&rme9652->lock);
	change = val != rme9652->creg_spdif_stream;
	rme9652->creg_spdif_stream = val;
	/* apply the stream status bits to the hardware immediately */
	rme9652->control_register &= ~(RME9652_PRO | RME9652_Dolby | RME9652_EMP);
	rme9652_write(rme9652, RME9652_control_register,
		      rme9652->control_register |= val);
	spin_unlock_irq(&rme9652->lock);
	return change;
}

/* Read-only masks advertising which IEC958 bits this driver honours. */
static int snd_rme9652_control_spdif_mask_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

static int snd_rme9652_control_spdif_mask_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.iec958.status[0] = kcontrol->private_value;
	return 0;
}

#define RME9652_ADAT1_IN(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_rme9652_info_adat1_in, \
  .get = snd_rme9652_get_adat1_in, \
  .put = snd_rme9652_put_adat1_in }

/* 1 if ADAT1 input is routed to the internal connector, 0 for optical. */
static unsigned int rme9652_adat1_in(struct snd_rme9652 *rme9652)
{
	if (rme9652->control_register & RME9652_ADAT1_INTERNAL)
		return 1;
	return 0;
}

static int rme9652_set_adat1_input(struct snd_rme9652 *rme9652, int internal)
{
	int restart = 0;

	if (internal) {
		rme9652->control_register |= RME9652_ADAT1_INTERNAL;
	} else {
		rme9652->control_register &= ~RME9652_ADAT1_INTERNAL;
	}

	/* XXX do we actually need to stop the card when we do this ?
	 */
	if ((restart = rme9652->running)) {
		rme9652_stop(rme9652);
	}

	rme9652_write(rme9652, RME9652_control_register, rme9652->control_register);

	if (restart) {
		rme9652_start(rme9652);
	}

	return 0;
}

static int snd_rme9652_info_adat1_in(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	static const char * const texts[2] = {"ADAT1", "Internal"};

	return snd_ctl_enum_info(uinfo, 1, 2, texts);
}

static int snd_rme9652_get_adat1_in(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&rme9652->lock);
	ucontrol->value.enumerated.item[0] = rme9652_adat1_in(rme9652);
	spin_unlock_irq(&rme9652->lock);
	return 0;
}

static int snd_rme9652_put_adat1_in(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);
	int change;
	unsigned int val;

	if (!snd_rme9652_use_is_exclusive(rme9652))
		return -EBUSY;
	val = ucontrol->value.enumerated.item[0] % 2;
	spin_lock_irq(&rme9652->lock);
	change = val != rme9652_adat1_in(rme9652);
	if (change)
		rme9652_set_adat1_input(rme9652, val);
	spin_unlock_irq(&rme9652->lock);
	return change;
}

#define RME9652_SPDIF_IN(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_rme9652_info_spdif_in, \
  .get = snd_rme9652_get_spdif_in, .put = snd_rme9652_put_spdif_in }

/* Decode the current S/PDIF input selector from the control register. */
static unsigned int rme9652_spdif_in(struct snd_rme9652 *rme9652)
{
	return rme9652_decode_spdif_in(rme9652->control_register & RME9652_inp);
}

static int rme9652_set_spdif_input(struct snd_rme9652 *rme9652, int in)
{
	int restart = 0;

	rme9652->control_register &= ~RME9652_inp;
	rme9652->control_register |= rme9652_encode_spdif_in(in);

	if ((restart = rme9652->running)) {
		rme9652_stop(rme9652);
	}

	rme9652_write(rme9652, RME9652_control_register, rme9652->control_register);

	if (restart) {
		rme9652_start(rme9652);
	}

	return 0;
}

static int snd_rme9652_info_spdif_in(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_info *uinfo)
{
	static const char * const texts[3] = {"ADAT1", "Coaxial", "Internal"};

	return snd_ctl_enum_info(uinfo, 1, 3, texts);
}

static int snd_rme9652_get_spdif_in(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&rme9652->lock);
	ucontrol->value.enumerated.item[0] = rme9652_spdif_in(rme9652);
	spin_unlock_irq(&rme9652->lock);
	return 0;
}

static int snd_rme9652_put_spdif_in(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);
	int change;
	unsigned int val;

	if (!snd_rme9652_use_is_exclusive(rme9652))
		return -EBUSY;
	val = ucontrol->value.enumerated.item[0] % 3;
	spin_lock_irq(&rme9652->lock);
	change = val != rme9652_spdif_in(rme9652);
	if (change)
		rme9652_set_spdif_input(rme9652, val);
	spin_unlock_irq(&rme9652->lock);
	return change;
}

#define RME9652_SPDIF_OUT(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_rme9652_info_spdif_out, \
  .get = snd_rme9652_get_spdif_out, .put = snd_rme9652_put_spdif_out }

/* 1 if S/PDIF is also mirrored on the ADAT1 optical output. */
static int rme9652_spdif_out(struct snd_rme9652 *rme9652)
{
	return (rme9652->control_register & RME9652_opt_out) ?
		1 : 0;
}

static int rme9652_set_spdif_output(struct snd_rme9652 *rme9652, int out)
{
	int restart = 0;

	if (out) {
		rme9652->control_register |= RME9652_opt_out;
	} else {
		rme9652->control_register &= ~RME9652_opt_out;
	}

	if ((restart = rme9652->running)) {
		rme9652_stop(rme9652);
	}

	rme9652_write(rme9652, RME9652_control_register, rme9652->control_register);

	if (restart) {
		rme9652_start(rme9652);
	}

	return 0;
}

#define snd_rme9652_info_spdif_out	snd_ctl_boolean_mono_info

static int snd_rme9652_get_spdif_out(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&rme9652->lock);
	ucontrol->value.integer.value[0] = rme9652_spdif_out(rme9652);
	spin_unlock_irq(&rme9652->lock);
	return 0;
}

static int snd_rme9652_put_spdif_out(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);
	int change;
	unsigned int val;

	if (!snd_rme9652_use_is_exclusive(rme9652))
		return -EBUSY;
	val = ucontrol->value.integer.value[0] & 1;
	spin_lock_irq(&rme9652->lock);
	change = (int)val != rme9652_spdif_out(rme9652);
	rme9652_set_spdif_output(rme9652, val);
	spin_unlock_irq(&rme9652->lock);
	return change;
}

#define RME9652_SYNC_MODE(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_rme9652_info_sync_mode, \
  .get = snd_rme9652_get_sync_mode, .put = snd_rme9652_put_sync_mode }

/* Clock mode: 0 = AutoSync, 1 = Master, 2 = Word Clock. */
static int rme9652_sync_mode(struct snd_rme9652 *rme9652)
{
	if (rme9652->control_register & RME9652_wsel) {
		return 2;
	} else if (rme9652->control_register & RME9652_Master) {
		return 1;
	} else {
		return 0;
	}
}

static int rme9652_set_sync_mode(struct snd_rme9652 *rme9652, int mode)
{
	int restart = 0;

	switch (mode) {
	case 0:
		rme9652->control_register &= ~(RME9652_Master | RME9652_wsel);
		break;
	case 1:
		rme9652->control_register =
		    (rme9652->control_register & ~RME9652_wsel) | RME9652_Master;
		break;
	case 2:
		rme9652->control_register
		    |= (RME9652_Master | RME9652_wsel);
		break;
	}

	if ((restart = rme9652->running)) {
		rme9652_stop(rme9652);
	}

	rme9652_write(rme9652, RME9652_control_register, rme9652->control_register);

	if (restart) {
		rme9652_start(rme9652);
	}

	return 0;
}

static int snd_rme9652_info_sync_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	static const char * const texts[3] = {
		"AutoSync", "Master", "Word Clock"
	};

	return snd_ctl_enum_info(uinfo, 1, 3, texts);
}

static int snd_rme9652_get_sync_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&rme9652->lock);
	ucontrol->value.enumerated.item[0] = rme9652_sync_mode(rme9652);
	spin_unlock_irq(&rme9652->lock);
	return 0;
}

static int snd_rme9652_put_sync_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);
	int change;
	unsigned int val;

	val = ucontrol->value.enumerated.item[0] % 3;
	spin_lock_irq(&rme9652->lock);
	change = (int)val != rme9652_sync_mode(rme9652);
	rme9652_set_sync_mode(rme9652, val);
	spin_unlock_irq(&rme9652->lock);
	return change;
}

#define RME9652_SYNC_PREF(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_rme9652_info_sync_pref, \
  .get = snd_rme9652_get_sync_pref, .put = snd_rme9652_put_sync_pref }

/* Map the control-register sync-preference bits to the enum values. */
static int rme9652_sync_pref(struct snd_rme9652 *rme9652)
{
	switch (rme9652->control_register & RME9652_SyncPref_Mask) {
	case RME9652_SyncPref_ADAT1:
		return RME9652_SYNC_FROM_ADAT1;
	case RME9652_SyncPref_ADAT2:
		return RME9652_SYNC_FROM_ADAT2;
	case RME9652_SyncPref_ADAT3:
		return RME9652_SYNC_FROM_ADAT3;
	case RME9652_SyncPref_SPDIF:
		return RME9652_SYNC_FROM_SPDIF;
	}
	/* Not reachable */
	return 0;
}

static int rme9652_set_sync_pref(struct snd_rme9652 *rme9652, int pref)
{
	int restart;

	rme9652->control_register &= ~RME9652_SyncPref_Mask;
	switch (pref) {
	case RME9652_SYNC_FROM_ADAT1:
		rme9652->control_register |= RME9652_SyncPref_ADAT1;
		break;
	case RME9652_SYNC_FROM_ADAT2:
		rme9652->control_register |= RME9652_SyncPref_ADAT2;
		break;
	case RME9652_SYNC_FROM_ADAT3:
		rme9652->control_register |= RME9652_SyncPref_ADAT3;
		break;
	case RME9652_SYNC_FROM_SPDIF:
		rme9652->control_register |= RME9652_SyncPref_SPDIF;
		break;
	}

	if ((restart = rme9652->running)) {
		rme9652_stop(rme9652);
	}

	rme9652_write(rme9652, RME9652_control_register, rme9652->control_register);

	if (restart) {
		rme9652_start(rme9652);
	}

	return 0;
}

static int snd_rme9652_info_sync_pref(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	static const char * const texts[4] = {
		"IEC958 In", "ADAT1 In", "ADAT2 In", "ADAT3 In"
	};
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);

	/* the 9636 has no third ADAT port, so expose only 3 choices */
	return snd_ctl_enum_info(uinfo, 1,
				 rme9652->ss_channels == RME9652_NCHANNELS ? 4 : 3,
				 texts);
}

static int snd_rme9652_get_sync_pref(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&rme9652->lock);
	ucontrol->value.enumerated.item[0] = rme9652_sync_pref(rme9652);
	spin_unlock_irq(&rme9652->lock);
	return 0;
}

static int snd_rme9652_put_sync_pref(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);
	int change, max;
	unsigned int val;

	if (!snd_rme9652_use_is_exclusive(rme9652))
		return -EBUSY;
	max = rme9652->ss_channels == RME9652_NCHANNELS ?
	      4 : 3;
	val = ucontrol->value.enumerated.item[0] % max;
	spin_lock_irq(&rme9652->lock);
	change = (int)val != rme9652_sync_pref(rme9652);
	rme9652_set_sync_pref(rme9652, val);
	spin_unlock_irq(&rme9652->lock);
	return change;
}

/* Per-channel hardware-monitoring (thru) switch array. */
static int snd_rme9652_info_thru(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
	uinfo->count = rme9652->ss_channels;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 1;
	return 0;
}

static int snd_rme9652_get_thru(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);
	unsigned int k;
	u32 thru_bits = rme9652->thru_bits;

	for (k = 0; k < rme9652->ss_channels; ++k) {
		ucontrol->value.integer.value[k] = !!(thru_bits & (1 << k));
	}
	return 0;
}

static int snd_rme9652_put_thru(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);
	int change;
	unsigned int chn;
	u32 thru_bits = 0;

	if (!snd_rme9652_use_is_exclusive(rme9652))
		return -EBUSY;

	for (chn = 0; chn < rme9652->ss_channels; ++chn) {
		if (ucontrol->value.integer.value[chn])
			thru_bits |= 1 << chn;
	}

	spin_lock_irq(&rme9652->lock);
	change = thru_bits ^ rme9652->thru_bits;
	if (change) {
		/* only touch the channels whose state actually changed */
		for (chn = 0; chn < rme9652->ss_channels; ++chn) {
			if (!(change & (1 << chn)))
				continue;
			rme9652_set_thru(rme9652,chn,thru_bits&(1<<chn));
		}
	}
	spin_unlock_irq(&rme9652->lock);
	return !!change;
}

#define RME9652_PASSTHRU(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_rme9652_info_passthru, \
  .put = snd_rme9652_put_passthru, \
  .get = snd_rme9652_get_passthru }

#define snd_rme9652_info_passthru	snd_ctl_boolean_mono_info

static int snd_rme9652_get_passthru(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);
	spin_lock_irq(&rme9652->lock);
	ucontrol->value.integer.value[0] = rme9652->passthru;
	spin_unlock_irq(&rme9652->lock);
	return 0;
}

static int snd_rme9652_put_passthru(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);
	int change;
	unsigned int val;
	int err = 0;

	if (!snd_rme9652_use_is_exclusive(rme9652))
		return -EBUSY;

	val = ucontrol->value.integer.value[0] & 1;
	spin_lock_irq(&rme9652->lock);
	change = (ucontrol->value.integer.value[0] != rme9652->passthru);
	if (change)
		err = rme9652_set_passthru(rme9652, val);
	spin_unlock_irq(&rme9652->lock);
	return err ? err : change;
}

/* Read-only switches */

#define RME9652_SPDIF_RATE(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
  .info = snd_rme9652_info_spdif_rate, \
  .get = snd_rme9652_get_spdif_rate }

static int snd_rme9652_info_spdif_rate(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 96000;
	return 0;
}

static int snd_rme9652_get_spdif_rate(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&rme9652->lock);
	ucontrol->value.integer.value[0] = rme9652_spdif_sample_rate(rme9652);
	spin_unlock_irq(&rme9652->lock);
	return 0;
}

#define RME9652_ADAT_SYNC(xname, xindex, xidx) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
  .info = snd_rme9652_info_adat_sync, \
  .get = snd_rme9652_get_adat_sync, .private_value = xidx }

static int snd_rme9652_info_adat_sync(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	static const char * const texts[4] = {
		"No Lock", "Lock", "No Lock Sync", "Lock Sync"
	};

	return
		snd_ctl_enum_info(uinfo, 1, 4, texts);
}

/* Report lock (bit 0) and sync (bit 1) status for one ADAT port. */
static int snd_rme9652_get_adat_sync(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);
	unsigned int mask1, mask2, val;

	switch (kcontrol->private_value) {
	case 0:
		mask1 = RME9652_lock_0;
		mask2 = RME9652_sync_0;
		break;
	case 1:
		mask1 = RME9652_lock_1;
		mask2 = RME9652_sync_1;
		break;
	case 2:
		mask1 = RME9652_lock_2;
		mask2 = RME9652_sync_2;
		break;
	default:
		return -EINVAL;
	}

	val = rme9652_read(rme9652, RME9652_status_register);
	ucontrol->value.enumerated.item[0] = (val & mask1) ? 1 : 0;
	ucontrol->value.enumerated.item[0] |= (val & mask2) ? 2 : 0;
	return 0;
}

#define RME9652_TC_VALID(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
  .info = snd_rme9652_info_tc_valid, \
  .get = snd_rme9652_get_tc_valid }

#define snd_rme9652_info_tc_valid	snd_ctl_boolean_mono_info

static int snd_rme9652_get_tc_valid(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);

	ucontrol->value.integer.value[0] =
		(rme9652_read(rme9652, RME9652_status_register) & RME9652_tc_valid) ? 1 : 0;
	return 0;
}

#ifdef ALSA_HAS_STANDARD_WAY_OF_RETURNING_TIMECODE

/* FIXME: this routine needs a port to the new control API --jk */

/* Read the 32-bit timecode value from the card (dead code: old switch API). */
static int snd_rme9652_get_tc_value(void *private_data, snd_kswitch_t *kswitch, snd_switch_t *uswitch)
{
	struct snd_rme9652 *s = (struct snd_rme9652 *) private_data;
	u32 value;
	int i;

	uswitch->type = SNDRV_SW_TYPE_DWORD;

	if ((rme9652_read(s, RME9652_status_register) & RME9652_tc_valid) == 0) {
		uswitch->value.data32[0] = 0;
		return 0;
	}

	/* timecode request */

	rme9652_write(s, RME9652_time_code, 0);

	/* XXX bug alert: loop-based timing !!!!
	 */
	for (i = 0; i < 50; i++) {
		if (!(rme9652_read(s, i * 4) & RME9652_tc_busy))
			break;
	}

	/* NOTE(review): this bails out when the busy flag has CLEARED,
	   which looks inverted (expected: fail if still busy after the
	   loop) — confirm before reviving this #ifdef'd-out code. */
	if (!(rme9652_read(s, i * 4) & RME9652_tc_busy)) {
		return -EIO;
	}

	/* shift in 32 timecode bits, LSB first */
	value = 0;
	for (i = 0; i < 32; i++) {
		value >>= 1;
		if (rme9652_read(s, i * 4) & RME9652_tc_out)
			value |= 0x80000000;
	}

	if (value > 2 * 60 * 48000) {
		value -= 2 * 60 * 48000;
	} else {
		value = 0;
	}

	uswitch->value.data32[0] = value;

	return 0;
}

#endif				/* ALSA_HAS_STANDARD_WAY_OF_RETURNING_TIMECODE */

/* Mixer/PCM controls registered for every card variant. */
static struct snd_kcontrol_new snd_rme9652_controls[] = {
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
	.info =		snd_rme9652_control_spdif_info,
	.get =		snd_rme9652_control_spdif_get,
	.put =		snd_rme9652_control_spdif_put,
},
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE |
			SNDRV_CTL_ELEM_ACCESS_INACTIVE,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,PCM_STREAM),
	.info =		snd_rme9652_control_spdif_stream_info,
	.get =		snd_rme9652_control_spdif_stream_get,
	.put =		snd_rme9652_control_spdif_stream_put,
},
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
	.info =		snd_rme9652_control_spdif_mask_info,
	.get =		snd_rme9652_control_spdif_mask_get,
	.private_value = IEC958_AES0_NONAUDIO |
			IEC958_AES0_PROFESSIONAL |
			IEC958_AES0_CON_EMPHASIS,
},
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK),
	.info =		snd_rme9652_control_spdif_mask_info,
	.get =		snd_rme9652_control_spdif_mask_get,
	.private_value = IEC958_AES0_NONAUDIO |
			IEC958_AES0_PROFESSIONAL |
			IEC958_AES0_PRO_EMPHASIS,
},
RME9652_SPDIF_IN("IEC958 Input Connector", 0),
RME9652_SPDIF_OUT("IEC958 Output also on ADAT1", 0),
RME9652_SYNC_MODE("Sync Mode", 0),
RME9652_SYNC_PREF("Preferred Sync Source", 0),
{
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Channels Thru",
	.index = 0,
	.info = snd_rme9652_info_thru,
	.get = snd_rme9652_get_thru,
	.put = snd_rme9652_put_thru,
},
RME9652_SPDIF_RATE("IEC958 Sample Rate", 0),
RME9652_ADAT_SYNC("ADAT1 Sync Check", 0, 0),
RME9652_ADAT_SYNC("ADAT2 Sync Check", 0, 1),
RME9652_TC_VALID("Timecode Valid", 0),
RME9652_PASSTHRU("Passthru", 0)
};

/* Optional controls, added only for cards that support them. */
static struct snd_kcontrol_new snd_rme9652_adat3_check =
RME9652_ADAT_SYNC("ADAT3 Sync Check", 0, 2);

static struct snd_kcontrol_new snd_rme9652_adat1_input =
RME9652_ADAT1_IN("ADAT1 Input Source", 0);

/*
 * Register all controls on @card; remembers the S/PDIF stream control so
 * it can be (de)activated later. The ADAT3 check only exists on the full
 * 26-channel 9652, the ADAT1 input selector only on hw rev >= 1.5.
 */
static int snd_rme9652_create_controls(struct snd_card *card, struct snd_rme9652 *rme9652)
{
	unsigned int idx;
	int err;
	struct snd_kcontrol *kctl;

	for (idx = 0; idx < ARRAY_SIZE(snd_rme9652_controls); idx++) {
		if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_rme9652_controls[idx], rme9652))) < 0)
			return err;
		if (idx == 1)	/* IEC958 (S/PDIF) Stream */
			rme9652->spdif_ctl = kctl;
	}

	if (rme9652->ss_channels == RME9652_NCHANNELS)
		if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_rme9652_adat3_check, rme9652))) < 0)
			return err;

	if (rme9652->hw_rev >= 15)
		if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_rme9652_adat1_input, rme9652))) < 0)
			return err;

	return 0;
}

/*------------------------------------------------------------
   /proc interface
 ------------------------------------------------------------*/

/* Dump card state (clock mode, sync sources, rates, ...) to /proc. */
static void
snd_rme9652_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
{
	struct snd_rme9652 *rme9652 = (struct snd_rme9652 *) entry->private_data;
	u32 thru_bits = rme9652->thru_bits;
	int show_auto_sync_source = 0;
	int i;
	unsigned int status;
	int x;

	status = rme9652_read(rme9652, RME9652_status_register);

	snd_iprintf(buffer, "%s (Card #%d)\n", rme9652->card_name,
		    rme9652->card->number + 1);
	snd_iprintf(buffer, "Buffers: capture %p playback %p\n",
		    rme9652->capture_buffer, rme9652->playback_buffer);
	snd_iprintf(buffer, "IRQ: %d Registers bus: 0x%lx VM: 0x%lx\n",
		    rme9652->irq, rme9652->port, (unsigned long)rme9652->iobase);
	snd_iprintf(buffer, "Control register: %x\n", rme9652->control_register);
	snd_iprintf(buffer, "\n");

	x = 1 <<
(6 + rme9652_decode_latency(rme9652->control_register & RME9652_latency)); snd_iprintf(buffer, "Latency: %d samples (2 periods of %lu bytes)\n", x, (unsigned long) rme9652->period_bytes); snd_iprintf(buffer, "Hardware pointer (frames): %ld\n", rme9652_hw_pointer(rme9652)); snd_iprintf(buffer, "Passthru: %s\n", rme9652->passthru ? "yes" : "no"); if ((rme9652->control_register & (RME9652_Master | RME9652_wsel)) == 0) { snd_iprintf(buffer, "Clock mode: autosync\n"); show_auto_sync_source = 1; } else if (rme9652->control_register & RME9652_wsel) { if (status & RME9652_wsel_rd) { snd_iprintf(buffer, "Clock mode: word clock\n"); } else { snd_iprintf(buffer, "Clock mode: word clock (no signal)\n"); } } else { snd_iprintf(buffer, "Clock mode: master\n"); } if (show_auto_sync_source) { switch (rme9652->control_register & RME9652_SyncPref_Mask) { case RME9652_SyncPref_ADAT1: snd_iprintf(buffer, "Pref. sync source: ADAT1\n"); break; case RME9652_SyncPref_ADAT2: snd_iprintf(buffer, "Pref. sync source: ADAT2\n"); break; case RME9652_SyncPref_ADAT3: snd_iprintf(buffer, "Pref. sync source: ADAT3\n"); break; case RME9652_SyncPref_SPDIF: snd_iprintf(buffer, "Pref. sync source: IEC958\n"); break; default: snd_iprintf(buffer, "Pref. sync source: ???\n"); } } if (rme9652->hw_rev >= 15) snd_iprintf(buffer, "\nADAT1 Input source: %s\n", (rme9652->control_register & RME9652_ADAT1_INTERNAL) ? 
"Internal" : "ADAT1 optical"); snd_iprintf(buffer, "\n"); switch (rme9652_decode_spdif_in(rme9652->control_register & RME9652_inp)) { case RME9652_SPDIFIN_OPTICAL: snd_iprintf(buffer, "IEC958 input: ADAT1\n"); break; case RME9652_SPDIFIN_COAXIAL: snd_iprintf(buffer, "IEC958 input: Coaxial\n"); break; case RME9652_SPDIFIN_INTERN: snd_iprintf(buffer, "IEC958 input: Internal\n"); break; default: snd_iprintf(buffer, "IEC958 input: ???\n"); break; } if (rme9652->control_register & RME9652_opt_out) { snd_iprintf(buffer, "IEC958 output: Coaxial & ADAT1\n"); } else { snd_iprintf(buffer, "IEC958 output: Coaxial only\n"); } if (rme9652->control_register & RME9652_PRO) { snd_iprintf(buffer, "IEC958 quality: Professional\n"); } else { snd_iprintf(buffer, "IEC958 quality: Consumer\n"); } if (rme9652->control_register & RME9652_EMP) { snd_iprintf(buffer, "IEC958 emphasis: on\n"); } else { snd_iprintf(buffer, "IEC958 emphasis: off\n"); } if (rme9652->control_register & RME9652_Dolby) { snd_iprintf(buffer, "IEC958 Dolby: on\n"); } else { snd_iprintf(buffer, "IEC958 Dolby: off\n"); } i = rme9652_spdif_sample_rate(rme9652); if (i < 0) { snd_iprintf(buffer, "IEC958 sample rate: error flag set\n"); } else if (i == 0) { snd_iprintf(buffer, "IEC958 sample rate: undetermined\n"); } else { snd_iprintf(buffer, "IEC958 sample rate: %d\n", i); } snd_iprintf(buffer, "\n"); snd_iprintf(buffer, "ADAT Sample rate: %dHz\n", rme9652_adat_sample_rate(rme9652)); /* Sync Check */ x = status & RME9652_sync_0; if (status & RME9652_lock_0) { snd_iprintf(buffer, "ADAT1: %s\n", x ? "Sync" : "Lock"); } else { snd_iprintf(buffer, "ADAT1: No Lock\n"); } x = status & RME9652_sync_1; if (status & RME9652_lock_1) { snd_iprintf(buffer, "ADAT2: %s\n", x ? "Sync" : "Lock"); } else { snd_iprintf(buffer, "ADAT2: No Lock\n"); } x = status & RME9652_sync_2; if (status & RME9652_lock_2) { snd_iprintf(buffer, "ADAT3: %s\n", x ? 
"Sync" : "Lock"); } else { snd_iprintf(buffer, "ADAT3: No Lock\n"); } snd_iprintf(buffer, "\n"); snd_iprintf(buffer, "Timecode signal: %s\n", (status & RME9652_tc_valid) ? "yes" : "no"); /* thru modes */ snd_iprintf(buffer, "Punch Status:\n\n"); for (i = 0; i < rme9652->ss_channels; i++) { if (thru_bits & (1 << i)) { snd_iprintf(buffer, "%2d: on ", i + 1); } else { snd_iprintf(buffer, "%2d: off ", i + 1); } if (((i + 1) % 8) == 0) { snd_iprintf(buffer, "\n"); } } snd_iprintf(buffer, "\n"); } static void snd_rme9652_proc_init(struct snd_rme9652 *rme9652) { struct snd_info_entry *entry; if (! snd_card_proc_new(rme9652->card, "rme9652", &entry)) snd_info_set_text_ops(entry, rme9652, snd_rme9652_proc_read); } static void snd_rme9652_free_buffers(struct snd_rme9652 *rme9652) { snd_hammerfall_free_buffer(&rme9652->capture_dma_buf, rme9652->pci); snd_hammerfall_free_buffer(&rme9652->playback_dma_buf, rme9652->pci); } static int snd_rme9652_free(struct snd_rme9652 *rme9652) { if (rme9652->irq >= 0) rme9652_stop(rme9652); snd_rme9652_free_buffers(rme9652); if (rme9652->irq >= 0) free_irq(rme9652->irq, (void *)rme9652); iounmap(rme9652->iobase); if (rme9652->port) pci_release_regions(rme9652->pci); pci_disable_device(rme9652->pci); return 0; } static int snd_rme9652_initialize_memory(struct snd_rme9652 *rme9652) { unsigned long pb_bus, cb_bus; if (snd_hammerfall_get_buffer(rme9652->pci, &rme9652->capture_dma_buf, RME9652_DMA_AREA_BYTES) < 0 || snd_hammerfall_get_buffer(rme9652->pci, &rme9652->playback_dma_buf, RME9652_DMA_AREA_BYTES) < 0) { if (rme9652->capture_dma_buf.area) snd_dma_free_pages(&rme9652->capture_dma_buf); dev_err(rme9652->card->dev, "%s: no buffers available\n", rme9652->card_name); return -ENOMEM; } /* Align to bus-space 64K boundary */ cb_bus = ALIGN(rme9652->capture_dma_buf.addr, 0x10000ul); pb_bus = ALIGN(rme9652->playback_dma_buf.addr, 0x10000ul); /* Tell the card where it is */ rme9652_write(rme9652, RME9652_rec_buffer, cb_bus); rme9652_write(rme9652, 
RME9652_play_buffer, pb_bus);

	/* Record the CPU-visible addresses that correspond to the aligned
	 * bus addresses just programmed into the card, so reads/writes hit
	 * exactly the region the hardware DMAs to/from.
	 */
	rme9652->capture_buffer = rme9652->capture_dma_buf.area +
		(cb_bus - rme9652->capture_dma_buf.addr);
	rme9652->playback_buffer = rme9652->playback_dma_buf.area +
		(pb_bus - rme9652->playback_dma_buf.addr);

	return 0;
}

/* Program the driver's default state into the card: input source,
 * latency, thru routing off, and a default sample rate.
 */
static void snd_rme9652_set_defaults(struct snd_rme9652 *rme9652)
{
	unsigned int k;

	/* ASSUMPTION: rme9652->lock is either held, or there is no need to
	   hold it (e.g. during module initialization).
	 */

	/* set defaults:

	   SPDIF Input via Coax
	   autosync clock mode
	   maximum latency (7 = 8192 samples, 64Kbyte buffer,
	   which implies 2 4096 sample, 32Kbyte periods).

	   if rev 1.5, initialize the S/PDIF receiver.
	*/

	rme9652->control_register =
		RME9652_inp_0 | rme9652_encode_latency(7);

	rme9652_write(rme9652, RME9652_control_register,
		      rme9652->control_register);

	rme9652_reset_hw_pointer(rme9652);
	rme9652_compute_period_size(rme9652);

	/* default: thru off for all channels */
	for (k = 0; k < RME9652_NCHANNELS; ++k)
		rme9652_write(rme9652, RME9652_thru_base + k * 4, 0);

	rme9652->thru_bits = 0;
	rme9652->passthru = 0;

	/* set a default rate so that the channel map is set up */
	rme9652_set_rate(rme9652, 48000);
}

/* Interrupt handler.  The line is requested with IRQF_SHARED elsewhere
 * in this file, so first check the card's status bit and bail out with
 * IRQ_NONE if this interrupt is not ours; otherwise ack it and signal
 * period completion on any open PCM streams.
 */
static irqreturn_t snd_rme9652_interrupt(int irq, void *dev_id)
{
	struct snd_rme9652 *rme9652 = (struct snd_rme9652 *) dev_id;

	/* Not our interrupt: no pending-IRQ bit in the status register. */
	if (!(rme9652_read(rme9652, RME9652_status_register) & RME9652_IRQ)) {
		return IRQ_NONE;
	}

	/* Acknowledge/clear the interrupt on the card. */
	rme9652_write(rme9652, RME9652_irq_clear, 0);

	if (rme9652->capture_substream) {
		snd_pcm_period_elapsed(rme9652->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream);
	}

	if (rme9652->playback_substream) {
		snd_pcm_period_elapsed(rme9652->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream);
	}
	return IRQ_HANDLED;
}

/* ALSA .pointer callback: report the current hardware position. */
static snd_pcm_uframes_t snd_rme9652_hw_pointer(struct snd_pcm_substream *substream)
{
	struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
	return rme9652_hw_pointer(rme9652);
}

/* Return the start of the per-channel buffer for a logical channel,
 * after translating it through the active channel map.  Returns NULL
 * for an out-of-range or unmapped channel.
 */
static char *rme9652_channel_buffer_location(struct snd_rme9652 *rme9652,
					     int stream,
					     int channel)
{
	int mapped_channel;

	if
(snd_BUG_ON(channel < 0 || channel >= RME9652_NCHANNELS))
		return NULL;

	/* channel_map translates logical -> physical channel index; a
	 * negative entry means the channel is not usable in the current
	 * mode.
	 */
	if ((mapped_channel = rme9652->channel_map[channel]) < 0) {
		return NULL;
	}

	if (stream == SNDRV_PCM_STREAM_CAPTURE) {
		return rme9652->capture_buffer +
			(mapped_channel * RME9652_CHANNEL_BUFFER_BYTES);
	} else {
		return rme9652->playback_buffer +
			(mapped_channel * RME9652_CHANNEL_BUFFER_BYTES);
	}
}

/* ALSA .copy callback (playback): copy 'count' frames of 32-bit (4 byte)
 * samples from user space into one channel's buffer, starting at frame
 * position 'pos'.  Returns the frame count on success or a negative
 * errno (-EINVAL out of range, -EIO no buffer, -EFAULT bad user ptr).
 */
static int snd_rme9652_playback_copy(struct snd_pcm_substream *substream,
				     int channel, snd_pcm_uframes_t pos,
				     void __user *src, snd_pcm_uframes_t count)
{
	struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
	char *channel_buf;

	/* 4 bytes per sample: the frame range must fit the channel buffer */
	if (snd_BUG_ON(pos + count > RME9652_CHANNEL_BUFFER_BYTES / 4))
		return -EINVAL;

	channel_buf = rme9652_channel_buffer_location (rme9652,
						       substream->pstr->stream,
						       channel);
	if (snd_BUG_ON(!channel_buf))
		return -EIO;
	if (copy_from_user(channel_buf + pos * 4, src, count * 4))
		return -EFAULT;
	return count;
}

/* ALSA .copy callback (capture): mirror image of the playback copy —
 * copy 'count' frames out of one channel's buffer to user space.
 */
static int snd_rme9652_capture_copy(struct snd_pcm_substream *substream,
				    int channel, snd_pcm_uframes_t pos,
				    void __user *dst, snd_pcm_uframes_t count)
{
	struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
	char *channel_buf;

	if (snd_BUG_ON(pos + count > RME9652_CHANNEL_BUFFER_BYTES / 4))
		return -EINVAL;

	channel_buf = rme9652_channel_buffer_location (rme9652,
						       substream->pstr->stream,
						       channel);
	if (snd_BUG_ON(!channel_buf))
		return -EIO;
	if (copy_to_user(dst, channel_buf + pos * 4, count * 4))
		return -EFAULT;
	return count;
}

/* ALSA .silence callback: zero 'count' frames of one channel's buffer
 * starting at frame position 'pos'.
 */
static int snd_rme9652_hw_silence(struct snd_pcm_substream *substream,
				  int channel, snd_pcm_uframes_t pos,
				  snd_pcm_uframes_t count)
{
	struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
	char *channel_buf;

	channel_buf = rme9652_channel_buffer_location (rme9652,
						       substream->pstr->stream,
						       channel);
	if (snd_BUG_ON(!channel_buf))
		return -EIO;
	memset(channel_buf + pos * 4, 0, count * 4);
	return count;
}

/* SNDRV_PCM_IOCTL1_RESET: resynchronize the runtime hardware pointer
 * (and that of a linked substream, if any) with the card.
 */
static int snd_rme9652_reset(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct
snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
	struct snd_pcm_substream *other;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		other = rme9652->capture_substream;
	else
		other = rme9652->playback_substream;

	/* When the card is running, resync to the live hardware position;
	 * otherwise restart from zero.
	 */
	if (rme9652->running)
		runtime->status->hw_ptr = rme9652_hw_pointer(rme9652);
	else
		runtime->status->hw_ptr = 0;

	/* Keep a substream linked in the same group in step with us. */
	if (other) {
		struct snd_pcm_substream *s;
		struct snd_pcm_runtime *oruntime = other->runtime;
		snd_pcm_group_for_each_entry(s, substream) {
			if (s == other) {
				oruntime->status->hw_ptr = runtime->status->hw_ptr;
				break;
			}
		}
	}
	return 0;
}

/* ALSA .hw_params callback.  Both directions share one piece of
 * hardware, so if the other direction is already open by a different
 * task, the shared parameters (rate, period size) must match what the
 * hardware is currently set to, else -EBUSY.
 */
static int snd_rme9652_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params)
{
	struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
	int err;
	pid_t this_pid;
	pid_t other_pid;

	spin_lock_irq(&rme9652->lock);

	if (substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		/* Clear the S/PDIF status bits, then write the control
		 * register with the per-stream S/PDIF bits merged in.
		 * NOTE: the |= inside the call argument deliberately also
		 * updates the cached control_register value.
		 */
		rme9652->control_register &=
			~(RME9652_PRO | RME9652_Dolby | RME9652_EMP);

		rme9652_write(rme9652, RME9652_control_register,
			      rme9652->control_register |= rme9652->creg_spdif_stream);

		this_pid = rme9652->playback_pid;
		other_pid = rme9652->capture_pid;
	} else {
		this_pid = rme9652->capture_pid;
		other_pid = rme9652->playback_pid;
	}

	if ((other_pid > 0) && (this_pid != other_pid)) {

		/* The other stream is open, and not by the same
		   task as this one. Make sure that the parameters
		   that matter are the same.
		 */

		if ((int)params_rate(params) !=
		    rme9652_adat_sample_rate(rme9652)) {
			spin_unlock_irq(&rme9652->lock);
			_snd_pcm_hw_param_setempty(params, SNDRV_PCM_HW_PARAM_RATE);
			return -EBUSY;
		}

		/* period_bytes / 4 converts per-channel bytes to frames
		 * (samples are 4 bytes each).
		 */
		if (params_period_size(params) !=
		    rme9652->period_bytes / 4) {
			spin_unlock_irq(&rme9652->lock);
			_snd_pcm_hw_param_setempty(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
			return -EBUSY;
		}

		/* We're fine. */

		spin_unlock_irq(&rme9652->lock);
		return 0;

	} else {
		spin_unlock_irq(&rme9652->lock);
	}

	/* how to make sure that the rate matches an externally-set one ?
*/

	if ((err = rme9652_set_rate(rme9652, params_rate(params))) < 0) {
		_snd_pcm_hw_param_setempty(params, SNDRV_PCM_HW_PARAM_RATE);
		return err;
	}

	if ((err = rme9652_set_interrupt_interval(rme9652,
						  params_period_size(params))) < 0) {
		_snd_pcm_hw_param_setempty(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
		return err;
	}

	return 0;
}

/* SNDRV_PCM_IOCTL1_CHANNEL_INFO: describe where one logical channel
 * lives in the non-interleaved DMA area (byte offset, first bit, step).
 */
static int snd_rme9652_channel_info(struct snd_pcm_substream *substream,
				    struct snd_pcm_channel_info *info)
{
	struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
	int chn;

	if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS))
		return -EINVAL;

	/* Negative map entry: channel unavailable in the current mode. */
	if ((chn = rme9652->channel_map[info->channel]) < 0) {
		return -EINVAL;
	}

	info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES;
	info->first = 0;
	info->step = 32;	/* 32 bits per sample */
	return 0;
}

/* ALSA .ioctl callback: handle the two ops this driver customizes and
 * delegate everything else to the generic PCM library handler.
 */
static int snd_rme9652_ioctl(struct snd_pcm_substream *substream,
			     unsigned int cmd, void *arg)
{
	switch (cmd) {
	case SNDRV_PCM_IOCTL1_RESET:
	{
		return snd_rme9652_reset(substream);
	}
	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
	{
		struct snd_pcm_channel_info *info = arg;
		return snd_rme9652_channel_info(substream, info);
	}
	default:
		break;
	}

	return snd_pcm_lib_ioctl(substream, cmd, arg);
}

/* Zero the entire playback DMA area. */
static void rme9652_silence_playback(struct snd_rme9652 *rme9652)
{
	memset(rme9652->playback_buffer, 0, RME9652_DMA_AREA_BYTES);
}

/* ALSA .trigger callback: update the per-direction running bitmask,
 * keep a linked substream in the same group in step, and start/stop
 * the hardware when the overall running state changes.  Called with
 * BH disabled, hence plain spin_lock.
 */
static int snd_rme9652_trigger(struct snd_pcm_substream *substream,
			       int cmd)
{
	struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
	struct snd_pcm_substream *other;
	int running;

	spin_lock(&rme9652->lock);
	running = rme9652->running;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		running |= 1 << substream->stream;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		running &= ~(1 << substream->stream);
		break;
	default:
		snd_BUG();
		spin_unlock(&rme9652->lock);
		return -EINVAL;
	}

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		other = rme9652->capture_substream;
	else
		other = rme9652->playback_substream;

	if (other) {
		struct snd_pcm_substream *s;
		snd_pcm_group_for_each_entry(s, substream) {
			if (s == other) {
				snd_pcm_trigger_done(s,
substream); if (cmd == SNDRV_PCM_TRIGGER_START) running |= 1 << s->stream; else running &= ~(1 << s->stream); goto _ok; } } if (cmd == SNDRV_PCM_TRIGGER_START) { if (!(running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) && substream->stream == SNDRV_PCM_STREAM_CAPTURE) rme9652_silence_playback(rme9652); } else { if (running && substream->stream == SNDRV_PCM_STREAM_PLAYBACK) rme9652_silence_playback(rme9652); } } else { if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) rme9652_silence_playback(rme9652); } _ok: snd_pcm_trigger_done(substream, substream); if (!rme9652->running && running) rme9652_start(rme9652); else if (rme9652->running && !running) rme9652_stop(rme9652); rme9652->running = running; spin_unlock(&rme9652->lock); return 0; } static int snd_rme9652_prepare(struct snd_pcm_substream *substream) { struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream); unsigned long flags; int result = 0; spin_lock_irqsave(&rme9652->lock, flags); if (!rme9652->running) rme9652_reset_hw_pointer(rme9652); spin_unlock_irqrestore(&rme9652->lock, flags); return result; } static struct snd_pcm_hardware snd_rme9652_playback_subinfo = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_NONINTERLEAVED | SNDRV_PCM_INFO_SYNC_START | SNDRV_PCM_INFO_DOUBLE), .formats = SNDRV_PCM_FMTBIT_S32_LE, .rates = (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000), .rate_min = 44100, .rate_max = 96000, .channels_min = 10, .channels_max = 26, .buffer_bytes_max = RME9652_CHANNEL_BUFFER_BYTES * 26, .period_bytes_min = (64 * 4) * 10, .period_bytes_max = (8192 * 4) * 26, .periods_min = 2, .periods_max = 2, .fifo_size = 0, }; static struct snd_pcm_hardware snd_rme9652_capture_subinfo = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_NONINTERLEAVED | SNDRV_PCM_INFO_SYNC_START), .formats = SNDRV_PCM_FMTBIT_S32_LE, .rates = (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | 
SNDRV_PCM_RATE_96000), .rate_min = 44100, .rate_max = 96000, .channels_min = 10, .channels_max = 26, .buffer_bytes_max = RME9652_CHANNEL_BUFFER_BYTES *26, .period_bytes_min = (64 * 4) * 10, .period_bytes_max = (8192 * 4) * 26, .periods_min = 2, .periods_max = 2, .fifo_size = 0, }; static unsigned int period_sizes[] = { 64, 128, 256, 512, 1024, 2048, 4096, 8192 }; static struct snd_pcm_hw_constraint_list hw_constraints_period_sizes = { .count = ARRAY_SIZE(period_sizes), .list = period_sizes, .mask = 0 }; static int snd_rme9652_hw_rule_channels(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_rme9652 *rme9652 = rule->private; struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); unsigned int list[2] = { rme9652->ds_channels, rme9652->ss_channels }; return snd_interval_list(c, 2, list, 0); } static int snd_rme9652_hw_rule_channels_rate(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_rme9652 *rme9652 = rule->private; struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); struct snd_interval *r = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); if (r->min > 48000) { struct snd_interval t = { .min = rme9652->ds_channels, .max = rme9652->ds_channels, .integer = 1, }; return snd_interval_refine(c, &t); } else if (r->max < 88200) { struct snd_interval t = { .min = rme9652->ss_channels, .max = rme9652->ss_channels, .integer = 1, }; return snd_interval_refine(c, &t); } return 0; } static int snd_rme9652_hw_rule_rate_channels(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_rme9652 *rme9652 = rule->private; struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); struct snd_interval *r = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); if (c->min >= rme9652->ss_channels) { struct snd_interval t = { .min = 44100, .max = 48000, .integer = 1, }; return snd_interval_refine(r, &t); } else if (c->max <= 
rme9652->ds_channels) { struct snd_interval t = { .min = 88200, .max = 96000, .integer = 1, }; return snd_interval_refine(r, &t); } return 0; } static int snd_rme9652_playback_open(struct snd_pcm_substream *substream) { struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; spin_lock_irq(&rme9652->lock); snd_pcm_set_sync(substream); runtime->hw = snd_rme9652_playback_subinfo; runtime->dma_area = rme9652->playback_buffer; runtime->dma_bytes = RME9652_DMA_AREA_BYTES; if (rme9652->capture_substream == NULL) { rme9652_stop(rme9652); rme9652_set_thru(rme9652, -1, 0); } rme9652->playback_pid = current->pid; rme9652->playback_substream = substream; spin_unlock_irq(&rme9652->lock); snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24); snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, &hw_constraints_period_sizes); snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, snd_rme9652_hw_rule_channels, rme9652, SNDRV_PCM_HW_PARAM_CHANNELS, -1); snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, snd_rme9652_hw_rule_channels_rate, rme9652, SNDRV_PCM_HW_PARAM_RATE, -1); snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, snd_rme9652_hw_rule_rate_channels, rme9652, SNDRV_PCM_HW_PARAM_CHANNELS, -1); rme9652->creg_spdif_stream = rme9652->creg_spdif; rme9652->spdif_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; snd_ctl_notify(rme9652->card, SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO, &rme9652->spdif_ctl->id); return 0; } static int snd_rme9652_playback_release(struct snd_pcm_substream *substream) { struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream); spin_lock_irq(&rme9652->lock); rme9652->playback_pid = -1; rme9652->playback_substream = NULL; spin_unlock_irq(&rme9652->lock); rme9652->spdif_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; snd_ctl_notify(rme9652->card, SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO, &rme9652->spdif_ctl->id); 
return 0; } static int snd_rme9652_capture_open(struct snd_pcm_substream *substream) { struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; spin_lock_irq(&rme9652->lock); snd_pcm_set_sync(substream); runtime->hw = snd_rme9652_capture_subinfo; runtime->dma_area = rme9652->capture_buffer; runtime->dma_bytes = RME9652_DMA_AREA_BYTES; if (rme9652->playback_substream == NULL) { rme9652_stop(rme9652); rme9652_set_thru(rme9652, -1, 0); } rme9652->capture_pid = current->pid; rme9652->capture_substream = substream; spin_unlock_irq(&rme9652->lock); snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24); snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, &hw_constraints_period_sizes); snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, snd_rme9652_hw_rule_channels, rme9652, SNDRV_PCM_HW_PARAM_CHANNELS, -1); snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, snd_rme9652_hw_rule_channels_rate, rme9652, SNDRV_PCM_HW_PARAM_RATE, -1); snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, snd_rme9652_hw_rule_rate_channels, rme9652, SNDRV_PCM_HW_PARAM_CHANNELS, -1); return 0; } static int snd_rme9652_capture_release(struct snd_pcm_substream *substream) { struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream); spin_lock_irq(&rme9652->lock); rme9652->capture_pid = -1; rme9652->capture_substream = NULL; spin_unlock_irq(&rme9652->lock); return 0; } static struct snd_pcm_ops snd_rme9652_playback_ops = { .open = snd_rme9652_playback_open, .close = snd_rme9652_playback_release, .ioctl = snd_rme9652_ioctl, .hw_params = snd_rme9652_hw_params, .prepare = snd_rme9652_prepare, .trigger = snd_rme9652_trigger, .pointer = snd_rme9652_hw_pointer, .copy = snd_rme9652_playback_copy, .silence = snd_rme9652_hw_silence, }; static struct snd_pcm_ops snd_rme9652_capture_ops = { .open = snd_rme9652_capture_open, .close = snd_rme9652_capture_release, .ioctl = snd_rme9652_ioctl, .hw_params = 
snd_rme9652_hw_params, .prepare = snd_rme9652_prepare, .trigger = snd_rme9652_trigger, .pointer = snd_rme9652_hw_pointer, .copy = snd_rme9652_capture_copy, }; static int snd_rme9652_create_pcm(struct snd_card *card, struct snd_rme9652 *rme9652) { struct snd_pcm *pcm; int err; if ((err = snd_pcm_new(card, rme9652->card_name, 0, 1, 1, &pcm)) < 0) { return err; } rme9652->pcm = pcm; pcm->private_data = rme9652; strcpy(pcm->name, rme9652->card_name); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_rme9652_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_rme9652_capture_ops); pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX; return 0; } static int snd_rme9652_create(struct snd_card *card, struct snd_rme9652 *rme9652, int precise_ptr) { struct pci_dev *pci = rme9652->pci; int err; int status; unsigned short rev; rme9652->irq = -1; rme9652->card = card; pci_read_config_word(rme9652->pci, PCI_CLASS_REVISION, &rev); switch (rev & 0xff) { case 3: case 4: case 8: case 9: break; default: /* who knows? */ return -ENODEV; } if ((err = pci_enable_device(pci)) < 0) return err; spin_lock_init(&rme9652->lock); if ((err = pci_request_regions(pci, "rme9652")) < 0) return err; rme9652->port = pci_resource_start(pci, 0); rme9652->iobase = ioremap_nocache(rme9652->port, RME9652_IO_EXTENT); if (rme9652->iobase == NULL) { dev_err(card->dev, "unable to remap region 0x%lx-0x%lx\n", rme9652->port, rme9652->port + RME9652_IO_EXTENT - 1); return -EBUSY; } if (request_irq(pci->irq, snd_rme9652_interrupt, IRQF_SHARED, KBUILD_MODNAME, rme9652)) { dev_err(card->dev, "unable to request IRQ %d\n", pci->irq); return -EBUSY; } rme9652->irq = pci->irq; rme9652->precise_ptr = precise_ptr; /* Determine the h/w rev level of the card. This seems like a particularly kludgy way to encode it, but its what RME chose to do, so we follow them ... 
*/ status = rme9652_read(rme9652, RME9652_status_register); if (rme9652_decode_spdif_rate(status&RME9652_F) == 1) { rme9652->hw_rev = 15; } else { rme9652->hw_rev = 11; } /* Differentiate between the standard Hammerfall, and the "Light", which does not have the expansion board. This method comes from information received from Mathhias Clausen at RME. Display the EEPROM and h/w revID where relevant. */ switch (rev) { case 8: /* original eprom */ strcpy(card->driver, "RME9636"); if (rme9652->hw_rev == 15) { rme9652->card_name = "RME Digi9636 (Rev 1.5)"; } else { rme9652->card_name = "RME Digi9636"; } rme9652->ss_channels = RME9636_NCHANNELS; break; case 9: /* W36_G EPROM */ strcpy(card->driver, "RME9636"); rme9652->card_name = "RME Digi9636 (Rev G)"; rme9652->ss_channels = RME9636_NCHANNELS; break; case 4: /* W52_G EPROM */ strcpy(card->driver, "RME9652"); rme9652->card_name = "RME Digi9652 (Rev G)"; rme9652->ss_channels = RME9652_NCHANNELS; break; case 3: /* original eprom */ strcpy(card->driver, "RME9652"); if (rme9652->hw_rev == 15) { rme9652->card_name = "RME Digi9652 (Rev 1.5)"; } else { rme9652->card_name = "RME Digi9652"; } rme9652->ss_channels = RME9652_NCHANNELS; break; } rme9652->ds_channels = (rme9652->ss_channels - 2) / 2 + 2; pci_set_master(rme9652->pci); if ((err = snd_rme9652_initialize_memory(rme9652)) < 0) { return err; } if ((err = snd_rme9652_create_pcm(card, rme9652)) < 0) { return err; } if ((err = snd_rme9652_create_controls(card, rme9652)) < 0) { return err; } snd_rme9652_proc_init(rme9652); rme9652->last_spdif_sample_rate = -1; rme9652->last_adat_sample_rate = -1; rme9652->playback_pid = -1; rme9652->capture_pid = -1; rme9652->capture_substream = NULL; rme9652->playback_substream = NULL; snd_rme9652_set_defaults(rme9652); if (rme9652->hw_rev == 15) { rme9652_initialize_spdif_receiver (rme9652); } return 0; } static void snd_rme9652_card_free(struct snd_card *card) { struct snd_rme9652 *rme9652 = (struct snd_rme9652 *) card->private_data; if 
(rme9652) snd_rme9652_free(rme9652); } static int snd_rme9652_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_rme9652 *rme9652; struct snd_card *card; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE, sizeof(struct snd_rme9652), &card); if (err < 0) return err; rme9652 = (struct snd_rme9652 *) card->private_data; card->private_free = snd_rme9652_card_free; rme9652->dev = dev; rme9652->pci = pci; if ((err = snd_rme9652_create(card, rme9652, precise_ptr[dev])) < 0) { snd_card_free(card); return err; } strcpy(card->shortname, rme9652->card_name); sprintf(card->longname, "%s at 0x%lx, irq %d", card->shortname, rme9652->port, rme9652->irq); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static void snd_rme9652_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); } static struct pci_driver rme9652_driver = { .name = KBUILD_MODNAME, .id_table = snd_rme9652_ids, .probe = snd_rme9652_probe, .remove = snd_rme9652_remove, }; module_pci_driver(rme9652_driver);
gpl-2.0
VM12/kernel_moto_shamu
arch/arm64/mm/mmap.c
1066
3681
/* * Based on arch/arm/mm/mmap.c * * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/elf.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/export.h> #include <linux/shm.h> #include <linux/sched.h> #include <linux/io.h> #include <linux/personality.h> #include <linux/random.h> #include <asm/cputype.h> /* * Leave enough space between the mmap area and the stack to honour ulimit in * the face of randomisation. */ #define MIN_GAP (SZ_128M + ((STACK_RND_MASK << PAGE_SHIFT) + 1)) #define MAX_GAP (STACK_TOP/6*5) static int mmap_is_legacy(void) { if (current->personality & ADDR_COMPAT_LAYOUT) return 1; if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) return 1; return sysctl_legacy_va_layout; } /* * Since get_random_int() returns the same value within a 1 jiffy window, we * will almost always get the same randomisation for the stack and mmap * region. This will mean the relative distance between stack and mmap will be * the same. * * To avoid this we can shift the randomness by 1 bit. 
*/ static unsigned long mmap_rnd(void) { unsigned long rnd = 0; if (current->flags & PF_RANDOMIZE) rnd = (long)get_random_int() & (STACK_RND_MASK >> 1); return rnd << (PAGE_SHIFT + 1); } static unsigned long mmap_base(void) { unsigned long gap = rlimit(RLIMIT_STACK); if (gap < MIN_GAP) gap = MIN_GAP; else if (gap > MAX_GAP) gap = MAX_GAP; return PAGE_ALIGN(STACK_TOP - gap - mmap_rnd()); } /* * This function, called very early during the creation of a new process VM * image, sets up which VM layout function to use: */ void arch_pick_mmap_layout(struct mm_struct *mm) { /* * Fall back to the standard layout if the personality bit is set, or * if the expected stack growth is unlimited: */ if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } } EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); /* * You really shouldn't be using read() or write() on /dev/mem. This might go * away in the future. */ int valid_phys_addr_range(unsigned long addr, size_t size) { if (addr < PHYS_OFFSET) return 0; if (addr + size > __pa(high_memory - 1) + 1) return 0; return 1; } /* * Do not allow /dev/mem mappings beyond the supported physical range. */ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) { return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK); } #ifdef CONFIG_STRICT_DEVMEM #include <linux/ioport.h> /* * devmem_is_allowed() checks to see if /dev/mem access to a certain address * is valid. The argument is a physical page number. We mimic x86 here by * disallowing access to system RAM as well as device-exclusive MMIO regions. * This effectively disable read()/write() on /dev/mem. */ int devmem_is_allowed(unsigned long pfn) { if (iomem_is_exclusive(pfn << PAGE_SHIFT)) return 0; if (!page_is_ram(pfn)) return 1; return 0; } #endif
gpl-2.0
TeamWin/android_kernel_samsung_goyave
drivers/staging/comedi/drivers/dmm32at.c
2090
22299
/* comedi/drivers/dmm32at.c Diamond Systems mm32at code for a Comedi driver COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: dmm32at Description: Diamond Systems mm32at driver. Devices: Author: Perry J. Piplani <perry.j.piplani@nasa.gov> Updated: Fri Jun 4 09:13:24 CDT 2004 Status: experimental This driver is for the Diamond Systems MM-32-AT board http://www.diamondsystems.com/products/diamondmm32at It is being used on serveral projects inside NASA, without problems so far. For analog input commands, TRIG_EXT is not yet supported at all.. 
Configuration Options: comedi_config /dev/comedi0 dmm32at baseaddr,irq */ #include <linux/interrupt.h> #include "../comedidev.h" #include <linux/ioport.h> #include "comedi_fc.h" /* Board register addresses */ #define DMM32AT_MEMSIZE 0x10 #define DMM32AT_CONV 0x00 #define DMM32AT_AILSB 0x00 #define DMM32AT_AUXDOUT 0x01 #define DMM32AT_AIMSB 0x01 #define DMM32AT_AILOW 0x02 #define DMM32AT_AIHIGH 0x03 #define DMM32AT_DACLSB 0x04 #define DMM32AT_DACSTAT 0x04 #define DMM32AT_DACMSB 0x05 #define DMM32AT_FIFOCNTRL 0x07 #define DMM32AT_FIFOSTAT 0x07 #define DMM32AT_CNTRL 0x08 #define DMM32AT_AISTAT 0x08 #define DMM32AT_INTCLOCK 0x09 #define DMM32AT_CNTRDIO 0x0a #define DMM32AT_AICONF 0x0b #define DMM32AT_AIRBACK 0x0b #define DMM32AT_CLK1 0x0d #define DMM32AT_CLK2 0x0e #define DMM32AT_CLKCT 0x0f #define DMM32AT_DIOA 0x0c #define DMM32AT_DIOB 0x0d #define DMM32AT_DIOC 0x0e #define DMM32AT_DIOCONF 0x0f /* Board register values. */ /* DMM32AT_DACSTAT 0x04 */ #define DMM32AT_DACBUSY 0x80 /* DMM32AT_FIFOCNTRL 0x07 */ #define DMM32AT_FIFORESET 0x02 #define DMM32AT_SCANENABLE 0x04 /* DMM32AT_CNTRL 0x08 */ #define DMM32AT_RESET 0x20 #define DMM32AT_INTRESET 0x08 #define DMM32AT_CLKACC 0x00 #define DMM32AT_DIOACC 0x01 /* DMM32AT_AISTAT 0x08 */ #define DMM32AT_STATUS 0x80 /* DMM32AT_INTCLOCK 0x09 */ #define DMM32AT_ADINT 0x80 #define DMM32AT_CLKSEL 0x03 /* DMM32AT_CNTRDIO 0x0a */ #define DMM32AT_FREQ12 0x80 /* DMM32AT_AICONF 0x0b */ #define DMM32AT_RANGE_U10 0x0c #define DMM32AT_RANGE_U5 0x0d #define DMM32AT_RANGE_B10 0x08 #define DMM32AT_RANGE_B5 0x00 #define DMM32AT_SCINT_20 0x00 #define DMM32AT_SCINT_15 0x10 #define DMM32AT_SCINT_10 0x20 #define DMM32AT_SCINT_5 0x30 /* DMM32AT_CLKCT 0x0f */ #define DMM32AT_CLKCT1 0x56 /* mode3 counter 1 - write low byte only */ #define DMM32AT_CLKCT2 0xb6 /* mode3 counter 2 - write high and low byte */ /* DMM32AT_DIOCONF 0x0f */ #define DMM32AT_DIENABLE 0x80 #define DMM32AT_DIRA 0x10 #define DMM32AT_DIRB 0x02 #define DMM32AT_DIRCL 0x01 #define 
DMM32AT_DIRCH 0x08

/* board AI ranges in comedi structure */
static const struct comedi_lrange dmm32at_airanges = {
	4, {
		UNI_RANGE(10),
		UNI_RANGE(5),
		BIP_RANGE(10),
		BIP_RANGE(5),
	}
};

/* register values for above ranges; indexed by the same range number
 * used with dmm32at_airanges */
static const unsigned char dmm32at_rangebits[] = {
	DMM32AT_RANGE_U10,
	DMM32AT_RANGE_U5,
	DMM32AT_RANGE_B10,
	DMM32AT_RANGE_B5,
};

/* only one of these ranges is valid, as set by a jumper on the
 * board. The application should only use the range set by the jumper */
static const struct comedi_lrange dmm32at_aoranges = {
	4, {
		UNI_RANGE(10),
		UNI_RANGE(5),
		BIP_RANGE(10),
		BIP_RANGE(5),
	}
};

/* per-device driver state, hung off dev->private */
struct dmm32at_private {
	int data;
	int ai_inuse;
	/* scans remaining in the running AI command; 0xffffffff marks
	 * a free-running (TRIG_NONE) acquisition for the ISR */
	unsigned int ai_scans_left;

	/* Used for AO readback */
	unsigned int ao_readback[4];
	unsigned char dio_config;

};

/*
 * Analog input INSN_READ handler: software-triggered single conversions
 * on one channel.  Returns the number of samples read, or -ETIMEDOUT
 * if the settle/conversion status bit never clears.
 */
static int dmm32at_ai_rinsn(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data)
{
	int n, i;
	unsigned int d;
	unsigned char status;
	unsigned short msb, lsb;
	unsigned char chan;
	int range;

	/* get the channel and range number */
	chan = CR_CHAN(insn->chanspec) & (s->n_chan - 1);
	range = CR_RANGE(insn->chanspec);

	/* printk("channel=0x%02x, range=%d\n",chan,range); */

	/* zero scan and fifo control and reset fifo */
	outb(DMM32AT_FIFORESET, dev->iobase + DMM32AT_FIFOCNTRL);

	/* write the ai channel range regs (low == high == chan for a
	 * single-channel scan) */
	outb(chan, dev->iobase + DMM32AT_AILOW);
	outb(chan, dev->iobase + DMM32AT_AIHIGH);
	/* set the range bits */
	outb(dmm32at_rangebits[range], dev->iobase + DMM32AT_AICONF);

	/* wait for circuit to settle */
	for (i = 0; i < 40000; i++) {
		status = inb(dev->iobase + DMM32AT_AIRBACK);
		if ((status & DMM32AT_STATUS) == 0)
			break;
	}
	if (i == 40000) {
		printk(KERN_WARNING "dmm32at: timeout\n");
		return -ETIMEDOUT;
	}

	/* convert n samples */
	for (n = 0; n < insn->n; n++) {
		/* trigger conversion */
		outb(0xff, dev->iobase + DMM32AT_CONV);
		/* wait for conversion to end */
		for (i = 0; i < 40000; i++) {
			status = inb(dev->iobase + DMM32AT_AISTAT);
			if ((status & DMM32AT_STATUS) == 0)
				break;
		}
		if (i == 40000) {
			printk(KERN_WARNING "dmm32at: timeout\n");
			return -ETIMEDOUT;
		}

		/* read data */
		lsb = inb(dev->iobase + DMM32AT_AILSB);
		msb = inb(dev->iobase + DMM32AT_AIMSB);

		/* invert sign bit to make range unsigned, this is an
		 * idiosyncrasy of the diamond board, it return
		 * conversions as a signed value, i.e. -32768 to
		 * 32767, flipping the bit and interpreting it as
		 * signed gives you a range of 0 to 65535 which is
		 * used by comedi */
		d = ((msb ^ 0x0080) << 8) + lsb;
		data[n] = d;
	}

	/* return the number of samples read/written */
	return n;
}

/*
 * This board's scan/convert intervals only come in a few fixed steps
 * (snapped in cmdtest step 3), so no further rounding is done here.
 */
static int dmm32at_ns_to_timer(unsigned int *ns, int round)
{
	/* trivial timer */
	return *ns;
}

/*
 * Validate/fix up an asynchronous AI command (comedi 5-step cmdtest).
 * Returns 0 when the command is acceptable, or the step number that
 * failed.  TRIG_EXT sources are stubbed out (not yet supported).
 */
static int dmm32at_ai_cmdtest(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_cmd *cmd)
{
	int err = 0;
	int tmp;
	int start_chan, gain, i;

	/* Step 1 : check if triggers are trivially valid */

	err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
	err |= cfc_check_trigger_src(&cmd->scan_begin_src,
				     TRIG_TIMER /*| TRIG_EXT */);
	err |= cfc_check_trigger_src(&cmd->convert_src,
				     TRIG_TIMER /*| TRIG_EXT */);
	err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
	err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);

	if (err)
		return 1;

	/* Step 2a : make sure trigger sources are unique */

	err |= cfc_check_trigger_is_unique(cmd->scan_begin_src);
	err |= cfc_check_trigger_is_unique(cmd->convert_src);
	err |= cfc_check_trigger_is_unique(cmd->stop_src);

	/* Step 2b : and mutually compatible */

	if (err)
		return 2;

	/* Step 3: check if arguments are trivially valid */

	err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);

#define MAX_SCAN_SPEED	1000000	/* in nanoseconds */
#define MIN_SCAN_SPEED	1000000000	/* in nanoseconds */

	if (cmd->scan_begin_src == TRIG_TIMER) {
		err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg,
						 MAX_SCAN_SPEED);
		err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg,
						 MIN_SCAN_SPEED);
	} else {
		/* external trigger */
		/* should be level/edge, hi/lo specification here */
		/* should specify multiple external triggers */
		err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, 9);
	}

	if (cmd->convert_src == TRIG_TIMER) {
		/* snap the conversion period to one of the four rates
		 * the hardware supports (5/10/15/20 us) */
		if (cmd->convert_arg >= 17500)
			cmd->convert_arg = 20000;
		else if (cmd->convert_arg >= 12500)
			cmd->convert_arg = 15000;
		else if (cmd->convert_arg >= 7500)
			cmd->convert_arg = 10000;
		else
			cmd->convert_arg = 5000;
	} else {
		/* external trigger */
		/* see above */
		err |= cfc_check_trigger_arg_max(&cmd->convert_arg, 9);
	}

	err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);

	if (cmd->stop_src == TRIG_COUNT) {
		err |= cfc_check_trigger_arg_max(&cmd->stop_arg, 0xfffffff0);
		err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
	} else {
		/* TRIG_NONE */
		err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
	}

	if (err)
		return 3;

	/* step 4: fix up any arguments */

	if (cmd->scan_begin_src == TRIG_TIMER) {
		tmp = cmd->scan_begin_arg;
		dmm32at_ns_to_timer(&cmd->scan_begin_arg,
				    cmd->flags & TRIG_ROUND_MASK);
		if (tmp != cmd->scan_begin_arg)
			err++;
	}
	if (cmd->convert_src == TRIG_TIMER) {
		tmp = cmd->convert_arg;
		dmm32at_ns_to_timer(&cmd->convert_arg,
				    cmd->flags & TRIG_ROUND_MASK);
		if (tmp != cmd->convert_arg)
			err++;
		/* the scan interval must fit all the conversions */
		if (cmd->scan_begin_src == TRIG_TIMER &&
		    cmd->scan_begin_arg <
		    cmd->convert_arg * cmd->scan_end_arg) {
			cmd->scan_begin_arg =
			    cmd->convert_arg * cmd->scan_end_arg;
			err++;
		}
	}

	if (err)
		return 4;

	/* step 5 check the channel list, the channel list for this
	   board must be consecutive and gains must be the same */

	if (cmd->chanlist) {
		gain = CR_RANGE(cmd->chanlist[0]);
		start_chan = CR_CHAN(cmd->chanlist[0]);
		for (i = 1; i < cmd->chanlist_len; i++) {
			if (CR_CHAN(cmd->chanlist[i]) !=
			    (start_chan + i) % s->n_chan) {
				comedi_error(dev,
					     "entries in chanlist must be consecutive channels, counting upwards\n");
				err++;
			}
			if (CR_RANGE(cmd->chanlist[i]) != gain) {
				comedi_error(dev,
					     "entries in chanlist must all have the same gain\n");
				err++;
			}
		}
	}

	if (err)
		return 5;

	return 0;
}

/* Program the 82C54 counters to pace AI scans and enable the AI
 * interrupt; nansec is the requested scan period in nanoseconds. */
static void dmm32at_setaitimer(struct comedi_device *dev, unsigned int nansec)
{
	unsigned char lo1, lo2, hi2;
	unsigned short both2;

	/* based on 10mhz clock: counter 1 divides by 200 (fixed), counter 2
	 * divides by nansec/20000, giving the requested scan period */
	lo1 = 200;
	both2 = nansec / 20000;
	hi2 = (both2 & 0xff00) >> 8;
	lo2 = both2 & 0x00ff;

	/* set the counter frequency to 10mhz */
	outb(0, dev->iobase + DMM32AT_CNTRDIO);

	/* get access to the clock regs */
	outb(DMM32AT_CLKACC, dev->iobase + DMM32AT_CNTRL);

	/* write the counter 1 control word and low byte to counter */
	outb(DMM32AT_CLKCT1, dev->iobase + DMM32AT_CLKCT);
	outb(lo1, dev->iobase + DMM32AT_CLK1);

	/* write the counter 2 control word and low byte then to counter */
	outb(DMM32AT_CLKCT2, dev->iobase + DMM32AT_CLKCT);
	outb(lo2, dev->iobase + DMM32AT_CLK2);
	outb(hi2, dev->iobase + DMM32AT_CLK2);

	/* enable the ai conversion interrupt and the clock to start scans */
	outb(DMM32AT_ADINT | DMM32AT_CLKSEL, dev->iobase + DMM32AT_INTCLOCK);
}

/*
 * Start an asynchronous AI command previously validated by
 * dmm32at_ai_cmdtest.  Sets up the channel scan range and either the
 * pacer clock (multi-scan) or a single software-triggered scan; the
 * samples themselves are collected by dmm32at_isr.
 */
static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
	struct dmm32at_private *devpriv = dev->private;
	struct comedi_cmd *cmd = &s->async->cmd;
	int i, range;
	unsigned char chanlo, chanhi, status;

	if (!cmd->chanlist)
		return -EINVAL;

	/* get the channel list and range */
	chanlo = CR_CHAN(cmd->chanlist[0]) & (s->n_chan - 1);
	chanhi = chanlo + cmd->chanlist_len - 1;
	if (chanhi >= s->n_chan)
		return -EINVAL;
	range = CR_RANGE(cmd->chanlist[0]);

	/* reset fifo */
	outb(DMM32AT_FIFORESET, dev->iobase + DMM32AT_FIFOCNTRL);

	/* set scan enable */
	outb(DMM32AT_SCANENABLE, dev->iobase + DMM32AT_FIFOCNTRL);

	/* write the ai channel range regs */
	outb(chanlo, dev->iobase + DMM32AT_AILOW);
	outb(chanhi, dev->iobase + DMM32AT_AIHIGH);

	/* set the range bits */
	outb(dmm32at_rangebits[range], dev->iobase + DMM32AT_AICONF);

	/* reset the interrupt just in case */
	outb(DMM32AT_INTRESET, dev->iobase + DMM32AT_CNTRL);

	if (cmd->stop_src == TRIG_COUNT)
		devpriv->ai_scans_left = cmd->stop_arg;
	else {			/* TRIG_NONE */
		devpriv->ai_scans_left = 0xffffffff; /* indicates TRIG_NONE to
						      * isr */
	}

	/* wait for circuit to settle */
	for (i = 0; i < 40000; i++) {
		status = inb(dev->iobase + DMM32AT_AIRBACK);
		if ((status & DMM32AT_STATUS) == 0)
			break;
	}
	if (i == 40000) {
		printk(KERN_WARNING "dmm32at: timeout\n");
		return -ETIMEDOUT;
	}

	if (devpriv->ai_scans_left > 1) {
		/* start the clock and enable the interrupts */
		dmm32at_setaitimer(dev, cmd->scan_begin_arg);
	} else {
		/* start the interrups and initiate a single scan */
		outb(DMM32AT_ADINT, dev->iobase + DMM32AT_INTCLOCK);
		outb(0xff, dev->iobase + DMM32AT_CONV);
	}

	/* printk("dmmat32 in command\n"); */

	/* for(i=0;i<cmd->chanlist_len;i++) */
	/*	comedi_buf_put(s->async,i*100); */

	/* s->async->events |= COMEDI_CB_EOA; */
	/* comedi_event(dev, s); */

	return 0;
}

/* Cancel a running AI command: force the ISR to see one scan left so
 * it shuts the acquisition down on the next interrupt. */
static int dmm32at_ai_cancel(struct comedi_device *dev,
			     struct comedi_subdevice *s)
{
	struct dmm32at_private *devpriv = dev->private;

	devpriv->ai_scans_left = 1;
	return 0;
}

/*
 * Interrupt handler: drains one scan's worth of samples from the FIFO
 * into the comedi buffer and, for TRIG_COUNT commands, ends the
 * acquisition (EOA) when the requested number of scans is reached.
 */
static irqreturn_t dmm32at_isr(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct dmm32at_private *devpriv = dev->private;
	unsigned char intstat;
	unsigned int samp;
	unsigned short msb, lsb;
	int i;

	if (!dev->attached) {
		comedi_error(dev, "spurious interrupt");
		return IRQ_HANDLED;
	}

	intstat = inb(dev->iobase + DMM32AT_INTCLOCK);

	if (intstat & DMM32AT_ADINT) {
		struct comedi_subdevice *s = dev->read_subdev;
		struct comedi_cmd *cmd = &s->async->cmd;

		for (i = 0; i < cmd->chanlist_len; i++) {
			/* read data */
			lsb = inb(dev->iobase + DMM32AT_AILSB);
			msb = inb(dev->iobase + DMM32AT_AIMSB);

			/* invert sign bit to make range unsigned */
			samp = ((msb ^ 0x0080) << 8) + lsb;
			comedi_buf_put(s->async, samp);
		}

		if (devpriv->ai_scans_left != 0xffffffff) {	/* TRIG_COUNT */
			devpriv->ai_scans_left--;
			if (devpriv->ai_scans_left == 0) {
				/* disable further interrupts and clocks */
				outb(0x0, dev->iobase + DMM32AT_INTCLOCK);
				/* set the buffer to be flushed with an EOF */
				s->async->events |= COMEDI_CB_EOA;
			}

		}
		/* flush the buffer */
		comedi_event(dev, s);
	}

	/* reset the interrupt */
	outb(DMM32AT_INTRESET, dev->iobase + DMM32AT_CNTRL);
	return IRQ_HANDLED;
}

static int
dmm32at_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct dmm32at_private *devpriv = dev->private; int i; int chan = CR_CHAN(insn->chanspec); unsigned char hi, lo, status; /* Writing a list of values to an AO channel is probably not * very useful, but that's how the interface is defined. */ for (i = 0; i < insn->n; i++) { devpriv->ao_readback[chan] = data[i]; /* get the low byte */ lo = data[i] & 0x00ff; /* high byte also contains channel number */ hi = (data[i] >> 8) + chan * (1 << 6); /* printk("writing 0x%02x 0x%02x\n",hi,lo); */ /* write the low and high values to the board */ outb(lo, dev->iobase + DMM32AT_DACLSB); outb(hi, dev->iobase + DMM32AT_DACMSB); /* wait for circuit to settle */ for (i = 0; i < 40000; i++) { status = inb(dev->iobase + DMM32AT_DACSTAT); if ((status & DMM32AT_DACBUSY) == 0) break; } if (i == 40000) { printk(KERN_WARNING "dmm32at: timeout\n"); return -ETIMEDOUT; } /* dummy read to update trigger the output */ status = inb(dev->iobase + DMM32AT_DACMSB); } /* return the number of samples read/written */ return i; } static int dmm32at_ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct dmm32at_private *devpriv = dev->private; int i; int chan = CR_CHAN(insn->chanspec); for (i = 0; i < insn->n; i++) data[i] = devpriv->ao_readback[chan]; return i; } static int dmm32at_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct dmm32at_private *devpriv = dev->private; unsigned char diobits; /* The insn data is a mask in data[0] and the new data * in data[1], each channel cooresponding to a bit. 
*/ if (data[0]) { s->state &= ~data[0]; s->state |= data[0] & data[1]; /* Write out the new digital output lines */ /* outw(s->state,dev->iobase + DMM32AT_DIO); */ } /* get access to the DIO regs */ outb(DMM32AT_DIOACC, dev->iobase + DMM32AT_CNTRL); /* if either part of dio is set for output */ if (((devpriv->dio_config & DMM32AT_DIRCL) == 0) || ((devpriv->dio_config & DMM32AT_DIRCH) == 0)) { diobits = (s->state & 0x00ff0000) >> 16; outb(diobits, dev->iobase + DMM32AT_DIOC); } if ((devpriv->dio_config & DMM32AT_DIRB) == 0) { diobits = (s->state & 0x0000ff00) >> 8; outb(diobits, dev->iobase + DMM32AT_DIOB); } if ((devpriv->dio_config & DMM32AT_DIRA) == 0) { diobits = (s->state & 0x000000ff); outb(diobits, dev->iobase + DMM32AT_DIOA); } /* now read the state back in */ s->state = inb(dev->iobase + DMM32AT_DIOC); s->state <<= 8; s->state |= inb(dev->iobase + DMM32AT_DIOB); s->state <<= 8; s->state |= inb(dev->iobase + DMM32AT_DIOA); data[1] = s->state; /* on return, data[1] contains the value of the digital * input and output lines. */ /* data[1]=inw(dev->iobase + DMM32AT_DIO); */ /* or we could just return the software copy of the output values if * it was a purely digital output subdevice */ /* data[1]=s->state; */ return insn->n; } static int dmm32at_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct dmm32at_private *devpriv = dev->private; unsigned char chanbit; int chan = CR_CHAN(insn->chanspec); if (insn->n != 1) return -EINVAL; if (chan < 8) chanbit = DMM32AT_DIRA; else if (chan < 16) chanbit = DMM32AT_DIRB; else if (chan < 20) chanbit = DMM32AT_DIRCL; else chanbit = DMM32AT_DIRCH; /* The input or output configuration of each digital line is * configured by a special insn_config instruction. chanspec * contains the channel to be changed, and data[0] contains the * value COMEDI_INPUT or COMEDI_OUTPUT. 
*/ /* if output clear the bit, otherwise set it */ if (data[0] == COMEDI_OUTPUT) devpriv->dio_config &= ~chanbit; else devpriv->dio_config |= chanbit; /* get access to the DIO regs */ outb(DMM32AT_DIOACC, dev->iobase + DMM32AT_CNTRL); /* set the DIO's to the new configuration setting */ outb(devpriv->dio_config, dev->iobase + DMM32AT_DIOCONF); return 1; } static int dmm32at_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct dmm32at_private *devpriv; int ret; struct comedi_subdevice *s; unsigned char aihi, ailo, fifostat, aistat, intstat, airback; unsigned int irq; irq = it->options[1]; ret = comedi_request_region(dev, it->options[0], DMM32AT_MEMSIZE); if (ret) return ret; /* the following just makes sure the board is there and gets it to a known state */ /* reset the board */ outb(DMM32AT_RESET, dev->iobase + DMM32AT_CNTRL); /* allow a millisecond to reset */ udelay(1000); /* zero scan and fifo control */ outb(0x0, dev->iobase + DMM32AT_FIFOCNTRL); /* zero interrupt and clock control */ outb(0x0, dev->iobase + DMM32AT_INTCLOCK); /* write a test channel range, the high 3 bits should drop */ outb(0x80, dev->iobase + DMM32AT_AILOW); outb(0xff, dev->iobase + DMM32AT_AIHIGH); /* set the range at 10v unipolar */ outb(DMM32AT_RANGE_U10, dev->iobase + DMM32AT_AICONF); /* should take 10 us to settle, here's a hundred */ udelay(100); /* read back the values */ ailo = inb(dev->iobase + DMM32AT_AILOW); aihi = inb(dev->iobase + DMM32AT_AIHIGH); fifostat = inb(dev->iobase + DMM32AT_FIFOSTAT); aistat = inb(dev->iobase + DMM32AT_AISTAT); intstat = inb(dev->iobase + DMM32AT_INTCLOCK); airback = inb(dev->iobase + DMM32AT_AIRBACK); printk(KERN_DEBUG "dmm32at: lo=0x%02x hi=0x%02x fifostat=0x%02x\n", ailo, aihi, fifostat); printk(KERN_DEBUG "dmm32at: aistat=0x%02x intstat=0x%02x airback=0x%02x\n", aistat, intstat, airback); if ((ailo != 0x00) || (aihi != 0x1f) || (fifostat != 0x80) || (aistat != 0x60 || (intstat != 0x00) || airback != 0x0c)) { printk(KERN_ERR 
"dmmat32: board detection failed\n"); return -EIO; } /* board is there, register interrupt */ if (irq) { ret = request_irq(irq, dmm32at_isr, 0, dev->board_name, dev); if (ret < 0) { printk(KERN_ERR "dmm32at: irq conflict\n"); return ret; } dev->irq = irq; } devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL); if (!devpriv) return -ENOMEM; dev->private = devpriv; ret = comedi_alloc_subdevices(dev, 3); if (ret) return ret; s = &dev->subdevices[0]; dev->read_subdev = s; /* analog input subdevice */ s->type = COMEDI_SUBD_AI; /* we support single-ended (ground) and differential */ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ; s->n_chan = 32; s->maxdata = 0xffff; s->range_table = &dmm32at_airanges; s->len_chanlist = 32; /* This is the maximum chanlist length that the board can handle */ s->insn_read = dmm32at_ai_rinsn; s->do_cmd = dmm32at_ai_cmd; s->do_cmdtest = dmm32at_ai_cmdtest; s->cancel = dmm32at_ai_cancel; s = &dev->subdevices[1]; /* analog output subdevice */ s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE; s->n_chan = 4; s->maxdata = 0x0fff; s->range_table = &dmm32at_aoranges; s->insn_write = dmm32at_ao_winsn; s->insn_read = dmm32at_ao_rinsn; s = &dev->subdevices[2]; /* digital i/o subdevice */ /* get access to the DIO regs */ outb(DMM32AT_DIOACC, dev->iobase + DMM32AT_CNTRL); /* set the DIO's to the defualt input setting */ devpriv->dio_config = DMM32AT_DIRA | DMM32AT_DIRB | DMM32AT_DIRCL | DMM32AT_DIRCH | DMM32AT_DIENABLE; outb(devpriv->dio_config, dev->iobase + DMM32AT_DIOCONF); /* set up the subdevice */ s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 24; s->maxdata = 1; s->state = 0; s->range_table = &range_digital; s->insn_bits = dmm32at_dio_insn_bits; s->insn_config = dmm32at_dio_insn_config; /* success */ printk(KERN_INFO "comedi%d: dmm32at: attached\n", dev->minor); return 1; } static struct comedi_driver dmm32at_driver = { .driver_name = "dmm32at", .module = THIS_MODULE, .attach = 
dmm32at_attach, .detach = comedi_legacy_detach, }; module_comedi_driver(dmm32at_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
ShinySide/kernel_T230NU_ND4
drivers/media/pci/ivtv/ivtv-ioctl.c
2090
53989
/* ioctl system call
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ivtv-driver.h"
#include "ivtv-version.h"
#include "ivtv-mailbox.h"
#include "ivtv-i2c.h"
#include "ivtv-queue.h"
#include "ivtv-fileops.h"
#include "ivtv-vbi.h"
#include "ivtv-routing.h"
#include "ivtv-streams.h"
#include "ivtv-yuv.h"
#include "ivtv-ioctl.h"
#include "ivtv-gpio.h"
#include "ivtv-controls.h"
#include "ivtv-cards.h"
#include <media/saa7127.h>
#include <media/tveeprom.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-event.h>
#include <linux/dvb/audio.h>

/* Map a V4L2 sliced VBI service to the corresponding ivtv firmware
 * service type; returns 0 for unsupported services. */
u16 ivtv_service2vbi(int type)
{
	switch (type) {
	case V4L2_SLICED_TELETEXT_B:
		return IVTV_SLICED_TYPE_TELETEXT_B;
	case V4L2_SLICED_CAPTION_525:
		return IVTV_SLICED_TYPE_CAPTION_525;
	case V4L2_SLICED_WSS_625:
		return IVTV_SLICED_TYPE_WSS_625;
	case V4L2_SLICED_VPS:
		return IVTV_SLICED_TYPE_VPS;
	default:
		return 0;
	}
}

/* Is this field/line combination usable for sliced VBI services? */
static int valid_service_line(int field, int line, int is_pal)
{
	return (is_pal && line >= 6 && (line != 23 || field == 0)) ||
	       (!is_pal && line >= 10 && line < 22);
}

/* Pick the single service from 'set' appropriate for the given
 * field/line, honoring the fixed line assignments (CC on line 21 for
 * NTSC; VPS on 16 and WSS on 23 for PAL); falls back to the lowest
 * set bit.  Returns 0 when nothing in 'set' fits this line. */
static u16 select_service_from_set(int field, int line, u16 set, int is_pal)
{
	u16 valid_set = (is_pal ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525);
	int i;

	set = set & valid_set;
	if (set == 0 || !valid_service_line(field, line, is_pal)) {
		return 0;
	}
	if (!is_pal) {
		if (line == 21 && (set & V4L2_SLICED_CAPTION_525))
			return V4L2_SLICED_CAPTION_525;
	} else {
		if (line == 16 && field == 0 && (set & V4L2_SLICED_VPS))
			return V4L2_SLICED_VPS;
		if (line == 23 && field == 0 && (set & V4L2_SLICED_WSS_625))
			return V4L2_SLICED_WSS_625;
		if (line == 23)
			return 0;
	}
	for (i = 0; i < 32; i++) {
		if ((1 << i) & set)
			return 1 << i;
	}
	return 0;
}

/* Expand fmt->service_set into the per-field/per-line
 * fmt->service_lines[][] table. */
void ivtv_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal)
{
	u16 set = fmt->service_set;
	int f, l;

	fmt->service_set = 0;
	for (f = 0; f < 2; f++) {
		for (l = 0; l < 24; l++) {
			fmt->service_lines[f][l] = select_service_from_set(f, l, set, is_pal);
		}
	}
}

/* Drop any per-line services the hardware cannot provide on that line. */
static void check_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal)
{
	int f, l;

	for (f = 0; f < 2; f++) {
		for (l = 0; l < 24; l++) {
			fmt->service_lines[f][l] = select_service_from_set(f, l, fmt->service_lines[f][l], is_pal);
		}
	}
}

/* OR together every per-line service into a single service_set mask. */
u16 ivtv_get_service_set(struct v4l2_sliced_vbi_format *fmt)
{
	int f, l;
	u16 set = 0;

	for (f = 0; f < 2; f++) {
		for (l = 0; l < 24; l++) {
			set |= fmt->service_lines[f][l];
		}
	}
	return set;
}

/* Push the cached OSD alpha and chroma-key state to the firmware. */
void ivtv_set_osd_alpha(struct ivtv *itv)
{
	ivtv_vapi(itv, CX2341X_OSD_SET_GLOBAL_ALPHA, 3,
		itv->osd_global_alpha_state, itv->osd_global_alpha,
		!itv->osd_local_alpha_state);
	ivtv_vapi(itv, CX2341X_OSD_SET_CHROMA_KEY, 2,
		itv->osd_chroma_key_state, itv->osd_chroma_key);
}

/* Set the decoder playback speed.  speed is in "per mille of normal"
 * units (1000 = normal); +/-1 requests a single frame step; 0 means
 * normal speed.  May sleep waiting for DMA to drain. */
int ivtv_set_speed(struct ivtv *itv, int speed)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	int single_step = (speed == 1 || speed == -1);
	DEFINE_WAIT(wait);

	if (speed == 0)
		speed = 1000;

	/* No change?
	 */
	if (speed == itv->speed && !single_step)
		return 0;

	if (single_step && (speed < 0) == (itv->speed < 0)) {
		/* Single step video and no need to change direction */
		ivtv_vapi(itv, CX2341X_DEC_STEP_VIDEO, 1, 0);
		itv->speed = speed;
		return 0;
	}
	if (single_step)
		/* Need to change direction */
		speed = speed < 0 ? -1000 : 1000;

	/* build the CX2341X_DEC_SET_PLAYBACK_SPEED mailbox arguments */
	data[0] = (speed > 1000 || speed < -1000) ? 0x80000000 : 0;
	data[0] |= (speed > 1000 || speed < -1500) ? 0x40000000 : 0;
	data[1] = (speed < 0);
	data[2] = speed < 0 ? 3 : 7;
	data[3] = v4l2_ctrl_g_ctrl(itv->cxhdl.video_b_frames);
	data[4] = (speed == 1500 || speed == 500) ? itv->speed_mute_audio : 0;
	data[5] = 0;
	data[6] = 0;

	if (speed == 1500 || speed == -1500)
		data[0] |= 1;
	else if (speed == 2000 || speed == -2000)
		data[0] |= 2;
	else if (speed > -1000 && speed < 0)
		data[0] |= (-1000 / speed);
	else if (speed < 1000 && speed > 0)
		data[0] |= (1000 / speed);

	/* If not decoding, just change speed setting */
	if (atomic_read(&itv->decoding) > 0) {
		int got_sig = 0;

		/* Stop all DMA and decoding activity */
		ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0);

		/* Wait for any DMA to finish */
		mutex_unlock(&itv->serialize_lock);
		prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
		while (test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
			got_sig = signal_pending(current);
			if (got_sig)
				break;
			got_sig = 0;
			schedule();
		}
		finish_wait(&itv->dma_waitq, &wait);
		mutex_lock(&itv->serialize_lock);
		if (got_sig)
			return -EINTR;

		/* Change Speed safely */
		ivtv_api(itv, CX2341X_DEC_SET_PLAYBACK_SPEED, 7, data);
		IVTV_DEBUG_INFO("Setting Speed to 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
				data[0], data[1], data[2], data[3], data[4], data[5], data[6]);
	}
	if (single_step) {
		speed = (speed < 0) ? -1 : 1;
		ivtv_vapi(itv, CX2341X_DEC_STEP_VIDEO, 1, 0);
	}
	itv->speed = speed;
	return 0;
}

/* Clamp a requested playback speed to one the decoder supports,
 * given the current speed (fast speeds snap to 1500/2000; slow speeds
 * snap to the nearest supported divisor of 1000). */
static int ivtv_validate_speed(int cur_speed, int new_speed)
{
	int fact = new_speed < 0 ? -1 : 1;
	int s;

	if (cur_speed == 0)
		cur_speed = 1000;
	if (new_speed < 0)
		new_speed = -new_speed;
	if (cur_speed < 0)
		cur_speed = -cur_speed;

	if (cur_speed <= new_speed) {
		if (new_speed > 1500)
			return fact * 2000;
		if (new_speed > 1000)
			return fact * 1500;
	} else {
		if (new_speed >= 2000)
			return fact * 2000;
		if (new_speed >= 1500)
			return fact * 1500;
		if (new_speed >= 1000)
			return fact * 1000;
	}
	if (new_speed == 0)
		return 1000;
	if (new_speed == 1 || new_speed == 1000)
		return fact * new_speed;

	s = new_speed;
	new_speed = 1000 / new_speed;
	if (1000 / cur_speed == new_speed)
		new_speed += (cur_speed < s) ? -1 : 1;
	if (new_speed > 60)
		return 1000 / (fact * 60);
	return 1000 / (fact * new_speed);
}

/* Handle a V4L2 decoder command (start/stop/pause/resume); when 'try'
 * is nonzero only validate and fix up the command without acting. */
static int ivtv_video_command(struct ivtv *itv, struct ivtv_open_id *id,
		struct v4l2_decoder_cmd *dc, int try)
{
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];

	if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
		return -EINVAL;

	switch (dc->cmd) {
	case V4L2_DEC_CMD_START: {
		dc->flags &= V4L2_DEC_CMD_START_MUTE_AUDIO;
		dc->start.speed = ivtv_validate_speed(itv->speed, dc->start.speed);
		if (dc->start.speed < 0)
			dc->start.format = V4L2_DEC_START_FMT_GOP;
		else
			dc->start.format = V4L2_DEC_START_FMT_NONE;
		if (dc->start.speed != 500 && dc->start.speed != 1500)
			dc->flags = dc->start.speed == 1000 ? 0 :
					V4L2_DEC_CMD_START_MUTE_AUDIO;
		if (try)
			break;

		itv->speed_mute_audio = dc->flags & V4L2_DEC_CMD_START_MUTE_AUDIO;
		if (ivtv_set_output_mode(itv, OUT_MPG) != OUT_MPG)
			return -EBUSY;
		if (test_and_clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags)) {
			/* forces ivtv_set_speed to be called */
			itv->speed = 0;
		}
		return ivtv_start_decoding(id, dc->start.speed);
	}

	case V4L2_DEC_CMD_STOP:
		dc->flags &= V4L2_DEC_CMD_STOP_IMMEDIATELY | V4L2_DEC_CMD_STOP_TO_BLACK;
		if (dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY)
			dc->stop.pts = 0;
		if (try)
			break;
		if (atomic_read(&itv->decoding) == 0)
			return 0;
		if (itv->output_mode != OUT_MPG)
			return -EBUSY;

		itv->output_mode = OUT_NONE;
		return ivtv_stop_v4l2_decode_stream(s, dc->flags, dc->stop.pts);

	case V4L2_DEC_CMD_PAUSE:
		dc->flags &= V4L2_DEC_CMD_PAUSE_TO_BLACK;
		if (try)
			break;
		if (!atomic_read(&itv->decoding))
			return -EPERM;
		if (itv->output_mode != OUT_MPG)
			return -EBUSY;
		if (atomic_read(&itv->decoding) > 0) {
			ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1,
				(dc->flags & V4L2_DEC_CMD_PAUSE_TO_BLACK) ? 1 : 0);
			set_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags);
		}
		break;

	case V4L2_DEC_CMD_RESUME:
		dc->flags = 0;
		if (try)
			break;
		if (!atomic_read(&itv->decoding))
			return -EPERM;
		if (itv->output_mode != OUT_MPG)
			return -EBUSY;
		if (test_and_clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags)) {
			int speed = itv->speed;
			itv->speed = 0;
			return ivtv_start_decoding(id, speed);
		}
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

/* VIDIOC_G_FMT for the sliced VBI output: report the fixed line
 * assignments (CC for 60 Hz, WSS/VPS for 50 Hz). */
static int ivtv_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
	struct ivtv *itv = fh2id(fh)->itv;
	struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;

	vbifmt->reserved[0] = 0;
	vbifmt->reserved[1] = 0;
	if (!(itv->v4l2_cap & V4L2_CAP_SLICED_VBI_OUTPUT))
		return -EINVAL;
	vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
	memset(vbifmt->service_lines, 0, sizeof(vbifmt->service_lines));
	if (itv->is_60hz) {
		vbifmt->service_lines[0][21] = V4L2_SLICED_CAPTION_525;
		vbifmt->service_lines[1][21] = V4L2_SLICED_CAPTION_525;
	} else {
		vbifmt->service_lines[0][23] = V4L2_SLICED_WSS_625;
		vbifmt->service_lines[0][16] = V4L2_SLICED_VPS;
	}
	vbifmt->service_set = ivtv_get_service_set(vbifmt);
	return 0;
}

/* VIDIOC_G_FMT for video capture: current encoder resolution, HM12
 * raw YUV for the YUV stream, MPEG otherwise. */
static int ivtv_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
	struct ivtv_open_id *id = fh2id(fh);
	struct ivtv *itv = id->itv;
	struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;

	pixfmt->width = itv->cxhdl.width;
	pixfmt->height = itv->cxhdl.height;
	pixfmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
	pixfmt->field = V4L2_FIELD_INTERLACED;
	pixfmt->priv = 0;
	if (id->type == IVTV_ENC_STREAM_TYPE_YUV) {
		pixfmt->pixelformat = V4L2_PIX_FMT_HM12;
		/* YUV size is (Y=(h*720) + UV=(h*(720/2))) */
		pixfmt->sizeimage = pixfmt->height * 720 * 3 / 2;
		pixfmt->bytesperline = 720;
	} else {
		pixfmt->pixelformat = V4L2_PIX_FMT_MPEG;
		pixfmt->sizeimage = 128 * 1024;
		pixfmt->bytesperline = 0;
	}
	return 0;
}

/* VIDIOC_G_FMT for raw VBI capture: fixed sampling parameters of the
 * cx23415/6 raw VBI decoder. */
static int ivtv_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
	struct ivtv *itv = fh2id(fh)->itv;
	struct v4l2_vbi_format *vbifmt =
&fmt->fmt.vbi;

	vbifmt->sampling_rate = 27000000;
	vbifmt->offset = 248;
	vbifmt->samples_per_line = itv->vbi.raw_decoder_line_size - 4;
	vbifmt->sample_format = V4L2_PIX_FMT_GREY;
	vbifmt->start[0] = itv->vbi.start[0];
	vbifmt->start[1] = itv->vbi.start[1];
	vbifmt->count[0] = vbifmt->count[1] = itv->vbi.count;
	vbifmt->flags = 0;
	vbifmt->reserved[0] = 0;
	vbifmt->reserved[1] = 0;
	return 0;
}

/* VIDIOC_G_FMT for sliced VBI capture: for the decoder VBI stream the
 * full service set is reported; otherwise the video subdev is asked. */
static int ivtv_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
	struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
	struct ivtv_open_id *id = fh2id(fh);
	struct ivtv *itv = id->itv;

	vbifmt->reserved[0] = 0;
	vbifmt->reserved[1] = 0;
	vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;

	if (id->type == IVTV_DEC_STREAM_TYPE_VBI) {
		vbifmt->service_set = itv->is_50hz ? V4L2_SLICED_VBI_625 :
			V4L2_SLICED_VBI_525;
		ivtv_expand_service_set(vbifmt, itv->is_50hz);
		vbifmt->service_set = ivtv_get_service_set(vbifmt);
		return 0;
	}

	v4l2_subdev_call(itv->sd_video, vbi, g_sliced_fmt, vbifmt);
	vbifmt->service_set = ivtv_get_service_set(vbifmt);
	return 0;
}

/* VIDIOC_G_FMT for video output: main display rectangle, or the YUV
 * source dimensions and interlacing mode for the YUV stream. */
static int ivtv_g_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
	struct ivtv_open_id *id = fh2id(fh);
	struct ivtv *itv = id->itv;
	struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;

	if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
		return -EINVAL;
	pixfmt->width = itv->main_rect.width;
	pixfmt->height = itv->main_rect.height;
	pixfmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
	pixfmt->field = V4L2_FIELD_INTERLACED;
	pixfmt->priv = 0;
	if (id->type == IVTV_DEC_STREAM_TYPE_YUV) {
		switch (itv->yuv_info.lace_mode & IVTV_YUV_MODE_MASK) {
		case IVTV_YUV_MODE_INTERLACED:
			pixfmt->field = (itv->yuv_info.lace_mode & IVTV_YUV_SYNC_MASK) ?
				V4L2_FIELD_INTERLACED_BT : V4L2_FIELD_INTERLACED_TB;
			break;
		case IVTV_YUV_MODE_PROGRESSIVE:
			pixfmt->field = V4L2_FIELD_NONE;
			break;
		default:
			pixfmt->field = V4L2_FIELD_ANY;
			break;
		}
		pixfmt->pixelformat = V4L2_PIX_FMT_HM12;
		pixfmt->bytesperline = 720;
		pixfmt->width = itv->yuv_info.v4l2_src_w;
		pixfmt->height = itv->yuv_info.v4l2_src_h;
		/* YUV size is (Y=(h*w) + UV=(h*(w/2))) */
		pixfmt->sizeimage = 1080 * ((pixfmt->height + 31) & ~31);
	} else {
		pixfmt->pixelformat = V4L2_PIX_FMT_MPEG;
		pixfmt->sizeimage = 128 * 1024;
		pixfmt->bytesperline = 0;
	}
	return 0;
}

/* VIDIOC_G_FMT for the OSD overlay window. */
static int ivtv_g_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
	struct ivtv *itv = fh2id(fh)->itv;
	struct v4l2_window *winfmt = &fmt->fmt.win;

	if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
		return -EINVAL;
	winfmt->chromakey = itv->osd_chroma_key;
	winfmt->global_alpha = itv->osd_global_alpha;
	winfmt->field = V4L2_FIELD_INTERLACED;
	winfmt->clips = NULL;
	winfmt->clipcount = 0;
	winfmt->bitmap = NULL;
	winfmt->w.top = winfmt->w.left = 0;
	winfmt->w.width = itv->osd_rect.width;
	winfmt->w.height = itv->osd_rect.height;
	return 0;
}

/* The sliced VBI output format is fixed, so "try" just reports it. */
static int ivtv_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
	return ivtv_g_fmt_sliced_vbi_out(file, fh, fmt);
}

/* VIDIOC_TRY_FMT for video capture: clamp width/height to what the
 * encoder supports (YUV height must be a multiple of 32). */
static int ivtv_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
	struct ivtv_open_id *id = fh2id(fh);
	struct ivtv *itv = id->itv;
	int w = fmt->fmt.pix.width;
	int h = fmt->fmt.pix.height;
	int min_h = 2;

	w = min(w, 720);
	w = max(w, 2);
	if (id->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* YUV height must be a multiple of 32 */
		h &= ~0x1f;
		min_h = 32;
	}
	h = min(h, itv->is_50hz ? 576 : 480);
	h = max(h, min_h);
	ivtv_g_fmt_vid_cap(file, fh, fmt);
	fmt->fmt.pix.width = w;
	fmt->fmt.pix.height = h;
	return 0;
}

/* The raw VBI capture format is fixed, so "try" just reports it. */
static int ivtv_try_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
	return ivtv_g_fmt_vbi_cap(file, fh, fmt);
}

/* VIDIOC_TRY_FMT for sliced VBI capture: expand and validate the
 * requested service set against what the hardware supports. */
static int ivtv_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
	struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
	struct ivtv_open_id *id = fh2id(fh);
	struct ivtv *itv = id->itv;

	if (id->type == IVTV_DEC_STREAM_TYPE_VBI)
		return ivtv_g_fmt_sliced_vbi_cap(file, fh, fmt);

	/* set sliced VBI capture format */
	vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
	vbifmt->reserved[0] = 0;
	vbifmt->reserved[1] = 0;

	if (vbifmt->service_set)
		ivtv_expand_service_set(vbifmt, itv->is_50hz);
	check_service_set(vbifmt, itv->is_50hz);
	vbifmt->service_set = ivtv_get_service_set(vbifmt);
	return 0;
}

/* VIDIOC_TRY_FMT for video output: clamp the display size to the
 * decoder's limits. */
static int ivtv_try_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
	struct ivtv_open_id *id = fh2id(fh);
	s32 w = fmt->fmt.pix.width;
	s32 h = fmt->fmt.pix.height;
	int field = fmt->fmt.pix.field;
	int ret = ivtv_g_fmt_vid_out(file, fh, fmt);

	w = min(w, 720);
	w = max(w, 2);
	/* Why can the height be 576 even when the output is NTSC?

	   Internally the buffers of the PVR350 are always set to 720x576. The
	   decoded video frame will always be placed in the top left corner of
	   this buffer. For any video which is not 720x576, the buffer will
	   then be cropped to remove the unused right and lower areas, with
	   the remaining image being scaled by the hardware to fit the display
	   area. The video can be scaled both up and down, so a 720x480 video
	   can be displayed full-screen on PAL and a 720x576 video can be
	   displayed without cropping on NTSC.

	   Note that the scaling only occurs on the video stream, the osd
	   resolution is locked to the broadcast standard and not scaled.

	   Thanks to Ian Armstrong for this explanation. */
	h = min(h, 576);
	h = max(h, 2);
	if (id->type == IVTV_DEC_STREAM_TYPE_YUV)
		fmt->fmt.pix.field = field;
	fmt->fmt.pix.width = w;
	fmt->fmt.pix.height = h;
	return ret;
}

/* VIDIOC_TRY_FMT for the OSD overlay: only chromakey and global alpha
 * are negotiable; everything else is fixed. */
static int ivtv_try_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
	struct ivtv *itv = fh2id(fh)->itv;
	u32 chromakey = fmt->fmt.win.chromakey;
	u8 global_alpha = fmt->fmt.win.global_alpha;

	if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
		return -EINVAL;
	ivtv_g_fmt_vid_out_overlay(file, fh, fmt);
	fmt->fmt.win.chromakey = chromakey;
	fmt->fmt.win.global_alpha = global_alpha;
	return 0;
}

/* The sliced VBI output format is fixed, so "set" just reports it. */
static int ivtv_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
	return ivtv_g_fmt_sliced_vbi_out(file, fh, fmt);
}

/* VIDIOC_S_FMT for video capture: change the encoder resolution
 * (refused while capturing) and propagate it to the video subdev. */
static int ivtv_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
	struct ivtv_open_id *id = fh2id(fh);
	struct ivtv *itv = id->itv;
	struct v4l2_mbus_framefmt mbus_fmt;
	int ret = ivtv_try_fmt_vid_cap(file, fh, fmt);
	int w = fmt->fmt.pix.width;
	int h = fmt->fmt.pix.height;

	if (ret)
		return ret;

	if (itv->cxhdl.width == w && itv->cxhdl.height == h)
		return 0;

	if (atomic_read(&itv->capturing) > 0)
		return -EBUSY;

	itv->cxhdl.width = w;
	itv->cxhdl.height = h;
	if (v4l2_ctrl_g_ctrl(itv->cxhdl.video_encoding) == V4L2_MPEG_VIDEO_ENCODING_MPEG_1)
		fmt->fmt.pix.width /= 2;
	mbus_fmt.width = fmt->fmt.pix.width;
	mbus_fmt.height = h;
	mbus_fmt.code = V4L2_MBUS_FMT_FIXED;
	v4l2_subdev_call(itv->sd_video, video, s_mbus_fmt, &mbus_fmt);
	return ivtv_g_fmt_vid_cap(file, fh, fmt);
}

/* VIDIOC_S_FMT for raw VBI capture: switch VBI to raw mode (refused
 * while a sliced capture is running). */
static int ivtv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
	struct ivtv *itv = fh2id(fh)->itv;

	if (!ivtv_raw_vbi(itv) && atomic_read(&itv->capturing) > 0)
		return -EBUSY;
	itv->vbi.sliced_in->service_set = 0;
	itv->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE;
	v4l2_subdev_call(itv->sd_video, vbi, s_raw_fmt, &fmt->fmt.vbi);
	return ivtv_g_fmt_vbi_cap(file, fh, fmt);
}

static int ivtv_s_fmt_sliced_vbi_cap(struct file *file, void *fh,
					struct v4l2_format *fmt)
{
	struct
v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced; struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; int ret = ivtv_try_fmt_sliced_vbi_cap(file, fh, fmt); if (ret || id->type == IVTV_DEC_STREAM_TYPE_VBI) return ret; check_service_set(vbifmt, itv->is_50hz); if (ivtv_raw_vbi(itv) && atomic_read(&itv->capturing) > 0) return -EBUSY; itv->vbi.in.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE; v4l2_subdev_call(itv->sd_video, vbi, s_sliced_fmt, vbifmt); memcpy(itv->vbi.sliced_in, vbifmt, sizeof(*itv->vbi.sliced_in)); return 0; } static int ivtv_s_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; struct yuv_playback_info *yi = &itv->yuv_info; int ret = ivtv_try_fmt_vid_out(file, fh, fmt); if (ret) return ret; if (id->type != IVTV_DEC_STREAM_TYPE_YUV) return 0; /* Return now if we already have some frame data */ if (yi->stream_size) return -EBUSY; yi->v4l2_src_w = fmt->fmt.pix.width; yi->v4l2_src_h = fmt->fmt.pix.height; switch (fmt->fmt.pix.field) { case V4L2_FIELD_NONE: yi->lace_mode = IVTV_YUV_MODE_PROGRESSIVE; break; case V4L2_FIELD_ANY: yi->lace_mode = IVTV_YUV_MODE_AUTO; break; case V4L2_FIELD_INTERLACED_BT: yi->lace_mode = IVTV_YUV_MODE_INTERLACED|IVTV_YUV_SYNC_ODD; break; case V4L2_FIELD_INTERLACED_TB: default: yi->lace_mode = IVTV_YUV_MODE_INTERLACED; break; } yi->lace_sync_field = (yi->lace_mode & IVTV_YUV_SYNC_MASK) == IVTV_YUV_SYNC_EVEN ? 
0 : 1; if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) itv->dma_data_req_size = 1080 * ((yi->v4l2_src_h + 31) & ~31); return 0; } static int ivtv_s_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt) { struct ivtv *itv = fh2id(fh)->itv; int ret = ivtv_try_fmt_vid_out_overlay(file, fh, fmt); if (ret == 0) { itv->osd_chroma_key = fmt->fmt.win.chromakey; itv->osd_global_alpha = fmt->fmt.win.global_alpha; ivtv_set_osd_alpha(itv); } return ret; } static int ivtv_g_chip_ident(struct file *file, void *fh, struct v4l2_dbg_chip_ident *chip) { struct ivtv *itv = fh2id(fh)->itv; chip->ident = V4L2_IDENT_NONE; chip->revision = 0; if (chip->match.type == V4L2_CHIP_MATCH_HOST) { if (v4l2_chip_match_host(&chip->match)) chip->ident = itv->has_cx23415 ? V4L2_IDENT_CX23415 : V4L2_IDENT_CX23416; return 0; } if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER && chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) return -EINVAL; /* TODO: is this correct? */ return ivtv_call_all_err(itv, core, g_chip_ident, chip); } #ifdef CONFIG_VIDEO_ADV_DEBUG static int ivtv_itvc(struct ivtv *itv, bool get, u64 reg, u64 *val) { volatile u8 __iomem *reg_start; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (reg >= IVTV_REG_OFFSET && reg < IVTV_REG_OFFSET + IVTV_REG_SIZE) reg_start = itv->reg_mem - IVTV_REG_OFFSET; else if (itv->has_cx23415 && reg >= IVTV_DECODER_OFFSET && reg < IVTV_DECODER_OFFSET + IVTV_DECODER_SIZE) reg_start = itv->dec_mem - IVTV_DECODER_OFFSET; else if (reg < IVTV_ENCODER_SIZE) reg_start = itv->enc_mem; else return -EINVAL; if (get) *val = readl(reg + reg_start); else writel(*val, reg + reg_start); return 0; } static int ivtv_g_register(struct file *file, void *fh, struct v4l2_dbg_register *reg) { struct ivtv *itv = fh2id(fh)->itv; if (v4l2_chip_match_host(&reg->match)) { reg->size = 4; return ivtv_itvc(itv, true, reg->reg, &reg->val); } /* TODO: subdev errors should not be ignored, this should become a subdev helper function. 
*/ ivtv_call_all(itv, core, g_register, reg); return 0; } static int ivtv_s_register(struct file *file, void *fh, const struct v4l2_dbg_register *reg) { struct ivtv *itv = fh2id(fh)->itv; if (v4l2_chip_match_host(&reg->match)) { u64 val = reg->val; return ivtv_itvc(itv, false, reg->reg, &val); } /* TODO: subdev errors should not be ignored, this should become a subdev helper function. */ ivtv_call_all(itv, core, s_register, reg); return 0; } #endif static int ivtv_querycap(struct file *file, void *fh, struct v4l2_capability *vcap) { struct ivtv_open_id *id = fh2id(file->private_data); struct ivtv *itv = id->itv; struct ivtv_stream *s = &itv->streams[id->type]; strlcpy(vcap->driver, IVTV_DRIVER_NAME, sizeof(vcap->driver)); strlcpy(vcap->card, itv->card_name, sizeof(vcap->card)); snprintf(vcap->bus_info, sizeof(vcap->bus_info), "PCI:%s", pci_name(itv->pdev)); vcap->capabilities = itv->v4l2_cap | V4L2_CAP_DEVICE_CAPS; vcap->device_caps = s->caps; return 0; } static int ivtv_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin) { struct ivtv *itv = fh2id(fh)->itv; return ivtv_get_audio_input(itv, vin->index, vin); } static int ivtv_g_audio(struct file *file, void *fh, struct v4l2_audio *vin) { struct ivtv *itv = fh2id(fh)->itv; vin->index = itv->audio_input; return ivtv_get_audio_input(itv, vin->index, vin); } static int ivtv_s_audio(struct file *file, void *fh, const struct v4l2_audio *vout) { struct ivtv *itv = fh2id(fh)->itv; if (vout->index >= itv->nof_audio_inputs) return -EINVAL; itv->audio_input = vout->index; ivtv_audio_set_io(itv); return 0; } static int ivtv_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vin) { struct ivtv *itv = fh2id(fh)->itv; /* set it to defaults from our table */ return ivtv_get_audio_output(itv, vin->index, vin); } static int ivtv_g_audout(struct file *file, void *fh, struct v4l2_audioout *vin) { struct ivtv *itv = fh2id(fh)->itv; vin->index = 0; return ivtv_get_audio_output(itv, vin->index, vin); } static int 
ivtv_s_audout(struct file *file, void *fh, const struct v4l2_audioout *vout) { struct ivtv *itv = fh2id(fh)->itv; if (itv->card->video_outputs == NULL || vout->index != 0) return -EINVAL; return 0; } static int ivtv_enum_input(struct file *file, void *fh, struct v4l2_input *vin) { struct ivtv *itv = fh2id(fh)->itv; /* set it to defaults from our table */ return ivtv_get_input(itv, vin->index, vin); } static int ivtv_enum_output(struct file *file, void *fh, struct v4l2_output *vout) { struct ivtv *itv = fh2id(fh)->itv; return ivtv_get_output(itv, vout->index, vout); } static int ivtv_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; struct yuv_playback_info *yi = &itv->yuv_info; int streamtype; streamtype = id->type; if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; cropcap->bounds.top = cropcap->bounds.left = 0; cropcap->bounds.width = 720; if (cropcap->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { cropcap->bounds.height = itv->is_50hz ? 576 : 480; cropcap->pixelaspect.numerator = itv->is_50hz ? 59 : 10; cropcap->pixelaspect.denominator = itv->is_50hz ? 54 : 11; } else if (streamtype == IVTV_DEC_STREAM_TYPE_YUV) { if (yi->track_osd) { cropcap->bounds.width = yi->osd_full_w; cropcap->bounds.height = yi->osd_full_h; } else { cropcap->bounds.width = 720; cropcap->bounds.height = itv->is_out_50hz ? 576 : 480; } cropcap->pixelaspect.numerator = itv->is_out_50hz ? 59 : 10; cropcap->pixelaspect.denominator = itv->is_out_50hz ? 54 : 11; } else { cropcap->bounds.height = itv->is_out_50hz ? 576 : 480; cropcap->pixelaspect.numerator = itv->is_out_50hz ? 59 : 10; cropcap->pixelaspect.denominator = itv->is_out_50hz ? 
54 : 11; } cropcap->defrect = cropcap->bounds; return 0; } static int ivtv_s_crop(struct file *file, void *fh, const struct v4l2_crop *crop) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; struct yuv_playback_info *yi = &itv->yuv_info; int streamtype; streamtype = id->type; if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) { if (streamtype == IVTV_DEC_STREAM_TYPE_YUV) { yi->main_rect = crop->c; return 0; } else { if (!ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4, crop->c.width, crop->c.height, crop->c.left, crop->c.top)) { itv->main_rect = crop->c; return 0; } } return -EINVAL; } return -EINVAL; } static int ivtv_g_crop(struct file *file, void *fh, struct v4l2_crop *crop) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; struct yuv_playback_info *yi = &itv->yuv_info; int streamtype; streamtype = id->type; if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) { if (streamtype == IVTV_DEC_STREAM_TYPE_YUV) crop->c = yi->main_rect; else crop->c = itv->main_rect; return 0; } return -EINVAL; } static int ivtv_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt) { static const struct v4l2_fmtdesc hm12 = { 0, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0, "HM12 (YUV 4:2:0)", V4L2_PIX_FMT_HM12, { 0, 0, 0, 0 } }; static const struct v4l2_fmtdesc mpeg = { 0, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FMT_FLAG_COMPRESSED, "MPEG", V4L2_PIX_FMT_MPEG, { 0, 0, 0, 0 } }; struct ivtv *itv = fh2id(fh)->itv; struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; if (fmt->index) return -EINVAL; if (s->type == IVTV_ENC_STREAM_TYPE_MPG) *fmt = mpeg; else if (s->type == IVTV_ENC_STREAM_TYPE_YUV) *fmt = hm12; else return -EINVAL; return 0; } static int ivtv_enum_fmt_vid_out(struct file *file, void *fh, struct v4l2_fmtdesc *fmt) { static const struct v4l2_fmtdesc hm12 = { 0, V4L2_BUF_TYPE_VIDEO_OUTPUT, 0, "HM12 (YUV 4:2:0)", V4L2_PIX_FMT_HM12, { 0, 0, 0, 0 } }; static const 
struct v4l2_fmtdesc mpeg = { 0, V4L2_BUF_TYPE_VIDEO_OUTPUT, V4L2_FMT_FLAG_COMPRESSED, "MPEG", V4L2_PIX_FMT_MPEG, { 0, 0, 0, 0 } }; struct ivtv *itv = fh2id(fh)->itv; struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; if (fmt->index) return -EINVAL; if (s->type == IVTV_DEC_STREAM_TYPE_MPG) *fmt = mpeg; else if (s->type == IVTV_DEC_STREAM_TYPE_YUV) *fmt = hm12; else return -EINVAL; return 0; } static int ivtv_g_input(struct file *file, void *fh, unsigned int *i) { struct ivtv *itv = fh2id(fh)->itv; *i = itv->active_input; return 0; } int ivtv_s_input(struct file *file, void *fh, unsigned int inp) { struct ivtv *itv = fh2id(fh)->itv; v4l2_std_id std; int i; if (inp >= itv->nof_inputs) return -EINVAL; if (inp == itv->active_input) { IVTV_DEBUG_INFO("Input unchanged\n"); return 0; } if (atomic_read(&itv->capturing) > 0) { return -EBUSY; } IVTV_DEBUG_INFO("Changing input from %d to %d\n", itv->active_input, inp); itv->active_input = inp; /* Set the audio input to whatever is appropriate for the input type. */ itv->audio_input = itv->card->video_inputs[inp].audio_index; if (itv->card->video_inputs[inp].video_type == IVTV_CARD_INPUT_VID_TUNER) std = itv->tuner_std; else std = V4L2_STD_ALL; for (i = 0; i <= IVTV_ENC_STREAM_TYPE_VBI; i++) itv->streams[i].vdev->tvnorms = std; /* prevent others from messing with the streams until we're finished changing inputs. 
*/ ivtv_mute(itv); ivtv_video_set_io(itv); ivtv_audio_set_io(itv); ivtv_unmute(itv); return 0; } static int ivtv_g_output(struct file *file, void *fh, unsigned int *i) { struct ivtv *itv = fh2id(fh)->itv; if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; *i = itv->active_output; return 0; } static int ivtv_s_output(struct file *file, void *fh, unsigned int outp) { struct ivtv *itv = fh2id(fh)->itv; if (outp >= itv->card->nof_outputs) return -EINVAL; if (outp == itv->active_output) { IVTV_DEBUG_INFO("Output unchanged\n"); return 0; } IVTV_DEBUG_INFO("Changing output from %d to %d\n", itv->active_output, outp); itv->active_output = outp; ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_routing, SAA7127_INPUT_TYPE_NORMAL, itv->card->video_outputs[outp].video_output, 0); return 0; } static int ivtv_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf) { struct ivtv *itv = fh2id(fh)->itv; struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; if (s->vdev->vfl_dir) return -ENOTTY; if (vf->tuner != 0) return -EINVAL; ivtv_call_all(itv, tuner, g_frequency, vf); return 0; } int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf) { struct ivtv *itv = fh2id(fh)->itv; struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; if (s->vdev->vfl_dir) return -ENOTTY; if (vf->tuner != 0) return -EINVAL; ivtv_mute(itv); IVTV_DEBUG_INFO("v4l2 ioctl: set frequency %d\n", vf->frequency); ivtv_call_all(itv, tuner, s_frequency, vf); ivtv_unmute(itv); return 0; } static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std) { struct ivtv *itv = fh2id(fh)->itv; *std = itv->std; return 0; } void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id std) { itv->std = std; itv->is_60hz = (std & V4L2_STD_525_60) ? 1 : 0; itv->is_50hz = !itv->is_60hz; cx2341x_handler_set_50hz(&itv->cxhdl, itv->is_50hz); itv->cxhdl.width = 720; itv->cxhdl.height = itv->is_50hz ? 576 : 480; itv->vbi.count = itv->is_50hz ? 18 : 12; itv->vbi.start[0] = itv->is_50hz ? 
6 : 10; itv->vbi.start[1] = itv->is_50hz ? 318 : 273; if (itv->hw_flags & IVTV_HW_CX25840) itv->vbi.sliced_decoder_line_size = itv->is_60hz ? 272 : 284; /* Tuner */ ivtv_call_all(itv, core, s_std, itv->std); } void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std) { struct yuv_playback_info *yi = &itv->yuv_info; DEFINE_WAIT(wait); int f; /* set display standard */ itv->std_out = std; itv->is_out_60hz = (std & V4L2_STD_525_60) ? 1 : 0; itv->is_out_50hz = !itv->is_out_60hz; ivtv_call_all(itv, video, s_std_output, itv->std_out); /* * The next firmware call is time sensitive. Time it to * avoid risk of a hard lock, by trying to ensure the call * happens within the first 100 lines of the top field. * Make 4 attempts to sync to the decoder before giving up. */ mutex_unlock(&itv->serialize_lock); for (f = 0; f < 4; f++) { prepare_to_wait(&itv->vsync_waitq, &wait, TASK_UNINTERRUPTIBLE); if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100) break; schedule_timeout(msecs_to_jiffies(25)); } finish_wait(&itv->vsync_waitq, &wait); mutex_lock(&itv->serialize_lock); if (f == 4) IVTV_WARN("Mode change failed to sync to decoder\n"); ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz); itv->main_rect.left = 0; itv->main_rect.top = 0; itv->main_rect.width = 720; itv->main_rect.height = itv->is_out_50hz ? 576 : 480; ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4, 720, itv->main_rect.height, 0, 0); yi->main_rect = itv->main_rect; if (!itv->osd_info) { yi->osd_full_w = 720; yi->osd_full_h = itv->is_out_50hz ? 576 : 480; } } static int ivtv_s_std(struct file *file, void *fh, v4l2_std_id std) { struct ivtv *itv = fh2id(fh)->itv; if ((std & V4L2_STD_ALL) == 0) return -EINVAL; if (std == itv->std) return 0; if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) || atomic_read(&itv->capturing) > 0 || atomic_read(&itv->decoding) > 0) { /* Switching standard would mess with already running streams, prevent that by returning EBUSY. 
*/ return -EBUSY; } IVTV_DEBUG_INFO("Switching standard to %llx.\n", (unsigned long long)itv->std); ivtv_s_std_enc(itv, std); if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) ivtv_s_std_dec(itv, std); return 0; } static int ivtv_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; if (vt->index != 0) return -EINVAL; ivtv_call_all(itv, tuner, s_tuner, vt); return 0; } static int ivtv_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt) { struct ivtv *itv = fh2id(fh)->itv; if (vt->index != 0) return -EINVAL; ivtv_call_all(itv, tuner, g_tuner, vt); if (vt->type == V4L2_TUNER_RADIO) strlcpy(vt->name, "ivtv Radio Tuner", sizeof(vt->name)); else strlcpy(vt->name, "ivtv TV Tuner", sizeof(vt->name)); return 0; } static int ivtv_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap) { struct ivtv *itv = fh2id(fh)->itv; int set = itv->is_50hz ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525; int f, l; if (cap->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) { for (f = 0; f < 2; f++) { for (l = 0; l < 24; l++) { if (valid_service_line(f, l, itv->is_50hz)) cap->service_lines[f][l] = set; } } } else if (cap->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) { if (!(itv->v4l2_cap & V4L2_CAP_SLICED_VBI_OUTPUT)) return -EINVAL; if (itv->is_60hz) { cap->service_lines[0][21] = V4L2_SLICED_CAPTION_525; cap->service_lines[1][21] = V4L2_SLICED_CAPTION_525; } else { cap->service_lines[0][23] = V4L2_SLICED_WSS_625; cap->service_lines[0][16] = V4L2_SLICED_VPS; } } else { return -EINVAL; } set = 0; for (f = 0; f < 2; f++) for (l = 0; l < 24; l++) set |= cap->service_lines[f][l]; cap->service_set = set; return 0; } static int ivtv_g_enc_index(struct file *file, void *fh, struct v4l2_enc_idx *idx) { struct ivtv *itv = fh2id(fh)->itv; struct v4l2_enc_idx_entry *e = idx->entry; int entries; int i; entries = (itv->pgm_info_write_idx + IVTV_MAX_PGM_INDEX - itv->pgm_info_read_idx) % IVTV_MAX_PGM_INDEX; if 
(entries > V4L2_ENC_IDX_ENTRIES) entries = V4L2_ENC_IDX_ENTRIES; idx->entries = 0; idx->entries_cap = IVTV_MAX_PGM_INDEX; if (!atomic_read(&itv->capturing)) return 0; for (i = 0; i < entries; i++) { *e = itv->pgm_info[(itv->pgm_info_read_idx + i) % IVTV_MAX_PGM_INDEX]; if ((e->flags & V4L2_ENC_IDX_FRAME_MASK) <= V4L2_ENC_IDX_FRAME_B) { idx->entries++; e++; } } itv->pgm_info_read_idx = (itv->pgm_info_read_idx + idx->entries) % IVTV_MAX_PGM_INDEX; return 0; } static int ivtv_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *enc) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; switch (enc->cmd) { case V4L2_ENC_CMD_START: IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_START\n"); enc->flags = 0; return ivtv_start_capture(id); case V4L2_ENC_CMD_STOP: IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_STOP\n"); enc->flags &= V4L2_ENC_CMD_STOP_AT_GOP_END; ivtv_stop_capture(id, enc->flags & V4L2_ENC_CMD_STOP_AT_GOP_END); return 0; case V4L2_ENC_CMD_PAUSE: IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_PAUSE\n"); enc->flags = 0; if (!atomic_read(&itv->capturing)) return -EPERM; if (test_and_set_bit(IVTV_F_I_ENC_PAUSED, &itv->i_flags)) return 0; ivtv_mute(itv); ivtv_vapi(itv, CX2341X_ENC_PAUSE_ENCODER, 1, 0); break; case V4L2_ENC_CMD_RESUME: IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_RESUME\n"); enc->flags = 0; if (!atomic_read(&itv->capturing)) return -EPERM; if (!test_and_clear_bit(IVTV_F_I_ENC_PAUSED, &itv->i_flags)) return 0; ivtv_vapi(itv, CX2341X_ENC_PAUSE_ENCODER, 1, 1); ivtv_unmute(itv); break; default: IVTV_DEBUG_IOCTL("Unknown cmd %d\n", enc->cmd); return -EINVAL; } return 0; } static int ivtv_try_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *enc) { struct ivtv *itv = fh2id(fh)->itv; switch (enc->cmd) { case V4L2_ENC_CMD_START: IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_START\n"); enc->flags = 0; return 0; case V4L2_ENC_CMD_STOP: IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_STOP\n"); enc->flags &= V4L2_ENC_CMD_STOP_AT_GOP_END; return 0; case V4L2_ENC_CMD_PAUSE: 
IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_PAUSE\n"); enc->flags = 0; return 0; case V4L2_ENC_CMD_RESUME: IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_RESUME\n"); enc->flags = 0; return 0; default: IVTV_DEBUG_IOCTL("Unknown cmd %d\n", enc->cmd); return -EINVAL; } } static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb) { struct ivtv *itv = fh2id(fh)->itv; u32 data[CX2341X_MBOX_MAX_DATA]; struct yuv_playback_info *yi = &itv->yuv_info; int pixfmt; static u32 pixel_format[16] = { V4L2_PIX_FMT_PAL8, /* Uses a 256-entry RGB colormap */ V4L2_PIX_FMT_RGB565, V4L2_PIX_FMT_RGB555, V4L2_PIX_FMT_RGB444, V4L2_PIX_FMT_RGB32, 0, 0, 0, V4L2_PIX_FMT_PAL8, /* Uses a 256-entry YUV colormap */ V4L2_PIX_FMT_YUV565, V4L2_PIX_FMT_YUV555, V4L2_PIX_FMT_YUV444, V4L2_PIX_FMT_YUV32, 0, 0, 0, }; if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -EINVAL; if (!itv->osd_video_pbase) return -EINVAL; fb->capability = V4L2_FBUF_CAP_EXTERNOVERLAY | V4L2_FBUF_CAP_CHROMAKEY | V4L2_FBUF_CAP_GLOBAL_ALPHA; ivtv_vapi_result(itv, data, CX2341X_OSD_GET_STATE, 0); data[0] |= (read_reg(0x2a00) >> 7) & 0x40; pixfmt = (data[0] >> 3) & 0xf; fb->fmt.pixelformat = pixel_format[pixfmt]; fb->fmt.width = itv->osd_rect.width; fb->fmt.height = itv->osd_rect.height; fb->fmt.field = V4L2_FIELD_INTERLACED; fb->fmt.bytesperline = fb->fmt.width; fb->fmt.colorspace = V4L2_COLORSPACE_SMPTE170M; fb->fmt.field = V4L2_FIELD_INTERLACED; fb->fmt.priv = 0; if (fb->fmt.pixelformat != V4L2_PIX_FMT_PAL8) fb->fmt.bytesperline *= 2; if (fb->fmt.pixelformat == V4L2_PIX_FMT_RGB32 || fb->fmt.pixelformat == V4L2_PIX_FMT_YUV32) fb->fmt.bytesperline *= 2; fb->fmt.sizeimage = fb->fmt.bytesperline * fb->fmt.height; fb->base = (void *)itv->osd_video_pbase; fb->flags = 0; if (itv->osd_chroma_key_state) fb->flags |= V4L2_FBUF_FLAG_CHROMAKEY; if (itv->osd_global_alpha_state) fb->flags |= V4L2_FBUF_FLAG_GLOBAL_ALPHA; if (yi->track_osd) fb->flags |= V4L2_FBUF_FLAG_OVERLAY; pixfmt &= 7; /* no local alpha for RGB565 or unknown formats */ if 
(pixfmt == 1 || pixfmt > 4) return 0; /* 16-bit formats have inverted local alpha */ if (pixfmt == 2 || pixfmt == 3) fb->capability |= V4L2_FBUF_CAP_LOCAL_INV_ALPHA; else fb->capability |= V4L2_FBUF_CAP_LOCAL_ALPHA; if (itv->osd_local_alpha_state) { /* 16-bit formats have inverted local alpha */ if (pixfmt == 2 || pixfmt == 3) fb->flags |= V4L2_FBUF_FLAG_LOCAL_INV_ALPHA; else fb->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA; } return 0; } static int ivtv_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *fb) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; struct yuv_playback_info *yi = &itv->yuv_info; if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -EINVAL; if (!itv->osd_video_pbase) return -EINVAL; itv->osd_global_alpha_state = (fb->flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) != 0; itv->osd_local_alpha_state = (fb->flags & (V4L2_FBUF_FLAG_LOCAL_ALPHA|V4L2_FBUF_FLAG_LOCAL_INV_ALPHA)) != 0; itv->osd_chroma_key_state = (fb->flags & V4L2_FBUF_FLAG_CHROMAKEY) != 0; ivtv_set_osd_alpha(itv); yi->track_osd = (fb->flags & V4L2_FBUF_FLAG_OVERLAY) != 0; return 0; } static int ivtv_overlay(struct file *file, void *fh, unsigned int on) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -EINVAL; ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, on != 0); return 0; } static int ivtv_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub) { switch (sub->type) { case V4L2_EVENT_VSYNC: case V4L2_EVENT_EOS: return v4l2_event_subscribe(fh, sub, 0, NULL); case V4L2_EVENT_CTRL: return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops); default: return -EINVAL; } } static int ivtv_log_status(struct file *file, void *fh) { struct ivtv *itv = fh2id(fh)->itv; u32 data[CX2341X_MBOX_MAX_DATA]; int has_output = itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT; struct v4l2_input vidin; struct v4l2_audio audin; int i; IVTV_INFO("Version: %s Card: %s\n", IVTV_VERSION, 
itv->card_name); if (itv->hw_flags & IVTV_HW_TVEEPROM) { struct tveeprom tv; ivtv_read_eeprom(itv, &tv); } ivtv_call_all(itv, core, log_status); ivtv_get_input(itv, itv->active_input, &vidin); ivtv_get_audio_input(itv, itv->audio_input, &audin); IVTV_INFO("Video Input: %s\n", vidin.name); IVTV_INFO("Audio Input: %s%s\n", audin.name, (itv->dualwatch_stereo_mode & ~0x300) == 0x200 ? " (Bilingual)" : ""); if (has_output) { struct v4l2_output vidout; struct v4l2_audioout audout; int mode = itv->output_mode; static const char * const output_modes[5] = { "None", "MPEG Streaming", "YUV Streaming", "YUV Frames", "Passthrough", }; static const char * const alpha_mode[4] = { "None", "Global", "Local", "Global and Local" }; static const char * const pixel_format[16] = { "ARGB Indexed", "RGB 5:6:5", "ARGB 1:5:5:5", "ARGB 1:4:4:4", "ARGB 8:8:8:8", "5", "6", "7", "AYUV Indexed", "YUV 5:6:5", "AYUV 1:5:5:5", "AYUV 1:4:4:4", "AYUV 8:8:8:8", "13", "14", "15", }; ivtv_get_output(itv, itv->active_output, &vidout); ivtv_get_audio_output(itv, 0, &audout); IVTV_INFO("Video Output: %s\n", vidout.name); if (mode < 0 || mode > OUT_PASSTHROUGH) mode = OUT_NONE; IVTV_INFO("Output Mode: %s\n", output_modes[mode]); ivtv_vapi_result(itv, data, CX2341X_OSD_GET_STATE, 0); data[0] |= (read_reg(0x2a00) >> 7) & 0x40; IVTV_INFO("Overlay: %s, Alpha: %s, Pixel Format: %s\n", data[0] & 1 ? "On" : "Off", alpha_mode[(data[0] >> 1) & 0x3], pixel_format[(data[0] >> 3) & 0xf]); } IVTV_INFO("Tuner: %s\n", test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ? 
"Radio" : "TV"); v4l2_ctrl_handler_log_status(&itv->cxhdl.hdl, itv->v4l2_dev.name); IVTV_INFO("Status flags: 0x%08lx\n", itv->i_flags); for (i = 0; i < IVTV_MAX_STREAMS; i++) { struct ivtv_stream *s = &itv->streams[i]; if (s->vdev == NULL || s->buffers == 0) continue; IVTV_INFO("Stream %s: status 0x%04lx, %d%% of %d KiB (%d buffers) in use\n", s->name, s->s_flags, (s->buffers - s->q_free.buffers) * 100 / s->buffers, (s->buffers * s->buf_size) / 1024, s->buffers); } IVTV_INFO("Read MPG/VBI: %lld/%lld bytes\n", (long long)itv->mpg_data_received, (long long)itv->vbi_data_inserted); return 0; } static int ivtv_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dec) { struct ivtv_open_id *id = fh2id(file->private_data); struct ivtv *itv = id->itv; IVTV_DEBUG_IOCTL("VIDIOC_DECODER_CMD %d\n", dec->cmd); return ivtv_video_command(itv, id, dec, false); } static int ivtv_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dec) { struct ivtv_open_id *id = fh2id(file->private_data); struct ivtv *itv = id->itv; IVTV_DEBUG_IOCTL("VIDIOC_TRY_DECODER_CMD %d\n", dec->cmd); return ivtv_video_command(itv, id, dec, true); } static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg) { struct ivtv_open_id *id = fh2id(filp->private_data); struct ivtv *itv = id->itv; int nonblocking = filp->f_flags & O_NONBLOCK; struct ivtv_stream *s = &itv->streams[id->type]; unsigned long iarg = (unsigned long)arg; switch (cmd) { case IVTV_IOC_DMA_FRAME: { struct ivtv_dma_frame *args = arg; IVTV_DEBUG_IOCTL("IVTV_IOC_DMA_FRAME\n"); if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; if (args->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; if (itv->output_mode == OUT_UDMA_YUV && args->y_source == NULL) return 0; if (ivtv_start_decoding(id, id->type)) { return -EBUSY; } if (ivtv_set_output_mode(itv, OUT_UDMA_YUV) != OUT_UDMA_YUV) { ivtv_release_stream(s); return -EBUSY; } /* Mark that this file handle started the UDMA_YUV mode */ 
id->yuv_frames = 1; if (args->y_source == NULL) return 0; return ivtv_yuv_prep_frame(itv, args); } case IVTV_IOC_PASSTHROUGH_MODE: IVTV_DEBUG_IOCTL("IVTV_IOC_PASSTHROUGH_MODE\n"); if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; return ivtv_passthrough_mode(itv, *(int *)arg != 0); case VIDEO_GET_PTS: { s64 *pts = arg; s64 frame; IVTV_DEBUG_IOCTL("VIDEO_GET_PTS\n"); if (s->type < IVTV_DEC_STREAM_TYPE_MPG) { *pts = s->dma_pts; break; } if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; return ivtv_g_pts_frame(itv, pts, &frame); } case VIDEO_GET_FRAME_COUNT: { s64 *frame = arg; s64 pts; IVTV_DEBUG_IOCTL("VIDEO_GET_FRAME_COUNT\n"); if (s->type < IVTV_DEC_STREAM_TYPE_MPG) { *frame = 0; break; } if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; return ivtv_g_pts_frame(itv, &pts, frame); } case VIDEO_PLAY: { struct v4l2_decoder_cmd dc; IVTV_DEBUG_IOCTL("VIDEO_PLAY\n"); memset(&dc, 0, sizeof(dc)); dc.cmd = V4L2_DEC_CMD_START; return ivtv_video_command(itv, id, &dc, 0); } case VIDEO_STOP: { struct v4l2_decoder_cmd dc; IVTV_DEBUG_IOCTL("VIDEO_STOP\n"); memset(&dc, 0, sizeof(dc)); dc.cmd = V4L2_DEC_CMD_STOP; dc.flags = V4L2_DEC_CMD_STOP_TO_BLACK | V4L2_DEC_CMD_STOP_IMMEDIATELY; return ivtv_video_command(itv, id, &dc, 0); } case VIDEO_FREEZE: { struct v4l2_decoder_cmd dc; IVTV_DEBUG_IOCTL("VIDEO_FREEZE\n"); memset(&dc, 0, sizeof(dc)); dc.cmd = V4L2_DEC_CMD_PAUSE; return ivtv_video_command(itv, id, &dc, 0); } case VIDEO_CONTINUE: { struct v4l2_decoder_cmd dc; IVTV_DEBUG_IOCTL("VIDEO_CONTINUE\n"); memset(&dc, 0, sizeof(dc)); dc.cmd = V4L2_DEC_CMD_RESUME; return ivtv_video_command(itv, id, &dc, 0); } case VIDEO_COMMAND: case VIDEO_TRY_COMMAND: { /* Note: struct v4l2_decoder_cmd has the same layout as struct video_command */ struct v4l2_decoder_cmd *dc = arg; int try = (cmd == VIDEO_TRY_COMMAND); if (try) IVTV_DEBUG_IOCTL("VIDEO_TRY_COMMAND %d\n", dc->cmd); else IVTV_DEBUG_IOCTL("VIDEO_COMMAND %d\n", dc->cmd); return ivtv_video_command(itv, id, 
dc, try); } case VIDEO_GET_EVENT: { struct video_event *ev = arg; DEFINE_WAIT(wait); IVTV_DEBUG_IOCTL("VIDEO_GET_EVENT\n"); if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; memset(ev, 0, sizeof(*ev)); set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags); while (1) { if (test_and_clear_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags)) ev->type = VIDEO_EVENT_DECODER_STOPPED; else if (test_and_clear_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags)) { ev->type = VIDEO_EVENT_VSYNC; ev->u.vsync_field = test_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags) ? VIDEO_VSYNC_FIELD_ODD : VIDEO_VSYNC_FIELD_EVEN; if (itv->output_mode == OUT_UDMA_YUV && (itv->yuv_info.lace_mode & IVTV_YUV_MODE_MASK) == IVTV_YUV_MODE_PROGRESSIVE) { ev->u.vsync_field = VIDEO_VSYNC_FIELD_PROGRESSIVE; } } if (ev->type) return 0; if (nonblocking) return -EAGAIN; /* Wait for event. Note that serialize_lock is locked, so to allow other processes to access the driver while we are waiting unlock first and later lock again. */ mutex_unlock(&itv->serialize_lock); prepare_to_wait(&itv->event_waitq, &wait, TASK_INTERRUPTIBLE); if (!test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags) && !test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags)) schedule(); finish_wait(&itv->event_waitq, &wait); mutex_lock(&itv->serialize_lock); if (signal_pending(current)) { /* return if a signal was received */ IVTV_DEBUG_INFO("User stopped wait for event\n"); return -EINTR; } } break; } case VIDEO_SELECT_SOURCE: IVTV_DEBUG_IOCTL("VIDEO_SELECT_SOURCE\n"); if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; return ivtv_passthrough_mode(itv, iarg == VIDEO_SOURCE_DEMUX); case AUDIO_SET_MUTE: IVTV_DEBUG_IOCTL("AUDIO_SET_MUTE\n"); itv->speed_mute_audio = iarg; return 0; case AUDIO_CHANNEL_SELECT: IVTV_DEBUG_IOCTL("AUDIO_CHANNEL_SELECT\n"); if (iarg > AUDIO_STEREO_SWAPPED) return -EINVAL; return v4l2_ctrl_s_ctrl(itv->ctrl_audio_playback, iarg + 1); case AUDIO_BILINGUAL_CHANNEL_SELECT: IVTV_DEBUG_IOCTL("AUDIO_BILINGUAL_CHANNEL_SELECT\n"); if (iarg > 
AUDIO_STEREO_SWAPPED) return -EINVAL; return v4l2_ctrl_s_ctrl(itv->ctrl_audio_multilingual_playback, iarg + 1); default: return -EINVAL; } return 0; } static long ivtv_default(struct file *file, void *fh, bool valid_prio, unsigned int cmd, void *arg) { struct ivtv *itv = fh2id(fh)->itv; if (!valid_prio) { switch (cmd) { case IVTV_IOC_PASSTHROUGH_MODE: case VIDEO_PLAY: case VIDEO_STOP: case VIDEO_FREEZE: case VIDEO_CONTINUE: case VIDEO_COMMAND: case VIDEO_SELECT_SOURCE: case AUDIO_SET_MUTE: case AUDIO_CHANNEL_SELECT: case AUDIO_BILINGUAL_CHANNEL_SELECT: return -EBUSY; } } switch (cmd) { case VIDIOC_INT_RESET: { u32 val = *(u32 *)arg; if ((val == 0 && itv->options.newi2c) || (val & 0x01)) ivtv_reset_ir_gpio(itv); if (val & 0x02) v4l2_subdev_call(itv->sd_video, core, reset, 0); break; } case IVTV_IOC_DMA_FRAME: case IVTV_IOC_PASSTHROUGH_MODE: case VIDEO_GET_PTS: case VIDEO_GET_FRAME_COUNT: case VIDEO_GET_EVENT: case VIDEO_PLAY: case VIDEO_STOP: case VIDEO_FREEZE: case VIDEO_CONTINUE: case VIDEO_COMMAND: case VIDEO_TRY_COMMAND: case VIDEO_SELECT_SOURCE: case AUDIO_SET_MUTE: case AUDIO_CHANNEL_SELECT: case AUDIO_BILINGUAL_CHANNEL_SELECT: return ivtv_decoder_ioctls(file, cmd, (void *)arg); default: return -ENOTTY; } return 0; } static const struct v4l2_ioctl_ops ivtv_ioctl_ops = { .vidioc_querycap = ivtv_querycap, .vidioc_s_audio = ivtv_s_audio, .vidioc_g_audio = ivtv_g_audio, .vidioc_enumaudio = ivtv_enumaudio, .vidioc_s_audout = ivtv_s_audout, .vidioc_g_audout = ivtv_g_audout, .vidioc_enum_input = ivtv_enum_input, .vidioc_enum_output = ivtv_enum_output, .vidioc_enumaudout = ivtv_enumaudout, .vidioc_cropcap = ivtv_cropcap, .vidioc_s_crop = ivtv_s_crop, .vidioc_g_crop = ivtv_g_crop, .vidioc_g_input = ivtv_g_input, .vidioc_s_input = ivtv_s_input, .vidioc_g_output = ivtv_g_output, .vidioc_s_output = ivtv_s_output, .vidioc_g_frequency = ivtv_g_frequency, .vidioc_s_frequency = ivtv_s_frequency, .vidioc_s_tuner = ivtv_s_tuner, .vidioc_g_tuner = ivtv_g_tuner, 
.vidioc_g_enc_index = ivtv_g_enc_index, .vidioc_g_fbuf = ivtv_g_fbuf, .vidioc_s_fbuf = ivtv_s_fbuf, .vidioc_g_std = ivtv_g_std, .vidioc_s_std = ivtv_s_std, .vidioc_overlay = ivtv_overlay, .vidioc_log_status = ivtv_log_status, .vidioc_enum_fmt_vid_cap = ivtv_enum_fmt_vid_cap, .vidioc_encoder_cmd = ivtv_encoder_cmd, .vidioc_try_encoder_cmd = ivtv_try_encoder_cmd, .vidioc_decoder_cmd = ivtv_decoder_cmd, .vidioc_try_decoder_cmd = ivtv_try_decoder_cmd, .vidioc_enum_fmt_vid_out = ivtv_enum_fmt_vid_out, .vidioc_g_fmt_vid_cap = ivtv_g_fmt_vid_cap, .vidioc_g_fmt_vbi_cap = ivtv_g_fmt_vbi_cap, .vidioc_g_fmt_sliced_vbi_cap = ivtv_g_fmt_sliced_vbi_cap, .vidioc_g_fmt_vid_out = ivtv_g_fmt_vid_out, .vidioc_g_fmt_vid_out_overlay = ivtv_g_fmt_vid_out_overlay, .vidioc_g_fmt_sliced_vbi_out = ivtv_g_fmt_sliced_vbi_out, .vidioc_s_fmt_vid_cap = ivtv_s_fmt_vid_cap, .vidioc_s_fmt_vbi_cap = ivtv_s_fmt_vbi_cap, .vidioc_s_fmt_sliced_vbi_cap = ivtv_s_fmt_sliced_vbi_cap, .vidioc_s_fmt_vid_out = ivtv_s_fmt_vid_out, .vidioc_s_fmt_vid_out_overlay = ivtv_s_fmt_vid_out_overlay, .vidioc_s_fmt_sliced_vbi_out = ivtv_s_fmt_sliced_vbi_out, .vidioc_try_fmt_vid_cap = ivtv_try_fmt_vid_cap, .vidioc_try_fmt_vbi_cap = ivtv_try_fmt_vbi_cap, .vidioc_try_fmt_sliced_vbi_cap = ivtv_try_fmt_sliced_vbi_cap, .vidioc_try_fmt_vid_out = ivtv_try_fmt_vid_out, .vidioc_try_fmt_vid_out_overlay = ivtv_try_fmt_vid_out_overlay, .vidioc_try_fmt_sliced_vbi_out = ivtv_try_fmt_sliced_vbi_out, .vidioc_g_sliced_vbi_cap = ivtv_g_sliced_vbi_cap, .vidioc_g_chip_ident = ivtv_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = ivtv_g_register, .vidioc_s_register = ivtv_s_register, #endif .vidioc_default = ivtv_default, .vidioc_subscribe_event = ivtv_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; void ivtv_set_funcs(struct video_device *vdev) { vdev->ioctl_ops = &ivtv_ioctl_ops; }
gpl-2.0
HelllGuest/xolo_q1100_kernel
drivers/net/usb/ipheth.c
2858
14602
/* * ipheth.c - Apple iPhone USB Ethernet driver * * Copyright (c) 2009 Diego Giagio <diego@giagio.com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of GIAGIO.COM nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * The provided data structures and external interfaces from this code * are not restricted to be used by modules with a GPL compatible license. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. 
* * * Attention: iPhone device must be paired, otherwise it won't respond to our * driver. For more info: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/usb.h> #include <linux/workqueue.h> #define USB_VENDOR_APPLE 0x05ac #define USB_PRODUCT_IPHONE 0x1290 #define USB_PRODUCT_IPHONE_3G 0x1292 #define USB_PRODUCT_IPHONE_3GS 0x1294 #define USB_PRODUCT_IPHONE_4 0x1297 #define USB_PRODUCT_IPHONE_4_VZW 0x129c #define USB_PRODUCT_IPHONE_4S 0x12a0 #define IPHETH_USBINTF_CLASS 255 #define IPHETH_USBINTF_SUBCLASS 253 #define IPHETH_USBINTF_PROTO 1 #define IPHETH_BUF_SIZE 1516 #define IPHETH_IP_ALIGN 2 /* padding at front of URB */ #define IPHETH_TX_TIMEOUT (5 * HZ) #define IPHETH_INTFNUM 2 #define IPHETH_ALT_INTFNUM 1 #define IPHETH_CTRL_ENDP 0x00 #define IPHETH_CTRL_BUF_SIZE 0x40 #define IPHETH_CTRL_TIMEOUT (5 * HZ) #define IPHETH_CMD_GET_MACADDR 0x00 #define IPHETH_CMD_CARRIER_CHECK 0x45 #define IPHETH_CARRIER_CHECK_TIMEOUT round_jiffies_relative(1 * HZ) #define IPHETH_CARRIER_ON 0x04 static struct usb_device_id ipheth_table[] = { { USB_DEVICE_AND_INTERFACE_INFO( USB_VENDOR_APPLE, USB_PRODUCT_IPHONE, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, IPHETH_USBINTF_PROTO) }, { USB_DEVICE_AND_INTERFACE_INFO( USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3G, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, IPHETH_USBINTF_PROTO) }, { USB_DEVICE_AND_INTERFACE_INFO( USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, IPHETH_USBINTF_PROTO) }, { USB_DEVICE_AND_INTERFACE_INFO( USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, IPHETH_USBINTF_PROTO) }, { USB_DEVICE_AND_INTERFACE_INFO( USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 
IPHETH_USBINTF_PROTO) }, { USB_DEVICE_AND_INTERFACE_INFO( USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, IPHETH_USBINTF_PROTO) }, { } }; MODULE_DEVICE_TABLE(usb, ipheth_table); struct ipheth_device { struct usb_device *udev; struct usb_interface *intf; struct net_device *net; struct sk_buff *tx_skb; struct urb *tx_urb; struct urb *rx_urb; unsigned char *tx_buf; unsigned char *rx_buf; unsigned char *ctrl_buf; u8 bulk_in; u8 bulk_out; struct delayed_work carrier_work; }; static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags); static int ipheth_alloc_urbs(struct ipheth_device *iphone) { struct urb *tx_urb = NULL; struct urb *rx_urb = NULL; u8 *tx_buf = NULL; u8 *rx_buf = NULL; tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (tx_urb == NULL) goto error_nomem; rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (rx_urb == NULL) goto free_tx_urb; tx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE, GFP_KERNEL, &tx_urb->transfer_dma); if (tx_buf == NULL) goto free_rx_urb; rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE, GFP_KERNEL, &rx_urb->transfer_dma); if (rx_buf == NULL) goto free_tx_buf; iphone->tx_urb = tx_urb; iphone->rx_urb = rx_urb; iphone->tx_buf = tx_buf; iphone->rx_buf = rx_buf; return 0; free_tx_buf: usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, tx_buf, tx_urb->transfer_dma); free_rx_urb: usb_free_urb(rx_urb); free_tx_urb: usb_free_urb(tx_urb); error_nomem: return -ENOMEM; } static void ipheth_free_urbs(struct ipheth_device *iphone) { usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf, iphone->rx_urb->transfer_dma); usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf, iphone->tx_urb->transfer_dma); usb_free_urb(iphone->rx_urb); usb_free_urb(iphone->tx_urb); } static void ipheth_kill_urbs(struct ipheth_device *dev) { usb_kill_urb(dev->tx_urb); usb_kill_urb(dev->rx_urb); } static void ipheth_rcvbulk_callback(struct urb *urb) { struct ipheth_device *dev; struct sk_buff *skb; 
int status; char *buf; int len; dev = urb->context; if (dev == NULL) return; status = urb->status; switch (status) { case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: return; case 0: break; default: err("%s: urb status: %d", __func__, status); return; } if (urb->actual_length <= IPHETH_IP_ALIGN) { dev->net->stats.rx_length_errors++; return; } len = urb->actual_length - IPHETH_IP_ALIGN; buf = urb->transfer_buffer + IPHETH_IP_ALIGN; skb = dev_alloc_skb(len); if (!skb) { err("%s: dev_alloc_skb: -ENOMEM", __func__); dev->net->stats.rx_dropped++; return; } memcpy(skb_put(skb, len), buf, len); skb->dev = dev->net; skb->protocol = eth_type_trans(skb, dev->net); dev->net->stats.rx_packets++; dev->net->stats.rx_bytes += len; netif_rx(skb); ipheth_rx_submit(dev, GFP_ATOMIC); } static void ipheth_sndbulk_callback(struct urb *urb) { struct ipheth_device *dev; int status = urb->status; dev = urb->context; if (dev == NULL) return; if (status != 0 && status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN) err("%s: urb status: %d", __func__, status); dev_kfree_skb_irq(dev->tx_skb); netif_wake_queue(dev->net); } static int ipheth_carrier_set(struct ipheth_device *dev) { struct usb_device *udev = dev->udev; int retval; retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP), IPHETH_CMD_CARRIER_CHECK, /* request */ 0xc0, /* request type */ 0x00, /* value */ 0x02, /* index */ dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE, IPHETH_CTRL_TIMEOUT); if (retval < 0) { err("%s: usb_control_msg: %d", __func__, retval); return retval; } if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) netif_carrier_on(dev->net); else netif_carrier_off(dev->net); return 0; } static void ipheth_carrier_check_work(struct work_struct *work) { struct ipheth_device *dev = container_of(work, struct ipheth_device, carrier_work.work); ipheth_carrier_set(dev); schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT); } static int ipheth_get_macaddr(struct ipheth_device *dev) { struct usb_device 
*udev = dev->udev; struct net_device *net = dev->net; int retval; retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP), IPHETH_CMD_GET_MACADDR, /* request */ 0xc0, /* request type */ 0x00, /* value */ 0x02, /* index */ dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE, IPHETH_CTRL_TIMEOUT); if (retval < 0) { err("%s: usb_control_msg: %d", __func__, retval); } else if (retval < ETH_ALEN) { err("%s: usb_control_msg: short packet: %d bytes", __func__, retval); retval = -EINVAL; } else { memcpy(net->dev_addr, dev->ctrl_buf, ETH_ALEN); retval = 0; } return retval; } static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags) { struct usb_device *udev = dev->udev; int retval; usb_fill_bulk_urb(dev->rx_urb, udev, usb_rcvbulkpipe(udev, dev->bulk_in), dev->rx_buf, IPHETH_BUF_SIZE, ipheth_rcvbulk_callback, dev); dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; retval = usb_submit_urb(dev->rx_urb, mem_flags); if (retval) err("%s: usb_submit_urb: %d", __func__, retval); return retval; } static int ipheth_open(struct net_device *net) { struct ipheth_device *dev = netdev_priv(net); struct usb_device *udev = dev->udev; int retval = 0; usb_set_interface(udev, IPHETH_INTFNUM, IPHETH_ALT_INTFNUM); retval = ipheth_carrier_set(dev); if (retval) return retval; retval = ipheth_rx_submit(dev, GFP_KERNEL); if (retval) return retval; schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT); netif_start_queue(net); return retval; } static int ipheth_close(struct net_device *net) { struct ipheth_device *dev = netdev_priv(net); cancel_delayed_work_sync(&dev->carrier_work); netif_stop_queue(net); return 0; } static int ipheth_tx(struct sk_buff *skb, struct net_device *net) { struct ipheth_device *dev = netdev_priv(net); struct usb_device *udev = dev->udev; int retval; /* Paranoid */ if (skb->len > IPHETH_BUF_SIZE) { WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len); dev->net->stats.tx_dropped++; dev_kfree_skb_irq(skb); return NETDEV_TX_OK; } 
memcpy(dev->tx_buf, skb->data, skb->len); if (skb->len < IPHETH_BUF_SIZE) memset(dev->tx_buf + skb->len, 0, IPHETH_BUF_SIZE - skb->len); usb_fill_bulk_urb(dev->tx_urb, udev, usb_sndbulkpipe(udev, dev->bulk_out), dev->tx_buf, IPHETH_BUF_SIZE, ipheth_sndbulk_callback, dev); dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC); if (retval) { err("%s: usb_submit_urb: %d", __func__, retval); dev->net->stats.tx_errors++; dev_kfree_skb_irq(skb); } else { dev->tx_skb = skb; dev->net->stats.tx_packets++; dev->net->stats.tx_bytes += skb->len; netif_stop_queue(net); } return NETDEV_TX_OK; } static void ipheth_tx_timeout(struct net_device *net) { struct ipheth_device *dev = netdev_priv(net); err("%s: TX timeout", __func__); dev->net->stats.tx_errors++; usb_unlink_urb(dev->tx_urb); } static u32 ipheth_ethtool_op_get_link(struct net_device *net) { struct ipheth_device *dev = netdev_priv(net); return netif_carrier_ok(dev->net); } static const struct ethtool_ops ops = { .get_link = ipheth_ethtool_op_get_link }; static const struct net_device_ops ipheth_netdev_ops = { .ndo_open = ipheth_open, .ndo_stop = ipheth_close, .ndo_start_xmit = ipheth_tx, .ndo_tx_timeout = ipheth_tx_timeout, }; static int ipheth_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_host_interface *hintf; struct usb_endpoint_descriptor *endp; struct ipheth_device *dev; struct net_device *netdev; int i; int retval; netdev = alloc_etherdev(sizeof(struct ipheth_device)); if (!netdev) return -ENOMEM; netdev->netdev_ops = &ipheth_netdev_ops; netdev->watchdog_timeo = IPHETH_TX_TIMEOUT; strcpy(netdev->name, "eth%d"); dev = netdev_priv(netdev); dev->udev = udev; dev->net = netdev; dev->intf = intf; /* Set up endpoints */ hintf = usb_altnum_to_altsetting(intf, IPHETH_ALT_INTFNUM); if (hintf == NULL) { retval = -ENODEV; err("Unable to find alternate settings interface"); goto err_endpoints; 
} for (i = 0; i < hintf->desc.bNumEndpoints; i++) { endp = &hintf->endpoint[i].desc; if (usb_endpoint_is_bulk_in(endp)) dev->bulk_in = endp->bEndpointAddress; else if (usb_endpoint_is_bulk_out(endp)) dev->bulk_out = endp->bEndpointAddress; } if (!(dev->bulk_in && dev->bulk_out)) { retval = -ENODEV; err("Unable to find endpoints"); goto err_endpoints; } dev->ctrl_buf = kmalloc(IPHETH_CTRL_BUF_SIZE, GFP_KERNEL); if (dev->ctrl_buf == NULL) { retval = -ENOMEM; goto err_alloc_ctrl_buf; } retval = ipheth_get_macaddr(dev); if (retval) goto err_get_macaddr; INIT_DELAYED_WORK(&dev->carrier_work, ipheth_carrier_check_work); retval = ipheth_alloc_urbs(dev); if (retval) { err("error allocating urbs: %d", retval); goto err_alloc_urbs; } usb_set_intfdata(intf, dev); SET_NETDEV_DEV(netdev, &intf->dev); SET_ETHTOOL_OPS(netdev, &ops); retval = register_netdev(netdev); if (retval) { err("error registering netdev: %d", retval); retval = -EIO; goto err_register_netdev; } dev_info(&intf->dev, "Apple iPhone USB Ethernet device attached\n"); return 0; err_register_netdev: ipheth_free_urbs(dev); err_alloc_urbs: err_get_macaddr: err_alloc_ctrl_buf: kfree(dev->ctrl_buf); err_endpoints: free_netdev(netdev); return retval; } static void ipheth_disconnect(struct usb_interface *intf) { struct ipheth_device *dev; dev = usb_get_intfdata(intf); if (dev != NULL) { unregister_netdev(dev->net); ipheth_kill_urbs(dev); ipheth_free_urbs(dev); kfree(dev->ctrl_buf); free_netdev(dev->net); } usb_set_intfdata(intf, NULL); dev_info(&intf->dev, "Apple iPhone USB Ethernet now disconnected\n"); } static struct usb_driver ipheth_driver = { .name = "ipheth", .probe = ipheth_probe, .disconnect = ipheth_disconnect, .id_table = ipheth_table, }; module_usb_driver(ipheth_driver); MODULE_AUTHOR("Diego Giagio <diego@giagio.com>"); MODULE_DESCRIPTION("Apple iPhone USB Ethernet driver"); MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
Gaojiquan/android_kernel_zte_digger
drivers/atm/ambassador.c
4650
67858
/* Madge Ambassador ATM Adapter driver. Copyright (C) 1995-1999 Madge Networks Ltd. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA The GNU GPL is contained in /usr/doc/copyright/GPL on a Debian system and in the file COPYING in the Linux kernel source. */ /* * dedicated to the memory of Graham Gordon 1971-1998 * */ #include <linux/module.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/atmdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/poison.h> #include <linux/bitrev.h> #include <linux/mutex.h> #include <linux/firmware.h> #include <linux/ihex.h> #include <linux/slab.h> #include <linux/atomic.h> #include <asm/io.h> #include <asm/byteorder.h> #include "ambassador.h" #define maintainer_string "Giuliano Procida at Madge Networks <gprocida@madge.com>" #define description_string "Madge ATM Ambassador driver" #define version_string "1.2.4" static inline void __init show_version (void) { printk ("%s version %s\n", description_string, version_string); } /* Theory of Operation I Hardware, detection, initialisation and shutdown. 1. Supported Hardware This driver is for the PCI ATMizer-based Ambassador card (except very early versions). It is not suitable for the similar EISA "TR7" card. 
Commercially, both cards are known as Collage Server ATM adapters. The loader supports image transfer to the card, image start and few other miscellaneous commands. Only AAL5 is supported with vpi = 0 and vci in the range 0 to 1023. The cards are big-endian. 2. Detection Standard PCI stuff, the early cards are detected and rejected. 3. Initialisation The cards are reset and the self-test results are checked. The microcode image is then transferred and started. This waits for a pointer to a descriptor containing details of the host-based queues and buffers and various parameters etc. Once they are processed normal operations may begin. The BIA is read using a microcode command. 4. Shutdown This may be accomplished either by a card reset or via the microcode shutdown command. Further investigation required. 5. Persistent state The card reset does not affect PCI configuration (good) or the contents of several other "shared run-time registers" (bad) which include doorbell and interrupt control as well as EEPROM and PCI control. The driver must be careful when modifying these registers not to touch bits it does not use and to undo any changes at exit. II Driver software 0. Generalities The adapter is quite intelligent (fast) and has a simple interface (few features). VPI is always zero, 1024 VCIs are supported. There is limited cell rate support. UBR channels can be capped and ABR (explicit rate, but not EFCI) is supported. There is no CBR or VBR support. 1. Driver <-> Adapter Communication Apart from the basic loader commands, the driver communicates through three entities: the command queue (CQ), the transmit queue pair (TXQ) and the receive queue pairs (RXQ). These three entities are set up by the host and passed to the microcode just after it has been started. All queues are host-based circular queues. They are contiguous and (due to hardware limitations) have some restrictions as to their locations in (bus) memory. 
They are of the "full means the same as empty so don't do that" variety since the adapter uses pointers internally. The queue pairs work as follows: one queue is for supply to the adapter, items in it are pending and are owned by the adapter; the other is the queue for return from the adapter, items in it have been dealt with by the adapter. The host adds items to the supply (TX descriptors and free RX buffer descriptors) and removes items from the return (TX and RX completions). The adapter deals with out of order completions. Interrupts (card to host) and the doorbell (host to card) are used for signalling. 1. CQ This is to communicate "open VC", "close VC", "get stats" etc. to the adapter. At most one command is retired every millisecond by the card. There is no out of order completion or notification. The driver needs to check the return code of the command, waiting as appropriate. 2. TXQ TX supply items are of variable length (scatter gather support) and so the queue items are (more or less) pointers to the real thing. Each TX supply item contains a unique, host-supplied handle (the skb bus address seems most sensible as this works for Alphas as well, there is no need to do any endian conversions on the handles). TX return items consist of just the handles above. 3. RXQ (up to 4 of these with different lengths and buffer sizes) RX supply items consist of a unique, host-supplied handle (the skb bus address again) and a pointer to the buffer data area. RX return items consist of the handle above, the VC, length and a status word. This just screams "oh so easy" doesn't it? Note on RX pool sizes: Each pool should have enough buffers to handle a back-to-back stream of minimum sized frames on a single VC. 
For example: frame spacing = 3us (about right) delay = IRQ lat + RX handling + RX buffer replenish = 20 (us) (a guess) min number of buffers for one VC = 1 + delay/spacing (buffers) delay/spacing = latency = (20+2)/3 = 7 (buffers) (rounding up) The 20us delay assumes that there is no need to sleep; if we need to sleep to get buffers we are going to drop frames anyway. In fact, each pool should have enough buffers to support the simultaneous reassembly of a separate frame on each VC and cope with the case in which frames complete in round robin cell fashion on each VC. Only one frame can complete at each cell arrival, so if "n" VCs are open, the worst case is to have them all complete frames together followed by all starting new frames together. desired number of buffers = n + delay/spacing These are the extreme requirements, however, they are "n+k" for some "k" so we have only the constant to choose. This is the argument rx_lats which current defaults to 7. Actually, "n ? n+k : 0" is better and this is what is implemented, subject to the limit given by the pool size. 4. Driver locking Simple spinlocks are used around the TX and RX queue mechanisms. Anyone with a faster, working method is welcome to implement it. The adapter command queue is protected with a spinlock. We always wait for commands to complete. A more complex form of locking is used around parts of the VC open and close functions. There are three reasons for a lock: 1. we need to do atomic rate reservation and release (not used yet), 2. Opening sometimes involves two adapter commands which must not be separated by another command on the same VC, 3. the changes to RX pool size must be atomic. The lock needs to work over context switches, so we use a semaphore. III Hardware Features and Microcode Bugs 1. Byte Ordering *%^"$&%^$*&^"$(%^$#&^%$(&#%$*(&^#%!"!"!*! 2. Memory access All structures that are not accessed using DMA must be 4-byte aligned (not a problem) and must not cross 4MB boundaries. 
There is a DMA memory hole at E0000000-E00000FF (groan). TX fragments (DMA read) must not cross 4MB boundaries (would be 16MB but for a hardware bug). RX buffers (DMA write) must not cross 16MB boundaries and must include spare trailing bytes up to the next 4-byte boundary; they will be written with rubbish. The PLX likes to prefetch; if reading up to 4 u32 past the end of each TX fragment is not a problem, then TX can be made to go a little faster by passing a flag at init that disables a prefetch workaround. We do not pass this flag. (new microcode only) Now we: . Note that alloc_skb rounds up size to a 16byte boundary. . Ensure all areas do not traverse 4MB boundaries. . Ensure all areas do not start at a E00000xx bus address. (I cannot be certain, but this may always hold with Linux) . Make all failures cause a loud message. . Discard non-conforming SKBs (causes TX failure or RX fill delay). . Discard non-conforming TX fragment descriptors (the TX fails). In the future we could: . Allow RX areas that traverse 4MB (but not 16MB) boundaries. . Segment TX areas into some/more fragments, when necessary. . Relax checks for non-DMA items (ignore hole). . Give scatter-gather (iovec) requirements using ???. (?) 3. VC close is broken (only for new microcode) The VC close adapter microcode command fails to do anything if any frames have been received on the VC but none have been transmitted. Frames continue to be reassembled and passed (with IRQ) to the driver. IV To Do List . Fix bugs! . Timer code may be broken. . Deal with buggy VC close (somehow) in microcode 12. . Handle interrupted and/or non-blocking writes - is this a job for the protocol layer? . Add code to break up TX fragments when they span 4MB boundaries. . Add SUNI phy layer (need to know where SUNI lives on card). . Implement a tx_alloc fn to (a) satisfy TX alignment etc. and (b) leave extra headroom space for Ambassador TX descriptors. . 
Understand these elements of struct atm_vcc: recvq (proto?), sleep, callback, listenq, backlog_quota, reply and user_back. . Adjust TX/RX skb allocation to favour IP with LANE/CLIP (configurable). . Impose a TX-pending limit (2?) on each VC, help avoid TX q overflow. . Decide whether RX buffer recycling is or can be made completely safe; turn it back on. It looks like Werner is going to axe this. . Implement QoS changes on open VCs (involves extracting parts of VC open and close into separate functions and using them to make changes). . Hack on command queue so that someone can issue multiple commands and wait on the last one (OR only "no-op" or "wait" commands are waited for). . Eliminate need for while-schedule around do_command. */ static void do_housekeeping (unsigned long arg); /********** globals **********/ static unsigned short debug = 0; static unsigned int cmds = 8; static unsigned int txs = 32; static unsigned int rxs[NUM_RX_POOLS] = { 64, 64, 64, 64 }; static unsigned int rxs_bs[NUM_RX_POOLS] = { 4080, 12240, 36720, 65535 }; static unsigned int rx_lats = 7; static unsigned char pci_lat = 0; static const unsigned long onegigmask = -1 << 30; /********** access to adapter **********/ static inline void wr_plain (const amb_dev * dev, size_t addr, u32 data) { PRINTD (DBG_FLOW|DBG_REGS, "wr: %08zx <- %08x", addr, data); #ifdef AMB_MMIO dev->membase[addr / sizeof(u32)] = data; #else outl (data, dev->iobase + addr); #endif } static inline u32 rd_plain (const amb_dev * dev, size_t addr) { #ifdef AMB_MMIO u32 data = dev->membase[addr / sizeof(u32)]; #else u32 data = inl (dev->iobase + addr); #endif PRINTD (DBG_FLOW|DBG_REGS, "rd: %08zx -> %08x", addr, data); return data; } static inline void wr_mem (const amb_dev * dev, size_t addr, u32 data) { __be32 be = cpu_to_be32 (data); PRINTD (DBG_FLOW|DBG_REGS, "wr: %08zx <- %08x b[%08x]", addr, data, be); #ifdef AMB_MMIO dev->membase[addr / sizeof(u32)] = be; #else outl (be, dev->iobase + addr); #endif } static inline 
u32 rd_mem (const amb_dev * dev, size_t addr) { #ifdef AMB_MMIO __be32 be = dev->membase[addr / sizeof(u32)]; #else __be32 be = inl (dev->iobase + addr); #endif u32 data = be32_to_cpu (be); PRINTD (DBG_FLOW|DBG_REGS, "rd: %08zx -> %08x b[%08x]", addr, data, be); return data; } /********** dump routines **********/ static inline void dump_registers (const amb_dev * dev) { #ifdef DEBUG_AMBASSADOR if (debug & DBG_REGS) { size_t i; PRINTD (DBG_REGS, "reading PLX control: "); for (i = 0x00; i < 0x30; i += sizeof(u32)) rd_mem (dev, i); PRINTD (DBG_REGS, "reading mailboxes: "); for (i = 0x40; i < 0x60; i += sizeof(u32)) rd_mem (dev, i); PRINTD (DBG_REGS, "reading doorb irqev irqen reset:"); for (i = 0x60; i < 0x70; i += sizeof(u32)) rd_mem (dev, i); } #else (void) dev; #endif return; } static inline void dump_loader_block (volatile loader_block * lb) { #ifdef DEBUG_AMBASSADOR unsigned int i; PRINTDB (DBG_LOAD, "lb @ %p; res: %d, cmd: %d, pay:", lb, be32_to_cpu (lb->result), be32_to_cpu (lb->command)); for (i = 0; i < MAX_COMMAND_DATA; ++i) PRINTDM (DBG_LOAD, " %08x", be32_to_cpu (lb->payload.data[i])); PRINTDE (DBG_LOAD, ", vld: %08x", be32_to_cpu (lb->valid)); #else (void) lb; #endif return; } static inline void dump_command (command * cmd) { #ifdef DEBUG_AMBASSADOR unsigned int i; PRINTDB (DBG_CMD, "cmd @ %p, req: %08x, pars:", cmd, /*be32_to_cpu*/ (cmd->request)); for (i = 0; i < 3; ++i) PRINTDM (DBG_CMD, " %08x", /*be32_to_cpu*/ (cmd->args.par[i])); PRINTDE (DBG_CMD, ""); #else (void) cmd; #endif return; } static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) { #ifdef DEBUG_AMBASSADOR unsigned int i; unsigned char * data = skb->data; PRINTDB (DBG_DATA, "%s(%u) ", prefix, vc); for (i=0; i<skb->len && i < 256;i++) PRINTDM (DBG_DATA, "%02x ", data[i]); PRINTDE (DBG_DATA,""); #else (void) prefix; (void) vc; (void) skb; #endif return; } /********** check memory areas for use by Ambassador **********/ /* see limitations under Hardware Features 
*/ static int check_area (void * start, size_t length) { // assumes length > 0 const u32 fourmegmask = -1 << 22; const u32 twofivesixmask = -1 << 8; const u32 starthole = 0xE0000000; u32 startaddress = virt_to_bus (start); u32 lastaddress = startaddress+length-1; if ((startaddress ^ lastaddress) & fourmegmask || (startaddress & twofivesixmask) == starthole) { PRINTK (KERN_ERR, "check_area failure: [%x,%x] - mail maintainer!", startaddress, lastaddress); return -1; } else { return 0; } } /********** free an skb (as per ATM device driver documentation) **********/ static void amb_kfree_skb (struct sk_buff * skb) { if (ATM_SKB(skb)->vcc->pop) { ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb); } else { dev_kfree_skb_any (skb); } } /********** TX completion **********/ static void tx_complete (amb_dev * dev, tx_out * tx) { tx_simple * tx_descr = bus_to_virt (tx->handle); struct sk_buff * skb = tx_descr->skb; PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx); // VC layer stats atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); // free the descriptor kfree (tx_descr); // free the skb amb_kfree_skb (skb); dev->stats.tx_ok++; return; } /********** RX completion **********/ static void rx_complete (amb_dev * dev, rx_out * rx) { struct sk_buff * skb = bus_to_virt (rx->handle); u16 vc = be16_to_cpu (rx->vc); // unused: u16 lec_id = be16_to_cpu (rx->lec_id); u16 status = be16_to_cpu (rx->status); u16 rx_len = be16_to_cpu (rx->length); PRINTD (DBG_FLOW|DBG_RX, "rx_complete %p %p (len=%hu)", dev, rx, rx_len); // XXX move this in and add to VC stats ??? 
if (!status) { struct atm_vcc * atm_vcc = dev->rxer[vc]; dev->stats.rx.ok++; if (atm_vcc) { if (rx_len <= atm_vcc->qos.rxtp.max_sdu) { if (atm_charge (atm_vcc, skb->truesize)) { // prepare socket buffer ATM_SKB(skb)->vcc = atm_vcc; skb_put (skb, rx_len); dump_skb ("<<<", vc, skb); // VC layer stats atomic_inc(&atm_vcc->stats->rx); __net_timestamp(skb); // end of our responsibility atm_vcc->push (atm_vcc, skb); return; } else { // someone fix this (message), please! PRINTD (DBG_INFO|DBG_RX, "dropped thanks to atm_charge (vc %hu, truesize %u)", vc, skb->truesize); // drop stats incremented in atm_charge } } else { PRINTK (KERN_INFO, "dropped over-size frame"); // should we count this? atomic_inc(&atm_vcc->stats->rx_drop); } } else { PRINTD (DBG_WARN|DBG_RX, "got frame but RX closed for channel %hu", vc); // this is an adapter bug, only in new version of microcode } } else { dev->stats.rx.error++; if (status & CRC_ERR) dev->stats.rx.badcrc++; if (status & LEN_ERR) dev->stats.rx.toolong++; if (status & ABORT_ERR) dev->stats.rx.aborted++; if (status & UNUSED_ERR) dev->stats.rx.unused++; } dev_kfree_skb_any (skb); return; } /* Note on queue handling. Here "give" and "take" refer to queue entries and a queue (pair) rather than frames to or from the host or adapter. Empty frame buffers are given to the RX queue pair and returned unused or containing RX frames. TX frames (well, pointers to TX fragment lists) are given to the TX queue pair, completions are returned. */ /********** command queue **********/ // I really don't like this, but it's the best I can do at the moment // also, the callers are responsible for byte order as the microcode // sometimes does 16-bit accesses (yuk yuk yuk) static int command_do (amb_dev * dev, command * cmd) { amb_cq * cq = &dev->cq; volatile amb_cq_ptrs * ptrs = &cq->ptrs; command * my_slot; PRINTD (DBG_FLOW|DBG_CMD, "command_do %p", dev); if (test_bit (dead, &dev->flags)) return 0; spin_lock (&cq->lock); // if not full... 
  if (cq->pending < cq->maximum) {
    // remember my slot for later
    my_slot = ptrs->in;
    PRINTD (DBG_CMD, "command in slot %p", my_slot);

    dump_command (cmd);

    // copy command in
    *ptrs->in = *cmd;
    cq->pending++;
    ptrs->in = NEXTQ (ptrs->in, ptrs->start, ptrs->limit);

    // mail the command
    wr_mem (dev, offsetof(amb_mem, mb.adapter.cmd_address), virt_to_bus (ptrs->in));

    if (cq->pending > cq->high)
      cq->high = cq->pending;
    spin_unlock (&cq->lock);

    // these comments were in a while-loop before, msleep removes the loop
    // go to sleep
    // PRINTD (DBG_CMD, "wait: sleeping %lu for command", timeout);
    msleep(cq->pending);

    // wait for my slot to be reached (all waiters are here or above, until...)
    while (ptrs->out != my_slot) {
      PRINTD (DBG_CMD, "wait: command slot (now at %p)", ptrs->out);
      set_current_state(TASK_UNINTERRUPTIBLE);
      schedule();
    }

    // wait on my slot (... one gets to its slot, and... )
    while (ptrs->out->request != cpu_to_be32 (SRB_COMPLETE)) {
      PRINTD (DBG_CMD, "wait: command slot completion");
      set_current_state(TASK_UNINTERRUPTIBLE);
      schedule();
    }

    PRINTD (DBG_CMD, "command complete");

    // update queue (... moves the queue along to the next slot)
    spin_lock (&cq->lock);
    cq->pending--;
    // copy command out
    *cmd = *ptrs->out;
    ptrs->out = NEXTQ (ptrs->out, ptrs->start, ptrs->limit);
    spin_unlock (&cq->lock);

    return 0;
  } else {
    cq->filled++;
    spin_unlock (&cq->lock);
    return -EAGAIN;
  }

}

/********** TX queue pair **********/

/* Hand one TX descriptor to the adapter and ring the doorbell;
   -EAGAIN when the TX queue is full. */
static int tx_give (amb_dev * dev, tx_in * tx) {
  amb_txq * txq = &dev->txq;
  unsigned long flags;

  PRINTD (DBG_FLOW|DBG_TX, "tx_give %p", dev);

  if (test_bit (dead, &dev->flags))
    return 0;

  spin_lock_irqsave (&txq->lock, flags);

  if (txq->pending < txq->maximum) {
    PRINTD (DBG_TX, "TX in slot %p", txq->in.ptr);

    *txq->in.ptr = *tx;
    txq->pending++;
    txq->in.ptr = NEXTQ (txq->in.ptr, txq->in.start, txq->in.limit);
    // hand over the TX and ring the bell
    wr_mem (dev, offsetof(amb_mem, mb.adapter.tx_address), virt_to_bus (txq->in.ptr));
    wr_mem (dev, offsetof(amb_mem, doorbell), TX_FRAME);

    if (txq->pending > txq->high)
      txq->high = txq->pending;
    spin_unlock_irqrestore (&txq->lock, flags);
    return 0;
  } else {
    txq->filled++;
    spin_unlock_irqrestore (&txq->lock, flags);
    return -EAGAIN;
  }
}

/* Reap one TX completion if one is outstanding; -1 when there is
   nothing to take. */
static int tx_take (amb_dev * dev) {
  amb_txq * txq = &dev->txq;
  unsigned long flags;

  PRINTD (DBG_FLOW|DBG_TX, "tx_take %p", dev);

  spin_lock_irqsave (&txq->lock, flags);

  if (txq->pending && txq->out.ptr->handle) {
    // deal with TX completion
    tx_complete (dev, txq->out.ptr);
    // mark unused again
    txq->out.ptr->handle = 0;
    // remove item
    txq->pending--;
    txq->out.ptr = NEXTQ (txq->out.ptr, txq->out.start, txq->out.limit);

    spin_unlock_irqrestore (&txq->lock, flags);
    return 0;
  } else {
    spin_unlock_irqrestore (&txq->lock, flags);
    return -1;
  }
}

/********** RX queue pairs **********/

/* Give one empty RX buffer descriptor to the adapter's pool queue;
   -1 when the queue is full. */
static int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {
  amb_rxq * rxq = &dev->rxq[pool];
  unsigned long flags;

  PRINTD (DBG_FLOW|DBG_RX, "rx_give %p[%hu]", dev, pool);

  spin_lock_irqsave (&rxq->lock, flags);

  if (rxq->pending < rxq->maximum) {
    PRINTD (DBG_RX, "RX in slot %p", rxq->in.ptr);

    *rxq->in.ptr = *rx;
    rxq->pending++;
    rxq->in.ptr = NEXTQ (rxq->in.ptr, rxq->in.start, rxq->in.limit);
    // hand over the RX buffer
    wr_mem (dev, offsetof(amb_mem, mb.adapter.rx_address[pool]), virt_to_bus (rxq->in.ptr));

    spin_unlock_irqrestore (&rxq->lock, flags);
    return 0;
  } else {
    spin_unlock_irqrestore (&rxq->lock, flags);
    return -1;
  }
}

/* Reap one RX completion from the given pool; -1 when there is
   nothing to take (also counts "emptied" when buffers are wanted). */
static int rx_take (amb_dev * dev, unsigned char pool) {
  amb_rxq * rxq = &dev->rxq[pool];
  unsigned long flags;

  PRINTD (DBG_FLOW|DBG_RX, "rx_take %p[%hu]", dev, pool);

  spin_lock_irqsave (&rxq->lock, flags);

  if (rxq->pending && (rxq->out.ptr->status || rxq->out.ptr->length)) {
    // deal with RX completion
    rx_complete (dev, rxq->out.ptr);
    // mark unused again
    rxq->out.ptr->status = 0;
    rxq->out.ptr->length = 0;
    // remove item
    rxq->pending--;
    rxq->out.ptr = NEXTQ (rxq->out.ptr, rxq->out.start, rxq->out.limit);

    if (rxq->pending < rxq->low)
      rxq->low = rxq->pending;
    spin_unlock_irqrestore (&rxq->lock, flags);
    return 0;
  } else {
    if (!rxq->pending && rxq->buffers_wanted)
      rxq->emptied++;
    spin_unlock_irqrestore (&rxq->lock, flags);
    return -1;
  }
}

/********** RX Pool handling **********/

/* pre: buffers_wanted = 0, post: pending = 0 */
static void drain_rx_pool (amb_dev * dev, unsigned char pool) {
  amb_rxq * rxq = &dev->rxq[pool];

  PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pool %p %hu", dev, pool);

  if (test_bit (dead, &dev->flags))
    return;

  /* we are not quite like the fill pool routines as we cannot just
     remove one buffer, we have to remove all of them, but we might as
     well pretend... */
  if (rxq->pending > rxq->buffers_wanted) {
    command cmd;
    cmd.request = cpu_to_be32 (SRB_FLUSH_BUFFER_Q);
    cmd.args.flush.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT);
    while (command_do (dev, &cmd))
      schedule();
    /* the pool may also be emptied via the interrupt handler */
    while (rxq->pending > rxq->buffers_wanted)
      if (rx_take (dev, pool))
	schedule();
  }

  return;
}

/* Drain every RX pool in turn. */
static void drain_rx_pools (amb_dev * dev) {
  unsigned char pool;

  PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pools %p", dev);

  for (pool = 0; pool < NUM_RX_POOLS; ++pool)
    drain_rx_pool (dev, pool);
}

/* Allocate skbs and hand them to the adapter until the pool holds
   buffers_wanted (bounded by maximum) buffers, or allocation fails. */
static void fill_rx_pool (amb_dev * dev, unsigned char pool,
			  gfp_t priority) {
  rx_in rx;
  amb_rxq * rxq;

  PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pool %p %hu %x", dev, pool, priority);

  if (test_bit (dead, &dev->flags))
    return;

  rxq = &dev->rxq[pool];
  while (rxq->pending < rxq->maximum && rxq->pending < rxq->buffers_wanted) {

    struct sk_buff * skb = alloc_skb (rxq->buffer_size, priority);
    if (!skb) {
      PRINTD (DBG_SKB|DBG_POOL, "failed to allocate skb for RX pool %hu", pool);
      return;
    }
    if (check_area (skb->data, skb->truesize)) {
      dev_kfree_skb_any (skb);
      return;
    }
    // cast needed as there is no %?
for pointer differences PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li", skb, skb->head, (long) (skb_end_pointer(skb) - skb->head)); rx.handle = virt_to_bus (skb); rx.host_address = cpu_to_be32 (virt_to_bus (skb->data)); if (rx_give (dev, &rx, pool)) dev_kfree_skb_any (skb); } return; } // top up all RX pools static void fill_rx_pools (amb_dev * dev) { unsigned char pool; PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pools %p", dev); for (pool = 0; pool < NUM_RX_POOLS; ++pool) fill_rx_pool (dev, pool, GFP_ATOMIC); return; } /********** enable host interrupts **********/ static void interrupts_on (amb_dev * dev) { wr_plain (dev, offsetof(amb_mem, interrupt_control), rd_plain (dev, offsetof(amb_mem, interrupt_control)) | AMB_INTERRUPT_BITS); } /********** disable host interrupts **********/ static void interrupts_off (amb_dev * dev) { wr_plain (dev, offsetof(amb_mem, interrupt_control), rd_plain (dev, offsetof(amb_mem, interrupt_control)) &~ AMB_INTERRUPT_BITS); } /********** interrupt handling **********/ static irqreturn_t interrupt_handler(int irq, void *dev_id) { amb_dev * dev = dev_id; PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler: %p", dev_id); { u32 interrupt = rd_plain (dev, offsetof(amb_mem, interrupt)); // for us or someone else sharing the same interrupt if (!interrupt) { PRINTD (DBG_IRQ, "irq not for me: %d", irq); return IRQ_NONE; } // definitely for us PRINTD (DBG_IRQ, "FYI: interrupt was %08x", interrupt); wr_plain (dev, offsetof(amb_mem, interrupt), -1); } { unsigned int irq_work = 0; unsigned char pool; for (pool = 0; pool < NUM_RX_POOLS; ++pool) while (!rx_take (dev, pool)) ++irq_work; while (!tx_take (dev)) ++irq_work; if (irq_work) { fill_rx_pools (dev); PRINTD (DBG_IRQ, "work done: %u", irq_work); } else { PRINTD (DBG_IRQ|DBG_WARN, "no work done"); } } PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler done: %p", dev_id); return IRQ_HANDLED; } /********** make rate (not quite as much fun as Horizon) **********/ static int make_rate (unsigned int rate, 
		      rounding r,
		      u16 * bits, unsigned int * actual) {
  unsigned char exp = -1; // hush gcc
  unsigned int man = -1;  // hush gcc

  PRINTD (DBG_FLOW|DBG_QOS, "make_rate %u", rate);

  // rates in cells per second, ITU format (nasty 16-bit floating-point)
  // given 5-bit e and 9-bit m:
  // rate = EITHER (1+m/2^9)*2^e OR 0
  // bits = EITHER 1<<14 | e<<9 | m OR 0
  // (bit 15 is "reserved", bit 14 "non-zero")
  // smallest rate is 0 (special representation)
  // largest rate is (1+511/512)*2^31 = 4290772992 (< 2^32-1)
  // smallest non-zero rate is (1+0/512)*2^0 = 1 (> 0)
  // simple algorithm:
  // find position of top bit, this gives e
  // remove top bit and shift (rounding if feeling clever) by 9-e

  // ucode bug: please don't set bit 14! so 0 rate not representable

  if (rate > 0xffc00000U) {
    // larger than largest representable rate

    if (r == round_up) {
	return -EINVAL;
    } else {
      exp = 31;
      man = 511;
    }

  } else if (rate) {
    // representable rate

    exp = 31;
    man = rate;

    // invariant: rate = man*2^(exp-31)
    while (!(man & (1<<31))) {
      exp = exp - 1;
      man = man<<1;
    }

    // man has top bit set
    // rate = (2^31+(man-2^31))*2^(exp-31)
    // rate = (1+(man-2^31)/2^31)*2^exp
    man = man<<1;
    man &= 0xffffffffU; // a nop on 32-bit systems
    // rate = (1+man/2^32)*2^exp

    // exp is in the range 0 to 31, man is in the range 0 to 2^32-1
    // time to lose significance... we want m in the range 0 to 2^9-1
    // rounding presents a minor problem... we first decide which way
    // we are rounding (based on given rounding direction and possibly
    // the bits of the mantissa that are to be discarded).

    switch (r) {
      case round_down: {
	// just truncate
	man = man>>(32-9);
	break;
      }
      case round_up: {
	// check all bits that we are discarding
	if (man & (~0U>>9)) {
	  man = (man>>(32-9)) + 1;
	  if (man == (1<<9)) {
	    // no need to check for round up outside of range
	    man = 0;
	    exp += 1;
	  }
	} else {
	  man = (man>>(32-9));
	}
	break;
      }
      case round_nearest: {
	// check msb that we are discarding
	if (man & (1<<(32-9-1))) {
	  man = (man>>(32-9)) + 1;
	  if (man == (1<<9)) {
	    // no need to check for round up outside of range
	    man = 0;
	    exp += 1;
	  }
	} else {
	  man = (man>>(32-9));
	}
	break;
      }
    }

  } else {
    // zero rate - not representable

    if (r == round_down) {
      return -EINVAL;
    } else {
      exp = 0;
      man = 0;
    }

  }

  PRINTD (DBG_QOS, "rate: man=%u, exp=%hu", man, exp);

  if (bits)
    *bits = /* (1<<14) | */ (exp<<9) | man;

  if (actual)
    *actual = (exp >= 9)
      ? (1 << exp) + (man << (exp-9))
      : (1 << exp) + ((man + (1<<(9-exp-1))) >> (9-exp));

  return 0;
}

/********** Linux ATM Operations **********/

// some are not yet implemented while others do not make sense for
// this device

/********** Open a VC **********/

/* atmdev_ops.open: validate VPI/VCI and QoS (AAL5, UBR only), pick an
   RX buffer pool, then program the adapter's per-VC TX rate/flags and
   RX pool via the command queue. */
static int amb_open (struct atm_vcc * atm_vcc)
{
  int error;

  struct atm_qos * qos;
  struct atm_trafprm * txtp;
  struct atm_trafprm * rxtp;
  u16 tx_rate_bits = -1; // hush gcc
  u16 tx_vc_bits = -1; // hush gcc
  u16 tx_frame_bits = -1; // hush gcc

  amb_dev * dev = AMB_DEV(atm_vcc->dev);
  amb_vcc * vcc;
  unsigned char pool = -1; // hush gcc
  short vpi = atm_vcc->vpi;
  int vci = atm_vcc->vci;

  PRINTD (DBG_FLOW|DBG_VCC, "amb_open %x %x", vpi, vci);

#ifdef ATM_VPI_UNSPEC
  // UNSPEC is deprecated, remove this code eventually
  if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) {
    PRINTK (KERN_WARNING, "rejecting open with unspecified VPI/VCI (deprecated)");
    return -EINVAL;
  }
#endif

  if (!(0 <= vpi && vpi < (1<<NUM_VPI_BITS) &&
	0 <= vci && vci < (1<<NUM_VCI_BITS))) {
    PRINTD (DBG_WARN|DBG_VCC, "VPI/VCI out of range: %hd/%d", vpi, vci);
    return -EINVAL;
  }

  qos = &atm_vcc->qos;

  if (qos->aal != ATM_AAL5) {
    PRINTD (DBG_QOS, "AAL not supported");
    return -EINVAL;
  }

  // traffic parameters

  PRINTD (DBG_QOS, "TX:");
  txtp = &qos->txtp;
  if (txtp->traffic_class != ATM_NONE) {
    switch (txtp->traffic_class) {
      case ATM_UBR: {
	// we take "the PCR" as a rate-cap
	int pcr = atm_pcr_goal (txtp);
	if (!pcr) {
	  // no rate cap
	  tx_rate_bits = 0;
	  tx_vc_bits = TX_UBR;
	  tx_frame_bits = TX_FRAME_NOTCAP;
	} else {
	  rounding r;
	  if (pcr < 0) {
	    r = round_down;
	    pcr = -pcr;
	  } else {
	    r = round_up;
	  }
	  error = make_rate (pcr, r, &tx_rate_bits, NULL);
	  if (error)
	    return error;
	  tx_vc_bits = TX_UBR_CAPPED;
	  tx_frame_bits = TX_FRAME_CAPPED;
	}
	break;
      }
#if 0
      case ATM_ABR: {
	pcr = atm_pcr_goal (txtp);
	PRINTD (DBG_QOS, "pcr goal = %d", pcr);
	break;
      }
#endif
      default: {
	// PRINTD (DBG_QOS, "request for non-UBR/ABR denied");
	PRINTD (DBG_QOS, "request for non-UBR denied");
	return -EINVAL;
      }
    }
    PRINTD (DBG_QOS, "tx_rate_bits=%hx, tx_vc_bits=%hx",
	    tx_rate_bits, tx_vc_bits);
  }

  PRINTD (DBG_QOS, "RX:");
  rxtp = &qos->rxtp;
  if (rxtp->traffic_class == ATM_NONE) {
    // do nothing
  } else {
    // choose an RX pool (arranged in increasing size)
    for (pool = 0; pool < NUM_RX_POOLS; ++pool)
      if ((unsigned int) rxtp->max_sdu <= dev->rxq[pool].buffer_size) {
	PRINTD (DBG_VCC|DBG_QOS|DBG_POOL, "chose pool %hu (max_sdu %u <= %u)",
		pool, rxtp->max_sdu, dev->rxq[pool].buffer_size);
	break;
      }
    if (pool == NUM_RX_POOLS) {
      PRINTD (DBG_WARN|DBG_VCC|DBG_QOS|DBG_POOL,
	      "no pool suitable for VC (RX max_sdu %d is too large)",
	      rxtp->max_sdu);
      return -EINVAL;
    }
    switch (rxtp->traffic_class) {
      case ATM_UBR: {
	break;
      }
#if 0
      case ATM_ABR: {
	pcr = atm_pcr_goal (rxtp);
	PRINTD (DBG_QOS, "pcr goal = %d", pcr);
	break;
      }
#endif
      default: {
	// PRINTD (DBG_QOS, "request for non-UBR/ABR denied");
	PRINTD (DBG_QOS, "request for non-UBR denied");
	return -EINVAL;
      }
    }
  }

  // get space for our vcc stuff
  vcc = kmalloc (sizeof(amb_vcc), GFP_KERNEL);
  if (!vcc) {
    PRINTK (KERN_ERR, "out of memory!");
    return -ENOMEM;
  }
  atm_vcc->dev_data = (void *) vcc;

  // no failures beyond this point

  // we are not really "immediately before allocating the connection
  // identifier in hardware", but it will just have to do!
  set_bit(ATM_VF_ADDR,&atm_vcc->flags);

  if (txtp->traffic_class != ATM_NONE) {
    command cmd;

    vcc->tx_frame_bits = tx_frame_bits;

    mutex_lock(&dev->vcc_sf);
    if (dev->rxer[vci]) {
      // RXer on the channel already, just modify rate...
      cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
      cmd.args.modify_rate.vc = cpu_to_be32 (vci);  // vpi 0
      cmd.args.modify_rate.rate = cpu_to_be32 (tx_rate_bits << SRB_RATE_SHIFT);
      while (command_do (dev, &cmd))
	schedule();
      // ... and TX flags, preserving the RX pool
      cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
      cmd.args.modify_flags.vc = cpu_to_be32 (vci);  // vpi 0
      cmd.args.modify_flags.flags = cpu_to_be32
	( (AMB_VCC(dev->rxer[vci])->rx_info.pool << SRB_POOL_SHIFT)
	  | (tx_vc_bits << SRB_FLAGS_SHIFT) );
      while (command_do (dev, &cmd))
	schedule();
    } else {
      // no RXer on the channel, just open (with pool zero)
      cmd.request = cpu_to_be32 (SRB_OPEN_VC);
      cmd.args.open.vc = cpu_to_be32 (vci);  // vpi 0
      cmd.args.open.flags = cpu_to_be32 (tx_vc_bits << SRB_FLAGS_SHIFT);
      cmd.args.open.rate = cpu_to_be32 (tx_rate_bits << SRB_RATE_SHIFT);
      while (command_do (dev, &cmd))
	schedule();
    }
    dev->txer[vci].tx_present = 1;
    mutex_unlock(&dev->vcc_sf);
  }

  if (rxtp->traffic_class != ATM_NONE) {
    command cmd;

    vcc->rx_info.pool = pool;

    mutex_lock(&dev->vcc_sf);
    /* grow RX buffer pool */
    if (!dev->rxq[pool].buffers_wanted)
      dev->rxq[pool].buffers_wanted = rx_lats;
    dev->rxq[pool].buffers_wanted += 1;
    fill_rx_pool (dev, pool, GFP_KERNEL);

    if (dev->txer[vci].tx_present) {
      // TXer on the channel already
      // switch (from pool zero) to this pool, preserving the TX bits
      cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
      cmd.args.modify_flags.vc = cpu_to_be32 (vci);  // vpi 0
      cmd.args.modify_flags.flags = cpu_to_be32
	( (pool << SRB_POOL_SHIFT)
	  | (dev->txer[vci].tx_vc_bits << SRB_FLAGS_SHIFT) );
    } else {
      // no TXer on the channel, open the VC (with no rate info)
      cmd.request = cpu_to_be32 (SRB_OPEN_VC);
      cmd.args.open.vc = cpu_to_be32 (vci);  // vpi 0
      cmd.args.open.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT);
      cmd.args.open.rate = cpu_to_be32 (0);
    }
    while (command_do (dev, &cmd))
      schedule();
    // this link allows RX frames through
    dev->rxer[vci] = atm_vcc;
    mutex_unlock(&dev->vcc_sf);
  }

  // indicate readiness
  set_bit(ATM_VF_READY,&atm_vcc->flags);

  return 0;
}

/********** Close a VC **********/

/* atmdev_ops.close: undo amb_open - disable TX, unhook the RX side,
   shrink the RX buffer pool and free the private vcc structure. */
static void amb_close (struct atm_vcc * atm_vcc) {
  amb_dev * dev = AMB_DEV (atm_vcc->dev);
  amb_vcc * vcc = AMB_VCC (atm_vcc);
  u16 vci = atm_vcc->vci;

  PRINTD (DBG_VCC|DBG_FLOW, "amb_close");

  // indicate unreadiness
  clear_bit(ATM_VF_READY,&atm_vcc->flags);

  // disable TXing
  if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) {
    command cmd;

    mutex_lock(&dev->vcc_sf);
    if (dev->rxer[vci]) {
      // RXer still on the channel, just modify rate... XXX not really needed
      cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
      cmd.args.modify_rate.vc = cpu_to_be32 (vci);  // vpi 0
      cmd.args.modify_rate.rate = cpu_to_be32 (0);
      // ... and clear TX rate flags (XXX to stop RM cell output?), preserving RX pool
    } else {
      // no RXer on the channel, close channel
      cmd.request = cpu_to_be32 (SRB_CLOSE_VC);
      cmd.args.close.vc = cpu_to_be32 (vci); // vpi 0
    }
    dev->txer[vci].tx_present = 0;
    while (command_do (dev, &cmd))
      schedule();
    mutex_unlock(&dev->vcc_sf);
  }

  // disable RXing
  if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
    command cmd;

    // this is (the?) one reason why we need the amb_vcc struct
    unsigned char pool = vcc->rx_info.pool;

    mutex_lock(&dev->vcc_sf);
    if (dev->txer[vci].tx_present) {
      // TXer still on the channel, just go to pool zero XXX not really needed
      cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
      cmd.args.modify_flags.vc = cpu_to_be32 (vci);  // vpi 0
      cmd.args.modify_flags.flags = cpu_to_be32
	(dev->txer[vci].tx_vc_bits << SRB_FLAGS_SHIFT);
    } else {
      // no TXer on the channel, close the VC
      cmd.request = cpu_to_be32 (SRB_CLOSE_VC);
      cmd.args.close.vc = cpu_to_be32 (vci); // vpi 0
    }
    // forget the rxer - no more skbs will be pushed
    if (atm_vcc != dev->rxer[vci])
      PRINTK (KERN_ERR, "%s vcc=%p rxer[vci]=%p",
	      "arghhh! we're going to die!",
	      vcc, dev->rxer[vci]);
    dev->rxer[vci] = NULL;
    while (command_do (dev, &cmd))
      schedule();

    /* shrink RX buffer pool */
    dev->rxq[pool].buffers_wanted -= 1;
    if (dev->rxq[pool].buffers_wanted == rx_lats) {
      dev->rxq[pool].buffers_wanted = 0;
      drain_rx_pool (dev, pool);
    }
    mutex_unlock(&dev->vcc_sf);
  }

  // free our structure
  kfree (vcc);

  // say the VPI/VCI is free again
  clear_bit(ATM_VF_ADDR,&atm_vcc->flags);

  return;
}

/********** Send **********/

/* atmdev_ops.send: build a single-fragment TX descriptor for the skb
   and queue it to the adapter; the skb is freed in tx_complete. */
static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
  amb_dev * dev = AMB_DEV(atm_vcc->dev);
  amb_vcc * vcc = AMB_VCC(atm_vcc);
  u16 vc = atm_vcc->vci;
  unsigned int tx_len = skb->len;
  unsigned char * tx_data = skb->data;
  tx_simple * tx_descr;
  tx_in tx;

  if (test_bit (dead, &dev->flags))
    return -EIO;

  PRINTD (DBG_FLOW|DBG_TX, "amb_send vc %x data %p len %u",
	  vc, tx_data, tx_len);

  dump_skb (">>>", vc, skb);

  if (!dev->txer[vc].tx_present) {
    PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", vc);
    return -EBADFD;
  }

  // this is a driver private field so we have to set it ourselves,
  // despite the fact that we are _required_ to use it to check for a
  // pop function
  ATM_SKB(skb)->vcc = atm_vcc;

  if (skb->len > (size_t) atm_vcc->qos.txtp.max_sdu) {
    PRINTK (KERN_ERR, "sk_buff length greater than agreed max_sdu, dropping...");
    return -EIO;
  }

  if (check_area (skb->data, skb->len)) {
    atomic_inc(&atm_vcc->stats->tx_err);
    return -ENOMEM; // ?
  }

  // allocate memory for fragments
  tx_descr = kmalloc (sizeof(tx_simple), GFP_KERNEL);
  if (!tx_descr) {
    PRINTK (KERN_ERR, "could not allocate TX descriptor");
    return -ENOMEM;
  }
  if (check_area (tx_descr, sizeof(tx_simple))) {
    kfree (tx_descr);
    return -ENOMEM;
  }
  PRINTD (DBG_TX, "fragment list allocated at %p", tx_descr);

  tx_descr->skb = skb;

  tx_descr->tx_frag.bytes = cpu_to_be32 (tx_len);
  tx_descr->tx_frag.address = cpu_to_be32 (virt_to_bus (tx_data));

  tx_descr->tx_frag_end.handle = virt_to_bus (tx_descr);
  tx_descr->tx_frag_end.vc = 0;
  tx_descr->tx_frag_end.next_descriptor_length = 0;
  tx_descr->tx_frag_end.next_descriptor = 0;
#ifdef AMB_NEW_MICROCODE
  tx_descr->tx_frag_end.cpcs_uu = 0;
  tx_descr->tx_frag_end.cpi = 0;
  tx_descr->tx_frag_end.pad = 0;
#endif

  tx.vc = cpu_to_be16 (vcc->tx_frame_bits | vc);
  tx.tx_descr_length = cpu_to_be16 (sizeof(tx_frag)+sizeof(tx_frag_end));
  tx.tx_descr_addr = cpu_to_be32 (virt_to_bus (&tx_descr->tx_frag));

  while (tx_give (dev, &tx))
    schedule();
  return 0;
}

/********** Change QoS on a VC **********/

// int amb_change_qos (struct atm_vcc * atm_vcc, struct atm_qos * qos, int flags);

/********** Free RX Socket Buffer **********/

#if 0
/* Recycle an RX skb straight back into its adapter pool instead of
   freeing it (disabled; see caveats in the comment below). */
static void amb_free_rx_skb (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
  amb_dev * dev = AMB_DEV (atm_vcc->dev);
  amb_vcc * vcc = AMB_VCC (atm_vcc);
  unsigned char pool = vcc->rx_info.pool;
  rx_in rx;

  // This may be unsafe for various reasons that I cannot really guess
  // at. However, I note that the ATM layer calls kfree_skb rather
  // than dev_kfree_skb at this point so we are least covered as far
  // as buffer locking goes. There may be bugs if pcap clones RX skbs.

  PRINTD (DBG_FLOW|DBG_SKB, "amb_rx_free skb %p (atm_vcc %p, vcc %p)",
	  skb, atm_vcc, vcc);

  rx.handle = virt_to_bus (skb);
  rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));

  skb->data = skb->head;
  skb->tail = skb->head;
  skb->len = 0;

  if (!rx_give (dev, &rx, pool)) {
    // success
    PRINTD (DBG_SKB|DBG_POOL, "recycled skb for pool %hu", pool);
    return;
  }

  // just do what the ATM layer would have done
  dev_kfree_skb_any (skb);

  return;
}
#endif

/********** Proc File Output **********/

/* atmdev_ops.proc_read: emit one line of statistics per read
   position (*pos); returns 0 when there is nothing more to report. */
static int amb_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) {
  amb_dev * dev = AMB_DEV (atm_dev);
  int left = *pos;
  unsigned char pool;

  PRINTD (DBG_FLOW, "amb_proc_read");

  /* more diagnostics here? */

  if (!left--) {
    amb_stats * s = &dev->stats;
    return sprintf (page,
		    "frames: TX OK %lu, RX OK %lu, RX bad %lu "
		    "(CRC %lu, long %lu, aborted %lu, unused %lu).\n",
		    s->tx_ok, s->rx.ok, s->rx.error,
		    s->rx.badcrc, s->rx.toolong,
		    s->rx.aborted, s->rx.unused);
  }

  if (!left--) {
    amb_cq * c = &dev->cq;
    return sprintf (page, "cmd queue [cur/hi/max]: %u/%u/%u. ",
		    c->pending, c->high, c->maximum);
  }

  if (!left--) {
    amb_txq * t = &dev->txq;
    return sprintf (page, "TX queue [cur/max high full]: %u/%u %u %u.\n",
		    t->pending, t->maximum, t->high, t->filled);
  }

  if (!left--) {
    unsigned int count = sprintf (page, "RX queues [cur/max/req low empty]:");
    for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
      amb_rxq * r = &dev->rxq[pool];
      count += sprintf (page+count, " %u/%u/%u %u %u",
			r->pending, r->maximum, r->buffers_wanted, r->low, r->emptied);
    }
    count += sprintf (page+count, ".\n");
    return count;
  }

  if (!left--) {
    unsigned int count = sprintf (page, "RX buffer sizes:");
    for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
      amb_rxq * r = &dev->rxq[pool];
      count += sprintf (page+count, " %u", r->buffer_size);
    }
    count += sprintf (page+count, ".\n");
    return count;
  }

#if 0
  if (!left--) {
    // suni block etc?
  }
#endif

  return 0;
}

/********** Operation Structure **********/

static const struct atmdev_ops amb_ops = {
  .open         = amb_open,
  .close	= amb_close,
  .send         = amb_send,
  .proc_read	= amb_proc_read,
  .owner	= THIS_MODULE,
};

/********** housekeeping **********/
/* Periodic timer callback: last-resort RX pool refill every 10s. */
static void do_housekeeping (unsigned long arg) {
  amb_dev * dev = (amb_dev *) arg;

  // could collect device-specific (not driver/atm-linux) stats here

  // last resort refill once every ten seconds
  fill_rx_pools (dev);
  mod_timer(&dev->housekeeping, jiffies + 10*HZ);

  return;
}

/********** creation of communication queues **********/

/* Allocate one contiguous area holding the command queue, the TX
   in/out queue pair and each pool's RX in/out queue pair, and set up
   all the ring pointers.  Returns 0 or -ENOMEM. */
static int __devinit create_queues (amb_dev * dev, unsigned int cmds,
				    unsigned int txs, unsigned int * rxs,
				    unsigned int * rx_buffer_sizes) {
  unsigned char pool;
  size_t total = 0;
  void * memory;
  void * limit;

  PRINTD (DBG_FLOW, "create_queues %p", dev);

  total += cmds * sizeof(command);

  total += txs * (sizeof(tx_in) + sizeof(tx_out));

  for (pool = 0; pool < NUM_RX_POOLS; ++pool)
    total += rxs[pool] * (sizeof(rx_in) + sizeof(rx_out));

  memory = kmalloc (total, GFP_KERNEL);
  if (!memory) {
    PRINTK (KERN_ERR, "could not allocate queues");
    return -ENOMEM;
  }
  if (check_area (memory, total)) {
    PRINTK (KERN_ERR, "queues allocated in nasty area");
    kfree (memory);
    return -ENOMEM;
  }

  limit = memory + total;
  PRINTD (DBG_INIT, "queues from %p to %p", memory, limit);

  PRINTD (DBG_CMD, "command queue at %p", memory);

  {
    command * cmd = memory;
    amb_cq * cq = &dev->cq;

    cq->pending = 0;
    cq->high = 0;
    cq->maximum = cmds - 1;

    cq->ptrs.start = cmd;
    cq->ptrs.in = cmd;
    cq->ptrs.out = cmd;
    cq->ptrs.limit = cmd + cmds;

    memory = cq->ptrs.limit;
  }

  PRINTD (DBG_TX, "TX queue pair at %p", memory);

  {
    tx_in * in = memory;
    tx_out * out;
    amb_txq * txq = &dev->txq;

    txq->pending = 0;
    txq->high = 0;
    txq->filled = 0;
    txq->maximum = txs - 1;

    txq->in.start = in;
    txq->in.ptr = in;
    txq->in.limit = in + txs;

    memory = txq->in.limit;
    out = memory;

    txq->out.start = out;
    txq->out.ptr = out;
    txq->out.limit = out + txs;

    memory = txq->out.limit;
  }

  PRINTD (DBG_RX, "RX queue pairs at %p", memory);

  for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
    rx_in * in = memory;
    rx_out * out;
    amb_rxq * rxq = &dev->rxq[pool];

    rxq->buffer_size = rx_buffer_sizes[pool];
    rxq->buffers_wanted = 0;

    rxq->pending = 0;
    rxq->low = rxs[pool] - 1;
    rxq->emptied = 0;
    rxq->maximum = rxs[pool] - 1;

    rxq->in.start = in;
    rxq->in.ptr = in;
    rxq->in.limit = in + rxs[pool];

    memory = rxq->in.limit;
    out = memory;

    rxq->out.start = out;
    rxq->out.ptr = out;
    rxq->out.limit = out + rxs[pool];

    memory = rxq->out.limit;
  }

  if (memory == limit) {
    return 0;
  } else {
    PRINTK (KERN_ERR, "bad queue alloc %p != %p (tell maintainer)", memory, limit);
    kfree (limit - total);
    return -ENOMEM;
  }

}

/********** destruction of communication queues **********/

static void destroy_queues (amb_dev * dev) {
  // all queues assumed empty
  void * memory = dev->cq.ptrs.start;
  // includes txq.in, txq.out, rxq[].in and rxq[].out

  PRINTD (DBG_FLOW, "destroy_queues %p", dev);

  PRINTD (DBG_INIT, "freeing queues at %p", memory);
  kfree (memory);

  return;
}

/********** basic loader commands and error handling **********/
// centisecond timeouts - guessing away here
static unsigned int command_timeouts [] = {
	[host_memory_test]     = 15,
	[read_adapter_memory]  = 2,
	[write_adapter_memory] = 2,
	[adapter_start]        = 50,
	[get_version_number]   = 10,
	[interrupt_host]       = 1,
	[flash_erase_sector]   = 1,
	[adap_download_block]  = 1,
	[adap_erase_flash]     = 1,
	[adap_run_in_iram]     = 1,
	[adap_end_download]    = 1
};

static unsigned int command_successes [] = {
	[host_memory_test]     = COMMAND_PASSED_TEST,
	[read_adapter_memory]  = COMMAND_READ_DATA_OK,
	[write_adapter_memory] = COMMAND_WRITE_DATA_OK,
	[adapter_start]        = COMMAND_COMPLETE,
	[get_version_number]   = COMMAND_COMPLETE,
	[interrupt_host]       = COMMAND_COMPLETE,
	[flash_erase_sector]   = COMMAND_COMPLETE,
	[adap_download_block]  = COMMAND_COMPLETE,
	[adap_erase_flash]     = COMMAND_COMPLETE,
	[adap_run_in_iram]     = COMMAND_COMPLETE,
	[adap_end_download]    = COMMAND_COMPLETE
};

static int
decode_loader_result (loader_command cmd, u32 result)
{
	int res;
	const char *msg;

	/* map a loader result code to an errno, logging a description */
	if (result == command_successes[cmd])
		return 0;

	switch (result) {
		case BAD_COMMAND:
			res = -EINVAL;
			msg = "bad command";
			break;
		case COMMAND_IN_PROGRESS:
			res = -ETIMEDOUT;
			msg = "command in progress";
			break;
		case COMMAND_PASSED_TEST:
			res = 0;
			msg = "command passed test";
			break;
		case COMMAND_FAILED_TEST:
			res = -EIO;
			msg = "command failed test";
			break;
		case COMMAND_READ_DATA_OK:
			res = 0;
			msg = "command read data ok";
			break;
		case COMMAND_READ_BAD_ADDRESS:
			res = -EINVAL;
			msg = "command read bad address";
			break;
		case COMMAND_WRITE_DATA_OK:
			res = 0;
			msg = "command write data ok";
			break;
		case COMMAND_WRITE_BAD_ADDRESS:
			res = -EINVAL;
			msg = "command write bad address";
			break;
		case COMMAND_WRITE_FLASH_FAILURE:
			res = -EIO;
			msg = "command write flash failure";
			break;
		case COMMAND_COMPLETE:
			res = 0;
			msg = "command complete";
			break;
		case COMMAND_FLASH_ERASE_FAILURE:
			res = -EIO;
			msg = "command flash erase failure";
			break;
		case COMMAND_WRITE_BAD_DATA:
			res = -EINVAL;
			msg = "command write bad data";
			break;
		default:
			res = -EINVAL;
			msg = "unknown error";
			PRINTD (DBG_LOAD|DBG_ERR,
				"decode_loader_result got %d=%x !",
				result, result);
			break;
	}

	PRINTK (KERN_ERR, "%s", msg);
	return res;
}

/* Issue one loader command via the shared loader block, polling for
   completion with a per-command timeout. */
static int __devinit do_loader_command (volatile loader_block * lb,
					const amb_dev * dev, loader_command cmd) {

  unsigned long timeout;

  PRINTD (DBG_FLOW|DBG_LOAD, "do_loader_command");

  /* do a command

     Set the return value to zero, set the command type and set the
     valid entry to the right magic value. The payload is already
     correctly byte-ordered so we leave it alone. Hit the doorbell
     with the bus address of this structure.

  */

  lb->result = 0;
  lb->command = cpu_to_be32 (cmd);
  lb->valid = cpu_to_be32 (DMA_VALID);
  // dump_registers (dev);
  // dump_loader_block (lb);
  wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (lb) & ~onegigmask);

  timeout = command_timeouts[cmd] * 10;

  while (!lb->result || lb->result == cpu_to_be32 (COMMAND_IN_PROGRESS))
    if (timeout) {
      timeout = msleep_interruptible(timeout);
    } else {
      PRINTD (DBG_LOAD|DBG_ERR, "command %d timed out", cmd);
      dump_registers (dev);
      dump_loader_block (lb);
      return -ETIMEDOUT;
    }

  if (cmd == adapter_start) {
    // wait for start command to acknowledge...
    timeout = 100;
    while (rd_plain (dev, offsetof(amb_mem, doorbell)))
      if (timeout) {
	timeout = msleep_interruptible(timeout);
      } else {
	PRINTD (DBG_LOAD|DBG_ERR, "start command did not clear doorbell, res=%08x",
		be32_to_cpu (lb->result));
	dump_registers (dev);
	return -ETIMEDOUT;
      }
    return 0;
  } else {
    return decode_loader_result (cmd, be32_to_cpu (lb->result));
  }

}

/* loader: determine loader version */

static int __devinit get_loader_version (loader_block * lb,
					 const amb_dev * dev, u32 * version) {
  int res;

  PRINTD (DBG_FLOW|DBG_LOAD, "get_loader_version");

  res = do_loader_command (lb, dev, get_version_number);
  if (res)
    return res;
  if (version)
    *version = be32_to_cpu (lb->payload.version);
  return 0;
}

/* loader: write memory data blocks */

static int __devinit loader_write (loader_block* lb,
				   const amb_dev *dev,
				   const struct ihex_binrec *rec) {
  transfer_block * tb = &lb->payload.transfer;

  PRINTD (DBG_FLOW|DBG_LOAD, "loader_write");

  tb->address = rec->addr;
  tb->count = cpu_to_be32(be16_to_cpu(rec->len) / 4);
  memcpy(tb->data, rec->data, be16_to_cpu(rec->len));
  return do_loader_command (lb, dev, write_adapter_memory);
}

/* loader: verify memory data blocks */

static int __devinit loader_verify (loader_block * lb,
				    const amb_dev *dev,
				    const struct ihex_binrec *rec) {
  transfer_block * tb = &lb->payload.transfer;
  int res;

  PRINTD (DBG_FLOW|DBG_LOAD, "loader_verify");

  tb->address = rec->addr;
  tb->count =
cpu_to_be32(be16_to_cpu(rec->len) / 4); res = do_loader_command (lb, dev, read_adapter_memory); if (!res && memcmp(tb->data, rec->data, be16_to_cpu(rec->len))) res = -EINVAL; return res; } /* loader: start microcode */ static int __devinit loader_start (loader_block * lb, const amb_dev * dev, u32 address) { PRINTD (DBG_FLOW|DBG_LOAD, "loader_start"); lb->payload.start = cpu_to_be32 (address); return do_loader_command (lb, dev, adapter_start); } /********** reset card **********/ static inline void sf (const char * msg) { PRINTK (KERN_ERR, "self-test failed: %s", msg); } static int amb_reset (amb_dev * dev, int diags) { u32 word; PRINTD (DBG_FLOW|DBG_LOAD, "amb_reset"); word = rd_plain (dev, offsetof(amb_mem, reset_control)); // put card into reset state wr_plain (dev, offsetof(amb_mem, reset_control), word | AMB_RESET_BITS); // wait a short while udelay (10); #if 1 // put card into known good state wr_plain (dev, offsetof(amb_mem, interrupt_control), AMB_DOORBELL_BITS); // clear all interrupts just in case wr_plain (dev, offsetof(amb_mem, interrupt), -1); #endif // clear self-test done flag wr_plain (dev, offsetof(amb_mem, mb.loader.ready), 0); // take card out of reset state wr_plain (dev, offsetof(amb_mem, reset_control), word &~ AMB_RESET_BITS); if (diags) { unsigned long timeout; // 4.2 second wait msleep(4200); // half second time-out timeout = 500; while (!rd_plain (dev, offsetof(amb_mem, mb.loader.ready))) if (timeout) { timeout = msleep_interruptible(timeout); } else { PRINTD (DBG_LOAD|DBG_ERR, "reset timed out"); return -ETIMEDOUT; } // get results of self-test // XXX double check byte-order word = rd_mem (dev, offsetof(amb_mem, mb.loader.result)); if (word & SELF_TEST_FAILURE) { if (word & GPINT_TST_FAILURE) sf ("interrupt"); if (word & SUNI_DATA_PATTERN_FAILURE) sf ("SUNI data pattern"); if (word & SUNI_DATA_BITS_FAILURE) sf ("SUNI data bits"); if (word & SUNI_UTOPIA_FAILURE) sf ("SUNI UTOPIA interface"); if (word & SUNI_FIFO_FAILURE) sf ("SUNI cell 
buffer FIFO"); if (word & SRAM_FAILURE) sf ("bad SRAM"); // better return value? return -EIO; } } return 0; } /********** transfer and start the microcode **********/ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) { const struct firmware *fw; unsigned long start_address; const struct ihex_binrec *rec; const char *errmsg = 0; int res; res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev); if (res) { PRINTK (KERN_ERR, "Cannot load microcode data"); return res; } /* First record contains just the start address */ rec = (const struct ihex_binrec *)fw->data; if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) { errmsg = "no start record"; goto fail; } start_address = be32_to_cpup((__be32 *)rec->data); rec = ihex_next_binrec(rec); PRINTD (DBG_FLOW|DBG_LOAD, "ucode_init"); while (rec) { PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr), be16_to_cpu(rec->len)); if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) { errmsg = "record too long"; goto fail; } if (be16_to_cpu(rec->len) & 3) { errmsg = "odd number of bytes"; goto fail; } res = loader_write(lb, dev, rec); if (res) break; res = loader_verify(lb, dev, rec); if (res) break; } release_firmware(fw); if (!res) res = loader_start(lb, dev, start_address); return res; fail: release_firmware(fw); PRINTK(KERN_ERR, "Bad microcode data (%s)", errmsg); return -EINVAL; } /********** give adapter parameters **********/ static inline __be32 bus_addr(void * addr) { return cpu_to_be32 (virt_to_bus (addr)); } static int __devinit amb_talk (amb_dev * dev) { adap_talk_block a; unsigned char pool; unsigned long timeout; PRINTD (DBG_FLOW, "amb_talk %p", dev); a.command_start = bus_addr (dev->cq.ptrs.start); a.command_end = bus_addr (dev->cq.ptrs.limit); a.tx_start = bus_addr (dev->txq.in.start); a.tx_end = bus_addr (dev->txq.in.limit); a.txcom_start = bus_addr (dev->txq.out.start); a.txcom_end = bus_addr (dev->txq.out.limit); for (pool = 0; pool < NUM_RX_POOLS; 
++pool) { // the other "a" items are set up by the adapter a.rec_struct[pool].buffer_start = bus_addr (dev->rxq[pool].in.start); a.rec_struct[pool].buffer_end = bus_addr (dev->rxq[pool].in.limit); a.rec_struct[pool].rx_start = bus_addr (dev->rxq[pool].out.start); a.rec_struct[pool].rx_end = bus_addr (dev->rxq[pool].out.limit); a.rec_struct[pool].buffer_size = cpu_to_be32 (dev->rxq[pool].buffer_size); } #ifdef AMB_NEW_MICROCODE // disable fast PLX prefetching a.init_flags = 0; #endif // pass the structure wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (&a)); // 2.2 second wait (must not touch doorbell during 2 second DMA test) msleep(2200); // give the adapter another half second? timeout = 500; while (rd_plain (dev, offsetof(amb_mem, doorbell))) if (timeout) { timeout = msleep_interruptible(timeout); } else { PRINTD (DBG_INIT|DBG_ERR, "adapter init timed out"); return -ETIMEDOUT; } return 0; } // get microcode version static void __devinit amb_ucode_version (amb_dev * dev) { u32 major; u32 minor; command cmd; cmd.request = cpu_to_be32 (SRB_GET_VERSION); while (command_do (dev, &cmd)) { set_current_state(TASK_UNINTERRUPTIBLE); schedule(); } major = be32_to_cpu (cmd.args.version.major); minor = be32_to_cpu (cmd.args.version.minor); PRINTK (KERN_INFO, "microcode version is %u.%u", major, minor); } // get end station address static void __devinit amb_esi (amb_dev * dev, u8 * esi) { u32 lower4; u16 upper2; command cmd; cmd.request = cpu_to_be32 (SRB_GET_BIA); while (command_do (dev, &cmd)) { set_current_state(TASK_UNINTERRUPTIBLE); schedule(); } lower4 = be32_to_cpu (cmd.args.bia.lower4); upper2 = be32_to_cpu (cmd.args.bia.upper2); PRINTD (DBG_LOAD, "BIA: lower4: %08x, upper2 %04x", lower4, upper2); if (esi) { unsigned int i; PRINTDB (DBG_INIT, "ESI:"); for (i = 0; i < ESI_LEN; ++i) { if (i < 4) esi[i] = bitrev8(lower4>>(8*i)); else esi[i] = bitrev8(upper2>>(8*(i-4))); PRINTDM (DBG_INIT, " %02x", esi[i]); } PRINTDE (DBG_INIT, ""); } return; } static void 
fixup_plx_window (amb_dev *dev, loader_block *lb) { // fix up the PLX-mapped window base address to match the block unsigned long blb; u32 mapreg; blb = virt_to_bus(lb); // the kernel stack had better not ever cross a 1Gb boundary! mapreg = rd_plain (dev, offsetof(amb_mem, stuff[10])); mapreg &= ~onegigmask; mapreg |= blb & onegigmask; wr_plain (dev, offsetof(amb_mem, stuff[10]), mapreg); return; } static int __devinit amb_init (amb_dev * dev) { loader_block lb; u32 version; if (amb_reset (dev, 1)) { PRINTK (KERN_ERR, "card reset failed!"); } else { fixup_plx_window (dev, &lb); if (get_loader_version (&lb, dev, &version)) { PRINTK (KERN_INFO, "failed to get loader version"); } else { PRINTK (KERN_INFO, "loader version is %08x", version); if (ucode_init (&lb, dev)) { PRINTK (KERN_ERR, "microcode failure"); } else if (create_queues (dev, cmds, txs, rxs, rxs_bs)) { PRINTK (KERN_ERR, "failed to get memory for queues"); } else { if (amb_talk (dev)) { PRINTK (KERN_ERR, "adapter did not accept queues"); } else { amb_ucode_version (dev); return 0; } /* amb_talk */ destroy_queues (dev); } /* create_queues, ucode_init */ amb_reset (dev, 0); } /* get_loader_version */ } /* amb_reset */ return -EINVAL; } static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev) { unsigned char pool; // set up known dev items straight away dev->pci_dev = pci_dev; pci_set_drvdata(pci_dev, dev); dev->iobase = pci_resource_start (pci_dev, 1); dev->irq = pci_dev->irq; dev->membase = bus_to_virt(pci_resource_start(pci_dev, 0)); // flags (currently only dead) dev->flags = 0; // Allocate cell rates (fibre) // ATM_OC3_PCR = 1555200000/8/270*260/53 - 29/53 // to be really pedantic, this should be ATM_OC3c_PCR dev->tx_avail = ATM_OC3_PCR; dev->rx_avail = ATM_OC3_PCR; // semaphore for txer/rxer modifications - we cannot use a // spinlock as the critical region needs to switch processes mutex_init(&dev->vcc_sf); // queue manipulation spinlocks; we want atomic reads and // writes to the queue descriptors 
(handles IRQ and SMP) // consider replacing "int pending" -> "atomic_t available" // => problem related to who gets to move queue pointers spin_lock_init (&dev->cq.lock); spin_lock_init (&dev->txq.lock); for (pool = 0; pool < NUM_RX_POOLS; ++pool) spin_lock_init (&dev->rxq[pool].lock); } static void setup_pci_dev(struct pci_dev *pci_dev) { unsigned char lat; // enable bus master accesses pci_set_master(pci_dev); // frobnicate latency (upwards, usually) pci_read_config_byte (pci_dev, PCI_LATENCY_TIMER, &lat); if (!pci_lat) pci_lat = (lat < MIN_PCI_LATENCY) ? MIN_PCI_LATENCY : lat; if (lat != pci_lat) { PRINTK (KERN_INFO, "Changing PCI latency timer from %hu to %hu", lat, pci_lat); pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat); } } static int __devinit amb_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent) { amb_dev * dev; int err; unsigned int irq; err = pci_enable_device(pci_dev); if (err < 0) { PRINTK (KERN_ERR, "skipped broken (PLX rev 2) card"); goto out; } // read resources from PCI configuration space irq = pci_dev->irq; if (pci_dev->device == PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD) { PRINTK (KERN_ERR, "skipped broken (PLX rev 2) card"); err = -EINVAL; goto out_disable; } PRINTD (DBG_INFO, "found Madge ATM adapter (amb) at" " IO %llx, IRQ %u, MEM %p", (unsigned long long)pci_resource_start(pci_dev, 1), irq, bus_to_virt(pci_resource_start(pci_dev, 0))); // check IO region err = pci_request_region(pci_dev, 1, DEV_LABEL); if (err < 0) { PRINTK (KERN_ERR, "IO range already in use!"); goto out_disable; } dev = kzalloc(sizeof(amb_dev), GFP_KERNEL); if (!dev) { PRINTK (KERN_ERR, "out of memory!"); err = -ENOMEM; goto out_release; } setup_dev(dev, pci_dev); err = amb_init(dev); if (err < 0) { PRINTK (KERN_ERR, "adapter initialisation failure"); goto out_free; } setup_pci_dev(pci_dev); // grab (but share) IRQ and install handler err = request_irq(irq, interrupt_handler, IRQF_SHARED, DEV_LABEL, dev); if (err < 0) { PRINTK (KERN_ERR, "request 
IRQ failed!"); goto out_reset; } dev->atm_dev = atm_dev_register (DEV_LABEL, &pci_dev->dev, &amb_ops, -1, NULL); if (!dev->atm_dev) { PRINTD (DBG_ERR, "failed to register Madge ATM adapter"); err = -EINVAL; goto out_free_irq; } PRINTD (DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p", dev->atm_dev->number, dev, dev->atm_dev); dev->atm_dev->dev_data = (void *) dev; // register our address amb_esi (dev, dev->atm_dev->esi); // 0 bits for vpi, 10 bits for vci dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS; dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS; init_timer(&dev->housekeeping); dev->housekeeping.function = do_housekeeping; dev->housekeeping.data = (unsigned long) dev; mod_timer(&dev->housekeeping, jiffies); // enable host interrupts interrupts_on (dev); out: return err; out_free_irq: free_irq(irq, dev); out_reset: amb_reset(dev, 0); out_free: kfree(dev); out_release: pci_release_region(pci_dev, 1); out_disable: pci_disable_device(pci_dev); goto out; } static void __devexit amb_remove_one(struct pci_dev *pci_dev) { struct amb_dev *dev; dev = pci_get_drvdata(pci_dev); PRINTD(DBG_INFO|DBG_INIT, "closing %p (atm_dev = %p)", dev, dev->atm_dev); del_timer_sync(&dev->housekeeping); // the drain should not be necessary drain_rx_pools(dev); interrupts_off(dev); amb_reset(dev, 0); free_irq(dev->irq, dev); pci_disable_device(pci_dev); destroy_queues(dev); atm_dev_deregister(dev->atm_dev); kfree(dev); pci_release_region(pci_dev, 1); } static void __init amb_check_args (void) { unsigned char pool; unsigned int max_rx_size; #ifdef DEBUG_AMBASSADOR PRINTK (KERN_NOTICE, "debug bitmap is %hx", debug &= DBG_MASK); #else if (debug) PRINTK (KERN_NOTICE, "no debugging support"); #endif if (cmds < MIN_QUEUE_SIZE) PRINTK (KERN_NOTICE, "cmds has been raised to %u", cmds = MIN_QUEUE_SIZE); if (txs < MIN_QUEUE_SIZE) PRINTK (KERN_NOTICE, "txs has been raised to %u", txs = MIN_QUEUE_SIZE); for (pool = 0; pool < NUM_RX_POOLS; ++pool) if (rxs[pool] < MIN_QUEUE_SIZE) PRINTK 
(KERN_NOTICE, "rxs[%hu] has been raised to %u", pool, rxs[pool] = MIN_QUEUE_SIZE); // buffers sizes should be greater than zero and strictly increasing max_rx_size = 0; for (pool = 0; pool < NUM_RX_POOLS; ++pool) if (rxs_bs[pool] <= max_rx_size) PRINTK (KERN_NOTICE, "useless pool (rxs_bs[%hu] = %u)", pool, rxs_bs[pool]); else max_rx_size = rxs_bs[pool]; if (rx_lats < MIN_RX_BUFFERS) PRINTK (KERN_NOTICE, "rx_lats has been raised to %u", rx_lats = MIN_RX_BUFFERS); return; } /********** module stuff **********/ MODULE_AUTHOR(maintainer_string); MODULE_DESCRIPTION(description_string); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("atmsar11.fw"); module_param(debug, ushort, 0644); module_param(cmds, uint, 0); module_param(txs, uint, 0); module_param_array(rxs, uint, NULL, 0); module_param_array(rxs_bs, uint, NULL, 0); module_param(rx_lats, uint, 0); module_param(pci_lat, byte, 0); MODULE_PARM_DESC(debug, "debug bitmap, see .h file"); MODULE_PARM_DESC(cmds, "number of command queue entries"); MODULE_PARM_DESC(txs, "number of TX queue entries"); MODULE_PARM_DESC(rxs, "number of RX queue entries [" __MODULE_STRING(NUM_RX_POOLS) "]"); MODULE_PARM_DESC(rxs_bs, "size of RX buffers [" __MODULE_STRING(NUM_RX_POOLS) "]"); MODULE_PARM_DESC(rx_lats, "number of extra buffers to cope with RX latencies"); MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles"); /********** module entry **********/ static struct pci_device_id amb_pci_tbl[] = { { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR), 0 }, { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD), 0 }, { 0, } }; MODULE_DEVICE_TABLE(pci, amb_pci_tbl); static struct pci_driver amb_driver = { .name = "amb", .probe = amb_probe, .remove = __devexit_p(amb_remove_one), .id_table = amb_pci_tbl, }; static int __init amb_module_init (void) { PRINTD (DBG_FLOW|DBG_INIT, "init_module"); // sanity check - cast needed as printk does not support %Zu if (sizeof(amb_mem) != 4*16 + 4*12) { PRINTK (KERN_ERR, "Fix amb_mem (is %lu words).", (unsigned 
long) sizeof(amb_mem)); return -ENOMEM; } show_version(); amb_check_args(); // get the juice return pci_register_driver(&amb_driver); } /********** module exit **********/ static void __exit amb_module_exit (void) { PRINTD (DBG_FLOW|DBG_INIT, "cleanup_module"); pci_unregister_driver(&amb_driver); } module_init(amb_module_init); module_exit(amb_module_exit);
gpl-2.0
mkasick/android_kernel_samsung_jfltespr
drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
9002
12310
/* IEEE 802.11 SoftMAC layer * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it> * * Mostly extracted from the rtl8180-sa2400 driver for the * in-kernel generic ieee802.11 stack. * * Some pieces of code might be stolen from ipw2100 driver * copyright of who own it's copyright ;-) * * PS wx handler mostly stolen from hostap, copyright who * own it's copyright ;-) * * released under the GPL */ #include "ieee80211.h" /* FIXME: add A freqs */ const long ieee80211_wlan_frequencies[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484 }; int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret; struct iw_freq *fwrq = & wrqu->freq; // printk("in %s\n",__func__); down(&ieee->wx_sem); if(ieee->iw_mode == IW_MODE_INFRA){ ret = -EOPNOTSUPP; goto out; } /* if setting by freq convert to channel */ if (fwrq->e == 1) { if ((fwrq->m >= (int) 2.412e8 && fwrq->m <= (int) 2.487e8)) { int f = fwrq->m / 100000; int c = 0; while ((c < 14) && (f != ieee80211_wlan_frequencies[c])) c++; /* hack to fall through */ fwrq->e = 0; fwrq->m = c + 1; } } if (fwrq->e > 0 || fwrq->m > 14 || fwrq->m < 1 ){ ret = -EOPNOTSUPP; goto out; }else { /* Set the channel */ ieee->current_network.channel = fwrq->m; ieee->set_chan(ieee->dev, ieee->current_network.channel); if(ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER) if(ieee->state == IEEE80211_LINKED){ ieee80211_stop_send_beacons(ieee); ieee80211_start_send_beacons(ieee); } } ret = 0; out: up(&ieee->wx_sem); return ret; } int ieee80211_wx_get_freq(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct iw_freq *fwrq = & wrqu->freq; if (ieee->current_network.channel == 0) return -1; fwrq->m = ieee->current_network.channel; fwrq->e = 0; return 0; } int ieee80211_wx_get_wap(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { unsigned long 
flags; wrqu->ap_addr.sa_family = ARPHRD_ETHER; if (ieee->iw_mode == IW_MODE_MONITOR) return -1; /* We want avoid to give to the user inconsistent infos*/ spin_lock_irqsave(&ieee->lock, flags); if (ieee->state != IEEE80211_LINKED && ieee->state != IEEE80211_LINKED_SCANNING && ieee->wap_set == 0) memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); else memcpy(wrqu->ap_addr.sa_data, ieee->current_network.bssid, ETH_ALEN); spin_unlock_irqrestore(&ieee->lock, flags); return 0; } int ieee80211_wx_set_wap(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *awrq, char *extra) { int ret = 0; u8 zero[] = {0,0,0,0,0,0}; unsigned long flags; short ifup = ieee->proto_started;//dev->flags & IFF_UP; struct sockaddr *temp = (struct sockaddr *)awrq; //printk("=======Set WAP:"); ieee->sync_scan_hurryup = 1; down(&ieee->wx_sem); /* use ifconfig hw ether */ if (ieee->iw_mode == IW_MODE_MASTER){ ret = -1; goto out; } if (temp->sa_family != ARPHRD_ETHER){ ret = -EINVAL; goto out; } if (ifup) ieee80211_stop_protocol(ieee); /* just to avoid to give inconsistent infos in the * get wx method. 
not really needed otherwise */ spin_lock_irqsave(&ieee->lock, flags); memcpy(ieee->current_network.bssid, temp->sa_data, ETH_ALEN); ieee->wap_set = memcmp(temp->sa_data, zero,ETH_ALEN)!=0; //printk(" %x:%x:%x:%x:%x:%x\n", ieee->current_network.bssid[0],ieee->current_network.bssid[1],ieee->current_network.bssid[2],ieee->current_network.bssid[3],ieee->current_network.bssid[4],ieee->current_network.bssid[5]); spin_unlock_irqrestore(&ieee->lock, flags); if (ifup) ieee80211_start_protocol(ieee); out: up(&ieee->wx_sem); return ret; } int ieee80211_wx_get_essid(struct ieee80211_device *ieee, struct iw_request_info *a,union iwreq_data *wrqu,char *b) { int len,ret = 0; unsigned long flags; if (ieee->iw_mode == IW_MODE_MONITOR) return -1; /* We want avoid to give to the user inconsistent infos*/ spin_lock_irqsave(&ieee->lock, flags); if (ieee->current_network.ssid[0] == '\0' || ieee->current_network.ssid_len == 0){ ret = -1; goto out; } if (ieee->state != IEEE80211_LINKED && ieee->state != IEEE80211_LINKED_SCANNING && ieee->ssid_set == 0){ ret = -1; goto out; } len = ieee->current_network.ssid_len; wrqu->essid.length = len; strncpy(b,ieee->current_network.ssid,len); wrqu->essid.flags = 1; out: spin_unlock_irqrestore(&ieee->lock, flags); return ret; } int ieee80211_wx_set_rate(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { u32 target_rate = wrqu->bitrate.value; //added by lizhaoming for auto mode if(target_rate == -1){ ieee->rate = 110; } else { ieee->rate = target_rate/100000; } //FIXME: we might want to limit rate also in management protocols. 
return 0; } int ieee80211_wx_get_rate(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { wrqu->bitrate.value = ieee->rate * 100000; return 0; } int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { ieee->sync_scan_hurryup = 1; down(&ieee->wx_sem); if (wrqu->mode == ieee->iw_mode) goto out; if (wrqu->mode == IW_MODE_MONITOR){ ieee->dev->type = ARPHRD_IEEE80211; }else{ ieee->dev->type = ARPHRD_ETHER; } if (!ieee->proto_started){ ieee->iw_mode = wrqu->mode; }else{ ieee80211_stop_protocol(ieee); ieee->iw_mode = wrqu->mode; ieee80211_start_protocol(ieee); } out: up(&ieee->wx_sem); return 0; } void ieee80211_wx_sync_scan_wq(struct work_struct *work) { struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, wx_sync_scan_wq); short chan; chan = ieee->current_network.channel; if (ieee->data_hard_stop) ieee->data_hard_stop(ieee->dev); ieee80211_stop_send_beacons(ieee); ieee->state = IEEE80211_LINKED_SCANNING; ieee->link_change(ieee->dev); ieee80211_start_scan_syncro(ieee); ieee->set_chan(ieee->dev, chan); ieee->state = IEEE80211_LINKED; ieee->link_change(ieee->dev); if (ieee->data_hard_resume) ieee->data_hard_resume(ieee->dev); if(ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER) ieee80211_start_send_beacons(ieee); //YJ,add,080828, In prevent of lossing ping packet during scanning //ieee80211_sta_ps_send_null_frame(ieee, false); //YJ,add,080828,end up(&ieee->wx_sem); } int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret = 0; down(&ieee->wx_sem); if (ieee->iw_mode == IW_MODE_MONITOR || !(ieee->proto_started)){ ret = -1; goto out; } //YJ,add,080828 //In prevent of lossing ping packet during scanning //ieee80211_sta_ps_send_null_frame(ieee, true); //YJ,add,080828,end if ( ieee->state == IEEE80211_LINKED){ queue_work(ieee->wq, &ieee->wx_sync_scan_wq); /* 
intentionally forget to up sem */ return 0; } out: up(&ieee->wx_sem); return ret; } int ieee80211_wx_set_essid(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *extra) { int ret=0,len; short proto_started; unsigned long flags; ieee->sync_scan_hurryup = 1; down(&ieee->wx_sem); proto_started = ieee->proto_started; if (wrqu->essid.length > IW_ESSID_MAX_SIZE){ ret= -E2BIG; goto out; } if (ieee->iw_mode == IW_MODE_MONITOR){ ret= -1; goto out; } if(proto_started) ieee80211_stop_protocol(ieee); /* this is just to be sure that the GET wx callback * has consisten infos. not needed otherwise */ spin_lock_irqsave(&ieee->lock, flags); if (wrqu->essid.flags && wrqu->essid.length) { //YJ,modified,080819 len = (wrqu->essid.length < IW_ESSID_MAX_SIZE) ? (wrqu->essid.length) : IW_ESSID_MAX_SIZE; memset(ieee->current_network.ssid, 0, ieee->current_network.ssid_len); //YJ,add,080819 strncpy(ieee->current_network.ssid, extra, len); ieee->current_network.ssid_len = len; ieee->ssid_set = 1; //YJ,modified,080819,end //YJ,add,080819,for hidden ap if(len == 0){ memset(ieee->current_network.bssid, 0, ETH_ALEN); ieee->current_network.capability = 0; } //YJ,add,080819,for hidden ap,end } else{ ieee->ssid_set = 0; ieee->current_network.ssid[0] = '\0'; ieee->current_network.ssid_len = 0; } //printk("==========set essid %s!\n",ieee->current_network.ssid); spin_unlock_irqrestore(&ieee->lock, flags); if (proto_started) ieee80211_start_protocol(ieee); out: up(&ieee->wx_sem); return ret; } int ieee80211_wx_get_mode(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { wrqu->mode = ieee->iw_mode; return 0; } int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int *parms = (int *)extra; int enable = (parms[0] > 0); short prev = ieee->raw_tx; down(&ieee->wx_sem); if(enable) ieee->raw_tx = 1; else ieee->raw_tx = 0; printk(KERN_INFO"raw TX is %s\n", 
ieee->raw_tx ? "enabled" : "disabled"); if(ieee->iw_mode == IW_MODE_MONITOR) { if(prev == 0 && ieee->raw_tx){ if (ieee->data_hard_resume) ieee->data_hard_resume(ieee->dev); netif_carrier_on(ieee->dev); } if(prev && ieee->raw_tx == 1) netif_carrier_off(ieee->dev); } up(&ieee->wx_sem); return 0; } int ieee80211_wx_get_name(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { strlcpy(wrqu->name, "802.11", IFNAMSIZ); if(ieee->modulation & IEEE80211_CCK_MODULATION){ strlcat(wrqu->name, "b", IFNAMSIZ); if(ieee->modulation & IEEE80211_OFDM_MODULATION) strlcat(wrqu->name, "/g", IFNAMSIZ); }else if(ieee->modulation & IEEE80211_OFDM_MODULATION) strlcat(wrqu->name, "g", IFNAMSIZ); if((ieee->state == IEEE80211_LINKED) || (ieee->state == IEEE80211_LINKED_SCANNING)) strlcat(wrqu->name," link", IFNAMSIZ); else if(ieee->state != IEEE80211_NOLINK) strlcat(wrqu->name," .....", IFNAMSIZ); return 0; } /* this is mostly stolen from hostap */ int ieee80211_wx_set_power(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; if( (!ieee->sta_wake_up) || (!ieee->ps_request_tx_ack) || (!ieee->enter_sleep_state) || (!ieee->ps_is_queue_empty)){ printk("ERROR. 
PS mode tried to be use but driver missed a callback\n\n"); return -1; } down(&ieee->wx_sem); if (wrqu->power.disabled){ ieee->ps = IEEE80211_PS_DISABLED; goto exit; } switch (wrqu->power.flags & IW_POWER_MODE) { case IW_POWER_UNICAST_R: ieee->ps = IEEE80211_PS_UNICAST; break; case IW_POWER_ALL_R: ieee->ps = IEEE80211_PS_UNICAST | IEEE80211_PS_MBCAST; break; case IW_POWER_ON: ieee->ps = IEEE80211_PS_DISABLED; break; default: ret = -EINVAL; goto exit; } if (wrqu->power.flags & IW_POWER_TIMEOUT) { ieee->ps_timeout = wrqu->power.value / 1000; printk("Timeout %d\n",ieee->ps_timeout); } if (wrqu->power.flags & IW_POWER_PERIOD) { ret = -EOPNOTSUPP; goto exit; //wrq->value / 1024; } exit: up(&ieee->wx_sem); return ret; } /* this is stolen from hostap */ int ieee80211_wx_get_power(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret =0; down(&ieee->wx_sem); if(ieee->ps == IEEE80211_PS_DISABLED){ wrqu->power.disabled = 1; goto exit; } wrqu->power.disabled = 0; // if ((wrqu->power.flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { wrqu->power.flags = IW_POWER_TIMEOUT; wrqu->power.value = ieee->ps_timeout * 1000; // } else { // ret = -EOPNOTSUPP; // goto exit; //wrqu->power.flags = IW_POWER_PERIOD; //wrqu->power.value = ieee->current_network.dtim_period * // ieee->current_network.beacon_interval * 1024; // } if (ieee->ps & IEEE80211_PS_MBCAST) wrqu->power.flags |= IW_POWER_ALL_R; else wrqu->power.flags |= IW_POWER_UNICAST_R; exit: up(&ieee->wx_sem); return ret; }
gpl-2.0
benatto/linux-1
arch/arm/mach-at91/at91sam9263_devices.c
43
40413
/* * arch/arm/mach-at91/at91sam9263_devices.c * * Copyright (C) 2007 Atmel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <linux/dma-mapping.h> #include <linux/gpio.h> #include <linux/platform_device.h> #include <linux/i2c-gpio.h> #include <linux/fb.h> #include <video/atmel_lcdc.h> #include <mach/at91sam9263.h> #include <mach/at91sam9263_matrix.h> #include <mach/at91_matrix.h> #include <mach/at91sam9_smc.h> #include <mach/hardware.h> #include "board.h" #include "generic.h" /* -------------------------------------------------------------------- * USB Host * -------------------------------------------------------------------- */ #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) static u64 ohci_dmamask = DMA_BIT_MASK(32); static struct at91_usbh_data usbh_data; static struct resource usbh_resources[] = { [0] = { .start = AT91SAM9263_UHP_BASE, .end = AT91SAM9263_UHP_BASE + SZ_1M - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9263_ID_UHP, .end = NR_IRQS_LEGACY + AT91SAM9263_ID_UHP, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91_usbh_device = { .name = "at91_ohci", .id = -1, .dev = { .dma_mask = &ohci_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &usbh_data, }, .resource = usbh_resources, .num_resources = ARRAY_SIZE(usbh_resources), }; void __init at91_add_device_usbh(struct at91_usbh_data *data) { int i; if (!data) return; /* Enable VBus control for UHP ports */ for (i = 0; i < data->ports; i++) { if (gpio_is_valid(data->vbus_pin[i])) at91_set_gpio_output(data->vbus_pin[i], data->vbus_pin_active_low[i]); } /* Enable overcurrent notification */ for (i = 0; i < data->ports; i++) { if 
(gpio_is_valid(data->overcurrent_pin[i])) at91_set_gpio_input(data->overcurrent_pin[i], 1); } usbh_data = *data; platform_device_register(&at91_usbh_device); } #else void __init at91_add_device_usbh(struct at91_usbh_data *data) {} #endif /* -------------------------------------------------------------------- * USB Device (Gadget) * -------------------------------------------------------------------- */ #if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE) static struct at91_udc_data udc_data; static struct resource udc_resources[] = { [0] = { .start = AT91SAM9263_BASE_UDP, .end = AT91SAM9263_BASE_UDP + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9263_ID_UDP, .end = NR_IRQS_LEGACY + AT91SAM9263_ID_UDP, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91_udc_device = { .name = "at91_udc", .id = -1, .dev = { .platform_data = &udc_data, }, .resource = udc_resources, .num_resources = ARRAY_SIZE(udc_resources), }; void __init at91_add_device_udc(struct at91_udc_data *data) { if (!data) return; if (gpio_is_valid(data->vbus_pin)) { at91_set_gpio_input(data->vbus_pin, 0); at91_set_deglitch(data->vbus_pin, 1); } /* Pullup pin is handled internally by USB device peripheral */ udc_data = *data; platform_device_register(&at91_udc_device); } #else void __init at91_add_device_udc(struct at91_udc_data *data) {} #endif /* -------------------------------------------------------------------- * Ethernet * -------------------------------------------------------------------- */ #if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE) static u64 eth_dmamask = DMA_BIT_MASK(32); static struct macb_platform_data eth_data; static struct resource eth_resources[] = { [0] = { .start = AT91SAM9263_BASE_EMAC, .end = AT91SAM9263_BASE_EMAC + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = NR_IRQS_LEGACY + AT91SAM9263_ID_EMAC, .end = NR_IRQS_LEGACY + AT91SAM9263_ID_EMAC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device 
at91sam9263_eth_device = { .name = "macb", .id = -1, .dev = { .dma_mask = &eth_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &eth_data, }, .resource = eth_resources, .num_resources = ARRAY_SIZE(eth_resources), }; void __init at91_add_device_eth(struct macb_platform_data *data) { if (!data) return; if (gpio_is_valid(data->phy_irq_pin)) { at91_set_gpio_input(data->phy_irq_pin, 0); at91_set_deglitch(data->phy_irq_pin, 1); } /* Pins used for MII and RMII */ at91_set_A_periph(AT91_PIN_PE21, 0); /* ETXCK_EREFCK */ at91_set_B_periph(AT91_PIN_PC25, 0); /* ERXDV */ at91_set_A_periph(AT91_PIN_PE25, 0); /* ERX0 */ at91_set_A_periph(AT91_PIN_PE26, 0); /* ERX1 */ at91_set_A_periph(AT91_PIN_PE27, 0); /* ERXER */ at91_set_A_periph(AT91_PIN_PE28, 0); /* ETXEN */ at91_set_A_periph(AT91_PIN_PE23, 0); /* ETX0 */ at91_set_A_periph(AT91_PIN_PE24, 0); /* ETX1 */ at91_set_A_periph(AT91_PIN_PE30, 0); /* EMDIO */ at91_set_A_periph(AT91_PIN_PE29, 0); /* EMDC */ if (!data->is_rmii) { at91_set_A_periph(AT91_PIN_PE22, 0); /* ECRS */ at91_set_B_periph(AT91_PIN_PC26, 0); /* ECOL */ at91_set_B_periph(AT91_PIN_PC22, 0); /* ERX2 */ at91_set_B_periph(AT91_PIN_PC23, 0); /* ERX3 */ at91_set_B_periph(AT91_PIN_PC27, 0); /* ERXCK */ at91_set_B_periph(AT91_PIN_PC20, 0); /* ETX2 */ at91_set_B_periph(AT91_PIN_PC21, 0); /* ETX3 */ at91_set_B_periph(AT91_PIN_PC24, 0); /* ETXER */ } eth_data = *data; platform_device_register(&at91sam9263_eth_device); } #else void __init at91_add_device_eth(struct macb_platform_data *data) {} #endif /* -------------------------------------------------------------------- * MMC / SD * -------------------------------------------------------------------- */ #if IS_ENABLED(CONFIG_MMC_ATMELMCI) static u64 mmc_dmamask = DMA_BIT_MASK(32); static struct mci_platform_data mmc0_data, mmc1_data; static struct resource mmc0_resources[] = { [0] = { .start = AT91SAM9263_BASE_MCI0, .end = AT91SAM9263_BASE_MCI0 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = 
/*
 * NOTE(review): this span opens inside mmc0_resources[]; the
 * IORESOURCE_MEM entry ([0]) lies above the visible region.  The
 * fragment below is the tail of the MCI0 interrupt resource ([1]).
 */
NR_IRQS_LEGACY + AT91SAM9263_ID_MCI0,
		.end	= NR_IRQS_LEGACY + AT91SAM9263_ID_MCI0,
		.flags	= IORESOURCE_IRQ,
	},
};

/* MCI0 controller, exposed to the atmel_mci driver as bus 0. */
static struct platform_device at91sam9263_mmc0_device = {
	.name		= "atmel_mci",
	.id		= 0,
	.dev		= {
		.dma_mask		= &mmc_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &mmc0_data,
	},
	.resource	= mmc0_resources,
	.num_resources	= ARRAY_SIZE(mmc0_resources),
};

/* Register window and interrupt of the second MMC controller (MCI1). */
static struct resource mmc1_resources[] = {
	[0] = {
		.start	= AT91SAM9263_BASE_MCI1,
		.end	= AT91SAM9263_BASE_MCI1 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= NR_IRQS_LEGACY + AT91SAM9263_ID_MCI1,
		.end	= NR_IRQS_LEGACY + AT91SAM9263_ID_MCI1,
		.flags	= IORESOURCE_IRQ,
	},
};

/* MCI1 controller, exposed to the atmel_mci driver as bus 1. */
static struct platform_device at91sam9263_mmc1_device = {
	.name		= "atmel_mci",
	.id		= 1,
	.dev		= {
		.dma_mask		= &mmc_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &mmc1_data,
	},
	.resource	= mmc1_resources,
	.num_resources	= ARRAY_SIZE(mmc1_resources),
};

/*
 * at91_add_device_mci() - mux the pads of one MCI controller and register it.
 * @mmc_id: 0 selects MCI0, 1 selects MCI1.
 * @data:   per-slot wiring (bus width, card-detect / write-protect GPIOs);
 *          copied into a file-static so the caller's copy may be __initdata.
 *
 * A slot with bus_width == 0 is treated as absent.  The platform device
 * is registered as soon as at least one slot has been configured.
 */
void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
{
	unsigned int i;
	unsigned int slot_count = 0;

	if (!data)
		return;

	for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
		if (!data->slot[i].bus_width)
			continue;

		/* input/irq */
		if (gpio_is_valid(data->slot[i].detect_pin)) {
			at91_set_gpio_input(data->slot[i].detect_pin, 1);
			at91_set_deglitch(data->slot[i].detect_pin, 1);
		}
		if (gpio_is_valid(data->slot[i].wp_pin))
			at91_set_gpio_input(data->slot[i].wp_pin, 1);

		if (mmc_id == 0) {		/* MCI0 */
			switch (i) {
			case 0:			/* slot A */
				/* CMD */
				at91_set_A_periph(AT91_PIN_PA1, 1);
				/* DAT0, maybe DAT1..DAT3 */
				at91_set_A_periph(AT91_PIN_PA0, 1);
				if (data->slot[i].bus_width == 4) {
					at91_set_A_periph(AT91_PIN_PA3, 1);
					at91_set_A_periph(AT91_PIN_PA4, 1);
					at91_set_A_periph(AT91_PIN_PA5, 1);
				}
				slot_count++;
				break;
			case 1:			/* slot B */
				/* CMD */
				at91_set_A_periph(AT91_PIN_PA16, 1);
				/* DAT0, maybe DAT1..DAT3 */
				at91_set_A_periph(AT91_PIN_PA17, 1);
				if (data->slot[i].bus_width == 4) {
					at91_set_A_periph(AT91_PIN_PA18, 1);
					at91_set_A_periph(AT91_PIN_PA19, 1);
					at91_set_A_periph(AT91_PIN_PA20, 1);
				}
				slot_count++;
				break;
			default:
				printk(KERN_ERR "AT91: SD/MMC slot %d not available\n", i);
				break;
			}
			if (slot_count) {
				/* CLK */
				at91_set_A_periph(AT91_PIN_PA12, 0);

				mmc0_data = *data;
				platform_device_register(&at91sam9263_mmc0_device);
			}
		} else if (mmc_id == 1) {	/* MCI1 */
			switch (i) {
			case 0:			/* slot A */
				/* CMD */
				at91_set_A_periph(AT91_PIN_PA7, 1);
				/* DAT0, maybe DAT1..DAT3 */
				at91_set_A_periph(AT91_PIN_PA8, 1);
				if (data->slot[i].bus_width == 4) {
					at91_set_A_periph(AT91_PIN_PA9, 1);
					at91_set_A_periph(AT91_PIN_PA10, 1);
					at91_set_A_periph(AT91_PIN_PA11, 1);
				}
				slot_count++;
				break;
			case 1:			/* slot B */
				/* CMD */
				at91_set_A_periph(AT91_PIN_PA21, 1);
				/* DAT0, maybe DAT1..DAT3 */
				at91_set_A_periph(AT91_PIN_PA22, 1);
				if (data->slot[i].bus_width == 4) {
					at91_set_A_periph(AT91_PIN_PA23, 1);
					at91_set_A_periph(AT91_PIN_PA24, 1);
					at91_set_A_periph(AT91_PIN_PA25, 1);
				}
				slot_count++;
				break;
			default:
				printk(KERN_ERR "AT91: SD/MMC slot %d not available\n", i);
				break;
			}
			if (slot_count) {
				/* CLK */
				at91_set_A_periph(AT91_PIN_PA6, 0);

				mmc1_data = *data;
				platform_device_register(&at91sam9263_mmc1_device);
			}
		}
	}
}
#else
void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data) {}
#endif


/* --------------------------------------------------------------------
 *  Compact Flash (PCMCIA or IDE)
 * -------------------------------------------------------------------- */

#if defined(CONFIG_PATA_AT91) || defined(CONFIG_PATA_AT91_MODULE) || \
	defined(CONFIG_AT91_CF) || defined(CONFIG_AT91_CF_MODULE)

static struct at91_cf_data cf0_data;

/* CF slot 0 sits behind EBI0 chip-select 4 (256 MiB window). */
static struct resource cf0_resources[] = {
	[0] = {
		.start	= AT91_CHIPSELECT_4,
		.end	= AT91_CHIPSELECT_4 + SZ_256M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT,
	}
};

/* .name is filled in at registration time: "pata_at91" or "at91_cf". */
static struct platform_device cf0_device = {
	.id		= 0,
	.dev		= {
		.platform_data	= &cf0_data,
	},
	.resource	= cf0_resources,
	.num_resources	= ARRAY_SIZE(cf0_resources),
};

static struct at91_cf_data cf1_data;

/* CF slot 1 sits behind EBI0 chip-select 5 (256 MiB window). */
static struct resource cf1_resources[] = {
	[0] = {
		.start	= AT91_CHIPSELECT_5,
		.end	= AT91_CHIPSELECT_5 + SZ_256M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT,
	}
};

static struct platform_device cf1_device = {
	.id		= 1,
	.dev		= {
		.platform_data	= &cf1_data,
	},
	.resource	= cf1_resources,
	.num_resources	= ARRAY_SIZE(cf1_resources),
};

/*
 * at91_add_device_cf() - route one EBI0 chip-select to the CF logic
 * and register the matching platform device.
 * @data: board wiring; data->chipselect must be 4 or 5.
 *
 * Driver binding is chosen by AT91_CF_TRUE_IDE: "pata_at91" (IDE) vs
 * "at91_cf" (PCMCIA).
 */
void __init at91_add_device_cf(struct at91_cf_data *data)
{
	unsigned long ebi0_csa;
	struct platform_device *pdev;

	if (!data)
		return;

	/*
	 * assign CS4 or CS5 to SMC with Compact Flash logic support,
	 * we assume SMC timings are configured by board code,
	 * except True IDE where timings are controlled by driver
	 */
	ebi0_csa = at91_matrix_read(AT91_MATRIX_EBI0CSA);
	switch (data->chipselect) {
	case 4:
		at91_set_A_periph(AT91_PIN_PD6, 0);	/* EBI0_NCS4/CFCS0 */
		ebi0_csa |= AT91_MATRIX_EBI0_CS4A_SMC_CF1;
		cf0_data = *data;
		pdev = &cf0_device;
		break;
	case 5:
		at91_set_A_periph(AT91_PIN_PD7, 0);	/* EBI0_NCS5/CFCS1 */
		ebi0_csa |= AT91_MATRIX_EBI0_CS5A_SMC_CF2;
		cf1_data = *data;
		pdev = &cf1_device;
		break;
	default:
		printk(KERN_ERR "AT91 CF: bad chip-select requested (%u)\n",
		       data->chipselect);
		return;
	}
	at91_matrix_write(AT91_MATRIX_EBI0CSA, ebi0_csa);

	if (gpio_is_valid(data->det_pin)) {
		at91_set_gpio_input(data->det_pin, 1);
		at91_set_deglitch(data->det_pin, 1);
	}

	if (gpio_is_valid(data->irq_pin)) {
		at91_set_gpio_input(data->irq_pin, 1);
		at91_set_deglitch(data->irq_pin, 1);
	}

	if (gpio_is_valid(data->vcc_pin))
		/* initially off */
		at91_set_gpio_output(data->vcc_pin, 0);

	/* enable EBI controlled pins */
	at91_set_A_periph(AT91_PIN_PD5, 1);	/* NWAIT */
	at91_set_A_periph(AT91_PIN_PD8, 0);	/* CFCE1 */
	at91_set_A_periph(AT91_PIN_PD9, 0);	/* CFCE2 */
	at91_set_A_periph(AT91_PIN_PD14, 0);	/* CFNRW */

	pdev->name = (data->flags & AT91_CF_TRUE_IDE) ? "pata_at91" : "at91_cf";
	platform_device_register(pdev);
}
#else
void __init at91_add_device_cf(struct at91_cf_data *data) {}
#endif


/* --------------------------------------------------------------------
 *  NAND / SmartMedia
 * -------------------------------------------------------------------- */

#if defined(CONFIG_MTD_NAND_ATMEL) || defined(CONFIG_MTD_NAND_ATMEL_MODULE)
static struct atmel_nand_data nand_data;

#define NAND_BASE	AT91_CHIPSELECT_3

/* NAND data window on CS3 plus the ECC0 controller registers. */
static struct resource nand_resources[] = {
	[0] = {
		.start	= NAND_BASE,
		.end	= NAND_BASE + SZ_256M - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91SAM9263_BASE_ECC0,
		.end	= AT91SAM9263_BASE_ECC0 + SZ_512 - 1,
		.flags	= IORESOURCE_MEM,
	}
};

static struct platform_device at91sam9263_nand_device = {
	.name		= "atmel_nand",
	.id		= -1,
	.dev		= {
		.platform_data	= &nand_data,
	},
	.resource	= nand_resources,
	.num_resources	= ARRAY_SIZE(nand_resources),
};

/*
 * at91_add_device_nand() - route CS3 to the SmartMedia logic, set up the
 * enable / ready-busy / detect GPIOs, and register the NAND controller.
 * @data: board wiring; copied into the file-static nand_data.
 */
void __init at91_add_device_nand(struct atmel_nand_data *data)
{
	unsigned long csa;

	if (!data)
		return;

	csa = at91_matrix_read(AT91_MATRIX_EBI0CSA);
	at91_matrix_write(AT91_MATRIX_EBI0CSA, csa | AT91_MATRIX_EBI0_CS3A_SMC_SMARTMEDIA);

	/* enable pin */
	if (gpio_is_valid(data->enable_pin))
		at91_set_gpio_output(data->enable_pin, 1);

	/* ready/busy pin */
	if (gpio_is_valid(data->rdy_pin))
		at91_set_gpio_input(data->rdy_pin, 1);

	/* card detect pin */
	if (gpio_is_valid(data->det_pin))
		at91_set_gpio_input(data->det_pin, 1);

	nand_data = *data;
	platform_device_register(&at91sam9263_nand_device);
}
#else
void __init at91_add_device_nand(struct atmel_nand_data *data) {}
#endif


/* --------------------------------------------------------------------
 *  TWI (i2c)
 * -------------------------------------------------------------------- */

/*
 * Prefer the GPIO code since the TWI controller isn't robust
 * (gets overruns and underruns under load) and can only issue
 * repeated STARTs in one scenario (the driver doesn't yet handle them).
 */
#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)

static struct i2c_gpio_platform_data pdata = {
	.sda_pin		= AT91_PIN_PB4,
	.sda_is_open_drain	= 1,
	.scl_pin		= AT91_PIN_PB5,
	.scl_is_open_drain	= 1,
	.udelay			= 2,		/* ~100 kHz */
};

static struct platform_device at91sam9263_twi_device = {
	.name			= "i2c-gpio",
	.id			= 0,
	.dev.platform_data	= &pdata,
};

/*
 * Bit-banged i2c: drive the TWI pads as open-drain GPIOs and register
 * the i2c-gpio adapter together with the board's device table.
 */
void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
{
	at91_set_GPIO_periph(AT91_PIN_PB4, 1);		/* TWD (SDA) */
	at91_set_multi_drive(AT91_PIN_PB4, 1);

	at91_set_GPIO_periph(AT91_PIN_PB5, 1);		/* TWCK (SCL) */
	at91_set_multi_drive(AT91_PIN_PB5, 1);

	i2c_register_board_info(0, devices, nr_devices);
	platform_device_register(&at91sam9263_twi_device);
}

#elif defined(CONFIG_I2C_AT91) || defined(CONFIG_I2C_AT91_MODULE)

static struct resource twi_resources[] = {
	[0] = {
		.start	= AT91SAM9263_BASE_TWI,
		.end	= AT91SAM9263_BASE_TWI + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= NR_IRQS_LEGACY + AT91SAM9263_ID_TWI,
		.end	= NR_IRQS_LEGACY + AT91SAM9263_ID_TWI,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9263_twi_device = {
	.name		= "i2c-at91sam9260",
	.id		= 0,
	.resource	= twi_resources,
	.num_resources	= ARRAY_SIZE(twi_resources),
};

/* Hardware TWI: mux the TWD/TWCK pads and register the i2c-at91 adapter. */
void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
{
	/* pins used for TWI interface */
	at91_set_A_periph(AT91_PIN_PB4, 0);		/* TWD */
	at91_set_multi_drive(AT91_PIN_PB4, 1);

	at91_set_A_periph(AT91_PIN_PB5, 0);		/* TWCK */
	at91_set_multi_drive(AT91_PIN_PB5, 1);

	i2c_register_board_info(0, devices, nr_devices);
	platform_device_register(&at91sam9263_twi_device);
}
#else
void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) {}
#endif


/* --------------------------------------------------------------------
 *  SPI
 * -------------------------------------------------------------------- */

#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
static u64 spi_dmamask = DMA_BIT_MASK(32);
static struct resource spi0_resources[] = {
	[0] = {
		.start	= AT91SAM9263_BASE_SPI0,
		.end	= AT91SAM9263_BASE_SPI0 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= NR_IRQS_LEGACY + AT91SAM9263_ID_SPI0,
		.end	= NR_IRQS_LEGACY + AT91SAM9263_ID_SPI0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9263_spi0_device = {
	.name		= "atmel_spi",
	.id		= 0,
	.dev		= {
		.dma_mask		= &spi_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= spi0_resources,
	.num_resources	= ARRAY_SIZE(spi0_resources),
};

/* Default chip-select GPIOs for SPI0, indexed by chip_select. */
static const unsigned spi0_standard_cs[4] = { AT91_PIN_PA5, AT91_PIN_PA3, AT91_PIN_PA4, AT91_PIN_PB11 };

static struct resource spi1_resources[] = {
	[0] = {
		.start	= AT91SAM9263_BASE_SPI1,
		.end	= AT91SAM9263_BASE_SPI1 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= NR_IRQS_LEGACY + AT91SAM9263_ID_SPI1,
		.end	= NR_IRQS_LEGACY + AT91SAM9263_ID_SPI1,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9263_spi1_device = {
	.name		= "atmel_spi",
	.id		= 1,
	.dev		= {
		.dma_mask		= &spi_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= spi1_resources,
	.num_resources	= ARRAY_SIZE(spi1_resources),
};

/* Default chip-select GPIOs for SPI1, indexed by chip_select. */
static const unsigned spi1_standard_cs[4] = { AT91_PIN_PB15, AT91_PIN_PB16, AT91_PIN_PB17, AT91_PIN_PB18 };

/*
 * at91_add_device_spi() - resolve each device's chip-select GPIO, mux the
 * bus pads, and register whichever SPI controllers are actually used.
 *
 * controller_data may carry a board-chosen CS GPIO; otherwise the
 * per-bus default table applies.  The resolved GPIO is written back into
 * controller_data for the atmel_spi driver.
 */
void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
{
	int i;
	unsigned long cs_pin;
	short enable_spi0 = 0;
	short enable_spi1 = 0;

	/* Choose SPI chip-selects */
	for (i = 0; i < nr_devices; i++) {
		if (devices[i].controller_data)
			cs_pin = (unsigned long) devices[i].controller_data;
		else if (devices[i].bus_num == 0)
			cs_pin = spi0_standard_cs[devices[i].chip_select];
		else
			cs_pin = spi1_standard_cs[devices[i].chip_select];

		if (!gpio_is_valid(cs_pin))
			continue;

		if (devices[i].bus_num == 0)
			enable_spi0 = 1;
		else
			enable_spi1 = 1;

		/* enable chip-select pin */
		at91_set_gpio_output(cs_pin, 1);

		/* pass chip-select pin to driver */
		devices[i].controller_data = (void *) cs_pin;
	}

	spi_register_board_info(devices, nr_devices);

	/* Configure SPI bus(es) */
	if (enable_spi0) {
		at91_set_B_periph(AT91_PIN_PA0, 0);	/* SPI0_MISO */
		at91_set_B_periph(AT91_PIN_PA1, 0);	/* SPI0_MOSI */
		at91_set_B_periph(AT91_PIN_PA2, 0);	/* SPI0_SPCK */
		platform_device_register(&at91sam9263_spi0_device);
	}
	if (enable_spi1) {
		at91_set_A_periph(AT91_PIN_PB12, 0);	/* SPI1_MISO */
		at91_set_A_periph(AT91_PIN_PB13, 0);	/* SPI1_MOSI */
		at91_set_A_periph(AT91_PIN_PB14, 0);	/* SPI1_SPCK */
		platform_device_register(&at91sam9263_spi1_device);
	}
}
#else
void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) {}
#endif


/* --------------------------------------------------------------------
 *  AC97
 * -------------------------------------------------------------------- */

#if defined(CONFIG_SND_ATMEL_AC97C) || defined(CONFIG_SND_ATMEL_AC97C_MODULE)
static u64 ac97_dmamask = DMA_BIT_MASK(32);
static struct ac97c_platform_data ac97_data;

static struct resource ac97_resources[] = {
	[0] = {
		.start	= AT91SAM9263_BASE_AC97C,
		.end	= AT91SAM9263_BASE_AC97C + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= NR_IRQS_LEGACY + AT91SAM9263_ID_AC97C,
		.end	= NR_IRQS_LEGACY + AT91SAM9263_ID_AC97C,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9263_ac97_device = {
	.name		= "atmel_ac97c",
	.id		= 0,
	.dev		= {
		.dma_mask		= &ac97_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &ac97_data,
	},
	.resource	= ac97_resources,
	.num_resources	= ARRAY_SIZE(ac97_resources),
};

/*
 * at91_add_device_ac97() - mux the AC97 link pads, drive the codec
 * reset GPIO low (if wired), and register the controller.
 */
void __init at91_add_device_ac97(struct ac97c_platform_data *data)
{
	if (!data)
		return;

	at91_set_A_periph(AT91_PIN_PB0, 0);	/* AC97FS */
	at91_set_A_periph(AT91_PIN_PB1, 0);	/* AC97CK */
	at91_set_A_periph(AT91_PIN_PB2, 0);	/* AC97TX */
	at91_set_A_periph(AT91_PIN_PB3, 0);	/* AC97RX */

	/* reset */
	if (gpio_is_valid(data->reset_pin))
		at91_set_gpio_output(data->reset_pin, 0);

	ac97_data = *data;
	platform_device_register(&at91sam9263_ac97_device);
}
#else
void __init at91_add_device_ac97(struct ac97c_platform_data *data) {}
#endif


/* --------------------------------------------------------------------
 *  CAN Controller
 * -------------------------------------------------------------------- */

#if defined(CONFIG_CAN_AT91) || defined(CONFIG_CAN_AT91_MODULE)
static struct resource can_resources[] = {
	[0] = {
		.start	= AT91SAM9263_BASE_CAN,
		.end	= AT91SAM9263_BASE_CAN + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= NR_IRQS_LEGACY + AT91SAM9263_ID_CAN,
		.end	= NR_IRQS_LEGACY + AT91SAM9263_ID_CAN,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9263_can_device = {
	.name		= "at91_can",
	.id		= -1,
	.resource	= can_resources,
	.num_resources	= ARRAY_SIZE(can_resources),
};

/*
 * at91_add_device_can() - mux the CAN TX/RX pads and register the
 * controller; @data is handed to the driver as-is (not copied).
 */
void __init at91_add_device_can(struct at91_can_data *data)
{
	at91_set_A_periph(AT91_PIN_PA13, 0);	/* CANTX */
	at91_set_A_periph(AT91_PIN_PA14, 0);	/* CANRX */
	at91sam9263_can_device.dev.platform_data = data;

	platform_device_register(&at91sam9263_can_device);
}
#else
void __init at91_add_device_can(struct at91_can_data *data) {}
#endif


/* --------------------------------------------------------------------
 *  LCD Controller
 * -------------------------------------------------------------------- */

#if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
static u64 lcdc_dmamask = DMA_BIT_MASK(32);
static struct atmel_lcdfb_pdata lcdc_data;

static struct resource lcdc_resources[] = {
	[0] = {
		.start	= AT91SAM9263_LCDC_BASE,
		.end	= AT91SAM9263_LCDC_BASE + SZ_4K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= NR_IRQS_LEGACY + AT91SAM9263_ID_LCDC,
		.end	= NR_IRQS_LEGACY + AT91SAM9263_ID_LCDC,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91_lcdc_device = {
	.name		= "at91sam9263-lcdfb",
	.id		= 0,
	.dev		= {
		.dma_mask		= &lcdc_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &lcdc_data,
	},
	.resource	= lcdc_resources,
	.num_resources	= ARRAY_SIZE(lcdc_resources),
};

/*
 * at91_add_device_lcdc() - mux the LCD control and data pads and
 * register the framebuffer device.  Note some data lines come from
 * peripheral B (LCDCC, LCDD13, LCDD21) per the 9263 pad multiplexing.
 */
void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
{
	if (!data)
		return;

	at91_set_A_periph(AT91_PIN_PC1, 0);	/* LCDHSYNC */
	at91_set_A_periph(AT91_PIN_PC2, 0);	/* LCDDOTCK */
	at91_set_A_periph(AT91_PIN_PC3, 0);	/* LCDDEN */
	at91_set_B_periph(AT91_PIN_PB9, 0);	/* LCDCC */
	at91_set_A_periph(AT91_PIN_PC6, 0);	/* LCDD2 */
	at91_set_A_periph(AT91_PIN_PC7, 0);	/* LCDD3 */
	at91_set_A_periph(AT91_PIN_PC8, 0);	/* LCDD4 */
	at91_set_A_periph(AT91_PIN_PC9, 0);	/* LCDD5 */
	at91_set_A_periph(AT91_PIN_PC10, 0);	/* LCDD6 */
	at91_set_A_periph(AT91_PIN_PC11, 0);	/* LCDD7 */
	at91_set_A_periph(AT91_PIN_PC14, 0);	/* LCDD10 */
	at91_set_A_periph(AT91_PIN_PC15, 0);	/* LCDD11 */
	at91_set_A_periph(AT91_PIN_PC16, 0);	/* LCDD12 */
	at91_set_B_periph(AT91_PIN_PC12, 0);	/* LCDD13 */
	at91_set_A_periph(AT91_PIN_PC18, 0);	/* LCDD14 */
	at91_set_A_periph(AT91_PIN_PC19, 0);	/* LCDD15 */
	at91_set_A_periph(AT91_PIN_PC22, 0);	/* LCDD18 */
	at91_set_A_periph(AT91_PIN_PC23, 0);	/* LCDD19 */
	at91_set_A_periph(AT91_PIN_PC24, 0);	/* LCDD20 */
	at91_set_B_periph(AT91_PIN_PC17, 0);	/* LCDD21 */
	at91_set_A_periph(AT91_PIN_PC26, 0);	/* LCDD22 */
	at91_set_A_periph(AT91_PIN_PC27, 0);	/* LCDD23 */

	lcdc_data = *data;
	platform_device_register(&at91_lcdc_device);
}
#else
void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
#endif


/* --------------------------------------------------------------------
 *  Image Sensor Interface
 * -------------------------------------------------------------------- */

#if defined(CONFIG_VIDEO_AT91_ISI) || defined(CONFIG_VIDEO_AT91_ISI_MODULE)
/* NOTE(review): deliberately non-static in the original — keep linkage. */
struct resource isi_resources[] = {
	[0] = {
		.start	= AT91SAM9263_BASE_ISI,
		.end	= AT91SAM9263_BASE_ISI + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= NR_IRQS_LEGACY + AT91SAM9263_ID_ISI,
		.end	= NR_IRQS_LEGACY + AT91SAM9263_ID_ISI,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9263_isi_device = {
	.name		= "at91_isi",
	.id		= -1,
	.resource	= isi_resources,
	.num_resources	= ARRAY_SIZE(isi_resources),
};

/*
 * at91_add_device_isi() - mux the image-sensor pads.  @data is unused
 * here; the platform device itself is never registered by this
 * function (pin setup only, matching the original behavior).
 */
void __init at91_add_device_isi(struct isi_platform_data *data, bool use_pck_as_mck)
{
	at91_set_A_periph(AT91_PIN_PE0, 0);	/* ISI_D0 */
	at91_set_A_periph(AT91_PIN_PE1, 0);	/* ISI_D1 */
	at91_set_A_periph(AT91_PIN_PE2, 0);	/* ISI_D2 */
	at91_set_A_periph(AT91_PIN_PE3, 0);	/* ISI_D3 */
	at91_set_A_periph(AT91_PIN_PE4, 0);	/* ISI_D4 */
	at91_set_A_periph(AT91_PIN_PE5, 0);	/* ISI_D5 */
	at91_set_A_periph(AT91_PIN_PE6, 0);	/* ISI_D6 */
	at91_set_A_periph(AT91_PIN_PE7, 0);	/* ISI_D7 */
	at91_set_A_periph(AT91_PIN_PE8, 0);	/* ISI_PCK */
	at91_set_A_periph(AT91_PIN_PE9, 0);	/* ISI_HSYNC */
	at91_set_A_periph(AT91_PIN_PE10, 0);	/* ISI_VSYNC */
	at91_set_B_periph(AT91_PIN_PE12, 0);	/* ISI_PD8 */
	at91_set_B_periph(AT91_PIN_PE13, 0);	/* ISI_PD9 */
	at91_set_B_periph(AT91_PIN_PE14, 0);	/* ISI_PD10 */
	at91_set_B_periph(AT91_PIN_PE15, 0);	/* ISI_PD11 */

	if (use_pck_as_mck) {
		at91_set_B_periph(AT91_PIN_PE11, 0);	/* ISI_MCK (PCK3) */

		/* TODO: register the PCK for ISI_MCK and set its parent */
	}
}
#else
void __init at91_add_device_isi(struct isi_platform_data *data, bool use_pck_as_mck) {}
#endif


/* --------------------------------------------------------------------
 *  Timer/Counter block
 * -------------------------------------------------------------------- */

#ifdef CONFIG_ATMEL_TCLIB

static struct resource tcb_resources[] = {
	[0] = {
		.start	= AT91SAM9263_BASE_TCB0,
		.end	= AT91SAM9263_BASE_TCB0 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= NR_IRQS_LEGACY + AT91SAM9263_ID_TCB,
		.end	= NR_IRQS_LEGACY + AT91SAM9263_ID_TCB,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9263_tcb_device = {
	.name		= "atmel_tcb",
	.id		= 0,
	.resource	= tcb_resources,
	.num_resources	= ARRAY_SIZE(tcb_resources),
};

#if defined(CONFIG_OF)
static struct of_device_id tcb_ids[] = {
	{ .compatible = "atmel,at91rm9200-tcb" },
	{ /*sentinel*/ }
};
#endif

/*
 * Register the TC block — unless the device tree already describes a
 * compatible node, in which case DT probing owns it and we back off.
 */
static void __init at91_add_device_tc(void)
{
#if defined(CONFIG_OF)
	struct device_node *np;

	np = of_find_matching_node(NULL, tcb_ids);
	if (np) {
		of_node_put(np);
		return;
	}
#endif
	platform_device_register(&at91sam9263_tcb_device);
}
#else
static void __init at91_add_device_tc(void) { }
#endif


/* --------------------------------------------------------------------
 *  RTT
 * -------------------------------------------------------------------- */

/* Resources [1] (GPBR backing store) and [2] (IRQ) are filled in only
 * when the RTT is pressed into service as the RTC. */
static struct resource rtt0_resources[] = {
	{
		.start	= AT91SAM9263_BASE_RTT0,
		.end	= AT91SAM9263_BASE_RTT0 + SZ_16 - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.flags	= IORESOURCE_MEM,
	}, {
		.flags	= IORESOURCE_IRQ,
	}
};

static struct platform_device at91sam9263_rtt0_device = {
	.name		= "at91_rtt",
	.id		= 0,
	.resource	= rtt0_resources,
};

static struct resource rtt1_resources[] = {
	{
		.start	= AT91SAM9263_BASE_RTT1,
		.end	= AT91SAM9263_BASE_RTT1 + SZ_16 - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.flags	= IORESOURCE_MEM,
	}, {
		.flags	= IORESOURCE_IRQ,
	}
};

static struct platform_device at91sam9263_rtt1_device = {
	.name		= "at91_rtt",
	.id		= 1,
	.resource	= rtt1_resources,
};

#if IS_ENABLED(CONFIG_RTC_DRV_AT91SAM9)
/*
 * Rebadge the RTT selected by CONFIG_RTC_DRV_AT91SAM9_RTT as the
 * "rtc-at91sam9" device, giving it a GPBR word for the time offset and
 * the shared system interrupt.
 */
static void __init at91_add_device_rtt_rtc(void)
{
	struct platform_device *pdev;
	struct resource *r;

	switch (CONFIG_RTC_DRV_AT91SAM9_RTT) {
	case 0:
		/*
		 * The second resource is needed only for the chosen RTT:
		 * GPBR will serve as the storage for RTC time offset
		 */
		at91sam9263_rtt0_device.num_resources = 3;
		at91sam9263_rtt1_device.num_resources = 1;
		pdev = &at91sam9263_rtt0_device;
		r = rtt0_resources;
		break;
	case 1:
		at91sam9263_rtt0_device.num_resources = 1;
		at91sam9263_rtt1_device.num_resources = 3;
		pdev = &at91sam9263_rtt1_device;
		r = rtt1_resources;
		break;
	default:
		pr_err("at91sam9263: only supports 2 RTT (%d)\n",
		       CONFIG_RTC_DRV_AT91SAM9_RTT);
		return;
	}

	pdev->name = "rtc-at91sam9";
	r[1].start = AT91SAM9263_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
	r[1].end = r[1].start + 3;
	r[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
	r[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
{
	/* Only one resource is needed: RTT not used as RTC */
	at91sam9263_rtt0_device.num_resources = 1;
	at91sam9263_rtt1_device.num_resources = 1;
}
#endif

/* Register both RTT instances after the RTC fixup above. */
static void __init at91_add_device_rtt(void)
{
	at91_add_device_rtt_rtc();
	platform_device_register(&at91sam9263_rtt0_device);
	platform_device_register(&at91sam9263_rtt1_device);
}


/* --------------------------------------------------------------------
 *  Watchdog
 * -------------------------------------------------------------------- */

#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
static struct resource wdt_resources[] = {
	{
		.start	= AT91SAM9263_BASE_WDT,
		.end	= AT91SAM9263_BASE_WDT + SZ_16 - 1,
		.flags	= IORESOURCE_MEM,
	}
};

static struct platform_device at91sam9263_wdt_device = {
	.name		= "at91_wdt",
	.id		= -1,
	.resource	= wdt_resources,
	.num_resources	= ARRAY_SIZE(wdt_resources),
};

static void __init at91_add_device_watchdog(void)
{
	platform_device_register(&at91sam9263_wdt_device);
}
#else
static void __init at91_add_device_watchdog(void) {}
#endif


/* --------------------------------------------------------------------
 *  PWM
 * --------------------------------------------------------------------*/

#if defined(CONFIG_ATMEL_PWM)
static u32 pwm_mask;

static struct resource pwm_resources[] = {
	[0] = {
		.start	= AT91SAM9263_BASE_PWMC,
		.end	= AT91SAM9263_BASE_PWMC + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= NR_IRQS_LEGACY + AT91SAM9263_ID_PWMC,
		.end	= NR_IRQS_LEGACY + AT91SAM9263_ID_PWMC,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9263_pwm0_device = {
	.name	= "atmel_pwm",
	.id	= -1,
	.dev	= {
		.platform_data		= &pwm_mask,
	},
	.resource	= pwm_resources,
	.num_resources	= ARRAY_SIZE(pwm_resources),
};

/*
 * at91_add_device_pwm() - mux the pads for the PWM channels selected in
 * @mask (bit per AT91_PWMx channel) and register the controller.
 */
void __init at91_add_device_pwm(u32 mask)
{
	if (mask & (1 << AT91_PWM0))
		at91_set_B_periph(AT91_PIN_PB7, 1);	/* enable PWM0 */

	if (mask & (1 << AT91_PWM1))
		at91_set_B_periph(AT91_PIN_PB8, 1);	/* enable PWM1 */

	if (mask & (1 << AT91_PWM2))
		at91_set_B_periph(AT91_PIN_PC29, 1);	/* enable PWM2 */

	if (mask & (1 << AT91_PWM3))
		at91_set_B_periph(AT91_PIN_PB29, 1);	/* enable PWM3 */

	pwm_mask = mask;

	platform_device_register(&at91sam9263_pwm0_device);
}
#else
void __init at91_add_device_pwm(u32 mask) {}
#endif


/* --------------------------------------------------------------------
 *  SSC -- Synchronous Serial Controller
 * -------------------------------------------------------------------- */

#if defined(CONFIG_ATMEL_SSC) || defined(CONFIG_ATMEL_SSC_MODULE)
static u64 ssc0_dmamask = DMA_BIT_MASK(32);

static struct resource ssc0_resources[] = {
	[0] = {
		.start	= AT91SAM9263_BASE_SSC0,
		.end	= AT91SAM9263_BASE_SSC0 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= NR_IRQS_LEGACY + AT91SAM9263_ID_SSC0,
		.end	= NR_IRQS_LEGACY + AT91SAM9263_ID_SSC0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9263_ssc0_device = {
	.name	= "at91rm9200_ssc",
	.id	= 0,
	.dev	= {
		.dma_mask		= &ssc0_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= ssc0_resources,
	.num_resources	= ARRAY_SIZE(ssc0_resources),
};

/* SSC0 pads live on peripheral B of PB0..PB5. */
static inline void configure_ssc0_pins(unsigned pins)
{
	if (pins & ATMEL_SSC_TF)
		at91_set_B_periph(AT91_PIN_PB0, 1);
	if (pins & ATMEL_SSC_TK)
		at91_set_B_periph(AT91_PIN_PB1, 1);
	if (pins & ATMEL_SSC_TD)
		at91_set_B_periph(AT91_PIN_PB2, 1);
	if (pins & ATMEL_SSC_RD)
		at91_set_B_periph(AT91_PIN_PB3, 1);
	if (pins & ATMEL_SSC_RK)
		at91_set_B_periph(AT91_PIN_PB4, 1);
	if (pins & ATMEL_SSC_RF)
		at91_set_B_periph(AT91_PIN_PB5, 1);
}

static u64 ssc1_dmamask = DMA_BIT_MASK(32);

static struct resource ssc1_resources[] = {
	[0] = {
		.start	= AT91SAM9263_BASE_SSC1,
		.end	= AT91SAM9263_BASE_SSC1 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= NR_IRQS_LEGACY + AT91SAM9263_ID_SSC1,
		.end	= NR_IRQS_LEGACY + AT91SAM9263_ID_SSC1,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9263_ssc1_device = {
	.name	= "at91rm9200_ssc",
	.id	= 1,
	.dev	= {
		.dma_mask		= &ssc1_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= ssc1_resources,
	.num_resources	= ARRAY_SIZE(ssc1_resources),
};

/* SSC1 pads live on peripheral A of PB6..PB11. */
static inline void configure_ssc1_pins(unsigned pins)
{
	if (pins & ATMEL_SSC_TF)
		at91_set_A_periph(AT91_PIN_PB6, 1);
	if (pins & ATMEL_SSC_TK)
		at91_set_A_periph(AT91_PIN_PB7, 1);
	if (pins & ATMEL_SSC_TD)
		at91_set_A_periph(AT91_PIN_PB8, 1);
	if (pins & ATMEL_SSC_RD)
		at91_set_A_periph(AT91_PIN_PB9, 1);
	if (pins & ATMEL_SSC_RK)
		at91_set_A_periph(AT91_PIN_PB10, 1);
	if (pins & ATMEL_SSC_RF)
		at91_set_A_periph(AT91_PIN_PB11, 1);
}

/*
 * SSC controllers are accessed through library code, instead of any
 * kind of all-singing/all-dancing driver.  For example one could be
 * used by a particular I2S audio codec's driver, while another one
 * on the same system might be used by a custom data capture driver.
 */
void __init at91_add_device_ssc(unsigned id, unsigned pins)
{
	struct platform_device *pdev;

	/*
	 * NOTE: caller is responsible for passing information matching
	 * "pins" to whatever will be using each particular controller.
	 */
	switch (id) {
	case AT91SAM9263_ID_SSC0:
		pdev = &at91sam9263_ssc0_device;
		configure_ssc0_pins(pins);
		break;
	case AT91SAM9263_ID_SSC1:
		pdev = &at91sam9263_ssc1_device;
		configure_ssc1_pins(pins);
		break;
	default:
		return;
	}

	platform_device_register(pdev);
}

#else
void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
#endif


/* --------------------------------------------------------------------
 *  UART
 * -------------------------------------------------------------------- */

#if defined(CONFIG_SERIAL_ATMEL)

static struct resource dbgu_resources[] = {
	[0] = {
		.start	= AT91SAM9263_BASE_DBGU,
		.end	= AT91SAM9263_BASE_DBGU + SZ_512 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= NR_IRQS_LEGACY + AT91_ID_SYS,
		.end	= NR_IRQS_LEGACY + AT91_ID_SYS,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct atmel_uart_data dbgu_data = {
	.use_dma_tx	= 0,
	.use_dma_rx	= 0,		/* DBGU not capable of receive DMA */
	.rts_gpio	= -EINVAL,
};

static u64 dbgu_dmamask = DMA_BIT_MASK(32);

static struct platform_device at91sam9263_dbgu_device = {
	.name		= "atmel_usart",
	.id		= 0,
	.dev		= {
		.dma_mask		= &dbgu_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &dbgu_data,
	},
	.resource	= dbgu_resources,
	.num_resources	= ARRAY_SIZE(dbgu_resources),
};

static inline void configure_dbgu_pins(void)
{
	at91_set_A_periph(AT91_PIN_PC30, 0);		/* DRXD */
	at91_set_A_periph(AT91_PIN_PC31, 1);		/* DTXD */
}

static struct resource uart0_resources[] = {
	[0] = {
		.start	= AT91SAM9263_BASE_US0,
		.end	= AT91SAM9263_BASE_US0 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= NR_IRQS_LEGACY + AT91SAM9263_ID_US0,
		.end	= NR_IRQS_LEGACY + AT91SAM9263_ID_US0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct atmel_uart_data uart0_data = {
	.use_dma_tx	= 1,
	.use_dma_rx	= 1,
	.rts_gpio	= -EINVAL,
};

static u64 uart0_dmamask = DMA_BIT_MASK(32);

static struct platform_device at91sam9263_uart0_device = {
	.name		= "atmel_usart",
	.id		= 1,
	.dev		= {
		.dma_mask		= &uart0_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &uart0_data,
	},
	.resource	= uart0_resources,
	.num_resources	= ARRAY_SIZE(uart0_resources),
};

static inline void configure_usart0_pins(unsigned pins)
{
	at91_set_A_periph(AT91_PIN_PA26, 1);		/* TXD0 */
	at91_set_A_periph(AT91_PIN_PA27, 0);		/* RXD0 */

	if (pins & ATMEL_UART_RTS)
		at91_set_A_periph(AT91_PIN_PA28, 0);	/* RTS0 */
	if (pins & ATMEL_UART_CTS)
		at91_set_A_periph(AT91_PIN_PA29, 0);	/* CTS0 */
}

static struct resource uart1_resources[] = {
	[0] = {
		.start	= AT91SAM9263_BASE_US1,
		.end	= AT91SAM9263_BASE_US1 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= NR_IRQS_LEGACY + AT91SAM9263_ID_US1,
		.end	= NR_IRQS_LEGACY + AT91SAM9263_ID_US1,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct atmel_uart_data uart1_data = {
	.use_dma_tx	= 1,
	.use_dma_rx	= 1,
	.rts_gpio	= -EINVAL,
};

static u64 uart1_dmamask = DMA_BIT_MASK(32);

static struct platform_device at91sam9263_uart1_device = {
	.name		= "atmel_usart",
	.id		= 2,
	.dev		= {
		.dma_mask		= &uart1_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &uart1_data,
	},
	.resource	= uart1_resources,
	.num_resources	= ARRAY_SIZE(uart1_resources),
};

static inline void configure_usart1_pins(unsigned pins)
{
	at91_set_A_periph(AT91_PIN_PD0, 1);		/* TXD1 */
	at91_set_A_periph(AT91_PIN_PD1, 0);		/* RXD1 */

	if (pins & ATMEL_UART_RTS)
		at91_set_B_periph(AT91_PIN_PD7, 0);	/* RTS1 */
	if (pins & ATMEL_UART_CTS)
		at91_set_B_periph(AT91_PIN_PD8, 0);	/* CTS1 */
}

static struct resource uart2_resources[] = {
	[0] = {
		.start	= AT91SAM9263_BASE_US2,
		.end	= AT91SAM9263_BASE_US2 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= NR_IRQS_LEGACY + AT91SAM9263_ID_US2,
		.end	= NR_IRQS_LEGACY + AT91SAM9263_ID_US2,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct atmel_uart_data uart2_data = {
	.use_dma_tx	= 1,
	.use_dma_rx	= 1,
	.rts_gpio	= -EINVAL,
};

static u64 uart2_dmamask = DMA_BIT_MASK(32);

static struct platform_device at91sam9263_uart2_device = {
	.name		= "atmel_usart",
	.id		= 3,
	.dev		= {
		.dma_mask		= &uart2_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &uart2_data,
	},
	.resource	= uart2_resources,
	.num_resources	= ARRAY_SIZE(uart2_resources),
};

static inline void configure_usart2_pins(unsigned pins)
{
	at91_set_A_periph(AT91_PIN_PD2, 1);		/* TXD2 */
	at91_set_A_periph(AT91_PIN_PD3, 0);		/* RXD2 */

	if (pins & ATMEL_UART_RTS)
		at91_set_B_periph(AT91_PIN_PD5, 0);	/* RTS2 */
	if (pins & ATMEL_UART_CTS)
		at91_set_B_periph(AT91_PIN_PD6, 0);	/* CTS2 */
}

static struct platform_device *__initdata at91_uarts[ATMEL_MAX_UART];	/* the UARTs to use */

/*
 * at91_register_uart() - mux one USART's (or the DBGU's) pads and map
 * it to Linux port number @portnr; the device itself is registered
 * later by at91_add_device_serial().
 * @id:     0 for DBGU, else AT91SAM9263_ID_USx.
 * @portnr: ttySx index; out-of-range values configure pins but are
 *          silently dropped from the table.
 * @pins:   ATMEL_UART_RTS / ATMEL_UART_CTS handshake selection.
 */
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
	struct platform_device *pdev;
	struct atmel_uart_data *pdata;

	switch (id) {
	case 0:		/* DBGU */
		pdev = &at91sam9263_dbgu_device;
		configure_dbgu_pins();
		break;
	case AT91SAM9263_ID_US0:
		pdev = &at91sam9263_uart0_device;
		configure_usart0_pins(pins);
		break;
	case AT91SAM9263_ID_US1:
		pdev = &at91sam9263_uart1_device;
		configure_usart1_pins(pins);
		break;
	case AT91SAM9263_ID_US2:
		pdev = &at91sam9263_uart2_device;
		configure_usart2_pins(pins);
		break;
	default:
		return;
	}
	pdata = pdev->dev.platform_data;
	pdata->num = portnr;		/* update to mapped ID */

	if (portnr < ATMEL_MAX_UART)
		at91_uarts[portnr] = pdev;
}

/* Register every UART previously claimed via at91_register_uart(). */
void __init at91_add_device_serial(void)
{
	int i;

	for (i = 0; i < ATMEL_MAX_UART; i++) {
		if (at91_uarts[i])
			platform_device_register(at91_uarts[i]);
	}
}
#else
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {}
void __init at91_add_device_serial(void) {}
#endif

/* -------------------------------------------------------------------- */
/*
 * These devices are always present and don't need any board-specific
 * setup.
 */
static int __init at91_add_standard_devices(void)
{
	/* Device-tree boots describe these devices themselves. */
	if (of_have_populated_dt())
		return 0;

	at91_add_device_rtt();
	at91_add_device_watchdog();
	at91_add_device_tc();
	return 0;
}

arch_initcall(at91_add_standard_devices);
gpl-2.0
csc027/qmk_firmware
keyboards/massdrop/alt/keymaps/abishalom/keymap.c
43
5845
/*
 * Massdrop ALT keyboard — "abishalom" keymap.
 *
 * Layer 0 is a standard 65% ANSI layout. Layer 1 (activated by holding the
 * MO(1) key on the bottom row) carries F-keys, RGB-matrix controls, media
 * keys, and the custom debug/bootloader keycodes declared below. The custom
 * keycodes are handled in process_record_user() at the bottom of this file.
 */
#include QMK_KEYBOARD_H

/*
 * Custom keycodes, allocated starting at SAFE_RANGE so they never collide
 * with QMK's built-in keycode space.
 */
enum alt_keycodes {
    U_T_AUTO = SAFE_RANGE, //USB Extra Port Toggle Auto Detect / Always Active
    U_T_AGCR,              //USB Toggle Automatic GCR control
    DBG_TOG,               //DEBUG Toggle On / Off
    DBG_MTRX,              //DEBUG Toggle Matrix Prints
    DBG_KBD,               //DEBUG Toggle Keyboard Prints
    DBG_MOU,               //DEBUG Toggle Mouse Prints
    MD_BOOT,               //Restart into bootloader after hold timeout
};

#define TG_NKRO MAGIC_TOGGLE_NKRO //Toggle 6KRO / NKRO mode

/*
 * Key matrix. Row order within each LAYOUT() call is top row to bottom row;
 * _______ (KC_TRNS) falls through to the layer below.
 */
const uint16_t PROGMEM keymaps[][MATRIX_ROWS][MATRIX_COLS] = {
    /* Base layer: standard 65% ANSI. */
    [0] = LAYOUT(
        KC_ESC,  KC_1,    KC_2,    KC_3,    KC_4,    KC_5,    KC_6,    KC_7,    KC_8,    KC_9,    KC_0,    KC_MINS, KC_EQL,  KC_BSPC, KC_DEL,  \
        KC_TAB,  KC_Q,    KC_W,    KC_E,    KC_R,    KC_T,    KC_Y,    KC_U,    KC_I,    KC_O,    KC_P,    KC_LBRC, KC_RBRC, KC_BSLS, KC_HOME, \
        KC_CAPS, KC_A,    KC_S,    KC_D,    KC_F,    KC_G,    KC_H,    KC_J,    KC_K,    KC_L,    KC_SCLN, KC_QUOT, KC_ENT,           KC_PGUP, \
        KC_LSFT, KC_Z,    KC_X,    KC_C,    KC_V,    KC_B,    KC_N,    KC_M,    KC_COMM, KC_DOT,  KC_SLSH, KC_RSFT, KC_UP,            KC_PGDN, \
        KC_LCTL, KC_LGUI, KC_LALT,                   KC_SPC,                             KC_RALT, MO(1),   KC_LEFT, KC_DOWN, KC_RGHT  \
    ),
    /* Fn layer: F-keys, RGB controls, media, and the custom keycodes above. */
    [1] = LAYOUT(
        KC_GRV,  KC_F1,   KC_F2,   KC_F3,   KC_F4,   KC_F5,   KC_F6,   KC_F7,   KC_F8,   KC_F9,   KC_F10,  KC_F11,  KC_F12,  _______, KC_MUTE, \
        _______, RGB_SPD, RGB_VAI, RGB_SPI, RGB_HUI, RGB_SAI, _______, U_T_AUTO,U_T_AGCR,_______, KC_PSCR, KC_SLCK, KC_PAUS, _______, KC_END,  \
        _______, RGB_RMOD,RGB_VAD, RGB_MOD, RGB_HUD, RGB_SAD, _______, _______, _______, _______, _______, _______, _______,          KC_VOLU, \
        _______, RGB_TOG, _______, _______, _______, MD_BOOT, TG_NKRO, DBG_TOG, _______, _______, _______, _______, KC_PGUP,          KC_VOLD, \
        _______, _______, _______,                   _______,                            _______, _______, KC_HOME, KC_PGDN, KC_END   \
    ),
    /* Template kept for adding new layers:
    [X] = LAYOUT(
        _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, \
        _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, \
        _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______,          _______, \
        _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______,          _______, \
        _______, _______, _______,                   _______,                            _______, _______, _______, _______, _______  \
    ),
    */
};

// Runs just one time when the keyboard initializes.
void matrix_init_user(void) {
};

// Runs constantly in the background, in a loop.
void matrix_scan_user(void) {
};

/*
 * Modifier-state test helpers. NOTE(review): each macro calls get_mods()
 * twice; harmless here since get_mods() is a pure state read.
 */
#define MODS_SHIFT  (get_mods() & MOD_BIT(KC_LSHIFT) || get_mods() & MOD_BIT(KC_RSHIFT))
#define MODS_CTRL  (get_mods() & MOD_BIT(KC_LCTL) || get_mods() & MOD_BIT(KC_RCTRL))
#define MODS_ALT  (get_mods() & MOD_BIT(KC_LALT) || get_mods() & MOD_BIT(KC_RALT))

/*
 * Intercepts the custom keycodes defined above before QMK's default
 * processing. Returns false to consume the event, true to let QMK handle
 * the keycode normally.
 * TOGGLE_FLAG_AND_PRINT is a Massdrop-provided macro — presumably it flips
 * the named flag and logs the new state over console; confirm in the
 * Massdrop ALT support code.
 */
bool process_record_user(uint16_t keycode, keyrecord_t *record) {
    static uint32_t key_timer; // persists across calls to time the MD_BOOT hold

    switch (keycode) {
        case U_T_AUTO:
            // Requires Shift+Ctrl held as a safety interlock.
            if (record->event.pressed && MODS_SHIFT && MODS_CTRL) {
                TOGGLE_FLAG_AND_PRINT(usb_extra_manual, "USB extra port manual mode");
            }
            return false;
        case U_T_AGCR:
            // Requires Shift+Ctrl held as a safety interlock.
            if (record->event.pressed && MODS_SHIFT && MODS_CTRL) {
                TOGGLE_FLAG_AND_PRINT(usb_gcr_auto, "USB GCR auto mode");
            }
            return false;
        case DBG_TOG:
            if (record->event.pressed) {
                TOGGLE_FLAG_AND_PRINT(debug_enable, "Debug mode");
            }
            return false;
        case DBG_MTRX:
            if (record->event.pressed) {
                TOGGLE_FLAG_AND_PRINT(debug_matrix, "Debug matrix");
            }
            return false;
        case DBG_KBD:
            if (record->event.pressed) {
                TOGGLE_FLAG_AND_PRINT(debug_keyboard, "Debug keyboard");
            }
            return false;
        case DBG_MOU:
            if (record->event.pressed) {
                TOGGLE_FLAG_AND_PRINT(debug_mouse, "Debug mouse");
            }
            return false;
        case MD_BOOT:
            // Jump to bootloader only if the key was held >= 500 ms,
            // to prevent accidental resets.
            if (record->event.pressed) {
                key_timer = timer_read32();
            } else {
                if (timer_elapsed32(key_timer) >= 500) {
                    reset_keyboard();
                }
            }
            return false;
        case RGB_TOG:
            // Cycle LED flag groups: ALL -> KEYLIGHT -> UNDERGLOW -> NONE -> ALL.
            // LEDs are blanked on each step so the new flag set takes effect
            // immediately; NONE additionally disables the matrix driver.
            if (record->event.pressed) {
              switch (rgb_matrix_get_flags()) {
                case LED_FLAG_ALL: {
                    rgb_matrix_set_flags(LED_FLAG_KEYLIGHT);
                    rgb_matrix_set_color_all(0, 0, 0);
                  }
                  break;
                case LED_FLAG_KEYLIGHT: {
                    rgb_matrix_set_flags(LED_FLAG_UNDERGLOW);
                    rgb_matrix_set_color_all(0, 0, 0);
                  }
                  break;
                case LED_FLAG_UNDERGLOW: {
                    rgb_matrix_set_flags(LED_FLAG_NONE);
                    rgb_matrix_disable_noeeprom();
                  }
                  break;
                default: {
                    rgb_matrix_set_flags(LED_FLAG_ALL);
                    rgb_matrix_enable_noeeprom();
                  }
                  break;
              }
            }
            return false;
        default:
            return true; //Process all other keycodes normally
    }
}
gpl-2.0
Hellybean/SaberMod_ROM_Toolchain
gcc/testsuite/gcc.target/powerpc/vsx-builtin-3.c
43
8050
/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
/* { dg-require-effective-target powerpc_vsx_ok } */
/* { dg-options "-O2 -mcpu=power7" } */
/* { dg-final { scan-assembler "xxsel" } } */
/* { dg-final { scan-assembler "vperm" } } */
/* { dg-final { scan-assembler "xvrdpi" } } */
/* { dg-final { scan-assembler "xvrdpic" } } */
/* { dg-final { scan-assembler "xvrdpim" } } */
/* { dg-final { scan-assembler "xvrdpip" } } */
/* { dg-final { scan-assembler "xvrdpiz" } } */
/* { dg-final { scan-assembler "xvrspi" } } */
/* { dg-final { scan-assembler "xvrspic" } } */
/* { dg-final { scan-assembler "xvrspim" } } */
/* { dg-final { scan-assembler "xvrspip" } } */
/* { dg-final { scan-assembler "xvrspiz" } } */
/* { dg-final { scan-assembler "xsrdpi" } } */
/* { dg-final { scan-assembler "xsrdpic" } } */
/* { dg-final { scan-assembler "xsrdpim\|frim" } } */
/* { dg-final { scan-assembler "xsrdpip\|frip" } } */
/* { dg-final { scan-assembler "xsrdpiz\|friz" } } */
/* { dg-final { scan-assembler "xsmaxdp" } } */
/* { dg-final { scan-assembler "xsmindp" } } */
/* { dg-final { scan-assembler "xxland" } } */
/* { dg-final { scan-assembler "xxlandc" } } */
/* { dg-final { scan-assembler "xxlnor" } } */
/* { dg-final { scan-assembler "xxlor" } } */
/* { dg-final { scan-assembler "xxlxor" } } */
/* { dg-final { scan-assembler "xvcmpeqdp" } } */
/* { dg-final { scan-assembler "xvcmpgtdp" } } */
/* { dg-final { scan-assembler "xvcmpgedp" } } */
/* { dg-final { scan-assembler "xvcmpeqsp" } } */
/* { dg-final { scan-assembler "xvcmpgtsp" } } */
/* { dg-final { scan-assembler "xvcmpgesp" } } */
/* { dg-final { scan-assembler "xxsldwi" } } */
/* { dg-final { scan-assembler-not "call" } } */

/* Compile-only test: each __builtin_vsx_* call below must expand to the
   corresponding VSX instruction (checked by the scan-assembler directives
   above) and none may fall back to a library call (scan-assembler-not).
   Every statement writes through a distinct extern array element and bumps
   i so -O2 cannot dead-code-eliminate any call.  */

/* Global operand pools: one row per test statement, four vectors per row
   (destination plus up to three sources).  */
extern __vector int si[][4];
extern __vector short ss[][4];
extern __vector signed char sc[][4];
extern __vector float f[][4];
extern __vector unsigned int ui[][4];
extern __vector unsigned short us[][4];
extern __vector unsigned char uc[][4];
extern __vector __bool int bi[][4];
extern __vector __bool short bs[][4];
extern __vector __bool char bc[][4];
extern __vector __pixel p[][4];
#ifdef __VSX__
/* Types that only exist with VSX enabled.  */
extern __vector double d[][4];
extern __vector long sl[][4];
extern __vector unsigned long ul[][4];
extern __vector __bool long bl[][4];
#endif

/* xxsel: both the type-suffixed forms and the overloaded form, the latter
   with bool/unsigned selector variants.  */
int do_sel(void)
{
  int i = 0;

  si[i][0] = __builtin_vsx_xxsel_4si (si[i][1], si[i][2], si[i][3]); i++;
  ss[i][0] = __builtin_vsx_xxsel_8hi (ss[i][1], ss[i][2], ss[i][3]); i++;
  sc[i][0] = __builtin_vsx_xxsel_16qi (sc[i][1], sc[i][2], sc[i][3]); i++;
  f[i][0] = __builtin_vsx_xxsel_4sf (f[i][1], f[i][2], f[i][3]); i++;
  d[i][0] = __builtin_vsx_xxsel_2df (d[i][1], d[i][2], d[i][3]); i++;

  si[i][0] = __builtin_vsx_xxsel (si[i][1], si[i][2], bi[i][3]); i++;
  ss[i][0] = __builtin_vsx_xxsel (ss[i][1], ss[i][2], bs[i][3]); i++;
  sc[i][0] = __builtin_vsx_xxsel (sc[i][1], sc[i][2], bc[i][3]); i++;
  f[i][0] = __builtin_vsx_xxsel (f[i][1], f[i][2], bi[i][3]); i++;
  d[i][0] = __builtin_vsx_xxsel (d[i][1], d[i][2], bl[i][3]); i++;

  si[i][0] = __builtin_vsx_xxsel (si[i][1], si[i][2], ui[i][3]); i++;
  ss[i][0] = __builtin_vsx_xxsel (ss[i][1], ss[i][2], us[i][3]); i++;
  sc[i][0] = __builtin_vsx_xxsel (sc[i][1], sc[i][2], uc[i][3]); i++;
  f[i][0] = __builtin_vsx_xxsel (f[i][1], f[i][2], ui[i][3]); i++;
  d[i][0] = __builtin_vsx_xxsel (d[i][1], d[i][2], ul[i][3]); i++;
  return i;
}

/* vperm: type-suffixed forms plus the overloaded form.  */
int do_perm(void)
{
  int i = 0;

  si[i][0] = __builtin_vsx_vperm_4si (si[i][1], si[i][2], uc[i][3]); i++;
  ss[i][0] = __builtin_vsx_vperm_8hi (ss[i][1], ss[i][2], uc[i][3]); i++;
  sc[i][0] = __builtin_vsx_vperm_16qi (sc[i][1], sc[i][2], uc[i][3]); i++;
  f[i][0] = __builtin_vsx_vperm_4sf (f[i][1], f[i][2], uc[i][3]); i++;
  d[i][0] = __builtin_vsx_vperm_2df (d[i][1], d[i][2], uc[i][3]); i++;

  si[i][0] = __builtin_vsx_vperm (si[i][1], si[i][2], uc[i][3]); i++;
  ss[i][0] = __builtin_vsx_vperm (ss[i][1], ss[i][2], uc[i][3]); i++;
  sc[i][0] = __builtin_vsx_vperm (sc[i][1], sc[i][2], uc[i][3]); i++;
  f[i][0] = __builtin_vsx_vperm (f[i][1], f[i][2], uc[i][3]); i++;
  d[i][0] = __builtin_vsx_vperm (d[i][1], d[i][2], uc[i][3]); i++;
  return i;
}

/* xxpermdi with immediate selector operands 0 and 1.  */
int do_xxperm (void)
{
  int i = 0;

  d[i][0] = __builtin_vsx_xxpermdi_2df (d[i][1], d[i][2], 0); i++;
  d[i][0] = __builtin_vsx_xxpermdi (d[i][1], d[i][2], 1); i++;
  return i;
}

double x, y;

/* Build a V2DF vector from two scalar doubles.  */
void do_concat (void)
{
  d[0][0] = __builtin_vsx_concat_2df (x, y);
}

/* Replace element 0 / element 1 of a V2DF vector.  */
void do_set (void)
{
  d[0][0] = __builtin_vsx_set_2df (d[0][1], x, 0);
  d[1][0] = __builtin_vsx_set_2df (d[1][1], y, 1);
}

extern double z[][4];

/* Vector rounding (double and float), scalar rounding, scalar min/max.  */
int do_math (void)
{
  int i = 0;

  d[i][0] = __builtin_vsx_xvrdpi (d[i][1]); i++;
  d[i][0] = __builtin_vsx_xvrdpic (d[i][1]); i++;
  d[i][0] = __builtin_vsx_xvrdpim (d[i][1]); i++;
  d[i][0] = __builtin_vsx_xvrdpip (d[i][1]); i++;
  d[i][0] = __builtin_vsx_xvrdpiz (d[i][1]); i++;

  f[i][0] = __builtin_vsx_xvrspi (f[i][1]); i++;
  f[i][0] = __builtin_vsx_xvrspic (f[i][1]); i++;
  f[i][0] = __builtin_vsx_xvrspim (f[i][1]); i++;
  f[i][0] = __builtin_vsx_xvrspip (f[i][1]); i++;
  f[i][0] = __builtin_vsx_xvrspiz (f[i][1]); i++;

  z[i][0] = __builtin_vsx_xsrdpi (z[i][1]); i++;
  z[i][0] = __builtin_vsx_xsrdpic (z[i][1]); i++;
  z[i][0] = __builtin_vsx_xsrdpim (z[i][1]); i++;
  z[i][0] = __builtin_vsx_xsrdpip (z[i][1]); i++;
  z[i][0] = __builtin_vsx_xsrdpiz (z[i][1]); i++;

  z[i][0] = __builtin_vsx_xsmaxdp (z[i][1], z[i][0]); i++;
  z[i][0] = __builtin_vsx_xsmindp (z[i][1], z[i][0]); i++;
  return i;
}

/* Vector floating-point compares (double and float).  */
int do_cmp (void)
{
  int i = 0;

  d[i][0] = __builtin_vsx_xvcmpeqdp (d[i][1], d[i][2]); i++;
  d[i][0] = __builtin_vsx_xvcmpgtdp (d[i][1], d[i][2]); i++;
  d[i][0] = __builtin_vsx_xvcmpgedp (d[i][1], d[i][2]); i++;

  f[i][0] = __builtin_vsx_xvcmpeqsp (f[i][1], f[i][2]); i++;
  f[i][0] = __builtin_vsx_xvcmpgtsp (f[i][1], f[i][2]); i++;
  f[i][0] = __builtin_vsx_xvcmpgesp (f[i][1], f[i][2]); i++;
  return i;
}

/* Vector logical ops across all supported element types.  */
int do_logical (void)
{
  int i = 0;

  si[i][0] = __builtin_vsx_xxland (si[i][1], si[i][2]); i++;
  si[i][0] = __builtin_vsx_xxlandc (si[i][1], si[i][2]); i++;
  si[i][0] = __builtin_vsx_xxlnor (si[i][1], si[i][2]); i++;
  si[i][0] = __builtin_vsx_xxlor (si[i][1], si[i][2]); i++;
  si[i][0] = __builtin_vsx_xxlxor (si[i][1], si[i][2]); i++;

  ss[i][0] = __builtin_vsx_xxland (ss[i][1], ss[i][2]); i++;
  ss[i][0] = __builtin_vsx_xxlandc (ss[i][1], ss[i][2]); i++;
  ss[i][0] = __builtin_vsx_xxlnor (ss[i][1], ss[i][2]); i++;
  ss[i][0] = __builtin_vsx_xxlor (ss[i][1], ss[i][2]); i++;
  ss[i][0] = __builtin_vsx_xxlxor (ss[i][1], ss[i][2]); i++;

  sc[i][0] = __builtin_vsx_xxland (sc[i][1], sc[i][2]); i++;
  sc[i][0] = __builtin_vsx_xxlandc (sc[i][1], sc[i][2]); i++;
  sc[i][0] = __builtin_vsx_xxlnor (sc[i][1], sc[i][2]); i++;
  sc[i][0] = __builtin_vsx_xxlor (sc[i][1], sc[i][2]); i++;
  sc[i][0] = __builtin_vsx_xxlxor (sc[i][1], sc[i][2]); i++;

  d[i][0] = __builtin_vsx_xxland (d[i][1], d[i][2]); i++;
  d[i][0] = __builtin_vsx_xxlandc (d[i][1], d[i][2]); i++;
  d[i][0] = __builtin_vsx_xxlnor (d[i][1], d[i][2]); i++;
  d[i][0] = __builtin_vsx_xxlor (d[i][1], d[i][2]); i++;
  d[i][0] = __builtin_vsx_xxlxor (d[i][1], d[i][2]); i++;

  f[i][0] = __builtin_vsx_xxland (f[i][1], f[i][2]); i++;
  f[i][0] = __builtin_vsx_xxlandc (f[i][1], f[i][2]); i++;
  f[i][0] = __builtin_vsx_xxlnor (f[i][1], f[i][2]); i++;
  f[i][0] = __builtin_vsx_xxlor (f[i][1], f[i][2]); i++;
  f[i][0] = __builtin_vsx_xxlxor (f[i][1], f[i][2]); i++;
  return i;
}

/* xxsldwi with each legal immediate shift count 0..3.  */
int do_xxsldwi (void)
{
  int i = 0;

  si[i][0] = __builtin_vsx_xxsldwi (si[i][1], si[i][2], 0); i++;
  ss[i][0] = __builtin_vsx_xxsldwi (ss[i][1], ss[i][2], 1); i++;
  sc[i][0] = __builtin_vsx_xxsldwi (sc[i][1], sc[i][2], 2); i++;
  ui[i][0] = __builtin_vsx_xxsldwi (ui[i][1], ui[i][2], 3); i++;
  us[i][0] = __builtin_vsx_xxsldwi (us[i][1], us[i][2], 0); i++;
  uc[i][0] = __builtin_vsx_xxsldwi (uc[i][1], uc[i][2], 1); i++;
  f[i][0] = __builtin_vsx_xxsldwi (f[i][1], f[i][2], 2); i++;
  d[i][0] = __builtin_vsx_xxsldwi (d[i][1], d[i][2], 3); i++;
  return i;
}
gpl-2.0
gfreewind/latest-fastsocket
kernel/drivers/s390/char/tape_34xx.c
43
38441
/* * drivers/s390/char/tape_34xx.c * tape device discipline for 3480/3490 tapes. * * Copyright IBM Corp. 2001, 2009 * Author(s): Carsten Otte <cotte@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> */ #define KMSG_COMPONENT "tape_34xx" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/bio.h> #include <linux/workqueue.h> #define TAPE_DBF_AREA tape_34xx_dbf #include "tape.h" #include "tape_std.h" /* * Pointer to debug area. */ debug_info_t *TAPE_DBF_AREA = NULL; EXPORT_SYMBOL(TAPE_DBF_AREA); #define TAPE34XX_FMT_3480 0 #define TAPE34XX_FMT_3480_2_XF 1 #define TAPE34XX_FMT_3480_XF 2 struct tape_34xx_block_id { unsigned int wrap : 1; unsigned int segment : 7; unsigned int format : 2; unsigned int block : 22; }; /* * A list of block ID's is used to faster seek blocks. */ struct tape_34xx_sbid { struct list_head list; struct tape_34xx_block_id bid; }; static void tape_34xx_delete_sbid_from(struct tape_device *, int); /* * Medium sense for 34xx tapes. There is no 'real' medium sense call. * So we just do a normal sense. */ static void __tape_34xx_medium_sense(struct tape_request *request) { struct tape_device *device = request->device; unsigned char *sense; if (request->rc == 0) { sense = request->cpdata; /* * This isn't quite correct. But since INTERVENTION_REQUIRED * means that the drive is 'neither ready nor on-line' it is * only slightly inaccurate to say there is no tape loaded if * the drive isn't online... 
*/ if (sense[0] & SENSE_INTERVENTION_REQUIRED) tape_med_state_set(device, MS_UNLOADED); else tape_med_state_set(device, MS_LOADED); if (sense[1] & SENSE_WRITE_PROTECT) device->tape_generic_status |= GMT_WR_PROT(~0); else device->tape_generic_status &= ~GMT_WR_PROT(~0); } else DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n", request->rc); tape_free_request(request); } static int tape_34xx_medium_sense(struct tape_device *device) { struct tape_request *request; int rc; request = tape_alloc_request(1, 32); if (IS_ERR(request)) { DBF_EXCEPTION(6, "MSEN fail\n"); return PTR_ERR(request); } request->op = TO_MSEN; tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); rc = tape_do_io_interruptible(device, request); __tape_34xx_medium_sense(request); return rc; } static void tape_34xx_medium_sense_async(struct tape_device *device) { struct tape_request *request; request = tape_alloc_request(1, 32); if (IS_ERR(request)) { DBF_EXCEPTION(6, "MSEN fail\n"); return; } request->op = TO_MSEN; tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); request->callback = (void *) __tape_34xx_medium_sense; request->callback_data = NULL; tape_do_io_async(device, request); } struct tape_34xx_work { struct tape_device *device; enum tape_op op; struct work_struct work; }; /* * These functions are currently used only to schedule a medium_sense for * later execution. This is because we get an interrupt whenever a medium * is inserted but cannot call tape_do_io* from an interrupt context. * Maybe that's useful for other actions we want to start from the * interrupt handler. * Note: the work handler is called by the system work queue. The tape * commands started by the handler need to be asynchrounous, otherwise * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq). 
*/ static void tape_34xx_work_handler(struct work_struct *work) { struct tape_34xx_work *p = container_of(work, struct tape_34xx_work, work); switch(p->op) { case TO_MSEN: tape_34xx_medium_sense_async(p->device); break; default: DBF_EVENT(3, "T34XX: internal error: unknown work\n"); } p->device = tape_put_device(p->device); kfree(p); } static int tape_34xx_schedule_work(struct tape_device *device, enum tape_op op) { struct tape_34xx_work *p; if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL) return -ENOMEM; INIT_WORK(&p->work, tape_34xx_work_handler); p->device = tape_get_device_reference(device); p->op = op; schedule_work(&p->work); return 0; } /* * Done Handler is called when dev stat = DEVICE-END (successful operation) */ static inline int tape_34xx_done(struct tape_request *request) { DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); switch (request->op) { case TO_DSE: case TO_RUN: case TO_WRI: case TO_WTM: case TO_ASSIGN: case TO_UNASSIGN: tape_34xx_delete_sbid_from(request->device, 0); break; default: ; } return TAPE_IO_SUCCESS; } static inline int tape_34xx_erp_failed(struct tape_request *request, int rc) { DBF_EVENT(3, "Error recovery failed for %s (RC=%d)\n", tape_op_verbose[request->op], rc); return rc; } static inline int tape_34xx_erp_succeeded(struct tape_request *request) { DBF_EVENT(3, "Error Recovery successful for %s\n", tape_op_verbose[request->op]); return tape_34xx_done(request); } static inline int tape_34xx_erp_retry(struct tape_request *request) { DBF_EVENT(3, "xerp retr %s\n", tape_op_verbose[request->op]); return TAPE_IO_RETRY; } /* * This function is called, when no request is outstanding and we get an * interrupt */ static int tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb) { if (irb->scsw.cmd.dstat == 0x85) { /* READY */ /* A medium was inserted in the drive. */ DBF_EVENT(6, "xuud med\n"); tape_34xx_delete_sbid_from(device, 0); tape_34xx_schedule_work(device, TO_MSEN); } else { DBF_EVENT(3, "unsol.irq! 
dev end: %08x\n", device->cdev_id); tape_dump_sense_dbf(device, NULL, irb); } return TAPE_IO_SUCCESS; } /* * Read Opposite Error Recovery Function: * Used, when Read Forward does not work */ static int tape_34xx_erp_read_opposite(struct tape_device *device, struct tape_request *request) { if (request->op == TO_RFO) { /* * We did read forward, but the data could not be read * *correctly*. We transform the request to a read backward * and try again. */ tape_std_read_backward(device, request); return tape_34xx_erp_retry(request); } /* * We tried to read forward and backward, but hat no * success -> failed. */ return tape_34xx_erp_failed(request, -EIO); } static int tape_34xx_erp_bug(struct tape_device *device, struct tape_request *request, struct irb *irb, int no) { if (request->op != TO_ASSIGN) { dev_err(&device->cdev->dev, "An unexpected condition %d " "occurred in tape error recovery\n", no); tape_dump_sense_dbf(device, request, irb); } return tape_34xx_erp_failed(request, -EIO); } /* * Handle data overrun between cu and drive. The channel speed might * be too slow. */ static int tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request, struct irb *irb) { if (irb->ecw[3] == 0x40) { dev_warn (&device->cdev->dev, "A data overrun occurred between" " the control unit and tape unit\n"); return tape_34xx_erp_failed(request, -EIO); } return tape_34xx_erp_bug(device, request, irb, -1); } /* * Handle record sequence error. */ static int tape_34xx_erp_sequence(struct tape_device *device, struct tape_request *request, struct irb *irb) { if (irb->ecw[3] == 0x41) { /* * cu detected incorrect block-id sequence on tape. */ dev_warn (&device->cdev->dev, "The block ID sequence on the " "tape is incorrect\n"); return tape_34xx_erp_failed(request, -EIO); } /* * Record sequence error bit is set, but erpa does not * show record sequence error. 
*/ return tape_34xx_erp_bug(device, request, irb, -2); } /* * This function analyses the tape's sense-data in case of a unit-check. * If possible, it tries to recover from the error. Else the user is * informed about the problem. */ static int tape_34xx_unit_check(struct tape_device *device, struct tape_request *request, struct irb *irb) { int inhibit_cu_recovery; __u8* sense; inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0; sense = irb->ecw; #ifdef CONFIG_S390_TAPE_BLOCK if (request->op == TO_BLOCK) { /* * Recovery for block device requests. Set the block_position * to something invalid and retry. */ device->blk_data.block_position = -1; if (request->retries-- <= 0) return tape_34xx_erp_failed(request, -EIO); else return tape_34xx_erp_retry(request); } #endif if ( sense[0] & SENSE_COMMAND_REJECT && sense[1] & SENSE_WRITE_PROTECT ) { if ( request->op == TO_DSE || request->op == TO_WRI || request->op == TO_WTM ) { /* medium is write protected */ return tape_34xx_erp_failed(request, -EACCES); } else { return tape_34xx_erp_bug(device, request, irb, -3); } } /* * Special cases for various tape-states when reaching * end of recorded area * * FIXME: Maybe a special case of the special case: * sense[0] == SENSE_EQUIPMENT_CHECK && * sense[1] == SENSE_DRIVE_ONLINE && * sense[3] == 0x47 (Volume Fenced) * * This was caused by continued FSF or FSR after an * 'End Of Data'. */ if (( sense[0] == SENSE_DATA_CHECK || sense[0] == SENSE_EQUIPMENT_CHECK || sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK ) && ( sense[1] == SENSE_DRIVE_ONLINE || sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE )) { switch (request->op) { /* * sense[0] == SENSE_DATA_CHECK && * sense[1] == SENSE_DRIVE_ONLINE * sense[3] == 0x36 (End Of Data) * * Further seeks might return a 'Volume Fenced'. 
*/ case TO_FSF: case TO_FSB: /* Trying to seek beyond end of recorded area */ return tape_34xx_erp_failed(request, -ENOSPC); case TO_BSB: return tape_34xx_erp_retry(request); /* * sense[0] == SENSE_DATA_CHECK && * sense[1] == SENSE_DRIVE_ONLINE && * sense[3] == 0x36 (End Of Data) */ case TO_LBL: /* Block could not be located. */ tape_34xx_delete_sbid_from(device, 0); return tape_34xx_erp_failed(request, -EIO); case TO_RFO: /* Read beyond end of recorded area -> 0 bytes read */ return tape_34xx_erp_failed(request, 0); /* * sense[0] == SENSE_EQUIPMENT_CHECK && * sense[1] == SENSE_DRIVE_ONLINE && * sense[3] == 0x38 (Physical End Of Volume) */ case TO_WRI: /* Writing at physical end of volume */ return tape_34xx_erp_failed(request, -ENOSPC); default: return tape_34xx_erp_failed(request, 0); } } /* Sensing special bits */ if (sense[0] & SENSE_BUS_OUT_CHECK) return tape_34xx_erp_retry(request); if (sense[0] & SENSE_DATA_CHECK) { /* * hardware failure, damaged tape or improper * operating conditions */ switch (sense[3]) { case 0x23: /* a read data check occurred */ if ((sense[2] & SENSE_TAPE_SYNC_MODE) || inhibit_cu_recovery) // data check is not permanent, may be // recovered. We always use async-mode with // cu-recovery, so this should *never* happen. return tape_34xx_erp_bug(device, request, irb, -4); /* data check is permanent, CU recovery has failed */ dev_warn (&device->cdev->dev, "A read error occurred " "that cannot be recovered\n"); return tape_34xx_erp_failed(request, -EIO); case 0x25: // a write data check occurred if ((sense[2] & SENSE_TAPE_SYNC_MODE) || inhibit_cu_recovery) // data check is not permanent, may be // recovered. We always use async-mode with // cu-recovery, so this should *never* happen. 
return tape_34xx_erp_bug(device, request, irb, -5); // data check is permanent, cu-recovery has failed dev_warn (&device->cdev->dev, "A write error on the " "tape cannot be recovered\n"); return tape_34xx_erp_failed(request, -EIO); case 0x26: /* Data Check (read opposite) occurred. */ return tape_34xx_erp_read_opposite(device, request); case 0x28: /* ID-Mark at tape start couldn't be written */ dev_warn (&device->cdev->dev, "Writing the ID-mark " "failed\n"); return tape_34xx_erp_failed(request, -EIO); case 0x31: /* Tape void. Tried to read beyond end of device. */ dev_warn (&device->cdev->dev, "Reading the tape beyond" " the end of the recorded area failed\n"); return tape_34xx_erp_failed(request, -ENOSPC); case 0x41: /* Record sequence error. */ dev_warn (&device->cdev->dev, "The tape contains an " "incorrect block ID sequence\n"); return tape_34xx_erp_failed(request, -EIO); default: /* all data checks for 3480 should result in one of * the above erpa-codes. For 3490, other data-check * conditions do exist. */ if (device->cdev->id.driver_info == tape_3480) return tape_34xx_erp_bug(device, request, irb, -6); } } if (sense[0] & SENSE_OVERRUN) return tape_34xx_erp_overrun(device, request, irb); if (sense[1] & SENSE_RECORD_SEQUENCE_ERR) return tape_34xx_erp_sequence(device, request, irb); /* Sensing erpa codes */ switch (sense[3]) { case 0x00: /* Unit check with erpa code 0. Report and ignore. */ return TAPE_IO_SUCCESS; case 0x21: /* * Data streaming not operational. CU will switch to * interlock mode. Reissue the command. */ return tape_34xx_erp_retry(request); case 0x22: /* * Path equipment check. Might be drive adapter error, buffer * error on the lower interface, internal path not usable, * or error during cartridge load. */ dev_warn (&device->cdev->dev, "A path equipment check occurred" " for the tape device\n"); return tape_34xx_erp_failed(request, -EIO); case 0x24: /* * Load display check. 
Load display was command was issued, * but the drive is displaying a drive check message. Can * be threated as "device end". */ return tape_34xx_erp_succeeded(request); case 0x27: /* * Command reject. May indicate illegal channel program or * buffer over/underrun. Since all channel programs are * issued by this driver and ought be correct, we assume a * over/underrun situation and retry the channel program. */ return tape_34xx_erp_retry(request); case 0x29: /* * Function incompatible. Either the tape is idrc compressed * but the hardware isn't capable to do idrc, or a perform * subsystem func is issued and the CU is not on-line. */ return tape_34xx_erp_failed(request, -EIO); case 0x2a: /* * Unsolicited environmental data. An internal counter * overflows, we can ignore this and reissue the cmd. */ return tape_34xx_erp_retry(request); case 0x2b: /* * Environmental data present. Indicates either unload * completed ok or read buffered log command completed ok. */ if (request->op == TO_RUN) { /* Rewind unload completed ok. */ tape_med_state_set(device, MS_UNLOADED); return tape_34xx_erp_succeeded(request); } /* tape_34xx doesn't use read buffered log commands. */ return tape_34xx_erp_bug(device, request, irb, sense[3]); case 0x2c: /* * Permanent equipment check. CU has tried recovery, but * did not succeed. */ return tape_34xx_erp_failed(request, -EIO); case 0x2d: /* Data security erase failure. */ if (request->op == TO_DSE) return tape_34xx_erp_failed(request, -EIO); /* Data security erase failure, but no such command issued. */ return tape_34xx_erp_bug(device, request, irb, sense[3]); case 0x2e: /* * Not capable. This indicates either that the drive fails * reading the format id mark or that that format specified * is not supported by the drive. */ dev_warn (&device->cdev->dev, "The tape unit cannot process " "the tape format\n"); return tape_34xx_erp_failed(request, -EMEDIUMTYPE); case 0x30: /* The medium is write protected. 
*/ dev_warn (&device->cdev->dev, "The tape medium is write-" "protected\n"); return tape_34xx_erp_failed(request, -EACCES); case 0x32: // Tension loss. We cannot recover this, it's an I/O error. dev_warn (&device->cdev->dev, "The tape does not have the " "required tape tension\n"); return tape_34xx_erp_failed(request, -EIO); case 0x33: /* * Load Failure. The cartridge was not inserted correctly or * the tape is not threaded correctly. */ dev_warn (&device->cdev->dev, "The tape unit failed to load" " the cartridge\n"); tape_34xx_delete_sbid_from(device, 0); return tape_34xx_erp_failed(request, -EIO); case 0x34: /* * Unload failure. The drive cannot maintain tape tension * and control tape movement during an unload operation. */ dev_warn (&device->cdev->dev, "Automatic unloading of the tape" " cartridge failed\n"); if (request->op == TO_RUN) return tape_34xx_erp_failed(request, -EIO); return tape_34xx_erp_bug(device, request, irb, sense[3]); case 0x35: /* * Drive equipment check. One of the following: * - cu cannot recover from a drive detected error * - a check code message is shown on drive display * - the cartridge loader does not respond correctly * - a failure occurs during an index, load, or unload cycle */ dev_warn (&device->cdev->dev, "An equipment check has occurred" " on the tape unit\n"); return tape_34xx_erp_failed(request, -EIO); case 0x36: if (device->cdev->id.driver_info == tape_3490) /* End of data. */ return tape_34xx_erp_failed(request, -EIO); /* This erpa is reserved for 3480 */ return tape_34xx_erp_bug(device, request, irb, sense[3]); case 0x37: /* * Tape length error. The tape is shorter than reported in * the beginning-of-tape data. */ dev_warn (&device->cdev->dev, "The tape information states an" " incorrect length\n"); return tape_34xx_erp_failed(request, -EIO); case 0x38: /* * Physical end of tape. A read/write operation reached * the physical end of tape. 
*/ if (request->op==TO_WRI || request->op==TO_DSE || request->op==TO_WTM) return tape_34xx_erp_failed(request, -ENOSPC); return tape_34xx_erp_failed(request, -EIO); case 0x39: /* Backward at Beginning of tape. */ return tape_34xx_erp_failed(request, -EIO); case 0x3a: /* Drive switched to not ready. */ dev_warn (&device->cdev->dev, "The tape unit is not ready\n"); return tape_34xx_erp_failed(request, -EIO); case 0x3b: /* Manual rewind or unload. This causes an I/O error. */ dev_warn (&device->cdev->dev, "The tape medium has been " "rewound or unloaded manually\n"); tape_34xx_delete_sbid_from(device, 0); return tape_34xx_erp_failed(request, -EIO); case 0x42: /* * Degraded mode. A condition that can cause degraded * performance is detected. */ dev_warn (&device->cdev->dev, "The tape subsystem is running " "in degraded mode\n"); return tape_34xx_erp_retry(request); case 0x43: /* Drive not ready. */ tape_34xx_delete_sbid_from(device, 0); tape_med_state_set(device, MS_UNLOADED); /* Some commands commands are successful even in this case */ if (sense[1] & SENSE_DRIVE_ONLINE) { switch(request->op) { case TO_ASSIGN: case TO_UNASSIGN: case TO_DIS: case TO_NOP: return tape_34xx_done(request); break; default: break; } } return tape_34xx_erp_failed(request, -ENOMEDIUM); case 0x44: /* Locate Block unsuccessful. */ if (request->op != TO_BLOCK && request->op != TO_LBL) /* No locate block was issued. */ return tape_34xx_erp_bug(device, request, irb, sense[3]); return tape_34xx_erp_failed(request, -EIO); case 0x45: /* The drive is assigned to a different channel path. */ dev_warn (&device->cdev->dev, "The tape unit is already " "assigned\n"); return tape_34xx_erp_failed(request, -EIO); case 0x46: /* * Drive not on-line. Drive may be switched offline, * the power supply may be switched off or * the drive address may not be set correctly. */ dev_warn (&device->cdev->dev, "The tape unit is not online\n"); return tape_34xx_erp_failed(request, -EIO); case 0x47: /* Volume fenced. 
CU reports volume integrity is lost. */ dev_warn (&device->cdev->dev, "The control unit has fenced " "access to the tape volume\n"); tape_34xx_delete_sbid_from(device, 0); return tape_34xx_erp_failed(request, -EIO); case 0x48: /* Log sense data and retry request. */ return tape_34xx_erp_retry(request); case 0x49: /* Bus out check. A parity check error on the bus was found. */ dev_warn (&device->cdev->dev, "A parity error occurred on the " "tape bus\n"); return tape_34xx_erp_failed(request, -EIO); case 0x4a: /* Control unit erp failed. */ dev_warn (&device->cdev->dev, "I/O error recovery failed on " "the tape control unit\n"); return tape_34xx_erp_failed(request, -EIO); case 0x4b: /* * CU and drive incompatible. The drive requests micro-program * patches, which are not available on the CU. */ dev_warn (&device->cdev->dev, "The tape unit requires a " "firmware update\n"); return tape_34xx_erp_failed(request, -EIO); case 0x4c: /* * Recovered Check-One failure. Cu develops a hardware error, * but is able to recover. */ return tape_34xx_erp_retry(request); case 0x4d: if (device->cdev->id.driver_info == tape_3490) /* * Resetting event received. Since the driver does * not support resetting event recovery (which has to * be handled by the I/O Layer), retry our command. */ return tape_34xx_erp_retry(request); /* This erpa is reserved for 3480. */ return tape_34xx_erp_bug(device, request, irb, sense[3]); case 0x4e: if (device->cdev->id.driver_info == tape_3490) { /* * Maximum block size exceeded. This indicates, that * the block to be written is larger than allowed for * buffered mode. */ dev_warn (&device->cdev->dev, "The maximum block size" " for buffered mode is exceeded\n"); return tape_34xx_erp_failed(request, -ENOBUFS); } /* This erpa is reserved for 3480. */ return tape_34xx_erp_bug(device, request, irb, sense[3]); case 0x50: /* * Read buffered log (Overflow). CU is running in extended * buffered log mode, and a counter overflows. 
This should * never happen, since we're never running in extended * buffered log mode. */ return tape_34xx_erp_retry(request); case 0x51: /* * Read buffered log (EOV). EOF processing occurs while the * CU is in extended buffered log mode. This should never * happen, since we're never running in extended buffered * log mode. */ return tape_34xx_erp_retry(request); case 0x52: /* End of Volume complete. Rewind unload completed ok. */ if (request->op == TO_RUN) { tape_med_state_set(device, MS_UNLOADED); tape_34xx_delete_sbid_from(device, 0); return tape_34xx_erp_succeeded(request); } return tape_34xx_erp_bug(device, request, irb, sense[3]); case 0x53: /* Global command intercept. */ return tape_34xx_erp_retry(request); case 0x54: /* Channel interface recovery (temporary). */ return tape_34xx_erp_retry(request); case 0x55: /* Channel interface recovery (permanent). */ dev_warn (&device->cdev->dev, "A channel interface error cannot be" " recovered\n"); return tape_34xx_erp_failed(request, -EIO); case 0x56: /* Channel protocol error. */ dev_warn (&device->cdev->dev, "A channel protocol error " "occurred\n"); return tape_34xx_erp_failed(request, -EIO); case 0x57: if (device->cdev->id.driver_info == tape_3480) { /* Attention intercept. */ return tape_34xx_erp_retry(request); } else { /* Global status intercept. */ return tape_34xx_erp_retry(request); } case 0x5a: /* * Tape length incompatible. The tape inserted is too long, * which could cause damage to the tape or the drive. */ dev_warn (&device->cdev->dev, "The tape unit does not support " "the tape length\n"); return tape_34xx_erp_failed(request, -EIO); case 0x5b: /* Format 3480 XF incompatible */ if (sense[1] & SENSE_BEGINNING_OF_TAPE) /* The tape will get overwritten. 
*/ return tape_34xx_erp_retry(request); dev_warn (&device->cdev->dev, "The tape unit does not support" " format 3480 XF\n"); return tape_34xx_erp_failed(request, -EIO); case 0x5c: /* Format 3480-2 XF incompatible */ dev_warn (&device->cdev->dev, "The tape unit does not support tape " "format 3480-2 XF\n"); return tape_34xx_erp_failed(request, -EIO); case 0x5d: /* Tape length violation. */ dev_warn (&device->cdev->dev, "The tape unit does not support" " the current tape length\n"); return tape_34xx_erp_failed(request, -EMEDIUMTYPE); case 0x5e: /* Compaction algorithm incompatible. */ dev_warn (&device->cdev->dev, "The tape unit does not support" " the compaction algorithm\n"); return tape_34xx_erp_failed(request, -EMEDIUMTYPE); /* The following erpas should have been covered earlier. */ case 0x23: /* Read data check. */ case 0x25: /* Write data check. */ case 0x26: /* Data check (read opposite). */ case 0x28: /* Write id mark check. */ case 0x31: /* Tape void. */ case 0x40: /* Overrun error. */ case 0x41: /* Record sequence error. */ /* All other erpas are reserved for future use. */ default: return tape_34xx_erp_bug(device, request, irb, sense[3]); } } /* * 3480/3490 interrupt handler */ static int tape_34xx_irq(struct tape_device *device, struct tape_request *request, struct irb *irb) { if (request == NULL) return tape_34xx_unsolicited_irq(device, irb); if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) && (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) && (request->op == TO_WRI)) { /* Write at end of volume */ return tape_34xx_erp_failed(request, -ENOSPC); } if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) return tape_34xx_unit_check(device, request, irb); if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) { /* * A unit exception occurs on skipping over a tapemark block. 
*/ if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) { if (request->op == TO_BSB || request->op == TO_FSB) request->rescnt++; else DBF_EVENT(5, "Unit Exception!\n"); } return tape_34xx_done(request); } DBF_EVENT(6, "xunknownirq\n"); tape_dump_sense_dbf(device, request, irb); return TAPE_IO_STOP; } /* * ioctl_overload */ static int tape_34xx_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) { if (cmd == TAPE390_DISPLAY) { struct display_struct disp; if (copy_from_user(&disp, (char __user *) arg, sizeof(disp)) != 0) return -EFAULT; return tape_std_display(device, &disp); } else return -EINVAL; } static inline void tape_34xx_append_new_sbid(struct tape_34xx_block_id bid, struct list_head *l) { struct tape_34xx_sbid * new_sbid; new_sbid = kmalloc(sizeof(*new_sbid), GFP_ATOMIC); if (!new_sbid) return; new_sbid->bid = bid; list_add(&new_sbid->list, l); } /* * Build up the search block ID list. The block ID consists of a logical * block number and a hardware specific part. The hardware specific part * helps the tape drive to speed up searching for a specific block. */ static void tape_34xx_add_sbid(struct tape_device *device, struct tape_34xx_block_id bid) { struct list_head * sbid_list; struct tape_34xx_sbid * sbid; struct list_head * l; /* * immediately return if there is no list at all or the block to add * is located in segment 1 of wrap 0 because this position is used * if no hardware position data is supplied. */ sbid_list = (struct list_head *) device->discdata; if (!sbid_list || (bid.segment < 2 && bid.wrap == 0)) return; /* * Search the position where to insert the new entry. Hardware * acceleration uses only the segment and wrap number. So we * need only one entry for a specific wrap/segment combination. * If there is a block with a lower number but the same hard- * ware position data we just update the block number in the * existing entry. 
*/ list_for_each(l, sbid_list) { sbid = list_entry(l, struct tape_34xx_sbid, list); if ( (sbid->bid.segment == bid.segment) && (sbid->bid.wrap == bid.wrap) ) { if (bid.block < sbid->bid.block) sbid->bid = bid; else return; break; } /* Sort in according to logical block number. */ if (bid.block < sbid->bid.block) { tape_34xx_append_new_sbid(bid, l->prev); break; } } /* List empty or new block bigger than last entry. */ if (l == sbid_list) tape_34xx_append_new_sbid(bid, l->prev); DBF_LH(4, "Current list is:\n"); list_for_each(l, sbid_list) { sbid = list_entry(l, struct tape_34xx_sbid, list); DBF_LH(4, "%d:%03d@%05d\n", sbid->bid.wrap, sbid->bid.segment, sbid->bid.block ); } } /* * Delete all entries from the search block ID list that belong to tape blocks * equal or higher than the given number. */ static void tape_34xx_delete_sbid_from(struct tape_device *device, int from) { struct list_head * sbid_list; struct tape_34xx_sbid * sbid; struct list_head * l; struct list_head * n; sbid_list = (struct list_head *) device->discdata; if (!sbid_list) return; list_for_each_safe(l, n, sbid_list) { sbid = list_entry(l, struct tape_34xx_sbid, list); if (sbid->bid.block >= from) { DBF_LH(4, "Delete sbid %d:%03d@%05d\n", sbid->bid.wrap, sbid->bid.segment, sbid->bid.block ); list_del(l); kfree(sbid); } } } /* * Merge hardware position data into a block id. 
*/ static void tape_34xx_merge_sbid( struct tape_device * device, struct tape_34xx_block_id * bid ) { struct tape_34xx_sbid * sbid; struct tape_34xx_sbid * sbid_to_use; struct list_head * sbid_list; struct list_head * l; sbid_list = (struct list_head *) device->discdata; bid->wrap = 0; bid->segment = 1; if (!sbid_list || list_empty(sbid_list)) return; sbid_to_use = NULL; list_for_each(l, sbid_list) { sbid = list_entry(l, struct tape_34xx_sbid, list); if (sbid->bid.block >= bid->block) break; sbid_to_use = sbid; } if (sbid_to_use) { bid->wrap = sbid_to_use->bid.wrap; bid->segment = sbid_to_use->bid.segment; DBF_LH(4, "Use %d:%03d@%05d for %05d\n", sbid_to_use->bid.wrap, sbid_to_use->bid.segment, sbid_to_use->bid.block, bid->block ); } } static int tape_34xx_setup_device(struct tape_device * device) { int rc; struct list_head * discdata; DBF_EVENT(6, "34xx device setup\n"); if ((rc = tape_std_assign(device)) == 0) { if ((rc = tape_34xx_medium_sense(device)) != 0) { DBF_LH(3, "34xx medium sense returned %d\n", rc); } } discdata = kmalloc(sizeof(struct list_head), GFP_KERNEL); if (discdata) { INIT_LIST_HEAD(discdata); device->discdata = discdata; } return rc; } static void tape_34xx_cleanup_device(struct tape_device *device) { tape_std_unassign(device); if (device->discdata) { tape_34xx_delete_sbid_from(device, 0); kfree(device->discdata); device->discdata = NULL; } } /* * MTTELL: Tell block. Return the number of block relative to current file. */ static int tape_34xx_mttell(struct tape_device *device, int mt_count) { struct { struct tape_34xx_block_id cbid; struct tape_34xx_block_id dbid; } __attribute__ ((packed)) block_id; int rc; rc = tape_std_read_block_id(device, (__u64 *) &block_id); if (rc) return rc; tape_34xx_add_sbid(device, block_id.cbid); return block_id.cbid.block; } /* * MTSEEK: seek to the specified block. 
*/ static int tape_34xx_mtseek(struct tape_device *device, int mt_count) { struct tape_request *request; struct tape_34xx_block_id * bid; if (mt_count > 0x3fffff) { DBF_EXCEPTION(6, "xsee parm\n"); return -EINVAL; } request = tape_alloc_request(3, 4); if (IS_ERR(request)) return PTR_ERR(request); /* setup ccws */ request->op = TO_LBL; bid = (struct tape_34xx_block_id *) request->cpdata; bid->format = (*device->modeset_byte & 0x08) ? TAPE34XX_FMT_3480_XF : TAPE34XX_FMT_3480; bid->block = mt_count; tape_34xx_merge_sbid(device, bid); tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); /* execute it */ return tape_do_io_free(device, request); } #ifdef CONFIG_S390_TAPE_BLOCK /* * Tape block read for 34xx. */ static struct tape_request * tape_34xx_bread(struct tape_device *device, struct request *req) { struct tape_request *request; struct ccw1 *ccw; int count = 0; unsigned off; char *dst; struct bio_vec *bv; struct req_iterator iter; struct tape_34xx_block_id * start_block; DBF_EVENT(6, "xBREDid:"); /* Count the number of blocks for the request. */ rq_for_each_segment(bv, req, iter) count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9); /* Allocate the ccw request. */ request = tape_alloc_request(3+count+1, 8); if (IS_ERR(request)) return request; /* Setup ccws. */ request->op = TO_BLOCK; start_block = (struct tape_34xx_block_id *) request->cpdata; start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B; DBF_EVENT(6, "start_block = %i\n", start_block->block); ccw = request->cpaddr; ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte); /* * We always setup a nop after the mode set ccw. This slot is * used in tape_std_check_locate to insert a locate ccw if the * current tape position doesn't match the start block to be read. 
* The second nop will be filled with a read block id which is in * turn used by tape_34xx_free_bread to populate the segment bid * table. */ ccw = tape_ccw_cc(ccw, NOP, 0, NULL); ccw = tape_ccw_cc(ccw, NOP, 0, NULL); rq_for_each_segment(bv, req, iter) { dst = kmap(bv->bv_page) + bv->bv_offset; for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) { ccw->flags = CCW_FLAG_CC; ccw->cmd_code = READ_FORWARD; ccw->count = TAPEBLOCK_HSEC_SIZE; set_normalized_cda(ccw, (void*) __pa(dst)); ccw++; dst += TAPEBLOCK_HSEC_SIZE; } } ccw = tape_ccw_end(ccw, NOP, 0, NULL); DBF_EVENT(6, "xBREDccwg\n"); return request; } static void tape_34xx_free_bread (struct tape_request *request) { struct ccw1* ccw; ccw = request->cpaddr; if ((ccw + 2)->cmd_code == READ_BLOCK_ID) { struct { struct tape_34xx_block_id cbid; struct tape_34xx_block_id dbid; } __attribute__ ((packed)) *rbi_data; rbi_data = request->cpdata; if (request->device) tape_34xx_add_sbid(request->device, rbi_data->cbid); } /* Last ccw is a nop and doesn't need clear_normalized_cda */ for (; ccw->flags & CCW_FLAG_CC; ccw++) if (ccw->cmd_code == READ_FORWARD) clear_normalized_cda(ccw); tape_free_request(request); } /* * check_locate is called just before the tape request is passed to * the common io layer for execution. It has to check the current * tape position and insert a locate ccw if it doesn't match the * start block for the request. */ static void tape_34xx_check_locate(struct tape_device *device, struct tape_request *request) { struct tape_34xx_block_id * start_block; start_block = (struct tape_34xx_block_id *) request->cpdata; if (start_block->block == device->blk_data.block_position) return; DBF_LH(4, "Block seek(%06d+%06d)\n", start_block->block, device->bof); start_block->wrap = 0; start_block->segment = 1; start_block->format = (*device->modeset_byte & 0x08) ? 
TAPE34XX_FMT_3480_XF : TAPE34XX_FMT_3480; start_block->block = start_block->block + device->bof; tape_34xx_merge_sbid(device, start_block); tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); tape_ccw_cc(request->cpaddr + 2, READ_BLOCK_ID, 8, request->cpdata); } #endif /* * List of 3480/3490 magnetic tape commands. */ static tape_mtop_fn tape_34xx_mtop[TAPE_NR_MTOPS] = { [MTRESET] = tape_std_mtreset, [MTFSF] = tape_std_mtfsf, [MTBSF] = tape_std_mtbsf, [MTFSR] = tape_std_mtfsr, [MTBSR] = tape_std_mtbsr, [MTWEOF] = tape_std_mtweof, [MTREW] = tape_std_mtrew, [MTOFFL] = tape_std_mtoffl, [MTNOP] = tape_std_mtnop, [MTRETEN] = tape_std_mtreten, [MTBSFM] = tape_std_mtbsfm, [MTFSFM] = tape_std_mtfsfm, [MTEOM] = tape_std_mteom, [MTERASE] = tape_std_mterase, [MTRAS1] = NULL, [MTRAS2] = NULL, [MTRAS3] = NULL, [MTSETBLK] = tape_std_mtsetblk, [MTSETDENSITY] = NULL, [MTSEEK] = tape_34xx_mtseek, [MTTELL] = tape_34xx_mttell, [MTSETDRVBUFFER] = NULL, [MTFSS] = NULL, [MTBSS] = NULL, [MTWSM] = NULL, [MTLOCK] = NULL, [MTUNLOCK] = NULL, [MTLOAD] = tape_std_mtload, [MTUNLOAD] = tape_std_mtunload, [MTCOMPRESSION] = tape_std_mtcompression, [MTSETPART] = NULL, [MTMKPART] = NULL }; /* * Tape discipline structure for 3480 and 3490. 
*/ static struct tape_discipline tape_discipline_34xx = { .owner = THIS_MODULE, .setup_device = tape_34xx_setup_device, .cleanup_device = tape_34xx_cleanup_device, .process_eov = tape_std_process_eov, .irq = tape_34xx_irq, .read_block = tape_std_read_block, .write_block = tape_std_write_block, #ifdef CONFIG_S390_TAPE_BLOCK .bread = tape_34xx_bread, .free_bread = tape_34xx_free_bread, .check_locate = tape_34xx_check_locate, #endif .ioctl_fn = tape_34xx_ioctl, .mtop_array = tape_34xx_mtop }; static struct ccw_device_id tape_34xx_ids[] = { { CCW_DEVICE_DEVTYPE(0x3480, 0, 0x3480, 0), .driver_info = tape_3480}, { CCW_DEVICE_DEVTYPE(0x3490, 0, 0x3490, 0), .driver_info = tape_3490}, { /* end of list */ }, }; static int tape_34xx_online(struct ccw_device *cdev) { return tape_generic_online( dev_get_drvdata(&cdev->dev), &tape_discipline_34xx ); } static struct ccw_driver tape_34xx_driver = { .name = "tape_34xx", .owner = THIS_MODULE, .ids = tape_34xx_ids, .probe = tape_generic_probe, .remove = tape_generic_remove, .set_online = tape_34xx_online, .set_offline = tape_generic_offline, .freeze = tape_generic_pm_suspend, }; static int tape_34xx_init (void) { int rc; TAPE_DBF_AREA = debug_register ( "tape_34xx", 2, 2, 4*sizeof(long)); debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view); #ifdef DBF_LIKE_HELL debug_set_level(TAPE_DBF_AREA, 6); #endif DBF_EVENT(3, "34xx init\n"); /* Register driver for 3480/3490 tapes. */ rc = ccw_driver_register(&tape_34xx_driver); if (rc) DBF_EVENT(3, "34xx init failed\n"); else DBF_EVENT(3, "34xx registered\n"); return rc; } static void tape_34xx_exit(void) { ccw_driver_unregister(&tape_34xx_driver); debug_unregister(TAPE_DBF_AREA); } MODULE_DEVICE_TABLE(ccw, tape_34xx_ids); MODULE_AUTHOR("(C) 2001-2002 IBM Deutschland Entwicklung GmbH"); MODULE_DESCRIPTION("Linux on zSeries channel attached 3480 tape device driver"); MODULE_LICENSE("GPL"); module_init(tape_34xx_init); module_exit(tape_34xx_exit);
gpl-2.0
nasser-embedded/linux
drivers/usb/gadget/composite.c
43
36208
/* * composite.c - infrastructure for Composite USB Gadgets * * Copyright (C) 2006-2008 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* #define VERBOSE_DEBUG */ #include <linux/kallsyms.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/usb/composite.h> /* * The code in this file is utility code, used to build a gadget driver * from one or more "function" drivers, one or more "configuration" * objects, and a "usb_composite_driver" by gluing them together along * with the relevant device-wide data. */ /* big enough to hold our biggest descriptor */ #define USB_BUFSIZ 1024 static struct usb_composite_driver *composite; /* Some systems will need runtime overrides for the product identifers * published in the device descriptor, either numbers or strings or both. * String parameters are in UTF-8 (superset of ASCII's 7 bit characters). 
*/ static ushort idVendor; module_param(idVendor, ushort, 0); MODULE_PARM_DESC(idVendor, "USB Vendor ID"); static ushort idProduct; module_param(idProduct, ushort, 0); MODULE_PARM_DESC(idProduct, "USB Product ID"); static ushort bcdDevice; module_param(bcdDevice, ushort, 0); MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)"); static char *iManufacturer; module_param(iManufacturer, charp, 0); MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string"); static char *iProduct; module_param(iProduct, charp, 0); MODULE_PARM_DESC(iProduct, "USB Product string"); static char *iSerialNumber; module_param(iSerialNumber, charp, 0); MODULE_PARM_DESC(iSerialNumber, "SerialNumber string"); /*-------------------------------------------------------------------------*/ static ssize_t enable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_function *f = dev_get_drvdata(dev); return sprintf(buf, "%d\n", !f->disabled); } static ssize_t enable_store( struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct usb_function *f = dev_get_drvdata(dev); struct usb_composite_driver *driver = f->config->cdev->driver; int value; sscanf(buf, "%d", &value); if (driver->enable_function) driver->enable_function(f, value); else usb_function_set_enabled(f, value); return size; } static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store); void usb_function_set_enabled(struct usb_function *f, int enabled) { f->disabled = !enabled; kobject_uevent(&f->dev->kobj, KOBJ_CHANGE); } void usb_composite_force_reset(struct usb_composite_dev *cdev) { unsigned long flags; spin_lock_irqsave(&cdev->lock, flags); /* force reenumeration */ if (cdev && cdev->gadget && cdev->gadget->speed != USB_SPEED_UNKNOWN) { /* avoid sending a disconnect switch event until after we disconnect */ cdev->mute_switch = 1; spin_unlock_irqrestore(&cdev->lock, flags); usb_gadget_disconnect(cdev->gadget); msleep(10); usb_gadget_connect(cdev->gadget); } else { 
spin_unlock_irqrestore(&cdev->lock, flags); } } /** * usb_add_function() - add a function to a configuration * @config: the configuration * @function: the function being added * Context: single threaded during gadget setup * * After initialization, each configuration must have one or more * functions added to it. Adding a function involves calling its @bind() * method to allocate resources such as interface and string identifiers * and endpoints. * * This function returns the value of the function's bind(), which is * zero for success else a negative errno value. */ int usb_add_function(struct usb_configuration *config, struct usb_function *function) { struct usb_composite_dev *cdev = config->cdev; int value = -EINVAL; int index; DBG(cdev, "adding '%s'/%p to config '%s'/%p\n", function->name, function, config->label, config); if (!function->set_alt || !function->disable) goto done; index = atomic_inc_return(&cdev->driver->function_count); function->dev = device_create(cdev->driver->class, NULL, MKDEV(0, index), NULL, function->name); if (IS_ERR(function->dev)) return PTR_ERR(function->dev); value = device_create_file(function->dev, &dev_attr_enable); if (value < 0) { device_destroy(cdev->driver->class, MKDEV(0, index)); return value; } dev_set_drvdata(function->dev, function); function->config = config; list_add_tail(&function->list, &config->functions); /* REVISIT *require* function->bind? */ if (function->bind) { value = function->bind(config, function); if (value < 0) { list_del(&function->list); function->config = NULL; } } else value = 0; /* We allow configurations that don't work at both speeds. * If we run into a lowspeed Linux system, treat it the same * as full speed ... it's the function drivers that will need * to avoid bulk and ISO transfers. 
*/ if (!config->fullspeed && function->descriptors) config->fullspeed = true; if (!config->highspeed && function->hs_descriptors) config->highspeed = true; done: if (value) DBG(cdev, "adding '%s'/%p --> %d\n", function->name, function, value); return value; } /** * usb_function_deactivate - prevent function and gadget enumeration * @function: the function that isn't yet ready to respond * * Blocks response of the gadget driver to host enumeration by * preventing the data line pullup from being activated. This is * normally called during @bind() processing to change from the * initial "ready to respond" state, or when a required resource * becomes available. * * For example, drivers that serve as a passthrough to a userspace * daemon can block enumeration unless that daemon (such as an OBEX, * MTP, or print server) is ready to handle host requests. * * Not all systems support software control of their USB peripheral * data pullups. * * Returns zero on success, else negative errno. */ int usb_function_deactivate(struct usb_function *function) { struct usb_composite_dev *cdev = function->config->cdev; unsigned long flags; int status = 0; spin_lock_irqsave(&cdev->lock, flags); if (cdev->deactivations == 0) status = usb_gadget_disconnect(cdev->gadget); if (status == 0) cdev->deactivations++; spin_unlock_irqrestore(&cdev->lock, flags); return status; } /** * usb_function_activate - allow function and gadget enumeration * @function: function on which usb_function_activate() was called * * Reverses effect of usb_function_deactivate(). If no more functions * are delaying their activation, the gadget driver will respond to * host enumeration procedures. * * Returns zero on success, else negative errno. 
*/ int usb_function_activate(struct usb_function *function) { struct usb_composite_dev *cdev = function->config->cdev; int status = 0; spin_lock(&cdev->lock); if (WARN_ON(cdev->deactivations == 0)) status = -EINVAL; else { cdev->deactivations--; if (cdev->deactivations == 0) status = usb_gadget_connect(cdev->gadget); } spin_unlock(&cdev->lock); return status; } /** * usb_interface_id() - allocate an unused interface ID * @config: configuration associated with the interface * @function: function handling the interface * Context: single threaded during gadget setup * * usb_interface_id() is called from usb_function.bind() callbacks to * allocate new interface IDs. The function driver will then store that * ID in interface, association, CDC union, and other descriptors. It * will also handle any control requests targetted at that interface, * particularly changing its altsetting via set_alt(). There may * also be class-specific or vendor-specific requests to handle. * * All interface identifier should be allocated using this routine, to * ensure that for example different functions don't wrongly assign * different meanings to the same identifier. Note that since interface * identifers are configuration-specific, functions used in more than * one configuration (or more than once in a given configuration) need * multiple versions of the relevant descriptors. * * Returns the interface ID which was allocated; or -ENODEV if no * more interface IDs can be allocated. 
*/ int usb_interface_id(struct usb_configuration *config, struct usb_function *function) { unsigned id = config->next_interface_id; if (id < MAX_CONFIG_INTERFACES) { config->interface[id] = function; config->next_interface_id = id + 1; return id; } return -ENODEV; } static int config_buf(struct usb_configuration *config, enum usb_device_speed speed, void *buf, u8 type) { struct usb_config_descriptor *c = buf; struct usb_interface_descriptor *intf; void *next = buf + USB_DT_CONFIG_SIZE; int len = USB_BUFSIZ - USB_DT_CONFIG_SIZE; struct usb_function *f; int status; int interfaceCount = 0; u8 *dest; /* write the config descriptor */ c = buf; c->bLength = USB_DT_CONFIG_SIZE; c->bDescriptorType = type; /* wTotalLength and bNumInterfaces are written later */ c->bConfigurationValue = config->bConfigurationValue; c->iConfiguration = config->iConfiguration; c->bmAttributes = USB_CONFIG_ATT_ONE | config->bmAttributes; c->bMaxPower = config->bMaxPower ? : (CONFIG_USB_GADGET_VBUS_DRAW / 2); /* There may be e.g. 
OTG descriptors */ if (config->descriptors) { status = usb_descriptor_fillbuf(next, len, config->descriptors); if (status < 0) return status; len -= status; next += status; } /* add each function's descriptors */ list_for_each_entry(f, &config->functions, list) { struct usb_descriptor_header **descriptors; struct usb_descriptor_header *descriptor; if (speed == USB_SPEED_HIGH) descriptors = f->hs_descriptors; else descriptors = f->descriptors; if (f->disabled || !descriptors || descriptors[0] == NULL) continue; status = usb_descriptor_fillbuf(next, len, (const struct usb_descriptor_header **) descriptors); if (status < 0) return status; /* set interface numbers dynamically */ dest = next; while ((descriptor = *descriptors++) != NULL) { intf = (struct usb_interface_descriptor *)dest; if (intf->bDescriptorType == USB_DT_INTERFACE) { /* don't increment bInterfaceNumber for alternate settings */ if (intf->bAlternateSetting == 0) intf->bInterfaceNumber = interfaceCount++; else intf->bInterfaceNumber = interfaceCount - 1; } dest += intf->bLength; } len -= status; next += status; } len = next - buf; c->wTotalLength = cpu_to_le16(len); c->bNumInterfaces = interfaceCount; return len; } static int config_desc(struct usb_composite_dev *cdev, unsigned w_value) { struct usb_gadget *gadget = cdev->gadget; struct usb_configuration *c; u8 type = w_value >> 8; enum usb_device_speed speed = USB_SPEED_UNKNOWN; if (gadget_is_dualspeed(gadget)) { int hs = 0; if (gadget->speed == USB_SPEED_HIGH) hs = 1; if (type == USB_DT_OTHER_SPEED_CONFIG) hs = !hs; if (hs) speed = USB_SPEED_HIGH; } /* This is a lookup by config *INDEX* */ w_value &= 0xff; list_for_each_entry(c, &cdev->configs, list) { /* ignore configs that won't work at this speed */ if (speed == USB_SPEED_HIGH) { if (!c->highspeed) continue; } else { if (!c->fullspeed) continue; } if (w_value == 0) return config_buf(c, speed, cdev->req->buf, type); w_value--; } return -EINVAL; } static int count_configs(struct usb_composite_dev 
*cdev, unsigned type) { struct usb_gadget *gadget = cdev->gadget; struct usb_configuration *c; unsigned count = 0; int hs = 0; if (gadget_is_dualspeed(gadget)) { if (gadget->speed == USB_SPEED_HIGH) hs = 1; if (type == USB_DT_DEVICE_QUALIFIER) hs = !hs; } list_for_each_entry(c, &cdev->configs, list) { /* ignore configs that won't work at this speed */ if (hs) { if (!c->highspeed) continue; } else { if (!c->fullspeed) continue; } count++; } return count; } static void device_qual(struct usb_composite_dev *cdev) { struct usb_qualifier_descriptor *qual = cdev->req->buf; qual->bLength = sizeof(*qual); qual->bDescriptorType = USB_DT_DEVICE_QUALIFIER; /* POLICY: same bcdUSB and device type info at both speeds */ qual->bcdUSB = cdev->desc.bcdUSB; qual->bDeviceClass = cdev->desc.bDeviceClass; qual->bDeviceSubClass = cdev->desc.bDeviceSubClass; qual->bDeviceProtocol = cdev->desc.bDeviceProtocol; /* ASSUME same EP0 fifo size at both speeds */ qual->bMaxPacketSize0 = cdev->desc.bMaxPacketSize0; qual->bNumConfigurations = count_configs(cdev, USB_DT_DEVICE_QUALIFIER); qual->bRESERVED = 0; } /*-------------------------------------------------------------------------*/ static void reset_config(struct usb_composite_dev *cdev) { struct usb_function *f; DBG(cdev, "reset config\n"); list_for_each_entry(f, &cdev->config->functions, list) { if (f->disable) f->disable(f); bitmap_zero(f->endpoints, 32); } cdev->config = NULL; } static int set_config(struct usb_composite_dev *cdev, const struct usb_ctrlrequest *ctrl, unsigned number) { struct usb_gadget *gadget = cdev->gadget; struct usb_configuration *c = NULL; int result = -EINVAL; unsigned power = gadget_is_otg(gadget) ? 
8 : 100; int tmp; if (cdev->config) reset_config(cdev); if (number) { list_for_each_entry(c, &cdev->configs, list) { if (c->bConfigurationValue == number) { result = 0; break; } } if (result < 0) goto done; } else result = 0; INFO(cdev, "%s speed config #%d: %s\n", ({ char *speed; switch (gadget->speed) { case USB_SPEED_LOW: speed = "low"; break; case USB_SPEED_FULL: speed = "full"; break; case USB_SPEED_HIGH: speed = "high"; break; default: speed = "?"; break; } ; speed; }), number, c ? c->label : "unconfigured"); if (!c) goto done; cdev->config = c; /* Initialize all interfaces by setting them to altsetting zero. */ for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) { struct usb_function *f = c->interface[tmp]; struct usb_descriptor_header **descriptors; if (!f) break; if (f->disabled) continue; /* * Record which endpoints are used by the function. This is used * to dispatch control requests targeted at that endpoint to the * function's setup callback instead of the current * configuration's setup callback. */ if (gadget->speed == USB_SPEED_HIGH) descriptors = f->hs_descriptors; else descriptors = f->descriptors; for (; *descriptors; ++descriptors) { struct usb_endpoint_descriptor *ep; int addr; if ((*descriptors)->bDescriptorType != USB_DT_ENDPOINT) continue; ep = (struct usb_endpoint_descriptor *)*descriptors; addr = ((ep->bEndpointAddress & 0x80) >> 3) | (ep->bEndpointAddress & 0x0f); set_bit(addr, f->endpoints); } result = f->set_alt(f, tmp, 0); if (result < 0) { DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n", tmp, f->name, f, result); reset_config(cdev); goto done; } } /* when we return, be sure our power usage is valid */ power = c->bMaxPower ? (2 * c->bMaxPower) : CONFIG_USB_GADGET_VBUS_DRAW; done: usb_gadget_vbus_draw(gadget, power); schedule_work(&cdev->switch_work); return result; } /** * usb_add_config() - add a configuration to a device. 
* @cdev: wraps the USB gadget * @config: the configuration, with bConfigurationValue assigned * Context: single threaded during gadget setup * * One of the main tasks of a composite driver's bind() routine is to * add each of the configurations it supports, using this routine. * * This function returns the value of the configuration's bind(), which * is zero for success else a negative errno value. Binding configurations * assigns global resources including string IDs, and per-configuration * resources such as interface IDs and endpoints. */ int usb_add_config(struct usb_composite_dev *cdev, struct usb_configuration *config) { int status = -EINVAL; struct usb_configuration *c; DBG(cdev, "adding config #%u '%s'/%p\n", config->bConfigurationValue, config->label, config); if (!config->bConfigurationValue || !config->bind) goto done; /* Prevent duplicate configuration identifiers */ list_for_each_entry(c, &cdev->configs, list) { if (c->bConfigurationValue == config->bConfigurationValue) { status = -EBUSY; goto done; } } config->cdev = cdev; list_add_tail(&config->list, &cdev->configs); INIT_LIST_HEAD(&config->functions); config->next_interface_id = 0; status = config->bind(config); if (status < 0) { list_del(&config->list); config->cdev = NULL; } else { unsigned i; DBG(cdev, "cfg %d/%p speeds:%s%s\n", config->bConfigurationValue, config, config->highspeed ? " high" : "", config->fullspeed ? (gadget_is_dualspeed(cdev->gadget) ? " full" : " full/low") : ""); for (i = 0; i < MAX_CONFIG_INTERFACES; i++) { struct usb_function *f = config->interface[i]; if (!f) continue; DBG(cdev, " interface %d = %s/%p\n", i, f->name, f); } } /* set_alt(), or next config->bind(), sets up * ep->driver_data as needed. 
*/ usb_ep_autoconfig_reset(cdev->gadget); done: if (status) DBG(cdev, "added config '%s'/%u --> %d\n", config->label, config->bConfigurationValue, status); return status; } /*-------------------------------------------------------------------------*/ /* We support strings in multiple languages ... string descriptor zero * says which languages are supported. The typical case will be that * only one language (probably English) is used, with I18N handled on * the host side. */ static void collect_langs(struct usb_gadget_strings **sp, __le16 *buf) { const struct usb_gadget_strings *s; u16 language; __le16 *tmp; while (*sp) { s = *sp; language = cpu_to_le16(s->language); for (tmp = buf; *tmp && tmp < &buf[126]; tmp++) { if (*tmp == language) goto repeat; } *tmp++ = language; repeat: sp++; } } static int lookup_string( struct usb_gadget_strings **sp, void *buf, u16 language, int id ) { struct usb_gadget_strings *s; int value; while (*sp) { s = *sp++; if (s->language != language) continue; value = usb_gadget_get_string(s, id, buf); if (value > 0) return value; } return -EINVAL; } static int get_string(struct usb_composite_dev *cdev, void *buf, u16 language, int id) { struct usb_configuration *c; struct usb_function *f; int len; /* Yes, not only is USB's I18N support probably more than most * folk will ever care about ... also, it's all supported here. * (Except for UTF8 support for Unicode's "Astral Planes".) 
*/ /* 0 == report all available language codes */ if (id == 0) { struct usb_string_descriptor *s = buf; struct usb_gadget_strings **sp; memset(s, 0, 256); s->bDescriptorType = USB_DT_STRING; sp = composite->strings; if (sp) collect_langs(sp, s->wData); list_for_each_entry(c, &cdev->configs, list) { sp = c->strings; if (sp) collect_langs(sp, s->wData); list_for_each_entry(f, &c->functions, list) { sp = f->strings; if (sp) collect_langs(sp, s->wData); } } for (len = 0; len <= 126 && s->wData[len]; len++) continue; if (!len) return -EINVAL; s->bLength = 2 * (len + 1); return s->bLength; } /* Otherwise, look up and return a specified string. String IDs * are device-scoped, so we look up each string table we're told * about. These lookups are infrequent; simpler-is-better here. */ if (composite->strings) { len = lookup_string(composite->strings, buf, language, id); if (len > 0) return len; } list_for_each_entry(c, &cdev->configs, list) { if (c->strings) { len = lookup_string(c->strings, buf, language, id); if (len > 0) return len; } list_for_each_entry(f, &c->functions, list) { if (!f->strings) continue; len = lookup_string(f->strings, buf, language, id); if (len > 0) return len; } } return -EINVAL; } /** * usb_string_id() - allocate an unused string ID * @cdev: the device whose string descriptor IDs are being allocated * Context: single threaded during gadget setup * * @usb_string_id() is called from bind() callbacks to allocate * string IDs. Drivers for functions, configurations, or gadgets will * then store that ID in the appropriate descriptors and string table. * * All string identifier should be allocated using this routine, to * ensure that for example different functions don't wrongly assign * different meanings to the same identifier. 
*/ int usb_string_id(struct usb_composite_dev *cdev) { if (cdev->next_string_id < 254) { /* string id 0 is reserved */ cdev->next_string_id++; return cdev->next_string_id; } return -ENODEV; } /*-------------------------------------------------------------------------*/ static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req) { if (req->status || req->actual != req->length) DBG((struct usb_composite_dev *) ep->driver_data, "setup complete --> %d, %d/%d\n", req->status, req->actual, req->length); } /* * The setup() callback implements all the ep0 functionality that's * not handled lower down, in hardware or the hardware driver(like * device and endpoint feature flags, and their status). It's all * housekeeping for the gadget function we're implementing. Most of * the work is in config and function specific setup. */ static int composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) { struct usb_composite_dev *cdev = get_gadget_data(gadget); struct usb_request *req = cdev->req; int value = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u8 intf = w_index & 0xFF; u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); struct usb_function *f = NULL; u8 endp; /* partial re-init of the response message; the function or the * gadget might need to intercept e.g. a control-OUT completion * when we delegate to it. 
*/ req->zero = 0; req->complete = composite_setup_complete; req->length = USB_BUFSIZ; gadget->ep0->driver_data = cdev; switch (ctrl->bRequest) { /* we handle all standard USB descriptors */ case USB_REQ_GET_DESCRIPTOR: if (ctrl->bRequestType != USB_DIR_IN) goto unknown; switch (w_value >> 8) { case USB_DT_DEVICE: cdev->desc.bNumConfigurations = count_configs(cdev, USB_DT_DEVICE); value = min(w_length, (u16) sizeof cdev->desc); memcpy(req->buf, &cdev->desc, value); break; case USB_DT_DEVICE_QUALIFIER: if (!gadget_is_dualspeed(gadget)) break; device_qual(cdev); value = min_t(int, w_length, sizeof(struct usb_qualifier_descriptor)); break; case USB_DT_OTHER_SPEED_CONFIG: if (!gadget_is_dualspeed(gadget)) break; /* FALLTHROUGH */ case USB_DT_CONFIG: value = config_desc(cdev, w_value); if (value >= 0) value = min(w_length, (u16) value); break; case USB_DT_STRING: value = get_string(cdev, req->buf, w_index, w_value & 0xff); /* Allow functions to handle USB_DT_STRING. * This is required for MTP. 
*/ if (value < 0) { struct usb_configuration *cfg; list_for_each_entry(cfg, &cdev->configs, list) { if (cfg && cfg->setup) { value = cfg->setup(cfg, ctrl); if (value >= 0) break; } } } if (value >= 0) value = min(w_length, (u16) value); break; } break; /* any number of configs can work */ case USB_REQ_SET_CONFIGURATION: if (ctrl->bRequestType != 0) goto unknown; if (gadget_is_otg(gadget)) { if (gadget->a_hnp_support) DBG(cdev, "HNP available\n"); else if (gadget->a_alt_hnp_support) DBG(cdev, "HNP on another port\n"); else VDBG(cdev, "HNP inactive\n"); } spin_lock(&cdev->lock); value = set_config(cdev, ctrl, w_value); spin_unlock(&cdev->lock); break; case USB_REQ_GET_CONFIGURATION: if (ctrl->bRequestType != USB_DIR_IN) goto unknown; if (cdev->config) { *(u8 *)req->buf = cdev->config->bConfigurationValue; value = min(w_length, (u16) 1); } else *(u8 *)req->buf = 0; break; /* function drivers must handle get/set altsetting; if there's * no get() method, we know only altsetting zero works. */ case USB_REQ_SET_INTERFACE: if (ctrl->bRequestType != USB_RECIP_INTERFACE) goto unknown; if (!cdev->config || w_index >= MAX_CONFIG_INTERFACES) break; f = cdev->config->interface[intf]; if (!f) break; if (w_value && !f->set_alt) break; value = f->set_alt(f, w_index, w_value); break; case USB_REQ_GET_INTERFACE: if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)) goto unknown; if (!cdev->config || w_index >= MAX_CONFIG_INTERFACES) break; f = cdev->config->interface[intf]; if (!f) break; /* lots of interfaces only need altsetting zero... */ value = f->get_alt ? f->get_alt(f, w_index) : 0; if (value < 0) break; *((u8 *)req->buf) = value; value = min(w_length, (u16) 1); break; default: unknown: VDBG(cdev, "non-core control req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); /* functions always handle their interfaces and endpoints... * punt other recipients (other, WUSB, ...) to the current * configuration code. 
* * REVISIT it could make sense to let the composite device * take such requests too, if that's ever needed: to work * in config 0, etc. */ switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_INTERFACE: if (cdev->config == NULL) return value; f = cdev->config->interface[intf]; break; case USB_RECIP_ENDPOINT: endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f); list_for_each_entry(f, &cdev->config->functions, list) { if (test_bit(endp, f->endpoints)) break; } if (&f->list == &cdev->config->functions) f = NULL; break; } if (f && f->setup) value = f->setup(f, ctrl); else { struct usb_configuration *c; c = cdev->config; if (c && c->setup) value = c->setup(c, ctrl); } /* If the vendor request is not processed (value < 0), * call all device registered configure setup callbacks * to process it. * This is used to handle the following cases: * - vendor request is for the device and arrives before * setconfiguration. * - Some devices are required to handle vendor request before * setconfiguration such as MTP, USBNET. */ if (value < 0) { struct usb_configuration *cfg; list_for_each_entry(cfg, &cdev->configs, list) { if (cfg && cfg->setup) value = cfg->setup(cfg, ctrl); } } goto done; } /* respond with data transfer before status phase? */ if (value >= 0) { req->length = value; req->zero = value < w_length; value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC); if (value < 0) { DBG(cdev, "ep_queue --> %d\n", value); req->status = 0; composite_setup_complete(gadget->ep0, req); } } done: /* device either stalls (value < 0) or reports success */ return value; } static void composite_disconnect(struct usb_gadget *gadget) { struct usb_composite_dev *cdev = get_gadget_data(gadget); unsigned long flags; /* REVISIT: should we have config and device level * disconnect callbacks? 
*/
	spin_lock_irqsave(&cdev->lock, flags);
	if (cdev->config)
		reset_config(cdev);
	/* mute_switch set: suppress exactly one userspace notification
	 * (set elsewhere around an expected disconnect); otherwise report
	 * the state change through switch_work. */
	if (cdev->mute_switch)
		cdev->mute_switch = 0;
	else
		schedule_work(&cdev->switch_work);
	spin_unlock_irqrestore(&cdev->lock, flags);
}

/*-------------------------------------------------------------------------*/

/* sysfs "suspended" attribute: prints 1 while the gadget is suspended,
 * else 0. */
static ssize_t composite_show_suspended(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct usb_gadget *gadget = dev_to_usb_gadget(dev);
	struct usb_composite_dev *cdev = get_gadget_data(gadget);

	return sprintf(buf, "%d\n", cdev->suspended);
}

static DEVICE_ATTR(suspended, 0444, composite_show_suspended, NULL);

/* Tear down every configuration and every function (each unbind may free
 * its own memory), then release the ep0 request, the switch device, and
 * the composite device state itself. */
static void composite_unbind(struct usb_gadget *gadget)
{
	struct usb_composite_dev *cdev = get_gadget_data(gadget);

	/* composite_disconnect() must already have been called
	 * by the underlying peripheral controller driver!
	 * so there's no i/o concurrency that could affect the
	 * state protected by cdev->lock.
	 */
	WARN_ON(cdev->config);

	while (!list_empty(&cdev->configs)) {
		struct usb_configuration *c;

		c = list_first_entry(&cdev->configs,
				struct usb_configuration, list);
		while (!list_empty(&c->functions)) {
			struct usb_function *f;

			f = list_first_entry(&c->functions,
					struct usb_function, list);
			list_del(&f->list);
			if (f->unbind) {
				DBG(cdev, "unbind function '%s'/%p\n",
						f->name, f);
				/* may free memory for "f" */
				f->unbind(c, f);
			}
		}
		list_del(&c->list);
		if (c->unbind) {
			DBG(cdev, "unbind config '%s'/%p\n", c->label, c);
			/* may free memory for "c" */
			c->unbind(c);
		}
	}
	if (composite->unbind)
		composite->unbind(cdev);

	if (cdev->req) {
		kfree(cdev->req->buf);
		usb_ep_free_request(gadget->ep0, cdev->req);
	}
	switch_dev_unregister(&cdev->sdev);
	kfree(cdev);
	set_gadget_data(gadget, NULL);
	device_remove_file(&gadget->dev, &dev_attr_suspended);
	composite = NULL;
}

/* Replace the string with identifier "id" in one string table, if that
 * table contains it.  Used for module-parameter overrides such as
 * iManufacturer/iProduct/iSerialNumber. */
static void string_override_one(struct usb_gadget_strings *tab, u8 id,
		const char *s)
{
	struct usb_string *str = tab->strings;

	for (str = tab->strings; str->s; str++) {
		if (str->id == id) {
			str->s = s;
			return;
		}
	}
}
/* Apply one string override to every table in a NULL-terminated list
 * of string tables (one table per language). */
static void string_override(struct usb_gadget_strings **tab, u8 id,
		const char *s)
{
	while (*tab) {
		string_override_one(*tab, id, s);
		tab++;
	}
}

/* Deferred work item: publish the currently active configuration value
 * (or 0 for "unconfigured") through the switch device so userspace can
 * track configuration state. */
static void composite_switch_work(struct work_struct *data)
{
	struct usb_composite_dev *cdev =
		container_of(data, struct usb_composite_dev, switch_work);
	struct usb_configuration *config = cdev->config;

	if (config)
		switch_set_state(&cdev->sdev, config->bConfigurationValue);
	else
		switch_set_state(&cdev->sdev, 0);
}

/* Gadget-core bind callback: allocate the composite device state,
 * preallocate the ep0 control request/buffer, then hand off to the
 * composite driver's bind() to register configurations and functions.
 * Returns 0 on success or a negative errno; on failure everything is
 * undone via composite_unbind(). */
static int composite_bind(struct usb_gadget *gadget)
{
	struct usb_composite_dev *cdev;
	int status = -ENOMEM;

	cdev = kzalloc(sizeof *cdev, GFP_KERNEL);
	if (!cdev)
		return status;

	spin_lock_init(&cdev->lock);
	cdev->gadget = gadget;
	set_gadget_data(gadget, cdev);
	INIT_LIST_HEAD(&cdev->configs);

	/* preallocate control response and buffer */
	cdev->req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
	if (!cdev->req)
		goto fail;
	cdev->req->buf = kmalloc(USB_BUFSIZ, GFP_KERNEL);
	if (!cdev->req->buf)
		goto fail;
	cdev->req->complete = composite_setup_complete;
	gadget->ep0->driver_data = cdev;
	cdev->bufsiz = USB_BUFSIZ;
	cdev->driver = composite;

	usb_gadget_set_selfpowered(gadget);

	/* interface and string IDs start at zero via kzalloc.
	 * we force endpoints to start unassigned; few controller
	 * drivers will zero ep->driver_data.
*/ usb_ep_autoconfig_reset(cdev->gadget); /* standardized runtime overrides for device ID data */ if (idVendor) cdev->desc.idVendor = cpu_to_le16(idVendor); if (idProduct) cdev->desc.idProduct = cpu_to_le16(idProduct); if (bcdDevice) cdev->desc.bcdDevice = cpu_to_le16(bcdDevice); /* composite gadget needs to assign strings for whole device (like * serial number), register function drivers, potentially update * power state and consumption, etc */ status = composite->bind(cdev); if (status < 0) goto fail; cdev->sdev.name = "usb_configuration"; status = switch_dev_register(&cdev->sdev); if (status < 0) goto fail; INIT_WORK(&cdev->switch_work, composite_switch_work); cdev->desc = *composite->dev; cdev->desc.bMaxPacketSize0 = gadget->ep0->maxpacket; /* strings can't be assigned before bind() allocates the * releavnt identifiers */ if (cdev->desc.iManufacturer && iManufacturer) string_override(composite->strings, cdev->desc.iManufacturer, iManufacturer); if (cdev->desc.iProduct && iProduct) string_override(composite->strings, cdev->desc.iProduct, iProduct); if (cdev->desc.iSerialNumber && iSerialNumber) string_override(composite->strings, cdev->desc.iSerialNumber, iSerialNumber); status = device_create_file(&gadget->dev, &dev_attr_suspended); if (status) goto fail; INFO(cdev, "%s ready\n", composite->name); return 0; fail: composite_unbind(gadget); return status; } /*-------------------------------------------------------------------------*/ static void composite_suspend(struct usb_gadget *gadget) { struct usb_composite_dev *cdev = get_gadget_data(gadget); struct usb_function *f; /* REVISIT: should we have config level * suspend/resume callbacks? 
*/
	DBG(cdev, "suspend\n");
	/* notify the active functions first, then the composite driver */
	if (cdev->config) {
		list_for_each_entry(f, &cdev->config->functions, list) {
			if (f->suspend)
				f->suspend(f);
		}
	}
	if (composite->suspend)
		composite->suspend(cdev);

	cdev->suspended = 1;
}

/* Mirror of composite_suspend(): the composite driver's hook runs first,
 * then each function in the active configuration is resumed. */
static void composite_resume(struct usb_gadget *gadget)
{
	struct usb_composite_dev *cdev = get_gadget_data(gadget);
	struct usb_function *f;

	/* REVISIT: should we have config level
	 * suspend/resume callbacks?
	 */
	DBG(cdev, "resume\n");
	if (composite->resume)
		composite->resume(cdev);
	if (cdev->config) {
		list_for_each_entry(f, &cdev->config->functions, list) {
			if (f->resume)
				f->resume(f);
		}
	}

	cdev->suspended = 0;
}

/* uevent callback for per-function devices: export the function's name
 * and whether it is currently enabled. */
static int composite_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct usb_function *f = dev_get_drvdata(dev);

	if (!f) {
		/* this happens when the device is first created */
		return 0;
	}

	if (add_uevent_var(env, "FUNCTION=%s", f->name))
		return -ENOMEM;
	if (add_uevent_var(env, "ENABLED=%d", !f->disabled))
		return -ENOMEM;
	return 0;
}

/*-------------------------------------------------------------------------*/

static struct usb_gadget_driver composite_driver = {
	.speed		= USB_SPEED_HIGH,

	.bind		= composite_bind,
	.unbind		= composite_unbind,

	.setup		= composite_setup,
	.disconnect	= composite_disconnect,

	.suspend	= composite_suspend,
	.resume		= composite_resume,

	.driver	= {
		.owner		= THIS_MODULE,
	},
};

/**
 * usb_composite_register() - register a composite driver
 * @driver: the driver to register
 * Context: single threaded during gadget setup
 *
 * This function is used to register drivers using the composite driver
 * framework.  The return value is zero, or a negative errno value.
 * Those values normally come from the driver's @bind method, which does
 * all the work of setting up the driver to match the hardware.
 *
 * On successful return, the gadget is ready to respond to requests from
 * the host, unless one of its components invokes usb_gadget_disconnect()
 * while it was binding.
That would usually be done in order to wait for * some userspace participation. */ int usb_composite_register(struct usb_composite_driver *driver) { if (!driver || !driver->dev || !driver->bind || composite) return -EINVAL; if (!driver->name) driver->name = "composite"; composite_driver.function = (char *) driver->name; composite_driver.driver.name = driver->name; composite = driver; driver->class = class_create(THIS_MODULE, "usb_composite"); if (IS_ERR(driver->class)) return PTR_ERR(driver->class); driver->class->dev_uevent = composite_uevent; return usb_gadget_register_driver(&composite_driver); } /** * usb_composite_unregister() - unregister a composite driver * @driver: the driver to unregister * * This function is used to unregister drivers using the composite * driver framework. */ void usb_composite_unregister(struct usb_composite_driver *driver) { if (composite != driver) return; usb_gadget_unregister_driver(&composite_driver); }
gpl-2.0
rudischilder/gr10_1
sw/airborne/modules/meteo/humid_dpicco.c
43
2136
/*
 * Copyright (C) 2005-2012 The Paparazzi Team
 *
 * This file is part of paparazzi.
 *
 * paparazzi is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * paparazzi is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with paparazzi; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */

/**
 * @file modules/meteo/humid_dpicco.c
 * @brief DigiPicco I2C sensor interface
 *
 * This reads the values for humidity and temperature from the IST
 * DigiPicco sensor through I2C.
 */

#include "modules/meteo/humid_dpicco.h"
#include "mcu_periph/i2c.h"
#include "led.h"
#include "mcu_periph/uart.h"
#include "messages.h"
#include "subsystems/datalink/downlink.h"

/* I2C bus the sensor is attached to; may be overridden at build time */
#ifndef DPICCO_I2C_DEV
#define DPICCO_I2C_DEV i2c0
#endif

/* I2C slave address of the DigiPicco */
#define DPICCO_SLAVE_ADDR 0xF0

uint16_t dpicco_val[2];        /* raw readings: [0] humidity, [1] temperature */
float dpicco_humid;            /* scaled humidity value */
float dpicco_temp;             /* scaled temperature value */
struct i2c_transaction dpicco_trans;  /* reusable I2C transaction state */

/* Module init hook: mark the transaction slot as idle. */
void dpicco_init(void)
{
  dpicco_trans.status = I2CTransDone;
}

/* Periodic hook: start a 4-byte read (two 16-bit values). */
void dpicco_periodic(void)
{
  /* init read */
  i2c_receive(&DPICCO_I2C_DEV, &dpicco_trans, DPICCO_SLAVE_ADDR, 4);
}

/* Event hook: when a read completed, convert the two MSB-first words to
 * engineering units and send them downlink, then free the transaction. */
void dpicco_event(void)
{
  if (dpicco_trans.status == I2CTransSuccess) {
    //LED_TOGGLE(2);

    /* bytes arrive most-significant first */
    dpicco_val[0] = (dpicco_trans.buf[0] << 8) | dpicco_trans.buf[1];
    dpicco_val[1] = (dpicco_trans.buf[2] << 8) | dpicco_trans.buf[3];

    /* scale constants come from humid_dpicco.h — assumed to match the
     * sensor datasheet; TODO(review) confirm */
    dpicco_humid = (dpicco_val[0] * DPICCO_HUMID_RANGE) / DPICCO_HUMID_MAX;
    dpicco_temp = ((dpicco_val[1] * DPICCO_TEMP_RANGE) / DPICCO_TEMP_MAX) + DPICCO_TEMP_OFFS;

    DOWNLINK_SEND_DPICCO_STATUS(DefaultChannel, DefaultDevice,
                                &dpicco_val[0],
                                &dpicco_val[1],
                                &dpicco_humid,
                                &dpicco_temp);

    dpicco_trans.status = I2CTransDone;
  }
}
gpl-2.0
hallor/linux
drivers/input/misc/drv260x.c
555
20632
/* * DRV260X haptics driver family * * Author: Dan Murphy <dmurphy@ti.com> * * Copyright: (C) 2014 Texas Instruments, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/i2c.h> #include <linux/input.h> #include <linux/module.h> #include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/regulator/consumer.h> #include <dt-bindings/input/ti-drv260x.h> #include <linux/platform_data/drv260x-pdata.h> #define DRV260X_STATUS 0x0 #define DRV260X_MODE 0x1 #define DRV260X_RT_PB_IN 0x2 #define DRV260X_LIB_SEL 0x3 #define DRV260X_WV_SEQ_1 0x4 #define DRV260X_WV_SEQ_2 0x5 #define DRV260X_WV_SEQ_3 0x6 #define DRV260X_WV_SEQ_4 0x7 #define DRV260X_WV_SEQ_5 0x8 #define DRV260X_WV_SEQ_6 0x9 #define DRV260X_WV_SEQ_7 0xa #define DRV260X_WV_SEQ_8 0xb #define DRV260X_GO 0xc #define DRV260X_OVERDRIVE_OFF 0xd #define DRV260X_SUSTAIN_P_OFF 0xe #define DRV260X_SUSTAIN_N_OFF 0xf #define DRV260X_BRAKE_OFF 0x10 #define DRV260X_A_TO_V_CTRL 0x11 #define DRV260X_A_TO_V_MIN_INPUT 0x12 #define DRV260X_A_TO_V_MAX_INPUT 0x13 #define DRV260X_A_TO_V_MIN_OUT 0x14 #define DRV260X_A_TO_V_MAX_OUT 0x15 #define DRV260X_RATED_VOLT 0x16 #define DRV260X_OD_CLAMP_VOLT 0x17 #define DRV260X_CAL_COMP 0x18 #define DRV260X_CAL_BACK_EMF 0x19 #define DRV260X_FEEDBACK_CTRL 0x1a #define DRV260X_CTRL1 0x1b #define DRV260X_CTRL2 0x1c #define DRV260X_CTRL3 0x1d #define DRV260X_CTRL4 0x1e #define DRV260X_CTRL5 0x1f #define DRV260X_LRA_LOOP_PERIOD 0x20 #define DRV260X_VBAT_MON 0x21 #define 
DRV260X_LRA_RES_PERIOD 0x22 #define DRV260X_MAX_REG 0x23 #define DRV260X_GO_BIT 0x01 /* Library Selection */ #define DRV260X_LIB_SEL_MASK 0x07 #define DRV260X_LIB_SEL_RAM 0x0 #define DRV260X_LIB_SEL_OD 0x1 #define DRV260X_LIB_SEL_40_60 0x2 #define DRV260X_LIB_SEL_60_80 0x3 #define DRV260X_LIB_SEL_100_140 0x4 #define DRV260X_LIB_SEL_140_PLUS 0x5 #define DRV260X_LIB_SEL_HIZ_MASK 0x10 #define DRV260X_LIB_SEL_HIZ_EN 0x01 #define DRV260X_LIB_SEL_HIZ_DIS 0 /* Mode register */ #define DRV260X_STANDBY (1 << 6) #define DRV260X_STANDBY_MASK 0x40 #define DRV260X_INTERNAL_TRIGGER 0x00 #define DRV260X_EXT_TRIGGER_EDGE 0x01 #define DRV260X_EXT_TRIGGER_LEVEL 0x02 #define DRV260X_PWM_ANALOG_IN 0x03 #define DRV260X_AUDIOHAPTIC 0x04 #define DRV260X_RT_PLAYBACK 0x05 #define DRV260X_DIAGNOSTICS 0x06 #define DRV260X_AUTO_CAL 0x07 /* Audio to Haptics Control */ #define DRV260X_AUDIO_HAPTICS_PEAK_10MS (0 << 2) #define DRV260X_AUDIO_HAPTICS_PEAK_20MS (1 << 2) #define DRV260X_AUDIO_HAPTICS_PEAK_30MS (2 << 2) #define DRV260X_AUDIO_HAPTICS_PEAK_40MS (3 << 2) #define DRV260X_AUDIO_HAPTICS_FILTER_100HZ 0x00 #define DRV260X_AUDIO_HAPTICS_FILTER_125HZ 0x01 #define DRV260X_AUDIO_HAPTICS_FILTER_150HZ 0x02 #define DRV260X_AUDIO_HAPTICS_FILTER_200HZ 0x03 /* Min/Max Input/Output Voltages */ #define DRV260X_AUDIO_HAPTICS_MIN_IN_VOLT 0x19 #define DRV260X_AUDIO_HAPTICS_MAX_IN_VOLT 0x64 #define DRV260X_AUDIO_HAPTICS_MIN_OUT_VOLT 0x19 #define DRV260X_AUDIO_HAPTICS_MAX_OUT_VOLT 0xFF /* Feedback register */ #define DRV260X_FB_REG_ERM_MODE 0x7f #define DRV260X_FB_REG_LRA_MODE (1 << 7) #define DRV260X_BRAKE_FACTOR_MASK 0x1f #define DRV260X_BRAKE_FACTOR_2X (1 << 0) #define DRV260X_BRAKE_FACTOR_3X (2 << 4) #define DRV260X_BRAKE_FACTOR_4X (3 << 4) #define DRV260X_BRAKE_FACTOR_6X (4 << 4) #define DRV260X_BRAKE_FACTOR_8X (5 << 4) #define DRV260X_BRAKE_FACTOR_16 (6 << 4) #define DRV260X_BRAKE_FACTOR_DIS (7 << 4) #define DRV260X_LOOP_GAIN_LOW 0xf3 #define DRV260X_LOOP_GAIN_MED (1 << 2) #define DRV260X_LOOP_GAIN_HIGH 
(2 << 2) #define DRV260X_LOOP_GAIN_VERY_HIGH (3 << 2) #define DRV260X_BEMF_GAIN_0 0xfc #define DRV260X_BEMF_GAIN_1 (1 << 0) #define DRV260X_BEMF_GAIN_2 (2 << 0) #define DRV260X_BEMF_GAIN_3 (3 << 0) /* Control 1 register */ #define DRV260X_AC_CPLE_EN (1 << 5) #define DRV260X_STARTUP_BOOST (1 << 7) /* Control 2 register */ #define DRV260X_IDISS_TIME_45 0 #define DRV260X_IDISS_TIME_75 (1 << 0) #define DRV260X_IDISS_TIME_150 (1 << 1) #define DRV260X_IDISS_TIME_225 0x03 #define DRV260X_BLANK_TIME_45 (0 << 2) #define DRV260X_BLANK_TIME_75 (1 << 2) #define DRV260X_BLANK_TIME_150 (2 << 2) #define DRV260X_BLANK_TIME_225 (3 << 2) #define DRV260X_SAMP_TIME_150 (0 << 4) #define DRV260X_SAMP_TIME_200 (1 << 4) #define DRV260X_SAMP_TIME_250 (2 << 4) #define DRV260X_SAMP_TIME_300 (3 << 4) #define DRV260X_BRAKE_STABILIZER (1 << 6) #define DRV260X_UNIDIR_IN (0 << 7) #define DRV260X_BIDIR_IN (1 << 7) /* Control 3 Register */ #define DRV260X_LRA_OPEN_LOOP (1 << 0) #define DRV260X_ANANLOG_IN (1 << 1) #define DRV260X_LRA_DRV_MODE (1 << 2) #define DRV260X_RTP_UNSIGNED_DATA (1 << 3) #define DRV260X_SUPPLY_COMP_DIS (1 << 4) #define DRV260X_ERM_OPEN_LOOP (1 << 5) #define DRV260X_NG_THRESH_0 (0 << 6) #define DRV260X_NG_THRESH_2 (1 << 6) #define DRV260X_NG_THRESH_4 (2 << 6) #define DRV260X_NG_THRESH_8 (3 << 6) /* Control 4 Register */ #define DRV260X_AUTOCAL_TIME_150MS (0 << 4) #define DRV260X_AUTOCAL_TIME_250MS (1 << 4) #define DRV260X_AUTOCAL_TIME_500MS (2 << 4) #define DRV260X_AUTOCAL_TIME_1000MS (3 << 4) /** * struct drv260x_data - * @input_dev - Pointer to the input device * @client - Pointer to the I2C client * @regmap - Register map of the device * @work - Work item used to off load the enable/disable of the vibration * @enable_gpio - Pointer to the gpio used for enable/disabling * @regulator - Pointer to the regulator for the IC * @magnitude - Magnitude of the vibration event * @mode - The operating mode of the IC (LRA_NO_CAL, ERM or LRA) * @library - The vibration library to be used 
* @rated_voltage - The rated_voltage of the actuator * @overdriver_voltage - The over drive voltage of the actuator **/ struct drv260x_data { struct input_dev *input_dev; struct i2c_client *client; struct regmap *regmap; struct work_struct work; struct gpio_desc *enable_gpio; struct regulator *regulator; u32 magnitude; u32 mode; u32 library; int rated_voltage; int overdrive_voltage; }; static const struct reg_default drv260x_reg_defs[] = { { DRV260X_STATUS, 0xe0 }, { DRV260X_MODE, 0x40 }, { DRV260X_RT_PB_IN, 0x00 }, { DRV260X_LIB_SEL, 0x00 }, { DRV260X_WV_SEQ_1, 0x01 }, { DRV260X_WV_SEQ_2, 0x00 }, { DRV260X_WV_SEQ_3, 0x00 }, { DRV260X_WV_SEQ_4, 0x00 }, { DRV260X_WV_SEQ_5, 0x00 }, { DRV260X_WV_SEQ_6, 0x00 }, { DRV260X_WV_SEQ_7, 0x00 }, { DRV260X_WV_SEQ_8, 0x00 }, { DRV260X_GO, 0x00 }, { DRV260X_OVERDRIVE_OFF, 0x00 }, { DRV260X_SUSTAIN_P_OFF, 0x00 }, { DRV260X_SUSTAIN_N_OFF, 0x00 }, { DRV260X_BRAKE_OFF, 0x00 }, { DRV260X_A_TO_V_CTRL, 0x05 }, { DRV260X_A_TO_V_MIN_INPUT, 0x19 }, { DRV260X_A_TO_V_MAX_INPUT, 0xff }, { DRV260X_A_TO_V_MIN_OUT, 0x19 }, { DRV260X_A_TO_V_MAX_OUT, 0xff }, { DRV260X_RATED_VOLT, 0x3e }, { DRV260X_OD_CLAMP_VOLT, 0x8c }, { DRV260X_CAL_COMP, 0x0c }, { DRV260X_CAL_BACK_EMF, 0x6c }, { DRV260X_FEEDBACK_CTRL, 0x36 }, { DRV260X_CTRL1, 0x93 }, { DRV260X_CTRL2, 0xfa }, { DRV260X_CTRL3, 0xa0 }, { DRV260X_CTRL4, 0x20 }, { DRV260X_CTRL5, 0x80 }, { DRV260X_LRA_LOOP_PERIOD, 0x33 }, { DRV260X_VBAT_MON, 0x00 }, { DRV260X_LRA_RES_PERIOD, 0x00 }, }; #define DRV260X_DEF_RATED_VOLT 0x90 #define DRV260X_DEF_OD_CLAMP_VOLT 0x90 /** * Rated and Overdriver Voltages: * Calculated using the formula r = v * 255 / 5.6 * where r is what will be written to the register * and v is the rated or overdriver voltage of the actuator **/ static int drv260x_calculate_voltage(unsigned int voltage) { return (voltage * 255 / 5600); } static void drv260x_worker(struct work_struct *work) { struct drv260x_data *haptics = container_of(work, struct drv260x_data, work); int error; 
gpiod_set_value(haptics->enable_gpio, 1); /* Data sheet says to wait 250us before trying to communicate */ udelay(250); error = regmap_write(haptics->regmap, DRV260X_MODE, DRV260X_RT_PLAYBACK); if (error) { dev_err(&haptics->client->dev, "Failed to write set mode: %d\n", error); } else { error = regmap_write(haptics->regmap, DRV260X_RT_PB_IN, haptics->magnitude); if (error) dev_err(&haptics->client->dev, "Failed to set magnitude: %d\n", error); } } static int drv260x_haptics_play(struct input_dev *input, void *data, struct ff_effect *effect) { struct drv260x_data *haptics = input_get_drvdata(input); haptics->mode = DRV260X_LRA_NO_CAL_MODE; if (effect->u.rumble.strong_magnitude > 0) haptics->magnitude = effect->u.rumble.strong_magnitude; else if (effect->u.rumble.weak_magnitude > 0) haptics->magnitude = effect->u.rumble.weak_magnitude; else haptics->magnitude = 0; schedule_work(&haptics->work); return 0; } static void drv260x_close(struct input_dev *input) { struct drv260x_data *haptics = input_get_drvdata(input); int error; cancel_work_sync(&haptics->work); error = regmap_write(haptics->regmap, DRV260X_MODE, DRV260X_STANDBY); if (error) dev_err(&haptics->client->dev, "Failed to enter standby mode: %d\n", error); gpiod_set_value(haptics->enable_gpio, 0); } static const struct reg_sequence drv260x_lra_cal_regs[] = { { DRV260X_MODE, DRV260X_AUTO_CAL }, { DRV260X_CTRL3, DRV260X_NG_THRESH_2 }, { DRV260X_FEEDBACK_CTRL, DRV260X_FB_REG_LRA_MODE | DRV260X_BRAKE_FACTOR_4X | DRV260X_LOOP_GAIN_HIGH }, }; static const struct reg_sequence drv260x_lra_init_regs[] = { { DRV260X_MODE, DRV260X_RT_PLAYBACK }, { DRV260X_A_TO_V_CTRL, DRV260X_AUDIO_HAPTICS_PEAK_20MS | DRV260X_AUDIO_HAPTICS_FILTER_125HZ }, { DRV260X_A_TO_V_MIN_INPUT, DRV260X_AUDIO_HAPTICS_MIN_IN_VOLT }, { DRV260X_A_TO_V_MAX_INPUT, DRV260X_AUDIO_HAPTICS_MAX_IN_VOLT }, { DRV260X_A_TO_V_MIN_OUT, DRV260X_AUDIO_HAPTICS_MIN_OUT_VOLT }, { DRV260X_A_TO_V_MAX_OUT, DRV260X_AUDIO_HAPTICS_MAX_OUT_VOLT }, { DRV260X_FEEDBACK_CTRL, 
DRV260X_FB_REG_LRA_MODE | DRV260X_BRAKE_FACTOR_2X | DRV260X_LOOP_GAIN_MED | DRV260X_BEMF_GAIN_3 }, { DRV260X_CTRL1, DRV260X_STARTUP_BOOST }, { DRV260X_CTRL2, DRV260X_SAMP_TIME_250 }, { DRV260X_CTRL3, DRV260X_NG_THRESH_2 | DRV260X_ANANLOG_IN }, { DRV260X_CTRL4, DRV260X_AUTOCAL_TIME_500MS }, }; static const struct reg_sequence drv260x_erm_cal_regs[] = { { DRV260X_MODE, DRV260X_AUTO_CAL }, { DRV260X_A_TO_V_MIN_INPUT, DRV260X_AUDIO_HAPTICS_MIN_IN_VOLT }, { DRV260X_A_TO_V_MAX_INPUT, DRV260X_AUDIO_HAPTICS_MAX_IN_VOLT }, { DRV260X_A_TO_V_MIN_OUT, DRV260X_AUDIO_HAPTICS_MIN_OUT_VOLT }, { DRV260X_A_TO_V_MAX_OUT, DRV260X_AUDIO_HAPTICS_MAX_OUT_VOLT }, { DRV260X_FEEDBACK_CTRL, DRV260X_BRAKE_FACTOR_3X | DRV260X_LOOP_GAIN_MED | DRV260X_BEMF_GAIN_2 }, { DRV260X_CTRL1, DRV260X_STARTUP_BOOST }, { DRV260X_CTRL2, DRV260X_SAMP_TIME_250 | DRV260X_BLANK_TIME_75 | DRV260X_IDISS_TIME_75 }, { DRV260X_CTRL3, DRV260X_NG_THRESH_2 | DRV260X_ERM_OPEN_LOOP }, { DRV260X_CTRL4, DRV260X_AUTOCAL_TIME_500MS }, }; static int drv260x_init(struct drv260x_data *haptics) { int error; unsigned int cal_buf; error = regmap_write(haptics->regmap, DRV260X_RATED_VOLT, haptics->rated_voltage); if (error) { dev_err(&haptics->client->dev, "Failed to write DRV260X_RATED_VOLT register: %d\n", error); return error; } error = regmap_write(haptics->regmap, DRV260X_OD_CLAMP_VOLT, haptics->overdrive_voltage); if (error) { dev_err(&haptics->client->dev, "Failed to write DRV260X_OD_CLAMP_VOLT register: %d\n", error); return error; } switch (haptics->mode) { case DRV260X_LRA_MODE: error = regmap_register_patch(haptics->regmap, drv260x_lra_cal_regs, ARRAY_SIZE(drv260x_lra_cal_regs)); if (error) { dev_err(&haptics->client->dev, "Failed to write LRA calibration registers: %d\n", error); return error; } break; case DRV260X_ERM_MODE: error = regmap_register_patch(haptics->regmap, drv260x_erm_cal_regs, ARRAY_SIZE(drv260x_erm_cal_regs)); if (error) { dev_err(&haptics->client->dev, "Failed to write ERM calibration registers: %d\n", 
error); return error; } error = regmap_update_bits(haptics->regmap, DRV260X_LIB_SEL, DRV260X_LIB_SEL_MASK, haptics->library); if (error) { dev_err(&haptics->client->dev, "Failed to write DRV260X_LIB_SEL register: %d\n", error); return error; } break; default: error = regmap_register_patch(haptics->regmap, drv260x_lra_init_regs, ARRAY_SIZE(drv260x_lra_init_regs)); if (error) { dev_err(&haptics->client->dev, "Failed to write LRA init registers: %d\n", error); return error; } error = regmap_update_bits(haptics->regmap, DRV260X_LIB_SEL, DRV260X_LIB_SEL_MASK, haptics->library); if (error) { dev_err(&haptics->client->dev, "Failed to write DRV260X_LIB_SEL register: %d\n", error); return error; } /* No need to set GO bit here */ return 0; } error = regmap_write(haptics->regmap, DRV260X_GO, DRV260X_GO_BIT); if (error) { dev_err(&haptics->client->dev, "Failed to write GO register: %d\n", error); return error; } do { error = regmap_read(haptics->regmap, DRV260X_GO, &cal_buf); if (error) { dev_err(&haptics->client->dev, "Failed to read GO register: %d\n", error); return error; } } while (cal_buf == DRV260X_GO_BIT); return 0; } static const struct regmap_config drv260x_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = DRV260X_MAX_REG, .reg_defaults = drv260x_reg_defs, .num_reg_defaults = ARRAY_SIZE(drv260x_reg_defs), .cache_type = REGCACHE_NONE, }; #ifdef CONFIG_OF static int drv260x_parse_dt(struct device *dev, struct drv260x_data *haptics) { struct device_node *np = dev->of_node; unsigned int voltage; int error; error = of_property_read_u32(np, "mode", &haptics->mode); if (error) { dev_err(dev, "%s: No entry for mode\n", __func__); return error; } error = of_property_read_u32(np, "library-sel", &haptics->library); if (error) { dev_err(dev, "%s: No entry for library selection\n", __func__); return error; } error = of_property_read_u32(np, "vib-rated-mv", &voltage); if (!error) haptics->rated_voltage = drv260x_calculate_voltage(voltage); error = 
of_property_read_u32(np, "vib-overdrive-mv", &voltage); if (!error) haptics->overdrive_voltage = drv260x_calculate_voltage(voltage); return 0; } #else static inline int drv260x_parse_dt(struct device *dev, struct drv260x_data *haptics) { dev_err(dev, "no platform data defined\n"); return -EINVAL; } #endif static int drv260x_probe(struct i2c_client *client, const struct i2c_device_id *id) { const struct drv260x_platform_data *pdata = dev_get_platdata(&client->dev); struct drv260x_data *haptics; int error; haptics = devm_kzalloc(&client->dev, sizeof(*haptics), GFP_KERNEL); if (!haptics) return -ENOMEM; haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT; haptics->rated_voltage = DRV260X_DEF_RATED_VOLT; if (pdata) { haptics->mode = pdata->mode; haptics->library = pdata->library_selection; if (pdata->vib_overdrive_voltage) haptics->overdrive_voltage = drv260x_calculate_voltage(pdata->vib_overdrive_voltage); if (pdata->vib_rated_voltage) haptics->rated_voltage = drv260x_calculate_voltage(pdata->vib_rated_voltage); } else if (client->dev.of_node) { error = drv260x_parse_dt(&client->dev, haptics); if (error) return error; } else { dev_err(&client->dev, "Platform data not set\n"); return -ENODEV; } if (haptics->mode < DRV260X_LRA_MODE || haptics->mode > DRV260X_ERM_MODE) { dev_err(&client->dev, "Vibrator mode is invalid: %i\n", haptics->mode); return -EINVAL; } if (haptics->library < DRV260X_LIB_EMPTY || haptics->library > DRV260X_ERM_LIB_F) { dev_err(&client->dev, "Library value is invalid: %i\n", haptics->library); return -EINVAL; } if (haptics->mode == DRV260X_LRA_MODE && haptics->library != DRV260X_LIB_EMPTY && haptics->library != DRV260X_LIB_LRA) { dev_err(&client->dev, "LRA Mode with ERM Library mismatch\n"); return -EINVAL; } if (haptics->mode == DRV260X_ERM_MODE && (haptics->library == DRV260X_LIB_EMPTY || haptics->library == DRV260X_LIB_LRA)) { dev_err(&client->dev, "ERM Mode with LRA Library mismatch\n"); return -EINVAL; } haptics->regulator = 
devm_regulator_get(&client->dev, "vbat"); if (IS_ERR(haptics->regulator)) { error = PTR_ERR(haptics->regulator); dev_err(&client->dev, "unable to get regulator, error: %d\n", error); return error; } haptics->enable_gpio = devm_gpiod_get_optional(&client->dev, "enable", GPIOD_OUT_HIGH); if (IS_ERR(haptics->enable_gpio)) return PTR_ERR(haptics->enable_gpio); haptics->input_dev = devm_input_allocate_device(&client->dev); if (!haptics->input_dev) { dev_err(&client->dev, "Failed to allocate input device\n"); return -ENOMEM; } haptics->input_dev->name = "drv260x:haptics"; haptics->input_dev->dev.parent = client->dev.parent; haptics->input_dev->close = drv260x_close; input_set_drvdata(haptics->input_dev, haptics); input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE); error = input_ff_create_memless(haptics->input_dev, NULL, drv260x_haptics_play); if (error) { dev_err(&client->dev, "input_ff_create() failed: %d\n", error); return error; } INIT_WORK(&haptics->work, drv260x_worker); haptics->client = client; i2c_set_clientdata(client, haptics); haptics->regmap = devm_regmap_init_i2c(client, &drv260x_regmap_config); if (IS_ERR(haptics->regmap)) { error = PTR_ERR(haptics->regmap); dev_err(&client->dev, "Failed to allocate register map: %d\n", error); return error; } error = drv260x_init(haptics); if (error) { dev_err(&client->dev, "Device init failed: %d\n", error); return error; } error = input_register_device(haptics->input_dev); if (error) { dev_err(&client->dev, "couldn't register input device: %d\n", error); return error; } return 0; } static int __maybe_unused drv260x_suspend(struct device *dev) { struct drv260x_data *haptics = dev_get_drvdata(dev); int ret = 0; mutex_lock(&haptics->input_dev->mutex); if (haptics->input_dev->users) { ret = regmap_update_bits(haptics->regmap, DRV260X_MODE, DRV260X_STANDBY_MASK, DRV260X_STANDBY); if (ret) { dev_err(dev, "Failed to set standby mode\n"); goto out; } gpiod_set_value(haptics->enable_gpio, 0); ret = 
regulator_disable(haptics->regulator); if (ret) { dev_err(dev, "Failed to disable regulator\n"); regmap_update_bits(haptics->regmap, DRV260X_MODE, DRV260X_STANDBY_MASK, 0); } } out: mutex_unlock(&haptics->input_dev->mutex); return ret; } static int __maybe_unused drv260x_resume(struct device *dev) { struct drv260x_data *haptics = dev_get_drvdata(dev); int ret = 0; mutex_lock(&haptics->input_dev->mutex); if (haptics->input_dev->users) { ret = regulator_enable(haptics->regulator); if (ret) { dev_err(dev, "Failed to enable regulator\n"); goto out; } ret = regmap_update_bits(haptics->regmap, DRV260X_MODE, DRV260X_STANDBY_MASK, 0); if (ret) { dev_err(dev, "Failed to unset standby mode\n"); regulator_disable(haptics->regulator); goto out; } gpiod_set_value(haptics->enable_gpio, 1); } out: mutex_unlock(&haptics->input_dev->mutex); return ret; } static SIMPLE_DEV_PM_OPS(drv260x_pm_ops, drv260x_suspend, drv260x_resume); static const struct i2c_device_id drv260x_id[] = { { "drv2605l", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, drv260x_id); #ifdef CONFIG_OF static const struct of_device_id drv260x_of_match[] = { { .compatible = "ti,drv2604", }, { .compatible = "ti,drv2604l", }, { .compatible = "ti,drv2605", }, { .compatible = "ti,drv2605l", }, { } }; MODULE_DEVICE_TABLE(of, drv260x_of_match); #endif static struct i2c_driver drv260x_driver = { .probe = drv260x_probe, .driver = { .name = "drv260x-haptics", .of_match_table = of_match_ptr(drv260x_of_match), .pm = &drv260x_pm_ops, }, .id_table = drv260x_id, }; module_i2c_driver(drv260x_driver); MODULE_DESCRIPTION("TI DRV260x haptics driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
gpl-2.0
doadin/kernel_gravitysmart
drivers/usb/image/microtek.c
555
23616
/* Driver for Microtek Scanmaker X6 USB scanner, and possibly others. * * (C) Copyright 2000 John Fremlin <vii@penguinpowered.com> * (C) Copyright 2000 Oliver Neukum <Oliver.Neukum@lrz.uni-muenchen.de> * * Parts shamelessly stolen from usb-storage and copyright by their * authors. Thanks to Matt Dharm for giving us permission! * * This driver implements a SCSI host controller driver and a USB * device driver. To avoid confusion, all the USB related stuff is * prefixed by mts_usb_ and all the SCSI stuff by mts_scsi_. * * Microtek (www.microtek.com) did not release the specifications for * their USB protocol to us, so we had to reverse engineer them. We * don't know for which models they are valid. * * The X6 USB has three bulk endpoints, one output (0x1) down which * commands and outgoing data are sent, and two input: 0x82 from which * normal data is read from the scanner (in packets of maximum 32 * bytes) and from which the status byte is read, and 0x83 from which * the results of a scan (or preview) are read in up to 64 * 1024 byte * chunks by the Windows driver. We don't know how much it is possible * to read at a time from 0x83. * * It seems possible to read (with URB transfers) everything from 0x82 * in one go, without bothering to read in 32 byte chunks. * * There seems to be an optimisation of a further READ implicit if * you simply read from 0x83. * * Guessed protocol: * * Send raw SCSI command to EP 0x1 * * If there is data to receive: * If the command was READ datatype=image: * Read a lot of data from EP 0x83 * Else: * Read data from EP 0x82 * Else: * If there is data to transmit: * Write it to EP 0x1 * * Read status byte from EP 0x82 * * References: * * The SCSI command set for the scanner is available from * ftp://ftp.microtek.com/microtek/devpack/ * * Microtek NV sent us a more up to date version of the document. If * you want it, just send mail. * * Status: * * Untested with multiple scanners. * Untested on SMP. * Untested on a bigendian machine. 
* * History: * * 20000417 starting history * 20000417 fixed load oops * 20000417 fixed unload oops * 20000419 fixed READ IMAGE detection * 20000424 started conversion to use URBs * 20000502 handled short transfers as errors * 20000513 rename and organisation of functions (john) * 20000513 added IDs for all products supported by Windows driver (john) * 20000514 Rewrote mts_scsi_queuecommand to use URBs (john) * 20000514 Version 0.0.8j * 20000514 Fix reporting of non-existant devices to SCSI layer (john) * 20000514 Added MTS_DEBUG_INT (john) * 20000514 Changed "usb-microtek" to "microtek" for consistency (john) * 20000514 Stupid bug fixes (john) * 20000514 Version 0.0.9j * 20000515 Put transfer context and URB in mts_desc (john) * 20000515 Added prelim turn off debugging support (john) * 20000515 Version 0.0.10j * 20000515 Fixed up URB allocation (clear URB on alloc) (john) * 20000515 Version 0.0.11j * 20000516 Removed unnecessary spinlock in mts_transfer_context (john) * 20000516 Removed unnecessary up on instance lock in mts_remove_nolock (john) * 20000516 Implemented (badly) scsi_abort (john) * 20000516 Version 0.0.12j * 20000517 Hopefully removed mts_remove_nolock quasideadlock (john) * 20000517 Added mts_debug_dump to print ll USB info (john) * 20000518 Tweaks and documentation updates (john) * 20000518 Version 0.0.13j * 20000518 Cleaned up abort handling (john) * 20000523 Removed scsi_command and various scsi_..._resets (john) * 20000523 Added unlink URB on scsi_abort, now OHCI supports it (john) * 20000523 Fixed last tiresome compile warning (john) * 20000523 Version 0.0.14j (though version 0.1 has come out?) 
* 20000602 Added primitive reset * 20000602 Version 0.2.0 * 20000603 various cosmetic changes * 20000603 Version 0.2.1 * 20000620 minor cosmetic changes * 20000620 Version 0.2.2 * 20000822 Hopefully fixed deadlock in mts_remove_nolock() * 20000822 Fixed minor race in mts_transfer_cleanup() * 20000822 Fixed deadlock on submission error in queuecommand * 20000822 Version 0.2.3 * 20000913 Reduced module size if debugging is off * 20000913 Version 0.2.4 * 20010210 New abort logic * 20010210 Version 0.3.0 * 20010217 Merged scatter/gather * 20010218 Version 0.4.0 * 20010218 Cosmetic fixes * 20010218 Version 0.4.1 * 20010306 Abort while using scatter/gather * 20010306 Version 0.4.2 * 20010311 Remove all timeouts and tidy up generally (john) * 20010320 check return value of scsi_register() * 20010320 Version 0.4.3 * 20010408 Identify version on module load. * 20011003 Fix multiple requests */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/random.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/usb.h> #include <linux/proc_fs.h> #include <asm/atomic.h> #include <linux/blkdev.h> #include "../../scsi/scsi.h" #include <scsi/scsi_host.h> #include "microtek.h" /* * Version Information */ #define DRIVER_VERSION "v0.4.3" #define DRIVER_AUTHOR "John Fremlin <vii@penguinpowered.com>, Oliver Neukum <Oliver.Neukum@lrz.uni-muenchen.de>" #define DRIVER_DESC "Microtek Scanmaker X6 USB scanner driver" /* Should we do debugging? 
*/ //#define MTS_DO_DEBUG /* USB layer driver interface */ static int mts_usb_probe(struct usb_interface *intf, const struct usb_device_id *id); static void mts_usb_disconnect(struct usb_interface *intf); static struct usb_device_id mts_usb_ids []; static struct usb_driver mts_usb_driver = { .name = "microtekX6", .probe = mts_usb_probe, .disconnect = mts_usb_disconnect, .id_table = mts_usb_ids, }; /* Internal driver stuff */ #define MTS_VERSION "0.4.3" #define MTS_NAME "microtek usb (rev " MTS_VERSION "): " #define MTS_WARNING(x...) \ printk( KERN_WARNING MTS_NAME x ) #define MTS_ERROR(x...) \ printk( KERN_ERR MTS_NAME x ) #define MTS_INT_ERROR(x...) \ MTS_ERROR(x) #define MTS_MESSAGE(x...) \ printk( KERN_INFO MTS_NAME x ) #if defined MTS_DO_DEBUG #define MTS_DEBUG(x...) \ printk( KERN_DEBUG MTS_NAME x ) #define MTS_DEBUG_GOT_HERE() \ MTS_DEBUG("got to %s:%d (%s)\n", __FILE__, (int)__LINE__, __func__ ) #define MTS_DEBUG_INT() \ do { MTS_DEBUG_GOT_HERE(); \ MTS_DEBUG("transfer = 0x%x context = 0x%x\n",(int)transfer,(int)context ); \ MTS_DEBUG("status = 0x%x data-length = 0x%x sent = 0x%x\n",transfer->status,(int)context->data_length, (int)transfer->actual_length ); \ mts_debug_dump(context->instance);\ } while(0) #else #define MTS_NUL_STATEMENT do { } while(0) #define MTS_DEBUG(x...) 
MTS_NUL_STATEMENT #define MTS_DEBUG_GOT_HERE() MTS_NUL_STATEMENT #define MTS_DEBUG_INT() MTS_NUL_STATEMENT #endif #define MTS_INT_INIT()\ struct mts_transfer_context* context = (struct mts_transfer_context*)transfer->context; \ MTS_DEBUG_INT();\ #ifdef MTS_DO_DEBUG static inline void mts_debug_dump(struct mts_desc* desc) { MTS_DEBUG("desc at 0x%x: toggle = %02x%02x\n", (int)desc, (int)desc->usb_dev->toggle[1],(int)desc->usb_dev->toggle[0] ); MTS_DEBUG("ep_out=%x ep_response=%x ep_image=%x\n", usb_sndbulkpipe(desc->usb_dev,desc->ep_out), usb_rcvbulkpipe(desc->usb_dev,desc->ep_response), usb_rcvbulkpipe(desc->usb_dev,desc->ep_image) ); } static inline void mts_show_command(struct scsi_cmnd *srb) { char *what = NULL; switch (srb->cmnd[0]) { case TEST_UNIT_READY: what = "TEST_UNIT_READY"; break; case REZERO_UNIT: what = "REZERO_UNIT"; break; case REQUEST_SENSE: what = "REQUEST_SENSE"; break; case FORMAT_UNIT: what = "FORMAT_UNIT"; break; case READ_BLOCK_LIMITS: what = "READ_BLOCK_LIMITS"; break; case REASSIGN_BLOCKS: what = "REASSIGN_BLOCKS"; break; case READ_6: what = "READ_6"; break; case WRITE_6: what = "WRITE_6"; break; case SEEK_6: what = "SEEK_6"; break; case READ_REVERSE: what = "READ_REVERSE"; break; case WRITE_FILEMARKS: what = "WRITE_FILEMARKS"; break; case SPACE: what = "SPACE"; break; case INQUIRY: what = "INQUIRY"; break; case RECOVER_BUFFERED_DATA: what = "RECOVER_BUFFERED_DATA"; break; case MODE_SELECT: what = "MODE_SELECT"; break; case RESERVE: what = "RESERVE"; break; case RELEASE: what = "RELEASE"; break; case COPY: what = "COPY"; break; case ERASE: what = "ERASE"; break; case MODE_SENSE: what = "MODE_SENSE"; break; case START_STOP: what = "START_STOP"; break; case RECEIVE_DIAGNOSTIC: what = "RECEIVE_DIAGNOSTIC"; break; case SEND_DIAGNOSTIC: what = "SEND_DIAGNOSTIC"; break; case ALLOW_MEDIUM_REMOVAL: what = "ALLOW_MEDIUM_REMOVAL"; break; case SET_WINDOW: what = "SET_WINDOW"; break; case READ_CAPACITY: what = "READ_CAPACITY"; break; case READ_10: what 
= "READ_10"; break; case WRITE_10: what = "WRITE_10"; break; case SEEK_10: what = "SEEK_10"; break; case WRITE_VERIFY: what = "WRITE_VERIFY"; break; case VERIFY: what = "VERIFY"; break; case SEARCH_HIGH: what = "SEARCH_HIGH"; break; case SEARCH_EQUAL: what = "SEARCH_EQUAL"; break; case SEARCH_LOW: what = "SEARCH_LOW"; break; case SET_LIMITS: what = "SET_LIMITS"; break; case READ_POSITION: what = "READ_POSITION"; break; case SYNCHRONIZE_CACHE: what = "SYNCHRONIZE_CACHE"; break; case LOCK_UNLOCK_CACHE: what = "LOCK_UNLOCK_CACHE"; break; case READ_DEFECT_DATA: what = "READ_DEFECT_DATA"; break; case MEDIUM_SCAN: what = "MEDIUM_SCAN"; break; case COMPARE: what = "COMPARE"; break; case COPY_VERIFY: what = "COPY_VERIFY"; break; case WRITE_BUFFER: what = "WRITE_BUFFER"; break; case READ_BUFFER: what = "READ_BUFFER"; break; case UPDATE_BLOCK: what = "UPDATE_BLOCK"; break; case READ_LONG: what = "READ_LONG"; break; case WRITE_LONG: what = "WRITE_LONG"; break; case CHANGE_DEFINITION: what = "CHANGE_DEFINITION"; break; case WRITE_SAME: what = "WRITE_SAME"; break; case READ_TOC: what = "READ_TOC"; break; case LOG_SELECT: what = "LOG_SELECT"; break; case LOG_SENSE: what = "LOG_SENSE"; break; case MODE_SELECT_10: what = "MODE_SELECT_10"; break; case MODE_SENSE_10: what = "MODE_SENSE_10"; break; case MOVE_MEDIUM: what = "MOVE_MEDIUM"; break; case READ_12: what = "READ_12"; break; case WRITE_12: what = "WRITE_12"; break; case WRITE_VERIFY_12: what = "WRITE_VERIFY_12"; break; case SEARCH_HIGH_12: what = "SEARCH_HIGH_12"; break; case SEARCH_EQUAL_12: what = "SEARCH_EQUAL_12"; break; case SEARCH_LOW_12: what = "SEARCH_LOW_12"; break; case READ_ELEMENT_STATUS: what = "READ_ELEMENT_STATUS"; break; case SEND_VOLUME_TAG: what = "SEND_VOLUME_TAG"; break; case WRITE_LONG_2: what = "WRITE_LONG_2"; break; default: MTS_DEBUG("can't decode command\n"); goto out; break; } MTS_DEBUG( "Command %s (%d bytes)\n", what, srb->cmd_len); out: MTS_DEBUG( " %02x %02x %02x %02x %02x %02x %02x %02x %02x 
%02x\n", srb->cmnd[0], srb->cmnd[1], srb->cmnd[2], srb->cmnd[3], srb->cmnd[4], srb->cmnd[5], srb->cmnd[6], srb->cmnd[7], srb->cmnd[8], srb->cmnd[9]); } #else static inline void mts_show_command(struct scsi_cmnd * dummy) { } static inline void mts_debug_dump(struct mts_desc* dummy) { } #endif static inline void mts_urb_abort(struct mts_desc* desc) { MTS_DEBUG_GOT_HERE(); mts_debug_dump(desc); usb_kill_urb( desc->urb ); } static int mts_slave_alloc (struct scsi_device *s) { s->inquiry_len = 0x24; return 0; } static int mts_slave_configure (struct scsi_device *s) { blk_queue_dma_alignment(s->request_queue, (512 - 1)); return 0; } static int mts_scsi_abort(struct scsi_cmnd *srb) { struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); MTS_DEBUG_GOT_HERE(); mts_urb_abort(desc); return FAILED; } static int mts_scsi_host_reset(struct scsi_cmnd *srb) { struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); int result; MTS_DEBUG_GOT_HERE(); mts_debug_dump(desc); result = usb_lock_device_for_reset(desc->usb_dev, desc->usb_intf); if (result == 0) { result = usb_reset_device(desc->usb_dev); usb_unlock_device(desc->usb_dev); } return result ? FAILED : SUCCESS; } static int mts_scsi_queuecommand(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback); static void mts_transfer_cleanup( struct urb *transfer ); static void mts_do_sg(struct urb * transfer); static inline void mts_int_submit_urb (struct urb* transfer, int pipe, void* data, unsigned length, usb_complete_t callback ) /* Interrupt context! */ /* Holding transfer->context->lock! */ { int res; MTS_INT_INIT(); usb_fill_bulk_urb(transfer, context->instance->usb_dev, pipe, data, length, callback, context ); res = usb_submit_urb( transfer, GFP_ATOMIC ); if ( unlikely(res) ) { MTS_INT_ERROR( "could not submit URB! 
Error was %d\n",(int)res ); context->srb->result = DID_ERROR << 16; mts_transfer_cleanup(transfer); } return; } static void mts_transfer_cleanup( struct urb *transfer ) /* Interrupt context! */ { MTS_INT_INIT(); if ( likely(context->final_callback != NULL) ) context->final_callback(context->srb); } static void mts_transfer_done( struct urb *transfer ) { MTS_INT_INIT(); context->srb->result &= MTS_SCSI_ERR_MASK; context->srb->result |= (unsigned)(*context->scsi_status)<<1; mts_transfer_cleanup(transfer); return; } static void mts_get_status( struct urb *transfer ) /* Interrupt context! */ { MTS_INT_INIT(); mts_int_submit_urb(transfer, usb_rcvbulkpipe(context->instance->usb_dev, context->instance->ep_response), context->scsi_status, 1, mts_transfer_done ); } static void mts_data_done( struct urb* transfer ) /* Interrupt context! */ { int status = transfer->status; MTS_INT_INIT(); if ( context->data_length != transfer->actual_length ) { scsi_set_resid(context->srb, context->data_length - transfer->actual_length); } else if ( unlikely(status) ) { context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; } mts_get_status(transfer); return; } static void mts_command_done( struct urb *transfer ) /* Interrupt context! */ { int status = transfer->status; MTS_INT_INIT(); if ( unlikely(status) ) { if (status == -ENOENT) { /* We are being killed */ MTS_DEBUG_GOT_HERE(); context->srb->result = DID_ABORT<<16; } else { /* A genuine error has occurred */ MTS_DEBUG_GOT_HERE(); context->srb->result = DID_ERROR<<16; } mts_transfer_cleanup(transfer); return; } if (context->srb->cmnd[0] == REQUEST_SENSE) { mts_int_submit_urb(transfer, context->data_pipe, context->srb->sense_buffer, context->data_length, mts_data_done); } else { if ( context->data ) { mts_int_submit_urb(transfer, context->data_pipe, context->data, context->data_length, scsi_sg_count(context->srb) > 1 ? 
mts_do_sg : mts_data_done); } else { mts_get_status(transfer); } } return; } static void mts_do_sg (struct urb* transfer) { struct scatterlist * sg; int status = transfer->status; MTS_INT_INIT(); MTS_DEBUG("Processing fragment %d of %d\n", context->fragment, scsi_sg_count(context->srb)); if (unlikely(status)) { context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; mts_transfer_cleanup(transfer); } sg = scsi_sglist(context->srb); context->fragment++; mts_int_submit_urb(transfer, context->data_pipe, sg_virt(&sg[context->fragment]), sg[context->fragment].length, context->fragment + 1 == scsi_sg_count(context->srb) ? mts_data_done : mts_do_sg); return; } static const u8 mts_read_image_sig[] = { 0x28, 00, 00, 00 }; static const u8 mts_read_image_sig_len = 4; static const unsigned char mts_direction[256/8] = { 0x28, 0x81, 0x14, 0x14, 0x20, 0x01, 0x90, 0x77, 0x0C, 0x20, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; #define MTS_DIRECTION_IS_IN(x) ((mts_direction[x>>3] >> (x & 7)) & 1) static void mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc) { int pipe; struct scatterlist * sg; MTS_DEBUG_GOT_HERE(); desc->context.instance = desc; desc->context.srb = srb; desc->context.fragment = 0; if (!scsi_bufflen(srb)) { desc->context.data = NULL; desc->context.data_length = 0; return; } else { sg = scsi_sglist(srb); desc->context.data = sg_virt(&sg[0]); desc->context.data_length = sg[0].length; } /* can't rely on srb->sc_data_direction */ /* Brutally ripped from usb-storage */ if ( !memcmp( srb->cmnd, mts_read_image_sig, mts_read_image_sig_len ) ) { pipe = usb_rcvbulkpipe(desc->usb_dev,desc->ep_image); MTS_DEBUG( "transfering from desc->ep_image == %d\n", (int)desc->ep_image ); } else if ( MTS_DIRECTION_IS_IN(srb->cmnd[0]) ) { pipe = usb_rcvbulkpipe(desc->usb_dev,desc->ep_response); MTS_DEBUG( "transfering from desc->ep_response == %d\n", 
(int)desc->ep_response); } else { MTS_DEBUG("transfering to desc->ep_out == %d\n", (int)desc->ep_out); pipe = usb_sndbulkpipe(desc->usb_dev,desc->ep_out); } desc->context.data_pipe = pipe; } static int mts_scsi_queuecommand(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback) { struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); int err = 0; int res; MTS_DEBUG_GOT_HERE(); mts_show_command(srb); mts_debug_dump(desc); if ( srb->device->lun || srb->device->id || srb->device->channel ) { MTS_DEBUG("Command to LUN=%d ID=%d CHANNEL=%d from SCSI layer\n",(int)srb->device->lun,(int)srb->device->id, (int)srb->device->channel ); MTS_DEBUG("this device doesn't exist\n"); srb->result = DID_BAD_TARGET << 16; if(likely(callback != NULL)) callback(srb); goto out; } usb_fill_bulk_urb(desc->urb, desc->usb_dev, usb_sndbulkpipe(desc->usb_dev,desc->ep_out), srb->cmnd, srb->cmd_len, mts_command_done, &desc->context ); mts_build_transfer_context( srb, desc ); desc->context.final_callback = callback; /* here we need ATOMIC as we are called with the iolock */ res=usb_submit_urb(desc->urb, GFP_ATOMIC); if(unlikely(res)){ MTS_ERROR("error %d submitting URB\n",(int)res); srb->result = DID_ERROR << 16; if(likely(callback != NULL)) callback(srb); } out: return err; } static struct scsi_host_template mts_scsi_host_template = { .module = THIS_MODULE, .name = "microtekX6", .proc_name = "microtekX6", .queuecommand = mts_scsi_queuecommand, .eh_abort_handler = mts_scsi_abort, .eh_host_reset_handler = mts_scsi_host_reset, .sg_tablesize = SG_ALL, .can_queue = 1, .this_id = -1, .cmd_per_lun = 1, .use_clustering = 1, .emulated = 1, .slave_alloc = mts_slave_alloc, .slave_configure = mts_slave_configure, .max_sectors= 256, /* 128 K */ }; /* The entries of microtek_table must correspond, line-by-line to the entries of mts_supported_products[]. 
*/ static struct usb_device_id mts_usb_ids [] = { { USB_DEVICE(0x4ce, 0x0300) }, { USB_DEVICE(0x5da, 0x0094) }, { USB_DEVICE(0x5da, 0x0099) }, { USB_DEVICE(0x5da, 0x009a) }, { USB_DEVICE(0x5da, 0x00a0) }, { USB_DEVICE(0x5da, 0x00a3) }, { USB_DEVICE(0x5da, 0x80a3) }, { USB_DEVICE(0x5da, 0x80ac) }, { USB_DEVICE(0x5da, 0x00b6) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, mts_usb_ids); static int mts_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { int i; int ep_out = -1; int ep_in_set[3]; /* this will break if we have more than three endpoints which is why we check */ int *ep_in_current = ep_in_set; int err_retval = -ENOMEM; struct mts_desc * new_desc; struct usb_device *dev = interface_to_usbdev (intf); /* the current altsetting on the interface we're probing */ struct usb_host_interface *altsetting; MTS_DEBUG_GOT_HERE(); MTS_DEBUG( "usb-device descriptor at %x\n", (int)dev ); MTS_DEBUG( "product id = 0x%x, vendor id = 0x%x\n", le16_to_cpu(dev->descriptor.idProduct), le16_to_cpu(dev->descriptor.idVendor) ); MTS_DEBUG_GOT_HERE(); /* the current altsetting on the interface we're probing */ altsetting = intf->cur_altsetting; /* Check if the config is sane */ if ( altsetting->desc.bNumEndpoints != MTS_EP_TOTAL ) { MTS_WARNING( "expecting %d got %d endpoints! Bailing out.\n", (int)MTS_EP_TOTAL, (int)altsetting->desc.bNumEndpoints ); return -ENODEV; } for( i = 0; i < altsetting->desc.bNumEndpoints; i++ ) { if ((altsetting->endpoint[i].desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_BULK) { MTS_WARNING( "can only deal with bulk endpoints; endpoint %d is not bulk.\n", (int)altsetting->endpoint[i].desc.bEndpointAddress ); } else { if (altsetting->endpoint[i].desc.bEndpointAddress & USB_DIR_IN) *ep_in_current++ = altsetting->endpoint[i].desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; else { if ( ep_out != -1 ) { MTS_WARNING( "can only deal with one output endpoints. Bailing out." 
); return -ENODEV; } ep_out = altsetting->endpoint[i].desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; } } } if ( ep_out == -1 ) { MTS_WARNING( "couldn't find an output bulk endpoint. Bailing out.\n" ); return -ENODEV; } new_desc = kzalloc(sizeof(struct mts_desc), GFP_KERNEL); if (!new_desc) goto out; new_desc->urb = usb_alloc_urb(0, GFP_KERNEL); if (!new_desc->urb) goto out_kfree; new_desc->context.scsi_status = kmalloc(1, GFP_KERNEL); if (!new_desc->context.scsi_status) goto out_free_urb; new_desc->usb_dev = dev; new_desc->usb_intf = intf; /* endpoints */ new_desc->ep_out = ep_out; new_desc->ep_response = ep_in_set[0]; new_desc->ep_image = ep_in_set[1]; if ( new_desc->ep_out != MTS_EP_OUT ) MTS_WARNING( "will this work? Command EP is not usually %d\n", (int)new_desc->ep_out ); if ( new_desc->ep_response != MTS_EP_RESPONSE ) MTS_WARNING( "will this work? Response EP is not usually %d\n", (int)new_desc->ep_response ); if ( new_desc->ep_image != MTS_EP_IMAGE ) MTS_WARNING( "will this work? 
Image data EP is not usually %d\n", (int)new_desc->ep_image ); new_desc->host = scsi_host_alloc(&mts_scsi_host_template, sizeof(new_desc)); if (!new_desc->host) goto out_kfree2; new_desc->host->hostdata[0] = (unsigned long)new_desc; if (scsi_add_host(new_desc->host, &dev->dev)) { err_retval = -EIO; goto out_host_put; } scsi_scan_host(new_desc->host); usb_set_intfdata(intf, new_desc); return 0; out_host_put: scsi_host_put(new_desc->host); out_kfree2: kfree(new_desc->context.scsi_status); out_free_urb: usb_free_urb(new_desc->urb); out_kfree: kfree(new_desc); out: return err_retval; } static void mts_usb_disconnect (struct usb_interface *intf) { struct mts_desc *desc = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); usb_kill_urb(desc->urb); scsi_remove_host(desc->host); scsi_host_put(desc->host); usb_free_urb(desc->urb); kfree(desc->context.scsi_status); kfree(desc); } static int __init microtek_drv_init(void) { return usb_register(&mts_usb_driver); } static void __exit microtek_drv_exit(void) { usb_deregister(&mts_usb_driver); } module_init(microtek_drv_init); module_exit(microtek_drv_exit); MODULE_AUTHOR( DRIVER_AUTHOR ); MODULE_DESCRIPTION( DRIVER_DESC ); MODULE_LICENSE("GPL");
gpl-2.0
snegovick/linux
tools/testing/selftests/mqueue/mq_perf_tests.c
811
21888
/* * This application is Copyright 2012 Red Hat, Inc. * Doug Ledford <dledford@redhat.com> * * mq_perf_tests is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 3. * * mq_perf_tests is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * For the full text of the license, see <http://www.gnu.org/licenses/>. * * mq_perf_tests.c * Tests various types of message queue workloads, concentrating on those * situations that invole large message sizes, large message queue depths, * or both, and reports back useful metrics about kernel message queue * performance. * */ #define _GNU_SOURCE #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <fcntl.h> #include <string.h> #include <limits.h> #include <errno.h> #include <signal.h> #include <pthread.h> #include <sched.h> #include <sys/types.h> #include <sys/time.h> #include <sys/resource.h> #include <sys/stat.h> #include <mqueue.h> #include <popt.h> static char *usage = "Usage:\n" " %s [-c #[,#..] -f] path\n" "\n" " -c # Skip most tests and go straight to a high queue depth test\n" " and then run that test continuously (useful for running at\n" " the same time as some other workload to see how much the\n" " cache thrashing caused by adding messages to a very deep\n" " queue impacts the performance of other programs). The number\n" " indicates which CPU core we should bind the process to during\n" " the run. If you have more than one physical CPU, then you\n" " will need one copy per physical CPU package, and you should\n" " specify the CPU cores to pin ourself to via a comma separated\n" " list of CPU values.\n" " -f Only usable with continuous mode. 
Pin ourself to the CPUs\n" " as requested, then instead of looping doing a high mq\n" " workload, just busy loop. This will allow us to lock up a\n" " single CPU just like we normally would, but without actually\n" " thrashing the CPU cache. This is to make it easier to get\n" " comparable numbers from some other workload running on the\n" " other CPUs. One set of numbers with # CPUs locked up running\n" " an mq workload, and another set of numbers with those same\n" " CPUs locked away from the test workload, but not doing\n" " anything to trash the cache like the mq workload might.\n" " path Path name of the message queue to create\n" "\n" " Note: this program must be run as root in order to enable all tests\n" "\n"; char *MAX_MSGS = "/proc/sys/fs/mqueue/msg_max"; char *MAX_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_max"; #define min(a, b) ((a) < (b) ? (a) : (b)) #define MAX_CPUS 64 char *cpu_option_string; int cpus_to_pin[MAX_CPUS]; int num_cpus_to_pin; pthread_t cpu_threads[MAX_CPUS]; pthread_t main_thread; cpu_set_t *cpu_set; int cpu_set_size; int cpus_online; #define MSG_SIZE 16 #define TEST1_LOOPS 10000000 #define TEST2_LOOPS 100000 int continuous_mode; int continuous_mode_fake; struct rlimit saved_limits, cur_limits; int saved_max_msgs, saved_max_msgsize; int cur_max_msgs, cur_max_msgsize; FILE *max_msgs, *max_msgsize; int cur_nice; char *queue_path = "/mq_perf_tests"; mqd_t queue = -1; struct mq_attr result; int mq_prio_max; const struct poptOption options[] = { { .longName = "continuous", .shortName = 'c', .argInfo = POPT_ARG_STRING, .arg = &cpu_option_string, .val = 'c', .descrip = "Run continuous tests at a high queue depth in " "order to test the effects of cache thrashing on " "other tasks on the system. This test is intended " "to be run on one core of each physical CPU while " "some other CPU intensive task is run on all the other " "cores of that same physical CPU and the other task " "is timed. 
It is assumed that the process of adding " "messages to the message queue in a tight loop will " "impact that other task to some degree. Once the " "tests are performed in this way, you should then " "re-run the tests using fake mode in order to check " "the difference in time required to perform the CPU " "intensive task", .argDescrip = "cpu[,cpu]", }, { .longName = "fake", .shortName = 'f', .argInfo = POPT_ARG_NONE, .arg = &continuous_mode_fake, .val = 0, .descrip = "Tie up the CPUs that we would normally tie up in" "continuous mode, but don't actually do any mq stuff, " "just keep the CPU busy so it can't be used to process " "system level tasks as this would free up resources on " "the other CPU cores and skew the comparison between " "the no-mqueue work and mqueue work tests", .argDescrip = NULL, }, { .longName = "path", .shortName = 'p', .argInfo = POPT_ARG_STRING | POPT_ARGFLAG_SHOW_DEFAULT, .arg = &queue_path, .val = 'p', .descrip = "The name of the path to use in the mqueue " "filesystem for our tests", .argDescrip = "pathname", }, POPT_AUTOHELP POPT_TABLEEND }; static inline void __set(FILE *stream, int value, char *err_msg); void shutdown(int exit_val, char *err_cause, int line_no); void sig_action_SIGUSR1(int signum, siginfo_t *info, void *context); void sig_action(int signum, siginfo_t *info, void *context); static inline int get(FILE *stream); static inline void set(FILE *stream, int value); static inline int try_set(FILE *stream, int value); static inline void getr(int type, struct rlimit *rlim); static inline void setr(int type, struct rlimit *rlim); static inline void open_queue(struct mq_attr *attr); void increase_limits(void); static inline void __set(FILE *stream, int value, char *err_msg) { rewind(stream); if (fprintf(stream, "%d", value) < 0) perror(err_msg); } void shutdown(int exit_val, char *err_cause, int line_no) { static int in_shutdown = 0; int errno_at_shutdown = errno; int i; /* In case we get called by multiple threads or from an 
sighandler */ if (in_shutdown++) return; for (i = 0; i < num_cpus_to_pin; i++) if (cpu_threads[i]) { pthread_kill(cpu_threads[i], SIGUSR1); pthread_join(cpu_threads[i], NULL); } if (queue != -1) if (mq_close(queue)) perror("mq_close() during shutdown"); if (queue_path) /* * Be silent if this fails, if we cleaned up already it's * expected to fail */ mq_unlink(queue_path); if (saved_max_msgs) __set(max_msgs, saved_max_msgs, "failed to restore saved_max_msgs"); if (saved_max_msgsize) __set(max_msgsize, saved_max_msgsize, "failed to restore saved_max_msgsize"); if (exit_val) error(exit_val, errno_at_shutdown, "%s at %d", err_cause, line_no); exit(0); } void sig_action_SIGUSR1(int signum, siginfo_t *info, void *context) { if (pthread_self() != main_thread) pthread_exit(0); else { fprintf(stderr, "Caught signal %d in SIGUSR1 handler, " "exiting\n", signum); shutdown(0, "", 0); fprintf(stderr, "\n\nReturned from shutdown?!?!\n\n"); exit(0); } } void sig_action(int signum, siginfo_t *info, void *context) { if (pthread_self() != main_thread) pthread_kill(main_thread, signum); else { fprintf(stderr, "Caught signal %d, exiting\n", signum); shutdown(0, "", 0); fprintf(stderr, "\n\nReturned from shutdown?!?!\n\n"); exit(0); } } static inline int get(FILE *stream) { int value; rewind(stream); if (fscanf(stream, "%d", &value) != 1) shutdown(4, "Error reading /proc entry", __LINE__); return value; } static inline void set(FILE *stream, int value) { int new_value; rewind(stream); if (fprintf(stream, "%d", value) < 0) return shutdown(5, "Failed writing to /proc file", __LINE__); new_value = get(stream); if (new_value != value) return shutdown(5, "We didn't get what we wrote to /proc back", __LINE__); } static inline int try_set(FILE *stream, int value) { int new_value; rewind(stream); fprintf(stream, "%d", value); new_value = get(stream); return new_value == value; } static inline void getr(int type, struct rlimit *rlim) { if (getrlimit(type, rlim)) shutdown(6, "getrlimit()", 
__LINE__); } static inline void setr(int type, struct rlimit *rlim) { if (setrlimit(type, rlim)) shutdown(7, "setrlimit()", __LINE__); } /** * open_queue - open the global queue for testing * @attr - An attr struct specifying the desired queue traits * @result - An attr struct that lists the actual traits the queue has * * This open is not allowed to fail, failure will result in an orderly * shutdown of the program. The global queue_path is used to set what * queue to open, the queue descriptor is saved in the global queue * variable. */ static inline void open_queue(struct mq_attr *attr) { int flags = O_RDWR | O_EXCL | O_CREAT | O_NONBLOCK; int perms = DEFFILEMODE; queue = mq_open(queue_path, flags, perms, attr); if (queue == -1) shutdown(1, "mq_open()", __LINE__); if (mq_getattr(queue, &result)) shutdown(1, "mq_getattr()", __LINE__); printf("\n\tQueue %s created:\n", queue_path); printf("\t\tmq_flags:\t\t\t%s\n", result.mq_flags & O_NONBLOCK ? "O_NONBLOCK" : "(null)"); printf("\t\tmq_maxmsg:\t\t\t%lu\n", result.mq_maxmsg); printf("\t\tmq_msgsize:\t\t\t%lu\n", result.mq_msgsize); printf("\t\tmq_curmsgs:\t\t\t%lu\n", result.mq_curmsgs); } void *fake_cont_thread(void *arg) { int i; for (i = 0; i < num_cpus_to_pin; i++) if (cpu_threads[i] == pthread_self()) break; printf("\tStarted fake continuous mode thread %d on CPU %d\n", i, cpus_to_pin[i]); while (1) ; } void *cont_thread(void *arg) { char buff[MSG_SIZE]; int i, priority; for (i = 0; i < num_cpus_to_pin; i++) if (cpu_threads[i] == pthread_self()) break; printf("\tStarted continuous mode thread %d on CPU %d\n", i, cpus_to_pin[i]); while (1) { while (mq_send(queue, buff, sizeof(buff), 0) == 0) ; mq_receive(queue, buff, sizeof(buff), &priority); } } #define drain_queue() \ while (mq_receive(queue, buff, MSG_SIZE, &prio_in) == MSG_SIZE) #define do_untimed_send() \ do { \ if (mq_send(queue, buff, MSG_SIZE, prio_out)) \ shutdown(3, "Test send failure", __LINE__); \ } while (0) #define do_send_recv() \ do { \ 
clock_gettime(clock, &start); \ if (mq_send(queue, buff, MSG_SIZE, prio_out)) \ shutdown(3, "Test send failure", __LINE__); \ clock_gettime(clock, &middle); \ if (mq_receive(queue, buff, MSG_SIZE, &prio_in) != MSG_SIZE) \ shutdown(3, "Test receive failure", __LINE__); \ clock_gettime(clock, &end); \ nsec = ((middle.tv_sec - start.tv_sec) * 1000000000) + \ (middle.tv_nsec - start.tv_nsec); \ send_total.tv_nsec += nsec; \ if (send_total.tv_nsec >= 1000000000) { \ send_total.tv_sec++; \ send_total.tv_nsec -= 1000000000; \ } \ nsec = ((end.tv_sec - middle.tv_sec) * 1000000000) + \ (end.tv_nsec - middle.tv_nsec); \ recv_total.tv_nsec += nsec; \ if (recv_total.tv_nsec >= 1000000000) { \ recv_total.tv_sec++; \ recv_total.tv_nsec -= 1000000000; \ } \ } while (0) struct test { char *desc; void (*func)(int *); }; void const_prio(int *prio) { return; } void inc_prio(int *prio) { if (++*prio == mq_prio_max) *prio = 0; } void dec_prio(int *prio) { if (--*prio < 0) *prio = mq_prio_max - 1; } void random_prio(int *prio) { *prio = random() % mq_prio_max; } struct test test2[] = { {"\n\tTest #2a: Time send/recv message, queue full, constant prio\n", const_prio}, {"\n\tTest #2b: Time send/recv message, queue full, increasing prio\n", inc_prio}, {"\n\tTest #2c: Time send/recv message, queue full, decreasing prio\n", dec_prio}, {"\n\tTest #2d: Time send/recv message, queue full, random prio\n", random_prio}, {NULL, NULL} }; /** * Tests to perform (all done with MSG_SIZE messages): * * 1) Time to add/remove message with 0 messages on queue * 1a) with constant prio * 2) Time to add/remove message when queue close to capacity: * 2a) with constant prio * 2b) with increasing prio * 2c) with decreasing prio * 2d) with random prio * 3) Test limits of priorities honored (double check _SC_MQ_PRIO_MAX) */ void *perf_test_thread(void *arg) { char buff[MSG_SIZE]; int prio_out, prio_in; int i; clockid_t clock; pthread_t *t; struct timespec res, start, middle, end, send_total, recv_total; unsigned 
long long nsec; struct test *cur_test; t = &cpu_threads[0]; printf("\n\tStarted mqueue performance test thread on CPU %d\n", cpus_to_pin[0]); mq_prio_max = sysconf(_SC_MQ_PRIO_MAX); if (mq_prio_max == -1) shutdown(2, "sysconf(_SC_MQ_PRIO_MAX)", __LINE__); if (pthread_getcpuclockid(cpu_threads[0], &clock) != 0) shutdown(2, "pthread_getcpuclockid", __LINE__); if (clock_getres(clock, &res)) shutdown(2, "clock_getres()", __LINE__); printf("\t\tMax priorities:\t\t\t%d\n", mq_prio_max); printf("\t\tClock resolution:\t\t%lu nsec%s\n", res.tv_nsec, res.tv_nsec > 1 ? "s" : ""); printf("\n\tTest #1: Time send/recv message, queue empty\n"); printf("\t\t(%d iterations)\n", TEST1_LOOPS); prio_out = 0; send_total.tv_sec = 0; send_total.tv_nsec = 0; recv_total.tv_sec = 0; recv_total.tv_nsec = 0; for (i = 0; i < TEST1_LOOPS; i++) do_send_recv(); printf("\t\tSend msg:\t\t\t%ld.%lus total time\n", send_total.tv_sec, send_total.tv_nsec); nsec = ((unsigned long long)send_total.tv_sec * 1000000000 + send_total.tv_nsec) / TEST1_LOOPS; printf("\t\t\t\t\t\t%lld nsec/msg\n", nsec); printf("\t\tRecv msg:\t\t\t%ld.%lus total time\n", recv_total.tv_sec, recv_total.tv_nsec); nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 + recv_total.tv_nsec) / TEST1_LOOPS; printf("\t\t\t\t\t\t%lld nsec/msg\n", nsec); for (cur_test = test2; cur_test->desc != NULL; cur_test++) { printf("%s:\n", cur_test->desc); printf("\t\t(%d iterations)\n", TEST2_LOOPS); prio_out = 0; send_total.tv_sec = 0; send_total.tv_nsec = 0; recv_total.tv_sec = 0; recv_total.tv_nsec = 0; printf("\t\tFilling queue..."); fflush(stdout); clock_gettime(clock, &start); for (i = 0; i < result.mq_maxmsg - 1; i++) { do_untimed_send(); cur_test->func(&prio_out); } clock_gettime(clock, &end); nsec = ((unsigned long long)(end.tv_sec - start.tv_sec) * 1000000000) + (end.tv_nsec - start.tv_nsec); printf("done.\t\t%lld.%llds\n", nsec / 1000000000, nsec % 1000000000); printf("\t\tTesting..."); fflush(stdout); for (i = 0; i < TEST2_LOOPS; 
i++) { do_send_recv(); cur_test->func(&prio_out); } printf("done.\n"); printf("\t\tSend msg:\t\t\t%ld.%lus total time\n", send_total.tv_sec, send_total.tv_nsec); nsec = ((unsigned long long)send_total.tv_sec * 1000000000 + send_total.tv_nsec) / TEST2_LOOPS; printf("\t\t\t\t\t\t%lld nsec/msg\n", nsec); printf("\t\tRecv msg:\t\t\t%ld.%lus total time\n", recv_total.tv_sec, recv_total.tv_nsec); nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 + recv_total.tv_nsec) / TEST2_LOOPS; printf("\t\t\t\t\t\t%lld nsec/msg\n", nsec); printf("\t\tDraining queue..."); fflush(stdout); clock_gettime(clock, &start); drain_queue(); clock_gettime(clock, &end); nsec = ((unsigned long long)(end.tv_sec - start.tv_sec) * 1000000000) + (end.tv_nsec - start.tv_nsec); printf("done.\t\t%lld.%llds\n", nsec / 1000000000, nsec % 1000000000); } return 0; } void increase_limits(void) { cur_limits.rlim_cur = RLIM_INFINITY; cur_limits.rlim_max = RLIM_INFINITY; setr(RLIMIT_MSGQUEUE, &cur_limits); while (try_set(max_msgs, cur_max_msgs += 10)) ; cur_max_msgs = get(max_msgs); while (try_set(max_msgsize, cur_max_msgsize += 1024)) ; cur_max_msgsize = get(max_msgsize); if (setpriority(PRIO_PROCESS, 0, -20) != 0) shutdown(2, "setpriority()", __LINE__); cur_nice = -20; } int main(int argc, char *argv[]) { struct mq_attr attr; char *option, *next_option; int i, cpu, rc; struct sigaction sa; poptContext popt_context; void *retval; main_thread = pthread_self(); num_cpus_to_pin = 0; if (sysconf(_SC_NPROCESSORS_ONLN) == -1) { perror("sysconf(_SC_NPROCESSORS_ONLN)"); exit(1); } cpus_online = min(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN)); cpu_set = CPU_ALLOC(cpus_online); if (cpu_set == NULL) { perror("CPU_ALLOC()"); exit(1); } cpu_set_size = CPU_ALLOC_SIZE(cpus_online); CPU_ZERO_S(cpu_set_size, cpu_set); popt_context = poptGetContext(NULL, argc, (const char **)argv, options, 0); while ((rc = poptGetNextOpt(popt_context)) > 0) { switch (rc) { case 'c': continuous_mode = 1; option = cpu_option_string; do { 
next_option = strchr(option, ','); if (next_option) *next_option = '\0'; cpu = atoi(option); if (cpu >= cpus_online) fprintf(stderr, "CPU %d exceeds " "cpus online, ignoring.\n", cpu); else cpus_to_pin[num_cpus_to_pin++] = cpu; if (next_option) option = ++next_option; } while (next_option && num_cpus_to_pin < MAX_CPUS); /* Double check that they didn't give us the same CPU * more than once */ for (cpu = 0; cpu < num_cpus_to_pin; cpu++) { if (CPU_ISSET_S(cpus_to_pin[cpu], cpu_set_size, cpu_set)) { fprintf(stderr, "Any given CPU may " "only be given once.\n"); exit(1); } else CPU_SET_S(cpus_to_pin[cpu], cpu_set_size, cpu_set); } break; case 'p': /* * Although we can create a msg queue with a * non-absolute path name, unlink will fail. So, * if the name doesn't start with a /, add one * when we save it. */ option = queue_path; if (*option != '/') { queue_path = malloc(strlen(option) + 2); if (!queue_path) { perror("malloc()"); exit(1); } queue_path[0] = '/'; queue_path[1] = 0; strcat(queue_path, option); free(option); } break; } } if (continuous_mode && num_cpus_to_pin == 0) { fprintf(stderr, "Must pass at least one CPU to continuous " "mode.\n"); poptPrintUsage(popt_context, stderr, 0); exit(1); } else if (!continuous_mode) { num_cpus_to_pin = 1; cpus_to_pin[0] = cpus_online - 1; } if (getuid() != 0) { fprintf(stderr, "Not running as root, but almost all tests " "require root in order to modify\nsystem settings. 
" "Exiting.\n"); exit(1); } max_msgs = fopen(MAX_MSGS, "r+"); max_msgsize = fopen(MAX_MSGSIZE, "r+"); if (!max_msgs) shutdown(2, "Failed to open msg_max", __LINE__); if (!max_msgsize) shutdown(2, "Failed to open msgsize_max", __LINE__); /* Load up the current system values for everything we can */ getr(RLIMIT_MSGQUEUE, &saved_limits); cur_limits = saved_limits; saved_max_msgs = cur_max_msgs = get(max_msgs); saved_max_msgsize = cur_max_msgsize = get(max_msgsize); errno = 0; cur_nice = getpriority(PRIO_PROCESS, 0); if (errno) shutdown(2, "getpriority()", __LINE__); /* Tell the user our initial state */ printf("\nInitial system state:\n"); printf("\tUsing queue path:\t\t\t%s\n", queue_path); printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%ld\n", (long) saved_limits.rlim_cur); printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%ld\n", (long) saved_limits.rlim_max); printf("\tMaximum Message Size:\t\t\t%d\n", saved_max_msgsize); printf("\tMaximum Queue Size:\t\t\t%d\n", saved_max_msgs); printf("\tNice value:\t\t\t\t%d\n", cur_nice); printf("\n"); increase_limits(); printf("Adjusted system state for testing:\n"); if (cur_limits.rlim_cur == RLIM_INFINITY) { printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t(unlimited)\n"); printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t(unlimited)\n"); } else { printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%ld\n", (long) cur_limits.rlim_cur); printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%ld\n", (long) cur_limits.rlim_max); } printf("\tMaximum Message Size:\t\t\t%d\n", cur_max_msgsize); printf("\tMaximum Queue Size:\t\t\t%d\n", cur_max_msgs); printf("\tNice value:\t\t\t\t%d\n", cur_nice); printf("\tContinuous mode:\t\t\t(%s)\n", continuous_mode ? (continuous_mode_fake ? 
"fake mode" : "enabled") : "disabled"); printf("\tCPUs to pin:\t\t\t\t%d", cpus_to_pin[0]); for (cpu = 1; cpu < num_cpus_to_pin; cpu++) printf(",%d", cpus_to_pin[cpu]); printf("\n"); sa.sa_sigaction = sig_action_SIGUSR1; sigemptyset(&sa.sa_mask); sigaddset(&sa.sa_mask, SIGHUP); sigaddset(&sa.sa_mask, SIGINT); sigaddset(&sa.sa_mask, SIGQUIT); sigaddset(&sa.sa_mask, SIGTERM); sa.sa_flags = SA_SIGINFO; if (sigaction(SIGUSR1, &sa, NULL) == -1) shutdown(1, "sigaction(SIGUSR1)", __LINE__); sa.sa_sigaction = sig_action; if (sigaction(SIGHUP, &sa, NULL) == -1) shutdown(1, "sigaction(SIGHUP)", __LINE__); if (sigaction(SIGINT, &sa, NULL) == -1) shutdown(1, "sigaction(SIGINT)", __LINE__); if (sigaction(SIGQUIT, &sa, NULL) == -1) shutdown(1, "sigaction(SIGQUIT)", __LINE__); if (sigaction(SIGTERM, &sa, NULL) == -1) shutdown(1, "sigaction(SIGTERM)", __LINE__); if (!continuous_mode_fake) { attr.mq_flags = O_NONBLOCK; attr.mq_maxmsg = cur_max_msgs; attr.mq_msgsize = MSG_SIZE; open_queue(&attr); } for (i = 0; i < num_cpus_to_pin; i++) { pthread_attr_t thread_attr; void *thread_func; if (continuous_mode_fake) thread_func = &fake_cont_thread; else if (continuous_mode) thread_func = &cont_thread; else thread_func = &perf_test_thread; CPU_ZERO_S(cpu_set_size, cpu_set); CPU_SET_S(cpus_to_pin[i], cpu_set_size, cpu_set); pthread_attr_init(&thread_attr); pthread_attr_setaffinity_np(&thread_attr, cpu_set_size, cpu_set); if (pthread_create(&cpu_threads[i], &thread_attr, thread_func, NULL)) shutdown(1, "pthread_create()", __LINE__); pthread_attr_destroy(&thread_attr); } if (!continuous_mode) { pthread_join(cpu_threads[0], &retval); shutdown((long)retval, "perf_test_thread()", __LINE__); } else { while (1) sleep(1); } shutdown(0, "", 0); }
gpl-2.0
zarboz/HTC-Jewel-Kernel-OC
net/caif/cfcnfg.c
811
15012
/* * Copyright (C) ST-Ericsson AB 2010 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com * License terms: GNU General Public License (GPL) version 2 */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/module.h> #include <net/caif/caif_layer.h> #include <net/caif/cfpkt.h> #include <net/caif/cfcnfg.h> #include <net/caif/cfctrl.h> #include <net/caif/cfmuxl.h> #include <net/caif/cffrml.h> #include <net/caif/cfserl.h> #include <net/caif/cfsrvl.h> #include <net/caif/caif_dev.h> #define container_obj(layr) container_of(layr, struct cfcnfg, layer) /* Information about CAIF physical interfaces held by Config Module in order * to manage physical interfaces */ struct cfcnfg_phyinfo { struct list_head node; bool up; /* Pointer to the layer below the MUX (framing layer) */ struct cflayer *frm_layer; /* Pointer to the lowest actual physical layer */ struct cflayer *phy_layer; /* Unique identifier of the physical interface */ unsigned int id; /* Preference of the physical in interface */ enum cfcnfg_phy_preference pref; /* Information about the physical device */ struct dev_info dev_info; /* Interface index */ int ifindex; /* Use Start of frame extension */ bool use_stx; /* Use Start of frame checksum */ bool use_fcs; }; struct cfcnfg { struct cflayer layer; struct cflayer *ctrl; struct cflayer *mux; struct list_head phys; struct mutex lock; }; static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, u8 phyid, struct cflayer *adapt_layer); static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id); static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id, struct cflayer *adapt_layer); static void cfctrl_resp_func(void); static void cfctrl_enum_resp(void); struct cfcnfg *cfcnfg_create(void) { struct cfcnfg *this; struct cfctrl_rsp *resp; might_sleep(); /* Initiate this layer */ this = 
kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC); if (!this) { pr_warn("Out of memory\n"); return NULL; } this->mux = cfmuxl_create(); if (!this->mux) goto out_of_mem; this->ctrl = cfctrl_create(); if (!this->ctrl) goto out_of_mem; /* Initiate response functions */ resp = cfctrl_get_respfuncs(this->ctrl); resp->enum_rsp = cfctrl_enum_resp; resp->linkerror_ind = cfctrl_resp_func; resp->linkdestroy_rsp = cfcnfg_linkdestroy_rsp; resp->sleep_rsp = cfctrl_resp_func; resp->wake_rsp = cfctrl_resp_func; resp->restart_rsp = cfctrl_resp_func; resp->radioset_rsp = cfctrl_resp_func; resp->linksetup_rsp = cfcnfg_linkup_rsp; resp->reject_rsp = cfcnfg_reject_rsp; INIT_LIST_HEAD(&this->phys); cfmuxl_set_uplayer(this->mux, this->ctrl, 0); layer_set_dn(this->ctrl, this->mux); layer_set_up(this->ctrl, this); mutex_init(&this->lock); return this; out_of_mem: pr_warn("Out of memory\n"); synchronize_rcu(); kfree(this->mux); kfree(this->ctrl); kfree(this); return NULL; } void cfcnfg_remove(struct cfcnfg *cfg) { might_sleep(); if (cfg) { synchronize_rcu(); kfree(cfg->mux); cfctrl_remove(cfg->ctrl); kfree(cfg); } } static void cfctrl_resp_func(void) { } static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo_rcu(struct cfcnfg *cnfg, u8 phyid) { struct cfcnfg_phyinfo *phy; list_for_each_entry_rcu(phy, &cnfg->phys, node) if (phy->id == phyid) return phy; return NULL; } static void cfctrl_enum_resp(void) { } static struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg, enum cfcnfg_phy_preference phy_pref) { /* Try to match with specified preference */ struct cfcnfg_phyinfo *phy; list_for_each_entry_rcu(phy, &cnfg->phys, node) { if (phy->up && phy->pref == phy_pref && phy->frm_layer != NULL) return &phy->dev_info; } /* Otherwise just return something */ list_for_each_entry_rcu(phy, &cnfg->phys, node) if (phy->up) return &phy->dev_info; return NULL; } static int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi) { struct cfcnfg_phyinfo *phy; list_for_each_entry_rcu(phy, &cnfg->phys, node) if (phy->ifindex 
== ifi && phy->up) return phy->id; return -ENODEV; } int caif_disconnect_client(struct net *net, struct cflayer *adap_layer) { u8 channel_id; struct cfcnfg *cfg = get_cfcnfg(net); caif_assert(adap_layer != NULL); cfctrl_cancel_req(cfg->ctrl, adap_layer); channel_id = adap_layer->id; if (channel_id != 0) { struct cflayer *servl; servl = cfmuxl_remove_uplayer(cfg->mux, channel_id); if (servl != NULL) layer_set_up(servl, NULL); } else pr_debug("nothing to disconnect\n"); cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer); /* Do RCU sync before initiating cleanup */ synchronize_rcu(); if (adap_layer->ctrlcmd != NULL) adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0); return 0; } EXPORT_SYMBOL(caif_disconnect_client); static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id) { } static const int protohead[CFCTRL_SRV_MASK] = { [CFCTRL_SRV_VEI] = 4, [CFCTRL_SRV_DATAGRAM] = 7, [CFCTRL_SRV_UTIL] = 4, [CFCTRL_SRV_RFM] = 3, [CFCTRL_SRV_DBG] = 3, }; static int caif_connect_req_to_link_param(struct cfcnfg *cnfg, struct caif_connect_request *s, struct cfctrl_link_param *l) { struct dev_info *dev_info; enum cfcnfg_phy_preference pref; int res; memset(l, 0, sizeof(*l)); /* In caif protocol low value is high priority */ l->priority = CAIF_PRIO_MAX - s->priority + 1; if (s->ifindex != 0) { res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex); if (res < 0) return res; l->phyid = res; } else { switch (s->link_selector) { case CAIF_LINK_HIGH_BANDW: pref = CFPHYPREF_HIGH_BW; break; case CAIF_LINK_LOW_LATENCY: pref = CFPHYPREF_LOW_LAT; break; default: return -EINVAL; } dev_info = cfcnfg_get_phyid(cnfg, pref); if (dev_info == NULL) return -ENODEV; l->phyid = dev_info->id; } switch (s->protocol) { case CAIFPROTO_AT: l->linktype = CFCTRL_SRV_VEI; l->endpoint = (s->sockaddr.u.at.type >> 2) & 0x3; l->chtype = s->sockaddr.u.at.type & 0x3; break; case CAIFPROTO_DATAGRAM: l->linktype = CFCTRL_SRV_DATAGRAM; l->chtype = 0x00; l->u.datagram.connid = 
s->sockaddr.u.dgm.connection_id; break; case CAIFPROTO_DATAGRAM_LOOP: l->linktype = CFCTRL_SRV_DATAGRAM; l->chtype = 0x03; l->endpoint = 0x00; l->u.datagram.connid = s->sockaddr.u.dgm.connection_id; break; case CAIFPROTO_RFM: l->linktype = CFCTRL_SRV_RFM; l->u.datagram.connid = s->sockaddr.u.rfm.connection_id; strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume, sizeof(l->u.rfm.volume)-1); l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0; break; case CAIFPROTO_UTIL: l->linktype = CFCTRL_SRV_UTIL; l->endpoint = 0x00; l->chtype = 0x00; strncpy(l->u.utility.name, s->sockaddr.u.util.service, sizeof(l->u.utility.name)-1); l->u.utility.name[sizeof(l->u.utility.name)-1] = 0; caif_assert(sizeof(l->u.utility.name) > 10); l->u.utility.paramlen = s->param.size; if (l->u.utility.paramlen > sizeof(l->u.utility.params)) l->u.utility.paramlen = sizeof(l->u.utility.params); memcpy(l->u.utility.params, s->param.data, l->u.utility.paramlen); break; case CAIFPROTO_DEBUG: l->linktype = CFCTRL_SRV_DBG; l->endpoint = s->sockaddr.u.dbg.service; l->chtype = s->sockaddr.u.dbg.type; break; default: return -EINVAL; } return 0; } int caif_connect_client(struct net *net, struct caif_connect_request *conn_req, struct cflayer *adap_layer, int *ifindex, int *proto_head, int *proto_tail) { struct cflayer *frml; struct cfcnfg_phyinfo *phy; int err; struct cfctrl_link_param param; struct cfcnfg *cfg = get_cfcnfg(net); caif_assert(cfg != NULL); rcu_read_lock(); err = caif_connect_req_to_link_param(cfg, conn_req, &param); if (err) goto unlock; phy = cfcnfg_get_phyinfo_rcu(cfg, param.phyid); if (!phy) { err = -ENODEV; goto unlock; } err = -EINVAL; if (adap_layer == NULL) { pr_err("adap_layer is zero\n"); goto unlock; } if (adap_layer->receive == NULL) { pr_err("adap_layer->receive is NULL\n"); goto unlock; } if (adap_layer->ctrlcmd == NULL) { pr_err("adap_layer->ctrlcmd == NULL\n"); goto unlock; } err = -ENODEV; frml = phy->frm_layer; if (frml == NULL) { pr_err("Specified PHY type does not exist!\n"); goto 
unlock; } caif_assert(param.phyid == phy->id); caif_assert(phy->frm_layer->id == param.phyid); caif_assert(phy->phy_layer->id == param.phyid); *ifindex = phy->ifindex; *proto_tail = 2; *proto_head = protohead[param.linktype] + (phy->use_stx ? 1 : 0); rcu_read_unlock(); /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */ cfctrl_enum_req(cfg->ctrl, param.phyid); return cfctrl_linkup_request(cfg->ctrl, &param, adap_layer); unlock: rcu_read_unlock(); return err; } EXPORT_SYMBOL(caif_connect_client); static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id, struct cflayer *adapt_layer) { if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL) adapt_layer->ctrlcmd(adapt_layer, CAIF_CTRLCMD_INIT_FAIL_RSP, 0); } static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, u8 phyid, struct cflayer *adapt_layer) { struct cfcnfg *cnfg = container_obj(layer); struct cflayer *servicel = NULL; struct cfcnfg_phyinfo *phyinfo; struct net_device *netdev; if (channel_id == 0) { pr_warn("received channel_id zero\n"); if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL) adapt_layer->ctrlcmd(adapt_layer, CAIF_CTRLCMD_INIT_FAIL_RSP, 0); return; } rcu_read_lock(); if (adapt_layer == NULL) { pr_debug("link setup response but no client exist," "send linkdown back\n"); cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL); goto unlock; } caif_assert(cnfg != NULL); caif_assert(phyid != 0); phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid); if (phyinfo == NULL) { pr_err("ERROR: Link Layer Device dissapeared" "while connecting\n"); goto unlock; } caif_assert(phyinfo != NULL); caif_assert(phyinfo->id == phyid); caif_assert(phyinfo->phy_layer != NULL); caif_assert(phyinfo->phy_layer->id == phyid); adapt_layer->id = channel_id; switch (serv) { case CFCTRL_SRV_VEI: servicel = cfvei_create(channel_id, &phyinfo->dev_info); break; case CFCTRL_SRV_DATAGRAM: servicel = cfdgml_create(channel_id, &phyinfo->dev_info); break; case CFCTRL_SRV_RFM: netdev = 
phyinfo->dev_info.dev; servicel = cfrfml_create(channel_id, &phyinfo->dev_info, netdev->mtu); break; case CFCTRL_SRV_UTIL: servicel = cfutill_create(channel_id, &phyinfo->dev_info); break; case CFCTRL_SRV_VIDEO: servicel = cfvidl_create(channel_id, &phyinfo->dev_info); break; case CFCTRL_SRV_DBG: servicel = cfdbgl_create(channel_id, &phyinfo->dev_info); break; default: pr_err("Protocol error. Link setup response " "- unknown channel type\n"); goto unlock; } if (!servicel) { pr_warn("Out of memory\n"); goto unlock; } layer_set_dn(servicel, cnfg->mux); cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id); layer_set_up(servicel, adapt_layer); layer_set_dn(adapt_layer, servicel); rcu_read_unlock(); servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0); return; unlock: rcu_read_unlock(); } void cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, struct net_device *dev, struct cflayer *phy_layer, enum cfcnfg_phy_preference pref, bool fcs, bool stx) { struct cflayer *frml; struct cflayer *phy_driver = NULL; struct cfcnfg_phyinfo *phyinfo; int i; u8 phyid; mutex_lock(&cnfg->lock); /* CAIF protocol allow maximum 6 link-layers */ for (i = 0; i < 7; i++) { phyid = (dev->ifindex + i) & 0x7; if (phyid == 0) continue; if (cfcnfg_get_phyinfo_rcu(cnfg, phyid) == NULL) goto got_phyid; } pr_warn("Too many CAIF Link Layers (max 6)\n"); goto out; got_phyid: phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC); switch (phy_type) { case CFPHYTYPE_FRAG: phy_driver = cfserl_create(CFPHYTYPE_FRAG, phyid, stx); if (!phy_driver) { pr_warn("Out of memory\n"); goto out; } break; case CFPHYTYPE_CAIF: phy_driver = NULL; break; default: goto out; } phy_layer->id = phyid; phyinfo->pref = pref; phyinfo->id = phyid; phyinfo->dev_info.id = phyid; phyinfo->dev_info.dev = dev; phyinfo->phy_layer = phy_layer; phyinfo->ifindex = dev->ifindex; phyinfo->use_stx = stx; phyinfo->use_fcs = fcs; frml = cffrml_create(phyid, fcs); if (!frml) { pr_warn("Out of memory\n"); 
kfree(phyinfo); goto out; } phyinfo->frm_layer = frml; layer_set_up(frml, cnfg->mux); if (phy_driver != NULL) { phy_driver->id = phyid; layer_set_dn(frml, phy_driver); layer_set_up(phy_driver, frml); layer_set_dn(phy_driver, phy_layer); layer_set_up(phy_layer, phy_driver); } else { layer_set_dn(frml, phy_layer); layer_set_up(phy_layer, frml); } list_add_rcu(&phyinfo->node, &cnfg->phys); out: mutex_unlock(&cnfg->lock); } EXPORT_SYMBOL(cfcnfg_add_phy_layer); int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer, bool up) { struct cfcnfg_phyinfo *phyinfo; rcu_read_lock(); phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phy_layer->id); if (phyinfo == NULL) { rcu_read_unlock(); return -ENODEV; } if (phyinfo->up == up) { rcu_read_unlock(); return 0; } phyinfo->up = up; if (up) { cffrml_hold(phyinfo->frm_layer); cfmuxl_set_dnlayer(cnfg->mux, phyinfo->frm_layer, phy_layer->id); } else { cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id); cffrml_put(phyinfo->frm_layer); } rcu_read_unlock(); return 0; } EXPORT_SYMBOL(cfcnfg_set_phy_state); int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer) { struct cflayer *frml, *frml_dn; u16 phyid; struct cfcnfg_phyinfo *phyinfo; might_sleep(); mutex_lock(&cnfg->lock); phyid = phy_layer->id; phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid); if (phyinfo == NULL) { mutex_unlock(&cnfg->lock); return 0; } caif_assert(phyid == phyinfo->id); caif_assert(phy_layer == phyinfo->phy_layer); caif_assert(phy_layer->id == phyid); caif_assert(phyinfo->frm_layer->id == phyid); list_del_rcu(&phyinfo->node); synchronize_rcu(); /* Fail if reference count is not zero */ if (cffrml_refcnt_read(phyinfo->frm_layer) != 0) { pr_info("Wait for device inuse\n"); list_add_rcu(&phyinfo->node, &cnfg->phys); mutex_unlock(&cnfg->lock); return -EAGAIN; } frml = phyinfo->frm_layer; frml_dn = frml->dn; cffrml_set_uplayer(frml, NULL); cffrml_set_dnlayer(frml, NULL); if (phy_layer != frml_dn) { layer_set_up(frml_dn, NULL); layer_set_dn(frml_dn, 
NULL); } layer_set_up(phy_layer, NULL); if (phyinfo->phy_layer != frml_dn) kfree(frml_dn); cffrml_free(frml); kfree(phyinfo); mutex_unlock(&cnfg->lock); return 0; } EXPORT_SYMBOL(cfcnfg_del_phy_layer);
gpl-2.0
DirtyUnicorns/android_kernel_lge_gee
drivers/video/msm/tvout_msm.c
1323
17843
/* Copyright (c) 2008-2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/mutex.h> #include <linux/delay.h> #include "msm_fb.h" #include "tvenc.h" #include "external_common.h" #define TVOUT_HPD_DUTY_CYCLE 3000 #define TV_DIMENSION_MAX_WIDTH 720 #define TV_DIMENSION_MAX_HEIGHT 576 struct tvout_msm_state_type { struct external_common_state_type common; struct platform_device *pdev; struct timer_list hpd_state_timer; struct timer_list hpd_work_timer; struct work_struct hpd_work; uint32 hpd_int_status; uint32 prev_hpd_int_status; uint32 five_retry; int irq; uint16 y_res; boolean hpd_initialized; boolean disp_powered_up; #ifdef CONFIG_SUSPEND boolean pm_suspended; #endif }; static struct tvout_msm_state_type *tvout_msm_state; static DEFINE_MUTEX(tvout_msm_state_mutex); static int tvout_off(struct platform_device *pdev); static int tvout_on(struct platform_device *pdev); static void tvout_check_status(void); static void tvout_msm_turn_on(boolean power_on) { uint32 reg_val = 0; reg_val = TV_IN(TV_ENC_CTL); if (power_on) { DEV_DBG("%s: TV Encoder turned on\n", __func__); reg_val |= TVENC_CTL_ENC_EN; } else { DEV_DBG("%s: TV Encoder turned off\n", __func__); reg_val = 0; } /* Enable TV Encoder*/ TV_OUT(TV_ENC_CTL, reg_val); } static void tvout_check_status() { tvout_msm_state->hpd_int_status &= 0x05; /* hpd_int_status could either be 0x05 or 0x04 for a cable plug-out event when cable detect is driven by polling. 
*/ if ((((tvout_msm_state->hpd_int_status == 0x05) || (tvout_msm_state->hpd_int_status == 0x04)) && (tvout_msm_state->prev_hpd_int_status == BIT(2))) || ((tvout_msm_state->hpd_int_status == 0x01) && (tvout_msm_state->prev_hpd_int_status == BIT(0)))) { DEV_DBG("%s: cable event sent already!", __func__); return; } if (tvout_msm_state->hpd_int_status & BIT(2)) { DEV_DBG("%s: cable plug-out\n", __func__); mutex_lock(&external_common_state_hpd_mutex); external_common_state->hpd_state = FALSE; mutex_unlock(&external_common_state_hpd_mutex); kobject_uevent(external_common_state->uevent_kobj, KOBJ_OFFLINE); tvout_msm_state->prev_hpd_int_status = BIT(2); } else if (tvout_msm_state->hpd_int_status & BIT(0)) { DEV_DBG("%s: cable plug-in\n", __func__); mutex_lock(&external_common_state_hpd_mutex); external_common_state->hpd_state = TRUE; mutex_unlock(&external_common_state_hpd_mutex); kobject_uevent(external_common_state->uevent_kobj, KOBJ_ONLINE); tvout_msm_state->prev_hpd_int_status = BIT(0); } } /* ISR for TV out cable detect */ static irqreturn_t tvout_msm_isr(int irq, void *dev_id) { tvout_msm_state->hpd_int_status = TV_IN(TV_INTR_STATUS); TV_OUT(TV_INTR_CLEAR, tvout_msm_state->hpd_int_status); DEV_DBG("%s: ISR: 0x%02x\n", __func__, tvout_msm_state->hpd_int_status & 0x05); if (tvenc_pdata->poll) if (!tvout_msm_state || !tvout_msm_state->disp_powered_up) { DEV_DBG("%s: ISR ignored, display not yet powered on\n", __func__); return IRQ_HANDLED; } if (tvout_msm_state->hpd_int_status & BIT(0) || tvout_msm_state->hpd_int_status & BIT(2)) { /* Use .75sec to debounce the interrupt */ mod_timer(&tvout_msm_state->hpd_state_timer, jiffies + msecs_to_jiffies(750)); } return IRQ_HANDLED; } /* Interrupt debounce timer */ static void tvout_msm_hpd_state_timer(unsigned long data) { #ifdef CONFIG_SUSPEND mutex_lock(&tvout_msm_state_mutex); if (tvout_msm_state->pm_suspended) { mutex_unlock(&tvout_msm_state_mutex); DEV_WARN("%s: ignored, pm_suspended\n", __func__); return; } 
mutex_unlock(&tvout_msm_state_mutex); #endif if (tvenc_pdata->poll) if (!tvout_msm_state || !tvout_msm_state->disp_powered_up) { DEV_DBG("%s: ignored, display powered off\n", __func__); return; } /* TV_INTR_STATUS[0x204] When a TV_ENC interrupt occurs, then reading this register will indicate what caused the interrupt since that each bit indicates the source of the interrupt that had happened. If multiple interrupt sources had happened, then multiple bits of this register will be set Bit 0 : Load present on Video1 Bit 1 : Load present on Video2 Bit 2 : Load removed on Video1 Bit 3 : Load removed on Video2 */ /* Locking interrupt status is not required because last status read after debouncing is used */ if ((tvout_msm_state->hpd_int_status & 0x05) == 0x05) { /* SW-workaround :If the status read after debouncing is 0x05(indicating both load present & load removed- which can't happen in reality), force an update. If status remains 0x05 after retry, it's a cable unplug event */ if (++tvout_msm_state->five_retry < 2) { uint32 reg; DEV_DBG("tvout: Timer: 0x05\n"); TV_OUT(TV_INTR_CLEAR, 0xf); reg = TV_IN(TV_DAC_INTF); TV_OUT(TV_DAC_INTF, reg & ~TVENC_LOAD_DETECT_EN); TV_OUT(TV_INTR_CLEAR, 0xf); reg = TV_IN(TV_DAC_INTF); TV_OUT(TV_DAC_INTF, reg | TVENC_LOAD_DETECT_EN); return; } } tvout_msm_state->five_retry = 0; tvout_check_status(); } static void tvout_msm_hpd_work(struct work_struct *work) { uint32 reg; #ifdef CONFIG_SUSPEND mutex_lock(&tvout_msm_state_mutex); if (tvout_msm_state->pm_suspended) { mutex_unlock(&tvout_msm_state_mutex); DEV_WARN("%s: ignored, pm_suspended\n", __func__); return; } mutex_unlock(&tvout_msm_state_mutex); #endif /* Enable power lines & clocks */ tvenc_pdata->pm_vid_en(1); tvenc_set_clock(CLOCK_ON); /* Enable encoder to get a stable interrupt */ reg = TV_IN(TV_ENC_CTL); TV_OUT(TV_ENC_CTL, reg | TVENC_CTL_ENC_EN); /* SW- workaround to update status register */ reg = TV_IN(TV_DAC_INTF); TV_OUT(TV_DAC_INTF, reg & ~TVENC_LOAD_DETECT_EN); 
TV_OUT(TV_INTR_CLEAR, 0xf); reg = TV_IN(TV_DAC_INTF); TV_OUT(TV_DAC_INTF, reg | TVENC_LOAD_DETECT_EN); tvout_msm_state->hpd_int_status = TV_IN(TV_INTR_STATUS); /* Disable TV encoder */ reg = TV_IN(TV_ENC_CTL); TV_OUT(TV_ENC_CTL, reg & ~TVENC_CTL_ENC_EN); /*Disable power lines & clocks */ tvenc_set_clock(CLOCK_OFF); tvenc_pdata->pm_vid_en(0); DEV_DBG("%s: ISR: 0x%02x\n", __func__, tvout_msm_state->hpd_int_status & 0x05); mod_timer(&tvout_msm_state->hpd_work_timer, jiffies + msecs_to_jiffies(TVOUT_HPD_DUTY_CYCLE)); tvout_check_status(); } static void tvout_msm_hpd_work_timer(unsigned long data) { schedule_work(&tvout_msm_state->hpd_work); } static int tvout_on(struct platform_device *pdev) { uint32 reg = 0; uint32 userformat = 0; struct fb_var_screeninfo *var; struct msm_fb_data_type *mfd = platform_get_drvdata(pdev); if (!mfd) return -ENODEV; if (mfd->key != MFD_KEY) return -EINVAL; #ifdef CONFIG_SUSPEND mutex_lock(&tvout_msm_state_mutex); if (tvout_msm_state->pm_suspended) { mutex_unlock(&tvout_msm_state_mutex); DEV_WARN("%s: ignored, pm_suspended\n", __func__); return -ENODEV; } mutex_unlock(&tvout_msm_state_mutex); #endif var = &mfd->fbi->var; userformat = var->reserved[3] >> 16; if (userformat >= NTSC_M && userformat <= PAL_N) external_common_state->video_resolution = userformat; tvout_msm_state->pdev = pdev; if (del_timer(&tvout_msm_state->hpd_work_timer)) DEV_DBG("%s: work timer stopped\n", __func__); TV_OUT(TV_ENC_CTL, 0); /* disable TV encoder */ switch (external_common_state->video_resolution) { case NTSC_M: case NTSC_J: TV_OUT(TV_CGMS, 0x0); /* NTSC Timing */ TV_OUT(TV_SYNC_1, 0x0020009e); TV_OUT(TV_SYNC_2, 0x011306B4); TV_OUT(TV_SYNC_3, 0x0006000C); TV_OUT(TV_SYNC_4, 0x0028020D); TV_OUT(TV_SYNC_5, 0x005E02FB); TV_OUT(TV_SYNC_6, 0x0006000C); TV_OUT(TV_SYNC_7, 0x00000012); TV_OUT(TV_BURST_V1, 0x0013020D); TV_OUT(TV_BURST_V2, 0x0014020C); TV_OUT(TV_BURST_V3, 0x0013020D); TV_OUT(TV_BURST_V4, 0x0014020C); TV_OUT(TV_BURST_H, 0x00AE00F2); TV_OUT(TV_SOL_REQ_ODD, 
0x00280208); TV_OUT(TV_SOL_REQ_EVEN, 0x00290209); reg |= TVENC_CTL_TV_MODE_NTSC_M_PAL60; if (external_common_state->video_resolution == NTSC_M) { /* Cr gain 11, Cb gain C6, y_gain 97 */ TV_OUT(TV_GAIN, 0x0081B697); } else { /* Cr gain 11, Cb gain C6, y_gain 97 */ TV_OUT(TV_GAIN, 0x008bc4a3); reg |= TVENC_CTL_NTSCJ_MODE; } var->yres = 480; break; case PAL_BDGHIN: case PAL_N: /* PAL Timing */ TV_OUT(TV_SYNC_1, 0x00180097); TV_OUT(TV_SYNC_3, 0x0005000a); TV_OUT(TV_SYNC_4, 0x00320271); TV_OUT(TV_SYNC_5, 0x005602f9); TV_OUT(TV_SYNC_6, 0x0005000a); TV_OUT(TV_SYNC_7, 0x0000000f); TV_OUT(TV_BURST_V1, 0x0012026e); TV_OUT(TV_BURST_V2, 0x0011026d); TV_OUT(TV_BURST_V3, 0x00100270); TV_OUT(TV_BURST_V4, 0x0013026f); TV_OUT(TV_SOL_REQ_ODD, 0x0030026e); TV_OUT(TV_SOL_REQ_EVEN, 0x0031026f); if (external_common_state->video_resolution == PAL_BDGHIN) { /* Cr gain 11, Cb gain C6, y_gain 97 */ TV_OUT(TV_GAIN, 0x0088c1a0); TV_OUT(TV_CGMS, 0x00012345); TV_OUT(TV_SYNC_2, 0x011f06c0); TV_OUT(TV_BURST_H, 0x00af00ea); reg |= TVENC_CTL_TV_MODE_PAL_BDGHIN; } else { /* Cr gain 11, Cb gain C6, y_gain 97 */ TV_OUT(TV_GAIN, 0x0081b697); TV_OUT(TV_CGMS, 0x000af317); TV_OUT(TV_SYNC_2, 0x12006c0); TV_OUT(TV_BURST_H, 0x00af00fa); reg |= TVENC_CTL_TV_MODE_PAL_N; } var->yres = 576; break; case PAL_M: /* Cr gain 11, Cb gain C6, y_gain 97 */ TV_OUT(TV_GAIN, 0x0081b697); TV_OUT(TV_CGMS, 0x000af317); TV_OUT(TV_TEST_MUX, 0x000001c3); TV_OUT(TV_TEST_MODE, 0x00000002); /* PAL Timing */ TV_OUT(TV_SYNC_1, 0x0020009e); TV_OUT(TV_SYNC_2, 0x011306b4); TV_OUT(TV_SYNC_3, 0x0006000c); TV_OUT(TV_SYNC_4, 0x0028020D); TV_OUT(TV_SYNC_5, 0x005e02fb); TV_OUT(TV_SYNC_6, 0x0006000c); TV_OUT(TV_SYNC_7, 0x00000012); TV_OUT(TV_BURST_V1, 0x0012020b); TV_OUT(TV_BURST_V2, 0x0016020c); TV_OUT(TV_BURST_V3, 0x00150209); TV_OUT(TV_BURST_V4, 0x0013020c); TV_OUT(TV_BURST_H, 0x00bf010b); TV_OUT(TV_SOL_REQ_ODD, 0x00280208); TV_OUT(TV_SOL_REQ_EVEN, 0x00290209); reg |= TVENC_CTL_TV_MODE_PAL_M; var->yres = 480; break; default: return -ENODEV; 
} reg |= TVENC_CTL_Y_FILTER_EN | TVENC_CTL_CR_FILTER_EN | TVENC_CTL_CB_FILTER_EN | TVENC_CTL_SINX_FILTER_EN; /* DC offset to 0. */ TV_OUT(TV_LEVEL, 0x00000000); TV_OUT(TV_OFFSET, 0x008080f0); #ifdef CONFIG_FB_MSM_TVOUT_SVIDEO reg |= TVENC_CTL_S_VIDEO_EN; #endif #if defined(CONFIG_FB_MSM_MDP31) TV_OUT(TV_DAC_INTF, 0x29); #endif TV_OUT(TV_ENC_CTL, reg); if (!tvout_msm_state->hpd_initialized) { tvout_msm_state->hpd_initialized = TRUE; /* Load detect enable */ reg = TV_IN(TV_DAC_INTF); reg |= TVENC_LOAD_DETECT_EN; TV_OUT(TV_DAC_INTF, reg); } tvout_msm_state->disp_powered_up = TRUE; tvout_msm_turn_on(TRUE); if (tvenc_pdata->poll) { /* Enable Load present & removal interrupts for Video1 */ TV_OUT(TV_INTR_ENABLE, 0x5); /* Enable interrupts when display is on */ enable_irq(tvout_msm_state->irq); } return 0; } static int tvout_off(struct platform_device *pdev) { /* Disable TV encoder irqs when display is off */ if (tvenc_pdata->poll) disable_irq(tvout_msm_state->irq); tvout_msm_turn_on(FALSE); tvout_msm_state->hpd_initialized = FALSE; tvout_msm_state->disp_powered_up = FALSE; if (tvenc_pdata->poll) { mod_timer(&tvout_msm_state->hpd_work_timer, jiffies + msecs_to_jiffies(TVOUT_HPD_DUTY_CYCLE)); } return 0; } static int __devinit tvout_probe(struct platform_device *pdev) { int rc = 0; uint32 reg; struct platform_device *fb_dev; #ifdef CONFIG_FB_MSM_TVOUT_NTSC_M external_common_state->video_resolution = NTSC_M; #elif defined CONFIG_FB_MSM_TVOUT_NTSC_J external_common_state->video_resolution = NTSC_J; #elif defined CONFIG_FB_MSM_TVOUT_PAL_M external_common_state->video_resolution = PAL_M; #elif defined CONFIG_FB_MSM_TVOUT_PAL_N external_common_state->video_resolution = PAL_N; #elif defined CONFIG_FB_MSM_TVOUT_PAL_BDGHIN external_common_state->video_resolution = PAL_BDGHIN; #endif external_common_state->dev = &pdev->dev; if (pdev->id == 0) { struct resource *res; #define GET_RES(name, mode) do { \ res = platform_get_resource_byname(pdev, mode, name); \ if (!res) { \ DEV_DBG("'" 
name "' resource not found\n"); \ rc = -ENODEV; \ goto error; \ } \ } while (0) #define GET_IRQ(var, name) do { \ GET_RES(name, IORESOURCE_IRQ); \ var = res->start; \ } while (0) GET_IRQ(tvout_msm_state->irq, "tvout_device_irq"); #undef GET_IRQ #undef GET_RES return 0; } DEV_DBG("%s: tvout_msm_state->irq : %d", __func__, tvout_msm_state->irq); rc = request_irq(tvout_msm_state->irq, &tvout_msm_isr, IRQF_TRIGGER_HIGH, "tvout_msm_isr", NULL); if (rc) { DEV_DBG("Init FAILED: IRQ request, rc=%d\n", rc); goto error; } disable_irq(tvout_msm_state->irq); init_timer(&tvout_msm_state->hpd_state_timer); tvout_msm_state->hpd_state_timer.function = tvout_msm_hpd_state_timer; tvout_msm_state->hpd_state_timer.data = (uint32)NULL; tvout_msm_state->hpd_state_timer.expires = jiffies + msecs_to_jiffies(1000); if (tvenc_pdata->poll) { init_timer(&tvout_msm_state->hpd_work_timer); tvout_msm_state->hpd_work_timer.function = tvout_msm_hpd_work_timer; tvout_msm_state->hpd_work_timer.data = (uint32)NULL; tvout_msm_state->hpd_work_timer.expires = jiffies + msecs_to_jiffies(1000); } fb_dev = msm_fb_add_device(pdev); if (fb_dev) { rc = external_common_state_create(fb_dev); if (rc) { DEV_ERR("Init FAILED: tvout_msm_state_create, rc=%d\n", rc); goto error; } if (tvenc_pdata->poll) { /* Start polling timer to detect load */ mod_timer(&tvout_msm_state->hpd_work_timer, jiffies + msecs_to_jiffies(TVOUT_HPD_DUTY_CYCLE)); } else { /* Enable interrupt to detect load */ tvenc_set_encoder_clock(CLOCK_ON); reg = TV_IN(TV_DAC_INTF); reg |= TVENC_LOAD_DETECT_EN; TV_OUT(TV_DAC_INTF, reg); TV_OUT(TV_INTR_ENABLE, 0x5); enable_irq(tvout_msm_state->irq); } } else DEV_ERR("Init FAILED: failed to add fb device\n"); error: return 0; } static int __devexit tvout_remove(struct platform_device *pdev) { external_common_state_remove(); kfree(tvout_msm_state); tvout_msm_state = NULL; return 0; } #ifdef CONFIG_SUSPEND static int tvout_device_pm_suspend(struct device *dev) { mutex_lock(&tvout_msm_state_mutex); if 
(tvout_msm_state->pm_suspended) { mutex_unlock(&tvout_msm_state_mutex); return 0; } if (tvenc_pdata->poll) { if (del_timer(&tvout_msm_state->hpd_work_timer)) DEV_DBG("%s: suspending cable detect timer\n", __func__); } else { disable_irq(tvout_msm_state->irq); tvenc_set_encoder_clock(CLOCK_OFF); } tvout_msm_state->pm_suspended = TRUE; mutex_unlock(&tvout_msm_state_mutex); return 0; } static int tvout_device_pm_resume(struct device *dev) { mutex_lock(&tvout_msm_state_mutex); if (!tvout_msm_state->pm_suspended) { mutex_unlock(&tvout_msm_state_mutex); return 0; } if (tvenc_pdata->poll) { tvout_msm_state->pm_suspended = FALSE; mod_timer(&tvout_msm_state->hpd_work_timer, jiffies + msecs_to_jiffies(TVOUT_HPD_DUTY_CYCLE)); mutex_unlock(&tvout_msm_state_mutex); DEV_DBG("%s: resuming cable detect timer\n", __func__); } else { tvenc_set_encoder_clock(CLOCK_ON); tvout_msm_state->pm_suspended = FALSE; mutex_unlock(&tvout_msm_state_mutex); enable_irq(tvout_msm_state->irq); DEV_DBG("%s: enable cable detect interrupt\n", __func__); } return 0; } #else #define tvout_device_pm_suspend NULL #define tvout_device_pm_resume NULL #endif static const struct dev_pm_ops tvout_device_pm_ops = { .suspend = tvout_device_pm_suspend, .resume = tvout_device_pm_resume, }; static struct platform_driver this_driver = { .probe = tvout_probe, .remove = tvout_remove, .driver = { .name = "tvout_device", .pm = &tvout_device_pm_ops, }, }; static struct msm_fb_panel_data tvout_panel_data = { .panel_info.xres = TV_DIMENSION_MAX_WIDTH, .panel_info.yres = TV_DIMENSION_MAX_HEIGHT, .panel_info.type = TV_PANEL, .panel_info.pdest = DISPLAY_2, .panel_info.wait_cycle = 0, #ifdef CONFIG_FB_MSM_MDP40 .panel_info.bpp = 24, #else .panel_info.bpp = 16, #endif .panel_info.fb_num = 2, .on = tvout_on, .off = tvout_off, }; static struct platform_device this_device = { .name = "tvout_device", .id = 1, .dev = { .platform_data = &tvout_panel_data, } }; static int __init tvout_init(void) { int ret; if 
(msm_fb_detect_client("tvout_msm")) return 0; tvout_msm_state = kzalloc(sizeof(*tvout_msm_state), GFP_KERNEL); if (!tvout_msm_state) { DEV_ERR("tvout_msm_init FAILED: out of memory\n"); ret = -ENOMEM; goto init_exit; } external_common_state = &tvout_msm_state->common; ret = platform_driver_register(&this_driver); if (ret) { DEV_ERR("tvout_device_init FAILED: platform_driver_register\ rc=%d\n", ret); goto init_exit; } ret = platform_device_register(&this_device); if (ret) { DEV_ERR("tvout_device_init FAILED: platform_driver_register\ rc=%d\n", ret); platform_driver_unregister(&this_driver); goto init_exit; } INIT_WORK(&tvout_msm_state->hpd_work, tvout_msm_hpd_work); return 0; init_exit: kfree(tvout_msm_state); tvout_msm_state = NULL; return ret; } static void __exit tvout_exit(void) { platform_device_unregister(&this_device); platform_driver_unregister(&this_driver); } module_init(tvout_init); module_exit(tvout_exit); MODULE_LICENSE("GPL v2"); MODULE_VERSION("1.0"); MODULE_AUTHOR("Qualcomm Innovation Center, Inc."); MODULE_DESCRIPTION("TV out driver");
gpl-2.0
vic3t3chn0/kernel_ubuntu_togari
arch/mips/lantiq/xway/devices.c
4651
2864
/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mtd/physmap.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/etherdevice.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/gpio.h>

#include <asm/bootinfo.h>
#include <asm/irq.h>

#include <lantiq_soc.h>
#include <lantiq_irq.h>
#include <lantiq_platform.h>

#include "devices.h"

/* gpio */
/* MMIO register windows for the up to three on-SoC GPIO banks. */
static struct resource ltq_gpio_resource[] = {
	{
		.name	= "gpio0",
		.start	= LTQ_GPIO0_BASE_ADDR,
		.end	= LTQ_GPIO0_BASE_ADDR + LTQ_GPIO_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "gpio1",
		.start	= LTQ_GPIO1_BASE_ADDR,
		.end	= LTQ_GPIO1_BASE_ADDR + LTQ_GPIO_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "gpio2",
		.start	= LTQ_GPIO2_BASE_ADDR,
		.end	= LTQ_GPIO2_BASE_ADDR + LTQ_GPIO_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	}
};

/*
 * Register the "ltq_gpio" platform devices for the SoC's GPIO banks.
 * All XWAY SoCs have banks 0 and 1; AR9 and VR9 additionally have a
 * third bank.
 */
void __init ltq_register_gpio(void)
{
	platform_device_register_simple("ltq_gpio", 0,
		&ltq_gpio_resource[0], 1);
	platform_device_register_simple("ltq_gpio", 1,
		&ltq_gpio_resource[1], 1);

	/* AR9 and VR9 have an extra gpio block */
	if (ltq_is_ar9() || ltq_is_vr9()) {
		platform_device_register_simple("ltq_gpio", 2,
			&ltq_gpio_resource[2], 1);
	}
}

/* serial to parallel conversion */
/* MMIO window of the STP (serial-to-parallel, LED shift) block. */
static struct resource ltq_stp_resource = {
	.name	= "stp",
	.start	= LTQ_STP_BASE_ADDR,
	.end	= LTQ_STP_BASE_ADDR + LTQ_STP_SIZE - 1,
	.flags	= IORESOURCE_MEM,
};

/* Register the "ltq_stp" platform device for the STP/LED block. */
void __init ltq_register_gpio_stp(void)
{
	platform_device_register_simple("ltq_stp", 0, &ltq_stp_resource, 1);
}

/* asc ports - amazon se has its own serial mapping */
/*
 * Amazon SE maps its first serial port at the ASC1 address and uses
 * dedicated tx/rx/err interrupt lines, hence this separate resource set.
 */
static struct resource ltq_ase_asc_resources[] = {
	{
		.name	= "asc0",
		.start	= LTQ_ASC1_BASE_ADDR,
		.end	= LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	IRQ_RES(tx, LTQ_ASC_ASE_TIR),
	IRQ_RES(rx, LTQ_ASC_ASE_RIR),
	IRQ_RES(err, LTQ_ASC_ASE_EIR),
};

/* Register the Amazon SE serial ("ltq_asc") platform device. */
void __init ltq_register_ase_asc(void)
{
	platform_device_register_simple("ltq_asc", 0,
		ltq_ase_asc_resources, ARRAY_SIZE(ltq_ase_asc_resources));
}

/* ethernet */
/* MMIO window of the ETOP (ethernet) block. */
static struct resource ltq_etop_resources = {
	.name	= "etop",
	.start	= LTQ_ETOP_BASE_ADDR,
	.end	= LTQ_ETOP_BASE_ADDR + LTQ_ETOP_SIZE - 1,
	.flags	= IORESOURCE_MEM,
};

static struct platform_device ltq_etop = {
	.name		= "ltq_etop",
	.resource	= &ltq_etop_resources,
	.num_resources	= 1,
};

/*
 * Register the ethernet device, attaching the board-supplied
 * ltq_eth_data as platform data.  A NULL @eth skips registration.
 */
void __init
ltq_register_etop(struct ltq_eth_data *eth)
{
	if (eth) {
		ltq_etop.dev.platform_data = eth;
		platform_device_register(&ltq_etop);
	}
}
gpl-2.0
zaventh/nexus7-kernel-grouper
drivers/cpufreq/e_powersaver.c
4651
8906
/* * Based on documentation provided by Dave Jones. Thanks! * * Licensed under the terms of the GNU GPL License version 2. * * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/timex.h> #include <linux/io.h> #include <linux/delay.h> #include <asm/msr.h> #include <asm/tsc.h> #define EPS_BRAND_C7M 0 #define EPS_BRAND_C7 1 #define EPS_BRAND_EDEN 2 #define EPS_BRAND_C3 3 #define EPS_BRAND_C7D 4 struct eps_cpu_data { u32 fsb; struct cpufreq_frequency_table freq_table[]; }; static struct eps_cpu_data *eps_cpu[NR_CPUS]; static unsigned int eps_get(unsigned int cpu) { struct eps_cpu_data *centaur; u32 lo, hi; if (cpu) return 0; centaur = eps_cpu[cpu]; if (centaur == NULL) return 0; /* Return current frequency */ rdmsr(MSR_IA32_PERF_STATUS, lo, hi); return centaur->fsb * ((lo >> 8) & 0xff); } static int eps_set_state(struct eps_cpu_data *centaur, unsigned int cpu, u32 dest_state) { struct cpufreq_freqs freqs; u32 lo, hi; int err = 0; int i; freqs.old = eps_get(cpu); freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff); freqs.cpu = cpu; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); /* Wait while CPU is busy */ rdmsr(MSR_IA32_PERF_STATUS, lo, hi); i = 0; while (lo & ((1 << 16) | (1 << 17))) { udelay(16); rdmsr(MSR_IA32_PERF_STATUS, lo, hi); i++; if (unlikely(i > 64)) { err = -ENODEV; goto postchange; } } /* Set new multiplier and voltage */ wrmsr(MSR_IA32_PERF_CTL, dest_state & 0xffff, 0); /* Wait until transition end */ i = 0; do { udelay(16); rdmsr(MSR_IA32_PERF_STATUS, lo, hi); i++; if (unlikely(i > 64)) { err = -ENODEV; goto postchange; } } while (lo & ((1 << 16) | (1 << 17))); /* Return current frequency */ postchange: rdmsr(MSR_IA32_PERF_STATUS, lo, hi); freqs.new = centaur->fsb * ((lo >> 8) & 0xff); #ifdef DEBUG { u8 current_multiplier, current_voltage; /* Print voltage and 
multiplier */ rdmsr(MSR_IA32_PERF_STATUS, lo, hi); current_voltage = lo & 0xff; printk(KERN_INFO "eps: Current voltage = %dmV\n", current_voltage * 16 + 700); current_multiplier = (lo >> 8) & 0xff; printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier); } #endif cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); return err; } static int eps_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { struct eps_cpu_data *centaur; unsigned int newstate = 0; unsigned int cpu = policy->cpu; unsigned int dest_state; int ret; if (unlikely(eps_cpu[cpu] == NULL)) return -ENODEV; centaur = eps_cpu[cpu]; if (unlikely(cpufreq_frequency_table_target(policy, &eps_cpu[cpu]->freq_table[0], target_freq, relation, &newstate))) { return -EINVAL; } /* Make frequency transition */ dest_state = centaur->freq_table[newstate].index & 0xffff; ret = eps_set_state(centaur, cpu, dest_state); if (ret) printk(KERN_ERR "eps: Timeout!\n"); return ret; } static int eps_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, &eps_cpu[policy->cpu]->freq_table[0]); } static int eps_cpu_init(struct cpufreq_policy *policy) { unsigned int i; u32 lo, hi; u64 val; u8 current_multiplier, current_voltage; u8 max_multiplier, max_voltage; u8 min_multiplier, min_voltage; u8 brand = 0; u32 fsb; struct eps_cpu_data *centaur; struct cpuinfo_x86 *c = &cpu_data(0); struct cpufreq_frequency_table *f_table; int k, step, voltage; int ret; int states; if (policy->cpu != 0) return -ENODEV; /* Check brand */ printk(KERN_INFO "eps: Detected VIA "); switch (c->x86_model) { case 10: rdmsr(0x1153, lo, hi); brand = (((lo >> 2) ^ lo) >> 18) & 3; printk(KERN_CONT "Model A "); break; case 13: rdmsr(0x1154, lo, hi); brand = (((lo >> 4) ^ (lo >> 2))) & 0x000000ff; printk(KERN_CONT "Model D "); break; } switch (brand) { case EPS_BRAND_C7M: printk(KERN_CONT "C7-M\n"); break; case EPS_BRAND_C7: printk(KERN_CONT "C7\n"); break; case EPS_BRAND_EDEN: 
printk(KERN_CONT "Eden\n"); break; case EPS_BRAND_C7D: printk(KERN_CONT "C7-D\n"); break; case EPS_BRAND_C3: printk(KERN_CONT "C3\n"); return -ENODEV; break; } /* Enable Enhanced PowerSaver */ rdmsrl(MSR_IA32_MISC_ENABLE, val); if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; wrmsrl(MSR_IA32_MISC_ENABLE, val); /* Can be locked at 0 */ rdmsrl(MSR_IA32_MISC_ENABLE, val); if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n"); return -ENODEV; } } /* Print voltage and multiplier */ rdmsr(MSR_IA32_PERF_STATUS, lo, hi); current_voltage = lo & 0xff; printk(KERN_INFO "eps: Current voltage = %dmV\n", current_voltage * 16 + 700); current_multiplier = (lo >> 8) & 0xff; printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier); /* Print limits */ max_voltage = hi & 0xff; printk(KERN_INFO "eps: Highest voltage = %dmV\n", max_voltage * 16 + 700); max_multiplier = (hi >> 8) & 0xff; printk(KERN_INFO "eps: Highest multiplier = %d\n", max_multiplier); min_voltage = (hi >> 16) & 0xff; printk(KERN_INFO "eps: Lowest voltage = %dmV\n", min_voltage * 16 + 700); min_multiplier = (hi >> 24) & 0xff; printk(KERN_INFO "eps: Lowest multiplier = %d\n", min_multiplier); /* Sanity checks */ if (current_multiplier == 0 || max_multiplier == 0 || min_multiplier == 0) return -EINVAL; if (current_multiplier > max_multiplier || max_multiplier <= min_multiplier) return -EINVAL; if (current_voltage > 0x1f || max_voltage > 0x1f) return -EINVAL; if (max_voltage < min_voltage) return -EINVAL; /* Calc FSB speed */ fsb = cpu_khz / current_multiplier; /* Calc number of p-states supported */ if (brand == EPS_BRAND_C7M) states = max_multiplier - min_multiplier + 1; else states = 2; /* Allocate private data and frequency table for current cpu */ centaur = kzalloc(sizeof(struct eps_cpu_data) + (states + 1) * sizeof(struct cpufreq_frequency_table), GFP_KERNEL); if (!centaur) return 
-ENOMEM; eps_cpu[0] = centaur; /* Copy basic values */ centaur->fsb = fsb; /* Fill frequency and MSR value table */ f_table = &centaur->freq_table[0]; if (brand != EPS_BRAND_C7M) { f_table[0].frequency = fsb * min_multiplier; f_table[0].index = (min_multiplier << 8) | min_voltage; f_table[1].frequency = fsb * max_multiplier; f_table[1].index = (max_multiplier << 8) | max_voltage; f_table[2].frequency = CPUFREQ_TABLE_END; } else { k = 0; step = ((max_voltage - min_voltage) * 256) / (max_multiplier - min_multiplier); for (i = min_multiplier; i <= max_multiplier; i++) { voltage = (k * step) / 256 + min_voltage; f_table[k].frequency = fsb * i; f_table[k].index = (i << 8) | voltage; k++; } f_table[k].frequency = CPUFREQ_TABLE_END; } policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */ policy->cur = fsb * current_multiplier; ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]); if (ret) { kfree(centaur); return ret; } cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu); return 0; } static int eps_cpu_exit(struct cpufreq_policy *policy) { unsigned int cpu = policy->cpu; struct eps_cpu_data *centaur; u32 lo, hi; if (eps_cpu[cpu] == NULL) return -ENODEV; centaur = eps_cpu[cpu]; /* Get max frequency */ rdmsr(MSR_IA32_PERF_STATUS, lo, hi); /* Set max frequency */ eps_set_state(centaur, cpu, hi & 0xffff); /* Bye */ cpufreq_frequency_table_put_attr(policy->cpu); kfree(eps_cpu[cpu]); eps_cpu[cpu] = NULL; return 0; } static struct freq_attr *eps_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver eps_driver = { .verify = eps_verify, .target = eps_target, .init = eps_cpu_init, .exit = eps_cpu_exit, .get = eps_get, .name = "e_powersaver", .owner = THIS_MODULE, .attr = eps_attr, }; static int __init eps_init(void) { struct cpuinfo_x86 *c = &cpu_data(0); /* This driver will work only on Centaur C7 processors with * Enhanced SpeedStep/PowerSaver registers */ if (c->x86_vendor != 
X86_VENDOR_CENTAUR || c->x86 != 6 || c->x86_model < 10) return -ENODEV; if (!cpu_has(c, X86_FEATURE_EST)) return -ENODEV; if (cpufreq_register_driver(&eps_driver)) return -EINVAL; return 0; } static void __exit eps_exit(void) { cpufreq_unregister_driver(&eps_driver); } MODULE_AUTHOR("Rafal Bilski <rafalbilski@interia.pl>"); MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPU's."); MODULE_LICENSE("GPL"); module_init(eps_init); module_exit(eps_exit);
gpl-2.0
gearslam/himawhlspcs
drivers/net/wireless/ath/ath5k/desc.c
4907
22515
/* * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org> * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com> * Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * */ /******************************\ Hardware Descriptor Functions \******************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "ath5k.h" #include "reg.h" #include "debug.h" /** * DOC: Hardware descriptor functions * * Here we handle the processing of the low-level hw descriptors * that hw reads and writes via DMA for each TX and RX attempt (that means * we can also have descriptors for failed TX/RX tries). We have two kind of * descriptors for RX and TX, control descriptors tell the hw how to send or * receive a packet where to read/write it from/to etc and status descriptors * that contain information about how the packet was sent or received (errors * included). * * Descriptor format is not exactly the same for each MAC chip version so we * have function pointers on &struct ath5k_hw we initialize at runtime based on * the chip used. 
*/ /************************\ * TX Control descriptors * \************************/ /** * ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor * @ah: The &struct ath5k_hw * @desc: The &struct ath5k_desc * @pkt_len: Frame length in bytes * @hdr_len: Header length in bytes (only used on AR5210) * @padsize: Any padding we've added to the frame length * @type: One of enum ath5k_pkt_type * @tx_power: Tx power in 0.5dB steps * @tx_rate0: HW idx for transmission rate * @tx_tries0: Max number of retransmissions * @key_index: Index on key table to use for encryption * @antenna_mode: Which antenna to use (0 for auto) * @flags: One of AR5K_TXDESC_* flags (desc.h) * @rtscts_rate: HW idx for RTS/CTS transmission rate * @rtscts_duration: What to put on duration field on the header of RTS/CTS * * Internal function to initialize a 2-Word TX control descriptor * found on AR5210 and AR5211 MACs chips. * * Returns 0 on success or -EINVAL on false input */ static int ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len, int padsize, enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0, unsigned int key_index, unsigned int antenna_mode, unsigned int flags, unsigned int rtscts_rate, unsigned int rtscts_duration) { u32 frame_type; struct ath5k_hw_2w_tx_ctl *tx_ctl; unsigned int frame_len; tx_ctl = &desc->ud.ds_tx5210.tx_ctl; /* * Validate input * - Zero retries don't make sense. * - A zero rate will put the HW into a mode where it continuously sends * noise on the channel, so it is important to avoid this. 
*/ if (unlikely(tx_tries0 == 0)) { ATH5K_ERR(ah, "zero retries\n"); WARN_ON(1); return -EINVAL; } if (unlikely(tx_rate0 == 0)) { ATH5K_ERR(ah, "zero rate\n"); WARN_ON(1); return -EINVAL; } /* Clear descriptor */ memset(&desc->ud.ds_tx5210, 0, sizeof(struct ath5k_hw_5210_tx_desc)); /* Setup control descriptor */ /* Verify and set frame length */ /* remove padding we might have added before */ frame_len = pkt_len - padsize + FCS_LEN; if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN) return -EINVAL; tx_ctl->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN; /* Verify and set buffer length */ /* NB: beacon's BufLen must be a multiple of 4 bytes */ if (type == AR5K_PKT_TYPE_BEACON) pkt_len = roundup(pkt_len, 4); if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN) return -EINVAL; tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN; /* * Verify and set header length (only 5210) */ if (ah->ah_version == AR5K_AR5210) { if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210) return -EINVAL; tx_ctl->tx_control_0 |= AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210); } /*Differences between 5210-5211*/ if (ah->ah_version == AR5K_AR5210) { switch (type) { case AR5K_PKT_TYPE_BEACON: case AR5K_PKT_TYPE_PROBE_RESP: frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY; break; case AR5K_PKT_TYPE_PIFS: frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS; break; default: frame_type = type; break; } tx_ctl->tx_control_0 |= AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210) | AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE); } else { tx_ctl->tx_control_0 |= AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE) | AR5K_REG_SM(antenna_mode, AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT); tx_ctl->tx_control_1 |= AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211); } #define _TX_FLAGS(_c, _flag) \ if (flags & AR5K_TXDESC_##_flag) { \ tx_ctl->tx_control_##_c |= \ AR5K_2W_TX_DESC_CTL##_c##_##_flag; \ } #define _TX_FLAGS_5211(_c, _flag) \ if (flags & AR5K_TXDESC_##_flag) { 
\ tx_ctl->tx_control_##_c |= \ AR5K_2W_TX_DESC_CTL##_c##_##_flag##_5211; \ } _TX_FLAGS(0, CLRDMASK); _TX_FLAGS(0, INTREQ); _TX_FLAGS(0, RTSENA); if (ah->ah_version == AR5K_AR5211) { _TX_FLAGS_5211(0, VEOL); _TX_FLAGS_5211(1, NOACK); } #undef _TX_FLAGS #undef _TX_FLAGS_5211 /* * WEP crap */ if (key_index != AR5K_TXKEYIX_INVALID) { tx_ctl->tx_control_0 |= AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID; tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index, AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX); } /* * RTS/CTS Duration [5210 ?] */ if ((ah->ah_version == AR5K_AR5210) && (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA))) tx_ctl->tx_control_1 |= rtscts_duration & AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210; return 0; } /** * ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor * @ah: The &struct ath5k_hw * @desc: The &struct ath5k_desc * @pkt_len: Frame length in bytes * @hdr_len: Header length in bytes (only used on AR5210) * @padsize: Any padding we've added to the frame length * @type: One of enum ath5k_pkt_type * @tx_power: Tx power in 0.5dB steps * @tx_rate0: HW idx for transmission rate * @tx_tries0: Max number of retransmissions * @key_index: Index on key table to use for encryption * @antenna_mode: Which antenna to use (0 for auto) * @flags: One of AR5K_TXDESC_* flags (desc.h) * @rtscts_rate: HW idx for RTS/CTS transmission rate * @rtscts_duration: What to put on duration field on the header of RTS/CTS * * Internal function to initialize a 4-Word TX control descriptor * found on AR5212 and later MACs chips. 
* * Returns 0 on success or -EINVAL on false input */ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len, int padsize, enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0, unsigned int key_index, unsigned int antenna_mode, unsigned int flags, unsigned int rtscts_rate, unsigned int rtscts_duration) { struct ath5k_hw_4w_tx_ctl *tx_ctl; unsigned int frame_len; /* * Use local variables for these to reduce load/store access on * uncached memory */ u32 txctl0 = 0, txctl1 = 0, txctl2 = 0, txctl3 = 0; tx_ctl = &desc->ud.ds_tx5212.tx_ctl; /* * Validate input * - Zero retries don't make sense. * - A zero rate will put the HW into a mode where it continuously sends * noise on the channel, so it is important to avoid this. */ if (unlikely(tx_tries0 == 0)) { ATH5K_ERR(ah, "zero retries\n"); WARN_ON(1); return -EINVAL; } if (unlikely(tx_rate0 == 0)) { ATH5K_ERR(ah, "zero rate\n"); WARN_ON(1); return -EINVAL; } tx_power += ah->ah_txpower.txp_offset; if (tx_power > AR5K_TUNE_MAX_TXPOWER) tx_power = AR5K_TUNE_MAX_TXPOWER; /* Clear descriptor status area */ memset(&desc->ud.ds_tx5212.tx_stat, 0, sizeof(desc->ud.ds_tx5212.tx_stat)); /* Setup control descriptor */ /* Verify and set frame length */ /* remove padding we might have added before */ frame_len = pkt_len - padsize + FCS_LEN; if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN) return -EINVAL; txctl0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN; /* Verify and set buffer length */ /* NB: beacon's BufLen must be a multiple of 4 bytes */ if (type == AR5K_PKT_TYPE_BEACON) pkt_len = roundup(pkt_len, 4); if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN) return -EINVAL; txctl1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN; txctl0 |= AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) | AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT); txctl1 |= AR5K_REG_SM(type, AR5K_4W_TX_DESC_CTL1_FRAME_TYPE); txctl2 = 
AR5K_REG_SM(tx_tries0, AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0); txctl3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0; #define _TX_FLAGS(_c, _flag) \ if (flags & AR5K_TXDESC_##_flag) { \ txctl##_c |= AR5K_4W_TX_DESC_CTL##_c##_##_flag; \ } _TX_FLAGS(0, CLRDMASK); _TX_FLAGS(0, VEOL); _TX_FLAGS(0, INTREQ); _TX_FLAGS(0, RTSENA); _TX_FLAGS(0, CTSENA); _TX_FLAGS(1, NOACK); #undef _TX_FLAGS /* * WEP crap */ if (key_index != AR5K_TXKEYIX_INVALID) { txctl0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID; txctl1 |= AR5K_REG_SM(key_index, AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX); } /* * RTS/CTS */ if (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)) { if ((flags & AR5K_TXDESC_RTSENA) && (flags & AR5K_TXDESC_CTSENA)) return -EINVAL; txctl2 |= rtscts_duration & AR5K_4W_TX_DESC_CTL2_RTS_DURATION; txctl3 |= AR5K_REG_SM(rtscts_rate, AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE); } tx_ctl->tx_control_0 = txctl0; tx_ctl->tx_control_1 = txctl1; tx_ctl->tx_control_2 = txctl2; tx_ctl->tx_control_3 = txctl3; return 0; } /** * ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor * @ah: The &struct ath5k_hw * @desc: The &struct ath5k_desc * @tx_rate1: HW idx for rate used on transmission series 1 * @tx_tries1: Max number of retransmissions for transmission series 1 * @tx_rate2: HW idx for rate used on transmission series 2 * @tx_tries2: Max number of retransmissions for transmission series 2 * @tx_rate3: HW idx for rate used on transmission series 3 * @tx_tries3: Max number of retransmissions for transmission series 3 * * Multi rate retry (MRR) tx control descriptors are available only on AR5212 * MACs, they are part of the normal 4-word tx control descriptor (see above) * but we handle them through a separate function for better abstraction. 
* * Returns 0 on success or -EINVAL on invalid input */ int ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, u_int tx_rate1, u_int tx_tries1, u_int tx_rate2, u_int tx_tries2, u_int tx_rate3, u_int tx_tries3) { struct ath5k_hw_4w_tx_ctl *tx_ctl; /* no mrr support for cards older than 5212 */ if (ah->ah_version < AR5K_AR5212) return 0; /* * Rates can be 0 as long as the retry count is 0 too. * A zero rate and nonzero retry count will put the HW into a mode where * it continuously sends noise on the channel, so it is important to * avoid this. */ if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) || (tx_rate2 == 0 && tx_tries2 != 0) || (tx_rate3 == 0 && tx_tries3 != 0))) { ATH5K_ERR(ah, "zero rate\n"); WARN_ON(1); return -EINVAL; } if (ah->ah_version == AR5K_AR5212) { tx_ctl = &desc->ud.ds_tx5212.tx_ctl; #define _XTX_TRIES(_n) \ if (tx_tries##_n) { \ tx_ctl->tx_control_2 |= \ AR5K_REG_SM(tx_tries##_n, \ AR5K_4W_TX_DESC_CTL2_XMIT_TRIES##_n); \ tx_ctl->tx_control_3 |= \ AR5K_REG_SM(tx_rate##_n, \ AR5K_4W_TX_DESC_CTL3_XMIT_RATE##_n); \ } _XTX_TRIES(1); _XTX_TRIES(2); _XTX_TRIES(3); #undef _XTX_TRIES return 1; } return 0; } /***********************\ * TX Status descriptors * \***********************/ /** * ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1 * @ah: The &struct ath5k_hw * @desc: The &struct ath5k_desc * @ts: The &struct ath5k_tx_status */ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah, struct ath5k_desc *desc, struct ath5k_tx_status *ts) { struct ath5k_hw_tx_status *tx_status; tx_status = &desc->ud.ds_tx5210.tx_stat; /* No frame has been send or error */ if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0)) return -EINPROGRESS; /* * Get descriptor status */ ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0, AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP); ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0, AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT); ts->ts_final_retry = 
AR5K_REG_MS(tx_status->tx_status_0, AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT); /*TODO: ts->ts_virtcol + test*/ ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1, AR5K_DESC_TX_STATUS1_SEQ_NUM); ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1, AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH); ts->ts_antenna = 1; ts->ts_status = 0; ts->ts_final_idx = 0; if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) { if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES) ts->ts_status |= AR5K_TXERR_XRETRY; if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN) ts->ts_status |= AR5K_TXERR_FIFO; if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED) ts->ts_status |= AR5K_TXERR_FILT; } return 0; } /** * ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212 * @ah: The &struct ath5k_hw * @desc: The &struct ath5k_desc * @ts: The &struct ath5k_tx_status */ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, struct ath5k_desc *desc, struct ath5k_tx_status *ts) { struct ath5k_hw_tx_status *tx_status; u32 txstat0, txstat1; tx_status = &desc->ud.ds_tx5212.tx_stat; txstat1 = ACCESS_ONCE(tx_status->tx_status_1); /* No frame has been send or error */ if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE))) return -EINPROGRESS; txstat0 = ACCESS_ONCE(tx_status->tx_status_0); /* * Get descriptor status */ ts->ts_tstamp = AR5K_REG_MS(txstat0, AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP); ts->ts_shortretry = AR5K_REG_MS(txstat0, AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT); ts->ts_final_retry = AR5K_REG_MS(txstat0, AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT); ts->ts_seqnum = AR5K_REG_MS(txstat1, AR5K_DESC_TX_STATUS1_SEQ_NUM); ts->ts_rssi = AR5K_REG_MS(txstat1, AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH); ts->ts_antenna = (txstat1 & AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212) ? 
2 : 1; ts->ts_status = 0; ts->ts_final_idx = AR5K_REG_MS(txstat1, AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212); /* TX error */ if (!(txstat0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) { if (txstat0 & AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES) ts->ts_status |= AR5K_TXERR_XRETRY; if (txstat0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN) ts->ts_status |= AR5K_TXERR_FIFO; if (txstat0 & AR5K_DESC_TX_STATUS0_FILTERED) ts->ts_status |= AR5K_TXERR_FILT; } return 0; } /****************\ * RX Descriptors * \****************/ /** * ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor * @ah: The &struct ath5k_hw * @desc: The &struct ath5k_desc * @size: RX buffer length in bytes * @flags: One of AR5K_RXDESC_* flags */ int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, u32 size, unsigned int flags) { struct ath5k_hw_rx_ctl *rx_ctl; rx_ctl = &desc->ud.ds_rx.rx_ctl; /* * Clear the descriptor * If we don't clean the status descriptor, * while scanning we get too many results, * most of them virtual, after some secs * of scanning system hangs. M.F. */ memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc)); if (unlikely(size & ~AR5K_DESC_RX_CTL1_BUF_LEN)) return -EINVAL; /* Setup descriptor */ rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN; if (flags & AR5K_RXDESC_INTREQ) rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ; return 0; } /** * ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1 * @ah: The &struct ath5k_hw * @desc: The &struct ath5k_desc * @rs: The &struct ath5k_rx_status * * Internal function used to process an RX status descriptor * on AR5210/5211 MAC. * * Returns 0 on success or -EINPROGRESS in case we haven't received the who;e * frame yet. 
*/ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, struct ath5k_desc *desc, struct ath5k_rx_status *rs) { struct ath5k_hw_rx_status *rx_status; rx_status = &desc->ud.ds_rx.rx_stat; /* No frame received / not ready */ if (unlikely(!(rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_DONE))) return -EINPROGRESS; memset(rs, 0, sizeof(struct ath5k_rx_status)); /* * Frame receive status */ rs->rs_datalen = rx_status->rx_status_0 & AR5K_5210_RX_DESC_STATUS0_DATA_LEN; rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0, AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL); rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0, AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE); rs->rs_more = !!(rx_status->rx_status_0 & AR5K_5210_RX_DESC_STATUS0_MORE); /* TODO: this timestamp is 13 bit, later on we assume 15 bit! * also the HAL code for 5210 says the timestamp is bits [10..22] of the * TSF, and extends the timestamp here to 15 bit. * we need to check on 5210... */ rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); if (ah->ah_version == AR5K_AR5211) rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0, AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211); else rs->rs_antenna = (rx_status->rx_status_0 & AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210) ? 
2 : 1; /* * Key table status */ if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID) rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1, AR5K_5210_RX_DESC_STATUS1_KEY_INDEX); else rs->rs_keyix = AR5K_RXKEYIX_INVALID; /* * Receive/descriptor errors */ if (!(rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) { if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_CRC_ERROR) rs->rs_status |= AR5K_RXERR_CRC; /* only on 5210 */ if ((ah->ah_version == AR5K_AR5210) && (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210)) rs->rs_status |= AR5K_RXERR_FIFO; if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) { rs->rs_status |= AR5K_RXERR_PHY; rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1, AR5K_5210_RX_DESC_STATUS1_PHY_ERROR); } if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR) rs->rs_status |= AR5K_RXERR_DECRYPT; } return 0; } /** * ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212 * @ah: The &struct ath5k_hw * @desc: The &struct ath5k_desc * @rs: The &struct ath5k_rx_status * * Internal function used to process an RX status descriptor * on AR5212 and later MAC. * * Returns 0 on success or -EINPROGRESS in case we haven't received the who;e * frame yet. 
*/ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, struct ath5k_desc *desc, struct ath5k_rx_status *rs) { struct ath5k_hw_rx_status *rx_status; u32 rxstat0, rxstat1; rx_status = &desc->ud.ds_rx.rx_stat; rxstat1 = ACCESS_ONCE(rx_status->rx_status_1); /* No frame received / not ready */ if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE))) return -EINPROGRESS; memset(rs, 0, sizeof(struct ath5k_rx_status)); rxstat0 = ACCESS_ONCE(rx_status->rx_status_0); /* * Frame receive status */ rs->rs_datalen = rxstat0 & AR5K_5212_RX_DESC_STATUS0_DATA_LEN; rs->rs_rssi = AR5K_REG_MS(rxstat0, AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL); rs->rs_rate = AR5K_REG_MS(rxstat0, AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE); rs->rs_antenna = AR5K_REG_MS(rxstat0, AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA); rs->rs_more = !!(rxstat0 & AR5K_5212_RX_DESC_STATUS0_MORE); rs->rs_tstamp = AR5K_REG_MS(rxstat1, AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); /* * Key table status */ if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID) rs->rs_keyix = AR5K_REG_MS(rxstat1, AR5K_5212_RX_DESC_STATUS1_KEY_INDEX); else rs->rs_keyix = AR5K_RXKEYIX_INVALID; /* * Receive/descriptor errors */ if (!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) { if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_CRC_ERROR) rs->rs_status |= AR5K_RXERR_CRC; if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) { rs->rs_status |= AR5K_RXERR_PHY; rs->rs_phyerr = AR5K_REG_MS(rxstat1, AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE); if (!ah->ah_capabilities.cap_has_phyerr_counters) ath5k_ani_phy_error_report(ah, rs->rs_phyerr); } if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR) rs->rs_status |= AR5K_RXERR_DECRYPT; if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_MIC_ERROR) rs->rs_status |= AR5K_RXERR_MIC; } return 0; } /********\ * Attach * \********/ /** * ath5k_hw_init_desc_functions() - Init function pointers inside ah * @ah: The &struct ath5k_hw * * Maps the internal descriptor functions to the function pointers on ah, used * 
from above. This is used as an abstraction layer to handle the various chips * the same way. */ int ath5k_hw_init_desc_functions(struct ath5k_hw *ah) { if (ah->ah_version == AR5K_AR5212) { ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc; ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status; ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status; } else if (ah->ah_version <= AR5K_AR5211) { ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc; ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status; ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status; } else return -ENOTSUPP; return 0; }
gpl-2.0
mapleshadow/M7-4.3-Kernel
drivers/s390/cio/css.c
5163
29464
/* * driver for channel subsystem * * Copyright IBM Corp. 2002, 2010 * * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) */ #define KMSG_COMPONENT "cio" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/reboot.h> #include <linux/suspend.h> #include <linux/proc_fs.h> #include <asm/isc.h> #include <asm/crw.h> #include "css.h" #include "cio.h" #include "cio_debug.h" #include "ioasm.h" #include "chsc.h" #include "device.h" #include "idset.h" #include "chp.h" int css_init_done = 0; int max_ssid; struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; static struct bus_type css_bus_type; int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) { struct subchannel_id schid; int ret; init_subchannel_id(&schid); ret = -ENODEV; do { do { ret = fn(schid, data); if (ret) break; } while (schid.sch_no++ < __MAX_SUBCHANNEL); schid.sch_no = 0; } while (schid.ssid++ < max_ssid); return ret; } struct cb_data { void *data; struct idset *set; int (*fn_known_sch)(struct subchannel *, void *); int (*fn_unknown_sch)(struct subchannel_id, void *); }; static int call_fn_known_sch(struct device *dev, void *data) { struct subchannel *sch = to_subchannel(dev); struct cb_data *cb = data; int rc = 0; idset_sch_del(cb->set, sch->schid); if (cb->fn_known_sch) rc = cb->fn_known_sch(sch, cb->data); return rc; } static int call_fn_unknown_sch(struct subchannel_id schid, void *data) { struct cb_data *cb = data; int rc = 0; if (idset_sch_contains(cb->set, schid)) rc = cb->fn_unknown_sch(schid, cb->data); return rc; } static int call_fn_all_sch(struct subchannel_id schid, void *data) { struct cb_data *cb = data; struct subchannel *sch; int rc = 0; sch = get_subchannel_by_schid(schid); if (sch) { if (cb->fn_known_sch) rc = cb->fn_known_sch(sch, cb->data); put_device(&sch->dev); } 
else { if (cb->fn_unknown_sch) rc = cb->fn_unknown_sch(schid, cb->data); } return rc; } int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), int (*fn_unknown)(struct subchannel_id, void *), void *data) { struct cb_data cb; int rc; cb.data = data; cb.fn_known_sch = fn_known; cb.fn_unknown_sch = fn_unknown; cb.set = idset_sch_new(); if (!cb.set) /* fall back to brute force scanning in case of oom */ return for_each_subchannel(call_fn_all_sch, &cb); idset_fill(cb.set); /* Process registered subchannels. */ rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch); if (rc) goto out; /* Process unregistered subchannels. */ if (fn_unknown) rc = for_each_subchannel(call_fn_unknown_sch, &cb); out: idset_free(cb.set); return rc; } static void css_sch_todo(struct work_struct *work); static struct subchannel * css_alloc_subchannel(struct subchannel_id schid) { struct subchannel *sch; int ret; sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA); if (sch == NULL) return ERR_PTR(-ENOMEM); ret = cio_validate_subchannel (sch, schid); if (ret < 0) { kfree(sch); return ERR_PTR(ret); } INIT_WORK(&sch->todo_work, css_sch_todo); return sch; } static void css_subchannel_release(struct device *dev) { struct subchannel *sch; sch = to_subchannel(dev); if (!cio_is_console(sch->schid)) { /* Reset intparm to zeroes. 
*/ sch->config.intparm = 0; cio_commit_config(sch); kfree(sch->lock); kfree(sch); } } static int css_sch_device_register(struct subchannel *sch) { int ret; mutex_lock(&sch->reg_mutex); dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid, sch->schid.sch_no); ret = device_register(&sch->dev); mutex_unlock(&sch->reg_mutex); return ret; } /** * css_sch_device_unregister - unregister a subchannel * @sch: subchannel to be unregistered */ void css_sch_device_unregister(struct subchannel *sch) { mutex_lock(&sch->reg_mutex); if (device_is_registered(&sch->dev)) device_unregister(&sch->dev); mutex_unlock(&sch->reg_mutex); } EXPORT_SYMBOL_GPL(css_sch_device_unregister); static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) { int i; int mask; memset(ssd, 0, sizeof(struct chsc_ssd_info)); ssd->path_mask = pmcw->pim; for (i = 0; i < 8; i++) { mask = 0x80 >> i; if (pmcw->pim & mask) { chp_id_init(&ssd->chpid[i]); ssd->chpid[i].id = pmcw->chpid[i]; } } } static void ssd_register_chpids(struct chsc_ssd_info *ssd) { int i; int mask; for (i = 0; i < 8; i++) { mask = 0x80 >> i; if (ssd->path_mask & mask) if (!chp_is_registered(ssd->chpid[i])) chp_new(ssd->chpid[i]); } } void css_update_ssd_info(struct subchannel *sch) { int ret; if (cio_is_console(sch->schid)) { /* Console is initialized too early for functions requiring * memory allocation. 
*/ ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); } else { ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info); if (ret) ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); ssd_register_chpids(&sch->ssd_info); } } static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct subchannel *sch = to_subchannel(dev); return sprintf(buf, "%01x\n", sch->st); } static DEVICE_ATTR(type, 0444, type_show, NULL); static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct subchannel *sch = to_subchannel(dev); return sprintf(buf, "css:t%01X\n", sch->st); } static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); static struct attribute *subch_attrs[] = { &dev_attr_type.attr, &dev_attr_modalias.attr, NULL, }; static struct attribute_group subch_attr_group = { .attrs = subch_attrs, }; static const struct attribute_group *default_subch_attr_groups[] = { &subch_attr_group, NULL, }; static int css_register_subchannel(struct subchannel *sch) { int ret; /* Initialize the subchannel structure */ sch->dev.parent = &channel_subsystems[0]->device; sch->dev.bus = &css_bus_type; sch->dev.release = &css_subchannel_release; sch->dev.groups = default_subch_attr_groups; /* * We don't want to generate uevents for I/O subchannels that don't * have a working ccw device behind them since they will be * unregistered before they can be used anyway, so we delay the add * uevent until after device recognition was successful. * Note that we suppress the uevent for all subchannel types; * the subchannel driver can decide itself when it wants to inform * userspace of its existence. */ dev_set_uevent_suppress(&sch->dev, 1); css_update_ssd_info(sch); /* make it known to the system */ ret = css_sch_device_register(sch); if (ret) { CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n", sch->schid.ssid, sch->schid.sch_no, ret); return ret; } if (!sch->driver) { /* * No driver matched. 
Generate the uevent now so that * a fitting driver module may be loaded based on the * modalias. */ dev_set_uevent_suppress(&sch->dev, 0); kobject_uevent(&sch->dev.kobj, KOBJ_ADD); } return ret; } int css_probe_device(struct subchannel_id schid) { int ret; struct subchannel *sch; if (cio_is_console(schid)) sch = cio_get_console_subchannel(); else { sch = css_alloc_subchannel(schid); if (IS_ERR(sch)) return PTR_ERR(sch); } ret = css_register_subchannel(sch); if (ret) { if (!cio_is_console(schid)) put_device(&sch->dev); } return ret; } static int check_subchannel(struct device * dev, void * data) { struct subchannel *sch; struct subchannel_id *schid = data; sch = to_subchannel(dev); return schid_equal(&sch->schid, schid); } struct subchannel * get_subchannel_by_schid(struct subchannel_id schid) { struct device *dev; dev = bus_find_device(&css_bus_type, NULL, &schid, check_subchannel); return dev ? to_subchannel(dev) : NULL; } /** * css_sch_is_valid() - check if a subchannel is valid * @schib: subchannel information block for the subchannel */ int css_sch_is_valid(struct schib *schib) { if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv) return 0; if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w) return 0; return 1; } EXPORT_SYMBOL_GPL(css_sch_is_valid); static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) { struct schib schib; if (!slow) { /* Will be done on the slow path. */ return -EAGAIN; } if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) { /* Unusable - ignore. 
*/ return 0; } CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid, schid.sch_no); return css_probe_device(schid); } static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) { int ret = 0; if (sch->driver) { if (sch->driver->sch_event) ret = sch->driver->sch_event(sch, slow); else dev_dbg(&sch->dev, "Got subchannel machine check but " "no sch_event handler provided.\n"); } if (ret != 0 && ret != -EAGAIN) { CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n", sch->schid.ssid, sch->schid.sch_no, ret); } return ret; } static void css_evaluate_subchannel(struct subchannel_id schid, int slow) { struct subchannel *sch; int ret; sch = get_subchannel_by_schid(schid); if (sch) { ret = css_evaluate_known_subchannel(sch, slow); put_device(&sch->dev); } else ret = css_evaluate_new_subchannel(schid, slow); if (ret == -EAGAIN) css_schedule_eval(schid); } /** * css_sched_sch_todo - schedule a subchannel operation * @sch: subchannel * @todo: todo * * Schedule the operation identified by @todo to be performed on the slow path * workqueue. Do nothing if another operation with higher priority is already * scheduled. Needs to be called with subchannel lock held. */ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) { CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n", sch->schid.ssid, sch->schid.sch_no, todo); if (sch->todo >= todo) return; /* Get workqueue ref. */ if (!get_device(&sch->dev)) return; sch->todo = todo; if (!queue_work(cio_work_q, &sch->todo_work)) { /* Already queued, release workqueue ref. */ put_device(&sch->dev); } } static void css_sch_todo(struct work_struct *work) { struct subchannel *sch; enum sch_todo todo; int ret; sch = container_of(work, struct subchannel, todo_work); /* Find out todo. */ spin_lock_irq(sch->lock); todo = sch->todo; CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid, sch->schid.sch_no, todo); sch->todo = SCH_TODO_NOTHING; spin_unlock_irq(sch->lock); /* Perform todo. 
*/ switch (todo) { case SCH_TODO_NOTHING: break; case SCH_TODO_EVAL: ret = css_evaluate_known_subchannel(sch, 1); if (ret == -EAGAIN) { spin_lock_irq(sch->lock); css_sched_sch_todo(sch, todo); spin_unlock_irq(sch->lock); } break; case SCH_TODO_UNREG: css_sch_device_unregister(sch); break; } /* Release workqueue ref. */ put_device(&sch->dev); } static struct idset *slow_subchannel_set; static spinlock_t slow_subchannel_lock; static wait_queue_head_t css_eval_wq; static atomic_t css_eval_scheduled; static int __init slow_subchannel_init(void) { spin_lock_init(&slow_subchannel_lock); atomic_set(&css_eval_scheduled, 0); init_waitqueue_head(&css_eval_wq); slow_subchannel_set = idset_sch_new(); if (!slow_subchannel_set) { CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n"); return -ENOMEM; } return 0; } static int slow_eval_known_fn(struct subchannel *sch, void *data) { int eval; int rc; spin_lock_irq(&slow_subchannel_lock); eval = idset_sch_contains(slow_subchannel_set, sch->schid); idset_sch_del(slow_subchannel_set, sch->schid); spin_unlock_irq(&slow_subchannel_lock); if (eval) { rc = css_evaluate_known_subchannel(sch, 1); if (rc == -EAGAIN) css_schedule_eval(sch->schid); } return 0; } static int slow_eval_unknown_fn(struct subchannel_id schid, void *data) { int eval; int rc = 0; spin_lock_irq(&slow_subchannel_lock); eval = idset_sch_contains(slow_subchannel_set, schid); idset_sch_del(slow_subchannel_set, schid); spin_unlock_irq(&slow_subchannel_lock); if (eval) { rc = css_evaluate_new_subchannel(schid, 1); switch (rc) { case -EAGAIN: css_schedule_eval(schid); rc = 0; break; case -ENXIO: case -ENOMEM: case -EIO: /* These should abort looping */ break; default: rc = 0; } } return rc; } static void css_slow_path_func(struct work_struct *unused) { unsigned long flags; CIO_TRACE_EVENT(4, "slowpath"); for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn, NULL); spin_lock_irqsave(&slow_subchannel_lock, flags); if (idset_is_empty(slow_subchannel_set)) 
{ atomic_set(&css_eval_scheduled, 0); wake_up(&css_eval_wq); } spin_unlock_irqrestore(&slow_subchannel_lock, flags); } static DECLARE_WORK(slow_path_work, css_slow_path_func); struct workqueue_struct *cio_work_q; void css_schedule_eval(struct subchannel_id schid) { unsigned long flags; spin_lock_irqsave(&slow_subchannel_lock, flags); idset_sch_add(slow_subchannel_set, schid); atomic_set(&css_eval_scheduled, 1); queue_work(cio_work_q, &slow_path_work); spin_unlock_irqrestore(&slow_subchannel_lock, flags); } void css_schedule_eval_all(void) { unsigned long flags; spin_lock_irqsave(&slow_subchannel_lock, flags); idset_fill(slow_subchannel_set); atomic_set(&css_eval_scheduled, 1); queue_work(cio_work_q, &slow_path_work); spin_unlock_irqrestore(&slow_subchannel_lock, flags); } static int __unset_registered(struct device *dev, void *data) { struct idset *set = data; struct subchannel *sch = to_subchannel(dev); idset_sch_del(set, sch->schid); return 0; } static void css_schedule_eval_all_unreg(void) { unsigned long flags; struct idset *unreg_set; /* Find unregistered subchannels. */ unreg_set = idset_sch_new(); if (!unreg_set) { /* Fallback. */ css_schedule_eval_all(); return; } idset_fill(unreg_set); bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered); /* Apply to slow_subchannel_set. */ spin_lock_irqsave(&slow_subchannel_lock, flags); idset_add_set(slow_subchannel_set, unreg_set); atomic_set(&css_eval_scheduled, 1); queue_work(cio_work_q, &slow_path_work); spin_unlock_irqrestore(&slow_subchannel_lock, flags); idset_free(unreg_set); } void css_wait_for_slow_path(void) { flush_workqueue(cio_work_q); } /* Schedule reprobing of all unregistered subchannels. */ void css_schedule_reprobe(void) { css_schedule_eval_all_unreg(); } EXPORT_SYMBOL_GPL(css_schedule_reprobe); /* * Called from the machine check handler for subchannel report words. 
*/ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) { struct subchannel_id mchk_schid; struct subchannel *sch; if (overflow) { css_schedule_eval_all(); return; } CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, " "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, crw0->erc, crw0->rsid); if (crw1) CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, " "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", crw1->slct, crw1->oflw, crw1->chn, crw1->rsc, crw1->anc, crw1->erc, crw1->rsid); init_subchannel_id(&mchk_schid); mchk_schid.sch_no = crw0->rsid; if (crw1) mchk_schid.ssid = (crw1->rsid >> 4) & 3; if (crw0->erc == CRW_ERC_PMOD) { sch = get_subchannel_by_schid(mchk_schid); if (sch) { css_update_ssd_info(sch); put_device(&sch->dev); } } /* * Since we are always presented with IPI in the CRW, we have to * use stsch() to find out if the subchannel in question has come * or gone. */ css_evaluate_subchannel(mchk_schid, 0); } static void __init css_generate_pgid(struct channel_subsystem *css, u32 tod_high) { struct cpuid cpu_id; if (css_general_characteristics.mcss) { css->global_pgid.pgid_high.ext_cssid.version = 0x80; css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; } else { #ifdef CONFIG_SMP css->global_pgid.pgid_high.cpu_addr = stap(); #else css->global_pgid.pgid_high.cpu_addr = 0; #endif } get_cpu_id(&cpu_id); css->global_pgid.cpu_id = cpu_id.ident; css->global_pgid.cpu_model = cpu_id.machine; css->global_pgid.tod_high = tod_high; } static void channel_subsystem_release(struct device *dev) { struct channel_subsystem *css; css = to_css(dev); mutex_destroy(&css->mutex); if (css->pseudo_subchannel) { /* Implies that it has been generated but never registered. 
*/ css_subchannel_release(&css->pseudo_subchannel->dev); css->pseudo_subchannel = NULL; } kfree(css); } static ssize_t css_cm_enable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct channel_subsystem *css = to_css(dev); int ret; if (!css) return 0; mutex_lock(&css->mutex); ret = sprintf(buf, "%x\n", css->cm_enabled); mutex_unlock(&css->mutex); return ret; } static ssize_t css_cm_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct channel_subsystem *css = to_css(dev); int ret; unsigned long val; ret = strict_strtoul(buf, 16, &val); if (ret) return ret; mutex_lock(&css->mutex); switch (val) { case 0: ret = css->cm_enabled ? chsc_secm(css, 0) : 0; break; case 1: ret = css->cm_enabled ? 0 : chsc_secm(css, 1); break; default: ret = -EINVAL; } mutex_unlock(&css->mutex); return ret < 0 ? ret : count; } static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); static int __init setup_css(int nr) { u32 tod_high; int ret; struct channel_subsystem *css; css = channel_subsystems[nr]; memset(css, 0, sizeof(struct channel_subsystem)); css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL); if (!css->pseudo_subchannel) return -ENOMEM; css->pseudo_subchannel->dev.parent = &css->device; css->pseudo_subchannel->dev.release = css_subchannel_release; dev_set_name(&css->pseudo_subchannel->dev, "defunct"); mutex_init(&css->pseudo_subchannel->reg_mutex); ret = cio_create_sch_lock(css->pseudo_subchannel); if (ret) { kfree(css->pseudo_subchannel); return ret; } mutex_init(&css->mutex); css->valid = 1; css->cssid = nr; dev_set_name(&css->device, "css%x", nr); css->device.release = channel_subsystem_release; tod_high = (u32) (get_clock() >> 32); css_generate_pgid(css, tod_high); return 0; } static int css_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) { int ret, i; ret = NOTIFY_DONE; for (i = 0; i <= __MAX_CSSID; i++) { struct channel_subsystem 
*css; css = channel_subsystems[i]; mutex_lock(&css->mutex); if (css->cm_enabled) if (chsc_secm(css, 0)) ret = NOTIFY_BAD; mutex_unlock(&css->mutex); } return ret; } static struct notifier_block css_reboot_notifier = { .notifier_call = css_reboot_event, }; /* * Since the css devices are neither on a bus nor have a class * nor have a special device type, we cannot stop/restart channel * path measurements via the normal suspend/resume callbacks, but have * to use notifiers. */ static int css_power_event(struct notifier_block *this, unsigned long event, void *ptr) { int ret, i; switch (event) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: ret = NOTIFY_DONE; for (i = 0; i <= __MAX_CSSID; i++) { struct channel_subsystem *css; css = channel_subsystems[i]; mutex_lock(&css->mutex); if (!css->cm_enabled) { mutex_unlock(&css->mutex); continue; } ret = __chsc_do_secm(css, 0); ret = notifier_from_errno(ret); mutex_unlock(&css->mutex); } break; case PM_POST_HIBERNATION: case PM_POST_SUSPEND: ret = NOTIFY_DONE; for (i = 0; i <= __MAX_CSSID; i++) { struct channel_subsystem *css; css = channel_subsystems[i]; mutex_lock(&css->mutex); if (!css->cm_enabled) { mutex_unlock(&css->mutex); continue; } ret = __chsc_do_secm(css, 1); ret = notifier_from_errno(ret); mutex_unlock(&css->mutex); } /* search for subchannels, which appeared during hibernation */ css_schedule_reprobe(); break; default: ret = NOTIFY_DONE; } return ret; } static struct notifier_block css_power_notifier = { .notifier_call = css_power_event, }; /* * Now that the driver core is running, we can setup our channel subsystem. * The struct subchannel's are created during probing (except for the * static console subchannel). */ static int __init css_bus_init(void) { int ret, i; ret = chsc_init(); if (ret) return ret; chsc_determine_css_characteristics(); /* Try to enable MSS. */ ret = chsc_enable_facility(CHSC_SDA_OC_MSS); if (ret) max_ssid = 0; else /* Success. 
*/ max_ssid = __MAX_SSID; ret = slow_subchannel_init(); if (ret) goto out; ret = crw_register_handler(CRW_RSC_SCH, css_process_crw); if (ret) goto out; if ((ret = bus_register(&css_bus_type))) goto out; /* Setup css structure. */ for (i = 0; i <= __MAX_CSSID; i++) { struct channel_subsystem *css; css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL); if (!css) { ret = -ENOMEM; goto out_unregister; } channel_subsystems[i] = css; ret = setup_css(i); if (ret) { kfree(channel_subsystems[i]); goto out_unregister; } ret = device_register(&css->device); if (ret) { put_device(&css->device); goto out_unregister; } if (css_chsc_characteristics.secm) { ret = device_create_file(&css->device, &dev_attr_cm_enable); if (ret) goto out_device; } ret = device_register(&css->pseudo_subchannel->dev); if (ret) { put_device(&css->pseudo_subchannel->dev); goto out_file; } } ret = register_reboot_notifier(&css_reboot_notifier); if (ret) goto out_unregister; ret = register_pm_notifier(&css_power_notifier); if (ret) { unregister_reboot_notifier(&css_reboot_notifier); goto out_unregister; } css_init_done = 1; /* Enable default isc for I/O subchannels. 
*/ isc_register(IO_SCH_ISC); return 0; out_file: if (css_chsc_characteristics.secm) device_remove_file(&channel_subsystems[i]->device, &dev_attr_cm_enable); out_device: device_unregister(&channel_subsystems[i]->device); out_unregister: while (i > 0) { struct channel_subsystem *css; i--; css = channel_subsystems[i]; device_unregister(&css->pseudo_subchannel->dev); css->pseudo_subchannel = NULL; if (css_chsc_characteristics.secm) device_remove_file(&css->device, &dev_attr_cm_enable); device_unregister(&css->device); } bus_unregister(&css_bus_type); out: crw_unregister_handler(CRW_RSC_SCH); idset_free(slow_subchannel_set); chsc_init_cleanup(); pr_alert("The CSS device driver initialization failed with " "errno=%d\n", ret); return ret; } static void __init css_bus_cleanup(void) { struct channel_subsystem *css; int i; for (i = 0; i <= __MAX_CSSID; i++) { css = channel_subsystems[i]; device_unregister(&css->pseudo_subchannel->dev); css->pseudo_subchannel = NULL; if (css_chsc_characteristics.secm) device_remove_file(&css->device, &dev_attr_cm_enable); device_unregister(&css->device); } bus_unregister(&css_bus_type); crw_unregister_handler(CRW_RSC_SCH); idset_free(slow_subchannel_set); chsc_init_cleanup(); isc_unregister(IO_SCH_ISC); } static int __init channel_subsystem_init(void) { int ret; ret = css_bus_init(); if (ret) return ret; cio_work_q = create_singlethread_workqueue("cio"); if (!cio_work_q) { ret = -ENOMEM; goto out_bus; } ret = io_subchannel_init(); if (ret) goto out_wq; return ret; out_wq: destroy_workqueue(cio_work_q); out_bus: css_bus_cleanup(); return ret; } subsys_initcall(channel_subsystem_init); static int css_settle(struct device_driver *drv, void *unused) { struct css_driver *cssdrv = to_cssdriver(drv); if (cssdrv->settle) return cssdrv->settle(); return 0; } int css_complete_work(void) { int ret; /* Wait for the evaluation of subchannels to finish. 
*/ ret = wait_event_interruptible(css_eval_wq, atomic_read(&css_eval_scheduled) == 0); if (ret) return -EINTR; flush_workqueue(cio_work_q); /* Wait for the subchannel type specific initialization to finish */ return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle); } /* * Wait for the initialization of devices to finish, to make sure we are * done with our setup if the search for the root device starts. */ static int __init channel_subsystem_init_sync(void) { /* Start initial subchannel evaluation. */ css_schedule_eval_all(); css_complete_work(); return 0; } subsys_initcall_sync(channel_subsystem_init_sync); void channel_subsystem_reinit(void) { struct channel_path *chp; struct chp_id chpid; chsc_enable_facility(CHSC_SDA_OC_MSS); chp_id_for_each(&chpid) { chp = chpid_to_chp(chpid); if (!chp) continue; chsc_determine_base_channel_path_desc(chpid, &chp->desc); } } #ifdef CONFIG_PROC_FS static ssize_t cio_settle_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { int ret; /* Handle pending CRW's. */ crw_wait_for_channel_report(); ret = css_complete_work(); return ret ? 
ret : count; } static const struct file_operations cio_settle_proc_fops = { .open = nonseekable_open, .write = cio_settle_write, .llseek = no_llseek, }; static int __init cio_settle_init(void) { struct proc_dir_entry *entry; entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_fops); if (!entry) return -ENOMEM; return 0; } device_initcall(cio_settle_init); #endif /*CONFIG_PROC_FS*/ int sch_is_pseudo_sch(struct subchannel *sch) { return sch == to_css(sch->dev.parent)->pseudo_subchannel; } static int css_bus_match(struct device *dev, struct device_driver *drv) { struct subchannel *sch = to_subchannel(dev); struct css_driver *driver = to_cssdriver(drv); struct css_device_id *id; for (id = driver->subchannel_type; id->match_flags; id++) { if (sch->st == id->type) return 1; } return 0; } static int css_probe(struct device *dev) { struct subchannel *sch; int ret; sch = to_subchannel(dev); sch->driver = to_cssdriver(dev->driver); ret = sch->driver->probe ? sch->driver->probe(sch) : 0; if (ret) sch->driver = NULL; return ret; } static int css_remove(struct device *dev) { struct subchannel *sch; int ret; sch = to_subchannel(dev); ret = sch->driver->remove ? sch->driver->remove(sch) : 0; sch->driver = NULL; return ret; } static void css_shutdown(struct device *dev) { struct subchannel *sch; sch = to_subchannel(dev); if (sch->driver && sch->driver->shutdown) sch->driver->shutdown(sch); } static int css_uevent(struct device *dev, struct kobj_uevent_env *env) { struct subchannel *sch = to_subchannel(dev); int ret; ret = add_uevent_var(env, "ST=%01X", sch->st); if (ret) return ret; ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st); return ret; } static int css_pm_prepare(struct device *dev) { struct subchannel *sch = to_subchannel(dev); struct css_driver *drv; if (mutex_is_locked(&sch->reg_mutex)) return -EAGAIN; if (!sch->dev.driver) return 0; drv = to_cssdriver(sch->dev.driver); /* Notify drivers that they may not register children. */ return drv->prepare ? 
drv->prepare(sch) : 0; } static void css_pm_complete(struct device *dev) { struct subchannel *sch = to_subchannel(dev); struct css_driver *drv; if (!sch->dev.driver) return; drv = to_cssdriver(sch->dev.driver); if (drv->complete) drv->complete(sch); } static int css_pm_freeze(struct device *dev) { struct subchannel *sch = to_subchannel(dev); struct css_driver *drv; if (!sch->dev.driver) return 0; drv = to_cssdriver(sch->dev.driver); return drv->freeze ? drv->freeze(sch) : 0; } static int css_pm_thaw(struct device *dev) { struct subchannel *sch = to_subchannel(dev); struct css_driver *drv; if (!sch->dev.driver) return 0; drv = to_cssdriver(sch->dev.driver); return drv->thaw ? drv->thaw(sch) : 0; } static int css_pm_restore(struct device *dev) { struct subchannel *sch = to_subchannel(dev); struct css_driver *drv; css_update_ssd_info(sch); if (!sch->dev.driver) return 0; drv = to_cssdriver(sch->dev.driver); return drv->restore ? drv->restore(sch) : 0; } static const struct dev_pm_ops css_pm_ops = { .prepare = css_pm_prepare, .complete = css_pm_complete, .freeze = css_pm_freeze, .thaw = css_pm_thaw, .restore = css_pm_restore, }; static struct bus_type css_bus_type = { .name = "css", .match = css_bus_match, .probe = css_probe, .remove = css_remove, .shutdown = css_shutdown, .uevent = css_uevent, .pm = &css_pm_ops, }; /** * css_driver_register - register a css driver * @cdrv: css driver to register * * This is mainly a wrapper around driver_register that sets name * and bus_type in the embedded struct device_driver correctly. */ int css_driver_register(struct css_driver *cdrv) { cdrv->drv.bus = &css_bus_type; return driver_register(&cdrv->drv); } EXPORT_SYMBOL_GPL(css_driver_register); /** * css_driver_unregister - unregister a css driver * @cdrv: css driver to unregister * * This is a wrapper around driver_unregister. */ void css_driver_unregister(struct css_driver *cdrv) { driver_unregister(&cdrv->drv); } EXPORT_SYMBOL_GPL(css_driver_unregister); MODULE_LICENSE("GPL");
gpl-2.0
imoseyon/leanKernel-note3
arch/arm/mach-imx/cpu-imx27.c
5163
1919
/* * Copyright 2007 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright 2008 Juergen Beisert, kernel@pengutronix.de * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. */ /* * i.MX27 specific CPU detection code */ #include <linux/io.h> #include <linux/module.h> #include <mach/hardware.h> static int mx27_cpu_rev = -1; static int mx27_cpu_partnumber; #define SYS_CHIP_ID 0x00 /* The offset of CHIP ID register */ static int mx27_read_cpu_rev(void) { u32 val; /* * now we have access to the IO registers. As we need * the silicon revision very early we read it here to * avoid any further hooks */ val = __raw_readl(MX27_IO_ADDRESS(MX27_SYSCTRL_BASE_ADDR + SYS_CHIP_ID)); mx27_cpu_partnumber = (int)((val >> 12) & 0xFFFF); switch (val >> 28) { case 0: return IMX_CHIP_REVISION_1_0; case 1: return IMX_CHIP_REVISION_2_0; case 2: return IMX_CHIP_REVISION_2_1; default: return IMX_CHIP_REVISION_UNKNOWN; } } /* * Returns: * the silicon revision of the cpu * -EINVAL - not a mx27 */ int mx27_revision(void) { if (mx27_cpu_rev == -1) mx27_cpu_rev = mx27_read_cpu_rev(); if (mx27_cpu_partnumber != 0x8821) return -EINVAL; return mx27_cpu_rev; } EXPORT_SYMBOL(mx27_revision);
gpl-2.0
Radium-Devices/Radium_taoshan
arch/powerpc/boot/mpc52xx-psc.c
13867
1467
/* * MPC5200 PSC serial console support. * * Author: Grant Likely <grant.likely@secretlab.ca> * * Copyright (c) 2007 Secret Lab Technologies Ltd. * Copyright (c) 2007 Freescale Semiconductor, Inc. * * It is assumed that the firmware (or the platform file) has already set * up the port. */ #include "types.h" #include "io.h" #include "ops.h" /* Programmable Serial Controller (PSC) status register bits */ #define MPC52xx_PSC_SR 0x04 #define MPC52xx_PSC_SR_RXRDY 0x0100 #define MPC52xx_PSC_SR_RXFULL 0x0200 #define MPC52xx_PSC_SR_TXRDY 0x0400 #define MPC52xx_PSC_SR_TXEMP 0x0800 #define MPC52xx_PSC_BUFFER 0x0C static void *psc; static int psc_open(void) { /* Assume the firmware has already configured the PSC into * uart mode */ return 0; } static void psc_putc(unsigned char c) { while (!(in_be16(psc + MPC52xx_PSC_SR) & MPC52xx_PSC_SR_TXRDY)) ; out_8(psc + MPC52xx_PSC_BUFFER, c); } static unsigned char psc_tstc(void) { return (in_be16(psc + MPC52xx_PSC_SR) & MPC52xx_PSC_SR_RXRDY) != 0; } static unsigned char psc_getc(void) { while (!(in_be16(psc + MPC52xx_PSC_SR) & MPC52xx_PSC_SR_RXRDY)) ; return in_8(psc + MPC52xx_PSC_BUFFER); } int mpc5200_psc_console_init(void *devp, struct serial_console_data *scdp) { /* Get the base address of the psc registers */ if (dt_get_virtual_reg(devp, &psc, 1) < 1) return -1; scdp->open = psc_open; scdp->putc = psc_putc; scdp->getc = psc_getc; scdp->tstc = psc_tstc; return 0; }
gpl-2.0
jkkj93/FREYA-LIVE-LIBRARY-OPTIMIZER-FOR-ANDROID
ffmpeg/libavcodec/targa.c
44
10128
/* * Targa (.tga) image decoder * Copyright (c) 2006 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/intreadwrite.h" #include "libavutil/imgutils.h" #include "avcodec.h" #include "bytestream.h" #include "internal.h" #include "targa.h" typedef struct TargaContext { GetByteContext gb; } TargaContext; static uint8_t *advance_line(uint8_t *start, uint8_t *line, int stride, int *y, int h, int interleave) { *y += interleave; if (*y < h) { return line + interleave * stride; } else { *y = (*y + 1) & (interleave - 1); if (*y && *y < h) { return start + *y * stride; } else { return NULL; } } } static int targa_decode_rle(AVCodecContext *avctx, TargaContext *s, uint8_t *start, int w, int h, int stride, int bpp, int interleave) { int x, y; int depth = (bpp + 1) >> 3; int type, count; uint8_t *line = start; uint8_t *dst = line; x = y = count = 0; while (dst) { if (bytestream2_get_bytes_left(&s->gb) <= 0) { av_log(avctx, AV_LOG_ERROR, "Ran ouf of data before end-of-image\n"); return AVERROR_INVALIDDATA; } type = bytestream2_get_byteu(&s->gb); count = (type & 0x7F) + 1; type &= 0x80; if (!type) { do { int n = FFMIN(count, w - x); bytestream2_get_buffer(&s->gb, dst, n * depth); count -= n; dst += n * depth; x += n; if (x == w) { x = 0; dst = line = 
advance_line(start, line, stride, &y, h, interleave); } } while (dst && count > 0); } else { uint8_t tmp[4]; bytestream2_get_buffer(&s->gb, tmp, depth); do { int n = FFMIN(count, w - x); count -= n; x += n; do { memcpy(dst, tmp, depth); dst += depth; } while (--n); if (x == w) { x = 0; dst = line = advance_line(start, line, stride, &y, h, interleave); } } while (dst && count > 0); } } if (count) { av_log(avctx, AV_LOG_ERROR, "Packet went out of bounds\n"); return AVERROR_INVALIDDATA; } return 0; } static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { TargaContext * const s = avctx->priv_data; AVFrame * const p = data; uint8_t *dst; int stride; int idlen, pal, compr, y, w, h, bpp, flags, ret; int first_clr, colors, csize; int interleave; bytestream2_init(&s->gb, avpkt->data, avpkt->size); /* parse image header */ idlen = bytestream2_get_byte(&s->gb); pal = bytestream2_get_byte(&s->gb); compr = bytestream2_get_byte(&s->gb); first_clr = bytestream2_get_le16(&s->gb); colors = bytestream2_get_le16(&s->gb); csize = bytestream2_get_byte(&s->gb); bytestream2_skip(&s->gb, 4); /* 2: x, 2: y */ w = bytestream2_get_le16(&s->gb); h = bytestream2_get_le16(&s->gb); bpp = bytestream2_get_byte(&s->gb); if (bytestream2_get_bytes_left(&s->gb) <= idlen) { av_log(avctx, AV_LOG_ERROR, "Not enough data to read header\n"); return AVERROR_INVALIDDATA; } flags = bytestream2_get_byte(&s->gb); if (!pal && (first_clr || colors || csize)) { av_log(avctx, AV_LOG_WARNING, "File without colormap has colormap information set.\n"); // specification says we should ignore those value in this case first_clr = colors = csize = 0; } // skip identifier if any bytestream2_skip(&s->gb, idlen); switch (bpp) { case 8: avctx->pix_fmt = ((compr & (~TGA_RLE)) == TGA_BW) ? 
AV_PIX_FMT_GRAY8 : AV_PIX_FMT_PAL8; break; case 15: case 16: avctx->pix_fmt = AV_PIX_FMT_RGB555LE; break; case 24: avctx->pix_fmt = AV_PIX_FMT_BGR24; break; case 32: avctx->pix_fmt = AV_PIX_FMT_BGRA; break; default: av_log(avctx, AV_LOG_ERROR, "Bit depth %i is not supported\n", bpp); return AVERROR_INVALIDDATA; } if (colors && (colors + first_clr) > 256) { av_log(avctx, AV_LOG_ERROR, "Incorrect palette: %i colors with offset %i\n", colors, first_clr); return AVERROR_INVALIDDATA; } if ((ret = ff_set_dimensions(avctx, w, h)) < 0) return ret; if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; p->pict_type = AV_PICTURE_TYPE_I; if (flags & TGA_TOPTOBOTTOM) { dst = p->data[0]; stride = p->linesize[0]; } else { //image is upside-down dst = p->data[0] + p->linesize[0] * (h - 1); stride = -p->linesize[0]; } interleave = flags & TGA_INTERLEAVE2 ? 2 : flags & TGA_INTERLEAVE4 ? 4 : 1; if (colors) { int pal_size, pal_sample_size; switch (csize) { case 32: pal_sample_size = 4; break; case 24: pal_sample_size = 3; break; case 16: case 15: pal_sample_size = 2; break; default: av_log(avctx, AV_LOG_ERROR, "Palette entry size %i bits is not supported\n", csize); return AVERROR_INVALIDDATA; } pal_size = colors * pal_sample_size; if (avctx->pix_fmt != AV_PIX_FMT_PAL8) //should not occur but skip palette anyway bytestream2_skip(&s->gb, pal_size); else { int t; uint32_t *pal = ((uint32_t *)p->data[1]) + first_clr; if (bytestream2_get_bytes_left(&s->gb) < pal_size) { av_log(avctx, AV_LOG_ERROR, "Not enough data to read palette\n"); return AVERROR_INVALIDDATA; } switch (pal_sample_size) { case 4: for (t = 0; t < colors; t++) *pal++ = bytestream2_get_le32u(&s->gb); break; case 3: /* RGB24 */ for (t = 0; t < colors; t++) *pal++ = (0xffU<<24) | bytestream2_get_le24u(&s->gb); break; case 2: /* RGB555 */ for (t = 0; t < colors; t++) { uint32_t v = bytestream2_get_le16u(&s->gb); v = ((v & 0x7C00) << 9) | ((v & 0x03E0) << 6) | ((v & 0x001F) << 3); /* left bit replication */ v |= (v & 
0xE0E0E0U) >> 5; *pal++ = (0xffU<<24) | v; } break; } p->palette_has_changed = 1; } } if ((compr & (~TGA_RLE)) == TGA_NODATA) { memset(p->data[0], 0, p->linesize[0] * h); } else { if (compr & TGA_RLE) { int res = targa_decode_rle(avctx, s, dst, w, h, stride, bpp, interleave); if (res < 0) return res; } else { size_t img_size = w * ((bpp + 1) >> 3); uint8_t *line; if (bytestream2_get_bytes_left(&s->gb) < img_size * h) { av_log(avctx, AV_LOG_ERROR, "Not enough data available for image\n"); return AVERROR_INVALIDDATA; } line = dst; y = 0; do { bytestream2_get_buffer(&s->gb, line, img_size); line = advance_line(dst, line, stride, &y, h, interleave); } while (line); } } if (flags & TGA_RIGHTTOLEFT) { // right-to-left, needs horizontal flip int x; for (y = 0; y < h; y++) { void *line = &p->data[0][y * p->linesize[0]]; for (x = 0; x < w >> 1; x++) { switch (bpp) { case 32: FFSWAP(uint32_t, ((uint32_t *)line)[x], ((uint32_t *)line)[w - x - 1]); break; case 24: FFSWAP(uint8_t, ((uint8_t *)line)[3 * x ], ((uint8_t *)line)[3 * w - 3 * x - 3]); FFSWAP(uint8_t, ((uint8_t *)line)[3 * x + 1], ((uint8_t *)line)[3 * w - 3 * x - 2]); FFSWAP(uint8_t, ((uint8_t *)line)[3 * x + 2], ((uint8_t *)line)[3 * w - 3 * x - 1]); break; case 16: FFSWAP(uint16_t, ((uint16_t *)line)[x], ((uint16_t *)line)[w - x - 1]); break; case 8: FFSWAP(uint8_t, ((uint8_t *)line)[x], ((uint8_t *)line)[w - x - 1]); } } } } *got_frame = 1; return avpkt->size; } AVCodec ff_targa_decoder = { .name = "targa", .long_name = NULL_IF_CONFIG_SMALL("Truevision Targa image"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_TARGA, .priv_data_size = sizeof(TargaContext), .decode = decode_frame, .capabilities = AV_CODEC_CAP_DR1, };
gpl-2.0
x75/paparazzi
sw/airborne/firmwares/wind_tunnel/wt_baro.c
44
3061
/* * Copyright (C) 2007 ENAC * * This file is part of paparazzi. * * paparazzi is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * paparazzi is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with paparazzi; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * */ /** \file * \brief * */ #include "wt_baro.h" #include "spi.h" uint32_t wt_baro_pressure; bool_t wt_baro_available; static bool_t status_read_data; #define CMD_INIT_1 0x24 // set chanel AIN1/AIN2 and next operation on filter high #define CMD_INIT_2 0xCF // set unipolar mode, 24 bits, no boost, filter high #define CMD_INIT_3 0x34 // set chanel AIN1/AIN2 and next operation on filter low #define CMD_INIT_4 0x00 // set low filter #define CMD_INIT_5 0x14 // set chanel AIN1/AIN2 and next operation on mode register #define CMD_INIT_6 0x20 // set gain to 1, burnout current off, no filter sync, self calibration #define CMD_MEASUREMENT 0x54 // set chanel AIN1/AIN2 and next operation on data register uint8_t buf_input[3]; uint8_t buf_output[3]; #define Uint24(buf_input) (((uint32_t)buf_input[0]) << 16 |((uint16_t)buf_input[1]) << 8 | buf_input[2]) static void send1_on_spi(uint8_t d) { buf_output[0] = d; spi_buffer_length = 1; spi_buffer_input = (uint8_t *)&buf_input; spi_buffer_output = (uint8_t *)&buf_output; SpiStart(); } void wt_baro_init(void) { wt_baro_pressure = 0; send1_on_spi(CMD_INIT_1); send1_on_spi(CMD_INIT_2); send1_on_spi(CMD_INIT_3); send1_on_spi(CMD_INIT_4); send1_on_spi(CMD_INIT_5); send1_on_spi(CMD_INIT_6); 
status_read_data = FALSE; wt_baro_available = FALSE; } void wt_baro_periodic(void) { if (!SpiCheckAvailable()) { SpiOverRun(); return; } if (status_read_data) { buf_output[0] = buf_output[1] = buf_output[2] = 0; spi_buffer_length = 3; } else { buf_output[0] = CMD_MEASUREMENT; spi_buffer_length = 1; } spi_buffer_input = (uint8_t *)&buf_input; spi_buffer_output = (uint8_t *)&buf_output; //if (status_read_data) // SpiSetCPHA(); //else // SpiClrCPHA(); SpiStart(); } static uint32_t data; /* Handle the SPI message, i.e. store the received values in variables */ void wt_baro_event(void) { if (status_read_data) { data = Uint24(buf_input); /* Compute pressure */ wt_baro_pressure = data; wt_baro_available = TRUE; } /* else nothing to read */ status_read_data = !status_read_data; //if (!status_read_data) { // /* Ask next conversion now */ // baro_MS5534A_send(); //} }
gpl-2.0
kprkpr/kernel-e400
drivers/ata/libata-eh.c
44
106920
/* * libata-eh.c - libata error handling * * Maintained by: Jeff Garzik <jgarzik@pobox.com> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * * Copyright 2006 Tejun Heo <htejun@gmail.com> * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, * USA. * * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/DocBook/libata.* * * Hardware documentation available from http://www.t13.org/ and * http://www.sata-io.org/ * */ #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/pci.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include "../scsi/scsi_transport_api.h" #include <linux/libata.h> #include "libata.h" enum { /* speed down verdicts */ ATA_EH_SPDN_NCQ_OFF = (1 << 0), ATA_EH_SPDN_SPEED_DOWN = (1 << 1), ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2), ATA_EH_SPDN_KEEP_ERRORS = (1 << 3), /* error flags */ ATA_EFLAG_IS_IO = (1 << 0), ATA_EFLAG_DUBIOUS_XFER = (1 << 1), ATA_EFLAG_OLD_ER = (1 << 31), /* error categories */ ATA_ECAT_NONE = 0, ATA_ECAT_ATA_BUS = 1, ATA_ECAT_TOUT_HSM = 2, ATA_ECAT_UNK_DEV = 3, ATA_ECAT_DUBIOUS_NONE = 4, ATA_ECAT_DUBIOUS_ATA_BUS = 5, ATA_ECAT_DUBIOUS_TOUT_HSM = 6, ATA_ECAT_DUBIOUS_UNK_DEV = 7, ATA_ECAT_NR = 8, ATA_EH_CMD_DFL_TIMEOUT = 5000, /* always put at least 
this amount of time between resets */ ATA_EH_RESET_COOL_DOWN = 5000, /* Waiting in ->prereset can never be reliable. It's * sometimes nice to wait there but it can't be depended upon; * otherwise, we wouldn't be resetting. Just give it enough * time for most drives to spin up. */ ATA_EH_PRERESET_TIMEOUT = 10000, ATA_EH_FASTDRAIN_INTERVAL = 3000, ATA_EH_UA_TRIES = 5, /* probe speed down parameters, see ata_eh_schedule_probe() */ ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */ ATA_EH_PROBE_TRIALS = 2, }; /* The following table determines how we sequence resets. Each entry * represents timeout for that try. The first try can be soft or * hardreset. All others are hardreset if available. In most cases * the first reset w/ 10sec timeout should succeed. Following entries * are mostly for error handling, hotplug and retarded devices. */ static const unsigned long ata_eh_reset_timeouts[] = { 10000, /* most drives spin up by 10sec */ 10000, /* > 99% working drives spin up before 20sec */ 35000, /* give > 30 secs of idleness for retarded devices */ 5000, /* and sweet one last chance */ ULONG_MAX, /* > 1 min has elapsed, give up */ }; static const unsigned long ata_eh_identify_timeouts[] = { 5000, /* covers > 99% of successes and not too boring on failures */ 10000, /* combined time till here is enough even for media access */ 30000, /* for true idiots */ ULONG_MAX, }; static const unsigned long ata_eh_flush_timeouts[] = { 15000, /* be generous with flush */ 15000, /* ditto */ 30000, /* and even more generous */ ULONG_MAX, }; static const unsigned long ata_eh_other_timeouts[] = { 5000, /* same rationale as identify timeout */ 10000, /* ditto */ /* but no merciful 30sec for other commands, it just isn't worth it */ ULONG_MAX, }; struct ata_eh_cmd_timeout_ent { const u8 *commands; const unsigned long *timeouts; }; /* The following table determines timeouts to use for EH internal * commands. 
Each table entry is a command class and matches the * commands the entry applies to and the timeout table to use. * * On the retry after a command timed out, the next timeout value from * the table is used. If the table doesn't contain further entries, * the last value is used. * * ehc->cmd_timeout_idx keeps track of which timeout to use per * command class, so if SET_FEATURES times out on the first try, the * next try will use the second timeout value only for that class. */ #define CMDS(cmds...) (const u8 []){ cmds, 0 } static const struct ata_eh_cmd_timeout_ent ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI), .timeouts = ata_eh_identify_timeouts, }, { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT), .timeouts = ata_eh_other_timeouts, }, { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT), .timeouts = ata_eh_other_timeouts, }, { .commands = CMDS(ATA_CMD_SET_FEATURES), .timeouts = ata_eh_other_timeouts, }, { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS), .timeouts = ata_eh_other_timeouts, }, { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT), .timeouts = ata_eh_flush_timeouts }, }; #undef CMDS static void __ata_port_freeze(struct ata_port *ap); #ifdef CONFIG_PM static void ata_eh_handle_port_suspend(struct ata_port *ap); static void ata_eh_handle_port_resume(struct ata_port *ap); #else /* CONFIG_PM */ static void ata_eh_handle_port_suspend(struct ata_port *ap) { } static void ata_eh_handle_port_resume(struct ata_port *ap) { } #endif /* CONFIG_PM */ static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, va_list args) { ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, ATA_EH_DESC_LEN - ehi->desc_len, fmt, args); } /** * __ata_ehi_push_desc - push error description without adding separator * @ehi: target EHI * @fmt: printf format string * * Format string according to @fmt and append it to @ehi->desc. 
* * LOCKING: * spin_lock_irqsave(host lock) */ void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) { va_list args; va_start(args, fmt); __ata_ehi_pushv_desc(ehi, fmt, args); va_end(args); } /** * ata_ehi_push_desc - push error description with separator * @ehi: target EHI * @fmt: printf format string * * Format string according to @fmt and append it to @ehi->desc. * If @ehi->desc is not empty, ", " is added in-between. * * LOCKING: * spin_lock_irqsave(host lock) */ void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) { va_list args; if (ehi->desc_len) __ata_ehi_push_desc(ehi, ", "); va_start(args, fmt); __ata_ehi_pushv_desc(ehi, fmt, args); va_end(args); } /** * ata_ehi_clear_desc - clean error description * @ehi: target EHI * * Clear @ehi->desc. * * LOCKING: * spin_lock_irqsave(host lock) */ void ata_ehi_clear_desc(struct ata_eh_info *ehi) { ehi->desc[0] = '\0'; ehi->desc_len = 0; } /** * ata_port_desc - append port description * @ap: target ATA port * @fmt: printf format string * * Format string according to @fmt and append it to port * description. If port description is not empty, " " is added * in-between. This function is to be used while initializing * ata_host. The description is printed on host registration. * * LOCKING: * None. */ void ata_port_desc(struct ata_port *ap, const char *fmt, ...) { va_list args; WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); if (ap->link.eh_info.desc_len) __ata_ehi_push_desc(&ap->link.eh_info, " "); va_start(args, fmt); __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); va_end(args); } #ifdef CONFIG_PCI /** * ata_port_pbar_desc - append PCI BAR description * @ap: target ATA port * @bar: target PCI BAR * @offset: offset into PCI BAR * @name: name of the area * * If @offset is negative, this function formats a string which * contains the name, address, size and type of the BAR and * appends it to the port description. 
If @offset is zero or * positive, only name and offsetted address is appended. * * LOCKING: * None. */ void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, const char *name) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); char *type = ""; unsigned long long start, len; if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) type = "m"; else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) type = "i"; start = (unsigned long long)pci_resource_start(pdev, bar); len = (unsigned long long)pci_resource_len(pdev, bar); if (offset < 0) ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start); else ata_port_desc(ap, "%s 0x%llx", name, start + (unsigned long long)offset); } #endif /* CONFIG_PCI */ static int ata_lookup_timeout_table(u8 cmd) { int i; for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) { const u8 *cur; for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++) if (*cur == cmd) return i; } return -1; } /** * ata_internal_cmd_timeout - determine timeout for an internal command * @dev: target device * @cmd: internal command to be issued * * Determine timeout for internal command @cmd for @dev. * * LOCKING: * EH context. * * RETURNS: * Determined timeout. */ unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd) { struct ata_eh_context *ehc = &dev->link->eh_context; int ent = ata_lookup_timeout_table(cmd); int idx; if (ent < 0) return ATA_EH_CMD_DFL_TIMEOUT; idx = ehc->cmd_timeout_idx[dev->devno][ent]; return ata_eh_cmd_timeout_table[ent].timeouts[idx]; } /** * ata_internal_cmd_timed_out - notification for internal command timeout * @dev: target device * @cmd: internal command which timed out * * Notify EH that internal command @cmd for @dev timed out. This * function should be called only for commands whose timeouts are * determined using ata_internal_cmd_timeout(). * * LOCKING: * EH context. 
*/ void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd) { struct ata_eh_context *ehc = &dev->link->eh_context; int ent = ata_lookup_timeout_table(cmd); int idx; if (ent < 0) return; idx = ehc->cmd_timeout_idx[dev->devno][ent]; if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX) ehc->cmd_timeout_idx[dev->devno][ent]++; } static void ata_ering_record(struct ata_ering *ering, unsigned int eflags, unsigned int err_mask) { struct ata_ering_entry *ent; WARN_ON(!err_mask); ering->cursor++; ering->cursor %= ATA_ERING_SIZE; ent = &ering->ring[ering->cursor]; ent->eflags = eflags; ent->err_mask = err_mask; ent->timestamp = get_jiffies_64(); } static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering) { struct ata_ering_entry *ent = &ering->ring[ering->cursor]; if (ent->err_mask) return ent; return NULL; } int ata_ering_map(struct ata_ering *ering, int (*map_fn)(struct ata_ering_entry *, void *), void *arg) { int idx, rc = 0; struct ata_ering_entry *ent; idx = ering->cursor; do { ent = &ering->ring[idx]; if (!ent->err_mask) break; rc = map_fn(ent, arg); if (rc) break; idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE; } while (idx != ering->cursor); return rc; } int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg) { ent->eflags |= ATA_EFLAG_OLD_ER; return 0; } static void ata_ering_clear(struct ata_ering *ering) { ata_ering_map(ering, ata_ering_clear_cb, NULL); } static unsigned int ata_eh_dev_action(struct ata_device *dev) { struct ata_eh_context *ehc = &dev->link->eh_context; return ehc->i.action | ehc->i.dev_action[dev->devno]; } static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev, struct ata_eh_info *ehi, unsigned int action) { struct ata_device *tdev; if (!dev) { ehi->action &= ~action; ata_for_each_dev(tdev, link, ALL) ehi->dev_action[tdev->devno] &= ~action; } else { /* doesn't make sense for port-wide EH actions */ WARN_ON(!(action & ATA_EH_PERDEV_MASK)); /* break ehi->action into 
ehi->dev_action */ if (ehi->action & action) { ata_for_each_dev(tdev, link, ALL) ehi->dev_action[tdev->devno] |= ehi->action & action; ehi->action &= ~action; } /* turn off the specified per-dev action */ ehi->dev_action[dev->devno] &= ~action; } } /** * ata_eh_acquire - acquire EH ownership * @ap: ATA port to acquire EH ownership for * * Acquire EH ownership for @ap. This is the basic exclusion * mechanism for ports sharing a host. Only one port hanging off * the same host can claim the ownership of EH. * * LOCKING: * EH context. */ void ata_eh_acquire(struct ata_port *ap) { mutex_lock(&ap->host->eh_mutex); WARN_ON_ONCE(ap->host->eh_owner); ap->host->eh_owner = current; } /** * ata_eh_release - release EH ownership * @ap: ATA port to release EH ownership for * * Release EH ownership for @ap if the caller. The caller must * have acquired EH ownership using ata_eh_acquire() previously. * * LOCKING: * EH context. */ void ata_eh_release(struct ata_port *ap) { WARN_ON_ONCE(ap->host->eh_owner != current); ap->host->eh_owner = NULL; mutex_unlock(&ap->host->eh_mutex); } /** * ata_scsi_timed_out - SCSI layer time out callback * @cmd: timed out SCSI command * * Handles SCSI layer timeout. We race with normal completion of * the qc for @cmd. If the qc is already gone, we lose and let * the scsi command finish (EH_HANDLED). Otherwise, the qc has * timed out and EH should be invoked. Prevent ata_qc_complete() * from finishing it by setting EH_SCHEDULED and return * EH_NOT_HANDLED. * * TODO: kill this function once old EH is gone. 
* * LOCKING: * Called from timer context * * RETURNS: * EH_HANDLED or EH_NOT_HANDLED */ enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) { struct Scsi_Host *host = cmd->device->host; struct ata_port *ap = ata_shost_to_port(host); unsigned long flags; struct ata_queued_cmd *qc; enum blk_eh_timer_return ret; DPRINTK("ENTER\n"); if (ap->ops->error_handler) { ret = BLK_EH_NOT_HANDLED; goto out; } ret = BLK_EH_HANDLED; spin_lock_irqsave(ap->lock, flags); qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc) { WARN_ON(qc->scsicmd != cmd); qc->flags |= ATA_QCFLAG_EH_SCHEDULED; qc->err_mask |= AC_ERR_TIMEOUT; ret = BLK_EH_NOT_HANDLED; } spin_unlock_irqrestore(ap->lock, flags); out: DPRINTK("EXIT, ret=%d\n", ret); return ret; } static void ata_eh_unload(struct ata_port *ap) { struct ata_link *link; struct ata_device *dev; unsigned long flags; /* Restore SControl IPM and SPD for the next driver and * disable attached devices. */ ata_for_each_link(link, ap, PMP_FIRST) { sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0); ata_for_each_dev(dev, link, ALL) ata_dev_disable(dev); } /* freeze and set UNLOADED */ spin_lock_irqsave(ap->lock, flags); ata_port_freeze(ap); /* won't be thawed */ ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */ ap->pflags |= ATA_PFLAG_UNLOADED; spin_unlock_irqrestore(ap->lock, flags); } /** * ata_scsi_error - SCSI layer error handler callback * @host: SCSI host on which error occurred * * Handles SCSI-layer-thrown error events. * * LOCKING: * Inherited from SCSI layer (none, can sleep) * * RETURNS: * Zero. */ void ata_scsi_error(struct Scsi_Host *host) { struct ata_port *ap = ata_shost_to_port(host); int i; unsigned long flags; DPRINTK("ENTER\n"); /* make sure sff pio task is not running */ ata_sff_flush_pio_task(ap); /* synchronize with host lock and sort out timeouts */ /* For new EH, all qcs are finished in one of three ways - * normal completion, error completion, and SCSI timeout. 
* Both completions can race against SCSI timeout. When normal * completion wins, the qc never reaches EH. When error * completion wins, the qc has ATA_QCFLAG_FAILED set. * * When SCSI timeout wins, things are a bit more complex. * Normal or error completion can occur after the timeout but * before this point. In such cases, both types of * completions are honored. A scmd is determined to have * timed out iff its associated qc is active and not failed. */ if (ap->ops->error_handler) { struct scsi_cmnd *scmd, *tmp; int nr_timedout = 0; spin_lock_irqsave(ap->lock, flags); /* This must occur under the ap->lock as we don't want a polled recovery to race the real interrupt handler The lost_interrupt handler checks for any completed but non-notified command and completes much like an IRQ handler. We then fall into the error recovery code which will treat this as if normal completion won the race */ if (ap->ops->lost_interrupt) ap->ops->lost_interrupt(ap); list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) { struct ata_queued_cmd *qc; for (i = 0; i < ATA_MAX_QUEUE; i++) { qc = __ata_qc_from_tag(ap, i); if (qc->flags & ATA_QCFLAG_ACTIVE && qc->scsicmd == scmd) break; } if (i < ATA_MAX_QUEUE) { /* the scmd has an associated qc */ if (!(qc->flags & ATA_QCFLAG_FAILED)) { /* which hasn't failed yet, timeout */ qc->err_mask |= AC_ERR_TIMEOUT; qc->flags |= ATA_QCFLAG_FAILED; nr_timedout++; } } else { /* Normal completion occurred after * SCSI timeout but before this point. * Successfully complete it. */ scmd->retries = scmd->allowed; scsi_eh_finish_cmd(scmd, &ap->eh_done_q); } } /* If we have timed out qcs. They belong to EH from * this point but the state of the controller is * unknown. Freeze the port to make sure the IRQ * handler doesn't diddle with those qcs. This must * be done atomically w.r.t. setting QCFLAG_FAILED. 
*/ if (nr_timedout) __ata_port_freeze(ap); spin_unlock_irqrestore(ap->lock, flags); /* initialize eh_tries */ ap->eh_tries = ATA_EH_MAX_TRIES; } else spin_unlock_wait(ap->lock); /* If we timed raced normal completion and there is nothing to recover nr_timedout == 0 why exactly are we doing error recovery ? */ /* invoke error handler */ if (ap->ops->error_handler) { struct ata_link *link; /* acquire EH ownership */ ata_eh_acquire(ap); repeat: /* kill fast drain timer */ del_timer_sync(&ap->fastdrain_timer); /* process port resume request */ ata_eh_handle_port_resume(ap); /* fetch & clear EH info */ spin_lock_irqsave(ap->lock, flags); ata_for_each_link(link, ap, HOST_FIRST) { struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev; memset(&link->eh_context, 0, sizeof(link->eh_context)); link->eh_context.i = link->eh_info; memset(&link->eh_info, 0, sizeof(link->eh_info)); ata_for_each_dev(dev, link, ENABLED) { int devno = dev->devno; ehc->saved_xfer_mode[devno] = dev->xfer_mode; if (ata_ncq_enabled(dev)) ehc->saved_ncq_enabled |= 1 << devno; } } ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; ap->pflags &= ~ATA_PFLAG_EH_PENDING; ap->excl_link = NULL; /* don't maintain exclusion over EH */ spin_unlock_irqrestore(ap->lock, flags); /* invoke EH, skip if unloading or suspended */ if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED))) ap->ops->error_handler(ap); else { /* if unloading, commence suicide */ if ((ap->pflags & ATA_PFLAG_UNLOADING) && !(ap->pflags & ATA_PFLAG_UNLOADED)) ata_eh_unload(ap); ata_eh_finish(ap); } /* process port suspend request */ ata_eh_handle_port_suspend(ap); /* Exception might have happend after ->error_handler * recovered the port but before this point. Repeat * EH in such case. 
*/ spin_lock_irqsave(ap->lock, flags); if (ap->pflags & ATA_PFLAG_EH_PENDING) { if (--ap->eh_tries) { spin_unlock_irqrestore(ap->lock, flags); goto repeat; } ata_port_printk(ap, KERN_ERR, "EH pending after %d " "tries, giving up\n", ATA_EH_MAX_TRIES); ap->pflags &= ~ATA_PFLAG_EH_PENDING; } /* this run is complete, make sure EH info is clear */ ata_for_each_link(link, ap, HOST_FIRST) memset(&link->eh_info, 0, sizeof(link->eh_info)); /* Clear host_eh_scheduled while holding ap->lock such * that if exception occurs after this point but * before EH completion, SCSI midlayer will * re-initiate EH. */ host->host_eh_scheduled = 0; spin_unlock_irqrestore(ap->lock, flags); ata_eh_release(ap); } else { WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL); ap->ops->eng_timeout(ap); } /* finish or retry handled scmd's and clean up */ WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q)); scsi_eh_flush_done_q(&ap->eh_done_q); /* clean up */ spin_lock_irqsave(ap->lock, flags); if (ap->pflags & ATA_PFLAG_LOADING) ap->pflags &= ~ATA_PFLAG_LOADING; else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) schedule_delayed_work(&ap->hotplug_task, 0); if (ap->pflags & ATA_PFLAG_RECOVERED) ata_port_printk(ap, KERN_INFO, "EH complete\n"); ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED); /* tell wait_eh that we're done */ ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS; wake_up_all(&ap->eh_wait_q); spin_unlock_irqrestore(ap->lock, flags); DPRINTK("EXIT\n"); } /** * ata_port_wait_eh - Wait for the currently pending EH to complete * @ap: Port to wait EH for * * Wait until the currently pending EH is complete. * * LOCKING: * Kernel thread context (may sleep). 
*/
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	/* sleep until neither pending nor in-progress EH flags are set */
	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}

/* Number of in-flight (active) qcs on @ap.  The last tag is reserved for
 * the internal command and is excluded.  Caller holds ap->lock.
 */
static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

/* Fast-drain timer callback.  Fires while EH is pending to make sure
 * in-flight qcs keep draining; if no qc completed during the last
 * interval, all remaining qcs are timed out and the port is frozen so
 * EH can take over.  @arg is the struct ata_port pointer.
 */
void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 * @ap: target ATA port
 * @fastdrain: activate fast drain
 *
 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 * is non-zero and EH wasn't pending before.  Fast drain ensures
 * that EH kicks in in timely manner.
* * LOCKING: * spin_lock_irqsave(host lock) */ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain) { int cnt; /* already scheduled? */ if (ap->pflags & ATA_PFLAG_EH_PENDING) return; ap->pflags |= ATA_PFLAG_EH_PENDING; if (!fastdrain) return; /* do we have in-flight qcs? */ cnt = ata_eh_nr_in_flight(ap); if (!cnt) return; /* activate fast drain */ ap->fastdrain_cnt = cnt; ap->fastdrain_timer.expires = ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL); add_timer(&ap->fastdrain_timer); } /** * ata_qc_schedule_eh - schedule qc for error handling * @qc: command to schedule error handling for * * Schedule error handling for @qc. EH will kick in as soon as * other commands are drained. * * LOCKING: * spin_lock_irqsave(host lock) */ void ata_qc_schedule_eh(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct request_queue *q = qc->scsicmd->device->request_queue; unsigned long flags; WARN_ON(!ap->ops->error_handler); qc->flags |= ATA_QCFLAG_FAILED; ata_eh_set_pending(ap, 1); /* The following will fail if timeout has already expired. * ata_scsi_error() takes care of such scmds on EH entry. * Note that ATA_QCFLAG_FAILED is unconditionally set after * this function completes. */ spin_lock_irqsave(q->queue_lock, flags); blk_abort_request(qc->scsicmd->request); spin_unlock_irqrestore(q->queue_lock, flags); } /** * ata_port_schedule_eh - schedule error handling without a qc * @ap: ATA port to schedule EH for * * Schedule error handling for @ap. EH will kick in as soon as * all commands are drained. 
* * LOCKING: * spin_lock_irqsave(host lock) */ void ata_port_schedule_eh(struct ata_port *ap) { WARN_ON(!ap->ops->error_handler); if (ap->pflags & ATA_PFLAG_INITIALIZING) return; ata_eh_set_pending(ap, 1); scsi_schedule_eh(ap->scsi_host); DPRINTK("port EH scheduled\n"); } static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) { int tag, nr_aborted = 0; WARN_ON(!ap->ops->error_handler); /* we're gonna abort all commands, no need for fast drain */ ata_eh_set_pending(ap, 0); for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); if (qc && (!link || qc->dev->link == link)) { qc->flags |= ATA_QCFLAG_FAILED; ata_qc_complete(qc); nr_aborted++; } } if (!nr_aborted) ata_port_schedule_eh(ap); return nr_aborted; } /** * ata_link_abort - abort all qc's on the link * @link: ATA link to abort qc's for * * Abort all active qc's active on @link and schedule EH. * * LOCKING: * spin_lock_irqsave(host lock) * * RETURNS: * Number of aborted qc's. */ int ata_link_abort(struct ata_link *link) { return ata_do_link_abort(link->ap, link); } /** * ata_port_abort - abort all qc's on the port * @ap: ATA port to abort qc's for * * Abort all active qc's of @ap and schedule EH. * * LOCKING: * spin_lock_irqsave(host_set lock) * * RETURNS: * Number of aborted qc's. */ int ata_port_abort(struct ata_port *ap) { return ata_do_link_abort(ap, NULL); } /** * __ata_port_freeze - freeze port * @ap: ATA port to freeze * * This function is called when HSM violation or some other * condition disrupts normal operation of the port. Frozen port * is not allowed to perform any operation until the port is * thawed, which usually follows a successful reset. * * ap->ops->freeze() callback can be used for freezing the port * hardware-wise (e.g. mask interrupt and stop DMA engine). If a * port cannot be frozen hardware-wise, the interrupt handler * must ack and clear interrupts unconditionally while the port * is frozen. 
* * LOCKING: * spin_lock_irqsave(host lock) */ static void __ata_port_freeze(struct ata_port *ap) { WARN_ON(!ap->ops->error_handler); if (ap->ops->freeze) ap->ops->freeze(ap); ap->pflags |= ATA_PFLAG_FROZEN; DPRINTK("ata%u port frozen\n", ap->print_id); } /** * ata_port_freeze - abort & freeze port * @ap: ATA port to freeze * * Abort and freeze @ap. The freeze operation must be called * first, because some hardware requires special operations * before the taskfile registers are accessible. * * LOCKING: * spin_lock_irqsave(host lock) * * RETURNS: * Number of aborted commands. */ int ata_port_freeze(struct ata_port *ap) { int nr_aborted; WARN_ON(!ap->ops->error_handler); __ata_port_freeze(ap); nr_aborted = ata_port_abort(ap); return nr_aborted; } /** * sata_async_notification - SATA async notification handler * @ap: ATA port where async notification is received * * Handler to be called when async notification via SDB FIS is * received. This function schedules EH if necessary. * * LOCKING: * spin_lock_irqsave(host lock) * * RETURNS: * 1 if EH is scheduled, 0 otherwise. */ int sata_async_notification(struct ata_port *ap) { u32 sntf; int rc; if (!(ap->flags & ATA_FLAG_AN)) return 0; rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); if (rc == 0) sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); if (!sata_pmp_attached(ap) || rc) { /* PMP is not attached or SNTF is not available */ if (!sata_pmp_attached(ap)) { /* PMP is not attached. Check whether ATAPI * AN is configured. If so, notify media * change. */ struct ata_device *dev = ap->link.device; if ((dev->class == ATA_DEV_ATAPI) && (dev->flags & ATA_DFLAG_AN)) ata_scsi_media_change_notify(dev); return 0; } else { /* PMP is attached but SNTF is not available. * ATAPI async media change notification is * not used. The PMP must be reporting PHY * status change, schedule EH. 
*/ ata_port_schedule_eh(ap); return 1; } } else { /* PMP is attached and SNTF is available */ struct ata_link *link; /* check and notify ATAPI AN */ ata_for_each_link(link, ap, EDGE) { if (!(sntf & (1 << link->pmp))) continue; if ((link->device->class == ATA_DEV_ATAPI) && (link->device->flags & ATA_DFLAG_AN)) ata_scsi_media_change_notify(link->device); } /* If PMP is reporting that PHY status of some * downstream ports has changed, schedule EH. */ if (sntf & (1 << SATA_PMP_CTRL_PORT)) { ata_port_schedule_eh(ap); return 1; } return 0; } } /** * ata_eh_freeze_port - EH helper to freeze port * @ap: ATA port to freeze * * Freeze @ap. * * LOCKING: * None. */ void ata_eh_freeze_port(struct ata_port *ap) { unsigned long flags; if (!ap->ops->error_handler) return; spin_lock_irqsave(ap->lock, flags); __ata_port_freeze(ap); spin_unlock_irqrestore(ap->lock, flags); } /** * ata_port_thaw_port - EH helper to thaw port * @ap: ATA port to thaw * * Thaw frozen port @ap. * * LOCKING: * None. */ void ata_eh_thaw_port(struct ata_port *ap) { unsigned long flags; if (!ap->ops->error_handler) return; spin_lock_irqsave(ap->lock, flags); ap->pflags &= ~ATA_PFLAG_FROZEN; if (ap->ops->thaw) ap->ops->thaw(ap); spin_unlock_irqrestore(ap->lock, flags); DPRINTK("ata%u port thawed\n", ap->print_id); } static void ata_eh_scsidone(struct scsi_cmnd *scmd) { /* nada */ } static void __ata_eh_qc_complete(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct scsi_cmnd *scmd = qc->scsicmd; unsigned long flags; spin_lock_irqsave(ap->lock, flags); qc->scsidone = ata_eh_scsidone; __ata_qc_complete(qc); WARN_ON(ata_tag_valid(qc->tag)); spin_unlock_irqrestore(ap->lock, flags); scsi_eh_finish_cmd(scmd, &ap->eh_done_q); } /** * ata_eh_qc_complete - Complete an active ATA command from EH * @qc: Command to complete * * Indicate to the mid and upper layers that an ATA command has * completed. To be used from EH. 
*/ void ata_eh_qc_complete(struct ata_queued_cmd *qc) { struct scsi_cmnd *scmd = qc->scsicmd; scmd->retries = scmd->allowed; __ata_eh_qc_complete(qc); } /** * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH * @qc: Command to retry * * Indicate to the mid and upper layers that an ATA command * should be retried. To be used from EH. * * SCSI midlayer limits the number of retries to scmd->allowed. * scmd->retries is decremented for commands which get retried * due to unrelated failures (qc->err_mask is zero). */ void ata_eh_qc_retry(struct ata_queued_cmd *qc) { struct scsi_cmnd *scmd = qc->scsicmd; if (!qc->err_mask && scmd->retries) scmd->retries--; __ata_eh_qc_complete(qc); } /** * ata_dev_disable - disable ATA device * @dev: ATA device to disable * * Disable @dev. * * Locking: * EH context. */ void ata_dev_disable(struct ata_device *dev) { if (!ata_dev_enabled(dev)) return; if (ata_msg_drv(dev->link->ap)) ata_dev_printk(dev, KERN_WARNING, "disabled\n"); ata_acpi_on_disable(dev); ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET); dev->class++; /* From now till the next successful probe, ering is used to * track probe failures. Clear accumulated device error info. */ ata_ering_clear(&dev->ering); } /** * ata_eh_detach_dev - detach ATA device * @dev: ATA device to detach * * Detach @dev. * * LOCKING: * None. 
*/ void ata_eh_detach_dev(struct ata_device *dev) { struct ata_link *link = dev->link; struct ata_port *ap = link->ap; struct ata_eh_context *ehc = &link->eh_context; unsigned long flags; ata_dev_disable(dev); spin_lock_irqsave(ap->lock, flags); dev->flags &= ~ATA_DFLAG_DETACH; if (ata_scsi_offline_dev(dev)) { dev->flags |= ATA_DFLAG_DETACHED; ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; } /* clear per-dev EH info */ ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); ehc->saved_xfer_mode[dev->devno] = 0; ehc->saved_ncq_enabled &= ~(1 << dev->devno); spin_unlock_irqrestore(ap->lock, flags); } /** * ata_eh_about_to_do - about to perform eh_action * @link: target ATA link * @dev: target ATA dev for per-dev action (can be NULL) * @action: action about to be performed * * Called just before performing EH actions to clear related bits * in @link->eh_info such that eh actions are not unnecessarily * repeated. * * LOCKING: * None. */ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, unsigned int action) { struct ata_port *ap = link->ap; struct ata_eh_info *ehi = &link->eh_info; struct ata_eh_context *ehc = &link->eh_context; unsigned long flags; spin_lock_irqsave(ap->lock, flags); ata_eh_clear_action(link, dev, ehi, action); /* About to take EH action, set RECOVERED. Ignore actions on * slave links as master will do them again. */ if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) ap->pflags |= ATA_PFLAG_RECOVERED; spin_unlock_irqrestore(ap->lock, flags); } /** * ata_eh_done - EH action complete * @ap: target ATA port * @dev: target ATA dev for per-dev action (can be NULL) * @action: action just completed * * Called right after performing EH actions to clear related bits * in @link->eh_context. * * LOCKING: * None. 
*/ void ata_eh_done(struct ata_link *link, struct ata_device *dev, unsigned int action) { struct ata_eh_context *ehc = &link->eh_context; ata_eh_clear_action(link, dev, &ehc->i, action); } /** * ata_err_string - convert err_mask to descriptive string * @err_mask: error mask to convert to string * * Convert @err_mask to descriptive string. Errors are * prioritized according to severity and only the most severe * error is reported. * * LOCKING: * None. * * RETURNS: * Descriptive string for @err_mask */ static const char *ata_err_string(unsigned int err_mask) { if (err_mask & AC_ERR_HOST_BUS) return "host bus error"; if (err_mask & AC_ERR_ATA_BUS) return "ATA bus error"; if (err_mask & AC_ERR_TIMEOUT) return "timeout"; if (err_mask & AC_ERR_HSM) return "HSM violation"; if (err_mask & AC_ERR_SYSTEM) return "internal error"; if (err_mask & AC_ERR_MEDIA) return "media error"; if (err_mask & AC_ERR_INVALID) return "invalid argument"; if (err_mask & AC_ERR_DEV) return "device error"; return "unknown error"; } /** * ata_read_log_page - read a specific log page * @dev: target device * @page: page to read * @buf: buffer to store read page * @sectors: number of sectors to read * * Read log page using READ_LOG_EXT command. * * LOCKING: * Kernel thread context (may sleep). * * RETURNS: * 0 on success, AC_ERR_* mask otherwise. 
*/ static unsigned int ata_read_log_page(struct ata_device *dev, u8 page, void *buf, unsigned int sectors) { struct ata_taskfile tf; unsigned int err_mask; DPRINTK("read log page - page %d\n", page); ata_tf_init(dev, &tf); tf.command = ATA_CMD_READ_LOG_EXT; tf.lbal = page; tf.nsect = sectors; tf.hob_nsect = sectors >> 8; tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; tf.protocol = ATA_PROT_PIO; err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, buf, sectors * ATA_SECT_SIZE, 0); DPRINTK("EXIT, err_mask=%x\n", err_mask); return err_mask; } /** * ata_eh_read_log_10h - Read log page 10h for NCQ error details * @dev: Device to read log page 10h from * @tag: Resulting tag of the failed command * @tf: Resulting taskfile registers of the failed command * * Read log page 10h to obtain NCQ error details and clear error * condition. * * LOCKING: * Kernel thread context (may sleep). * * RETURNS: * 0 on success, -errno otherwise. */ static int ata_eh_read_log_10h(struct ata_device *dev, int *tag, struct ata_taskfile *tf) { u8 *buf = dev->link->ap->sector_buf; unsigned int err_mask; u8 csum; int i; err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1); if (err_mask) return -EIO; csum = 0; for (i = 0; i < ATA_SECT_SIZE; i++) csum += buf[i]; if (csum) ata_dev_printk(dev, KERN_WARNING, "invalid checksum 0x%x on log page 10h\n", csum); if (buf[0] & 0x80) return -ENOENT; *tag = buf[0] & 0x1f; tf->command = buf[2]; tf->feature = buf[3]; tf->lbal = buf[4]; tf->lbam = buf[5]; tf->lbah = buf[6]; tf->device = buf[7]; tf->hob_lbal = buf[8]; tf->hob_lbam = buf[9]; tf->hob_lbah = buf[10]; tf->nsect = buf[12]; tf->hob_nsect = buf[13]; return 0; } /** * atapi_eh_tur - perform ATAPI TEST_UNIT_READY * @dev: target ATAPI device * @r_sense_key: out parameter for sense_key * * Perform ATAPI TEST_UNIT_READY. * * LOCKING: * EH context (may sleep). * * RETURNS: * 0 on success, AC_ERR_* mask on failure. 
*/ static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) { u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; struct ata_taskfile tf; unsigned int err_mask; ata_tf_init(dev, &tf); tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf.command = ATA_CMD_PACKET; tf.protocol = ATAPI_PROT_NODATA; err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); if (err_mask == AC_ERR_DEV) *r_sense_key = tf.feature >> 4; return err_mask; } /** * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE * @dev: device to perform REQUEST_SENSE to * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) * @dfl_sense_key: default sense key to use * * Perform ATAPI REQUEST_SENSE after the device reported CHECK * SENSE. This function is EH helper. * * LOCKING: * Kernel thread context (may sleep). * * RETURNS: * 0 on success, AC_ERR_* mask on failure */ static unsigned int atapi_eh_request_sense(struct ata_device *dev, u8 *sense_buf, u8 dfl_sense_key) { u8 cdb[ATAPI_CDB_LEN] = { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; struct ata_port *ap = dev->link->ap; struct ata_taskfile tf; DPRINTK("ATAPI request sense\n"); /* FIXME: is this needed? */ memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); /* initialize sense_buf with the error register, * for the case where they are -not- overwritten */ sense_buf[0] = 0x70; sense_buf[2] = dfl_sense_key; /* some devices time out if garbage left in tf */ ata_tf_init(dev, &tf); tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf.command = ATA_CMD_PACKET; /* is it pointless to prefer PIO for "safety reasons"? 
*/ if (ap->flags & ATA_FLAG_PIO_DMA) { tf.protocol = ATAPI_PROT_DMA; tf.feature |= ATAPI_PKT_DMA; } else { tf.protocol = ATAPI_PROT_PIO; tf.lbam = SCSI_SENSE_BUFFERSIZE; tf.lbah = 0; } return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, sense_buf, SCSI_SENSE_BUFFERSIZE, 0); } /** * ata_eh_analyze_serror - analyze SError for a failed port * @link: ATA link to analyze SError for * * Analyze SError if available and further determine cause of * failure. * * LOCKING: * None. */ static void ata_eh_analyze_serror(struct ata_link *link) { struct ata_eh_context *ehc = &link->eh_context; u32 serror = ehc->i.serror; unsigned int err_mask = 0, action = 0; u32 hotplug_mask; if (serror & (SERR_PERSISTENT | SERR_DATA)) { err_mask |= AC_ERR_ATA_BUS; action |= ATA_EH_RESET; } if (serror & SERR_PROTOCOL) { err_mask |= AC_ERR_HSM; action |= ATA_EH_RESET; } if (serror & SERR_INTERNAL) { err_mask |= AC_ERR_SYSTEM; action |= ATA_EH_RESET; } /* Determine whether a hotplug event has occurred. Both * SError.N/X are considered hotplug events for enabled or * host links. For disabled PMP links, only N bit is * considered as X bit is left at 1 for link plugging. */ if (link->lpm_policy > ATA_LPM_MAX_POWER) hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; else hotplug_mask = SERR_PHYRDY_CHG; if (serror & hotplug_mask) ata_ehi_hotplugged(&ehc->i); ehc->i.err_mask |= err_mask; ehc->i.action |= action; } /** * ata_eh_analyze_ncq_error - analyze NCQ error * @link: ATA link to analyze NCQ error for * * Read log page 10h, determine the offending qc and acquire * error status TF. For NCQ device errors, all LLDDs have to do * is setting AC_ERR_DEV in ehi->err_mask. This function takes * care of the rest. * * LOCKING: * Kernel thread context (may sleep). 
*/ void ata_eh_analyze_ncq_error(struct ata_link *link) { struct ata_port *ap = link->ap; struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev = link->device; struct ata_queued_cmd *qc; struct ata_taskfile tf; int tag, rc; /* if frozen, we can't do much */ if (ap->pflags & ATA_PFLAG_FROZEN) return; /* is it NCQ device error? */ if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) return; /* has LLDD analyzed already? */ for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { qc = __ata_qc_from_tag(ap, tag); if (!(qc->flags & ATA_QCFLAG_FAILED)) continue; if (qc->err_mask) return; } /* okay, this error is ours */ memset(&tf, 0, sizeof(tf)); rc = ata_eh_read_log_10h(dev, &tag, &tf); if (rc) { ata_link_printk(link, KERN_ERR, "failed to read log page 10h " "(errno=%d)\n", rc); return; } if (!(link->sactive & (1 << tag))) { ata_link_printk(link, KERN_ERR, "log page 10h reported " "inactive tag %d\n", tag); return; } /* we've got the perpetrator, condemn it */ qc = __ata_qc_from_tag(ap, tag); memcpy(&qc->result_tf, &tf, sizeof(tf)); qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; ehc->i.err_mask &= ~AC_ERR_DEV; } /** * ata_eh_analyze_tf - analyze taskfile of a failed qc * @qc: qc to analyze * @tf: Taskfile registers to analyze * * Analyze taskfile of @qc and further determine cause of * failure. This function also requests ATAPI sense data if * avaliable. * * LOCKING: * Kernel thread context (may sleep). 
* * RETURNS: * Determined recovery action */ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, const struct ata_taskfile *tf) { unsigned int tmp, action = 0; u8 stat = tf->command, err = tf->feature; if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { qc->err_mask |= AC_ERR_HSM; return ATA_EH_RESET; } if (stat & (ATA_ERR | ATA_DF)) qc->err_mask |= AC_ERR_DEV; else return 0; switch (qc->dev->class) { case ATA_DEV_ATA: if (err & ATA_ICRC) qc->err_mask |= AC_ERR_ATA_BUS; if (err & ATA_UNC) qc->err_mask |= AC_ERR_MEDIA; if (err & ATA_IDNF) qc->err_mask |= AC_ERR_INVALID; break; case ATA_DEV_ATAPI: if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { tmp = atapi_eh_request_sense(qc->dev, qc->scsicmd->sense_buffer, qc->result_tf.feature >> 4); if (!tmp) { /* ATA_QCFLAG_SENSE_VALID is used to * tell atapi_qc_complete() that sense * data is already valid. * * TODO: interpret sense data and set * appropriate err_mask. */ qc->flags |= ATA_QCFLAG_SENSE_VALID; } else qc->err_mask |= tmp; } } if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) action |= ATA_EH_RESET; return action; } static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, int *xfer_ok) { int base = 0; if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) *xfer_ok = 1; if (!*xfer_ok) base = ATA_ECAT_DUBIOUS_NONE; if (err_mask & AC_ERR_ATA_BUS) return base + ATA_ECAT_ATA_BUS; if (err_mask & AC_ERR_TIMEOUT) return base + ATA_ECAT_TOUT_HSM; if (eflags & ATA_EFLAG_IS_IO) { if (err_mask & AC_ERR_HSM) return base + ATA_ECAT_TOUT_HSM; if ((err_mask & (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) return base + ATA_ECAT_UNK_DEV; } return 0; } struct speed_down_verdict_arg { u64 since; int xfer_ok; int nr_errors[ATA_ECAT_NR]; }; static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) { struct speed_down_verdict_arg *arg = void_arg; int cat; if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) return -1; cat = 
ata_eh_categorize_error(ent->eflags, ent->err_mask, &arg->xfer_ok); arg->nr_errors[cat]++; return 0; } /** * ata_eh_speed_down_verdict - Determine speed down verdict * @dev: Device of interest * * This function examines error ring of @dev and determines * whether NCQ needs to be turned off, transfer speed should be * stepped down, or falling back to PIO is necessary. * * ECAT_ATA_BUS : ATA_BUS error for any command * * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for * IO commands * * ECAT_UNK_DEV : Unknown DEV error for IO commands * * ECAT_DUBIOUS_* : Identical to above three but occurred while * data transfer hasn't been verified. * * Verdicts are * * NCQ_OFF : Turn off NCQ. * * SPEED_DOWN : Speed down transfer speed but don't fall back * to PIO. * * FALLBACK_TO_PIO : Fall back to PIO. * * Even if multiple verdicts are returned, only one action is * taken per error. An action triggered by non-DUBIOUS errors * clears ering, while one triggered by DUBIOUS_* errors doesn't. * This is to expedite speed down decisions right after device is * initially configured. * * The followings are speed down rules. #1 and #2 deal with * DUBIOUS errors. * * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO. * * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors * occurred during last 5 mins, NCQ_OFF. * * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors * ocurred during last 5 mins, FALLBACK_TO_PIO * * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred * during last 10 mins, NCQ_OFF. * * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6 * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN. * * LOCKING: * Inherited from caller. * * RETURNS: * OR of ATA_EH_SPDN_* flags. 
*/ static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) { const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; u64 j64 = get_jiffies_64(); struct speed_down_verdict_arg arg; unsigned int verdict = 0; /* scan past 5 mins of error history */ memset(&arg, 0, sizeof(arg)); arg.since = j64 - min(j64, j5mins); ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) verdict |= ATA_EH_SPDN_SPEED_DOWN | ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; if (arg.nr_errors[ATA_ECAT_ATA_BUS] + arg.nr_errors[ATA_ECAT_TOUT_HSM] + arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; /* scan past 10 mins of error history */ memset(&arg, 0, sizeof(arg)); arg.since = j64 - min(j64, j10mins); ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) verdict |= ATA_EH_SPDN_NCQ_OFF; if (arg.nr_errors[ATA_ECAT_ATA_BUS] + arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) verdict |= ATA_EH_SPDN_SPEED_DOWN; return verdict; } /** * ata_eh_speed_down - record error and speed down if necessary * @dev: Failed device * @eflags: mask of ATA_EFLAG_* flags * @err_mask: err_mask of the error * * Record error and examine error history to determine whether * adjusting transmission speed is necessary. It also sets * transmission limits appropriately if such adjustment is * necessary. * * LOCKING: * Kernel thread context (may sleep). * * RETURNS: * Determined recovery action. 
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ?  Only if NCQ is currently on and neither PIO
	 * nor a previous NCQ_OFF is in effect.
	 */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_printk(dev, KERN_WARNING,
			       "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode: first step within the current
		 * class (DMA or PIO), second step forces the floor
		 */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down to PIO is meaningless for
	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}

/**
 *	ata_eh_link_autopsy - analyze error and determine recovery action
 *	@link: host link to perform autopsy on
 *
 *	Analyze why @link failed and determine which recovery actions
 *	are needed.  This function also sets more detailed AC_ERR_*
 *	values and fills sense data for ATAPI CHECK SENSE.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag;
	u32 serror;
	int rc;

	DPRINTK("ENTER\n");

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed, force reset and probing */
		ehc->i.probe_mask |= ATA_ALL_DEVICES;
		ehc->i.action |= ATA_EH_RESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	/* walk all failed qcs belonging to this physical link */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably
spurious in case of ATA_BUS error */ if (qc->err_mask & AC_ERR_ATA_BUS) qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | AC_ERR_INVALID); /* any real error trumps unknown error */ if (qc->err_mask & ~AC_ERR_OTHER) qc->err_mask &= ~AC_ERR_OTHER; /* SENSE_VALID trumps dev/unknown error and revalidation */ if (qc->flags & ATA_QCFLAG_SENSE_VALID) qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); /* determine whether the command is worth retrying */ if (qc->flags & ATA_QCFLAG_IO || (!(qc->err_mask & AC_ERR_INVALID) && qc->err_mask != AC_ERR_DEV)) qc->flags |= ATA_QCFLAG_RETRY; /* accumulate error info */ ehc->i.dev = qc->dev; all_err_mask |= qc->err_mask; if (qc->flags & ATA_QCFLAG_IO) eflags |= ATA_EFLAG_IS_IO; } /* enforce default EH actions */ if (ap->pflags & ATA_PFLAG_FROZEN || all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) ehc->i.action |= ATA_EH_RESET; else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) ehc->i.action |= ATA_EH_REVALIDATE; /* If we have offending qcs and the associated failed device, * perform per-dev EH action only on the offending device. */ if (ehc->i.dev) { ehc->i.dev_action[ehc->i.dev->devno] |= ehc->i.action & ATA_EH_PERDEV_MASK; ehc->i.action &= ~ATA_EH_PERDEV_MASK; } /* propagate timeout to host link */ if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; /* record error and consider speeding down */ dev = ehc->i.dev; if (!dev && ((ata_link_max_devices(link) == 1 && ata_dev_enabled(link->device)))) dev = link->device; if (dev) { if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) eflags |= ATA_EFLAG_DUBIOUS_XFER; ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); } DPRINTK("EXIT\n"); } /** * ata_eh_autopsy - analyze error and determine recovery action * @ap: host port to perform autopsy on * * Analyze all links of @ap and determine why they failed and * which recovery actions are needed. 
* * LOCKING: * Kernel thread context (may sleep). */ void ata_eh_autopsy(struct ata_port *ap) { struct ata_link *link; ata_for_each_link(link, ap, EDGE) ata_eh_link_autopsy(link); /* Handle the frigging slave link. Autopsy is done similarly * but actions and flags are transferred over to the master * link and handled from there. */ if (ap->slave_link) { struct ata_eh_context *mehc = &ap->link.eh_context; struct ata_eh_context *sehc = &ap->slave_link->eh_context; /* transfer control flags from master to slave */ sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; /* perform autopsy on the slave link */ ata_eh_link_autopsy(ap->slave_link); /* transfer actions from slave to master and clear slave */ ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); mehc->i.action |= sehc->i.action; mehc->i.dev_action[1] |= sehc->i.dev_action[1]; mehc->i.flags |= sehc->i.flags; ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); } /* Autopsy of fanout ports can affect host link autopsy. * Perform host link autopsy last. */ if (sata_pmp_attached(ap)) ata_eh_link_autopsy(&ap->link); } /** * ata_get_cmd_descript - get description for ATA command * @command: ATA command code to get description for * * Return a textual description of the given command, or NULL if the * command is not known. 
 *
 *	LOCKING:
 *	None
 */
const char *ata_get_cmd_descript(u8 command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
	/* command-code -> human readable name, NULL-text terminated */
	static const struct
	{
		u8 command;
		const char *text;
	} cmd_descr[] = {
		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
		{ ATA_CMD_CHK_POWER, 		"CHECK POWER MODE" },
		{ ATA_CMD_STANDBY, 		"STANDBY" },
		{ ATA_CMD_IDLE, 		"IDLE" },
		{ ATA_CMD_EDD, 			"EXECUTE DEVICE DIAGNOSTIC" },
		{ ATA_CMD_DOWNLOAD_MICRO,   	"DOWNLOAD MICROCODE" },
		{ ATA_CMD_NOP,			"NOP" },
		{ ATA_CMD_FLUSH, 		"FLUSH CACHE" },
		{ ATA_CMD_FLUSH_EXT, 		"FLUSH CACHE EXT" },
		{ ATA_CMD_ID_ATA,  		"IDENTIFY DEVICE" },
		{ ATA_CMD_ID_ATAPI, 		"IDENTIFY PACKET DEVICE" },
		{ ATA_CMD_SERVICE, 		"SERVICE" },
		{ ATA_CMD_READ, 		"READ DMA" },
		{ ATA_CMD_READ_EXT, 		"READ DMA EXT" },
		{ ATA_CMD_READ_QUEUED, 		"READ DMA QUEUED" },
		{ ATA_CMD_READ_STREAM_EXT, 	"READ STREAM EXT" },
		{ ATA_CMD_READ_STREAM_DMA_EXT, 	"READ STREAM DMA EXT" },
		{ ATA_CMD_WRITE, 		"WRITE DMA" },
		{ ATA_CMD_WRITE_EXT, 		"WRITE DMA EXT" },
		{ ATA_CMD_WRITE_QUEUED, 	"WRITE DMA QUEUED EXT" },
		{ ATA_CMD_WRITE_STREAM_EXT, 	"WRITE STREAM EXT" },
		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
		{ ATA_CMD_SLEEP,		"SLEEP" },
		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
		{ ATA_CMD_SMART,		"SMART" },
		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
		{ ATA_CMD_CHK_MED_CRD_TYP, 	"CHECK MEDIA CARD TYPE" },
		{ ATA_CMD_CFA_REQ_EXT_ERR, 	"CFA REQUEST EXTENDED ERROR" },
		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
		{ ATA_CMD_CFA_WRITE_MULT_NE, 	"CFA WRITE MULTIPLE WITHOUT ERASE" },
		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
		{ 0,				NULL } /* terminate list */
	};

	unsigned int
i; for (i = 0; cmd_descr[i].text; i++) if (cmd_descr[i].command == command) return cmd_descr[i].text; #endif return NULL; } /** * ata_eh_link_report - report error handling to user * @link: ATA link EH is going on * * Report EH to user. * * LOCKING: * None. */ static void ata_eh_link_report(struct ata_link *link) { struct ata_port *ap = link->ap; struct ata_eh_context *ehc = &link->eh_context; const char *frozen, *desc; char tries_buf[6]; int tag, nr_failed = 0; if (ehc->i.flags & ATA_EHI_QUIET) return; desc = NULL; if (ehc->i.desc[0] != '\0') desc = ehc->i.desc; for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); if (!(qc->flags & ATA_QCFLAG_FAILED) || ata_dev_phys_link(qc->dev) != link || ((qc->flags & ATA_QCFLAG_QUIET) && qc->err_mask == AC_ERR_DEV)) continue; if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) continue; nr_failed++; } if (!nr_failed && !ehc->i.err_mask) return; frozen = ""; if (ap->pflags & ATA_PFLAG_FROZEN) frozen = " frozen"; memset(tries_buf, 0, sizeof(tries_buf)); if (ap->eh_tries < ATA_EH_MAX_TRIES) snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d", ap->eh_tries); if (ehc->i.dev) { ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x " "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", ehc->i.err_mask, link->sactive, ehc->i.serror, ehc->i.action, frozen, tries_buf); if (desc) ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc); } else { ata_link_printk(link, KERN_ERR, "exception Emask 0x%x " "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", ehc->i.err_mask, link->sactive, ehc->i.serror, ehc->i.action, frozen, tries_buf); if (desc) ata_link_printk(link, KERN_ERR, "%s\n", desc); } #ifdef CONFIG_ATA_VERBOSE_ERROR if (ehc->i.serror) ata_link_printk(link, KERN_ERR, "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", ehc->i.serror & SERR_DATA ? 
"UnrecovData " : "", ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", ehc->i.serror & SERR_CRC ? "BadCRC " : "", ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); #endif for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; const u8 *cdb = qc->cdb; char data_buf[20] = ""; char cdb_buf[70] = ""; if (!(qc->flags & ATA_QCFLAG_FAILED) || ata_dev_phys_link(qc->dev) != link || !qc->err_mask) continue; if (qc->dma_dir != DMA_NONE) { static const char *dma_str[] = { [DMA_BIDIRECTIONAL] = "bidi", [DMA_TO_DEVICE] = "out", [DMA_FROM_DEVICE] = "in", }; static const char *prot_str[] = { [ATA_PROT_PIO] = "pio", [ATA_PROT_DMA] = "dma", [ATA_PROT_NCQ] = "ncq", [ATAPI_PROT_PIO] = "pio", [ATAPI_PROT_DMA] = "dma", }; snprintf(data_buf, sizeof(data_buf), " %s %u %s", prot_str[qc->tf.protocol], qc->nbytes, dma_str[qc->dma_dir]); } if (ata_is_atapi(qc->tf.protocol)) { if (qc->scsicmd) scsi_print_command(qc->scsicmd); else snprintf(cdb_buf, sizeof(cdb_buf), "cdb %02x %02x %02x %02x %02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x %02x %02x\n ", cdb[0], cdb[1], cdb[2], cdb[3], cdb[4], cdb[5], cdb[6], cdb[7], cdb[8], cdb[9], cdb[10], cdb[11], cdb[12], cdb[13], cdb[14], cdb[15]); } else { const char *descr = ata_get_cmd_descript(cmd->command); if (descr) ata_dev_printk(qc->dev, KERN_ERR, "failed command: %s\n", descr); } 
ata_dev_printk(qc->dev, KERN_ERR, "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " "tag %d%s\n %s" "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " "Emask 0x%x (%s)%s\n", cmd->command, cmd->feature, cmd->nsect, cmd->lbal, cmd->lbam, cmd->lbah, cmd->hob_feature, cmd->hob_nsect, cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, cmd->device, qc->tag, data_buf, cdb_buf, res->command, res->feature, res->nsect, res->lbal, res->lbam, res->lbah, res->hob_feature, res->hob_nsect, res->hob_lbal, res->hob_lbam, res->hob_lbah, res->device, qc->err_mask, ata_err_string(qc->err_mask), qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); #ifdef CONFIG_ATA_VERBOSE_ERROR if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR)) { if (res->command & ATA_BUSY) ata_dev_printk(qc->dev, KERN_ERR, "status: { Busy }\n"); else ata_dev_printk(qc->dev, KERN_ERR, "status: { %s%s%s%s}\n", res->command & ATA_DRDY ? "DRDY " : "", res->command & ATA_DF ? "DF " : "", res->command & ATA_DRQ ? "DRQ " : "", res->command & ATA_ERR ? "ERR " : ""); } if (cmd->command != ATA_CMD_PACKET && (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | ATA_ABORTED))) ata_dev_printk(qc->dev, KERN_ERR, "error: { %s%s%s%s}\n", res->feature & ATA_ICRC ? "ICRC " : "", res->feature & ATA_UNC ? "UNC " : "", res->feature & ATA_IDNF ? "IDNF " : "", res->feature & ATA_ABORTED ? "ABRT " : ""); #endif } } /** * ata_eh_report - report error handling to user * @ap: ATA port to report EH about * * Report EH to user. * * LOCKING: * None. 
*/ void ata_eh_report(struct ata_port *ap) { struct ata_link *link; ata_for_each_link(link, ap, HOST_FIRST) ata_eh_link_report(link); } static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, unsigned int *classes, unsigned long deadline, bool clear_classes) { struct ata_device *dev; if (clear_classes) ata_for_each_dev(dev, link, ALL) classes[dev->devno] = ATA_DEV_UNKNOWN; return reset(link, classes, deadline); } static int ata_eh_followup_srst_needed(struct ata_link *link, int rc, const unsigned int *classes) { if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) return 0; if (rc == -EAGAIN) return 1; if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) return 1; return 0; } int ata_eh_reset(struct ata_link *link, int classify, ata_prereset_fn_t prereset, ata_reset_fn_t softreset, ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) { struct ata_port *ap = link->ap; struct ata_link *slave = ap->slave_link; struct ata_eh_context *ehc = &link->eh_context; struct ata_eh_context *sehc = slave ? 
&slave->eh_context : NULL; unsigned int *classes = ehc->classes; unsigned int lflags = link->flags; int verbose = !(ehc->i.flags & ATA_EHI_QUIET); int max_tries = 0, try = 0; struct ata_link *failed_link; struct ata_device *dev; unsigned long deadline, now; ata_reset_fn_t reset; unsigned long flags; u32 sstatus; int nr_unknown, rc; /* * Prepare to reset */ while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) max_tries++; if (link->flags & ATA_LFLAG_NO_HRST) hardreset = NULL; if (link->flags & ATA_LFLAG_NO_SRST) softreset = NULL; /* make sure each reset attemp is at least COOL_DOWN apart */ if (ehc->i.flags & ATA_EHI_DID_RESET) { now = jiffies; WARN_ON(time_after(ehc->last_reset, now)); deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN); if (time_before(now, deadline)) schedule_timeout_uninterruptible(deadline - now); } spin_lock_irqsave(ap->lock, flags); ap->pflags |= ATA_PFLAG_RESETTING; spin_unlock_irqrestore(ap->lock, flags); ata_eh_about_to_do(link, NULL, ATA_EH_RESET); ata_for_each_dev(dev, link, ALL) { /* If we issue an SRST then an ATA drive (not ATAPI) * may change configuration and be in PIO0 timing. If * we do a hard reset (or are coming from power on) * this is true for ATA or ATAPI. Until we've set a * suitable controller mode we should not touch the * bus as we may be talking too fast. */ dev->pio_mode = XFER_PIO_0; /* If the controller has a pio mode setup function * then use it to set the chipset to rights. Don't * touch the DMA setup as that will be dealt with when * configuring devices. 
*/ if (ap->ops->set_piomode) ap->ops->set_piomode(ap, dev); } /* prefer hardreset */ reset = NULL; ehc->i.action &= ~ATA_EH_RESET; if (hardreset) { reset = hardreset; ehc->i.action |= ATA_EH_HARDRESET; } else if (softreset) { reset = softreset; ehc->i.action |= ATA_EH_SOFTRESET; } if (prereset) { unsigned long deadline = ata_deadline(jiffies, ATA_EH_PRERESET_TIMEOUT); if (slave) { sehc->i.action &= ~ATA_EH_RESET; sehc->i.action |= ehc->i.action; } rc = prereset(link, deadline); /* If present, do prereset on slave link too. Reset * is skipped iff both master and slave links report * -ENOENT or clear ATA_EH_RESET. */ if (slave && (rc == 0 || rc == -ENOENT)) { int tmp; tmp = prereset(slave, deadline); if (tmp != -ENOENT) rc = tmp; ehc->i.action |= sehc->i.action; } if (rc) { if (rc == -ENOENT) { ata_link_printk(link, KERN_DEBUG, "port disabled. ignoring.\n"); ehc->i.action &= ~ATA_EH_RESET; ata_for_each_dev(dev, link, ALL) classes[dev->devno] = ATA_DEV_NONE; rc = 0; } else ata_link_printk(link, KERN_ERR, "prereset failed (errno=%d)\n", rc); goto out; } /* prereset() might have cleared ATA_EH_RESET. If so, * bang classes, thaw and return. */ if (reset && !(ehc->i.action & ATA_EH_RESET)) { ata_for_each_dev(dev, link, ALL) classes[dev->devno] = ATA_DEV_NONE; if ((ap->pflags & ATA_PFLAG_FROZEN) && ata_is_host_link(link)) ata_eh_thaw_port(ap); rc = 0; goto out; } } retry: /* * Perform reset */ if (ata_is_host_link(link)) ata_eh_freeze_port(ap); deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); if (reset) { if (verbose) ata_link_printk(link, KERN_INFO, "%s resetting link\n", reset == softreset ? 
"soft" : "hard"); /* mark that this EH session started with reset */ ehc->last_reset = jiffies; if (reset == hardreset) ehc->i.flags |= ATA_EHI_DID_HARDRESET; else ehc->i.flags |= ATA_EHI_DID_SOFTRESET; rc = ata_do_reset(link, reset, classes, deadline, true); if (rc && rc != -EAGAIN) { failed_link = link; goto fail; } /* hardreset slave link if existent */ if (slave && reset == hardreset) { int tmp; if (verbose) ata_link_printk(slave, KERN_INFO, "hard resetting link\n"); ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); tmp = ata_do_reset(slave, reset, classes, deadline, false); switch (tmp) { case -EAGAIN: rc = -EAGAIN; case 0: break; default: failed_link = slave; rc = tmp; goto fail; } } /* perform follow-up SRST if necessary */ if (reset == hardreset && ata_eh_followup_srst_needed(link, rc, classes)) { reset = softreset; if (!reset) { ata_link_printk(link, KERN_ERR, "follow-up softreset required " "but no softreset avaliable\n"); failed_link = link; rc = -EINVAL; goto fail; } ata_eh_about_to_do(link, NULL, ATA_EH_RESET); rc = ata_do_reset(link, reset, classes, deadline, true); if (rc) { failed_link = link; goto fail; } } } else { if (verbose) ata_link_printk(link, KERN_INFO, "no reset method " "available, skipping reset\n"); if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) lflags |= ATA_LFLAG_ASSUME_ATA; } /* * Post-reset processing */ ata_for_each_dev(dev, link, ALL) { /* After the reset, the device state is PIO 0 and the * controller state is undefined. Reset also wakes up * drives from sleeping mode. 
*/ dev->pio_mode = XFER_PIO_0; dev->flags &= ~ATA_DFLAG_SLEEPING; if (ata_phys_link_offline(ata_dev_phys_link(dev))) continue; /* apply class override */ if (lflags & ATA_LFLAG_ASSUME_ATA) classes[dev->devno] = ATA_DEV_ATA; else if (lflags & ATA_LFLAG_ASSUME_SEMB) classes[dev->devno] = ATA_DEV_SEMB_UNSUP; } /* record current link speed */ if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) link->sata_spd = (sstatus >> 4) & 0xf; if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) slave->sata_spd = (sstatus >> 4) & 0xf; /* thaw the port */ if (ata_is_host_link(link)) ata_eh_thaw_port(ap); /* postreset() should clear hardware SError. Although SError * is cleared during link resume, clearing SError here is * necessary as some PHYs raise hotplug events after SRST. * This introduces race condition where hotplug occurs between * reset and here. This race is mediated by cross checking * link onlineness and classification result later. */ if (postreset) { postreset(link, classes); if (slave) postreset(slave, classes); } /* * Some controllers can't be frozen very well and may set * spuruious error conditions during reset. Clear accumulated * error information. As reset is the final recovery action, * nothing is lost by doing this. */ spin_lock_irqsave(link->ap->lock, flags); memset(&link->eh_info, 0, sizeof(link->eh_info)); if (slave) memset(&slave->eh_info, 0, sizeof(link->eh_info)); ap->pflags &= ~ATA_PFLAG_EH_PENDING; spin_unlock_irqrestore(link->ap->lock, flags); /* * Make sure onlineness and classification result correspond. * Hotplug could have happened during reset and some * controllers fail to wait while a drive is spinning up after * being hotplugged causing misdetection. By cross checking * link on/offlineness and classification result, those * conditions can be reliably detected and retried. 
*/ nr_unknown = 0; ata_for_each_dev(dev, link, ALL) { if (ata_phys_link_online(ata_dev_phys_link(dev))) { if (classes[dev->devno] == ATA_DEV_UNKNOWN) { ata_dev_printk(dev, KERN_DEBUG, "link online " "but device misclassifed\n"); classes[dev->devno] = ATA_DEV_NONE; nr_unknown++; } } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) { if (ata_class_enabled(classes[dev->devno])) ata_dev_printk(dev, KERN_DEBUG, "link offline, " "clearing class %d to NONE\n", classes[dev->devno]); classes[dev->devno] = ATA_DEV_NONE; } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { ata_dev_printk(dev, KERN_DEBUG, "link status unknown, " "clearing UNKNOWN to NONE\n"); classes[dev->devno] = ATA_DEV_NONE; } } if (classify && nr_unknown) { if (try < max_tries) { ata_link_printk(link, KERN_WARNING, "link online but " "%d devices misclassified, retrying\n", nr_unknown); failed_link = link; rc = -EAGAIN; goto fail; } ata_link_printk(link, KERN_WARNING, "link online but %d devices misclassified, " "device detection might fail\n", nr_unknown); } /* reset successful, schedule revalidation */ ata_eh_done(link, NULL, ATA_EH_RESET); if (slave) ata_eh_done(slave, NULL, ATA_EH_RESET); ehc->last_reset = jiffies; /* update to completion time */ ehc->i.action |= ATA_EH_REVALIDATE; link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ rc = 0; out: /* clear hotplug flag */ ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; if (slave) sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; spin_lock_irqsave(ap->lock, flags); ap->pflags &= ~ATA_PFLAG_RESETTING; spin_unlock_irqrestore(ap->lock, flags); return rc; fail: /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ if (!ata_is_host_link(link) && sata_scr_read(link, SCR_STATUS, &sstatus)) rc = -ERESTART; if (rc == -ERESTART || try >= max_tries) goto out; now = jiffies; if (time_before(now, deadline)) { unsigned long delta = deadline - now; ata_link_printk(failed_link, KERN_WARNING, "reset failed (errno=%d), retrying in %u secs\n", rc, 
DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); ata_eh_release(ap); while (delta) delta = schedule_timeout_uninterruptible(delta); ata_eh_acquire(ap); } if (try == max_tries - 1) { sata_down_spd_limit(link, 0); if (slave) sata_down_spd_limit(slave, 0); } else if (rc == -EPIPE) sata_down_spd_limit(failed_link, 0); if (hardreset) reset = hardreset; goto retry; } static inline void ata_eh_pull_park_action(struct ata_port *ap) { struct ata_link *link; struct ata_device *dev; unsigned long flags; /* * This function can be thought of as an extended version of * ata_eh_about_to_do() specially crafted to accommodate the * requirements of ATA_EH_PARK handling. Since the EH thread * does not leave the do {} while () loop in ata_eh_recover as * long as the timeout for a park request to *one* device on * the port has not expired, and since we still want to pick * up park requests to other devices on the same port or * timeout updates for the same device, we have to pull * ATA_EH_PARK actions from eh_info into eh_context.i * ourselves at the beginning of each pass over the loop. * * Additionally, all write accesses to &ap->park_req_pending * through INIT_COMPLETION() (see below) or complete_all() * (see ata_scsi_park_store()) are protected by the host lock. * As a result we have that park_req_pending.done is zero on * exit from this function, i.e. when ATA_EH_PARK actions for * *all* devices on port ap have been pulled into the * respective eh_context structs. If, and only if, * park_req_pending.done is non-zero by the time we reach * wait_for_completion_timeout(), another ATA_EH_PARK action * has been scheduled for at least one of the devices on port * ap and we have to cycle over the do {} while () loop in * ata_eh_recover() again. 
*/ spin_lock_irqsave(ap->lock, flags); INIT_COMPLETION(ap->park_req_pending); ata_for_each_link(link, ap, EDGE) { ata_for_each_dev(dev, link, ALL) { struct ata_eh_info *ehi = &link->eh_info; link->eh_context.i.dev_action[dev->devno] |= ehi->dev_action[dev->devno] & ATA_EH_PARK; ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); } } spin_unlock_irqrestore(ap->lock, flags); } static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) { struct ata_eh_context *ehc = &dev->link->eh_context; struct ata_taskfile tf; unsigned int err_mask; ata_tf_init(dev, &tf); if (park) { ehc->unloaded_mask |= 1 << dev->devno; tf.command = ATA_CMD_IDLEIMMEDIATE; tf.feature = 0x44; tf.lbal = 0x4c; tf.lbam = 0x4e; tf.lbah = 0x55; } else { ehc->unloaded_mask &= ~(1 << dev->devno); tf.command = ATA_CMD_CHK_POWER; } tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; tf.protocol |= ATA_PROT_NODATA; err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); if (park && (err_mask || tf.lbal != 0xc4)) { ata_dev_printk(dev, KERN_ERR, "head unload failed!\n"); ehc->unloaded_mask &= ~(1 << dev->devno); } } static int ata_eh_revalidate_and_attach(struct ata_link *link, struct ata_device **r_failed_dev) { struct ata_port *ap = link->ap; struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev; unsigned int new_mask = 0; unsigned long flags; int rc = 0; DPRINTK("ENTER\n"); /* For PATA drive side cable detection to work, IDENTIFY must * be done backwards such that PDIAG- is released by the slave * device before the master device is identified. 
*/ ata_for_each_dev(dev, link, ALL_REVERSE) { unsigned int action = ata_eh_dev_action(dev); unsigned int readid_flags = 0; if (ehc->i.flags & ATA_EHI_DID_RESET) readid_flags |= ATA_READID_POSTRESET; if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { WARN_ON(dev->class == ATA_DEV_PMP); if (ata_phys_link_offline(ata_dev_phys_link(dev))) { rc = -EIO; goto err; } ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], readid_flags); if (rc) goto err; ata_eh_done(link, dev, ATA_EH_REVALIDATE); /* Configuration may have changed, reconfigure * transfer mode. */ ehc->i.flags |= ATA_EHI_SETMODE; /* schedule the scsi_rescan_device() here */ schedule_work(&(ap->scsi_rescan_task)); } else if (dev->class == ATA_DEV_UNKNOWN && ehc->tries[dev->devno] && ata_class_enabled(ehc->classes[dev->devno])) { /* Temporarily set dev->class, it will be * permanently set once all configurations are * complete. This is necessary because new * device configuration is done in two * separate loops. */ dev->class = ehc->classes[dev->devno]; if (dev->class == ATA_DEV_PMP) rc = sata_pmp_attach(dev); else rc = ata_dev_read_id(dev, &dev->class, readid_flags, dev->id); /* read_id might have changed class, store and reset */ ehc->classes[dev->devno] = dev->class; dev->class = ATA_DEV_UNKNOWN; switch (rc) { case 0: /* clear error info accumulated during probe */ ata_ering_clear(&dev->ering); new_mask |= 1 << dev->devno; break; case -ENOENT: /* IDENTIFY was issued to non-existent * device. No need to reset. Just * thaw and ignore the device. */ ata_eh_thaw_port(ap); break; default: goto err; } } } /* PDIAG- should have been released, ask cable type if post-reset */ if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { if (ap->ops->cable_detect) ap->cbl = ap->ops->cable_detect(ap); ata_force_cbl(ap); } /* Configure new devices forward such that user doesn't see * device detection messages backwards. 
 */
	ata_for_each_dev(dev, link, ALL) {
		if (!(new_mask & (1 << dev->devno)))
			continue;

		dev->class = ehc->classes[dev->devno];

		/* PMPs were fully configured by sata_pmp_attach() above */
		if (dev->class == ATA_DEV_PMP)
			continue;

		ehc->i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc) {
			dev->class = ATA_DEV_UNKNOWN;
			goto err;
		}

		spin_lock_irqsave(ap->lock, flags);
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
		spin_unlock_irqrestore(ap->lock, flags);

		/* new device discovered, configure xfermode */
		ehc->i.flags |= ATA_EHI_SETMODE;
	}

	return 0;

 err:
	*r_failed_dev = dev;
	DPRINTK("EXIT rc=%d\n", rc);
	return rc;
}

/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc;

	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
			struct ata_ering_entry *ent;

			ent = ata_ering_top(&dev->ering);
			if (ent)
				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
		}
	}

	/* has private set_mode?
 */
	if (ap->ops->set_mode)
		rc = ap->ops->set_mode(link, r_failed_dev);
	else
		rc = ata_do_set_mode(link, r_failed_dev);

	/* if transfer mode has changed, set DUBIOUS_XFER on device */
	ata_for_each_dev(dev, link, ENABLED) {
		struct ata_eh_context *ehc = &link->eh_context;
		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));

		if (dev->xfer_mode != saved_xfer_mode ||
		    ata_ncq_enabled(dev) != saved_ncq)
			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
	}

	return rc;
}

/**
 *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
 *	@dev: ATAPI device to clear UA for
 *
 *	Resets and other operations can make an ATAPI device raise
 *	UNIT ATTENTION which causes the next operation to fail.  This
 *	function clears UA.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int atapi_eh_clear_ua(struct ata_device *dev)
{
	int i;

	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
		u8 *sense_buffer = dev->link->ap->sector_buf;
		u8 sense_key = 0;
		unsigned int err_mask;

		err_mask = atapi_eh_tur(dev, &sense_key);
		/* AC_ERR_DEV alone is the expected "not ready" response;
		 * any other error is a real failure */
		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
			ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
				"failed (err_mask=0x%x)\n", err_mask);
			return -EIO;
		}

		/* TUR succeeded or failed for a reason other than UA: done */
		if (!err_mask || sense_key != UNIT_ATTENTION)
			return 0;

		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
		if (err_mask) {
			ata_dev_printk(dev, KERN_WARNING, "failed to clear "
				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
			return -EIO;
		}
	}

	ata_dev_printk(dev, KERN_WARNING,
		"UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);

	return 0;
}

/**
 *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
 *	@dev: ATA device which may need FLUSH retry
 *
 *	If @dev failed FLUSH, it needs to be reported upper layer
 *	immediately as it means that @dev failed to remap and already
 *	lost at least a sector and further FLUSH retrials won't make
 *	any difference to the lost sector.
 *	However, if FLUSH failed
 *	for other reasons, for example transmission error, FLUSH needs
 *	to be retried.
 *
 *	This function determines whether FLUSH failure retry is
 *	necessary and performs it if so.
 *
 *	RETURNS:
 *	0 if EH can continue, -errno if EH needs to be repeated.
 */
static int ata_eh_maybe_retry_flush(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	unsigned int err_mask;
	int rc = 0;

	/* did flush fail for this device? */
	if (!ata_tag_valid(link->active_tag))
		return 0;

	qc = __ata_qc_from_tag(ap, link->active_tag);
	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
			       qc->tf.command != ATA_CMD_FLUSH))
		return 0;

	/* if the device failed it, it should be reported to upper layers */
	if (qc->err_mask & AC_ERR_DEV)
		return 0;

	/* flush failed for some other reason, give it another shot */
	ata_tf_init(dev, &tf);
	tf.command = qc->tf.command;	/* reuse the original FLUSH opcode */
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
		       tf.command, qc->err_mask);

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (!err_mask) {
		/*
		 * FLUSH is complete but there's no way to
		 * successfully complete a failed command from EH.
		 * Making sure retry is allowed at least once and
		 * retrying it should do the trick - whatever was in
		 * the cache is already on the platter and this won't
		 * cause infinite loop.
*/ qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); } else { ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n", err_mask); rc = -EIO; /* if device failed it, report it to upper layers */ if (err_mask & AC_ERR_DEV) { qc->err_mask |= AC_ERR_DEV; qc->result_tf = tf; if (!(ap->pflags & ATA_PFLAG_FROZEN)) rc = 0; } } return rc; } /** * ata_eh_set_lpm - configure SATA interface power management * @link: link to configure power management * @policy: the link power management policy * @r_failed_dev: out parameter for failed device * * Enable SATA Interface power management. This will enable * Device Interface Power Management (DIPM) for min_power * policy, and then call driver specific callbacks for * enabling Host Initiated Power management. * * LOCKING: * EH context. * * RETURNS: * 0 on success, -errno on failure. */ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, struct ata_device **r_failed_dev) { struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; enum ata_lpm_policy old_policy = link->lpm_policy; bool no_dipm = ap->flags & ATA_FLAG_NO_DIPM; unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; unsigned int err_mask; int rc; /* if the link or host doesn't do LPM, noop */ if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) return 0; /* * DIPM is enabled only for MIN_POWER as some devices * misbehave when the host NACKs transition to SLUMBER. Order * device and link configurations such that the host always * allows DIPM requests. 
 */
	ata_for_each_dev(dev, link, ENABLED) {
		bool hipm = ata_id_has_hipm(dev->id);
		bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;

		/* find the first enabled and LPM enabled devices */
		if (!link_dev)
			link_dev = dev;

		if (!lpm_dev && (hipm || dipm))
			lpm_dev = dev;

		hints &= ~ATA_LPM_EMPTY;
		if (!hipm)
			hints &= ~ATA_LPM_HIPM;

		/* disable DIPM before changing link config */
		if (policy != ATA_LPM_MIN_POWER && dipm) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_DISABLE, SATA_DIPM);
			/* AC_ERR_DEV is tolerated: the device may simply
			 * reject the SET FEATURES */
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_printk(dev, KERN_WARNING,
					"failed to disable DIPM, Emask 0x%x\n",
					err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	if (ap) {
		rc = ap->ops->set_lpm(link, policy, hints);
		if (!rc && ap->slave_link)
			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
	} else
		rc = sata_pmp_set_lpm(link, policy, hints);

	/*
	 * Attribute link config failure to the first (LPM) enabled
	 * device on the link.
	 */
	if (rc) {
		if (rc == -EOPNOTSUPP) {
			link->flags |= ATA_LFLAG_NO_LPM;
			return 0;
		}
		dev = lpm_dev ? lpm_dev : link_dev;
		goto fail;
	}

	/*
	 * Low level driver acked the transition.  Issue DIPM command
	 * with the new policy set.
 */
	link->lpm_policy = policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = policy;

	/* host config updated, enable DIPM if transitioning to MIN_POWER */
	ata_for_each_dev(dev, link, ENABLED) {
		if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
		    ata_id_has_dipm(dev->id)) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_printk(dev, KERN_WARNING,
					"failed to enable DIPM, Emask 0x%x\n",
					err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	return 0;

fail:
	/* restore the old policy */
	link->lpm_policy = old_policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = old_policy;

	/* if no device or only one more chance is left, disable LPM */
	if (!dev || ehc->tries[dev->devno] <= 2) {
		ata_link_printk(link, KERN_WARNING,
				"disabling LPM on the link\n");
		link->flags |= ATA_LFLAG_NO_LPM;
	}
	if (r_failed_dev)
		*r_failed_dev = dev;
	return rc;
}

/* number of enabled devices on @link */
static int ata_link_nr_enabled(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_for_each_dev(dev, link, ENABLED)
		cnt++;
	return cnt;
}

/* number of device slots on @link whose class is still unknown */
static int ata_link_nr_vacant(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_for_each_dev(dev, link, ALL)
		if (dev->class == ATA_DEV_UNKNOWN)
			cnt++;
	return cnt;
}

/*
 * ata_eh_skip_recovery - can EH be skipped entirely for @link?
 * Returns 1 if recovery is unnecessary, 0 if it must proceed.
 */
static int ata_eh_skip_recovery(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;

	/* skip disabled links */
	if (link->flags & ATA_LFLAG_DISABLED)
		return 1;

	/* skip if explicitly requested */
	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
		return 1;

	/* thaw frozen port and recover failed devices */
	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
		return 0;

	/* reset at least once if reset is requested */
	if ((ehc->i.action & ATA_EH_RESET) &&
	    !(ehc->i.flags & ATA_EHI_DID_RESET))
		return 0;

	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
	ata_for_each_dev(dev, link, ALL) {
		if (dev->class == ATA_DEV_UNKNOWN &&
		    ehc->classes[dev->devno] != ATA_DEV_NONE)
			return 0;
	}

	return 1;
}

/*
 * ata_ering_map() callback: count probe trials recorded within the
 * last ATA_EH_PROBE_TRIAL_INTERVAL.  Returns -1 to stop iteration
 * once entries get too old to matter.
 */
static int ata_count_probe_trials_cb(struct ata_ering_entry *ent,
				     void *void_arg)
{
	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
	u64 now = get_jiffies_64();
	int *trials = void_arg;

	/* min(now, interval) guards the subtraction against underflow
	 * while jiffies is still small shortly after boot */
	if (ent->timestamp < now - min(now, interval))
		return -1;

	(*trials)++;
	return 0;
}

/*
 * ata_eh_schedule_probe - schedule probing for @dev if requested and
 * not yet attempted in this EH session.  Returns 1 if a probe was
 * scheduled, 0 otherwise.
 */
static int ata_eh_schedule_probe(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_link *link = ata_dev_phys_link(dev);
	int trials = 0;

	/* probe not requested, or already attempted this session */
	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
	    (ehc->did_probe_mask & (1 << dev->devno)))
		return 0;

	ata_eh_detach_dev(dev);
	ata_dev_init(dev);
	ehc->did_probe_mask |= (1 << dev->devno);
	ehc->i.action |= ATA_EH_RESET;
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	/* the link maybe in a deep sleep, wake it up */
	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
		if (ata_is_host_link(link))
			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
					       ATA_LPM_EMPTY);
		else
			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
					 ATA_LPM_EMPTY);
	}

	/* Record and count probe trials on the ering.  The specific
	 * error mask used is irrelevant.  Because a successful device
	 * detection clears the ering, this count accumulates only if
	 * there are consecutive failed probes.
	 *
	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
	 * forced to 1.5Gbps.
	 *
	 * This is to work around cases where failed link speed
	 * negotiation results in device misdetection leading to
	 * infinite DEVXCHG or PHRDY CHG events.
	 */
	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);

	if (trials > ATA_EH_PROBE_TRIALS)
		sata_down_spd_limit(link, 1);

	return 1;
}

/*
 * ata_eh_handle_dev_fail - bookkeeping after a recovery step failed
 * for @dev with error @err.  Returns 1 if the whole EH sequence
 * should be retried, 0 if it can continue.
 */
static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	/* -EAGAIN from EH routine indicates retry without prejudice.
	 * The requester is responsible for ensuring forward progress.
	 */
	if (err != -EAGAIN)
		ehc->tries[dev->devno]--;

	switch (err) {
	case -ENODEV:
		/* device missing or wrong IDENTIFY data, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
		/* fall through */
	case -EINVAL:
		/* give it just one more chance */
		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (ehc->tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
			if (dev->pio_mode > XFER_PIO_0)
				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			ata_eh_detach_dev(dev);

		/* schedule probe if necessary */
		if (ata_eh_schedule_probe(dev)) {
			/* fresh probe gets a full set of tries and clean
			 * per-command timeout history */
			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			memset(ehc->cmd_timeout_idx[dev->devno], 0,
			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
		}

		return 1;
	} else {
		ehc->i.action |= ATA_EH_RESET;
		return 0;
	}
}

/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *	@r_failed_link: out parameter for failed link
 *
 *	This is the alpha and omega, eum and yang, heart and soul of
 *	libata exception handling.  On entry, actions required to
 *	recover each link and hotplug requests are recorded in the
 *	link's eh_context.  This function executes all the operations
 *	with appropriate retrials and fallbacks to resurrect failed
 *	devices, detach goners and greet newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		   ata_postreset_fn_t postreset,
		   struct ata_link **r_failed_link)
{
	struct ata_link *link;
	struct ata_device *dev;
	int rc, nr_fails;
	unsigned long flags, deadline;

	DPRINTK("ENTER\n");

	/* prep for recovery */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* re-enable link? */
		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
			spin_lock_irqsave(ap->lock, flags);
			link->flags &= ~ATA_LFLAG_DISABLED;
			spin_unlock_irqrestore(ap->lock, flags);
			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
		}

		ata_for_each_dev(dev, link, ALL) {
			if (link->flags & ATA_LFLAG_NO_RETRY)
				ehc->tries[dev->devno] = 1;
			else
				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

			/* collect port action mask recorded in dev actions */
			ehc->i.action |= ehc->i.dev_action[dev->devno] &
					 ~ATA_EH_PERDEV_MASK;
			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

			/* process hotplug request */
			if (dev->flags & ATA_DFLAG_DETACH)
				ata_eh_detach_dev(dev);

			/* schedule probe if necessary */
			if (!ata_dev_enabled(dev))
				ata_eh_schedule_probe(dev);
		}
	}

 retry:
	rc = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for EH */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* skip EH if possible.
 */
		if (ata_eh_skip_recovery(link))
			ehc->i.action = 0;

		ata_for_each_dev(dev, link, ALL)
			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
	}

	/* reset */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (!(ehc->i.action & ATA_EH_RESET))
			continue;

		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
				  prereset, softreset, hardreset, postreset);
		if (rc) {
			ata_link_printk(link, KERN_ERR,
					"reset failed, giving up\n");
			goto out;
		}
	}

	/* park heads on all devices whose PARK deadline hasn't passed;
	 * sleep until the furthest deadline or a new PARK request */
	do {
		unsigned long now;

		/*
		 * clears ATA_EH_PARK in eh_info and resets
		 * ap->park_req_pending
		 */
		ata_eh_pull_park_action(ap);

		deadline = jiffies;
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL) {
				struct ata_eh_context *ehc = &link->eh_context;
				unsigned long tmp;

				if (dev->class != ATA_DEV_ATA)
					continue;
				if (!(ehc->i.dev_action[dev->devno] &
				      ATA_EH_PARK))
					continue;
				tmp = dev->unpark_deadline;
				if (time_before(deadline, tmp))
					deadline = tmp;
				else if (time_before_eq(tmp, jiffies))
					continue;
				if (ehc->unloaded_mask & (1 << dev->devno))
					continue;

				ata_eh_park_issue_cmd(dev, 1);
			}
		}

		now = jiffies;
		if (time_before_eq(deadline, now))
			break;

		/* release EH ownership while waiting so other EH
		 * activity isn't blocked */
		ata_eh_release(ap);
		deadline = wait_for_completion_timeout(&ap->park_req_pending,
						       deadline - now);
		ata_eh_acquire(ap);
	} while (deadline);

	/* unpark everything that was parked above */
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			if (!(link->eh_context.unloaded_mask &
			      (1 << dev->devno)))
				continue;

			ata_eh_park_issue_cmd(dev, 0);
			ata_eh_done(link, dev, ATA_EH_PARK);
		}
	}

	/* the rest */
	nr_fails = 0;
	ata_for_each_link(link, ap, PMP_FIRST) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* host link of a PMP port only needs LPM config here */
		if (sata_pmp_attached(ap) && ata_is_host_link(link))
			goto config_lpm;

		/* revalidate existing devices and attach new ones */
		rc = ata_eh_revalidate_and_attach(link, &dev);
		if (rc)
			goto rest_fail;

		/* if PMP got attached, return, pmp EH will take care of it */
		if (link->device->class == ATA_DEV_PMP) {
			ehc->i.action = 0;
			return 0;
		}

		/* configure transfer mode if necessary */
		if (ehc->i.flags & ATA_EHI_SETMODE) {
			rc =
			     ata_set_mode(link, &dev);
			if (rc)
				goto rest_fail;
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}

		/* If reset has been issued, clear UA to avoid
		 * disrupting the current users of the device.
		 */
		if (ehc->i.flags & ATA_EHI_DID_RESET) {
			ata_for_each_dev(dev, link, ALL) {
				if (dev->class != ATA_DEV_ATAPI)
					continue;
				rc = atapi_eh_clear_ua(dev);
				if (rc)
					goto rest_fail;
			}
		}

		/* retry flush if necessary */
		ata_for_each_dev(dev, link, ALL) {
			if (dev->class != ATA_DEV_ATA)
				continue;
			rc = ata_eh_maybe_retry_flush(dev);
			if (rc)
				goto rest_fail;
		}

	config_lpm:
		/* configure link power saving */
		if (link->lpm_policy != ap->target_lpm_policy) {
			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
			if (rc)
				goto rest_fail;
		}

		/* this link is okay now */
		ehc->i.flags = 0;
		continue;

	rest_fail:
		nr_fails++;
		if (dev)
			ata_eh_handle_dev_fail(dev, rc);

		if (ap->pflags & ATA_PFLAG_FROZEN) {
			/* PMP reset requires working host port.
			 * Can't retry if it's frozen.
			 */
			if (sata_pmp_attached(ap))
				goto out;
			break;
		}
	}

	if (nr_fails)
		goto retry;

 out:
	if (rc && r_failed_link)
		*r_failed_link = link;

	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}

/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete.  Clean up EH states and retry or finish
 *	failed qcs.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_finish(struct ata_port *ap)
{
	int tag;

	/* retry or finish qcs */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY)
				ata_eh_qc_retry(qc);
			else
				ata_eh_qc_complete(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0,
				       sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}

/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
	       ata_postreset_fn_t postreset)
{
	struct ata_device *dev;
	int rc;

	ata_eh_autopsy(ap);
	ata_eh_report(ap);

	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
			    NULL);
	if (rc) {
		/* recovery failed outright: take every device offline */
		ata_for_each_dev(dev, &ap->link, ALL)
			ata_dev_disable(dev);
	}

	ata_eh_finish(ap);
}

/**
 *	ata_std_error_handler - standard error handler
 *	@ap: host port to handle error for
 *
 *	Standard error handler
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_std_error_handler(struct ata_port *ap)
{
	struct ata_port_operations *ops = ap->ops;
	ata_reset_fn_t hardreset = ops->hardreset;

	/* ignore built-in hardreset if SCR access is not available */
	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
		hardreset = NULL;

	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset,
		  ops->postreset);
}

#ifdef CONFIG_PM
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

	/* are we suspending?
 */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event == PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, PMSG_SUSPEND);
 out:
	/* report result */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		/* suspend failed on a frozen port: let EH clean up */
		ata_port_schedule_eh(ap);

	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return;
}

/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;
	int rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event != PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	/*
	 * Error timestamps are in jiffies which doesn't run while
	 * suspended and PHY events during resume isn't too uncommon.
	 * When the two are combined, it can lead to unnecessary speed
	 * downs if the machine is suspended and resumed repeatedly.
	 * Clear error history.
 */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_for_each_dev(dev, link, ALL)
			ata_ering_clear(&dev->ering);

	ata_acpi_set_state(ap, PMSG_ON);

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* report result */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */
gpl-2.0
KutuSystems/linux
arch/mips/kvm/mips.c
44
46184
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * KVM/MIPS: MIPS specific KVM APIs * * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. * Authors: Sanjay Lal <sanjayl@kymasys.com> */ #include <linux/bitops.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/kdebug.h> #include <linux/module.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/bootmem.h> #include <asm/fpu.h> #include <asm/page.h> #include <asm/cacheflush.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <linux/kvm_host.h> #include "interrupt.h" #include "commpage.h" #define CREATE_TRACE_POINTS #include "trace.h" #ifndef VECTORSPACING #define VECTORSPACING 0x100 /* for EI/VI mode */ #endif #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x) struct kvm_stats_debugfs_item debugfs_entries[] = { { "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU }, { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU }, { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU }, { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU }, { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU }, { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU }, { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU }, { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU }, { "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU }, { "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU }, { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU }, { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU }, { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU }, { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU }, { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU }, { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU }, { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU }, { "flush_dcache", 
VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU }, { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU }, { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU }, { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, {NULL} }; static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu) { int i; for_each_possible_cpu(i) { vcpu->arch.guest_kernel_asid[i] = 0; vcpu->arch.guest_user_asid[i] = 0; } return 0; } /* * XXXKYMA: We are simulatoring a processor that has the WII bit set in * Config7, so we are "runnable" if interrupts are pending */ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { return !!(vcpu->arch.pending_exceptions); } int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) { return 1; } int kvm_arch_hardware_enable(void) { return 0; } int kvm_arch_hardware_setup(void) { return 0; } void kvm_arch_check_processor_compat(void *rtn) { *(int *)rtn = 0; } static void kvm_mips_init_tlbs(struct kvm *kvm) { unsigned long wired; /* * Add a wired entry to the TLB, it is used to map the commpage to * the Guest kernel */ wired = read_c0_wired(); write_c0_wired(wired + 1); mtc0_tlbw_hazard(); kvm->arch.commpage_tlb = wired; kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(), kvm->arch.commpage_tlb); } static void kvm_mips_init_vm_percpu(void *arg) { struct kvm *kvm = (struct kvm *)arg; kvm_mips_init_tlbs(kvm); kvm_mips_callbacks->vm_init(kvm); } int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { if (atomic_inc_return(&kvm_mips_instance) == 1) { kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n", __func__); on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1); } return 0; } bool kvm_arch_has_vcpu_debugfs(void) { return false; } int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) { return 0; } void kvm_mips_free_vcpus(struct kvm *kvm) { unsigned int i; struct kvm_vcpu *vcpu; /* Put the pages we reserved for the guest pmap */ for (i = 0; i < 
kvm->arch.guest_pmap_npages; i++) { if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE) kvm_release_pfn_clean(kvm->arch.guest_pmap[i]); } kfree(kvm->arch.guest_pmap); kvm_for_each_vcpu(i, vcpu, kvm) { kvm_arch_vcpu_free(vcpu); } mutex_lock(&kvm->lock); for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) kvm->vcpus[i] = NULL; atomic_set(&kvm->online_vcpus, 0); mutex_unlock(&kvm->lock); } static void kvm_mips_uninit_tlbs(void *arg) { /* Restore wired count */ write_c0_wired(0); mtc0_tlbw_hazard(); /* Clear out all the TLBs */ kvm_local_flush_tlb_all(); } void kvm_arch_destroy_vm(struct kvm *kvm) { kvm_mips_free_vcpus(kvm); /* If this is the last instance, restore wired count */ if (atomic_dec_return(&kvm_mips_instance) == 0) { kvm_debug("%s: last KVM instance, restoring TLB parameters\n", __func__); on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1); } } long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { return -ENOIOCTLCMD; } int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) { return 0; } int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, enum kvm_mr_change change) { return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { unsigned long npages = 0; int i; kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", __func__, kvm, mem->slot, mem->guest_phys_addr, mem->memory_size, mem->userspace_addr); /* Setup Guest PMAP table */ if (!kvm->arch.guest_pmap) { if (mem->slot == 0) npages = mem->memory_size >> PAGE_SHIFT; if (npages) { kvm->arch.guest_pmap_npages = npages; kvm->arch.guest_pmap = kzalloc(npages * sizeof(unsigned long), GFP_KERNEL); if (!kvm->arch.guest_pmap) { kvm_err("Failed to allocate guest PMAP\n"); return; } kvm_debug("Allocated space for 
Guest PMAP Table (%ld pages) @ %p\n", npages, kvm->arch.guest_pmap); /* Now setup the page table */ for (i = 0; i < npages; i++) kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE; } } } static inline void dump_handler(const char *symbol, void *start, void *end) { u32 *p; pr_debug("LEAF(%s)\n", symbol); pr_debug("\t.set push\n"); pr_debug("\t.set noreorder\n"); for (p = start; p < (u32 *)end; ++p) pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p); pr_debug("\t.set\tpop\n"); pr_debug("\tEND(%s)\n", symbol); } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { int err, size; void *gebase, *p, *handler; int i; struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); if (!vcpu) { err = -ENOMEM; goto out; } err = kvm_vcpu_init(vcpu, kvm, id); if (err) goto out_free_cpu; kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu); /* * Allocate space for host mode exception handlers that handle * guest mode exits */ if (cpu_has_veic || cpu_has_vint) size = 0x200 + VECTORSPACING * 64; else size = 0x4000; gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL); if (!gebase) { err = -ENOMEM; goto out_uninit_cpu; } kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", ALIGN(size, PAGE_SIZE), gebase); /* * Check new ebase actually fits in CP0_EBase. The lack of a write gate * limits us to the low 512MB of physical address space. If the memory * we allocate is out of range, just give up now. 
*/ if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) { kvm_err("CP0_EBase.WG required for guest exception base %pK\n", gebase); err = -ENOMEM; goto out_free_gebase; } /* Save new ebase */ vcpu->arch.guest_ebase = gebase; /* Build guest exception vectors dynamically in unmapped memory */ handler = gebase + 0x2000; /* TLB Refill, EXL = 0 */ kvm_mips_build_exception(gebase, handler); /* General Exception Entry point */ kvm_mips_build_exception(gebase + 0x180, handler); /* For vectored interrupts poke the exception code @ all offsets 0-7 */ for (i = 0; i < 8; i++) { kvm_debug("L1 Vectored handler @ %p\n", gebase + 0x200 + (i * VECTORSPACING)); kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING, handler); } /* General exit handler */ p = handler; p = kvm_mips_build_exit(p); /* Guest entry routine */ vcpu->arch.vcpu_run = p; p = kvm_mips_build_vcpu_run(p); /* Dump the generated code */ pr_debug("#include <asm/asm.h>\n"); pr_debug("#include <asm/regdef.h>\n"); pr_debug("\n"); dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p); dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200); dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run); /* Invalidate the icache for these ranges */ flush_icache_range((unsigned long)gebase, (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); /* * Allocate comm page for guest kernel, a TLB will be reserved for * mapping GVA @ 0xFFFF8000 to this page */ vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); if (!vcpu->arch.kseg0_commpage) { err = -ENOMEM; goto out_free_gebase; } kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); kvm_mips_commpage_init(vcpu); /* Init */ vcpu->arch.last_sched_cpu = -1; /* Start off the timer */ kvm_mips_init_count(vcpu); return vcpu; out_free_gebase: kfree(gebase); out_uninit_cpu: kvm_vcpu_uninit(vcpu); out_free_cpu: kfree(vcpu); out: return ERR_PTR(err); } void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { hrtimer_cancel(&vcpu->arch.comparecount_timer); 
kvm_vcpu_uninit(vcpu); kvm_mips_dump_stats(vcpu); kfree(vcpu->arch.guest_ebase); kfree(vcpu->arch.kseg0_commpage); kfree(vcpu); } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { kvm_arch_vcpu_free(vcpu); } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { return -ENOIOCTLCMD; } /* Must be called with preemption disabled, just before entering guest */ static void kvm_mips_check_asids(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; int i, cpu = smp_processor_id(); unsigned int gasid; /* * Lazy host ASID regeneration for guest user mode. * If the guest ASID has changed since the last guest usermode * execution, regenerate the host ASID so as to invalidate stale TLB * entries. */ if (!KVM_GUEST_KERNEL_MODE(vcpu)) { gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID; if (gasid != vcpu->arch.last_user_gasid) { kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu); vcpu->arch.guest_user_asid[cpu] = vcpu->arch.guest_user_mm.context.asid[cpu]; for_each_possible_cpu(i) if (i != cpu) vcpu->arch.guest_user_asid[cpu] = 0; vcpu->arch.last_user_gasid = gasid; } } } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { int r = 0; sigset_t sigsaved; if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (vcpu->mmio_needed) { if (!vcpu->mmio_is_write) kvm_mips_complete_mmio_load(vcpu, run); vcpu->mmio_needed = 0; } lose_fpu(1); local_irq_disable(); /* Check if we have any exceptions/interrupts pending */ kvm_mips_deliver_interrupts(vcpu, kvm_read_c0_guest_cause(vcpu->arch.cop0)); guest_enter_irqoff(); /* Disable hardware page table walking while in guest */ htw_stop(); trace_kvm_enter(vcpu); kvm_mips_check_asids(vcpu); r = vcpu->arch.vcpu_run(run, vcpu); trace_kvm_out(vcpu); /* Re-enable HTW before enabling interrupts */ htw_start(); guest_exit_irqoff(); local_irq_enable(); if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return r; } 
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) { int intr = (int)irq->irq; struct kvm_vcpu *dvcpu = NULL; if (intr == 3 || intr == -3 || intr == 4 || intr == -4) kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu, (int)intr); if (irq->cpu == -1) dvcpu = vcpu; else dvcpu = vcpu->kvm->vcpus[irq->cpu]; if (intr == 2 || intr == 3 || intr == 4) { kvm_mips_callbacks->queue_io_int(dvcpu, irq); } else if (intr == -2 || intr == -3 || intr == -4) { kvm_mips_callbacks->dequeue_io_int(dvcpu, irq); } else { kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__, irq->cpu, irq->irq); return -EINVAL; } dvcpu->arch.wait = 0; if (swait_active(&dvcpu->wq)) swake_up(&dvcpu->wq); return 0; } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { return -ENOIOCTLCMD; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { return -ENOIOCTLCMD; } static u64 kvm_mips_get_one_regs[] = { KVM_REG_MIPS_R0, KVM_REG_MIPS_R1, KVM_REG_MIPS_R2, KVM_REG_MIPS_R3, KVM_REG_MIPS_R4, KVM_REG_MIPS_R5, KVM_REG_MIPS_R6, KVM_REG_MIPS_R7, KVM_REG_MIPS_R8, KVM_REG_MIPS_R9, KVM_REG_MIPS_R10, KVM_REG_MIPS_R11, KVM_REG_MIPS_R12, KVM_REG_MIPS_R13, KVM_REG_MIPS_R14, KVM_REG_MIPS_R15, KVM_REG_MIPS_R16, KVM_REG_MIPS_R17, KVM_REG_MIPS_R18, KVM_REG_MIPS_R19, KVM_REG_MIPS_R20, KVM_REG_MIPS_R21, KVM_REG_MIPS_R22, KVM_REG_MIPS_R23, KVM_REG_MIPS_R24, KVM_REG_MIPS_R25, KVM_REG_MIPS_R26, KVM_REG_MIPS_R27, KVM_REG_MIPS_R28, KVM_REG_MIPS_R29, KVM_REG_MIPS_R30, KVM_REG_MIPS_R31, #ifndef CONFIG_CPU_MIPSR6 KVM_REG_MIPS_HI, KVM_REG_MIPS_LO, #endif KVM_REG_MIPS_PC, KVM_REG_MIPS_CP0_INDEX, KVM_REG_MIPS_CP0_CONTEXT, KVM_REG_MIPS_CP0_USERLOCAL, KVM_REG_MIPS_CP0_PAGEMASK, KVM_REG_MIPS_CP0_WIRED, KVM_REG_MIPS_CP0_HWRENA, KVM_REG_MIPS_CP0_BADVADDR, KVM_REG_MIPS_CP0_COUNT, KVM_REG_MIPS_CP0_ENTRYHI, KVM_REG_MIPS_CP0_COMPARE, KVM_REG_MIPS_CP0_STATUS, KVM_REG_MIPS_CP0_CAUSE, KVM_REG_MIPS_CP0_EPC, KVM_REG_MIPS_CP0_PRID, 
KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

/* Register IDs reported only when the guest is allowed an FPU */
static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_IR,
	KVM_REG_MIPS_FCR_CSR,
};

/* Register IDs reported only when the guest is allowed MSA */
static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_IR,
	KVM_REG_MIPS_MSA_CSR,
};

/*
 * KScratch register IDs; only those with the matching bit set in
 * vcpu->arch.kscratch_enabled are actually reported (see the loop in
 * kvm_mips_copy_reg_indices() below).
 */
static u64 kvm_mips_get_one_regs_kscratch[] = {
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,
};

/*
 * Total number of register IDs kvm_mips_copy_reg_indices() will emit for
 * this vcpu: the base list plus conditional FPU/MSA/kscratch IDs plus
 * whatever the implementation callbacks add.  Must stay in lockstep with
 * kvm_mips_copy_reg_indices().
 */
static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		/* +48: 32 FPR_32 IDs plus the 16 even FPR_64 IDs */
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		/* odd doubles */
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
			ret += 16;
	}
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += __arch_hweight8(vcpu->arch.kscratch_enabled);
	ret += kvm_mips_callbacks->num_regs(vcpu);

	return ret;
}

/*
 * Copy the full list of supported register IDs to the userspace buffer
 * @indices (KVM_GET_REG_LIST).  Returns 0 on success or -EFAULT if any
 * user copy fails.
 */
static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				continue;

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	if
(kvm_mips_guest_can_have_msa(&vcpu->arch)) { if (copy_to_user(indices, kvm_mips_get_one_regs_msa, sizeof(kvm_mips_get_one_regs_msa))) return -EFAULT; indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa); for (i = 0; i < 32; ++i) { index = KVM_REG_MIPS_VEC_128(i); if (copy_to_user(indices, &index, sizeof(index))) return -EFAULT; ++indices; } } for (i = 0; i < 6; ++i) { if (!(vcpu->arch.kscratch_enabled & BIT(i + 2))) continue; if (copy_to_user(indices, &kvm_mips_get_one_regs_kscratch[i], sizeof(kvm_mips_get_one_regs_kscratch[i]))) return -EFAULT; ++indices; } return kvm_mips_callbacks->copy_reg_indices(vcpu, indices); } static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_fpu_struct *fpu = &vcpu->arch.fpu; int ret; s64 v; s64 vs[2]; unsigned int idx; switch (reg->id) { /* General purpose registers */ case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; break; #ifndef CONFIG_CPU_MIPSR6 case KVM_REG_MIPS_HI: v = (long)vcpu->arch.hi; break; case KVM_REG_MIPS_LO: v = (long)vcpu->arch.lo; break; #endif case KVM_REG_MIPS_PC: v = (long)vcpu->arch.pc; break; /* Floating point registers */ case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): if (!kvm_mips_guest_has_fpu(&vcpu->arch)) return -EINVAL; idx = reg->id - KVM_REG_MIPS_FPR_32(0); /* Odd singles in top of even double when FR=0 */ if (kvm_read_c0_guest_status(cop0) & ST0_FR) v = get_fpr32(&fpu->fpr[idx], 0); else v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1); break; case KVM_REG_MIPS_FPR_64(0) ... 
KVM_REG_MIPS_FPR_64(31): if (!kvm_mips_guest_has_fpu(&vcpu->arch)) return -EINVAL; idx = reg->id - KVM_REG_MIPS_FPR_64(0); /* Can't access odd doubles in FR=0 mode */ if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) return -EINVAL; v = get_fpr64(&fpu->fpr[idx], 0); break; case KVM_REG_MIPS_FCR_IR: if (!kvm_mips_guest_has_fpu(&vcpu->arch)) return -EINVAL; v = boot_cpu_data.fpu_id; break; case KVM_REG_MIPS_FCR_CSR: if (!kvm_mips_guest_has_fpu(&vcpu->arch)) return -EINVAL; v = fpu->fcr31; break; /* MIPS SIMD Architecture (MSA) registers */ case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31): if (!kvm_mips_guest_has_msa(&vcpu->arch)) return -EINVAL; /* Can't access MSA registers in FR=0 mode */ if (!(kvm_read_c0_guest_status(cop0) & ST0_FR)) return -EINVAL; idx = reg->id - KVM_REG_MIPS_VEC_128(0); #ifdef CONFIG_CPU_LITTLE_ENDIAN /* least significant byte first */ vs[0] = get_fpr64(&fpu->fpr[idx], 0); vs[1] = get_fpr64(&fpu->fpr[idx], 1); #else /* most significant byte first */ vs[0] = get_fpr64(&fpu->fpr[idx], 1); vs[1] = get_fpr64(&fpu->fpr[idx], 0); #endif break; case KVM_REG_MIPS_MSA_IR: if (!kvm_mips_guest_has_msa(&vcpu->arch)) return -EINVAL; v = boot_cpu_data.msa_id; break; case KVM_REG_MIPS_MSA_CSR: if (!kvm_mips_guest_has_msa(&vcpu->arch)) return -EINVAL; v = fpu->msacsr; break; /* Co-processor 0 registers */ case KVM_REG_MIPS_CP0_INDEX: v = (long)kvm_read_c0_guest_index(cop0); break; case KVM_REG_MIPS_CP0_CONTEXT: v = (long)kvm_read_c0_guest_context(cop0); break; case KVM_REG_MIPS_CP0_USERLOCAL: v = (long)kvm_read_c0_guest_userlocal(cop0); break; case KVM_REG_MIPS_CP0_PAGEMASK: v = (long)kvm_read_c0_guest_pagemask(cop0); break; case KVM_REG_MIPS_CP0_WIRED: v = (long)kvm_read_c0_guest_wired(cop0); break; case KVM_REG_MIPS_CP0_HWRENA: v = (long)kvm_read_c0_guest_hwrena(cop0); break; case KVM_REG_MIPS_CP0_BADVADDR: v = (long)kvm_read_c0_guest_badvaddr(cop0); break; case KVM_REG_MIPS_CP0_ENTRYHI: v = (long)kvm_read_c0_guest_entryhi(cop0); break; 
case KVM_REG_MIPS_CP0_COMPARE: v = (long)kvm_read_c0_guest_compare(cop0); break; case KVM_REG_MIPS_CP0_STATUS: v = (long)kvm_read_c0_guest_status(cop0); break; case KVM_REG_MIPS_CP0_CAUSE: v = (long)kvm_read_c0_guest_cause(cop0); break; case KVM_REG_MIPS_CP0_EPC: v = (long)kvm_read_c0_guest_epc(cop0); break; case KVM_REG_MIPS_CP0_PRID: v = (long)kvm_read_c0_guest_prid(cop0); break; case KVM_REG_MIPS_CP0_CONFIG: v = (long)kvm_read_c0_guest_config(cop0); break; case KVM_REG_MIPS_CP0_CONFIG1: v = (long)kvm_read_c0_guest_config1(cop0); break; case KVM_REG_MIPS_CP0_CONFIG2: v = (long)kvm_read_c0_guest_config2(cop0); break; case KVM_REG_MIPS_CP0_CONFIG3: v = (long)kvm_read_c0_guest_config3(cop0); break; case KVM_REG_MIPS_CP0_CONFIG4: v = (long)kvm_read_c0_guest_config4(cop0); break; case KVM_REG_MIPS_CP0_CONFIG5: v = (long)kvm_read_c0_guest_config5(cop0); break; case KVM_REG_MIPS_CP0_CONFIG7: v = (long)kvm_read_c0_guest_config7(cop0); break; case KVM_REG_MIPS_CP0_ERROREPC: v = (long)kvm_read_c0_guest_errorepc(cop0); break; case KVM_REG_MIPS_CP0_KSCRATCH1 ... 
KVM_REG_MIPS_CP0_KSCRATCH6: idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; if (!(vcpu->arch.kscratch_enabled & BIT(idx))) return -EINVAL; switch (idx) { case 2: v = (long)kvm_read_c0_guest_kscratch1(cop0); break; case 3: v = (long)kvm_read_c0_guest_kscratch2(cop0); break; case 4: v = (long)kvm_read_c0_guest_kscratch3(cop0); break; case 5: v = (long)kvm_read_c0_guest_kscratch4(cop0); break; case 6: v = (long)kvm_read_c0_guest_kscratch5(cop0); break; case 7: v = (long)kvm_read_c0_guest_kscratch6(cop0); break; } break; /* registers to be handled specially */ default: ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v); if (ret) return ret; break; } if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; return put_user(v, uaddr64); } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; u32 v32 = (u32)v; return put_user(v32, uaddr32); } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { void __user *uaddr = (void __user *)(long)reg->addr; return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0; } else { return -EINVAL; } } static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_fpu_struct *fpu = &vcpu->arch.fpu; s64 v; s64 vs[2]; unsigned int idx; if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; if (get_user(v, uaddr64) != 0) return -EFAULT; } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; s32 v32; if (get_user(v32, uaddr32) != 0) return -EFAULT; v = (s64)v32; } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { void __user *uaddr = (void __user *)(long)reg->addr; return copy_from_user(vs, uaddr, 16) ? 
-EFAULT : 0; } else { return -EINVAL; } switch (reg->id) { /* General purpose registers */ case KVM_REG_MIPS_R0: /* Silently ignore requests to set $0 */ break; case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31: vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; break; #ifndef CONFIG_CPU_MIPSR6 case KVM_REG_MIPS_HI: vcpu->arch.hi = v; break; case KVM_REG_MIPS_LO: vcpu->arch.lo = v; break; #endif case KVM_REG_MIPS_PC: vcpu->arch.pc = v; break; /* Floating point registers */ case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): if (!kvm_mips_guest_has_fpu(&vcpu->arch)) return -EINVAL; idx = reg->id - KVM_REG_MIPS_FPR_32(0); /* Odd singles in top of even double when FR=0 */ if (kvm_read_c0_guest_status(cop0) & ST0_FR) set_fpr32(&fpu->fpr[idx], 0, v); else set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v); break; case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31): if (!kvm_mips_guest_has_fpu(&vcpu->arch)) return -EINVAL; idx = reg->id - KVM_REG_MIPS_FPR_64(0); /* Can't access odd doubles in FR=0 mode */ if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) return -EINVAL; set_fpr64(&fpu->fpr[idx], 0, v); break; case KVM_REG_MIPS_FCR_IR: if (!kvm_mips_guest_has_fpu(&vcpu->arch)) return -EINVAL; /* Read-only */ break; case KVM_REG_MIPS_FCR_CSR: if (!kvm_mips_guest_has_fpu(&vcpu->arch)) return -EINVAL; fpu->fcr31 = v; break; /* MIPS SIMD Architecture (MSA) registers */ case KVM_REG_MIPS_VEC_128(0) ... 
KVM_REG_MIPS_VEC_128(31): if (!kvm_mips_guest_has_msa(&vcpu->arch)) return -EINVAL; idx = reg->id - KVM_REG_MIPS_VEC_128(0); #ifdef CONFIG_CPU_LITTLE_ENDIAN /* least significant byte first */ set_fpr64(&fpu->fpr[idx], 0, vs[0]); set_fpr64(&fpu->fpr[idx], 1, vs[1]); #else /* most significant byte first */ set_fpr64(&fpu->fpr[idx], 1, vs[0]); set_fpr64(&fpu->fpr[idx], 0, vs[1]); #endif break; case KVM_REG_MIPS_MSA_IR: if (!kvm_mips_guest_has_msa(&vcpu->arch)) return -EINVAL; /* Read-only */ break; case KVM_REG_MIPS_MSA_CSR: if (!kvm_mips_guest_has_msa(&vcpu->arch)) return -EINVAL; fpu->msacsr = v; break; /* Co-processor 0 registers */ case KVM_REG_MIPS_CP0_INDEX: kvm_write_c0_guest_index(cop0, v); break; case KVM_REG_MIPS_CP0_CONTEXT: kvm_write_c0_guest_context(cop0, v); break; case KVM_REG_MIPS_CP0_USERLOCAL: kvm_write_c0_guest_userlocal(cop0, v); break; case KVM_REG_MIPS_CP0_PAGEMASK: kvm_write_c0_guest_pagemask(cop0, v); break; case KVM_REG_MIPS_CP0_WIRED: kvm_write_c0_guest_wired(cop0, v); break; case KVM_REG_MIPS_CP0_HWRENA: kvm_write_c0_guest_hwrena(cop0, v); break; case KVM_REG_MIPS_CP0_BADVADDR: kvm_write_c0_guest_badvaddr(cop0, v); break; case KVM_REG_MIPS_CP0_ENTRYHI: kvm_write_c0_guest_entryhi(cop0, v); break; case KVM_REG_MIPS_CP0_STATUS: kvm_write_c0_guest_status(cop0, v); break; case KVM_REG_MIPS_CP0_EPC: kvm_write_c0_guest_epc(cop0, v); break; case KVM_REG_MIPS_CP0_PRID: kvm_write_c0_guest_prid(cop0, v); break; case KVM_REG_MIPS_CP0_ERROREPC: kvm_write_c0_guest_errorepc(cop0, v); break; case KVM_REG_MIPS_CP0_KSCRATCH1 ... 
KVM_REG_MIPS_CP0_KSCRATCH6: idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; if (!(vcpu->arch.kscratch_enabled & BIT(idx))) return -EINVAL; switch (idx) { case 2: kvm_write_c0_guest_kscratch1(cop0, v); break; case 3: kvm_write_c0_guest_kscratch2(cop0, v); break; case 4: kvm_write_c0_guest_kscratch3(cop0, v); break; case 5: kvm_write_c0_guest_kscratch4(cop0, v); break; case 6: kvm_write_c0_guest_kscratch5(cop0, v); break; case 7: kvm_write_c0_guest_kscratch6(cop0, v); break; } break; /* registers to be handled specially */ default: return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); } return 0; } static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, struct kvm_enable_cap *cap) { int r = 0; if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap)) return -EINVAL; if (cap->flags) return -EINVAL; if (cap->args[0]) return -EINVAL; switch (cap->cap) { case KVM_CAP_MIPS_FPU: vcpu->arch.fpu_enabled = true; break; case KVM_CAP_MIPS_MSA: vcpu->arch.msa_enabled = true; break; default: r = -EINVAL; break; } return r; } long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; long r; switch (ioctl) { case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) return kvm_mips_set_reg(vcpu, &reg); else return kvm_mips_get_reg(vcpu, &reg); } case KVM_GET_REG_LIST: { struct kvm_reg_list __user *user_list = argp; struct kvm_reg_list reg_list; unsigned n; if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n; reg_list.n = kvm_mips_num_regs(vcpu); if (copy_to_user(user_list, &reg_list, sizeof(reg_list))) return -EFAULT; if (n < reg_list.n) return -E2BIG; return kvm_mips_copy_reg_indices(vcpu, user_list->reg); } case KVM_NMI: /* Treat the NMI as a CPU reset */ r = kvm_mips_reset_vcpu(vcpu); break; case KVM_INTERRUPT: { struct 
kvm_mips_interrupt irq; r = -EFAULT; if (copy_from_user(&irq, argp, sizeof(irq))) goto out; kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq); r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); break; } case KVM_ENABLE_CAP: { struct kvm_enable_cap cap; r = -EFAULT; if (copy_from_user(&cap, argp, sizeof(cap))) goto out; r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); break; } default: r = -ENOIOCTLCMD; } out: return r; } /* Get (and clear) the dirty memory log for a memory slot. */ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; unsigned long ga, ga_end; int is_dirty = 0; int r; unsigned long n; mutex_lock(&kvm->slots_lock); r = kvm_get_dirty_log(kvm, log, &is_dirty); if (r) goto out; /* If nothing is dirty, don't bother messing with page tables. */ if (is_dirty) { slots = kvm_memslots(kvm); memslot = id_to_memslot(slots, log->slot); ga = memslot->base_gfn << PAGE_SHIFT; ga_end = ga + (memslot->npages << PAGE_SHIFT); kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga, ga_end); n = kvm_dirty_bitmap_bytes(memslot); memset(memslot->dirty_bitmap, 0, n); } r = 0; out: mutex_unlock(&kvm->slots_lock); return r; } long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { long r; switch (ioctl) { default: r = -ENOIOCTLCMD; } return r; } int kvm_arch_init(void *opaque) { if (kvm_mips_callbacks) { kvm_err("kvm: module already exists\n"); return -EEXIST; } return kvm_mips_emulation_init(&kvm_mips_callbacks); } void kvm_arch_exit(void) { kvm_mips_callbacks = NULL; } int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { return -ENOIOCTLCMD; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { return -ENOIOCTLCMD; } void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -ENOIOCTLCMD; } int 
kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* FP registers are accessed via the ONE_REG API instead */
	return -ENOIOCTLCMD;
}

/* No mmap()-able vcpu region on MIPS */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

/* Report which optional KVM capabilities this host/VM supports */
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

/* Is a guest timer interrupt pending for this vcpu? */
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu);
}

/* Dump GPRs, HI/LO and key guest CP0 state via kvm_debug (debug aid) */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

/* KVM_SET_REGS ioctl: load all GPRs (except $0), HI/LO and PC */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set.
*/ vcpu->arch.hi = regs->hi; vcpu->arch.lo = regs->lo; vcpu->arch.pc = regs->pc; return 0; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) regs->gpr[i] = vcpu->arch.gprs[i]; regs->hi = vcpu->arch.hi; regs->lo = vcpu->arch.lo; regs->pc = vcpu->arch.pc; return 0; } static void kvm_mips_comparecount_func(unsigned long data) { struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; kvm_mips_callbacks->queue_timer_int(vcpu); vcpu->arch.wait = 0; if (swait_active(&vcpu->wq)) swake_up(&vcpu->wq); } /* low level hrtimer wake routine */ static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) { struct kvm_vcpu *vcpu; vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer); kvm_mips_comparecount_func((unsigned long) vcpu); return kvm_mips_count_timeout(vcpu); } int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { kvm_mips_callbacks->vcpu_init(vcpu); hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; return 0; } int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { return 0; } /* Initial guest state */ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { return kvm_mips_callbacks->vcpu_setup(vcpu); } static void kvm_mips_set_c0_status(void) { u32 status = read_c0_status(); if (cpu_has_dsp) status |= (ST0_MX); write_c0_status(status); ehb(); } /* * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV) */ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) { u32 cause = vcpu->arch.host_cp0_cause; u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; u32 __user *opc = (u32 __user *) vcpu->arch.pc; unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; /* re-enable HTW before enabling interrupts */ htw_start(); /* Set a default exit reason 
*/ run->exit_reason = KVM_EXIT_UNKNOWN; run->ready_for_interrupt_injection = 1; /* * Set the appropriate status bits based on host CPU features, * before we hit the scheduler */ kvm_mips_set_c0_status(); local_irq_enable(); kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n", cause, opc, run, vcpu); trace_kvm_exit(vcpu, exccode); /* * Do a privilege check, if in UM most of these exit conditions end up * causing an exception to be delivered to the Guest Kernel */ er = kvm_mips_check_privilege(cause, opc, run, vcpu); if (er == EMULATE_PRIV_FAIL) { goto skip_emul; } else if (er == EMULATE_FAIL) { run->exit_reason = KVM_EXIT_INTERNAL_ERROR; ret = RESUME_HOST; goto skip_emul; } switch (exccode) { case EXCCODE_INT: kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc); ++vcpu->stat.int_exits; if (need_resched()) cond_resched(); ret = RESUME_GUEST; break; case EXCCODE_CPU: kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc); ++vcpu->stat.cop_unusable_exits; ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); /* XXXKYMA: Might need to return to user space */ if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) ret = RESUME_HOST; break; case EXCCODE_MOD: ++vcpu->stat.tlbmod_exits; ret = kvm_mips_callbacks->handle_tlb_mod(vcpu); break; case EXCCODE_TLBS: kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n", cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, badvaddr); ++vcpu->stat.tlbmiss_st_exits; ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu); break; case EXCCODE_TLBL: kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n", cause, opc, badvaddr); ++vcpu->stat.tlbmiss_ld_exits; ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu); break; case EXCCODE_ADES: ++vcpu->stat.addrerr_st_exits; ret = kvm_mips_callbacks->handle_addr_err_st(vcpu); break; case EXCCODE_ADEL: ++vcpu->stat.addrerr_ld_exits; ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu); break; case EXCCODE_SYS: ++vcpu->stat.syscall_exits; ret = 
kvm_mips_callbacks->handle_syscall(vcpu); break; case EXCCODE_RI: ++vcpu->stat.resvd_inst_exits; ret = kvm_mips_callbacks->handle_res_inst(vcpu); break; case EXCCODE_BP: ++vcpu->stat.break_inst_exits; ret = kvm_mips_callbacks->handle_break(vcpu); break; case EXCCODE_TR: ++vcpu->stat.trap_inst_exits; ret = kvm_mips_callbacks->handle_trap(vcpu); break; case EXCCODE_MSAFPE: ++vcpu->stat.msa_fpe_exits; ret = kvm_mips_callbacks->handle_msa_fpe(vcpu); break; case EXCCODE_FPE: ++vcpu->stat.fpe_exits; ret = kvm_mips_callbacks->handle_fpe(vcpu); break; case EXCCODE_MSADIS: ++vcpu->stat.msa_disabled_exits; ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); break; default: kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, kvm_read_c0_guest_status(vcpu->arch.cop0)); kvm_arch_vcpu_dump_regs(vcpu); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; ret = RESUME_HOST; break; } skip_emul: local_irq_disable(); if (er == EMULATE_DONE && !(ret & RESUME_HOST)) kvm_mips_deliver_interrupts(vcpu, cause); if (!(ret & RESUME_HOST)) { /* Only check for signals if not already exiting to userspace */ if (signal_pending(current)) { run->exit_reason = KVM_EXIT_INTR; ret = (-EINTR << 2) | RESUME_HOST; ++vcpu->stat.signal_exits; trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL); } } if (ret == RESUME_GUEST) { trace_kvm_reenter(vcpu); kvm_mips_check_asids(vcpu); /* * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context * is live), restore FCR31 / MSACSR. * * This should be before returning to the guest exception * vector, as it may well cause an [MSA] FP exception if there * are pending exception bits unmasked. (see * kvm_mips_csr_die_notifier() for how that is handled). 
*/ if (kvm_mips_guest_has_fpu(&vcpu->arch) && read_c0_status() & ST0_CU1) __kvm_restore_fcsr(&vcpu->arch); if (kvm_mips_guest_has_msa(&vcpu->arch) && read_c0_config5() & MIPS_CONF5_MSAEN) __kvm_restore_msacsr(&vcpu->arch); } /* Disable HTW before returning to guest or host */ htw_stop(); return ret; } /* Enable FPU for guest and restore context */ void kvm_own_fpu(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; unsigned int sr, cfg5; preempt_disable(); sr = kvm_read_c0_guest_status(cop0); /* * If MSA state is already live, it is undefined how it interacts with * FR=0 FPU state, and we don't want to hit reserved instruction * exceptions trying to save the MSA state later when CU=1 && FR=1, so * play it safe and save it first. * * In theory we shouldn't ever hit this case since kvm_lose_fpu() should * get called when guest CU1 is set, however we can't trust the guest * not to clobber the status register directly via the commpage. */ if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) kvm_lose_fpu(vcpu); /* * Enable FPU for guest * We set FR and FRE according to guest context */ change_c0_status(ST0_CU1 | ST0_FR, sr); if (cpu_has_fre) { cfg5 = kvm_read_c0_guest_config5(cop0); change_c0_config5(MIPS_CONF5_FRE, cfg5); } enable_fpu_hazard(); /* If guest FPU state not active, restore it now */ if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { __kvm_restore_fpu(&vcpu->arch); vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); } else { trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU); } preempt_enable(); } #ifdef CONFIG_CPU_HAS_MSA /* Enable MSA for guest and restore context */ void kvm_own_msa(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; unsigned int sr, cfg5; preempt_disable(); /* * Enable FPU if enabled in guest, since we're restoring FPU context * anyway. We set FR and FRE according to guest context. 
*/ if (kvm_mips_guest_has_fpu(&vcpu->arch)) { sr = kvm_read_c0_guest_status(cop0); /* * If FR=0 FPU state is already live, it is undefined how it * interacts with MSA state, so play it safe and save it first. */ if (!(sr & ST0_FR) && (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU) kvm_lose_fpu(vcpu); change_c0_status(ST0_CU1 | ST0_FR, sr); if (sr & ST0_CU1 && cpu_has_fre) { cfg5 = kvm_read_c0_guest_config5(cop0); change_c0_config5(MIPS_CONF5_FRE, cfg5); } } /* Enable MSA for guest */ set_c0_config5(MIPS_CONF5_MSAEN); enable_fpu_hazard(); switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) { case KVM_MIPS_AUX_FPU: /* * Guest FPU state already loaded, only restore upper MSA state */ __kvm_restore_msa_upper(&vcpu->arch); vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA); break; case 0: /* Neither FPU or MSA already active, restore full MSA state */ __kvm_restore_msa(&vcpu->arch); vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; if (kvm_mips_guest_has_fpu(&vcpu->arch)) vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU_MSA); break; default: trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA); break; } preempt_enable(); } #endif /* Drop FPU & MSA without saving it */ void kvm_drop_fpu(struct kvm_vcpu *vcpu) { preempt_disable(); if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { disable_msa(); trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA); vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA; } if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { clear_c0_status(ST0_CU1 | ST0_FR); trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU); vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; } preempt_enable(); } /* Save and disable FPU & MSA */ void kvm_lose_fpu(struct kvm_vcpu *vcpu) { /* * FPU & MSA get disabled in root context (hardware) when it is disabled * in guest context (software), but the register 
state in the hardware * may still be in use. This is why we explicitly re-enable the hardware * before saving. */ preempt_disable(); if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { set_c0_config5(MIPS_CONF5_MSAEN); enable_fpu_hazard(); __kvm_save_msa(&vcpu->arch); trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA); /* Disable MSA & FPU */ disable_msa(); if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { clear_c0_status(ST0_CU1 | ST0_FR); disable_fpu_hazard(); } vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA); } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { set_c0_status(ST0_CU1); enable_fpu_hazard(); __kvm_save_fpu(&vcpu->arch); vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); /* Disable FPU */ clear_c0_status(ST0_CU1 | ST0_FR); disable_fpu_hazard(); } preempt_enable(); } /* * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP * exception if cause bits are set in the value being written. 
*/
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	/*
	 * Only swallow the exception if it was raised by the exact CSR
	 * restore instructions we expect; anything else is a genuine fault.
	 */
	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};

/* Module entry: build entry/exit stubs, register with generic KVM */
static int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_mips_entry_setup();
	if (ret)
		return ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	return 0;
}

static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
gpl-2.0
TrinkCore/linux-core
drivers/net/wireless/rt2x00/rt2800pci.c
44
36807
/* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com> Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com> Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org> Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com> Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de> Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com> Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com> Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com> <http://rt2x00.serialmonkey.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Module: rt2800pci Abstract: rt2800pci device specific routines. Supported chipsets: RT2800E & RT2800ED. */ #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/eeprom_93cx6.h> #include "rt2x00.h" #include "rt2x00pci.h" #include "rt2x00soc.h" #include "rt2800lib.h" #include "rt2800.h" #include "rt2800pci.h" /* * Allow hardware encryption to be disabled. 
*/ static bool modparam_nohwcrypt = false; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); static bool rt2800pci_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev) { return modparam_nohwcrypt; } static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token) { unsigned int i; u32 reg; /* * SOC devices don't support MCU requests. */ if (rt2x00_is_soc(rt2x00dev)) return; for (i = 0; i < 200; i++) { rt2x00pci_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg); if ((rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD0) == token) || (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD1) == token) || (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD2) == token) || (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD3) == token)) break; udelay(REGISTER_BUSY_DELAY); } if (i == 200) ERROR(rt2x00dev, "MCU request failed, no response from hardware\n"); rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0); rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); } #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) { void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE); memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE); iounmap(base_addr); } #else static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) { } #endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ #ifdef CONFIG_PCI static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) { struct rt2x00_dev *rt2x00dev = eeprom->data; u32 reg; rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, &reg); eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN); eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT); eeprom->reg_data_clock = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_CLOCK); eeprom->reg_chip_select = !!rt2x00_get_field32(reg, E2PROM_CSR_CHIP_SELECT); } static void rt2800pci_eepromregister_write(struct 
eeprom_93cx6 *eeprom) { struct rt2x00_dev *rt2x00dev = eeprom->data; u32 reg = 0; rt2x00_set_field32(&reg, E2PROM_CSR_DATA_IN, !!eeprom->reg_data_in); rt2x00_set_field32(&reg, E2PROM_CSR_DATA_OUT, !!eeprom->reg_data_out); rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK, !!eeprom->reg_data_clock); rt2x00_set_field32(&reg, E2PROM_CSR_CHIP_SELECT, !!eeprom->reg_chip_select); rt2x00pci_register_write(rt2x00dev, E2PROM_CSR, reg); } static void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev) { struct eeprom_93cx6 eeprom; u32 reg; rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, &reg); eeprom.data = rt2x00dev; eeprom.register_read = rt2800pci_eepromregister_read; eeprom.register_write = rt2800pci_eepromregister_write; switch (rt2x00_get_field32(reg, E2PROM_CSR_TYPE)) { case 0: eeprom.width = PCI_EEPROM_WIDTH_93C46; break; case 1: eeprom.width = PCI_EEPROM_WIDTH_93C66; break; default: eeprom.width = PCI_EEPROM_WIDTH_93C86; break; } eeprom.reg_data_in = 0; eeprom.reg_data_out = 0; eeprom.reg_data_clock = 0; eeprom.reg_chip_select = 0; eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom, EEPROM_SIZE / sizeof(u16)); } static int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev) { return rt2800_efuse_detect(rt2x00dev); } static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) { rt2800_read_eeprom_efuse(rt2x00dev); } #else static inline void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev) { } static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev) { return 0; } static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) { } #endif /* CONFIG_PCI */ /* * Queue handlers. 
*/ static void rt2800pci_start_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_RX: rt2x00pci_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1); rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg); break; case QID_BEACON: rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg); rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1); rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1); rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1); rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg); rt2x00pci_register_read(rt2x00dev, INT_TIMER_EN, &reg); rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1); rt2x00pci_register_write(rt2x00dev, INT_TIMER_EN, reg); break; default: break; } } static void rt2800pci_kick_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; struct queue_entry *entry; switch (queue->qid) { case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: entry = rt2x00queue_get_entry(queue, Q_INDEX); rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX(queue->qid), entry->entry_idx); break; case QID_MGMT: entry = rt2x00queue_get_entry(queue, Q_INDEX); rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX(5), entry->entry_idx); break; default: break; } } static void rt2800pci_stop_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_RX: rt2x00pci_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0); rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg); break; case QID_BEACON: rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg); rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0); rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0); rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0); rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg); rt2x00pci_register_read(rt2x00dev, 
INT_TIMER_EN, &reg); rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0); rt2x00pci_register_write(rt2x00dev, INT_TIMER_EN, reg); /* * Wait for current invocation to finish. The tasklet * won't be scheduled anymore afterwards since we disabled * the TBTT and PRE TBTT timer. */ tasklet_kill(&rt2x00dev->tbtt_tasklet); tasklet_kill(&rt2x00dev->pretbtt_tasklet); break; default: break; } } /* * Firmware functions */ static char *rt2800pci_get_firmware_name(struct rt2x00_dev *rt2x00dev) { /* * Chip rt3290 use specific 4KB firmware named rt3290.bin. */ if (rt2x00_rt(rt2x00dev, RT3290)) return FIRMWARE_RT3290; else return FIRMWARE_RT2860; } static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev, const u8 *data, const size_t len) { u32 reg; /* * enable Host program ram write selection */ reg = 0; rt2x00_set_field32(&reg, PBF_SYS_CTRL_HOST_RAM_WRITE, 1); rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, reg); /* * Write firmware to device. */ rt2x00pci_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, data, len); rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000); rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001); rt2x00pci_register_write(rt2x00dev, H2M_BBP_AGENT, 0); rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); return 0; } /* * Initialization functions. 
*/ static bool rt2800pci_get_entry_state(struct queue_entry *entry) { struct queue_entry_priv_pci *entry_priv = entry->priv_data; u32 word; if (entry->queue->qid == QID_RX) { rt2x00_desc_read(entry_priv->desc, 1, &word); return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE)); } else { rt2x00_desc_read(entry_priv->desc, 1, &word); return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE)); } } static void rt2800pci_clear_entry(struct queue_entry *entry) { struct queue_entry_priv_pci *entry_priv = entry->priv_data; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; u32 word; if (entry->queue->qid == QID_RX) { rt2x00_desc_read(entry_priv->desc, 0, &word); rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma); rt2x00_desc_write(entry_priv->desc, 0, word); rt2x00_desc_read(entry_priv->desc, 1, &word); rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0); rt2x00_desc_write(entry_priv->desc, 1, word); /* * Set RX IDX in register to inform hardware that we have * handled this entry and it is available for reuse again. */ rt2x00pci_register_write(rt2x00dev, RX_CRX_IDX, entry->entry_idx); } else { rt2x00_desc_read(entry_priv->desc, 1, &word); rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1); rt2x00_desc_write(entry_priv->desc, 1, word); } } static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev) { struct queue_entry_priv_pci *entry_priv; /* * Initialize registers. 
*/ entry_priv = rt2x00dev->tx[0].entries[0].priv_data; rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR0, entry_priv->desc_dma); rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT0, rt2x00dev->tx[0].limit); rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX0, 0); rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX0, 0); entry_priv = rt2x00dev->tx[1].entries[0].priv_data; rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR1, entry_priv->desc_dma); rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT1, rt2x00dev->tx[1].limit); rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX1, 0); rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX1, 0); entry_priv = rt2x00dev->tx[2].entries[0].priv_data; rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR2, entry_priv->desc_dma); rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT2, rt2x00dev->tx[2].limit); rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX2, 0); rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX2, 0); entry_priv = rt2x00dev->tx[3].entries[0].priv_data; rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR3, entry_priv->desc_dma); rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT3, rt2x00dev->tx[3].limit); rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX3, 0); rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX3, 0); rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR4, 0); rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT4, 0); rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX4, 0); rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX4, 0); rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR5, 0); rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT5, 0); rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX5, 0); rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX5, 0); entry_priv = rt2x00dev->rx->entries[0].priv_data; rt2x00pci_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma); rt2x00pci_register_write(rt2x00dev, RX_MAX_CNT, rt2x00dev->rx[0].limit); rt2x00pci_register_write(rt2x00dev, RX_CRX_IDX, rt2x00dev->rx[0].limit - 1); rt2x00pci_register_write(rt2x00dev, RX_DRX_IDX, 0); 
rt2800_disable_wpdma(rt2x00dev); rt2x00pci_register_write(rt2x00dev, DELAY_INT_CFG, 0); return 0; } /* * Device state switch handlers. */ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev, enum dev_state state) { u32 reg; unsigned long flags; /* * When interrupts are being enabled, the interrupt registers * should clear the register to assure a clean state. */ if (state == STATE_RADIO_IRQ_ON) { rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, &reg); rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg); } spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); reg = 0; if (state == STATE_RADIO_IRQ_ON) { rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1); rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1); rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1); rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1); rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1); } rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); if (state == STATE_RADIO_IRQ_OFF) { /* * Wait for possibly running tasklets to finish. 
*/ tasklet_kill(&rt2x00dev->txstatus_tasklet); tasklet_kill(&rt2x00dev->rxdone_tasklet); tasklet_kill(&rt2x00dev->autowake_tasklet); tasklet_kill(&rt2x00dev->tbtt_tasklet); tasklet_kill(&rt2x00dev->pretbtt_tasklet); } } static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev) { u32 reg; /* * Reset DMA indexes */ rt2x00pci_register_read(rt2x00dev, WPDMA_RST_IDX, &reg); rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1); rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1); rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1); rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1); rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1); rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1); rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1); rt2x00pci_register_write(rt2x00dev, WPDMA_RST_IDX, reg); rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); if (rt2x00_is_pcie(rt2x00dev) && (rt2x00_rt(rt2x00dev, RT3572) || rt2x00_rt(rt2x00dev, RT5390) || rt2x00_rt(rt2x00dev, RT5392))) { rt2x00pci_register_read(rt2x00dev, AUX_CTRL, &reg); rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1); rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1); rt2x00pci_register_write(rt2x00dev, AUX_CTRL, reg); } rt2x00pci_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); reg = 0; rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1); rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1); rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg); rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000); return 0; } static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) { int retval; /* Wait for DMA, ignore error until we initialize queues. */ rt2800_wait_wpdma_ready(rt2x00dev); if (unlikely(rt2800pci_init_queues(rt2x00dev))) return -EIO; retval = rt2800_enable_radio(rt2x00dev); if (retval) return retval; /* After resume MCU_BOOT_SIGNAL will trash these. 
*/ rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0); rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); rt2800_mcu_request(rt2x00dev, MCU_SLEEP, TOKEN_RADIO_OFF, 0xff, 0x02); rt2800pci_mcu_status(rt2x00dev, TOKEN_RADIO_OFF); rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKEUP, 0, 0); rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKEUP); return retval; } static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev) { if (rt2x00_is_soc(rt2x00dev)) { rt2800_disable_radio(rt2x00dev); rt2x00pci_register_write(rt2x00dev, PWR_PIN_CFG, 0); rt2x00pci_register_write(rt2x00dev, TX_PIN_CFG, 0); } } static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { if (state == STATE_AWAKE) { rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKEUP, 0, 0x02); rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKEUP); } else if (state == STATE_SLEEP) { rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_STATUS, 0xffffffff); rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, 0xffffffff); rt2800_mcu_request(rt2x00dev, MCU_SLEEP, TOKEN_SLEEP, 0xff, 0x01); } return 0; } static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { int retval = 0; switch (state) { case STATE_RADIO_ON: retval = rt2800pci_enable_radio(rt2x00dev); break; case STATE_RADIO_OFF: /* * After the radio has been disabled, the device should * be put to sleep for powersaving. 
*/ rt2800pci_disable_radio(rt2x00dev); rt2800pci_set_state(rt2x00dev, STATE_SLEEP); break; case STATE_RADIO_IRQ_ON: case STATE_RADIO_IRQ_OFF: rt2800pci_toggle_irq(rt2x00dev, state); break; case STATE_DEEP_SLEEP: case STATE_SLEEP: case STATE_STANDBY: case STATE_AWAKE: retval = rt2800pci_set_state(rt2x00dev, state); break; default: retval = -ENOTSUPP; break; } if (unlikely(retval)) ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n", state, retval); return retval; } /* * TX descriptor initialization */ static __le32 *rt2800pci_get_txwi(struct queue_entry *entry) { return (__le32 *) entry->skb->data; } static void rt2800pci_write_tx_desc(struct queue_entry *entry, struct txentry_desc *txdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); struct queue_entry_priv_pci *entry_priv = entry->priv_data; __le32 *txd = entry_priv->desc; u32 word; /* * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1 * must contains a TXWI structure + 802.11 header + padding + 802.11 * data. We choose to have SD_PTR0/SD_LEN0 only contains TXWI and * SD_PTR1/SD_LEN1 contains 802.11 header + padding + 802.11 * data. It means that LAST_SEC0 is always 0. 
*/ /* * Initialize TX descriptor */ word = 0; rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma); rt2x00_desc_write(txd, 0, word); word = 0; rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len); rt2x00_set_field32(&word, TXD_W1_LAST_SEC1, !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W1_BURST, test_bit(ENTRY_TXD_BURST, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W1_SD_LEN0, TXWI_DESC_SIZE); rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0); rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0); rt2x00_desc_write(txd, 1, word); word = 0; rt2x00_set_field32(&word, TXD_W2_SD_PTR1, skbdesc->skb_dma + TXWI_DESC_SIZE); rt2x00_desc_write(txd, 2, word); word = 0; rt2x00_set_field32(&word, TXD_W3_WIV, !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W3_QSEL, 2); rt2x00_desc_write(txd, 3, word); /* * Register descriptor details in skb frame descriptor. */ skbdesc->desc = txd; skbdesc->desc_len = TXD_DESC_SIZE; } /* * RX control handlers */ static void rt2800pci_fill_rxdone(struct queue_entry *entry, struct rxdone_entry_desc *rxdesc) { struct queue_entry_priv_pci *entry_priv = entry->priv_data; __le32 *rxd = entry_priv->desc; u32 word; rt2x00_desc_read(rxd, 3, &word); if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR)) rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; /* * Unfortunately we don't know the cipher type used during * decryption. This prevents us from correct providing * correct statistics through debugfs. */ rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR); if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) { /* * Hardware has stripped IV/EIV data from 802.11 frame during * decryption. Unfortunately the descriptor doesn't contain * any fields with the EIV/IV data either, so they can't * be restored by rt2x00lib. */ rxdesc->flags |= RX_FLAG_IV_STRIPPED; /* * The hardware has already checked the Michael Mic and has * stripped it from the frame. Signal this to mac80211. 
*/ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) rxdesc->flags |= RX_FLAG_DECRYPTED; else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) rxdesc->flags |= RX_FLAG_MMIC_ERROR; } if (rt2x00_get_field32(word, RXD_W3_MY_BSS)) rxdesc->dev_flags |= RXDONE_MY_BSS; if (rt2x00_get_field32(word, RXD_W3_L2PAD)) rxdesc->dev_flags |= RXDONE_L2PAD; /* * Process the RXWI structure that is at the start of the buffer. */ rt2800_process_rxwi(entry, rxdesc); } /* * Interrupt functions. */ static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev) { struct ieee80211_conf conf = { .flags = 0 }; struct rt2x00lib_conf libconf = { .conf = &conf }; rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); } static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; struct queue_entry *entry; u32 status; u8 qid; int max_tx_done = 16; while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) { qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE); if (unlikely(qid >= QID_RX)) { /* * Unknown queue, this shouldn't happen. Just drop * this tx status. */ WARNING(rt2x00dev, "Got TX status report with " "unexpected pid %u, dropping\n", qid); break; } queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); if (unlikely(queue == NULL)) { /* * The queue is NULL, this shouldn't happen. Stop * processing here and drop the tx status */ WARNING(rt2x00dev, "Got TX status for an unavailable " "queue %u, dropping\n", qid); break; } if (unlikely(rt2x00queue_empty(queue))) { /* * The queue is empty. Stop processing here * and drop the tx status. 
*/ WARNING(rt2x00dev, "Got TX status for an empty " "queue %u, dropping\n", qid); break; } entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); rt2800_txdone_entry(entry, status, rt2800pci_get_txwi(entry)); if (--max_tx_done == 0) break; } return !max_tx_done; } static inline void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev, struct rt2x00_field32 irq_field) { u32 reg; /* * Enable a single interrupt. The interrupt mask register * access needs locking. */ spin_lock_irq(&rt2x00dev->irqmask_lock); rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg); rt2x00_set_field32(&reg, irq_field, 1); rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); spin_unlock_irq(&rt2x00dev->irqmask_lock); } static void rt2800pci_txstatus_tasklet(unsigned long data) { struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; if (rt2800pci_txdone(rt2x00dev)) tasklet_schedule(&rt2x00dev->txstatus_tasklet); /* * No need to enable the tx status interrupt here as we always * leave it enabled to minimize the possibility of a tx status * register overflow. See comment in interrupt handler. */ } static void rt2800pci_pretbtt_tasklet(unsigned long data) { struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; rt2x00lib_pretbtt(rt2x00dev); if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT); } static void rt2800pci_tbtt_tasklet(unsigned long data) { struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u32 reg; rt2x00lib_beacondone(rt2x00dev); if (rt2x00dev->intf_ap_count) { /* * The rt2800pci hardware tbtt timer is off by 1us per tbtt * causing beacon skew and as a result causing problems with * some powersaving clients over time. Shorten the beacon * interval every 64 beacons by 64us to mitigate this effect. 
*/ if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) { rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg); rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, (rt2x00dev->beacon_int * 16) - 1); rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg); } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) { rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg); rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, (rt2x00dev->beacon_int * 16)); rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg); } drv_data->tbtt_tick++; drv_data->tbtt_tick %= BCN_TBTT_OFFSET; } if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT); } static void rt2800pci_rxdone_tasklet(unsigned long data) { struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; if (rt2x00pci_rxdone(rt2x00dev)) tasklet_schedule(&rt2x00dev->rxdone_tasklet); else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE); } static void rt2800pci_autowake_tasklet(unsigned long data) { struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; rt2800pci_wakeup(rt2x00dev); if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP); } static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev) { u32 status; int i; /* * The TX_FIFO_STATUS interrupt needs special care. We should * read TX_STA_FIFO but we should do it immediately as otherwise * the register can overflow and we would lose status reports. * * Hence, read the TX_STA_FIFO register and copy all tx status * reports into a kernel FIFO which is handled in the txstatus * tasklet. We use a tasklet to process the tx status reports * because we can schedule the tasklet multiple times (when the * interrupt fires again during tx status processing). 
* * Furthermore we don't disable the TX_FIFO_STATUS * interrupt here but leave it enabled so that the TX_STA_FIFO * can also be read while the tx status tasklet gets executed. * * Since we have only one producer and one consumer we don't * need to lock the kfifo. */ for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) { rt2x00pci_register_read(rt2x00dev, TX_STA_FIFO, &status); if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID)) break; if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) { WARNING(rt2x00dev, "TX status FIFO overrun," "drop tx status report.\n"); break; } } /* Schedule the tasklet for processing the tx status. */ tasklet_schedule(&rt2x00dev->txstatus_tasklet); } static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance) { struct rt2x00_dev *rt2x00dev = dev_instance; u32 reg, mask; /* Read status and ACK all interrupts */ rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, &reg); rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg); if (!reg) return IRQ_NONE; if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return IRQ_HANDLED; /* * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits * for interrupts and interrupt masks we can just use the value of * INT_SOURCE_CSR to create the interrupt mask. */ mask = ~reg; if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) { rt2800pci_txstatus_interrupt(rt2x00dev); /* * Never disable the TX_FIFO_STATUS interrupt. 
*/ rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1); } if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT)) tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet); if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT)) tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet); if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE)) tasklet_schedule(&rt2x00dev->rxdone_tasklet); if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) tasklet_schedule(&rt2x00dev->autowake_tasklet); /* * Disable all interrupts for which a tasklet was scheduled right now, * the tasklet will reenable the appropriate interrupts. */ spin_lock(&rt2x00dev->irqmask_lock); rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg); reg &= mask; rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); spin_unlock(&rt2x00dev->irqmask_lock); return IRQ_HANDLED; } /* * Device probe functions. */ static void rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev) { if (rt2x00_is_soc(rt2x00dev)) rt2800pci_read_eeprom_soc(rt2x00dev); else if (rt2800pci_efuse_detect(rt2x00dev)) rt2800pci_read_eeprom_efuse(rt2x00dev); else rt2800pci_read_eeprom_pci(rt2x00dev); } static const struct ieee80211_ops rt2800pci_mac80211_ops = { .tx = rt2x00mac_tx, .start = rt2x00mac_start, .stop = rt2x00mac_stop, .add_interface = rt2x00mac_add_interface, .remove_interface = rt2x00mac_remove_interface, .config = rt2x00mac_config, .configure_filter = rt2x00mac_configure_filter, .set_key = rt2x00mac_set_key, .sw_scan_start = rt2x00mac_sw_scan_start, .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .get_tkip_seq = rt2800_get_tkip_seq, .set_rts_threshold = rt2800_set_rts_threshold, .sta_add = rt2x00mac_sta_add, .sta_remove = rt2x00mac_sta_remove, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2800_conf_tx, .get_tsf = rt2800_get_tsf, .rfkill_poll = rt2x00mac_rfkill_poll, .ampdu_action = rt2800_ampdu_action, .flush = rt2x00mac_flush, .get_survey = rt2800_get_survey, .get_ringparam = rt2x00mac_get_ringparam, 
.tx_frames_pending = rt2x00mac_tx_frames_pending, }; static const struct rt2800_ops rt2800pci_rt2800_ops = { .register_read = rt2x00pci_register_read, .register_read_lock = rt2x00pci_register_read, /* same for PCI */ .register_write = rt2x00pci_register_write, .register_write_lock = rt2x00pci_register_write, /* same for PCI */ .register_multiread = rt2x00pci_register_multiread, .register_multiwrite = rt2x00pci_register_multiwrite, .regbusy_read = rt2x00pci_regbusy_read, .read_eeprom = rt2800pci_read_eeprom, .hwcrypt_disabled = rt2800pci_hwcrypt_disabled, .drv_write_firmware = rt2800pci_write_firmware, .drv_init_registers = rt2800pci_init_registers, .drv_get_txwi = rt2800pci_get_txwi, }; static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { .irq_handler = rt2800pci_interrupt, .txstatus_tasklet = rt2800pci_txstatus_tasklet, .pretbtt_tasklet = rt2800pci_pretbtt_tasklet, .tbtt_tasklet = rt2800pci_tbtt_tasklet, .rxdone_tasklet = rt2800pci_rxdone_tasklet, .autowake_tasklet = rt2800pci_autowake_tasklet, .probe_hw = rt2800_probe_hw, .get_firmware_name = rt2800pci_get_firmware_name, .check_firmware = rt2800_check_firmware, .load_firmware = rt2800_load_firmware, .initialize = rt2x00pci_initialize, .uninitialize = rt2x00pci_uninitialize, .get_entry_state = rt2800pci_get_entry_state, .clear_entry = rt2800pci_clear_entry, .set_device_state = rt2800pci_set_device_state, .rfkill_poll = rt2800_rfkill_poll, .link_stats = rt2800_link_stats, .reset_tuner = rt2800_reset_tuner, .link_tuner = rt2800_link_tuner, .gain_calibration = rt2800_gain_calibration, .vco_calibration = rt2800_vco_calibration, .start_queue = rt2800pci_start_queue, .kick_queue = rt2800pci_kick_queue, .stop_queue = rt2800pci_stop_queue, .flush_queue = rt2x00pci_flush_queue, .write_tx_desc = rt2800pci_write_tx_desc, .write_tx_data = rt2800_write_tx_data, .write_beacon = rt2800_write_beacon, .clear_beacon = rt2800_clear_beacon, .fill_rxdone = rt2800pci_fill_rxdone, .config_shared_key = rt2800_config_shared_key, 
/*
 * Tail of the rt2800pci rt2x00lib operations structure; the opening of
 * this struct lies before this chunk.  These entries wire the generic
 * rt2x00 framework into the shared rt2800 library helpers.
 */
	.config_pairwise_key	= rt2800_config_pairwise_key,
	.config_filter		= rt2800_config_filter,
	.config_intf		= rt2800_config_intf,
	.config_erp		= rt2800_config_erp,
	.config_ant		= rt2800_config_ant,
	.config			= rt2800_config,
	.sta_add		= rt2800_sta_add,
	.sta_remove		= rt2800_sta_remove,
};

/* RX ring layout: 128 descriptors, each sized for an aggregated frame. */
static const struct data_queue_desc rt2800pci_queue_rx = {
	.entry_num		= 128,
	.data_size		= AGGREGATION_SIZE,
	.desc_size		= RXD_DESC_SIZE,
	.priv_size		= sizeof(struct queue_entry_priv_pci),
};

/* TX ring layout: 64 descriptors per data queue. */
static const struct data_queue_desc rt2800pci_queue_tx = {
	.entry_num		= 64,
	.data_size		= AGGREGATION_SIZE,
	.desc_size		= TXD_DESC_SIZE,
	.priv_size		= sizeof(struct queue_entry_priv_pci),
};

/* Beacon ring layout: 8 entries, no frame payload buffers. */
static const struct data_queue_desc rt2800pci_queue_bcn = {
	.entry_num		= 8,
	.data_size		= 0, /* No DMA required for beacons */
	.desc_size		= TXWI_DESC_SIZE,
	.priv_size		= sizeof(struct queue_entry_priv_pci),
};

/* Top-level device description handed to rt2x00lib at probe time. */
static const struct rt2x00_ops rt2800pci_ops = {
	.name			= KBUILD_MODNAME,
	.drv_data_size		= sizeof(struct rt2800_drv_data),
	.max_ap_intf		= 8,
	.eeprom_size		= EEPROM_SIZE,
	.rf_size		= RF_SIZE,
	.tx_queues		= NUM_TX_QUEUES,
	.extra_tx_headroom	= TXWI_DESC_SIZE,
	.rx			= &rt2800pci_queue_rx,
	.tx			= &rt2800pci_queue_tx,
	.bcn			= &rt2800pci_queue_bcn,
	.lib			= &rt2800pci_rt2x00_ops,
	.drv			= &rt2800pci_rt2800_ops,
	.hw			= &rt2800pci_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
	.debugfs		= &rt2800_rt2x00debug,
#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
};

/*
 * RT2800pci module information.
 */
#ifdef CONFIG_PCI
/* PCI IDs this driver binds to; chip-family groups are Kconfig-gated. */
static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
	{ PCI_DEVICE(0x1814, 0x0601) },
	{ PCI_DEVICE(0x1814, 0x0681) },
	{ PCI_DEVICE(0x1814, 0x0701) },
	{ PCI_DEVICE(0x1814, 0x0781) },
	{ PCI_DEVICE(0x1814, 0x3090) },
	{ PCI_DEVICE(0x1814, 0x3091) },
	{ PCI_DEVICE(0x1814, 0x3092) },
	{ PCI_DEVICE(0x1432, 0x7708) },
	{ PCI_DEVICE(0x1432, 0x7727) },
	{ PCI_DEVICE(0x1432, 0x7728) },
	{ PCI_DEVICE(0x1432, 0x7738) },
	{ PCI_DEVICE(0x1432, 0x7748) },
	{ PCI_DEVICE(0x1432, 0x7758) },
	{ PCI_DEVICE(0x1432, 0x7768) },
	{ PCI_DEVICE(0x1462, 0x891a) },
	{ PCI_DEVICE(0x1a3b, 0x1059) },
#ifdef CONFIG_RT2800PCI_RT3290
	{ PCI_DEVICE(0x1814, 0x3290) },
#endif
#ifdef CONFIG_RT2800PCI_RT33XX
	{ PCI_DEVICE(0x1814, 0x3390) },
#endif
#ifdef CONFIG_RT2800PCI_RT35XX
	{ PCI_DEVICE(0x1432, 0x7711) },
	{ PCI_DEVICE(0x1432, 0x7722) },
	{ PCI_DEVICE(0x1814, 0x3060) },
	{ PCI_DEVICE(0x1814, 0x3062) },
	{ PCI_DEVICE(0x1814, 0x3562) },
	{ PCI_DEVICE(0x1814, 0x3592) },
	{ PCI_DEVICE(0x1814, 0x3593) },
#endif
#ifdef CONFIG_RT2800PCI_RT53XX
	{ PCI_DEVICE(0x1814, 0x5360) },
	{ PCI_DEVICE(0x1814, 0x5362) },
	{ PCI_DEVICE(0x1814, 0x5390) },
	{ PCI_DEVICE(0x1814, 0x5392) },
	{ PCI_DEVICE(0x1814, 0x539a) },
	{ PCI_DEVICE(0x1814, 0x539b) },
	{ PCI_DEVICE(0x1814, 0x539f) },
#endif
	{ 0, }			/* table terminator */
};
#endif /* CONFIG_PCI */

MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");
MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");
#ifdef CONFIG_PCI
MODULE_FIRMWARE(FIRMWARE_RT2860);
MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
#endif /* CONFIG_PCI */
MODULE_LICENSE("GPL");

#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
/* Probe wrapper for the on-SoC (WMAC) variant of the same hardware. */
static int rt2800soc_probe(struct platform_device *pdev)
{
	return rt2x00soc_probe(pdev, &rt2800pci_ops);
}

static struct platform_driver rt2800soc_driver = {
	.driver = {
		.name		= "rt2800_wmac",
		.owner		= THIS_MODULE,
		.mod_name	= KBUILD_MODNAME,
	},
	.probe		= rt2800soc_probe,
	.remove		= __devexit_p(rt2x00soc_remove),
	.suspend	= rt2x00soc_suspend,
	.resume		= rt2x00soc_resume,
};
#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */

#ifdef CONFIG_PCI
/* Probe wrapper for the PCI/PCIe variant. */
static int rt2800pci_probe(struct pci_dev *pci_dev,
			   const struct pci_device_id *id)
{
	return rt2x00pci_probe(pci_dev, &rt2800pci_ops);
}

static struct pci_driver rt2800pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= rt2800pci_device_table,
	.probe		= rt2800pci_probe,
	.remove		= __devexit_p(rt2x00pci_remove),
	.suspend	= rt2x00pci_suspend,
	.resume		= rt2x00pci_resume,
};
#endif /* CONFIG_PCI */

/*
 * Module entry: register the platform driver first, then the PCI driver;
 * if PCI registration fails the platform driver is unregistered again.
 */
static int __init rt2800pci_init(void)
{
	int ret = 0;

#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
	ret = platform_driver_register(&rt2800soc_driver);
	if (ret)
		return ret;
#endif
#ifdef CONFIG_PCI
	ret = pci_register_driver(&rt2800pci_driver);
	if (ret) {
#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
		platform_driver_unregister(&rt2800soc_driver);
#endif
		return ret;
	}
#endif

	return ret;
}

/* Module exit: unregister in the reverse order of registration. */
static void __exit rt2800pci_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&rt2800pci_driver);
#endif
#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
	platform_driver_unregister(&rt2800soc_driver);
#endif
}

module_init(rt2800pci_init);
module_exit(rt2800pci_exit);
gpl-2.0
TeamWin/android_kernel_samsung_sprat
drivers/gpio/gpio-tc3589x.c
2092
11102
/*
 * Copyright (C) ST-Ericsson SA 2010
 *
 * License Terms: GNU General Public License, version 2
 * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson
 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/mfd/tc3589x.h>

/*
 * These registers are modified under the irq bus lock and cached to avoid
 * unnecessary writes in bus_sync_unlock.
 */
enum { REG_IBE, REG_IEV, REG_IS, REG_IE };

#define CACHE_NR_REGS	4
#define CACHE_NR_BANKS	3

/* Per-instance driver state for the TC3589x GPIO expander block. */
struct tc3589x_gpio {
	struct gpio_chip chip;
	struct tc3589x *tc3589x;	/* parent MFD device handle */
	struct device *dev;
	struct mutex irq_lock;		/* serializes cached irq register updates */
	struct irq_domain *domain;
	int irq_base;

	/* Caches of interrupt control registers for bus_lock */
	u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS];
	u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS];
};

/* Recover the driver state from the embedded gpio_chip pointer. */
static inline struct tc3589x_gpio *to_tc3589x_gpio(struct gpio_chip *chip)
{
	return container_of(chip, struct tc3589x_gpio, chip);
}

/* Read the current level of one GPIO line via the data register bank. */
static int tc3589x_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(chip);
	struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
	u8 reg = TC3589x_GPIODATA0 + (offset / 8) * 2;	/* data regs are 2 apart */
	u8 mask = 1 << (offset % 8);
	int ret;

	ret = tc3589x_reg_read(tc3589x, reg);
	if (ret < 0)
		return ret;

	return ret & mask;
}

/*
 * Drive one GPIO line.  The hardware takes a (value, mask) pair written as
 * two consecutive bytes, so only the addressed bit is changed.
 */
static void tc3589x_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
{
	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(chip);
	struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
	u8 reg = TC3589x_GPIODATA0 + (offset / 8) * 2;
	unsigned pos = offset % 8;
	u8 data[] = {!!val << pos, 1 << pos};

	tc3589x_block_write(tc3589x, reg, ARRAY_SIZE(data), data);
}

/* Set the line's output value first, then switch the direction to output. */
static int tc3589x_gpio_direction_output(struct gpio_chip *chip,
					 unsigned offset, int val)
{
	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(chip);
	struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
	u8 reg = TC3589x_GPIODIR0 + offset / 8;
	unsigned pos = offset % 8;

	tc3589x_gpio_set(chip, offset, val);

	return tc3589x_set_bits(tc3589x, reg, 1 << pos, 1 << pos);
}

/* Switch a line to input by clearing its direction bit. */
static int tc3589x_gpio_direction_input(struct gpio_chip *chip,
					unsigned offset)
{
	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(chip);
	struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
	u8 reg = TC3589x_GPIODIR0 + offset / 8;
	unsigned pos = offset % 8;

	return tc3589x_set_bits(tc3589x, reg, 1 << pos, 0);
}

/**
 * tc3589x_gpio_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
 *
 * @tc3589x_gpio: tc3589x_gpio_irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs
 *
 * Useful for drivers to request their own IRQs.
 */
static int tc3589x_gpio_irq_get_virq(struct tc3589x_gpio *tc3589x_gpio,
				     int irq)
{
	if (!tc3589x_gpio)
		return -EINVAL;

	return irq_create_mapping(tc3589x_gpio->domain, irq);
}

/* gpio_chip .to_irq callback: translate a GPIO offset to its virq. */
static int tc3589x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(chip);

	return tc3589x_gpio_irq_get_virq(tc3589x_gpio, offset);
}

/* Template gpio_chip; copied and filled in per device at probe time. */
static struct gpio_chip template_chip = {
	.label			= "tc3589x",
	.owner			= THIS_MODULE,
	.direction_input	= tc3589x_gpio_direction_input,
	.get			= tc3589x_gpio_get,
	.direction_output	= tc3589x_gpio_direction_output,
	.set			= tc3589x_gpio_set,
	.to_irq			= tc3589x_gpio_to_irq,
	.can_sleep		= 1,	/* accessors do sleeping bus I/O */
};

/*
 * Record the requested trigger type in the register caches (IBE = both
 * edges, IS = level vs. edge, IEV = rising/high vs. falling/low); the
 * hardware is written later in bus_sync_unlock.
 */
static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
	int offset = d->hwirq;
	int regoffset = offset / 8;
	int mask = 1 << (offset % 8);

	if (type == IRQ_TYPE_EDGE_BOTH) {
		tc3589x_gpio->regs[REG_IBE][regoffset] |= mask;
		return 0;
	}

	tc3589x_gpio->regs[REG_IBE][regoffset] &= ~mask;

	if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
		tc3589x_gpio->regs[REG_IS][regoffset] |= mask;
	else
		tc3589x_gpio->regs[REG_IS][regoffset] &= ~mask;

	if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
		tc3589x_gpio->regs[REG_IEV][regoffset] |= mask;
	else
		tc3589x_gpio->regs[REG_IEV][regoffset] &= ~mask;

	return 0;
}

/* Take the bus lock so cached irq register changes are batched. */
static void tc3589x_gpio_irq_lock(struct irq_data *d)
{
	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);

	mutex_lock(&tc3589x_gpio->irq_lock);
}

/* Flush every cached register that changed since the last sync, then unlock. */
static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
{
	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
	struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
	static const u8 regmap[] = {
		[REG_IBE]	= TC3589x_GPIOIBE0,
		[REG_IEV]	= TC3589x_GPIOIEV0,
		[REG_IS]	= TC3589x_GPIOIS0,
		[REG_IE]	= TC3589x_GPIOIE0,
	};
	int i, j;

	for (i = 0; i < CACHE_NR_REGS; i++) {
		for (j = 0; j < CACHE_NR_BANKS; j++) {
			u8 old = tc3589x_gpio->oldregs[i][j];
			u8 new = tc3589x_gpio->regs[i][j];

			if (new == old)
				continue;

			tc3589x_gpio->oldregs[i][j] = new;
			tc3589x_reg_write(tc3589x, regmap[i] + j * 8, new);
		}
	}

	mutex_unlock(&tc3589x_gpio->irq_lock);
}

/* Mask: clear the cached enable bit (written back in sync_unlock). */
static void tc3589x_gpio_irq_mask(struct irq_data *d)
{
	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
	int offset = d->hwirq;
	int regoffset = offset / 8;
	int mask = 1 << (offset % 8);

	tc3589x_gpio->regs[REG_IE][regoffset] &= ~mask;
}

/* Unmask: set the cached enable bit (written back in sync_unlock). */
static void tc3589x_gpio_irq_unmask(struct irq_data *d)
{
	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
	int offset = d->hwirq;
	int regoffset = offset / 8;
	int mask = 1 << (offset % 8);

	tc3589x_gpio->regs[REG_IE][regoffset] |= mask;
}

static struct irq_chip tc3589x_gpio_irq_chip = {
	.name			= "tc3589x-gpio",
	.irq_bus_lock		= tc3589x_gpio_irq_lock,
	.irq_bus_sync_unlock	= tc3589x_gpio_irq_sync_unlock,
	.irq_mask		= tc3589x_gpio_irq_mask,
	.irq_unmask		= tc3589x_gpio_irq_unmask,
	.irq_set_type		= tc3589x_gpio_irq_set_type,
};

/*
 * Threaded handler for the parent interrupt: read the masked-status banks,
 * dispatch a nested virq per pending line, then acknowledge each bank.
 */
static irqreturn_t tc3589x_gpio_irq(int irq, void *dev)
{
	struct tc3589x_gpio *tc3589x_gpio = dev;
	struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
	u8 status[CACHE_NR_BANKS];
	int ret;
	int i;

	ret = tc3589x_block_read(tc3589x, TC3589x_GPIOMIS0,
				 ARRAY_SIZE(status), status);
	if (ret < 0)
		return IRQ_NONE;

	for (i = 0; i < ARRAY_SIZE(status); i++) {
		unsigned int stat = status[i];
		if (!stat)
			continue;

		while (stat) {
			int bit = __ffs(stat);
			int line = i * 8 + bit;
			int virq = tc3589x_gpio_irq_get_virq(tc3589x_gpio, line);

			handle_nested_irq(virq);
			stat &= ~(1 << bit);
		}

		tc3589x_reg_write(tc3589x, TC3589x_GPIOIC0 + i, status[i]);
	}

	return IRQ_HANDLED;
}

/* irq_domain .map: set up chip, handler and flags for a newly mapped virq. */
static int tc3589x_gpio_irq_map(struct irq_domain *d, unsigned int virq,
				irq_hw_number_t hwirq)
{
	/*
	 * NOTE(review): host_data is installed as a struct tc3589x_gpio *
	 * in tc3589x_gpio_irq_init(), but is declared here as struct
	 * tc3589x *.  It is only ever passed through as opaque chip data,
	 * so this looks harmless — but the type should be confirmed/fixed.
	 */
	struct tc3589x *tc3589x_gpio = d->host_data;

	irq_set_chip_data(virq, tc3589x_gpio);
	irq_set_chip_and_handler(virq, &tc3589x_gpio_irq_chip,
				 handle_simple_irq);
	irq_set_nested_thread(virq, 1);
#ifdef CONFIG_ARM
	set_irq_flags(virq, IRQF_VALID);
#else
	irq_set_noprobe(virq);
#endif

	return 0;
}

/* irq_domain .unmap: undo everything tc3589x_gpio_irq_map() set up. */
static void tc3589x_gpio_irq_unmap(struct irq_domain *d, unsigned int virq)
{
#ifdef CONFIG_ARM
	set_irq_flags(virq, 0);
#endif
	irq_set_chip_and_handler(virq, NULL, NULL);
	irq_set_chip_data(virq, NULL);
}

static struct irq_domain_ops tc3589x_irq_ops = {
	.map	= tc3589x_gpio_irq_map,
	.unmap	= tc3589x_gpio_irq_unmap,
	.xlate	= irq_domain_xlate_twocell,
};

/* Create the irq domain (linear when no irq_base, legacy otherwise). */
static int tc3589x_gpio_irq_init(struct tc3589x_gpio *tc3589x_gpio,
				 struct device_node *np)
{
	int base = tc3589x_gpio->irq_base;

	/*
	 * If this results in a linear domain, irq_create_mapping() will
	 * take care of allocating IRQ descriptors at runtime. When a base
	 * is provided, the IRQ descriptors will be allocated when the
	 * domain is instantiated.
	 */
	tc3589x_gpio->domain = irq_domain_add_simple(
		np, tc3589x_gpio->chip.ngpio, base, &tc3589x_irq_ops,
		tc3589x_gpio);
	if (!tc3589x_gpio->domain) {
		dev_err(tc3589x_gpio->dev, "Failed to create irqdomain\n");
		return -ENOSYS;
	}

	return 0;
}

/*
 * Probe: allocate state, copy the template chip, bring the GPIO block out
 * of reset, set up the irq domain and parent handler, and register the
 * gpio_chip.  Failure paths unwind the irq and the allocation.
 */
static int tc3589x_gpio_probe(struct platform_device *pdev)
{
	struct tc3589x *tc3589x = dev_get_drvdata(pdev->dev.parent);
	struct tc3589x_gpio_platform_data *pdata;
	struct device_node *np = pdev->dev.of_node;
	struct tc3589x_gpio *tc3589x_gpio;
	int ret;
	int irq;

	pdata = tc3589x->pdata->gpio;

	if (!(pdata || np)) {
		dev_err(&pdev->dev, "No platform data or Device Tree found\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	tc3589x_gpio = kzalloc(sizeof(struct tc3589x_gpio), GFP_KERNEL);
	if (!tc3589x_gpio)
		return -ENOMEM;

	mutex_init(&tc3589x_gpio->irq_lock);

	tc3589x_gpio->dev = &pdev->dev;
	tc3589x_gpio->tc3589x = tc3589x;

	tc3589x_gpio->chip = template_chip;
	tc3589x_gpio->chip.ngpio = tc3589x->num_gpio;
	tc3589x_gpio->chip.dev = &pdev->dev;
	tc3589x_gpio->chip.base = (pdata) ? pdata->gpio_base : -1;

#ifdef CONFIG_OF_GPIO
	tc3589x_gpio->chip.of_node = np;
#endif

	tc3589x_gpio->irq_base = tc3589x->irq_base ?
		tc3589x->irq_base + TC3589x_INT_GPIO(0) : 0;

	/* Bring the GPIO module out of reset */
	ret = tc3589x_set_bits(tc3589x, TC3589x_RSTCTRL,
			       TC3589x_RSTCTRL_GPIRST, 0);
	if (ret < 0)
		goto out_free;

	ret = tc3589x_gpio_irq_init(tc3589x_gpio, np);
	if (ret)
		goto out_free;

	ret = request_threaded_irq(irq, NULL, tc3589x_gpio_irq, IRQF_ONESHOT,
				   "tc3589x-gpio", tc3589x_gpio);
	if (ret) {
		dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
		goto out_free;
	}

	ret = gpiochip_add(&tc3589x_gpio->chip);
	if (ret) {
		dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret);
		goto out_freeirq;
	}

	if (pdata && pdata->setup)
		pdata->setup(tc3589x, tc3589x_gpio->chip.base);

	platform_set_drvdata(pdev, tc3589x_gpio);

	return 0;

out_freeirq:
	free_irq(irq, tc3589x_gpio);
out_free:
	kfree(tc3589x_gpio);
	return ret;
}

/* Remove: run the platform remove hook, then tear down in reverse order. */
static int tc3589x_gpio_remove(struct platform_device *pdev)
{
	struct tc3589x_gpio *tc3589x_gpio = platform_get_drvdata(pdev);
	struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
	struct tc3589x_gpio_platform_data *pdata = tc3589x->pdata->gpio;
	int irq = platform_get_irq(pdev, 0);
	int ret;

	if (pdata && pdata->remove)
		pdata->remove(tc3589x, tc3589x_gpio->chip.base);

	ret = gpiochip_remove(&tc3589x_gpio->chip);
	if (ret < 0) {
		dev_err(tc3589x_gpio->dev,
			"unable to remove gpiochip: %d\n", ret);
		return ret;
	}

	free_irq(irq, tc3589x_gpio);

	platform_set_drvdata(pdev, NULL);
	kfree(tc3589x_gpio);

	return 0;
}

static struct platform_driver tc3589x_gpio_driver = {
	.driver.name	= "tc3589x-gpio",
	.driver.owner	= THIS_MODULE,
	.probe		= tc3589x_gpio_probe,
	.remove		= tc3589x_gpio_remove,
};

static int __init tc3589x_gpio_init(void)
{
	return platform_driver_register(&tc3589x_gpio_driver);
}
subsys_initcall(tc3589x_gpio_init);

static void __exit tc3589x_gpio_exit(void)
{
	platform_driver_unregister(&tc3589x_gpio_driver);
}
module_exit(tc3589x_gpio_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TC3589x GPIO driver");
MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
gpl-2.0
stelios97/sony-kernel-msm7x27a
drivers/isdn/i4l/isdn_net.c
2348
88339
/* $Id: isdn_net.c,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $ * * Linux ISDN subsystem, network interfaces and related functions (linklevel). * * Copyright 1994-1998 by Fritz Elfert (fritz@isdn4linux.de) * Copyright 1995,96 by Thinking Objects Software GmbH Wuerzburg * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * Data Over Voice (DOV) support added - Guy Ellis 23-Mar-02 * guy@traverse.com.au * Outgoing calls - looks for a 'V' in first char of dialed number * Incoming calls - checks first character of eaz as follows: * Numeric - accept DATA only - original functionality * 'V' - accept VOICE (DOV) only * 'B' - accept BOTH DATA and DOV types * * Jan 2001: fix CISCO HDLC Bjoern A. Zeeb <i4l@zabbadoz.net> * for info on the protocol, see * http://i4l.zabbadoz.net/i4l/cisco-hdlc.txt */ #include <linux/isdn.h> #include <linux/slab.h> #include <net/arp.h> #include <net/dst.h> #include <net/pkt_sched.h> #include <linux/inetdevice.h> #include "isdn_common.h" #include "isdn_net.h" #ifdef CONFIG_ISDN_PPP #include "isdn_ppp.h" #endif #ifdef CONFIG_ISDN_X25 #include <linux/concap.h> #include "isdn_concap.h" #endif /* * Outline of new tbusy handling: * * Old method, roughly spoken, consisted of setting tbusy when entering * isdn_net_start_xmit() and at several other locations and clearing * it from isdn_net_start_xmit() thread when sending was successful. * * With 2.3.x multithreaded network core, to prevent problems, tbusy should * only be set by the isdn_net_start_xmit() thread and only when a tx-busy * condition is detected. Other threads (in particular isdn_net_stat_callb()) * are only allowed to clear tbusy. * * -HE */ /* * About SOFTNET: * Most of the changes were pretty obvious and basically done by HE already. 
* * One problem of the isdn net device code is that is uses struct net_device * for masters and slaves. However, only master interface are registered to * the network layer, and therefore, it only makes sense to call netif_* * functions on them. * * --KG */ /* * Find out if the netdevice has been ifup-ed yet. * For slaves, look at the corresponding master. */ static __inline__ int isdn_net_device_started(isdn_net_dev *n) { isdn_net_local *lp = n->local; struct net_device *dev; if (lp->master) dev = lp->master; else dev = n->dev; return netif_running(dev); } /* * wake up the network -> net_device queue. * For slaves, wake the corresponding master interface. */ static __inline__ void isdn_net_device_wake_queue(isdn_net_local *lp) { if (lp->master) netif_wake_queue(lp->master); else netif_wake_queue(lp->netdev->dev); } /* * stop the network -> net_device queue. * For slaves, stop the corresponding master interface. */ static __inline__ void isdn_net_device_stop_queue(isdn_net_local *lp) { if (lp->master) netif_stop_queue(lp->master); else netif_stop_queue(lp->netdev->dev); } /* * find out if the net_device which this lp belongs to (lp can be * master or slave) is busy. 
It's busy iff all (master and slave) * queues are busy */ static __inline__ int isdn_net_device_busy(isdn_net_local *lp) { isdn_net_local *nlp; isdn_net_dev *nd; unsigned long flags; if (!isdn_net_lp_busy(lp)) return 0; if (lp->master) nd = ISDN_MASTER_PRIV(lp)->netdev; else nd = lp->netdev; spin_lock_irqsave(&nd->queue_lock, flags); nlp = lp->next; while (nlp != lp) { if (!isdn_net_lp_busy(nlp)) { spin_unlock_irqrestore(&nd->queue_lock, flags); return 0; } nlp = nlp->next; } spin_unlock_irqrestore(&nd->queue_lock, flags); return 1; } static __inline__ void isdn_net_inc_frame_cnt(isdn_net_local *lp) { atomic_inc(&lp->frame_cnt); if (isdn_net_device_busy(lp)) isdn_net_device_stop_queue(lp); } static __inline__ void isdn_net_dec_frame_cnt(isdn_net_local *lp) { atomic_dec(&lp->frame_cnt); if (!(isdn_net_device_busy(lp))) { if (!skb_queue_empty(&lp->super_tx_queue)) { schedule_work(&lp->tqueue); } else { isdn_net_device_wake_queue(lp); } } } static __inline__ void isdn_net_zero_frame_cnt(isdn_net_local *lp) { atomic_set(&lp->frame_cnt, 0); } /* For 2.2.x we leave the transmitter busy timeout at 2 secs, just * to be safe. * For 2.3.x we push it up to 20 secs, because call establishment * (in particular callback) may take such a long time, and we * don't want confusing messages in the log. However, there is a slight * possibility that this large timeout will break other things like MPPP, * which might rely on the tx timeout. If so, we'll find out this way... 
*/ #define ISDN_NET_TX_TIMEOUT (20*HZ) /* Prototypes */ static int isdn_net_force_dial_lp(isdn_net_local *); static netdev_tx_t isdn_net_start_xmit(struct sk_buff *, struct net_device *); static void isdn_net_ciscohdlck_connected(isdn_net_local *lp); static void isdn_net_ciscohdlck_disconnected(isdn_net_local *lp); char *isdn_net_revision = "$Revision: 1.1.2.2 $"; /* * Code for raw-networking over ISDN */ static void isdn_net_unreachable(struct net_device *dev, struct sk_buff *skb, char *reason) { if(skb) { u_short proto = ntohs(skb->protocol); printk(KERN_DEBUG "isdn_net: %s: %s, signalling dst_link_failure %s\n", dev->name, (reason != NULL) ? reason : "unknown", (proto != ETH_P_IP) ? "Protocol != ETH_P_IP" : ""); dst_link_failure(skb); } else { /* dial not triggered by rawIP packet */ printk(KERN_DEBUG "isdn_net: %s: %s\n", dev->name, (reason != NULL) ? reason : "reason unknown"); } } static void isdn_net_reset(struct net_device *dev) { #ifdef CONFIG_ISDN_X25 struct concap_device_ops * dops = ((isdn_net_local *) netdev_priv(dev))->dops; struct concap_proto * cprot = ((isdn_net_local *) netdev_priv(dev))->netdev->cprot; #endif #ifdef CONFIG_ISDN_X25 if( cprot && cprot -> pops && dops ) cprot -> pops -> restart ( cprot, dev, dops ); #endif } /* Open/initialize the board. */ static int isdn_net_open(struct net_device *dev) { int i; struct net_device *p; struct in_device *in_dev; /* moved here from isdn_net_reset, because only the master has an interface associated which is supposed to be started. BTW: we need to call netif_start_queue, not netif_wake_queue here */ netif_start_queue(dev); isdn_net_reset(dev); /* Fill in the MAC-level header (not needed, but for compatibility... 
*/ for (i = 0; i < ETH_ALEN - sizeof(u32); i++) dev->dev_addr[i] = 0xfc; if ((in_dev = dev->ip_ptr) != NULL) { /* * Any address will do - we take the first */ struct in_ifaddr *ifa = in_dev->ifa_list; if (ifa != NULL) memcpy(dev->dev_addr+2, &ifa->ifa_local, 4); } /* If this interface has slaves, start them also */ p = MASTER_TO_SLAVE(dev); if (p) { while (p) { isdn_net_reset(p); p = MASTER_TO_SLAVE(p); } } isdn_lock_drivers(); return 0; } /* * Assign an ISDN-channel to a net-interface */ static void isdn_net_bind_channel(isdn_net_local * lp, int idx) { lp->flags |= ISDN_NET_CONNECTED; lp->isdn_device = dev->drvmap[idx]; lp->isdn_channel = dev->chanmap[idx]; dev->rx_netdev[idx] = lp->netdev; dev->st_netdev[idx] = lp->netdev; } /* * unbind a net-interface (resets interface after an error) */ static void isdn_net_unbind_channel(isdn_net_local * lp) { skb_queue_purge(&lp->super_tx_queue); if (!lp->master) { /* reset only master device */ /* Moral equivalent of dev_purge_queues(): BEWARE! This chunk of code cannot be called from hardware interrupt handler. I hope it is true. --ANK */ qdisc_reset_all_tx(lp->netdev->dev); } lp->dialstate = 0; dev->rx_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL; dev->st_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL; if (lp->isdn_device != -1 && lp->isdn_channel != -1) isdn_free_channel(lp->isdn_device, lp->isdn_channel, ISDN_USAGE_NET); lp->flags &= ~ISDN_NET_CONNECTED; lp->isdn_device = -1; lp->isdn_channel = -1; } /* * Perform auto-hangup and cps-calculation for net-interfaces. * * auto-hangup: * Increment idle-counter (this counter is reset on any incoming or * outgoing packet), if counter exceeds configured limit either do a * hangup immediately or - if configured - wait until just before the next * charge-info. 
* * cps-calculation (needed for dynamic channel-bundling): * Since this function is called every second, simply reset the * byte-counter of the interface after copying it to the cps-variable. */ static unsigned long last_jiffies = -HZ; void isdn_net_autohup(void) { isdn_net_dev *p = dev->netdev; int anymore; anymore = 0; while (p) { isdn_net_local *l = p->local; if (jiffies == last_jiffies) l->cps = l->transcount; else l->cps = (l->transcount * HZ) / (jiffies - last_jiffies); l->transcount = 0; if (dev->net_verbose > 3) printk(KERN_DEBUG "%s: %d bogocps\n", p->dev->name, l->cps); if ((l->flags & ISDN_NET_CONNECTED) && (!l->dialstate)) { anymore = 1; l->huptimer++; /* * if there is some dialmode where timeout-hangup * should _not_ be done, check for that here */ if ((l->onhtime) && (l->huptimer > l->onhtime)) { if (l->hupflags & ISDN_MANCHARGE && l->hupflags & ISDN_CHARGEHUP) { while (time_after(jiffies, l->chargetime + l->chargeint)) l->chargetime += l->chargeint; if (time_after(jiffies, l->chargetime + l->chargeint - 2 * HZ)) if (l->outgoing || l->hupflags & ISDN_INHUP) isdn_net_hangup(p->dev); } else if (l->outgoing) { if (l->hupflags & ISDN_CHARGEHUP) { if (l->hupflags & ISDN_WAITCHARGE) { printk(KERN_DEBUG "isdn_net: Hupflags of %s are %X\n", p->dev->name, l->hupflags); isdn_net_hangup(p->dev); } else if (time_after(jiffies, l->chargetime + l->chargeint)) { printk(KERN_DEBUG "isdn_net: %s: chtime = %lu, chint = %d\n", p->dev->name, l->chargetime, l->chargeint); isdn_net_hangup(p->dev); } } else isdn_net_hangup(p->dev); } else if (l->hupflags & ISDN_INHUP) isdn_net_hangup(p->dev); } if(dev->global_flags & ISDN_GLOBAL_STOPPED || (ISDN_NET_DIALMODE(*l) == ISDN_NET_DM_OFF)) { isdn_net_hangup(p->dev); break; } } p = (isdn_net_dev *) p->next; } last_jiffies = jiffies; isdn_timer_ctrl(ISDN_TIMER_NETHANGUP, anymore); } static void isdn_net_lp_disconnected(isdn_net_local *lp) { isdn_net_rm_from_bundle(lp); } /* * Handle status-messages from ISDN-interfacecard. 
* This function is called from within the main-status-dispatcher * isdn_status_callback, which itself is called from the low-level driver. * Return: 1 = Event handled, 0 = not for us or unknown Event. */ int isdn_net_stat_callback(int idx, isdn_ctrl *c) { isdn_net_dev *p = dev->st_netdev[idx]; int cmd = c->command; if (p) { isdn_net_local *lp = p->local; #ifdef CONFIG_ISDN_X25 struct concap_proto *cprot = lp->netdev->cprot; struct concap_proto_ops *pops = cprot ? cprot->pops : NULL; #endif switch (cmd) { case ISDN_STAT_BSENT: /* A packet has successfully been sent out */ if ((lp->flags & ISDN_NET_CONNECTED) && (!lp->dialstate)) { isdn_net_dec_frame_cnt(lp); lp->stats.tx_packets++; lp->stats.tx_bytes += c->parm.length; } return 1; case ISDN_STAT_DCONN: /* D-Channel is up */ switch (lp->dialstate) { case 4: case 7: case 8: lp->dialstate++; return 1; case 12: lp->dialstate = 5; return 1; } break; case ISDN_STAT_DHUP: /* Either D-Channel-hangup or error during dialout */ #ifdef CONFIG_ISDN_X25 /* If we are not connencted then dialing had failed. 
If there are generic encap protocol receiver routines signal the closure of the link*/ if( !(lp->flags & ISDN_NET_CONNECTED) && pops && pops -> disconn_ind ) pops -> disconn_ind(cprot); #endif /* CONFIG_ISDN_X25 */ if ((!lp->dialstate) && (lp->flags & ISDN_NET_CONNECTED)) { if (lp->p_encap == ISDN_NET_ENCAP_CISCOHDLCK) isdn_net_ciscohdlck_disconnected(lp); #ifdef CONFIG_ISDN_PPP if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) isdn_ppp_free(lp); #endif isdn_net_lp_disconnected(lp); isdn_all_eaz(lp->isdn_device, lp->isdn_channel); printk(KERN_INFO "%s: remote hangup\n", p->dev->name); printk(KERN_INFO "%s: Chargesum is %d\n", p->dev->name, lp->charge); isdn_net_unbind_channel(lp); return 1; } break; #ifdef CONFIG_ISDN_X25 case ISDN_STAT_BHUP: /* B-Channel-hangup */ /* try if there are generic encap protocol receiver routines and signal the closure of the link */ if( pops && pops -> disconn_ind ){ pops -> disconn_ind(cprot); return 1; } break; #endif /* CONFIG_ISDN_X25 */ case ISDN_STAT_BCONN: /* B-Channel is up */ isdn_net_zero_frame_cnt(lp); switch (lp->dialstate) { case 5: case 6: case 7: case 8: case 9: case 10: case 12: if (lp->dialstate <= 6) { dev->usage[idx] |= ISDN_USAGE_OUTGOING; isdn_info_update(); } else dev->rx_netdev[idx] = p; lp->dialstate = 0; isdn_timer_ctrl(ISDN_TIMER_NETHANGUP, 1); if (lp->p_encap == ISDN_NET_ENCAP_CISCOHDLCK) isdn_net_ciscohdlck_connected(lp); if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP) { if (lp->master) { /* is lp a slave? */ isdn_net_dev *nd = ISDN_MASTER_PRIV(lp)->netdev; isdn_net_add_to_bundle(nd, lp); } } printk(KERN_INFO "isdn_net: %s connected\n", p->dev->name); /* If first Chargeinfo comes before B-Channel connect, * we correct the timestamp here. 
*/ lp->chargetime = jiffies; /* reset dial-timeout */ lp->dialstarted = 0; lp->dialwait_timer = 0; #ifdef CONFIG_ISDN_PPP if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) isdn_ppp_wakeup_daemon(lp); #endif #ifdef CONFIG_ISDN_X25 /* try if there are generic concap receiver routines */ if( pops ) if( pops->connect_ind) pops->connect_ind(cprot); #endif /* CONFIG_ISDN_X25 */ /* ppp needs to do negotiations first */ if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP) isdn_net_device_wake_queue(lp); return 1; } break; case ISDN_STAT_NODCH: /* No D-Channel avail. */ if (lp->dialstate == 4) { lp->dialstate--; return 1; } break; case ISDN_STAT_CINF: /* Charge-info from TelCo. Calculate interval between * charge-infos and set timestamp for last info for * usage by isdn_net_autohup() */ lp->charge++; if (lp->hupflags & ISDN_HAVECHARGE) { lp->hupflags &= ~ISDN_WAITCHARGE; lp->chargeint = jiffies - lp->chargetime - (2 * HZ); } if (lp->hupflags & ISDN_WAITCHARGE) lp->hupflags |= ISDN_HAVECHARGE; lp->chargetime = jiffies; printk(KERN_DEBUG "isdn_net: Got CINF chargetime of %s now %lu\n", p->dev->name, lp->chargetime); return 1; } } return 0; } /* * Perform dialout for net-interfaces and timeout-handling for * D-Channel-up and B-Channel-up Messages. * This function is initially called from within isdn_net_start_xmit() or * or isdn_net_find_icall() after initializing the dialstate for an * interface. If further calls are needed, the function schedules itself * for a timer-callback via isdn_timer_function(). * The dialstate is also affected by incoming status-messages from * the ISDN-Channel which are handled in isdn_net_stat_callback() above. 
*/ void isdn_net_dial(void) { isdn_net_dev *p = dev->netdev; int anymore = 0; int i; isdn_ctrl cmd; u_char *phone_number; while (p) { isdn_net_local *lp = p->local; #ifdef ISDN_DEBUG_NET_DIAL if (lp->dialstate) printk(KERN_DEBUG "%s: dialstate=%d\n", p->dev->name, lp->dialstate); #endif switch (lp->dialstate) { case 0: /* Nothing to do for this interface */ break; case 1: /* Initiate dialout. Set phone-number-pointer to first number * of interface. */ lp->dial = lp->phone[1]; if (!lp->dial) { printk(KERN_WARNING "%s: phone number deleted?\n", p->dev->name); isdn_net_hangup(p->dev); break; } anymore = 1; if(lp->dialtimeout > 0) if(lp->dialstarted == 0 || time_after(jiffies, lp->dialstarted + lp->dialtimeout + lp->dialwait)) { lp->dialstarted = jiffies; lp->dialwait_timer = 0; } lp->dialstate++; /* Fall through */ case 2: /* Prepare dialing. Clear EAZ, then set EAZ. */ cmd.driver = lp->isdn_device; cmd.arg = lp->isdn_channel; cmd.command = ISDN_CMD_CLREAZ; isdn_command(&cmd); sprintf(cmd.parm.num, "%s", isdn_map_eaz2msn(lp->msn, cmd.driver)); cmd.command = ISDN_CMD_SETEAZ; isdn_command(&cmd); lp->dialretry = 0; anymore = 1; lp->dialstate++; /* Fall through */ case 3: /* Setup interface, dial current phone-number, switch to next number. * If list of phone-numbers is exhausted, increment * retry-counter. 
*/ if(dev->global_flags & ISDN_GLOBAL_STOPPED || (ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_OFF)) { char *s; if (dev->global_flags & ISDN_GLOBAL_STOPPED) s = "dial suppressed: isdn system stopped"; else s = "dial suppressed: dialmode `off'"; isdn_net_unreachable(p->dev, NULL, s); isdn_net_hangup(p->dev); break; } cmd.driver = lp->isdn_device; cmd.command = ISDN_CMD_SETL2; cmd.arg = lp->isdn_channel + (lp->l2_proto << 8); isdn_command(&cmd); cmd.driver = lp->isdn_device; cmd.command = ISDN_CMD_SETL3; cmd.arg = lp->isdn_channel + (lp->l3_proto << 8); isdn_command(&cmd); cmd.driver = lp->isdn_device; cmd.arg = lp->isdn_channel; if (!lp->dial) { printk(KERN_WARNING "%s: phone number deleted?\n", p->dev->name); isdn_net_hangup(p->dev); break; } if (!strncmp(lp->dial->num, "LEASED", strlen("LEASED"))) { lp->dialstate = 4; printk(KERN_INFO "%s: Open leased line ...\n", p->dev->name); } else { if(lp->dialtimeout > 0) if (time_after(jiffies, lp->dialstarted + lp->dialtimeout)) { lp->dialwait_timer = jiffies + lp->dialwait; lp->dialstarted = 0; isdn_net_unreachable(p->dev, NULL, "dial: timed out"); isdn_net_hangup(p->dev); break; } cmd.driver = lp->isdn_device; cmd.command = ISDN_CMD_DIAL; cmd.parm.setup.si2 = 0; /* check for DOV */ phone_number = lp->dial->num; if ((*phone_number == 'v') || (*phone_number == 'V')) { /* DOV call */ cmd.parm.setup.si1 = 1; } else { /* DATA call */ cmd.parm.setup.si1 = 7; } strcpy(cmd.parm.setup.phone, phone_number); /* * Switch to next number or back to start if at end of list. 
*/ if (!(lp->dial = (isdn_net_phone *) lp->dial->next)) { lp->dial = lp->phone[1]; lp->dialretry++; if (lp->dialretry > lp->dialmax) { if (lp->dialtimeout == 0) { lp->dialwait_timer = jiffies + lp->dialwait; lp->dialstarted = 0; isdn_net_unreachable(p->dev, NULL, "dial: tried all numbers dialmax times"); } isdn_net_hangup(p->dev); break; } } sprintf(cmd.parm.setup.eazmsn, "%s", isdn_map_eaz2msn(lp->msn, cmd.driver)); i = isdn_dc2minor(lp->isdn_device, lp->isdn_channel); if (i >= 0) { strcpy(dev->num[i], cmd.parm.setup.phone); dev->usage[i] |= ISDN_USAGE_OUTGOING; isdn_info_update(); } printk(KERN_INFO "%s: dialing %d %s... %s\n", p->dev->name, lp->dialretry, cmd.parm.setup.phone, (cmd.parm.setup.si1 == 1) ? "DOV" : ""); lp->dtimer = 0; #ifdef ISDN_DEBUG_NET_DIAL printk(KERN_DEBUG "dial: d=%d c=%d\n", lp->isdn_device, lp->isdn_channel); #endif isdn_command(&cmd); } lp->huptimer = 0; lp->outgoing = 1; if (lp->chargeint) { lp->hupflags |= ISDN_HAVECHARGE; lp->hupflags &= ~ISDN_WAITCHARGE; } else { lp->hupflags |= ISDN_WAITCHARGE; lp->hupflags &= ~ISDN_HAVECHARGE; } anymore = 1; lp->dialstate = (lp->cbdelay && (lp->flags & ISDN_NET_CBOUT)) ? 12 : 4; break; case 4: /* Wait for D-Channel-connect. * If timeout, switch back to state 3. * Dialmax-handling moved to state 3. */ if (lp->dtimer++ > ISDN_TIMER_DTIMEOUT10) lp->dialstate = 3; anymore = 1; break; case 5: /* Got D-Channel-Connect, send B-Channel-request */ cmd.driver = lp->isdn_device; cmd.arg = lp->isdn_channel; cmd.command = ISDN_CMD_ACCEPTB; anymore = 1; lp->dtimer = 0; lp->dialstate++; isdn_command(&cmd); break; case 6: /* Wait for B- or D-Channel-connect. If timeout, * switch back to state 3. 
*/ #ifdef ISDN_DEBUG_NET_DIAL printk(KERN_DEBUG "dialtimer2: %d\n", lp->dtimer); #endif if (lp->dtimer++ > ISDN_TIMER_DTIMEOUT10) lp->dialstate = 3; anymore = 1; break; case 7: /* Got incoming Call, setup L2 and L3 protocols, * then wait for D-Channel-connect */ #ifdef ISDN_DEBUG_NET_DIAL printk(KERN_DEBUG "dialtimer4: %d\n", lp->dtimer); #endif cmd.driver = lp->isdn_device; cmd.command = ISDN_CMD_SETL2; cmd.arg = lp->isdn_channel + (lp->l2_proto << 8); isdn_command(&cmd); cmd.driver = lp->isdn_device; cmd.command = ISDN_CMD_SETL3; cmd.arg = lp->isdn_channel + (lp->l3_proto << 8); isdn_command(&cmd); if (lp->dtimer++ > ISDN_TIMER_DTIMEOUT15) isdn_net_hangup(p->dev); else { anymore = 1; lp->dialstate++; } break; case 9: /* Got incoming D-Channel-Connect, send B-Channel-request */ cmd.driver = lp->isdn_device; cmd.arg = lp->isdn_channel; cmd.command = ISDN_CMD_ACCEPTB; isdn_command(&cmd); anymore = 1; lp->dtimer = 0; lp->dialstate++; break; case 8: case 10: /* Wait for B- or D-channel-connect */ #ifdef ISDN_DEBUG_NET_DIAL printk(KERN_DEBUG "dialtimer4: %d\n", lp->dtimer); #endif if (lp->dtimer++ > ISDN_TIMER_DTIMEOUT10) isdn_net_hangup(p->dev); else anymore = 1; break; case 11: /* Callback Delay */ if (lp->dtimer++ > lp->cbdelay) lp->dialstate = 1; anymore = 1; break; case 12: /* Remote does callback. Hangup after cbdelay, then wait for incoming * call (in state 4). */ if (lp->dtimer++ > lp->cbdelay) { printk(KERN_INFO "%s: hangup waiting for callback ...\n", p->dev->name); lp->dtimer = 0; lp->dialstate = 4; cmd.driver = lp->isdn_device; cmd.command = ISDN_CMD_HANGUP; cmd.arg = lp->isdn_channel; isdn_command(&cmd); isdn_all_eaz(lp->isdn_device, lp->isdn_channel); } anymore = 1; break; default: printk(KERN_WARNING "isdn_net: Illegal dialstate %d for device %s\n", lp->dialstate, p->dev->name); } p = (isdn_net_dev *) p->next; } isdn_timer_ctrl(ISDN_TIMER_NETDIAL, anymore); } /* * Perform hangup for a net-interface. 
*/
/*
 * isdn_net_hangup() - hang up the ISDN connection of one net-interface.
 *
 * A connected slave is hung up (recursively) before its master.  For
 * syncPPP the PPP state is released before the HL driver receives
 * ISDN_CMD_HANGUP.  The channel binding is released unconditionally,
 * even when the interface was not flagged connected.
 */
void
isdn_net_hangup(struct net_device *d)
{
	isdn_net_local *lp = netdev_priv(d);
	isdn_ctrl cmd;
#ifdef CONFIG_ISDN_X25
	struct concap_proto *cprot = lp->netdev->cprot;
	struct concap_proto_ops *pops = cprot ? cprot->pops : NULL;
#endif
	if (lp->flags & ISDN_NET_CONNECTED) {
		if (lp->slave != NULL) {
			/* Hang up a still-connected slave first. */
			isdn_net_local *slp = ISDN_SLAVE_PRIV(lp);
			if (slp->flags & ISDN_NET_CONNECTED) {
				printk(KERN_INFO
				       "isdn_net: hang up slave %s before %s\n",
				       lp->slave->name, d->name);
				isdn_net_hangup(lp->slave);
			}
		}
		printk(KERN_INFO "isdn_net: local hangup %s\n", d->name);
#ifdef CONFIG_ISDN_PPP
		if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP)
			isdn_ppp_free(lp);
#endif
		isdn_net_lp_disconnected(lp);
#ifdef CONFIG_ISDN_X25
		/* try if there are generic encap protocol
		   receiver routines and signal the closure of
		   the link */
		if( pops && pops -> disconn_ind )
			pops -> disconn_ind(cprot);
#endif /* CONFIG_ISDN_X25 */
		cmd.driver = lp->isdn_device;
		cmd.command = ISDN_CMD_HANGUP;
		cmd.arg = lp->isdn_channel;
		isdn_command(&cmd);
		printk(KERN_INFO "%s: Chargesum is %d\n", d->name, lp->charge);
		isdn_all_eaz(lp->isdn_device, lp->isdn_channel);
	}
	isdn_net_unbind_channel(lp);
}

/* Source/destination port pair as laid out at the start of TCP and
 * UDP headers. */
typedef struct {
	__be16 source;
	__be16 dest;
} ip_ports;

/*
 * isdn_net_log_skb() - log the packet that triggered auto-dialing.
 *
 * Decodes the IP (or ARP) header to print source/destination addresses
 * and, for TCP/UDP, the port pair.  If the skb's network-header offset
 * looks untrustworthy, falls back to encapsulation-specific parsing of
 * the raw buffer.
 */
static void
isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp)
{
	/* hopefully, this was set correctly */
	const u_char *p = skb_network_header(skb);
	unsigned short proto = ntohs(skb->protocol);
	int data_ofs;
	ip_ports *ipp;
	char addinfo[100];

	addinfo[0] = '\0';
	/* This check stolen from 2.1.72 dev_queue_xmit_nit() */
	if (p < skb->data || skb->network_header >= skb->tail) {
		/* fall back to old isdn_net_log_packet method() */
		char * buf = skb->data;

		printk(KERN_DEBUG "isdn_net: protocol %04x is buggy, dev %s\n", skb->protocol, lp->netdev->dev->name);
		p = buf;
		proto = ETH_P_IP;
		/* Locate the network header manually per encapsulation. */
		switch (lp->p_encap) {
		case ISDN_NET_ENCAP_IPTYP:
			proto = ntohs(*(__be16 *)&buf[0]);
			p = &buf[2];
			break;
		case ISDN_NET_ENCAP_ETHER:
			proto = ntohs(*(__be16 *)&buf[12]);
			p = &buf[14];
			break;
		case ISDN_NET_ENCAP_CISCOHDLC:
			proto = ntohs(*(__be16 *)&buf[2]);
			p = &buf[4];
			break;
#ifdef CONFIG_ISDN_PPP
		case ISDN_NET_ENCAP_SYNCPPP:
			proto = ntohs(skb->protocol);
			p = &buf[IPPP_MAX_HEADER];
			break;
#endif
		}
	}
	/* IHL (low nibble of first IP header byte) in bytes. */
	data_ofs = ((p[0] & 15) * 4);
	switch (proto) {
	case ETH_P_IP:
		switch (p[9]) {	/* IP protocol field */
		case 1:
			strcpy(addinfo, " ICMP");
			break;
		case 2:
			strcpy(addinfo, " IGMP");
			break;
		case 4:
			strcpy(addinfo, " IPIP");
			break;
		case 6:
			ipp = (ip_ports *) (&p[data_ofs]);
			sprintf(addinfo, " TCP, port: %d -> %d", ntohs(ipp->source),
				ntohs(ipp->dest));
			break;
		case 8:
			strcpy(addinfo, " EGP");
			break;
		case 12:
			strcpy(addinfo, " PUP");
			break;
		case 17:
			ipp = (ip_ports *) (&p[data_ofs]);
			sprintf(addinfo, " UDP, port: %d -> %d", ntohs(ipp->source),
				ntohs(ipp->dest));
			break;
		case 22:
			strcpy(addinfo, " IDP");
			break;
		}
		printk(KERN_INFO "OPEN: %pI4 -> %pI4%s\n", p + 12, p + 16, addinfo);
		break;
	case ETH_P_ARP:
		printk(KERN_INFO "OPEN: ARP %pI4 -> *.*.*.* ?%pI4\n", p + 14, p + 24);
		break;
	}
}

/*
 * this function is used to send supervisory data, i.e. data which was
 * not received from the network layer, but e.g. frames from ipppd, CCP
 * reset frames etc.
*/ void isdn_net_write_super(isdn_net_local *lp, struct sk_buff *skb) { if (in_irq()) { // we can't grab the lock from irq context, // so we just queue the packet skb_queue_tail(&lp->super_tx_queue, skb); schedule_work(&lp->tqueue); return; } spin_lock_bh(&lp->xmit_lock); if (!isdn_net_lp_busy(lp)) { isdn_net_writebuf_skb(lp, skb); } else { skb_queue_tail(&lp->super_tx_queue, skb); } spin_unlock_bh(&lp->xmit_lock); } /* * called from tq_immediate */ static void isdn_net_softint(struct work_struct *work) { isdn_net_local *lp = container_of(work, isdn_net_local, tqueue); struct sk_buff *skb; spin_lock_bh(&lp->xmit_lock); while (!isdn_net_lp_busy(lp)) { skb = skb_dequeue(&lp->super_tx_queue); if (!skb) break; isdn_net_writebuf_skb(lp, skb); } spin_unlock_bh(&lp->xmit_lock); } /* * all frames sent from the (net) LL to a HL driver should go via this function * it's serialized by the caller holding the lp->xmit_lock spinlock */ void isdn_net_writebuf_skb(isdn_net_local *lp, struct sk_buff *skb) { int ret; int len = skb->len; /* save len */ /* before obtaining the lock the caller should have checked that the lp isn't busy */ if (isdn_net_lp_busy(lp)) { printk("isdn BUG at %s:%d!\n", __FILE__, __LINE__); goto error; } if (!(lp->flags & ISDN_NET_CONNECTED)) { printk("isdn BUG at %s:%d!\n", __FILE__, __LINE__); goto error; } ret = isdn_writebuf_skb_stub(lp->isdn_device, lp->isdn_channel, 1, skb); if (ret != len) { /* we should never get here */ printk(KERN_WARNING "%s: HL driver queue full\n", lp->netdev->dev->name); goto error; } lp->transcount += len; isdn_net_inc_frame_cnt(lp); return; error: dev_kfree_skb(skb); lp->stats.tx_errors++; } /* * Helper function for isdn_net_start_xmit. * When called, the connection is already established. * Based on cps-calculation, check if device is overloaded. * If so, and if a slave exists, trigger dialing for it. * If any slave is online, deliver packets using a simple round robin * scheme. * * Return: 0 on success, !0 on failure. 
*/
static int
isdn_net_xmit(struct net_device *ndev, struct sk_buff *skb)
{
	isdn_net_dev *nd;
	isdn_net_local *slp;
	isdn_net_local *lp = netdev_priv(ndev);
	int retv = NETDEV_TX_OK;

	/* Slaves must never be handed packets directly. */
	if (((isdn_net_local *) netdev_priv(ndev))->master) {
		printk("isdn BUG at %s:%d!\n", __FILE__, __LINE__);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* For the other encaps the header has already been built */
#ifdef CONFIG_ISDN_PPP
	if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) {
		return isdn_ppp_xmit(skb, ndev);
	}
#endif
	nd = ((isdn_net_local *) netdev_priv(ndev))->netdev;
	/* Round-robin: pick an unlocked, non-busy channel of the bundle;
	 * returns with that lp's xmit_lock held. */
	lp = isdn_net_get_locked_lp(nd);
	if (!lp) {
		printk(KERN_WARNING "%s: all channels busy - requeuing!\n", ndev->name);
		return NETDEV_TX_BUSY;
	}
	/* we have our lp locked from now on */

	/* Reset hangup-timeout */
	lp->huptimer = 0; // FIXME?
	isdn_net_writebuf_skb(lp, skb);
	spin_unlock_bh(&lp->xmit_lock);

	/* the following stuff is here for backwards compatibility.
	 * in future, start-up and hangup of slaves (based on current load)
	 * should move to userspace and get based on an overall cps
	 * calculation
	 */
	if (lp->cps > lp->triggercps) {
		if (lp->slave) {
			if (!lp->sqfull) {
				/* First time overload: set timestamp only */
				lp->sqfull = 1;
				lp->sqfull_stamp = jiffies;
			} else {
				/* subsequent overload: if slavedelay exceeded, start dialing */
				if (time_after(jiffies, lp->sqfull_stamp + lp->slavedelay)) {
					slp = ISDN_SLAVE_PRIV(lp);
					if (!(slp->flags & ISDN_NET_CONNECTED)) {
						isdn_net_force_dial_lp(ISDN_SLAVE_PRIV(lp));
					}
				}
			}
		}
	} else {
		if (lp->sqfull && time_after(jiffies, lp->sqfull_stamp + lp->slavedelay + (10 * HZ))) {
			lp->sqfull = 0;
		}
		/* this is a hack to allow auto-hangup for slaves on moderate loads */
		nd->queue = nd->local;
	}

	return retv;

}

/*
 * isdn_net_adjust_hdr() - for ETHER encapsulation, strip any bytes
 * between skb->data and the Ethernet payload so the frame starts at
 * the MAC header.
 */
static void
isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev)
{
	isdn_net_local *lp = netdev_priv(dev);
	if (!skb)
		return;
	if (lp->p_encap == ISDN_NET_ENCAP_ETHER) {
		const int pullsize = skb_network_offset(skb) - ETH_HLEN;
		if (pullsize > 0) {
			printk(KERN_DEBUG "isdn_net: Pull junk %d\n", pullsize);
			skb_pull(skb, pullsize);
		}
	}
}


/* Watchdog callback: count the error, restart trans_start and wake the
 * queue so the upper layer retries. */
static void isdn_net_tx_timeout(struct net_device * ndev)
{
	isdn_net_local *lp = netdev_priv(ndev);

	printk(KERN_WARNING "isdn_tx_timeout dev %s dialstate %d\n", ndev->name, lp->dialstate);
	if (!lp->dialstate){
		lp->stats.tx_errors++;
		/*
		 * There is a certain probability that this currently
		 * works at all because if we always wake up the interface,
		 * then upper layer will try to send the next packet
		 * immediately. And then, the old clean_up logic in the
		 * driver will hopefully continue to work as it used to do.
		 *
		 * This is rather primitive right know, we better should
		 * clean internal queues here, in particular for multilink and
		 * ppp, and reset HL driver's channel, too.   --HE
		 *
		 * actually, this may not matter at all, because ISDN hardware
		 * should not see transmitter hangs at all IMO
		 * changed KERN_DEBUG to KERN_WARNING to find out if this is
		 * ever called   --KG
		 */
	}
	ndev->trans_start = jiffies;
	netif_wake_queue(ndev);
}

/*
 * Try sending a packet.
 * If this interface isn't connected to a ISDN-Channel, find a free channel,
 * and start dialing.
 */
static netdev_tx_t
isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	isdn_net_local *lp = netdev_priv(ndev);
#ifdef CONFIG_ISDN_X25
	struct concap_proto * cprot = lp -> netdev -> cprot;
/* At this point hard_start_xmit() passes control to the encapsulation
   protocol (if present).
   For X.25 auto-dialing is completly bypassed because:
   - It does not conform with the semantics of a reliable datalink
     service as needed by X.25 PLP.
   - I don't want that the interface starts dialing when the network layer
     sends a message which requests to disconnect the lapb link (or if it
     sends any other message not resulting in data transmission).
   Instead, dialing will be initiated by the encapsulation protocol entity
   when a dl_establish request is received from the upper layer.
*/
	if (cprot && cprot -> pops) {
		int ret = cprot -> pops -> encap_and_xmit ( cprot , skb);

		if (ret)
			netif_stop_queue(ndev);
		return ret;
	} else
#endif
	/* auto-dialing xmit function */
	{
#ifdef ISDN_DEBUG_NET_DUMP
		u_char *buf;
#endif
		isdn_net_adjust_hdr(skb, ndev);
#ifdef ISDN_DEBUG_NET_DUMP
		buf = skb->data;
		isdn_dumppkt("S:", buf, skb->len, 40);
#endif

		if (!(lp->flags & ISDN_NET_CONNECTED)) {
			int chi;
			/* only do autodial if allowed by config */
			if (!(ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_AUTO)) {
				isdn_net_unreachable(ndev, skb, "dial rejected: interface not in dialmode `auto'");
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}
			if (lp->phone[1]) {
				ulong flags;

				/* Enforce the dial-wait window after a failed
				 * dial cycle before trying again. */
				if(lp->dialwait_timer <= 0)
					if(lp->dialstarted > 0 && lp->dialtimeout > 0 && time_before(jiffies, lp->dialstarted + lp->dialtimeout + lp->dialwait))
						lp->dialwait_timer = lp->dialstarted + lp->dialtimeout + lp->dialwait;

				if(lp->dialwait_timer > 0) {
					if(time_before(jiffies, lp->dialwait_timer)) {
						isdn_net_unreachable(ndev, skb, "dial rejected: retry-time not reached");
						dev_kfree_skb(skb);
						return NETDEV_TX_OK;
					} else
						lp->dialwait_timer = 0;
				}
				/* Grab a free ISDN-Channel */
				spin_lock_irqsave(&dev->lock, flags);
				if (((chi =
				     isdn_get_free_channel(
					 		ISDN_USAGE_NET,
							lp->l2_proto,
							lp->l3_proto,
							lp->pre_device,
						 	lp->pre_channel,
							lp->msn)
							) < 0) &&
					((chi =
				     isdn_get_free_channel(
					 		ISDN_USAGE_NET,
							lp->l2_proto,
							lp->l3_proto,
							lp->pre_device,
							lp->pre_channel^1,
							lp->msn)
							) < 0)) {
					spin_unlock_irqrestore(&dev->lock, flags);
					isdn_net_unreachable(ndev, skb,
							   "No channel");
					dev_kfree_skb(skb);
					return NETDEV_TX_OK;
				}
				/* Log packet, which triggered dialing */
				if (dev->net_verbose)
					isdn_net_log_skb(skb, lp);
				lp->dialstate = 1;
				/* Connect interface with channel */
				isdn_net_bind_channel(lp, chi);
#ifdef CONFIG_ISDN_PPP
				if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) {
					/* no 'first_skb' handling for syncPPP */
					if (isdn_ppp_bind(lp) < 0) {
						dev_kfree_skb(skb);
						isdn_net_unbind_channel(lp);
						spin_unlock_irqrestore(&dev->lock, flags);
						return NETDEV_TX_OK;	/* STN (skb to nirvana) ;) */
					}
#ifdef CONFIG_IPPP_FILTER
					if (isdn_ppp_autodial_filter(skb, lp)) {
						isdn_ppp_free(lp);
						isdn_net_unbind_channel(lp);
						spin_unlock_irqrestore(&dev->lock, flags);
						isdn_net_unreachable(ndev, skb, "dial rejected: packet filtered");
						dev_kfree_skb(skb);
						return NETDEV_TX_OK;
					}
#endif
					spin_unlock_irqrestore(&dev->lock, flags);
					isdn_net_dial();	/* Initiate dialing */
					netif_stop_queue(ndev);
					return NETDEV_TX_BUSY;	/* let upper layer requeue skb packet */
				}
#endif
				/* Initiate dialing */
				spin_unlock_irqrestore(&dev->lock, flags);
				isdn_net_dial();
				isdn_net_device_stop_queue(lp);
				return NETDEV_TX_BUSY;
			} else {
				isdn_net_unreachable(ndev, skb,
						     "No phone number");
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}
		} else {
			/* Device is connected to an ISDN channel */
			ndev->trans_start = jiffies;
			if (!lp->dialstate) {
				/* ISDN connection is established, try sending */
				int ret;
				ret = (isdn_net_xmit(ndev, skb));
				if(ret) netif_stop_queue(ndev);
				return ret;
			} else
				netif_stop_queue(ndev);
		}
	}
	return NETDEV_TX_BUSY;
}

/*
 * Shutdown a net-interface.
 *
 * Closes any X.25 encapsulation protocol, stops the queue, hangs up
 * every slave and finally the interface itself.
 */
static int
isdn_net_close(struct net_device *dev)
{
	struct net_device *p;
#ifdef CONFIG_ISDN_X25
	struct concap_proto * cprot =
		((isdn_net_local *) netdev_priv(dev))->netdev->cprot;
	/* printk(KERN_DEBUG "isdn_net_close %s\n" , dev-> name ); */
#endif

#ifdef CONFIG_ISDN_X25
	if( cprot && cprot -> pops ) cprot -> pops -> close( cprot );
#endif
	netif_stop_queue(dev);
	p = MASTER_TO_SLAVE(dev);
	if (p) {
		/* If this interface has slaves, stop them also */
		while (p) {
#ifdef CONFIG_ISDN_X25
			cprot = ((isdn_net_local *) netdev_priv(p))
				-> netdev -> cprot;
			if( cprot && cprot -> pops )
				cprot -> pops -> close( cprot );
#endif
			isdn_net_hangup(p);
			p = MASTER_TO_SLAVE(p);
		}
	}
	isdn_net_hangup(dev);
	isdn_unlock_drivers();
	return 0;
}

/*
 * Get statistics
 */
static struct net_device_stats *
isdn_net_get_stats(struct net_device *dev)
{
	isdn_net_local *lp = netdev_priv(dev);
	return &lp->stats;
}

/* This is simply a copy from std.
   eth.c EXCEPT we pull ETH_HLEN
 * instead of dev->hard_header_len off. This is done because the
 * lowlevel-driver has already pulled off its stuff when we get
 * here and this routine only gets called with p_encap == ETHER.
 * Determine the packet's protocol ID. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 */
static __be16
isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);
	eth = eth_hdr(skb);

	if (*eth->h_dest & 1) {
		if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	}
	/*
	 *      This ALLMULTI check should be redundant by 1.4
	 *      so don't forget to remove it.
	 */
	else if (dev->flags & (IFF_PROMISC /*| IFF_ALLMULTI*/)) {
		if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
			skb->pkt_type = PACKET_OTHERHOST;
	}
	/* Values >= 1536 are Ethernet II type codes, not 802.3 lengths. */
	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 *      This is a magic hack to spot IPX packets. Older Novell breaks
	 *      the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 *      layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 *      won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *) rawp == 0xFFFF)
		return htons(ETH_P_802_3);
	/*
	 *      Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}


/*
 * CISCO HDLC keepalive specific stuff
 */
/* Allocate an skb with headroom for the HL driver's header. */
static struct sk_buff*
isdn_net_ciscohdlck_alloc_skb(isdn_net_local *lp, int len)
{
	unsigned short hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen;
	struct sk_buff *skb;

	skb = alloc_skb(hl + len, GFP_ATOMIC);
	if (skb)
		skb_reserve(skb, hl);
	else
		printk("isdn out of mem at %s:%d!\n", __FILE__, __LINE__);
	return skb;
}

/* cisco hdlck device private ioctls */
static int
isdn_ciscohdlck_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	isdn_net_local *lp = netdev_priv(dev);
	unsigned long len = 0;
	unsigned long expires = 0;
	int tmp = 0;
	int period = lp->cisco_keepalive_period;
	s8 debserint = lp->cisco_debserint;
	int rc = 0;

	if (lp->p_encap != ISDN_NET_ENCAP_CISCOHDLCK)
		return -EINVAL;

	switch (cmd) {
		/* get/set keepalive period */
		case SIOCGKEEPPERIOD:
			len = (unsigned long)sizeof(lp->cisco_keepalive_period);
			if (copy_to_user(ifr->ifr_data,
				&lp->cisco_keepalive_period, len))
				rc = -EFAULT;
			break;
		case SIOCSKEEPPERIOD:
			tmp = lp->cisco_keepalive_period;
			len = (unsigned long)sizeof(lp->cisco_keepalive_period);
			if (copy_from_user(&period, ifr->ifr_data, len))
				rc = -EFAULT;
			/* Valid periods are 1..32767 seconds. */
			if ((period > 0) && (period <= 32767))
				lp->cisco_keepalive_period = period;
			else
				rc = -EINVAL;
			if (!rc && (tmp != lp->cisco_keepalive_period)) {
				expires = (unsigned long)(jiffies +
					lp->cisco_keepalive_period * HZ);
				mod_timer(&lp->cisco_timer, expires);
				printk(KERN_INFO "%s: Keepalive period set "
					"to %d seconds.\n",
					dev->name, lp->cisco_keepalive_period);
			}
			break;

		/* get/set debugging */
		case SIOCGDEBSERINT:
			len = (unsigned long)sizeof(lp->cisco_debserint);
			if (copy_to_user(ifr->ifr_data,
				&lp->cisco_debserint, len))
				rc = -EFAULT;
			break;
		case SIOCSDEBSERINT:
			len = (unsigned long)sizeof(lp->cisco_debserint);
			if (copy_from_user(&debserint,
				ifr->ifr_data, len))
				rc = -EFAULT;
			if ((debserint >= 0) && (debserint <= 64))
				lp->cisco_debserint = debserint;
			else
				rc = -EINVAL;
			break;

		default:
			rc = -EINVAL;
			break;
	}
	return (rc);
}


/* Dispatch device-private ioctls per encapsulation. */
static int isdn_net_ioctl(struct net_device *dev,
			  struct ifreq *ifr, int cmd)
{
	isdn_net_local *lp = netdev_priv(dev);

	switch (lp->p_encap) {
#ifdef CONFIG_ISDN_PPP
	case ISDN_NET_ENCAP_SYNCPPP:
		return isdn_ppp_dev_ioctl(dev, ifr, cmd);
#endif
	case ISDN_NET_ENCAP_CISCOHDLCK:
		return isdn_ciscohdlck_dev_ioctl(dev, ifr, cmd);
	default:
		return -EINVAL;
	}
}

/* called via cisco_timer.function
 *
 * Tracks the SLARP line state from the difference between our sequence
 * number and the last one the peer echoed back, sends the next
 * keepalive frame and re-arms the timer.
 */
static void
isdn_net_ciscohdlck_slarp_send_keepalive(unsigned long data)
{
	isdn_net_local *lp = (isdn_net_local *) data;
	struct sk_buff *skb;
	unsigned char *p;
	unsigned long last_cisco_myseq = lp->cisco_myseq;
	int myseq_diff = 0;

	if (!(lp->flags & ISDN_NET_CONNECTED) || lp->dialstate) {
		printk("isdn BUG at %s:%d!\n", __FILE__, __LINE__);
		return;
	}
	lp->cisco_myseq++;

	myseq_diff = (lp->cisco_myseq - lp->cisco_mineseen);
	if ((lp->cisco_line_state) && ((myseq_diff >= 3)||(myseq_diff <= -3))) {
		/* line up -> down */
		lp->cisco_line_state = 0;
		printk (KERN_WARNING
				"UPDOWN: Line protocol on Interface %s,"
				" changed state to down\n", lp->netdev->dev->name);
		/* should stop routing higher-level data across */
	} else if ((!lp->cisco_line_state) &&
		(myseq_diff >= 0) && (myseq_diff <= 2)) {
		/* line down -> up */
		lp->cisco_line_state = 1;
		printk (KERN_WARNING
				"UPDOWN: Line protocol on Interface %s,"
				" changed state to up\n", lp->netdev->dev->name);
		/* restart routing higher-level data across */
	}

	if (lp->cisco_debserint)
		printk (KERN_DEBUG "%s: HDLC "
			"myseq %lu, mineseen %lu%c, yourseen %lu, %s\n",
			lp->netdev->dev->name, last_cisco_myseq, lp->cisco_mineseen,
			((last_cisco_myseq == lp->cisco_mineseen) ? '*' : 040),
			lp->cisco_yourseq,
			((lp->cisco_line_state) ? "line up" : "line down"));

	skb = isdn_net_ciscohdlck_alloc_skb(lp, 4 + 14);
	if (!skb)
		return;

	p = skb_put(skb, 4 + 14);

	/* cisco header */
	*(u8 *)(p + 0) = CISCO_ADDR_UNICAST;
	*(u8 *)(p + 1) = CISCO_CTRL;
	*(__be16 *)(p + 2) = cpu_to_be16(CISCO_TYPE_SLARP);

	/* slarp keepalive */
	*(__be32 *)(p + 4) = cpu_to_be32(CISCO_SLARP_KEEPALIVE);
	*(__be32 *)(p + 8) = cpu_to_be32(lp->cisco_myseq);
	*(__be32 *)(p + 12) = cpu_to_be32(lp->cisco_yourseq);
	*(__be16 *)(p + 16) = cpu_to_be16(0xffff); // reliability, always 0xffff
	p += 18;

	isdn_net_write_super(lp, skb);

	lp->cisco_timer.expires = jiffies + lp->cisco_keepalive_period * HZ;

	add_timer(&lp->cisco_timer);
}

/* Send a SLARP address request (zero address/netmask). */
static void
isdn_net_ciscohdlck_slarp_send_request(isdn_net_local *lp)
{
	struct sk_buff *skb;
	unsigned char *p;

	skb = isdn_net_ciscohdlck_alloc_skb(lp, 4 + 14);
	if (!skb)
		return;

	p = skb_put(skb, 4 + 14);

	/* cisco header */
	*(u8 *)(p + 0) = CISCO_ADDR_UNICAST;
	*(u8 *)(p + 1) = CISCO_CTRL;
	*(__be16 *)(p + 2) = cpu_to_be16(CISCO_TYPE_SLARP);

	/* slarp request */
	*(__be32 *)(p + 4) = cpu_to_be32(CISCO_SLARP_REQUEST);
	*(__be32 *)(p + 8) = cpu_to_be32(0); // address
	*(__be32 *)(p + 12) = cpu_to_be32(0); // netmask
	*(__be16 *)(p + 16) = cpu_to_be16(0); // unused
	p += 18;

	isdn_net_write_super(lp, skb);
}

/* Reset SLARP state on connect, send the initial request and start
 * the keepalive timer. */
static void
isdn_net_ciscohdlck_connected(isdn_net_local *lp)
{
	lp->cisco_myseq = 0;
	lp->cisco_mineseen = 0;
	lp->cisco_yourseq = 0;
	lp->cisco_keepalive_period = ISDN_TIMER_KEEPINT;
	lp->cisco_last_slarp_in = 0;
	lp->cisco_line_state = 0;
	lp->cisco_debserint = 0;

	/* send slarp request because interface/seq.no.s reset */
	isdn_net_ciscohdlck_slarp_send_request(lp);

	init_timer(&lp->cisco_timer);
	lp->cisco_timer.data = (unsigned long) lp;
	lp->cisco_timer.function = isdn_net_ciscohdlck_slarp_send_keepalive;
	lp->cisco_timer.expires = jiffies + lp->cisco_keepalive_period * HZ;
	add_timer(&lp->cisco_timer);
}

/* Stop the keepalive timer on disconnect. */
static void
isdn_net_ciscohdlck_disconnected(isdn_net_local *lp)
{
	del_timer(&lp->cisco_timer);
}

/* Answer a SLARP request with our primary IPv4 address and netmask
 * (zeroes when the interface has no address). */
static void
isdn_net_ciscohdlck_slarp_send_reply(isdn_net_local *lp)
{
	struct sk_buff *skb;
	unsigned char *p;
	struct in_device *in_dev = NULL;
	__be32 addr = 0;		/* local ipv4 address */
	__be32 mask = 0;		/* local netmask */

	if ((in_dev = lp->netdev->dev->ip_ptr) != NULL) {
		/* take primary(first) address of interface */
		struct in_ifaddr *ifa = in_dev->ifa_list;
		if (ifa != NULL) {
			addr = ifa->ifa_local;
			mask = ifa->ifa_mask;
		}
	}

	skb = isdn_net_ciscohdlck_alloc_skb(lp, 4 + 14);
	if (!skb)
		return;

	p = skb_put(skb, 4 + 14);

	/* cisco header */
	*(u8 *)(p + 0) = CISCO_ADDR_UNICAST;
	*(u8 *)(p + 1) = CISCO_CTRL;
	*(__be16 *)(p + 2) = cpu_to_be16(CISCO_TYPE_SLARP);

	/* slarp reply, send own ip/netmask; if values are nonsense remote
	 * should think we are unable to provide it with an address via SLARP */
	*(__be32 *)(p + 4) = cpu_to_be32(CISCO_SLARP_REPLY);
	*(__be32 *)(p + 8) = addr;	// address
	*(__be32 *)(p + 12) = mask;	// netmask
	*(__be16 *)(p + 16) = cpu_to_be16(0);	// unused
	p += 18;

	isdn_net_write_super(lp, skb);
}

/* Parse an incoming SLARP packet (request, reply or keepalive).
 * Caller has already stripped the 4-byte Cisco header. */
static void
isdn_net_ciscohdlck_slarp_in(isdn_net_local *lp, struct sk_buff *skb)
{
	unsigned char *p;
	int period;
	u32 code;
	u32 my_seq;
	u32 your_seq;
	__be32 local;
	__be32 *addr, *mask;

	if (skb->len < 14)
		return;

	p = skb->data;
	code = be32_to_cpup((__be32 *)p);
	p += 4;

	switch (code) {
	case CISCO_SLARP_REQUEST:
		lp->cisco_yourseq = 0;
		isdn_net_ciscohdlck_slarp_send_reply(lp);
		break;
	case CISCO_SLARP_REPLY:
		addr = (__be32 *)p;
		mask = (__be32 *)(p + 4);
		/* Only a /30 point-to-point netmask is acceptable. */
		if (*mask != cpu_to_be32(0xfffffffc))
			goto slarp_reply_out;
		/* The two usable /30 host addresses end in ..01 / ..10;
		 * reject network (..00) and broadcast (..11). */
		if ((*addr & cpu_to_be32(3)) == cpu_to_be32(0) ||
		    (*addr & cpu_to_be32(3)) == cpu_to_be32(3))
			goto slarp_reply_out;
		/* Our own address is the peer's with the low 2 bits
		 * flipped within the /30. */
		local = *addr ^ cpu_to_be32(3);
		printk(KERN_INFO "%s: got slarp reply: remote ip: %pI4, local ip: %pI4 mask: %pI4\n",
		       lp->netdev->dev->name, addr, &local, mask);
		break;
  slarp_reply_out:
		printk(KERN_INFO "%s: got invalid slarp reply (%pI4/%pI4) - ignored\n",
		       lp->netdev->dev->name, addr, mask);
		break;
	case CISCO_SLARP_KEEPALIVE:
		/* Round the elapsed time since the last keepalive to
		 * whole seconds and warn on a period mismatch. */
		period = (int)((jiffies - lp->cisco_last_slarp_in
				+ HZ/2 - 1) / HZ);
		if (lp->cisco_debserint &&
				(period != lp->cisco_keepalive_period) &&
				lp->cisco_last_slarp_in) {
			printk(KERN_DEBUG "%s: Keepalive period mismatch - "
				"is %d but should be %d.\n",
				lp->netdev->dev->name, period,
				lp->cisco_keepalive_period);
		}
		lp->cisco_last_slarp_in = jiffies;
		my_seq = be32_to_cpup((__be32 *)(p + 0));
		your_seq = be32_to_cpup((__be32 *)(p + 4));
		p += 10;
		lp->cisco_yourseq = my_seq;
		lp->cisco_mineseen = your_seq;
		break;
	}
}

/* Demultiplex an incoming Cisco-HDLC frame: SLARP and CDP are consumed
 * here, anything else is handed to the network stack. */
static void
isdn_net_ciscohdlck_receive(isdn_net_local *lp, struct sk_buff *skb)
{
	unsigned char *p;
 	u8 addr;
 	u8 ctrl;
 	u16 type;

	if (skb->len < 4)
		goto out_free;

	p = skb->data;
	addr = *(u8 *)(p + 0);
	ctrl = *(u8 *)(p + 1);
	type = be16_to_cpup((__be16 *)(p + 2));
	p += 4;
	skb_pull(skb, 4);

	if (addr != CISCO_ADDR_UNICAST && addr != CISCO_ADDR_BROADCAST) {
		printk(KERN_WARNING "%s: Unknown Cisco addr 0x%02x\n",
		       lp->netdev->dev->name, addr);
		goto out_free;
	}
	if (ctrl != CISCO_CTRL) {
		printk(KERN_WARNING "%s: Unknown Cisco ctrl 0x%02x\n",
		       lp->netdev->dev->name, ctrl);
		goto out_free;
	}

	switch (type) {
	case CISCO_TYPE_SLARP:
		isdn_net_ciscohdlck_slarp_in(lp, skb);
		goto out_free;
	case CISCO_TYPE_CDP:
		if (lp->cisco_debserint)
			printk(KERN_DEBUG "%s: Received CDP packet. use "
				"\"no cdp enable\" on cisco.\n",
				lp->netdev->dev->name);
		goto out_free;
	default:
		/* no special cisco protocol */
		skb->protocol = htons(type);
		netif_rx(skb);
		return;
	}

 out_free:
	kfree_skb(skb);
}

/*
 * Got a packet from ISDN-Channel.
*/
/*
 * isdn_net_receive() - decapsulate an incoming frame per p_encap and
 * feed it to netif_rx(); for bundled (slave) devices the packet is
 * accounted to and delivered via the master.  Resets the hangup timer
 * on data traffic for both the receiving channel and the master.
 */
static void
isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
{
	isdn_net_local *lp = netdev_priv(ndev);
	isdn_net_local *olp = lp;	/* original 'lp' */
#ifdef CONFIG_ISDN_X25
	struct concap_proto *cprot = lp -> netdev -> cprot;
#endif
	lp->transcount += skb->len;

	lp->stats.rx_packets++;
	lp->stats.rx_bytes += skb->len;
	if (lp->master) {
		/* Bundling: If device is a slave-device, deliver to master, also
		 * handle master's statistics and hangup-timeout
		 */
		ndev = lp->master;
		lp = netdev_priv(ndev);
		lp->stats.rx_packets++;
		lp->stats.rx_bytes += skb->len;
	}
	skb->dev = ndev;
	skb->pkt_type = PACKET_HOST;
	skb_reset_mac_header(skb);
#ifdef ISDN_DEBUG_NET_DUMP
	isdn_dumppkt("R:", skb->data, skb->len, 40);
#endif
	switch (lp->p_encap) {
		case ISDN_NET_ENCAP_ETHER:
			/* Ethernet over ISDN */
			olp->huptimer = 0;
			lp->huptimer = 0;
			skb->protocol = isdn_net_type_trans(skb, ndev);
			break;
		case ISDN_NET_ENCAP_UIHDLC:
			/* HDLC with UI-frame (for ispa with -h1 option) */
			olp->huptimer = 0;
			lp->huptimer = 0;
			skb_pull(skb, 2);
			/* Fall through */
		case ISDN_NET_ENCAP_RAWIP:
			/* RAW-IP without MAC-Header */
			olp->huptimer = 0;
			lp->huptimer = 0;
			skb->protocol = htons(ETH_P_IP);
			break;
		case ISDN_NET_ENCAP_CISCOHDLCK:
			isdn_net_ciscohdlck_receive(lp, skb);
			return;
		case ISDN_NET_ENCAP_CISCOHDLC:
			/* CISCO-HDLC IP with type field and  fake I-frame-header */
			skb_pull(skb, 2);
			/* Fall through */
		case ISDN_NET_ENCAP_IPTYP:
			/* IP with type field */
			olp->huptimer = 0;
			lp->huptimer = 0;
			skb->protocol = *(__be16 *)&(skb->data[0]);
			skb_pull(skb, 2);
			/* 0xFFFF after the type field marks raw 802.3
			 * (see the IPX hack in isdn_net_type_trans). */
			if (*(unsigned short *) skb->data == 0xFFFF)
				skb->protocol = htons(ETH_P_802_3);
			break;
#ifdef CONFIG_ISDN_PPP
		case ISDN_NET_ENCAP_SYNCPPP:
			/* huptimer is done in isdn_ppp_push_higher */
			isdn_ppp_receive(lp->netdev, olp, skb);
			return;
#endif

		default:
#ifdef CONFIG_ISDN_X25
		  /* try if there are generic sync_device receiver routines */
			if(cprot) if(cprot -> pops)
				if( cprot -> pops -> data_ind){
					cprot -> pops -> data_ind(cprot,skb);
					return;
				};
#endif /* CONFIG_ISDN_X25 */
			printk(KERN_WARNING "%s: unknown encapsulation, dropping\n",
			       lp->netdev->dev->name);
			kfree_skb(skb);
			return;
	}

	netif_rx(skb);
	return;
}

/*
 * A packet arrived via ISDN. Search interface-chain for a corresponding
 * interface. If found, deliver packet to receiver-function and return 1,
 * else return 0.
 */
int
isdn_net_rcv_skb(int idx, struct sk_buff *skb)
{
	isdn_net_dev *p = dev->rx_netdev[idx];

	if (p) {
		isdn_net_local *lp = p->local;
		/* Only deliver on a fully established connection. */
		if ((lp->flags & ISDN_NET_CONNECTED) &&
		    (!lp->dialstate)) {
			isdn_net_receive(p->dev, skb);
			return 1;
		}
	}
	return 0;
}

/*
 * build an header
 * depends on encaps that is being used.
 *
 * Returns the number of header bytes pushed onto the skb.
 */
static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
			   unsigned short type,
			   const void *daddr, const void *saddr, unsigned plen)
{
	isdn_net_local *lp = netdev_priv(dev);
	unsigned char *p;
	ushort len = 0;

	switch (lp->p_encap) {
		case ISDN_NET_ENCAP_ETHER:
			len = eth_header(skb, dev, type, daddr, saddr, plen);
			break;
#ifdef CONFIG_ISDN_PPP
		case ISDN_NET_ENCAP_SYNCPPP:
			/* stick on a fake header to keep fragmentation code happy. */
			len = IPPP_MAX_HEADER;
			skb_push(skb,len);
			break;
#endif
		case ISDN_NET_ENCAP_RAWIP:
			printk(KERN_WARNING "isdn_net_header called with RAW_IP!\n");
			len = 0;
			break;
		case ISDN_NET_ENCAP_IPTYP:
			/* ethernet type field */
			*((__be16 *)skb_push(skb, 2)) = htons(type);
			len = 2;
			break;
		case ISDN_NET_ENCAP_UIHDLC:
			/* HDLC with UI-Frames (for ispa with -h1 option) */
			*((__be16 *)skb_push(skb, 2)) = htons(0x0103);
			len = 2;
			break;
		case ISDN_NET_ENCAP_CISCOHDLC:
		case ISDN_NET_ENCAP_CISCOHDLCK:
			p = skb_push(skb, 4);
			*(u8 *)(p + 0) = CISCO_ADDR_UNICAST;
			*(u8 *)(p + 1) = CISCO_CTRL;
			*(__be16 *)(p + 2) = cpu_to_be16(type);
			p += 4;
			len = 4;
			break;
#ifdef CONFIG_ISDN_X25
		default:
		  /* try if there are generic concap protocol routines */
			if( lp-> netdev -> cprot ){
				printk(KERN_WARNING "isdn_net_header called with concap_proto!\n");
				len = 0;
				break;
			}
			break;
#endif /* CONFIG_ISDN_X25 */
	}
	return len;
}

/* We don't need to send arp, because we have point-to-point connections.
 *
 * For ETHER encapsulation only: resolve the destination MAC via ARP
 * (IP only); everything else is a no-op.
 */
static int
isdn_net_rebuild_header(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	isdn_net_local *lp = netdev_priv(dev);
	int ret = 0;

	if (lp->p_encap == ISDN_NET_ENCAP_ETHER) {
		struct ethhdr *eth = (struct ethhdr *) skb->data;

		/*
		 *      Only ARP/IP is currently supported
		 */

		if (eth->h_proto != htons(ETH_P_IP)) {
			printk(KERN_WARNING
			       "isdn_net: %s don't know how to resolve type %d addresses?\n",
			       dev->name, (int) eth->h_proto);
			memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
			return 0;
		}
		/*
		 *      Try to get ARP to resolve the header.
		 */
#ifdef CONFIG_INET
		ret = arp_find(eth->h_dest, skb);
#endif
	}
	return ret;
}

/* Hardware-header caching: supported only for ETHER encapsulation. */
static int isdn_header_cache(const struct neighbour *neigh, struct hh_cache *hh)
{
	const struct net_device *dev = neigh->dev;
	isdn_net_local *lp = netdev_priv(dev);

	if (lp->p_encap == ISDN_NET_ENCAP_ETHER)
		return eth_header_cache(neigh, hh);
	return -1;
}

static void isdn_header_cache_update(struct hh_cache *hh,
				     const struct net_device *dev,
				     const unsigned char *haddr)
{
	isdn_net_local *lp = netdev_priv(dev);
	if (lp->p_encap == ISDN_NET_ENCAP_ETHER)
		eth_header_cache_update(hh, dev, haddr);
}

static const struct header_ops isdn_header_ops = {
	.create = isdn_net_header,
	.rebuild = isdn_net_rebuild_header,
	.cache = isdn_header_cache,
	.cache_update = isdn_header_cache_update,
};

/*
 * Interface-setup. (just after registering a new interface)
 */
static int
isdn_net_init(struct net_device *ndev)
{
	ushort max_hlhdr_len = 0;
	int drvidx;

	/*
	 * up till binding we ask the protocol layer to reserve as much
	 * as we might need for HL layer
	 */

	for (drvidx = 0; drvidx < ISDN_MAX_DRIVERS; drvidx++)
		if (dev->drv[drvidx])
			if (max_hlhdr_len < dev->drv[drvidx]->interface->hl_hdrlen)
				max_hlhdr_len = dev->drv[drvidx]->interface->hl_hdrlen;

	ndev->hard_header_len = ETH_HLEN + max_hlhdr_len;
	return 0;
}

/* Swap the pre-bound channel (0 <-> 1) of every interface exclusively
 * bound to driver drvidx; see the ICN workaround in
 * isdn_net_find_icall(). */
static void
isdn_net_swapbind(int drvidx)
{
	isdn_net_dev *p;

#ifdef ISDN_DEBUG_NET_ICALL
	printk(KERN_DEBUG "n_fi: swapping ch of %d\n", drvidx);
#endif
	p = dev->netdev;
	while (p) {
		if (p->local->pre_device == drvidx)
			switch (p->local->pre_channel) {
				case 0:
					p->local->pre_channel = 1;
					break;
				case 1:
					p->local->pre_channel = 0;
					break;
			}
		p = (isdn_net_dev *) p->next;
	}
}

/* Exchange the ISDN_USAGE_EXCLUSIVE bits of two channel slots. */
static void
isdn_net_swap_usage(int i1, int i2)
{
	int u1 = dev->usage[i1] & ISDN_USAGE_EXCLUSIVE;
	int u2 = dev->usage[i2] & ISDN_USAGE_EXCLUSIVE;

#ifdef ISDN_DEBUG_NET_ICALL
	printk(KERN_DEBUG "n_fi: usage of %d and %d\n", i1, i2);
#endif
	dev->usage[i1] &= ~ISDN_USAGE_EXCLUSIVE;
	dev->usage[i1] |= u2;
	dev->usage[i2] &= ~ISDN_USAGE_EXCLUSIVE;
	dev->usage[i2] |= u1;
	isdn_info_update();
}

/*
 * An incoming call-request has arrived.
 * Search the interface-chain for an appropriate interface.
 * If found, connect the interface to the ISDN-channel and initiate
 * D- and B-Channel-setup. If secure-flag is set, accept only
 * configured phone-numbers. If callback-flag is set, initiate
 * callback-dialing.
 *
 * Return-Value: 0 = No appropriate interface for this call.
 *               1 = Call accepted
 *               2 = Reject call, wait cbdelay, then call back
 *               3 = Reject call
 *               4 = Wait cbdelay, then call back
 *               5 = No appropriate interface for this call,
 *                   would eventually match if CID was longer.
 *
 * NOTE(review): this function continues past the end of this chunk;
 * only its opening portion is visible here.
 */
int
isdn_net_find_icall(int di, int ch, int idx, setup_parm *setup)
{
	char *eaz;
	int si1;
	int si2;
	int ematch;
	int wret;
	int swapped;
	int sidx = 0;
	u_long flags;
	isdn_net_dev *p;
	isdn_net_phone *n;
	char nr[ISDN_MSNLEN];
	char *my_eaz;

	/* Search name in netdev-chain */
	if (!setup->phone[0]) {
		nr[0] = '0';
		nr[1] = '\0';
		printk(KERN_INFO "isdn_net: Incoming call without OAD, assuming '0'\n");
	} else
		strlcpy(nr, setup->phone, ISDN_MSNLEN);
	si1 = (int) setup->si1;
	si2 = (int) setup->si2;
	if (!setup->eazmsn[0]) {
		printk(KERN_WARNING "isdn_net: Incoming call without CPN, assuming '0'\n");
		eaz = "0";
	} else
		eaz = setup->eazmsn;
	if (dev->net_verbose > 1)
		printk(KERN_INFO "isdn_net: call from %s,%d,%d -> %s\n", nr, si1, si2, eaz);
	/* Accept DATA and VOICE calls at this stage
	 * local eaz is checked later for allowed call types
	 */
	if ((si1 != 7) && (si1 != 1)) {
		if (dev->net_verbose > 1)
			printk(KERN_INFO "isdn_net: Service-Indicator not 1 or 7, ignored\n");
		return 0;
	}
	n = (isdn_net_phone *) 0;
	p = dev->netdev;
	ematch = wret = swapped = 0;
#ifdef ISDN_DEBUG_NET_ICALL
	printk(KERN_DEBUG "n_fi: di=%d ch=%d idx=%d usg=%d\n", di, ch, idx,
	       dev->usage[idx]);
#endif
	while (p) {
		int matchret;
		isdn_net_local *lp = p->local;

		/* If last check has triggered as binding-swap, revert it */
		switch (swapped) {
			case 2:
				isdn_net_swap_usage(idx, sidx);
				/* fall through */
			case 1:
isdn_net_swapbind(di); break; } swapped = 0; /* check acceptable call types for DOV */ my_eaz = isdn_map_eaz2msn(lp->msn, di); if (si1 == 1) { /* it's a DOV call, check if we allow it */ if (*my_eaz == 'v' || *my_eaz == 'V' || *my_eaz == 'b' || *my_eaz == 'B') my_eaz++; /* skip to allow a match */ else my_eaz = NULL; /* force non match */ } else { /* it's a DATA call, check if we allow it */ if (*my_eaz == 'b' || *my_eaz == 'B') my_eaz++; /* skip to allow a match */ } if (my_eaz) matchret = isdn_msncmp(eaz, my_eaz); else matchret = 1; if (!matchret) ematch = 1; /* Remember if more numbers eventually can match */ if (matchret > wret) wret = matchret; #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: if='%s', l.msn=%s, l.flags=%d, l.dstate=%d\n", p->dev->name, lp->msn, lp->flags, lp->dialstate); #endif if ((!matchret) && /* EAZ is matching */ (((!(lp->flags & ISDN_NET_CONNECTED)) && /* but not connected */ (USG_NONE(dev->usage[idx]))) || /* and ch. unused or */ ((((lp->dialstate == 4) || (lp->dialstate == 12)) && /* if dialing */ (!(lp->flags & ISDN_NET_CALLBACK))) /* but no callback */ ))) { #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: match1, pdev=%d pch=%d\n", lp->pre_device, lp->pre_channel); #endif if (dev->usage[idx] & ISDN_USAGE_EXCLUSIVE) { if ((lp->pre_channel != ch) || (lp->pre_device != di)) { /* Here we got a problem: * If using an ICN-Card, an incoming call is always signaled on * on the first channel of the card, if both channels are * down. However this channel may be bound exclusive. If the * second channel is free, this call should be accepted. * The solution is horribly but it runs, so what: * We exchange the exclusive bindings of the two channels, the * corresponding variables in the interface-structs. */ if (ch == 0) { sidx = isdn_dc2minor(di, 1); #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: ch is 0\n"); #endif if (USG_NONE(dev->usage[sidx])) { /* Second Channel is free, now see if it is bound * exclusive too. 
*/ if (dev->usage[sidx] & ISDN_USAGE_EXCLUSIVE) { #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: 2nd channel is down and bound\n"); #endif /* Yes, swap bindings only, if the original * binding is bound to channel 1 of this driver */ if ((lp->pre_device == di) && (lp->pre_channel == 1)) { isdn_net_swapbind(di); swapped = 1; } else { /* ... else iterate next device */ p = (isdn_net_dev *) p->next; continue; } } else { #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: 2nd channel is down and unbound\n"); #endif /* No, swap always and swap excl-usage also */ isdn_net_swap_usage(idx, sidx); isdn_net_swapbind(di); swapped = 2; } /* Now check for exclusive binding again */ #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: final check\n"); #endif if ((dev->usage[idx] & ISDN_USAGE_EXCLUSIVE) && ((lp->pre_channel != ch) || (lp->pre_device != di))) { #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: final check failed\n"); #endif p = (isdn_net_dev *) p->next; continue; } } } else { /* We are already on the second channel, so nothing to do */ #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: already on 2nd channel\n"); #endif } } } #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: match2\n"); #endif n = lp->phone[0]; if (lp->flags & ISDN_NET_SECURE) { while (n) { if (!isdn_msncmp(nr, n->num)) break; n = (isdn_net_phone *) n->next; } } if (n || (!(lp->flags & ISDN_NET_SECURE))) { #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: match3\n"); #endif /* matching interface found */ /* * Is the state STOPPED? * If so, no dialin is allowed, * so reject actively. * */ if (ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_OFF) { printk(KERN_INFO "incoming call, interface %s `stopped' -> rejected\n", p->dev->name); return 3; } /* * Is the interface up? * If not, reject the call actively. */ if (!isdn_net_device_started(p)) { printk(KERN_INFO "%s: incoming call, interface down -> rejected\n", p->dev->name); return 3; } /* Interface is up, now see if it's a slave. 
If so, see if * it's master and parent slave is online. If not, reject the call. */ if (lp->master) { isdn_net_local *mlp = ISDN_MASTER_PRIV(lp); printk(KERN_DEBUG "ICALLslv: %s\n", p->dev->name); printk(KERN_DEBUG "master=%s\n", lp->master->name); if (mlp->flags & ISDN_NET_CONNECTED) { printk(KERN_DEBUG "master online\n"); /* Master is online, find parent-slave (master if first slave) */ while (mlp->slave) { if (ISDN_SLAVE_PRIV(mlp) == lp) break; mlp = ISDN_SLAVE_PRIV(mlp); } } else printk(KERN_DEBUG "master offline\n"); /* Found parent, if it's offline iterate next device */ printk(KERN_DEBUG "mlpf: %d\n", mlp->flags & ISDN_NET_CONNECTED); if (!(mlp->flags & ISDN_NET_CONNECTED)) { p = (isdn_net_dev *) p->next; continue; } } if (lp->flags & ISDN_NET_CALLBACK) { int chi; /* * Is the state MANUAL? * If so, no callback can be made, * so reject actively. * */ if (ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_OFF) { printk(KERN_INFO "incoming call for callback, interface %s `off' -> rejected\n", p->dev->name); return 3; } printk(KERN_DEBUG "%s: call from %s -> %s, start callback\n", p->dev->name, nr, eaz); if (lp->phone[1]) { /* Grab a free ISDN-Channel */ spin_lock_irqsave(&dev->lock, flags); if ((chi = isdn_get_free_channel( ISDN_USAGE_NET, lp->l2_proto, lp->l3_proto, lp->pre_device, lp->pre_channel, lp->msn) ) < 0) { printk(KERN_WARNING "isdn_net_find_icall: No channel for %s\n", p->dev->name); spin_unlock_irqrestore(&dev->lock, flags); return 0; } /* Setup dialstate. */ lp->dtimer = 0; lp->dialstate = 11; /* Connect interface with channel */ isdn_net_bind_channel(lp, chi); #ifdef CONFIG_ISDN_PPP if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) if (isdn_ppp_bind(lp) < 0) { spin_unlock_irqrestore(&dev->lock, flags); isdn_net_unbind_channel(lp); return 0; } #endif spin_unlock_irqrestore(&dev->lock, flags); /* Initiate dialing by returning 2 or 4 */ return (lp->flags & ISDN_NET_CBHUP) ? 
2 : 4; } else printk(KERN_WARNING "isdn_net: %s: No phone number\n", p->dev->name); return 0; } else { printk(KERN_DEBUG "%s: call from %s -> %s accepted\n", p->dev->name, nr, eaz); /* if this interface is dialing, it does it probably on a different device, so free this device */ if ((lp->dialstate == 4) || (lp->dialstate == 12)) { #ifdef CONFIG_ISDN_PPP if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) isdn_ppp_free(lp); #endif isdn_net_lp_disconnected(lp); isdn_free_channel(lp->isdn_device, lp->isdn_channel, ISDN_USAGE_NET); } spin_lock_irqsave(&dev->lock, flags); dev->usage[idx] &= ISDN_USAGE_EXCLUSIVE; dev->usage[idx] |= ISDN_USAGE_NET; strcpy(dev->num[idx], nr); isdn_info_update(); dev->st_netdev[idx] = lp->netdev; lp->isdn_device = di; lp->isdn_channel = ch; lp->ppp_slot = -1; lp->flags |= ISDN_NET_CONNECTED; lp->dialstate = 7; lp->dtimer = 0; lp->outgoing = 0; lp->huptimer = 0; lp->hupflags |= ISDN_WAITCHARGE; lp->hupflags &= ~ISDN_HAVECHARGE; #ifdef CONFIG_ISDN_PPP if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) { if (isdn_ppp_bind(lp) < 0) { isdn_net_unbind_channel(lp); spin_unlock_irqrestore(&dev->lock, flags); return 0; } } #endif spin_unlock_irqrestore(&dev->lock, flags); return 1; } } } p = (isdn_net_dev *) p->next; } /* If none of configured EAZ/MSN matched and not verbose, be silent */ if (!ematch || dev->net_verbose) printk(KERN_INFO "isdn_net: call from %s -> %d %s ignored\n", nr, di, eaz); return (wret == 2)?5:0; } /* * Search list of net-interfaces for an interface with given name. */ isdn_net_dev * isdn_net_findif(char *name) { isdn_net_dev *p = dev->netdev; while (p) { if (!strcmp(p->dev->name, name)) return p; p = (isdn_net_dev *) p->next; } return (isdn_net_dev *) NULL; } /* * Force a net-interface to dial out. * This is called from the userlevel-routine below or * from isdn_net_start_xmit(). 
 */
static int
isdn_net_force_dial_lp(isdn_net_local * lp)
{
	/* Only dial if not already connected and not already dialing. */
	if ((!(lp->flags & ISDN_NET_CONNECTED)) && !lp->dialstate) {
		int chi;
		if (lp->phone[1]) {
			ulong flags;

			/* Grab a free ISDN-Channel */
			spin_lock_irqsave(&dev->lock, flags);
			if ((chi = isdn_get_free_channel(
				     ISDN_USAGE_NET,
				     lp->l2_proto,
				     lp->l3_proto,
				     lp->pre_device,
				     lp->pre_channel,
				     lp->msn)) < 0) {
				printk(KERN_WARNING "isdn_net_force_dial: No channel for %s\n",
				       lp->netdev->dev->name);
				spin_unlock_irqrestore(&dev->lock, flags);
				return -EAGAIN;
			}
			lp->dialstate = 1;
			/* Connect interface with channel */
			isdn_net_bind_channel(lp, chi);
#ifdef CONFIG_ISDN_PPP
			if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP)
				if (isdn_ppp_bind(lp) < 0) {
					isdn_net_unbind_channel(lp);
					spin_unlock_irqrestore(&dev->lock, flags);
					return -EAGAIN;
				}
#endif
			/* Initiate dialing */
			spin_unlock_irqrestore(&dev->lock, flags);
			isdn_net_dial();
			return 0;
		} else
			return -EINVAL;	/* no outgoing number configured */
	} else
		return -EBUSY;
}

/*
 * This is called from certain upper protocol layers (multilink ppp
 * and x25iface encapsulation module) that want to initiate dialing
 * themselves.  Dialing is only allowed in AUTO dial mode.
 */
int
isdn_net_dial_req(isdn_net_local * lp)
{
	/* is there a better error code? */
	if (!(ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_AUTO))
		return -EBUSY;

	return isdn_net_force_dial_lp(lp);
}

/*
 * Force a net-interface to dial out.
 * This is always called from within userspace (ISDN_IOCTL_NET_DIAL).
 */
int
isdn_net_force_dial(char *name)
{
	isdn_net_dev *p = isdn_net_findif(name);

	if (!p)
		return -ENODEV;
	return (isdn_net_force_dial_lp(p->local));
}

/* The ISDN-specific entries in the device structure.
 */
static const struct net_device_ops isdn_netdev_ops = {
	.ndo_init	  = isdn_net_init,
	.ndo_open	  = isdn_net_open,
	.ndo_stop	  = isdn_net_close,
	.ndo_do_ioctl	  = isdn_net_ioctl,
	.ndo_start_xmit	  = isdn_net_start_xmit,
	.ndo_get_stats	  = isdn_net_get_stats,
	.ndo_tx_timeout	  = isdn_net_tx_timeout,
};

/*
 * Helper for alloc_netdev(): initialize the net_device and the
 * private isdn_net_local area with the driver's default settings
 * (raw-IP encapsulation, unbound channel, manual dial mode).
 */
static void _isdn_setup(struct net_device *dev)
{
	isdn_net_local *lp = netdev_priv(dev);

	ether_setup(dev);

	/* Setup the generic properties */
	dev->flags = IFF_NOARP|IFF_POINTOPOINT;

	/* isdn prepends a header in the tx path, can't share skbs */
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->header_ops = NULL;
	dev->netdev_ops = &isdn_netdev_ops;

	/* for clients with MPPP maybe higher values better */
	dev->tx_queue_len = 30;

	lp->p_encap = ISDN_NET_ENCAP_RAWIP;
	lp->magic = ISDN_NET_MAGIC;
	lp->last = lp;		/* circular queue of one */
	lp->next = lp;
	lp->isdn_device = -1;	/* -1 == not bound to a driver/channel yet */
	lp->isdn_channel = -1;
	lp->pre_device = -1;
	lp->pre_channel = -1;
	lp->exclusive = -1;
	lp->ppp_slot = -1;
	lp->pppbind = -1;
	skb_queue_head_init(&lp->super_tx_queue);
	lp->l2_proto = ISDN_PROTO_L2_X75I;
	lp->l3_proto = ISDN_PROTO_L3_TRANS;
	lp->triggercps = 6000;
	lp->slavedelay = 10 * HZ;
	lp->hupflags = ISDN_INHUP;	/* Do hangup even on incoming calls */
	lp->onhtime = 10;	/* Default hangup-time for saving costs */
	lp->dialmax = 1;
	/* Hangup before Callback, manual dial */
	lp->flags = ISDN_NET_CBHUP | ISDN_NET_DM_MANUAL;
	lp->cbdelay = 25;	/* Wait 5 secs before Callback */
	lp->dialtimeout = -1;	/* Infinite Dial-Timeout */
	lp->dialwait = 5 * HZ;	/* Wait 5 sec. after failed dial */
	lp->dialstarted = 0;	/* Jiffies of last dial-start */
	lp->dialwait_timer = 0;	/* Jiffies of earliest next dial-start */
}

/*
 * Allocate a new network-interface and initialize its data structures.
*/ char * isdn_net_new(char *name, struct net_device *master) { isdn_net_dev *netdev; /* Avoid creating an existing interface */ if (isdn_net_findif(name)) { printk(KERN_WARNING "isdn_net: interface %s already exists\n", name); return NULL; } if (name == NULL) return NULL; if (!(netdev = kzalloc(sizeof(isdn_net_dev), GFP_KERNEL))) { printk(KERN_WARNING "isdn_net: Could not allocate net-device\n"); return NULL; } netdev->dev = alloc_netdev(sizeof(isdn_net_local), name, _isdn_setup); if (!netdev->dev) { printk(KERN_WARNING "isdn_net: Could not allocate network device\n"); kfree(netdev); return NULL; } netdev->local = netdev_priv(netdev->dev); if (master) { /* Device shall be a slave */ struct net_device *p = MASTER_TO_SLAVE(master); struct net_device *q = master; netdev->local->master = master; /* Put device at end of slave-chain */ while (p) { q = p; p = MASTER_TO_SLAVE(p); } MASTER_TO_SLAVE(q) = netdev->dev; } else { /* Device shall be a master */ /* * Watchdog timer (currently) for master only. 
*/ netdev->dev->watchdog_timeo = ISDN_NET_TX_TIMEOUT; if (register_netdev(netdev->dev) != 0) { printk(KERN_WARNING "isdn_net: Could not register net-device\n"); free_netdev(netdev->dev); kfree(netdev); return NULL; } } netdev->queue = netdev->local; spin_lock_init(&netdev->queue_lock); netdev->local->netdev = netdev; INIT_WORK(&netdev->local->tqueue, isdn_net_softint); spin_lock_init(&netdev->local->xmit_lock); /* Put into to netdev-chain */ netdev->next = (void *) dev->netdev; dev->netdev = netdev; return netdev->dev->name; } char * isdn_net_newslave(char *parm) { char *p = strchr(parm, ','); isdn_net_dev *n; char newname[10]; if (p) { /* Slave-Name MUST not be empty */ if (!strlen(p + 1)) return NULL; strcpy(newname, p + 1); *p = 0; /* Master must already exist */ if (!(n = isdn_net_findif(parm))) return NULL; /* Master must be a real interface, not a slave */ if (n->local->master) return NULL; /* Master must not be started yet */ if (isdn_net_device_started(n)) return NULL; return (isdn_net_new(newname, n->dev)); } return NULL; } /* * Set interface-parameters. * Always set all parameters, so the user-level application is responsible * for not overwriting existing setups. It has to get the current * setup first, if only selected parameters are to be changed. 
 */
/*
 * Apply a full configuration (from the ISDN_IOCTL_NET_SETCFG ioctl)
 * to the named interface: encapsulation, driver/channel binding,
 * protocols, dial/hangup behaviour.  Returns 0 or a negative errno.
 */
int
isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
{
	isdn_net_dev *p = isdn_net_findif(cfg->name);
	ulong features;
	int i;
	int drvidx;
	int chidx;
	char drvid[25];

	if (p) {
		isdn_net_local *lp = p->local;

		/* See if any registered driver supports the features we want */
		features = ((1 << cfg->l2_proto) << ISDN_FEATURE_L2_SHIFT) |
			((1 << cfg->l3_proto) << ISDN_FEATURE_L3_SHIFT);
		for (i = 0; i < ISDN_MAX_DRIVERS; i++)
			if (dev->drv[i])
				if ((dev->drv[i]->interface->features & features) == features)
					break;
		if (i == ISDN_MAX_DRIVERS) {
			printk(KERN_WARNING "isdn_net: No driver with selected features\n");
			return -ENODEV;
		}
		if (lp->p_encap != cfg->p_encap){
#ifdef CONFIG_ISDN_X25
			struct concap_proto * cprot = p -> cprot;
#endif
			/* Changing encapsulation is only allowed while down. */
			if (isdn_net_device_started(p)) {
				printk(KERN_WARNING "%s: cannot change encap when if is up\n",
				       p->dev->name);
				return -EBUSY;
			}
#ifdef CONFIG_ISDN_X25
			if( cprot && cprot -> pops )
				cprot -> pops -> proto_del ( cprot );
			p -> cprot = NULL;
			lp -> dops = NULL;
			/* ... , prepare for configuration of new one ... */
			switch ( cfg -> p_encap ){
			case ISDN_NET_ENCAP_X25IFACE:
				lp -> dops = &isdn_concap_reliable_dl_dops;
			}
			/* ... and allocate new one ... */
			p -> cprot = isdn_concap_new( cfg -> p_encap );
			/* p -> cprot == NULL now if p_encap is not supported
			   by means of the concap_proto mechanism */
			/* the protocol is not configured yet; this will
			   happen later when isdn_net_reset() is called */
#endif
		}
		switch ( cfg->p_encap ) {
		case ISDN_NET_ENCAP_SYNCPPP:
#ifndef CONFIG_ISDN_PPP
			printk(KERN_WARNING "%s: SyncPPP support not configured\n",
			       p->dev->name);
			return -EINVAL;
#else
			p->dev->type = ARPHRD_PPP;	/* change ARP type */
			p->dev->addr_len = 0;
#endif
			break;
		case ISDN_NET_ENCAP_X25IFACE:
#ifndef CONFIG_ISDN_X25
			printk(KERN_WARNING "%s: isdn-x25 support not configured\n",
			       p->dev->name);
			return -EINVAL;
#else
			p->dev->type = ARPHRD_X25;	/* change ARP type */
			p->dev->addr_len = 0;
#endif
			break;
		case ISDN_NET_ENCAP_CISCOHDLCK:
			break;
		default:
			if( cfg->p_encap >= 0 &&
			    cfg->p_encap <= ISDN_NET_ENCAP_MAX_ENCAP )
				break;
			printk(KERN_WARNING
			       "%s: encapsulation protocol %d not supported\n",
			       p->dev->name, cfg->p_encap);
			return -EINVAL;
		}
		if (strlen(cfg->drvid)) {
			/* A bind has been requested ... */
			char *c, *e;

			drvidx = -1;
			chidx = -1;
			strcpy(drvid, cfg->drvid);
			if ((c = strchr(drvid, ','))) {
				/* The channel-number is appended to the driver-Id with a comma */
				chidx = (int) simple_strtoul(c + 1, &e, 10);
				if (e == c)
					chidx = -1;
				*c = '\0';
			}
			for (i = 0; i < ISDN_MAX_DRIVERS; i++)
				/* Lookup driver-Id in array */
				if (!(strcmp(dev->drvid[i], drvid))) {
					drvidx = i;
					break;
				}
			if ((drvidx == -1) || (chidx == -1))
				/* Either driver-Id or channel-number invalid */
				return -ENODEV;
		} else {
			/* Parameters are valid, so get them */
			drvidx = lp->pre_device;
			chidx = lp->pre_channel;
		}
		if (cfg->exclusive > 0) {
			unsigned long flags;

			/* If binding is exclusive, try to grab the channel */
			spin_lock_irqsave(&dev->lock, flags);
			if ((i = isdn_get_free_channel(ISDN_USAGE_NET,
						       lp->l2_proto, lp->l3_proto, drvidx,
						       chidx, lp->msn)) < 0) {
				/* Grab failed, because desired channel is in use */
				lp->exclusive = -1;
				spin_unlock_irqrestore(&dev->lock, flags);
				return -EBUSY;
			}
			/* All went ok, so update isdninfo */
			dev->usage[i] = ISDN_USAGE_EXCLUSIVE;
			isdn_info_update();
			spin_unlock_irqrestore(&dev->lock, flags);
			lp->exclusive = i;
		} else {
			/* Non-exclusive binding or unbind. */
			lp->exclusive = -1;
			if ((lp->pre_device != -1) && (cfg->exclusive == -1)) {
				isdn_unexclusive_channel(lp->pre_device, lp->pre_channel);
				isdn_free_channel(lp->pre_device, lp->pre_channel, ISDN_USAGE_NET);
				drvidx = -1;
				chidx = -1;
			}
		}
		strlcpy(lp->msn, cfg->eaz, sizeof(lp->msn));
		lp->pre_device = drvidx;
		lp->pre_channel = chidx;
		lp->onhtime = cfg->onhtime;
		lp->charge = cfg->charge;
		lp->l2_proto = cfg->l2_proto;
		lp->l3_proto = cfg->l3_proto;
		lp->cbdelay = cfg->cbdelay;
		lp->dialmax = cfg->dialmax;
		lp->triggercps = cfg->triggercps;
		lp->slavedelay = cfg->slavedelay * HZ;
		lp->pppbind = cfg->pppbind;
		lp->dialtimeout = cfg->dialtimeout >= 0 ? cfg->dialtimeout * HZ : -1;
		lp->dialwait = cfg->dialwait * HZ;
		if (cfg->secure)
			lp->flags |= ISDN_NET_SECURE;
		else
			lp->flags &= ~ISDN_NET_SECURE;
		if (cfg->cbhup)
			lp->flags |= ISDN_NET_CBHUP;
		else
			lp->flags &= ~ISDN_NET_CBHUP;
		switch (cfg->callback) {
		case 0:
			lp->flags &= ~(ISDN_NET_CALLBACK | ISDN_NET_CBOUT);
			break;
		case 1:
			lp->flags |= ISDN_NET_CALLBACK;
			lp->flags &= ~ISDN_NET_CBOUT;
			break;
		case 2:
			lp->flags |= ISDN_NET_CBOUT;
			lp->flags &= ~ISDN_NET_CALLBACK;
			break;
		}
		lp->flags &= ~ISDN_NET_DIALMODE_MASK;	/* first all bits off */
		if (cfg->dialmode && !(cfg->dialmode & ISDN_NET_DIALMODE_MASK)) {
			/* old isdnctrl version, where only 0 or 1 is given */
			printk(KERN_WARNING
			       "Old isdnctrl version detected! Please update.\n");
			lp->flags |= ISDN_NET_DM_OFF; /* turn on `off' bit */
		} else {
			lp->flags |= cfg->dialmode;	/* turn on selected bits */
		}
		if (cfg->chargehup)
			lp->hupflags |= ISDN_CHARGEHUP;
		else
			lp->hupflags &= ~ISDN_CHARGEHUP;
		if (cfg->ihup)
			lp->hupflags |= ISDN_INHUP;
		else
			lp->hupflags &= ~ISDN_INHUP;
		if (cfg->chargeint > 10) {
			lp->hupflags |= ISDN_CHARGEHUP | ISDN_HAVECHARGE | ISDN_MANCHARGE;
			lp->chargeint = cfg->chargeint * HZ;
		}
		if (cfg->p_encap != lp->p_encap) {
			/* Switch header_ops/flags to match the new encapsulation. */
			if (cfg->p_encap == ISDN_NET_ENCAP_RAWIP) {
				p->dev->header_ops = NULL;
				p->dev->flags = IFF_NOARP|IFF_POINTOPOINT;
			} else {
				p->dev->header_ops = &isdn_header_ops;
				if (cfg->p_encap == ISDN_NET_ENCAP_ETHER)
					p->dev->flags = IFF_BROADCAST | IFF_MULTICAST;
				else
					p->dev->flags = IFF_NOARP|IFF_POINTOPOINT;
			}
		}
		lp->p_encap = cfg->p_encap;
		return 0;
	}
	return -ENODEV;
}

/*
 * Perform get-interface-parameters.ioctl
 */
int
isdn_net_getcfg(isdn_net_ioctl_cfg * cfg)
{
	isdn_net_dev *p = isdn_net_findif(cfg->name);

	if (p) {
		isdn_net_local *lp = p->local;

		strcpy(cfg->eaz, lp->msn);
		cfg->exclusive = lp->exclusive;
		if (lp->pre_device >= 0) {
			sprintf(cfg->drvid, "%s,%d", dev->drvid[lp->pre_device],
				lp->pre_channel);
		} else
			cfg->drvid[0] = '\0';
		cfg->onhtime = lp->onhtime;
		cfg->charge = lp->charge;
		cfg->l2_proto = lp->l2_proto;
		cfg->l3_proto = lp->l3_proto;
		cfg->p_encap = lp->p_encap;
		cfg->secure = (lp->flags & ISDN_NET_SECURE) ? 1 : 0;
		cfg->callback = 0;
		if (lp->flags & ISDN_NET_CALLBACK)
			cfg->callback = 1;
		if (lp->flags & ISDN_NET_CBOUT)
			cfg->callback = 2;
		cfg->cbhup = (lp->flags & ISDN_NET_CBHUP) ? 1 : 0;
		cfg->dialmode = lp->flags & ISDN_NET_DIALMODE_MASK;
		/* NOTE(review): 4 and 8 look like the ISDN_CHARGEHUP /
		 * ISDN_INHUP bit values written by isdn_net_setcfg above —
		 * confirm against the header before replacing the literals. */
		cfg->chargehup = (lp->hupflags & 4) ? 1 : 0;
		cfg->ihup = (lp->hupflags & 8) ? 1 : 0;
		cfg->cbdelay = lp->cbdelay;
		cfg->dialmax = lp->dialmax;
		cfg->triggercps = lp->triggercps;
		cfg->slavedelay = lp->slavedelay / HZ;
		cfg->chargeint = (lp->hupflags & ISDN_CHARGEHUP) ?
			(lp->chargeint / HZ) : 0;
		cfg->pppbind = lp->pppbind;
		cfg->dialtimeout = lp->dialtimeout >= 0 ? lp->dialtimeout / HZ : -1;
		cfg->dialwait = lp->dialwait / HZ;
		if (lp->slave) {
			if (strlen(lp->slave->name) >= 10)
				strcpy(cfg->slave, "too-long");
			else
				strcpy(cfg->slave, lp->slave->name);
		} else
			cfg->slave[0] = '\0';
		if (lp->master) {
			if (strlen(lp->master->name) >= 10)
				strcpy(cfg->master, "too-long");
			else
				strcpy(cfg->master, lp->master->name);
		} else
			cfg->master[0] = '\0';
		return 0;
	}
	return -ENODEV;
}

/*
 * Add a phone-number to an interface.
 */
int
isdn_net_addphone(isdn_net_ioctl_phone * phone)
{
	isdn_net_dev *p = isdn_net_findif(phone->name);
	isdn_net_phone *n;

	if (p) {
		if (!(n = kmalloc(sizeof(isdn_net_phone), GFP_KERNEL)))
			return -ENOMEM;
		strlcpy(n->num, phone->phone, sizeof(n->num));
		/* prepend to the in/out list selected by bit 0 of 'outgoing' */
		n->next = p->local->phone[phone->outgoing & 1];
		p->local->phone[phone->outgoing & 1] = n;
		return 0;
	}
	return -ENODEV;
}

/*
 * Copy a string of all phone-numbers of an interface to user space.
 * This might sleep and must be called with the isdn semaphore down.
 */
int
isdn_net_getphones(isdn_net_ioctl_phone * phone, char __user *phones)
{
	isdn_net_dev *p = isdn_net_findif(phone->name);
	int inout = phone->outgoing & 1;
	int more = 0;
	int count = 0;
	isdn_net_phone *n;

	if (!p)
		return -ENODEV;
	inout &= 1;
	/* Emit the numbers space-separated; each copy includes the NUL,
	 * which the next iteration's separator then overwrites. */
	for (n = p->local->phone[inout]; n; n = n->next) {
		if (more) {
			/* NOTE(review): put_user() return value is ignored
			 * here — a fault is only caught by copy_to_user below */
			put_user(' ', phones++);
			count++;
		}
		if (copy_to_user(phones, n->num, strlen(n->num) + 1)) {
			return -EFAULT;
		}
		phones += strlen(n->num);
		count += strlen(n->num);
		more = 1;
	}
	put_user(0, phones);	/* final terminator */
	count++;
	return count;		/* number of bytes written, incl. NUL */
}

/*
 * Copy a string containing the peer's phone number of a connected interface
 * to user space.
 */
int
isdn_net_getpeer(isdn_net_ioctl_phone *phone, isdn_net_ioctl_phone __user *peer)
{
	isdn_net_dev *p = isdn_net_findif(phone->name);
	int ch, dv, idx;

	if (!p)
		return -ENODEV;
	/*
	 * Theoretical race: while this executes, the remote number might
	 * become invalid (hang up) or change (new connection), resulting
	 * in (partially) wrong number copied to user. This race
	 * currently ignored.
	 */
	ch = p->local->isdn_channel;
	dv = p->local->isdn_device;
	if (ch < 0 && dv < 0)
		return -ENOTCONN;
	idx = isdn_dc2minor(dv, ch);
	if (idx < 0)
		return -ENODEV;
	/* for pre-bound channels, we need this extra check */
	if (strncmp(dev->num[idx], "???", 3) == 0)
		return -ENOTCONN;
	/* NOTE(review): strncpy may leave phone->phone unterminated when
	 * the stored number fills ISDN_MSNLEN — verify consumer copes. */
	strncpy(phone->phone, dev->num[idx], ISDN_MSNLEN);
	phone->outgoing = USG_OUTGOING(dev->usage[idx]);
	if (copy_to_user(peer, phone, sizeof(*peer)))
		return -EFAULT;
	return 0;
}

/*
 * Delete a phone-number from an interface.
 */
int
isdn_net_delphone(isdn_net_ioctl_phone * phone)
{
	isdn_net_dev *p = isdn_net_findif(phone->name);
	int inout = phone->outgoing & 1;
	isdn_net_phone *n;
	isdn_net_phone *m;

	if (p) {
		/* Singly-linked list delete; m trails n as the predecessor. */
		n = p->local->phone[inout];
		m = NULL;
		while (n) {
			if (!strcmp(n->num, phone->phone)) {
				/* don't leave 'dial' pointing at the freed entry */
				if (p->local->dial == n)
					p->local->dial = n->next;
				if (m)
					m->next = n->next;
				else
					p->local->phone[inout] = n->next;
				kfree(n);
				return 0;
			}
			m = n;
			n = (isdn_net_phone *) n->next;
		}
		return -EINVAL;	/* number not found */
	}
	return -ENODEV;
}

/*
 * Delete all phone-numbers of an interface.
 */
static int
isdn_net_rmallphone(isdn_net_dev * p)
{
	isdn_net_phone *n;
	isdn_net_phone *m;
	int i;

	/* free both the incoming (0) and outgoing (1) lists */
	for (i = 0; i < 2; i++) {
		n = p->local->phone[i];
		while (n) {
			m = n->next;
			kfree(n);
			n = m;
		}
		p->local->phone[i] = NULL;
	}
	p->local->dial = NULL;
	return 0;
}

/*
 * Force a hangup of a network-interface.
 * Returns 1 if no channel was bound, 0 on hangup, -ENODEV if not found.
 */
int
isdn_net_force_hangup(char *name)
{
	isdn_net_dev *p = isdn_net_findif(name);
	struct net_device *q;

	if (p) {
		if (p->local->isdn_device < 0)
			return 1;
		q = p->local->slave;
		/* If this interface has slaves, do a hangup for them also. */
		while (q) {
			isdn_net_hangup(q);
			q = MASTER_TO_SLAVE(q);
		}
		isdn_net_hangup(p->dev);
		return 0;
	}
	return -ENODEV;
}

/*
 * Helper-function for isdn_net_rm: Do the real work.
 */
/*
 * Tear down and free interface p; q is its predecessor in the netdev
 * chain (NULL if p is the head).  Recurses once to remove p's slave.
 * Caller must NOT hold dev->lock.  Returns 0 or -EBUSY if started.
 */
static int
isdn_net_realrm(isdn_net_dev * p, isdn_net_dev * q)
{
	u_long flags;

	if (isdn_net_device_started(p)) {
		return -EBUSY;
	}
#ifdef CONFIG_ISDN_X25
	if( p -> cprot && p -> cprot -> pops )
		p -> cprot -> pops -> proto_del ( p -> cprot );
#endif
	/* Free all phone-entries */
	isdn_net_rmallphone(p);
	/* If interface is bound exclusive, free channel-usage */
	if (p->local->exclusive != -1)
		isdn_unexclusive_channel(p->local->pre_device, p->local->pre_channel);
	if (p->local->master) {
		/* It's a slave-device, so update master's slave-pointer if necessary */
		if (((isdn_net_local *) ISDN_MASTER_PRIV(p->local))->slave == p->dev)
			((isdn_net_local *)ISDN_MASTER_PRIV(p->local))->slave = p->local->slave;
	} else {
		/* Unregister only if it's a master-device */
		unregister_netdev(p->dev);
	}
	/* Unlink device from chain */
	spin_lock_irqsave(&dev->lock, flags);
	if (q)
		q->next = p->next;
	else
		dev->netdev = p->next;
	if (p->local->slave) {
		/* If this interface has a slave, remove it also */
		char *slavename = p->local->slave->name;
		isdn_net_dev *n = dev->netdev;
		q = NULL;
		while (n) {
			if (!strcmp(n->dev->name, slavename)) {
				/* drop the lock for the recursive removal,
				 * then reacquire to finish the walk */
				spin_unlock_irqrestore(&dev->lock, flags);
				isdn_net_realrm(n, q);
				spin_lock_irqsave(&dev->lock, flags);
				break;
			}
			q = n;
			n = (isdn_net_dev *)n->next;
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);
	/* If no more net-devices remain, disable auto-hangup timer */
	if (dev->netdev == NULL)
		isdn_timer_ctrl(ISDN_TIMER_NETHANGUP, 0);

	free_netdev(p->dev);
	kfree(p);

	return 0;
}

/*
 * Remove a single network-interface.
 */
int
isdn_net_rm(char *name)
{
	u_long flags;
	isdn_net_dev *p;
	isdn_net_dev *q;

	/* Search name in netdev-chain */
	spin_lock_irqsave(&dev->lock, flags);
	p = dev->netdev;
	q = NULL;
	while (p) {
		if (!strcmp(p->dev->name, name)) {
			/* realrm takes the lock itself, so release it first */
			spin_unlock_irqrestore(&dev->lock, flags);
			return (isdn_net_realrm(p, q));
		}
		q = p;
		p = (isdn_net_dev *) p->next;
	}
	spin_unlock_irqrestore(&dev->lock, flags);
	/* If no more net-devices remain, disable auto-hangup timer */
	if (dev->netdev == NULL)
		isdn_timer_ctrl(ISDN_TIMER_NETHANGUP, 0);
	return -ENODEV;
}

/*
 * Remove all network-interfaces.  Only masters are removed directly;
 * their slaves are removed recursively by isdn_net_realrm().
 */
int
isdn_net_rmall(void)
{
	u_long flags;
	int ret;

	/* Walk through netdev-chain */
	spin_lock_irqsave(&dev->lock, flags);
	while (dev->netdev) {
		if (!dev->netdev->local->master) {
			/* Remove master-devices only, slaves get removed with their master */
			spin_unlock_irqrestore(&dev->lock, flags);
			if ((ret = isdn_net_realrm(dev->netdev, NULL))) {
				return ret;
			}
			spin_lock_irqsave(&dev->lock, flags);
		}
	}
	dev->netdev = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
gpl-2.0
calixtolinuxplatform/linux-3.12.10-ti-calixto
drivers/ssb/driver_mipscore.c
3116
8573
/*
 * Sonics Silicon Backplane
 * Broadcom MIPS core driver
 *
 * Copyright 2005, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include <linux/ssb/ssb.h>
#include <linux/mtd/physmap.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/time.h>

#include "ssb_private.h"

static const char * const part_probes[] = { "bcm47xxpart", NULL };

static struct physmap_flash_data ssb_pflash_data = {
	.part_probe_types	= part_probes,
};

static struct resource ssb_pflash_resource = {
	.name	= "ssb_pflash",
	.flags	= IORESOURCE_MEM,
};

struct platform_device ssb_pflash_dev = {
	.name		= "physmap-flash",
	.dev		= {
		.platform_data	= &ssb_pflash_data,
	},
	.resource	= &ssb_pflash_resource,
	.num_resources	= 1,
};

/* Thin accessors for the MIPS core's SSB register window. */
static inline u32 mips_read32(struct ssb_mipscore *mcore,
			      u16 offset)
{
	return ssb_read32(mcore->dev, offset);
}

static inline void mips_write32(struct ssb_mipscore *mcore,
				u16 offset,
				u32 value)
{
	ssb_write32(mcore->dev, offset, value);
}

static const u32 ipsflag_irq_mask[] = {
	0,
	SSB_IPSFLAG_IRQ1,
	SSB_IPSFLAG_IRQ2,
	SSB_IPSFLAG_IRQ3,
	SSB_IPSFLAG_IRQ4,
};

static const u32 ipsflag_irq_shift[] = {
	0,
	SSB_IPSFLAG_IRQ1_SHIFT,
	SSB_IPSFLAG_IRQ2_SHIFT,
	SSB_IPSFLAG_IRQ3_SHIFT,
	SSB_IPSFLAG_IRQ4_SHIFT,
};

/* Return the backplane IRQ flag of @dev, or 0x3f if the core does not
 * support interrupts (SSB_TPSFLAG reads as zero). */
static inline u32 ssb_irqflag(struct ssb_device *dev)
{
	u32 tpsflag = ssb_read32(dev, SSB_TPSFLAG);
	if (tpsflag)
		/* FIX: reuse the value already read instead of issuing a
		 * second, redundant MMIO read of SSB_TPSFLAG. */
		return tpsflag & SSB_TPSFLAG_BPFLAG;
	else
		/* not irq supported */
		return 0x3f;
}

/* Find the device on the same bus as @rdev whose IRQ flag equals
 * @irqflag; NULL if none. */
static struct ssb_device *find_device(struct ssb_device *rdev, int irqflag)
{
	struct ssb_bus *bus = rdev->bus;
	int i;

	for (i = 0; i < bus->nr_devices; i++) {
		struct ssb_device *dev;
		dev = &(bus->devices[i]);
		if (ssb_irqflag(dev) == irqflag)
			return dev;
	}
	return NULL;
}

/* Get the MIPS IRQ assignment for a specified device.
 * If unassigned, 0 is returned.
 * If disabled, 5 is returned.
 * If not supported, 6 is returned.
 */
unsigned int ssb_mips_irq(struct ssb_device *dev)
{
	struct ssb_bus *bus = dev->bus;
	struct ssb_device *mdev = bus->mipscore.dev;
	u32 irqflag;
	u32 ipsflag;
	u32 tmp;
	unsigned int irq;

	irqflag = ssb_irqflag(dev);
	if (irqflag == 0x3f)
		return 6;	/* core has no IRQ support */
	ipsflag = ssb_read32(bus->mipscore.dev, SSB_IPSFLAG);
	/* look for the flag in the four dedicated IRQ slots */
	for (irq = 1; irq <= 4; irq++) {
		tmp = ((ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq]);
		if (tmp == irqflag)
			break;
	}
	if (irq == 5) {
		/* not in a dedicated slot; 0 if routed via INTVEC, else 5 */
		if ((1 << irqflag) & ssb_read32(mdev, SSB_INTVEC))
			irq = 0;
	}

	return irq;
}

static void clear_irq(struct ssb_bus *bus, unsigned int irq)
{
	struct ssb_device *dev = bus->mipscore.dev;

	/* Clear the IRQ in the MIPScore backplane registers */
	if (irq == 0) {
		ssb_write32(dev, SSB_INTVEC, 0);
	} else {
		ssb_write32(dev, SSB_IPSFLAG,
			    ssb_read32(dev, SSB_IPSFLAG) |
			    ipsflag_irq_mask[irq]);
	}
}

/* Route @dev's backplane interrupt to MIPS irq line @irq, evicting any
 * device currently occupying that line to the shared line 0. */
static void set_irq(struct ssb_device *dev, unsigned int irq)
{
	unsigned int oldirq = ssb_mips_irq(dev);
	struct ssb_bus *bus = dev->bus;
	struct ssb_device *mdev = bus->mipscore.dev;
	u32 irqflag = ssb_irqflag(dev);

	BUG_ON(oldirq == 6);

	dev->irq = irq + 2;

	/* clear the old irq */
	if (oldirq == 0)
		ssb_write32(mdev, SSB_INTVEC, (~(1 << irqflag) & ssb_read32(mdev, SSB_INTVEC)));
	else if (oldirq != 5)
		clear_irq(bus, oldirq);

	/* assign the new one */
	if (irq == 0) {
		ssb_write32(mdev, SSB_INTVEC, ((1 << irqflag) | ssb_read32(mdev, SSB_INTVEC)));
	} else {
		u32 ipsflag = ssb_read32(mdev, SSB_IPSFLAG);
		if ((ipsflag & ipsflag_irq_mask[irq]) != ipsflag_irq_mask[irq]) {
			/* backplane irq line is in use, find out who uses
			 * it and set user to irq 0 */
			u32 oldipsflag = (ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq];
			struct ssb_device *olddev = find_device(dev, oldipsflag);
			if (olddev)
				set_irq(olddev, 0);
		}
		irqflag <<= ipsflag_irq_shift[irq];
		irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]);
		ssb_write32(mdev, SSB_IPSFLAG, irqflag);
	}
	ssb_dbg("set_irq: core 0x%04x, irq %d => %d\n",
		dev->id.coreid, oldirq+2, irq+2);
}

/* Debug helper: print a core's IRQ with a '*' marking the active line. */
static void print_irq(struct ssb_device *dev, unsigned int irq)
{
	static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
	ssb_dbg("core 0x%04x, irq : %s%s %s%s %s%s %s%s %s%s %s%s %s%s\n",
		dev->id.coreid,
		irq_name[0], irq == 0 ? "*" : " ",
		irq_name[1], irq == 1 ? "*" : " ",
		irq_name[2], irq == 2 ? "*" : " ",
		irq_name[3], irq == 3 ? "*" : " ",
		irq_name[4], irq == 4 ? "*" : " ",
		irq_name[5], irq == 5 ? "*" : " ",
		irq_name[6], irq == 6 ? "*" : " ");
}

static void dump_irq(struct ssb_bus *bus)
{
	int i;
	for (i = 0; i < bus->nr_devices; i++) {
		struct ssb_device *dev;
		dev = &(bus->devices[i]);
		print_irq(dev, ssb_mips_irq(dev));
	}
}

/* Count serial ports via EXTIF or ChipCommon, whichever is present. */
static void ssb_mips_serial_init(struct ssb_mipscore *mcore)
{
	struct ssb_bus *bus = mcore->dev->bus;

	if (ssb_extif_available(&bus->extif))
		mcore->nr_serial_ports = ssb_extif_serial_init(&bus->extif, mcore->serial_ports);
	else if (ssb_chipco_available(&bus->chipco))
		mcore->nr_serial_ports = ssb_chipco_serial_init(&bus->chipco, mcore->serial_ports);
	else
		mcore->nr_serial_ports = 0;
}

/* Detect parallel/serial flash and fill in the pflash platform data. */
static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
{
	struct ssb_bus *bus = mcore->dev->bus;
	struct ssb_pflash *pflash = &mcore->pflash;

	/* When there is no chipcommon on the bus there is 4MB flash */
	if (!ssb_chipco_available(&bus->chipco)) {
		pflash->present = true;
		pflash->buswidth = 2;
		pflash->window = SSB_FLASH1;
		pflash->window_size = SSB_FLASH1_SZ;
		goto ssb_pflash;
	}

	/* There is ChipCommon, so use it to read info about flash */
	switch (bus->chipco.capabilities & SSB_CHIPCO_CAP_FLASHT) {
	case SSB_CHIPCO_FLASHT_STSER:
	case SSB_CHIPCO_FLASHT_ATSER:
		pr_debug("Found serial flash\n");
		ssb_sflash_init(&bus->chipco);
		break;
	case SSB_CHIPCO_FLASHT_PARA:
		pr_debug("Found parallel flash\n");
		pflash->present = true;
		pflash->window = SSB_FLASH2;
		pflash->window_size = SSB_FLASH2_SZ;
		if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG)
		     & SSB_CHIPCO_CFG_DS16) == 0)
			pflash->buswidth = 1;
		else
			pflash->buswidth = 2;
		break;
	}

ssb_pflash:
	if (pflash->present) {
		ssb_pflash_data.width = pflash->buswidth;
		ssb_pflash_resource.start = pflash->window;
		ssb_pflash_resource.end = pflash->window + pflash->window_size;
	}
}

/* Return the CPU clock in Hz, via PMU, EXTIF or ChipCommon PLL data. */
u32 ssb_cpu_clock(struct ssb_mipscore *mcore)
{
	struct ssb_bus *bus = mcore->dev->bus;
	u32 pll_type, n, m, rate = 0;

	if (bus->chipco.capabilities & SSB_CHIPCO_CAP_PMU)
		return ssb_pmu_get_cpu_clock(&bus->chipco);

	if (ssb_extif_available(&bus->extif)) {
		ssb_extif_get_clockcontrol(&bus->extif, &pll_type, &n, &m);
	} else if (ssb_chipco_available(&bus->chipco)) {
		ssb_chipco_get_clockcpu(&bus->chipco, &pll_type, &n, &m);
	} else
		return 0;

	if ((pll_type == SSB_PLLTYPE_5) || (bus->chip_id == 0x5365)) {
		rate = 200000000;	/* fixed-rate parts */
	} else {
		rate = ssb_calc_clock_rate(pll_type, n, m);
	}

	if (pll_type == SSB_PLLTYPE_6) {
		rate *= 2;
	}

	return rate;
}

/* Initialize the MIPS core: program bus timing and distribute the
 * backplane IRQs of all cores over the available MIPS irq lines. */
void ssb_mipscore_init(struct ssb_mipscore *mcore)
{
	struct ssb_bus *bus;
	struct ssb_device *dev;
	unsigned long hz, ns;
	unsigned int irq, i;

	if (!mcore->dev)
		return; /* We don't have a MIPS core */

	ssb_dbg("Initializing MIPS core...\n");

	bus = mcore->dev->bus;
	hz = ssb_clockspeed(bus);
	if (!hz)
		hz = 100000000;	/* fall back to 100 MHz if unknown */
	ns = 1000000000 / hz;

	if (ssb_extif_available(&bus->extif))
		ssb_extif_timing_init(&bus->extif, ns);
	else if (ssb_chipco_available(&bus->chipco))
		ssb_chipco_timing_init(&bus->chipco, ns);

	/* Assign IRQs to all cores on the bus, start with irq line 2,
	 * because serial usually takes 1 */
	for (irq = 2, i = 0; i < bus->nr_devices; i++) {
		int mips_irq;
		dev = &(bus->devices[i]);
		mips_irq = ssb_mips_irq(dev);
		if (mips_irq > 4)
			dev->irq = 0;
		else
			dev->irq = mips_irq + 2;
		if (dev->irq > 5)
			continue;
		switch (dev->id.coreid) {
		case SSB_DEV_USB11_HOST:
			/* shouldn't need a separate irq line for non-4710, most of them have a proper
			 * external usb controller on the pci */
			if ((bus->chip_id == 0x4710) && (irq <= 4)) {
				set_irq(dev, irq++);
			}
			break;
		case SSB_DEV_PCI:
		case SSB_DEV_ETHERNET:
		case SSB_DEV_ETHERNET_GBIT:
		case SSB_DEV_80211:
		case SSB_DEV_USB20_HOST:
			/* These devices get their own IRQ line if available,
			 * the rest goes on IRQ0 */
			if (irq <= 4) {
				set_irq(dev, irq++);
				break;
			}
			/* fallthrough */
		case SSB_DEV_EXTIF:
			set_irq(dev, 0);
			break;
		}
	}
	ssb_dbg("after irq reconfiguration\n");
	dump_irq(bus);

	ssb_mips_serial_init(mcore);
	ssb_mips_flash_detect(mcore);
}
gpl-2.0
ncultra/linux-stable
drivers/ssb/driver_mipscore.c
3116
8573
/* * Sonics Silicon Backplane * Broadcom MIPS core driver * * Copyright 2005, Broadcom Corporation * Copyright 2006, 2007, Michael Buesch <m@bues.ch> * * Licensed under the GNU/GPL. See COPYING for details. */ #include <linux/ssb/ssb.h> #include <linux/mtd/physmap.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/serial_reg.h> #include <linux/time.h> #include "ssb_private.h" static const char * const part_probes[] = { "bcm47xxpart", NULL }; static struct physmap_flash_data ssb_pflash_data = { .part_probe_types = part_probes, }; static struct resource ssb_pflash_resource = { .name = "ssb_pflash", .flags = IORESOURCE_MEM, }; struct platform_device ssb_pflash_dev = { .name = "physmap-flash", .dev = { .platform_data = &ssb_pflash_data, }, .resource = &ssb_pflash_resource, .num_resources = 1, }; static inline u32 mips_read32(struct ssb_mipscore *mcore, u16 offset) { return ssb_read32(mcore->dev, offset); } static inline void mips_write32(struct ssb_mipscore *mcore, u16 offset, u32 value) { ssb_write32(mcore->dev, offset, value); } static const u32 ipsflag_irq_mask[] = { 0, SSB_IPSFLAG_IRQ1, SSB_IPSFLAG_IRQ2, SSB_IPSFLAG_IRQ3, SSB_IPSFLAG_IRQ4, }; static const u32 ipsflag_irq_shift[] = { 0, SSB_IPSFLAG_IRQ1_SHIFT, SSB_IPSFLAG_IRQ2_SHIFT, SSB_IPSFLAG_IRQ3_SHIFT, SSB_IPSFLAG_IRQ4_SHIFT, }; static inline u32 ssb_irqflag(struct ssb_device *dev) { u32 tpsflag = ssb_read32(dev, SSB_TPSFLAG); if (tpsflag) return ssb_read32(dev, SSB_TPSFLAG) & SSB_TPSFLAG_BPFLAG; else /* not irq supported */ return 0x3f; } static struct ssb_device *find_device(struct ssb_device *rdev, int irqflag) { struct ssb_bus *bus = rdev->bus; int i; for (i = 0; i < bus->nr_devices; i++) { struct ssb_device *dev; dev = &(bus->devices[i]); if (ssb_irqflag(dev) == irqflag) return dev; } return NULL; } /* Get the MIPS IRQ assignment for a specified device. * If unassigned, 0 is returned. * If disabled, 5 is returned. * If not supported, 6 is returned. 
*/ unsigned int ssb_mips_irq(struct ssb_device *dev) { struct ssb_bus *bus = dev->bus; struct ssb_device *mdev = bus->mipscore.dev; u32 irqflag; u32 ipsflag; u32 tmp; unsigned int irq; irqflag = ssb_irqflag(dev); if (irqflag == 0x3f) return 6; ipsflag = ssb_read32(bus->mipscore.dev, SSB_IPSFLAG); for (irq = 1; irq <= 4; irq++) { tmp = ((ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq]); if (tmp == irqflag) break; } if (irq == 5) { if ((1 << irqflag) & ssb_read32(mdev, SSB_INTVEC)) irq = 0; } return irq; } static void clear_irq(struct ssb_bus *bus, unsigned int irq) { struct ssb_device *dev = bus->mipscore.dev; /* Clear the IRQ in the MIPScore backplane registers */ if (irq == 0) { ssb_write32(dev, SSB_INTVEC, 0); } else { ssb_write32(dev, SSB_IPSFLAG, ssb_read32(dev, SSB_IPSFLAG) | ipsflag_irq_mask[irq]); } } static void set_irq(struct ssb_device *dev, unsigned int irq) { unsigned int oldirq = ssb_mips_irq(dev); struct ssb_bus *bus = dev->bus; struct ssb_device *mdev = bus->mipscore.dev; u32 irqflag = ssb_irqflag(dev); BUG_ON(oldirq == 6); dev->irq = irq + 2; /* clear the old irq */ if (oldirq == 0) ssb_write32(mdev, SSB_INTVEC, (~(1 << irqflag) & ssb_read32(mdev, SSB_INTVEC))); else if (oldirq != 5) clear_irq(bus, oldirq); /* assign the new one */ if (irq == 0) { ssb_write32(mdev, SSB_INTVEC, ((1 << irqflag) | ssb_read32(mdev, SSB_INTVEC))); } else { u32 ipsflag = ssb_read32(mdev, SSB_IPSFLAG); if ((ipsflag & ipsflag_irq_mask[irq]) != ipsflag_irq_mask[irq]) { u32 oldipsflag = (ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq]; struct ssb_device *olddev = find_device(dev, oldipsflag); if (olddev) set_irq(olddev, 0); } irqflag <<= ipsflag_irq_shift[irq]; irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]); ssb_write32(mdev, SSB_IPSFLAG, irqflag); } ssb_dbg("set_irq: core 0x%04x, irq %d => %d\n", dev->id.coreid, oldirq+2, irq+2); } static void print_irq(struct ssb_device *dev, unsigned int irq) { static const char *irq_name[] = {"2(S)", "3", "4", "5", 
"6", "D", "I"}; ssb_dbg("core 0x%04x, irq : %s%s %s%s %s%s %s%s %s%s %s%s %s%s\n", dev->id.coreid, irq_name[0], irq == 0 ? "*" : " ", irq_name[1], irq == 1 ? "*" : " ", irq_name[2], irq == 2 ? "*" : " ", irq_name[3], irq == 3 ? "*" : " ", irq_name[4], irq == 4 ? "*" : " ", irq_name[5], irq == 5 ? "*" : " ", irq_name[6], irq == 6 ? "*" : " "); } static void dump_irq(struct ssb_bus *bus) { int i; for (i = 0; i < bus->nr_devices; i++) { struct ssb_device *dev; dev = &(bus->devices[i]); print_irq(dev, ssb_mips_irq(dev)); } } static void ssb_mips_serial_init(struct ssb_mipscore *mcore) { struct ssb_bus *bus = mcore->dev->bus; if (ssb_extif_available(&bus->extif)) mcore->nr_serial_ports = ssb_extif_serial_init(&bus->extif, mcore->serial_ports); else if (ssb_chipco_available(&bus->chipco)) mcore->nr_serial_ports = ssb_chipco_serial_init(&bus->chipco, mcore->serial_ports); else mcore->nr_serial_ports = 0; } static void ssb_mips_flash_detect(struct ssb_mipscore *mcore) { struct ssb_bus *bus = mcore->dev->bus; struct ssb_pflash *pflash = &mcore->pflash; /* When there is no chipcommon on the bus there is 4MB flash */ if (!ssb_chipco_available(&bus->chipco)) { pflash->present = true; pflash->buswidth = 2; pflash->window = SSB_FLASH1; pflash->window_size = SSB_FLASH1_SZ; goto ssb_pflash; } /* There is ChipCommon, so use it to read info about flash */ switch (bus->chipco.capabilities & SSB_CHIPCO_CAP_FLASHT) { case SSB_CHIPCO_FLASHT_STSER: case SSB_CHIPCO_FLASHT_ATSER: pr_debug("Found serial flash\n"); ssb_sflash_init(&bus->chipco); break; case SSB_CHIPCO_FLASHT_PARA: pr_debug("Found parallel flash\n"); pflash->present = true; pflash->window = SSB_FLASH2; pflash->window_size = SSB_FLASH2_SZ; if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG) & SSB_CHIPCO_CFG_DS16) == 0) pflash->buswidth = 1; else pflash->buswidth = 2; break; } ssb_pflash: if (pflash->present) { ssb_pflash_data.width = pflash->buswidth; ssb_pflash_resource.start = pflash->window; ssb_pflash_resource.end = 
pflash->window + pflash->window_size; } } u32 ssb_cpu_clock(struct ssb_mipscore *mcore) { struct ssb_bus *bus = mcore->dev->bus; u32 pll_type, n, m, rate = 0; if (bus->chipco.capabilities & SSB_CHIPCO_CAP_PMU) return ssb_pmu_get_cpu_clock(&bus->chipco); if (ssb_extif_available(&bus->extif)) { ssb_extif_get_clockcontrol(&bus->extif, &pll_type, &n, &m); } else if (ssb_chipco_available(&bus->chipco)) { ssb_chipco_get_clockcpu(&bus->chipco, &pll_type, &n, &m); } else return 0; if ((pll_type == SSB_PLLTYPE_5) || (bus->chip_id == 0x5365)) { rate = 200000000; } else { rate = ssb_calc_clock_rate(pll_type, n, m); } if (pll_type == SSB_PLLTYPE_6) { rate *= 2; } return rate; } void ssb_mipscore_init(struct ssb_mipscore *mcore) { struct ssb_bus *bus; struct ssb_device *dev; unsigned long hz, ns; unsigned int irq, i; if (!mcore->dev) return; /* We don't have a MIPS core */ ssb_dbg("Initializing MIPS core...\n"); bus = mcore->dev->bus; hz = ssb_clockspeed(bus); if (!hz) hz = 100000000; ns = 1000000000 / hz; if (ssb_extif_available(&bus->extif)) ssb_extif_timing_init(&bus->extif, ns); else if (ssb_chipco_available(&bus->chipco)) ssb_chipco_timing_init(&bus->chipco, ns); /* Assign IRQs to all cores on the bus, start with irq line 2, because serial usually takes 1 */ for (irq = 2, i = 0; i < bus->nr_devices; i++) { int mips_irq; dev = &(bus->devices[i]); mips_irq = ssb_mips_irq(dev); if (mips_irq > 4) dev->irq = 0; else dev->irq = mips_irq + 2; if (dev->irq > 5) continue; switch (dev->id.coreid) { case SSB_DEV_USB11_HOST: /* shouldn't need a separate irq line for non-4710, most of them have a proper * external usb controller on the pci */ if ((bus->chip_id == 0x4710) && (irq <= 4)) { set_irq(dev, irq++); } break; case SSB_DEV_PCI: case SSB_DEV_ETHERNET: case SSB_DEV_ETHERNET_GBIT: case SSB_DEV_80211: case SSB_DEV_USB20_HOST: /* These devices get their own IRQ line if available, the rest goes on IRQ0 */ if (irq <= 4) { set_irq(dev, irq++); break; } /* fallthrough */ case 
SSB_DEV_EXTIF: set_irq(dev, 0); break; } } ssb_dbg("after irq reconfiguration\n"); dump_irq(bus); ssb_mips_serial_init(mcore); ssb_mips_flash_detect(mcore); }
gpl-2.0
omega-roms/G900F_Omega_Kernel_LL_5.0
drivers/video/msm/mipi_novatek_video_qhd_pt.c
3628
2922
/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include "msm_fb.h" #include "mipi_dsi.h" #include "mipi_novatek.h" static struct msm_panel_info pinfo; static struct mipi_dsi_phy_ctrl dsi_video_mode_phy_db = { /* DSI_BIT_CLK at 500MHz, 2 lane, RGB888 */ {0x03, 0x01, 0x01, 0x00}, /* regulator */ /* timing */ {0x82, 0x31, 0x13, 0x0, 0x42, 0x4D, 0x18, 0x35, 0x21, 0x03, 0x04}, {0x7f, 0x00, 0x00, 0x00}, /* phy ctrl */ {0xee, 0x02, 0x86, 0x00}, /* strength */ /* pll control */ {0x40, 0xf9, 0xb0, 0xda, 0x00, 0x50, 0x48, 0x63, #if defined(NOVATEK_TWO_LANE) 0x30, 0x07, 0x03, #else /* default set to 1 lane */ 0x30, 0x07, 0x07, #endif 0x05, 0x14, 0x03, 0x0, 0x0, 0x54, 0x06, 0x10, 0x04, 0x0}, }; static int __init mipi_video_novatek_qhd_pt_init(void) { int ret; if (msm_fb_detect_client("mipi_video_novatek_qhd")) return 0; pinfo.xres = 540; pinfo.yres = 960; pinfo.type = MIPI_VIDEO_PANEL; pinfo.pdest = DISPLAY_1; pinfo.wait_cycle = 0; pinfo.bpp = 24; pinfo.lcdc.h_back_porch = 80; pinfo.lcdc.h_front_porch = 24; pinfo.lcdc.h_pulse_width = 8; pinfo.lcdc.v_back_porch = 16; pinfo.lcdc.v_front_porch = 8; pinfo.lcdc.v_pulse_width = 1; pinfo.lcdc.border_clr = 0; /* blk */ pinfo.lcdc.underflow_clr = 0xff; /* blue */ pinfo.lcdc.hsync_skew = 0; pinfo.bl_max = 15; pinfo.bl_min = 1; pinfo.fb_num = 2; pinfo.mipi.mode = DSI_VIDEO_MODE; pinfo.mipi.pulse_mode_hsa_he = TRUE; pinfo.mipi.hfp_power_stop = FALSE; pinfo.mipi.hbp_power_stop = FALSE; pinfo.mipi.hsa_power_stop = FALSE; pinfo.mipi.eof_bllp_power_stop = TRUE; 
pinfo.mipi.bllp_power_stop = TRUE; pinfo.mipi.traffic_mode = DSI_NON_BURST_SYNCH_PULSE; pinfo.mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888; pinfo.mipi.vc = 0; pinfo.mipi.rgb_swap = DSI_RGB_SWAP_BGR; pinfo.mipi.data_lane0 = TRUE; pinfo.mipi.esc_byte_ratio = 4; #if defined(NOVATEK_TWO_LANE) pinfo.mipi.data_lane1 = TRUE; #endif pinfo.mipi.tx_eot_append = TRUE; pinfo.mipi.t_clk_post = 0x04; pinfo.mipi.t_clk_pre = 0x1c; pinfo.mipi.stream = 0; /* dma_p */ pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW; pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW; pinfo.mipi.frame_rate = 60; pinfo.mipi.dsi_phy_db = &dsi_video_mode_phy_db; ret = mipi_novatek_device_register(&pinfo, MIPI_DSI_PRIM, MIPI_DSI_PANEL_QHD_PT); if (ret) pr_err("%s: failed to register device!\n", __func__); return ret; } module_init(mipi_video_novatek_qhd_pt_init);
gpl-2.0
phiexz/kernel-cyanogen-gio
drivers/net/de600.c
4140
13294
static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj0rn@blox.se)\n"; /* * de600.c * * Linux driver for the D-Link DE-600 Ethernet pocket adapter. * * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall * The Author may be reached as bj0rn@blox.se * * Based on adapter information gathered from DE600.ASM by D-Link Inc., * as included on disk C in the v.2.11 of PC/TCP from FTP Software. * For DE600.asm: * Portions (C) Copyright 1990 D-Link, Inc. * Copyright, 1988-1992, Russell Nelson, Crynwr Software * * Adapted to the sample network driver core for linux, * written by: Donald Becker <becker@super.org> * (Now at <becker@scyld.com>) * **************************************************************/ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* **************************************************************/ /* Add more time here if your adapter won't work OK: */ #define DE600_SLOW_DOWN udelay(delay_time) #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/string.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <asm/system.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <asm/io.h> #include "de600.h" static unsigned int check_lost = 1; module_param(check_lost, bool, 0); MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600"); static unsigned int delay_time = 10; module_param(delay_time, int, 0); MODULE_PARM_DESC(delay_time, "DE-600 deley on I/O in microseconds"); /* * D-Link driver variables: */ static volatile int rx_page; #define TX_PAGES 2 static volatile int tx_fifo[TX_PAGES]; static volatile int tx_fifo_in; static volatile int tx_fifo_out; static volatile int free_tx_pages = TX_PAGES; static int was_down; static DEFINE_SPINLOCK(de600_lock); static inline u8 de600_read_status(struct net_device *dev) { u8 status; outb_p(STATUS, DATA_PORT); status = inb(STATUS_PORT); outb_p(NULL_COMMAND | HI_NIBBLE, DATA_PORT); return status; } static inline u8 de600_read_byte(unsigned char type, struct net_device *dev) { /* dev used by macros */ u8 lo; outb_p((type), DATA_PORT); lo = ((unsigned char)inb(STATUS_PORT)) >> 4; outb_p((type) | HI_NIBBLE, DATA_PORT); return ((unsigned char)inb(STATUS_PORT) & (unsigned char)0xf0) | lo; } /* * Open/initialize the board. This is called (in the current kernel) * after booting when 'ifconfig <dev->name> $IP_ADDR' is run (in rc.inet1). 
* * This routine should set everything up anew at each open, even * registers that "should" only need to be set once at boot, so that * there is a non-reboot way to recover if something goes wrong. */ static int de600_open(struct net_device *dev) { unsigned long flags; int ret = request_irq(DE600_IRQ, de600_interrupt, 0, dev->name, dev); if (ret) { printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, DE600_IRQ); return ret; } spin_lock_irqsave(&de600_lock, flags); ret = adapter_init(dev); spin_unlock_irqrestore(&de600_lock, flags); return ret; } /* * The inverse routine to de600_open(). */ static int de600_close(struct net_device *dev) { select_nic(); rx_page = 0; de600_put_command(RESET); de600_put_command(STOP_RESET); de600_put_command(0); select_prn(); free_irq(DE600_IRQ, dev); return 0; } static inline void trigger_interrupt(struct net_device *dev) { de600_put_command(FLIP_IRQ); select_prn(); DE600_SLOW_DOWN; select_nic(); de600_put_command(0); } /* * Copy a buffer to the adapter transmit page memory. * Start sending. */ static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned long flags; int transmit_from; int len; int tickssofar; u8 *buffer = skb->data; int i; if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */ tickssofar = jiffies - dev_trans_start(dev); if (tickssofar < HZ/20) return NETDEV_TX_BUSY; /* else */ printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem"); /* Restart the adapter. 
*/ spin_lock_irqsave(&de600_lock, flags); if (adapter_init(dev)) { spin_unlock_irqrestore(&de600_lock, flags); return NETDEV_TX_BUSY; } spin_unlock_irqrestore(&de600_lock, flags); } /* Start real output */ pr_debug("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages); if ((len = skb->len) < RUNT) len = RUNT; spin_lock_irqsave(&de600_lock, flags); select_nic(); tx_fifo[tx_fifo_in] = transmit_from = tx_page_adr(tx_fifo_in) - len; tx_fifo_in = (tx_fifo_in + 1) % TX_PAGES; /* Next free tx page */ if(check_lost) { /* This costs about 40 instructions per packet... */ de600_setup_address(NODE_ADDRESS, RW_ADDR); de600_read_byte(READ_DATA, dev); if (was_down || (de600_read_byte(READ_DATA, dev) != 0xde)) { if (adapter_init(dev)) { spin_unlock_irqrestore(&de600_lock, flags); return NETDEV_TX_BUSY; } } } de600_setup_address(transmit_from, RW_ADDR); for (i = 0; i < skb->len ; ++i, ++buffer) de600_put_byte(*buffer); for (; i < len; ++i) de600_put_byte(0); if (free_tx_pages-- == TX_PAGES) { /* No transmission going on */ dev->trans_start = jiffies; netif_start_queue(dev); /* allow more packets into adapter */ /* Send page and generate a faked interrupt */ de600_setup_address(transmit_from, TX_ADDR); de600_put_command(TX_ENABLE); } else { if (free_tx_pages) netif_start_queue(dev); else netif_stop_queue(dev); select_prn(); } spin_unlock_irqrestore(&de600_lock, flags); dev_kfree_skb(skb); return NETDEV_TX_OK; } /* * The typical workload of the driver: * Handle the network interface interrupts. */ static irqreturn_t de600_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; u8 irq_status; int retrig = 0; int boguscount = 0; spin_lock(&de600_lock); select_nic(); irq_status = de600_read_status(dev); do { pr_debug("de600_interrupt (%02X)\n", irq_status); if (irq_status & RX_GOOD) de600_rx_intr(dev); else if (!(irq_status & RX_BUSY)) de600_put_command(RX_ENABLE); /* Any transmission in progress? 
*/ if (free_tx_pages < TX_PAGES) retrig = de600_tx_intr(dev, irq_status); else retrig = 0; irq_status = de600_read_status(dev); } while ( (irq_status & RX_GOOD) || ((++boguscount < 100) && retrig) ); /* * Yeah, it _looks_ like busy waiting, smells like busy waiting * and I know it's not PC, but please, it will only occur once * in a while and then only for a loop or so (< 1ms for sure!) */ /* Enable adapter interrupts */ select_prn(); if (retrig) trigger_interrupt(dev); spin_unlock(&de600_lock); return IRQ_HANDLED; } static int de600_tx_intr(struct net_device *dev, int irq_status) { /* * Returns 1 if tx still not done */ /* Check if current transmission is done yet */ if (irq_status & TX_BUSY) return 1; /* tx not done, try again */ /* else */ /* If last transmission OK then bump fifo index */ if (!(irq_status & TX_FAILED16)) { tx_fifo_out = (tx_fifo_out + 1) % TX_PAGES; ++free_tx_pages; dev->stats.tx_packets++; netif_wake_queue(dev); } /* More to send, or resend last packet? */ if ((free_tx_pages < TX_PAGES) || (irq_status & TX_FAILED16)) { dev->trans_start = jiffies; de600_setup_address(tx_fifo[tx_fifo_out], TX_ADDR); de600_put_command(TX_ENABLE); return 1; } /* else */ return 0; } /* * We have a good packet, get it out of the adapter. 
*/ static void de600_rx_intr(struct net_device *dev) { struct sk_buff *skb; int i; int read_from; int size; unsigned char *buffer; /* Get size of received packet */ size = de600_read_byte(RX_LEN, dev); /* low byte */ size += (de600_read_byte(RX_LEN, dev) << 8); /* high byte */ size -= 4; /* Ignore trailing 4 CRC-bytes */ /* Tell adapter where to store next incoming packet, enable receiver */ read_from = rx_page_adr(); next_rx_page(); de600_put_command(RX_ENABLE); if ((size < 32) || (size > 1535)) { printk(KERN_WARNING "%s: Bogus packet size %d.\n", dev->name, size); if (size > 10000) adapter_init(dev); return; } skb = dev_alloc_skb(size+2); if (skb == NULL) { printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size); return; } /* else */ skb_reserve(skb,2); /* Align */ /* 'skb->data' points to the start of sk_buff data area. */ buffer = skb_put(skb,size); /* copy the packet into the buffer */ de600_setup_address(read_from, RW_ADDR); for (i = size; i > 0; --i, ++buffer) *buffer = de600_read_byte(READ_DATA, dev); skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); /* update stats */ dev->stats.rx_packets++; /* count all receives */ dev->stats.rx_bytes += size; /* count all received bytes */ /* * If any worth-while packets have been received, netif_rx() * will work on them when we get to the tasklets. 
*/ } static const struct net_device_ops de600_netdev_ops = { .ndo_open = de600_open, .ndo_stop = de600_close, .ndo_start_xmit = de600_start_xmit, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static struct net_device * __init de600_probe(void) { int i; struct net_device *dev; int err; dev = alloc_etherdev(0); if (!dev) return ERR_PTR(-ENOMEM); if (!request_region(DE600_IO, 3, "de600")) { printk(KERN_WARNING "DE600: port 0x%x busy\n", DE600_IO); err = -EBUSY; goto out; } printk(KERN_INFO "%s: D-Link DE-600 pocket adapter", dev->name); /* Alpha testers must have the version number to report bugs. */ pr_debug("%s", version); /* probe for adapter */ err = -ENODEV; rx_page = 0; select_nic(); (void)de600_read_status(dev); de600_put_command(RESET); de600_put_command(STOP_RESET); if (de600_read_status(dev) & 0xf0) { printk(": not at I/O %#3x.\n", DATA_PORT); goto out1; } /* * Maybe we found one, * have to check if it is a D-Link DE-600 adapter... 
*/ /* Get the adapter ethernet address from the ROM */ de600_setup_address(NODE_ADDRESS, RW_ADDR); for (i = 0; i < ETH_ALEN; i++) { dev->dev_addr[i] = de600_read_byte(READ_DATA, dev); dev->broadcast[i] = 0xff; } /* Check magic code */ if ((dev->dev_addr[1] == 0xde) && (dev->dev_addr[2] == 0x15)) { /* OK, install real address */ dev->dev_addr[0] = 0x00; dev->dev_addr[1] = 0x80; dev->dev_addr[2] = 0xc8; dev->dev_addr[3] &= 0x0f; dev->dev_addr[3] |= 0x70; } else { printk(" not identified in the printer port\n"); goto out1; } printk(", Ethernet Address: %pM\n", dev->dev_addr); dev->netdev_ops = &de600_netdev_ops; dev->flags&=~IFF_MULTICAST; select_prn(); err = register_netdev(dev); if (err) goto out1; return dev; out1: release_region(DE600_IO, 3); out: free_netdev(dev); return ERR_PTR(err); } static int adapter_init(struct net_device *dev) { int i; select_nic(); rx_page = 0; /* used by RESET */ de600_put_command(RESET); de600_put_command(STOP_RESET); /* Check if it is still there... */ /* Get the some bytes of the adapter ethernet address from the ROM */ de600_setup_address(NODE_ADDRESS, RW_ADDR); de600_read_byte(READ_DATA, dev); if ((de600_read_byte(READ_DATA, dev) != 0xde) || (de600_read_byte(READ_DATA, dev) != 0x15)) { /* was: if (de600_read_status(dev) & 0xf0) { */ printk("Something has happened to the DE-600! Please check it and do a new ifconfig!\n"); /* Goodbye, cruel world... */ dev->flags &= ~IFF_UP; de600_close(dev); was_down = 1; netif_stop_queue(dev); /* Transmit busy... */ return 1; /* failed */ } if (was_down) { printk(KERN_INFO "%s: Thanks, I feel much better now!\n", dev->name); was_down = 0; } tx_fifo_in = 0; tx_fifo_out = 0; free_tx_pages = TX_PAGES; /* set the ether address. 
*/ de600_setup_address(NODE_ADDRESS, RW_ADDR); for (i = 0; i < ETH_ALEN; i++) de600_put_byte(dev->dev_addr[i]); /* where to start saving incoming packets */ rx_page = RX_BP | RX_BASE_PAGE; de600_setup_address(MEM_4K, RW_ADDR); /* Enable receiver */ de600_put_command(RX_ENABLE); select_prn(); netif_start_queue(dev); return 0; /* OK */ } static struct net_device *de600_dev; static int __init de600_init(void) { de600_dev = de600_probe(); if (IS_ERR(de600_dev)) return PTR_ERR(de600_dev); return 0; } static void __exit de600_exit(void) { unregister_netdev(de600_dev); release_region(DE600_IO, 3); free_netdev(de600_dev); } module_init(de600_init); module_exit(de600_exit); MODULE_LICENSE("GPL");
gpl-2.0
MaxiCM/android_kernel_samsung_degaswifi
arch/sh/kernel/hw_breakpoint.c
7212
8820
/* * arch/sh/kernel/hw_breakpoint.c * * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC. * * Copyright (C) 2009 - 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/perf_event.h> #include <linux/hw_breakpoint.h> #include <linux/percpu.h> #include <linux/kallsyms.h> #include <linux/notifier.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/io.h> #include <linux/clk.h> #include <asm/hw_breakpoint.h> #include <asm/mmu_context.h> #include <asm/ptrace.h> #include <asm/traps.h> /* * Stores the breakpoints currently in use on each breakpoint address * register for each cpus */ static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]); /* * A dummy placeholder for early accesses until the CPUs get a chance to * register their UBCs later in the boot process. */ static struct sh_ubc ubc_dummy = { .num_events = 0 }; static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy; /* * Install a perf counter breakpoint. * * We seek a free UBC channel and use it for this breakpoint. * * Atomic: we hold the counter->ctx->lock and we only handle variables * and registers local to this cpu. */ int arch_install_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); int i; for (i = 0; i < sh_ubc->num_events; i++) { struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); if (!*slot) { *slot = bp; break; } } if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot")) return -EBUSY; clk_enable(sh_ubc->clk); sh_ubc->enable(info, i); return 0; } /* * Uninstall the breakpoint contained in the given counter. * * First we search the debug address register it uses and then we disable * it. * * Atomic: we hold the counter->ctx->lock and we only handle variables * and registers local to this cpu. 
*/ void arch_uninstall_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); int i; for (i = 0; i < sh_ubc->num_events; i++) { struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); if (*slot == bp) { *slot = NULL; break; } } if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot")) return; sh_ubc->disable(info, i); clk_disable(sh_ubc->clk); } static int get_hbp_len(u16 hbp_len) { unsigned int len_in_bytes = 0; switch (hbp_len) { case SH_BREAKPOINT_LEN_1: len_in_bytes = 1; break; case SH_BREAKPOINT_LEN_2: len_in_bytes = 2; break; case SH_BREAKPOINT_LEN_4: len_in_bytes = 4; break; case SH_BREAKPOINT_LEN_8: len_in_bytes = 8; break; } return len_in_bytes; } /* * Check for virtual address in kernel space. */ int arch_check_bp_in_kernelspace(struct perf_event *bp) { unsigned int len; unsigned long va; struct arch_hw_breakpoint *info = counter_arch_bp(bp); va = info->address; len = get_hbp_len(info->len); return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); } int arch_bp_generic_fields(int sh_len, int sh_type, int *gen_len, int *gen_type) { /* Len */ switch (sh_len) { case SH_BREAKPOINT_LEN_1: *gen_len = HW_BREAKPOINT_LEN_1; break; case SH_BREAKPOINT_LEN_2: *gen_len = HW_BREAKPOINT_LEN_2; break; case SH_BREAKPOINT_LEN_4: *gen_len = HW_BREAKPOINT_LEN_4; break; case SH_BREAKPOINT_LEN_8: *gen_len = HW_BREAKPOINT_LEN_8; break; default: return -EINVAL; } /* Type */ switch (sh_type) { case SH_BREAKPOINT_READ: *gen_type = HW_BREAKPOINT_R; case SH_BREAKPOINT_WRITE: *gen_type = HW_BREAKPOINT_W; break; case SH_BREAKPOINT_RW: *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; break; default: return -EINVAL; } return 0; } static int arch_build_bp_info(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); info->address = bp->attr.bp_addr; /* Len */ switch (bp->attr.bp_len) { case HW_BREAKPOINT_LEN_1: info->len = SH_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: info->len = SH_BREAKPOINT_LEN_2; 
break; case HW_BREAKPOINT_LEN_4: info->len = SH_BREAKPOINT_LEN_4; break; case HW_BREAKPOINT_LEN_8: info->len = SH_BREAKPOINT_LEN_8; break; default: return -EINVAL; } /* Type */ switch (bp->attr.bp_type) { case HW_BREAKPOINT_R: info->type = SH_BREAKPOINT_READ; break; case HW_BREAKPOINT_W: info->type = SH_BREAKPOINT_WRITE; break; case HW_BREAKPOINT_W | HW_BREAKPOINT_R: info->type = SH_BREAKPOINT_RW; break; default: return -EINVAL; } return 0; } /* * Validate the arch-specific HW Breakpoint register settings */ int arch_validate_hwbkpt_settings(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); unsigned int align; int ret; ret = arch_build_bp_info(bp); if (ret) return ret; ret = -EINVAL; switch (info->len) { case SH_BREAKPOINT_LEN_1: align = 0; break; case SH_BREAKPOINT_LEN_2: align = 1; break; case SH_BREAKPOINT_LEN_4: align = 3; break; case SH_BREAKPOINT_LEN_8: align = 7; break; default: return ret; } /* * For kernel-addresses, either the address or symbol name can be * specified. */ if (info->name) info->address = (unsigned long)kallsyms_lookup_name(info->name); /* * Check that the low-order bits of the address are appropriate * for the alignment implied by len. */ if (info->address & align) return -EINVAL; return 0; } /* * Release the user breakpoints used by ptrace */ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { int i; struct thread_struct *t = &tsk->thread; for (i = 0; i < sh_ubc->num_events; i++) { unregister_hw_breakpoint(t->ptrace_bps[i]); t->ptrace_bps[i] = NULL; } } static int __kprobes hw_breakpoint_handler(struct die_args *args) { int cpu, i, rc = NOTIFY_STOP; struct perf_event *bp; unsigned int cmf, resume_mask; /* * Do an early return if none of the channels triggered. */ cmf = sh_ubc->triggered_mask(); if (unlikely(!cmf)) return NOTIFY_DONE; /* * By default, resume all of the active channels. */ resume_mask = sh_ubc->active_mask(); /* * Disable breakpoints during exception handling. 
*/ sh_ubc->disable_all(); cpu = get_cpu(); for (i = 0; i < sh_ubc->num_events; i++) { unsigned long event_mask = (1 << i); if (likely(!(cmf & event_mask))) continue; /* * The counter may be concurrently released but that can only * occur from a call_rcu() path. We can then safely fetch * the breakpoint, use its callback, touch its counter * while we are in an rcu_read_lock() path. */ rcu_read_lock(); bp = per_cpu(bp_per_reg[i], cpu); if (bp) rc = NOTIFY_DONE; /* * Reset the condition match flag to denote completion of * exception handling. */ sh_ubc->clear_triggered_mask(event_mask); /* * bp can be NULL due to concurrent perf counter * removing. */ if (!bp) { rcu_read_unlock(); break; } /* * Don't restore the channel if the breakpoint is from * ptrace, as it always operates in one-shot mode. */ if (bp->overflow_handler == ptrace_triggered) resume_mask &= ~(1 << i); perf_bp_event(bp, args->regs); /* Deliver the signal to userspace */ if (!arch_check_bp_in_kernelspace(bp)) { siginfo_t info; info.si_signo = args->signr; info.si_errno = notifier_to_errno(rc); info.si_code = TRAP_HWBKPT; force_sig_info(args->signr, &info, current); } rcu_read_unlock(); } if (cmf == 0) rc = NOTIFY_DONE; sh_ubc->enable_all(resume_mask); put_cpu(); return rc; } BUILD_TRAP_HANDLER(breakpoint) { unsigned long ex = lookup_exception_vector(); TRAP_HANDLER_DECL; notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP); } /* * Handle debug exception notifications. */ int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data) { struct die_args *args = data; if (val != DIE_BREAKPOINT) return NOTIFY_DONE; /* * If the breakpoint hasn't been triggered by the UBC, it's * probably from a debugger, so don't do anything more here. * * This also permits the UBC interface clock to remain off for * non-UBC breakpoints, as we don't need to check the triggered * or active channel masks. 
*/ if (args->trapnr != sh_ubc->trap_nr) return NOTIFY_DONE; return hw_breakpoint_handler(data); } void hw_breakpoint_pmu_read(struct perf_event *bp) { /* TODO */ } int register_sh_ubc(struct sh_ubc *ubc) { /* Bail if it's already assigned */ if (sh_ubc != &ubc_dummy) return -EBUSY; sh_ubc = ubc; pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name); WARN_ON(ubc->num_events > HBP_NUM); return 0; }
gpl-2.0
VegaDevTeam/android_kernel_pantech_ef60s
drivers/s390/net/smsgiucv.c
8236
6390
/* * IUCV special message driver * * Copyright IBM Corp. 2003, 2009 * * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/slab.h> #include <net/iucv/iucv.h> #include <asm/cpcmd.h> #include <asm/ebcdic.h> #include "smsgiucv.h" struct smsg_callback { struct list_head list; const char *prefix; int len; void (*callback)(const char *from, char *str); }; MODULE_AUTHOR ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)"); MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver"); static struct iucv_path *smsg_path; /* dummy device used as trigger for PM functions */ static struct device *smsg_dev; static DEFINE_SPINLOCK(smsg_list_lock); static LIST_HEAD(smsg_list); static int iucv_path_connected; static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); static void smsg_message_pending(struct iucv_path *, struct iucv_message *); static struct iucv_handler smsg_handler = { .path_pending = smsg_path_pending, .message_pending = smsg_message_pending, }; static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8], u8 ipuser[16]) { if (strncmp(ipvmid, "*MSG ", 8) != 0) return -EINVAL; /* Path pending from *MSG. 
*/ return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL); } static void smsg_message_pending(struct iucv_path *path, struct iucv_message *msg) { struct smsg_callback *cb; unsigned char *buffer; unsigned char sender[9]; int rc, i; buffer = kmalloc(msg->length + 1, GFP_ATOMIC | GFP_DMA); if (!buffer) { iucv_message_reject(path, msg); return; } rc = iucv_message_receive(path, msg, 0, buffer, msg->length, NULL); if (rc == 0) { buffer[msg->length] = 0; EBCASC(buffer, msg->length); memcpy(sender, buffer, 8); sender[8] = 0; /* Remove trailing whitespace from the sender name. */ for (i = 7; i >= 0; i--) { if (sender[i] != ' ' && sender[i] != '\t') break; sender[i] = 0; } spin_lock(&smsg_list_lock); list_for_each_entry(cb, &smsg_list, list) if (strncmp(buffer + 8, cb->prefix, cb->len) == 0) { cb->callback(sender, buffer + 8); break; } spin_unlock(&smsg_list_lock); } kfree(buffer); } int smsg_register_callback(const char *prefix, void (*callback)(const char *from, char *str)) { struct smsg_callback *cb; cb = kmalloc(sizeof(struct smsg_callback), GFP_KERNEL); if (!cb) return -ENOMEM; cb->prefix = prefix; cb->len = strlen(prefix); cb->callback = callback; spin_lock_bh(&smsg_list_lock); list_add_tail(&cb->list, &smsg_list); spin_unlock_bh(&smsg_list_lock); return 0; } void smsg_unregister_callback(const char *prefix, void (*callback)(const char *from, char *str)) { struct smsg_callback *cb, *tmp; spin_lock_bh(&smsg_list_lock); cb = NULL; list_for_each_entry(tmp, &smsg_list, list) if (tmp->callback == callback && strcmp(tmp->prefix, prefix) == 0) { cb = tmp; list_del(&cb->list); break; } spin_unlock_bh(&smsg_list_lock); kfree(cb); } static int smsg_pm_freeze(struct device *dev) { #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "smsg_pm_freeze\n"); #endif if (smsg_path && iucv_path_connected) { iucv_path_sever(smsg_path, NULL); iucv_path_connected = 0; } return 0; } static int smsg_pm_restore_thaw(struct device *dev) { int rc; #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING 
"smsg_pm_restore_thaw\n"); #endif if (smsg_path && iucv_path_connected) { memset(smsg_path, 0, sizeof(*smsg_path)); smsg_path->msglim = 255; smsg_path->flags = 0; rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", NULL, NULL, NULL); #ifdef CONFIG_PM_DEBUG if (rc) printk(KERN_ERR "iucv_path_connect returned with rc %i\n", rc); #endif if (!rc) iucv_path_connected = 1; cpcmd("SET SMSG IUCV", NULL, 0, NULL); } return 0; } static const struct dev_pm_ops smsg_pm_ops = { .freeze = smsg_pm_freeze, .thaw = smsg_pm_restore_thaw, .restore = smsg_pm_restore_thaw, }; static struct device_driver smsg_driver = { .owner = THIS_MODULE, .name = SMSGIUCV_DRV_NAME, .bus = &iucv_bus, .pm = &smsg_pm_ops, }; static void __exit smsg_exit(void) { cpcmd("SET SMSG IUCV", NULL, 0, NULL); device_unregister(smsg_dev); iucv_unregister(&smsg_handler, 1); driver_unregister(&smsg_driver); } static int __init smsg_init(void) { int rc; if (!MACHINE_IS_VM) { rc = -EPROTONOSUPPORT; goto out; } rc = driver_register(&smsg_driver); if (rc != 0) goto out; rc = iucv_register(&smsg_handler, 1); if (rc) goto out_driver; smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL); if (!smsg_path) { rc = -ENOMEM; goto out_register; } rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", NULL, NULL, NULL); if (rc) goto out_free_path; else iucv_path_connected = 1; smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL); if (!smsg_dev) { rc = -ENOMEM; goto out_free_path; } dev_set_name(smsg_dev, "smsg_iucv"); smsg_dev->bus = &iucv_bus; smsg_dev->parent = iucv_root; smsg_dev->release = (void (*)(struct device *))kfree; smsg_dev->driver = &smsg_driver; rc = device_register(smsg_dev); if (rc) goto out_put; cpcmd("SET SMSG IUCV", NULL, 0, NULL); return 0; out_put: put_device(smsg_dev); out_free_path: iucv_path_free(smsg_path); smsg_path = NULL; out_register: iucv_unregister(&smsg_handler, 1); out_driver: driver_unregister(&smsg_driver); out: return rc; } module_init(smsg_init); module_exit(smsg_exit); 
MODULE_LICENSE("GPL"); EXPORT_SYMBOL(smsg_register_callback); EXPORT_SYMBOL(smsg_unregister_callback);
gpl-2.0
Testing1235678/android_kernel_samsung_t1
drivers/media/video/pvrusb2/pvrusb2-io.c
12332
18789
/* * * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include "pvrusb2-io.h" #include "pvrusb2-debug.h" #include <linux/errno.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/mutex.h> static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state); #define BUFFER_SIG 0x47653271 // #define SANITY_CHECK_BUFFERS #ifdef SANITY_CHECK_BUFFERS #define BUFFER_CHECK(bp) do { \ if ((bp)->signature != BUFFER_SIG) { \ pvr2_trace(PVR2_TRACE_ERROR_LEGS, \ "Buffer %p is bad at %s:%d", \ (bp),__FILE__,__LINE__); \ pvr2_buffer_describe(bp,"BadSig"); \ BUG(); \ } \ } while (0) #else #define BUFFER_CHECK(bp) do {} while(0) #endif struct pvr2_stream { /* Buffers queued for reading */ struct list_head queued_list; unsigned int q_count; unsigned int q_bcount; /* Buffers with retrieved data */ struct list_head ready_list; unsigned int r_count; unsigned int r_bcount; /* Buffers available for use */ struct list_head idle_list; unsigned int i_count; unsigned int i_bcount; /* Pointers to all buffers */ struct pvr2_buffer **buffers; /* Array size of buffers */ unsigned int buffer_slot_count; /* Total buffers actually in circulation */ unsigned int buffer_total_count; /* Designed number of buffers to be in circulation */ unsigned int buffer_target_count; /* Executed when ready list become non-empty */ 
pvr2_stream_callback callback_func; void *callback_data; /* Context for transfer endpoint */ struct usb_device *dev; int endpoint; /* Overhead for mutex enforcement */ spinlock_t list_lock; struct mutex mutex; /* Tracking state for tolerating errors */ unsigned int fail_count; unsigned int fail_tolerance; unsigned int buffers_processed; unsigned int buffers_failed; unsigned int bytes_processed; }; struct pvr2_buffer { int id; int signature; enum pvr2_buffer_state state; void *ptr; /* Pointer to storage area */ unsigned int max_count; /* Size of storage area */ unsigned int used_count; /* Amount of valid data in storage area */ int status; /* Transfer result status */ struct pvr2_stream *stream; struct list_head list_overhead; struct urb *purb; }; static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state st) { switch (st) { case pvr2_buffer_state_none: return "none"; case pvr2_buffer_state_idle: return "idle"; case pvr2_buffer_state_queued: return "queued"; case pvr2_buffer_state_ready: return "ready"; } return "unknown"; } #ifdef SANITY_CHECK_BUFFERS static void pvr2_buffer_describe(struct pvr2_buffer *bp,const char *msg) { pvr2_trace(PVR2_TRACE_INFO, "buffer%s%s %p state=%s id=%d status=%d" " stream=%p purb=%p sig=0x%x", (msg ? " " : ""), (msg ? msg : ""), bp, (bp ? pvr2_buffer_state_decode(bp->state) : "(invalid)"), (bp ? bp->id : 0), (bp ? bp->status : 0), (bp ? bp->stream : NULL), (bp ? bp->purb : NULL), (bp ? 
bp->signature : 0)); } #endif /* SANITY_CHECK_BUFFERS */ static void pvr2_buffer_remove(struct pvr2_buffer *bp) { unsigned int *cnt; unsigned int *bcnt; unsigned int ccnt; struct pvr2_stream *sp = bp->stream; switch (bp->state) { case pvr2_buffer_state_idle: cnt = &sp->i_count; bcnt = &sp->i_bcount; ccnt = bp->max_count; break; case pvr2_buffer_state_queued: cnt = &sp->q_count; bcnt = &sp->q_bcount; ccnt = bp->max_count; break; case pvr2_buffer_state_ready: cnt = &sp->r_count; bcnt = &sp->r_bcount; ccnt = bp->used_count; break; default: return; } list_del_init(&bp->list_overhead); (*cnt)--; (*bcnt) -= ccnt; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/" " bufferPool %8s dec cap=%07d cnt=%02d", pvr2_buffer_state_decode(bp->state),*bcnt,*cnt); bp->state = pvr2_buffer_state_none; } static void pvr2_buffer_set_none(struct pvr2_buffer *bp) { unsigned long irq_flags; struct pvr2_stream *sp; BUFFER_CHECK(bp); sp = bp->stream; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s", bp, pvr2_buffer_state_decode(bp->state), pvr2_buffer_state_decode(pvr2_buffer_state_none)); spin_lock_irqsave(&sp->list_lock,irq_flags); pvr2_buffer_remove(bp); spin_unlock_irqrestore(&sp->list_lock,irq_flags); } static int pvr2_buffer_set_ready(struct pvr2_buffer *bp) { int fl; unsigned long irq_flags; struct pvr2_stream *sp; BUFFER_CHECK(bp); sp = bp->stream; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s", bp, pvr2_buffer_state_decode(bp->state), pvr2_buffer_state_decode(pvr2_buffer_state_ready)); spin_lock_irqsave(&sp->list_lock,irq_flags); fl = (sp->r_count == 0); pvr2_buffer_remove(bp); list_add_tail(&bp->list_overhead,&sp->ready_list); bp->state = pvr2_buffer_state_ready; (sp->r_count)++; sp->r_bcount += bp->used_count; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/" " bufferPool %8s inc cap=%07d cnt=%02d", pvr2_buffer_state_decode(bp->state), sp->r_bcount,sp->r_count); 
spin_unlock_irqrestore(&sp->list_lock,irq_flags); return fl; } static void pvr2_buffer_set_idle(struct pvr2_buffer *bp) { unsigned long irq_flags; struct pvr2_stream *sp; BUFFER_CHECK(bp); sp = bp->stream; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s", bp, pvr2_buffer_state_decode(bp->state), pvr2_buffer_state_decode(pvr2_buffer_state_idle)); spin_lock_irqsave(&sp->list_lock,irq_flags); pvr2_buffer_remove(bp); list_add_tail(&bp->list_overhead,&sp->idle_list); bp->state = pvr2_buffer_state_idle; (sp->i_count)++; sp->i_bcount += bp->max_count; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/" " bufferPool %8s inc cap=%07d cnt=%02d", pvr2_buffer_state_decode(bp->state), sp->i_bcount,sp->i_count); spin_unlock_irqrestore(&sp->list_lock,irq_flags); } static void pvr2_buffer_set_queued(struct pvr2_buffer *bp) { unsigned long irq_flags; struct pvr2_stream *sp; BUFFER_CHECK(bp); sp = bp->stream; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s", bp, pvr2_buffer_state_decode(bp->state), pvr2_buffer_state_decode(pvr2_buffer_state_queued)); spin_lock_irqsave(&sp->list_lock,irq_flags); pvr2_buffer_remove(bp); list_add_tail(&bp->list_overhead,&sp->queued_list); bp->state = pvr2_buffer_state_queued; (sp->q_count)++; sp->q_bcount += bp->max_count; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/" " bufferPool %8s inc cap=%07d cnt=%02d", pvr2_buffer_state_decode(bp->state), sp->q_bcount,sp->q_count); spin_unlock_irqrestore(&sp->list_lock,irq_flags); } static void pvr2_buffer_wipe(struct pvr2_buffer *bp) { if (bp->state == pvr2_buffer_state_queued) { usb_kill_urb(bp->purb); } } static int pvr2_buffer_init(struct pvr2_buffer *bp, struct pvr2_stream *sp, unsigned int id) { memset(bp,0,sizeof(*bp)); bp->signature = BUFFER_SIG; bp->id = id; pvr2_trace(PVR2_TRACE_BUF_POOL, "/*---TRACE_FLOW---*/ bufferInit %p stream=%p",bp,sp); bp->stream = sp; bp->state = pvr2_buffer_state_none; 
INIT_LIST_HEAD(&bp->list_overhead); bp->purb = usb_alloc_urb(0,GFP_KERNEL); if (! bp->purb) return -ENOMEM; #ifdef SANITY_CHECK_BUFFERS pvr2_buffer_describe(bp,"create"); #endif return 0; } static void pvr2_buffer_done(struct pvr2_buffer *bp) { #ifdef SANITY_CHECK_BUFFERS pvr2_buffer_describe(bp,"delete"); #endif pvr2_buffer_wipe(bp); pvr2_buffer_set_none(bp); bp->signature = 0; bp->stream = NULL; usb_free_urb(bp->purb); pvr2_trace(PVR2_TRACE_BUF_POOL,"/*---TRACE_FLOW---*/" " bufferDone %p",bp); } static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt) { int ret; unsigned int scnt; /* Allocate buffers pointer array in multiples of 32 entries */ if (cnt == sp->buffer_total_count) return 0; pvr2_trace(PVR2_TRACE_BUF_POOL, "/*---TRACE_FLOW---*/ poolResize " " stream=%p cur=%d adj=%+d", sp, sp->buffer_total_count, cnt-sp->buffer_total_count); scnt = cnt & ~0x1f; if (cnt > scnt) scnt += 0x20; if (cnt > sp->buffer_total_count) { if (scnt > sp->buffer_slot_count) { struct pvr2_buffer **nb; nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL); if (!nb) return -ENOMEM; if (sp->buffer_slot_count) { memcpy(nb,sp->buffers, sp->buffer_slot_count * sizeof(*nb)); kfree(sp->buffers); } sp->buffers = nb; sp->buffer_slot_count = scnt; } while (sp->buffer_total_count < cnt) { struct pvr2_buffer *bp; bp = kmalloc(sizeof(*bp),GFP_KERNEL); if (!bp) return -ENOMEM; ret = pvr2_buffer_init(bp,sp,sp->buffer_total_count); if (ret) { kfree(bp); return -ENOMEM; } sp->buffers[sp->buffer_total_count] = bp; (sp->buffer_total_count)++; pvr2_buffer_set_idle(bp); } } else { while (sp->buffer_total_count > cnt) { struct pvr2_buffer *bp; bp = sp->buffers[sp->buffer_total_count - 1]; /* Paranoia */ sp->buffers[sp->buffer_total_count - 1] = NULL; (sp->buffer_total_count)--; pvr2_buffer_done(bp); kfree(bp); } if (scnt < sp->buffer_slot_count) { struct pvr2_buffer **nb = NULL; if (scnt) { nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL); if (!nb) return -ENOMEM; memcpy(nb,sp->buffers,scnt * 
sizeof(*nb)); } kfree(sp->buffers); sp->buffers = nb; sp->buffer_slot_count = scnt; } } return 0; } static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp) { struct pvr2_buffer *bp; unsigned int cnt; if (sp->buffer_total_count == sp->buffer_target_count) return 0; pvr2_trace(PVR2_TRACE_BUF_POOL, "/*---TRACE_FLOW---*/" " poolCheck stream=%p cur=%d tgt=%d", sp,sp->buffer_total_count,sp->buffer_target_count); if (sp->buffer_total_count < sp->buffer_target_count) { return pvr2_stream_buffer_count(sp,sp->buffer_target_count); } cnt = 0; while ((sp->buffer_total_count - cnt) > sp->buffer_target_count) { bp = sp->buffers[sp->buffer_total_count - (cnt + 1)]; if (bp->state != pvr2_buffer_state_idle) break; cnt++; } if (cnt) { pvr2_stream_buffer_count(sp,sp->buffer_total_count - cnt); } return 0; } static void pvr2_stream_internal_flush(struct pvr2_stream *sp) { struct list_head *lp; struct pvr2_buffer *bp1; while ((lp = sp->queued_list.next) != &sp->queued_list) { bp1 = list_entry(lp,struct pvr2_buffer,list_overhead); pvr2_buffer_wipe(bp1); /* At this point, we should be guaranteed that no completion callback may happen on this buffer. But it's possible that it might have completed after we noticed it but before we wiped it. So double check its status here first. 
*/ if (bp1->state != pvr2_buffer_state_queued) continue; pvr2_buffer_set_idle(bp1); } if (sp->buffer_total_count != sp->buffer_target_count) { pvr2_stream_achieve_buffer_count(sp); } } static void pvr2_stream_init(struct pvr2_stream *sp) { spin_lock_init(&sp->list_lock); mutex_init(&sp->mutex); INIT_LIST_HEAD(&sp->queued_list); INIT_LIST_HEAD(&sp->ready_list); INIT_LIST_HEAD(&sp->idle_list); } static void pvr2_stream_done(struct pvr2_stream *sp) { mutex_lock(&sp->mutex); do { pvr2_stream_internal_flush(sp); pvr2_stream_buffer_count(sp,0); } while (0); mutex_unlock(&sp->mutex); } static void buffer_complete(struct urb *urb) { struct pvr2_buffer *bp = urb->context; struct pvr2_stream *sp; unsigned long irq_flags; BUFFER_CHECK(bp); sp = bp->stream; bp->used_count = 0; bp->status = 0; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/ bufferComplete %p stat=%d cnt=%d", bp,urb->status,urb->actual_length); spin_lock_irqsave(&sp->list_lock,irq_flags); if ((!(urb->status)) || (urb->status == -ENOENT) || (urb->status == -ECONNRESET) || (urb->status == -ESHUTDOWN)) { (sp->buffers_processed)++; sp->bytes_processed += urb->actual_length; bp->used_count = urb->actual_length; if (sp->fail_count) { pvr2_trace(PVR2_TRACE_TOLERANCE, "stream %p transfer ok" " - fail count reset",sp); sp->fail_count = 0; } } else if (sp->fail_count < sp->fail_tolerance) { // We can tolerate this error, because we're below the // threshold... 
(sp->fail_count)++; (sp->buffers_failed)++; pvr2_trace(PVR2_TRACE_TOLERANCE, "stream %p ignoring error %d" " - fail count increased to %u", sp,urb->status,sp->fail_count); } else { (sp->buffers_failed)++; bp->status = urb->status; } spin_unlock_irqrestore(&sp->list_lock,irq_flags); pvr2_buffer_set_ready(bp); if (sp && sp->callback_func) { sp->callback_func(sp->callback_data); } } struct pvr2_stream *pvr2_stream_create(void) { struct pvr2_stream *sp; sp = kzalloc(sizeof(*sp),GFP_KERNEL); if (!sp) return sp; pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_create: sp=%p",sp); pvr2_stream_init(sp); return sp; } void pvr2_stream_destroy(struct pvr2_stream *sp) { if (!sp) return; pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_destroy: sp=%p",sp); pvr2_stream_done(sp); kfree(sp); } void pvr2_stream_setup(struct pvr2_stream *sp, struct usb_device *dev, int endpoint, unsigned int tolerance) { mutex_lock(&sp->mutex); do { pvr2_stream_internal_flush(sp); sp->dev = dev; sp->endpoint = endpoint; sp->fail_tolerance = tolerance; } while(0); mutex_unlock(&sp->mutex); } void pvr2_stream_set_callback(struct pvr2_stream *sp, pvr2_stream_callback func, void *data) { unsigned long irq_flags; mutex_lock(&sp->mutex); do { spin_lock_irqsave(&sp->list_lock,irq_flags); sp->callback_data = data; sp->callback_func = func; spin_unlock_irqrestore(&sp->list_lock,irq_flags); } while(0); mutex_unlock(&sp->mutex); } void pvr2_stream_get_stats(struct pvr2_stream *sp, struct pvr2_stream_stats *stats, int zero_counts) { unsigned long irq_flags; spin_lock_irqsave(&sp->list_lock,irq_flags); if (stats) { stats->buffers_in_queue = sp->q_count; stats->buffers_in_idle = sp->i_count; stats->buffers_in_ready = sp->r_count; stats->buffers_processed = sp->buffers_processed; stats->buffers_failed = sp->buffers_failed; stats->bytes_processed = sp->bytes_processed; } if (zero_counts) { sp->buffers_processed = 0; sp->buffers_failed = 0; sp->bytes_processed = 0; } spin_unlock_irqrestore(&sp->list_lock,irq_flags); } /* Query / set 
the nominal buffer count */ int pvr2_stream_get_buffer_count(struct pvr2_stream *sp) { return sp->buffer_target_count; } int pvr2_stream_set_buffer_count(struct pvr2_stream *sp,unsigned int cnt) { int ret; if (sp->buffer_target_count == cnt) return 0; mutex_lock(&sp->mutex); do { sp->buffer_target_count = cnt; ret = pvr2_stream_achieve_buffer_count(sp); } while(0); mutex_unlock(&sp->mutex); return ret; } struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *sp) { struct list_head *lp = sp->idle_list.next; if (lp == &sp->idle_list) return NULL; return list_entry(lp,struct pvr2_buffer,list_overhead); } struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *sp) { struct list_head *lp = sp->ready_list.next; if (lp == &sp->ready_list) return NULL; return list_entry(lp,struct pvr2_buffer,list_overhead); } struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp,int id) { if (id < 0) return NULL; if (id >= sp->buffer_total_count) return NULL; return sp->buffers[id]; } int pvr2_stream_get_ready_count(struct pvr2_stream *sp) { return sp->r_count; } void pvr2_stream_kill(struct pvr2_stream *sp) { struct pvr2_buffer *bp; mutex_lock(&sp->mutex); do { pvr2_stream_internal_flush(sp); while ((bp = pvr2_stream_get_ready_buffer(sp)) != NULL) { pvr2_buffer_set_idle(bp); } if (sp->buffer_total_count != sp->buffer_target_count) { pvr2_stream_achieve_buffer_count(sp); } } while(0); mutex_unlock(&sp->mutex); } int pvr2_buffer_queue(struct pvr2_buffer *bp) { #undef SEED_BUFFER #ifdef SEED_BUFFER unsigned int idx; unsigned int val; #endif int ret = 0; struct pvr2_stream *sp; if (!bp) return -EINVAL; sp = bp->stream; mutex_lock(&sp->mutex); do { pvr2_buffer_wipe(bp); if (!sp->dev) { ret = -EIO; break; } pvr2_buffer_set_queued(bp); #ifdef SEED_BUFFER for (idx = 0; idx < (bp->max_count) / 4; idx++) { val = bp->id << 24; val |= idx; ((unsigned int *)(bp->ptr))[idx] = val; } #endif bp->status = -EINPROGRESS; usb_fill_bulk_urb(bp->purb, // struct urb *urb 
sp->dev, // struct usb_device *dev // endpoint (below) usb_rcvbulkpipe(sp->dev,sp->endpoint), bp->ptr, // void *transfer_buffer bp->max_count, // int buffer_length buffer_complete, bp); usb_submit_urb(bp->purb,GFP_KERNEL); } while(0); mutex_unlock(&sp->mutex); return ret; } int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt) { int ret = 0; unsigned long irq_flags; struct pvr2_stream *sp; if (!bp) return -EINVAL; sp = bp->stream; mutex_lock(&sp->mutex); do { spin_lock_irqsave(&sp->list_lock,irq_flags); if (bp->state != pvr2_buffer_state_idle) { ret = -EPERM; } else { bp->ptr = ptr; bp->stream->i_bcount -= bp->max_count; bp->max_count = cnt; bp->stream->i_bcount += bp->max_count; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/ bufferPool " " %8s cap cap=%07d cnt=%02d", pvr2_buffer_state_decode( pvr2_buffer_state_idle), bp->stream->i_bcount,bp->stream->i_count); } spin_unlock_irqrestore(&sp->list_lock,irq_flags); } while(0); mutex_unlock(&sp->mutex); return ret; } unsigned int pvr2_buffer_get_count(struct pvr2_buffer *bp) { return bp->used_count; } int pvr2_buffer_get_status(struct pvr2_buffer *bp) { return bp->status; } int pvr2_buffer_get_id(struct pvr2_buffer *bp) { return bp->id; } /* Stuff for Emacs to see, in order to encourage consistent editing style: *** Local Variables: *** *** mode: c *** *** fill-column: 75 *** *** tab-width: 8 *** *** c-basic-offset: 8 *** *** End: *** */
gpl-2.0
nimengyu2/ti-arm9-linux-03.21.00.04
arch/m32r/lib/delay.c
13612
2985
/* * linux/arch/m32r/lib/delay.c * * Copyright (c) 2002 Hitoshi Yamamoto, Hirokazu Takata * Copyright (c) 2004 Hirokazu Takata */ #include <linux/param.h> #include <linux/module.h> #ifdef CONFIG_SMP #include <linux/sched.h> #include <asm/current.h> #include <asm/smp.h> #endif /* CONFIG_SMP */ #include <asm/processor.h> void __delay(unsigned long loops) { #ifdef CONFIG_ISA_DUAL_ISSUE __asm__ __volatile__ ( "beqz %0, 2f \n\t" "addi %0, #-1 \n\t" " .fillinsn \n\t" "1: \n\t" "cmpz %0 || addi %0, #-1 \n\t" "bc 2f || cmpz %0 \n\t" "bc 2f || addi %0, #-1 \n\t" "cmpz %0 || addi %0, #-1 \n\t" "bc 2f || cmpz %0 \n\t" "bnc 1b || addi %0, #-1 \n\t" " .fillinsn \n\t" "2: \n\t" : "+r" (loops) : "r" (0) : "cbit" ); #else __asm__ __volatile__ ( "beqz %0, 2f \n\t" " .fillinsn \n\t" "1: \n\t" "addi %0, #-1 \n\t" "blez %0, 2f \n\t" "addi %0, #-1 \n\t" "blez %0, 2f \n\t" "addi %0, #-1 \n\t" "blez %0, 2f \n\t" "addi %0, #-1 \n\t" "bgtz %0, 1b \n\t" " .fillinsn \n\t" "2: \n\t" : "+r" (loops) : "r" (0) ); #endif } void __const_udelay(unsigned long xloops) { #if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2) /* * loops [1] = (xloops >> 32) [sec] * loops_per_jiffy [1/jiffy] * * HZ [jiffy/sec] * = (xloops >> 32) [sec] * (loops_per_jiffy * HZ) [1/sec] * = (((xloops * loops_per_jiffy) >> 32) * HZ) [1] * * NOTE: * - '[]' depicts variable's dimension in the above equation. * - "rac" instruction rounds the accumulator in word size. 
*/ __asm__ __volatile__ ( "srli %0, #1 \n\t" "mulwhi %0, %1 ; a0 \n\t" "mulwu1 %0, %1 ; a1 \n\t" "sadd ; a0 += (a1 >> 16) \n\t" "rac a0, a0, #1 \n\t" "mvfacmi %0, a0 \n\t" : "+r" (xloops) : "r" (current_cpu_data.loops_per_jiffy) : "a0", "a1" ); #elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R) /* * u64 ull; * ull = (u64)xloops * (u64)current_cpu_data.loops_per_jiffy; * xloops = (ull >> 32); */ __asm__ __volatile__ ( "and3 r4, %0, #0xffff \n\t" "and3 r5, %1, #0xffff \n\t" "mul r4, r5 \n\t" "srl3 r6, %0, #16 \n\t" "srli r4, #16 \n\t" "mul r5, r6 \n\t" "add r4, r5 \n\t" "and3 r5, %0, #0xffff \n\t" "srl3 r6, %1, #16 \n\t" "mul r5, r6 \n\t" "add r4, r5 \n\t" "srl3 r5, %0, #16 \n\t" "srli r4, #16 \n\t" "mul r5, r6 \n\t" "add r4, r5 \n\t" "mv %0, r4 \n\t" : "+r" (xloops) : "r" (current_cpu_data.loops_per_jiffy) : "r4", "r5", "r6" ); #else #error unknown isa configuration #endif __delay(xloops * HZ); } void __udelay(unsigned long usecs) { __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */ } void __ndelay(unsigned long nsecs) { __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ } EXPORT_SYMBOL(__delay); EXPORT_SYMBOL(__const_udelay); EXPORT_SYMBOL(__udelay); EXPORT_SYMBOL(__ndelay);
gpl-2.0
jameskdev/lge-kernel-msm8960-common
arch/m68k/platform/5407/gpio.c
14892
1361
/* * Coldfire generic GPIO support * * (C) Copyright 2009, Steven King <sfking@fdwdc.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <asm/coldfire.h> #include <asm/mcfsim.h> #include <asm/mcfgpio.h> static struct mcf_gpio_chip mcf_gpio_chips[] = { { .gpio_chip = { .label = "PP", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value, .ngpio = 16, }, .pddr = (void __iomem *) MCFSIM_PADDR, .podr = (void __iomem *) MCFSIM_PADAT, .ppdr = (void __iomem *) MCFSIM_PADAT, }, }; static int __init mcf_gpio_init(void) { unsigned i = 0; while (i < ARRAY_SIZE(mcf_gpio_chips)) (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]); return 0; } core_initcall(mcf_gpio_init);
gpl-2.0
cnwzhjs/rpairmon
3rdparty/curl-7.41.0/docs/examples/ghiper.c
45
11937
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2014, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at http://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ /* Example application source code using the multi socket interface to * download many files at once. * * Written by Jeff Pohlmeyer Requires glib-2.x and a (POSIX?) system that has mkfifo(). This is an adaptation of libcurl's "hipev.c" and libevent's "event-test.c" sample programs, adapted to use glib's g_io_channel in place of libevent. When running, the program creates the named pipe "hiper.fifo" Whenever there is input into the fifo, the program reads the input as a list of URL's and creates some new easy handles to fetch each URL via the curl_multi "hiper" API. Thus, you can try a single URL: % echo http://www.yahoo.com > hiper.fifo Or a whole bunch of them: % cat my-url-list > hiper.fifo The fifo buffer is handled almost instantly, so you can even add more URL's while the previous requests are still being downloaded. This is purely a demo app, all retrieved data is simply discarded by the write callback. 
*/ #include <glib.h> #include <sys/stat.h> #include <unistd.h> #include <fcntl.h> #include <stdlib.h> #include <stdio.h> #include <errno.h> #include <curl/curl.h> #define MSG_OUT g_print /* Change to "g_error" to write to stderr */ #define SHOW_VERBOSE 0 /* Set to non-zero for libcurl messages */ #define SHOW_PROGRESS 0 /* Set to non-zero to enable progress callback */ /* Global information, common to all connections */ typedef struct _GlobalInfo { CURLM *multi; guint timer_event; int still_running; } GlobalInfo; /* Information associated with a specific easy handle */ typedef struct _ConnInfo { CURL *easy; char *url; GlobalInfo *global; char error[CURL_ERROR_SIZE]; } ConnInfo; /* Information associated with a specific socket */ typedef struct _SockInfo { curl_socket_t sockfd; CURL *easy; int action; long timeout; GIOChannel *ch; guint ev; GlobalInfo *global; } SockInfo; /* Die if we get a bad CURLMcode somewhere */ static void mcode_or_die(const char *where, CURLMcode code) { if ( CURLM_OK != code ) { const char *s; switch (code) { case CURLM_BAD_HANDLE: s="CURLM_BAD_HANDLE"; break; case CURLM_BAD_EASY_HANDLE: s="CURLM_BAD_EASY_HANDLE"; break; case CURLM_OUT_OF_MEMORY: s="CURLM_OUT_OF_MEMORY"; break; case CURLM_INTERNAL_ERROR: s="CURLM_INTERNAL_ERROR"; break; case CURLM_BAD_SOCKET: s="CURLM_BAD_SOCKET"; break; case CURLM_UNKNOWN_OPTION: s="CURLM_UNKNOWN_OPTION"; break; case CURLM_LAST: s="CURLM_LAST"; break; default: s="CURLM_unknown"; } MSG_OUT("ERROR: %s returns %s\n", where, s); exit(code); } } /* Check for completed transfers, and remove their easy handles */ static void check_multi_info(GlobalInfo *g) { char *eff_url; CURLMsg *msg; int msgs_left; ConnInfo *conn; CURL *easy; CURLcode res; MSG_OUT("REMAINING: %d\n", g->still_running); while ((msg = curl_multi_info_read(g->multi, &msgs_left))) { if (msg->msg == CURLMSG_DONE) { easy = msg->easy_handle; res = msg->data.result; curl_easy_getinfo(easy, CURLINFO_PRIVATE, &conn); curl_easy_getinfo(easy, 
CURLINFO_EFFECTIVE_URL, &eff_url); MSG_OUT("DONE: %s => (%d) %s\n", eff_url, res, conn->error); curl_multi_remove_handle(g->multi, easy); free(conn->url); curl_easy_cleanup(easy); free(conn); } } } /* Called by glib when our timeout expires */ static gboolean timer_cb(gpointer data) { GlobalInfo *g = (GlobalInfo *)data; CURLMcode rc; rc = curl_multi_socket_action(g->multi, CURL_SOCKET_TIMEOUT, 0, &g->still_running); mcode_or_die("timer_cb: curl_multi_socket_action", rc); check_multi_info(g); return FALSE; } /* Update the event timer after curl_multi library calls */ static int update_timeout_cb(CURLM *multi, long timeout_ms, void *userp) { struct timeval timeout; GlobalInfo *g=(GlobalInfo *)userp; timeout.tv_sec = timeout_ms/1000; timeout.tv_usec = (timeout_ms%1000)*1000; MSG_OUT("*** update_timeout_cb %ld => %ld:%ld ***\n", timeout_ms, timeout.tv_sec, timeout.tv_usec); g->timer_event = g_timeout_add(timeout_ms, timer_cb, g); return 0; } /* Called by glib when we get action on a multi socket */ static gboolean event_cb(GIOChannel *ch, GIOCondition condition, gpointer data) { GlobalInfo *g = (GlobalInfo*) data; CURLMcode rc; int fd=g_io_channel_unix_get_fd(ch); int action = (condition & G_IO_IN ? CURL_CSELECT_IN : 0) | (condition & G_IO_OUT ? 
CURL_CSELECT_OUT : 0); rc = curl_multi_socket_action(g->multi, fd, action, &g->still_running); mcode_or_die("event_cb: curl_multi_socket_action", rc); check_multi_info(g); if(g->still_running) { return TRUE; } else { MSG_OUT("last transfer done, kill timeout\n"); if (g->timer_event) { g_source_remove(g->timer_event); } return FALSE; } } /* Clean up the SockInfo structure */ static void remsock(SockInfo *f) { if (!f) { return; } if (f->ev) { g_source_remove(f->ev); } g_free(f); } /* Assign information to a SockInfo structure */ static void setsock(SockInfo*f, curl_socket_t s, CURL*e, int act, GlobalInfo*g) { GIOCondition kind = (act&CURL_POLL_IN?G_IO_IN:0)|(act&CURL_POLL_OUT?G_IO_OUT:0); f->sockfd = s; f->action = act; f->easy = e; if (f->ev) { g_source_remove(f->ev); } f->ev=g_io_add_watch(f->ch, kind, event_cb,g); } /* Initialize a new SockInfo structure */ static void addsock(curl_socket_t s, CURL *easy, int action, GlobalInfo *g) { SockInfo *fdp = g_malloc0(sizeof(SockInfo)); fdp->global = g; fdp->ch=g_io_channel_unix_new(s); setsock(fdp, s, easy, action, g); curl_multi_assign(g->multi, s, fdp); } /* CURLMOPT_SOCKETFUNCTION */ static int sock_cb(CURL *e, curl_socket_t s, int what, void *cbp, void *sockp) { GlobalInfo *g = (GlobalInfo*) cbp; SockInfo *fdp = (SockInfo*) sockp; static const char *whatstr[]={ "none", "IN", "OUT", "INOUT", "REMOVE" }; MSG_OUT("socket callback: s=%d e=%p what=%s ", s, e, whatstr[what]); if (what == CURL_POLL_REMOVE) { MSG_OUT("\n"); remsock(fdp); } else { if (!fdp) { MSG_OUT("Adding data: %s%s\n", what&CURL_POLL_IN?"READ":"", what&CURL_POLL_OUT?"WRITE":"" ); addsock(s, e, what, g); } else { MSG_OUT( "Changing action from %d to %d\n", fdp->action, what); setsock(fdp, s, e, what, g); } } return 0; } /* CURLOPT_WRITEFUNCTION */ static size_t write_cb(void *ptr, size_t size, size_t nmemb, void *data) { size_t realsize = size * nmemb; ConnInfo *conn = (ConnInfo*) data; (void)ptr; (void)conn; return realsize; } /* CURLOPT_PROGRESSFUNCTION 
*/ static int prog_cb (void *p, double dltotal, double dlnow, double ult, double uln) { ConnInfo *conn = (ConnInfo *)p; MSG_OUT("Progress: %s (%g/%g)\n", conn->url, dlnow, dltotal); return 0; } /* Create a new easy handle, and add it to the global curl_multi */ static void new_conn(char *url, GlobalInfo *g ) { ConnInfo *conn; CURLMcode rc; conn = g_malloc0(sizeof(ConnInfo)); conn->error[0]='\0'; conn->easy = curl_easy_init(); if (!conn->easy) { MSG_OUT("curl_easy_init() failed, exiting!\n"); exit(2); } conn->global = g; conn->url = g_strdup(url); curl_easy_setopt(conn->easy, CURLOPT_URL, conn->url); curl_easy_setopt(conn->easy, CURLOPT_WRITEFUNCTION, write_cb); curl_easy_setopt(conn->easy, CURLOPT_WRITEDATA, &conn); curl_easy_setopt(conn->easy, CURLOPT_VERBOSE, (long)SHOW_VERBOSE); curl_easy_setopt(conn->easy, CURLOPT_ERRORBUFFER, conn->error); curl_easy_setopt(conn->easy, CURLOPT_PRIVATE, conn); curl_easy_setopt(conn->easy, CURLOPT_NOPROGRESS, SHOW_PROGRESS?0L:1L); curl_easy_setopt(conn->easy, CURLOPT_PROGRESSFUNCTION, prog_cb); curl_easy_setopt(conn->easy, CURLOPT_PROGRESSDATA, conn); curl_easy_setopt(conn->easy, CURLOPT_FOLLOWLOCATION, 1L); curl_easy_setopt(conn->easy, CURLOPT_CONNECTTIMEOUT, 30L); curl_easy_setopt(conn->easy, CURLOPT_LOW_SPEED_LIMIT, 1L); curl_easy_setopt(conn->easy, CURLOPT_LOW_SPEED_TIME, 30L); MSG_OUT("Adding easy %p to multi %p (%s)\n", conn->easy, g->multi, url); rc =curl_multi_add_handle(g->multi, conn->easy); mcode_or_die("new_conn: curl_multi_add_handle", rc); /* note that the add_handle() will set a time-out to trigger very soon so that the necessary socket_action() call will be called by this app */ } /* This gets called by glib whenever data is received from the fifo */ static gboolean fifo_cb (GIOChannel *ch, GIOCondition condition, gpointer data) { #define BUF_SIZE 1024 gsize len, tp; gchar *buf, *tmp, *all=NULL; GIOStatus rv; do { GError *err=NULL; rv = g_io_channel_read_line (ch,&buf,&len,&tp,&err); if ( buf ) { if (tp) { 
buf[tp]='\0'; } new_conn(buf,(GlobalInfo*)data); g_free(buf); } else { buf = g_malloc(BUF_SIZE+1); while (TRUE) { buf[BUF_SIZE]='\0'; g_io_channel_read_chars(ch,buf,BUF_SIZE,&len,&err); if (len) { buf[len]='\0'; if (all) { tmp=all; all=g_strdup_printf("%s%s", tmp, buf); g_free(tmp); } else { all = g_strdup(buf); } } else { break; } } if (all) { new_conn(all,(GlobalInfo*)data); g_free(all); } g_free(buf); } if ( err ) { g_error("fifo_cb: %s", err->message); g_free(err); break; } } while ( (len) && (rv == G_IO_STATUS_NORMAL) ); return TRUE; } int init_fifo(void) { struct stat st; const char *fifo = "hiper.fifo"; int socket; if (lstat (fifo, &st) == 0) { if ((st.st_mode & S_IFMT) == S_IFREG) { errno = EEXIST; perror("lstat"); exit (1); } } unlink (fifo); if (mkfifo (fifo, 0600) == -1) { perror("mkfifo"); exit (1); } socket = open (fifo, O_RDWR | O_NONBLOCK, 0); if (socket == -1) { perror("open"); exit (1); } MSG_OUT("Now, pipe some URL's into > %s\n", fifo); return socket; } int main(int argc, char **argv) { GlobalInfo *g; CURLMcode rc; GMainLoop*gmain; int fd; GIOChannel* ch; g=g_malloc0(sizeof(GlobalInfo)); fd=init_fifo(); ch=g_io_channel_unix_new(fd); g_io_add_watch(ch,G_IO_IN,fifo_cb,g); gmain=g_main_loop_new(NULL,FALSE); g->multi = curl_multi_init(); curl_multi_setopt(g->multi, CURLMOPT_SOCKETFUNCTION, sock_cb); curl_multi_setopt(g->multi, CURLMOPT_SOCKETDATA, g); curl_multi_setopt(g->multi, CURLMOPT_TIMERFUNCTION, update_timeout_cb); curl_multi_setopt(g->multi, CURLMOPT_TIMERDATA, g); /* we don't call any curl_multi_socket*() function yet as we have no handles added! */ g_main_loop_run(gmain); curl_multi_cleanup(g->multi); return 0; }
gpl-2.0
davidmueller13/android_kernel_lge_msm8974
drivers/video/backlight/lm3630.c
45
18476
/* drivers/video/backlight/lm3630_bl.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/backlight.h> #include <linux/fb.h> #include <linux/delay.h> #include <linux/gpio.h> #include <mach/board.h> #include <linux/i2c.h> #include <linux/of_gpio.h> #include <mach/board_lge.h> #include <linux/earlysuspend.h> #define I2C_BL_NAME "lm3630" #define MAX_BRIGHTNESS_LM3630 0xFF #define DEFAULT_BRIGHTNESS 0xFF #define BL_ON 1 #define BL_OFF 0 #if defined(CONFIG_B1_LGD_PANEL) #define PWM_THRESHOLD 165 /* UI bar 56 % */ #define PWM_OFF 0 #define PWM_ON 1 #elif defined(CONFIG_G2_LGD_PANEL) #define PWM_THRESHOLD 135 /* UI bar 41 % */ #define PWM_OFF 0 #define PWM_ON 1 #endif static struct i2c_client *lm3630_i2c_client; static int store_level_used = 0; #if defined(CONFIG_B1_LGD_PANEL) static int factory_boot = 0; #endif struct backlight_platform_data { void (*platform_init)(void); int gpio; unsigned int mode; int max_current; int init_on_boot; int min_brightness; int max_brightness; int default_brightness; int factory_brightness; int blmap_size; char *blmap; }; struct lm3630_device { struct i2c_client *client; struct backlight_device *bl_dev; int gpio; int max_current; int min_brightness; 
int max_brightness; int factory_brightness; struct mutex bl_mutex; int blmap_size; char *blmap; }; static const struct i2c_device_id lm3630_bl_id[] = { { I2C_BL_NAME, 0 }, { }, }; #if defined(CONFIG_BACKLIGHT_CABC_DEBUG_ENABLE) static int lm3630_read_reg(struct i2c_client *client, u8 reg, u8 *buf); #endif static int lm3630_write_reg(struct i2c_client *client, unsigned char reg, unsigned char val); static int cur_main_lcd_level = DEFAULT_BRIGHTNESS; static int saved_main_lcd_level = DEFAULT_BRIGHTNESS; #if defined(CONFIG_MACH_LGE) int backlight_status = BL_OFF; #else static int backlight_status = BL_OFF; #endif static int lm3630_pwm_enable; static struct lm3630_device *main_lm3630_dev; #ifdef CONFIG_LGE_WIRELESS_CHARGER int wireless_backlight_state(void) { return backlight_status; } EXPORT_SYMBOL(wireless_backlight_state); #endif #if defined(CONFIG_G2_LGD_PANEL) || defined(CONFIG_B1_LGD_PANEL) static void bl_set_pwm_mode(int mode) { if (mode) lm3630_write_reg(main_lm3630_dev->client, 0x01, 0x09); else lm3630_write_reg(main_lm3630_dev->client, 0x01, 0x08); } #endif static void lm3630_hw_reset(void) { int gpio = main_lm3630_dev->gpio; /* LGE_CHANGE * Fix GPIO Setting Warning * 2011. 12. 
14, kyunghoo.ryu@lge.com */ if (gpio_is_valid(gpio)) { gpio_direction_output(gpio, 1); gpio_set_value_cansleep(gpio, 1); mdelay(10); } else pr_err("%s: gpio is not valid !!\n", __func__); } #if defined(CONFIG_BACKLIGHT_CABC_DEBUG_ENABLE) static int lm3630_read_reg(struct i2c_client *client, u8 reg, u8 *buf) { s32 ret; pr_debug("reg: %x\n", reg); ret = i2c_smbus_read_byte_data(client, reg); if (ret < 0) pr_err("%s read register error\n", __func__); *buf = ret; return 0; } #endif static int lm3630_write_reg(struct i2c_client *client, unsigned char reg, unsigned char val) { int err; u8 buf[2]; struct i2c_msg msg = { client->addr, 0, 2, buf }; buf[0] = reg; buf[1] = val; err = i2c_transfer(client->adapter, &msg, 1); if (err < 0) dev_err(&client->dev, "i2c write error\n"); return 0; } static int exp_min_value = 150; static int cal_value; static void lm3630_set_main_current_level(struct i2c_client *client, int level) { struct lm3630_device *dev = i2c_get_clientdata(client); int min_brightness = dev->min_brightness; int max_brightness = dev->max_brightness; cur_main_lcd_level = level; dev->bl_dev->props.brightness = cur_main_lcd_level; store_level_used = 0; mutex_lock(&dev->bl_mutex); #if defined(CONFIG_B1_LGD_PANEL) if(factory_boot) level = min_brightness; #endif #if defined(CONFIG_G2_LGD_PANEL) || defined(CONFIG_B1_LGD_PANEL) if (level < PWM_THRESHOLD) bl_set_pwm_mode(PWM_OFF); else bl_set_pwm_mode(PWM_ON); #endif if (level != 0) { if (level > 0 && level <= min_brightness) level = min_brightness; else if (level > max_brightness) level = max_brightness; if (dev->blmap) { if (level < dev->blmap_size) { cal_value = dev->blmap[level]; lm3630_write_reg(client, 0x03, cal_value); } else dev_warn(&client->dev, "invalid index %d:%d\n", dev->blmap_size, level); } else { cal_value = level; lm3630_write_reg(client, 0x03, cal_value); } } else lm3630_write_reg(client, 0x00, 0x00); mutex_unlock(&dev->bl_mutex); pr_info("%s : backlight level=%d, cal_value=%d \n", __func__, level, 
cal_value); } static void lm3630_set_main_current_level_no_mapping( struct i2c_client *client, int level) { struct lm3630_device *dev; dev = (struct lm3630_device *)i2c_get_clientdata(client); if (level > 255) level = 255; else if (level < 0) level = 0; cur_main_lcd_level = level; dev->bl_dev->props.brightness = cur_main_lcd_level; store_level_used = 1; mutex_lock(&main_lm3630_dev->bl_mutex); if (level != 0) { lm3630_write_reg(client, 0x03, level); } else { lm3630_write_reg(client, 0x00, 0x00); } mutex_unlock(&main_lm3630_dev->bl_mutex); } void lm3630_backlight_on(int level) { if (backlight_status == BL_OFF) { pr_info("%s : level = %d\n", __func__, level); #if defined(CONFIG_B1_LGD_PANEL) mdelay(30); #elif defined(CONFIG_G2_LGD_PANEL) mdelay(15); #endif lm3630_hw_reset(); /* OVP(24V),OCP(1.0A) , Boost Frequency(500khz) */ lm3630_write_reg(main_lm3630_dev->client, 0x02, 0x30); if (lm3630_pwm_enable) { /* eble Feedback , disable PWM for BANK A,B */ lm3630_write_reg(main_lm3630_dev->client, 0x01, 0x09); } else { /* eble Feedback , disable PWM for BANK A,B */ lm3630_write_reg(main_lm3630_dev->client, 0x01, 0x08); } /* Brightness Code Setting Max on Bank A */ /* Full-Scale Current (20.2mA) of BANK A */ /* 20.2mA : 0x13 , 23.4mA : 0x17 */ lm3630_write_reg(main_lm3630_dev->client, 0x05, 0x16); /* Enable LED A to Exponential, LED2 is connected to BANK_A */ lm3630_write_reg(main_lm3630_dev->client, 0x00, 0x15); } mdelay(1); lm3630_set_main_current_level(main_lm3630_dev->client, level); backlight_status = BL_ON; return; } void lm3630_backlight_off(void) { int gpio = main_lm3630_dev->gpio; if (backlight_status == BL_OFF) return; saved_main_lcd_level = cur_main_lcd_level; lm3630_set_main_current_level(main_lm3630_dev->client, 0); backlight_status = BL_OFF; pr_info("%s\n", __func__); gpio_direction_output(gpio, 0); msleep(6); return; } void lm3630_lcd_backlight_set_level(int level) { if (level > MAX_BRIGHTNESS_LM3630) level = MAX_BRIGHTNESS_LM3630; if (lm3630_i2c_client != 
NULL) { if (level == 0) { lm3630_backlight_off(); } else { lm3630_backlight_on(level); } } else { pr_err("%s(): No client\n", __func__); } } EXPORT_SYMBOL(lm3630_lcd_backlight_set_level); static int bl_set_intensity(struct backlight_device *bd) { #if defined(CONFIG_MACH_LGE_BACKLIGHT_SUPPORT) lm3630_lcd_backlight_set_level(bd->props.brightness); #else struct i2c_client *client = to_i2c_client(bd->dev.parent); /* LGE_CHANGE * if it's trying to set same backlight value, * skip it. * 2013-02-15, baryun.hwang@lge.com */ if (bd->props.brightness == cur_main_lcd_level) { pr_debug("%s level is already set. skip it\n", __func__); return 0; } lm3630_set_main_current_level(client, bd->props.brightness); cur_main_lcd_level = bd->props.brightness; #endif return 0; } static int bl_get_intensity(struct backlight_device *bd) { unsigned char val = 0; val &= 0x1f; return (int)val; } static ssize_t lcd_backlight_show_level(struct device *dev, struct device_attribute *attr, char *buf) { int r = 0; if (store_level_used == 0) r = snprintf(buf, PAGE_SIZE, "LCD Backlight Level is : %d\n", cal_value); else if (store_level_used == 1) r = snprintf(buf, PAGE_SIZE, "LCD Backlight Level is : %d\n", cur_main_lcd_level); return r; } static ssize_t lcd_backlight_store_level(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int level; struct i2c_client *client = to_i2c_client(dev); if (!count) return -EINVAL; level = simple_strtoul(buf, NULL, 10); lm3630_set_main_current_level_no_mapping(client, level); pr_info("write %d direct to " "backlight register\n", level); return count; } static int lm3630_bl_resume(struct i2c_client *client) { lm3630_lcd_backlight_set_level(saved_main_lcd_level); return 0; } static int lm3630_bl_suspend(struct i2c_client *client, pm_message_t state) { pr_info("%s: new state: %d\n", __func__, state.event); lm3630_lcd_backlight_set_level(saved_main_lcd_level); return 0; } static ssize_t lcd_backlight_show_on_off(struct device *dev, struct 
device_attribute *attr, char *buf) { int r = 0; pr_info("%s received (prev backlight_status: %s)\n", __func__, backlight_status ? "ON" : "OFF"); return r; } static ssize_t lcd_backlight_store_on_off(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int on_off; struct i2c_client *client = to_i2c_client(dev); if (!count) return -EINVAL; pr_info("%s received (prev backlight_status: %s)\n", __func__, backlight_status ? "ON" : "OFF"); on_off = simple_strtoul(buf, NULL, 10); pr_info(" %d", on_off); if (on_off == 1) lm3630_bl_resume(client); else if (on_off == 0) lm3630_bl_suspend(client, PMSG_SUSPEND); return count; } static ssize_t lcd_backlight_show_exp_min_value(struct device *dev, struct device_attribute *attr, char *buf) { int r; r = snprintf(buf, PAGE_SIZE, "LCD Backlight : %d\n", exp_min_value); return r; } static ssize_t lcd_backlight_store_exp_min_value(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int value; if (!count) return -EINVAL; value = simple_strtoul(buf, NULL, 10); exp_min_value = value; return count; } #if defined(CONFIG_BACKLIGHT_CABC_DEBUG_ENABLE) static ssize_t lcd_backlight_show_pwm(struct device *dev, struct device_attribute *attr, char *buf) { int r; u8 level, pwm_low, pwm_high, config; mutex_lock(&main_lm3630_dev->bl_mutex); lm3630_read_reg(main_lm3630_dev->client, 0x01, &config); mdelay(3); lm3630_read_reg(main_lm3630_dev->client, 0x03, &level); mdelay(3); lm3630_read_reg(main_lm3630_dev->client, 0x12, &pwm_low); mdelay(3); lm3630_read_reg(main_lm3630_dev->client, 0x13, &pwm_high); mdelay(3); mutex_unlock(&main_lm3630_dev->bl_mutex); r = snprintf(buf, PAGE_SIZE, "Show PWM level: %d pwm_low: %d " "pwm_high: %d config: %d\n", level, pwm_low, pwm_high, config); return r; } static ssize_t lcd_backlight_store_pwm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return count; } #endif DEVICE_ATTR(lm3630_level, 0644, lcd_backlight_show_level, 
lcd_backlight_store_level); DEVICE_ATTR(lm3630_backlight_on_off, 0644, lcd_backlight_show_on_off, lcd_backlight_store_on_off); DEVICE_ATTR(lm3630_exp_min_value, 0644, lcd_backlight_show_exp_min_value, lcd_backlight_store_exp_min_value); #if defined(CONFIG_BACKLIGHT_CABC_DEBUG_ENABLE) DEVICE_ATTR(lm3630_pwm, 0644, lcd_backlight_show_pwm, lcd_backlight_store_pwm); #endif #ifdef CONFIG_OF static int lm3630_parse_dt(struct device *dev, struct backlight_platform_data *pdata) { int rc = 0, i; u32 *array; struct device_node *np = dev->of_node; pdata->gpio = of_get_named_gpio_flags(np, "lm3630,lcd_bl_en", 0, NULL); rc = of_property_read_u32(np, "lm3630,max_current", &pdata->max_current); rc = of_property_read_u32(np, "lm3630,min_brightness", &pdata->min_brightness); rc = of_property_read_u32(np, "lm3630,default_brightness", &pdata->default_brightness); rc = of_property_read_u32(np, "lm3630,max_brightness", &pdata->max_brightness); rc = of_property_read_u32(np, "lm3630,enable_pwm", &lm3630_pwm_enable); if (rc == -EINVAL) lm3630_pwm_enable = 1; rc = of_property_read_u32(np, "lm3630,blmap_size", &pdata->blmap_size); if (pdata->blmap_size) { array = kzalloc(sizeof(u32) * pdata->blmap_size, GFP_KERNEL); pr_info("%s : backlight parse dt3\n", __func__); if (!array) return -ENOMEM; rc = of_property_read_u32_array(np, "lm3630,blmap", array, pdata->blmap_size); if (rc) { pr_err("%s:%d, uable to read backlight map\n", __func__, __LINE__); return -EINVAL; } pdata->blmap = kzalloc(sizeof(char) * pdata->blmap_size, GFP_KERNEL); if (!pdata->blmap) return -ENOMEM; for (i = 0; i < pdata->blmap_size; i++) pdata->blmap[i] = (char)array[i]; if (array) kfree(array); } else { pdata->blmap = NULL; } pr_info("%s gpio: %d, max_current: %d, min: %d, " "default: %d, max: %d, pwm : %d , blmap_size : %d\n", __func__, pdata->gpio, pdata->max_current, pdata->min_brightness, pdata->default_brightness, pdata->max_brightness, lm3630_pwm_enable, pdata->blmap_size); return rc; } #endif static struct 
backlight_ops lm3630_bl_ops = { .update_status = bl_set_intensity, .get_brightness = bl_get_intensity, }; static int lm3630_probe(struct i2c_client *i2c_dev, const struct i2c_device_id *id) { struct backlight_platform_data *pdata; struct lm3630_device *dev; struct backlight_device *bl_dev; struct backlight_properties props; int err; pr_info(" %s: i2c probe start\n", __func__); #ifdef CONFIG_OF if (&i2c_dev->dev.of_node) { pdata = devm_kzalloc(&i2c_dev->dev, sizeof(struct backlight_platform_data), GFP_KERNEL); if (!pdata) { pr_err("%s: Failed to allocate memory\n", __func__); return -ENOMEM; } err = lm3630_parse_dt(&i2c_dev->dev, pdata); if (err != 0) return err; } else { pdata = i2c_dev->dev.platform_data; } #else pdata = i2c_dev->dev.platform_data; #endif pr_info("%s: gpio = %d\n", __func__, pdata->gpio); if (pdata->gpio && gpio_request(pdata->gpio, "lm3630 reset") != 0) { return -ENODEV; } lm3630_i2c_client = i2c_dev; dev = kzalloc(sizeof(struct lm3630_device), GFP_KERNEL); if (dev == NULL) { dev_err(&i2c_dev->dev, "fail alloc for lm3630_device\n"); return 0; } main_lm3630_dev = dev; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = MAX_BRIGHTNESS_LM3630; bl_dev = backlight_device_register(I2C_BL_NAME, &i2c_dev->dev, NULL, &lm3630_bl_ops, &props); bl_dev->props.max_brightness = MAX_BRIGHTNESS_LM3630; #if defined(CONFIG_B1_LGD_PANEL) if(lge_get_boot_mode() == LGE_BOOT_MODE_CHARGERLOGO) bl_dev->props.brightness = 0x99; // same to LK else if(lge_get_boot_mode() == LGE_BOOT_MODE_FACTORY || lge_get_boot_mode() == LGE_BOOT_MODE_FACTORY2 || lge_get_boot_mode() == LGE_BOOT_MODE_FACTORY3 || lge_get_boot_mode() == LGE_BOOT_MODE_PIFBOOT || lge_get_boot_mode() == LGE_BOOT_MODE_PIFBOOT2 || lge_get_boot_mode() == LGE_BOOT_MODE_PIFBOOT3) factory_boot = 1; else bl_dev->props.brightness = DEFAULT_BRIGHTNESS; #else bl_dev->props.brightness = pdata->default_brightness; #endif bl_dev->props.power = FB_BLANK_UNBLANK; 
dev->bl_dev = bl_dev; dev->client = i2c_dev; dev->gpio = pdata->gpio; dev->max_current = pdata->max_current; dev->min_brightness = pdata->min_brightness; dev->max_brightness = pdata->max_brightness; dev->blmap_size = pdata->blmap_size; if (dev->blmap_size) { dev->blmap = kzalloc(sizeof(char) * dev->blmap_size, GFP_KERNEL); if (!dev->blmap) { pr_err("%s: Failed to allocate memory\n", __func__); return -ENOMEM; } memcpy(dev->blmap, pdata->blmap, dev->blmap_size); } else { dev->blmap = NULL; } #ifdef CONFIG_LGE_LCD_OFF_DIMMING if ((lge_get_bootreason() == 0x77665560) || (lge_get_bootreason() == 0x77665561)) { dev->bl_dev->props.brightness = 50; pr_info("%s : fota reboot - backlight set 50\n", __func__); } #endif if (gpio_get_value(dev->gpio)) backlight_status = BL_ON; else backlight_status = BL_OFF; i2c_set_clientdata(i2c_dev, dev); mutex_init(&dev->bl_mutex); err = device_create_file(&i2c_dev->dev, &dev_attr_lm3630_level); err = device_create_file(&i2c_dev->dev, &dev_attr_lm3630_backlight_on_off); err = device_create_file(&i2c_dev->dev, &dev_attr_lm3630_exp_min_value); #if defined(CONFIG_BACKLIGHT_CABC_DEBUG_ENABLE) err = device_create_file(&i2c_dev->dev, &dev_attr_lm3630_pwm); #endif pr_info("%s: i2c probe done\n", __func__); return 0; } static int lm3630_remove(struct i2c_client *i2c_dev) { struct lm3630_device *dev; int gpio = main_lm3630_dev->gpio; device_remove_file(&i2c_dev->dev, &dev_attr_lm3630_level); device_remove_file(&i2c_dev->dev, &dev_attr_lm3630_backlight_on_off); dev = (struct lm3630_device *)i2c_get_clientdata(i2c_dev); backlight_device_unregister(dev->bl_dev); i2c_set_clientdata(i2c_dev, NULL); if (gpio_is_valid(gpio)) gpio_free(gpio); return 0; } #ifdef CONFIG_OF static struct of_device_id lm3630_match_table[] = { { .compatible = "backlight,lm3630",}, { }, }; #endif static struct i2c_driver main_lm3630_driver = { .probe = lm3630_probe, .remove = lm3630_remove, .suspend = NULL, .resume = NULL, .id_table = lm3630_bl_id, .driver = { .name = 
I2C_BL_NAME, .owner = THIS_MODULE, #ifdef CONFIG_OF .of_match_table = lm3630_match_table, #endif }, }; static int __init lcd_backlight_init(void) { static int err; err = i2c_add_driver(&main_lm3630_driver); pr_info("%s : backlight init\n", __func__); return err; } module_init(lcd_backlight_init); MODULE_DESCRIPTION("LM3630 Backlight Control"); MODULE_AUTHOR("daewoo kwak"); MODULE_LICENSE("GPL");
gpl-2.0
pinkflozd/android_kernel_motorola_falcon
fs/sync.c
45
15106
/* * High-level sync()-related operations */ #include <linux/kernel.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/namei.h> #include <linux/sched.h> #include <linux/writeback.h> #include <linux/syscalls.h> #include <linux/linkage.h> #include <linux/pagemap.h> #include <linux/quotaops.h> #include <linux/backing-dev.h> #include "internal.h" #ifdef CONFIG_ASYNC_FSYNC #include <linux/statfs.h> #endif #ifdef CONFIG_DYNAMIC_FSYNC extern bool power_suspend_active; extern bool dyn_fsync_active; #endif #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \ SYNC_FILE_RANGE_WAIT_AFTER) #ifdef CONFIG_ASYNC_FSYNC #define FLAG_ASYNC_FSYNC 0x1 static struct workqueue_struct *fsync_workqueue = NULL; struct fsync_work { struct work_struct work; char pathname[256]; }; #endif /* * Do the filesystem syncing work. For simple filesystems * writeback_inodes_sb(sb) just dirties buffers with inodes so we have to * submit IO for these buffers via __sync_blockdev(). This also speeds up the * wait == 1 case since in that case write_inode() functions do * sync_dirty_buffer() and thus effectively write one block at a time. */ static int __sync_filesystem(struct super_block *sb, int wait) { /* * This should be safe, as we require bdi backing to actually * write out data in the first place */ if (sb->s_bdi == &noop_backing_dev_info) return 0; if (sb->s_qcop && sb->s_qcop->quota_sync) sb->s_qcop->quota_sync(sb, -1, wait); if (wait) sync_inodes_sb(sb); else writeback_inodes_sb(sb, WB_REASON_SYNC); if (sb->s_op->sync_fs) sb->s_op->sync_fs(sb, wait); return __sync_blockdev(sb->s_bdev, wait); } /* * Write out and wait upon all dirty data associated with this * superblock. Filesystem data as well as the underlying block * device. Takes the superblock lock. */ int sync_filesystem(struct super_block *sb) { int ret; /* * We need to be protected against the filesystem going from * r/o to r/w or vice versa. 
*/ WARN_ON(!rwsem_is_locked(&sb->s_umount)); /* * No point in syncing out anything if the filesystem is read-only. */ if (sb->s_flags & MS_RDONLY) return 0; ret = __sync_filesystem(sb, 0); if (ret < 0) return ret; return __sync_filesystem(sb, 1); } EXPORT_SYMBOL_GPL(sync_filesystem); static void sync_one_sb(struct super_block *sb, void *arg) { if (!(sb->s_flags & MS_RDONLY)) __sync_filesystem(sb, *(int *)arg); } /* * Sync all the data for all the filesystems (called by sys_sync() and * emergency sync) */ #ifndef CONFIG_DYNAMIC_FSYNC static #endif void sync_filesystems(int wait) { iterate_supers(sync_one_sb, &wait); } #ifdef CONFIG_DYNAMIC_FSYNC EXPORT_SYMBOL_GPL(sync_filesystems); #endif /* * sync everything. Start out by waking pdflush, because that writes back * all queues in parallel. */ static void do_sync(void) { wakeup_flusher_threads(0, WB_REASON_SYNC); sync_filesystems(0); sync_filesystems(1); if (unlikely(laptop_mode)) laptop_sync_completion(); return; } static DEFINE_MUTEX(sync_mutex); /* One do_sync() at a time. */ static unsigned long sync_seq; /* Many sync()s from one do_sync(). */ /* Overflow harmless, extra wait. */ /* * Only allow one task to do sync() at a time, and further allow * concurrent sync() calls to be satisfied by a single do_sync() * invocation. */ SYSCALL_DEFINE0(sync) { unsigned long snap; unsigned long snap_done; snap = ACCESS_ONCE(sync_seq); smp_mb(); /* Prevent above from bleeding into critical section. */ mutex_lock(&sync_mutex); snap_done = sync_seq; /* * If the value in snap is odd, we need to wait for the current * do_sync() to complete, then wait for the next one, in other * words, we need the value of snap_done to be three larger than * the value of snap. On the other hand, if the value in snap is * even, we only have to wait for the next request to complete, * in other words, we need the value of snap_done to be only two * greater than the value of snap. The "(snap + 3) & 0x1" computes * this for us (thank you, Linus!). 
*/ if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) { /* * A full do_sync() executed between our two fetches from * sync_seq, so our work is done! */ smp_mb(); /* Order test with caller's subsequent code. */ mutex_unlock(&sync_mutex); return 0; } /* Record the start of do_sync(). */ ACCESS_ONCE(sync_seq)++; WARN_ON_ONCE((sync_seq & 0x1) != 1); smp_mb(); /* Keep prior increment out of do_sync(). */ do_sync(); /* Record the end of do_sync(). */ smp_mb(); /* Keep subsequent increment out of do_sync(). */ ACCESS_ONCE(sync_seq)++; WARN_ON_ONCE((sync_seq & 0x1) != 0); mutex_unlock(&sync_mutex); return 0; } static void do_sync_work(struct work_struct *work) { /* * Sync twice to reduce the possibility we skipped some inodes / pages * because they were temporarily locked */ sync_filesystems(0); sync_filesystems(0); printk("Emergency Sync complete\n"); kfree(work); } void emergency_sync(void) { struct work_struct *work; work = kmalloc(sizeof(*work), GFP_ATOMIC); if (work) { INIT_WORK(work, do_sync_work); schedule_work(work); } } /* * sync a single super */ SYSCALL_DEFINE1(syncfs, int, fd) { struct file *file; struct super_block *sb; int ret; int fput_needed; file = fget_light(fd, &fput_needed); if (!file) return -EBADF; sb = file->f_dentry->d_sb; down_read(&sb->s_umount); ret = sync_filesystem(sb); up_read(&sb->s_umount); fput_light(file, fput_needed); return ret; } /** * vfs_fsync_range - helper to sync a range of data & metadata to disk * @file: file to sync * @start: offset in bytes of the beginning of data range to sync * @end: offset in bytes of the end of data range (inclusive) * @datasync: perform only datasync * * Write back data in range @start..@end and metadata for @file to disk. If * @datasync is set only metadata needed to access modified file data is * written. 
*/ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) { #ifdef CONFIG_DYNAMIC_FSYNC if (likely(dyn_fsync_active && !power_suspend_active)) return 0; else { #endif if (!file->f_op || !file->f_op->fsync) return -EINVAL; return file->f_op->fsync(file, start, end, datasync); #ifdef CONFIG_DYNAMIC_FSYNC } #endif } EXPORT_SYMBOL(vfs_fsync_range); /** * vfs_fsync - perform a fsync or fdatasync on a file * @file: file to sync * @datasync: only perform a fdatasync operation * * Write back data and metadata for @file to disk. If @datasync is * set only metadata needed to access modified file data is written. */ int vfs_fsync(struct file *file, int datasync) { return vfs_fsync_range(file, 0, LLONG_MAX, datasync); } EXPORT_SYMBOL(vfs_fsync); #ifdef CONFIG_ASYNC_FSYNC extern int emmc_perf_degr(void); #define LOW_STORAGE_THRESHOLD 786432 int async_fsync(struct file *file, int fd) { struct inode *inode = file->f_mapping->host; struct super_block *sb = inode->i_sb; struct kstatfs st; if ((sb->fsync_flags & FLAG_ASYNC_FSYNC) == 0) return 0; if (!emmc_perf_degr()) return 0; if (fd_statfs(fd, &st)) return 0; if (st.f_bfree > LOW_STORAGE_THRESHOLD) return 0; return 1; } static int do_async_fsync(char *pathname) { struct file *file; int ret; file = filp_open(pathname, O_RDWR, 0); if (IS_ERR(file)) { pr_debug("%s: can't open %s\n", __func__, pathname); return -EBADF; } ret = vfs_fsync(file, 0); filp_close(file, NULL); return ret; } static void do_afsync_work(struct work_struct *work) { struct fsync_work *fwork = container_of(work, struct fsync_work, work); int ret = -EBADF; pr_debug("afsync: %s\n", fwork->pathname); ret = do_async_fsync(fwork->pathname); if (ret != 0 && ret != -EBADF) pr_info("afsync return %d\n", ret); else pr_debug("afsync: %s done\n", fwork->pathname); kfree(fwork); } #endif static int do_fsync(unsigned int fd, int datasync) { struct file *file; int ret = -EBADF; #ifdef CONFIG_ASYNC_FSYNC struct fsync_work *fwork; #endif file = fget(fd); 
if (file) { ktime_t fsync_t, fsync_diff; char pathname[256], *path; path = d_path(&(file->f_path), pathname, sizeof(pathname)); if (IS_ERR(path)) path = "(unknown)"; #ifdef CONFIG_ASYNC_FSYNC else if (async_fsync(file, fd)) { if (!fsync_workqueue) fsync_workqueue = create_singlethread_workqueue("fsync"); if (!fsync_workqueue) goto no_async; if (IS_ERR(path)) goto no_async; fwork = kmalloc(sizeof(*fwork), GFP_KERNEL); if (fwork) { strncpy(fwork->pathname, path, sizeof(fwork->pathname) - 1); INIT_WORK(&fwork->work, do_afsync_work); queue_work(fsync_workqueue, &fwork->work); fput(file); return 0; } } no_async: #endif fsync_t = ktime_get(); ret = vfs_fsync(file, datasync); fput(file); fsync_diff = ktime_sub(ktime_get(), fsync_t); if (ktime_to_ms(fsync_diff) >= 5000) { pr_info("VFS: %s pid:%d(%s)(parent:%d/%s)\ takes %lld ms to fsync %s.\n", __func__, current->pid, current->comm, current->parent->pid, current->parent->comm, ktime_to_ms(fsync_diff), path); } } return ret; } SYSCALL_DEFINE1(fsync, unsigned int, fd) { #ifdef CONFIG_DYNAMIC_FSYNC if (likely(dyn_fsync_active && !power_suspend_active)) return 0; else #endif return do_fsync(fd, 0); } SYSCALL_DEFINE1(fdatasync, unsigned int, fd) { #if 0 if (likely(dyn_fsync_active && !power_suspend_active)) return 0; else #endif return do_fsync(fd, 1); } /** * generic_write_sync - perform syncing after a write if file / inode is sync * @file: file to which the write happened * @pos: offset where the write started * @count: length of the write * * This is just a simple wrapper about our general syncing function. */ int generic_write_sync(struct file *file, loff_t pos, loff_t count) { if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host)) return 0; return vfs_fsync_range(file, pos, pos + count - 1, (file->f_flags & __O_SYNC) ? 0 : 1); } EXPORT_SYMBOL(generic_write_sync); /* * sys_sync_file_range() permits finely controlled syncing over a segment of * a file in the range offset .. (offset+nbytes-1) inclusive. 
If nbytes is * zero then sys_sync_file_range() will operate from offset out to EOF. * * The flag bits are: * * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range * before performing the write. * * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the * range which are not presently under writeback. Note that this may block for * significant periods due to exhaustion of disk request structures. * * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range * after performing the write. * * Useful combinations of the flag bits are: * * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages * in the range which were dirty on entry to sys_sync_file_range() are placed * under writeout. This is a start-write-for-data-integrity operation. * * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which * are not presently under writeout. This is an asynchronous flush-to-disk * operation. Not suitable for data integrity operations. * * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for * completion of writeout of all pages in the range. This will be used after an * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait * for that operation to complete and to return the result. * * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER: * a traditional sync() operation. This is a write-for-data-integrity operation * which will ensure that all pages in the range which were dirty on entry to * sys_sync_file_range() are committed to disk. * * * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any * I/O errors or ENOSPC conditions and will return those to the caller, after * clearing the EIO and ENOSPC flags in the address_space. * * It should be noted that none of these operations write out the file's * metadata. 
So unless the application is strictly performing overwrites of * already-instantiated disk blocks, there are no guarantees here that the data * will be available after a crash. */ SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes, unsigned int flags) { #ifdef CONFIG_DYNAMIC_FSYNC if (likely(dyn_fsync_active && !power_suspend_active)) return 0; else { #endif int ret; struct file *file; struct address_space *mapping; loff_t endbyte; /* inclusive */ int fput_needed; umode_t i_mode; ret = -EINVAL; if (flags & ~VALID_FLAGS) goto out; endbyte = offset + nbytes; if ((s64)offset < 0) goto out; if ((s64)endbyte < 0) goto out; if (endbyte < offset) goto out; if (sizeof(pgoff_t) == 4) { if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) { /* * The range starts outside a 32 bit machine's * pagecache addressing capabilities. Let it "succeed" */ ret = 0; goto out; } if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) { /* * Out to EOF */ nbytes = 0; } } if (nbytes == 0) endbyte = LLONG_MAX; else endbyte--; /* inclusive */ ret = -EBADF; file = fget_light(fd, &fput_needed); if (!file) goto out; i_mode = file->f_path.dentry->d_inode->i_mode; ret = -ESPIPE; if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) && !S_ISLNK(i_mode)) goto out_put; mapping = file->f_mapping; if (!mapping) { ret = -EINVAL; goto out_put; } ret = 0; if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) { ret = filemap_fdatawait_range(mapping, offset, endbyte); if (ret < 0) goto out_put; } if (flags & SYNC_FILE_RANGE_WRITE) { ret = filemap_fdatawrite_range(mapping, offset, endbyte); if (ret < 0) goto out_put; } if (flags & SYNC_FILE_RANGE_WAIT_AFTER) ret = filemap_fdatawait_range(mapping, offset, endbyte); out_put: fput_light(file, fput_needed); out: return ret; #ifdef CONFIG_DYNAMIC_FSYNC } #endif } #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS asmlinkage long SyS_sync_file_range(long fd, loff_t offset, loff_t nbytes, long flags) { return SYSC_sync_file_range((int) fd, offset, nbytes, (unsigned int) 
flags); } SYSCALL_ALIAS(sys_sync_file_range, SyS_sync_file_range); #endif /* It would be nice if people remember that not all the world's an i386 when they introduce new system calls */ SYSCALL_DEFINE(sync_file_range2)(int fd, unsigned int flags, loff_t offset, loff_t nbytes) { #ifdef CONFIG_DYNAMIC_FSYNC if (likely(dyn_fsync_active && !power_suspend_active)) return 0; else #endif return sys_sync_file_range(fd, offset, nbytes, flags); } #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS asmlinkage long SyS_sync_file_range2(long fd, long flags, loff_t offset, loff_t nbytes) { return SYSC_sync_file_range2((int) fd, (unsigned int) flags, offset, nbytes); } SYSCALL_ALIAS(sys_sync_file_range2, SyS_sync_file_range2); #endif
gpl-2.0
bw-oss/linux
arch/mips/alchemy/common/clock.c
301
27414
/* * Alchemy clocks. * * Exposes all configurable internal clock sources to the clk framework. * * We have: * - Root source, usually 12MHz supplied by an external crystal * - 3 PLLs which generate multiples of root rate [AUX, CPU, AUX2] * * Dividers: * - 6 clock dividers with: * * selectable source [one of the PLLs], * * output divided between [2 .. 512 in steps of 2] (!Au1300) * or [1 .. 256 in steps of 1] (Au1300), * * can be enabled individually. * * - up to 6 "internal" (fixed) consumers which: * * take either AUXPLL or one of the above 6 dividers as input, * * divide this input by 1, 2, or 4 (and 3 on Au1300). * * can be disabled separately. * * Misc clocks: * - sysbus clock: CPU core clock (CPUPLL) divided by 2, 3 or 4. * depends on board design and should be set by bootloader, read-only. * - peripheral clock: half the rate of sysbus clock, source for a lot * of peripheral blocks, read-only. * - memory clock: clk rate to main memory chips, depends on board * design and is read-only, * - lrclk: the static bus clock signal for synchronous operation. * depends on board design, must be set by bootloader, * but may be required to correctly configure devices attached to * the static bus. The Au1000/1500/1100 manuals call it LCLK, on * later models it's called RCLK. */ #include <linux/init.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/clkdev.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/types.h> #include <asm/mach-au1x00/au1000.h> /* Base clock: 12MHz is the default in all databooks, and I haven't * found any board yet which uses a different rate. */ #define ALCHEMY_ROOTCLK_RATE 12000000 /* * the internal sources which can be driven by the PLLs and dividers. * Names taken from the databooks, refer to them for more information, * especially which ones are share a clock line. 
*/ static const char * const alchemy_au1300_intclknames[] = { "lcd_intclk", "gpemgp_clk", "maempe_clk", "maebsa_clk", "EXTCLK0", "EXTCLK1" }; static const char * const alchemy_au1200_intclknames[] = { "lcd_intclk", NULL, NULL, NULL, "EXTCLK0", "EXTCLK1" }; static const char * const alchemy_au1550_intclknames[] = { "usb_clk", "psc0_intclk", "psc1_intclk", "pci_clko", "EXTCLK0", "EXTCLK1" }; static const char * const alchemy_au1100_intclknames[] = { "usb_clk", "lcd_intclk", NULL, "i2s_clk", "EXTCLK0", "EXTCLK1" }; static const char * const alchemy_au1500_intclknames[] = { NULL, "usbd_clk", "usbh_clk", "pci_clko", "EXTCLK0", "EXTCLK1" }; static const char * const alchemy_au1000_intclknames[] = { "irda_clk", "usbd_clk", "usbh_clk", "i2s_clk", "EXTCLK0", "EXTCLK1" }; /* aliases for a few on-chip sources which are either shared * or have gone through name changes. */ static struct clk_aliastable { char *alias; char *base; int cputype; } alchemy_clk_aliases[] __initdata = { { "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1100 }, { "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1100 }, { "irda_clk", "usb_clk", ALCHEMY_CPU_AU1100 }, { "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1550 }, { "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1550 }, { "psc2_intclk", "usb_clk", ALCHEMY_CPU_AU1550 }, { "psc3_intclk", "EXTCLK0", ALCHEMY_CPU_AU1550 }, { "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1200 }, { "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1200 }, { "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 }, { "psc2_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 }, { "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 }, { "psc3_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 }, { NULL, NULL, 0 }, }; #define IOMEM(x) ((void __iomem *)(KSEG1ADDR(CPHYSADDR(x)))) /* access locks to SYS_FREQCTRL0/1 and SYS_CLKSRC registers */ static spinlock_t alchemy_clk_fg0_lock; static spinlock_t alchemy_clk_fg1_lock; static spinlock_t alchemy_clk_csrc_lock; /* CPU Core clock *****************************************************/ static unsigned long 
alchemy_clk_cpu_recalc(struct clk_hw *hw, unsigned long parent_rate) { unsigned long t; /* * On early Au1000, sys_cpupll was write-only. Since these * silicon versions of Au1000 are not sold, we don't bend * over backwards trying to determine the frequency. */ if (unlikely(au1xxx_cpu_has_pll_wo())) t = 396000000; else { t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f; if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300) t &= 0x3f; t *= parent_rate; } return t; } void __init alchemy_set_lpj(void) { preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE); preset_lpj /= 2 * HZ; } static struct clk_ops alchemy_clkops_cpu = { .recalc_rate = alchemy_clk_cpu_recalc, }; static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name, int ctype) { struct clk_init_data id; struct clk_hw *h; h = kzalloc(sizeof(*h), GFP_KERNEL); if (!h) return ERR_PTR(-ENOMEM); id.name = ALCHEMY_CPU_CLK; id.parent_names = &parent_name; id.num_parents = 1; id.flags = CLK_IS_BASIC; id.ops = &alchemy_clkops_cpu; h->init = &id; return clk_register(NULL, h); } /* AUXPLLs ************************************************************/ struct alchemy_auxpll_clk { struct clk_hw hw; unsigned long reg; /* au1300 has also AUXPLL2 */ int maxmult; /* max multiplier */ }; #define to_auxpll_clk(x) container_of(x, struct alchemy_auxpll_clk, hw) static unsigned long alchemy_clk_aux_recalc(struct clk_hw *hw, unsigned long parent_rate) { struct alchemy_auxpll_clk *a = to_auxpll_clk(hw); return (alchemy_rdsys(a->reg) & 0xff) * parent_rate; } static int alchemy_clk_aux_setr(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct alchemy_auxpll_clk *a = to_auxpll_clk(hw); unsigned long d = rate; if (rate) d /= parent_rate; else d = 0; /* minimum is 84MHz, max is 756-1032 depending on variant */ if (((d < 7) && (d != 0)) || (d > a->maxmult)) return -EINVAL; alchemy_wrsys(d, a->reg); return 0; } static long alchemy_clk_aux_roundr(struct clk_hw *hw, unsigned long rate, unsigned long 
*parent_rate) { struct alchemy_auxpll_clk *a = to_auxpll_clk(hw); unsigned long mult; if (!rate || !*parent_rate) return 0; mult = rate / (*parent_rate); if (mult && (mult < 7)) mult = 7; if (mult > a->maxmult) mult = a->maxmult; return (*parent_rate) * mult; } static struct clk_ops alchemy_clkops_aux = { .recalc_rate = alchemy_clk_aux_recalc, .set_rate = alchemy_clk_aux_setr, .round_rate = alchemy_clk_aux_roundr, }; static struct clk __init *alchemy_clk_setup_aux(const char *parent_name, char *name, int maxmult, unsigned long reg) { struct clk_init_data id; struct clk *c; struct alchemy_auxpll_clk *a; a = kzalloc(sizeof(*a), GFP_KERNEL); if (!a) return ERR_PTR(-ENOMEM); id.name = name; id.parent_names = &parent_name; id.num_parents = 1; id.flags = CLK_GET_RATE_NOCACHE; id.ops = &alchemy_clkops_aux; a->reg = reg; a->maxmult = maxmult; a->hw.init = &id; c = clk_register(NULL, &a->hw); if (!IS_ERR(c)) clk_register_clkdev(c, name, NULL); else kfree(a); return c; } /* sysbus_clk *********************************************************/ static struct clk __init *alchemy_clk_setup_sysbus(const char *pn) { unsigned long v = (alchemy_rdsys(AU1000_SYS_POWERCTRL) & 3) + 2; struct clk *c; c = clk_register_fixed_factor(NULL, ALCHEMY_SYSBUS_CLK, pn, 0, 1, v); if (!IS_ERR(c)) clk_register_clkdev(c, ALCHEMY_SYSBUS_CLK, NULL); return c; } /* Peripheral Clock ***************************************************/ static struct clk __init *alchemy_clk_setup_periph(const char *pn) { /* Peripheral clock runs at half the rate of sysbus clk */ struct clk *c; c = clk_register_fixed_factor(NULL, ALCHEMY_PERIPH_CLK, pn, 0, 1, 2); if (!IS_ERR(c)) clk_register_clkdev(c, ALCHEMY_PERIPH_CLK, NULL); return c; } /* mem clock **********************************************************/ static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct) { void __iomem *addr = IOMEM(AU1000_MEM_PHYS_ADDR); unsigned long v; struct clk *c; int div; switch (ct) { case ALCHEMY_CPU_AU1550: case 
ALCHEMY_CPU_AU1200: v = __raw_readl(addr + AU1550_MEM_SDCONFIGB); div = (v & (1 << 15)) ? 1 : 2; break; case ALCHEMY_CPU_AU1300: v = __raw_readl(addr + AU1550_MEM_SDCONFIGB); div = (v & (1 << 31)) ? 1 : 2; break; case ALCHEMY_CPU_AU1000: case ALCHEMY_CPU_AU1500: case ALCHEMY_CPU_AU1100: default: div = 2; break; } c = clk_register_fixed_factor(NULL, ALCHEMY_MEM_CLK, pn, 0, 1, div); if (!IS_ERR(c)) clk_register_clkdev(c, ALCHEMY_MEM_CLK, NULL); return c; } /* lrclk: external synchronous static bus clock ***********************/ static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t) { /* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5, * otherwise lrclk=pclk/4. * All other variants: MEM_STCFG0[15:13] = divisor. * L/RCLK = periph_clk / (divisor + 1) * On Au1000, Au1500, Au1100 it's called LCLK, * on later models it's called RCLK, but it's the same thing. */ struct clk *c; unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0); switch (t) { case ALCHEMY_CPU_AU1000: case ALCHEMY_CPU_AU1500: v = 4 + ((v >> 11) & 1); break; default: /* all other models */ v = ((v >> 13) & 7) + 1; } c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK, pn, 0, 1, v); if (!IS_ERR(c)) clk_register_clkdev(c, ALCHEMY_LR_CLK, NULL); return c; } /* Clock dividers and muxes *******************************************/ /* data for fgen and csrc mux-dividers */ struct alchemy_fgcs_clk { struct clk_hw hw; spinlock_t *reglock; /* register lock */ unsigned long reg; /* SYS_FREQCTRL0/1 */ int shift; /* offset in register */ int parent; /* parent before disable [Au1300] */ int isen; /* is it enabled? 
*/ int *dt; /* dividertable for csrc */ }; #define to_fgcs_clk(x) container_of(x, struct alchemy_fgcs_clk, hw) static long alchemy_calc_div(unsigned long rate, unsigned long prate, int scale, int maxdiv, unsigned long *rv) { long div1, div2; div1 = prate / rate; if ((prate / div1) > rate) div1++; if (scale == 2) { /* only div-by-multiple-of-2 possible */ if (div1 & 1) div1++; /* stay <=prate */ } div2 = (div1 / scale) - 1; /* value to write to register */ if (div2 > maxdiv) div2 = maxdiv; if (rv) *rv = div2; div1 = ((div2 + 1) * scale); return div1; } static int alchemy_clk_fgcs_detr(struct clk_hw *hw, struct clk_rate_request *req, int scale, int maxdiv) { struct clk_hw *pc, *bpc, *free; long tdv, tpr, pr, nr, br, bpr, diff, lastdiff; int j; lastdiff = INT_MAX; bpr = 0; bpc = NULL; br = -EINVAL; free = NULL; /* look at the rates each enabled parent supplies and select * the one that gets closest to but not over the requested rate. */ for (j = 0; j < 7; j++) { pc = clk_hw_get_parent_by_index(hw, j); if (!pc) break; /* if this parent is currently unused, remember it. * XXX: we would actually want clk_has_active_children() * but this is a good-enough approximation for now. */ if (!clk_hw_is_prepared(pc)) { if (!free) free = pc; } pr = clk_hw_get_rate(pc); if (pr < req->rate) continue; /* what can hardware actually provide */ tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL); nr = pr / tdv; diff = req->rate - nr; if (nr > req->rate) continue; if (diff < lastdiff) { lastdiff = diff; bpr = pr; bpc = pc; br = nr; } if (diff == 0) break; } /* if we couldn't get the exact rate we wanted from the enabled * parents, maybe we can tell an available disabled/inactive one * to give us a rate we can divide down to the requested rate. */ if (lastdiff && free) { for (j = (maxdiv == 4) ? 
1 : scale; j <= maxdiv; j += scale) { tpr = req->rate * j; if (tpr < 0) break; pr = clk_hw_round_rate(free, tpr); tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL); nr = pr / tdv; diff = req->rate - nr; if (nr > req->rate) continue; if (diff < lastdiff) { lastdiff = diff; bpr = pr; bpc = free; br = nr; } if (diff == 0) break; } } if (br < 0) return br; req->best_parent_rate = bpr; req->best_parent_hw = bpc; req->rate = br; return 0; } static int alchemy_clk_fgv1_en(struct clk_hw *hw) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); unsigned long v, flags; spin_lock_irqsave(c->reglock, flags); v = alchemy_rdsys(c->reg); v |= (1 << 1) << c->shift; alchemy_wrsys(v, c->reg); spin_unlock_irqrestore(c->reglock, flags); return 0; } static int alchemy_clk_fgv1_isen(struct clk_hw *hw) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 1); return v & 1; } static void alchemy_clk_fgv1_dis(struct clk_hw *hw) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); unsigned long v, flags; spin_lock_irqsave(c->reglock, flags); v = alchemy_rdsys(c->reg); v &= ~((1 << 1) << c->shift); alchemy_wrsys(v, c->reg); spin_unlock_irqrestore(c->reglock, flags); } static int alchemy_clk_fgv1_setp(struct clk_hw *hw, u8 index) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); unsigned long v, flags; spin_lock_irqsave(c->reglock, flags); v = alchemy_rdsys(c->reg); if (index) v |= (1 << c->shift); else v &= ~(1 << c->shift); alchemy_wrsys(v, c->reg); spin_unlock_irqrestore(c->reglock, flags); return 0; } static u8 alchemy_clk_fgv1_getp(struct clk_hw *hw) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); return (alchemy_rdsys(c->reg) >> c->shift) & 1; } static int alchemy_clk_fgv1_setr(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); unsigned long div, v, flags, ret; int sh = c->shift + 2; if (!rate || !parent_rate || rate > (parent_rate / 2)) return -EINVAL; ret = 
alchemy_calc_div(rate, parent_rate, 2, 512, &div); spin_lock_irqsave(c->reglock, flags); v = alchemy_rdsys(c->reg); v &= ~(0xff << sh); v |= div << sh; alchemy_wrsys(v, c->reg); spin_unlock_irqrestore(c->reglock, flags); return 0; } static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw, unsigned long parent_rate) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 2); v = ((v & 0xff) + 1) * 2; return parent_rate / v; } static int alchemy_clk_fgv1_detr(struct clk_hw *hw, struct clk_rate_request *req) { return alchemy_clk_fgcs_detr(hw, req, 2, 512); } /* Au1000, Au1100, Au15x0, Au12x0 */ static struct clk_ops alchemy_clkops_fgenv1 = { .recalc_rate = alchemy_clk_fgv1_recalc, .determine_rate = alchemy_clk_fgv1_detr, .set_rate = alchemy_clk_fgv1_setr, .set_parent = alchemy_clk_fgv1_setp, .get_parent = alchemy_clk_fgv1_getp, .enable = alchemy_clk_fgv1_en, .disable = alchemy_clk_fgv1_dis, .is_enabled = alchemy_clk_fgv1_isen, }; static void __alchemy_clk_fgv2_en(struct alchemy_fgcs_clk *c) { unsigned long v = alchemy_rdsys(c->reg); v &= ~(3 << c->shift); v |= (c->parent & 3) << c->shift; alchemy_wrsys(v, c->reg); c->isen = 1; } static int alchemy_clk_fgv2_en(struct clk_hw *hw) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); unsigned long flags; /* enable by setting the previous parent clock */ spin_lock_irqsave(c->reglock, flags); __alchemy_clk_fgv2_en(c); spin_unlock_irqrestore(c->reglock, flags); return 0; } static int alchemy_clk_fgv2_isen(struct clk_hw *hw) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); return ((alchemy_rdsys(c->reg) >> c->shift) & 3) != 0; } static void alchemy_clk_fgv2_dis(struct clk_hw *hw) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); unsigned long v, flags; spin_lock_irqsave(c->reglock, flags); v = alchemy_rdsys(c->reg); v &= ~(3 << c->shift); /* set input mux to "disabled" state */ alchemy_wrsys(v, c->reg); c->isen = 0; spin_unlock_irqrestore(c->reglock, flags); } static int 
alchemy_clk_fgv2_setp(struct clk_hw *hw, u8 index) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); unsigned long flags; spin_lock_irqsave(c->reglock, flags); c->parent = index + 1; /* value to write to register */ if (c->isen) __alchemy_clk_fgv2_en(c); spin_unlock_irqrestore(c->reglock, flags); return 0; } static u8 alchemy_clk_fgv2_getp(struct clk_hw *hw) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); unsigned long flags, v; spin_lock_irqsave(c->reglock, flags); v = c->parent - 1; spin_unlock_irqrestore(c->reglock, flags); return v; } /* fg0-2 and fg4-6 share a "scale"-bit. With this bit cleared, the * dividers behave exactly as on previous models (dividers are multiples * of 2); with the bit set, dividers are multiples of 1, halving their * range, but making them also much more flexible. */ static int alchemy_clk_fgv2_setr(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); int sh = c->shift + 2; unsigned long div, v, flags, ret; if (!rate || !parent_rate || rate > parent_rate) return -EINVAL; v = alchemy_rdsys(c->reg) & (1 << 30); /* test "scale" bit */ ret = alchemy_calc_div(rate, parent_rate, v ? 1 : 2, v ? 
256 : 512, &div); spin_lock_irqsave(c->reglock, flags); v = alchemy_rdsys(c->reg); v &= ~(0xff << sh); v |= (div & 0xff) << sh; alchemy_wrsys(v, c->reg); spin_unlock_irqrestore(c->reglock, flags); return 0; } static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw, unsigned long parent_rate) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); int sh = c->shift + 2; unsigned long v, t; v = alchemy_rdsys(c->reg); t = parent_rate / (((v >> sh) & 0xff) + 1); if ((v & (1 << 30)) == 0) /* test scale bit */ t /= 2; return t; } static int alchemy_clk_fgv2_detr(struct clk_hw *hw, struct clk_rate_request *req) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); int scale, maxdiv; if (alchemy_rdsys(c->reg) & (1 << 30)) { scale = 1; maxdiv = 256; } else { scale = 2; maxdiv = 512; } return alchemy_clk_fgcs_detr(hw, req, scale, maxdiv); } /* Au1300 larger input mux, no separate disable bit, flexible divider */ static struct clk_ops alchemy_clkops_fgenv2 = { .recalc_rate = alchemy_clk_fgv2_recalc, .determine_rate = alchemy_clk_fgv2_detr, .set_rate = alchemy_clk_fgv2_setr, .set_parent = alchemy_clk_fgv2_setp, .get_parent = alchemy_clk_fgv2_getp, .enable = alchemy_clk_fgv2_en, .disable = alchemy_clk_fgv2_dis, .is_enabled = alchemy_clk_fgv2_isen, }; static const char * const alchemy_clk_fgv1_parents[] = { ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK }; static const char * const alchemy_clk_fgv2_parents[] = { ALCHEMY_AUXPLL2_CLK, ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK }; static const char * const alchemy_clk_fgen_names[] = { ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK, ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK }; static int __init alchemy_clk_init_fgens(int ctype) { struct clk *c; struct clk_init_data id; struct alchemy_fgcs_clk *a; unsigned long v; int i, ret; switch (ctype) { case ALCHEMY_CPU_AU1000...ALCHEMY_CPU_AU1200: id.ops = &alchemy_clkops_fgenv1; id.parent_names = alchemy_clk_fgv1_parents; id.num_parents = 2; break; case ALCHEMY_CPU_AU1300: id.ops = &alchemy_clkops_fgenv2; 
id.parent_names = alchemy_clk_fgv2_parents; id.num_parents = 3; break; default: return -ENODEV; } id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE; a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL); if (!a) return -ENOMEM; spin_lock_init(&alchemy_clk_fg0_lock); spin_lock_init(&alchemy_clk_fg1_lock); ret = 0; for (i = 0; i < 6; i++) { id.name = alchemy_clk_fgen_names[i]; a->shift = 10 * (i < 3 ? i : i - 3); if (i > 2) { a->reg = AU1000_SYS_FREQCTRL1; a->reglock = &alchemy_clk_fg1_lock; } else { a->reg = AU1000_SYS_FREQCTRL0; a->reglock = &alchemy_clk_fg0_lock; } /* default to first parent if bootloader has set * the mux to disabled state. */ if (ctype == ALCHEMY_CPU_AU1300) { v = alchemy_rdsys(a->reg); a->parent = (v >> a->shift) & 3; if (!a->parent) { a->parent = 1; a->isen = 0; } else a->isen = 1; } a->hw.init = &id; c = clk_register(NULL, &a->hw); if (IS_ERR(c)) ret++; else clk_register_clkdev(c, id.name, NULL); a++; } return ret; } /* internal sources muxes *********************************************/ static int alchemy_clk_csrc_isen(struct clk_hw *hw) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); unsigned long v = alchemy_rdsys(c->reg); return (((v >> c->shift) >> 2) & 7) != 0; } static void __alchemy_clk_csrc_en(struct alchemy_fgcs_clk *c) { unsigned long v = alchemy_rdsys(c->reg); v &= ~((7 << 2) << c->shift); v |= ((c->parent & 7) << 2) << c->shift; alchemy_wrsys(v, c->reg); c->isen = 1; } static int alchemy_clk_csrc_en(struct clk_hw *hw) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); unsigned long flags; /* enable by setting the previous parent clock */ spin_lock_irqsave(c->reglock, flags); __alchemy_clk_csrc_en(c); spin_unlock_irqrestore(c->reglock, flags); return 0; } static void alchemy_clk_csrc_dis(struct clk_hw *hw) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); unsigned long v, flags; spin_lock_irqsave(c->reglock, flags); v = alchemy_rdsys(c->reg); v &= ~((3 << 2) << c->shift); /* mux to "disabled" state */ alchemy_wrsys(v, c->reg); c->isen = 0; 
spin_unlock_irqrestore(c->reglock, flags); } static int alchemy_clk_csrc_setp(struct clk_hw *hw, u8 index) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); unsigned long flags; spin_lock_irqsave(c->reglock, flags); c->parent = index + 1; /* value to write to register */ if (c->isen) __alchemy_clk_csrc_en(c); spin_unlock_irqrestore(c->reglock, flags); return 0; } static u8 alchemy_clk_csrc_getp(struct clk_hw *hw) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); return c->parent - 1; } static unsigned long alchemy_clk_csrc_recalc(struct clk_hw *hw, unsigned long parent_rate) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); unsigned long v = (alchemy_rdsys(c->reg) >> c->shift) & 3; return parent_rate / c->dt[v]; } static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); unsigned long d, v, flags; int i; if (!rate || !parent_rate || rate > parent_rate) return -EINVAL; d = (parent_rate + (rate / 2)) / rate; if (d > 4) return -EINVAL; if ((d == 3) && (c->dt[2] != 3)) d = 4; for (i = 0; i < 4; i++) if (c->dt[i] == d) break; if (i >= 4) return -EINVAL; /* oops */ spin_lock_irqsave(c->reglock, flags); v = alchemy_rdsys(c->reg); v &= ~(3 << c->shift); v |= (i & 3) << c->shift; alchemy_wrsys(v, c->reg); spin_unlock_irqrestore(c->reglock, flags); return 0; } static int alchemy_clk_csrc_detr(struct clk_hw *hw, struct clk_rate_request *req) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); int scale = c->dt[2] == 3 ? 
1 : 2; /* au1300 check */ return alchemy_clk_fgcs_detr(hw, req, scale, 4); } static struct clk_ops alchemy_clkops_csrc = { .recalc_rate = alchemy_clk_csrc_recalc, .determine_rate = alchemy_clk_csrc_detr, .set_rate = alchemy_clk_csrc_setr, .set_parent = alchemy_clk_csrc_setp, .get_parent = alchemy_clk_csrc_getp, .enable = alchemy_clk_csrc_en, .disable = alchemy_clk_csrc_dis, .is_enabled = alchemy_clk_csrc_isen, }; static const char * const alchemy_clk_csrc_parents[] = { /* disabled at index 0 */ ALCHEMY_AUXPLL_CLK, ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK, ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK }; /* divider tables */ static int alchemy_csrc_dt1[] = { 1, 4, 1, 2 }; /* rest */ static int alchemy_csrc_dt2[] = { 1, 4, 3, 2 }; /* Au1300 */ static int __init alchemy_clk_setup_imux(int ctype) { struct alchemy_fgcs_clk *a; const char * const *names; struct clk_init_data id; unsigned long v; int i, ret, *dt; struct clk *c; id.ops = &alchemy_clkops_csrc; id.parent_names = alchemy_clk_csrc_parents; id.num_parents = 7; id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE; dt = alchemy_csrc_dt1; switch (ctype) { case ALCHEMY_CPU_AU1000: names = alchemy_au1000_intclknames; break; case ALCHEMY_CPU_AU1500: names = alchemy_au1500_intclknames; break; case ALCHEMY_CPU_AU1100: names = alchemy_au1100_intclknames; break; case ALCHEMY_CPU_AU1550: names = alchemy_au1550_intclknames; break; case ALCHEMY_CPU_AU1200: names = alchemy_au1200_intclknames; break; case ALCHEMY_CPU_AU1300: dt = alchemy_csrc_dt2; names = alchemy_au1300_intclknames; break; default: return -ENODEV; } a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL); if (!a) return -ENOMEM; spin_lock_init(&alchemy_clk_csrc_lock); ret = 0; for (i = 0; i < 6; i++) { id.name = names[i]; if (!id.name) goto next; a->shift = i * 5; a->reg = AU1000_SYS_CLKSRC; a->reglock = &alchemy_clk_csrc_lock; a->dt = dt; /* default to first parent clock if mux is initially * set to disabled state. 
*/ v = alchemy_rdsys(a->reg); a->parent = ((v >> a->shift) >> 2) & 7; if (!a->parent) { a->parent = 1; a->isen = 0; } else a->isen = 1; a->hw.init = &id; c = clk_register(NULL, &a->hw); if (IS_ERR(c)) ret++; else clk_register_clkdev(c, id.name, NULL); next: a++; } return ret; } /**********************************************************************/ #define ERRCK(x) \ if (IS_ERR(x)) { \ ret = PTR_ERR(x); \ goto out; \ } static int __init alchemy_clk_init(void) { int ctype = alchemy_get_cputype(), ret, i; struct clk_aliastable *t = alchemy_clk_aliases; struct clk *c; /* Root of the Alchemy clock tree: external 12MHz crystal osc */ c = clk_register_fixed_rate(NULL, ALCHEMY_ROOT_CLK, NULL, 0, ALCHEMY_ROOTCLK_RATE); ERRCK(c) /* CPU core clock */ c = alchemy_clk_setup_cpu(ALCHEMY_ROOT_CLK, ctype); ERRCK(c) /* AUXPLLs: max 1GHz on Au1300, 748MHz on older models */ i = (ctype == ALCHEMY_CPU_AU1300) ? 84 : 63; c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK, ALCHEMY_AUXPLL_CLK, i, AU1000_SYS_AUXPLL); ERRCK(c) if (ctype == ALCHEMY_CPU_AU1300) { c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK, ALCHEMY_AUXPLL2_CLK, i, AU1300_SYS_AUXPLL2); ERRCK(c) } /* sysbus clock: cpu core clock divided by 2, 3 or 4 */ c = alchemy_clk_setup_sysbus(ALCHEMY_CPU_CLK); ERRCK(c) /* peripheral clock: runs at half rate of sysbus clk */ c = alchemy_clk_setup_periph(ALCHEMY_SYSBUS_CLK); ERRCK(c) /* SDR/DDR memory clock */ c = alchemy_clk_setup_mem(ALCHEMY_SYSBUS_CLK, ctype); ERRCK(c) /* L/RCLK: external static bus clock for synchronous mode */ c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype); ERRCK(c) /* Frequency dividers 0-5 */ ret = alchemy_clk_init_fgens(ctype); if (ret) { ret = -ENODEV; goto out; } /* diving muxes for internal sources */ ret = alchemy_clk_setup_imux(ctype); if (ret) { ret = -ENODEV; goto out; } /* set up aliases drivers might look for */ while (t->base) { if (t->cputype == ctype) clk_add_alias(t->alias, NULL, t->base, NULL); t++; } pr_info("Alchemy clocktree installed\n"); return 
0; out: return ret; } postcore_initcall(alchemy_clk_init);
gpl-2.0
barakinflorida/Vibrant-open
drivers/power/pcf50633-charger.c
557
10620
/* NXP PCF50633 Main Battery Charger Driver * * (C) 2006-2008 by Openmoko, Inc. * Author: Balaji Rao <balajirrao@openmoko.org> * All rights reserved. * * Broken down from monstrous PCF50633 driver mainly by * Harald Welte, Andy Green and Werner Almesberger * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/device.h> #include <linux/sysfs.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/mfd/pcf50633/core.h> #include <linux/mfd/pcf50633/mbc.h> struct pcf50633_mbc { struct pcf50633 *pcf; int adapter_active; int adapter_online; int usb_active; int usb_online; struct power_supply usb; struct power_supply adapter; struct delayed_work charging_restart_work; }; int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) { struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev); int ret = 0; u8 bits; int charging_start = 1; u8 mbcs2, chgmod; if (ma >= 1000) bits = PCF50633_MBCC7_USB_1000mA; else if (ma >= 500) bits = PCF50633_MBCC7_USB_500mA; else if (ma >= 100) bits = PCF50633_MBCC7_USB_100mA; else { bits = PCF50633_MBCC7_USB_SUSPEND; charging_start = 0; } ret = pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC7, PCF50633_MBCC7_USB_MASK, bits); if (ret) dev_err(pcf->dev, "error setting usb curlim to %d mA\n", ma); else dev_info(pcf->dev, "usb curlim to %d mA\n", ma); /* Manual charging start */ mbcs2 = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2); chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK); /* If chgmod == BATFULL, setting chgena has no effect. * We need to set resume instead. 
*/ if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL) pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1, PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA); else pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1, PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME); mbc->usb_active = charging_start; power_supply_changed(&mbc->usb); return ret; } EXPORT_SYMBOL_GPL(pcf50633_mbc_usb_curlim_set); int pcf50633_mbc_get_status(struct pcf50633 *pcf) { struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev); int status = 0; if (mbc->usb_online) status |= PCF50633_MBC_USB_ONLINE; if (mbc->usb_active) status |= PCF50633_MBC_USB_ACTIVE; if (mbc->adapter_online) status |= PCF50633_MBC_ADAPTER_ONLINE; if (mbc->adapter_active) status |= PCF50633_MBC_ADAPTER_ACTIVE; return status; } EXPORT_SYMBOL_GPL(pcf50633_mbc_get_status); static ssize_t show_chgmode(struct device *dev, struct device_attribute *attr, char *buf) { struct pcf50633_mbc *mbc = dev_get_drvdata(dev); u8 mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2); u8 chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK); return sprintf(buf, "%d\n", chgmod); } static DEVICE_ATTR(chgmode, S_IRUGO, show_chgmode, NULL); static ssize_t show_usblim(struct device *dev, struct device_attribute *attr, char *buf) { struct pcf50633_mbc *mbc = dev_get_drvdata(dev); u8 usblim = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC7) & PCF50633_MBCC7_USB_MASK; unsigned int ma; if (usblim == PCF50633_MBCC7_USB_1000mA) ma = 1000; else if (usblim == PCF50633_MBCC7_USB_500mA) ma = 500; else if (usblim == PCF50633_MBCC7_USB_100mA) ma = 100; else ma = 0; return sprintf(buf, "%u\n", ma); } static ssize_t set_usblim(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pcf50633_mbc *mbc = dev_get_drvdata(dev); unsigned long ma; int ret; ret = strict_strtoul(buf, 10, &ma); if (ret) return -EINVAL; pcf50633_mbc_usb_curlim_set(mbc->pcf, ma); return count; } static DEVICE_ATTR(usb_curlim, S_IRUGO | S_IWUSR, show_usblim, set_usblim); static 
struct attribute *pcf50633_mbc_sysfs_entries[] = { &dev_attr_chgmode.attr, &dev_attr_usb_curlim.attr, NULL, }; static struct attribute_group mbc_attr_group = { .name = NULL, /* put in device directory */ .attrs = pcf50633_mbc_sysfs_entries, }; /* MBC state machine switches into charging mode when the battery voltage * falls below 96% of a battery float voltage. But the voltage drop in Li-ion * batteries is marginal(1~2 %) till about 80% of its capacity - which means, * after a BATFULL, charging won't be restarted until 80%. * * This work_struct function restarts charging at regular intervals to make * sure we don't discharge too much */ static void pcf50633_mbc_charging_restart(struct work_struct *work) { struct pcf50633_mbc *mbc; u8 mbcs2, chgmod; mbc = container_of(work, struct pcf50633_mbc, charging_restart_work.work); mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2); chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK); if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL) return; /* Restart charging */ pcf50633_reg_set_bit_mask(mbc->pcf, PCF50633_REG_MBCC1, PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME); mbc->usb_active = 1; power_supply_changed(&mbc->usb); dev_info(mbc->pcf->dev, "Charging restarted\n"); } static void pcf50633_mbc_irq_handler(int irq, void *data) { struct pcf50633_mbc *mbc = data; int chg_restart_interval = mbc->pcf->pdata->charging_restart_interval; /* USB */ if (irq == PCF50633_IRQ_USBINS) { mbc->usb_online = 1; } else if (irq == PCF50633_IRQ_USBREM) { mbc->usb_online = 0; mbc->usb_active = 0; pcf50633_mbc_usb_curlim_set(mbc->pcf, 0); cancel_delayed_work_sync(&mbc->charging_restart_work); } /* Adapter */ if (irq == PCF50633_IRQ_ADPINS) { mbc->adapter_online = 1; mbc->adapter_active = 1; } else if (irq == PCF50633_IRQ_ADPREM) { mbc->adapter_online = 0; mbc->adapter_active = 0; } if (irq == PCF50633_IRQ_BATFULL) { mbc->usb_active = 0; mbc->adapter_active = 0; if (chg_restart_interval > 0) schedule_delayed_work(&mbc->charging_restart_work, 
chg_restart_interval); } else if (irq == PCF50633_IRQ_USBLIMON) mbc->usb_active = 0; else if (irq == PCF50633_IRQ_USBLIMOFF) mbc->usb_active = 1; power_supply_changed(&mbc->usb); power_supply_changed(&mbc->adapter); if (mbc->pcf->pdata->mbc_event_callback) mbc->pcf->pdata->mbc_event_callback(mbc->pcf, irq); } static int adapter_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct pcf50633_mbc *mbc = container_of(psy, struct pcf50633_mbc, adapter); int ret = 0; switch (psp) { case POWER_SUPPLY_PROP_ONLINE: val->intval = mbc->adapter_online; break; default: ret = -EINVAL; break; } return ret; } static int usb_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct pcf50633_mbc *mbc = container_of(psy, struct pcf50633_mbc, usb); int ret = 0; switch (psp) { case POWER_SUPPLY_PROP_ONLINE: val->intval = mbc->usb_online; break; default: ret = -EINVAL; break; } return ret; } static enum power_supply_property power_props[] = { POWER_SUPPLY_PROP_ONLINE, }; static const u8 mbc_irq_handlers[] = { PCF50633_IRQ_ADPINS, PCF50633_IRQ_ADPREM, PCF50633_IRQ_USBINS, PCF50633_IRQ_USBREM, PCF50633_IRQ_BATFULL, PCF50633_IRQ_CHGHALT, PCF50633_IRQ_THLIMON, PCF50633_IRQ_THLIMOFF, PCF50633_IRQ_USBLIMON, PCF50633_IRQ_USBLIMOFF, PCF50633_IRQ_LOWSYS, PCF50633_IRQ_LOWBAT, }; static int __devinit pcf50633_mbc_probe(struct platform_device *pdev) { struct pcf50633_mbc *mbc; struct pcf50633_subdev_pdata *pdata = pdev->dev.platform_data; int ret; int i; u8 mbcs1; mbc = kzalloc(sizeof(*mbc), GFP_KERNEL); if (!mbc) return -ENOMEM; platform_set_drvdata(pdev, mbc); mbc->pcf = pdata->pcf; /* Set up IRQ handlers */ for (i = 0; i < ARRAY_SIZE(mbc_irq_handlers); i++) pcf50633_register_irq(mbc->pcf, mbc_irq_handlers[i], pcf50633_mbc_irq_handler, mbc); /* Create power supplies */ mbc->adapter.name = "adapter"; mbc->adapter.type = POWER_SUPPLY_TYPE_MAINS; mbc->adapter.properties = power_props; 
mbc->adapter.num_properties = ARRAY_SIZE(power_props); mbc->adapter.get_property = &adapter_get_property; mbc->adapter.supplied_to = mbc->pcf->pdata->batteries; mbc->adapter.num_supplicants = mbc->pcf->pdata->num_batteries; mbc->usb.name = "usb"; mbc->usb.type = POWER_SUPPLY_TYPE_USB; mbc->usb.properties = power_props; mbc->usb.num_properties = ARRAY_SIZE(power_props); mbc->usb.get_property = usb_get_property; mbc->usb.supplied_to = mbc->pcf->pdata->batteries; mbc->usb.num_supplicants = mbc->pcf->pdata->num_batteries; ret = power_supply_register(&pdev->dev, &mbc->adapter); if (ret) { dev_err(mbc->pcf->dev, "failed to register adapter\n"); kfree(mbc); return ret; } ret = power_supply_register(&pdev->dev, &mbc->usb); if (ret) { dev_err(mbc->pcf->dev, "failed to register usb\n"); power_supply_unregister(&mbc->adapter); kfree(mbc); return ret; } INIT_DELAYED_WORK(&mbc->charging_restart_work, pcf50633_mbc_charging_restart); ret = sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group); if (ret) dev_err(mbc->pcf->dev, "failed to create sysfs entries\n"); mbcs1 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS1); if (mbcs1 & PCF50633_MBCS1_USBPRES) pcf50633_mbc_irq_handler(PCF50633_IRQ_USBINS, mbc); if (mbcs1 & PCF50633_MBCS1_ADAPTPRES) pcf50633_mbc_irq_handler(PCF50633_IRQ_ADPINS, mbc); return 0; } static int __devexit pcf50633_mbc_remove(struct platform_device *pdev) { struct pcf50633_mbc *mbc = platform_get_drvdata(pdev); int i; /* Remove IRQ handlers */ for (i = 0; i < ARRAY_SIZE(mbc_irq_handlers); i++) pcf50633_free_irq(mbc->pcf, mbc_irq_handlers[i]); power_supply_unregister(&mbc->usb); power_supply_unregister(&mbc->adapter); cancel_delayed_work_sync(&mbc->charging_restart_work); kfree(mbc); return 0; } static struct platform_driver pcf50633_mbc_driver = { .driver = { .name = "pcf50633-mbc", }, .probe = pcf50633_mbc_probe, .remove = __devexit_p(pcf50633_mbc_remove), }; static int __init pcf50633_mbc_init(void) { return platform_driver_register(&pcf50633_mbc_driver); } 
module_init(pcf50633_mbc_init); static void __exit pcf50633_mbc_exit(void) { platform_driver_unregister(&pcf50633_mbc_driver); } module_exit(pcf50633_mbc_exit); MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>"); MODULE_DESCRIPTION("PCF50633 mbc driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pcf50633-mbc");
gpl-2.0
varigit/VAR-SOM-AMx3-Kernel-4-1
drivers/staging/speakup/speakup_dtlk.c
813
11612
/* * originally written by: Kirk Reiser <kirk@braille.uwo.ca> * this version considerably modified by David Borowski, david575@rogers.com * * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * specificly written as a driver for the speakup screenreview * package it's not a general device driver. * This driver is for the RC Systems DoubleTalk PC internal synthesizer. 
*/ #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/kthread.h> #include "spk_priv.h" #include "serialio.h" #include "speakup_dtlk.h" /* local header file for DoubleTalk values */ #include "speakup.h" #define DRV_VERSION "2.10" #define PROCSPEECH 0x00 static int synth_probe(struct spk_synth *synth); static void dtlk_release(void); static const char *synth_immediate(struct spk_synth *synth, const char *buf); static void do_catch_up(struct spk_synth *synth); static void synth_flush(struct spk_synth *synth); static int synth_lpc; static int port_forced; static unsigned int synth_portlist[] = { 0x25e, 0x29e, 0x2de, 0x31e, 0x35e, 0x39e, 0 }; static u_char synth_status; static struct var_t vars[] = { { CAPS_START, .u.s = {"\x01+35p" } }, { CAPS_STOP, .u.s = {"\x01-35p" } }, { RATE, .u.n = {"\x01%ds", 8, 0, 9, 0, 0, NULL } }, { PITCH, .u.n = {"\x01%dp", 50, 0, 99, 0, 0, NULL } }, { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } }, { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } }, { PUNCT, .u.n = {"\x01%db", 7, 0, 15, 0, 0, NULL } }, { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } }, { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } }, { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; /* * These attributes will appear in /sys/accessibility/speakup/dtlk. 
*/ static struct kobj_attribute caps_start_attribute = __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute freq_attribute = __ATTR(freq, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute punct_attribute = __ATTR(punct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute tone_attribute = __ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute voice_attribute = __ATTR(voice, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute delay_time_attribute = __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all * at once. 
*/ static struct attribute *synth_attrs[] = { &caps_start_attribute.attr, &caps_stop_attribute.attr, &freq_attribute.attr, &pitch_attribute.attr, &punct_attribute.attr, &rate_attribute.attr, &tone_attribute.attr, &voice_attribute.attr, &vol_attribute.attr, &delay_time_attribute.attr, &direct_attribute.attr, &full_time_attribute.attr, &jiffy_delta_attribute.attr, &trigger_time_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static struct spk_synth synth_dtlk = { .name = "dtlk", .version = DRV_VERSION, .long_name = "DoubleTalk PC", .init = "\x01@\x01\x31y", .procspeech = PROCSPEECH, .clear = SYNTH_CLEAR, .delay = 500, .trigger = 30, .jiffies = 50, .full = 1000, .startup = SYNTH_START, .checkval = SYNTH_CHECK, .vars = vars, .probe = synth_probe, .release = dtlk_release, .synth_immediate = synth_immediate, .catch_up = do_catch_up, .flush = synth_flush, .is_alive = spk_synth_is_alive_nop, .synth_adjust = NULL, .read_buff_add = NULL, .get_index = spk_serial_in_nowait, .indexing = { .command = "\x01%di", .lowindex = 1, .highindex = 5, .currindex = 1, }, .attributes = { .attrs = synth_attrs, .name = "dtlk", }, }; static inline bool synth_readable(void) { synth_status = inb_p(speakup_info.port_tts + UART_RX); return (synth_status & TTS_READABLE) != 0; } static inline bool synth_writable(void) { synth_status = inb_p(speakup_info.port_tts + UART_RX); return (synth_status & TTS_WRITABLE) != 0; } static inline bool synth_full(void) { synth_status = inb_p(speakup_info.port_tts + UART_RX); return (synth_status & TTS_ALMOST_FULL) != 0; } static void spk_out(const char ch) { int timeout = SPK_XMITR_TIMEOUT; while (!synth_writable()) { if (!--timeout) break; udelay(1); } outb_p(ch, speakup_info.port_tts); timeout = SPK_XMITR_TIMEOUT; while (synth_writable()) { if (!--timeout) break; udelay(1); } } static void do_catch_up(struct spk_synth *synth) { u_char ch; unsigned long flags; unsigned long jiff_max; struct var_t *jiffy_delta; struct var_t *delay_time; 
int jiffy_delta_val; int delay_time_val; jiffy_delta = spk_get_var(JIFFY); delay_time = spk_get_var(DELAY); spin_lock_irqsave(&speakup_info.spinlock, flags); jiffy_delta_val = jiffy_delta->u.n.value; spin_unlock_irqrestore(&speakup_info.spinlock, flags); jiff_max = jiffies + jiffy_delta_val; while (!kthread_should_stop()) { spin_lock_irqsave(&speakup_info.spinlock, flags); if (speakup_info.flushing) { speakup_info.flushing = 0; spin_unlock_irqrestore(&speakup_info.spinlock, flags); synth->flush(synth); continue; } if (synth_buffer_empty()) { spin_unlock_irqrestore(&speakup_info.spinlock, flags); break; } set_current_state(TASK_INTERRUPTIBLE); delay_time_val = delay_time->u.n.value; spin_unlock_irqrestore(&speakup_info.spinlock, flags); if (synth_full()) { schedule_timeout(msecs_to_jiffies(delay_time_val)); continue; } set_current_state(TASK_RUNNING); spin_lock_irqsave(&speakup_info.spinlock, flags); ch = synth_buffer_getc(); spin_unlock_irqrestore(&speakup_info.spinlock, flags); if (ch == '\n') ch = PROCSPEECH; spk_out(ch); if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) { spk_out(PROCSPEECH); spin_lock_irqsave(&speakup_info.spinlock, flags); delay_time_val = delay_time->u.n.value; jiffy_delta_val = jiffy_delta->u.n.value; spin_unlock_irqrestore(&speakup_info.spinlock, flags); schedule_timeout(msecs_to_jiffies(delay_time_val)); jiff_max = jiffies + jiffy_delta_val; } } spk_out(PROCSPEECH); } static const char *synth_immediate(struct spk_synth *synth, const char *buf) { u_char ch; while ((ch = (u_char)*buf)) { if (synth_full()) return buf; if (ch == '\n') ch = PROCSPEECH; spk_out(ch); buf++; } return NULL; } static void synth_flush(struct spk_synth *synth) { outb_p(SYNTH_CLEAR, speakup_info.port_tts); while (synth_writable()) cpu_relax(); } static char synth_read_tts(void) { u_char ch; while (!synth_readable()) cpu_relax(); ch = synth_status & 0x7f; outb_p(ch, speakup_info.port_tts); while (synth_readable()) cpu_relax(); return (char) ch; } /* interrogate the 
DoubleTalk PC and return its settings */ static struct synth_settings *synth_interrogate(struct spk_synth *synth) { u_char *t; static char buf[sizeof(struct synth_settings) + 1]; int total, i; static struct synth_settings status; synth_immediate(synth, "\x18\x01?"); for (total = 0, i = 0; i < 50; i++) { buf[total] = synth_read_tts(); if (total > 2 && buf[total] == 0x7f) break; if (total < sizeof(struct synth_settings)) total++; } t = buf; /* serial number is little endian */ status.serial_number = t[0] + t[1]*256; t += 2; for (i = 0; *t != '\r'; t++) { status.rom_version[i] = *t; if (i < sizeof(status.rom_version)-1) i++; } status.rom_version[i] = 0; t++; status.mode = *t++; status.punc_level = *t++; status.formant_freq = *t++; status.pitch = *t++; status.speed = *t++; status.volume = *t++; status.tone = *t++; status.expression = *t++; status.ext_dict_loaded = *t++; status.ext_dict_status = *t++; status.free_ram = *t++; status.articulation = *t++; status.reverb = *t++; status.eob = *t++; return &status; } static int synth_probe(struct spk_synth *synth) { unsigned int port_val = 0; int i = 0; struct synth_settings *sp; pr_info("Probing for DoubleTalk.\n"); if (port_forced) { speakup_info.port_tts = port_forced; pr_info("probe forced to %x by kernel command line\n", speakup_info.port_tts); if ((port_forced & 0xf) != 0xf) pr_info("warning: port base should probably end with f\n"); if (synth_request_region(speakup_info.port_tts-1, SYNTH_IO_EXTENT)) { pr_warn("sorry, port already reserved\n"); return -EBUSY; } port_val = inw(speakup_info.port_tts-1); synth_lpc = speakup_info.port_tts-1; } else { for (i = 0; synth_portlist[i]; i++) { if (synth_request_region(synth_portlist[i], SYNTH_IO_EXTENT)) continue; port_val = inw(synth_portlist[i]) & 0xfbff; if (port_val == 0x107f) { synth_lpc = synth_portlist[i]; speakup_info.port_tts = synth_lpc+1; break; } synth_release_region(synth_portlist[i], SYNTH_IO_EXTENT); } } port_val &= 0xfbff; if (port_val != 0x107f) { 
pr_info("DoubleTalk PC: not found\n"); if (synth_lpc) synth_release_region(synth_lpc, SYNTH_IO_EXTENT); return -ENODEV; } while (inw_p(synth_lpc) != 0x147f) cpu_relax(); /* wait until it's ready */ sp = synth_interrogate(synth); pr_info("%s: %03x-%03x, ROM ver %s, s/n %u, driver: %s\n", synth->long_name, synth_lpc, synth_lpc+SYNTH_IO_EXTENT - 1, sp->rom_version, sp->serial_number, synth->version); synth->alive = 1; return 0; } static void dtlk_release(void) { if (speakup_info.port_tts) synth_release_region(speakup_info.port_tts-1, SYNTH_IO_EXTENT); speakup_info.port_tts = 0; } module_param_named(port, port_forced, int, S_IRUGO); module_param_named(start, synth_dtlk.startup, short, S_IRUGO); MODULE_PARM_DESC(port, "Set the port for the synthesizer (override probing)."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); module_spk_synth(synth_dtlk); MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>"); MODULE_AUTHOR("David Borowski"); MODULE_DESCRIPTION("Speakup support for DoubleTalk PC synthesizers"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
tsuibin/linux-4.x.y
drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
813
6807
/****************************************************************************** * * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * ******************************************************************************/ #define _USB_OPS_LINUX_C_ #include <drv_types.h> #include <usb_ops_linux.h> #include <rtw_sreset.h> void rtl8723au_read_port_cancel(struct rtw_adapter *padapter) { struct recv_buf *precvbuf; int i; precvbuf = (struct recv_buf *)padapter->recvpriv.precv_buf; DBG_8723A("%s\n", __func__); padapter->bReadPortCancel = true; for (i = 0; i < NR_RECVBUFF ; i++) { if (precvbuf->purb) usb_kill_urb(precvbuf->purb); precvbuf++; } usb_kill_urb(padapter->recvpriv.int_in_urb); } static void usb_write_port23a_complete(struct urb *purb) { struct xmit_buf *pxmitbuf = (struct xmit_buf *)purb->context; struct rtw_adapter *padapter = pxmitbuf->padapter; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct hal_data_8723a *phaldata; unsigned long irqL; switch (pxmitbuf->flags) { case HIGH_QUEUE_INX: #ifdef CONFIG_8723AU_AP_MODE rtw_chk_hi_queue_cmd23a(padapter); #endif break; default: break; } if (padapter->bSurpriseRemoved || padapter->bDriverStopped || padapter->bWritePortCancel) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, "usb_write_port23a_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n", padapter->bDriverStopped, padapter->bSurpriseRemoved); DBG_8723A("%s(): TX Warning! 
bDriverStopped(%d) OR " "bSurpriseRemoved(%d) bWritePortCancel(%d) " "pxmitbuf->ext_tag(%x)\n", __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, padapter->bReadPortCancel, pxmitbuf->ext_tag); goto check_completion; } if (purb->status) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, "usb_write_port23a_complete : purb->status(%d) != 0\n", purb->status); DBG_8723A("###=> urb_write_port_complete status(%d)\n", purb->status); if (purb->status == -EPIPE || purb->status == -EPROTO) { } else if (purb->status == -EINPROGRESS) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, "usb_write_port23a_complete: EINPROGESS\n"); goto check_completion; } else if (purb->status == -ENOENT) { DBG_8723A("%s: -ENOENT\n", __func__); goto check_completion; } else if (purb->status == -ECONNRESET) { DBG_8723A("%s: -ECONNRESET\n", __func__); goto check_completion; } else if (purb->status == -ESHUTDOWN) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, "usb_write_port23a_complete: ESHUTDOWN\n"); padapter->bDriverStopped = true; RT_TRACE(_module_hci_ops_os_c_, _drv_err_, "usb_write_port23a_complete:bDriverStopped = true\n"); goto check_completion; } else { padapter->bSurpriseRemoved = true; DBG_8723A("bSurpriseRemoved = true\n"); RT_TRACE(_module_hci_ops_os_c_, _drv_err_, "usb_write_port23a_complete:bSurpriseRemoved = true\n"); goto check_completion; } } phaldata = GET_HAL_DATA(padapter); phaldata->srestpriv.last_tx_complete_time = jiffies; check_completion: spin_lock_irqsave(&pxmitpriv->lock_sctx, irqL); rtw23a_sctx_done_err(&pxmitbuf->sctx, purb->status ? 
RTW_SCTX_DONE_WRITE_PORT_ERR : RTW_SCTX_DONE_SUCCESS); spin_unlock_irqrestore(&pxmitpriv->lock_sctx, irqL); rtw_free_xmitbuf23a(pxmitpriv, pxmitbuf); tasklet_hi_schedule(&pxmitpriv->xmit_tasklet); } int rtl8723au_write_port(struct rtw_adapter *padapter, u32 addr, u32 cnt, struct xmit_buf *pxmitbuf) { struct urb *purb = NULL; struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter); struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct xmit_frame *pxmitframe; struct usb_device *pusbd = pdvobj->pusbdev; unsigned long irqL; unsigned int pipe, ep_num; int status; int ret = _FAIL; RT_TRACE(_module_hci_ops_os_c_, _drv_err_, "+usb_write_port23a\n"); if (padapter->bDriverStopped || padapter->bSurpriseRemoved) { RT_TRACE(_module_hci_ops_os_c_, _drv_err_, "%s:(padapter->bDriverStopped || padapter->bSurpriseRemoved)!!!\n", __func__); rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_TX_DENY); goto exit; } pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data; spin_lock_irqsave(&pxmitpriv->lock, irqL); switch (addr) { case VO_QUEUE_INX: pxmitbuf->flags = VO_QUEUE_INX; break; case VI_QUEUE_INX: pxmitbuf->flags = VI_QUEUE_INX; break; case BE_QUEUE_INX: pxmitbuf->flags = BE_QUEUE_INX; break; case BK_QUEUE_INX: pxmitbuf->flags = BK_QUEUE_INX; break; case HIGH_QUEUE_INX: pxmitbuf->flags = HIGH_QUEUE_INX; break; default: pxmitbuf->flags = MGT_QUEUE_INX; break; } spin_unlock_irqrestore(&pxmitpriv->lock, irqL); purb = pxmitbuf->pxmit_urb[0]; /* translate DMA FIFO addr to pipehandle */ ep_num = pdvobj->Queue2Pipe[addr]; pipe = usb_sndbulkpipe(pusbd, ep_num); usb_fill_bulk_urb(purb, pusbd, pipe, pxmitframe->buf_addr, /* pxmitbuf->pbuf */ cnt, usb_write_port23a_complete, pxmitbuf);/* context is pxmitbuf */ status = usb_submit_urb(purb, GFP_ATOMIC); if (!status) { struct hal_data_8723a *phaldata = GET_HAL_DATA(padapter); phaldata->srestpriv.last_tx_time = jiffies; } else { rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR); DBG_8723A("usb_write_port23a, status =%d\n", 
status); RT_TRACE(_module_hci_ops_os_c_, _drv_err_, "usb_write_port23a(): usb_submit_urb, status =%x\n", status); switch (status) { case -ENODEV: padapter->bDriverStopped = true; break; default: break; } goto exit; } ret = _SUCCESS; RT_TRACE(_module_hci_ops_os_c_, _drv_err_, "-usb_write_port23a\n"); exit: if (ret != _SUCCESS) rtw_free_xmitbuf23a(pxmitpriv, pxmitbuf); return ret; } void rtl8723au_write_port_cancel(struct rtw_adapter *padapter) { struct xmit_buf *pxmitbuf; struct list_head *plist; int j; DBG_8723A("%s\n", __func__); padapter->bWritePortCancel = true; list_for_each(plist, &padapter->xmitpriv.xmitbuf_list) { pxmitbuf = container_of(plist, struct xmit_buf, list2); for (j = 0; j < 8; j++) { if (pxmitbuf->pxmit_urb[j]) usb_kill_urb(pxmitbuf->pxmit_urb[j]); } } list_for_each(plist, &padapter->xmitpriv.xmitextbuf_list) { pxmitbuf = container_of(plist, struct xmit_buf, list2); for (j = 0; j < 8; j++) { if (pxmitbuf->pxmit_urb[j]) usb_kill_urb(pxmitbuf->pxmit_urb[j]); } } }
gpl-2.0
philenotfound/linux-stable-15khz
drivers/staging/speakup/speakup_bns.c
813
4657
/* * originally written by: Kirk Reiser <kirk@braille.uwo.ca> * this version considerably modified by David Borowski, david575@rogers.com * * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * this code is specificly written as a driver for the speakup screenreview * package and is not a general device driver. */ #include "spk_priv.h" #include "speakup.h" #define DRV_VERSION "2.11" #define SYNTH_CLEAR 0x18 #define PROCSPEECH '\r' static struct var_t vars[] = { { CAPS_START, .u.s = {"\x05\x31\x32P" } }, { CAPS_STOP, .u.s = {"\x05\x38P" } }, { RATE, .u.n = {"\x05%dE", 8, 1, 16, 0, 0, NULL } }, { PITCH, .u.n = {"\x05%dP", 8, 0, 16, 0, 0, NULL } }, { VOL, .u.n = {"\x05%dV", 8, 0, 16, 0, 0, NULL } }, { TONE, .u.n = {"\x05%dT", 8, 0, 16, 0, 0, NULL } }, { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; /* * These attributes will appear in /sys/accessibility/speakup/bns. 
*/ static struct kobj_attribute caps_start_attribute = __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute tone_attribute = __ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute delay_time_attribute = __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all * at once. 
*/ static struct attribute *synth_attrs[] = { &caps_start_attribute.attr, &caps_stop_attribute.attr, &pitch_attribute.attr, &rate_attribute.attr, &tone_attribute.attr, &vol_attribute.attr, &delay_time_attribute.attr, &direct_attribute.attr, &full_time_attribute.attr, &jiffy_delta_attribute.attr, &trigger_time_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static struct spk_synth synth_bns = { .name = "bns", .version = DRV_VERSION, .long_name = "Braille 'N Speak", .init = "\x05Z\x05\x43", .procspeech = PROCSPEECH, .clear = SYNTH_CLEAR, .delay = 500, .trigger = 50, .jiffies = 50, .full = 40000, .startup = SYNTH_START, .checkval = SYNTH_CHECK, .vars = vars, .probe = spk_serial_synth_probe, .release = spk_serial_release, .synth_immediate = spk_synth_immediate, .catch_up = spk_do_catch_up, .flush = spk_synth_flush, .is_alive = spk_synth_is_alive_restart, .synth_adjust = NULL, .read_buff_add = NULL, .get_index = NULL, .indexing = { .command = NULL, .lowindex = 0, .highindex = 0, .currindex = 0, }, .attributes = { .attrs = synth_attrs, .name = "bns", }, }; module_param_named(ser, synth_bns.ser, int, S_IRUGO); module_param_named(start, synth_bns.startup, short, S_IRUGO); MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based)."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); module_spk_synth(synth_bns); MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>"); MODULE_AUTHOR("David Borowski"); MODULE_DESCRIPTION("Speakup support for Braille 'n Speak synthesizers"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
silvesterlee/linux
drivers/staging/rts5208/rtsx_transport.c
813
19933
/* Driver for Realtek PCI-Express card reader * * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, see <http://www.gnu.org/licenses/>. * * Author: * Wei WANG (wei_wang@realsil.com.cn) * Micky Ching (micky_ching@realsil.com.cn) */ #include <linux/blkdev.h> #include <linux/kthread.h> #include <linux/sched.h> #include "rtsx.h" /*********************************************************************** * Scatter-gather transfer buffer access routines ***********************************************************************/ /* Copy a buffer of length buflen to/from the srb's transfer buffer. * (Note: for scatter-gather transfers (srb->use_sg > 0), srb->request_buffer * points to a list of s-g entries and we ignore srb->request_bufflen. * For non-scatter-gather transfers, srb->request_buffer points to the * transfer buffer itself and srb->request_bufflen is the buffer's length.) * Update the *index and *offset variables so that the next copy will * pick up from where this one left off. */ unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer, unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index, unsigned int *offset, enum xfer_buf_dir dir) { unsigned int cnt; /* If not using scatter-gather, just transfer the data directly. * Make certain it will fit in the available buffer space. 
*/ if (scsi_sg_count(srb) == 0) { if (*offset >= scsi_bufflen(srb)) return 0; cnt = min(buflen, scsi_bufflen(srb) - *offset); if (dir == TO_XFER_BUF) memcpy((unsigned char *) scsi_sglist(srb) + *offset, buffer, cnt); else memcpy(buffer, (unsigned char *) scsi_sglist(srb) + *offset, cnt); *offset += cnt; /* Using scatter-gather. We have to go through the list one entry * at a time. Each s-g entry contains some number of pages, and * each page has to be kmap()'ed separately. If the page is already * in kernel-addressable memory then kmap() will return its address. * If the page is not directly accessible -- such as a user buffer * located in high memory -- then kmap() will map it to a temporary * position in the kernel's virtual address space. */ } else { struct scatterlist *sg = (struct scatterlist *) scsi_sglist(srb) + *index; /* This loop handles a single s-g list entry, which may * include multiple pages. Find the initial page structure * and the starting offset within the page, and update * the *offset and *index values for the next loop. */ cnt = 0; while (cnt < buflen && *index < scsi_sg_count(srb)) { struct page *page = sg_page(sg) + ((sg->offset + *offset) >> PAGE_SHIFT); unsigned int poff = (sg->offset + *offset) & (PAGE_SIZE-1); unsigned int sglen = sg->length - *offset; if (sglen > buflen - cnt) { /* Transfer ends within this s-g entry */ sglen = buflen - cnt; *offset += sglen; } else { /* Transfer continues to next s-g entry */ *offset = 0; ++*index; ++sg; } /* Transfer the data for all the pages in this * s-g entry. For each page: call kmap(), do the * transfer, and call kunmap() immediately after. 
*/ while (sglen > 0) { unsigned int plen = min(sglen, (unsigned int) PAGE_SIZE - poff); unsigned char *ptr = kmap(page); if (dir == TO_XFER_BUF) memcpy(ptr + poff, buffer + cnt, plen); else memcpy(buffer + cnt, ptr + poff, plen); kunmap(page); /* Start at the beginning of the next page */ poff = 0; ++page; cnt += plen; sglen -= plen; } } } /* Return the amount actually transferred */ return cnt; } /* Store the contents of buffer into srb's transfer buffer and set the * SCSI residue. */ void rtsx_stor_set_xfer_buf(unsigned char *buffer, unsigned int buflen, struct scsi_cmnd *srb) { unsigned int index = 0, offset = 0; rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset, TO_XFER_BUF); if (buflen < scsi_bufflen(srb)) scsi_set_resid(srb, scsi_bufflen(srb) - buflen); } void rtsx_stor_get_xfer_buf(unsigned char *buffer, unsigned int buflen, struct scsi_cmnd *srb) { unsigned int index = 0, offset = 0; rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset, FROM_XFER_BUF); if (buflen < scsi_bufflen(srb)) scsi_set_resid(srb, scsi_bufflen(srb) - buflen); } /*********************************************************************** * Transport routines ***********************************************************************/ /* Invoke the transport and basic error-handling/recovery methods * * This is used to send the message to the device and receive the response. 
*/ void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip) { int result; result = rtsx_scsi_handler(srb, chip); /* if the command gets aborted by the higher layers, we need to * short-circuit all other processing */ if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) { dev_dbg(rtsx_dev(chip), "-- command was aborted\n"); srb->result = DID_ABORT << 16; goto Handle_Errors; } /* if there is a transport error, reset and don't auto-sense */ if (result == TRANSPORT_ERROR) { dev_dbg(rtsx_dev(chip), "-- transport indicates error, resetting\n"); srb->result = DID_ERROR << 16; goto Handle_Errors; } srb->result = SAM_STAT_GOOD; /* * If we have a failure, we're going to do a REQUEST_SENSE * automatically. Note that we differentiate between a command * "failure" and an "error" in the transport mechanism. */ if (result == TRANSPORT_FAILED) { /* set the result so the higher layers expect this data */ srb->result = SAM_STAT_CHECK_CONDITION; memcpy(srb->sense_buffer, (unsigned char *)&(chip->sense_buffer[SCSI_LUN(srb)]), sizeof(struct sense_data_t)); } return; /* Error and abort processing: try to resynchronize with the device * by issuing a port reset. If that fails, try a class-specific * device reset. 
*/ Handle_Errors: return; } void rtsx_add_cmd(struct rtsx_chip *chip, u8 cmd_type, u16 reg_addr, u8 mask, u8 data) { u32 *cb = (u32 *)(chip->host_cmds_ptr); u32 val = 0; val |= (u32)(cmd_type & 0x03) << 30; val |= (u32)(reg_addr & 0x3FFF) << 16; val |= (u32)mask << 8; val |= (u32)data; spin_lock_irq(&chip->rtsx->reg_lock); if (chip->ci < (HOST_CMDS_BUF_LEN / 4)) cb[(chip->ci)++] = cpu_to_le32(val); spin_unlock_irq(&chip->rtsx->reg_lock); } void rtsx_send_cmd_no_wait(struct rtsx_chip *chip) { u32 val = 1 << 31; rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr); val |= (u32)(chip->ci * 4) & 0x00FFFFFF; /* Hardware Auto Response */ val |= 0x40000000; rtsx_writel(chip, RTSX_HCBCTLR, val); } int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout) { struct rtsx_dev *rtsx = chip->rtsx; struct completion trans_done; u32 val = 1 << 31; long timeleft; int err = 0; if (card == SD_CARD) rtsx->check_card_cd = SD_EXIST; else if (card == MS_CARD) rtsx->check_card_cd = MS_EXIST; else if (card == XD_CARD) rtsx->check_card_cd = XD_EXIST; else rtsx->check_card_cd = 0; spin_lock_irq(&rtsx->reg_lock); /* set up data structures for the wakeup system */ rtsx->done = &trans_done; rtsx->trans_result = TRANS_NOT_READY; init_completion(&trans_done); rtsx->trans_state = STATE_TRANS_CMD; rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr); val |= (u32)(chip->ci * 4) & 0x00FFFFFF; /* Hardware Auto Response */ val |= 0x40000000; rtsx_writel(chip, RTSX_HCBCTLR, val); spin_unlock_irq(&rtsx->reg_lock); /* Wait for TRANS_OK_INT */ timeleft = wait_for_completion_interruptible_timeout( &trans_done, msecs_to_jiffies(timeout)); if (timeleft <= 0) { dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n", chip->int_reg); err = -ETIMEDOUT; rtsx_trace(chip); goto finish_send_cmd; } spin_lock_irq(&rtsx->reg_lock); if (rtsx->trans_result == TRANS_RESULT_FAIL) err = -EIO; else if (rtsx->trans_result == TRANS_RESULT_OK) err = 0; spin_unlock_irq(&rtsx->reg_lock); finish_send_cmd: rtsx->done = NULL; 
rtsx->trans_state = STATE_TRANS_NONE; if (err < 0) rtsx_stop_cmd(chip, card); return err; } static inline void rtsx_add_sg_tbl( struct rtsx_chip *chip, u32 addr, u32 len, u8 option) { u64 *sgb = (u64 *)(chip->host_sg_tbl_ptr); u64 val = 0; u32 temp_len = 0; u8 temp_opt = 0; do { if (len > 0x80000) { temp_len = 0x80000; temp_opt = option & (~SG_END); } else { temp_len = len; temp_opt = option; } val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt; if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8)) sgb[(chip->sgi)++] = cpu_to_le64(val); len -= temp_len; addr += temp_len; } while (len); } static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card, struct scatterlist *sg, int num_sg, unsigned int *index, unsigned int *offset, int size, enum dma_data_direction dma_dir, int timeout) { struct rtsx_dev *rtsx = chip->rtsx; struct completion trans_done; u8 dir; int sg_cnt, i, resid; int err = 0; long timeleft; struct scatterlist *sg_ptr; u32 val = TRIG_DMA; if ((sg == NULL) || (num_sg <= 0) || !offset || !index) return -EIO; if (dma_dir == DMA_TO_DEVICE) dir = HOST_TO_DEVICE; else if (dma_dir == DMA_FROM_DEVICE) dir = DEVICE_TO_HOST; else return -ENXIO; if (card == SD_CARD) rtsx->check_card_cd = SD_EXIST; else if (card == MS_CARD) rtsx->check_card_cd = MS_EXIST; else if (card == XD_CARD) rtsx->check_card_cd = XD_EXIST; else rtsx->check_card_cd = 0; spin_lock_irq(&rtsx->reg_lock); /* set up data structures for the wakeup system */ rtsx->done = &trans_done; rtsx->trans_state = STATE_TRANS_SG; rtsx->trans_result = TRANS_NOT_READY; spin_unlock_irq(&rtsx->reg_lock); sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir); resid = size; sg_ptr = sg; chip->sgi = 0; /* Usually the next entry will be @sg@ + 1, but if this sg element * is part of a chained scatterlist, it could jump to the start of * a new scatterlist array. 
So here we use sg_next to move to * the proper sg */ for (i = 0; i < *index; i++) sg_ptr = sg_next(sg_ptr); for (i = *index; i < sg_cnt; i++) { dma_addr_t addr; unsigned int len; u8 option; addr = sg_dma_address(sg_ptr); len = sg_dma_len(sg_ptr); dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len); dev_dbg(rtsx_dev(chip), "*index = %d, *offset = %d\n", *index, *offset); addr += *offset; if ((len - *offset) > resid) { *offset += resid; len = resid; resid = 0; } else { resid -= (len - *offset); len -= *offset; *offset = 0; *index = *index + 1; } if ((i == (sg_cnt - 1)) || !resid) option = SG_VALID | SG_END | SG_TRANS_DATA; else option = SG_VALID | SG_TRANS_DATA; rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option); if (!resid) break; sg_ptr = sg_next(sg_ptr); } dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi); val |= (u32)(dir & 0x01) << 29; val |= ADMA_MODE; spin_lock_irq(&rtsx->reg_lock); init_completion(&trans_done); rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr); rtsx_writel(chip, RTSX_HDBCTLR, val); spin_unlock_irq(&rtsx->reg_lock); timeleft = wait_for_completion_interruptible_timeout( &trans_done, msecs_to_jiffies(timeout)); if (timeleft <= 0) { dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n", __func__, __LINE__); dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n", chip->int_reg); err = -ETIMEDOUT; goto out; } spin_lock_irq(&rtsx->reg_lock); if (rtsx->trans_result == TRANS_RESULT_FAIL) { err = -EIO; spin_unlock_irq(&rtsx->reg_lock); goto out; } spin_unlock_irq(&rtsx->reg_lock); /* Wait for TRANS_OK_INT */ spin_lock_irq(&rtsx->reg_lock); if (rtsx->trans_result == TRANS_NOT_READY) { init_completion(&trans_done); spin_unlock_irq(&rtsx->reg_lock); timeleft = wait_for_completion_interruptible_timeout( &trans_done, msecs_to_jiffies(timeout)); if (timeleft <= 0) { dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n", __func__, __LINE__); dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n", chip->int_reg); err = -ETIMEDOUT; goto out; } } else 
{ spin_unlock_irq(&rtsx->reg_lock); } spin_lock_irq(&rtsx->reg_lock); if (rtsx->trans_result == TRANS_RESULT_FAIL) err = -EIO; else if (rtsx->trans_result == TRANS_RESULT_OK) err = 0; spin_unlock_irq(&rtsx->reg_lock); out: rtsx->done = NULL; rtsx->trans_state = STATE_TRANS_NONE; dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir); if (err < 0) rtsx_stop_cmd(chip, card); return err; } static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card, struct scatterlist *sg, int num_sg, enum dma_data_direction dma_dir, int timeout) { struct rtsx_dev *rtsx = chip->rtsx; struct completion trans_done; u8 dir; int buf_cnt, i; int err = 0; long timeleft; struct scatterlist *sg_ptr; if ((sg == NULL) || (num_sg <= 0)) return -EIO; if (dma_dir == DMA_TO_DEVICE) dir = HOST_TO_DEVICE; else if (dma_dir == DMA_FROM_DEVICE) dir = DEVICE_TO_HOST; else return -ENXIO; if (card == SD_CARD) rtsx->check_card_cd = SD_EXIST; else if (card == MS_CARD) rtsx->check_card_cd = MS_EXIST; else if (card == XD_CARD) rtsx->check_card_cd = XD_EXIST; else rtsx->check_card_cd = 0; spin_lock_irq(&rtsx->reg_lock); /* set up data structures for the wakeup system */ rtsx->done = &trans_done; rtsx->trans_state = STATE_TRANS_SG; rtsx->trans_result = TRANS_NOT_READY; spin_unlock_irq(&rtsx->reg_lock); buf_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir); sg_ptr = sg; for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) { u32 val = TRIG_DMA; int sg_cnt, j; if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8)) sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8); else sg_cnt = HOST_SG_TBL_BUF_LEN / 8; chip->sgi = 0; for (j = 0; j < sg_cnt; j++) { dma_addr_t addr = sg_dma_address(sg_ptr); unsigned int len = sg_dma_len(sg_ptr); u8 option; dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len); if (j == (sg_cnt - 1)) option = SG_VALID | SG_END | SG_TRANS_DATA; else option = SG_VALID | SG_TRANS_DATA; rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option); sg_ptr = sg_next(sg_ptr); } 
dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi); val |= (u32)(dir & 0x01) << 29; val |= ADMA_MODE; spin_lock_irq(&rtsx->reg_lock); init_completion(&trans_done); rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr); rtsx_writel(chip, RTSX_HDBCTLR, val); spin_unlock_irq(&rtsx->reg_lock); timeleft = wait_for_completion_interruptible_timeout( &trans_done, msecs_to_jiffies(timeout)); if (timeleft <= 0) { dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n", __func__, __LINE__); dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n", chip->int_reg); err = -ETIMEDOUT; goto out; } spin_lock_irq(&rtsx->reg_lock); if (rtsx->trans_result == TRANS_RESULT_FAIL) { err = -EIO; spin_unlock_irq(&rtsx->reg_lock); goto out; } spin_unlock_irq(&rtsx->reg_lock); sg_ptr += sg_cnt; } /* Wait for TRANS_OK_INT */ spin_lock_irq(&rtsx->reg_lock); if (rtsx->trans_result == TRANS_NOT_READY) { init_completion(&trans_done); spin_unlock_irq(&rtsx->reg_lock); timeleft = wait_for_completion_interruptible_timeout( &trans_done, msecs_to_jiffies(timeout)); if (timeleft <= 0) { dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n", __func__, __LINE__); dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n", chip->int_reg); err = -ETIMEDOUT; goto out; } } else { spin_unlock_irq(&rtsx->reg_lock); } spin_lock_irq(&rtsx->reg_lock); if (rtsx->trans_result == TRANS_RESULT_FAIL) err = -EIO; else if (rtsx->trans_result == TRANS_RESULT_OK) err = 0; spin_unlock_irq(&rtsx->reg_lock); out: rtsx->done = NULL; rtsx->trans_state = STATE_TRANS_NONE; dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir); if (err < 0) rtsx_stop_cmd(chip, card); return err; } static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf, size_t len, enum dma_data_direction dma_dir, int timeout) { struct rtsx_dev *rtsx = chip->rtsx; struct completion trans_done; dma_addr_t addr; u8 dir; int err = 0; u32 val = 1 << 31; long timeleft; if ((buf == NULL) || (len <= 0)) return -EIO; if (dma_dir == DMA_TO_DEVICE) dir = HOST_TO_DEVICE; else if (dma_dir == 
DMA_FROM_DEVICE) dir = DEVICE_TO_HOST; else return -ENXIO; addr = dma_map_single(&(rtsx->pci->dev), buf, len, dma_dir); if (!addr) return -ENOMEM; if (card == SD_CARD) rtsx->check_card_cd = SD_EXIST; else if (card == MS_CARD) rtsx->check_card_cd = MS_EXIST; else if (card == XD_CARD) rtsx->check_card_cd = XD_EXIST; else rtsx->check_card_cd = 0; val |= (u32)(dir & 0x01) << 29; val |= (u32)(len & 0x00FFFFFF); spin_lock_irq(&rtsx->reg_lock); /* set up data structures for the wakeup system */ rtsx->done = &trans_done; init_completion(&trans_done); rtsx->trans_state = STATE_TRANS_BUF; rtsx->trans_result = TRANS_NOT_READY; rtsx_writel(chip, RTSX_HDBAR, addr); rtsx_writel(chip, RTSX_HDBCTLR, val); spin_unlock_irq(&rtsx->reg_lock); /* Wait for TRANS_OK_INT */ timeleft = wait_for_completion_interruptible_timeout( &trans_done, msecs_to_jiffies(timeout)); if (timeleft <= 0) { dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n", __func__, __LINE__); dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n", chip->int_reg); err = -ETIMEDOUT; goto out; } spin_lock_irq(&rtsx->reg_lock); if (rtsx->trans_result == TRANS_RESULT_FAIL) err = -EIO; else if (rtsx->trans_result == TRANS_RESULT_OK) err = 0; spin_unlock_irq(&rtsx->reg_lock); out: rtsx->done = NULL; rtsx->trans_state = STATE_TRANS_NONE; dma_unmap_single(&(rtsx->pci->dev), addr, len, dma_dir); if (err < 0) rtsx_stop_cmd(chip, card); return err; } int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card, void *buf, size_t len, int use_sg, unsigned int *index, unsigned int *offset, enum dma_data_direction dma_dir, int timeout) { int err = 0; /* don't transfer data during abort processing */ if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) return -EIO; if (use_sg) err = rtsx_transfer_sglist_adma_partial(chip, card, (struct scatterlist *)buf, use_sg, index, offset, (int)len, dma_dir, timeout); else err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout); if (err < 0) { if (RTSX_TST_DELINK(chip)) { RTSX_CLR_DELINK(chip); chip->need_reinit = 
SD_CARD | MS_CARD | XD_CARD; rtsx_reinit_cards(chip, 1); } } return err; } int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len, int use_sg, enum dma_data_direction dma_dir, int timeout) { int err = 0; dev_dbg(rtsx_dev(chip), "use_sg = %d\n", use_sg); /* don't transfer data during abort processing */ if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) return -EIO; if (use_sg) { err = rtsx_transfer_sglist_adma(chip, card, (struct scatterlist *)buf, use_sg, dma_dir, timeout); } else { err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout); } if (err < 0) { if (RTSX_TST_DELINK(chip)) { RTSX_CLR_DELINK(chip); chip->need_reinit = SD_CARD | MS_CARD | XD_CARD; rtsx_reinit_cards(chip, 1); } } return err; }
gpl-2.0
Jackeagle/android_kernel_lge_d838
mm/nommu.c
1581
53001
/* * linux/mm/nommu.c * * Replacement code for mm functions to support CPU's that don't * have any form of memory management unit (thus no virtual memory). * * See Documentation/nommu-mmap.txt * * Copyright (c) 2004-2008 David Howells <dhowells@redhat.com> * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com> * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org> * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com> * Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org> */ #include <linux/export.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/swap.h> #include <linux/file.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/mount.h> #include <linux/personality.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <asm/uaccess.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> #include "internal.h" #if 0 #define kenter(FMT, ...) \ printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__) #define kleave(FMT, ...) \ printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__) #define kdebug(FMT, ...) \ printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__) #else #define kenter(FMT, ...) \ no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__) #define kleave(FMT, ...) \ no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__) #define kdebug(FMT, ...) 
\ no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__) #endif void *high_memory; struct page *mem_map; unsigned long max_mapnr; unsigned long num_physpages; unsigned long highest_memmap_pfn; struct percpu_counter vm_committed_as; int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ int sysctl_overcommit_ratio = 50; /* default is 50% */ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS; int heap_stack_gap = 0; atomic_long_t mmap_pages_allocated; EXPORT_SYMBOL(mem_map); EXPORT_SYMBOL(num_physpages); /* list of mapped, potentially shareable regions */ static struct kmem_cache *vm_region_jar; struct rb_root nommu_region_tree = RB_ROOT; DECLARE_RWSEM(nommu_region_sem); const struct vm_operations_struct generic_file_vm_ops = { }; /* * Return the total memory allocated for this pointer, not * just what the caller asked for. * * Doesn't have to be accurate, i.e. may have races. */ unsigned int kobjsize(const void *objp) { struct page *page; /* * If the object we have should not have ksize performed on it, * return size of 0 */ if (!objp || !virt_addr_valid(objp)) return 0; page = virt_to_head_page(objp); /* * If the allocator sets PageSlab, we know the pointer came from * kmalloc(). */ if (PageSlab(page)) return ksize(objp); /* * If it's not a compound page, see if we have a matching VMA * region. This test is intentionally done in reverse order, * so if there's no VMA, we still fall through and hand back * PAGE_SIZE for 0-order pages. */ if (!PageCompound(page)) { struct vm_area_struct *vma; vma = find_vma(current->mm, (unsigned long)objp); if (vma) return vma->vm_end - vma->vm_start; } /* * The ksize() function is only guaranteed to work for pointers * returned by kmalloc(). So handle arbitrary pointers here. 
*/ return PAGE_SIZE << compound_order(page); } int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int nr_pages, unsigned int foll_flags, struct page **pages, struct vm_area_struct **vmas, int *retry) { struct vm_area_struct *vma; unsigned long vm_flags; int i; /* calculate required read or write permissions. * If FOLL_FORCE is set, we only require the "MAY" flags. */ vm_flags = (foll_flags & FOLL_WRITE) ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); vm_flags &= (foll_flags & FOLL_FORCE) ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); for (i = 0; i < nr_pages; i++) { vma = find_vma(mm, start); if (!vma) goto finish_or_fault; /* protect what we can, including chardevs */ if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || !(vm_flags & vma->vm_flags)) goto finish_or_fault; if (pages) { pages[i] = virt_to_page(start); if (pages[i]) page_cache_get(pages[i]); } if (vmas) vmas[i] = vma; start = (start + PAGE_SIZE) & PAGE_MASK; } return i; finish_or_fault: return i ? : -EFAULT; } /* * get a list of pages in an address range belonging to the specified process * and indicate the VMA that covers each page * - this is potentially dodgy as we may end incrementing the page count of a * slab page or a secondary page from a compound page * - don't permit access to VMAs that don't support it, such as I/O mappings */ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas) { int flags = 0; if (write) flags |= FOLL_WRITE; if (force) flags |= FOLL_FORCE; return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, NULL); } EXPORT_SYMBOL(get_user_pages); /** * follow_pfn - look up PFN at a user virtual address * @vma: memory mapping * @address: user virtual address * @pfn: location to store found PFN * * Only IO mappings and raw PFN mappings are allowed. * * Returns zero and the pfn at @pfn on success, -ve otherwise. 
*/ int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn) { if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) return -EINVAL; *pfn = address >> PAGE_SHIFT; return 0; } EXPORT_SYMBOL(follow_pfn); DEFINE_RWLOCK(vmlist_lock); struct vm_struct *vmlist; void vfree(const void *addr) { kfree(addr); } EXPORT_SYMBOL(vfree); void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) { /* * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc() * returns only a logical address. */ return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM); } EXPORT_SYMBOL(__vmalloc); void *vmalloc_user(unsigned long size) { void *ret; ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); if (ret) { struct vm_area_struct *vma; down_write(&current->mm->mmap_sem); vma = find_vma(current->mm, (unsigned long)ret); if (vma) vma->vm_flags |= VM_USERMAP; up_write(&current->mm->mmap_sem); } return ret; } EXPORT_SYMBOL(vmalloc_user); struct page *vmalloc_to_page(const void *addr) { return virt_to_page(addr); } EXPORT_SYMBOL(vmalloc_to_page); unsigned long vmalloc_to_pfn(const void *addr) { return page_to_pfn(virt_to_page(addr)); } EXPORT_SYMBOL(vmalloc_to_pfn); long vread(char *buf, char *addr, unsigned long count) { memcpy(buf, addr, count); return count; } long vwrite(char *buf, char *addr, unsigned long count) { /* Don't allow overflow */ if ((unsigned long) addr + count < count) count = -(unsigned long) addr; memcpy(addr, buf, count); return(count); } /* * vmalloc - allocate virtually continguos memory * * @size: allocation size * * Allocate enough pages to cover @size from the page level * allocator and map them into continguos kernel virtual space. * * For tight control over page level allocator and protection flags * use __vmalloc() instead. 
*/ void *vmalloc(unsigned long size) { return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); } EXPORT_SYMBOL(vmalloc); /* * vzalloc - allocate virtually continguos memory with zero fill * * @size: allocation size * * Allocate enough pages to cover @size from the page level * allocator and map them into continguos kernel virtual space. * The memory allocated is set to zero. * * For tight control over page level allocator and protection flags * use __vmalloc() instead. */ void *vzalloc(unsigned long size) { return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); } EXPORT_SYMBOL(vzalloc); /** * vmalloc_node - allocate memory on a specific node * @size: allocation size * @node: numa node * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * * For tight control over page level allocator and protection flags * use __vmalloc() instead. */ void *vmalloc_node(unsigned long size, int node) { return vmalloc(size); } EXPORT_SYMBOL(vmalloc_node); /** * vzalloc_node - allocate memory on a specific node with zero fill * @size: allocation size * @node: numa node * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * The memory allocated is set to zero. * * For tight control over page level allocator and protection flags * use __vmalloc() instead. */ void *vzalloc_node(unsigned long size, int node) { return vzalloc(size); } EXPORT_SYMBOL(vzalloc_node); #ifndef PAGE_KERNEL_EXEC # define PAGE_KERNEL_EXEC PAGE_KERNEL #endif /** * vmalloc_exec - allocate virtually contiguous, executable memory * @size: allocation size * * Kernel-internal function to allocate enough pages to cover @size * the page level allocator and map them into contiguous and * executable kernel virtual space. * * For tight control over page level allocator and protection flags * use __vmalloc() instead. 
*/ void *vmalloc_exec(unsigned long size) { return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC); } /** * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) * @size: allocation size * * Allocate enough 32bit PA addressable pages to cover @size from the * page level allocator and map them into continguos kernel virtual space. */ void *vmalloc_32(unsigned long size) { return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL); } EXPORT_SYMBOL(vmalloc_32); /** * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory * @size: allocation size * * The resulting memory area is 32bit addressable and zeroed so it can be * mapped to userspace without leaking data. * * VM_USERMAP is set on the corresponding VMA so that subsequent calls to * remap_vmalloc_range() are permissible. */ void *vmalloc_32_user(unsigned long size) { /* * We'll have to sort out the ZONE_DMA bits for 64-bit, * but for now this can simply use vmalloc_user() directly. */ return vmalloc_user(size); } EXPORT_SYMBOL(vmalloc_32_user); void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) { BUG(); return NULL; } EXPORT_SYMBOL(vmap); void vunmap(const void *addr) { BUG(); } EXPORT_SYMBOL(vunmap); void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) { BUG(); return NULL; } EXPORT_SYMBOL(vm_map_ram); void vm_unmap_ram(const void *mem, unsigned int count) { BUG(); } EXPORT_SYMBOL(vm_unmap_ram); void vm_unmap_aliases(void) { } EXPORT_SYMBOL_GPL(vm_unmap_aliases); /* * Implement a stub for vmalloc_sync_all() if the architecture chose not to * have one. */ void __attribute__((weak)) vmalloc_sync_all(void) { } /** * alloc_vm_area - allocate a range of kernel address space * @size: size of the area * * Returns: NULL on failure, vm_struct on success * * This function reserves a range of kernel address space, and * allocates pagetables to map that range. No actual mappings * are created. 
If the kernel address space is not shared * between processes, it syncs the pagetable across all * processes. */ struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) { BUG(); return NULL; } EXPORT_SYMBOL_GPL(alloc_vm_area); void free_vm_area(struct vm_struct *area) { BUG(); } EXPORT_SYMBOL_GPL(free_vm_area); int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page) { return -EINVAL; } EXPORT_SYMBOL(vm_insert_page); /* * sys_brk() for the most part doesn't need the global kernel * lock, except when an application is doing something nasty * like trying to un-brk an area that has already been mapped * to a regular file. in this case, the unmapping will need * to invoke file system routines that need the global lock. */ SYSCALL_DEFINE1(brk, unsigned long, brk) { struct mm_struct *mm = current->mm; if (brk < mm->start_brk || brk > mm->context.end_brk) return mm->brk; if (mm->brk == brk) return mm->brk; /* * Always allow shrinking brk */ if (brk <= mm->brk) { mm->brk = brk; return brk; } /* * Ok, looks good - let it rip. 
*/ flush_icache_range(mm->brk, brk); return mm->brk = brk; } /* * initialise the VMA and region record slabs */ void __init mmap_init(void) { int ret; ret = percpu_counter_init(&vm_committed_as, 0); VM_BUG_ON(ret); vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC); } /* * validate the region tree * - the caller must hold the region lock */ #ifdef CONFIG_DEBUG_NOMMU_REGIONS static noinline void validate_nommu_regions(void) { struct vm_region *region, *last; struct rb_node *p, *lastp; lastp = rb_first(&nommu_region_tree); if (!lastp) return; last = rb_entry(lastp, struct vm_region, vm_rb); BUG_ON(unlikely(last->vm_end <= last->vm_start)); BUG_ON(unlikely(last->vm_top < last->vm_end)); while ((p = rb_next(lastp))) { region = rb_entry(p, struct vm_region, vm_rb); last = rb_entry(lastp, struct vm_region, vm_rb); BUG_ON(unlikely(region->vm_end <= region->vm_start)); BUG_ON(unlikely(region->vm_top < region->vm_end)); BUG_ON(unlikely(region->vm_start < last->vm_top)); lastp = p; } } #else static void validate_nommu_regions(void) { } #endif /* * add a region into the global tree */ static void add_nommu_region(struct vm_region *region) { struct vm_region *pregion; struct rb_node **p, *parent; validate_nommu_regions(); parent = NULL; p = &nommu_region_tree.rb_node; while (*p) { parent = *p; pregion = rb_entry(parent, struct vm_region, vm_rb); if (region->vm_start < pregion->vm_start) p = &(*p)->rb_left; else if (region->vm_start > pregion->vm_start) p = &(*p)->rb_right; else if (pregion == region) return; else BUG(); } rb_link_node(&region->vm_rb, parent, p); rb_insert_color(&region->vm_rb, &nommu_region_tree); validate_nommu_regions(); } /* * delete a region from the global tree */ static void delete_nommu_region(struct vm_region *region) { BUG_ON(!nommu_region_tree.rb_node); validate_nommu_regions(); rb_erase(&region->vm_rb, &nommu_region_tree); validate_nommu_regions(); } /* * free a contiguous series of pages */ static void free_page_series(unsigned long from, unsigned 
long to) { for (; from < to; from += PAGE_SIZE) { struct page *page = virt_to_page(from); kdebug("- free %lx", from); atomic_long_dec(&mmap_pages_allocated); if (page_count(page) != 1) kdebug("free page %p: refcount not one: %d", page, page_count(page)); put_page(page); } } /* * release a reference to a region * - the caller must hold the region semaphore for writing, which this releases * - the region may not have been added to the tree yet, in which case vm_top * will equal vm_start */ static void __put_nommu_region(struct vm_region *region) __releases(nommu_region_sem) { kenter("%p{%d}", region, region->vm_usage); BUG_ON(!nommu_region_tree.rb_node); if (--region->vm_usage == 0) { if (region->vm_top > region->vm_start) delete_nommu_region(region); up_write(&nommu_region_sem); if (region->vm_file) fput(region->vm_file); /* IO memory and memory shared directly out of the pagecache * from ramfs/tmpfs mustn't be released here */ if (region->vm_flags & VM_MAPPED_COPY) { kdebug("free series"); free_page_series(region->vm_start, region->vm_top); } kmem_cache_free(vm_region_jar, region); } else { up_write(&nommu_region_sem); } } /* * release a reference to a region */ static void put_nommu_region(struct vm_region *region) { down_write(&nommu_region_sem); __put_nommu_region(region); } /* * update protection on a vma */ static void protect_vma(struct vm_area_struct *vma, unsigned long flags) { #ifdef CONFIG_MPU struct mm_struct *mm = vma->vm_mm; long start = vma->vm_start & PAGE_MASK; while (start < vma->vm_end) { protect_page(mm, start, flags); start += PAGE_SIZE; } update_protections(mm); #endif } /* * add a VMA into a process's mm_struct in the appropriate place in the list * and tree and add to the address space's page tree also if not an anonymous * page * - should be called with mm->mmap_sem held writelocked */ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) { struct vm_area_struct *pvma, *prev; struct address_space *mapping; struct 
rb_node **p, *parent, *rb_prev; kenter(",%p", vma); BUG_ON(!vma->vm_region); mm->map_count++; vma->vm_mm = mm; protect_vma(vma, vma->vm_flags); /* add the VMA to the mapping */ if (vma->vm_file) { mapping = vma->vm_file->f_mapping; mutex_lock(&mapping->i_mmap_mutex); flush_dcache_mmap_lock(mapping); vma_prio_tree_insert(vma, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); mutex_unlock(&mapping->i_mmap_mutex); } /* add the VMA to the tree */ parent = rb_prev = NULL; p = &mm->mm_rb.rb_node; while (*p) { parent = *p; pvma = rb_entry(parent, struct vm_area_struct, vm_rb); /* sort by: start addr, end addr, VMA struct addr in that order * (the latter is necessary as we may get identical VMAs) */ if (vma->vm_start < pvma->vm_start) p = &(*p)->rb_left; else if (vma->vm_start > pvma->vm_start) { rb_prev = parent; p = &(*p)->rb_right; } else if (vma->vm_end < pvma->vm_end) p = &(*p)->rb_left; else if (vma->vm_end > pvma->vm_end) { rb_prev = parent; p = &(*p)->rb_right; } else if (vma < pvma) p = &(*p)->rb_left; else if (vma > pvma) { rb_prev = parent; p = &(*p)->rb_right; } else BUG(); } rb_link_node(&vma->vm_rb, parent, p); rb_insert_color(&vma->vm_rb, &mm->mm_rb); /* add VMA to the VMA list also */ prev = NULL; if (rb_prev) prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); __vma_link_list(mm, vma, prev, parent); } /* * delete a VMA from its owning mm_struct and address space */ static void delete_vma_from_mm(struct vm_area_struct *vma) { struct address_space *mapping; struct mm_struct *mm = vma->vm_mm; kenter("%p", vma); protect_vma(vma, 0); mm->map_count--; if (mm->mmap_cache == vma) mm->mmap_cache = NULL; /* remove the VMA from the mapping */ if (vma->vm_file) { mapping = vma->vm_file->f_mapping; mutex_lock(&mapping->i_mmap_mutex); flush_dcache_mmap_lock(mapping); vma_prio_tree_remove(vma, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); mutex_unlock(&mapping->i_mmap_mutex); } /* remove from the MM's tree and list */ rb_erase(&vma->vm_rb, &mm->mm_rb); if 
(vma->vm_prev) vma->vm_prev->vm_next = vma->vm_next; else mm->mmap = vma->vm_next; if (vma->vm_next) vma->vm_next->vm_prev = vma->vm_prev; } /* * destroy a VMA record */ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) { kenter("%p", vma); if (vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); if (vma->vm_file) { fput(vma->vm_file); if (vma->vm_flags & VM_EXECUTABLE) removed_exe_file_vma(mm); } put_nommu_region(vma->vm_region); kmem_cache_free(vm_area_cachep, vma); } /* * look up the first VMA in which addr resides, NULL if none * - should be called with mm->mmap_sem at least held readlocked */ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; /* check the cache first */ vma = mm->mmap_cache; if (vma && vma->vm_start <= addr && vma->vm_end > addr) return vma; /* trawl the list (there may be multiple mappings in which addr * resides) */ for (vma = mm->mmap; vma; vma = vma->vm_next) { if (vma->vm_start > addr) return NULL; if (vma->vm_end > addr) { mm->mmap_cache = vma; return vma; } } return NULL; } EXPORT_SYMBOL(find_vma); /* * find a VMA * - we don't extend stack VMAs under NOMMU conditions */ struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) { return find_vma(mm, addr); } /* * expand a stack to a given address * - not supported under NOMMU conditions */ int expand_stack(struct vm_area_struct *vma, unsigned long address) { return -ENOMEM; } /* * look up the first VMA exactly that exactly matches addr * - should be called with mm->mmap_sem at least held readlocked */ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm, unsigned long addr, unsigned long len) { struct vm_area_struct *vma; unsigned long end = addr + len; /* check the cache first */ vma = mm->mmap_cache; if (vma && vma->vm_start == addr && vma->vm_end == end) return vma; /* trawl the list (there may be multiple mappings in which addr * resides) */ for (vma = mm->mmap; vma; 
vma = vma->vm_next) { if (vma->vm_start < addr) continue; if (vma->vm_start > addr) return NULL; if (vma->vm_end == end) { mm->mmap_cache = vma; return vma; } } return NULL; } /* * determine whether a mapping should be permitted and, if so, what sort of * mapping we're capable of supporting */ static int validate_mmap_request(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *_capabilities) { unsigned long capabilities, rlen; unsigned long reqprot = prot; int ret; /* do the simple checks first */ if (flags & MAP_FIXED) { printk(KERN_DEBUG "%d: Can't do fixed-address/overlay mmap of RAM\n", current->pid); return -EINVAL; } if ((flags & MAP_TYPE) != MAP_PRIVATE && (flags & MAP_TYPE) != MAP_SHARED) return -EINVAL; if (!len) return -EINVAL; /* Careful about overflows.. */ rlen = PAGE_ALIGN(len); if (!rlen || rlen > TASK_SIZE) return -ENOMEM; /* offset overflow? */ if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff) return -EOVERFLOW; if (file) { /* validate file mapping requests */ struct address_space *mapping; /* files must support mmap */ if (!file->f_op || !file->f_op->mmap) return -ENODEV; /* work out if what we've got could possibly be shared * - we support chardevs that provide their own "memory" * - we support files/blockdevs that are memory backed */ mapping = file->f_mapping; if (!mapping) mapping = file->f_path.dentry->d_inode->i_mapping; capabilities = 0; if (mapping && mapping->backing_dev_info) capabilities = mapping->backing_dev_info->capabilities; if (!capabilities) { /* no explicit capabilities set, so assume some * defaults */ switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) { case S_IFREG: case S_IFBLK: capabilities = BDI_CAP_MAP_COPY; break; case S_IFCHR: capabilities = BDI_CAP_MAP_DIRECT | BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP; break; default: return -EINVAL; } } /* eliminate any capabilities that we can't support on this * device */ if (!file->f_op->get_unmapped_area) 
capabilities &= ~BDI_CAP_MAP_DIRECT; if (!file->f_op->read) capabilities &= ~BDI_CAP_MAP_COPY; /* The file shall have been opened with read permission. */ if (!(file->f_mode & FMODE_READ)) return -EACCES; if (flags & MAP_SHARED) { /* do checks for writing, appending and locking */ if ((prot & PROT_WRITE) && !(file->f_mode & FMODE_WRITE)) return -EACCES; if (IS_APPEND(file->f_path.dentry->d_inode) && (file->f_mode & FMODE_WRITE)) return -EACCES; if (locks_verify_locked(file->f_path.dentry->d_inode)) return -EAGAIN; if (!(capabilities & BDI_CAP_MAP_DIRECT)) return -ENODEV; /* we mustn't privatise shared mappings */ capabilities &= ~BDI_CAP_MAP_COPY; } else { /* we're going to read the file into private memory we * allocate */ if (!(capabilities & BDI_CAP_MAP_COPY)) return -ENODEV; /* we don't permit a private writable mapping to be * shared with the backing device */ if (prot & PROT_WRITE) capabilities &= ~BDI_CAP_MAP_DIRECT; } if (capabilities & BDI_CAP_MAP_DIRECT) { if (((prot & PROT_READ) && !(capabilities & BDI_CAP_READ_MAP)) || ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) || ((prot & PROT_EXEC) && !(capabilities & BDI_CAP_EXEC_MAP)) ) { capabilities &= ~BDI_CAP_MAP_DIRECT; if (flags & MAP_SHARED) { printk(KERN_WARNING "MAP_SHARED not completely supported on !MMU\n"); return -EINVAL; } } } /* handle executable mappings and implied executable * mappings */ if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) { if (prot & PROT_EXEC) return -EPERM; } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) { /* handle implication of PROT_EXEC by PROT_READ */ if (current->personality & READ_IMPLIES_EXEC) { if (capabilities & BDI_CAP_EXEC_MAP) prot |= PROT_EXEC; } } else if ((prot & PROT_READ) && (prot & PROT_EXEC) && !(capabilities & BDI_CAP_EXEC_MAP) ) { /* backing file is not executable, try to copy */ capabilities &= ~BDI_CAP_MAP_DIRECT; } } else { /* anonymous mappings are always memory backed and can be * privately mapped */ capabilities = BDI_CAP_MAP_COPY; 
/* handle PROT_EXEC implication by PROT_READ */ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) prot |= PROT_EXEC; } /* allow the security API to have its say */ ret = security_file_mmap(file, reqprot, prot, flags, addr, 0); if (ret < 0) return ret; /* looks okay */ *_capabilities = capabilities; return 0; } /* * we've determined that we can make the mapping, now translate what we * now know into VMA flags */ static unsigned long determine_vm_flags(struct file *file, unsigned long prot, unsigned long flags, unsigned long capabilities) { unsigned long vm_flags; vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags); /* vm_flags |= mm->def_flags; */ if (!(capabilities & BDI_CAP_MAP_DIRECT)) { /* attempt to share read-only copies of mapped file chunks */ vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; if (file && !(prot & PROT_WRITE)) vm_flags |= VM_MAYSHARE; } else { /* overlay a shareable mapping on the backing device or inode * if possible - used for chardevs, ramfs/tmpfs/shmfs and * romfs/cramfs */ vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS); if (flags & MAP_SHARED) vm_flags |= VM_SHARED; } /* refuse to let anyone share private mappings with this process if * it's being traced - otherwise breakpoints set in it may interfere * with another untraced process */ if ((flags & MAP_PRIVATE) && current->ptrace) vm_flags &= ~VM_MAYSHARE; return vm_flags; } /* * set up a shared mapping on a file (the driver or filesystem provides and * pins the storage) */ static int do_mmap_shared_file(struct vm_area_struct *vma) { int ret; ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); if (ret == 0) { vma->vm_region->vm_top = vma->vm_region->vm_end; return 0; } if (ret != -ENOSYS) return ret; /* getting -ENOSYS indicates that direct mmap isn't possible (as * opposed to tried but failed) so we can only give a suitable error as * it's not possible to make a private copy if MAP_SHARED was given */ return -ENODEV; } /* * set up a private 
mapping or an anonymous shared mapping */ static int do_mmap_private(struct vm_area_struct *vma, struct vm_region *region, unsigned long len, unsigned long capabilities) { struct page *pages; unsigned long total, point, n; void *base; int ret, order; /* invoke the file's mapping function so that it can keep track of * shared mappings on devices or memory * - VM_MAYSHARE will be set if it may attempt to share */ if (capabilities & BDI_CAP_MAP_DIRECT) { ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); if (ret == 0) { /* shouldn't return success if we're not sharing */ BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); vma->vm_region->vm_top = vma->vm_region->vm_end; return 0; } if (ret != -ENOSYS) return ret; /* getting an ENOSYS error indicates that direct mmap isn't * possible (as opposed to tried but failed) so we'll try to * make a private copy of the data and map that instead */ } /* allocate some memory to hold the mapping * - note that this may not return a page-aligned address if the object * we're allocating is smaller than a page */ order = get_order(len); kdebug("alloc order %d for %lx", order, len); pages = alloc_pages(GFP_KERNEL, order); if (!pages) goto enomem; total = 1 << order; atomic_long_add(total, &mmap_pages_allocated); point = len >> PAGE_SHIFT; /* we allocated a power-of-2 sized page set, so we may want to trim off * the excess */ if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) { while (total > point) { order = ilog2(total - point); n = 1 << order; kdebug("shave %lu/%lu @%lu", n, total - point, total); atomic_long_sub(n, &mmap_pages_allocated); total -= n; set_page_refcounted(pages + total); __free_pages(pages + total, order); } } for (point = 1; point < total; point++) set_page_refcounted(&pages[point]); base = page_address(pages); region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; region->vm_start = (unsigned long) base; region->vm_end = region->vm_start + len; region->vm_top = region->vm_start + (total << PAGE_SHIFT); 
vma->vm_start = region->vm_start; vma->vm_end = region->vm_start + len; if (vma->vm_file) { /* read the contents of a file into the copy */ mm_segment_t old_fs; loff_t fpos; fpos = vma->vm_pgoff; fpos <<= PAGE_SHIFT; old_fs = get_fs(); set_fs(KERNEL_DS); ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos); set_fs(old_fs); if (ret < 0) goto error_free; /* clear the last little bit */ if (ret < len) memset(base + ret, 0, len - ret); } return 0; error_free: free_page_series(region->vm_start, region->vm_top); region->vm_start = vma->vm_start = 0; region->vm_end = vma->vm_end = 0; region->vm_top = 0; return ret; enomem: printk("Allocation of length %lu from process %d (%s) failed\n", len, current->pid, current->comm); show_free_areas(0); return -ENOMEM; } /* * handle mapping creation for uClinux */ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff) { struct vm_area_struct *vma; struct vm_region *region; struct rb_node *rb; unsigned long capabilities, vm_flags, result; int ret; kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff); /* decide whether we should attempt the mapping, and if so what sort of * mapping */ ret = validate_mmap_request(file, addr, len, prot, flags, pgoff, &capabilities); if (ret < 0) { kleave(" = %d [val]", ret); return ret; } /* we ignore the address hint */ addr = 0; len = PAGE_ALIGN(len); /* we've determined that we can make the mapping, now translate what we * now know into VMA flags */ vm_flags = determine_vm_flags(file, prot, flags, capabilities); /* we're going to need to record the mapping */ region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL); if (!region) goto error_getting_region; vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) goto error_getting_vma; region->vm_usage = 1; region->vm_flags = vm_flags; region->vm_pgoff = pgoff; INIT_LIST_HEAD(&vma->anon_vma_chain); vma->vm_flags = vm_flags; 
vma->vm_pgoff = pgoff; if (file) { region->vm_file = file; get_file(file); vma->vm_file = file; get_file(file); if (vm_flags & VM_EXECUTABLE) { added_exe_file_vma(current->mm); vma->vm_mm = current->mm; } } down_write(&nommu_region_sem); /* if we want to share, we need to check for regions created by other * mmap() calls that overlap with our proposed mapping * - we can only share with a superset match on most regular files * - shared mappings on character devices and memory backed files are * permitted to overlap inexactly as far as we are concerned for in * these cases, sharing is handled in the driver or filesystem rather * than here */ if (vm_flags & VM_MAYSHARE) { struct vm_region *pregion; unsigned long pglen, rpglen, pgend, rpgend, start; pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; pgend = pgoff + pglen; for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) { pregion = rb_entry(rb, struct vm_region, vm_rb); if (!(pregion->vm_flags & VM_MAYSHARE)) continue; /* search for overlapping mappings on the same file */ if (pregion->vm_file->f_path.dentry->d_inode != file->f_path.dentry->d_inode) continue; if (pregion->vm_pgoff >= pgend) continue; rpglen = pregion->vm_end - pregion->vm_start; rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT; rpgend = pregion->vm_pgoff + rpglen; if (pgoff >= rpgend) continue; /* handle inexactly overlapping matches between * mappings */ if ((pregion->vm_pgoff != pgoff || rpglen != pglen) && !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) { /* new mapping is not a subset of the region */ if (!(capabilities & BDI_CAP_MAP_DIRECT)) goto sharing_violation; continue; } /* we've found a region we can share */ pregion->vm_usage++; vma->vm_region = pregion; start = pregion->vm_start; start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT; vma->vm_start = start; vma->vm_end = start + len; if (pregion->vm_flags & VM_MAPPED_COPY) { kdebug("share copy"); vma->vm_flags |= VM_MAPPED_COPY; } else { kdebug("share mmap"); ret = 
do_mmap_shared_file(vma); if (ret < 0) { vma->vm_region = NULL; vma->vm_start = 0; vma->vm_end = 0; pregion->vm_usage--; pregion = NULL; goto error_just_free; } } fput(region->vm_file); kmem_cache_free(vm_region_jar, region); region = pregion; result = start; goto share; } /* obtain the address at which to make a shared mapping * - this is the hook for quasi-memory character devices to * tell us the location of a shared mapping */ if (capabilities & BDI_CAP_MAP_DIRECT) { addr = file->f_op->get_unmapped_area(file, addr, len, pgoff, flags); if (IS_ERR_VALUE(addr)) { ret = addr; if (ret != -ENOSYS) goto error_just_free; /* the driver refused to tell us where to site * the mapping so we'll have to attempt to copy * it */ ret = -ENODEV; if (!(capabilities & BDI_CAP_MAP_COPY)) goto error_just_free; capabilities &= ~BDI_CAP_MAP_DIRECT; } else { vma->vm_start = region->vm_start = addr; vma->vm_end = region->vm_end = addr + len; } } } vma->vm_region = region; /* set up the mapping * - the region is filled in if BDI_CAP_MAP_DIRECT is still set */ if (file && vma->vm_flags & VM_SHARED) ret = do_mmap_shared_file(vma); else ret = do_mmap_private(vma, region, len, capabilities); if (ret < 0) goto error_just_free; add_nommu_region(region); /* clear anonymous mappings that don't ask for uninitialized data */ if (!vma->vm_file && !(flags & MAP_UNINITIALIZED)) memset((void *)region->vm_start, 0, region->vm_end - region->vm_start); /* okay... 
we have a mapping; now we have to register it */ result = vma->vm_start; current->mm->total_vm += len >> PAGE_SHIFT; share: add_vma_to_mm(current->mm, vma); /* we flush the region from the icache only when the first executable * mapping of it is made */ if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { flush_icache_range(region->vm_start, region->vm_end); region->vm_icache_flushed = true; } up_write(&nommu_region_sem); kleave(" = %lx", result); return result; error_just_free: up_write(&nommu_region_sem); error: if (region->vm_file) fput(region->vm_file); kmem_cache_free(vm_region_jar, region); if (vma->vm_file) fput(vma->vm_file); if (vma->vm_flags & VM_EXECUTABLE) removed_exe_file_vma(vma->vm_mm); kmem_cache_free(vm_area_cachep, vma); kleave(" = %d", ret); return ret; sharing_violation: up_write(&nommu_region_sem); printk(KERN_WARNING "Attempt to share mismatched mappings\n"); ret = -EINVAL; goto error; error_getting_vma: kmem_cache_free(vm_region_jar, region); printk(KERN_WARNING "Allocation of vma for %lu byte allocation" " from process %d failed\n", len, current->pid); show_free_areas(0); return -ENOMEM; error_getting_region: printk(KERN_WARNING "Allocation of vm region for %lu byte allocation" " from process %d failed\n", len, current->pid); show_free_areas(0); return -ENOMEM; } unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long offset) { if (unlikely(offset + PAGE_ALIGN(len) < offset)) return -EINVAL; if (unlikely(offset & ~PAGE_MASK)) return -EINVAL; return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); } EXPORT_SYMBOL(do_mmap); unsigned long vm_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long offset) { unsigned long ret; struct mm_struct *mm = current->mm; down_write(&mm->mmap_sem); ret = do_mmap(file, addr, len, prot, flag, offset); up_write(&mm->mmap_sem); return ret; } 
EXPORT_SYMBOL(vm_mmap);

/* mmap() system call entry: resolve the fd, take mmap_sem and map */
SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	audit_mmap_fd(fd, flags);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
/* legacy single-argument mmap: arguments passed in a user struct */
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * split a vma into two pieces at address 'addr', a new vma is allocated either
 * for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	kenter("");

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	/* the old VMA must come off the MM's structures before the region
	 * tree is updated, then both halves are re-added */
	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	kenter("");

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int ret;

	kenter(",%lx,%zx", start, len);

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	end = start + len;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		/* rate-limit the complaint to the first five occurrences */
		static int limit = 0;
		if (limit < 5) {
			printk(KERN_WARNING
			       "munmap of memory not mmapped by process %d"
			       " (%s): 0x%lx-0x%lx\n",
			       current->pid, current->comm,
			       start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start) {
				kleave(" = -EINVAL [miss]");
				return -EINVAL;
			}
			if (end == vma->vm_end)
				goto erase_whole_vma;
			vma = vma->vm_next;
		} while (vma);
		kleave(" = -EINVAL [split file]");
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end) {
			kleave(" = -EINVAL [superset]");
			return -EINVAL;
		}
		if (start & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned start]");
			return -EINVAL;
		}
		if (end != vma->vm_end && end & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned split]");
			return -EINVAL;
		}
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0) {
				kleave(" = %d [split]", ret);
				return ret;
			}
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	kleave(" = 0");
	return 0;
}
EXPORT_SYMBOL(do_munmap);

/* locked wrapper around do_munmap() for in-kernel callers */
int vm_munmap(unsigned long addr, size_t len)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	return vm_munmap(addr, len);
}

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	kenter("");

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
		cond_resched();
	}

	kleave("");
}

/* in-kernel brk-style allocation is not supported on NOMMU */
unsigned long vm_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and the
 * block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (addr & ~PAGE_MASK)
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	/* the new size must fit inside the backing region's allocation */
	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}
EXPORT_SYMBOL(do_mremap);

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/* no page tables to walk on NOMMU */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	return NULL;
}

/* on NOMMU a PFN "remap" only works when the range is already identity
 * mapped; just tag the VMA accordingly */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

/* point the VMA directly at the vmalloc'd buffer (VM_USERMAP required) */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

/* no free-area searching on NOMMU - mappings dictate their own addresses */
unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	return -ENOMEM;
}

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			free -= free / 32;

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = totalram_pages * sysctl_overcommit_ratio / 100;
	/*
	 * Leave the last 3% for root
	 */
	if (!cap_sys_admin)
		allowed -= allowed / 32;
	allowed += total_swap_pages;

	/* Don't let a single process grow too big:
	   leave 3% of the size of this process for other processes */
	if (mm)
		allowed -= mm->total_vm / 32;

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;

error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/* no gate area (e.g. vsyscall page) on NOMMU */
int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}

/* the generic pagecache fault path is unreachable on NOMMU - trap it */
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's
mappings */ vma = find_vma(mm, addr); if (vma) { /* don't overrun this mapping */ if (addr + len >= vma->vm_end) len = vma->vm_end - addr; /* only read or write mappings where it is permitted */ if (write && vma->vm_flags & VM_MAYWRITE) copy_to_user_page(vma, NULL, addr, (void *) addr, buf, len); else if (!write && vma->vm_flags & VM_MAYREAD) copy_from_user_page(vma, NULL, addr, buf, (void *) addr, len); else len = 0; } else { len = 0; } up_read(&mm->mmap_sem); return len; } /** * @access_remote_vm - access another process' address space * @mm: the mm_struct of the target address space * @addr: start address to access * @buf: source or destination buffer * @len: number of bytes to transfer * @write: whether the access is a write * * The caller must hold a reference on @mm. */ int access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) { return __access_remote_vm(NULL, mm, addr, buf, len, write); } /* * Access another process' address space. * - source/target buffer must be kernel space */ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) { struct mm_struct *mm; if (addr + len < addr) return 0; mm = get_task_mm(tsk); if (!mm) return 0; len = __access_remote_vm(tsk, mm, addr, buf, len, write); mmput(mm); return len; } /** * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode * @inode: The inode to check * @size: The current filesize of the inode * @newsize: The proposed filesize of the inode * * Check the shared mappings on an inode on behalf of a shrinking truncate to * make sure that that any outstanding VMAs aren't broken and then shrink the * vm_regions that extend that beyond so that do_mmap_pgoff() doesn't * automatically grant mappings that are too large. 
*/ int nommu_shrink_inode_mappings(struct inode *inode, size_t size, size_t newsize) { struct vm_area_struct *vma; struct prio_tree_iter iter; struct vm_region *region; pgoff_t low, high; size_t r_size, r_top; low = newsize >> PAGE_SHIFT; high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; down_write(&nommu_region_sem); mutex_lock(&inode->i_mapping->i_mmap_mutex); /* search for VMAs that fall within the dead zone */ vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap, low, high) { /* found one - only interested if it's shared out of the page * cache */ if (vma->vm_flags & VM_SHARED) { mutex_unlock(&inode->i_mapping->i_mmap_mutex); up_write(&nommu_region_sem); return -ETXTBSY; /* not quite true, but near enough */ } } /* reduce any regions that overlap the dead zone - if in existence, * these will be pointed to by VMAs that don't overlap the dead zone * * we don't check for any regions that start beyond the EOF as there * shouldn't be any */ vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap, 0, ULONG_MAX) { if (!(vma->vm_flags & VM_SHARED)) continue; region = vma->vm_region; r_size = region->vm_top - region->vm_start; r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size; if (r_top > newsize) { region->vm_top -= r_top - newsize; if (region->vm_end > region->vm_top) region->vm_end = region->vm_top; } } mutex_unlock(&inode->i_mapping->i_mmap_mutex); up_write(&nommu_region_sem); return 0; }
gpl-2.0
RezaSR/android_kernel_asus_tf303cl
drivers/media/i2c/msp3400-driver.c
2093
26965
/* * Programming the mspx4xx sound processor family * * (c) 1997-2001 Gerd Knorr <kraxel@bytesex.org> * * what works and what doesn't: * * AM-Mono * Support for Hauppauge cards added (decoding handled by tuner) added by * Frederic Crozat <fcrozat@mail.dotcom.fr> * * FM-Mono * should work. The stereo modes are backward compatible to FM-mono, * therefore FM-Mono should be allways available. * * FM-Stereo (B/G, used in germany) * should work, with autodetect * * FM-Stereo (satellite) * should work, no autodetect (i.e. default is mono, but you can * switch to stereo -- untested) * * NICAM (B/G, L , used in UK, Scandinavia, Spain and France) * should work, with autodetect. Support for NICAM was added by * Pekka Pietikainen <pp@netppl.fi> * * TODO: * - better SAT support * * 980623 Thomas Sailer (sailer@ife.ee.ethz.ch) * using soundcore instead of OSS * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/msp3400.h> #include <media/tvaudio.h> #include "msp3400-driver.h" /* ---------------------------------------------------------------------- */ MODULE_DESCRIPTION("device driver for msp34xx TV sound processor"); MODULE_AUTHOR("Gerd Knorr"); MODULE_LICENSE("GPL"); /* module parameters */ static int opmode = OPMODE_AUTO; int msp_debug; /* msp_debug output */ bool msp_once; /* no continuous stereo monitoring */ bool msp_amsound; /* hard-wire AM sound at 6.5 Hz (france), the autoscan seems work well only with FM... */ int msp_standard = 1; /* Override auto detect of audio msp_standard, if needed. */ bool msp_dolby; int msp_stereo_thresh = 0x190; /* a2 threshold for stereo/bilingual (msp34xxg only) 0x00a0-0x03c0 */ /* read-only */ module_param(opmode, int, 0444); /* read-write */ module_param_named(once, msp_once, bool, 0644); module_param_named(debug, msp_debug, int, 0644); module_param_named(stereo_threshold, msp_stereo_thresh, int, 0644); module_param_named(standard, msp_standard, int, 0644); module_param_named(amsound, msp_amsound, bool, 0644); module_param_named(dolby, msp_dolby, bool, 0644); MODULE_PARM_DESC(opmode, "Forces a MSP3400 opmode. 
0=Manual, 1=Autodetect, 2=Autodetect and autoselect"); MODULE_PARM_DESC(once, "No continuous stereo monitoring"); MODULE_PARM_DESC(debug, "Enable debug messages [0-3]"); MODULE_PARM_DESC(stereo_threshold, "Sets signal threshold to activate stereo"); MODULE_PARM_DESC(standard, "Specify audio standard: 32 = NTSC, 64 = radio, Default: Autodetect"); MODULE_PARM_DESC(amsound, "Hardwire AM sound at 6.5Hz (France), FM can autoscan"); MODULE_PARM_DESC(dolby, "Activates Dolby processing"); /* ---------------------------------------------------------------------- */ /* control subaddress */ #define I2C_MSP_CONTROL 0x00 /* demodulator unit subaddress */ #define I2C_MSP_DEM 0x10 /* DSP unit subaddress */ #define I2C_MSP_DSP 0x12 /* ----------------------------------------------------------------------- */ /* functions for talking to the MSP3400C Sound processor */ int msp_reset(struct i2c_client *client) { /* reset and read revision code */ static u8 reset_off[3] = { I2C_MSP_CONTROL, 0x80, 0x00 }; static u8 reset_on[3] = { I2C_MSP_CONTROL, 0x00, 0x00 }; static u8 write[3] = { I2C_MSP_DSP + 1, 0x00, 0x1e }; u8 read[2]; struct i2c_msg reset[2] = { { .addr = client->addr, .flags = I2C_M_IGNORE_NAK, .len = 3, .buf = reset_off }, { .addr = client->addr, .flags = I2C_M_IGNORE_NAK, .len = 3, .buf = reset_on }, }; struct i2c_msg test[2] = { { .addr = client->addr, .len = 3, .buf = write }, { .addr = client->addr, .flags = I2C_M_RD, .len = 2, .buf = read }, }; v4l_dbg(3, msp_debug, client, "msp_reset\n"); if (i2c_transfer(client->adapter, &reset[0], 1) != 1 || i2c_transfer(client->adapter, &reset[1], 1) != 1 || i2c_transfer(client->adapter, test, 2) != 2) { v4l_err(client, "chip reset failed\n"); return -1; } return 0; } static int msp_read(struct i2c_client *client, int dev, int addr) { int err, retval; u8 write[3]; u8 read[2]; struct i2c_msg msgs[2] = { { .addr = client->addr, .len = 3, .buf = write }, { .addr = client->addr, .flags = I2C_M_RD, .len = 2, .buf = read } }; write[0] = 
dev + 1; write[1] = addr >> 8; write[2] = addr & 0xff; for (err = 0; err < 3; err++) { if (i2c_transfer(client->adapter, msgs, 2) == 2) break; v4l_warn(client, "I/O error #%d (read 0x%02x/0x%02x)\n", err, dev, addr); schedule_timeout_interruptible(msecs_to_jiffies(10)); } if (err == 3) { v4l_warn(client, "resetting chip, sound will go off.\n"); msp_reset(client); return -1; } retval = read[0] << 8 | read[1]; v4l_dbg(3, msp_debug, client, "msp_read(0x%x, 0x%x): 0x%x\n", dev, addr, retval); return retval; } int msp_read_dem(struct i2c_client *client, int addr) { return msp_read(client, I2C_MSP_DEM, addr); } int msp_read_dsp(struct i2c_client *client, int addr) { return msp_read(client, I2C_MSP_DSP, addr); } static int msp_write(struct i2c_client *client, int dev, int addr, int val) { int err; u8 buffer[5]; buffer[0] = dev; buffer[1] = addr >> 8; buffer[2] = addr & 0xff; buffer[3] = val >> 8; buffer[4] = val & 0xff; v4l_dbg(3, msp_debug, client, "msp_write(0x%x, 0x%x, 0x%x)\n", dev, addr, val); for (err = 0; err < 3; err++) { if (i2c_master_send(client, buffer, 5) == 5) break; v4l_warn(client, "I/O error #%d (write 0x%02x/0x%02x)\n", err, dev, addr); schedule_timeout_interruptible(msecs_to_jiffies(10)); } if (err == 3) { v4l_warn(client, "resetting chip, sound will go off.\n"); msp_reset(client); return -1; } return 0; } int msp_write_dem(struct i2c_client *client, int addr, int val) { return msp_write(client, I2C_MSP_DEM, addr, val); } int msp_write_dsp(struct i2c_client *client, int addr, int val) { return msp_write(client, I2C_MSP_DSP, addr, val); } /* ----------------------------------------------------------------------- * * bits 9 8 5 - SCART DSP input Select: * 0 0 0 - SCART 1 to DSP input (reset position) * 0 1 0 - MONO to DSP input * 1 0 0 - SCART 2 to DSP input * 1 1 1 - Mute DSP input * * bits 11 10 6 - SCART 1 Output Select: * 0 0 0 - undefined (reset position) * 0 1 0 - SCART 2 Input to SCART 1 Output (for devices with 2 SCARTS) * 1 0 0 - MONO input to 
SCART 1 Output * 1 1 0 - SCART 1 DA to SCART 1 Output * 0 0 1 - SCART 2 DA to SCART 1 Output * 0 1 1 - SCART 1 Input to SCART 1 Output * 1 1 1 - Mute SCART 1 Output * * bits 13 12 7 - SCART 2 Output Select (for devices with 2 Output SCART): * 0 0 0 - SCART 1 DA to SCART 2 Output (reset position) * 0 1 0 - SCART 1 Input to SCART 2 Output * 1 0 0 - MONO input to SCART 2 Output * 0 0 1 - SCART 2 DA to SCART 2 Output * 0 1 1 - SCART 2 Input to SCART 2 Output * 1 1 0 - Mute SCART 2 Output * * Bits 4 to 0 should be zero. * ----------------------------------------------------------------------- */ static int scarts[3][9] = { /* MASK IN1 IN2 IN3 IN4 IN1_DA IN2_DA MONO MUTE */ /* SCART DSP Input select */ { 0x0320, 0x0000, 0x0200, 0x0300, 0x0020, -1, -1, 0x0100, 0x0320 }, /* SCART1 Output select */ { 0x0c40, 0x0440, 0x0400, 0x0000, 0x0840, 0x0c00, 0x0040, 0x0800, 0x0c40 }, /* SCART2 Output select */ { 0x3080, 0x1000, 0x1080, 0x2080, 0x3080, 0x0000, 0x0080, 0x2000, 0x3000 }, }; static char *scart_names[] = { "in1", "in2", "in3", "in4", "in1 da", "in2 da", "mono", "mute" }; void msp_set_scart(struct i2c_client *client, int in, int out) { struct msp_state *state = to_state(i2c_get_clientdata(client)); state->in_scart = in; if (in >= 0 && in <= 7 && out >= 0 && out <= 2) { if (-1 == scarts[out][in + 1]) return; state->acb &= ~scarts[out][0]; state->acb |= scarts[out][in + 1]; } else state->acb = 0xf60; /* Mute Input and SCART 1 Output */ v4l_dbg(1, msp_debug, client, "scart switch: %s => %d (ACB=0x%04x)\n", scart_names[in], out, state->acb); msp_write_dsp(client, 0x13, state->acb); /* Sets I2S speed 0 = 1.024 Mbps, 1 = 2.048 Mbps */ if (state->has_i2s_conf) msp_write_dem(client, 0x40, state->i2s_mode); } /* ------------------------------------------------------------------------ */ static void msp_wake_thread(struct i2c_client *client) { struct msp_state *state = to_state(i2c_get_clientdata(client)); if (NULL == state->kthread) return; state->watch_stereo = 0; state->restart = 
1; wake_up_interruptible(&state->wq); } int msp_sleep(struct msp_state *state, int timeout) { DECLARE_WAITQUEUE(wait, current); add_wait_queue(&state->wq, &wait); if (!kthread_should_stop()) { if (timeout < 0) { set_current_state(TASK_INTERRUPTIBLE); schedule(); } else { schedule_timeout_interruptible (msecs_to_jiffies(timeout)); } } remove_wait_queue(&state->wq, &wait); try_to_freeze(); return state->restart; } /* ------------------------------------------------------------------------ */ static int msp_s_ctrl(struct v4l2_ctrl *ctrl) { struct msp_state *state = ctrl_to_state(ctrl); struct i2c_client *client = v4l2_get_subdevdata(&state->sd); int val = ctrl->val; switch (ctrl->id) { case V4L2_CID_AUDIO_VOLUME: { /* audio volume cluster */ int reallymuted = state->muted->val | state->scan_in_progress; if (!reallymuted) val = (val * 0x7f / 65535) << 8; v4l_dbg(1, msp_debug, client, "mute=%s scanning=%s volume=%d\n", state->muted->val ? "on" : "off", state->scan_in_progress ? "yes" : "no", state->volume->val); msp_write_dsp(client, 0x0000, val); msp_write_dsp(client, 0x0007, reallymuted ? 0x1 : (val | 0x1)); if (state->has_scart2_out_volume) msp_write_dsp(client, 0x0040, reallymuted ? 0x1 : (val | 0x1)); if (state->has_headphones) msp_write_dsp(client, 0x0006, val); break; } case V4L2_CID_AUDIO_BASS: val = ((val - 32768) * 0x60 / 65535) << 8; msp_write_dsp(client, 0x0002, val); if (state->has_headphones) msp_write_dsp(client, 0x0031, val); break; case V4L2_CID_AUDIO_TREBLE: val = ((val - 32768) * 0x60 / 65535) << 8; msp_write_dsp(client, 0x0003, val); if (state->has_headphones) msp_write_dsp(client, 0x0032, val); break; case V4L2_CID_AUDIO_LOUDNESS: val = val ? 
((5 * 4) << 8) : 0; msp_write_dsp(client, 0x0004, val); if (state->has_headphones) msp_write_dsp(client, 0x0033, val); break; case V4L2_CID_AUDIO_BALANCE: val = (u8)((val / 256) - 128); msp_write_dsp(client, 0x0001, val << 8); if (state->has_headphones) msp_write_dsp(client, 0x0030, val << 8); break; default: return -EINVAL; } return 0; } void msp_update_volume(struct msp_state *state) { /* Force an update of the volume/mute cluster */ v4l2_ctrl_lock(state->volume); state->volume->val = state->volume->cur.val; state->muted->val = state->muted->cur.val; msp_s_ctrl(state->volume); v4l2_ctrl_unlock(state->volume); } /* --- v4l2 ioctls --- */ static int msp_s_radio(struct v4l2_subdev *sd) { struct msp_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); if (state->radio) return 0; state->radio = 1; v4l_dbg(1, msp_debug, client, "switching to radio mode\n"); state->watch_stereo = 0; switch (state->opmode) { case OPMODE_MANUAL: /* set msp3400 to FM radio mode */ msp3400c_set_mode(client, MSP_MODE_FM_RADIO); msp3400c_set_carrier(client, MSP_CARRIER(10.7), MSP_CARRIER(10.7)); msp_update_volume(state); break; case OPMODE_AUTODETECT: case OPMODE_AUTOSELECT: /* the thread will do for us */ msp_wake_thread(client); break; } return 0; } static int msp_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequency *freq) { struct i2c_client *client = v4l2_get_subdevdata(sd); /* new channel -- kick audio carrier scan */ msp_wake_thread(client); return 0; } static int msp_querystd(struct v4l2_subdev *sd, v4l2_std_id *id) { struct msp_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); *id &= state->detected_std; v4l_dbg(2, msp_debug, client, "detected standard: %s(0x%08Lx)\n", msp_standard_std_name(state->std), state->detected_std); return 0; } static int msp_s_std(struct v4l2_subdev *sd, v4l2_std_id id) { struct msp_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int update = 
state->radio || state->v4l2_std != id; state->v4l2_std = id; state->radio = 0; if (update) msp_wake_thread(client); return 0; } static int msp_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct msp_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int tuner = (input >> 3) & 1; int sc_in = input & 0x7; int sc1_out = output & 0xf; int sc2_out = (output >> 4) & 0xf; u16 val, reg; int i; int extern_input = 1; if (state->route_in == input && state->route_out == output) return 0; state->route_in = input; state->route_out = output; /* check if the tuner input is used */ for (i = 0; i < 5; i++) { if (((input >> (4 + i * 4)) & 0xf) == 0) extern_input = 0; } state->mode = extern_input ? MSP_MODE_EXTERN : MSP_MODE_AM_DETECT; state->rxsubchans = V4L2_TUNER_SUB_STEREO; msp_set_scart(client, sc_in, 0); msp_set_scart(client, sc1_out, 1); msp_set_scart(client, sc2_out, 2); msp_set_audmode(client); reg = (state->opmode == OPMODE_AUTOSELECT) ? 0x30 : 0xbb; val = msp_read_dem(client, reg); msp_write_dem(client, reg, (val & ~0x100) | (tuner << 8)); /* wake thread when a new input is chosen */ msp_wake_thread(client); return 0; } static int msp_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct msp_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); if (vt->type != V4L2_TUNER_ANALOG_TV) return 0; if (!state->radio) { if (state->opmode == OPMODE_AUTOSELECT) msp_detect_stereo(client); vt->rxsubchans = state->rxsubchans; } vt->audmode = state->audmode; vt->capability |= V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; return 0; } static int msp_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt) { struct msp_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); if (state->radio) /* TODO: add mono/stereo support for radio */ return 0; if (state->audmode == vt->audmode) return 0; state->audmode = vt->audmode; /* only set audmode */ 
msp_set_audmode(client); return 0; } static int msp_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq) { struct msp_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); v4l_dbg(1, msp_debug, client, "Setting I2S speed to %d\n", freq); switch (freq) { case 1024000: state->i2s_mode = 0; break; case 2048000: state->i2s_mode = 1; break; default: return -EINVAL; } return 0; } static int msp_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct msp_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, state->ident, (state->rev1 << 16) | state->rev2); } static int msp_log_status(struct v4l2_subdev *sd) { struct msp_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); const char *p; char prefix[V4L2_SUBDEV_NAME_SIZE + 20]; if (state->opmode == OPMODE_AUTOSELECT) msp_detect_stereo(client); v4l_info(client, "%s rev1 = 0x%04x rev2 = 0x%04x\n", client->name, state->rev1, state->rev2); snprintf(prefix, sizeof(prefix), "%s: Audio: ", sd->name); v4l2_ctrl_handler_log_status(&state->hdl, prefix); switch (state->mode) { case MSP_MODE_AM_DETECT: p = "AM (for carrier detect)"; break; case MSP_MODE_FM_RADIO: p = "FM Radio"; break; case MSP_MODE_FM_TERRA: p = "Terrestrial FM-mono/stereo"; break; case MSP_MODE_FM_SAT: p = "Satellite FM-mono"; break; case MSP_MODE_FM_NICAM1: p = "NICAM/FM (B/G, D/K)"; break; case MSP_MODE_FM_NICAM2: p = "NICAM/FM (I)"; break; case MSP_MODE_AM_NICAM: p = "NICAM/AM (L)"; break; case MSP_MODE_BTSC: p = "BTSC"; break; case MSP_MODE_EXTERN: p = "External input"; break; default: p = "unknown"; break; } if (state->mode == MSP_MODE_EXTERN) { v4l_info(client, "Mode: %s\n", p); } else if (state->opmode == OPMODE_MANUAL) { v4l_info(client, "Mode: %s (%s%s)\n", p, (state->rxsubchans & V4L2_TUNER_SUB_STEREO) ? "stereo" : "mono", (state->rxsubchans & V4L2_TUNER_SUB_LANG2) ? 
", dual" : ""); } else { if (state->opmode == OPMODE_AUTODETECT) v4l_info(client, "Mode: %s\n", p); v4l_info(client, "Standard: %s (%s%s)\n", msp_standard_std_name(state->std), (state->rxsubchans & V4L2_TUNER_SUB_STEREO) ? "stereo" : "mono", (state->rxsubchans & V4L2_TUNER_SUB_LANG2) ? ", dual" : ""); } v4l_info(client, "Audmode: 0x%04x\n", state->audmode); v4l_info(client, "Routing: 0x%08x (input) 0x%08x (output)\n", state->route_in, state->route_out); v4l_info(client, "ACB: 0x%04x\n", state->acb); return 0; } #ifdef CONFIG_PM_SLEEP static int msp_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); v4l_dbg(1, msp_debug, client, "suspend\n"); msp_reset(client); return 0; } static int msp_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); v4l_dbg(1, msp_debug, client, "resume\n"); msp_wake_thread(client); return 0; } #endif /* ----------------------------------------------------------------------- */ static const struct v4l2_ctrl_ops msp_ctrl_ops = { .s_ctrl = msp_s_ctrl, }; static const struct v4l2_subdev_core_ops msp_core_ops = { .log_status = msp_log_status, .g_chip_ident = msp_g_chip_ident, .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, .g_ctrl = v4l2_subdev_g_ctrl, .s_ctrl = v4l2_subdev_s_ctrl, .queryctrl = v4l2_subdev_queryctrl, .querymenu = v4l2_subdev_querymenu, .s_std = msp_s_std, }; static const struct v4l2_subdev_video_ops msp_video_ops = { .querystd = msp_querystd, }; static const struct v4l2_subdev_tuner_ops msp_tuner_ops = { .s_frequency = msp_s_frequency, .g_tuner = msp_g_tuner, .s_tuner = msp_s_tuner, .s_radio = msp_s_radio, }; static const struct v4l2_subdev_audio_ops msp_audio_ops = { .s_routing = msp_s_routing, .s_i2s_clock_freq = msp_s_i2s_clock_freq, }; static const struct v4l2_subdev_ops msp_ops = { .core = &msp_core_ops, .video = &msp_video_ops, .tuner = &msp_tuner_ops, .audio = &msp_audio_ops, }; /* 
----------------------------------------------------------------------- */ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct msp_state *state; struct v4l2_subdev *sd; struct v4l2_ctrl_handler *hdl; int (*thread_func)(void *data) = NULL; int msp_hard; int msp_family; int msp_revision; int msp_product, msp_prod_hi, msp_prod_lo; int msp_rom; if (!id) strlcpy(client->name, "msp3400", sizeof(client->name)); if (msp_reset(client) == -1) { v4l_dbg(1, msp_debug, client, "msp3400 not found\n"); return -ENODEV; } state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return -ENOMEM; sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &msp_ops); state->v4l2_std = V4L2_STD_NTSC; state->detected_std = V4L2_STD_ALL; state->audmode = V4L2_TUNER_MODE_STEREO; state->input = -1; state->i2s_mode = 0; init_waitqueue_head(&state->wq); /* These are the reset input/output positions */ state->route_in = MSP_INPUT_DEFAULT; state->route_out = MSP_OUTPUT_DEFAULT; state->rev1 = msp_read_dsp(client, 0x1e); if (state->rev1 != -1) state->rev2 = msp_read_dsp(client, 0x1f); v4l_dbg(1, msp_debug, client, "rev1=0x%04x, rev2=0x%04x\n", state->rev1, state->rev2); if (state->rev1 == -1 || (state->rev1 == 0 && state->rev2 == 0)) { v4l_dbg(1, msp_debug, client, "not an msp3400 (cannot read chip version)\n"); kfree(state); return -ENODEV; } msp_family = ((state->rev1 >> 4) & 0x0f) + 3; msp_product = (state->rev2 >> 8) & 0xff; msp_prod_hi = msp_product / 10; msp_prod_lo = msp_product % 10; msp_revision = (state->rev1 & 0x0f) + '@'; msp_hard = ((state->rev1 >> 8) & 0xff) + '@'; msp_rom = state->rev2 & 0x1f; /* Rev B=2, C=3, D=4, G=7 */ state->ident = msp_family * 10000 + 4000 + msp_product * 10 + msp_revision - '@'; /* Has NICAM support: all mspx41x and mspx45x products have NICAM */ state->has_nicam = msp_prod_hi == 1 || msp_prod_hi == 5; /* Has radio support: was added with revision G */ state->has_radio = msp_revision >= 'G'; /* Has headphones output: not for 
stripped down products */ state->has_headphones = msp_prod_lo < 5; /* Has scart2 input: not in stripped down products of the '3' family */ state->has_scart2 = msp_family >= 4 || msp_prod_lo < 7; /* Has scart3 input: not in stripped down products of the '3' family */ state->has_scart3 = msp_family >= 4 || msp_prod_lo < 5; /* Has scart4 input: not in pre D revisions, not in stripped D revs */ state->has_scart4 = msp_family >= 4 || (msp_revision >= 'D' && msp_prod_lo < 5); /* Has scart2 output: not in stripped down products of * the '3' family */ state->has_scart2_out = msp_family >= 4 || msp_prod_lo < 5; /* Has scart2 a volume control? Not in pre-D revisions. */ state->has_scart2_out_volume = msp_revision > 'C' && state->has_scart2_out; /* Has a configurable i2s out? */ state->has_i2s_conf = msp_revision >= 'G' && msp_prod_lo < 7; /* Has subwoofer output: not in pre-D revs and not in stripped down * products */ state->has_subwoofer = msp_revision >= 'D' && msp_prod_lo < 5; /* Has soundprocessing (bass/treble/balance/loudness/equalizer): * not in stripped down products */ state->has_sound_processing = msp_prod_lo < 7; /* Has Virtual Dolby Surround: only in msp34x1 */ state->has_virtual_dolby_surround = msp_revision == 'G' && msp_prod_lo == 1; /* Has Virtual Dolby Surround & Dolby Pro Logic: only in msp34x2 */ state->has_dolby_pro_logic = msp_revision == 'G' && msp_prod_lo == 2; /* The msp343xG supports BTSC only and cannot do Automatic Standard * Detection. 
*/ state->force_btsc = msp_family == 3 && msp_revision == 'G' && msp_prod_hi == 3; state->opmode = opmode; if (state->opmode == OPMODE_AUTO) { /* MSP revision G and up have both autodetect and autoselect */ if (msp_revision >= 'G') state->opmode = OPMODE_AUTOSELECT; /* MSP revision D and up have autodetect */ else if (msp_revision >= 'D') state->opmode = OPMODE_AUTODETECT; else state->opmode = OPMODE_MANUAL; } hdl = &state->hdl; v4l2_ctrl_handler_init(hdl, 6); if (state->has_sound_processing) { v4l2_ctrl_new_std(hdl, &msp_ctrl_ops, V4L2_CID_AUDIO_BASS, 0, 65535, 65535 / 100, 32768); v4l2_ctrl_new_std(hdl, &msp_ctrl_ops, V4L2_CID_AUDIO_TREBLE, 0, 65535, 65535 / 100, 32768); v4l2_ctrl_new_std(hdl, &msp_ctrl_ops, V4L2_CID_AUDIO_LOUDNESS, 0, 1, 1, 0); } state->volume = v4l2_ctrl_new_std(hdl, &msp_ctrl_ops, V4L2_CID_AUDIO_VOLUME, 0, 65535, 65535 / 100, 58880); v4l2_ctrl_new_std(hdl, &msp_ctrl_ops, V4L2_CID_AUDIO_BALANCE, 0, 65535, 65535 / 100, 32768); state->muted = v4l2_ctrl_new_std(hdl, &msp_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0); sd->ctrl_handler = hdl; if (hdl->error) { int err = hdl->error; v4l2_ctrl_handler_free(hdl); kfree(state); return err; } v4l2_ctrl_cluster(2, &state->volume); v4l2_ctrl_handler_setup(hdl); /* hello world :-) */ v4l_info(client, "MSP%d4%02d%c-%c%d found @ 0x%x (%s)\n", msp_family, msp_product, msp_revision, msp_hard, msp_rom, client->addr << 1, client->adapter->name); v4l_info(client, "%s ", client->name); if (state->has_nicam && state->has_radio) printk(KERN_CONT "supports nicam and radio, "); else if (state->has_nicam) printk(KERN_CONT "supports nicam, "); else if (state->has_radio) printk(KERN_CONT "supports radio, "); printk(KERN_CONT "mode is "); /* version-specific initialization */ switch (state->opmode) { case OPMODE_MANUAL: printk(KERN_CONT "manual"); thread_func = msp3400c_thread; break; case OPMODE_AUTODETECT: printk(KERN_CONT "autodetect"); thread_func = msp3410d_thread; break; case OPMODE_AUTOSELECT: printk(KERN_CONT 
"autodetect and autoselect"); thread_func = msp34xxg_thread; break; } printk(KERN_CONT "\n"); /* startup control thread if needed */ if (thread_func) { state->kthread = kthread_run(thread_func, client, "msp34xx"); if (IS_ERR(state->kthread)) v4l_warn(client, "kernel_thread() failed\n"); msp_wake_thread(client); } return 0; } static int msp_remove(struct i2c_client *client) { struct msp_state *state = to_state(i2c_get_clientdata(client)); v4l2_device_unregister_subdev(&state->sd); /* shutdown control thread */ if (state->kthread) { state->restart = 1; kthread_stop(state->kthread); } msp_reset(client); v4l2_ctrl_handler_free(&state->hdl); kfree(state); return 0; } /* ----------------------------------------------------------------------- */ static const struct dev_pm_ops msp3400_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(msp_suspend, msp_resume) }; static const struct i2c_device_id msp_id[] = { { "msp3400", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, msp_id); static struct i2c_driver msp_driver = { .driver = { .owner = THIS_MODULE, .name = "msp3400", .pm = &msp3400_pm_ops, }, .probe = msp_probe, .remove = msp_remove, .id_table = msp_id, }; module_i2c_driver(msp_driver); /* * Overrides for Emacs so that we follow Linus's tabbing style. * --------------------------------------------------------------------------- * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
rubiojr/surface3-kernel
drivers/video/exynos/exynos_mipi_dsi_lowlevel.c
2093
15880
/* linux/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c * * Samsung SoC MIPI-DSI lowlevel driver. * * Copyright (c) 2012 Samsung Electronics Co., Ltd * * InKi Dae, <inki.dae@samsung.com> * Donghwa Lee, <dh09.lee@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/mutex.h> #include <linux/wait.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/ctype.h> #include <linux/platform_device.h> #include <linux/io.h> #include <video/exynos_mipi_dsim.h> #include "exynos_mipi_dsi_regs.h" #include "exynos_mipi_dsi_lowlevel.h" void exynos_mipi_dsi_func_reset(struct mipi_dsim_device *dsim) { unsigned int reg; reg = readl(dsim->reg_base + EXYNOS_DSIM_SWRST); reg |= DSIM_FUNCRST; writel(reg, dsim->reg_base + EXYNOS_DSIM_SWRST); } void exynos_mipi_dsi_sw_reset(struct mipi_dsim_device *dsim) { unsigned int reg; reg = readl(dsim->reg_base + EXYNOS_DSIM_SWRST); reg |= DSIM_SWRST; writel(reg, dsim->reg_base + EXYNOS_DSIM_SWRST); } void exynos_mipi_dsi_sw_reset_release(struct mipi_dsim_device *dsim) { unsigned int reg; reg = readl(dsim->reg_base + EXYNOS_DSIM_INTSRC); reg |= INTSRC_SW_RST_RELEASE; writel(reg, dsim->reg_base + EXYNOS_DSIM_INTSRC); } int exynos_mipi_dsi_get_sw_reset_release(struct mipi_dsim_device *dsim) { return (readl(dsim->reg_base + EXYNOS_DSIM_INTSRC)) & INTSRC_SW_RST_RELEASE; } unsigned int exynos_mipi_dsi_read_interrupt_mask(struct mipi_dsim_device *dsim) { unsigned int reg; reg = readl(dsim->reg_base + EXYNOS_DSIM_INTMSK); return reg; } void exynos_mipi_dsi_set_interrupt_mask(struct mipi_dsim_device *dsim, unsigned int mode, unsigned int mask) { unsigned int reg = 0; if (mask) reg |= mode; else reg &= ~mode; writel(reg, dsim->reg_base + EXYNOS_DSIM_INTMSK); } void 
exynos_mipi_dsi_init_fifo_pointer(struct mipi_dsim_device *dsim, unsigned int cfg) { unsigned int reg; reg = readl(dsim->reg_base + EXYNOS_DSIM_FIFOCTRL); writel(reg & ~(cfg), dsim->reg_base + EXYNOS_DSIM_FIFOCTRL); mdelay(10); reg |= cfg; writel(reg, dsim->reg_base + EXYNOS_DSIM_FIFOCTRL); } /* * this function set PLL P, M and S value in D-PHY */ void exynos_mipi_dsi_set_phy_tunning(struct mipi_dsim_device *dsim, unsigned int value) { writel(DSIM_AFC_CTL(value), dsim->reg_base + EXYNOS_DSIM_PHYACCHR); } void exynos_mipi_dsi_set_main_stand_by(struct mipi_dsim_device *dsim, unsigned int enable) { unsigned int reg; reg = readl(dsim->reg_base + EXYNOS_DSIM_MDRESOL); reg &= ~DSIM_MAIN_STAND_BY; if (enable) reg |= DSIM_MAIN_STAND_BY; writel(reg, dsim->reg_base + EXYNOS_DSIM_MDRESOL); } void exynos_mipi_dsi_set_main_disp_resol(struct mipi_dsim_device *dsim, unsigned int width_resol, unsigned int height_resol) { unsigned int reg; /* standby should be set after configuration so set to not ready*/ reg = (readl(dsim->reg_base + EXYNOS_DSIM_MDRESOL)) & ~(DSIM_MAIN_STAND_BY); writel(reg, dsim->reg_base + EXYNOS_DSIM_MDRESOL); reg &= ~((0x7ff << 16) | (0x7ff << 0)); reg |= DSIM_MAIN_VRESOL(height_resol) | DSIM_MAIN_HRESOL(width_resol); reg |= DSIM_MAIN_STAND_BY; writel(reg, dsim->reg_base + EXYNOS_DSIM_MDRESOL); } void exynos_mipi_dsi_set_main_disp_vporch(struct mipi_dsim_device *dsim, unsigned int cmd_allow, unsigned int vfront, unsigned int vback) { unsigned int reg; reg = (readl(dsim->reg_base + EXYNOS_DSIM_MVPORCH)) & ~((DSIM_CMD_ALLOW_MASK) | (DSIM_STABLE_VFP_MASK) | (DSIM_MAIN_VBP_MASK)); reg |= (DSIM_CMD_ALLOW_SHIFT(cmd_allow & 0xf) | DSIM_STABLE_VFP_SHIFT(vfront & 0x7ff) | DSIM_MAIN_VBP_SHIFT(vback & 0x7ff)); writel(reg, dsim->reg_base + EXYNOS_DSIM_MVPORCH); } void exynos_mipi_dsi_set_main_disp_hporch(struct mipi_dsim_device *dsim, unsigned int front, unsigned int back) { unsigned int reg; reg = (readl(dsim->reg_base + EXYNOS_DSIM_MHPORCH)) & ~((DSIM_MAIN_HFP_MASK) | 
(DSIM_MAIN_HBP_MASK)); reg |= DSIM_MAIN_HFP_SHIFT(front) | DSIM_MAIN_HBP_SHIFT(back); writel(reg, dsim->reg_base + EXYNOS_DSIM_MHPORCH); } void exynos_mipi_dsi_set_main_disp_sync_area(struct mipi_dsim_device *dsim, unsigned int vert, unsigned int hori) { unsigned int reg; reg = (readl(dsim->reg_base + EXYNOS_DSIM_MSYNC)) & ~((DSIM_MAIN_VSA_MASK) | (DSIM_MAIN_HSA_MASK)); reg |= (DSIM_MAIN_VSA_SHIFT(vert & 0x3ff) | DSIM_MAIN_HSA_SHIFT(hori)); writel(reg, dsim->reg_base + EXYNOS_DSIM_MSYNC); } void exynos_mipi_dsi_set_sub_disp_resol(struct mipi_dsim_device *dsim, unsigned int vert, unsigned int hori) { unsigned int reg; reg = (readl(dsim->reg_base + EXYNOS_DSIM_SDRESOL)) & ~(DSIM_SUB_STANDY_MASK); writel(reg, dsim->reg_base + EXYNOS_DSIM_SDRESOL); reg &= ~(DSIM_SUB_VRESOL_MASK) | ~(DSIM_SUB_HRESOL_MASK); reg |= (DSIM_SUB_VRESOL_SHIFT(vert & 0x7ff) | DSIM_SUB_HRESOL_SHIFT(hori & 0x7ff)); writel(reg, dsim->reg_base + EXYNOS_DSIM_SDRESOL); reg |= DSIM_SUB_STANDY_SHIFT(1); writel(reg, dsim->reg_base + EXYNOS_DSIM_SDRESOL); } void exynos_mipi_dsi_init_config(struct mipi_dsim_device *dsim) { struct mipi_dsim_config *dsim_config = dsim->dsim_config; unsigned int cfg = (readl(dsim->reg_base + EXYNOS_DSIM_CONFIG)) & ~((1 << 28) | (0x1f << 20) | (0x3 << 5)); cfg = ((DSIM_AUTO_FLUSH(dsim_config->auto_flush)) | (DSIM_EOT_DISABLE(dsim_config->eot_disable)) | (DSIM_AUTO_MODE_SHIFT(dsim_config->auto_vertical_cnt)) | (DSIM_HSE_MODE_SHIFT(dsim_config->hse)) | (DSIM_HFP_MODE_SHIFT(dsim_config->hfp)) | (DSIM_HBP_MODE_SHIFT(dsim_config->hbp)) | (DSIM_HSA_MODE_SHIFT(dsim_config->hsa)) | (DSIM_NUM_OF_DATALANE_SHIFT(dsim_config->e_no_data_lane))); writel(cfg, dsim->reg_base + EXYNOS_DSIM_CONFIG); } void exynos_mipi_dsi_display_config(struct mipi_dsim_device *dsim, struct mipi_dsim_config *dsim_config) { u32 reg = (readl(dsim->reg_base + EXYNOS_DSIM_CONFIG)) & ~((0x3 << 26) | (1 << 25) | (0x3 << 18) | (0x7 << 12) | (0x3 << 16) | (0x7 << 8)); if (dsim_config->e_interface == DSIM_VIDEO) reg |= 
(1 << 25); else if (dsim_config->e_interface == DSIM_COMMAND) reg &= ~(1 << 25); else { dev_err(dsim->dev, "unknown lcd type.\n"); return; } /* main lcd */ reg |= ((u8) (dsim_config->e_burst_mode) & 0x3) << 26 | ((u8) (dsim_config->e_virtual_ch) & 0x3) << 18 | ((u8) (dsim_config->e_pixel_format) & 0x7) << 12; writel(reg, dsim->reg_base + EXYNOS_DSIM_CONFIG); } void exynos_mipi_dsi_enable_lane(struct mipi_dsim_device *dsim, unsigned int lane, unsigned int enable) { unsigned int reg; reg = readl(dsim->reg_base + EXYNOS_DSIM_CONFIG); if (enable) reg |= DSIM_LANE_ENx(lane); else reg &= ~DSIM_LANE_ENx(lane); writel(reg, dsim->reg_base + EXYNOS_DSIM_CONFIG); } void exynos_mipi_dsi_set_data_lane_number(struct mipi_dsim_device *dsim, unsigned int count) { unsigned int cfg; /* get the data lane number. */ cfg = DSIM_NUM_OF_DATALANE_SHIFT(count); writel(cfg, dsim->reg_base + EXYNOS_DSIM_CONFIG); } void exynos_mipi_dsi_enable_afc(struct mipi_dsim_device *dsim, unsigned int enable, unsigned int afc_code) { unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_PHYACCHR); if (enable) { reg |= (1 << 14); reg &= ~(0x7 << 5); reg |= (afc_code & 0x7) << 5; } else reg &= ~(1 << 14); writel(reg, dsim->reg_base + EXYNOS_DSIM_PHYACCHR); } void exynos_mipi_dsi_enable_pll_bypass(struct mipi_dsim_device *dsim, unsigned int enable) { unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) & ~(DSIM_PLL_BYPASS_SHIFT(0x1)); reg |= DSIM_PLL_BYPASS_SHIFT(enable); writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL); } void exynos_mipi_dsi_set_pll_pms(struct mipi_dsim_device *dsim, unsigned int p, unsigned int m, unsigned int s) { unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL); reg |= ((p & 0x3f) << 13) | ((m & 0x1ff) << 4) | ((s & 0x7) << 1); writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL); } void exynos_mipi_dsi_pll_freq_band(struct mipi_dsim_device *dsim, unsigned int freq_band) { unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) & 
~(DSIM_FREQ_BAND_SHIFT(0x1f)); reg |= DSIM_FREQ_BAND_SHIFT(freq_band & 0x1f); writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL); } void exynos_mipi_dsi_pll_freq(struct mipi_dsim_device *dsim, unsigned int pre_divider, unsigned int main_divider, unsigned int scaler) { unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) & ~(0x7ffff << 1); reg |= (pre_divider & 0x3f) << 13 | (main_divider & 0x1ff) << 4 | (scaler & 0x7) << 1; writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL); } void exynos_mipi_dsi_pll_stable_time(struct mipi_dsim_device *dsim, unsigned int lock_time) { writel(lock_time, dsim->reg_base + EXYNOS_DSIM_PLLTMR); } void exynos_mipi_dsi_enable_pll(struct mipi_dsim_device *dsim, unsigned int enable) { unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) & ~(DSIM_PLL_EN_SHIFT(0x1)); reg |= DSIM_PLL_EN_SHIFT(enable & 0x1); writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL); } void exynos_mipi_dsi_set_byte_clock_src(struct mipi_dsim_device *dsim, unsigned int src) { unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) & ~(DSIM_BYTE_CLK_SRC_SHIFT(0x3)); reg |= (DSIM_BYTE_CLK_SRC_SHIFT(src)); writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL); } void exynos_mipi_dsi_enable_byte_clock(struct mipi_dsim_device *dsim, unsigned int enable) { unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) & ~(DSIM_BYTE_CLKEN_SHIFT(0x1)); reg |= DSIM_BYTE_CLKEN_SHIFT(enable); writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL); } void exynos_mipi_dsi_set_esc_clk_prs(struct mipi_dsim_device *dsim, unsigned int enable, unsigned int prs_val) { unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) & ~(DSIM_ESC_CLKEN_SHIFT(0x1) | 0xffff); reg |= DSIM_ESC_CLKEN_SHIFT(enable); if (enable) reg |= prs_val; writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL); } void exynos_mipi_dsi_enable_esc_clk_on_lane(struct mipi_dsim_device *dsim, unsigned int lane_sel, unsigned int enable) { unsigned int reg = readl(dsim->reg_base + 
EXYNOS_DSIM_CLKCTRL); if (enable) reg |= DSIM_LANE_ESC_CLKEN(lane_sel); else reg &= ~DSIM_LANE_ESC_CLKEN(lane_sel); writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL); } void exynos_mipi_dsi_force_dphy_stop_state(struct mipi_dsim_device *dsim, unsigned int enable) { unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_ESCMODE)) & ~(DSIM_FORCE_STOP_STATE_SHIFT(0x1)); reg |= (DSIM_FORCE_STOP_STATE_SHIFT(enable & 0x1)); writel(reg, dsim->reg_base + EXYNOS_DSIM_ESCMODE); } unsigned int exynos_mipi_dsi_is_lane_state(struct mipi_dsim_device *dsim) { unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_STATUS); /** * check clock and data lane states. * if MIPI-DSI controller was enabled at bootloader then * TX_READY_HS_CLK is enabled otherwise STOP_STATE_CLK. * so it should be checked for two case. */ if ((reg & DSIM_STOP_STATE_DAT(0xf)) && ((reg & DSIM_STOP_STATE_CLK) || (reg & DSIM_TX_READY_HS_CLK))) return 1; return 0; } void exynos_mipi_dsi_set_stop_state_counter(struct mipi_dsim_device *dsim, unsigned int cnt_val) { unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_ESCMODE)) & ~(DSIM_STOP_STATE_CNT_SHIFT(0x7ff)); reg |= (DSIM_STOP_STATE_CNT_SHIFT(cnt_val & 0x7ff)); writel(reg, dsim->reg_base + EXYNOS_DSIM_ESCMODE); } void exynos_mipi_dsi_set_bta_timeout(struct mipi_dsim_device *dsim, unsigned int timeout) { unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_TIMEOUT)) & ~(DSIM_BTA_TOUT_SHIFT(0xff)); reg |= (DSIM_BTA_TOUT_SHIFT(timeout)); writel(reg, dsim->reg_base + EXYNOS_DSIM_TIMEOUT); } void exynos_mipi_dsi_set_lpdr_timeout(struct mipi_dsim_device *dsim, unsigned int timeout) { unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_TIMEOUT)) & ~(DSIM_LPDR_TOUT_SHIFT(0xffff)); reg |= (DSIM_LPDR_TOUT_SHIFT(timeout)); writel(reg, dsim->reg_base + EXYNOS_DSIM_TIMEOUT); } void exynos_mipi_dsi_set_cpu_transfer_mode(struct mipi_dsim_device *dsim, unsigned int lp) { unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_ESCMODE); reg &= ~DSIM_CMD_LPDT_LP; if (lp) 
reg |= DSIM_CMD_LPDT_LP; writel(reg, dsim->reg_base + EXYNOS_DSIM_ESCMODE); } void exynos_mipi_dsi_set_lcdc_transfer_mode(struct mipi_dsim_device *dsim, unsigned int lp) { unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_ESCMODE); reg &= ~DSIM_TX_LPDT_LP; if (lp) reg |= DSIM_TX_LPDT_LP; writel(reg, dsim->reg_base + EXYNOS_DSIM_ESCMODE); } void exynos_mipi_dsi_enable_hs_clock(struct mipi_dsim_device *dsim, unsigned int enable) { unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) & ~(DSIM_TX_REQUEST_HSCLK_SHIFT(0x1)); reg |= DSIM_TX_REQUEST_HSCLK_SHIFT(enable); writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL); } void exynos_mipi_dsi_dp_dn_swap(struct mipi_dsim_device *dsim, unsigned int swap_en) { unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_PHYACCHR1); reg &= ~(0x3 << 0); reg |= (swap_en & 0x3) << 0; writel(reg, dsim->reg_base + EXYNOS_DSIM_PHYACCHR1); } void exynos_mipi_dsi_hs_zero_ctrl(struct mipi_dsim_device *dsim, unsigned int hs_zero) { unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) & ~(0xf << 28); reg |= ((hs_zero & 0xf) << 28); writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL); } void exynos_mipi_dsi_prep_ctrl(struct mipi_dsim_device *dsim, unsigned int prep) { unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) & ~(0x7 << 20); reg |= ((prep & 0x7) << 20); writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL); } unsigned int exynos_mipi_dsi_read_interrupt(struct mipi_dsim_device *dsim) { return readl(dsim->reg_base + EXYNOS_DSIM_INTSRC); } void exynos_mipi_dsi_clear_interrupt(struct mipi_dsim_device *dsim, unsigned int src) { unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_INTSRC); reg |= src; writel(reg, dsim->reg_base + EXYNOS_DSIM_INTSRC); } void exynos_mipi_dsi_set_interrupt(struct mipi_dsim_device *dsim, unsigned int src, unsigned int enable) { unsigned int reg = 0; if (enable) reg |= src; else reg &= ~src; writel(reg, dsim->reg_base + EXYNOS_DSIM_INTSRC); } unsigned int 
exynos_mipi_dsi_is_pll_stable(struct mipi_dsim_device *dsim) { unsigned int reg; reg = readl(dsim->reg_base + EXYNOS_DSIM_STATUS); return reg & (1 << 31) ? 1 : 0; } unsigned int exynos_mipi_dsi_get_fifo_state(struct mipi_dsim_device *dsim) { return readl(dsim->reg_base + EXYNOS_DSIM_FIFOCTRL) & ~(0x1f); } void exynos_mipi_dsi_wr_tx_header(struct mipi_dsim_device *dsim, unsigned int di, unsigned int data0, unsigned int data1) { unsigned int reg = (data1 << 16) | (data0 << 8) | ((di & 0x3f) << 0); writel(reg, dsim->reg_base + EXYNOS_DSIM_PKTHDR); } void exynos_mipi_dsi_rd_tx_header(struct mipi_dsim_device *dsim, unsigned int di, unsigned int data0) { unsigned int reg = (data0 << 8) | (di << 0); writel(reg, dsim->reg_base + EXYNOS_DSIM_PKTHDR); } unsigned int exynos_mipi_dsi_rd_rx_fifo(struct mipi_dsim_device *dsim) { return readl(dsim->reg_base + EXYNOS_DSIM_RXFIFO); } unsigned int _exynos_mipi_dsi_get_frame_done_status(struct mipi_dsim_device *dsim) { unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_INTSRC); return (reg & INTSRC_FRAME_DONE) ? 1 : 0; } void _exynos_mipi_dsi_clear_frame_done(struct mipi_dsim_device *dsim) { unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_INTSRC); writel(reg | INTSRC_FRAME_DONE, dsim->reg_base + EXYNOS_DSIM_INTSRC); } void exynos_mipi_dsi_wr_tx_data(struct mipi_dsim_device *dsim, unsigned int tx_data) { writel(tx_data, dsim->reg_base + EXYNOS_DSIM_PAYLOAD); }
gpl-2.0
A2ronLil/android_kernel_motorola_fleming
drivers/usb/gadget/omap_udc.c
2349
82149
/* * omap_udc.c -- for OMAP full speed udc; most chips support OTG. * * Copyright (C) 2004 Texas Instruments, Inc. * Copyright (C) 2004-2005 David Brownell * * OMAP2 & DMA support by Kyungmin Park <kyungmin.park@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #undef DEBUG #undef VERBOSE #include <linux/module.h> #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/mm.h> #include <linux/moduleparam.h> #include <linux/platform_device.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/usb/otg.h> #include <linux/dma-mapping.h> #include <linux/clk.h> #include <linux/prefetch.h> #include <asm/byteorder.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/system.h> #include <asm/unaligned.h> #include <asm/mach-types.h> #include <plat/dma.h> #include <plat/usb.h> #include "omap_udc.h" #undef USB_TRACE /* bulk DMA seems to be behaving for both IN and OUT */ #define USE_DMA /* ISO too */ #define USE_ISO #define DRIVER_DESC "OMAP UDC driver" #define DRIVER_VERSION "4 October 2004" #define DMA_ADDR_INVALID (~(dma_addr_t)0) #define OMAP2_DMA_CH(ch) (((ch) 
- 1) << 1) #define OMAP24XX_DMA(name, ch) (OMAP24XX_DMA_##name + OMAP2_DMA_CH(ch)) /* * The OMAP UDC needs _very_ early endpoint setup: before enabling the * D+ pullup to allow enumeration. That's too early for the gadget * framework to use from usb_endpoint_enable(), which happens after * enumeration as part of activating an interface. (But if we add an * optional new "UDC not yet running" state to the gadget driver model, * even just during driver binding, the endpoint autoconfig logic is the * natural spot to manufacture new endpoints.) * * So instead of using endpoint enable calls to control the hardware setup, * this driver defines a "fifo mode" parameter. It's used during driver * initialization to choose among a set of pre-defined endpoint configs. * See omap_udc_setup() for available modes, or to add others. That code * lives in an init section, so use this driver as a module if you need * to change the fifo mode after the kernel boots. * * Gadget drivers normally ignore endpoints they don't care about, and * won't include them in configuration descriptors. That means only * misbehaving hosts would even notice they exist. 
*/ #ifdef USE_ISO static unsigned fifo_mode = 3; #else static unsigned fifo_mode = 0; #endif /* "modprobe omap_udc fifo_mode=42", or else as a kernel * boot parameter "omap_udc:fifo_mode=42" */ module_param (fifo_mode, uint, 0); MODULE_PARM_DESC (fifo_mode, "endpoint configuration"); #ifdef USE_DMA static unsigned use_dma = 1; /* "modprobe omap_udc use_dma=y", or else as a kernel * boot parameter "omap_udc:use_dma=y" */ module_param (use_dma, bool, 0); MODULE_PARM_DESC (use_dma, "enable/disable DMA"); #else /* !USE_DMA */ /* save a bit of code */ #define use_dma 0 #endif /* !USE_DMA */ static const char driver_name [] = "omap_udc"; static const char driver_desc [] = DRIVER_DESC; /*-------------------------------------------------------------------------*/ /* there's a notion of "current endpoint" for modifying endpoint * state, and PIO access to its FIFO. */ static void use_ep(struct omap_ep *ep, u16 select) { u16 num = ep->bEndpointAddress & 0x0f; if (ep->bEndpointAddress & USB_DIR_IN) num |= UDC_EP_DIR; omap_writew(num | select, UDC_EP_NUM); /* when select, MUST deselect later !! 
*/ } static inline void deselect_ep(void) { u16 w; w = omap_readw(UDC_EP_NUM); w &= ~UDC_EP_SEL; omap_writew(w, UDC_EP_NUM); /* 6 wait states before TX will happen */ } static void dma_channel_claim(struct omap_ep *ep, unsigned preferred); /*-------------------------------------------------------------------------*/ static int omap_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct omap_ep *ep = container_of(_ep, struct omap_ep, ep); struct omap_udc *udc; unsigned long flags; u16 maxp; /* catch various bogus parameters */ if (!_ep || !desc || ep->desc || desc->bDescriptorType != USB_DT_ENDPOINT || ep->bEndpointAddress != desc->bEndpointAddress || ep->maxpacket < le16_to_cpu (desc->wMaxPacketSize)) { DBG("%s, bad ep or descriptor\n", __func__); return -EINVAL; } maxp = le16_to_cpu (desc->wMaxPacketSize); if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK && maxp != ep->maxpacket) || le16_to_cpu(desc->wMaxPacketSize) > ep->maxpacket || !desc->wMaxPacketSize) { DBG("%s, bad %s maxpacket\n", __func__, _ep->name); return -ERANGE; } #ifdef USE_ISO if ((desc->bmAttributes == USB_ENDPOINT_XFER_ISOC && desc->bInterval != 1)) { /* hardware wants period = 1; USB allows 2^(Interval-1) */ DBG("%s, unsupported ISO period %dms\n", _ep->name, 1 << (desc->bInterval - 1)); return -EDOM; } #else if (desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { DBG("%s, ISO nyet\n", _ep->name); return -EDOM; } #endif /* xfer types must match, except that interrupt ~= bulk */ if (ep->bmAttributes != desc->bmAttributes && ep->bmAttributes != USB_ENDPOINT_XFER_BULK && desc->bmAttributes != USB_ENDPOINT_XFER_INT) { DBG("%s, %s type mismatch\n", __func__, _ep->name); return -EINVAL; } udc = ep->udc; if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) { DBG("%s, bogus device state\n", __func__); return -ESHUTDOWN; } spin_lock_irqsave(&udc->lock, flags); ep->desc = desc; ep->irqs = 0; ep->stopped = 0; ep->ep.maxpacket = maxp; /* set endpoint to initial state */ 
ep->dma_channel = 0; ep->has_dma = 0; ep->lch = -1; use_ep(ep, UDC_EP_SEL); omap_writew(udc->clr_halt, UDC_CTRL); ep->ackwait = 0; deselect_ep(); if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) list_add(&ep->iso, &udc->iso); /* maybe assign a DMA channel to this endpoint */ if (use_dma && desc->bmAttributes == USB_ENDPOINT_XFER_BULK) /* FIXME ISO can dma, but prefers first channel */ dma_channel_claim(ep, 0); /* PIO OUT may RX packets */ if (desc->bmAttributes != USB_ENDPOINT_XFER_ISOC && !ep->has_dma && !(ep->bEndpointAddress & USB_DIR_IN)) { omap_writew(UDC_SET_FIFO_EN, UDC_CTRL); ep->ackwait = 1 + ep->double_buf; } spin_unlock_irqrestore(&udc->lock, flags); VDBG("%s enabled\n", _ep->name); return 0; } static void nuke(struct omap_ep *, int status); static int omap_ep_disable(struct usb_ep *_ep) { struct omap_ep *ep = container_of(_ep, struct omap_ep, ep); unsigned long flags; if (!_ep || !ep->desc) { DBG("%s, %s not enabled\n", __func__, _ep ? ep->ep.name : NULL); return -EINVAL; } spin_lock_irqsave(&ep->udc->lock, flags); ep->desc = NULL; nuke (ep, -ESHUTDOWN); ep->ep.maxpacket = ep->maxpacket; ep->has_dma = 0; omap_writew(UDC_SET_HALT, UDC_CTRL); list_del_init(&ep->iso); del_timer(&ep->timer); spin_unlock_irqrestore(&ep->udc->lock, flags); VDBG("%s disabled\n", _ep->name); return 0; } /*-------------------------------------------------------------------------*/ static struct usb_request * omap_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) { struct omap_req *req; req = kzalloc(sizeof(*req), gfp_flags); if (req) { req->req.dma = DMA_ADDR_INVALID; INIT_LIST_HEAD (&req->queue); } return &req->req; } static void omap_free_request(struct usb_ep *ep, struct usb_request *_req) { struct omap_req *req = container_of(_req, struct omap_req, req); if (_req) kfree (req); } /*-------------------------------------------------------------------------*/ static void done(struct omap_ep *ep, struct omap_req *req, int status) { unsigned stopped = ep->stopped; 
list_del_init(&req->queue); if (req->req.status == -EINPROGRESS) req->req.status = status; else status = req->req.status; if (use_dma && ep->has_dma) { if (req->mapped) { dma_unmap_single(ep->udc->gadget.dev.parent, req->req.dma, req->req.length, (ep->bEndpointAddress & USB_DIR_IN) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); req->req.dma = DMA_ADDR_INVALID; req->mapped = 0; } else dma_sync_single_for_cpu(ep->udc->gadget.dev.parent, req->req.dma, req->req.length, (ep->bEndpointAddress & USB_DIR_IN) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); } #ifndef USB_TRACE if (status && status != -ESHUTDOWN) #endif VDBG("complete %s req %p stat %d len %u/%u\n", ep->ep.name, &req->req, status, req->req.actual, req->req.length); /* don't modify queue heads during completion callback */ ep->stopped = 1; spin_unlock(&ep->udc->lock); req->req.complete(&ep->ep, &req->req); spin_lock(&ep->udc->lock); ep->stopped = stopped; } /*-------------------------------------------------------------------------*/ #define UDC_FIFO_FULL (UDC_NON_ISO_FIFO_FULL | UDC_ISO_FIFO_FULL) #define UDC_FIFO_UNWRITABLE (UDC_EP_HALTED | UDC_FIFO_FULL) #define FIFO_EMPTY (UDC_NON_ISO_FIFO_EMPTY | UDC_ISO_FIFO_EMPTY) #define FIFO_UNREADABLE (UDC_EP_HALTED | FIFO_EMPTY) static inline int write_packet(u8 *buf, struct omap_req *req, unsigned max) { unsigned len; u16 *wp; len = min(req->req.length - req->req.actual, max); req->req.actual += len; max = len; if (likely((((int)buf) & 1) == 0)) { wp = (u16 *)buf; while (max >= 2) { omap_writew(*wp++, UDC_DATA); max -= 2; } buf = (u8 *)wp; } while (max--) omap_writeb(*buf++, UDC_DATA); return len; } // FIXME change r/w fifo calling convention // return: 0 = still running, 1 = completed, negative = errno static int write_fifo(struct omap_ep *ep, struct omap_req *req) { u8 *buf; unsigned count; int is_last; u16 ep_stat; buf = req->req.buf + req->req.actual; prefetch(buf); /* PIO-IN isn't double buffered except for iso */ ep_stat = omap_readw(UDC_STAT_FLG); if (ep_stat & 
UDC_FIFO_UNWRITABLE) return 0; count = ep->ep.maxpacket; count = write_packet(buf, req, count); omap_writew(UDC_SET_FIFO_EN, UDC_CTRL); ep->ackwait = 1; /* last packet is often short (sometimes a zlp) */ if (count != ep->ep.maxpacket) is_last = 1; else if (req->req.length == req->req.actual && !req->req.zero) is_last = 1; else is_last = 0; /* NOTE: requests complete when all IN data is in a * FIFO (or sometimes later, if a zlp was needed). * Use usb_ep_fifo_status() where needed. */ if (is_last) done(ep, req, 0); return is_last; } static inline int read_packet(u8 *buf, struct omap_req *req, unsigned avail) { unsigned len; u16 *wp; len = min(req->req.length - req->req.actual, avail); req->req.actual += len; avail = len; if (likely((((int)buf) & 1) == 0)) { wp = (u16 *)buf; while (avail >= 2) { *wp++ = omap_readw(UDC_DATA); avail -= 2; } buf = (u8 *)wp; } while (avail--) *buf++ = omap_readb(UDC_DATA); return len; } // return: 0 = still running, 1 = queue empty, negative = errno static int read_fifo(struct omap_ep *ep, struct omap_req *req) { u8 *buf; unsigned count, avail; int is_last; buf = req->req.buf + req->req.actual; prefetchw(buf); for (;;) { u16 ep_stat = omap_readw(UDC_STAT_FLG); is_last = 0; if (ep_stat & FIFO_EMPTY) { if (!ep->double_buf) break; ep->fnf = 1; } if (ep_stat & UDC_EP_HALTED) break; if (ep_stat & UDC_FIFO_FULL) avail = ep->ep.maxpacket; else { avail = omap_readw(UDC_RXFSTAT); ep->fnf = ep->double_buf; } count = read_packet(buf, req, avail); /* partial packet reads may not be errors */ if (count < ep->ep.maxpacket) { is_last = 1; /* overflowed this request? 
flush extra data */ if (count != avail) { req->req.status = -EOVERFLOW; avail -= count; while (avail--) omap_readw(UDC_DATA); } } else if (req->req.length == req->req.actual) is_last = 1; else is_last = 0; if (!ep->bEndpointAddress) break; if (is_last) done(ep, req, 0); break; } return is_last; } /*-------------------------------------------------------------------------*/ static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start) { dma_addr_t end; /* IN-DMA needs this on fault/cancel paths, so 15xx misreports * the last transfer's bytecount by more than a FIFO's worth. */ if (cpu_is_omap15xx()) return 0; end = omap_get_dma_src_pos(ep->lch); if (end == ep->dma_counter) return 0; end |= start & (0xffff << 16); if (end < start) end += 0x10000; return end - start; } static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start) { dma_addr_t end; end = omap_get_dma_dst_pos(ep->lch); if (end == ep->dma_counter) return 0; end |= start & (0xffff << 16); if (cpu_is_omap15xx()) end++; if (end < start) end += 0x10000; return end - start; } /* Each USB transfer request using DMA maps to one or more DMA transfers. * When DMA completion isn't request completion, the UDC continues with * the next DMA transfer for that USB transfer. */ static void next_in_dma(struct omap_ep *ep, struct omap_req *req) { u16 txdma_ctrl, w; unsigned length = req->req.length - req->req.actual; const int sync_mode = cpu_is_omap15xx() ? 
OMAP_DMA_SYNC_FRAME : OMAP_DMA_SYNC_ELEMENT; int dma_trigger = 0; if (cpu_is_omap24xx()) dma_trigger = OMAP24XX_DMA(USB_W2FC_TX0, ep->dma_channel); /* measure length in either bytes or packets */ if ((cpu_is_omap16xx() && length <= UDC_TXN_TSC) || (cpu_is_omap24xx() && length < ep->maxpacket) || (cpu_is_omap15xx() && length < ep->maxpacket)) { txdma_ctrl = UDC_TXN_EOT | length; omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8, length, 1, sync_mode, dma_trigger, 0); } else { length = min(length / ep->maxpacket, (unsigned) UDC_TXN_TSC + 1); txdma_ctrl = length; omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16, ep->ep.maxpacket >> 1, length, sync_mode, dma_trigger, 0); length *= ep->maxpacket; } omap_set_dma_src_params(ep->lch, OMAP_DMA_PORT_EMIFF, OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual, 0, 0); omap_start_dma(ep->lch); ep->dma_counter = omap_get_dma_src_pos(ep->lch); w = omap_readw(UDC_DMA_IRQ_EN); w |= UDC_TX_DONE_IE(ep->dma_channel); omap_writew(w, UDC_DMA_IRQ_EN); omap_writew(UDC_TXN_START | txdma_ctrl, UDC_TXDMA(ep->dma_channel)); req->dma_bytes = length; } static void finish_in_dma(struct omap_ep *ep, struct omap_req *req, int status) { u16 w; if (status == 0) { req->req.actual += req->dma_bytes; /* return if this request needs to send data or zlp */ if (req->req.actual < req->req.length) return; if (req->req.zero && req->dma_bytes != 0 && (req->req.actual % ep->maxpacket) == 0) return; } else req->req.actual += dma_src_len(ep, req->req.dma + req->req.actual); /* tx completion */ omap_stop_dma(ep->lch); w = omap_readw(UDC_DMA_IRQ_EN); w &= ~UDC_TX_DONE_IE(ep->dma_channel); omap_writew(w, UDC_DMA_IRQ_EN); done(ep, req, status); } static void next_out_dma(struct omap_ep *ep, struct omap_req *req) { unsigned packets = req->req.length - req->req.actual; int dma_trigger = 0; u16 w; if (cpu_is_omap24xx()) dma_trigger = OMAP24XX_DMA(USB_W2FC_RX0, ep->dma_channel); /* NOTE: we filtered out "short reads" before, so we know * the 
buffer has only whole numbers of packets. * except MODE SELECT(6) sent the 24 bytes data in OMAP24XX DMA mode */ if (cpu_is_omap24xx() && packets < ep->maxpacket) { omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8, packets, 1, OMAP_DMA_SYNC_ELEMENT, dma_trigger, 0); req->dma_bytes = packets; } else { /* set up this DMA transfer, enable the fifo, start */ packets /= ep->ep.maxpacket; packets = min(packets, (unsigned)UDC_RXN_TC + 1); req->dma_bytes = packets * ep->ep.maxpacket; omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16, ep->ep.maxpacket >> 1, packets, OMAP_DMA_SYNC_ELEMENT, dma_trigger, 0); } omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF, OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual, 0, 0); ep->dma_counter = omap_get_dma_dst_pos(ep->lch); omap_writew(UDC_RXN_STOP | (packets - 1), UDC_RXDMA(ep->dma_channel)); w = omap_readw(UDC_DMA_IRQ_EN); w |= UDC_RX_EOT_IE(ep->dma_channel); omap_writew(w, UDC_DMA_IRQ_EN); omap_writew(ep->bEndpointAddress & 0xf, UDC_EP_NUM); omap_writew(UDC_SET_FIFO_EN, UDC_CTRL); omap_start_dma(ep->lch); } static void finish_out_dma(struct omap_ep *ep, struct omap_req *req, int status, int one) { u16 count, w; if (status == 0) ep->dma_counter = (u16) (req->req.dma + req->req.actual); count = dma_dest_len(ep, req->req.dma + req->req.actual); count += req->req.actual; if (one) count--; if (count <= req->req.length) req->req.actual = count; if (count != req->dma_bytes || status) omap_stop_dma(ep->lch); /* if this wasn't short, request may need another transfer */ else if (req->req.actual < req->req.length) return; /* rx completion */ w = omap_readw(UDC_DMA_IRQ_EN); w &= ~UDC_RX_EOT_IE(ep->dma_channel); omap_writew(w, UDC_DMA_IRQ_EN); done(ep, req, status); } static void dma_irq(struct omap_udc *udc, u16 irq_src) { u16 dman_stat = omap_readw(UDC_DMAN_STAT); struct omap_ep *ep; struct omap_req *req; /* IN dma: tx to host */ if (irq_src & UDC_TXN_DONE) { ep = &udc->ep[16 + UDC_DMA_TX_SRC(dman_stat)]; 
		ep->irqs++;
		/* can see TXN_DONE after dma abort */
		if (!list_empty(&ep->queue)) {
			req = container_of(ep->queue.next,
					struct omap_req, queue);
			finish_in_dma(ep, req, 0);
		}
		omap_writew(UDC_TXN_DONE, UDC_IRQ_SRC);

		if (!list_empty (&ep->queue)) {
			req = container_of(ep->queue.next,
					struct omap_req, queue);
			next_in_dma(ep, req);
		}
	}

	/* OUT dma: rx from host */
	if (irq_src & UDC_RXN_EOT) {
		ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
		ep->irqs++;
		/* can see RXN_EOT after dma abort */
		if (!list_empty(&ep->queue)) {
			req = container_of(ep->queue.next,
					struct omap_req, queue);
			finish_out_dma(ep, req, 0, dman_stat & UDC_DMA_RX_SB);
		}
		omap_writew(UDC_RXN_EOT, UDC_IRQ_SRC);

		if (!list_empty (&ep->queue)) {
			req = container_of(ep->queue.next,
					struct omap_req, queue);
			next_out_dma(ep, req);
		}
	}

	if (irq_src & UDC_RXN_CNT) {
		ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
		ep->irqs++;
		/* omap15xx does this unasked... */
		VDBG("%s, RX_CNT irq?\n", ep->ep.name);
		omap_writew(UDC_RXN_CNT, UDC_IRQ_SRC);
	}
}

/*
 * System-DMA error callback registered via omap_request_dma().
 * Currently log-only; the transfer is NOT completed here.
 */
static void dma_error(int lch, u16 ch_status, void *data)
{
	struct omap_ep *ep = data;

	/* if ch_status & OMAP_DMA_DROP_IRQ ... */
	/* if ch_status & OMAP1_DMA_TOUT_IRQ ... */
	ERR("%s dma error, lch %d status %02x\n", ep->ep.name, lch, ch_status);

	/* complete current transfer ... */
}

/*
 * Try to bind one of the three UDC DMA channels (1..3) plus a system
 * DMA logical channel to this endpoint.  channel == 0 (or out of
 * range) means "pick any free one".  Whether or not the claim
 * succeeds, any queued request is (re)started before returning.
 */
static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
{
	u16 reg;
	int status, restart, is_in;
	int dma_channel;

	is_in = ep->bEndpointAddress & USB_DIR_IN;
	if (is_in)
		reg = omap_readw(UDC_TXDMA_CFG);
	else
		reg = omap_readw(UDC_RXDMA_CFG);
	reg |= UDC_DMA_REQ;		/* "pulse" activated */

	ep->dma_channel = 0;
	ep->lch = -1;
	if (channel == 0 || channel > 3) {
		/* a nibble per channel in the CFG register; zero == free */
		if ((reg & 0x0f00) == 0)
			channel = 3;
		else if ((reg & 0x00f0) == 0)
			channel = 2;
		else if ((reg & 0x000f) == 0)	/* preferred for ISO */
			channel = 1;
		else {
			status = -EMLINK;
			goto just_restart;
		}
	}
	reg |= (0x0f & ep->bEndpointAddress) << (4 * (channel - 1));
	ep->dma_channel = channel;

	if (is_in) {
		if (cpu_is_omap24xx())
			dma_channel = OMAP24XX_DMA(USB_W2FC_TX0, channel);
		else
			dma_channel = OMAP_DMA_USB_W2FC_TX0 - 1 + channel;
		status = omap_request_dma(dma_channel,
			ep->ep.name, dma_error, ep, &ep->lch);
		if (status == 0) {
			omap_writew(reg, UDC_TXDMA_CFG);
			/* EMIFF or SDRC */
			omap_set_dma_src_burst_mode(ep->lch,
						OMAP_DMA_DATA_BURST_4);
			omap_set_dma_src_data_pack(ep->lch, 1);
			/* TIPB */
			omap_set_dma_dest_params(ep->lch,
				OMAP_DMA_PORT_TIPB,
				OMAP_DMA_AMODE_CONSTANT,
				UDC_DATA_DMA,
				0, 0);
		}
	} else {
		if (cpu_is_omap24xx())
			dma_channel = OMAP24XX_DMA(USB_W2FC_RX0, channel);
		else
			dma_channel = OMAP_DMA_USB_W2FC_RX0 - 1 + channel;
		status = omap_request_dma(dma_channel,
			ep->ep.name, dma_error, ep, &ep->lch);
		if (status == 0) {
			omap_writew(reg, UDC_RXDMA_CFG);
			/* TIPB */
			omap_set_dma_src_params(ep->lch,
				OMAP_DMA_PORT_TIPB,
				OMAP_DMA_AMODE_CONSTANT,
				UDC_DATA_DMA,
				0, 0);
			/* EMIFF or SDRC */
			omap_set_dma_dest_burst_mode(ep->lch,
						OMAP_DMA_DATA_BURST_4);
			omap_set_dma_dest_data_pack(ep->lch, 1);
		}
	}
	if (status)
		ep->dma_channel = 0;
	else {
		ep->has_dma = 1;
		omap_disable_dma_irq(ep->lch, OMAP_DMA_BLOCK_IRQ);

		/* channel type P: hw synch (fifo) */
		if (cpu_class_is_omap1() && !cpu_is_omap15xx())
			omap_set_dma_channel_mode(ep->lch, OMAP_DMA_LCH_P);
	}

just_restart:
	/* restart any queue, even if the claim failed */
	restart = !ep->stopped
			&& !list_empty(&ep->queue);
	if (status)
		DBG("%s no dma channel: %d%s\n", ep->ep.name, status,
			restart ? " (restart)" : "");
	else
		DBG("%s claimed %cxdma%d lch %d%s\n", ep->ep.name,
			is_in ? 't' : 'r',
			ep->dma_channel - 1, ep->lch,
			restart ? " (restart)" : "");
	if (restart) {
		struct omap_req	*req;
		req = container_of(ep->queue.next, struct omap_req, queue);
		if (ep->has_dma)
			(is_in ? next_in_dma : next_out_dma)(ep, req);
		else {
			/* DMA claim failed: fall back to PIO */
			use_ep(ep, UDC_EP_SEL);
			(is_in ? write_fifo : read_fifo)(ep, req);
			deselect_ep();
			if (!is_in) {
				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
				ep->ackwait = 1 + ep->double_buf;
			}
			/* IN: 6 wait states before it'll tx */
		}
	}
}

/*
 * Release the endpoint's UDC DMA channel and system-DMA logical
 * channel, aborting the head request (if any) with -ECONNRESET and
 * waiting for the hardware channel to quiesce before freeing it.
 */
static void dma_channel_release(struct omap_ep *ep)
{
	int		shift = 4 * (ep->dma_channel - 1);
	u16		mask = 0x0f << shift;
	struct omap_req	*req;
	int		active;

	/* abort any active usb transfer request */
	if (!list_empty(&ep->queue))
		req = container_of(ep->queue.next, struct omap_req, queue);
	else
		req = NULL;

	active = omap_get_dma_active_status(ep->lch);

	DBG("%s release %s %cxdma%d %p\n", ep->ep.name,
			active ? "active" : "idle",
			(ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r',
			ep->dma_channel - 1, req);

	/* NOTE: re-setting RX_REQ/TX_REQ because of a chip bug (before
	 * OMAP 1710 ES2.0) where reading the DMA_CFG can clear them.
	 */

	/* wait till current packet DMA finishes, and fifo empties */
	if (ep->bEndpointAddress & USB_DIR_IN) {
		omap_writew((omap_readw(UDC_TXDMA_CFG) & ~mask) | UDC_DMA_REQ,
					UDC_TXDMA_CFG);

		if (req) {
			finish_in_dma(ep, req, -ECONNRESET);

			/* clear FIFO; hosts probably won't empty it */
			use_ep(ep, UDC_EP_SEL);
			omap_writew(UDC_CLR_EP, UDC_CTRL);
			deselect_ep();
		}
		while (omap_readw(UDC_TXDMA_CFG) & mask)
			udelay(10);
	} else {
		omap_writew((omap_readw(UDC_RXDMA_CFG) & ~mask) | UDC_DMA_REQ,
					UDC_RXDMA_CFG);

		/* dma empties the fifo */
		while (omap_readw(UDC_RXDMA_CFG) & mask)
			udelay(10);
		if (req)
			finish_out_dma(ep, req, -ECONNRESET, 0);
	}
	omap_free_dma(ep->lch);
	ep->dma_channel = 0;
	ep->lch = -1;
	/* has_dma still set, till endpoint is fully quiesced */
}

/*-------------------------------------------------------------------------*/

/*
 * usb_ep_ops.queue: validate and enqueue a request; if the endpoint is
 * idle, start it immediately (DMA or PIO, ep0 or data endpoint).
 */
static int
omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct omap_ep	*ep = container_of(_ep, struct omap_ep, ep);
	struct omap_req	*req = container_of(_req, struct omap_req, req);
	struct omap_udc	*udc;
	unsigned long	flags;
	int		is_iso = 0;

	/* catch various bogus parameters */
	if (!_req || !req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		DBG("%s, bad params\n", __func__);
		return -EINVAL;
	}
	if (!_ep || (!ep->desc && ep->bEndpointAddress)) {
		DBG("%s, bad ep\n", __func__);
		return -EINVAL;
	}
	if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		if (req->req.length > ep->ep.maxpacket)
			return -EMSGSIZE;
		is_iso = 1;
	}

	/* this isn't bogus, but OMAP DMA isn't the only hardware to
	 * have a hard time with partial packet reads...  reject it.
	 * Except OMAP2 can handle the small packets.
	 */
	if (use_dma
			&& ep->has_dma
			&& ep->bEndpointAddress != 0
			&& (ep->bEndpointAddress & USB_DIR_IN) == 0
			&& !cpu_class_is_omap2()
			&& (req->req.length % ep->ep.maxpacket) != 0) {
		DBG("%s, no partial packet OUT reads\n", __func__);
		return -EMSGSIZE;
	}

	udc = ep->udc;
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map (or re-sync) the buffer for DMA before queueing */
	if (use_dma && ep->has_dma) {
		if (req->req.dma == DMA_ADDR_INVALID) {
			req->req.dma = dma_map_single(
				ep->udc->gadget.dev.parent,
				req->req.buf,
				req->req.length,
				(ep->bEndpointAddress & USB_DIR_IN)
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
			req->mapped = 1;
		} else {
			dma_sync_single_for_device(
				ep->udc->gadget.dev.parent,
				req->req.dma, req->req.length,
				(ep->bEndpointAddress & USB_DIR_IN)
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
			req->mapped = 0;
		}
	}

	VDBG("%s queue req %p, len %d buf %p\n",
		ep->ep.name, _req, _req->length, _req->buf);

	spin_lock_irqsave(&udc->lock, flags);

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;

	/* maybe kickstart non-iso i/o queues */
	if (is_iso) {
		u16 w;

		w = omap_readw(UDC_IRQ_EN);
		w |= UDC_SOF_IE;
		omap_writew(w, UDC_IRQ_EN);
	} else if (list_empty(&ep->queue) && !ep->stopped && !ep->ackwait) {
		int	is_in;

		if (ep->bEndpointAddress == 0) {
			if (!udc->ep0_pending || !list_empty (&ep->queue)) {
				spin_unlock_irqrestore(&udc->lock, flags);
				return -EL2HLT;
			}

			/* empty DATA stage? */
			is_in = udc->ep0_in;
			if (!req->req.length) {

				/* chip became CONFIGURED or ADDRESSED
				 * earlier; drivers may already have queued
				 * requests to non-control endpoints
				 */
				if (udc->ep0_set_config) {
					u16	irq_en = omap_readw(UDC_IRQ_EN);

					irq_en |= UDC_DS_CHG_IE | UDC_EP0_IE;
					if (!udc->ep0_reset_config)
						irq_en |= UDC_EPN_RX_IE
							| UDC_EPN_TX_IE;
					omap_writew(irq_en, UDC_IRQ_EN);
				}

				/* STATUS for zero length DATA stages is
				 * always an IN ... even for IN transfers,
				 * a weird case which seem to stall OMAP.
				 */
				omap_writew(UDC_EP_SEL | UDC_EP_DIR,
						UDC_EP_NUM);
				omap_writew(UDC_CLR_EP, UDC_CTRL);
				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
				omap_writew(UDC_EP_DIR, UDC_EP_NUM);

				/* cleanup */
				udc->ep0_pending = 0;
				done(ep, req, 0);
				req = NULL;

			/* non-empty DATA stage */
			} else if (is_in) {
				omap_writew(UDC_EP_SEL | UDC_EP_DIR,
						UDC_EP_NUM);
			} else {
				if (udc->ep0_setup)
					goto irq_wait;
				omap_writew(UDC_EP_SEL, UDC_EP_NUM);
			}
		} else {
			is_in = ep->bEndpointAddress & USB_DIR_IN;
			if (!ep->has_dma)
				use_ep(ep, UDC_EP_SEL);
			/* if ISO: SOF IRQs must be enabled/disabled! */
		}

		if (ep->has_dma)
			(is_in ? next_in_dma : next_out_dma)(ep, req);
		else if (req) {
			if ((is_in ? write_fifo : read_fifo)(ep, req) == 1)
				req = NULL;
			deselect_ep();
			if (!is_in) {
				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
				ep->ackwait = 1 + ep->double_buf;
			}
			/* IN: 6 wait states before it'll tx */
		}
	}

irq_wait:
	/* irq handler advances the queue */
	if (req != NULL)
		list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/*
 * usb_ep_ops.dequeue: cancel one queued request.  If it is the head
 * request of a DMA endpoint, releasing and re-claiming the channel is
 * what actually aborts and restarts the hardware.
 */
static int omap_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct omap_ep	*ep = container_of(_ep, struct omap_ep, ep);
	struct omap_req	*req;
	unsigned long	flags;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		spin_unlock_irqrestore(&ep->udc->lock, flags);
		return -EINVAL;
	}

	if (use_dma && ep->dma_channel && ep->queue.next == &req->queue) {
		int channel = ep->dma_channel;

		/* releasing the channel cancels the request,
		 * reclaiming the channel restarts the queue
		 */
		dma_channel_release(ep);
		dma_channel_claim(ep, channel);
	} else
		done(ep, req, -ECONNRESET);
	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

/*
 * usb_ep_ops.set_halt: stall/unstall an endpoint (protocol stalls only
 * for ep0; real halts for active non-ISO endpoints).
 */
static int omap_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct omap_ep	*ep =
		container_of(_ep, struct omap_ep, ep);
	unsigned long	flags;
	int		status = -EOPNOTSUPP;

	spin_lock_irqsave(&ep->udc->lock, flags);

	/* just use protocol stalls for ep0; real halts are annoying */
	if (ep->bEndpointAddress == 0) {
		if (!ep->udc->ep0_pending)
			status = -EINVAL;
		else if (value) {
			if (ep->udc->ep0_set_config) {
				WARNING("error changing config?\n");
				omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
			}
			omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
			ep->udc->ep0_pending = 0;
			status = 0;
		} else /* NOP */
			status = 0;

	/* otherwise, all active non-ISO endpoints can halt */
	} else if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC && ep->desc) {

		/* IN endpoints must already be idle */
		if ((ep->bEndpointAddress & USB_DIR_IN)
				&& !list_empty(&ep->queue)) {
			status = -EAGAIN;
			goto done;
		}

		if (value) {
			int	channel;

			if (use_dma && ep->dma_channel
					&& !list_empty(&ep->queue)) {
				channel = ep->dma_channel;
				dma_channel_release(ep);
			} else
				channel = 0;

			use_ep(ep, UDC_EP_SEL);
			if (omap_readw(UDC_STAT_FLG) & UDC_NON_ISO_FIFO_EMPTY) {
				omap_writew(UDC_SET_HALT, UDC_CTRL);
				status = 0;
			} else
				status = -EAGAIN;
			deselect_ep();

			if (channel)
				dma_channel_claim(ep, channel);
		} else {
			use_ep(ep, 0);
			omap_writew(ep->udc->clr_halt, UDC_CTRL);
			ep->ackwait = 0;
			if (!(ep->bEndpointAddress & USB_DIR_IN)) {
				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
				ep->ackwait = 1 + ep->double_buf;
			}
		}
	}
done:
	VDBG("%s %s halt stat %d\n", ep->ep.name,
		value ? "set" : "clear", status);

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return status;
}

/* endpoint operations exported to the gadget framework */
static struct usb_ep_ops omap_ep_ops = {
	.enable		= omap_ep_enable,
	.disable	= omap_ep_disable,

	.alloc_request	= omap_alloc_request,
	.free_request	= omap_free_request,

	.queue		= omap_ep_queue,
	.dequeue	= omap_ep_dequeue,

	.set_halt	= omap_ep_set_halt,
	// fifo_status ... report bytes in fifo
	// fifo_flush ... flush fifo
};

/*-------------------------------------------------------------------------*/

/*
 * usb_gadget_ops.get_frame: current USB frame number, or -EL2NSYNC
 * when the hardware's timestamp is not valid.
 */
static int omap_get_frame(struct usb_gadget *gadget)
{
	u16	sof = omap_readw(UDC_SOF);
	return (sof & UDC_TS_OK) ? (sof & UDC_TS) : -EL2NSYNC;
}

/*
 * usb_gadget_ops.wakeup: issue remote wakeup while suspended (if the
 * host enabled it), or try SRP when not attached.
 */
static int omap_wakeup(struct usb_gadget *gadget)
{
	struct omap_udc	*udc;
	unsigned long	flags;
	int		retval = -EHOSTUNREACH;

	udc = container_of(gadget, struct omap_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);
	if (udc->devstat & UDC_SUS) {
		/* NOTE:  OTG spec erratum says that OTG devices may
		 * issue wakeups without host enable.
		 */
		if (udc->devstat & (UDC_B_HNP_ENABLE|UDC_R_WK_OK)) {
			DBG("remote wakeup...\n");
			omap_writew(UDC_RMT_WKP, UDC_SYSCON2);
			retval = 0;
		}

	/* NOTE:  non-OTG systems may use SRP TOO... */
	} else if (!(udc->devstat & UDC_ATT)) {
		if (udc->transceiver)
			retval = otg_start_srp(udc->transceiver);
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return retval;
}

/* usb_gadget_ops.set_selfpowered: mirror the flag into SYSCON1 */
static int
omap_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
{
	struct omap_udc	*udc;
	unsigned long	flags;
	u16		syscon1;

	udc = container_of(gadget, struct omap_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);
	syscon1 = omap_readw(UDC_SYSCON1);
	if (is_selfpowered)
		syscon1 |= UDC_SELF_PWR;
	else
		syscon1 &= ~UDC_SELF_PWR;
	omap_writew(syscon1, UDC_SYSCON1);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/* pull up D+ only when a driver is bound, soft-connected, and VBUS is on */
static int can_pullup(struct omap_udc *udc)
{
	return udc->driver && udc->softconnect && udc->vbus_active;
}

/* enable the D+ pullup and (non-OTG, non-15xx) flag B-session valid */
static void pullup_enable(struct omap_udc *udc)
{
	u16 w;

	w = omap_readw(UDC_SYSCON1);
	w |= UDC_PULLUP_EN;
	omap_writew(w, UDC_SYSCON1);
	if (!gadget_is_otg(&udc->gadget) && !cpu_is_omap15xx()) {
		u32 l;

		l = omap_readl(OTG_CTRL);
		l |= OTG_BSESSVLD;
		omap_writel(l, OTG_CTRL);
	}
	omap_writew(UDC_DS_CHG_IE, UDC_IRQ_EN);
}

/* disable the D+ pullup; inverse of pullup_enable() */
static void pullup_disable(struct omap_udc *udc)
{
	u16 w;

	if (!gadget_is_otg(&udc->gadget) && !cpu_is_omap15xx()) {
		u32 l;

		l = omap_readl(OTG_CTRL);
		l &= ~OTG_BSESSVLD;
		omap_writel(l, OTG_CTRL);
	}
	omap_writew(UDC_DS_CHG_IE,
			UDC_IRQ_EN);
	w = omap_readw(UDC_SYSCON1);
	w &= ~UDC_PULLUP_EN;
	omap_writew(w, UDC_SYSCON1);
}

static struct omap_udc *udc;

/* gate the 48 MHz device and host clocks (no-op until both are set up) */
static void omap_udc_enable_clock(int enable)
{
	if (udc == NULL || udc->dc_clk == NULL || udc->hhc_clk == NULL)
		return;

	if (enable) {
		clk_enable(udc->dc_clk);
		clk_enable(udc->hhc_clk);
		udelay(100);
	} else {
		clk_disable(udc->hhc_clk);
		clk_disable(udc->dc_clk);
	}
}

/*
 * Called by whatever detects VBUS sessions:  external transceiver
 * driver, or maybe GPIO0 VBUS IRQ.  May request 48 MHz clock.
 */
static int omap_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct omap_udc	*udc;
	unsigned long	flags;
	u32 l;

	udc = container_of(gadget, struct omap_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);
	VDBG("VBUS %s\n", is_active ? "on" : "off");
	udc->vbus_active = (is_active != 0);
	if (cpu_is_omap15xx()) {
		/* "software" detect, ignored if !VBUS_MODE_1510 */
		l = omap_readl(FUNC_MUX_CTRL_0);
		if (is_active)
			l |= VBUS_CTRL_1510;
		else
			l &= ~VBUS_CTRL_1510;
		omap_writel(l, FUNC_MUX_CTRL_0);
	}
	if (udc->dc_clk != NULL && is_active) {
		if (!udc->clk_requested) {
			omap_udc_enable_clock(1);
			udc->clk_requested = 1;
		}
	}
	if (can_pullup(udc))
		pullup_enable(udc);
	else
		pullup_disable(udc);
	if (udc->dc_clk != NULL && !is_active) {
		if (udc->clk_requested) {
			omap_udc_enable_clock(0);
			udc->clk_requested = 0;
		}
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/* usb_gadget_ops.vbus_draw: forward to the transceiver if there is one */
static int omap_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct omap_udc	*udc;

	udc = container_of(gadget, struct omap_udc, gadget);
	if (udc->transceiver)
		return otg_set_power(udc->transceiver, mA);
	return -EOPNOTSUPP;
}

/* usb_gadget_ops.pullup: software connect/disconnect */
static int omap_pullup(struct usb_gadget *gadget, int is_on)
{
	struct omap_udc	*udc;
	unsigned long	flags;

	udc = container_of(gadget, struct omap_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);
	udc->softconnect = (is_on != 0);
	if (can_pullup(udc))
		pullup_enable(udc);
	else
		pullup_disable(udc);
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/* gadget-level operations exported to the gadget framework */
static struct usb_gadget_ops omap_gadget_ops = {
	.get_frame		= omap_get_frame,
	.wakeup			= omap_wakeup,
	.set_selfpowered	= omap_set_selfpowered,
	.vbus_session		= omap_vbus_session,
	.vbus_draw		= omap_vbus_draw,
	.pullup			= omap_pullup,
};

/*-------------------------------------------------------------------------*/

/* dequeue ALL requests; caller holds udc->lock */
static void nuke(struct omap_ep *ep, int status)
{
	struct omap_req	*req;

	ep->stopped = 1;

	if (use_dma && ep->dma_channel)
		dma_channel_release(ep);

	use_ep(ep, 0);
	omap_writew(UDC_CLR_EP, UDC_CTRL);
	if (ep->bEndpointAddress && ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
		omap_writew(UDC_SET_HALT, UDC_CTRL);

	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct omap_req, queue);
		done(ep, req, status);
	}
}

/* caller holds udc->lock */
static void udc_quiesce(struct omap_udc *udc)
{
	struct omap_ep	*ep;

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	nuke(&udc->ep[0], -ESHUTDOWN);
	list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list)
		nuke(ep, -ESHUTDOWN);
}

/*-------------------------------------------------------------------------*/

/*
 * Refresh the gadget's view of the OTG HNP flags from DEVSTAT, and
 * pre-enable HNP when the host allowed it.
 */
static void update_otg(struct omap_udc *udc)
{
	u16	devstat;

	if (!gadget_is_otg(&udc->gadget))
		return;

	if (omap_readl(OTG_CTRL) & OTG_ID)
		devstat = omap_readw(UDC_DEVSTAT);
	else
		devstat = 0;

	udc->gadget.b_hnp_enable = !!(devstat & UDC_B_HNP_ENABLE);
	udc->gadget.a_hnp_support = !!(devstat & UDC_A_HNP_SUPPORT);
	udc->gadget.a_alt_hnp_support = !!(devstat & UDC_A_ALT_HNP_SUPPORT);

	/* Enable HNP early, avoiding races on suspend irq path.
	 * ASSUMES OTG state machine B_BUS_REQ input is true.
	 */
	if (udc->gadget.b_hnp_enable) {
		u32 l;

		l = omap_readl(OTG_CTRL);
		l |= OTG_B_HNPEN | OTG_B_BUSREQ;
		l &= ~OTG_PULLUP;
		omap_writel(l, OTG_CTRL);
	}
}

/*
 * EP0 interrupt handler: runs the control-transfer state machine for
 * SETUP, EP0-IN (TX) and EP0-OUT (RX) events.
 */
static void ep0_irq(struct omap_udc *udc, u16 irq_src)
{
	struct omap_ep	*ep0 = &udc->ep[0];
	struct omap_req	*req = NULL;

	ep0->irqs++;

	/* Clear any pending requests and then scrub any rx/tx state
	 * before starting to handle the SETUP request.
	 */
	if (irq_src & UDC_SETUP) {
		u16	ack = irq_src & (UDC_EP0_TX|UDC_EP0_RX);

		nuke(ep0, 0);
		if (ack) {
			omap_writew(ack, UDC_IRQ_SRC);
			irq_src = UDC_SETUP;
		}
	}

	/* IN/OUT packets mean we're in the DATA or STATUS stage.
	 * This driver only uses protocol stalls (ep0 never halts),
	 * and if we got this far the gadget driver already had a
	 * chance to stall.  Tries to be forgiving of host oddities.
	 *
	 * NOTE:  the last chance gadget drivers have to stall control
	 * requests is during their request completion callback.
	 */
	if (!list_empty(&ep0->queue))
		req = container_of(ep0->queue.next, struct omap_req, queue);

	/* IN == TX to host */
	if (irq_src & UDC_EP0_TX) {
		int	stat;

		omap_writew(UDC_EP0_TX, UDC_IRQ_SRC);
		omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
		stat = omap_readw(UDC_STAT_FLG);
		if (stat & UDC_ACK) {
			if (udc->ep0_in) {
				/* write next IN packet from response,
				 * or set up the status stage.
				 */
				if (req)
					stat = write_fifo(ep0, req);
				omap_writew(UDC_EP_DIR, UDC_EP_NUM);
				if (!req && udc->ep0_pending) {
					omap_writew(UDC_EP_SEL, UDC_EP_NUM);
					omap_writew(UDC_CLR_EP, UDC_CTRL);
					omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
					omap_writew(0, UDC_EP_NUM);
					udc->ep0_pending = 0;
				} /* else:  6 wait states before it'll tx */
			} else {
				/* ack status stage of OUT transfer */
				omap_writew(UDC_EP_DIR, UDC_EP_NUM);
				if (req)
					done(ep0, req, 0);
			}
			req = NULL;
		} else if (stat & UDC_STALL) {
			omap_writew(UDC_CLR_HALT, UDC_CTRL);
			omap_writew(UDC_EP_DIR, UDC_EP_NUM);
		} else {
			omap_writew(UDC_EP_DIR, UDC_EP_NUM);
		}
	}

	/* OUT == RX from host */
	if (irq_src & UDC_EP0_RX) {
		int	stat;

		omap_writew(UDC_EP0_RX, UDC_IRQ_SRC);
		omap_writew(UDC_EP_SEL, UDC_EP_NUM);
		stat = omap_readw(UDC_STAT_FLG);
		if (stat & UDC_ACK) {
			if (!udc->ep0_in) {
				stat = 0;
				/* read next OUT packet of request, maybe
				 * reactiviting the fifo; stall on errors.
				 */
				if (!req || (stat = read_fifo(ep0, req)) < 0) {
					omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
					udc->ep0_pending = 0;
					stat = 0;
				} else if (stat == 0)
					omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
				omap_writew(0, UDC_EP_NUM);

				/* activate status stage */
				if (stat == 1) {
					done(ep0, req, 0);
					/* that may have STALLed ep0... */
					omap_writew(UDC_EP_SEL | UDC_EP_DIR,
							UDC_EP_NUM);
					omap_writew(UDC_CLR_EP, UDC_CTRL);
					omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
					omap_writew(UDC_EP_DIR, UDC_EP_NUM);
					udc->ep0_pending = 0;
				}
			} else {
				/* ack status stage of IN transfer */
				omap_writew(0, UDC_EP_NUM);
				if (req)
					done(ep0, req, 0);
			}
		} else if (stat & UDC_STALL) {
			omap_writew(UDC_CLR_HALT, UDC_CTRL);
			omap_writew(0, UDC_EP_NUM);
		} else {
			omap_writew(0, UDC_EP_NUM);
		}
	}

	/* SETUP starts all control transfers */
	if (irq_src & UDC_SETUP) {
		union u {
			u16			word[4];
			struct usb_ctrlrequest	r;
		} u;
		int			status = -EINVAL;
		struct omap_ep		*ep;

		/* read the (latest) SETUP message */
		do {
			omap_writew(UDC_SETUP_SEL, UDC_EP_NUM);
			/* two bytes at a time */
			u.word[0] = omap_readw(UDC_DATA);
			u.word[1] = omap_readw(UDC_DATA);
			u.word[2] = omap_readw(UDC_DATA);
			u.word[3] = omap_readw(UDC_DATA);
			omap_writew(0, UDC_EP_NUM);
		} while (omap_readw(UDC_IRQ_SRC) & UDC_SETUP);

#define	w_value		le16_to_cpu(u.r.wValue)
#define	w_index		le16_to_cpu(u.r.wIndex)
#define	w_length	le16_to_cpu(u.r.wLength)

		/* Delegate almost all control requests to the gadget driver,
		 * except for a handful of ch9 status/feature requests that
		 * hardware doesn't autodecode _and_ the gadget API hides.
		 */
		udc->ep0_in = (u.r.bRequestType & USB_DIR_IN) != 0;
		udc->ep0_set_config = 0;
		udc->ep0_pending = 1;
		ep0->stopped = 0;
		ep0->ackwait = 0;
		switch (u.r.bRequest) {
		case USB_REQ_SET_CONFIGURATION:
			/* udc needs to know when ep != 0 is valid */
			if (u.r.bRequestType != USB_RECIP_DEVICE)
				goto delegate;
			if (w_length != 0)
				goto do_stall;
			udc->ep0_set_config = 1;
			udc->ep0_reset_config = (w_value == 0);
			VDBG("set config %d\n", w_value);

			/* update udc NOW since gadget driver may start
			 * queueing requests immediately; clear config
			 * later if it fails the request.
			 */
			if (udc->ep0_reset_config)
				omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
			else
				omap_writew(UDC_DEV_CFG, UDC_SYSCON2);
			update_otg(udc);
			goto delegate;
		case USB_REQ_CLEAR_FEATURE:
			/* clear endpoint halt */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT
					|| w_length != 0)
				goto do_stall;
			ep = &udc->ep[w_index & 0xf];
			if (ep != ep0) {
				if (w_index & USB_DIR_IN)
					ep += 16;
				if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
						|| !ep->desc)
					goto do_stall;
				use_ep(ep, 0);
				omap_writew(udc->clr_halt, UDC_CTRL);
				ep->ackwait = 0;
				if (!(ep->bEndpointAddress & USB_DIR_IN)) {
					omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
					ep->ackwait = 1 + ep->double_buf;
				}
				/* NOTE:  assumes the host behaves sanely,
				 * only clearing real halts.  Else we may
				 * need to kill pending transfers and then
				 * restart the queue... very messy for DMA!
				 */
			}
			VDBG("%s halt cleared by host\n", ep->name);
			goto ep0out_status_stage;
		case USB_REQ_SET_FEATURE:
			/* set endpoint halt */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT
					|| w_length != 0)
				goto do_stall;
			ep = &udc->ep[w_index & 0xf];
			if (w_index & USB_DIR_IN)
				ep += 16;
			if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
					|| ep == ep0 || !ep->desc)
				goto do_stall;
			if (use_dma && ep->has_dma) {
				/* this has rude side-effects (aborts) and
				 * can't really work if DMA-IN is active
				 */
				DBG("%s host set_halt, NYET \n", ep->name);
				goto do_stall;
			}
			use_ep(ep, 0);
			/* can't halt if fifo isn't empty... */
			omap_writew(UDC_CLR_EP, UDC_CTRL);
			omap_writew(UDC_SET_HALT, UDC_CTRL);
			VDBG("%s halted by host\n", ep->name);
ep0out_status_stage:
			status = 0;
			omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
			omap_writew(UDC_CLR_EP, UDC_CTRL);
			omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
			omap_writew(UDC_EP_DIR, UDC_EP_NUM);
			udc->ep0_pending = 0;
			break;
		case USB_REQ_GET_STATUS:
			/* USB_ENDPOINT_HALT status? */
			if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
				goto intf_status;

			/* ep0 never stalls */
			if (!(w_index & 0xf))
				goto zero_status;

			/* only active endpoints count */
			ep = &udc->ep[w_index & 0xf];
			if (w_index & USB_DIR_IN)
				ep += 16;
			if (!ep->desc)
				goto do_stall;

			/* iso never stalls */
			if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
				goto zero_status;

			/* FIXME don't assume non-halted endpoints!! */
			ERR("%s status, can't report\n", ep->ep.name);
			goto do_stall;

intf_status:
			/* return interface status.  if we were pedantic,
			 * we'd detect non-existent interfaces, and stall.
			 */
			if (u.r.bRequestType
					!= (USB_DIR_IN|USB_RECIP_INTERFACE))
				goto delegate;

zero_status:
			/* return two zero bytes */
			omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
			omap_writew(0, UDC_DATA);
			omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
			omap_writew(UDC_EP_DIR, UDC_EP_NUM);
			status = 0;
			VDBG("GET_STATUS, interface %d\n", w_index);
			/* next, status stage */
			break;
		default:
delegate:
			/* activate the ep0out fifo right away */
			if (!udc->ep0_in && w_length) {
				omap_writew(0, UDC_EP_NUM);
				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
			}

			/* gadget drivers see class/vendor specific requests,
			 * {SET,GET}_{INTERFACE,DESCRIPTOR,CONFIGURATION},
			 * and more
			 */
			VDBG("SETUP %02x.%02x v%04x i%04x l%04x\n",
				u.r.bRequestType, u.r.bRequest,
				w_value, w_index, w_length);

#undef	w_value
#undef	w_index
#undef	w_length

			/* The gadget driver may return an error here,
			 * causing an immediate protocol stall.
			 *
			 * Else it must issue a response, either queueing a
			 * response buffer for the DATA stage, or halting ep0
			 * (causing a protocol stall, not a real halt).  A
			 * zero length buffer means no DATA stage.
			 *
			 * It's fine to issue that response after the setup()
			 * call returns, and this IRQ was handled.
			 */
			udc->ep0_setup = 1;
			spin_unlock(&udc->lock);
			status = udc->driver->setup (&udc->gadget, &u.r);
			spin_lock(&udc->lock);
			udc->ep0_setup = 0;
		}

		if (status < 0) {
do_stall:
			VDBG("req %02x.%02x protocol STALL; stat %d\n",
					u.r.bRequestType, u.r.bRequest, status);
			if (udc->ep0_set_config) {
				if (udc->ep0_reset_config)
					WARNING("error resetting config?\n");
				else
					omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
			}
			omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
			udc->ep0_pending = 0;
		}
	}
}

/*-------------------------------------------------------------------------*/

#define OTG_FLAGS (UDC_B_HNP_ENABLE|UDC_A_HNP_SUPPORT|UDC_A_ALT_HNP_SUPPORT)

/*
 * Device-state-change IRQ: handles attach/detach, USB reset,
 * suspend/resume, and OTG flag changes reported via DEVSTAT.
 */
static void devstate_irq(struct omap_udc *udc, u16 irq_src)
{
	u16	devstat, change;

	devstat = omap_readw(UDC_DEVSTAT);
	change = devstat ^ udc->devstat;
	udc->devstat = devstat;

	if (change & (UDC_USB_RESET|UDC_ATT)) {
		udc_quiesce(udc);

		if (change & UDC_ATT) {
			/* driver for any external transceiver will
			 * have called omap_vbus_session() already
			 */
			if (devstat & UDC_ATT) {
				udc->gadget.speed = USB_SPEED_FULL;
				VDBG("connect\n");
				if (!udc->transceiver)
					pullup_enable(udc);
				// if (driver->connect) call it
			} else if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
				udc->gadget.speed = USB_SPEED_UNKNOWN;
				if (!udc->transceiver)
					pullup_disable(udc);
				DBG("disconnect, gadget %s\n",
					udc->driver->driver.name);
				if (udc->driver->disconnect) {
					spin_unlock(&udc->lock);
					udc->driver->disconnect(&udc->gadget);
					spin_lock(&udc->lock);
				}
			}
			change &= ~UDC_ATT;
		}
		if (change & UDC_USB_RESET) {
			if (devstat & UDC_USB_RESET) {
				VDBG("RESET=1\n");
			} else {
				udc->gadget.speed = USB_SPEED_FULL;
				INFO("USB reset done, gadget %s\n",
					udc->driver->driver.name);
				/* ep0 traffic is legal from now on */
				omap_writew(UDC_DS_CHG_IE | UDC_EP0_IE,
						UDC_IRQ_EN);
			}
			change &= ~UDC_USB_RESET;
		}
	}
	if (change & UDC_SUS) {
		if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
			// FIXME tell isp1301 to suspend/resume (?)
			if (devstat & UDC_SUS) {
				VDBG("suspend\n");
				update_otg(udc);
				/* HNP could be under way already */
				if (udc->gadget.speed == USB_SPEED_FULL
						&& udc->driver->suspend) {
					spin_unlock(&udc->lock);
					udc->driver->suspend(&udc->gadget);
					spin_lock(&udc->lock);
				}
				if (udc->transceiver)
					otg_set_suspend(udc->transceiver, 1);
			} else {
				VDBG("resume\n");
				if (udc->transceiver)
					otg_set_suspend(udc->transceiver, 0);
				if (udc->gadget.speed == USB_SPEED_FULL
						&& udc->driver->resume) {
					spin_unlock(&udc->lock);
					udc->driver->resume(&udc->gadget);
					spin_lock(&udc->lock);
				}
			}
		}
		change &= ~UDC_SUS;
	}
	if (!cpu_is_omap15xx() && (change & OTG_FLAGS)) {
		update_otg(udc);
		change &= ~OTG_FLAGS;
	}

	change &= ~(UDC_CFG|UDC_DEF|UDC_ADD);
	if (change)
		VDBG("devstat %03x, ignore change %03x\n",
			devstat,  change);

	omap_writew(UDC_DS_CHG, UDC_IRQ_SRC);
}

/*
 * Main UDC IRQ handler: dispatches device-state changes, EP0 control
 * traffic, and DMA completion events.
 */
static irqreturn_t omap_udc_irq(int irq, void *_udc)
{
	struct omap_udc	*udc = _udc;
	u16		irq_src;
	irqreturn_t	status = IRQ_NONE;
	unsigned long	flags;

	spin_lock_irqsave(&udc->lock, flags);
	irq_src = omap_readw(UDC_IRQ_SRC);

	/* Device state change (usb ch9 stuff) */
	if (irq_src & UDC_DS_CHG) {
		devstate_irq(_udc, irq_src);
		status = IRQ_HANDLED;
		irq_src &= ~UDC_DS_CHG;
	}

	/* EP0 control transfers */
	if (irq_src & (UDC_EP0_RX|UDC_SETUP|UDC_EP0_TX)) {
		ep0_irq(_udc, irq_src);
		status = IRQ_HANDLED;
		irq_src &= ~(UDC_EP0_RX|UDC_SETUP|UDC_EP0_TX);
	}

	/* DMA transfer completion */
	if (use_dma && (irq_src & (UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT))) {
		dma_irq(_udc, irq_src);
		status = IRQ_HANDLED;
		irq_src &= ~(UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT);
	}

	irq_src &= ~(UDC_IRQ_SOF | UDC_EPN_TX|UDC_EPN_RX);
	if (irq_src)
		DBG("udc_irq, unhandled %03x\n", irq_src);
	spin_unlock_irqrestore(&udc->lock, flags);

	return status;
}

/* workaround for seemingly-lost IRQs for RX ACKs... */
#define PIO_OUT_TIMEOUT	(jiffies + HZ/3)
#define HALF_FULL(f)	(!((f)&(UDC_NON_ISO_FIFO_FULL|UDC_NON_ISO_FIFO_EMPTY)))

/*
 * Periodic timer that polls an OUT endpoint for an ACK whose IRQ was
 * lost, draining the fifo by PIO and re-enabling it.  Re-arms itself.
 */
static void pio_out_timer(unsigned long _ep)
{
	struct omap_ep	*ep = (void *) _ep;
	unsigned long	flags;
	u16		stat_flg;

	spin_lock_irqsave(&ep->udc->lock, flags);
	if (!list_empty(&ep->queue) && ep->ackwait) {
		use_ep(ep, UDC_EP_SEL);
		stat_flg = omap_readw(UDC_STAT_FLG);

		if ((stat_flg & UDC_ACK) && (!(stat_flg & UDC_FIFO_EN)
				|| (ep->double_buf && HALF_FULL(stat_flg)))) {
			struct omap_req	*req;

			VDBG("%s: lose, %04x\n", ep->ep.name, stat_flg);
			req = container_of(ep->queue.next,
					struct omap_req, queue);
			(void) read_fifo(ep, req);
			omap_writew(ep->bEndpointAddress, UDC_EP_NUM);
			omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
			ep->ackwait = 1 + ep->double_buf;
		} else
			deselect_ep();
	}
	mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
	spin_unlock_irqrestore(&ep->udc->lock, flags);
}

/*
 * PIO (non-DMA) endpoint IRQ handler for data endpoints: services one
 * OUT (RX) event, else one IN (TX) event, per invocation.
 */
static irqreturn_t omap_udc_pio_irq(int irq, void *_dev)
{
	u16		epn_stat, irq_src;
	irqreturn_t	status = IRQ_NONE;
	struct omap_ep	*ep;
	int		epnum;
	struct omap_udc	*udc = _dev;
	struct omap_req	*req;
	unsigned long	flags;

	spin_lock_irqsave(&udc->lock, flags);
	epn_stat = omap_readw(UDC_EPN_STAT);
	irq_src = omap_readw(UDC_IRQ_SRC);

	/* handle OUT first, to avoid some wasteful NAKs */
	if (irq_src & UDC_EPN_RX) {
		epnum = (epn_stat >> 8) & 0x0f;
		omap_writew(UDC_EPN_RX, UDC_IRQ_SRC);
		status = IRQ_HANDLED;
		ep = &udc->ep[epnum];
		ep->irqs++;

		omap_writew(epnum | UDC_EP_SEL, UDC_EP_NUM);
		ep->fnf = 0;
		if (omap_readw(UDC_STAT_FLG) & UDC_ACK) {
			ep->ackwait--;
			if (!list_empty(&ep->queue)) {
				int stat;
				req = container_of(ep->queue.next,
						struct omap_req, queue);
				stat = read_fifo(ep, req);
				if (!ep->double_buf)
					ep->fnf = 1;
			}
		}
		/* min 6 clock delay before clearing EP_SEL ... */
		epn_stat = omap_readw(UDC_EPN_STAT);
		epn_stat = omap_readw(UDC_EPN_STAT);
		omap_writew(epnum, UDC_EP_NUM);

		/* enabling fifo _after_ clearing ACK, contrary to docs,
		 * reduces lossage; timer still needed though (sigh).
*/ if (ep->fnf) { omap_writew(UDC_SET_FIFO_EN, UDC_CTRL); ep->ackwait = 1 + ep->double_buf; } mod_timer(&ep->timer, PIO_OUT_TIMEOUT); } /* then IN transfers */ else if (irq_src & UDC_EPN_TX) { epnum = epn_stat & 0x0f; omap_writew(UDC_EPN_TX, UDC_IRQ_SRC); status = IRQ_HANDLED; ep = &udc->ep[16 + epnum]; ep->irqs++; omap_writew(epnum | UDC_EP_DIR | UDC_EP_SEL, UDC_EP_NUM); if (omap_readw(UDC_STAT_FLG) & UDC_ACK) { ep->ackwait = 0; if (!list_empty(&ep->queue)) { req = container_of(ep->queue.next, struct omap_req, queue); (void) write_fifo(ep, req); } } /* min 6 clock delay before clearing EP_SEL ... */ epn_stat = omap_readw(UDC_EPN_STAT); epn_stat = omap_readw(UDC_EPN_STAT); omap_writew(epnum | UDC_EP_DIR, UDC_EP_NUM); /* then 6 clocks before it'd tx */ } spin_unlock_irqrestore(&udc->lock, flags); return status; } #ifdef USE_ISO static irqreturn_t omap_udc_iso_irq(int irq, void *_dev) { struct omap_udc *udc = _dev; struct omap_ep *ep; int pending = 0; unsigned long flags; spin_lock_irqsave(&udc->lock, flags); /* handle all non-DMA ISO transfers */ list_for_each_entry (ep, &udc->iso, iso) { u16 stat; struct omap_req *req; if (ep->has_dma || list_empty(&ep->queue)) continue; req = list_entry(ep->queue.next, struct omap_req, queue); use_ep(ep, UDC_EP_SEL); stat = omap_readw(UDC_STAT_FLG); /* NOTE: like the other controller drivers, this isn't * currently reporting lost or damaged frames. 
*/
		if (ep->bEndpointAddress & USB_DIR_IN) {
			if (stat & UDC_MISS_IN)
				/* done(ep, req, -EPROTO) */;
			else
				write_fifo(ep, req);
		} else {
			int status = 0;

			/* map the per-frame error bits to errno values;
			 * the done() calls stay disabled (see NOTE above)
			 */
			if (stat & UDC_NO_RXPACKET)
				status = -EREMOTEIO;
			else if (stat & UDC_ISO_ERR)
				status = -EILSEQ;
			else if (stat & UDC_DATA_FLUSH)
				status = -ENOSR;

			if (status)
				/* done(ep, req, status) */;
			else
				read_fifo(ep, req);
		}
		deselect_ep();	/* 6 wait states before next EP */

		ep->irqs++;
		if (!list_empty(&ep->queue))
			pending = 1;
	}

	/* nothing more queued anywhere: stop taking SOF interrupts */
	if (!pending) {
		u16 w;

		w = omap_readw(UDC_IRQ_EN);
		w &= ~UDC_SOF_IE;
		omap_writew(w, UDC_IRQ_EN);
	}
	omap_writew(UDC_IRQ_SOF, UDC_IRQ_SRC);

	spin_unlock_irqrestore(&udc->lock, flags);
	return IRQ_HANDLED;
}
#endif

/*-------------------------------------------------------------------------*/

/*
 * Boards listed here have no way to sense VBUS; the driver then fakes a
 * permanently-active session (see usb_gadget_probe_driver below).
 */
static inline int machine_without_vbus_sense(void)
{
	return (machine_is_omap_innovator()
		|| machine_is_omap_osk()
		|| machine_is_omap_apollon()
#ifndef CONFIG_MACH_OMAP_H4_OTG
		|| machine_is_omap_h4()
#endif
		|| machine_is_sx1()
		|| cpu_is_omap7xx() /* No known omap7xx boards with vbus sense */
		);
}

/*
 * Bind a gadget driver to this (singleton) controller: sanity-check the
 * driver, reset/halt all non-ISO endpoints, record the driver, then call
 * its bind() and connect to the bus (via the OTG transceiver if present,
 * otherwise by driving the D+ pullup directly).
 *
 * Returns 0 on success; -ENODEV if the UDC never probed, -EINVAL for a
 * malformed driver, -EBUSY if another driver is already bound, or the
 * error from bind()/otg_set_peripheral().
 */
int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *))
{
	int		status = -ENODEV;
	struct omap_ep	*ep;
	unsigned long	flags;

	/* basic sanity tests */
	if (!udc)
		return -ENODEV;
	if (!driver
			// FIXME if otg, check: driver->is_otg
			|| driver->speed < USB_SPEED_FULL
			|| !bind || !driver->setup)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (udc->driver) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EBUSY;
	}

	/* reset state */
	list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
		ep->irqs = 0;
		if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
			continue;
		use_ep(ep, 0);
		omap_writew(UDC_SET_HALT, UDC_CTRL);
	}
	udc->ep0_pending = 0;
	udc->ep[0].irqs = 0;
	udc->softconnect = 1;

	/* hook up the driver */
	driver->driver.bus = NULL;
	udc->driver = driver;
	udc->gadget.dev.driver = &driver->driver;
	spin_unlock_irqrestore(&udc->lock, flags);

	if (udc->dc_clk != NULL)
		omap_udc_enable_clock(1);

	status =
bind(&udc->gadget); if (status) { DBG("bind to %s --> %d\n", driver->driver.name, status); udc->gadget.dev.driver = NULL; udc->driver = NULL; goto done; } DBG("bound to driver %s\n", driver->driver.name); omap_writew(UDC_IRQ_SRC_MASK, UDC_IRQ_SRC); /* connect to bus through transceiver */ if (udc->transceiver) { status = otg_set_peripheral(udc->transceiver, &udc->gadget); if (status < 0) { ERR("can't bind to transceiver\n"); if (driver->unbind) { driver->unbind (&udc->gadget); udc->gadget.dev.driver = NULL; udc->driver = NULL; } goto done; } } else { if (can_pullup(udc)) pullup_enable (udc); else pullup_disable (udc); } /* boards that don't have VBUS sensing can't autogate 48MHz; * can't enter deep sleep while a gadget driver is active. */ if (machine_without_vbus_sense()) omap_vbus_session(&udc->gadget, 1); done: if (udc->dc_clk != NULL) omap_udc_enable_clock(0); return status; } EXPORT_SYMBOL(usb_gadget_probe_driver); int usb_gadget_unregister_driver (struct usb_gadget_driver *driver) { unsigned long flags; int status = -ENODEV; if (!udc) return -ENODEV; if (!driver || driver != udc->driver || !driver->unbind) return -EINVAL; if (udc->dc_clk != NULL) omap_udc_enable_clock(1); if (machine_without_vbus_sense()) omap_vbus_session(&udc->gadget, 0); if (udc->transceiver) (void) otg_set_peripheral(udc->transceiver, NULL); else pullup_disable(udc); spin_lock_irqsave(&udc->lock, flags); udc_quiesce(udc); spin_unlock_irqrestore(&udc->lock, flags); driver->unbind(&udc->gadget); udc->gadget.dev.driver = NULL; udc->driver = NULL; if (udc->dc_clk != NULL) omap_udc_enable_clock(0); DBG("unregistered driver '%s'\n", driver->driver.name); return status; } EXPORT_SYMBOL(usb_gadget_unregister_driver); /*-------------------------------------------------------------------------*/ #ifdef CONFIG_USB_GADGET_DEBUG_FILES #include <linux/seq_file.h> static const char proc_filename[] = "driver/udc"; #define FOURBITS "%s%s%s%s" #define EIGHTBITS FOURBITS FOURBITS static void 
proc_ep_show(struct seq_file *s, struct omap_ep *ep) { u16 stat_flg; struct omap_req *req; char buf[20]; use_ep(ep, 0); if (use_dma && ep->has_dma) snprintf(buf, sizeof buf, "(%cxdma%d lch%d) ", (ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r', ep->dma_channel - 1, ep->lch); else buf[0] = 0; stat_flg = omap_readw(UDC_STAT_FLG); seq_printf(s, "\n%s %s%s%sirqs %ld stat %04x " EIGHTBITS FOURBITS "%s\n", ep->name, buf, ep->double_buf ? "dbuf " : "", ({char *s; switch(ep->ackwait){ case 0: s = ""; break; case 1: s = "(ackw) "; break; case 2: s = "(ackw2) "; break; default: s = "(?) "; break; } s;}), ep->irqs, stat_flg, (stat_flg & UDC_NO_RXPACKET) ? "no_rxpacket " : "", (stat_flg & UDC_MISS_IN) ? "miss_in " : "", (stat_flg & UDC_DATA_FLUSH) ? "data_flush " : "", (stat_flg & UDC_ISO_ERR) ? "iso_err " : "", (stat_flg & UDC_ISO_FIFO_EMPTY) ? "iso_fifo_empty " : "", (stat_flg & UDC_ISO_FIFO_FULL) ? "iso_fifo_full " : "", (stat_flg & UDC_EP_HALTED) ? "HALT " : "", (stat_flg & UDC_STALL) ? "STALL " : "", (stat_flg & UDC_NAK) ? "NAK " : "", (stat_flg & UDC_ACK) ? "ACK " : "", (stat_flg & UDC_FIFO_EN) ? "fifo_en " : "", (stat_flg & UDC_NON_ISO_FIFO_EMPTY) ? "fifo_empty " : "", (stat_flg & UDC_NON_ISO_FIFO_FULL) ? "fifo_full " : ""); if (list_empty (&ep->queue)) seq_printf(s, "\t(queue empty)\n"); else list_for_each_entry (req, &ep->queue, queue) { unsigned length = req->req.actual; if (use_dma && buf[0]) { length += ((ep->bEndpointAddress & USB_DIR_IN) ? dma_src_len : dma_dest_len) (ep, req->req.dma + length); buf[0] = 0; } seq_printf(s, "\treq %p len %d/%d buf %p\n", &req->req, length, req->req.length, req->req.buf); } } static char *trx_mode(unsigned m, int enabled) { switch (m) { case 0: return enabled ? 
"*6wire" : "unused"; case 1: return "4wire"; case 2: return "3wire"; case 3: return "6wire"; default: return "unknown"; } } static int proc_otg_show(struct seq_file *s) { u32 tmp; u32 trans = 0; char *ctrl_name = "(UNKNOWN)"; /* XXX This needs major revision for OMAP2+ */ tmp = omap_readl(OTG_REV); if (cpu_class_is_omap1()) { ctrl_name = "tranceiver_ctrl"; trans = omap_readw(USB_TRANSCEIVER_CTRL); } seq_printf(s, "\nOTG rev %d.%d, %s %05x\n", tmp >> 4, tmp & 0xf, ctrl_name, trans); tmp = omap_readw(OTG_SYSCON_1); seq_printf(s, "otg_syscon1 %08x usb2 %s, usb1 %s, usb0 %s," FOURBITS "\n", tmp, trx_mode(USB2_TRX_MODE(tmp), trans & CONF_USB2_UNI_R), trx_mode(USB1_TRX_MODE(tmp), trans & CONF_USB1_UNI_R), (USB0_TRX_MODE(tmp) == 0 && !cpu_is_omap1710()) ? "internal" : trx_mode(USB0_TRX_MODE(tmp), 1), (tmp & OTG_IDLE_EN) ? " !otg" : "", (tmp & HST_IDLE_EN) ? " !host" : "", (tmp & DEV_IDLE_EN) ? " !dev" : "", (tmp & OTG_RESET_DONE) ? " reset_done" : " reset_active"); tmp = omap_readl(OTG_SYSCON_2); seq_printf(s, "otg_syscon2 %08x%s" EIGHTBITS " b_ase_brst=%d hmc=%d\n", tmp, (tmp & OTG_EN) ? " otg_en" : "", (tmp & USBX_SYNCHRO) ? " synchro" : "", // much more SRP stuff (tmp & SRP_DATA) ? " srp_data" : "", (tmp & SRP_VBUS) ? " srp_vbus" : "", (tmp & OTG_PADEN) ? " otg_paden" : "", (tmp & HMC_PADEN) ? " hmc_paden" : "", (tmp & UHOST_EN) ? " uhost_en" : "", (tmp & HMC_TLLSPEED) ? " tllspeed" : "", (tmp & HMC_TLLATTACH) ? " tllattach" : "", B_ASE_BRST(tmp), OTG_HMC(tmp)); tmp = omap_readl(OTG_CTRL); seq_printf(s, "otg_ctrl %06x" EIGHTBITS EIGHTBITS "%s\n", tmp, (tmp & OTG_ASESSVLD) ? " asess" : "", (tmp & OTG_BSESSEND) ? " bsess_end" : "", (tmp & OTG_BSESSVLD) ? " bsess" : "", (tmp & OTG_VBUSVLD) ? " vbus" : "", (tmp & OTG_ID) ? " id" : "", (tmp & OTG_DRIVER_SEL) ? " DEVICE" : " HOST", (tmp & OTG_A_SETB_HNPEN) ? " a_setb_hnpen" : "", (tmp & OTG_A_BUSREQ) ? " a_bus" : "", (tmp & OTG_B_HNPEN) ? " b_hnpen" : "", (tmp & OTG_B_BUSREQ) ? " b_bus" : "", (tmp & OTG_BUSDROP) ? 
" busdrop" : "", (tmp & OTG_PULLDOWN) ? " down" : "", (tmp & OTG_PULLUP) ? " up" : "", (tmp & OTG_DRV_VBUS) ? " drv" : "", (tmp & OTG_PD_VBUS) ? " pd_vb" : "", (tmp & OTG_PU_VBUS) ? " pu_vb" : "", (tmp & OTG_PU_ID) ? " pu_id" : "" ); tmp = omap_readw(OTG_IRQ_EN); seq_printf(s, "otg_irq_en %04x" "\n", tmp); tmp = omap_readw(OTG_IRQ_SRC); seq_printf(s, "otg_irq_src %04x" "\n", tmp); tmp = omap_readw(OTG_OUTCTRL); seq_printf(s, "otg_outctrl %04x" "\n", tmp); tmp = omap_readw(OTG_TEST); seq_printf(s, "otg_test %04x" "\n", tmp); return 0; } static int proc_udc_show(struct seq_file *s, void *_) { u32 tmp; struct omap_ep *ep; unsigned long flags; spin_lock_irqsave(&udc->lock, flags); seq_printf(s, "%s, version: " DRIVER_VERSION #ifdef USE_ISO " (iso)" #endif "%s\n", driver_desc, use_dma ? " (dma)" : ""); tmp = omap_readw(UDC_REV) & 0xff; seq_printf(s, "UDC rev %d.%d, fifo mode %d, gadget %s\n" "hmc %d, transceiver %s\n", tmp >> 4, tmp & 0xf, fifo_mode, udc->driver ? udc->driver->driver.name : "(none)", HMC, udc->transceiver ? udc->transceiver->label : ((cpu_is_omap1710() || cpu_is_omap24xx()) ? "external" : "(none)")); if (cpu_class_is_omap1()) { seq_printf(s, "ULPD control %04x req %04x status %04x\n", omap_readw(ULPD_CLOCK_CTRL), omap_readw(ULPD_SOFT_REQ), omap_readw(ULPD_STATUS_REQ)); } /* OTG controller registers */ if (!cpu_is_omap15xx()) proc_otg_show(s); tmp = omap_readw(UDC_SYSCON1); seq_printf(s, "\nsyscon1 %04x" EIGHTBITS "\n", tmp, (tmp & UDC_CFG_LOCK) ? " cfg_lock" : "", (tmp & UDC_DATA_ENDIAN) ? " data_endian" : "", (tmp & UDC_DMA_ENDIAN) ? " dma_endian" : "", (tmp & UDC_NAK_EN) ? " nak" : "", (tmp & UDC_AUTODECODE_DIS) ? " autodecode_dis" : "", (tmp & UDC_SELF_PWR) ? " self_pwr" : "", (tmp & UDC_SOFF_DIS) ? " soff_dis" : "", (tmp & UDC_PULLUP_EN) ? 
" PULLUP" : ""); // syscon2 is write-only /* UDC controller registers */ if (!(tmp & UDC_PULLUP_EN)) { seq_printf(s, "(suspended)\n"); spin_unlock_irqrestore(&udc->lock, flags); return 0; } tmp = omap_readw(UDC_DEVSTAT); seq_printf(s, "devstat %04x" EIGHTBITS "%s%s\n", tmp, (tmp & UDC_B_HNP_ENABLE) ? " b_hnp" : "", (tmp & UDC_A_HNP_SUPPORT) ? " a_hnp" : "", (tmp & UDC_A_ALT_HNP_SUPPORT) ? " a_alt_hnp" : "", (tmp & UDC_R_WK_OK) ? " r_wk_ok" : "", (tmp & UDC_USB_RESET) ? " usb_reset" : "", (tmp & UDC_SUS) ? " SUS" : "", (tmp & UDC_CFG) ? " CFG" : "", (tmp & UDC_ADD) ? " ADD" : "", (tmp & UDC_DEF) ? " DEF" : "", (tmp & UDC_ATT) ? " ATT" : ""); seq_printf(s, "sof %04x\n", omap_readw(UDC_SOF)); tmp = omap_readw(UDC_IRQ_EN); seq_printf(s, "irq_en %04x" FOURBITS "%s\n", tmp, (tmp & UDC_SOF_IE) ? " sof" : "", (tmp & UDC_EPN_RX_IE) ? " epn_rx" : "", (tmp & UDC_EPN_TX_IE) ? " epn_tx" : "", (tmp & UDC_DS_CHG_IE) ? " ds_chg" : "", (tmp & UDC_EP0_IE) ? " ep0" : ""); tmp = omap_readw(UDC_IRQ_SRC); seq_printf(s, "irq_src %04x" EIGHTBITS "%s%s\n", tmp, (tmp & UDC_TXN_DONE) ? " txn_done" : "", (tmp & UDC_RXN_CNT) ? " rxn_cnt" : "", (tmp & UDC_RXN_EOT) ? " rxn_eot" : "", (tmp & UDC_IRQ_SOF) ? " sof" : "", (tmp & UDC_EPN_RX) ? " epn_rx" : "", (tmp & UDC_EPN_TX) ? " epn_tx" : "", (tmp & UDC_DS_CHG) ? " ds_chg" : "", (tmp & UDC_SETUP) ? " setup" : "", (tmp & UDC_EP0_RX) ? " ep0out" : "", (tmp & UDC_EP0_TX) ? " ep0in" : ""); if (use_dma) { unsigned i; tmp = omap_readw(UDC_DMA_IRQ_EN); seq_printf(s, "dma_irq_en %04x%s" EIGHTBITS "\n", tmp, (tmp & UDC_TX_DONE_IE(3)) ? " tx2_done" : "", (tmp & UDC_RX_CNT_IE(3)) ? " rx2_cnt" : "", (tmp & UDC_RX_EOT_IE(3)) ? " rx2_eot" : "", (tmp & UDC_TX_DONE_IE(2)) ? " tx1_done" : "", (tmp & UDC_RX_CNT_IE(2)) ? " rx1_cnt" : "", (tmp & UDC_RX_EOT_IE(2)) ? " rx1_eot" : "", (tmp & UDC_TX_DONE_IE(1)) ? " tx0_done" : "", (tmp & UDC_RX_CNT_IE(1)) ? " rx0_cnt" : "", (tmp & UDC_RX_EOT_IE(1)) ? 
" rx0_eot" : ""); tmp = omap_readw(UDC_RXDMA_CFG); seq_printf(s, "rxdma_cfg %04x\n", tmp); if (tmp) { for (i = 0; i < 3; i++) { if ((tmp & (0x0f << (i * 4))) == 0) continue; seq_printf(s, "rxdma[%d] %04x\n", i, omap_readw(UDC_RXDMA(i + 1))); } } tmp = omap_readw(UDC_TXDMA_CFG); seq_printf(s, "txdma_cfg %04x\n", tmp); if (tmp) { for (i = 0; i < 3; i++) { if (!(tmp & (0x0f << (i * 4)))) continue; seq_printf(s, "txdma[%d] %04x\n", i, omap_readw(UDC_TXDMA(i + 1))); } } } tmp = omap_readw(UDC_DEVSTAT); if (tmp & UDC_ATT) { proc_ep_show(s, &udc->ep[0]); if (tmp & UDC_ADD) { list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) { if (ep->desc) proc_ep_show(s, ep); } } } spin_unlock_irqrestore(&udc->lock, flags); return 0; } static int proc_udc_open(struct inode *inode, struct file *file) { return single_open(file, proc_udc_show, NULL); } static const struct file_operations proc_ops = { .owner = THIS_MODULE, .open = proc_udc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void create_proc_file(void) { proc_create(proc_filename, 0, NULL, &proc_ops); } static void remove_proc_file(void) { remove_proc_entry(proc_filename, NULL); } #else static inline void create_proc_file(void) {} static inline void remove_proc_file(void) {} #endif /*-------------------------------------------------------------------------*/ /* Before this controller can enumerate, we need to pick an endpoint * configuration, or "fifo_mode" That involves allocating 2KB of packet * buffer space among the endpoints we'll be operating. * * NOTE: as of OMAP 1710 ES2.0, writing a new endpoint config when * UDC_SYSCON_1.CFG_LOCK is set can now work. We won't use that * capability yet though. 
*/ static unsigned __init omap_ep_setup(char *name, u8 addr, u8 type, unsigned buf, unsigned maxp, int dbuf) { struct omap_ep *ep; u16 epn_rxtx = 0; /* OUT endpoints first, then IN */ ep = &udc->ep[addr & 0xf]; if (addr & USB_DIR_IN) ep += 16; /* in case of ep init table bugs */ BUG_ON(ep->name[0]); /* chip setup ... bit values are same for IN, OUT */ if (type == USB_ENDPOINT_XFER_ISOC) { switch (maxp) { case 8: epn_rxtx = 0 << 12; break; case 16: epn_rxtx = 1 << 12; break; case 32: epn_rxtx = 2 << 12; break; case 64: epn_rxtx = 3 << 12; break; case 128: epn_rxtx = 4 << 12; break; case 256: epn_rxtx = 5 << 12; break; case 512: epn_rxtx = 6 << 12; break; default: BUG(); } epn_rxtx |= UDC_EPN_RX_ISO; dbuf = 1; } else { /* double-buffering "not supported" on 15xx, * and ignored for PIO-IN on newer chips * (for more reliable behavior) */ if (!use_dma || cpu_is_omap15xx() || cpu_is_omap24xx()) dbuf = 0; switch (maxp) { case 8: epn_rxtx = 0 << 12; break; case 16: epn_rxtx = 1 << 12; break; case 32: epn_rxtx = 2 << 12; break; case 64: epn_rxtx = 3 << 12; break; default: BUG(); } if (dbuf && addr) epn_rxtx |= UDC_EPN_RX_DB; init_timer(&ep->timer); ep->timer.function = pio_out_timer; ep->timer.data = (unsigned long) ep; } if (addr) epn_rxtx |= UDC_EPN_RX_VALID; BUG_ON(buf & 0x07); epn_rxtx |= buf >> 3; DBG("%s addr %02x rxtx %04x maxp %d%s buf %d\n", name, addr, epn_rxtx, maxp, dbuf ? 
"x2" : "", buf); if (addr & USB_DIR_IN) omap_writew(epn_rxtx, UDC_EP_TX(addr & 0xf)); else omap_writew(epn_rxtx, UDC_EP_RX(addr)); /* next endpoint's buffer starts after this one's */ buf += maxp; if (dbuf) buf += maxp; BUG_ON(buf > 2048); /* set up driver data structures */ BUG_ON(strlen(name) >= sizeof ep->name); strlcpy(ep->name, name, sizeof ep->name); INIT_LIST_HEAD(&ep->queue); INIT_LIST_HEAD(&ep->iso); ep->bEndpointAddress = addr; ep->bmAttributes = type; ep->double_buf = dbuf; ep->udc = udc; ep->ep.name = ep->name; ep->ep.ops = &omap_ep_ops; ep->ep.maxpacket = ep->maxpacket = maxp; list_add_tail (&ep->ep.ep_list, &udc->gadget.ep_list); return buf; } static void omap_udc_release(struct device *dev) { complete(udc->done); kfree (udc); udc = NULL; } static int __init omap_udc_setup(struct platform_device *odev, struct otg_transceiver *xceiv) { unsigned tmp, buf; /* abolish any previous hardware state */ omap_writew(0, UDC_SYSCON1); omap_writew(0, UDC_IRQ_EN); omap_writew(UDC_IRQ_SRC_MASK, UDC_IRQ_SRC); omap_writew(0, UDC_DMA_IRQ_EN); omap_writew(0, UDC_RXDMA_CFG); omap_writew(0, UDC_TXDMA_CFG); /* UDC_PULLUP_EN gates the chip clock */ // OTG_SYSCON_1 |= DEV_IDLE_EN; udc = kzalloc(sizeof(*udc), GFP_KERNEL); if (!udc) return -ENOMEM; spin_lock_init (&udc->lock); udc->gadget.ops = &omap_gadget_ops; udc->gadget.ep0 = &udc->ep[0].ep; INIT_LIST_HEAD(&udc->gadget.ep_list); INIT_LIST_HEAD(&udc->iso); udc->gadget.speed = USB_SPEED_UNKNOWN; udc->gadget.name = driver_name; device_initialize(&udc->gadget.dev); dev_set_name(&udc->gadget.dev, "gadget"); udc->gadget.dev.release = omap_udc_release; udc->gadget.dev.parent = &odev->dev; if (use_dma) udc->gadget.dev.dma_mask = odev->dev.dma_mask; udc->transceiver = xceiv; /* ep0 is special; put it right after the SETUP buffer */ buf = omap_ep_setup("ep0", 0, USB_ENDPOINT_XFER_CONTROL, 8 /* after SETUP */, 64 /* maxpacket */, 0); list_del_init(&udc->ep[0].ep.ep_list); /* initially disable all non-ep0 endpoints */ for (tmp = 1; 
tmp < 15; tmp++) { omap_writew(0, UDC_EP_RX(tmp)); omap_writew(0, UDC_EP_TX(tmp)); } #define OMAP_BULK_EP(name,addr) \ buf = omap_ep_setup(name "-bulk", addr, \ USB_ENDPOINT_XFER_BULK, buf, 64, 1); #define OMAP_INT_EP(name,addr, maxp) \ buf = omap_ep_setup(name "-int", addr, \ USB_ENDPOINT_XFER_INT, buf, maxp, 0); #define OMAP_ISO_EP(name,addr, maxp) \ buf = omap_ep_setup(name "-iso", addr, \ USB_ENDPOINT_XFER_ISOC, buf, maxp, 1); switch (fifo_mode) { case 0: OMAP_BULK_EP("ep1in", USB_DIR_IN | 1); OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2); OMAP_INT_EP("ep3in", USB_DIR_IN | 3, 16); break; case 1: OMAP_BULK_EP("ep1in", USB_DIR_IN | 1); OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2); OMAP_INT_EP("ep9in", USB_DIR_IN | 9, 16); OMAP_BULK_EP("ep3in", USB_DIR_IN | 3); OMAP_BULK_EP("ep4out", USB_DIR_OUT | 4); OMAP_INT_EP("ep10in", USB_DIR_IN | 10, 16); OMAP_BULK_EP("ep5in", USB_DIR_IN | 5); OMAP_BULK_EP("ep5out", USB_DIR_OUT | 5); OMAP_INT_EP("ep11in", USB_DIR_IN | 11, 16); OMAP_BULK_EP("ep6in", USB_DIR_IN | 6); OMAP_BULK_EP("ep6out", USB_DIR_OUT | 6); OMAP_INT_EP("ep12in", USB_DIR_IN | 12, 16); OMAP_BULK_EP("ep7in", USB_DIR_IN | 7); OMAP_BULK_EP("ep7out", USB_DIR_OUT | 7); OMAP_INT_EP("ep13in", USB_DIR_IN | 13, 16); OMAP_INT_EP("ep13out", USB_DIR_OUT | 13, 16); OMAP_BULK_EP("ep8in", USB_DIR_IN | 8); OMAP_BULK_EP("ep8out", USB_DIR_OUT | 8); OMAP_INT_EP("ep14in", USB_DIR_IN | 14, 16); OMAP_INT_EP("ep14out", USB_DIR_OUT | 14, 16); OMAP_BULK_EP("ep15in", USB_DIR_IN | 15); OMAP_BULK_EP("ep15out", USB_DIR_OUT | 15); break; #ifdef USE_ISO case 2: /* mixed iso/bulk */ OMAP_ISO_EP("ep1in", USB_DIR_IN | 1, 256); OMAP_ISO_EP("ep2out", USB_DIR_OUT | 2, 256); OMAP_ISO_EP("ep3in", USB_DIR_IN | 3, 128); OMAP_ISO_EP("ep4out", USB_DIR_OUT | 4, 128); OMAP_INT_EP("ep5in", USB_DIR_IN | 5, 16); OMAP_BULK_EP("ep6in", USB_DIR_IN | 6); OMAP_BULK_EP("ep7out", USB_DIR_OUT | 7); OMAP_INT_EP("ep8in", USB_DIR_IN | 8, 16); break; case 3: /* mixed bulk/iso */ OMAP_BULK_EP("ep1in", USB_DIR_IN | 1); 
OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2); OMAP_INT_EP("ep3in", USB_DIR_IN | 3, 16); OMAP_BULK_EP("ep4in", USB_DIR_IN | 4); OMAP_BULK_EP("ep5out", USB_DIR_OUT | 5); OMAP_INT_EP("ep6in", USB_DIR_IN | 6, 16); OMAP_ISO_EP("ep7in", USB_DIR_IN | 7, 256); OMAP_ISO_EP("ep8out", USB_DIR_OUT | 8, 256); OMAP_INT_EP("ep9in", USB_DIR_IN | 9, 16); break; #endif /* add more modes as needed */ default: ERR("unsupported fifo_mode #%d\n", fifo_mode); return -ENODEV; } omap_writew(UDC_CFG_LOCK|UDC_SELF_PWR, UDC_SYSCON1); INFO("fifo mode %d, %d bytes not used\n", fifo_mode, 2048 - buf); return 0; } static int __init omap_udc_probe(struct platform_device *pdev) { int status = -ENODEV; int hmc; struct otg_transceiver *xceiv = NULL; const char *type = NULL; struct omap_usb_config *config = pdev->dev.platform_data; struct clk *dc_clk; struct clk *hhc_clk; /* NOTE: "knows" the order of the resources! */ if (!request_mem_region(pdev->resource[0].start, pdev->resource[0].end - pdev->resource[0].start + 1, driver_name)) { DBG("request_mem_region failed\n"); return -EBUSY; } if (cpu_is_omap16xx()) { dc_clk = clk_get(&pdev->dev, "usb_dc_ck"); hhc_clk = clk_get(&pdev->dev, "usb_hhc_ck"); BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk)); /* can't use omap_udc_enable_clock yet */ clk_enable(dc_clk); clk_enable(hhc_clk); udelay(100); } if (cpu_is_omap24xx()) { dc_clk = clk_get(&pdev->dev, "usb_fck"); hhc_clk = clk_get(&pdev->dev, "usb_l4_ick"); BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk)); /* can't use omap_udc_enable_clock yet */ clk_enable(dc_clk); clk_enable(hhc_clk); udelay(100); } if (cpu_is_omap7xx()) { dc_clk = clk_get(&pdev->dev, "usb_dc_ck"); hhc_clk = clk_get(&pdev->dev, "l3_ocpi_ck"); BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk)); /* can't use omap_udc_enable_clock yet */ clk_enable(dc_clk); clk_enable(hhc_clk); udelay(100); } INFO("OMAP UDC rev %d.%d%s\n", omap_readw(UDC_REV) >> 4, omap_readw(UDC_REV) & 0xf, config->otg ? 
", Mini-AB" : ""); /* use the mode given to us by board init code */ if (cpu_is_omap15xx()) { hmc = HMC_1510; type = "(unknown)"; if (machine_without_vbus_sense()) { /* just set up software VBUS detect, and then * later rig it so we always report VBUS. * FIXME without really sensing VBUS, we can't * know when to turn PULLUP_EN on/off; and that * means we always "need" the 48MHz clock. */ u32 tmp = omap_readl(FUNC_MUX_CTRL_0); tmp &= ~VBUS_CTRL_1510; omap_writel(tmp, FUNC_MUX_CTRL_0); tmp |= VBUS_MODE_1510; tmp &= ~VBUS_CTRL_1510; omap_writel(tmp, FUNC_MUX_CTRL_0); } } else { /* The transceiver may package some GPIO logic or handle * loopback and/or transceiverless setup; if we find one, * use it. Except for OTG, we don't _need_ to talk to one; * but not having one probably means no VBUS detection. */ xceiv = otg_get_transceiver(); if (xceiv) type = xceiv->label; else if (config->otg) { DBG("OTG requires external transceiver!\n"); goto cleanup0; } hmc = HMC_1610; if (cpu_is_omap24xx()) { /* this could be transceiverless in one of the * "we don't need to know" modes. 
*/ type = "external"; goto known; } switch (hmc) { case 0: /* POWERUP DEFAULT == 0 */ case 4: case 12: case 20: if (!cpu_is_omap1710()) { type = "integrated"; break; } /* FALL THROUGH */ case 3: case 11: case 16: case 19: case 25: if (!xceiv) { DBG("external transceiver not registered!\n"); type = "unknown"; } break; case 21: /* internal loopback */ type = "loopback"; break; case 14: /* transceiverless */ if (cpu_is_omap1710()) goto bad_on_1710; /* FALL THROUGH */ case 13: case 15: type = "no"; break; default: bad_on_1710: ERR("unrecognized UDC HMC mode %d\n", hmc); goto cleanup0; } } known: INFO("hmc mode %d, %s transceiver\n", hmc, type); /* a "gadget" abstracts/virtualizes the controller */ status = omap_udc_setup(pdev, xceiv); if (status) { goto cleanup0; } xceiv = NULL; // "udc" is now valid pullup_disable(udc); #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) udc->gadget.is_otg = (config->otg != 0); #endif /* starting with omap1710 es2.0, clear toggle is a separate bit */ if (omap_readw(UDC_REV) >= 0x61) udc->clr_halt = UDC_RESET_EP | UDC_CLRDATA_TOGGLE; else udc->clr_halt = UDC_RESET_EP; /* USB general purpose IRQ: ep0, state changes, dma, etc */ status = request_irq(pdev->resource[1].start, omap_udc_irq, IRQF_SAMPLE_RANDOM, driver_name, udc); if (status != 0) { ERR("can't get irq %d, err %d\n", (int) pdev->resource[1].start, status); goto cleanup1; } /* USB "non-iso" IRQ (PIO for all but ep0) */ status = request_irq(pdev->resource[2].start, omap_udc_pio_irq, IRQF_SAMPLE_RANDOM, "omap_udc pio", udc); if (status != 0) { ERR("can't get irq %d, err %d\n", (int) pdev->resource[2].start, status); goto cleanup2; } #ifdef USE_ISO status = request_irq(pdev->resource[3].start, omap_udc_iso_irq, IRQF_DISABLED, "omap_udc iso", udc); if (status != 0) { ERR("can't get irq %d, err %d\n", (int) pdev->resource[3].start, status); goto cleanup3; } #endif if (cpu_is_omap16xx() || cpu_is_omap7xx()) { udc->dc_clk = dc_clk; udc->hhc_clk = hhc_clk; 
clk_disable(hhc_clk);
		clk_disable(dc_clk);
	}
	if (cpu_is_omap24xx()) {
		udc->dc_clk = dc_clk;
		udc->hhc_clk = hhc_clk;
		/* FIXME OMAP2 don't release hhc & dc clock */
#if 0
		clk_disable(hhc_clk);
		clk_disable(dc_clk);
#endif
	}

	create_proc_file();
	status = device_add(&udc->gadget.dev);
	if (!status)
		return status;

	/* If fail, fall through */
#ifdef	USE_ISO
cleanup3:
	free_irq(pdev->resource[2].start, udc);
#endif

cleanup2:
	free_irq(pdev->resource[1].start, udc);

cleanup1:
	kfree(udc);
	udc = NULL;

cleanup0:
	if (xceiv)
		otg_put_transceiver(xceiv);

	if (cpu_is_omap16xx() || cpu_is_omap24xx() || cpu_is_omap7xx()) {
		clk_disable(hhc_clk);
		clk_disable(dc_clk);
		clk_put(hhc_clk);
		clk_put(dc_clk);
	}

	release_mem_region(pdev->resource[0].start,
			pdev->resource[0].end - pdev->resource[0].start + 1);

	return status;
}

/*
 * Tear down the controller: only allowed while no gadget driver is
 * bound.  Waits (via udc->done) for omap_udc_release() to run after
 * device_unregister() drops the last reference.
 */
static int __exit omap_udc_remove(struct platform_device *pdev)
{
	DECLARE_COMPLETION_ONSTACK(done);

	if (!udc)
		return -ENODEV;
	if (udc->driver)
		return -EBUSY;

	udc->done = &done;

	pullup_disable(udc);
	if (udc->transceiver) {
		otg_put_transceiver(udc->transceiver);
		udc->transceiver = NULL;
	}
	omap_writew(0, UDC_SYSCON1);

	remove_proc_file();

#ifdef	USE_ISO
	free_irq(pdev->resource[3].start, udc);
#endif
	free_irq(pdev->resource[2].start, udc);
	free_irq(pdev->resource[1].start, udc);

	if (udc->dc_clk) {
		if (udc->clk_requested)
			omap_udc_enable_clock(0);
		clk_put(udc->hhc_clk);
		clk_put(udc->dc_clk);
	}

	release_mem_region(pdev->resource[0].start,
			pdev->resource[0].end - pdev->resource[0].start + 1);

	device_unregister(&udc->gadget.dev);
	/* blocks until omap_udc_release() completes us */
	wait_for_completion(&done);

	return 0;
}

/* suspend/resume/wakeup from sysfs (echo > power/state) or when the
 * system is forced into deep sleep
 *
 * REVISIT we should probably reject suspend requests when there's a host
 * session active, rather than disconnecting, at least on boards that can
 * report VBUS irqs (UDC_DEVSTAT.UDC_ATT).  And in any case, we need to
 * make host resumes and VBUS detection trigger OMAP wakeup events; that
 * may involve talking to an external transceiver (e.g. isp1301).
 */

static int omap_udc_suspend(struct platform_device *dev, pm_message_t message)
{
	u32	devstat;

	devstat = omap_readw(UDC_DEVSTAT);

	/* we're requesting 48 MHz clock if the pullup is enabled
	 * (== we're attached to the host) and we're not suspended,
	 * which would prevent entry to deep sleep...
	 */
	if ((devstat & UDC_ATT) != 0 && (devstat & UDC_SUS) == 0) {
		WARNING("session active; suspend requires disconnect\n");
		omap_pullup(&udc->gadget, 0);
	}

	return 0;
}

static int omap_udc_resume(struct platform_device *dev)
{
	DBG("resume + wakeup/SRP\n");
	omap_pullup(&udc->gadget, 1);

	/* maybe the host would enumerate us if we nudged it */
	msleep(100);
	return omap_wakeup(&udc->gadget);
}

/*-------------------------------------------------------------------------*/

static struct platform_driver udc_driver = {
	.remove		= __exit_p(omap_udc_remove),
	.suspend	= omap_udc_suspend,
	.resume		= omap_udc_resume,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= (char *) driver_name,
	},
};

static int __init udc_init(void)
{
	/* Disable DMA for omap7xx -- it doesn't work right. */
	if (cpu_is_omap7xx())
		use_dma = 0;

	INFO("%s, version: " DRIVER_VERSION
#ifdef	USE_ISO
		" (iso)"
#endif
		"%s\n", driver_desc,
		use_dma ?  " (dma)" : "");
	return platform_driver_probe(&udc_driver, omap_udc_probe);
}
module_init(udc_init);

static void __exit udc_exit(void)
{
	platform_driver_unregister(&udc_driver);
}
module_exit(udc_exit);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:omap_udc");
gpl-2.0
kasperhettinga/p4wifi_stock
drivers/input/touchscreen/lpc32xx_ts.c
3117
10535
/*
 * LPC32xx built-in touchscreen driver
 *
 * Copyright (C) 2010 NXP Semiconductors
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>

/*
 * Touchscreen controller register offsets
 */
#define LPC32XX_TSC_STAT			0x00
#define LPC32XX_TSC_SEL				0x04
#define LPC32XX_TSC_CON				0x08
#define LPC32XX_TSC_FIFO			0x0C
#define LPC32XX_TSC_DTR				0x10
#define LPC32XX_TSC_RTR				0x14
#define LPC32XX_TSC_UTR				0x18
#define LPC32XX_TSC_TTR				0x1C
#define LPC32XX_TSC_DXP				0x20
#define LPC32XX_TSC_MIN_X			0x24
#define LPC32XX_TSC_MAX_X			0x28
#define LPC32XX_TSC_MIN_Y			0x2C
#define LPC32XX_TSC_MAX_Y			0x30
#define LPC32XX_TSC_AUX_UTR			0x34
#define LPC32XX_TSC_AUX_MIN			0x38
#define LPC32XX_TSC_AUX_MAX			0x3C

#define LPC32XX_TSC_STAT_FIFO_OVRRN		(1 << 8)
#define LPC32XX_TSC_STAT_FIFO_EMPTY		(1 << 7)

#define LPC32XX_TSC_SEL_DEFVAL			0x0284

#define LPC32XX_TSC_ADCCON_IRQ_TO_FIFO_4	(0x1 << 11)
#define LPC32XX_TSC_ADCCON_X_SAMPLE_SIZE(s)	((10 - (s)) << 7)
#define LPC32XX_TSC_ADCCON_Y_SAMPLE_SIZE(s)	((10 - (s)) << 4)
#define LPC32XX_TSC_ADCCON_POWER_UP		(1 << 2)
#define LPC32XX_TSC_ADCCON_AUTO_EN		(1 << 0)

/* set in a FIFO word while the pen is up */
#define LPC32XX_TSC_FIFO_TS_P_LEVEL		(1 << 31)
#define LPC32XX_TSC_FIFO_NORMALIZE_X_VAL(x)	(((x) & 0x03FF0000) >> 16)
#define LPC32XX_TSC_FIFO_NORMALIZE_Y_VAL(y)	((y) & 0x000003FF)

#define LPC32XX_TSC_ADCDAT_VALUE_MASK		0x000003FF

#define LPC32XX_TSC_MIN_XY_VAL			0x0
#define LPC32XX_TSC_MAX_XY_VAL			0x3FF

#define MOD_NAME "ts-lpc32xx"

#define tsc_readl(dev, reg) \
	__raw_readl((dev)->tsc_base + (reg))
#define tsc_writel(dev, reg, val) \
	__raw_writel((val), (dev)->tsc_base + (reg))

/* per-device state: input device, mapped registers, IRQ and clock */
struct lpc32xx_tsc {
	struct input_dev *dev;
	void __iomem *tsc_base;
	int irq;
	struct clk *clk;
};

/* Pop and discard FIFO words until the controller reports it empty. */
static void lpc32xx_fifo_clear(struct lpc32xx_tsc *tsc)
{
	while (!(tsc_readl(tsc, LPC32XX_TSC_STAT) &
			LPC32XX_TSC_STAT_FIFO_EMPTY))
		tsc_readl(tsc, LPC32XX_TSC_FIFO);
}

/*
 * Sample-ready interrupt: drains up to 4 FIFO words (one measurement),
 * inverts the raw ADC values into screen coordinates, and reports the
 * averaged position while the pen is down, or a pen-up otherwise.
 * On FIFO overrun the whole measurement is discarded.
 */
static irqreturn_t lpc32xx_ts_interrupt(int irq, void *dev_id)
{
	u32 tmp, rv[4], xs[4], ys[4];
	int idx;
	struct lpc32xx_tsc *tsc = dev_id;
	struct input_dev *input = tsc->dev;

	tmp = tsc_readl(tsc, LPC32XX_TSC_STAT);
	if (tmp & LPC32XX_TSC_STAT_FIFO_OVRRN) {
		/* FIFO overflow - throw away samples */
		lpc32xx_fifo_clear(tsc);
		return IRQ_HANDLED;
	}

	/*
	 * Gather and normalize 4 samples. Pen-up events may have less
	 * than 4 samples, but its ok to pop 4 and let the last sample
	 * pen status check drop the samples.
	 */
	idx = 0;
	while (idx < 4 &&
	       !(tsc_readl(tsc, LPC32XX_TSC_STAT) &
			LPC32XX_TSC_STAT_FIFO_EMPTY)) {
		tmp = tsc_readl(tsc, LPC32XX_TSC_FIFO);
		/* raw values grow the "wrong" way; flip them into range */
		xs[idx] = LPC32XX_TSC_ADCDAT_VALUE_MASK -
			LPC32XX_TSC_FIFO_NORMALIZE_X_VAL(tmp);
		ys[idx] = LPC32XX_TSC_ADCDAT_VALUE_MASK -
			LPC32XX_TSC_FIFO_NORMALIZE_Y_VAL(tmp);
		rv[idx] = tmp;
		idx++;
	}

	/* Data is only valid if pen is still down in last sample */
	if (!(rv[3] & LPC32XX_TSC_FIFO_TS_P_LEVEL) && idx == 4) {
		/* Use average of 2nd and 3rd sample for position */
		input_report_abs(input, ABS_X, (xs[1] + xs[2]) / 2);
		input_report_abs(input, ABS_Y, (ys[1] + ys[2]) / 2);
		input_report_key(input, BTN_TOUCH, 1);
	} else {
		input_report_key(input, BTN_TOUCH, 0);
	}

	input_sync(input);

	return IRQ_HANDLED;
}

/* Stop automatic conversions and gate the controller clock. */
static void lpc32xx_stop_tsc(struct lpc32xx_tsc *tsc)
{
	/* Disable auto mode */
	tsc_writel(tsc, LPC32XX_TSC_CON,
		tsc_readl(tsc, LPC32XX_TSC_CON) &
			~LPC32XX_TSC_ADCCON_AUTO_EN);

	clk_disable(tsc->clk);
}

/*
 * Program the controller's FIFO depth, sample sizes, coordinate limits
 * and timing, then enable automatic touch-event capture.
 */
static void lpc32xx_setup_tsc(struct lpc32xx_tsc *tsc)
{
	u32 tmp;
clk_enable(tsc->clk); tmp = tsc_readl(tsc, LPC32XX_TSC_CON) & ~LPC32XX_TSC_ADCCON_POWER_UP; /* Set the TSC FIFO depth to 4 samples @ 10-bits per sample (max) */ tmp = LPC32XX_TSC_ADCCON_IRQ_TO_FIFO_4 | LPC32XX_TSC_ADCCON_X_SAMPLE_SIZE(10) | LPC32XX_TSC_ADCCON_Y_SAMPLE_SIZE(10); tsc_writel(tsc, LPC32XX_TSC_CON, tmp); /* These values are all preset */ tsc_writel(tsc, LPC32XX_TSC_SEL, LPC32XX_TSC_SEL_DEFVAL); tsc_writel(tsc, LPC32XX_TSC_MIN_X, LPC32XX_TSC_MIN_XY_VAL); tsc_writel(tsc, LPC32XX_TSC_MAX_X, LPC32XX_TSC_MAX_XY_VAL); tsc_writel(tsc, LPC32XX_TSC_MIN_Y, LPC32XX_TSC_MIN_XY_VAL); tsc_writel(tsc, LPC32XX_TSC_MAX_Y, LPC32XX_TSC_MAX_XY_VAL); /* Aux support is not used */ tsc_writel(tsc, LPC32XX_TSC_AUX_UTR, 0); tsc_writel(tsc, LPC32XX_TSC_AUX_MIN, 0); tsc_writel(tsc, LPC32XX_TSC_AUX_MAX, 0); /* * Set sample rate to about 240Hz per X/Y pair. A single measurement * consists of 4 pairs which gives about a 60Hz sample rate based on * a stable 32768Hz clock source. Values are in clocks. * Rate is (32768 / (RTR + XCONV + RTR + YCONV + DXP + TTR + UTR) / 4 */ tsc_writel(tsc, LPC32XX_TSC_RTR, 0x2); tsc_writel(tsc, LPC32XX_TSC_DTR, 0x2); tsc_writel(tsc, LPC32XX_TSC_TTR, 0x10); tsc_writel(tsc, LPC32XX_TSC_DXP, 0x4); tsc_writel(tsc, LPC32XX_TSC_UTR, 88); lpc32xx_fifo_clear(tsc); /* Enable automatic ts event capture */ tsc_writel(tsc, LPC32XX_TSC_CON, tmp | LPC32XX_TSC_ADCCON_AUTO_EN); } static int lpc32xx_ts_open(struct input_dev *dev) { struct lpc32xx_tsc *tsc = input_get_drvdata(dev); lpc32xx_setup_tsc(tsc); return 0; } static void lpc32xx_ts_close(struct input_dev *dev) { struct lpc32xx_tsc *tsc = input_get_drvdata(dev); lpc32xx_stop_tsc(tsc); } static int __devinit lpc32xx_ts_probe(struct platform_device *pdev) { struct lpc32xx_tsc *tsc; struct input_dev *input; struct resource *res; resource_size_t size; int irq; int error; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "Can't get memory resource\n"); return -ENOENT; } irq = 
platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "Can't get interrupt resource\n"); return irq; } tsc = kzalloc(sizeof(*tsc), GFP_KERNEL); input = input_allocate_device(); if (!tsc || !input) { dev_err(&pdev->dev, "failed allocating memory\n"); error = -ENOMEM; goto err_free_mem; } tsc->dev = input; tsc->irq = irq; size = resource_size(res); if (!request_mem_region(res->start, size, pdev->name)) { dev_err(&pdev->dev, "TSC registers are not free\n"); error = -EBUSY; goto err_free_mem; } tsc->tsc_base = ioremap(res->start, size); if (!tsc->tsc_base) { dev_err(&pdev->dev, "Can't map memory\n"); error = -ENOMEM; goto err_release_mem; } tsc->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(tsc->clk)) { dev_err(&pdev->dev, "failed getting clock\n"); error = PTR_ERR(tsc->clk); goto err_unmap; } input->name = MOD_NAME; input->phys = "lpc32xx/input0"; input->id.bustype = BUS_HOST; input->id.vendor = 0x0001; input->id.product = 0x0002; input->id.version = 0x0100; input->dev.parent = &pdev->dev; input->open = lpc32xx_ts_open; input->close = lpc32xx_ts_close; input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(input, ABS_X, LPC32XX_TSC_MIN_XY_VAL, LPC32XX_TSC_MAX_XY_VAL, 0, 0); input_set_abs_params(input, ABS_Y, LPC32XX_TSC_MIN_XY_VAL, LPC32XX_TSC_MAX_XY_VAL, 0, 0); input_set_drvdata(input, tsc); error = request_irq(tsc->irq, lpc32xx_ts_interrupt, IRQF_DISABLED, pdev->name, tsc); if (error) { dev_err(&pdev->dev, "failed requesting interrupt\n"); goto err_put_clock; } error = input_register_device(input); if (error) { dev_err(&pdev->dev, "failed registering input device\n"); goto err_free_irq; } platform_set_drvdata(pdev, tsc); device_init_wakeup(&pdev->dev, 1); return 0; err_free_irq: free_irq(tsc->irq, tsc); err_put_clock: clk_put(tsc->clk); err_unmap: iounmap(tsc->tsc_base); err_release_mem: release_mem_region(res->start, size); err_free_mem: input_free_device(input); kfree(tsc); return 
error; } static int __devexit lpc32xx_ts_remove(struct platform_device *pdev) { struct lpc32xx_tsc *tsc = platform_get_drvdata(pdev); struct resource *res; device_init_wakeup(&pdev->dev, 0); free_irq(tsc->irq, tsc); input_unregister_device(tsc->dev); clk_put(tsc->clk); iounmap(tsc->tsc_base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); kfree(tsc); return 0; } #ifdef CONFIG_PM static int lpc32xx_ts_suspend(struct device *dev) { struct lpc32xx_tsc *tsc = dev_get_drvdata(dev); struct input_dev *input = tsc->dev; /* * Suspend and resume can be called when the device hasn't been * enabled. If there are no users that have the device open, then * avoid calling the TSC stop and start functions as the TSC * isn't yet clocked. */ mutex_lock(&input->mutex); if (input->users) { if (device_may_wakeup(dev)) enable_irq_wake(tsc->irq); else lpc32xx_stop_tsc(tsc); } mutex_unlock(&input->mutex); return 0; } static int lpc32xx_ts_resume(struct device *dev) { struct lpc32xx_tsc *tsc = dev_get_drvdata(dev); struct input_dev *input = tsc->dev; mutex_lock(&input->mutex); if (input->users) { if (device_may_wakeup(dev)) disable_irq_wake(tsc->irq); else lpc32xx_setup_tsc(tsc); } mutex_unlock(&input->mutex); return 0; } static const struct dev_pm_ops lpc32xx_ts_pm_ops = { .suspend = lpc32xx_ts_suspend, .resume = lpc32xx_ts_resume, }; #define LPC32XX_TS_PM_OPS (&lpc32xx_ts_pm_ops) #else #define LPC32XX_TS_PM_OPS NULL #endif static struct platform_driver lpc32xx_ts_driver = { .probe = lpc32xx_ts_probe, .remove = __devexit_p(lpc32xx_ts_remove), .driver = { .name = MOD_NAME, .owner = THIS_MODULE, .pm = LPC32XX_TS_PM_OPS, }, }; static int __init lpc32xx_ts_init(void) { return platform_driver_register(&lpc32xx_ts_driver); } module_init(lpc32xx_ts_init); static void __exit lpc32xx_ts_exit(void) { platform_driver_unregister(&lpc32xx_ts_driver); } module_exit(lpc32xx_ts_exit); MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com"); 
MODULE_DESCRIPTION("LPC32XX TSC Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:lpc32xx_ts");
gpl-2.0
lyapota/m8_sense_marshmallow
arch/arm/mach-s5p64x0/setup-spi.c
4909
1344
/* linux/arch/arm/mach-s5p64x0/setup-spi.c * * Copyright (C) 2011 Samsung Electronics Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/platform_device.h> #include <linux/io.h> #include <plat/gpio-cfg.h> #include <plat/cpu.h> #include <plat/s3c64xx-spi.h> #ifdef CONFIG_S3C64XX_DEV_SPI0 struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = { .fifo_lvl_mask = 0x1ff, .rx_lvl_offset = 15, .tx_st_done = 25, }; int s3c64xx_spi0_cfg_gpio(struct platform_device *dev) { if (soc_is_s5p6450()) s3c_gpio_cfgall_range(S5P6450_GPC(0), 3, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); else s3c_gpio_cfgall_range(S5P6440_GPC(0), 3, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); return 0; } #endif #ifdef CONFIG_S3C64XX_DEV_SPI1 struct s3c64xx_spi_info s3c64xx_spi1_pdata __initdata = { .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 15, .tx_st_done = 25, }; int s3c64xx_spi1_cfg_gpio(struct platform_device *dev) { if (soc_is_s5p6450()) s3c_gpio_cfgall_range(S5P6450_GPC(4), 3, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); else s3c_gpio_cfgall_range(S5P6440_GPC(4), 3, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); return 0; } #endif
gpl-2.0
brymaster5000/m7-gpe-444
arch/arm/mach-pxa/leds-idp.c
4909
2143
/*
 * linux/arch/arm/mach-pxa/leds-idp.c
 *
 * Copyright (C) 2000 John Dorsey <john+@cs.cmu.edu>
 *
 * Copyright (c) 2001 Jeff Sutherland <jeffs@accelent.com>
 *
 * Original (leds-footbridge.c) by Russell King
 *
 * Macros for actual LED manipulation should be in machine specific
 * files in this 'mach' directory.
 */

#include <linux/init.h>

#include <mach/hardware.h>
#include <asm/leds.h>

#include <mach/pxa25x.h>
#include <mach/idp.h>

#include "leds.h"

/* Bits of the software LED state machine below. */
#define LED_STATE_ENABLED	1	/* LEDs driven by this code */
#define LED_STATE_CLAIMED	2	/* LEDs owned by a claimant */

/* Current state-machine flags and the desired hardware LED bit mask. */
static unsigned int led_state;
static unsigned int hw_led_state;

/*
 * Handle a generic ARM LED event for the Accelent IDP board.
 *
 * Translates the abstract led_event_t into the board's heartbeat
 * (IDP_HB_LED) and busy (IDP_BUSY_LED) bits, then writes the result
 * to the CPLD LED control register. Runs with local interrupts
 * disabled since it is a shared read-modify-write of led_state,
 * hw_led_state and the CPLD register.
 */
void idp_leds_event(led_event_t evt)
{
	unsigned long flags;

	local_irq_save(flags);

	switch (evt) {
	case led_start:
		/* Enable LED handling with both LEDs initially lit. */
		hw_led_state = IDP_HB_LED | IDP_BUSY_LED;
		led_state = LED_STATE_ENABLED;
		break;

	case led_stop:
		led_state &= ~LED_STATE_ENABLED;
		break;

	case led_claim:
		/* A claimant takes over; reset to the default pattern. */
		led_state |= LED_STATE_CLAIMED;
		hw_led_state = IDP_HB_LED | IDP_BUSY_LED;
		break;

	case led_release:
		led_state &= ~LED_STATE_CLAIMED;
		hw_led_state = IDP_HB_LED | IDP_BUSY_LED;
		break;

#ifdef CONFIG_LEDS_TIMER
	case led_timer:
		/* Heartbeat: toggle HB LED unless someone claimed the LEDs. */
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state ^= IDP_HB_LED;
		break;
#endif

#ifdef CONFIG_LEDS_CPU
	case led_idle_start:
		/* CPU going idle: clear the busy LED (when unclaimed). */
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state &= ~IDP_BUSY_LED;
		break;

	case led_idle_end:
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state |= IDP_BUSY_LED;
		break;
#endif

	case led_halted:
		break;

	/* Claimed-mode color controls: green -> HB LED, red -> busy LED. */
	case led_green_on:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state |= IDP_HB_LED;
		break;

	case led_green_off:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state &= ~IDP_HB_LED;
		break;

	/* No amber LED on this board. */
	case led_amber_on:
		break;

	case led_amber_off:
		break;

	case led_red_on:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state |= IDP_BUSY_LED;
		break;

	case led_red_off:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state &= ~IDP_BUSY_LED;
		break;

	default:
		break;
	}

	/*
	 * Push the new state to the CPLD register. The register appears
	 * to be active-low: set bits mask the LEDs on, then the desired
	 * hw_led_state bits are cleared to light them. When disabled,
	 * all LED bits are set (LEDs off).
	 * NOTE(review): active-low polarity inferred from the masking
	 * pattern here - confirm against the IDP CPLD documentation.
	 */
	if  (led_state & LED_STATE_ENABLED)
		IDP_CPLD_LED_CONTROL =
			( (IDP_CPLD_LED_CONTROL | IDP_LEDS_MASK) & ~hw_led_state);
	else
		IDP_CPLD_LED_CONTROL |= IDP_LEDS_MASK;

	local_irq_restore(flags);
}
gpl-2.0