repo_name
string
path
string
copies
string
size
string
content
string
license
string
allan888/Linux_kernel_asynchronous
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
552
5733
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 * * Authors: Ben Skeggs */
#include "priv.h"
#include <core/client.h>
#include <core/device.h>
#include <nvif/class.h>
#include <nvif/unpack.h>

/* Per-device private state; wraps the common nvkm display base object. */
struct nv04_disp_priv {
	struct nvkm_disp base;
};

/*
 * NV04_DISP_SCANOUTPOS method: report CRTC blanking/total timings and
 * the current raster position for @head, sampling the raster register
 * between two timestamps so the caller can derive a precise vblank time.
 */
static int
nv04_disp_scanoutpos(struct nvkm_object *object, struct nv04_disp_priv *priv,
		     void *data, u32 size, int head)
{
	const u32 hoff = head * 0x2000;	/* per-head register stride */
	union {
		struct nv04_disp_scanoutpos_v0 v0;
	} *args = data;
	u32 line;
	int ret;	/* assigned inside nvif_unpack() on failure */

	nv_ioctl(object, "disp scanoutpos size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "disp scanoutpos vers %d\n",
			 args->v0.version);
		args->v0.vblanks = nv_rd32(priv, 0x680800 + hoff) & 0xffff;
		args->v0.vtotal = nv_rd32(priv, 0x680804 + hoff) & 0xffff;
		args->v0.vblanke = args->v0.vtotal - 1;
		args->v0.hblanks = nv_rd32(priv, 0x680820 + hoff) & 0xffff;
		args->v0.htotal = nv_rd32(priv, 0x680824 + hoff) & 0xffff;
		args->v0.hblanke = args->v0.htotal - 1;

		/*
		 * If output is vga instead of digital then vtotal/htotal is
		 * invalid so we have to give up and trigger the timestamping
		 * fallback in the drm core.
		 */
		if (!args->v0.vtotal || !args->v0.htotal)
			return -ENOTSUPP;

		/* Sample the raster position between two clock reads so the
		 * caller knows the time window the sample belongs to.
		 */
		args->v0.time[0] = ktime_to_ns(ktime_get());
		line = nv_rd32(priv, 0x600868 + hoff);
		args->v0.time[1] = ktime_to_ns(ktime_get());
		args->v0.hline = (line & 0xffff0000) >> 16;
		args->v0.vline = (line & 0x0000ffff);
	} else
		return ret;

	return 0;
}

/*
 * Object method dispatcher: unpacks the method header, validates the
 * head index, and routes to the per-method implementation.
 */
static int
nv04_disp_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	union {
		struct nv04_disp_mthd_v0 v0;
	} *args = data;
	struct nv04_disp_priv *priv = (void *)object->engine;
	int head, ret;	/* ret assigned inside nvif_unpack() */

	nv_ioctl(object, "disp mthd size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, true)) {
		nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
			 args->v0.version, args->v0.method, args->v0.head);
		mthd = args->v0.method;
		head = args->v0.head;
	} else
		return ret;

	/* This object was created with two heads (see nv04_disp_ctor). */
	if (head < 0 || head >= 2)
		return -ENXIO;

	switch (mthd) {
	case NV04_DISP_SCANOUTPOS:
		return nv04_disp_scanoutpos(object, priv, data, size, head);
	default:
		break;
	}

	return -EINVAL;
}

static struct nvkm_ofuncs
nv04_disp_ofuncs = {
	.ctor = _nvkm_object_ctor,
	.dtor = nvkm_object_destroy,
	.init = nvkm_object_init,
	.fini = nvkm_object_fini,
	.mthd = nv04_disp_mthd,
	.ntfy = nvkm_disp_ntfy,
};

static struct nvkm_oclass
nv04_disp_sclass[] = {
	{ NV04_DISP, &nv04_disp_ofuncs },
	{},
};

/*******************************************************************************
 * Display engine implementation
 ******************************************************************************/

/* Enable vblank event delivery for @head (writes the per-head register). */
static void
nv04_disp_vblank_init(struct nvkm_event *event, int type, int head)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000001);
}

/* Disable vblank event delivery for @head. */
static void
nv04_disp_vblank_fini(struct nvkm_event *event, int type, int head)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000000);
}

static const struct nvkm_event_func
nv04_disp_vblank_func = {
	.ctor = nvkm_disp_vblank_ctor,
	.init = nv04_disp_vblank_init,
	.fini = nv04_disp_vblank_fini,
};

/*
 * Top-level interrupt handler: forwards and acks per-CRTC vblank
 * interrupts, and acks PVIDEO interrupts on chipsets that have them.
 */
static void
nv04_disp_intr(struct nvkm_subdev *subdev)
{
	struct nv04_disp_priv *priv = (void *)subdev;
	u32 crtc0 = nv_rd32(priv, 0x600100);
	u32 crtc1 = nv_rd32(priv, 0x602100);
	u32 pvideo;

	if (crtc0 & 0x00000001) {
		nvkm_disp_vblank(&priv->base, 0);
		nv_wr32(priv, 0x600100, 0x00000001);	/* ack CRTC0 */
	}

	if (crtc1 & 0x00000001) {
		nvkm_disp_vblank(&priv->base, 1);
		nv_wr32(priv, 0x602100, 0x00000001);	/* ack CRTC1 */
	}

	/* Only chipsets 0x10..0x40 get the PVIDEO handling here. */
	if (nv_device(priv)->chipset >= 0x10 &&
	    nv_device(priv)->chipset <= 0x40) {
		pvideo = nv_rd32(priv, 0x8100);
		/* NOTE(review): 0x11 appears to mask "expected" bits;
		 * anything else is logged — confirm against hw docs. */
		if (pvideo & ~0x11)
			nv_info(priv, "PVIDEO intr: %08x\n", pvideo);
		nv_wr32(priv, 0x8100, pvideo);	/* ack what we saw */
	}
}

/*
 * Constructor: create the base display engine with 2 heads and hook
 * up the object classes and interrupt handler.
 */
static int
nv04_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv04_disp_priv *priv;
	int ret;

	ret = nvkm_disp_create(parent, engine, oclass, 2, "DISPLAY",
			       "display", &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_engine(priv)->sclass = nv04_disp_sclass;
	nv_subdev(priv)->intr = nv04_disp_intr;
	return 0;
}

struct nvkm_oclass *
nv04_disp_oclass = &(struct nvkm_disp_impl) {
	.base.handle = NV_ENGINE(DISP, 0x04),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_disp_ctor,
		.dtor = _nvkm_disp_dtor,
		.init = _nvkm_disp_init,
		.fini = _nvkm_disp_fini,
	},
	.vblank = &nv04_disp_vblank_func,
}.base;
gpl-2.0
RonGokhale/lge-kernel-pecan
arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
552
8643
/*
 * Pentium 4/Xeon CPU on demand clock modulation/speed scaling
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
 * (C) 2002 Arjan van de Ven <arjanv@redhat.com>
 * (C) 2002 Tora T. Engstad
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * The author(s) of this software shall not be held liable for damages
 * of any nature resulting due to the use of this software. This
 * software is provided AS-IS with no warranties.
 *
 * Date		Errata		Description
 * 20020525	N44, O17	12.5% or 25% DC causes lockup
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/timex.h>

#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/timer.h>

#include "speedstep-lib.h"

#define PFX	"p4-clockmod: "
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
		"p4-clockmod", msg)

/*
 * Duty Cycle (3bits), note DC_DISABLE is not specified in
 * intel docs i just use it to mean disable
 */
enum {
	DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT,
	DC_64PT, DC_75PT, DC_88PT, DC_DISABLE
};

#define DC_ENTRIES	8

/* Per-CPU flag: set when the chip revision has the N44/O17 erratum. */
static int has_N44_O17_errata[NR_CPUS];
/* Unthrottled CPU frequency in kHz, determined at cpu_init time. */
static unsigned int stock_freq;

static struct cpufreq_driver p4clockmod_driver;

static unsigned int cpufreq_p4_get(unsigned int cpu);

/*
 * Program the IA32_THERM_CONTROL MSR on @cpu to duty cycle @newstate
 * (DC_DISABLE clears the modulation-enable bit instead).
 * Returns 0 on success, -EINVAL for offline CPUs or invalid states.
 */
static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
{
	u32 l, h;

	if (!cpu_online(cpu) ||
	    (newstate > DC_DISABLE) || (newstate == DC_RESV))
		return -EINVAL;

	rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);

	if (l & 0x01)
		dprintk("CPU#%d currently thermal throttled\n", cpu);

	/* Erratum N44/O17: 12.5%/25% duty cycles can lock the CPU up,
	 * so bump those requests to the next higher duty cycle. */
	if (has_N44_O17_errata[cpu] &&
	    (newstate == DC_25PT || newstate == DC_DFLT))
		newstate = DC_38PT;

	rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
	if (newstate == DC_DISABLE) {
		dprintk("CPU#%d disabling modulation\n", cpu);
		wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
	} else {
		dprintk("CPU#%d setting duty cycle to %d%%\n",
			cpu, ((125 * newstate) / 10));
		/* bits 63 - 5	: reserved
		 * bit 4	: enable/disable
		 * bits 3-1	: duty cycle
		 * bit 0	: reserved
		 */
		l = (l & ~14);
		l = l | (1<<4) | ((newstate & 0x7)<<1);
		wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
	}

	return 0;
}

/* Frequency table indexed by DC_* state; the 0 entries are filled in
 * from stock_freq during cpu_init. */
static struct cpufreq_frequency_table p4clockmod_table[] = {
	{DC_RESV, CPUFREQ_ENTRY_INVALID},
	{DC_DFLT, 0},
	{DC_25PT, 0},
	{DC_38PT, 0},
	{DC_50PT, 0},
	{DC_64PT, 0},
	{DC_75PT, 0},
	{DC_88PT, 0},
	{DC_DISABLE, 0},
	{DC_RESV, CPUFREQ_TABLE_END},
};

/*
 * cpufreq ->target hook: look up the table entry for @target_freq,
 * send PRECHANGE notifications, apply the duty cycle on every CPU in
 * the policy, then send POSTCHANGE notifications.
 */
static int cpufreq_p4_target(struct cpufreq_policy *policy,
			     unsigned int target_freq,
			     unsigned int relation)
{
	unsigned int newstate = DC_RESV;
	struct cpufreq_freqs freqs;
	int i;

	if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0],
				target_freq, relation, &newstate))
		return -EINVAL;

	freqs.old = cpufreq_p4_get(policy->cpu);
	freqs.new = stock_freq * p4clockmod_table[newstate].index / 8;

	if (freqs.new == freqs.old)
		return 0;

	/* notifiers */
	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	/* run on each logical CPU,
	 * see section 13.15.3 of IA32 Intel Architecture Software
	 * Developer's Manual, Volume 3
	 */
	for_each_cpu(i, policy->cpus)
		cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);

	/* notifiers */
	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}

	return 0;
}

/* cpufreq ->verify hook: clamp policy limits to the frequency table. */
static int cpufreq_p4_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]);
}

/*
 * Identify the CPU model and return its maximum (unthrottled)
 * frequency in kHz via the speedstep library, or 0 if unsupported.
 */
static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
	if (c->x86 == 0x06) {
		if (cpu_has(c, X86_FEATURE_EST))
			printk(KERN_WARNING PFX "Warning: EST-capable CPU "
			       "detected. The acpi-cpufreq module offers "
			       "voltage scaling in addition of frequency "
			       "scaling. You should use that instead of "
			       "p4-clockmod, if possible.\n");
		switch (c->x86_model) {
		case 0x0E: /* Core */
		case 0x0F: /* Core Duo */
		case 0x16: /* Celeron Core */
		case 0x1C: /* Atom */
			p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
			return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
		case 0x0D: /* Pentium M (Dothan) */
			p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
			/* fall through */
		case 0x09: /* Pentium M (Banias) */
			return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
		}
	}

	if (c->x86 != 0xF) {
		if (!cpu_has(c, X86_FEATURE_EST))
			printk(KERN_WARNING PFX "Unknown CPU. "
				"Please send an e-mail to "
				"<cpufreq@vger.kernel.org>\n");
		return 0;
	}

	/* on P-4s, the TSC runs with constant frequency independent whether
	 * throttling is active or not. */
	p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;

	if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
		printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
		       "The speedstep-ich or acpi cpufreq modules offer "
		       "voltage scaling in addition of frequency scaling. "
		       "You should use either one instead of p4-clockmod, "
		       "if possible.\n");
		return speedstep_get_frequency(SPEEDSTEP_CPU_P4M);
	}

	return speedstep_get_frequency(SPEEDSTEP_CPU_P4D);
}

/*
 * cpufreq ->init hook: detect errata, measure the stock frequency,
 * and fill in the per-state frequency table.
 */
static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
{
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	int cpuid = 0;
	unsigned int i;

#ifdef CONFIG_SMP
	/* HT siblings share the thermal-control MSR; manage them together. */
	cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif

	/* Errata workaround */
	cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
	switch (cpuid) {
	case 0x0f07:
	case 0x0f0a:
	case 0x0f11:
	case 0x0f12:
		has_N44_O17_errata[policy->cpu] = 1;
		dprintk("has errata -- disabling low frequencies\n");
	}

	if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D &&
	    c->x86_model < 2) {
		/* switch to maximum frequency and measure result */
		cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
		recalibrate_cpu_khz();
	}
	/* get max frequency */
	stock_freq = cpufreq_p4_get_frequency(c);
	if (!stock_freq)
		return -EINVAL;

	/* table init */
	for (i = 1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
		if ((i < 2) && (has_N44_O17_errata[policy->cpu]))
			p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
		else
			p4clockmod_table[i].frequency = (stock_freq * i)/8;
	}
	cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);

	/* cpuinfo and default policy values */

	/* the transition latency is set to be 1 higher than the maximum
	 * transition latency of the ondemand governor */
	policy->cpuinfo.transition_latency = 10000001;
	policy->cur = stock_freq;

	return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
}

/* cpufreq ->exit hook: release the sysfs frequency-table attribute. */
static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_put_attr(policy->cpu);
	return 0;
}

/*
 * cpufreq ->get hook: read IA32_THERM_CONTROL and convert the current
 * duty-cycle setting back into a frequency in kHz.
 */
static unsigned int cpufreq_p4_get(unsigned int cpu)
{
	u32 l, h;

	rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);

	if (l & 0x10) {		/* modulation enabled: extract bits 3-1 */
		l = l >> 1;
		l &= 0x7;
	} else
		l = DC_DISABLE;

	if (l != DC_DISABLE)
		return stock_freq * l / 8;

	return stock_freq;
}

static struct freq_attr *p4clockmod_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver p4clockmod_driver = {
	.verify		= cpufreq_p4_verify,
	.target		= cpufreq_p4_target,
	.init		= cpufreq_p4_cpu_init,
	.exit		= cpufreq_p4_cpu_exit,
	.get		= cpufreq_p4_get,
	.name		= "p4-clockmod",
	.owner		= THIS_MODULE,
	.attr		= p4clockmod_attr,
};

/*
 * Module entry point: require an Intel CPU with both the ACPI and ACC
 * (thermal clock control) capabilities before registering the driver.
 */
static int __init cpufreq_p4_init(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);
	int ret;

	/*
	 * THERM_CONTROL is architectural for IA32 now, so
	 * we can rely on the capability checks
	 */
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	if (!test_cpu_cap(c, X86_FEATURE_ACPI) ||
				!test_cpu_cap(c, X86_FEATURE_ACC))
		return -ENODEV;

	ret = cpufreq_register_driver(&p4clockmod_driver);
	if (!ret)
		printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock "
				"Modulation available\n");

	return ret;
}

static void __exit cpufreq_p4_exit(void)
{
	cpufreq_unregister_driver(&p4clockmod_driver);
}

MODULE_AUTHOR("Zwane Mwaikambo <zwane@commfireservices.com>");
MODULE_DESCRIPTION("cpufreq driver for Pentium(TM) 4/Xeon(TM)");
MODULE_LICENSE("GPL");

late_initcall(cpufreq_p4_init);
module_exit(cpufreq_p4_exit);
gpl-2.0
jamesjjliao/linux
drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
552
2455
/* * Copyright (C) 2010 Francisco Jerez. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
 * */
#include "nv04.h"

/*
 * Set up Z compression for tile region @i: when flags bit 1 is set,
 * allocate a run of compression tags sized for @size bytes of tiled
 * memory and encode the tag range into tile->zcomp.
 */
void
nv40_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
		  struct nvkm_fb_tile *tile)
{
	u32 tiles = DIV_ROUND_UP(size, 0x80);	/* 0x80 bytes per tile */
	u32 tags  = round_up(tiles / pfb->ram->parts, 0x100);
	if ( (flags & 2) &&
	    !nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
		tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
		/* first tag at bits 12:0, last tag at bits 25:13 */
		tile->zcomp |= ((tile->tag->offset           ) >> 8);
		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
#ifdef __BIG_ENDIAN
		tile->zcomp |= 0x40000000;
#endif
	}
}

/* Subdev init: run the common fb init, then clear bit 15 of 0x10033c.
 * NOTE(review): the purpose of that register bit is not documented in
 * this file — confirm against hardware documentation before changing. */
static int
nv40_fb_init(struct nvkm_object *object)
{
	struct nv04_fb_priv *priv = (void *)object;
	int ret;

	ret = nvkm_fb_init(&priv->base);
	if (ret)
		return ret;

	nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
	return 0;
}

struct nvkm_oclass *
nv40_fb_oclass = &(struct nv04_fb_impl) {
	.base.base.handle = NV_SUBDEV(FB, 0x40),
	.base.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fb_ctor,
		.dtor = _nvkm_fb_dtor,
		.init = nv40_fb_init,
		.fini = _nvkm_fb_fini,
	},
	.base.memtype = nv04_fb_memtype_valid,
	.base.ram = &nv40_ram_oclass,
	.tile.regions = 8,
	.tile.init = nv30_fb_tile_init,
	.tile.comp = nv40_fb_tile_comp,
	.tile.fini = nv20_fb_tile_fini,
	.tile.prog = nv20_fb_tile_prog,
}.base.base;
gpl-2.0
charansingh/raiderx_htcleo
drivers/usb/gadget/serial.c
1320
7747
/* * serial.c -- USB gadget serial driver * * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) * Copyright (C) 2008 by David Brownell * Copyright (C) 2008 by Nokia Corporation * * This software is distributed under the terms of the GNU General * Public License ("GPL") as published by the Free Software Foundation, * either version 2 of that License or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/utsname.h> #include <linux/device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include "u_serial.h" #include "gadget_chips.h" /* Defines */ #define GS_VERSION_STR "v2.4" #define GS_VERSION_NUM 0x2400 #define GS_LONG_NAME "Gadget Serial" #define GS_VERSION_NAME GS_LONG_NAME " " GS_VERSION_STR /*-------------------------------------------------------------------------*/ /* * Kbuild is not very cooperative with respect to linking separately * compiled library objects into one module. So for now we won't use * separate compilation ... ensuring init/exit sections work to shrink * the runtime footprint, and giving us at least some parts of what * a "gcc --combine ... part1.c part2.c part3.c ... " build would. */ #include "composite.c" #include "usbstring.c" #include "config.c" #include "epautoconf.c" #include "f_acm.c" #include "f_obex.c" #include "f_serial.c" #include "u_serial.c" /*-------------------------------------------------------------------------*/ /* Thanks to NetChip Technologies for donating this product ID. * * DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!! * Instead: allocate your own, using normal USB-IF procedures. */ #define GS_VENDOR_ID 0x0525 /* NetChip */ #define GS_PRODUCT_ID 0xa4a6 /* Linux-USB Serial Gadget */ #define GS_CDC_PRODUCT_ID 0xa4a7 /* ... as CDC-ACM */ #define GS_CDC_OBEX_PRODUCT_ID 0xa4a9 /* ... 
as CDC-OBEX */ /* string IDs are assigned dynamically */ #define STRING_MANUFACTURER_IDX 0 #define STRING_PRODUCT_IDX 1 #define STRING_DESCRIPTION_IDX 2 static char manufacturer[50]; static struct usb_string strings_dev[] = { [STRING_MANUFACTURER_IDX].s = manufacturer, [STRING_PRODUCT_IDX].s = GS_VERSION_NAME, [STRING_DESCRIPTION_IDX].s = NULL /* updated; f(use_acm) */, { } /* end of list */ }; static struct usb_gadget_strings stringtab_dev = { .language = 0x0409, /* en-us */ .strings = strings_dev, }; static struct usb_gadget_strings *dev_strings[] = { &stringtab_dev, NULL, }; static struct usb_device_descriptor device_desc = { .bLength = USB_DT_DEVICE_SIZE, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = cpu_to_le16(0x0200), /* .bDeviceClass = f(use_acm) */ .bDeviceSubClass = 0, .bDeviceProtocol = 0, /* .bMaxPacketSize0 = f(hardware) */ .idVendor = cpu_to_le16(GS_VENDOR_ID), /* .idProduct = f(use_acm) */ /* .bcdDevice = f(hardware) */ /* .iManufacturer = DYNAMIC */ /* .iProduct = DYNAMIC */ .bNumConfigurations = 1, }; static struct usb_otg_descriptor otg_descriptor = { .bLength = sizeof otg_descriptor, .bDescriptorType = USB_DT_OTG, /* REVISIT SRP-only hardware is possible, although * it would not be called "OTG" ... 
*/ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP, }; static const struct usb_descriptor_header *otg_desc[] = { (struct usb_descriptor_header *) &otg_descriptor, NULL, }; /*-------------------------------------------------------------------------*/ /* Module */ MODULE_DESCRIPTION(GS_VERSION_NAME); MODULE_AUTHOR("Al Borchers"); MODULE_AUTHOR("David Brownell"); MODULE_LICENSE("GPL"); static int use_acm = true; module_param(use_acm, bool, 0); MODULE_PARM_DESC(use_acm, "Use CDC ACM, default=yes"); static int use_obex = false; module_param(use_obex, bool, 0); MODULE_PARM_DESC(use_obex, "Use CDC OBEX, default=no"); static unsigned n_ports = 1; module_param(n_ports, uint, 0); MODULE_PARM_DESC(n_ports, "number of ports to create, default=1"); /*-------------------------------------------------------------------------*/ static int __init serial_bind_config(struct usb_configuration *c) { unsigned i; int status = 0; for (i = 0; i < n_ports && status == 0; i++) { if (use_acm) status = acm_bind_config(c, i); else if (use_obex) status = obex_bind_config(c, i); else status = gser_bind_config(c, i); } return status; } static struct usb_configuration serial_config_driver = { /* .label = f(use_acm) */ .bind = serial_bind_config, /* .bConfigurationValue = f(use_acm) */ /* .iConfiguration = DYNAMIC */ .bmAttributes = USB_CONFIG_ATT_SELFPOWER, }; static int __init gs_bind(struct usb_composite_dev *cdev) { int gcnum; struct usb_gadget *gadget = cdev->gadget; int status; status = gserial_setup(cdev->gadget, n_ports); if (status < 0) return status; /* Allocate string descriptor numbers ... note that string * contents can be overridden by the composite_dev glue. 
*/ /* device description: manufacturer, product */ snprintf(manufacturer, sizeof manufacturer, "%s %s with %s", init_utsname()->sysname, init_utsname()->release, gadget->name); status = usb_string_id(cdev); if (status < 0) goto fail; strings_dev[STRING_MANUFACTURER_IDX].id = status; device_desc.iManufacturer = status; status = usb_string_id(cdev); if (status < 0) goto fail; strings_dev[STRING_PRODUCT_IDX].id = status; device_desc.iProduct = status; /* config description */ status = usb_string_id(cdev); if (status < 0) goto fail; strings_dev[STRING_DESCRIPTION_IDX].id = status; serial_config_driver.iConfiguration = status; /* set up other descriptors */ gcnum = usb_gadget_controller_number(gadget); if (gcnum >= 0) device_desc.bcdDevice = cpu_to_le16(GS_VERSION_NUM | gcnum); else { /* this is so simple (for now, no altsettings) that it * SHOULD NOT have problems with bulk-capable hardware. * so warn about unrcognized controllers -- don't panic. * * things like configuration and altsetting numbering * can need hardware-specific attention though. */ pr_warning("gs_bind: controller '%s' not recognized\n", gadget->name); device_desc.bcdDevice = cpu_to_le16(GS_VERSION_NUM | 0x0099); } if (gadget_is_otg(cdev->gadget)) { serial_config_driver.descriptors = otg_desc; serial_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP; } /* register our configuration */ status = usb_add_config(cdev, &serial_config_driver); if (status < 0) goto fail; INFO(cdev, "%s\n", GS_VERSION_NAME); return 0; fail: gserial_cleanup(); return status; } static struct usb_composite_driver gserial_driver = { .name = "g_serial", .dev = &device_desc, .strings = dev_strings, .bind = gs_bind, }; static int __init init(void) { /* We *could* export two configs; that'd be much cleaner... * but neither of these product IDs was defined that way. 
*/ if (use_acm) { serial_config_driver.label = "CDC ACM config"; serial_config_driver.bConfigurationValue = 2; device_desc.bDeviceClass = USB_CLASS_COMM; device_desc.idProduct = cpu_to_le16(GS_CDC_PRODUCT_ID); } else if (use_obex) { serial_config_driver.label = "CDC OBEX config"; serial_config_driver.bConfigurationValue = 3; device_desc.bDeviceClass = USB_CLASS_COMM; device_desc.idProduct = cpu_to_le16(GS_CDC_OBEX_PRODUCT_ID); } else { serial_config_driver.label = "Generic Serial config"; serial_config_driver.bConfigurationValue = 1; device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC; device_desc.idProduct = cpu_to_le16(GS_PRODUCT_ID); } strings_dev[STRING_DESCRIPTION_IDX].s = serial_config_driver.label; return usb_composite_register(&gserial_driver); } module_init(init); static void __exit cleanup(void) { usb_composite_unregister(&gserial_driver); gserial_cleanup(); } module_exit(cleanup);
gpl-2.0
mythos234/AndromedaN910F-CM12
drivers/ata/sata_mv.c
1576
124566
/* * sata_mv.c - Marvell SATA support * * Copyright 2008-2009: Marvell Corporation, all rights reserved. * Copyright 2005: EMC Corporation, all rights reserved. * Copyright 2005 Red Hat, Inc. All rights reserved. * * Originally written by Brett Russ. * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>. * * Please ALWAYS copy linux-ide@vger.kernel.org on emails. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* * sata_mv TODO list: * * --> Develop a low-power-consumption strategy, and implement it. * * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds. * * --> [Experiment, Marvell value added] Is it possible to use target * mode to cross-connect two Linux boxes with Marvell cards? If so, * creating LibATA target mode support would be very interesting. * * Target mode, for those without docs, is the ability to directly * connect two SATA ports. */ /* * 80x1-B2 errata PCI#11: * * Users of the 6041/6081 Rev.B2 chips (current is C0) * should be careful to insert those cards only onto PCI-X bus #0, * and only in device slots 0..7, not higher. The chips may not * work correctly otherwise (note: this is a pretty rare condition). 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/dmapool.h> #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/ata_platform.h> #include <linux/mbus.h> #include <linux/bitops.h> #include <linux/gfp.h> #include <linux/of.h> #include <linux/of_irq.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <linux/libata.h> #define DRV_NAME "sata_mv" #define DRV_VERSION "1.28" /* * module options */ #ifdef CONFIG_PCI static int msi; module_param(msi, int, S_IRUGO); MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); #endif static int irq_coalescing_io_count; module_param(irq_coalescing_io_count, int, S_IRUGO); MODULE_PARM_DESC(irq_coalescing_io_count, "IRQ coalescing I/O count threshold (0..255)"); static int irq_coalescing_usecs; module_param(irq_coalescing_usecs, int, S_IRUGO); MODULE_PARM_DESC(irq_coalescing_usecs, "IRQ coalescing time threshold in usecs"); enum { /* BAR's are enumerated in terms of pci_resource_start() terms */ MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */ MV_IO_BAR = 2, /* offset 0x18: IO space */ MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */ MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */ MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */ /* For use with both IRQ coalescing methods ("all ports" or "per-HC" */ COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */ MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */ MAX_COAL_IO_COUNT = 255, /* completed I/O count */ MV_PCI_REG_BASE = 0, /* * Per-chip ("all ports") interrupt coalescing feature. * This is only for GEN_II / GEN_IIE hardware. * * Coalescing defers the interrupt until either the IO_THRESHOLD * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. 
*/ COAL_REG_BASE = 0x18000, IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08), ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */ IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc), IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0), /* * Registers for the (unused here) transaction coalescing feature: */ TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88), TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c), SATAHC0_REG_BASE = 0x20000, FLASH_CTL = 0x1046c, GPIO_PORT_CTL = 0x104f0, RESET_CFG = 0x180d8, MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ, MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ, MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, MV_MAX_Q_DEPTH = 32, MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1, /* CRQB needs alignment on a 1KB boundary. Size == 1KB * CRPB needs alignment on a 256B boundary. Size == 256B * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B */ MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH), MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH), MV_MAX_SG_CT = 256, MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT), /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */ MV_PORT_HC_SHIFT = 2, MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */ /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */ MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */ /* Host Flags */ MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING, MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI, MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ | ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA, MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN, CRQB_FLAG_READ = (1 << 0), CRQB_TAG_SHIFT = 1, CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */ CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */ CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */ CRQB_CMD_ADDR_SHIFT = 8, CRQB_CMD_CS = (0x2 << 11), CRQB_CMD_LAST = (1 << 15), CRPB_FLAG_STATUS_SHIFT = 8, CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */ 
CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */ EPRD_FLAG_END_OF_TBL = (1 << 31), /* PCI interface registers */ MV_PCI_COMMAND = 0xc00, MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */ MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */ PCI_MAIN_CMD_STS = 0xd30, STOP_PCI_MASTER = (1 << 2), PCI_MASTER_EMPTY = (1 << 3), GLOB_SFT_RST = (1 << 4), MV_PCI_MODE = 0xd00, MV_PCI_MODE_MASK = 0x30, MV_PCI_EXP_ROM_BAR_CTL = 0xd2c, MV_PCI_DISC_TIMER = 0xd04, MV_PCI_MSI_TRIGGER = 0xc38, MV_PCI_SERR_MASK = 0xc28, MV_PCI_XBAR_TMOUT = 0x1d04, MV_PCI_ERR_LOW_ADDRESS = 0x1d40, MV_PCI_ERR_HIGH_ADDRESS = 0x1d44, MV_PCI_ERR_ATTRIBUTE = 0x1d48, MV_PCI_ERR_COMMAND = 0x1d50, PCI_IRQ_CAUSE = 0x1d58, PCI_IRQ_MASK = 0x1d5c, PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */ PCIE_IRQ_CAUSE = 0x1900, PCIE_IRQ_MASK = 0x1910, PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */ /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */ PCI_HC_MAIN_IRQ_CAUSE = 0x1d60, PCI_HC_MAIN_IRQ_MASK = 0x1d64, SOC_HC_MAIN_IRQ_CAUSE = 0x20020, SOC_HC_MAIN_IRQ_MASK = 0x20024, ERR_IRQ = (1 << 0), /* shift by (2 * port #) */ DONE_IRQ = (1 << 1), /* shift by (2 * port #) */ HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */ HC_SHIFT = 9, /* bits 9-17 = HC1's ports */ DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */ DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */ PCI_ERR = (1 << 18), TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */ TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */ PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */ PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */ ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */ GPIO_INT = (1 << 22), SELF_INT = (1 << 23), TWSI_INT = (1 << 24), HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */ HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */ /* SATAHC registers */ HC_CFG = 0x00, HC_IRQ_CAUSE = 0x14, 
DMA_IRQ = (1 << 0), /* shift by port # */ HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */ DEV_IRQ = (1 << 8), /* shift by port # */ /* * Per-HC (Host-Controller) interrupt coalescing feature. * This is present on all chip generations. * * Coalescing defers the interrupt until either the IO_THRESHOLD * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. */ HC_IRQ_COAL_IO_THRESHOLD = 0x000c, HC_IRQ_COAL_TIME_THRESHOLD = 0x0010, SOC_LED_CTRL = 0x2c, SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */ SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */ /* with dev activity LED */ /* Shadow block registers */ SHD_BLK = 0x100, SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */ /* SATA registers */ SATA_STATUS = 0x300, /* ctrl, err regs follow status */ SATA_ACTIVE = 0x350, FIS_IRQ_CAUSE = 0x364, FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */ LTMODE = 0x30c, /* requires read-after-write */ LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */ PHY_MODE2 = 0x330, PHY_MODE3 = 0x310, PHY_MODE4 = 0x314, /* requires read-after-write */ PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */ PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */ PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */ PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */ SATA_IFCTL = 0x344, SATA_TESTCTL = 0x348, SATA_IFSTAT = 0x34c, VENDOR_UNIQUE_FIS = 0x35c, FISCFG = 0x360, FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */ FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ PHY_MODE9_GEN2 = 0x398, PHY_MODE9_GEN1 = 0x39c, PHYCFG_OFS = 0x3a0, /* only in 65n devices */ MV5_PHY_MODE = 0x74, MV5_LTMODE = 0x30, MV5_PHY_CTL = 0x0C, SATA_IFCFG = 0x050, LP_PHY_CTL = 0x058, MV_M2_PREAMP_MASK = 0x7e0, /* Port registers */ EDMA_CFG = 0, EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */ EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */ EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ EDMA_CFG_RD_BRST_EXT = 
(1 << 11), /* read burst 512B */ EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */ EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */ EDMA_ERR_IRQ_CAUSE = 0x8, EDMA_ERR_IRQ_MASK = 0xc, EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */ EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */ EDMA_ERR_DEV = (1 << 2), /* device error */ EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */ EDMA_ERR_DEV_CON = (1 << 4), /* device connected */ EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */ EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */ EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */ EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */ EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */ EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */ EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */ EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */ EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */ EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */ EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */ EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */ EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */ EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */ EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */ EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */ EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */ EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */ EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */ EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */ EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */ EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */ EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */ EDMA_ERR_OVERRUN_5 = (1 << 5), EDMA_ERR_UNDERRUN_5 = (1 << 6), 
EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 | EDMA_ERR_LNK_CTRL_RX_1 | EDMA_ERR_LNK_CTRL_RX_3 | EDMA_ERR_LNK_CTRL_TX, EDMA_EH_FREEZE = EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON | EDMA_ERR_SERR | EDMA_ERR_SELF_DIS | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR | EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 | EDMA_ERR_LNK_DATA_RX | EDMA_ERR_LNK_DATA_TX | EDMA_ERR_TRANS_PROTO, EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON | EDMA_ERR_OVERRUN_5 | EDMA_ERR_UNDERRUN_5 | EDMA_ERR_SELF_DIS_5 | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR | EDMA_ERR_IORDY, EDMA_REQ_Q_BASE_HI = 0x10, EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */ EDMA_REQ_Q_OUT_PTR = 0x18, EDMA_REQ_Q_PTR_SHIFT = 5, EDMA_RSP_Q_BASE_HI = 0x1c, EDMA_RSP_Q_IN_PTR = 0x20, EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */ EDMA_RSP_Q_PTR_SHIFT = 3, EDMA_CMD = 0x28, /* EDMA command register */ EDMA_EN = (1 << 0), /* enable EDMA */ EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */ EDMA_STATUS = 0x30, /* EDMA engine status */ EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */ EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */ EDMA_IORDY_TMOUT = 0x34, EDMA_ARB_CFG = 0x38, EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */ EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */ BMDMA_CMD = 0x224, /* bmdma command register */ BMDMA_STATUS = 0x228, /* bmdma status register */ BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */ BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */ /* Host private flags (hp_flags) */ MV_HP_FLAG_MSI = (1 << 0), MV_HP_ERRATA_50XXB0 = (1 << 1), MV_HP_ERRATA_50XXB2 = (1 << 2), MV_HP_ERRATA_60X1B2 = (1 << 3), MV_HP_ERRATA_60X1C0 = (1 << 4), MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ MV_HP_PCIE 
= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is led blinking enabled? */
	MV_HP_FIX_LP_PHY_CTL	= (1 << 13),	/* fix speed in LP_PHY_CTL ? */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
};

/* Chip-generation / bus-type tests on mv_host_priv.hp_flags */
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

/* Address-window control/base register pairs, 16 bytes apart per window */
#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
#define WINDOW_BASE(i) (0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY = 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};

/* Index into mv_port_info[]; also the driver_data of each PCI ID entry */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

/* Command ReQuest Block, alternate layout (filled in by mv_qc_prep_iie()) */
struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A.
SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
	u32			fiscfg;
	u32			ltmode;
	u32			haltcond;
	u32			unknown_rsvd;
};

/* Per-port private data, hung off ata_port->private_data */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* EDMA request queue */
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;		/* EDMA response queue */
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;	/* request queue insert index */
	unsigned int		resp_idx;	/* response queue read index */

	u32			pp_flags;	/* MV_PP_FLAG_* bits */
	struct mv_cached_regs	cached;		/* see mv_save_cached_regs() */
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

/* Host-wide private data, hung off ata_host->private_data */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_* bits */
	unsigned int		board_idx;
	u32			main_irq_mask;	/* soft copy of the HW mask reg */
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;		/* per chip-family hooks */
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_offset;
	u32			irq_mask_offset;
	u32			unmask_all_irqs;

#if defined(CONFIG_HAVE_CLK)
	struct clk		*clk;
	struct clk		**port_clks;
#endif
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
*/
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

/* Chip-family-specific hardware hooks (mv5xxx/mv6xxx/soc variants below) */
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

/* Forward declarations */
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static int  mv_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
					struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8   mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);

/*
 .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
#ifdef CONFIG_PCI
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
#endif
static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

/* Gen-I (50xx) operations: no PMP, no NCQ, no bmdma */
static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

/* Gen-II (60xx) operations: adds PMP, softreset, bmdma hooks */
static struct ata_port_operations mv6_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.dev_config             = mv6_dev_config,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.softreset		= mv_softreset,
	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.sff_check_status	= mv_sff_check_status,
	.sff_irq_clear		= mv_sff_irq_clear,
	.check_atapi_dma	= mv_check_atapi_dma,
	.bmdma_setup		= mv_bmdma_setup,
	.bmdma_start		= mv_bmdma_start,
	.bmdma_stop		= mv_bmdma_stop,
	.bmdma_status		= mv_bmdma_status,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

/* Gen-IIE (6042/7042/soc): as Gen-II but with the IIE request-block layout */
static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

/* Indexed by enum chip_type */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_GEN_I_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_GEN_II_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static
const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/* NOTE: no .read_preamp hook here, unlike the other op tables */
static const struct mv_hw_ops mv_soc_65n_ops = {
	.phy_errata		= mv_soc_65n_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

/* writel() followed by a read-back to flush PCI posted writes */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

/* Which host controller (0 or 1) owns this port number */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

/* Port index within its host controller */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}

/* Base address of a host controller's register block */
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

/* Base address of a port's register block within its host controller */
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

/* Gen-I (mv5) PHY register base: 0x100 per hardport, starting at 0x100 */
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

/* 2 host controllers on dual-HC boards, otherwise 1 */
static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

/**
 *      mv_save_cached_regs - (re-)initialize cached port registers
 *      @ap: the port whose registers we are caching
 *
 *	Initialize the local cache of port registers,
 *	so that reading them over and over again can
 *	be avoided on the hotter paths of this driver.
 *	This saves a few microseconds each time we switch
 *	to/from EDMA mode to perform (eg.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	/* One slow MMIO read each, cached for later mv_write_cached_reg() */
	pp->cached.fiscfg = readl(port_mmio + FISCFG);
	pp->cached.ltmode = readl(port_mmio + LTMODE);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
}

/**
 *      mv_write_cached_reg - write to a cached port register
 *      @addr: hardware address of the register
 *      @old: pointer to cached value of the register
 *      @new: new value for the register
 *
 *	Write a new value to a cached register,
 *	but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		unsigned long laddr;
		*old = new;
		/*
		 * Workaround for 88SX60x1-B2 FEr SATA#13:
		 * Read-after-write is needed to prevent generating 64-bit
		 * write cycles on the PCI bus for SATA interface registers
		 * at offsets ending in 0x4 or 0xc.
		 *
		 * Looks like a lot of fuss, but it avoids an unnecessary
		 * +1 usec read-after-write delay for unaffected registers.
		 */
		laddr = (long)addr & 0xffff;
		if (laddr >= 0x300 && laddr <= 0x33c) {
			laddr &= 0x000f;
			if (laddr == 0x4 || laddr == 0xc) {
				writelfl(new, addr); /* read after write */
				return;
			}
		}
		writel(new, addr); /* unaffected by the errata */
	}
}

/* Program the EDMA request/response queue base and in/out pointers */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);	/* queue must be 1KB aligned */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);	/* queue must be 256B aligned */
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR);
}

static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
	/*
	 * When writing to the main_irq_mask in hardware,
	 * we must ensure exclusivity between the interrupt coalescing bits
	 * and the corresponding individual port DONE_IRQ bits.
	 *
	 * Note that this register is really an "IRQ enable" register,
	 * not an "IRQ mask" register as Marvell's naming might suggest.
	 */
	/* coalescing active: the per-port DONE bits must be off */
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
		mask &= ~DONE_IRQ_0_3;
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
		mask &= ~DONE_IRQ_4_7;
	writelfl(mask, hpriv->main_irq_mask_addr);
}

/* Update the soft copy of the main irq mask; touch hardware only on change */
static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		mv_write_main_irq_mask(new_mask, hpriv);
	}
}

/* Replace this port's DONE/ERR enable bits with @port_bits */
static void mv_enable_port_irqs(struct ata_port *ap,
				unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

/* Clear any stale EDMA/HC/FIS irq-cause state, then enable @port_irqs */
static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + FIS_IRQ_CAUSE);

	mv_enable_port_irqs(ap, port_irqs);
}

/*
 * Program the chip/HC interrupt coalescing thresholds:
 * defer the "done" interrupt until @count I/Os complete or
 * @usecs elapse, whichever comes first.
 */
static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	u32 coal_enable = 0;
	unsigned long flags;
	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
							ALL_PORTS_COAL_DONE;

	/* Disable IRQ coalescing
	   if either threshold is zero */
	if (!usecs || !count) {
		clks = count = 0;
	} else {
		/* Respect maximum limits of the hardware */
		clks = usecs * COAL_CLOCKS_PER_USEC;
		if (clks > MAX_COAL_TIME_THRESHOLD)
			clks = MAX_COAL_TIME_THRESHOLD;
		if (count > MAX_COAL_IO_COUNT)
			count = MAX_COAL_IO_COUNT;
	}

	spin_lock_irqsave(&host->lock, flags);
	mv_set_main_irq_mask(host, coal_disable, 0);

	if (is_dual_hc && !IS_GEN_I(hpriv)) {

		/*
		 * GEN_II/GEN_IIE with dual host controllers:
		 * one set of global thresholds for the entire chip.
		 */
		writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
		/* clear leftover coal IRQ bit */
		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
		if (count)
			coal_enable = ALL_PORTS_COAL_DONE;
		clks = count = 0; /* force clearing of regular regs below */
	}

	/*
	 * All chips: independent thresholds for each HC on the chip.
	 */
	hc_mmio = mv_hc_base_from_port(mmio, 0);
	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
	if (count)
		coal_enable |= PORTS_0_3_COAL_DONE;
	if (is_dual_hc) {
		/* second HC gets the same per-HC thresholds */
		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
		if (count)
			coal_enable |= PORTS_4_7_COAL_DONE;
	}

	mv_set_main_irq_mask(host, 0, coal_enable);
	spin_unlock_irqrestore(&host->lock, flags);
}

/**
 *      mv_start_edma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* NCQ mode mismatch: restart EDMA in the wanted mode */
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;

		mv_edma_cfg(ap, want_ncq, 1);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

/* Poll EDMA_STATUS until the engine reports cache-empty and idle */
static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
}

/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD);

	/* Wait for the chip to confirm eDMA is off.
	 */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;	/* engine never confirmed disable */
}

/* Quiesce and disable EDMA for @ap; returns -EIO if the engine won't stop */
static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	int err = 0;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_err(ap, "Unable to stop eDMA\n");
		err = -EIO;
	}
	mv_edma_cfg(ap, 0, 0);
	return err;
}

#ifdef ATA_DEBUG
/* Hex-dump @bytes of MMIO space starting at @start, 4 words per line */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
#if defined(ATA_DEBUG) || defined(CONFIG_PCI)
/* Hex-dump @bytes of PCI config space (no-op unless ATA_DEBUG) */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
#endif
/* Dump PCI/HC/port registers for @port, or for all ports if @port < 0 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ?
		num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

/*
 * Map a libata SCR register index to its port-register offset,
 * or 0xffffffffU for registers this chip does not expose.
 */
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		/* status/control/error are contiguous u32s */
		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		void __iomem *addr = mv_ap_base(link->ap) + ofs;
		struct mv_host_priv *hpriv = link->ap->host->private_data;
		if (sc_reg_in == SCR_CONTROL) {
			/*
			 * Workaround for 88SX60x1 FEr SATA#26:
			 *
			 * COMRESETs have to take care not to accidentally
			 * put the drive to sleep when writing SCR_CONTROL.
			 * Setting bits 12..15 prevents this problem.
			 *
			 * So if we see an outbound COMMRESET, set those bits.
			 * Ditto for the followup write that clears the reset.
			 *
			 * The proprietary driver does this for
			 * all chip versions, and so do we.
			 */
			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
				val |= 0xf000;

			if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
				void __iomem *lp_phy_addr =
					mv_ap_base(link->ap) + LP_PHY_CTL;
				/*
				 * Set PHY speed according to SControl speed.
				 */
				if ((val & 0xf0) == 0x10)
					writelfl(0x7, lp_phy_addr);
				else
					writelfl(0x227, lp_phy_addr);
			}
		}
		writelfl(val, addr);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_info(adev,
				"NCQ disabled for command-based switching\n");
		}
	}
}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;

	/* PIO commands need exclusive link: no other commands [DMA or PIO]
	 * can run concurrently.
	 * set excl_link when we want to send a PIO command in DMA mode
	 * or a non-NCQ command in NCQ mode.
	 * When we receive a command from that link, and there are no
	 * outstanding commands, mark a flag to clear excl_link and let
	 * the command go through.
	 */
	if (unlikely(ap->excl_link)) {
		if (link == ap->excl_link) {
			if (ap->nr_active_links)
				return ATA_DEFER_PORT;
			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
			return 0;
		} else
			return ATA_DEFER_PORT;
	}

	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	/*
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands.  EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	 * doesn't allow it.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
		if (ata_is_ncq(qc->tf.protocol))
			return 0;
		else {
			/* non-NCQ command wants the link to itself */
			ap->excl_link = link;
			return ATA_DEFER_PORT;
		}
	}

	return ATA_DEFER_PORT;
}

/*
 * Configure FIS-based switching (FBS) for @ap via the cached
 * FISCFG/LTMODE/EDMA_HALTCOND registers, writing hardware only on change.
 */
static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *port_mmio;

	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
	u32 haltcond, *old_haltcond = &pp->cached.haltcond;

	ltmode   = *old_ltmode & ~LTMODE_BIT8;
	haltcond = *old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
		ltmode = *old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			haltcond &= ~EDMA_ERR_DEV;
		else
			fiscfg |=  FISCFG_WAIT_DEV_ERR;
	} else {
		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	}

	port_mmio = mv_ap_base(ap);
	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
}

static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + GPIO_PORT_CTL);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + GPIO_PORT_CTL);
}

/**
 *	mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma
 *	@ap: Port being initialized
 *
 *	There are two DMA modes on these chips:  basic DMA, and EDMA.
 *
 *	Bit-0 of the "EDMA RESERVED" register enables/disables use
 *	of basic DMA on the GEN_IIE versions of the chips.
 *
 *	This bit survives EDMA resets, and must be set for basic DMA
 *	to function, and should be cleared when EDMA is active.
 */
static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
{
	struct mv_port_priv *pp = ap->private_data;
	u32 new, *old = &pp->cached.unknown_rsvd;

	/* Set or clear bit-0 in the cached EDMA_UNKNOWN_RSVD value;
	 * mv_write_cached_reg() only touches hardware on a change.
	 */
	if (enable_bmdma)
		new = *old | 1;
	else
		new = *old & ~1;
	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
}

/*
 * SOC chips have an issue whereby the HDD LEDs don't always blink
 * during I/O when NCQ is enabled.  Enabling a special "LED blink" mode
 * of the SOC takes care of it, generating a steady blink rate when
 * any drive on the chip is active.
 *
 * Unfortunately, the blink mode is a global hardware setting for the SOC,
 * so we must use it whenever at least one port on the SOC has NCQ enabled.
 *
 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
 * LED operation works then, and provides better (more accurate) feedback.
 *
 * Note that this code assumes that an SOC never has more than one HC onboard.
 */
static void mv_soc_led_blink_enable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;

	/* Blink mode already on (global setting): nothing to do. */
	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
		return;

	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}

static void mv_soc_led_blink_disable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;
	unsigned int port;

	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
		return;

	/* disable led-blink only if no ports are using NCQ */
	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *this_ap = host->ports[port];
		struct mv_port_priv *pp = this_ap->private_data;

		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
			return;
	}

	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl =
		   readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}

/*
 * mv_edma_cfg - program EDMA_CFG and related mode state for a port.
 * @ap: port to configure
 * @want_ncq: nonzero to enable NCQ mode
 * @want_edma: nonzero when EDMA (vs basic DMA/PIO) will be used
 *
 * Clears the FBS/NCQ/FAKE_ATA_BUSY port flags, builds a chip-generation
 * specific EDMA_CFG value, and handles GEN_II errata and GEN_IIE
 * FBS/bmdma/LED side configuration before writing EDMA_CFG.
 */
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);

	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow it,
		 * But first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
		 */
		want_fbs &= want_ncq;

		mv_config_fbs(ap, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
		}

		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		if (want_edma) {
			cfg |= (1 << 22); /* enab 4-entry host queue cache */
			if (!IS_SOC(hpriv))
				cfg |= (1 << 18); /* enab early completion */
		}
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
		/* basic DMA bit must be off while EDMA is in use */
		mv_bmdma_enable_iie(ap, !want_edma);

		if (IS_SOC(hpriv)) {
			if (want_ncq)
				mv_soc_led_blink_enable(ap);
			else
				mv_soc_led_blink_disable(ap);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	}

	writelfl(cfg, port_mmio + EDMA_CFG);
}

/*
 * mv_port_free_dma_mem - release the per-port CRQB/CRPB/sg_tbl DMA pools.
 * Safe to call on a partially-allocated port (each pointer is checked
 * and NULLed after freeing).
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma); pp->crpb = NULL; } /* * For GEN_I, there's no NCQ, so we have only a single sg_tbl. * For later hardware, we have one unique sg_tbl per NCQ tag. */ for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { if (pp->sg_tbl[tag]) { if (tag == 0 || !IS_GEN_I(hpriv)) dma_pool_free(hpriv->sg_tbl_pool, pp->sg_tbl[tag], pp->sg_tbl_dma[tag]); pp->sg_tbl[tag] = NULL; } } } /** * mv_port_start - Port specific init/start routine. * @ap: ATA channel to manipulate * * Allocate and point to DMA memory, init port private memory, * zero indices. * * LOCKING: * Inherited from caller. */ static int mv_port_start(struct ata_port *ap) { struct device *dev = ap->host->dev; struct mv_host_priv *hpriv = ap->host->private_data; struct mv_port_priv *pp; unsigned long flags; int tag; pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); if (!pp) return -ENOMEM; ap->private_data = pp; pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); if (!pp->crqb) return -ENOMEM; memset(pp->crqb, 0, MV_CRQB_Q_SZ); pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); if (!pp->crpb) goto out_port_free_dma_mem; memset(pp->crpb, 0, MV_CRPB_Q_SZ); /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */ if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0) ap->flags |= ATA_FLAG_AN; /* * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. * For later hardware, we need one unique sg_tbl per NCQ tag. 
*/ for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { if (tag == 0 || !IS_GEN_I(hpriv)) { pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool, GFP_KERNEL, &pp->sg_tbl_dma[tag]); if (!pp->sg_tbl[tag]) goto out_port_free_dma_mem; } else { pp->sg_tbl[tag] = pp->sg_tbl[0]; pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; } } spin_lock_irqsave(ap->lock, flags); mv_save_cached_regs(ap); mv_edma_cfg(ap, 0, 0); spin_unlock_irqrestore(ap->lock, flags); return 0; out_port_free_dma_mem: mv_port_free_dma_mem(ap); return -ENOMEM; } /** * mv_port_stop - Port specific cleanup/stop routine. * @ap: ATA channel to manipulate * * Stop DMA, cleanup port memory. * * LOCKING: * This routine uses the host lock to protect the DMA stop. */ static void mv_port_stop(struct ata_port *ap) { unsigned long flags; spin_lock_irqsave(ap->lock, flags); mv_stop_edma(ap); mv_enable_port_irqs(ap, 0); spin_unlock_irqrestore(ap->lock, flags); mv_port_free_dma_mem(ap); } /** * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries * @qc: queued command whose SG list to source from * * Populate the SG list and mark the last entry. * * LOCKING: * Inherited from caller. 
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		/* Split each sg entry so that no ePRD crosses a 64KB
		 * boundary (the length field is only 16 bits wide).
		 */
		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
			mv_sg->reserved = 0;

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
	mb(); /* ensure data structure is visible to the chipset */
}

/*
 * Pack one ATA shadow-register write into a 16-bit CRQB command word:
 * data byte, register address, control-select bit, and (optionally)
 * the "last command" flag.
 */
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

/**
 *	mv_sff_irq_clear - Clear hardware interrupt after DMA.
 *	@ap: Port associated with this ATA transaction.
 *
 *	We need this only for ATAPI bmdma transactions,
 *	as otherwise we experience spurious interrupts
 *	after libata-sff handles the bmdma interrupts.
 */
static void mv_sff_irq_clear(struct ata_port *ap)
{
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
}

/**
 *	mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
 *	@qc: queued command to check for chipset/DMA compatibility.
 *
 *	The bmdma engines cannot handle speculative data sizes
 *	(bytecount under/over flow).  So only allow DMA for
 *	data transfer commands with known data sizes.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	if (scmd) {
		/* Whitelist of commands with deterministic byte counts */
		switch (scmd->cmnd[0]) {
		case READ_6:
		case READ_10:
		case READ_12:
		case WRITE_6:
		case WRITE_10:
		case WRITE_12:
		case GPCMD_READ_CD:
		case GPCMD_SEND_DVD_STRUCTURE:
		case GPCMD_SEND_CUE_SHEET:
			return 0; /* DMA is safe */
		}
	}
	return -EOPNOTSUPP; /* use PIO instead */
}

/**
 *	mv_bmdma_setup - Set up BMDMA transaction
 *	@qc: queued command to prepare DMA for.
 *
 *	Builds the ePRD table, clears the BMDMA command register,
 *	loads the PRD table address, and issues the r/w command.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	mv_fill_sg(qc);

	/* clear all DMA cmd bits */
	writel(0, port_mmio + BMDMA_CMD);

	/* load PRD table addr. */
	writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
		port_mmio + BMDMA_PRD_HIGH);
	writelfl(pp->sg_tbl_dma[qc->tag],
		port_mmio + BMDMA_PRD_LOW);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}

/**
 *	mv_bmdma_start - Start a BMDMA transaction
 *	@qc: queued command to start DMA on.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;

	/* start host DMA transaction */
	writelfl(cmd, port_mmio + BMDMA_CMD);
}

/**
 *	mv_bmdma_stop - Stop BMDMA transfer
 *	@qc: queued command to stop DMA on.
 *
 *	Clears the ATA_DMA_START flag in the bmdma control register
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_stop_ap(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 cmd;

	/* clear start/stop bit */
	cmd = readl(port_mmio + BMDMA_CMD);
	if (cmd & ATA_DMA_START) {
		cmd &= ~ATA_DMA_START;
		writelfl(cmd, port_mmio + BMDMA_CMD);

		/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
		ata_sff_dma_pause(ap);
	}
}

/* libata ->bmdma_stop hook: per-qc wrapper around mv_bmdma_stop_ap(). */
static void mv_bmdma_stop(struct ata_queued_cmd *qc)
{
	mv_bmdma_stop_ap(qc->ap);
}

/**
 *	mv_bmdma_status - Read BMDMA status
 *	@ap: port for which to retrieve DMA status.
 *
 *	Read and return equivalent of the sff BMDMA status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 mv_bmdma_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 reg, status;

	/*
	 * Other bits are valid only if ATA_DMA_ACTIVE==0,
	 * and the ATA_DMA_INTR bit doesn't exist.
	 */
	reg = readl(port_mmio + BMDMA_STATUS);
	if (reg & ATA_DMA_ACTIVE)
		status = ATA_DMA_ACTIVE;
	else if (reg & ATA_DMA_ERR)
		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
	else {
		/*
		 * Just because DMA_ACTIVE is 0 (DMA completed),
		 * this does _not_ mean the device is "done".
		 * So we should not yet be signalling ATA_DMA_INTR
		 * in some cases.  Eg. DSM/TRIM, and perhaps others.
		 */
		mv_bmdma_stop_ap(ap);
		if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
			status = 0;
		else
			status = ATA_DMA_INTR;
	}
	return status;
}

static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	/*
	 * Workaround for 88SX60x1 FEr SATA#24.
	 *
	 * Chip may corrupt WRITEs if multi_count >= 4kB.
	 * Note that READs are unaffected.
	 *
	 * It's not clear if this errata really means "4K bytes",
	 * or if it always happens for multi_count > 7
	 * regardless of device sector_size.
* * So, for safety, any write with multi_count > 7 * gets converted here into a regular PIO write instead: */ if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) { if (qc->dev->multi_count > 7) { switch (tf->command) { case ATA_CMD_WRITE_MULTI: tf->command = ATA_CMD_PIO_WRITE; break; case ATA_CMD_WRITE_MULTI_FUA_EXT: tf->flags &= ~ATA_TFLAG_FUA; /* ugh */ /* fall through */ case ATA_CMD_WRITE_MULTI_EXT: tf->command = ATA_CMD_PIO_WRITE_EXT; break; } } } } /** * mv_qc_prep - Host specific command preparation. * @qc: queued command to prepare * * This routine simply redirects to the general purpose routine * if command is not DMA. Else, it handles prep of the CRQB * (command request block), does some sanity checking, and calls * the SG load routine. * * LOCKING: * Inherited from caller. */ static void mv_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct mv_port_priv *pp = ap->private_data; __le16 *cw; struct ata_taskfile *tf = &qc->tf; u16 flags = 0; unsigned in_index; switch (tf->protocol) { case ATA_PROT_DMA: if (tf->command == ATA_CMD_DSM) return; /* fall-thru */ case ATA_PROT_NCQ: break; /* continue below */ case ATA_PROT_PIO: mv_rw_multi_errata_sata24(qc); return; default: return; } /* Fill in command request block */ if (!(tf->flags & ATA_TFLAG_WRITE)) flags |= CRQB_FLAG_READ; WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); flags |= qc->tag << CRQB_TAG_SHIFT; flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; /* get current queue index from software */ in_index = pp->req_idx; pp->crqb[in_index].sg_addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); pp->crqb[in_index].sg_addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); cw = &pp->crqb[in_index].ata_cmd[0]; /* Sadly, the CRQB cannot accommodate all registers--there are * only 11 bytes...so we must pick and choose required * registers based on the command. 
So, we drop feature and * hob_feature for [RW] DMA commands, but they are needed for * NCQ. NCQ will drop hob_nsect, which is not needed there * (nsect is used only for the tag; feat/hob_feat hold true nsect). */ switch (tf->command) { case ATA_CMD_READ: case ATA_CMD_READ_EXT: case ATA_CMD_WRITE: case ATA_CMD_WRITE_EXT: case ATA_CMD_WRITE_FUA_EXT: mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); break; case ATA_CMD_FPDMA_READ: case ATA_CMD_FPDMA_WRITE: mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); break; default: /* The only other commands EDMA supports in non-queued and * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none * of which are defined/used by Linux. If we get here, this * driver needs work. * * FIXME: modify libata to give qc_prep a return value and * return error here. */ BUG_ON(tf->command); break; } mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0); mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0); mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0); mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0); mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0); mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ if (!(qc->flags & ATA_QCFLAG_DMAMAP)) return; mv_fill_sg(qc); } /** * mv_qc_prep_iie - Host specific command preparation. * @qc: queued command to prepare * * This routine simply redirects to the general purpose routine * if command is not DMA. Else, it handles prep of the CRQB * (command request block), does some sanity checking, and calls * the SG load routine. * * LOCKING: * Inherited from caller. 
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf = &qc->tf;
	unsigned in_index;
	u32 flags = 0;

	/* Only DMA and NCQ protocols use the CRQB path. */
	if ((tf->protocol != ATA_PROT_DMA) &&
	    (tf->protocol != ATA_PROT_NCQ))
		return;
	if (tf->command == ATA_CMD_DSM)
		return;  /* use bmdma for this */

	/* Fill in Gen IIE command request block */
	if (!(tf->flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* Gen IIE packs the whole taskfile into four 32-bit words. */
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *	mv_sff_check_status - fetch device status, if valid
 *	@ap: ATA port to fetch status from
 *
 *	When using command issue via mv_qc_issue_fis(),
 *	the initial ATA_BUSY state does not show up in the
 *	ATA status (shadow) register.  This can confuse libata!
 *
 *	So we have a hook here to fake ATA_BUSY for that situation,
 *	until the first time a BUSY, DRQ, or ERR bit is seen.
 *
 *	The rest of the time, it simply returns the ATA status register.
 */
static u8 mv_sff_check_status(struct ata_port *ap)
{
	u8 stat = ioread8(ap->ioaddr.status_addr);
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
		/* Stop faking BUSY once the device shows real state. */
		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
		else
			stat = ATA_BUSY;
	}
	return stat;
}

/**
 *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
 *	@fis: fis to be sent
 *	@nwords: number of 32-bit words in the fis
 */
static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 ifctl, old_ifctl, ifstat;
	int i, timeout = 200, final_word = nwords - 1;

	/* Initiate FIS transmission mode */
	old_ifctl = readl(port_mmio + SATA_IFCTL);
	ifctl = 0x100 | (old_ifctl & 0xf);
	writelfl(ifctl, port_mmio + SATA_IFCTL);

	/* Send all words of the FIS except for the final word */
	for (i = 0; i < final_word; ++i)
		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);

	/* Flag end-of-transmission, and then send the final word */
	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);

	/*
	 * Wait for FIS transmission to complete.
	 * This typically takes just a single iteration.
	 */
	do {
		ifstat = readl(port_mmio + SATA_IFSTAT);
	} while (!(ifstat & 0x1000) && --timeout);

	/* Restore original port configuration */
	writelfl(old_ifctl, port_mmio + SATA_IFCTL);

	/* See if it worked */
	if ((ifstat & 0x3000) != 0x1000) {
		ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
			      __func__, ifstat);
		return AC_ERR_OTHER;
	}
	return 0;
}

/**
 *	mv_qc_issue_fis - Issue a command directly as a FIS
 *	@qc: queued command to start
 *
 *	Note that the ATA shadow registers are not updated
 *	after command issue, so the device will appear "READY"
 *	if polled, even while it is BUSY processing the command.
 *
 *	So we use a status hook to fake ATA_BUSY until the drive changes state.
 *
 *	Note: we don't get updated shadow regs on *completion*
 *	of non-data commands.
So avoid sending them via this function, * as they will appear to have completed immediately. * * GEN_IIE has special registers that we could get the result tf from, * but earlier chipsets do not. For now, we ignore those registers. */ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct mv_port_priv *pp = ap->private_data; struct ata_link *link = qc->dev->link; u32 fis[5]; int err = 0; ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis); err = mv_send_fis(ap, fis, ARRAY_SIZE(fis)); if (err) return err; switch (qc->tf.protocol) { case ATAPI_PROT_PIO: pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; /* fall through */ case ATAPI_PROT_NODATA: ap->hsm_task_state = HSM_ST_FIRST; break; case ATA_PROT_PIO: pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; if (qc->tf.flags & ATA_TFLAG_WRITE) ap->hsm_task_state = HSM_ST_FIRST; else ap->hsm_task_state = HSM_ST; break; default: ap->hsm_task_state = HSM_ST_LAST; break; } if (qc->tf.flags & ATA_TFLAG_POLLING) ata_sff_queue_pio_task(link, 0); return 0; } /** * mv_qc_issue - Initiate a command to the host * @qc: queued command to start * * This routine simply redirects to the general purpose routine * if command is not DMA. Else, it sanity checks our local * caches of the request producer/consumer indices then enables * DMA and bumps the request producer index. * * LOCKING: * Inherited from caller. 
*/ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) { static int limit_warnings = 10; struct ata_port *ap = qc->ap; void __iomem *port_mmio = mv_ap_base(ap); struct mv_port_priv *pp = ap->private_data; u32 in_index; unsigned int port_irqs; pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */ switch (qc->tf.protocol) { case ATA_PROT_DMA: if (qc->tf.command == ATA_CMD_DSM) { if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */ return AC_ERR_OTHER; break; /* use bmdma for this */ } /* fall thru */ case ATA_PROT_NCQ: mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; /* Write the request in pointer to kick the EDMA to life */ writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, port_mmio + EDMA_REQ_Q_IN_PTR); return 0; case ATA_PROT_PIO: /* * Errata SATA#16, SATA#24: warn if multiple DRQs expected. * * Someday, we might implement special polling workarounds * for these, but it all seems rather unnecessary since we * normally use only DMA for commands which transfer more * than a single block of data. * * Much of the time, this could just work regardless. * So for now, just log the incident, and allow the attempt. */ if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) { --limit_warnings; ata_link_warn(qc->dev->link, DRV_NAME ": attempting PIO w/multiple DRQ: " "this may fail due to h/w errata\n"); } /* drop through */ case ATA_PROT_NODATA: case ATAPI_PROT_PIO: case ATAPI_PROT_NODATA: if (ap->flags & ATA_FLAG_PIO_POLLING) qc->tf.flags |= ATA_TFLAG_POLLING; break; } if (qc->tf.flags & ATA_TFLAG_POLLING) port_irqs = ERR_IRQ; /* mask device interrupt when polling */ else port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */ /* * We're about to send a non-EDMA capable command to the * port. Turn off EDMA so there won't be problems accessing * shadow block, etc registers. 
*/ mv_stop_edma(ap); mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs); mv_pmp_select(ap, qc->dev->link->pmp); if (qc->tf.command == ATA_CMD_READ_LOG_EXT) { struct mv_host_priv *hpriv = ap->host->private_data; /* * Workaround for 88SX60x1 FEr SATA#25 (part 2). * * After any NCQ error, the READ_LOG_EXT command * from libata-eh *must* use mv_qc_issue_fis(). * Otherwise it might fail, due to chip errata. * * Rather than special-case it, we'll just *always* * use this method here for READ_LOG_EXT, making for * easier testing. */ if (IS_GEN_II(hpriv)) return mv_qc_issue_fis(qc); } return ata_bmdma_qc_issue(qc); } static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) { struct mv_port_priv *pp = ap->private_data; struct ata_queued_cmd *qc; if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) return NULL; qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) return qc; return NULL; } static void mv_pmp_error_handler(struct ata_port *ap) { unsigned int pmp, pmp_map; struct mv_port_priv *pp = ap->private_data; if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) { /* * Perform NCQ error analysis on failed PMPs * before we freeze the port entirely. * * The failed PMPs are marked earlier by mv_pmp_eh_prep(). 
*/ pmp_map = pp->delayed_eh_pmp_map; pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH; for (pmp = 0; pmp_map != 0; pmp++) { unsigned int this_pmp = (1 << pmp); if (pmp_map & this_pmp) { struct ata_link *link = &ap->pmp_link[pmp]; pmp_map &= ~this_pmp; ata_eh_analyze_ncq_error(link); } } ata_port_freeze(ap); } sata_pmp_error_handler(ap); } static unsigned int mv_get_err_pmp_map(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); return readl(port_mmio + SATA_TESTCTL) >> 16; } static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) { struct ata_eh_info *ehi; unsigned int pmp; /* * Initialize EH info for PMPs which saw device errors */ ehi = &ap->link.eh_info; for (pmp = 0; pmp_map != 0; pmp++) { unsigned int this_pmp = (1 << pmp); if (pmp_map & this_pmp) { struct ata_link *link = &ap->pmp_link[pmp]; pmp_map &= ~this_pmp; ehi = &link->eh_info; ata_ehi_clear_desc(ehi); ata_ehi_push_desc(ehi, "dev err"); ehi->err_mask |= AC_ERR_DEV; ehi->action |= ATA_EH_RESET; ata_link_abort(link); } } } static int mv_req_q_empty(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); u32 in_ptr, out_ptr; in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR) >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR) >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; return (in_ptr == out_ptr); /* 1 == queue_is_empty */ } static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) { struct mv_port_priv *pp = ap->private_data; int failed_links; unsigned int old_map, new_map; /* * Device error during FBS+NCQ operation: * * Set a port flag to prevent further I/O being enqueued. * Leave the EDMA running to drain outstanding commands from this port. * Perform the post-mortem/EH only when all responses are complete. * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2). 
*/ if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) { pp->pp_flags |= MV_PP_FLAG_DELAYED_EH; pp->delayed_eh_pmp_map = 0; } old_map = pp->delayed_eh_pmp_map; new_map = old_map | mv_get_err_pmp_map(ap); if (old_map != new_map) { pp->delayed_eh_pmp_map = new_map; mv_pmp_eh_prep(ap, new_map & ~old_map); } failed_links = hweight16(new_map); ata_port_info(ap, "%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n", __func__, pp->delayed_eh_pmp_map, ap->qc_active, failed_links, ap->nr_active_links); if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) { mv_process_crpb_entries(ap, pp); mv_stop_edma(ap); mv_eh_freeze(ap); ata_port_info(ap, "%s: done\n", __func__); return 1; /* handled */ } ata_port_info(ap, "%s: waiting\n", __func__); return 1; /* handled */ } static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap) { /* * Possible future enhancement: * * FBS+non-NCQ operation is not yet implemented. * See related notes in mv_edma_cfg(). * * Device error during FBS+non-NCQ operation: * * We need to snapshot the shadow registers for each failed command. * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3). */ return 0; /* not handled */ } static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause) { struct mv_port_priv *pp = ap->private_data; if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) return 0; /* EDMA was not active: not handled */ if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN)) return 0; /* FBS was not active: not handled */ if (!(edma_err_cause & EDMA_ERR_DEV)) return 0; /* non DEV error: not handled */ edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT; if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS)) return 0; /* other problems: not handled */ if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { /* * EDMA should NOT have self-disabled for this case. * If it did, then something is wrong elsewhere, * and we cannot handle it here. 
*/ if (edma_err_cause & EDMA_ERR_SELF_DIS) { ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n", __func__, edma_err_cause, pp->pp_flags); return 0; /* not handled */ } return mv_handle_fbs_ncq_dev_err(ap); } else { /* * EDMA should have self-disabled for this case. * If it did not, then something is wrong elsewhere, * and we cannot handle it here. */ if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) { ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n", __func__, edma_err_cause, pp->pp_flags); return 0; /* not handled */ } return mv_handle_fbs_non_ncq_dev_err(ap); } return 0; /* not handled */ } static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) { struct ata_eh_info *ehi = &ap->link.eh_info; char *when = "idle"; ata_ehi_clear_desc(ehi); if (edma_was_enabled) { when = "EDMA enabled"; } else { struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) when = "polling"; } ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when); ehi->err_mask |= AC_ERR_OTHER; ehi->action |= ATA_EH_RESET; ata_port_freeze(ap); } /** * mv_err_intr - Handle error interrupts on the port * @ap: ATA channel to manipulate * * Most cases require a full reset of the chip's state machine, * which also performs a COMRESET. * Also, if the port disabled DMA, update our cached copy to match. * * LOCKING: * Inherited from caller. */ static void mv_err_intr(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); u32 edma_err_cause, eh_freeze_mask, serr = 0; u32 fis_cause = 0; struct mv_port_priv *pp = ap->private_data; struct mv_host_priv *hpriv = ap->host->private_data; unsigned int action = 0, err_mask = 0; struct ata_eh_info *ehi = &ap->link.eh_info; struct ata_queued_cmd *qc; int abort = 0; /* * Read and clear the SError and err_cause bits. * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear * the FIS_IRQ_CAUSE register before clearing edma_err_cause. 
*/ sata_scr_read(&ap->link, SCR_ERROR, &serr); sata_scr_write_flush(&ap->link, SCR_ERROR, serr); edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE); if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { fis_cause = readl(port_mmio + FIS_IRQ_CAUSE); writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE); } writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE); if (edma_err_cause & EDMA_ERR_DEV) { /* * Device errors during FIS-based switching operation * require special handling. */ if (mv_handle_dev_err(ap, edma_err_cause)) return; } qc = mv_get_active_qc(ap); ata_ehi_clear_desc(ehi); ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x", edma_err_cause, pp->pp_flags); if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause); if (fis_cause & FIS_IRQ_CAUSE_AN) { u32 ec = edma_err_cause & ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT); sata_async_notification(ap); if (!ec) return; /* Just an AN; no need for the nukes */ ata_ehi_push_desc(ehi, "SDB notify"); } } /* * All generations share these EDMA error cause bits: */ if (edma_err_cause & EDMA_ERR_DEV) { err_mask |= AC_ERR_DEV; action |= ATA_EH_RESET; ata_ehi_push_desc(ehi, "dev error"); } if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR)) { err_mask |= AC_ERR_ATA_BUS; action |= ATA_EH_RESET; ata_ehi_push_desc(ehi, "parity error"); } if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { ata_ehi_hotplugged(ehi); ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? 
"dev disconnect" : "dev connect"); action |= ATA_EH_RESET; } /* * Gen-I has a different SELF_DIS bit, * different FREEZE bits, and no SERR bit: */ if (IS_GEN_I(hpriv)) { eh_freeze_mask = EDMA_EH_FREEZE_5; if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; ata_ehi_push_desc(ehi, "EDMA self-disable"); } } else { eh_freeze_mask = EDMA_EH_FREEZE; if (edma_err_cause & EDMA_ERR_SELF_DIS) { pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; ata_ehi_push_desc(ehi, "EDMA self-disable"); } if (edma_err_cause & EDMA_ERR_SERR) { ata_ehi_push_desc(ehi, "SError=%08x", serr); err_mask |= AC_ERR_ATA_BUS; action |= ATA_EH_RESET; } } if (!err_mask) { err_mask = AC_ERR_OTHER; action |= ATA_EH_RESET; } ehi->serror |= serr; ehi->action |= action; if (qc) qc->err_mask |= err_mask; else ehi->err_mask |= err_mask; if (err_mask == AC_ERR_DEV) { /* * Cannot do ata_port_freeze() here, * because it would kill PIO access, * which is needed for further diagnosis. */ mv_eh_freeze(ap); abort = 1; } else if (edma_err_cause & eh_freeze_mask) { /* * Note to self: ata_port_freeze() calls ata_port_abort() */ ata_port_freeze(ap); } else { abort = 1; } if (abort) { if (qc) ata_link_abort(qc->dev->link); else ata_port_abort(ap); } } static bool mv_process_crpb_response(struct ata_port *ap, struct mv_crpb *response, unsigned int tag, int ncq_enabled) { u8 ata_status; u16 edma_status = le16_to_cpu(response->flags); /* * edma_status from a response queue entry: * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only). * MSB is saved ATA status from command completion. */ if (!ncq_enabled) { u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV; if (err_cause) { /* * Error will be seen/handled by * mv_err_intr(). So do nothing at all here. 
*/
			return false;	/* error CRPB: defer to mv_err_intr() */
		}
	}
	/* high byte of the CRPB flags word is the saved ATA status */
	ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
	if (!ac_err_mask(ata_status))
		return true;
	/* else: leave it for mv_err_intr() */
	return false;
}

/*
 * mv_process_crpb_entries - complete qcs for newly arrived CRPB responses
 * @ap: port owning the EDMA response queue
 * @pp: its port-private data (holds the software copy of the out-index)
 *
 * Walk the response queue from the last position we processed
 * (pp->resp_idx) up to the hardware's current in-pointer, collect a
 * bitmask of tags that completed without error, hand that mask to
 * ata_qc_complete_multiple(), and finally write our new out-pointer
 * back to the hardware.  Called from mv_port_intr() under host->lock.
 */
static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;
	bool work_done = false;
	u32 done_mask = 0;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process new responses from since the last time we looked */
	while (in_index != pp->resp_idx) {
		unsigned int tag;
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];

		/* out-index wraps at the (power-of-two) queue depth */
		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;

		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		}
		if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
			done_mask |= 1 << tag;
		work_done = true;
	}

	if (work_done) {
		/* qc_active ^ done_mask == tags that remain outstanding */
		ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);

		/* Update the software queue position index in hardware */
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR);
	}
}

/*
 * mv_port_intr - per-port interrupt dispatch
 * @ap: port that interrupted
 * @port_cause: this port's DONE_IRQ/ERR_IRQ bits from the main cause register
 */
static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{
	struct mv_port_priv *pp;
	int edma_was_enabled;

	/*
	 * Grab a snapshot of the EDMA_EN flag setting,
	 * so that we have a consistent view for this port,
	 * even if something we call of our routines changes it.
	 */
	pp = ap->private_data;
	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	/*
	 * Process completed CRPB response(s) before other events.
*/ if (edma_was_enabled && (port_cause & DONE_IRQ)) { mv_process_crpb_entries(ap, pp); if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) mv_handle_fbs_ncq_dev_err(ap); } /* * Handle chip-reported errors, or continue on to handle PIO. */ if (unlikely(port_cause & ERR_IRQ)) { mv_err_intr(ap); } else if (!edma_was_enabled) { struct ata_queued_cmd *qc = mv_get_active_qc(ap); if (qc) ata_bmdma_port_intr(ap, qc); else mv_unexpected_intr(ap, edma_was_enabled); } } /** * mv_host_intr - Handle all interrupts on the given host controller * @host: host specific structure * @main_irq_cause: Main interrupt cause register for the chip. * * LOCKING: * Inherited from caller. */ static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) { struct mv_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->base, *hc_mmio; unsigned int handled = 0, port; /* If asserted, clear the "all ports" IRQ coalescing bit */ if (main_irq_cause & ALL_PORTS_COAL_DONE) writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE); for (port = 0; port < hpriv->n_ports; port++) { struct ata_port *ap = host->ports[port]; unsigned int p, shift, hardport, port_cause; MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); /* * Each hc within the host has its own hc_irq_cause register, * where the interrupting ports bits get ack'd. */ if (hardport == 0) { /* first port on this hc ? */ u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND; u32 port_mask, ack_irqs; /* * Skip this entire hc if nothing pending for any ports */ if (!hc_cause) { port += MV_PORTS_PER_HC - 1; continue; } /* * We don't need/want to read the hc_irq_cause register, * because doing so hurts performance, and * main_irq_cause already gives us everything we need. * * But we do have to *write* to the hc_irq_cause to ack * the ports that we are handling this time through. * * This requires that we create a bitmap for those * ports which interrupted us, and use that bitmap * to ack (only) those ports via hc_irq_cause. 
*/ ack_irqs = 0; if (hc_cause & PORTS_0_3_COAL_DONE) ack_irqs = HC_COAL_IRQ; for (p = 0; p < MV_PORTS_PER_HC; ++p) { if ((port + p) >= hpriv->n_ports) break; port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2); if (hc_cause & port_mask) ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; } hc_mmio = mv_hc_base_from_port(mmio, port); writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE); handled = 1; } /* * Handle interrupts signalled for this port: */ port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); if (port_cause) mv_port_intr(ap, port_cause); } return handled; } static int mv_pci_error(struct ata_host *host, void __iomem *mmio) { struct mv_host_priv *hpriv = host->private_data; struct ata_port *ap; struct ata_queued_cmd *qc; struct ata_eh_info *ehi; unsigned int i, err_mask, printed = 0; u32 err_cause; err_cause = readl(mmio + hpriv->irq_cause_offset); dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause); DPRINTK("All regs @ PCI error\n"); mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); writelfl(0, mmio + hpriv->irq_cause_offset); for (i = 0; i < host->n_ports; i++) { ap = host->ports[i]; if (!ata_link_offline(&ap->link)) { ehi = &ap->link.eh_info; ata_ehi_clear_desc(ehi); if (!printed++) ata_ehi_push_desc(ehi, "PCI err cause 0x%08x", err_cause); err_mask = AC_ERR_HOST_BUS; ehi->action = ATA_EH_RESET; qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc) qc->err_mask |= err_mask; else ehi->err_mask |= err_mask; ata_port_freeze(ap); } } return 1; /* handled */ } /** * mv_interrupt - Main interrupt event handler * @irq: unused * @dev_instance: private data; in this case the host structure * * Read the read only register to determine if any host * controllers have pending interrupts. If so, call lower level * routine to handle. Also check for PCI errors which are only * reported here. * * LOCKING: * This routine holds the host lock while processing pending * interrupts. 
*/
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int handled = 0;
	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
	u32 main_irq_cause, pending_irqs;

	spin_lock(&host->lock);

	/* for MSI: block new interrupts while in here */
	if (using_msi)
		mv_write_main_irq_mask(0, hpriv);

	main_irq_cause = readl(hpriv->main_irq_cause_addr);
	pending_irqs = main_irq_cause & hpriv->main_irq_mask;
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (pending_irqs && main_irq_cause != 0xffffffffU) {
		/* PCI errors are reported globally, not per-port (non-SoC) */
		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
			handled = mv_pci_error(host, hpriv->base);
		else
			handled = mv_host_intr(host, pending_irqs);
	}

	/* for MSI: unmask; interrupt cause bits will retrigger now */
	if (using_msi)
		mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

/*
 * mv5_scr_offset - map an SCR register index to its 50xx PHY-block offset
 *
 * Returns the byte offset for SCR_STATUS/SCR_ERROR/SCR_CONTROL, or
 * 0xffffffffU as an "unsupported register" sentinel for anything else.
 */
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

/* Read an SCR register on 50xx parts; -EINVAL for unsupported registers */
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

/* Write an SCR register on 50xx parts; -EINVAL for unsupported registers */
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return
-EINVAL;
}

/* Reset the 50xx PCI bus glue; early 5080 (rev 0) skips the ROM BAR tweak */
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	/* magic value from chip docs — TODO confirm against 50xx datasheet */
	writel(0x0fcfffff, mmio + FLASH_CTL);
}

/* Save per-port PHY pre-emphasis/amplitude so mv5_phy_errata can restore them */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/*
	 * NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0, which
	 * looks odd next to mv5_reset_bus()'s "|= (1 << 0)".  Verify against
	 * the chip documentation before changing — do not "fix" blindly.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

/* Apply 50xx PHY workarounds, then restore the saved pre-emp/amplitude */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0 errata: tweak LTMODE and PHY control */
		tmp = readl(phy_mmio + MV5_LTMODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LTMODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* clear the pre/amps fields, then re-insert the values saved
	 * earlier by mv5_read_preamp() */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Reset one 50xx port's channel and zero its EDMA queue/irq registers */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio
+ EDMA_CFG); ZERO(0x004); /* timer */ ZERO(0x008); /* irq err cause */ ZERO(0x00c); /* irq err mask */ ZERO(0x010); /* rq bah */ ZERO(0x014); /* rq inp */ ZERO(0x018); /* rq outp */ ZERO(0x01c); /* respq bah */ ZERO(0x024); /* respq outp */ ZERO(0x020); /* respq inp */ ZERO(0x02c); /* test control */ writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); } #undef ZERO #define ZERO(reg) writel(0, hc_mmio + (reg)) static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int hc) { void __iomem *hc_mmio = mv_hc_base(mmio, hc); u32 tmp; ZERO(0x00c); ZERO(0x010); ZERO(0x014); ZERO(0x018); tmp = readl(hc_mmio + 0x20); tmp &= 0x1c1c1c1c; tmp |= 0x03030303; writel(tmp, hc_mmio + 0x20); } #undef ZERO static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc) { unsigned int hc, port; for (hc = 0; hc < n_hc; hc++) { for (port = 0; port < MV_PORTS_PER_HC; port++) mv5_reset_hc_port(hpriv, mmio, (hc * MV_PORTS_PER_HC) + port); mv5_reset_one_hc(hpriv, mmio, hc); } return 0; } #undef ZERO #define ZERO(reg) writel(0, mmio + (reg)) static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) { struct mv_host_priv *hpriv = host->private_data; u32 tmp; tmp = readl(mmio + MV_PCI_MODE); tmp &= 0xff00ffff; writel(tmp, mmio + MV_PCI_MODE); ZERO(MV_PCI_DISC_TIMER); ZERO(MV_PCI_MSI_TRIGGER); writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); ZERO(MV_PCI_SERR_MASK); ZERO(hpriv->irq_cause_offset); ZERO(hpriv->irq_mask_offset); ZERO(MV_PCI_ERR_LOW_ADDRESS); ZERO(MV_PCI_ERR_HIGH_ADDRESS); ZERO(MV_PCI_ERR_ATTRIBUTE); ZERO(MV_PCI_ERR_COMMAND); } #undef ZERO static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) { u32 tmp; mv5_reset_flash(hpriv, mmio); tmp = readl(mmio + GPIO_PORT_CTL); tmp &= 0x3; tmp |= (1 << 5) | (1 << 6); writel(tmp, mmio + GPIO_PORT_CTL); } /** * mv6_reset_hc - Perform the 6xxx global soft reset * @mmio: base address of the HBA * * This routine only applies to 6xxx parts. 
* * LOCKING: * Inherited from caller. */ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc) { void __iomem *reg = mmio + PCI_MAIN_CMD_STS; int i, rc = 0; u32 t; /* Following procedure defined in PCI "main command and status * register" table. */ t = readl(reg); writel(t | STOP_PCI_MASTER, reg); for (i = 0; i < 1000; i++) { udelay(1); t = readl(reg); if (PCI_MASTER_EMPTY & t) break; } if (!(PCI_MASTER_EMPTY & t)) { printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); rc = 1; goto done; } /* set reset */ i = 5; do { writel(t | GLOB_SFT_RST, reg); t = readl(reg); udelay(1); } while (!(GLOB_SFT_RST & t) && (i-- > 0)); if (!(GLOB_SFT_RST & t)) { printk(KERN_ERR DRV_NAME ": can't set global reset\n"); rc = 1; goto done; } /* clear reset and *reenable the PCI master* (not mentioned in spec) */ i = 5; do { writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg); t = readl(reg); udelay(1); } while ((GLOB_SFT_RST & t) && (i-- > 0)); if (GLOB_SFT_RST & t) { printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); rc = 1; } done: return rc; } static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, void __iomem *mmio) { void __iomem *port_mmio; u32 tmp; tmp = readl(mmio + RESET_CFG); if ((tmp & (1 << 0)) == 0) { hpriv->signal[idx].amps = 0x7 << 8; hpriv->signal[idx].pre = 0x1 << 5; return; } port_mmio = mv_port_base(mmio, idx); tmp = readl(port_mmio + PHY_MODE2); hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ } static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) { writel(0x00000060, mmio + GPIO_PORT_CTL); } static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port) { void __iomem *port_mmio = mv_port_base(mmio, port); u32 hp_flags = hpriv->hp_flags; int fix_phy_mode2 = hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); int fix_phy_mode4 = hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); u32 m2, m3; if 
(fix_phy_mode2) { m2 = readl(port_mmio + PHY_MODE2); m2 &= ~(1 << 16); m2 |= (1 << 31); writel(m2, port_mmio + PHY_MODE2); udelay(200); m2 = readl(port_mmio + PHY_MODE2); m2 &= ~((1 << 16) | (1 << 31)); writel(m2, port_mmio + PHY_MODE2); udelay(200); } /* * Gen-II/IIe PHY_MODE3 errata RM#2: * Achieves better receiver noise performance than the h/w default: */ m3 = readl(port_mmio + PHY_MODE3); m3 = (m3 & 0x1f) | (0x5555601 << 5); /* Guideline 88F5182 (GL# SATA-S11) */ if (IS_SOC(hpriv)) m3 &= ~0x1c; if (fix_phy_mode4) { u32 m4 = readl(port_mmio + PHY_MODE4); /* * Enforce reserved-bit restrictions on GenIIe devices only. * For earlier chipsets, force only the internal config field * (workaround for errata FEr SATA#10 part 1). */ if (IS_GEN_IIE(hpriv)) m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES; else m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE; writel(m4, port_mmio + PHY_MODE4); } /* * Workaround for 60x1-B2 errata SATA#13: * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3, * so we must always rewrite PHY_MODE3 after PHY_MODE4. * Or ensure we use writelfl() when writing PHY_MODE4. 
*/ writel(m3, port_mmio + PHY_MODE3); /* Revert values of pre-emphasis and signal amps to the saved ones */ m2 = readl(port_mmio + PHY_MODE2); m2 &= ~MV_M2_PREAMP_MASK; m2 |= hpriv->signal[port].amps; m2 |= hpriv->signal[port].pre; m2 &= ~(1 << 16); /* according to mvSata 3.6.1, some IIE values are fixed */ if (IS_GEN_IIE(hpriv)) { m2 &= ~0xC30FF01F; m2 |= 0x0000900F; } writel(m2, port_mmio + PHY_MODE2); } /* TODO: use the generic LED interface to configure the SATA Presence */ /* & Acitivy LEDs on the board */ static void mv_soc_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) { return; } static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, void __iomem *mmio) { void __iomem *port_mmio; u32 tmp; port_mmio = mv_port_base(mmio, idx); tmp = readl(port_mmio + PHY_MODE2); hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ } #undef ZERO #define ZERO(reg) writel(0, port_mmio + (reg)) static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port) { void __iomem *port_mmio = mv_port_base(mmio, port); mv_reset_channel(hpriv, mmio, port); ZERO(0x028); /* command */ writel(0x101f, port_mmio + EDMA_CFG); ZERO(0x004); /* timer */ ZERO(0x008); /* irq err cause */ ZERO(0x00c); /* irq err mask */ ZERO(0x010); /* rq bah */ ZERO(0x014); /* rq inp */ ZERO(0x018); /* rq outp */ ZERO(0x01c); /* respq bah */ ZERO(0x024); /* respq outp */ ZERO(0x020); /* respq inp */ ZERO(0x02c); /* test control */ writel(0x800, port_mmio + EDMA_IORDY_TMOUT); } #undef ZERO #define ZERO(reg) writel(0, hc_mmio + (reg)) static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio) { void __iomem *hc_mmio = mv_hc_base(mmio, 0); ZERO(0x00c); ZERO(0x010); ZERO(0x014); } #undef ZERO static int mv_soc_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc) { unsigned int port; for (port = 0; port < hpriv->n_ports; port++) mv_soc_reset_hc_port(hpriv, mmio, 
port);
	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

/* SoC variant: no flash controller, nothing to do */
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio)
{
	return;
}

/* SoC variant: no PCI bus, nothing to do */
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}

/* PHY tuning for the 65nm SoC parts (detected via soc_is_65n()) */
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);
	u32 reg;

	reg = readl(port_mmio + PHY_MODE3);
	reg &= ~(0x3 << 27);	/* SELMUPF (bits 28:27) to 1 */
	reg |= (0x1 << 27);
	reg &= ~(0x3 << 29);	/* SELMUPI (bits 30:29) to 1 */
	reg |= (0x1 << 29);
	writel(reg, port_mmio + PHY_MODE3);

	reg = readl(port_mmio + PHY_MODE4);
	reg &= ~0x1;	/* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
	reg |= (0x1 << 16);
	writel(reg, port_mmio + PHY_MODE4);

	reg = readl(port_mmio + PHY_MODE9_GEN2);
	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
	reg |= 0x8;
	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
	writel(reg, port_mmio + PHY_MODE9_GEN2);

	reg = readl(port_mmio + PHY_MODE9_GEN1);
	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
	reg |= 0x8;
	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
	writel(reg, port_mmio + PHY_MODE9_GEN1);
}

/**
 * soc_is_65 - check if the soc is 65 nano device
 *
 * Detect the type of the SoC, this is done by reading the PHYCFG_OFS
 * register, this register should contain non-zero value and it exists only
 * in the 65 nano devices, when reading it from older devices we get 0.
*/
static bool soc_is_65n(struct mv_host_priv *hpriv)
{
	void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);

	if (readl(port0_mmio + PHYCFG_OFS))
		return true;	/* register present and non-zero: 65nm part */
	return false;
}

/* Program SATA_IFCFG; optionally enable gen2i (3.0 Gb/s) link speed */
static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifcfg = readl(port_mmio + SATA_IFCFG);

	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifcfg |= (1 << 7);	/* enable gen2i speed */
	writelfl(ifcfg, port_mmio + SATA_IFCFG);
}

/*
 * mv_reset_channel - hard-reset one SATA channel
 * @hpriv: host private data (for generation checks)
 * @mmio: host register base
 * @port_no: which port to reset
 */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/*
	 * The datasheet warns against setting EDMA_RESET when EDMA is active
	 * (but doesn't say what the problem might be). So we first try
	 * to disable the EDMA engine before doing the EDMA_RESET operation.
	 */
	mv_stop_edma_engine(port_mmio);
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
		mv_setup_ifcfg(port_mmio, 1);
	}
	/*
	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
	 * link, and physical layers. It resets all SATA interface registers
	 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
*/ writelfl(EDMA_RESET, port_mmio + EDMA_CMD); udelay(25); /* allow reset propagation */ writelfl(0, port_mmio + EDMA_CMD); hpriv->ops->phy_errata(hpriv, mmio, port_no); if (IS_GEN_I(hpriv)) mdelay(1); } static void mv_pmp_select(struct ata_port *ap, int pmp) { if (sata_pmp_supported(ap)) { void __iomem *port_mmio = mv_ap_base(ap); u32 reg = readl(port_mmio + SATA_IFCTL); int old = reg & 0xf; if (old != pmp) { reg = (reg & ~0xf) | pmp; writelfl(reg, port_mmio + SATA_IFCTL); } } } static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { mv_pmp_select(link->ap, sata_srst_pmp(link)); return sata_std_hardreset(link, class, deadline); } static int mv_softreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { mv_pmp_select(link->ap, sata_srst_pmp(link)); return ata_sff_softreset(link, class, deadline); } static int mv_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { struct ata_port *ap = link->ap; struct mv_host_priv *hpriv = ap->host->private_data; struct mv_port_priv *pp = ap->private_data; void __iomem *mmio = hpriv->base; int rc, attempts = 0, extra = 0; u32 sstatus; bool online; mv_reset_channel(hpriv, mmio, ap->port_no); pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; pp->pp_flags &= ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); /* Workaround for errata FEr SATA#10 (part 2) */ do { const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); rc = sata_link_hardreset(link, timing, deadline + extra, &online, NULL); rc = online ? 
-EAGAIN : rc; if (rc) return rc; sata_scr_read(link, SCR_STATUS, &sstatus); if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { /* Force 1.5gb/s link speed and try again */ mv_setup_ifcfg(mv_ap_base(ap), 0); if (time_after(jiffies + HZ, deadline)) extra = HZ; /* only extend it once, max */ } } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123); mv_save_cached_regs(ap); mv_edma_cfg(ap, 0, 0); return rc; } static void mv_eh_freeze(struct ata_port *ap) { mv_stop_edma(ap); mv_enable_port_irqs(ap, 0); } static void mv_eh_thaw(struct ata_port *ap) { struct mv_host_priv *hpriv = ap->host->private_data; unsigned int port = ap->port_no; unsigned int hardport = mv_hardport_from_port(port); void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port); void __iomem *port_mmio = mv_ap_base(ap); u32 hc_irq_cause; /* clear EDMA errors on this port */ writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE); /* clear pending irq events */ hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE); mv_enable_port_irqs(ap, ERR_IRQ); } /** * mv_port_init - Perform some early initialization on a single port. * @port: libata data structure storing shadow register addresses * @port_mmio: base address of the port * * Initialize shadow register mmio addresses, clear outstanding * interrupts on the port, and unmask interrupts for the future * start of the port. * * LOCKING: * Inherited from caller. 
*/ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) { void __iomem *serr, *shd_base = port_mmio + SHD_BLK; /* PIO related setup */ port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA); port->error_addr = port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR); port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT); port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL); port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM); port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH); port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE); port->status_addr = port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); /* special case: control/altstatus doesn't have ATA_REG_ address */ port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST; /* Clear any currently outstanding port interrupt conditions */ serr = port_mmio + mv_scr_offset(SCR_ERROR); writelfl(readl(serr), serr); writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE); /* unmask all non-transient EDMA error interrupts */ writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK); VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", readl(port_mmio + EDMA_CFG), readl(port_mmio + EDMA_ERR_IRQ_CAUSE), readl(port_mmio + EDMA_ERR_IRQ_MASK)); } static unsigned int mv_in_pcix_mode(struct ata_host *host) { struct mv_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->base; u32 reg; if (IS_SOC(hpriv) || !IS_PCIE(hpriv)) return 0; /* not PCI-X capable */ reg = readl(mmio + MV_PCI_MODE); if ((reg & MV_PCI_MODE_MASK) == 0) return 0; /* conventional PCI mode */ return 1; /* chip is in PCI-X mode */ } static int mv_pci_cut_through_okay(struct ata_host *host) { struct mv_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->base; u32 reg; if (!mv_in_pcix_mode(host)) { reg = readl(mmio + MV_PCI_COMMAND); if (reg & MV_PCI_COMMAND_MRDTRIG) return 0; /* not okay */ } return 1; /* okay */ } static void 
mv_60x1b2_errata_pci7(struct ata_host *host) { struct mv_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->base; /* workaround for 60x1-B2 errata PCI#7 */ if (mv_in_pcix_mode(host)) { u32 reg = readl(mmio + MV_PCI_COMMAND); writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND); } } static int mv_chip_id(struct ata_host *host, unsigned int board_idx) { struct pci_dev *pdev = to_pci_dev(host->dev); struct mv_host_priv *hpriv = host->private_data; u32 hp_flags = hpriv->hp_flags; switch (board_idx) { case chip_5080: hpriv->ops = &mv5xxx_ops; hp_flags |= MV_HP_GEN_I; switch (pdev->revision) { case 0x1: hp_flags |= MV_HP_ERRATA_50XXB0; break; case 0x3: hp_flags |= MV_HP_ERRATA_50XXB2; break; default: dev_warn(&pdev->dev, "Applying 50XXB2 workarounds to unknown rev\n"); hp_flags |= MV_HP_ERRATA_50XXB2; break; } break; case chip_504x: case chip_508x: hpriv->ops = &mv5xxx_ops; hp_flags |= MV_HP_GEN_I; switch (pdev->revision) { case 0x0: hp_flags |= MV_HP_ERRATA_50XXB0; break; case 0x3: hp_flags |= MV_HP_ERRATA_50XXB2; break; default: dev_warn(&pdev->dev, "Applying B2 workarounds to unknown rev\n"); hp_flags |= MV_HP_ERRATA_50XXB2; break; } break; case chip_604x: case chip_608x: hpriv->ops = &mv6xxx_ops; hp_flags |= MV_HP_GEN_II; switch (pdev->revision) { case 0x7: mv_60x1b2_errata_pci7(host); hp_flags |= MV_HP_ERRATA_60X1B2; break; case 0x9: hp_flags |= MV_HP_ERRATA_60X1C0; break; default: dev_warn(&pdev->dev, "Applying B2 workarounds to unknown rev\n"); hp_flags |= MV_HP_ERRATA_60X1B2; break; } break; case chip_7042: hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH; if (pdev->vendor == PCI_VENDOR_ID_TTI && (pdev->device == 0x2300 || pdev->device == 0x2310)) { /* * Highpoint RocketRAID PCIe 23xx series cards: * * Unconfigured drives are treated as "Legacy" * by the BIOS, and it overwrites sector 8 with * a "Lgcy" metadata block prior to Linux boot. 
* * Configured drives (RAID or JBOD) leave sector 8 * alone, but instead overwrite a high numbered * sector for the RAID metadata. This sector can * be determined exactly, by truncating the physical * drive capacity to a nice even GB value. * * RAID metadata is at: (dev->n_sectors & ~0xfffff) * * Warn the user, lest they think we're just buggy. */ printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID" " BIOS CORRUPTS DATA on all attached drives," " regardless of if/how they are configured." " BEWARE!\n"); printk(KERN_WARNING DRV_NAME ": For data safety, do not" " use sectors 8-9 on \"Legacy\" drives," " and avoid the final two gigabytes on" " all RocketRAID BIOS initialized drives.\n"); } /* drop through */ case chip_6042: hpriv->ops = &mv6xxx_ops; hp_flags |= MV_HP_GEN_IIE; if (board_idx == chip_6042 && mv_pci_cut_through_okay(host)) hp_flags |= MV_HP_CUT_THROUGH; switch (pdev->revision) { case 0x2: /* Rev.B0: the first/only public release */ hp_flags |= MV_HP_ERRATA_60X1C0; break; default: dev_warn(&pdev->dev, "Applying 60X1C0 workarounds to unknown rev\n"); hp_flags |= MV_HP_ERRATA_60X1C0; break; } break; case chip_soc: if (soc_is_65n(hpriv)) hpriv->ops = &mv_soc_65n_ops; else hpriv->ops = &mv_soc_ops; hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE | MV_HP_ERRATA_60X1C0; break; default: dev_err(host->dev, "BUG: invalid board index %u\n", board_idx); return 1; } hpriv->hp_flags = hp_flags; if (hp_flags & MV_HP_PCIE) { hpriv->irq_cause_offset = PCIE_IRQ_CAUSE; hpriv->irq_mask_offset = PCIE_IRQ_MASK; hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; } else { hpriv->irq_cause_offset = PCI_IRQ_CAUSE; hpriv->irq_mask_offset = PCI_IRQ_MASK; hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; } return 0; } /** * mv_init_host - Perform some early initialization of the host. * @host: ATA host to initialize * * If possible, do an early global reset of the host. Then do * our port init and clear/unmask all/relevant host interrupts. * * LOCKING: * Inherited from caller. 
*/ static int mv_init_host(struct ata_host *host) { int rc = 0, n_hc, port, hc; struct mv_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->base; rc = mv_chip_id(host, hpriv->board_idx); if (rc) goto done; if (IS_SOC(hpriv)) { hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE; hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK; } else { hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE; hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK; } /* initialize shadow irq mask with register's value */ hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr); /* global interrupt mask: 0 == mask everything */ mv_set_main_irq_mask(host, ~0, 0); n_hc = mv_get_hc_count(host->ports[0]->flags); for (port = 0; port < host->n_ports; port++) if (hpriv->ops->read_preamp) hpriv->ops->read_preamp(hpriv, port, mmio); rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc); if (rc) goto done; hpriv->ops->reset_flash(hpriv, mmio); hpriv->ops->reset_bus(host, mmio); hpriv->ops->enable_leds(hpriv, mmio); for (port = 0; port < host->n_ports; port++) { struct ata_port *ap = host->ports[port]; void __iomem *port_mmio = mv_port_base(mmio, port); mv_port_init(&ap->ioaddr, port_mmio); } for (hc = 0; hc < n_hc; hc++) { void __iomem *hc_mmio = mv_hc_base(mmio, hc); VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " "(before clear)=0x%08x\n", hc, readl(hc_mmio + HC_CFG), readl(hc_mmio + HC_IRQ_CAUSE)); /* Clear any currently outstanding hc interrupt conditions */ writelfl(0, hc_mmio + HC_IRQ_CAUSE); } if (!IS_SOC(hpriv)) { /* Clear any currently outstanding host interrupt conditions */ writelfl(0, mmio + hpriv->irq_cause_offset); /* and unmask interrupt generation for host regs */ writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset); } /* * enable only global host interrupts for now. * The per-port interrupts get done later as ports are set up. 
*/ mv_set_main_irq_mask(host, 0, PCI_ERR); mv_set_irq_coalescing(host, irq_coalescing_io_count, irq_coalescing_usecs); done: return rc; } static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev) { hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ, MV_CRQB_Q_SZ, 0); if (!hpriv->crqb_pool) return -ENOMEM; hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ, MV_CRPB_Q_SZ, 0); if (!hpriv->crpb_pool) return -ENOMEM; hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ, MV_SG_TBL_SZ, 0); if (!hpriv->sg_tbl_pool) return -ENOMEM; return 0; } static void mv_conf_mbus_windows(struct mv_host_priv *hpriv, const struct mbus_dram_target_info *dram) { int i; for (i = 0; i < 4; i++) { writel(0, hpriv->base + WINDOW_CTRL(i)); writel(0, hpriv->base + WINDOW_BASE(i)); } for (i = 0; i < dram->num_cs; i++) { const struct mbus_dram_window *cs = dram->cs + i; writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) | (dram->mbus_dram_target_id << 4) | 1, hpriv->base + WINDOW_CTRL(i)); writel(cs->base, hpriv->base + WINDOW_BASE(i)); } } /** * mv_platform_probe - handle a positive probe of an soc Marvell * host * @pdev: platform device found * * LOCKING: * Inherited from caller. */ static int mv_platform_probe(struct platform_device *pdev) { const struct mv_sata_platform_data *mv_platform_data; const struct mbus_dram_target_info *dram; const struct ata_port_info *ppi[] = { &mv_port_info[chip_soc], NULL }; struct ata_host *host; struct mv_host_priv *hpriv; struct resource *res; int n_ports = 0, irq = 0; int rc; #if defined(CONFIG_HAVE_CLK) int port; #endif ata_print_version_once(&pdev->dev, DRV_VERSION); /* * Simple resource validation .. 
*/ if (unlikely(pdev->num_resources != 2)) { dev_err(&pdev->dev, "invalid number of resources\n"); return -EINVAL; } /* * Get the register base first */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) return -EINVAL; /* allocate host */ if (pdev->dev.of_node) { of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports); irq = irq_of_parse_and_map(pdev->dev.of_node, 0); } else { mv_platform_data = pdev->dev.platform_data; n_ports = mv_platform_data->n_ports; irq = platform_get_irq(pdev, 0); } host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); if (!host || !hpriv) return -ENOMEM; #if defined(CONFIG_HAVE_CLK) hpriv->port_clks = devm_kzalloc(&pdev->dev, sizeof(struct clk *) * n_ports, GFP_KERNEL); if (!hpriv->port_clks) return -ENOMEM; #endif host->private_data = hpriv; hpriv->n_ports = n_ports; hpriv->board_idx = chip_soc; host->iomap = NULL; hpriv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); hpriv->base -= SATAHC0_REG_BASE; #if defined(CONFIG_HAVE_CLK) hpriv->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(hpriv->clk)) dev_notice(&pdev->dev, "cannot get optional clkdev\n"); else clk_prepare_enable(hpriv->clk); for (port = 0; port < n_ports; port++) { char port_number[16]; sprintf(port_number, "%d", port); hpriv->port_clks[port] = clk_get(&pdev->dev, port_number); if (!IS_ERR(hpriv->port_clks[port])) clk_prepare_enable(hpriv->port_clks[port]); } #endif /* * (Re-)program MBUS remapping windows if we are asked to. */ dram = mv_mbus_dram_info(); if (dram) mv_conf_mbus_windows(hpriv, dram); rc = mv_create_dma_pools(hpriv, &pdev->dev); if (rc) goto err; /* * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be * updated in the LP_PHY_CTL register. 
*/ if (pdev->dev.of_node && of_device_is_compatible(pdev->dev.of_node, "marvell,armada-370-sata")) hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL; /* initialize adapter */ rc = mv_init_host(host); if (rc) goto err; dev_info(&pdev->dev, "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH, host->n_ports); rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht); if (!rc) return 0; err: #if defined(CONFIG_HAVE_CLK) if (!IS_ERR(hpriv->clk)) { clk_disable_unprepare(hpriv->clk); clk_put(hpriv->clk); } for (port = 0; port < n_ports; port++) { if (!IS_ERR(hpriv->port_clks[port])) { clk_disable_unprepare(hpriv->port_clks[port]); clk_put(hpriv->port_clks[port]); } } #endif return rc; } /* * * mv_platform_remove - unplug a platform interface * @pdev: platform device * * A platform bus SATA device has been unplugged. Perform the needed * cleanup. Also called on module unload for any active devices. */ static int mv_platform_remove(struct platform_device *pdev) { struct ata_host *host = platform_get_drvdata(pdev); #if defined(CONFIG_HAVE_CLK) struct mv_host_priv *hpriv = host->private_data; int port; #endif ata_host_detach(host); #if defined(CONFIG_HAVE_CLK) if (!IS_ERR(hpriv->clk)) { clk_disable_unprepare(hpriv->clk); clk_put(hpriv->clk); } for (port = 0; port < host->n_ports; port++) { if (!IS_ERR(hpriv->port_clks[port])) { clk_disable_unprepare(hpriv->port_clks[port]); clk_put(hpriv->port_clks[port]); } } #endif return 0; } #ifdef CONFIG_PM static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state) { struct ata_host *host = platform_get_drvdata(pdev); if (host) return ata_host_suspend(host, state); else return 0; } static int mv_platform_resume(struct platform_device *pdev) { struct ata_host *host = platform_get_drvdata(pdev); const struct mbus_dram_target_info *dram; int ret; if (host) { struct mv_host_priv *hpriv = host->private_data; /* * (Re-)program MBUS remapping windows if we are asked to. 
*/ dram = mv_mbus_dram_info(); if (dram) mv_conf_mbus_windows(hpriv, dram); /* initialize adapter */ ret = mv_init_host(host); if (ret) { printk(KERN_ERR DRV_NAME ": Error during HW init\n"); return ret; } ata_host_resume(host); } return 0; } #else #define mv_platform_suspend NULL #define mv_platform_resume NULL #endif #ifdef CONFIG_OF static struct of_device_id mv_sata_dt_ids[] = { { .compatible = "marvell,armada-370-sata", }, { .compatible = "marvell,orion-sata", }, {}, }; MODULE_DEVICE_TABLE(of, mv_sata_dt_ids); #endif static struct platform_driver mv_platform_driver = { .probe = mv_platform_probe, .remove = mv_platform_remove, .suspend = mv_platform_suspend, .resume = mv_platform_resume, .driver = { .name = DRV_NAME, .owner = THIS_MODULE, .of_match_table = of_match_ptr(mv_sata_dt_ids), }, }; #ifdef CONFIG_PCI static int mv_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); #ifdef CONFIG_PM static int mv_pci_device_resume(struct pci_dev *pdev); #endif static struct pci_driver mv_pci_driver = { .name = DRV_NAME, .id_table = mv_pci_tbl, .probe = mv_pci_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = mv_pci_device_resume, #endif }; /* move to PCI layer or libata core? */ static int pci_go_64(struct pci_dev *pdev) { int rc; if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (rc) { rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "64-bit DMA enable failed\n"); return rc; } } } else { rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "32-bit DMA enable failed\n"); return rc; } rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n"); return rc; } } return rc; } /** * mv_print_info - Dump key info to kernel log for perusal. * @host: ATA host to print info about * * FIXME: complete this. 
* * LOCKING: * Inherited from caller. */ static void mv_print_info(struct ata_host *host) { struct pci_dev *pdev = to_pci_dev(host->dev); struct mv_host_priv *hpriv = host->private_data; u8 scc; const char *scc_s, *gen; /* Use this to determine the HW stepping of the chip so we know * what errata to workaround */ pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc); if (scc == 0) scc_s = "SCSI"; else if (scc == 0x01) scc_s = "RAID"; else scc_s = "?"; if (IS_GEN_I(hpriv)) gen = "I"; else if (IS_GEN_II(hpriv)) gen = "II"; else if (IS_GEN_IIE(hpriv)) gen = "IIE"; else gen = "?"; dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n", gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports, scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); } /** * mv_pci_init_one - handle a positive probe of a PCI Marvell host * @pdev: PCI device found * @ent: PCI device ID entry for the matched host * * LOCKING: * Inherited from caller. */ static int mv_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned int board_idx = (unsigned int)ent->driver_data; const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL }; struct ata_host *host; struct mv_host_priv *hpriv; int n_ports, port, rc; ata_print_version_once(&pdev->dev, DRV_VERSION); /* allocate host */ n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC; host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); if (!host || !hpriv) return -ENOMEM; host->private_data = hpriv; hpriv->n_ports = n_ports; hpriv->board_idx = board_idx; /* acquire resources */ rc = pcim_enable_device(pdev); if (rc) return rc; rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); if (rc) return rc; host->iomap = pcim_iomap_table(pdev); hpriv->base = host->iomap[MV_PRIMARY_BAR]; rc = pci_go_64(pdev); if (rc) return rc; rc = mv_create_dma_pools(hpriv, &pdev->dev); if (rc) return rc; for (port = 0; 
port < host->n_ports; port++) { struct ata_port *ap = host->ports[port]; void __iomem *port_mmio = mv_port_base(hpriv->base, port); unsigned int offset = port_mmio - hpriv->base; ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); } /* initialize adapter */ rc = mv_init_host(host); if (rc) return rc; /* Enable message-switched interrupts, if requested */ if (msi && pci_enable_msi(pdev) == 0) hpriv->hp_flags |= MV_HP_FLAG_MSI; mv_dump_pci_cfg(pdev, 0x68); mv_print_info(host); pci_set_master(pdev); pci_try_set_mwi(pdev); return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED, IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht); } #ifdef CONFIG_PM static int mv_pci_device_resume(struct pci_dev *pdev) { struct ata_host *host = pci_get_drvdata(pdev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; /* initialize adapter */ rc = mv_init_host(host); if (rc) return rc; ata_host_resume(host); return 0; } #endif #endif static int mv_platform_probe(struct platform_device *pdev); static int mv_platform_remove(struct platform_device *pdev); static int __init mv_init(void) { int rc = -ENODEV; #ifdef CONFIG_PCI rc = pci_register_driver(&mv_pci_driver); if (rc < 0) return rc; #endif rc = platform_driver_register(&mv_platform_driver); #ifdef CONFIG_PCI if (rc < 0) pci_unregister_driver(&mv_pci_driver); #endif return rc; } static void __exit mv_exit(void) { #ifdef CONFIG_PCI pci_unregister_driver(&mv_pci_driver); #endif platform_driver_unregister(&mv_platform_driver); } MODULE_AUTHOR("Brett Russ"); MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, mv_pci_tbl); MODULE_VERSION(DRV_VERSION); MODULE_ALIAS("platform:" DRV_NAME); module_init(mv_init); module_exit(mv_exit);
gpl-2.0
lzh6710/ubuntu-trusty
fs/hfsplus/attributes.c
1832
8427
/* * linux/fs/hfsplus/attributes.c * * Vyacheslav Dubeyko <slava@dubeyko.com> * * Handling of records in attributes tree */ #include "hfsplus_fs.h" #include "hfsplus_raw.h" static struct kmem_cache *hfsplus_attr_tree_cachep; int __init hfsplus_create_attr_tree_cache(void) { if (hfsplus_attr_tree_cachep) return -EEXIST; hfsplus_attr_tree_cachep = kmem_cache_create("hfsplus_attr_cache", sizeof(hfsplus_attr_entry), 0, SLAB_HWCACHE_ALIGN, NULL); if (!hfsplus_attr_tree_cachep) return -ENOMEM; return 0; } void hfsplus_destroy_attr_tree_cache(void) { kmem_cache_destroy(hfsplus_attr_tree_cachep); } int hfsplus_attr_bin_cmp_key(const hfsplus_btree_key *k1, const hfsplus_btree_key *k2) { __be32 k1_cnid, k2_cnid; k1_cnid = k1->attr.cnid; k2_cnid = k2->attr.cnid; if (k1_cnid != k2_cnid) return be32_to_cpu(k1_cnid) < be32_to_cpu(k2_cnid) ? -1 : 1; return hfsplus_strcmp( (const struct hfsplus_unistr *)&k1->attr.key_name, (const struct hfsplus_unistr *)&k2->attr.key_name); } int hfsplus_attr_build_key(struct super_block *sb, hfsplus_btree_key *key, u32 cnid, const char *name) { int len; memset(key, 0, sizeof(struct hfsplus_attr_key)); key->attr.cnid = cpu_to_be32(cnid); if (name) { int res = hfsplus_asc2uni(sb, (struct hfsplus_unistr *)&key->attr.key_name, HFSPLUS_ATTR_MAX_STRLEN, name, strlen(name)); if (res) return res; len = be16_to_cpu(key->attr.key_name.length); } else { key->attr.key_name.length = 0; len = 0; } /* The length of the key, as stored in key_len field, does not include * the size of the key_len field itself. * So, offsetof(hfsplus_attr_key, key_name) is a trick because * it takes into consideration key_len field (__be16) of * hfsplus_attr_key structure instead of length field (__be16) of * hfsplus_attr_unistr structure. 
*/ key->key_len = cpu_to_be16(offsetof(struct hfsplus_attr_key, key_name) + 2 * len); return 0; } hfsplus_attr_entry *hfsplus_alloc_attr_entry(void) { return kmem_cache_alloc(hfsplus_attr_tree_cachep, GFP_KERNEL); } void hfsplus_destroy_attr_entry(hfsplus_attr_entry *entry) { if (entry) kmem_cache_free(hfsplus_attr_tree_cachep, entry); } #define HFSPLUS_INVALID_ATTR_RECORD -1 static int hfsplus_attr_build_record(hfsplus_attr_entry *entry, int record_type, u32 cnid, const void *value, size_t size) { if (record_type == HFSPLUS_ATTR_FORK_DATA) { /* * Mac OS X supports only inline data attributes. * Do nothing */ memset(entry, 0, sizeof(*entry)); return sizeof(struct hfsplus_attr_fork_data); } else if (record_type == HFSPLUS_ATTR_EXTENTS) { /* * Mac OS X supports only inline data attributes. * Do nothing. */ memset(entry, 0, sizeof(*entry)); return sizeof(struct hfsplus_attr_extents); } else if (record_type == HFSPLUS_ATTR_INLINE_DATA) { u16 len; memset(entry, 0, sizeof(struct hfsplus_attr_inline_data)); entry->inline_data.record_type = cpu_to_be32(record_type); if (size <= HFSPLUS_MAX_INLINE_DATA_SIZE) len = size; else return HFSPLUS_INVALID_ATTR_RECORD; entry->inline_data.length = cpu_to_be16(len); memcpy(entry->inline_data.raw_bytes, value, len); /* * Align len on two-byte boundary. * It needs to add pad byte if we have odd len. */ len = round_up(len, 2); return offsetof(struct hfsplus_attr_inline_data, raw_bytes) + len; } else /* invalid input */ memset(entry, 0, sizeof(*entry)); return HFSPLUS_INVALID_ATTR_RECORD; } int hfsplus_find_attr(struct super_block *sb, u32 cnid, const char *name, struct hfs_find_data *fd) { int err = 0; hfs_dbg(ATTR_MOD, "find_attr: %s,%d\n", name ? 
name : NULL, cnid); if (!HFSPLUS_SB(sb)->attr_tree) { pr_err("attributes file doesn't exist\n"); return -EINVAL; } if (name) { err = hfsplus_attr_build_key(sb, fd->search_key, cnid, name); if (err) goto failed_find_attr; err = hfs_brec_find(fd, hfs_find_rec_by_key); if (err) goto failed_find_attr; } else { err = hfsplus_attr_build_key(sb, fd->search_key, cnid, NULL); if (err) goto failed_find_attr; err = hfs_brec_find(fd, hfs_find_1st_rec_by_cnid); if (err) goto failed_find_attr; } failed_find_attr: return err; } int hfsplus_attr_exists(struct inode *inode, const char *name) { int err = 0; struct super_block *sb = inode->i_sb; struct hfs_find_data fd; if (!HFSPLUS_SB(sb)->attr_tree) return 0; err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd); if (err) return 0; err = hfsplus_find_attr(sb, inode->i_ino, name, &fd); if (err) goto attr_not_found; hfs_find_exit(&fd); return 1; attr_not_found: hfs_find_exit(&fd); return 0; } int hfsplus_create_attr(struct inode *inode, const char *name, const void *value, size_t size) { struct super_block *sb = inode->i_sb; struct hfs_find_data fd; hfsplus_attr_entry *entry_ptr; int entry_size; int err; hfs_dbg(ATTR_MOD, "create_attr: %s,%ld\n", name ? name : NULL, inode->i_ino); if (!HFSPLUS_SB(sb)->attr_tree) { pr_err("attributes file doesn't exist\n"); return -EINVAL; } entry_ptr = hfsplus_alloc_attr_entry(); if (!entry_ptr) return -ENOMEM; err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd); if (err) goto failed_init_create_attr; if (name) { err = hfsplus_attr_build_key(sb, fd.search_key, inode->i_ino, name); if (err) goto failed_create_attr; } else { err = -EINVAL; goto failed_create_attr; } /* Mac OS X supports only inline data attributes. 
*/ entry_size = hfsplus_attr_build_record(entry_ptr, HFSPLUS_ATTR_INLINE_DATA, inode->i_ino, value, size); if (entry_size == HFSPLUS_INVALID_ATTR_RECORD) { err = -EINVAL; goto failed_create_attr; } err = hfs_brec_find(&fd, hfs_find_rec_by_key); if (err != -ENOENT) { if (!err) err = -EEXIST; goto failed_create_attr; } err = hfs_brec_insert(&fd, entry_ptr, entry_size); if (err) goto failed_create_attr; hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ATTR_DIRTY); failed_create_attr: hfs_find_exit(&fd); failed_init_create_attr: hfsplus_destroy_attr_entry(entry_ptr); return err; } static int __hfsplus_delete_attr(struct inode *inode, u32 cnid, struct hfs_find_data *fd) { int err = 0; __be32 found_cnid, record_type; hfs_bnode_read(fd->bnode, &found_cnid, fd->keyoffset + offsetof(struct hfsplus_attr_key, cnid), sizeof(__be32)); if (cnid != be32_to_cpu(found_cnid)) return -ENOENT; hfs_bnode_read(fd->bnode, &record_type, fd->entryoffset, sizeof(record_type)); switch (be32_to_cpu(record_type)) { case HFSPLUS_ATTR_INLINE_DATA: /* All is OK. Do nothing. */ break; case HFSPLUS_ATTR_FORK_DATA: case HFSPLUS_ATTR_EXTENTS: pr_err("only inline data xattr are supported\n"); return -EOPNOTSUPP; default: pr_err("invalid extended attribute record\n"); return -ENOENT; } err = hfs_brec_remove(fd); if (err) return err; hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ATTR_DIRTY); return err; } int hfsplus_delete_attr(struct inode *inode, const char *name) { int err = 0; struct super_block *sb = inode->i_sb; struct hfs_find_data fd; hfs_dbg(ATTR_MOD, "delete_attr: %s,%ld\n", name ? 
name : NULL, inode->i_ino); if (!HFSPLUS_SB(sb)->attr_tree) { pr_err("attributes file doesn't exist\n"); return -EINVAL; } err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd); if (err) return err; if (name) { err = hfsplus_attr_build_key(sb, fd.search_key, inode->i_ino, name); if (err) goto out; } else { pr_err("invalid extended attribute name\n"); err = -EINVAL; goto out; } err = hfs_brec_find(&fd, hfs_find_rec_by_key); if (err) goto out; err = __hfsplus_delete_attr(inode, inode->i_ino, &fd); if (err) goto out; out: hfs_find_exit(&fd); return err; } int hfsplus_delete_all_attrs(struct inode *dir, u32 cnid) { int err = 0; struct hfs_find_data fd; hfs_dbg(ATTR_MOD, "delete_all_attrs: %d\n", cnid); if (!HFSPLUS_SB(dir->i_sb)->attr_tree) { pr_err("attributes file doesn't exist\n"); return -EINVAL; } err = hfs_find_init(HFSPLUS_SB(dir->i_sb)->attr_tree, &fd); if (err) return err; for (;;) { err = hfsplus_find_attr(dir->i_sb, cnid, NULL, &fd); if (err) { if (err != -ENOENT) pr_err("xattr search failed\n"); goto end_delete_all; } err = __hfsplus_delete_attr(dir, cnid, &fd); if (err) goto end_delete_all; } end_delete_all: hfs_find_exit(&fd); return err; }
gpl-2.0
alesaiko/UK-PRO5
drivers/mfd/ab3100-otp.c
2088
6775
/* * drivers/mfd/ab3100_otp.c * * Copyright (C) 2007-2009 ST-Ericsson AB * License terms: GNU General Public License (GPL) version 2 * Driver to read out OTP from the AB3100 Mixed-signal circuit * Author: Linus Walleij <linus.walleij@stericsson.com> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mfd/abx500.h> #include <linux/debugfs.h> #include <linux/seq_file.h> /* The OTP registers */ #define AB3100_OTP0 0xb0 #define AB3100_OTP1 0xb1 #define AB3100_OTP2 0xb2 #define AB3100_OTP3 0xb3 #define AB3100_OTP4 0xb4 #define AB3100_OTP5 0xb5 #define AB3100_OTP6 0xb6 #define AB3100_OTP7 0xb7 #define AB3100_OTPP 0xbf /** * struct ab3100_otp * @dev containing device * @locked whether the OTP is locked, after locking, no more bits * can be changed but before locking it is still possible * to change bits from 1->0. * @freq clocking frequency for the OTP, this frequency is either * 32768Hz or 1MHz/30 * @paf product activation flag, indicates whether this is a real * product (paf true) or a lab board etc (paf false) * @imeich if this is set it is possible to override the * IMEI number found in the tac, fac and svn fields with * (secured) software * @cid customer ID * @tac type allocation code of the IMEI * @fac final assembly code of the IMEI * @svn software version number of the IMEI * @debugfs a debugfs file used when dumping to file */ struct ab3100_otp { struct device *dev; bool locked; u32 freq; bool paf; bool imeich; u16 cid:14; u32 tac:20; u8 fac; u32 svn:20; struct dentry *debugfs; }; static int __init ab3100_otp_read(struct ab3100_otp *otp) { u8 otpval[8]; u8 otpp; int err; err = abx500_get_register_interruptible(otp->dev, 0, AB3100_OTPP, &otpp); if (err) { dev_err(otp->dev, "unable to read OTPP register\n"); return err; } err = abx500_get_register_page_interruptible(otp->dev, 0, AB3100_OTP0, otpval, 8); if (err) { dev_err(otp->dev, "unable to read OTP register 
page\n"); return err; } /* Cache OTP properties, they never change by nature */ otp->locked = (otpp & 0x80); otp->freq = (otpp & 0x40) ? 32768 : 34100; otp->paf = (otpval[1] & 0x80); otp->imeich = (otpval[1] & 0x40); otp->cid = ((otpval[1] << 8) | otpval[0]) & 0x3fff; otp->tac = ((otpval[4] & 0x0f) << 16) | (otpval[3] << 8) | otpval[2]; otp->fac = ((otpval[5] & 0x0f) << 4) | (otpval[4] >> 4); otp->svn = (otpval[7] << 12) | (otpval[6] << 4) | (otpval[5] >> 4); return 0; } /* * This is a simple debugfs human-readable file that dumps out * the contents of the OTP. */ #ifdef CONFIG_DEBUG_FS static int ab3100_show_otp(struct seq_file *s, void *v) { struct ab3100_otp *otp = s->private; seq_printf(s, "OTP is %s\n", otp->locked ? "LOCKED" : "UNLOCKED"); seq_printf(s, "OTP clock switch startup is %uHz\n", otp->freq); seq_printf(s, "PAF is %s\n", otp->paf ? "SET" : "NOT SET"); seq_printf(s, "IMEI is %s\n", otp->imeich ? "CHANGEABLE" : "NOT CHANGEABLE"); seq_printf(s, "CID: 0x%04x (decimal: %d)\n", otp->cid, otp->cid); seq_printf(s, "IMEI: %u-%u-%u\n", otp->tac, otp->fac, otp->svn); return 0; } static int ab3100_otp_open(struct inode *inode, struct file *file) { return single_open(file, ab3100_show_otp, inode->i_private); } static const struct file_operations ab3100_otp_operations = { .open = ab3100_otp_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init ab3100_otp_init_debugfs(struct device *dev, struct ab3100_otp *otp) { otp->debugfs = debugfs_create_file("ab3100_otp", S_IFREG | S_IRUGO, NULL, otp, &ab3100_otp_operations); if (!otp->debugfs) { dev_err(dev, "AB3100 debugfs OTP file registration failed!\n"); return -ENOENT; } return 0; } static void __exit ab3100_otp_exit_debugfs(struct ab3100_otp *otp) { debugfs_remove(otp->debugfs); } #else /* Compile this out if debugfs not selected */ static inline int __init ab3100_otp_init_debugfs(struct device *dev, struct ab3100_otp *otp) { return 0; } static inline void __exit 
ab3100_otp_exit_debugfs(struct ab3100_otp *otp) { } #endif #define SHOW_AB3100_ATTR(name) \ static ssize_t ab3100_otp_##name##_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ {\ struct ab3100_otp *otp = dev_get_drvdata(dev); \ return sprintf(buf, "%u\n", otp->name); \ } SHOW_AB3100_ATTR(locked) SHOW_AB3100_ATTR(freq) SHOW_AB3100_ATTR(paf) SHOW_AB3100_ATTR(imeich) SHOW_AB3100_ATTR(cid) SHOW_AB3100_ATTR(fac) SHOW_AB3100_ATTR(tac) SHOW_AB3100_ATTR(svn) static struct device_attribute ab3100_otp_attrs[] = { __ATTR(locked, S_IRUGO, ab3100_otp_locked_show, NULL), __ATTR(freq, S_IRUGO, ab3100_otp_freq_show, NULL), __ATTR(paf, S_IRUGO, ab3100_otp_paf_show, NULL), __ATTR(imeich, S_IRUGO, ab3100_otp_imeich_show, NULL), __ATTR(cid, S_IRUGO, ab3100_otp_cid_show, NULL), __ATTR(fac, S_IRUGO, ab3100_otp_fac_show, NULL), __ATTR(tac, S_IRUGO, ab3100_otp_tac_show, NULL), __ATTR(svn, S_IRUGO, ab3100_otp_svn_show, NULL), }; static int __init ab3100_otp_probe(struct platform_device *pdev) { struct ab3100_otp *otp; int err = 0; int i; otp = kzalloc(sizeof(struct ab3100_otp), GFP_KERNEL); if (!otp) { dev_err(&pdev->dev, "could not allocate AB3100 OTP device\n"); return -ENOMEM; } otp->dev = &pdev->dev; /* Replace platform data coming in with a local struct */ platform_set_drvdata(pdev, otp); err = ab3100_otp_read(otp); if (err) goto err_otp_read; dev_info(&pdev->dev, "AB3100 OTP readout registered\n"); /* sysfs entries */ for (i = 0; i < ARRAY_SIZE(ab3100_otp_attrs); i++) { err = device_create_file(&pdev->dev, &ab3100_otp_attrs[i]); if (err) goto err_create_file; } /* debugfs entries */ err = ab3100_otp_init_debugfs(&pdev->dev, otp); if (err) goto err_init_debugfs; return 0; err_init_debugfs: err_create_file: while (--i >= 0) device_remove_file(&pdev->dev, &ab3100_otp_attrs[i]); err_otp_read: kfree(otp); return err; } static int __exit ab3100_otp_remove(struct platform_device *pdev) { struct ab3100_otp *otp = platform_get_drvdata(pdev); int i; for (i = 0; i < 
ARRAY_SIZE(ab3100_otp_attrs); i++) device_remove_file(&pdev->dev, &ab3100_otp_attrs[i]); ab3100_otp_exit_debugfs(otp); kfree(otp); return 0; } static struct platform_driver ab3100_otp_driver = { .driver = { .name = "ab3100-otp", .owner = THIS_MODULE, }, .remove = __exit_p(ab3100_otp_remove), }; module_platform_driver_probe(ab3100_otp_driver, ab3100_otp_probe); MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>"); MODULE_DESCRIPTION("AB3100 OTP Readout Driver"); MODULE_LICENSE("GPL");
gpl-2.0
OldDroid/android_kernel_samsung_tblte
drivers/gpio/gpio-sch.c
2600
7095
/* * GPIO interface for Intel Poulsbo SCH * * Copyright (c) 2010 CompuLab Ltd * Author: Denis Turischev <denis@compulab.co.il> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/io.h> #include <linux/errno.h> #include <linux/acpi.h> #include <linux/platform_device.h> #include <linux/pci_ids.h> #include <linux/gpio.h> static DEFINE_SPINLOCK(gpio_lock); #define CGEN (0x00) #define CGIO (0x04) #define CGLV (0x08) #define RGEN (0x20) #define RGIO (0x24) #define RGLV (0x28) static unsigned short gpio_ba; static int sch_gpio_core_direction_in(struct gpio_chip *gc, unsigned gpio_num) { u8 curr_dirs; unsigned short offset, bit; spin_lock(&gpio_lock); offset = CGIO + gpio_num / 8; bit = gpio_num % 8; curr_dirs = inb(gpio_ba + offset); if (!(curr_dirs & (1 << bit))) outb(curr_dirs | (1 << bit), gpio_ba + offset); spin_unlock(&gpio_lock); return 0; } static int sch_gpio_core_get(struct gpio_chip *gc, unsigned gpio_num) { int res; unsigned short offset, bit; offset = CGLV + gpio_num / 8; bit = gpio_num % 8; res = !!(inb(gpio_ba + offset) & (1 << bit)); return res; } static void sch_gpio_core_set(struct gpio_chip *gc, unsigned gpio_num, int val) { u8 curr_vals; unsigned short offset, bit; spin_lock(&gpio_lock); offset = CGLV + gpio_num / 8; bit = gpio_num % 8; curr_vals = inb(gpio_ba + offset); if (val) outb(curr_vals 
| (1 << bit), gpio_ba + offset); else outb((curr_vals & ~(1 << bit)), gpio_ba + offset); spin_unlock(&gpio_lock); } static int sch_gpio_core_direction_out(struct gpio_chip *gc, unsigned gpio_num, int val) { u8 curr_dirs; unsigned short offset, bit; sch_gpio_core_set(gc, gpio_num, val); spin_lock(&gpio_lock); offset = CGIO + gpio_num / 8; bit = gpio_num % 8; curr_dirs = inb(gpio_ba + offset); if (curr_dirs & (1 << bit)) outb(curr_dirs & ~(1 << bit), gpio_ba + offset); spin_unlock(&gpio_lock); return 0; } static struct gpio_chip sch_gpio_core = { .label = "sch_gpio_core", .owner = THIS_MODULE, .direction_input = sch_gpio_core_direction_in, .get = sch_gpio_core_get, .direction_output = sch_gpio_core_direction_out, .set = sch_gpio_core_set, }; static int sch_gpio_resume_direction_in(struct gpio_chip *gc, unsigned gpio_num) { u8 curr_dirs; unsigned short offset, bit; spin_lock(&gpio_lock); offset = RGIO + gpio_num / 8; bit = gpio_num % 8; curr_dirs = inb(gpio_ba + offset); if (!(curr_dirs & (1 << bit))) outb(curr_dirs | (1 << bit), gpio_ba + offset); spin_unlock(&gpio_lock); return 0; } static int sch_gpio_resume_get(struct gpio_chip *gc, unsigned gpio_num) { unsigned short offset, bit; offset = RGLV + gpio_num / 8; bit = gpio_num % 8; return !!(inb(gpio_ba + offset) & (1 << bit)); } static void sch_gpio_resume_set(struct gpio_chip *gc, unsigned gpio_num, int val) { u8 curr_vals; unsigned short offset, bit; spin_lock(&gpio_lock); offset = RGLV + gpio_num / 8; bit = gpio_num % 8; curr_vals = inb(gpio_ba + offset); if (val) outb(curr_vals | (1 << bit), gpio_ba + offset); else outb((curr_vals & ~(1 << bit)), gpio_ba + offset); spin_unlock(&gpio_lock); } static int sch_gpio_resume_direction_out(struct gpio_chip *gc, unsigned gpio_num, int val) { u8 curr_dirs; unsigned short offset, bit; sch_gpio_resume_set(gc, gpio_num, val); offset = RGIO + gpio_num / 8; bit = gpio_num % 8; spin_lock(&gpio_lock); curr_dirs = inb(gpio_ba + offset); if (curr_dirs & (1 << bit)) outb(curr_dirs 
& ~(1 << bit), gpio_ba + offset); spin_unlock(&gpio_lock); return 0; } static struct gpio_chip sch_gpio_resume = { .label = "sch_gpio_resume", .owner = THIS_MODULE, .direction_input = sch_gpio_resume_direction_in, .get = sch_gpio_resume_get, .direction_output = sch_gpio_resume_direction_out, .set = sch_gpio_resume_set, }; static int sch_gpio_probe(struct platform_device *pdev) { struct resource *res; int err, id; id = pdev->id; if (!id) return -ENODEV; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!res) return -EBUSY; if (!request_region(res->start, resource_size(res), pdev->name)) return -EBUSY; gpio_ba = res->start; switch (id) { case PCI_DEVICE_ID_INTEL_SCH_LPC: sch_gpio_core.base = 0; sch_gpio_core.ngpio = 10; sch_gpio_resume.base = 10; sch_gpio_resume.ngpio = 4; /* * GPIO[6:0] enabled by default * GPIO7 is configured by the CMC as SLPIOVR * Enable GPIO[9:8] core powered gpios explicitly */ outb(0x3, gpio_ba + CGEN + 1); /* * SUS_GPIO[2:0] enabled by default * Enable SUS_GPIO3 resume powered gpio explicitly */ outb(0x8, gpio_ba + RGEN); break; case PCI_DEVICE_ID_INTEL_ITC_LPC: sch_gpio_core.base = 0; sch_gpio_core.ngpio = 5; sch_gpio_resume.base = 5; sch_gpio_resume.ngpio = 9; break; case PCI_DEVICE_ID_INTEL_CENTERTON_ILB: sch_gpio_core.base = 0; sch_gpio_core.ngpio = 21; sch_gpio_resume.base = 21; sch_gpio_resume.ngpio = 9; break; default: err = -ENODEV; goto err_sch_gpio_core; } sch_gpio_core.dev = &pdev->dev; sch_gpio_resume.dev = &pdev->dev; err = gpiochip_add(&sch_gpio_core); if (err < 0) goto err_sch_gpio_core; err = gpiochip_add(&sch_gpio_resume); if (err < 0) goto err_sch_gpio_resume; return 0; err_sch_gpio_resume: if (gpiochip_remove(&sch_gpio_core)) dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__); err_sch_gpio_core: release_region(res->start, resource_size(res)); gpio_ba = 0; return err; } static int sch_gpio_remove(struct platform_device *pdev) { struct resource *res; if (gpio_ba) { int err; err = 
gpiochip_remove(&sch_gpio_core); if (err) dev_err(&pdev->dev, "%s failed, %d\n", "gpiochip_remove()", err); err = gpiochip_remove(&sch_gpio_resume); if (err) dev_err(&pdev->dev, "%s failed, %d\n", "gpiochip_remove()", err); res = platform_get_resource(pdev, IORESOURCE_IO, 0); release_region(res->start, resource_size(res)); gpio_ba = 0; return err; } return 0; } static struct platform_driver sch_gpio_driver = { .driver = { .name = "sch_gpio", .owner = THIS_MODULE, }, .probe = sch_gpio_probe, .remove = sch_gpio_remove, }; module_platform_driver(sch_gpio_driver); MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>"); MODULE_DESCRIPTION("GPIO interface for Intel Poulsbo SCH"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:sch_gpio");
gpl-2.0
intervigilium/android_kernel_htc_msm7x30
drivers/infiniband/hw/mthca/mthca_mcg.c
3624
8860
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/string.h> #include <linux/gfp.h> #include "mthca_dev.h" #include "mthca_cmd.h" struct mthca_mgm { __be32 next_gid_index; u32 reserved[3]; u8 gid[16]; __be32 qp[MTHCA_QP_PER_MGM]; }; static const u8 zero_gid[16]; /* automatically initialized to 0 */ /* * Caller must hold MCG table semaphore. gid and mgm parameters must * be properly aligned for command interface. * * Returns 0 unless a firmware command error occurs. * * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 * and *mgm holds MGM entry. 
* * if GID is found in AMGM, *index = index in AMGM, *prev = index of * previous entry in hash chain and *mgm holds AMGM entry. * * If no AMGM exists for given gid, *index = -1, *prev = index of last * entry in hash chain and *mgm holds end of hash chain. */ static int find_mgm(struct mthca_dev *dev, u8 *gid, struct mthca_mailbox *mgm_mailbox, u16 *hash, int *prev, int *index) { struct mthca_mailbox *mailbox; struct mthca_mgm *mgm = mgm_mailbox->buf; u8 *mgid; int err; u8 status; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return -ENOMEM; mgid = mailbox->buf; memcpy(mgid, gid, 16); err = mthca_MGID_HASH(dev, mailbox, hash, &status); if (err) goto out; if (status) { mthca_err(dev, "MGID_HASH returned status %02x\n", status); err = -EINVAL; goto out; } if (0) mthca_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash); *index = *hash; *prev = -1; do { err = mthca_READ_MGM(dev, *index, mgm_mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "READ_MGM returned status %02x\n", status); err = -EINVAL; goto out; } if (!memcmp(mgm->gid, zero_gid, 16)) { if (*index != *hash) { mthca_err(dev, "Found zero MGID in AMGM.\n"); err = -EINVAL; } goto out; } if (!memcmp(mgm->gid, gid, 16)) goto out; *prev = *index; *index = be32_to_cpu(mgm->next_gid_index) >> 6; } while (*index); *index = -1; out: mthca_free_mailbox(dev, mailbox); return err; } int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_mailbox *mailbox; struct mthca_mgm *mgm; u16 hash; int index, prev; int link = 0; int i; int err; u8 status; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); mgm = mailbox->buf; mutex_lock(&dev->mcg_table.mutex); err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index); if (err) goto out; if (index != -1) { if (!memcmp(mgm->gid, zero_gid, 16)) memcpy(mgm->gid, gid->raw, 16); } else { link = 1; index = 
mthca_alloc(&dev->mcg_table.alloc); if (index == -1) { mthca_err(dev, "No AMGM entries left\n"); err = -ENOMEM; goto out; } err = mthca_READ_MGM(dev, index, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "READ_MGM returned status %02x\n", status); err = -EINVAL; goto out; } memset(mgm, 0, sizeof *mgm); memcpy(mgm->gid, gid->raw, 16); } for (i = 0; i < MTHCA_QP_PER_MGM; ++i) if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) { mthca_dbg(dev, "QP %06x already a member of MGM\n", ibqp->qp_num); err = 0; goto out; } else if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) { mgm->qp[i] = cpu_to_be32(ibqp->qp_num | (1 << 31)); break; } if (i == MTHCA_QP_PER_MGM) { mthca_err(dev, "MGM at index %x is full.\n", index); err = -ENOMEM; goto out; } err = mthca_WRITE_MGM(dev, index, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "WRITE_MGM returned status %02x\n", status); err = -EINVAL; goto out; } if (!link) goto out; err = mthca_READ_MGM(dev, prev, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "READ_MGM returned status %02x\n", status); err = -EINVAL; goto out; } mgm->next_gid_index = cpu_to_be32(index << 6); err = mthca_WRITE_MGM(dev, prev, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "WRITE_MGM returned status %02x\n", status); err = -EINVAL; } out: if (err && link && index != -1) { BUG_ON(index < dev->limits.num_mgms); mthca_free(&dev->mcg_table.alloc, index); } mutex_unlock(&dev->mcg_table.mutex); mthca_free_mailbox(dev, mailbox); return err; } int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_mailbox *mailbox; struct mthca_mgm *mgm; u16 hash; int prev, index; int i, loc; int err; u8 status; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); mgm = mailbox->buf; mutex_lock(&dev->mcg_table.mutex); err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index); if (err) 
goto out; if (index == -1) { mthca_err(dev, "MGID %pI6 not found\n", gid->raw); err = -EINVAL; goto out; } for (loc = -1, i = 0; i < MTHCA_QP_PER_MGM; ++i) { if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) loc = i; if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) break; } if (loc == -1) { mthca_err(dev, "QP %06x not found in MGM\n", ibqp->qp_num); err = -EINVAL; goto out; } mgm->qp[loc] = mgm->qp[i - 1]; mgm->qp[i - 1] = 0; err = mthca_WRITE_MGM(dev, index, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "WRITE_MGM returned status %02x\n", status); err = -EINVAL; goto out; } if (i != 1) goto out; if (prev == -1) { /* Remove entry from MGM */ int amgm_index_to_free = be32_to_cpu(mgm->next_gid_index) >> 6; if (amgm_index_to_free) { err = mthca_READ_MGM(dev, amgm_index_to_free, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "READ_MGM returned status %02x\n", status); err = -EINVAL; goto out; } } else memset(mgm->gid, 0, 16); err = mthca_WRITE_MGM(dev, index, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "WRITE_MGM returned status %02x\n", status); err = -EINVAL; goto out; } if (amgm_index_to_free) { BUG_ON(amgm_index_to_free < dev->limits.num_mgms); mthca_free(&dev->mcg_table.alloc, amgm_index_to_free); } } else { /* Remove entry from AMGM */ int curr_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; err = mthca_READ_MGM(dev, prev, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "READ_MGM returned status %02x\n", status); err = -EINVAL; goto out; } mgm->next_gid_index = cpu_to_be32(curr_next_index << 6); err = mthca_WRITE_MGM(dev, prev, mailbox, &status); if (err) goto out; if (status) { mthca_err(dev, "WRITE_MGM returned status %02x\n", status); err = -EINVAL; goto out; } BUG_ON(index < dev->limits.num_mgms); mthca_free(&dev->mcg_table.alloc, index); } out: mutex_unlock(&dev->mcg_table.mutex); mthca_free_mailbox(dev, mailbox); return err; } int mthca_init_mcg_table(struct mthca_dev 
*dev) { int err; int table_size = dev->limits.num_mgms + dev->limits.num_amgms; err = mthca_alloc_init(&dev->mcg_table.alloc, table_size, table_size - 1, dev->limits.num_mgms); if (err) return err; mutex_init(&dev->mcg_table.mutex); return 0; } void mthca_cleanup_mcg_table(struct mthca_dev *dev) { mthca_alloc_cleanup(&dev->mcg_table.alloc); }
gpl-2.0
piasek1906/Piasek-KK
drivers/w1/slaves/w1_ds2781.c
4904
4331
/* * 1-Wire implementation for the ds2781 chip * * Author: Renata Sayakhova <renata@oktetlabs.ru> * * Based on w1-ds2780 driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/types.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/idr.h> #include "../w1.h" #include "../w1_int.h" #include "../w1_family.h" #include "w1_ds2781.h" static int w1_ds2781_do_io(struct device *dev, char *buf, int addr, size_t count, int io) { struct w1_slave *sl = container_of(dev, struct w1_slave, dev); if (addr > DS2781_DATA_SIZE || addr < 0) return 0; count = min_t(int, count, DS2781_DATA_SIZE - addr); if (w1_reset_select_slave(sl) == 0) { if (io) { w1_write_8(sl->master, W1_DS2781_WRITE_DATA); w1_write_8(sl->master, addr); w1_write_block(sl->master, buf, count); } else { w1_write_8(sl->master, W1_DS2781_READ_DATA); w1_write_8(sl->master, addr); count = w1_read_block(sl->master, buf, count); } } return count; } int w1_ds2781_io(struct device *dev, char *buf, int addr, size_t count, int io) { struct w1_slave *sl = container_of(dev, struct w1_slave, dev); int ret; if (!dev) return -ENODEV; mutex_lock(&sl->master->mutex); ret = w1_ds2781_do_io(dev, buf, addr, count, io); mutex_unlock(&sl->master->mutex); return ret; } EXPORT_SYMBOL(w1_ds2781_io); int w1_ds2781_io_nolock(struct device *dev, char *buf, int addr, size_t count, int io) { int ret; if (!dev) return -ENODEV; ret = w1_ds2781_do_io(dev, buf, addr, count, io); return ret; } EXPORT_SYMBOL(w1_ds2781_io_nolock); int w1_ds2781_eeprom_cmd(struct device *dev, int addr, int cmd) { struct w1_slave *sl = container_of(dev, struct w1_slave, dev); if (!dev) return -EINVAL; mutex_lock(&sl->master->mutex); if (w1_reset_select_slave(sl) == 0) { w1_write_8(sl->master, cmd); 
w1_write_8(sl->master, addr); } mutex_unlock(&sl->master->mutex); return 0; } EXPORT_SYMBOL(w1_ds2781_eeprom_cmd); static ssize_t w1_ds2781_read_bin(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj, struct device, kobj); return w1_ds2781_io(dev, buf, off, count, 0); } static struct bin_attribute w1_ds2781_bin_attr = { .attr = { .name = "w1_slave", .mode = S_IRUGO, }, .size = DS2781_DATA_SIZE, .read = w1_ds2781_read_bin, }; static DEFINE_IDA(bat_ida); static int w1_ds2781_add_slave(struct w1_slave *sl) { int ret; int id; struct platform_device *pdev; id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL); if (id < 0) { ret = id; goto noid; } pdev = platform_device_alloc("ds2781-battery", id); if (!pdev) { ret = -ENOMEM; goto pdev_alloc_failed; } pdev->dev.parent = &sl->dev; ret = platform_device_add(pdev); if (ret) goto pdev_add_failed; ret = sysfs_create_bin_file(&sl->dev.kobj, &w1_ds2781_bin_attr); if (ret) goto bin_attr_failed; dev_set_drvdata(&sl->dev, pdev); return 0; bin_attr_failed: pdev_add_failed: platform_device_unregister(pdev); pdev_alloc_failed: ida_simple_remove(&bat_ida, id); noid: return ret; } static void w1_ds2781_remove_slave(struct w1_slave *sl) { struct platform_device *pdev = dev_get_drvdata(&sl->dev); int id = pdev->id; platform_device_unregister(pdev); ida_simple_remove(&bat_ida, id); sysfs_remove_bin_file(&sl->dev.kobj, &w1_ds2781_bin_attr); } static struct w1_family_ops w1_ds2781_fops = { .add_slave = w1_ds2781_add_slave, .remove_slave = w1_ds2781_remove_slave, }; static struct w1_family w1_ds2781_family = { .fid = W1_FAMILY_DS2781, .fops = &w1_ds2781_fops, }; static int __init w1_ds2781_init(void) { ida_init(&bat_ida); return w1_register_family(&w1_ds2781_family); } static void __exit w1_ds2781_exit(void) { w1_unregister_family(&w1_ds2781_family); ida_destroy(&bat_ida); } module_init(w1_ds2781_init); module_exit(w1_ds2781_exit); 
MODULE_LICENSE("GPL"); MODULE_AUTHOR("Renata Sayakhova <renata@oktetlabs.ru>"); MODULE_DESCRIPTION("1-wire Driver for Maxim/Dallas DS2781 Stand-Alone Fuel Gauge IC");
gpl-2.0
sclukey/linux-omap
drivers/ide/piix.c
5160
14392
/* * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> * Copyright (C) 2003 Red Hat * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com> * * May be copied or modified under the terms of the GNU General Public License * * Documentation: * * Publicly available from Intel web site. Errata documentation * is also publicly available. As an aide to anyone hacking on this * driver the list of errata that are relevant is below.going back to * PIIX4. Older device documentation is now a bit tricky to find. * * Errata of note: * * Unfixable * PIIX4 errata #9 - Only on ultra obscure hw * ICH3 errata #13 - Not observed to affect real hw * by Intel * * Things we must deal with * PIIX4 errata #10 - BM IDE hang with non UDMA * (must stop/start dma to recover) * 440MX errata #15 - As PIIX4 errata #10 * PIIX4 errata #15 - Must not read control registers * during a PIO transfer * 440MX errata #13 - As PIIX4 errata #15 * ICH2 errata #21 - DMA mode 0 doesn't work right * ICH0/1 errata #55 - As ICH2 errata #21 * ICH2 spec c #9 - Extra operations needed to handle * drive hotswap [NOT YET SUPPORTED] * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary * and must be dword aligned * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3 * * Should have been BIOS fixed: * 450NX: errata #19 - DMA hangs on old 450NX * 450NX: errata #20 - DMA hangs on old 450NX * 450NX: errata #25 - Corruption with DMA on old 450NX * ICH3 errata #15 - IDE deadlock under high load * (BIOS must set dev 31 fn 0 bit 23) * ICH3 errata #18 - Don't use native mode */ #include <linux/types.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/ide.h> #include <linux/init.h> #include <asm/io.h> #define DRV_NAME "piix" static int no_piix_dma; /** * piix_set_pio_mode - set host controller for PIO mode * @port: port * @drive: drive * * Set the interface PIO mode based upon 
the settings done by AMI BIOS. */ static void piix_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { struct pci_dev *dev = to_pci_dev(hwif->dev); int is_slave = drive->dn & 1; int master_port = hwif->channel ? 0x42 : 0x40; int slave_port = 0x44; unsigned long flags; u16 master_data; u8 slave_data; static DEFINE_SPINLOCK(tune_lock); int control = 0; const u8 pio = drive->pio_mode - XFER_PIO_0; /* ISP RTC */ static const u8 timings[][2]= { { 0, 0 }, { 0, 0 }, { 1, 0 }, { 2, 1 }, { 2, 3 }, }; /* * Master vs slave is synchronized above us but the slave register is * shared by the two hwifs so the corner case of two slave timeouts in * parallel must be locked. */ spin_lock_irqsave(&tune_lock, flags); pci_read_config_word(dev, master_port, &master_data); if (pio > 1) control |= 1; /* Programmable timing on */ if (drive->media == ide_disk) control |= 4; /* Prefetch, post write */ if (ide_pio_need_iordy(drive, pio)) control |= 2; /* IORDY */ if (is_slave) { master_data |= 0x4000; master_data &= ~0x0070; if (pio > 1) { /* Set PPE, IE and TIME */ master_data |= control << 4; } pci_read_config_byte(dev, slave_port, &slave_data); slave_data &= hwif->channel ? 0x0f : 0xf0; slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (hwif->channel ? 4 : 0); } else { master_data &= ~0x3307; if (pio > 1) { /* enable PPE, IE and TIME */ master_data |= control; } master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8); } pci_write_config_word(dev, master_port, master_data); if (is_slave) pci_write_config_byte(dev, slave_port, slave_data); spin_unlock_irqrestore(&tune_lock, flags); } /** * piix_set_dma_mode - set host controller for DMA mode * @hwif: port * @drive: drive * * Set a PIIX host controller to the desired DMA mode. This involves * programming the right timing data into the PCI configuration space. */ static void piix_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { struct pci_dev *dev = to_pci_dev(hwif->dev); u8 maslave = hwif->channel ? 
0x42 : 0x40; int a_speed = 3 << (drive->dn * 4); int u_flag = 1 << drive->dn; int v_flag = 0x01 << drive->dn; int w_flag = 0x10 << drive->dn; int u_speed = 0; int sitre; u16 reg4042, reg4a; u8 reg48, reg54, reg55; const u8 speed = drive->dma_mode; pci_read_config_word(dev, maslave, &reg4042); sitre = (reg4042 & 0x4000) ? 1 : 0; pci_read_config_byte(dev, 0x48, &reg48); pci_read_config_word(dev, 0x4a, &reg4a); pci_read_config_byte(dev, 0x54, &reg54); pci_read_config_byte(dev, 0x55, &reg55); if (speed >= XFER_UDMA_0) { u8 udma = speed - XFER_UDMA_0; u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4); if (!(reg48 & u_flag)) pci_write_config_byte(dev, 0x48, reg48 | u_flag); if (speed == XFER_UDMA_5) { pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag); } else { pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); } if ((reg4a & a_speed) != u_speed) pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed); if (speed > XFER_UDMA_2) { if (!(reg54 & v_flag)) pci_write_config_byte(dev, 0x54, reg54 | v_flag); } else pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); } else { const u8 mwdma_to_pio[] = { 0, 3, 4 }; if (reg48 & u_flag) pci_write_config_byte(dev, 0x48, reg48 & ~u_flag); if (reg4a & a_speed) pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); if (reg54 & v_flag) pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); if (reg55 & w_flag) pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); if (speed >= XFER_MW_DMA_0) drive->pio_mode = mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0; else drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */ piix_set_pio_mode(hwif, drive); } } /** * init_chipset_ich - set up the ICH chipset * @dev: PCI device to set up * * Initialize the PCI device as required. For the ICH this turns * out to be nice and simple. 
*/ static int init_chipset_ich(struct pci_dev *dev) { u32 extra = 0; pci_read_config_dword(dev, 0x54, &extra); pci_write_config_dword(dev, 0x54, extra | 0x400); return 0; } /** * ich_clear_irq - clear BMDMA status * @drive: IDE drive * * ICHx contollers set DMA INTR no matter DMA or PIO. * BMDMA status might need to be cleared even for * PIO interrupts to prevent spurious/lost IRQ. */ static void ich_clear_irq(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u8 dma_stat; /* * ide_dma_end() needs BMDMA status for error checking. * So, skip clearing BMDMA status here and leave it * to ide_dma_end() if this is DMA interrupt. */ if (drive->waiting_for_dma || hwif->dma_base == 0) return; /* clear the INTR & ERROR bits */ dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); /* Should we force the bit as well ? */ outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS); } struct ich_laptop { u16 device; u16 subvendor; u16 subdevice; }; /* * List of laptops that use short cables rather than 80 wire */ static const struct ich_laptop ich_laptop[] = { /* devid, subvendor, subdev */ { 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */ { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */ { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */ { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */ { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */ { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on Acer Aspire 2023WLMi */ { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */ { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */ /* end marker */ { 0, } }; static u8 piix_cable_detect(ide_hwif_t *hwif) { struct pci_dev *pdev = to_pci_dev(hwif->dev); const struct ich_laptop *lap = &ich_laptop[0]; u8 reg54h = 0, mask = hwif->channel ? 
0xc0 : 0x30; /* check for specials */ while (lap->device) { if (lap->device == pdev->device && lap->subvendor == pdev->subsystem_vendor && lap->subdevice == pdev->subsystem_device) { return ATA_CBL_PATA40_SHORT; } lap++; } pci_read_config_byte(pdev, 0x54, &reg54h); return (reg54h & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40; } /** * init_hwif_piix - fill in the hwif for the PIIX * @hwif: IDE interface * * Set up the ide_hwif_t for the PIIX interface according to the * capabilities of the hardware. */ static void __devinit init_hwif_piix(ide_hwif_t *hwif) { if (!hwif->dma_base) return; if (no_piix_dma) hwif->ultra_mask = hwif->mwdma_mask = hwif->swdma_mask = 0; } static const struct ide_port_ops piix_port_ops = { .set_pio_mode = piix_set_pio_mode, .set_dma_mode = piix_set_dma_mode, .cable_detect = piix_cable_detect, }; static const struct ide_port_ops ich_port_ops = { .set_pio_mode = piix_set_pio_mode, .set_dma_mode = piix_set_dma_mode, .clear_irq = ich_clear_irq, .cable_detect = piix_cable_detect, }; #define DECLARE_PIIX_DEV(udma) \ { \ .name = DRV_NAME, \ .init_hwif = init_hwif_piix, \ .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \ .port_ops = &piix_port_ops, \ .pio_mask = ATA_PIO4, \ .swdma_mask = ATA_SWDMA2_ONLY, \ .mwdma_mask = ATA_MWDMA12_ONLY, \ .udma_mask = udma, \ } #define DECLARE_ICH_DEV(mwdma, udma) \ { \ .name = DRV_NAME, \ .init_chipset = init_chipset_ich, \ .init_hwif = init_hwif_piix, \ .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \ .port_ops = &ich_port_ops, \ .pio_mask = ATA_PIO4, \ .swdma_mask = ATA_SWDMA2_ONLY, \ .mwdma_mask = mwdma, \ .udma_mask = udma, \ } static const struct ide_port_info piix_pci_info[] __devinitdata = { /* 0: MPIIX */ { /* * MPIIX actually has only a single IDE channel mapped to * the primary or secondary ports depending on the value * of the bit 14 of the IDETIM register at offset 0x6c */ .name = DRV_NAME, .enablebits = {{0x6d,0xc0,0x80}, {0x6d,0xc0,0xc0}}, .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_NO_DMA, 
.pio_mask = ATA_PIO4, /* This is a painful system best to let it self tune for now */ }, /* 1: PIIXa/PIIXb/PIIX3 */ DECLARE_PIIX_DEV(0x00), /* no udma */ /* 2: PIIX4 */ DECLARE_PIIX_DEV(ATA_UDMA2), /* 3: ICH0 */ DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA2), /* 4: ICH */ DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA4), /* 5: PIIX4 */ DECLARE_PIIX_DEV(ATA_UDMA4), /* 6: ICH[2-6]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */ DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA5), /* 7: ICH7/7-R, no MWDMA1 */ DECLARE_ICH_DEV(ATA_MWDMA2_ONLY, ATA_UDMA5), }; /** * piix_init_one - called when a PIIX is found * @dev: the piix device * @id: the matching pci id * * Called when the PCI registration layer (or the IDE initialization) * finds a device matching our IDE device tables. */ static int __devinit piix_init_one(struct pci_dev *dev, const struct pci_device_id *id) { return ide_pci_init_one(dev, &piix_pci_info[id->driver_data], NULL); } /** * piix_check_450nx - Check for problem 450NX setup * * Check for the present of 450NX errata #19 and errata #25. If * they are found, disable use of DMA IDE */ static void __devinit piix_check_450nx(void) { struct pci_dev *pdev = NULL; u16 cfg; while((pdev=pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev))!=NULL) { /* Look for 450NX PXB. 
Check for problem configurations A PCI quirk checks bit 6 already */ pci_read_config_word(pdev, 0x41, &cfg); /* Only on the original revision: IDE DMA can hang */ if (pdev->revision == 0x00) no_piix_dma = 1; /* On all revisions below 5 PXB bus lock must be disabled for IDE */ else if (cfg & (1<<14) && pdev->revision < 5) no_piix_dma = 2; } if(no_piix_dma) printk(KERN_WARNING DRV_NAME ": 450NX errata present, disabling IDE DMA.\n"); if(no_piix_dma == 2) printk(KERN_WARNING DRV_NAME ": A BIOS update may resolve this.\n"); } static const struct pci_device_id piix_pci_tbl[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_0), 1 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_1), 1 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), 0 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371SB_1), 1 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371AB), 2 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AB_1), 3 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82443MX_1), 2 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AA_1), 4 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82372FB_1), 5 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82451NX), 2 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_9), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_8), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_10), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_11), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_11), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_11), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801E_11), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_10), 6 }, #ifdef CONFIG_BLK_DEV_IDE_SATA { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_1), 6 }, #endif { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 7 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 7 }, 
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6), 6 }, { 0, }, }; MODULE_DEVICE_TABLE(pci, piix_pci_tbl); static struct pci_driver piix_pci_driver = { .name = "PIIX_IDE", .id_table = piix_pci_tbl, .probe = piix_init_one, .remove = ide_pci_remove, .suspend = ide_pci_suspend, .resume = ide_pci_resume, }; static int __init piix_ide_init(void) { piix_check_450nx(); return ide_pci_register_driver(&piix_pci_driver); } static void __exit piix_ide_exit(void) { pci_unregister_driver(&piix_pci_driver); } module_init(piix_ide_init); module_exit(piix_ide_exit); MODULE_AUTHOR("Andre Hedrick, Andrzej Krzysztofowicz"); MODULE_DESCRIPTION("PCI driver module for Intel PIIX IDE"); MODULE_LICENSE("GPL");
gpl-2.0
MoKee/android_kernel_cyanogen_msm8916-amss
arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
5160
4186
/***********************license start*************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2008 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information ***********************license end**************************************/ /** * * Helper utilities for qlm_jtag. * */ #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-helper-jtag.h> /** * Initialize the internal QLM JTAG logic to allow programming * of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions. * These functions should only be used at the direction of Cavium * Networks. Programming incorrect values into the JTAG chain * can cause chip damage. */ void cvmx_helper_qlm_jtag_init(void) { union cvmx_ciu_qlm_jtgc jtgc; uint32_t clock_div = 0; uint32_t divisor = cvmx_sysinfo_get()->cpu_clock_hz / (25 * 1000000); divisor = (divisor - 1) >> 2; /* Convert the divisor into a power of 2 shift */ while (divisor) { clock_div++; divisor = divisor >> 1; } /* * Clock divider for QLM JTAG operations. 
eclk is divided by * 2^(CLK_DIV + 2) */ jtgc.u64 = 0; jtgc.s.clk_div = clock_div; jtgc.s.mux_sel = 0; if (OCTEON_IS_MODEL(OCTEON_CN52XX)) jtgc.s.bypass = 0x3; else jtgc.s.bypass = 0xf; cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64); cvmx_read_csr(CVMX_CIU_QLM_JTGC); } /** * Write up to 32bits into the QLM jtag chain. Bits are shifted * into the MSB and out the LSB, so you should shift in the low * order bits followed by the high order bits. The JTAG chain is * 4 * 268 bits long, or 1072. * * @qlm: QLM to shift value into * @bits: Number of bits to shift in (1-32). * @data: Data to shift in. Bit 0 enters the chain first, followed by * bit 1, etc. * * Returns The low order bits of the JTAG chain that shifted out of the * circle. */ uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data) { union cvmx_ciu_qlm_jtgd jtgd; jtgd.u64 = 0; jtgd.s.shift = 1; jtgd.s.shft_cnt = bits - 1; jtgd.s.shft_reg = data; if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) jtgd.s.select = 1 << qlm; cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64); do { jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD); } while (jtgd.s.shift); return jtgd.s.shft_reg >> (32 - bits); } /** * Shift long sequences of zeros into the QLM JTAG chain. It is * common to need to shift more than 32 bits of zeros into the * chain. This function is a convience wrapper around * cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of * zeros at a time. * * @qlm: QLM to shift zeros into * @bits: */ void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits) { while (bits > 0) { int n = bits; if (n > 32) n = 32; cvmx_helper_qlm_jtag_shift(qlm, n, 0); bits -= n; } } /** * Program the QLM JTAG chain into all lanes of the QLM. You must * have already shifted in 268*4, or 1072 bits into the JTAG * chain. Updating invalid values can possibly cause chip damage. 
* * @qlm: QLM to program */ void cvmx_helper_qlm_jtag_update(int qlm) { union cvmx_ciu_qlm_jtgd jtgd; /* Update the new data */ jtgd.u64 = 0; jtgd.s.update = 1; if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) jtgd.s.select = 1 << qlm; cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64); do { jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD); } while (jtgd.s.update); }
gpl-2.0
bshiznit/android_kernel_asus_grouper
drivers/staging/ft1000/ft1000-usb/ft1000_download.c
8232
34720
//===================================================== // CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved. // // // This file is part of Express Card USB Driver // // $Id: //==================================================== // 20090926; aelias; removed compiler warnings; ubuntu 9.04; 2.6.28-15-generic #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/usb.h> #include <linux/vmalloc.h> #include "ft1000_usb.h" #define DWNLD_HANDSHAKE_LOC 0x02 #define DWNLD_TYPE_LOC 0x04 #define DWNLD_SIZE_MSW_LOC 0x06 #define DWNLD_SIZE_LSW_LOC 0x08 #define DWNLD_PS_HDR_LOC 0x0A #define MAX_DSP_WAIT_LOOPS 40 #define DSP_WAIT_SLEEP_TIME 1000 /* 1 millisecond */ #define DSP_WAIT_DISPATCH_LVL 50 /* 50 usec */ #define HANDSHAKE_TIMEOUT_VALUE 0xF1F1 #define HANDSHAKE_RESET_VALUE 0xFEFE /* When DSP requests startover */ #define HANDSHAKE_RESET_VALUE_USB 0xFE7E /* When DSP requests startover */ #define HANDSHAKE_DSP_BL_READY 0xFEFE /* At start DSP writes this when bootloader ready */ #define HANDSHAKE_DSP_BL_READY_USB 0xFE7E /* At start DSP writes this when bootloader ready */ #define HANDSHAKE_DRIVER_READY 0xFFFF /* Driver writes after receiving 0xFEFE */ #define HANDSHAKE_SEND_DATA 0x0000 /* DSP writes this when ready for more data */ #define HANDSHAKE_REQUEST 0x0001 /* Request from DSP */ #define HANDSHAKE_RESPONSE 0x0000 /* Satisfied DSP request */ #define REQUEST_CODE_LENGTH 0x0000 #define REQUEST_RUN_ADDRESS 0x0001 #define REQUEST_CODE_SEGMENT 0x0002 /* In WORD count */ #define REQUEST_DONE_BL 0x0003 #define REQUEST_DONE_CL 0x0004 #define REQUEST_VERSION_INFO 0x0005 #define REQUEST_CODE_BY_VERSION 0x0006 #define REQUEST_MAILBOX_DATA 0x0007 #define REQUEST_FILE_CHECKSUM 0x0008 #define STATE_START_DWNLD 0x01 #define STATE_BOOT_DWNLD 0x02 #define STATE_CODE_DWNLD 0x03 #define STATE_DONE_DWNLD 0x04 #define STATE_SECTION_PROV 0x05 #define STATE_DONE_PROV 0x06 #define 
STATE_DONE_FILE 0x07 #define MAX_LENGTH 0x7f0 // Temporary download mechanism for Magnemite #define DWNLD_MAG_TYPE_LOC 0x00 #define DWNLD_MAG_LEN_LOC 0x01 #define DWNLD_MAG_ADDR_LOC 0x02 #define DWNLD_MAG_CHKSUM_LOC 0x03 #define DWNLD_MAG_VAL_LOC 0x04 #define HANDSHAKE_MAG_DSP_BL_READY 0xFEFE0000 /* At start DSP writes this when bootloader ready */ #define HANDSHAKE_MAG_DSP_ENTRY 0x01000000 /* Dsp writes this to request for entry address */ #define HANDSHAKE_MAG_DSP_DATA 0x02000000 /* Dsp writes this to request for data block */ #define HANDSHAKE_MAG_DSP_DONE 0x03000000 /* Dsp writes this to indicate download done */ #define HANDSHAKE_MAG_DRV_READY 0xFFFF0000 /* Driver writes this to indicate ready to download */ #define HANDSHAKE_MAG_DRV_DATA 0x02FECDAB /* Driver writes this to indicate data available to DSP */ #define HANDSHAKE_MAG_DRV_ENTRY 0x01FECDAB /* Driver writes this to indicate entry point to DSP */ #define HANDSHAKE_MAG_TIMEOUT_VALUE 0xF1F1 // New Magnemite downloader #define DWNLD_MAG1_HANDSHAKE_LOC 0x00 #define DWNLD_MAG1_TYPE_LOC 0x01 #define DWNLD_MAG1_SIZE_LOC 0x02 #define DWNLD_MAG1_PS_HDR_LOC 0x03 struct dsp_file_hdr { long version_id; // Version ID of this image format. long package_id; // Package ID of code release. long build_date; // Date/time stamp when file was built. long commands_offset; // Offset to attached commands in Pseudo Hdr format. long loader_offset; // Offset to bootloader code. long loader_code_address; // Start address of bootloader. long loader_code_end; // Where bootloader code ends. long loader_code_size; long version_data_offset; // Offset were scrambled version data begins. long version_data_size; // Size, in words, of scrambled version data. long nDspImages; // Number of DSP images in file. }; #pragma pack(1) struct dsp_image_info { long coff_date; // Date/time when DSP Coff image was built. long begin_offset; // Offset in file where image begins. long end_offset; // Offset in file where image begins. 
long run_address; // On chip Start address of DSP code. long image_size; // Size of image. long version; // Embedded version # of DSP code. unsigned short checksum; // DSP File checksum unsigned short pad1; }; //--------------------------------------------------------------------------- // Function: check_usb_db // // Parameters: struct ft1000_device - device structure // // Returns: 0 - success // // Description: This function checks if the doorbell register is cleared // // Notes: // //--------------------------------------------------------------------------- static u32 check_usb_db (struct ft1000_device *ft1000dev) { int loopcnt; u16 temp; u32 status; loopcnt = 0; while (loopcnt < 10) { status = ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL); DEBUG("check_usb_db: read FT1000_REG_DOORBELL value is %x\n", temp); if (temp & 0x0080) { DEBUG("FT1000:Got checkusb doorbell\n"); status = ft1000_write_register(ft1000dev, 0x0080, FT1000_REG_DOORBELL); status = ft1000_write_register(ft1000dev, 0x0100, FT1000_REG_DOORBELL); status = ft1000_write_register(ft1000dev, 0x8000, FT1000_REG_DOORBELL); break; } else { loopcnt++; msleep(10); } } loopcnt = 0; while (loopcnt < 20) { status = ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL); DEBUG("FT1000:check_usb_db:Doorbell = 0x%x\n", temp); if (temp & 0x8000) { loopcnt++; msleep(10); } else { DEBUG("check_usb_db: door bell is cleared, return 0\n"); return 0; } } return HANDSHAKE_MAG_TIMEOUT_VALUE; } //--------------------------------------------------------------------------- // Function: get_handshake // // Parameters: struct ft1000_device - device structure // u16 expected_value - the handshake value expected // // Returns: handshakevalue - success // HANDSHAKE_TIMEOUT_VALUE - failure // // Description: This function gets the handshake and compare with the expected value // // Notes: // //--------------------------------------------------------------------------- static u16 get_handshake(struct 
ft1000_device *ft1000dev, u16 expected_value) { u16 handshake; int loopcnt; u32 status = 0; struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net); loopcnt = 0; while (loopcnt < 100) { /* Need to clear downloader doorbell if Hartley ASIC */ status = ft1000_write_register(ft1000dev, FT1000_DB_DNLD_RX, FT1000_REG_DOORBELL); if (pft1000info->fcodeldr) { DEBUG(" get_handshake: fcodeldr is %d\n", pft1000info->fcodeldr); pft1000info->fcodeldr = 0; status = check_usb_db(ft1000dev); if (status != STATUS_SUCCESS) { DEBUG("get_handshake: check_usb_db failed\n"); status = STATUS_FAILURE; break; } status = ft1000_write_register(ft1000dev, FT1000_DB_DNLD_RX, FT1000_REG_DOORBELL); } status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1); handshake = ntohs(handshake); if (status) return HANDSHAKE_TIMEOUT_VALUE; if ((handshake == expected_value) || (handshake == HANDSHAKE_RESET_VALUE_USB)) { return handshake; } else { loopcnt++; msleep(10); } } return HANDSHAKE_TIMEOUT_VALUE; } //--------------------------------------------------------------------------- // Function: put_handshake // // Parameters: struct ft1000_device - device structure // u16 handshake_value - handshake to be written // // Returns: none // // Description: This function write the handshake value to the handshake location // in DPRAM // // Notes: // //--------------------------------------------------------------------------- static void put_handshake(struct ft1000_device *ft1000dev,u16 handshake_value) { u32 tempx; u16 tempword; u32 status; tempx = (u32)handshake_value; tempx = ntohl(tempx); tempword = (u16)(tempx & 0xffff); status = ft1000_write_dpram16(ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, tempword, 0); tempword = (u16)(tempx >> 16); status = ft1000_write_dpram16(ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, tempword, 1); status = ft1000_write_register(ft1000dev, FT1000_DB_DNLD_TX, FT1000_REG_DOORBELL); } static u16 get_handshake_usb(struct ft1000_device *ft1000dev, u16 
expected_value) { u16 handshake; int loopcnt; u16 temp; u32 status = 0; struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net); loopcnt = 0; handshake = 0; while (loopcnt < 100) { if (pft1000info->usbboot == 2) { status = ft1000_read_dpram32(ft1000dev, 0, (u8 *)&(pft1000info->tempbuf[0]), 64); for (temp = 0; temp < 16; temp++) { DEBUG("tempbuf %d = 0x%x\n", temp, pft1000info->tempbuf[temp]); } status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1); DEBUG("handshake from read_dpram16 = 0x%x\n", handshake); if (pft1000info->dspalive == pft1000info->tempbuf[6]) { handshake = 0; } else { handshake = pft1000info->tempbuf[1]; pft1000info->dspalive = pft1000info->tempbuf[6]; } } else { status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1); } loopcnt++; msleep(10); handshake = ntohs(handshake); if ((handshake == expected_value) || (handshake == HANDSHAKE_RESET_VALUE_USB)) return handshake; } return HANDSHAKE_TIMEOUT_VALUE; } static void put_handshake_usb(struct ft1000_device *ft1000dev,u16 handshake_value) { int i; for (i=0; i<1000; i++); } //--------------------------------------------------------------------------- // Function: get_request_type // // Parameters: struct ft1000_device - device structure // // Returns: request type - success // // Description: This function returns the request type // // Notes: // //--------------------------------------------------------------------------- static u16 get_request_type(struct ft1000_device *ft1000dev) { u16 request_type; u32 status; u16 tempword; u32 tempx; struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net); if (pft1000info->bootmode == 1) { status = fix_ft1000_read_dpram32(ft1000dev, DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx); tempx = ntohl(tempx); } else { tempx = 0; status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_TYPE_LOC, (u8 *)&tempword, 1); tempx |= (tempword << 16); tempx = ntohl(tempx); } request_type = (u16)tempx; return request_type; } 
static u16 get_request_type_usb(struct ft1000_device *ft1000dev) { u16 request_type; u32 status; u16 tempword; u32 tempx; struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net); if (pft1000info->bootmode == 1) { status = fix_ft1000_read_dpram32(ft1000dev, DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx); tempx = ntohl(tempx); } else { if (pft1000info->usbboot == 2) { tempx = pft1000info->tempbuf[2]; tempword = pft1000info->tempbuf[3]; } else { tempx = 0; status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_TYPE_LOC, (u8 *)&tempword, 1); } tempx |= (tempword << 16); tempx = ntohl(tempx); } request_type = (u16)tempx; return request_type; } //--------------------------------------------------------------------------- // Function: get_request_value // // Parameters: struct ft1000_device - device structure // // Returns: request value - success // // Description: This function returns the request value // // Notes: // //--------------------------------------------------------------------------- static long get_request_value(struct ft1000_device *ft1000dev) { u32 value; u16 tempword; u32 status; struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net); if (pft1000info->bootmode == 1) { status = fix_ft1000_read_dpram32(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&value); value = ntohl(value); } else { status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 0); value = tempword; status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 1); value |= (tempword << 16); value = ntohl(value); } return value; } //--------------------------------------------------------------------------- // Function: put_request_value // // Parameters: struct ft1000_device - device structure // long lvalue - value to be put into DPRAM location DWNLD_MAG1_SIZE_LOC // // Returns: none // // Description: This function writes a value to DWNLD_MAG1_SIZE_LOC // // Notes: // //--------------------------------------------------------------------------- static void 
put_request_value(struct ft1000_device *ft1000dev, long lvalue) { u32 tempx; u32 status; tempx = ntohl(lvalue); status = fix_ft1000_write_dpram32(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&tempx); } //--------------------------------------------------------------------------- // Function: hdr_checksum // // Parameters: struct pseudo_hdr *pHdr - Pseudo header pointer // // Returns: checksum - success // // Description: This function returns the checksum of the pseudo header // // Notes: // //--------------------------------------------------------------------------- static u16 hdr_checksum(struct pseudo_hdr *pHdr) { u16 *usPtr = (u16 *)pHdr; u16 chksum; chksum = ((((((usPtr[0] ^ usPtr[1]) ^ usPtr[2]) ^ usPtr[3]) ^ usPtr[4]) ^ usPtr[5]) ^ usPtr[6]); return chksum; } static int check_buffers(u16 *buff_w, u16 *buff_r, int len, int offset) { int i; for (i = 0; i < len; i++) { if (buff_w[i] != buff_r[i + offset]) return -1; } return 0; } //--------------------------------------------------------------------------- // Function: write_blk // // Parameters: struct ft1000_device - device structure // u16 **pUsFile - DSP image file pointer in u16 // u8 **pUcFile - DSP image file pointer in u8 // long word_length - length of the buffer to be written // to DPRAM // // Returns: STATUS_SUCCESS - success // STATUS_FAILURE - failure // // Description: This function writes a block of DSP image to DPRAM // // Notes: // //--------------------------------------------------------------------------- static u32 write_blk (struct ft1000_device *ft1000dev, u16 **pUsFile, u8 **pUcFile, long word_length) { u32 Status = STATUS_SUCCESS; u16 dpram; int loopcnt, i, j; u16 tempword; u16 tempbuffer[64]; u16 resultbuffer[64]; struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net); //DEBUG("FT1000:download:start word_length = %d\n",(int)word_length); dpram = (u16)DWNLD_MAG1_PS_HDR_LOC; tempword = *(*pUsFile); (*pUsFile)++; Status = ft1000_write_dpram16(ft1000dev, dpram, tempword, 0); tempword = 
*(*pUsFile); (*pUsFile)++; Status = ft1000_write_dpram16(ft1000dev, dpram++, tempword, 1); *pUcFile = *pUcFile + 4; word_length--; tempword = (u16)word_length; word_length = (word_length / 16) + 1; for (; word_length > 0; word_length--) /* In words */ { loopcnt = 0; for (i=0; i<32; i++) { if (tempword != 0) { tempbuffer[i++] = *(*pUsFile); (*pUsFile)++; tempbuffer[i] = *(*pUsFile); (*pUsFile)++; *pUcFile = *pUcFile + 4; loopcnt++; tempword--; } else { tempbuffer[i++] = 0; tempbuffer[i] = 0; } } //DEBUG("write_blk: loopcnt is %d\n", loopcnt); //DEBUG("write_blk: bootmode = %d\n", bootmode); //DEBUG("write_blk: dpram = %x\n", dpram); if (pft1000info->bootmode == 0) { if (dpram >= 0x3F4) Status = ft1000_write_dpram32 (ft1000dev, dpram, (u8 *)&tempbuffer[0], 8); else Status = ft1000_write_dpram32 (ft1000dev, dpram, (u8 *)&tempbuffer[0], 64); } else { for (j=0; j<10; j++) { Status = ft1000_write_dpram32 (ft1000dev, dpram, (u8 *)&tempbuffer[0], 64); if (Status == STATUS_SUCCESS) { // Work around for ASIC bit stuffing problem. 
if ( (tempbuffer[31] & 0xfe00) == 0xfe00) { Status = ft1000_write_dpram32(ft1000dev, dpram+12, (u8 *)&tempbuffer[24], 64); } // Let's check the data written Status = ft1000_read_dpram32 (ft1000dev, dpram, (u8 *)&resultbuffer[0], 64); if ( (tempbuffer[31] & 0xfe00) == 0xfe00) { if (check_buffers(tempbuffer, resultbuffer, 28, 0)) { DEBUG("FT1000:download:DPRAM write failed 1 during bootloading\n"); msleep(10); Status = STATUS_FAILURE; break; } Status = ft1000_read_dpram32 (ft1000dev, dpram+12, (u8 *)&resultbuffer[0], 64); if (check_buffers(tempbuffer, resultbuffer, 16, 24)) { DEBUG("FT1000:download:DPRAM write failed 2 during bootloading\n"); msleep(10); Status = STATUS_FAILURE; break; } } else { if (check_buffers(tempbuffer, resultbuffer, 32, 0)) { DEBUG("FT1000:download:DPRAM write failed 3 during bootloading\n"); msleep(10); Status = STATUS_FAILURE; break; } } if (Status == STATUS_SUCCESS) break; } } if (Status != STATUS_SUCCESS) { DEBUG("FT1000:download:Write failed tempbuffer[31] = 0x%x\n", tempbuffer[31]); break; } } dpram = dpram + loopcnt; } return Status; } static void usb_dnld_complete (struct urb *urb) { //DEBUG("****** usb_dnld_complete\n"); } //--------------------------------------------------------------------------- // Function: write_blk_fifo // // Parameters: struct ft1000_device - device structure // u16 **pUsFile - DSP image file pointer in u16 // u8 **pUcFile - DSP image file pointer in u8 // long word_length - length of the buffer to be written // to DPRAM // // Returns: STATUS_SUCCESS - success // STATUS_FAILURE - failure // // Description: This function writes a block of DSP image to DPRAM // // Notes: // //--------------------------------------------------------------------------- static u32 write_blk_fifo(struct ft1000_device *ft1000dev, u16 **pUsFile, u8 **pUcFile, long word_length) { u32 Status = STATUS_SUCCESS; int byte_length; byte_length = word_length * 4; if (byte_length && ((byte_length % 64) == 0)) byte_length += 4; if (byte_length < 
64)
		byte_length = 68;	/* minimum bulk transfer size */

	usb_init_urb(ft1000dev->tx_urb);
	memcpy(ft1000dev->tx_buf, *pUcFile, byte_length);
	usb_fill_bulk_urb(ft1000dev->tx_urb,
			  ft1000dev->dev,
			  usb_sndbulkpipe(ft1000dev->dev,
					  ft1000dev->bulk_out_endpointAddr),
			  ft1000dev->tx_buf, byte_length, usb_dnld_complete,
			  (void *)ft1000dev);
	/* NOTE(review): usb_submit_urb() return value is ignored here */
	usb_submit_urb(ft1000dev->tx_urb, GFP_ATOMIC);

	/* advance both views of the file by word_length 32-bit words */
	*pUsFile = *pUsFile + (word_length << 1);
	*pUcFile = *pUcFile + (word_length << 2);

	return Status;
}

//---------------------------------------------------------------------------
//
//  Function:   scram_dnldr
//
//  Synopsis:   Scramble downloader for Harley based ASIC via USB interface
//
//  Arguments:  pFileStart              - pointer to start of file
//              FileLength              - file length
//
//  Returns:    status                  - return code
//
//  Notes:      Runs a request/response state machine against the DSP
//              bootloader: first the bootloader itself is pushed
//              (STATE_BOOT_DWNLD), then the requested DSP image
//              (STATE_CODE_DWNLD), then provisioning records are
//              queued on pft1000info->prov_list (STATE_SECTION_PROV).
//---------------------------------------------------------------------------
u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart,
		u32 FileLength)
{
	u16 status = STATUS_SUCCESS;
	u32 state;
	u16 handshake;
	struct pseudo_hdr *pseudo_header;
	u16 pseudo_header_len;
	long word_length;
	u16 request;
	u16 temp;
	u16 tempword;

	struct dsp_file_hdr *file_hdr;
	struct dsp_image_info *dsp_img_info = NULL;
	long requested_version;
	bool correct_version;
	struct drv_msg *mailbox_data;
	u16 *data = NULL;
	u16 *s_file = NULL;
	u8 *c_file = NULL;
	u8 *boot_end = NULL, *code_end = NULL;
	int image;
	long loader_code_address, loader_code_size = 0;
	long run_address = 0, run_size = 0;

	u32 templong;
	u32 image_chksum = 0;

	u16 dpram = 0;
	u8 *pbuffer;
	struct prov_record *pprov_record;
	struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);

	DEBUG("Entered   scram_dnldr...\n");

	pft1000info->fcodeldr = 0;
	pft1000info->usbboot = 0;
	pft1000info->dspalive = 0xffff;

	//
	// Get version id of file, at first 4 bytes of file, for newer files.
	//

	state = STATE_START_DWNLD;

	file_hdr = (struct dsp_file_hdr *)pFileStart;

	ft1000_write_register(ft1000dev, 0x800, FT1000_REG_MAG_WATERMARK);

	/* start with the bootloader section of the firmware file */
	s_file = (u16 *) (pFileStart + file_hdr->loader_offset);
	c_file = (u8 *) (pFileStart + file_hdr->loader_offset);

	boot_end = (u8 *) (pFileStart + file_hdr->loader_code_end);

	loader_code_address = file_hdr->loader_code_address;
	loader_code_size = file_hdr->loader_code_size;
	correct_version = FALSE;

	while ((status == STATUS_SUCCESS) && (state != STATE_DONE_FILE)) {
		switch (state) {
		case STATE_START_DWNLD:
			DEBUG("FT1000:STATE_START_DWNLD\n");
			if (pft1000info->usbboot)
				handshake =
				    get_handshake_usb(ft1000dev,
						      HANDSHAKE_DSP_BL_READY);
			else
				handshake =
				    get_handshake(ft1000dev,
						  HANDSHAKE_DSP_BL_READY);

			if (handshake == HANDSHAKE_DSP_BL_READY) {
				DEBUG
				    ("scram_dnldr: handshake is HANDSHAKE_DSP_BL_READY, call put_handshake(HANDSHAKE_DRIVER_READY)\n");
				put_handshake(ft1000dev,
					      HANDSHAKE_DRIVER_READY);
			} else {
				DEBUG
				    ("FT1000:download:Download error: Handshake failed\n");
				status = STATUS_FAILURE;
			}

			state = STATE_BOOT_DWNLD;

			break;

		case STATE_BOOT_DWNLD:
			DEBUG("FT1000:STATE_BOOT_DWNLD\n");
			pft1000info->bootmode = 1;
			handshake = get_handshake(ft1000dev, HANDSHAKE_REQUEST);
			if (handshake == HANDSHAKE_REQUEST) {
				/*
				 * Get type associated with the request.
				 */
				request = get_request_type(ft1000dev);
				switch (request) {
				case REQUEST_RUN_ADDRESS:
					DEBUG("FT1000:REQUEST_RUN_ADDRESS\n");
					put_request_value(ft1000dev,
							  loader_code_address);
					break;
				case REQUEST_CODE_LENGTH:
					DEBUG("FT1000:REQUEST_CODE_LENGTH\n");
					put_request_value(ft1000dev,
							  loader_code_size);
					break;
				case REQUEST_DONE_BL:
					DEBUG("FT1000:REQUEST_DONE_BL\n");
					/* Reposition ptrs to beginning of code section */
					s_file = (u16 *) (boot_end);
					c_file = (u8 *) (boot_end);
					state = STATE_CODE_DWNLD;
					pft1000info->fcodeldr = 1;
					break;
				case REQUEST_CODE_SEGMENT:
					word_length =
					    get_request_value(ft1000dev);
					if (word_length > MAX_LENGTH) {
						DEBUG
						    ("FT1000:download:Download error: Max length exceeded\n");
						status = STATUS_FAILURE;
						break;
					}
					if ((word_length * 2 + c_file) >
					    boot_end) {
						/*
						 * Error, beyond boot code range.
						 */
						DEBUG
						    ("FT1000:download:Download error: Requested len=%d exceeds BOOT code boundary.\n",
						     (int)word_length);
						status = STATUS_FAILURE;
						break;
					}
					/*
					 * Position ASIC DPRAM auto-increment pointer.
					 */
					dpram = (u16) DWNLD_MAG1_PS_HDR_LOC;
					if (word_length & 0x1)
						word_length++;
					word_length = word_length / 2;

					status =
					    write_blk(ft1000dev, &s_file,
						      &c_file, word_length);
					break;
				default:
					DEBUG
					    ("FT1000:download:Download error: Bad request type=%d in BOOT download state.\n",
					     request);
					status = STATUS_FAILURE;
					break;
				}
				if (pft1000info->usbboot)
					put_handshake_usb(ft1000dev,
							  HANDSHAKE_RESPONSE);
				else
					put_handshake(ft1000dev,
						      HANDSHAKE_RESPONSE);
			} else {
				DEBUG
				    ("FT1000:download:Download error: Handshake failed\n");
				status = STATUS_FAILURE;
			}

			break;

		case STATE_CODE_DWNLD:
			pft1000info->bootmode = 0;
			if (pft1000info->usbboot)
				handshake =
				    get_handshake_usb(ft1000dev,
						      HANDSHAKE_REQUEST);
			else
				handshake =
				    get_handshake(ft1000dev, HANDSHAKE_REQUEST);
			if (handshake == HANDSHAKE_REQUEST) {
				/*
				 * Get type associated with the request.
				 */
				if (pft1000info->usbboot)
					request =
					    get_request_type_usb(ft1000dev);
				else
					request = get_request_type(ft1000dev);
				switch (request) {
				case REQUEST_FILE_CHECKSUM:
					DEBUG
					    ("FT1000:download:image_chksum = 0x%8x\n",
					     image_chksum);
					put_request_value(ft1000dev,
							  image_chksum);
					break;
				case REQUEST_RUN_ADDRESS:
					DEBUG
					    ("FT1000:download:  REQUEST_RUN_ADDRESS\n");
					if (correct_version) {
						DEBUG
						    ("FT1000:download:run_address = 0x%8x\n",
						     (int)run_address);
						put_request_value(ft1000dev,
								  run_address);
					} else {
						DEBUG
						    ("FT1000:download:Download error: Got Run address request before image offset request.\n");
						status = STATUS_FAILURE;
						break;
					}
					break;
				case REQUEST_CODE_LENGTH:
					DEBUG
					    ("FT1000:download:REQUEST_CODE_LENGTH\n");
					if (correct_version) {
						DEBUG
						    ("FT1000:download:run_size = 0x%8x\n",
						     (int)run_size);
						put_request_value(ft1000dev,
								  run_size);
					} else {
						DEBUG
						    ("FT1000:download:Download error: Got Size request before image offset request.\n");
						status = STATUS_FAILURE;
						break;
					}
					break;
				case REQUEST_DONE_CL:
					pft1000info->usbboot = 3;
					/* Reposition ptrs to beginning of provisioning section */
					s_file =
					    (u16 *) (pFileStart +
						     file_hdr->commands_offset);
					c_file =
					    (u8 *) (pFileStart +
						    file_hdr->commands_offset);
					state = STATE_DONE_DWNLD;
					break;
				case REQUEST_CODE_SEGMENT:
					if (!correct_version) {
						DEBUG
						    ("FT1000:download:Download error: Got Code Segment request before image offset request.\n");
						status = STATUS_FAILURE;
						break;
					}

					word_length =
					    get_request_value(ft1000dev);
					if (word_length > MAX_LENGTH) {
						DEBUG
						    ("FT1000:download:Download error: Max length exceeded\n");
						status = STATUS_FAILURE;
						break;
					}
					if ((word_length * 2 + c_file) >
					    code_end) {
						/*
						 * Error, beyond boot code range.
						 */
						DEBUG
						    ("FT1000:download:Download error: Requested len=%d exceeds DSP code boundary.\n",
						     (int)word_length);
						status = STATUS_FAILURE;
						break;
					}
					/*
					 * Position ASIC DPRAM auto-increment pointer.
					 */
					dpram = (u16) DWNLD_MAG1_PS_HDR_LOC;
					if (word_length & 0x1)
						word_length++;
					word_length = word_length / 2;

					write_blk_fifo(ft1000dev, &s_file,
						       &c_file, word_length);
					if (pft1000info->usbboot == 0)
						pft1000info->usbboot++;
					if (pft1000info->usbboot == 1) {
						tempword = 0;
						ft1000_write_dpram16(ft1000dev,
								     DWNLD_MAG1_PS_HDR_LOC,
								     tempword,
								     0);
					}

					break;
				case REQUEST_MAILBOX_DATA:
					DEBUG
					    ("FT1000:download: REQUEST_MAILBOX_DATA\n");
					// Convert length from byte count to word count. Make sure we round up.
					word_length =
					    (long)(pft1000info->DSPInfoBlklen +
						   1) / 2;
					put_request_value(ft1000dev,
							  word_length);
					mailbox_data =
					    (struct drv_msg *)&(pft1000info->
								DSPInfoBlk[0]);
					/*
					 * Position ASIC DPRAM auto-increment pointer.
					 */

					data = (u16 *) & mailbox_data->data[0];
					dpram = (u16) DWNLD_MAG1_PS_HDR_LOC;
					if (word_length & 0x1)
						word_length++;

					word_length = (word_length / 2);

					for (; word_length > 0; word_length--) {	/* In words */

						templong = *data++;
						templong |= (*data++ << 16);
						status =
						    fix_ft1000_write_dpram32
						    (ft1000dev, dpram++,
						     (u8 *) & templong);

					}
					break;

				case REQUEST_VERSION_INFO:
					DEBUG
					    ("FT1000:download:REQUEST_VERSION_INFO\n");
					word_length =
					    file_hdr->version_data_size;
					put_request_value(ft1000dev,
							  word_length);
					/*
					 * Position ASIC DPRAM auto-increment pointer.
					 */

					s_file =
					    (u16 *) (pFileStart +
						     file_hdr->
						     version_data_offset);

					dpram = (u16) DWNLD_MAG1_PS_HDR_LOC;
					if (word_length & 0x1)
						word_length++;
					word_length = (word_length / 2);

					for (; word_length > 0; word_length--) {	/* In words */

						templong = ntohs(*s_file++);
						temp = ntohs(*s_file++);
						templong |= (temp << 16);
						status =
						    fix_ft1000_write_dpram32
						    (ft1000dev, dpram++,
						     (u8 *) & templong);

					}
					break;

				case REQUEST_CODE_BY_VERSION:
					DEBUG
					    ("FT1000:download:REQUEST_CODE_BY_VERSION\n");
					correct_version = FALSE;
					requested_version =
					    get_request_value(ft1000dev);

					dsp_img_info =
					    (struct dsp_image_info *)(pFileStart
								      +
								      sizeof
								      (struct
								       dsp_file_hdr));

					/* scan the image table for a matching version */
					for (image = 0;
					     image < file_hdr->nDspImages;
					     image++) {

						if (dsp_img_info->version ==
						    requested_version) {
							correct_version = TRUE;
							DEBUG
							    ("FT1000:download: correct_version is TRUE\n");
							s_file =
							    (u16 *) (pFileStart
								     +
								     dsp_img_info->
								     begin_offset);
							c_file =
							    (u8 *) (pFileStart +
								    dsp_img_info->
								    begin_offset);
							code_end =
							    (u8 *) (pFileStart +
								    dsp_img_info->
								    end_offset);
							run_address =
							    dsp_img_info->
							    run_address;
							run_size =
							    dsp_img_info->
							    image_size;
							image_chksum =
							    (u32) dsp_img_info->
							    checksum;
							break;
						}
						dsp_img_info++;

					}	//end of for

					if (!correct_version) {
						/*
						 * Error, beyond boot code range.
						 */
						DEBUG
						    ("FT1000:download:Download error: Bad Version Request = 0x%x.\n",
						     (int)requested_version);
						status = STATUS_FAILURE;
						break;
					}
					break;

				default:
					DEBUG
					    ("FT1000:download:Download error: Bad request type=%d in CODE download state.\n",
					     request);
					status = STATUS_FAILURE;
					break;
				}
				if (pft1000info->usbboot)
					put_handshake_usb(ft1000dev,
							  HANDSHAKE_RESPONSE);
				else
					put_handshake(ft1000dev,
						      HANDSHAKE_RESPONSE);
			} else {
				DEBUG
				    ("FT1000:download:Download error: Handshake failed\n");
				status = STATUS_FAILURE;
			}

			break;

		case STATE_DONE_DWNLD:
			DEBUG("FT1000:download:Code loader is done...\n");
			state = STATE_SECTION_PROV;
			break;

		case STATE_SECTION_PROV:
			DEBUG("FT1000:download:STATE_SECTION_PROV\n");
			pseudo_header = (struct pseudo_hdr *)c_file;

			if (pseudo_header->checksum ==
			    hdr_checksum(pseudo_header)) {
				if (pseudo_header->portdest !=
				    0x80 /* Dsp OAM */ ) {
					state = STATE_DONE_PROV;
					break;
				}
				pseudo_header_len = ntohs(pseudo_header->length);	/* Byte length for PROV records */

				// Get buffer for provisioning data
				pbuffer =
				    kmalloc((pseudo_header_len +
					     sizeof(struct pseudo_hdr)),
					    GFP_ATOMIC);
				if (pbuffer) {
					memcpy(pbuffer, (void *)c_file,
					       (u32) (pseudo_header_len +
						      sizeof(struct
							     pseudo_hdr)));
					// link provisioning data
					pprov_record =
					    kmalloc(sizeof(struct prov_record),
						    GFP_ATOMIC);
					if (pprov_record) {
						pprov_record->pprov_data =
						    pbuffer;
						list_add_tail(&pprov_record->
							      list,
							      &pft1000info->
							      prov_list);
						// Move to next entry if available
						c_file =
						    (u8 *) ((unsigned long)
							    c_file +
							    (u32) ((pseudo_header_len + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
						if ((unsigned long)(c_file) -
						    (unsigned long)(pFileStart)
						    >=
						    (unsigned long)FileLength) {
							state = STATE_DONE_FILE;
						}
					} else {
						kfree(pbuffer);
						status = STATUS_FAILURE;
					}
				} else {
					status = STATUS_FAILURE;
				}
			} else {
				/* Checksum did not compute */
				status = STATUS_FAILURE;
			}
			DEBUG
			    ("ft1000:download: after STATE_SECTION_PROV, state = %d, status= %d\n",
			     state, status);
			break;

		case STATE_DONE_PROV:
			DEBUG("FT1000:download:STATE_DONE_PROV\n");
			state = STATE_DONE_FILE;
			break;

		default:
			status = STATUS_FAILURE;
			break;
		}		/* End Switch */

		if (status != STATUS_SUCCESS) {
			break;
		}

/****
      // Check if Card is present
      status = Harley_Read_Register(&temp, FT1000_REG_SUP_IMASK);
      if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0x0000) ) {
         break;
      }

      status = Harley_Read_Register(&temp, FT1000_REG_ASIC_ID);
      if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0xffff) ) {
         break;
      }
****/

	}			/* End while */

	DEBUG("Download exiting with status = 0x%8x\n", status);
	ft1000_write_register(ft1000dev, FT1000_DB_DNLD_TX,
			      FT1000_REG_DOORBELL);

	return status;
}
gpl-2.0
knone1/Shamu
sound/isa/sb/sb16_main.c
8488
27415
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Routines for control of 16-bit SoundBlaster cards and clones * Note: This is very ugly hardware which uses one 8-bit DMA channel and * second 16-bit DMA channel. Unfortunately 8-bit DMA channel can't * transfer 16-bit samples and 16-bit DMA channels can't transfer * 8-bit samples. This make full duplex more complicated than * can be... People, don't buy these soundcards for full 16-bit * duplex!!! * Note: 16-bit wide is assigned to first direction which made request. * With full duplex - playback is preferred with abstract layer. * * Note: Some chip revisions have hardware bug. Changing capture * channel from full-duplex 8bit DMA to 16bit DMA will block * 16bit DMA transfers from DSP chip (capture) until 8bit transfer * to DSP chip (playback) starts. This bug can be avoided with * "16bit DMA Allocation" setting set to Playback or Capture. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <asm/io.h>
#include <asm/dma.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/sb.h>
#include <sound/sb16_csp.h>
#include <sound/mpu401.h>
#include <sound/control.h>
#include <sound/info.h>

MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Routines for control of 16-bit SoundBlaster cards and clones");
MODULE_LICENSE("GPL");

#ifdef CONFIG_SND_SB16_CSP
/*
 * Acquire and start the CSP coprocessor for playback, if the loaded
 * (or autoloadable) codec supports the runtime's PCM format.  On
 * success chip->open records the CSP write mode; on any failure the
 * CSP is released and normal (non-CSP) playback proceeds.
 */
static void snd_sb16_csp_playback_prepare(struct snd_sb *chip, struct snd_pcm_runtime *runtime)
{
	if (chip->hardware == SB_HW_16CSP) {
		struct snd_sb_csp *csp = chip->csp;

		if (csp->running & SNDRV_SB_CSP_ST_LOADED) {
			/* manually loaded codec */
			if ((csp->mode & SNDRV_SB_CSP_MODE_DSP_WRITE) &&
			    ((1U << runtime->format) == csp->acc_format)) {
				/* Supported runtime PCM format for playback */
				if (csp->ops.csp_use(csp) == 0) {
					/* If CSP was successfully acquired */
					goto __start_CSP;
				}
			} else if ((csp->mode & SNDRV_SB_CSP_MODE_QSOUND) && (csp->q_enabled)) {
				/* QSound decoder is loaded and enabled */
				if ((1 << runtime->format) & (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 |
							      SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE)) {
					/* Only for simple PCM formats */
					if (csp->ops.csp_use(csp) == 0) {
						/* If CSP was successfully acquired */
						goto __start_CSP;
					}
				}
			}
		} else if (csp->ops.csp_use(csp) == 0) {
			/* Acquire CSP and try to autoload hardware codec */
			if (csp->ops.csp_autoload(csp, runtime->format, SNDRV_SB_CSP_MODE_DSP_WRITE)) {
				/* Unsupported format, release CSP */
				csp->ops.csp_unuse(csp);
			} else {
			      __start_CSP:
				/* Try to start CSP */
				if (csp->ops.csp_start(csp, (chip->mode & SB_MODE_PLAYBACK_16) ?
						       SNDRV_SB_CSP_SAMPLE_16BIT : SNDRV_SB_CSP_SAMPLE_8BIT,
						       (runtime->channels > 1) ?
						       SNDRV_SB_CSP_STEREO : SNDRV_SB_CSP_MONO)) {
					/* Failed, release CSP */
					csp->ops.csp_unuse(csp);
				} else {
					/* Success, CSP acquired and running */
					chip->open = SNDRV_SB_CSP_MODE_DSP_WRITE;
				}
			}
		}
	}
}

/*
 * Capture-side counterpart of snd_sb16_csp_playback_prepare(); there
 * is no QSound path for capture.
 */
static void snd_sb16_csp_capture_prepare(struct snd_sb *chip, struct snd_pcm_runtime *runtime)
{
	if (chip->hardware == SB_HW_16CSP) {
		struct snd_sb_csp *csp = chip->csp;

		if (csp->running & SNDRV_SB_CSP_ST_LOADED) {
			/* manually loaded codec */
			if ((csp->mode & SNDRV_SB_CSP_MODE_DSP_READ) &&
			    ((1U << runtime->format) == csp->acc_format)) {
				/* Supported runtime PCM format for capture */
				if (csp->ops.csp_use(csp) == 0) {
					/* If CSP was successfully acquired */
					goto __start_CSP;
				}
			}
		} else if (csp->ops.csp_use(csp) == 0) {
			/* Acquire CSP and try to autoload hardware codec */
			if (csp->ops.csp_autoload(csp, runtime->format, SNDRV_SB_CSP_MODE_DSP_READ)) {
				/* Unsupported format, release CSP */
				csp->ops.csp_unuse(csp);
			} else {
			      __start_CSP:
				/* Try to start CSP */
				if (csp->ops.csp_start(csp, (chip->mode & SB_MODE_CAPTURE_16) ?
						       SNDRV_SB_CSP_SAMPLE_16BIT : SNDRV_SB_CSP_SAMPLE_8BIT,
						       (runtime->channels > 1) ?
						       SNDRV_SB_CSP_STEREO : SNDRV_SB_CSP_MONO)) {
					/* Failed, release CSP */
					csp->ops.csp_unuse(csp);
				} else {
					/* Success, CSP acquired and running */
					chip->open = SNDRV_SB_CSP_MODE_DSP_READ;
				}
			}
		}
	}
}

/* Transfer pending QSound position updates under the register lock. */
static void snd_sb16_csp_update(struct snd_sb *chip)
{
	if (chip->hardware == SB_HW_16CSP) {
		struct snd_sb_csp *csp = chip->csp;

		if (csp->qpos_changed) {
			spin_lock(&chip->reg_lock);
			csp->ops.csp_qsound_transfer (csp);
			spin_unlock(&chip->reg_lock);
		}
	}
}

/* Extend the advertised playback formats with what the CSP can decode. */
static void snd_sb16_csp_playback_open(struct snd_sb *chip, struct snd_pcm_runtime *runtime)
{
	/* CSP decoders (QSound excluded) support only 16bit transfers */
	if (chip->hardware == SB_HW_16CSP) {
		struct snd_sb_csp *csp = chip->csp;

		if (csp->running & SNDRV_SB_CSP_ST_LOADED) {
			/* manually loaded codec */
			if (csp->mode & SNDRV_SB_CSP_MODE_DSP_WRITE) {
				runtime->hw.formats |= csp->acc_format;
			}
		} else {
			/* autoloaded codecs */
			runtime->hw.formats |= SNDRV_PCM_FMTBIT_MU_LAW | SNDRV_PCM_FMTBIT_A_LAW |
					       SNDRV_PCM_FMTBIT_IMA_ADPCM;
		}
	}
}

/* Stop and release the CSP when a CSP-backed playback stream closes. */
static void snd_sb16_csp_playback_close(struct snd_sb *chip)
{
	if ((chip->hardware == SB_HW_16CSP) && (chip->open == SNDRV_SB_CSP_MODE_DSP_WRITE)) {
		struct snd_sb_csp *csp = chip->csp;

		if (csp->ops.csp_stop(csp) == 0) {
			csp->ops.csp_unuse(csp);
			chip->open = 0;
		}
	}
}

/* Extend the advertised capture formats with what the CSP can encode. */
static void snd_sb16_csp_capture_open(struct snd_sb *chip, struct snd_pcm_runtime *runtime)
{
	/* CSP coders support only 16bit transfers */
	if (chip->hardware == SB_HW_16CSP) {
		struct snd_sb_csp *csp = chip->csp;

		if (csp->running & SNDRV_SB_CSP_ST_LOADED) {
			/* manually loaded codec */
			if (csp->mode & SNDRV_SB_CSP_MODE_DSP_READ) {
				runtime->hw.formats |= csp->acc_format;
			}
		} else {
			/* autoloaded codecs */
			runtime->hw.formats |= SNDRV_PCM_FMTBIT_MU_LAW | SNDRV_PCM_FMTBIT_A_LAW |
					       SNDRV_PCM_FMTBIT_IMA_ADPCM;
		}
	}
}

/* Stop and release the CSP when a CSP-backed capture stream closes. */
static void snd_sb16_csp_capture_close(struct snd_sb *chip)
{
	if ((chip->hardware == SB_HW_16CSP) && (chip->open == SNDRV_SB_CSP_MODE_DSP_READ)) {
		struct snd_sb_csp *csp = chip->csp;

		if (csp->ops.csp_stop(csp) == 0) {
csp->ops.csp_unuse(csp);
			chip->open = 0;
		}
	}
}
#else
/* Without CSP support all hooks collapse to no-ops. */
#define snd_sb16_csp_playback_prepare(chip, runtime)	/*nop*/
#define snd_sb16_csp_capture_prepare(chip, runtime)	/*nop*/
#define snd_sb16_csp_update(chip)			/*nop*/
#define snd_sb16_csp_playback_open(chip, runtime)	/*nop*/
#define snd_sb16_csp_playback_close(chip)		/*nop*/
#define snd_sb16_csp_capture_open(chip, runtime)	/*nop*/
#define snd_sb16_csp_capture_close(chip)		/*nop*/
#endif

/*
 * Program the DSP sample rate for both directions, acknowledging any
 * pending IRQ for the given stream first.  The rate is written only
 * when no other stream holds the rate lock (SB_RATE_LOCK).
 */
static void snd_sb16_setup_rate(struct snd_sb *chip,
				unsigned short rate,
				int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&chip->reg_lock, flags);
	if (chip->mode & (channel == SNDRV_PCM_STREAM_PLAYBACK ? SB_MODE_PLAYBACK_16 : SB_MODE_CAPTURE_16))
		snd_sb_ack_16bit(chip);
	else
		snd_sb_ack_8bit(chip);
	if (!(chip->mode & SB_RATE_LOCK)) {
		chip->locked_rate = rate;
		snd_sbdsp_command(chip, SB_DSP_SAMPLE_RATE_IN);
		snd_sbdsp_command(chip, rate >> 8);
		snd_sbdsp_command(chip, rate & 0xff);
		snd_sbdsp_command(chip, SB_DSP_SAMPLE_RATE_OUT);
		snd_sbdsp_command(chip, rate >> 8);
		snd_sbdsp_command(chip, rate & 0xff);
	}
	spin_unlock_irqrestore(&chip->reg_lock, flags);
}

/* Allocate the DMA buffer for the stream. */
static int snd_sb16_hw_params(struct snd_pcm_substream *substream,
			      struct snd_pcm_hw_params *hw_params)
{
	return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
}

/* Release the DMA buffer for the stream. */
static int snd_sb16_hw_free(struct snd_pcm_substream *substream)
{
	snd_pcm_lib_free_pages(substream);
	return 0;
}

/*
 * Prepare a playback stream: optionally engage the CSP, set the rate,
 * program the ISA DMA controller in auto-init mode, and issue the
 * 8/16-bit auto-init output command with the period size (the DSP
 * takes the transfer count minus one, in samples for 16-bit mode).
 */
static int snd_sb16_playback_prepare(struct snd_pcm_substream *substream)
{
	unsigned long flags;
	struct snd_sb *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned char format;
	unsigned int size, count, dma;

	snd_sb16_csp_playback_prepare(chip, runtime);
	if (snd_pcm_format_unsigned(runtime->format) > 0) {
		format = runtime->channels > 1 ? SB_DSP4_MODE_UNS_STEREO : SB_DSP4_MODE_UNS_MONO;
	} else {
		format = runtime->channels > 1 ? SB_DSP4_MODE_SIGN_STEREO : SB_DSP4_MODE_SIGN_MONO;
	}

	snd_sb16_setup_rate(chip, runtime->rate, SNDRV_PCM_STREAM_PLAYBACK);
	size = chip->p_dma_size = snd_pcm_lib_buffer_bytes(substream);
	dma = (chip->mode & SB_MODE_PLAYBACK_8) ? chip->dma8 : chip->dma16;
	snd_dma_program(dma, runtime->dma_addr, size, DMA_MODE_WRITE | DMA_AUTOINIT);

	count = snd_pcm_lib_period_bytes(substream);
	spin_lock_irqsave(&chip->reg_lock, flags);
	if (chip->mode & SB_MODE_PLAYBACK_16) {
		count >>= 1;	/* bytes -> 16-bit samples */
		count--;
		snd_sbdsp_command(chip, SB_DSP4_OUT16_AI);
		snd_sbdsp_command(chip, format);
		snd_sbdsp_command(chip, count & 0xff);
		snd_sbdsp_command(chip, count >> 8);
		snd_sbdsp_command(chip, SB_DSP_DMA16_OFF);
	} else {
		count--;
		snd_sbdsp_command(chip, SB_DSP4_OUT8_AI);
		snd_sbdsp_command(chip, format);
		snd_sbdsp_command(chip, count & 0xff);
		snd_sbdsp_command(chip, count >> 8);
		snd_sbdsp_command(chip, SB_DSP_DMA8_OFF);
	}
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	return 0;
}

/* Start/stop the playback DMA transfer and manage the shared rate lock. */
static int snd_sb16_playback_trigger(struct snd_pcm_substream *substream,
				     int cmd)
{
	struct snd_sb *chip = snd_pcm_substream_chip(substream);
	int result = 0;

	spin_lock(&chip->reg_lock);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		chip->mode |= SB_RATE_LOCK_PLAYBACK;
		snd_sbdsp_command(chip, chip->mode & SB_MODE_PLAYBACK_16 ? SB_DSP_DMA16_ON : SB_DSP_DMA8_ON);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		snd_sbdsp_command(chip, chip->mode & SB_MODE_PLAYBACK_16 ? SB_DSP_DMA16_OFF : SB_DSP_DMA8_OFF);
		/* next two lines are needed for some types of DSP4 (SB AWE 32 - 4.13) */
		if (chip->mode & SB_RATE_LOCK_CAPTURE)
			snd_sbdsp_command(chip, chip->mode & SB_MODE_CAPTURE_16 ? SB_DSP_DMA16_ON : SB_DSP_DMA8_ON);
		chip->mode &= ~SB_RATE_LOCK_PLAYBACK;
		break;
	default:
		result = -EINVAL;
	}
	spin_unlock(&chip->reg_lock);
	return result;
}

/*
 * Prepare a capture stream: mirror of snd_sb16_playback_prepare() with
 * the input commands and capture DMA size/channel.
 */
static int snd_sb16_capture_prepare(struct snd_pcm_substream *substream)
{
	unsigned long flags;
	struct snd_sb *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned char format;
	unsigned int size, count, dma;

	snd_sb16_csp_capture_prepare(chip, runtime);
	if (snd_pcm_format_unsigned(runtime->format) > 0) {
		format = runtime->channels > 1 ? SB_DSP4_MODE_UNS_STEREO : SB_DSP4_MODE_UNS_MONO;
	} else {
		format = runtime->channels > 1 ? SB_DSP4_MODE_SIGN_STEREO : SB_DSP4_MODE_SIGN_MONO;
	}
	snd_sb16_setup_rate(chip, runtime->rate, SNDRV_PCM_STREAM_CAPTURE);
	size = chip->c_dma_size = snd_pcm_lib_buffer_bytes(substream);
	dma = (chip->mode & SB_MODE_CAPTURE_8) ? chip->dma8 : chip->dma16;
	snd_dma_program(dma, runtime->dma_addr, size, DMA_MODE_READ | DMA_AUTOINIT);

	count = snd_pcm_lib_period_bytes(substream);
	spin_lock_irqsave(&chip->reg_lock, flags);
	if (chip->mode & SB_MODE_CAPTURE_16) {
		count >>= 1;	/* bytes -> 16-bit samples */
		count--;
		snd_sbdsp_command(chip, SB_DSP4_IN16_AI);
		snd_sbdsp_command(chip, format);
		snd_sbdsp_command(chip, count & 0xff);
		snd_sbdsp_command(chip, count >> 8);
		snd_sbdsp_command(chip, SB_DSP_DMA16_OFF);
	} else {
		count--;
		snd_sbdsp_command(chip, SB_DSP4_IN8_AI);
		snd_sbdsp_command(chip, format);
		snd_sbdsp_command(chip, count & 0xff);
		snd_sbdsp_command(chip, count >> 8);
		snd_sbdsp_command(chip, SB_DSP_DMA8_OFF);
	}
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	return 0;
}

/* Start/stop the capture DMA transfer and manage the shared rate lock. */
static int snd_sb16_capture_trigger(struct snd_pcm_substream *substream,
				    int cmd)
{
	struct snd_sb *chip = snd_pcm_substream_chip(substream);
	int result = 0;

	spin_lock(&chip->reg_lock);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		chip->mode |= SB_RATE_LOCK_CAPTURE;
		snd_sbdsp_command(chip, chip->mode & SB_MODE_CAPTURE_16 ? SB_DSP_DMA16_ON : SB_DSP_DMA8_ON);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		snd_sbdsp_command(chip, chip->mode & SB_MODE_CAPTURE_16 ? SB_DSP_DMA16_OFF : SB_DSP_DMA8_OFF);
		/* next two lines are needed for some types of DSP4 (SB AWE 32 - 4.13) */
		if (chip->mode & SB_RATE_LOCK_PLAYBACK)
			snd_sbdsp_command(chip, chip->mode & SB_MODE_PLAYBACK_16 ? SB_DSP_DMA16_ON : SB_DSP_DMA8_ON);
		chip->mode &= ~SB_RATE_LOCK_CAPTURE;
		break;
	default:
		result = -EINVAL;
	}
	spin_unlock(&chip->reg_lock);
	return result;
}

/*
 * Interrupt handler: dispatch MPU-401 MIDI input, then signal period
 * completion for whichever 8-bit and/or 16-bit streams are running,
 * acknowledging each IRQ class.  A spurious IRQ for an idle direction
 * gets its DMA turned off.
 */
irqreturn_t snd_sb16dsp_interrupt(int irq, void *dev_id)
{
	struct snd_sb *chip = dev_id;
	unsigned char status;
	int ok;

	spin_lock(&chip->mixer_lock);
	status = snd_sbmixer_read(chip, SB_DSP4_IRQSTATUS);
	spin_unlock(&chip->mixer_lock);
	if ((status & SB_IRQTYPE_MPUIN) && chip->rmidi_callback)
		chip->rmidi_callback(irq, chip->rmidi->private_data);
	if (status & SB_IRQTYPE_8BIT) {
		ok = 0;
		if (chip->mode & SB_MODE_PLAYBACK_8) {
			snd_pcm_period_elapsed(chip->playback_substream);
			snd_sb16_csp_update(chip);
			ok++;
		}
		if (chip->mode & SB_MODE_CAPTURE_8) {
			snd_pcm_period_elapsed(chip->capture_substream);
			ok++;
		}
		spin_lock(&chip->reg_lock);
		if (!ok)
			snd_sbdsp_command(chip, SB_DSP_DMA8_OFF);
		snd_sb_ack_8bit(chip);
		spin_unlock(&chip->reg_lock);
	}
	if (status & SB_IRQTYPE_16BIT) {
		ok = 0;
		if (chip->mode & SB_MODE_PLAYBACK_16) {
			snd_pcm_period_elapsed(chip->playback_substream);
			snd_sb16_csp_update(chip);
			ok++;
		}
		if (chip->mode & SB_MODE_CAPTURE_16) {
			snd_pcm_period_elapsed(chip->capture_substream);
			ok++;
		}
		spin_lock(&chip->reg_lock);
		if (!ok)
			snd_sbdsp_command(chip, SB_DSP_DMA16_OFF);
		snd_sb_ack_16bit(chip);
		spin_unlock(&chip->reg_lock);
	}
	return IRQ_HANDLED;
}

/*

 */

/* Report the current playback position from the ISA DMA residue. */
static snd_pcm_uframes_t snd_sb16_playback_pointer(struct snd_pcm_substream *substream)
{
	struct snd_sb *chip = snd_pcm_substream_chip(substream);
	unsigned int dma;
	size_t ptr;

	dma = (chip->mode & SB_MODE_PLAYBACK_8) ?
chip->dma8 : chip->dma16; ptr = snd_dma_pointer(dma, chip->p_dma_size); return bytes_to_frames(substream->runtime, ptr); } static snd_pcm_uframes_t snd_sb16_capture_pointer(struct snd_pcm_substream *substream) { struct snd_sb *chip = snd_pcm_substream_chip(substream); unsigned int dma; size_t ptr; dma = (chip->mode & SB_MODE_CAPTURE_8) ? chip->dma8 : chip->dma16; ptr = snd_dma_pointer(dma, chip->c_dma_size); return bytes_to_frames(substream->runtime, ptr); } /* */ static struct snd_pcm_hardware snd_sb16_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID), .formats = 0, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_44100, .rate_min = 4000, .rate_max = 44100, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 64, .period_bytes_max = (128*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; static struct snd_pcm_hardware snd_sb16_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID), .formats = 0, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_44100, .rate_min = 4000, .rate_max = 44100, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 64, .period_bytes_max = (128*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; /* * open/close */ static int snd_sb16_playback_open(struct snd_pcm_substream *substream) { unsigned long flags; struct snd_sb *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; spin_lock_irqsave(&chip->open_lock, flags); if (chip->mode & SB_MODE_PLAYBACK) { spin_unlock_irqrestore(&chip->open_lock, flags); return -EAGAIN; } runtime->hw = snd_sb16_playback; /* skip if 16 bit DMA was reserved for capture */ if (chip->force_mode16 & SB_MODE_CAPTURE_16) goto __skip_16bit; if (chip->dma16 >= 0 && !(chip->mode & SB_MODE_CAPTURE_16)) { chip->mode |= SB_MODE_PLAYBACK_16; runtime->hw.formats = 
SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE; /* Vibra16X hack */ if (chip->dma16 <= 3) { runtime->hw.buffer_bytes_max = runtime->hw.period_bytes_max = 64 * 1024; } else { snd_sb16_csp_playback_open(chip, runtime); } goto __open_ok; } __skip_16bit: if (chip->dma8 >= 0 && !(chip->mode & SB_MODE_CAPTURE_8)) { chip->mode |= SB_MODE_PLAYBACK_8; /* DSP v 4.xx can transfer 16bit data through 8bit DMA channel, SBHWPG 2-7 */ if (chip->dma16 < 0) { runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE; chip->mode |= SB_MODE_PLAYBACK_16; } else { runtime->hw.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S8; } runtime->hw.buffer_bytes_max = runtime->hw.period_bytes_max = 64 * 1024; goto __open_ok; } spin_unlock_irqrestore(&chip->open_lock, flags); return -EAGAIN; __open_ok: if (chip->hardware == SB_HW_ALS100) runtime->hw.rate_max = 48000; if (chip->hardware == SB_HW_CS5530) { runtime->hw.buffer_bytes_max = 32 * 1024; runtime->hw.periods_min = 2; runtime->hw.rate_min = 44100; } if (chip->mode & SB_RATE_LOCK) runtime->hw.rate_min = runtime->hw.rate_max = chip->locked_rate; chip->playback_substream = substream; spin_unlock_irqrestore(&chip->open_lock, flags); return 0; } static int snd_sb16_playback_close(struct snd_pcm_substream *substream) { unsigned long flags; struct snd_sb *chip = snd_pcm_substream_chip(substream); snd_sb16_csp_playback_close(chip); spin_lock_irqsave(&chip->open_lock, flags); chip->playback_substream = NULL; chip->mode &= ~SB_MODE_PLAYBACK; spin_unlock_irqrestore(&chip->open_lock, flags); return 0; } static int snd_sb16_capture_open(struct snd_pcm_substream *substream) { unsigned long flags; struct snd_sb *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; spin_lock_irqsave(&chip->open_lock, flags); if (chip->mode & SB_MODE_CAPTURE) { spin_unlock_irqrestore(&chip->open_lock, flags); return -EAGAIN; } runtime->hw = snd_sb16_capture; /* skip if 16 bit DMA was reserved for playback */ if 
(chip->force_mode16 & SB_MODE_PLAYBACK_16) goto __skip_16bit; if (chip->dma16 >= 0 && !(chip->mode & SB_MODE_PLAYBACK_16)) { chip->mode |= SB_MODE_CAPTURE_16; runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE; /* Vibra16X hack */ if (chip->dma16 <= 3) { runtime->hw.buffer_bytes_max = runtime->hw.period_bytes_max = 64 * 1024; } else { snd_sb16_csp_capture_open(chip, runtime); } goto __open_ok; } __skip_16bit: if (chip->dma8 >= 0 && !(chip->mode & SB_MODE_PLAYBACK_8)) { chip->mode |= SB_MODE_CAPTURE_8; /* DSP v 4.xx can transfer 16bit data through 8bit DMA channel, SBHWPG 2-7 */ if (chip->dma16 < 0) { runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE; chip->mode |= SB_MODE_CAPTURE_16; } else { runtime->hw.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S8; } runtime->hw.buffer_bytes_max = runtime->hw.period_bytes_max = 64 * 1024; goto __open_ok; } spin_unlock_irqrestore(&chip->open_lock, flags); return -EAGAIN; __open_ok: if (chip->hardware == SB_HW_ALS100) runtime->hw.rate_max = 48000; if (chip->hardware == SB_HW_CS5530) { runtime->hw.buffer_bytes_max = 32 * 1024; runtime->hw.periods_min = 2; runtime->hw.rate_min = 44100; } if (chip->mode & SB_RATE_LOCK) runtime->hw.rate_min = runtime->hw.rate_max = chip->locked_rate; chip->capture_substream = substream; spin_unlock_irqrestore(&chip->open_lock, flags); return 0; } static int snd_sb16_capture_close(struct snd_pcm_substream *substream) { unsigned long flags; struct snd_sb *chip = snd_pcm_substream_chip(substream); snd_sb16_csp_capture_close(chip); spin_lock_irqsave(&chip->open_lock, flags); chip->capture_substream = NULL; chip->mode &= ~SB_MODE_CAPTURE; spin_unlock_irqrestore(&chip->open_lock, flags); return 0; } /* * DMA control interface */ static int snd_sb16_set_dma_mode(struct snd_sb *chip, int what) { if (chip->dma8 < 0 || chip->dma16 < 0) { if (snd_BUG_ON(what)) return -EINVAL; return 0; } if (what == 0) { chip->force_mode16 = 0; } else if (what == 1) { 
chip->force_mode16 = SB_MODE_PLAYBACK_16; } else if (what == 2) { chip->force_mode16 = SB_MODE_CAPTURE_16; } else { return -EINVAL; } return 0; } static int snd_sb16_get_dma_mode(struct snd_sb *chip) { if (chip->dma8 < 0 || chip->dma16 < 0) return 0; switch (chip->force_mode16) { case SB_MODE_PLAYBACK_16: return 1; case SB_MODE_CAPTURE_16: return 2; default: return 0; } } static int snd_sb16_dma_control_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char *texts[3] = { "Auto", "Playback", "Capture" }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = 3; if (uinfo->value.enumerated.item > 2) uinfo->value.enumerated.item = 2; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int snd_sb16_dma_control_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_sb *chip = snd_kcontrol_chip(kcontrol); unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); ucontrol->value.enumerated.item[0] = snd_sb16_get_dma_mode(chip); spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } static int snd_sb16_dma_control_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_sb *chip = snd_kcontrol_chip(kcontrol); unsigned long flags; unsigned char nval, oval; int change; if ((nval = ucontrol->value.enumerated.item[0]) > 2) return -EINVAL; spin_lock_irqsave(&chip->reg_lock, flags); oval = snd_sb16_get_dma_mode(chip); change = nval != oval; snd_sb16_set_dma_mode(chip, nval); spin_unlock_irqrestore(&chip->reg_lock, flags); return change; } static struct snd_kcontrol_new snd_sb16_dma_control = { .iface = SNDRV_CTL_ELEM_IFACE_CARD, .name = "16-bit DMA Allocation", .info = snd_sb16_dma_control_info, .get = snd_sb16_dma_control_get, .put = snd_sb16_dma_control_put }; /* * Initialization part */ int snd_sb16dsp_configure(struct snd_sb * chip) { unsigned long flags; unsigned char irqreg = 0, dmareg = 0, 
mpureg; unsigned char realirq, realdma, realmpureg; /* note: mpu register should be present only on SB16 Vibra soundcards */ // printk(KERN_DEBUG "codec->irq=%i, codec->dma8=%i, codec->dma16=%i\n", chip->irq, chip->dma8, chip->dma16); spin_lock_irqsave(&chip->mixer_lock, flags); mpureg = snd_sbmixer_read(chip, SB_DSP4_MPUSETUP) & ~0x06; spin_unlock_irqrestore(&chip->mixer_lock, flags); switch (chip->irq) { case 2: case 9: irqreg |= SB_IRQSETUP_IRQ9; break; case 5: irqreg |= SB_IRQSETUP_IRQ5; break; case 7: irqreg |= SB_IRQSETUP_IRQ7; break; case 10: irqreg |= SB_IRQSETUP_IRQ10; break; default: return -EINVAL; } if (chip->dma8 >= 0) { switch (chip->dma8) { case 0: dmareg |= SB_DMASETUP_DMA0; break; case 1: dmareg |= SB_DMASETUP_DMA1; break; case 3: dmareg |= SB_DMASETUP_DMA3; break; default: return -EINVAL; } } if (chip->dma16 >= 0 && chip->dma16 != chip->dma8) { switch (chip->dma16) { case 5: dmareg |= SB_DMASETUP_DMA5; break; case 6: dmareg |= SB_DMASETUP_DMA6; break; case 7: dmareg |= SB_DMASETUP_DMA7; break; default: return -EINVAL; } } switch (chip->mpu_port) { case 0x300: mpureg |= 0x04; break; case 0x330: mpureg |= 0x00; break; default: mpureg |= 0x02; /* disable MPU */ } spin_lock_irqsave(&chip->mixer_lock, flags); snd_sbmixer_write(chip, SB_DSP4_IRQSETUP, irqreg); realirq = snd_sbmixer_read(chip, SB_DSP4_IRQSETUP); snd_sbmixer_write(chip, SB_DSP4_DMASETUP, dmareg); realdma = snd_sbmixer_read(chip, SB_DSP4_DMASETUP); snd_sbmixer_write(chip, SB_DSP4_MPUSETUP, mpureg); realmpureg = snd_sbmixer_read(chip, SB_DSP4_MPUSETUP); spin_unlock_irqrestore(&chip->mixer_lock, flags); if ((~realirq) & irqreg || (~realdma) & dmareg) { snd_printk(KERN_ERR "SB16 [0x%lx]: unable to set DMA & IRQ (PnP device?)\n", chip->port); snd_printk(KERN_ERR "SB16 [0x%lx]: wanted: irqreg=0x%x, dmareg=0x%x, mpureg = 0x%x\n", chip->port, realirq, realdma, realmpureg); snd_printk(KERN_ERR "SB16 [0x%lx]: got: irqreg=0x%x, dmareg=0x%x, mpureg = 0x%x\n", chip->port, irqreg, dmareg, mpureg); 
return -ENODEV; } return 0; } static struct snd_pcm_ops snd_sb16_playback_ops = { .open = snd_sb16_playback_open, .close = snd_sb16_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_sb16_hw_params, .hw_free = snd_sb16_hw_free, .prepare = snd_sb16_playback_prepare, .trigger = snd_sb16_playback_trigger, .pointer = snd_sb16_playback_pointer, }; static struct snd_pcm_ops snd_sb16_capture_ops = { .open = snd_sb16_capture_open, .close = snd_sb16_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_sb16_hw_params, .hw_free = snd_sb16_hw_free, .prepare = snd_sb16_capture_prepare, .trigger = snd_sb16_capture_trigger, .pointer = snd_sb16_capture_pointer, }; int snd_sb16dsp_pcm(struct snd_sb * chip, int device, struct snd_pcm ** rpcm) { struct snd_card *card = chip->card; struct snd_pcm *pcm; int err; if (rpcm) *rpcm = NULL; if ((err = snd_pcm_new(card, "SB16 DSP", device, 1, 1, &pcm)) < 0) return err; sprintf(pcm->name, "DSP v%i.%i", chip->version >> 8, chip->version & 0xff); pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX; pcm->private_data = chip; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_sb16_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_sb16_capture_ops); if (chip->dma16 >= 0 && chip->dma8 != chip->dma16) snd_ctl_add(card, snd_ctl_new1(&snd_sb16_dma_control, chip)); else pcm->info_flags = SNDRV_PCM_INFO_HALF_DUPLEX; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_isa_data(), 64*1024, 128*1024); if (rpcm) *rpcm = pcm; return 0; } const struct snd_pcm_ops *snd_sb16dsp_get_pcm_ops(int direction) { return direction == SNDRV_PCM_STREAM_PLAYBACK ? 
&snd_sb16_playback_ops : &snd_sb16_capture_ops; } EXPORT_SYMBOL(snd_sb16dsp_pcm); EXPORT_SYMBOL(snd_sb16dsp_get_pcm_ops); EXPORT_SYMBOL(snd_sb16dsp_configure); EXPORT_SYMBOL(snd_sb16dsp_interrupt); /* * INIT part */ static int __init alsa_sb16_init(void) { return 0; } static void __exit alsa_sb16_exit(void) { } module_init(alsa_sb16_init) module_exit(alsa_sb16_exit)
gpl-2.0
MasterSS/linux
arch/mn10300/kernel/module.c
9768
4339
/* MN10300 Kernel module helper routines
 *
 * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. All Rights Reserved.
 * Written by Mark Salter (msalter@redhat.com)
 * - Derived from arch/i386/kernel/module.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public Licence as published by
 * the Free Software Foundation; either version 2 of the Licence, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public Licence for more details.
 *
 * You should have received a copy of the GNU General Public Licence
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/bug.h>

#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt, ...)
#endif

/* Store a 16-bit value as two little-endian bytes; byte-wise stores are
 * used because the relocation target may be unaligned. */
static void reloc_put16(uint8_t *p, uint32_t val)
{
	p[0] = val & 0xff;
	p[1] = (val >> 8) & 0xff;
}

/* Store the low 24 bits of a value, little-endian, byte by byte. */
static void reloc_put24(uint8_t *p, uint32_t val)
{
	reloc_put16(p, val);
	p[2] = (val >> 16) & 0xff;
}

/* Store a 32-bit value, little-endian, byte by byte. */
static void reloc_put32(uint8_t *p, uint32_t val)
{
	reloc_put16(p, val);
	reloc_put16(p+2, val >> 16);
}

/*
 * apply a RELA relocation
 *
 * Walks the relocation entries of section @relsec and patches the section
 * they refer to (sechdrs[relsec].sh_info) in place.  A SYM_DIFF entry does
 * not patch anything itself; it records a value to be subtracted from the
 * immediately following absolute relocation (linker-relaxation support),
 * so a SYM_DIFF must always be followed by a 8/16/24/32-bit reloc.
 *
 * Returns 0 on success or -ENOEXEC if an unknown or malformed relocation
 * sequence is encountered.
 */
int apply_relocate_add(Elf32_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i, sym_diff_seen = 0;
	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	Elf32_Addr relocation, sym_diff_val = 0;
	uint8_t *location;
	uint32_t value;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* this is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* this is the symbol the relocation is referring to (note that
		 * all undefined symbols have been resolved by the caller) */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		/* this is the adjustment to be made */
		relocation = sym->st_value + rel[i].r_addend;

		if (sym_diff_seen) {
			/* Only a plain absolute store may consume a pending
			 * SYM_DIFF adjustment. */
			switch (ELF32_R_TYPE(rel[i].r_info)) {
			case R_MN10300_32:
			case R_MN10300_24:
			case R_MN10300_16:
			case R_MN10300_8:
				relocation -= sym_diff_val;
				sym_diff_seen = 0;
				break;
			default:
				printk(KERN_ERR "module %s: Unexpected SYM_DIFF relocation: %u\n",
				       me->name, ELF32_R_TYPE(rel[i].r_info));
				return -ENOEXEC;
			}
		}

		switch (ELF32_R_TYPE(rel[i].r_info)) {
			/* for the first four relocation types, we simply
			 * store the adjustment at the location given */
		case R_MN10300_32:
			reloc_put32(location, relocation);
			break;
		case R_MN10300_24:
			reloc_put24(location, relocation);
			break;
		case R_MN10300_16:
			reloc_put16(location, relocation);
			break;
		case R_MN10300_8:
			*location = relocation;
			break;

			/* for the next three relocation types, we write the
			 * adjustment with the address subtracted over the
			 * value at the location given */
		case R_MN10300_PCREL32:
			value = relocation - (uint32_t) location;
			reloc_put32(location, value);
			break;
		case R_MN10300_PCREL16:
			value = relocation - (uint32_t) location;
			reloc_put16(location, value);
			break;
		case R_MN10300_PCREL8:
			*location = relocation - (uint32_t) location;
			break;

		case R_MN10300_SYM_DIFF:
			/* This is used to adjust the next reloc as required
			 * by relaxation. */
			sym_diff_seen = 1;
			sym_diff_val = sym->st_value;
			break;

		case R_MN10300_ALIGN:
			/* Just ignore the ALIGN relocs.
			 * Only interesting if kernel performed relaxation. */
			continue;

		default:
			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	if (sym_diff_seen) {
		/* The table ended with an unconsumed SYM_DIFF.  Report the
		 * type of the *last* entry (rel[i - 1]); the old code read
		 * rel[i].r_info here, which is one entry past the end of the
		 * relocation table (i equals the entry count after the loop)
		 * and therefore an out-of-bounds read.  sym_diff_seen can
		 * only be set after at least one iteration, so i >= 1. */
		printk(KERN_ERR "module %s: Nothing follows SYM_DIFF relocation: %u\n",
		       me->name, ELF32_R_TYPE(rel[i - 1].r_info));
		return -ENOEXEC;
	}
	return 0;
}
gpl-2.0
lowtraxx/kernel
drivers/net/wireless/ath/debug.c
9768
1473
/*
 * Copyright (c) 2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/export.h>
#include "ath.h"

/*
 * Map an nl80211 interface type to a short tag suitable for log output.
 * Always returns a pointer to a static string; interface types not listed
 * below come back as "UNKNOWN".
 */
const char *ath_opmode_to_string(enum nl80211_iftype opmode)
{
	const char *tag = "UNKNOWN";

	switch (opmode) {
	case NL80211_IFTYPE_UNSPECIFIED:
		tag = "UNSPEC";
		break;
	case NL80211_IFTYPE_ADHOC:
		tag = "ADHOC";
		break;
	case NL80211_IFTYPE_STATION:
		tag = "STATION";
		break;
	case NL80211_IFTYPE_AP:
		tag = "AP";
		break;
	case NL80211_IFTYPE_AP_VLAN:
		tag = "AP-VLAN";
		break;
	case NL80211_IFTYPE_WDS:
		tag = "WDS";
		break;
	case NL80211_IFTYPE_MONITOR:
		tag = "MONITOR";
		break;
	case NL80211_IFTYPE_MESH_POINT:
		tag = "MESH";
		break;
	case NL80211_IFTYPE_P2P_CLIENT:
		tag = "P2P-CLIENT";
		break;
	case NL80211_IFTYPE_P2P_GO:
		tag = "P2P-GO";
		break;
	default:
		break;
	}

	return tag;
}
EXPORT_SYMBOL(ath_opmode_to_string);
gpl-2.0
fulcrum7/mq107-kernel
arch/mn10300/mm/cache.c
12072
1615
/* MN10300 Cache flushing routines * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/module.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/threads.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/cacheflush.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/smp.h> #include "cache-smp.h" EXPORT_SYMBOL(mn10300_icache_inv); EXPORT_SYMBOL(mn10300_icache_inv_range); EXPORT_SYMBOL(mn10300_icache_inv_range2); EXPORT_SYMBOL(mn10300_icache_inv_page); EXPORT_SYMBOL(mn10300_dcache_inv); EXPORT_SYMBOL(mn10300_dcache_inv_range); EXPORT_SYMBOL(mn10300_dcache_inv_range2); EXPORT_SYMBOL(mn10300_dcache_inv_page); #ifdef CONFIG_MN10300_CACHE_WBACK EXPORT_SYMBOL(mn10300_dcache_flush); EXPORT_SYMBOL(mn10300_dcache_flush_inv); EXPORT_SYMBOL(mn10300_dcache_flush_inv_range); EXPORT_SYMBOL(mn10300_dcache_flush_inv_range2); EXPORT_SYMBOL(mn10300_dcache_flush_inv_page); EXPORT_SYMBOL(mn10300_dcache_flush_range); EXPORT_SYMBOL(mn10300_dcache_flush_range2); EXPORT_SYMBOL(mn10300_dcache_flush_page); #endif /* * allow userspace to flush the instruction cache */ asmlinkage long sys_cacheflush(unsigned long start, unsigned long end) { if (end < start) return -EINVAL; flush_icache_range(start, end); return 0; }
gpl-2.0
DerRomtester/one_plus_one
arch/cris/arch-v10/lib/old_checksum.c
12328
2160
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Tom May, <ftom@netcom.com>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <net/checksum.h>
/* Was <net/module.h>: no such header exists; EXPORT_SYMBOL (used at the
 * bottom of this file) is declared by <linux/module.h>. */
#include <linux/module.h>

#undef PROFILE_CHECKSUM

#ifdef PROFILE_CHECKSUM
/* these are just for profiling the checksum code with an oscillioscope.. uh */
#if 0
#define BITOFF *((unsigned char *)0xb0000030) = 0xff
#define BITON *((unsigned char *)0xb0000030) = 0x0
#endif
#include <asm/io.h>
#define CBITON LED_ACTIVE_SET(1)
#define CBITOFF LED_ACTIVE_SET(0)
#define BITOFF
#define BITON
#else
#define BITOFF
#define BITON
#define CBITOFF
#define CBITON
#endif

/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 *
 * Folds the buffer at @p (@len bytes) into the running ones'-complement
 * accumulator @__sum and returns the new 32-bit partial sum; carries are
 * accumulated in the high half and folded by the caller.  Works on 16-bit
 * little-endian words; a trailing odd byte is added as the low byte of a
 * final word, which is correct on this little-endian target.
 */

#include <asm/delay.h>

__wsum csum_partial(const void *p, int len, __wsum __sum)
{
	u32 sum = (__force u32)__sum;
	const u16 *buff = p;
	/*
	 * Experiments with ethernet and slip connections show that buff
	 * is aligned on either a 2-byte or 4-byte boundary.
	 */
	const void *endMarker = p + len;
	/* first marker: last address reachable in 16-byte (8-word) strides */
	const void *marker = endMarker - (len % 16);
#if 0
	if((int)buff & 0x3)
		printk("unaligned buff %p\n", buff);
	__delay(900); /* extra delay of 90 us to test performance hit */
#endif
	BITON;
	/* main loop: manually unrolled to sum 8 words (16 bytes) per pass */
	while (buff < marker) {
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
	}
	/* second marker: last address reachable in whole 16-bit words */
	marker = endMarker - (len % 2);
	while (buff < marker)
		sum += *buff++;

	if (endMarker > buff)
		sum += *(const u8 *)buff;	/* add extra byte separately */

	BITOFF;
	return (__force __wsum)sum;
}

EXPORT_SYMBOL(csum_partial);
gpl-2.0
planee/DeathCore_3.3.5a
src/server/scripts/Kalimdor/CavernsOfTime/TheBlackMorass/boss_temporus.cpp
41
4751
/*
 * Copyright (C) 2008-2014 TrinityCore <http://www.trinitycore.org/>
 * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
Name: Boss_Temporus
%Complete: 75
Comment: More abilities need to be implemented
Category: Caverns of Time, The Black Morass
*/

#include "ScriptMgr.h"
#include "ScriptedCreature.h"
#include "the_black_morass.h"

// Broadcast-text group ids and spell ids used by the Temporus encounter.
enum Enums
{
    SAY_ENTER           = 0,
    SAY_AGGRO           = 1,
    SAY_BANISH          = 2,
    SAY_SLAY            = 3,
    SAY_DEATH           = 4,

    SPELL_HASTE         = 31458,
    SPELL_MORTAL_WOUND  = 31464,
    SPELL_WING_BUFFET   = 31475,
    H_SPELL_WING_BUFFET = 38593,  // NOTE(review): defined but not cast anywhere in this file
    SPELL_REFLECT       = 38592   //Not Implemented (Heroic mod)
};

// Ids for the timed combat events scheduled on the BossAI event map.
enum Events
{
    EVENT_HASTE            = 1,
    EVENT_MORTAL_WOUND     = 2,
    EVENT_WING_BUFFET      = 3,
    EVENT_SPELL_REFLECTION = 4
};

// Script for boss Temporus in The Black Morass (Caverns of Time).
class boss_temporus : public CreatureScript
{
    public:
        boss_temporus() : CreatureScript("boss_temporus") { }

        struct boss_temporusAI : public BossAI
        {
            boss_temporusAI(Creature* creature) : BossAI(creature, TYPE_TEMPORUS) { }

            // Nothing boss-specific to reset; BossAI handles the rest.
            void Reset() override { }

            // Schedule the rotation; spell reflection only runs on heroic.
            // Timer values are milliseconds.
            void EnterCombat(Unit* /*who*/) override
            {
                events.ScheduleEvent(EVENT_HASTE, urand(15000, 23000));
                events.ScheduleEvent(EVENT_MORTAL_WOUND, 8000);
                events.ScheduleEvent(EVENT_WING_BUFFET, urand(25000, 35000));
                if (IsHeroic())
                    events.ScheduleEvent(EVENT_SPELL_REFLECTION, 30000);

                Talk(SAY_AGGRO);
            }

            void KilledUnit(Unit* /*victim*/) override
            {
                Talk(SAY_SLAY);
            }

            // On death, advance the instance's rift encounter state.
            void JustDied(Unit* /*killer*/) override
            {
                Talk(SAY_DEATH);
                instance->SetData(TYPE_RIFT, SPECIAL);
            }

            void MoveInLineOfSight(Unit* who) override
            {
                //Despawn Time Keeper
                // Kills any Time Keeper NPC that wanders within 20 yards by
                // dealing its full health as direct damage.
                if (who->GetTypeId() == TYPEID_UNIT && who->GetEntry() == NPC_TIME_KEEPER)
                {
                    if (me->IsWithinDistInMap(who, 20.0f))
                    {
                        Talk(SAY_BANISH);
                        me->DealDamage(who, who->GetHealth(), NULL, DIRECT_DAMAGE, SPELL_SCHOOL_MASK_NORMAL, NULL, false);
                    }
                }

                ScriptedAI::MoveInLineOfSight(who);
            }

            // Per-tick update: drain due events and recast/reschedule each
            // ability, then fall through to melee.
            void UpdateAI(uint32 diff) override
            {
                //Return since we have no target
                if (!UpdateVictim())
                    return;

                events.Update(diff);

                // Don't interrupt an in-progress cast.
                if (me->HasUnitState(UNIT_STATE_CASTING))
                    return;

                while (uint32 eventId = events.ExecuteEvent())
                {
                    switch (eventId)
                    {
                        case EVENT_HASTE:
                            DoCast(me, SPELL_HASTE);
                            events.ScheduleEvent(EVENT_HASTE, urand(20000, 25000));
                            break;
                        case EVENT_MORTAL_WOUND:
                            DoCast(me, SPELL_MORTAL_WOUND);
                            events.ScheduleEvent(EVENT_MORTAL_WOUND, urand(10000, 20000));
                            break;
                        case EVENT_WING_BUFFET:
                            DoCast(me, SPELL_WING_BUFFET);
                            events.ScheduleEvent(EVENT_WING_BUFFET, urand(20000, 30000));
                            break;
                        case EVENT_SPELL_REFLECTION: // Only in Heroic
                            DoCast(me, SPELL_REFLECT);
                            events.ScheduleEvent(EVENT_SPELL_REFLECTION, urand(25000, 35000));
                            break;
                        default:
                            break;
                    }
                }

                DoMeleeAttackIfReady();
            }
        };

        CreatureAI* GetAI(Creature* creature) const override
        {
            return GetInstanceAI<boss_temporusAI>(creature);
        }
};

// Registers the script with the script manager; called from the script loader.
void AddSC_boss_temporus()
{
    new boss_temporus();
}
gpl-2.0
kwade00/xbmc
xbmc/windowing/X11/GLContextGLX.cpp
41
8639
/* * Copyright (C) 2005-2014 Team XBMC * http://xbmc.org * * This Program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This Program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with XBMC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * */ #include "system_gl.h" #if defined(HAVE_X11) && defined(HAS_GL) #include <GL/glx.h> #include "GLContextGLX.h" #include "utils/log.h" CGLContextGLX::CGLContextGLX(Display *dpy) : CGLContext(dpy) { m_extPrefix = "GLX_"; m_glxWindow = 0; m_glxContext = 0; m_vsyncMode = 0; } bool CGLContextGLX::Refresh(bool force, int screen, Window glWindow, bool &newContext) { bool retVal = false; m_glxWindow = glWindow; m_nScreen = screen; // refresh context if (m_glxContext && !force) { CLog::Log(LOGDEBUG, "CWinSystemX11::RefreshGlxContext: refreshing context"); glXMakeCurrent(m_dpy, None, NULL); glXMakeCurrent(m_dpy, glWindow, m_glxContext); return true; } // create context XVisualInfo vMask; XVisualInfo *visuals; XVisualInfo *vInfo = NULL; int availableVisuals = 0; vMask.screen = screen; XWindowAttributes winAttr; /* Assume a depth of 24 in case the below calls to XGetWindowAttributes() or XGetVisualInfo() fail. That shouldn't happen unless something is fatally wrong, but lets prepare for everything. 
*/ vMask.depth = 24; if (XGetWindowAttributes(m_dpy, glWindow, &winAttr)) { vMask.visualid = XVisualIDFromVisual(winAttr.visual); vInfo = XGetVisualInfo(m_dpy, VisualScreenMask | VisualIDMask, &vMask, &availableVisuals); if (!vInfo) CLog::Log(LOGWARNING, "Failed to get VisualInfo of visual 0x%x", (unsigned) vMask.visualid); else if(!IsSuitableVisual(vInfo)) { CLog::Log(LOGWARNING, "Visual 0x%x of the window is not suitable, looking for another one...", (unsigned) vInfo->visualid); vMask.depth = vInfo->depth; XFree(vInfo); vInfo = NULL; } } else CLog::Log(LOGWARNING, "Failed to get window attributes"); /* As per glXMakeCurrent documentation, we have to use the same visual as m_glWindow. Since that was not suitable for use, we try to use another one with the same depth and hope that the used implementation is less strict than the documentation. */ if (!vInfo) { visuals = XGetVisualInfo(m_dpy, VisualScreenMask | VisualDepthMask, &vMask, &availableVisuals); for (int i = 0; i < availableVisuals; i++) { if (IsSuitableVisual(&visuals[i])) { vMask.visualid = visuals[i].visualid; vInfo = XGetVisualInfo(m_dpy, VisualScreenMask | VisualIDMask, &vMask, &availableVisuals); break; } } XFree(visuals); } if (vInfo) { CLog::Log(LOGNOTICE, "Using visual 0x%x", (unsigned) vInfo->visualid); if (m_glxContext) { glXMakeCurrent(m_dpy, None, NULL); glXDestroyContext(m_dpy, m_glxContext); XSync(m_dpy, FALSE); } if ((m_glxContext = glXCreateContext(m_dpy, vInfo, NULL, True))) { // make this context current glXMakeCurrent(m_dpy, glWindow, m_glxContext); retVal = true; } else CLog::Log(LOGERROR, "GLX Error: Could not create context"); XFree(vInfo); } else { CLog::Log(LOGERROR, "GLX Error: vInfo is NULL!"); } return retVal; } void CGLContextGLX::Destroy() { glXMakeCurrent(m_dpy, None, NULL); glXDestroyContext(m_dpy, m_glxContext); m_glxContext = 0; } void CGLContextGLX::Detach() { glXMakeCurrent(m_dpy, None, NULL); } bool CGLContextGLX::IsSuitableVisual(XVisualInfo *vInfo) { int value; if 
(glXGetConfig(m_dpy, vInfo, GLX_RGBA, &value) || !value) return false; if (glXGetConfig(m_dpy, vInfo, GLX_DOUBLEBUFFER, &value) || !value) return false; if (glXGetConfig(m_dpy, vInfo, GLX_RED_SIZE, &value) || value < 8) return false; if (glXGetConfig(m_dpy, vInfo, GLX_GREEN_SIZE, &value) || value < 8) return false; if (glXGetConfig(m_dpy, vInfo, GLX_BLUE_SIZE, &value) || value < 8) return false; if (glXGetConfig(m_dpy, vInfo, GLX_ALPHA_SIZE, &value) || value < 8) return false; if (glXGetConfig(m_dpy, vInfo, GLX_DEPTH_SIZE, &value) || value < 8) return false; return true; } void CGLContextGLX::SetVSync(bool enable) { // turn of current setting first if(m_glXSwapIntervalEXT) m_glXSwapIntervalEXT(m_dpy, m_glxWindow, 0); else if(m_glXSwapIntervalMESA) m_glXSwapIntervalMESA(0); m_iVSyncErrors = 0; if(!enable) return; if (m_glXSwapIntervalEXT) { m_glXSwapIntervalEXT(m_dpy, m_glxWindow, 1); m_vsyncMode = 6; } if (m_glXSwapIntervalMESA) { if(m_glXSwapIntervalMESA(1) == 0) m_vsyncMode = 2; else CLog::Log(LOGWARNING, "%s - glXSwapIntervalMESA failed", __FUNCTION__); } if (m_glXWaitVideoSyncSGI && m_glXGetVideoSyncSGI && !m_vsyncMode) { unsigned int count; if(m_glXGetVideoSyncSGI(&count) == 0) m_vsyncMode = 3; else CLog::Log(LOGWARNING, "%s - glXGetVideoSyncSGI failed, glcontext probably not direct", __FUNCTION__); } } void CGLContextGLX::SwapBuffers() { if (m_vsyncMode == 3) { glFinish(); unsigned int before = 0, after = 0; if (m_glXGetVideoSyncSGI(&before) != 0) CLog::Log(LOGERROR, "%s - glXGetVideoSyncSGI - Failed to get current retrace count", __FUNCTION__); glXSwapBuffers(m_dpy, m_glxWindow); glFinish(); if(m_glXGetVideoSyncSGI(&after) != 0) CLog::Log(LOGERROR, "%s - glXGetVideoSyncSGI - Failed to get current retrace count", __FUNCTION__); if (after == before) m_iVSyncErrors = 1; else m_iVSyncErrors--; if (m_iVSyncErrors > 0) { CLog::Log(LOGINFO, "GL: retrace count didn't change after buffer swap, switching to vsync mode 4"); m_iVSyncErrors = 0; m_vsyncMode = 4; } if 
(m_iVSyncErrors < -200) { CLog::Log(LOGINFO, "GL: retrace count change for %d consecutive buffer swap, switching to vsync mode 2", -m_iVSyncErrors); m_iVSyncErrors = 0; m_vsyncMode = 2; } } else if (m_vsyncMode == 4) { glFinish(); unsigned int before = 0, swap = 0, after = 0; if (m_glXGetVideoSyncSGI(&before) != 0) CLog::Log(LOGERROR, "%s - glXGetVideoSyncSGI - Failed to get current retrace count", __FUNCTION__); if(m_glXWaitVideoSyncSGI(2, (before+1)%2, &swap) != 0) CLog::Log(LOGERROR, "%s - glXWaitVideoSyncSGI - Returned error", __FUNCTION__); glXSwapBuffers(m_dpy, m_glxWindow); glFinish(); if (m_glXGetVideoSyncSGI(&after) != 0) CLog::Log(LOGERROR, "%s - glXGetVideoSyncSGI - Failed to get current retrace count", __FUNCTION__); if (after == before) CLog::Log(LOGERROR, "%s - glXWaitVideoSyncSGI - Woke up early", __FUNCTION__); if (after > before + 1) m_iVSyncErrors++; else m_iVSyncErrors = 0; if (m_iVSyncErrors > 30) { CLog::Log(LOGINFO, "GL: retrace count seems to be changing due to the swapbuffers call, switching to vsync mode 3"); m_vsyncMode = 3; m_iVSyncErrors = 0; } } else glXSwapBuffers(m_dpy, m_glxWindow); } void CGLContextGLX::QueryExtensions() { m_extensions = " "; m_extensions += (const char*)glXQueryExtensionsString(m_dpy, m_nScreen); m_extensions += " "; CLog::Log(LOGDEBUG, "GLX_EXTENSIONS:%s", m_extensions.c_str()); if (IsExtSupported("GLX_SGI_video_sync")) m_glXWaitVideoSyncSGI = (int (*)(int, int, unsigned int*))glXGetProcAddress((const GLubyte*)"glXWaitVideoSyncSGI"); else m_glXWaitVideoSyncSGI = NULL; if (IsExtSupported("GLX_SGI_video_sync")) m_glXGetVideoSyncSGI = (int (*)(unsigned int*))glXGetProcAddress((const GLubyte*)"glXGetVideoSyncSGI"); else m_glXGetVideoSyncSGI = NULL; if (IsExtSupported("GLX_MESA_swap_control")) m_glXSwapIntervalMESA = (int (*)(int))glXGetProcAddress((const GLubyte*)"glXSwapIntervalMESA"); else m_glXSwapIntervalMESA = NULL; if (IsExtSupported("GLX_EXT_swap_control")) m_glXSwapIntervalEXT = 
(PFNGLXSWAPINTERVALEXTPROC)glXGetProcAddress((const GLubyte*)"glXSwapIntervalEXT"); else m_glXSwapIntervalEXT = NULL; } #endif
gpl-2.0
SurgeCore/SurgeCore-505
dep/acelite/ace/SSL/SSL_SOCK_Acceptor.cpp
553
7774
// -*- C++ -*- // // $Id: SSL_SOCK_Acceptor.cpp 91368 2010-08-16 13:03:34Z mhengstmengel $ #include "SSL_SOCK_Acceptor.h" #include "ace/Handle_Set.h" #include "ace/OS_Errno.h" #include "ace/OS_NS_errno.h" #include "ace/Log_Msg.h" #include "ace/Time_Value.h" #include "ace/Countdown_Time.h" #include "ace/Truncate.h" #if !defined (__ACE_INLINE__) #include "SSL_SOCK_Acceptor.inl" #endif /* __ACE_INLINE__ */ ACE_BEGIN_VERSIONED_NAMESPACE_DECL ACE_ALLOC_HOOK_DEFINE(ACE_SSL_SOCK_Acceptor) ACE_SSL_SOCK_Acceptor::~ACE_SSL_SOCK_Acceptor (void) { ACE_TRACE ("ACE_SSL_SOCK_Acceptor::~ACE_SSL_SOCK_Acceptor"); } int ACE_SSL_SOCK_Acceptor::ssl_accept (ACE_SSL_SOCK_Stream &new_stream, ACE_Time_Value *timeout) const { SSL *ssl = new_stream.ssl (); if (SSL_is_init_finished (ssl)) return 0; if (!SSL_in_accept_init (ssl)) ::SSL_set_accept_state (ssl); ACE_HANDLE handle = new_stream.get_handle (); // We're going to call SSL_accept, optionally doing ACE::select and // retrying the SSL_accept, until the SSL handshake is done or // it fails. // To get the timeout affect, set the socket to nonblocking mode // before beginning if there is a timeout specified. If the timeout // is 0 (wait as long as it takes) then don't worry about the blocking // status; we'll block in SSL_accept if the socket is blocking, and // block in ACE::select if not. int reset_blocking_mode = 0; if (timeout != 0) { reset_blocking_mode = ACE_BIT_DISABLED (ACE::get_flags (handle), ACE_NONBLOCK); // Set the handle into non-blocking mode if it's not already // in it. if (reset_blocking_mode && ACE::set_flags (handle, ACE_NONBLOCK) == -1) return -1; } // Take into account the time between each select() call below. ACE_Countdown_Time countdown (timeout); int status; do { // These handle sets are used to set up for whatever SSL_accept // says it wants next. They're reset on each pass around the loop. 
ACE_Handle_Set rd_handle; ACE_Handle_Set wr_handle; status = ::SSL_accept (ssl); switch (::SSL_get_error (ssl, status)) { case SSL_ERROR_NONE: status = 0; // To tell caller about success break; // Done case SSL_ERROR_WANT_WRITE: wr_handle.set_bit (handle); status = 1; // Wait for more activity break; case SSL_ERROR_WANT_READ: rd_handle.set_bit (handle); status = 1; // Wait for more activity break; case SSL_ERROR_ZERO_RETURN: // The peer has notified us that it is shutting down via // the SSL "close_notify" message so we need to // shutdown, too. status = -1; break; case SSL_ERROR_SYSCALL: // On some platforms (e.g. MS Windows) OpenSSL does not // store the last error in errno so explicitly do so. // // Explicitly check for EWOULDBLOCK since it doesn't get // converted to an SSL_ERROR_WANT_{READ,WRITE} on some // platforms. If SSL_accept failed outright, though, don't // bother checking more. This can happen if the socket gets // closed during the handshake. if (ACE_OS::set_errno_to_last_error () == EWOULDBLOCK && status == -1) { // Although the SSL_ERROR_WANT_READ/WRITE isn't getting // set correctly, the read/write state should be valid. // Use that to decide what to do. status = 1; // Wait for more activity if (SSL_want_write (ssl)) wr_handle.set_bit (handle); else if (SSL_want_read (ssl)) rd_handle.set_bit (handle); else status = -1; // Doesn't want anything - bail out } else status = -1; break; default: ACE_SSL_Context::report_error (); status = -1; break; } if (status == 1) { // Must have at least one handle to wait for at this point. ACE_ASSERT (rd_handle.num_set() == 1 || wr_handle.num_set () == 1); status = ACE::select (int (handle) + 1, &rd_handle, &wr_handle, 0, timeout); (void) countdown.update (); // 0 is timeout, so we're done. // -1 is error, so we're done. // Could be both handles set (same handle in both masks) so // set to 1. 
if (status >= 1) status = 1; else // Timeout or failure status = -1; } } while (status == 1 && !SSL_is_init_finished (ssl)); if (reset_blocking_mode) { ACE_Errno_Guard eguard (errno); ACE::clr_flags (handle, ACE_NONBLOCK); } return (status == -1 ? -1 : 0); } // General purpose routine for accepting new connections. // Since our underlying acceptor is of the plain old ACE_SOCK_Acceptor // variety, get the basic socket setup done with it, then take care of // the SSL handshake if the socket is accepted. int ACE_SSL_SOCK_Acceptor::accept (ACE_SSL_SOCK_Stream &new_stream, ACE_Addr *remote_addr, ACE_Time_Value *timeout, bool restart, bool reset_new_handle) const { ACE_TRACE ("ACE_SSL_SOCK_Acceptor::accept"); // Take into account the time to complete the basic TCP handshake // and the SSL handshake. ACE_Countdown_Time countdown (timeout); ACE_SOCK_Stream temp_stream; if (-1 == this->acceptor_.accept (temp_stream, remote_addr, timeout, restart, reset_new_handle)) return -1; (void) countdown.update (); new_stream.set_handle (temp_stream.get_handle ()); temp_stream.set_handle (ACE_INVALID_HANDLE); if (this->ssl_accept (new_stream, timeout) == -1) { new_stream.close (); new_stream.set_handle (ACE_INVALID_HANDLE); return -1; } return 0; } int ACE_SSL_SOCK_Acceptor::accept (ACE_SSL_SOCK_Stream &new_stream, ACE_Accept_QoS_Params qos_params, ACE_Addr *remote_addr, ACE_Time_Value *timeout, bool restart, bool reset_new_handle) const { ACE_TRACE ("ACE_SSL_SOCK_Acceptor::accept"); // Take into account the time to complete the basic TCP handshake // and the SSL handshake. 
ACE_Countdown_Time countdown (timeout); ACE_SOCK_Stream temp_stream; if (-1 == this->acceptor_.accept (temp_stream, qos_params, remote_addr, timeout, restart, reset_new_handle)) return -1; (void) countdown.update (); new_stream.set_handle (temp_stream.get_handle ()); temp_stream.set_handle (ACE_INVALID_HANDLE); if (this->ssl_accept (new_stream, timeout) == -1) { new_stream.close (); new_stream.set_handle (ACE_INVALID_HANDLE); return -1; } return 0; } ACE_END_VERSIONED_NAMESPACE_DECL
gpl-2.0
moscowdesire/701trollslayer
arch/x86/kernel/kvm.c
553
13958
/* * KVM paravirt_ops implementation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright IBM Corporation, 2007 * Authors: Anthony Liguori <aliguori@us.ibm.com> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/kvm_para.h> #include <linux/cpu.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/hardirq.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/hash.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/kprobes.h> #include <asm/timer.h> #include <asm/cpu.h> #include <asm/traps.h> #include <asm/desc.h> #include <asm/tlbflush.h> #define MMU_QUEUE_SIZE 1024 static int kvmapf = 1; static int parse_no_kvmapf(char *arg) { kvmapf = 0; return 0; } early_param("no-kvmapf", parse_no_kvmapf); static int steal_acc = 1; static int parse_no_stealacc(char *arg) { steal_acc = 0; return 0; } early_param("no-steal-acc", parse_no_stealacc); struct kvm_para_state { u8 mmu_queue[MMU_QUEUE_SIZE]; int mmu_queue_len; }; static DEFINE_PER_CPU(struct kvm_para_state, para_state); static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64); static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64); static int has_steal_clock = 0; static struct kvm_para_state 
*kvm_para_state(void) { return &per_cpu(para_state, raw_smp_processor_id()); } /* * No need for any "IO delay" on KVM */ static void kvm_io_delay(void) { } #define KVM_TASK_SLEEP_HASHBITS 8 #define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS) struct kvm_task_sleep_node { struct hlist_node link; wait_queue_head_t wq; u32 token; int cpu; bool halted; struct mm_struct *mm; }; static struct kvm_task_sleep_head { spinlock_t lock; struct hlist_head list; } async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE]; static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b, u32 token) { struct hlist_node *p; hlist_for_each(p, &b->list) { struct kvm_task_sleep_node *n = hlist_entry(p, typeof(*n), link); if (n->token == token) return n; } return NULL; } void kvm_async_pf_task_wait(u32 token) { u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; struct kvm_task_sleep_node n, *e; DEFINE_WAIT(wait); int cpu, idle; cpu = get_cpu(); idle = idle_cpu(cpu); put_cpu(); spin_lock(&b->lock); e = _find_apf_task(b, token); if (e) { /* dummy entry exist -> wake up was delivered ahead of PF */ hlist_del(&e->link); kfree(e); spin_unlock(&b->lock); return; } n.token = token; n.cpu = smp_processor_id(); n.mm = current->active_mm; n.halted = idle || preempt_count() > 1; atomic_inc(&n.mm->mm_count); init_waitqueue_head(&n.wq); hlist_add_head(&n.link, &b->list); spin_unlock(&b->lock); for (;;) { if (!n.halted) prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE); if (hlist_unhashed(&n.link)) break; if (!n.halted) { local_irq_enable(); schedule(); local_irq_disable(); } else { /* * We cannot reschedule. So halt. 
*/ native_safe_halt(); local_irq_disable(); } } if (!n.halted) finish_wait(&n.wq, &wait); return; } EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait); static void apf_task_wake_one(struct kvm_task_sleep_node *n) { hlist_del_init(&n->link); if (!n->mm) return; mmdrop(n->mm); if (n->halted) smp_send_reschedule(n->cpu); else if (waitqueue_active(&n->wq)) wake_up(&n->wq); } static void apf_task_wake_all(void) { int i; for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) { struct hlist_node *p, *next; struct kvm_task_sleep_head *b = &async_pf_sleepers[i]; spin_lock(&b->lock); hlist_for_each_safe(p, next, &b->list) { struct kvm_task_sleep_node *n = hlist_entry(p, typeof(*n), link); if (n->cpu == smp_processor_id()) apf_task_wake_one(n); } spin_unlock(&b->lock); } } void kvm_async_pf_task_wake(u32 token) { u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; struct kvm_task_sleep_node *n; if (token == ~0) { apf_task_wake_all(); return; } again: spin_lock(&b->lock); n = _find_apf_task(b, token); if (!n) { /* * async PF was not yet handled. * Add dummy entry for the token. */ n = kmalloc(sizeof(*n), GFP_ATOMIC); if (!n) { /* * Allocation failed! Busy wait while other cpu * handles async PF. 
*/ spin_unlock(&b->lock); cpu_relax(); goto again; } n->token = token; n->cpu = smp_processor_id(); n->mm = NULL; init_waitqueue_head(&n->wq); hlist_add_head(&n->link, &b->list); } else apf_task_wake_one(n); spin_unlock(&b->lock); return; } EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake); u32 kvm_read_and_reset_pf_reason(void) { u32 reason = 0; if (__get_cpu_var(apf_reason).enabled) { reason = __get_cpu_var(apf_reason).reason; __get_cpu_var(apf_reason).reason = 0; } return reason; } EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason); dotraplinkage void __kprobes do_async_page_fault(struct pt_regs *regs, unsigned long error_code) { switch (kvm_read_and_reset_pf_reason()) { default: do_page_fault(regs, error_code); break; case KVM_PV_REASON_PAGE_NOT_PRESENT: /* page is swapped out by the host. */ kvm_async_pf_task_wait((u32)read_cr2()); break; case KVM_PV_REASON_PAGE_READY: kvm_async_pf_task_wake((u32)read_cr2()); break; } } static void kvm_mmu_op(void *buffer, unsigned len) { int r; unsigned long a1, a2; do { a1 = __pa(buffer); a2 = 0; /* on i386 __pa() always returns <4G */ r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2); buffer += r; len -= r; } while (len); } static void mmu_queue_flush(struct kvm_para_state *state) { if (state->mmu_queue_len) { kvm_mmu_op(state->mmu_queue, state->mmu_queue_len); state->mmu_queue_len = 0; } } static void kvm_deferred_mmu_op(void *buffer, int len) { struct kvm_para_state *state = kvm_para_state(); if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) { kvm_mmu_op(buffer, len); return; } if (state->mmu_queue_len + len > sizeof state->mmu_queue) mmu_queue_flush(state); memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len); state->mmu_queue_len += len; } static void kvm_mmu_write(void *dest, u64 val) { __u64 pte_phys; struct kvm_mmu_op_write_pte wpte; #ifdef CONFIG_HIGHPTE struct page *page; unsigned long dst = (unsigned long) dest; page = kmap_atomic_to_page(dest); pte_phys = page_to_pfn(page); pte_phys <<= PAGE_SHIFT; pte_phys += (dst & 
~(PAGE_MASK)); #else pte_phys = (unsigned long)__pa(dest); #endif wpte.header.op = KVM_MMU_OP_WRITE_PTE; wpte.pte_val = val; wpte.pte_phys = pte_phys; kvm_deferred_mmu_op(&wpte, sizeof wpte); } /* * We only need to hook operations that are MMU writes. We hook these so that * we can use lazy MMU mode to batch these operations. We could probably * improve the performance of the host code if we used some of the information * here to simplify processing of batched writes. */ static void kvm_set_pte(pte_t *ptep, pte_t pte) { kvm_mmu_write(ptep, pte_val(pte)); } static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { kvm_mmu_write(ptep, pte_val(pte)); } static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd) { kvm_mmu_write(pmdp, pmd_val(pmd)); } #if PAGETABLE_LEVELS >= 3 #ifdef CONFIG_X86_PAE static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte) { kvm_mmu_write(ptep, pte_val(pte)); } static void kvm_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { kvm_mmu_write(ptep, 0); } static void kvm_pmd_clear(pmd_t *pmdp) { kvm_mmu_write(pmdp, 0); } #endif static void kvm_set_pud(pud_t *pudp, pud_t pud) { kvm_mmu_write(pudp, pud_val(pud)); } #if PAGETABLE_LEVELS == 4 static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd) { kvm_mmu_write(pgdp, pgd_val(pgd)); } #endif #endif /* PAGETABLE_LEVELS >= 3 */ static void kvm_flush_tlb(void) { struct kvm_mmu_op_flush_tlb ftlb = { .header.op = KVM_MMU_OP_FLUSH_TLB, }; kvm_deferred_mmu_op(&ftlb, sizeof ftlb); } static void kvm_release_pt(unsigned long pfn) { struct kvm_mmu_op_release_pt rpt = { .header.op = KVM_MMU_OP_RELEASE_PT, .pt_phys = (u64)pfn << PAGE_SHIFT, }; kvm_mmu_op(&rpt, sizeof rpt); } static void kvm_enter_lazy_mmu(void) { paravirt_enter_lazy_mmu(); } static void kvm_leave_lazy_mmu(void) { struct kvm_para_state *state = kvm_para_state(); mmu_queue_flush(state); paravirt_leave_lazy_mmu(); } static void __init paravirt_ops_setup(void) { pv_info.name = "KVM"; pv_info.paravirt_enabled = 1; 
if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY)) pv_cpu_ops.io_delay = kvm_io_delay; if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) { pv_mmu_ops.set_pte = kvm_set_pte; pv_mmu_ops.set_pte_at = kvm_set_pte_at; pv_mmu_ops.set_pmd = kvm_set_pmd; #if PAGETABLE_LEVELS >= 3 #ifdef CONFIG_X86_PAE pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic; pv_mmu_ops.pte_clear = kvm_pte_clear; pv_mmu_ops.pmd_clear = kvm_pmd_clear; #endif pv_mmu_ops.set_pud = kvm_set_pud; #if PAGETABLE_LEVELS == 4 pv_mmu_ops.set_pgd = kvm_set_pgd; #endif #endif pv_mmu_ops.flush_tlb_user = kvm_flush_tlb; pv_mmu_ops.release_pte = kvm_release_pt; pv_mmu_ops.release_pmd = kvm_release_pt; pv_mmu_ops.release_pud = kvm_release_pt; pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu; pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu; } #ifdef CONFIG_X86_IO_APIC no_timer_check = 1; #endif } static void kvm_register_steal_time(void) { int cpu = smp_processor_id(); struct kvm_steal_time *st = &per_cpu(steal_time, cpu); if (!has_steal_clock) return; memset(st, 0, sizeof(*st)); wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED)); printk(KERN_INFO "kvm-stealtime: cpu %d, msr %lx\n", cpu, __pa(st)); } void __cpuinit kvm_guest_cpu_init(void) { if (!kvm_para_available()) return; if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { u64 pa = __pa(&__get_cpu_var(apf_reason)); #ifdef CONFIG_PREEMPT pa |= KVM_ASYNC_PF_SEND_ALWAYS; #endif wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED); __get_cpu_var(apf_reason).enabled = 1; printk(KERN_INFO"KVM setup async PF for cpu %d\n", smp_processor_id()); } if (has_steal_clock) kvm_register_steal_time(); } static void kvm_pv_disable_apf(void *unused) { if (!__get_cpu_var(apf_reason).enabled) return; wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); __get_cpu_var(apf_reason).enabled = 0; printk(KERN_INFO"Unregister pv shared memory for cpu %d\n", smp_processor_id()); } static int kvm_pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused) { if (code == SYS_RESTART) 
on_each_cpu(kvm_pv_disable_apf, NULL, 1); return NOTIFY_DONE; } static struct notifier_block kvm_pv_reboot_nb = { .notifier_call = kvm_pv_reboot_notify, }; static u64 kvm_steal_clock(int cpu) { u64 steal; struct kvm_steal_time *src; int version; src = &per_cpu(steal_time, cpu); do { version = src->version; rmb(); steal = src->steal; rmb(); } while ((version & 1) || (version != src->version)); return steal; } void kvm_disable_steal_time(void) { if (!has_steal_clock) return; wrmsr(MSR_KVM_STEAL_TIME, 0, 0); } #ifdef CONFIG_SMP static void __init kvm_smp_prepare_boot_cpu(void) { #ifdef CONFIG_KVM_CLOCK WARN_ON(kvm_register_clock("primary cpu clock")); #endif kvm_guest_cpu_init(); native_smp_prepare_boot_cpu(); } static void __cpuinit kvm_guest_cpu_online(void *dummy) { kvm_guest_cpu_init(); } static void kvm_guest_cpu_offline(void *dummy) { kvm_disable_steal_time(); kvm_pv_disable_apf(NULL); apf_task_wake_all(); } static int __cpuinit kvm_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { int cpu = (unsigned long)hcpu; switch (action) { case CPU_ONLINE: case CPU_DOWN_FAILED: case CPU_ONLINE_FROZEN: smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0); break; case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1); break; default: break; } return NOTIFY_OK; } static struct notifier_block __cpuinitdata kvm_cpu_notifier = { .notifier_call = kvm_cpu_notify, }; #endif static void __init kvm_apf_trap_init(void) { set_intr_gate(14, &async_page_fault); } void __init kvm_guest_init(void) { int i; if (!kvm_para_available()) return; paravirt_ops_setup(); register_reboot_notifier(&kvm_pv_reboot_nb); for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) spin_lock_init(&async_pf_sleepers[i].lock); if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF)) x86_init.irqs.trap_init = kvm_apf_trap_init; if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { has_steal_clock = 1; pv_time_ops.steal_clock = 
kvm_steal_clock; } #ifdef CONFIG_SMP smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu; register_cpu_notifier(&kvm_cpu_notifier); #else kvm_guest_cpu_init(); #endif } static __init int activate_jump_labels(void) { if (has_steal_clock) { jump_label_inc(&paravirt_steal_enabled); if (steal_acc) jump_label_inc(&paravirt_steal_rq_enabled); } return 0; } arch_initcall(activate_jump_labels);
gpl-2.0
sh95119/linux
drivers/media/platform/s3c-camif/camif-regs.c
1577
17542
/* * Samsung s3c24xx/s3c64xx SoC CAMIF driver * * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com> * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__ #include <linux/delay.h> #include "camif-regs.h" #define camif_write(_camif, _off, _val) writel(_val, (_camif)->io_base + (_off)) #define camif_read(_camif, _off) readl((_camif)->io_base + (_off)) void camif_hw_reset(struct camif_dev *camif) { u32 cfg; cfg = camif_read(camif, S3C_CAMIF_REG_CISRCFMT); cfg |= CISRCFMT_ITU601_8BIT; camif_write(camif, S3C_CAMIF_REG_CISRCFMT, cfg); /* S/W reset */ cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL); cfg |= CIGCTRL_SWRST; if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) cfg |= CIGCTRL_IRQ_LEVEL; camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg); udelay(10); cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL); cfg &= ~CIGCTRL_SWRST; camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg); udelay(10); } void camif_hw_clear_pending_irq(struct camif_vp *vp) { u32 cfg = camif_read(vp->camif, S3C_CAMIF_REG_CIGCTRL); cfg |= CIGCTRL_IRQ_CLR(vp->id); camif_write(vp->camif, S3C_CAMIF_REG_CIGCTRL, cfg); } /* * Sets video test pattern (off, color bar, horizontal or vertical gradient). * External sensor pixel clock must be active for the test pattern to work. 
*/ void camif_hw_set_test_pattern(struct camif_dev *camif, unsigned int pattern) { u32 cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL); cfg &= ~CIGCTRL_TESTPATTERN_MASK; cfg |= (pattern << 27); camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg); } void camif_hw_set_effect(struct camif_dev *camif, unsigned int effect, unsigned int cr, unsigned int cb) { static const struct v4l2_control colorfx[] = { { V4L2_COLORFX_NONE, CIIMGEFF_FIN_BYPASS }, { V4L2_COLORFX_BW, CIIMGEFF_FIN_ARBITRARY }, { V4L2_COLORFX_SEPIA, CIIMGEFF_FIN_ARBITRARY }, { V4L2_COLORFX_NEGATIVE, CIIMGEFF_FIN_NEGATIVE }, { V4L2_COLORFX_ART_FREEZE, CIIMGEFF_FIN_ARTFREEZE }, { V4L2_COLORFX_EMBOSS, CIIMGEFF_FIN_EMBOSSING }, { V4L2_COLORFX_SILHOUETTE, CIIMGEFF_FIN_SILHOUETTE }, { V4L2_COLORFX_SET_CBCR, CIIMGEFF_FIN_ARBITRARY }, }; unsigned int i, cfg; for (i = 0; i < ARRAY_SIZE(colorfx); i++) if (colorfx[i].id == effect) break; if (i == ARRAY_SIZE(colorfx)) return; cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGEFF(camif->vp->offset)); /* Set effect */ cfg &= ~CIIMGEFF_FIN_MASK; cfg |= colorfx[i].value; /* Set both paths */ if (camif->variant->ip_revision >= S3C6400_CAMIF_IP_REV) { if (effect == V4L2_COLORFX_NONE) cfg &= ~CIIMGEFF_IE_ENABLE_MASK; else cfg |= CIIMGEFF_IE_ENABLE_MASK; } cfg &= ~CIIMGEFF_PAT_CBCR_MASK; cfg |= cr | (cb << 13); camif_write(camif, S3C_CAMIF_REG_CIIMGEFF(camif->vp->offset), cfg); } static const u32 src_pixfmt_map[8][2] = { { MEDIA_BUS_FMT_YUYV8_2X8, CISRCFMT_ORDER422_YCBYCR }, { MEDIA_BUS_FMT_YVYU8_2X8, CISRCFMT_ORDER422_YCRYCB }, { MEDIA_BUS_FMT_UYVY8_2X8, CISRCFMT_ORDER422_CBYCRY }, { MEDIA_BUS_FMT_VYUY8_2X8, CISRCFMT_ORDER422_CRYCBY }, }; /* Set camera input pixel format and resolution */ void camif_hw_set_source_format(struct camif_dev *camif) { struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt; int i; u32 cfg; for (i = ARRAY_SIZE(src_pixfmt_map) - 1; i >= 0; i--) { if (src_pixfmt_map[i][0] == mf->code) break; } if (i < 0) { i = 0; dev_err(camif->dev, "Unsupported pixel code, falling 
back to %#08x\n", src_pixfmt_map[i][0]); } cfg = camif_read(camif, S3C_CAMIF_REG_CISRCFMT); cfg &= ~(CISRCFMT_ORDER422_MASK | CISRCFMT_SIZE_CAM_MASK); cfg |= (mf->width << 16) | mf->height; cfg |= src_pixfmt_map[i][1]; camif_write(camif, S3C_CAMIF_REG_CISRCFMT, cfg); } /* Set the camera host input window offsets (cropping) */ void camif_hw_set_camera_crop(struct camif_dev *camif) { struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt; struct v4l2_rect *crop = &camif->camif_crop; u32 hoff2, voff2; u32 cfg; /* Note: s3c244x requirement: left = f_width - rect.width / 2 */ cfg = camif_read(camif, S3C_CAMIF_REG_CIWDOFST); cfg &= ~(CIWDOFST_OFST_MASK | CIWDOFST_WINOFSEN); cfg |= (crop->left << 16) | crop->top; if (crop->left != 0 || crop->top != 0) cfg |= CIWDOFST_WINOFSEN; camif_write(camif, S3C_CAMIF_REG_CIWDOFST, cfg); if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) { hoff2 = mf->width - crop->width - crop->left; voff2 = mf->height - crop->height - crop->top; cfg = (hoff2 << 16) | voff2; camif_write(camif, S3C_CAMIF_REG_CIWDOFST2, cfg); } } void camif_hw_clear_fifo_overflow(struct camif_vp *vp) { struct camif_dev *camif = vp->camif; u32 cfg; cfg = camif_read(camif, S3C_CAMIF_REG_CIWDOFST); if (vp->id == 0) cfg |= (CIWDOFST_CLROVCOFIY | CIWDOFST_CLROVCOFICB | CIWDOFST_CLROVCOFICR); else cfg |= (/* CIWDOFST_CLROVPRFIY | */ CIWDOFST_CLROVPRFICB | CIWDOFST_CLROVPRFICR); camif_write(camif, S3C_CAMIF_REG_CIWDOFST, cfg); } /* Set video bus signals polarity */ void camif_hw_set_camera_bus(struct camif_dev *camif) { unsigned int flags = camif->pdata.sensor.flags; u32 cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL); cfg &= ~(CIGCTRL_INVPOLPCLK | CIGCTRL_INVPOLVSYNC | CIGCTRL_INVPOLHREF | CIGCTRL_INVPOLFIELD); if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING) cfg |= CIGCTRL_INVPOLPCLK; if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW) cfg |= CIGCTRL_INVPOLVSYNC; /* * HREF is normally high during frame active data * transmission and low during horizontal synchronization * period. 
Thus HREF active high means HSYNC active low. */ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) cfg |= CIGCTRL_INVPOLHREF; /* HREF active low */ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) { if (flags & V4L2_MBUS_FIELD_EVEN_LOW) cfg |= CIGCTRL_INVPOLFIELD; cfg |= CIGCTRL_FIELDMODE; } pr_debug("Setting CIGCTRL to: %#x\n", cfg); camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg); } void camif_hw_set_output_addr(struct camif_vp *vp, struct camif_addr *paddr, int i) { struct camif_dev *camif = vp->camif; camif_write(camif, S3C_CAMIF_REG_CIYSA(vp->id, i), paddr->y); if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV || vp->id == VP_CODEC) { camif_write(camif, S3C_CAMIF_REG_CICBSA(vp->id, i), paddr->cb); camif_write(camif, S3C_CAMIF_REG_CICRSA(vp->id, i), paddr->cr); } pr_debug("dst_buf[%d]: %pad, cb: %pad, cr: %pad\n", i, &paddr->y, &paddr->cb, &paddr->cr); } static void camif_hw_set_out_dma_size(struct camif_vp *vp) { struct camif_frame *frame = &vp->out_frame; u32 cfg; cfg = camif_read(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset)); cfg &= ~CITRGFMT_TARGETSIZE_MASK; cfg |= (frame->f_width << 16) | frame->f_height; camif_write(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg); } static void camif_get_dma_burst(u32 width, u32 ybpp, u32 *mburst, u32 *rburst) { unsigned int nwords = width * ybpp / 4; unsigned int div, rem; if (WARN_ON(width < 8 || (width * ybpp) & 7)) return; for (div = 16; div >= 2; div /= 2) { if (nwords < div) continue; rem = nwords & (div - 1); if (rem == 0) { *mburst = div; *rburst = div; break; } if (rem == div / 2 || rem == div / 4) { *mburst = div; *rburst = rem; break; } } } void camif_hw_set_output_dma(struct camif_vp *vp) { struct camif_dev *camif = vp->camif; struct camif_frame *frame = &vp->out_frame; const struct camif_fmt *fmt = vp->out_fmt; unsigned int ymburst = 0, yrburst = 0; u32 cfg; camif_hw_set_out_dma_size(vp); if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) { struct camif_dma_offset *offset = 
&frame->dma_offset; /* Set the input dma offsets. */ cfg = S3C_CISS_OFFS_INITIAL(offset->initial); cfg |= S3C_CISS_OFFS_LINE(offset->line); camif_write(camif, S3C_CAMIF_REG_CISSY(vp->id), cfg); camif_write(camif, S3C_CAMIF_REG_CISSCB(vp->id), cfg); camif_write(camif, S3C_CAMIF_REG_CISSCR(vp->id), cfg); } /* Configure DMA burst values */ camif_get_dma_burst(frame->rect.width, fmt->ybpp, &ymburst, &yrburst); cfg = camif_read(camif, S3C_CAMIF_REG_CICTRL(vp->id, vp->offset)); cfg &= ~CICTRL_BURST_MASK; cfg |= CICTRL_YBURST1(ymburst) | CICTRL_YBURST2(yrburst); cfg |= CICTRL_CBURST1(ymburst / 2) | CICTRL_CBURST2(yrburst / 2); camif_write(camif, S3C_CAMIF_REG_CICTRL(vp->id, vp->offset), cfg); pr_debug("ymburst: %u, yrburst: %u\n", ymburst, yrburst); } void camif_hw_set_input_path(struct camif_vp *vp) { u32 cfg = camif_read(vp->camif, S3C_CAMIF_REG_MSCTRL(vp->id)); cfg &= ~MSCTRL_SEL_DMA_CAM; camif_write(vp->camif, S3C_CAMIF_REG_MSCTRL(vp->id), cfg); } void camif_hw_set_target_format(struct camif_vp *vp) { struct camif_dev *camif = vp->camif; struct camif_frame *frame = &vp->out_frame; u32 cfg; pr_debug("fw: %d, fh: %d color: %d\n", frame->f_width, frame->f_height, vp->out_fmt->color); cfg = camif_read(camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset)); cfg &= ~CITRGFMT_TARGETSIZE_MASK; if (camif->variant->ip_revision == S3C244X_CAMIF_IP_REV) { /* We currently support only YCbCr 4:2:2 at the camera input */ cfg |= CITRGFMT_IN422; cfg &= ~CITRGFMT_OUT422; if (vp->out_fmt->color == IMG_FMT_YCBCR422P) cfg |= CITRGFMT_OUT422; } else { cfg &= ~CITRGFMT_OUTFORMAT_MASK; switch (vp->out_fmt->color) { case IMG_FMT_RGB565...IMG_FMT_XRGB8888: cfg |= CITRGFMT_OUTFORMAT_RGB; break; case IMG_FMT_YCBCR420...IMG_FMT_YCRCB420: cfg |= CITRGFMT_OUTFORMAT_YCBCR420; break; case IMG_FMT_YCBCR422P: cfg |= CITRGFMT_OUTFORMAT_YCBCR422; break; case IMG_FMT_YCBYCR422...IMG_FMT_CRYCBY422: cfg |= CITRGFMT_OUTFORMAT_YCBCR422I; break; } } /* Rotation is only supported by s3c64xx */ if (vp->rotation == 
90 || vp->rotation == 270) cfg |= (frame->f_height << 16) | frame->f_width; else cfg |= (frame->f_width << 16) | frame->f_height; camif_write(camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg); /* Target area, output pixel width * height */ cfg = camif_read(camif, S3C_CAMIF_REG_CITAREA(vp->id, vp->offset)); cfg &= ~CITAREA_MASK; cfg |= (frame->f_width * frame->f_height); camif_write(camif, S3C_CAMIF_REG_CITAREA(vp->id, vp->offset), cfg); } void camif_hw_set_flip(struct camif_vp *vp) { u32 cfg = camif_read(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset)); cfg &= ~CITRGFMT_FLIP_MASK; if (vp->hflip) cfg |= CITRGFMT_FLIP_Y_MIRROR; if (vp->vflip) cfg |= CITRGFMT_FLIP_X_MIRROR; camif_write(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg); } static void camif_hw_set_prescaler(struct camif_vp *vp) { struct camif_dev *camif = vp->camif; struct camif_scaler *sc = &vp->scaler; u32 cfg, shfactor, addr; addr = S3C_CAMIF_REG_CISCPRERATIO(vp->id, vp->offset); shfactor = 10 - (sc->h_shift + sc->v_shift); cfg = shfactor << 28; cfg |= (sc->pre_h_ratio << 16) | sc->pre_v_ratio; camif_write(camif, addr, cfg); cfg = (sc->pre_dst_width << 16) | sc->pre_dst_height; camif_write(camif, S3C_CAMIF_REG_CISCPREDST(vp->id, vp->offset), cfg); } static void camif_s3c244x_hw_set_scaler(struct camif_vp *vp) { struct camif_dev *camif = vp->camif; struct camif_scaler *scaler = &vp->scaler; unsigned int color = vp->out_fmt->color; u32 cfg; camif_hw_set_prescaler(vp); cfg = camif_read(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset)); cfg &= ~(CISCCTRL_SCALEUP_MASK | CISCCTRL_SCALERBYPASS | CISCCTRL_MAIN_RATIO_MASK | CIPRSCCTRL_RGB_FORMAT_24BIT); if (scaler->enable) { if (scaler->scaleup_h) { if (vp->id == VP_CODEC) cfg |= CISCCTRL_SCALEUP_H; else cfg |= CIPRSCCTRL_SCALEUP_H; } if (scaler->scaleup_v) { if (vp->id == VP_CODEC) cfg |= CISCCTRL_SCALEUP_V; else cfg |= CIPRSCCTRL_SCALEUP_V; } } else { if (vp->id == VP_CODEC) cfg |= CISCCTRL_SCALERBYPASS; } cfg |= 
((scaler->main_h_ratio & 0x1ff) << 16); cfg |= scaler->main_v_ratio & 0x1ff; if (vp->id == VP_PREVIEW) { if (color == IMG_FMT_XRGB8888) cfg |= CIPRSCCTRL_RGB_FORMAT_24BIT; cfg |= CIPRSCCTRL_SAMPLE; } camif_write(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset), cfg); pr_debug("main: h_ratio: %#x, v_ratio: %#x", scaler->main_h_ratio, scaler->main_v_ratio); } static void camif_s3c64xx_hw_set_scaler(struct camif_vp *vp) { struct camif_dev *camif = vp->camif; struct camif_scaler *scaler = &vp->scaler; unsigned int color = vp->out_fmt->color; u32 cfg; camif_hw_set_prescaler(vp); cfg = camif_read(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset)); cfg &= ~(CISCCTRL_CSCR2Y_WIDE | CISCCTRL_CSCY2R_WIDE | CISCCTRL_SCALEUP_H | CISCCTRL_SCALEUP_V | CISCCTRL_SCALERBYPASS | CISCCTRL_ONE2ONE | CISCCTRL_INRGB_FMT_MASK | CISCCTRL_OUTRGB_FMT_MASK | CISCCTRL_INTERLACE | CISCCTRL_EXTRGB_EXTENSION | CISCCTRL_MAIN_RATIO_MASK); cfg |= (CISCCTRL_CSCR2Y_WIDE | CISCCTRL_CSCY2R_WIDE); if (!scaler->enable) { cfg |= CISCCTRL_SCALERBYPASS; } else { if (scaler->scaleup_h) cfg |= CISCCTRL_SCALEUP_H; if (scaler->scaleup_v) cfg |= CISCCTRL_SCALEUP_V; if (scaler->copy) cfg |= CISCCTRL_ONE2ONE; } switch (color) { case IMG_FMT_RGB666: cfg |= CISCCTRL_OUTRGB_FMT_RGB666; break; case IMG_FMT_XRGB8888: cfg |= CISCCTRL_OUTRGB_FMT_RGB888; break; } cfg |= (scaler->main_h_ratio & 0x1ff) << 16; cfg |= scaler->main_v_ratio & 0x1ff; camif_write(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset), cfg); pr_debug("main: h_ratio: %#x, v_ratio: %#x", scaler->main_h_ratio, scaler->main_v_ratio); } void camif_hw_set_scaler(struct camif_vp *vp) { unsigned int ip_rev = vp->camif->variant->ip_revision; if (ip_rev == S3C244X_CAMIF_IP_REV) camif_s3c244x_hw_set_scaler(vp); else camif_s3c64xx_hw_set_scaler(vp); } void camif_hw_enable_scaler(struct camif_vp *vp, bool on) { u32 addr = S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset); u32 cfg; cfg = camif_read(vp->camif, addr); if (on) cfg |= CISCCTRL_SCALERSTART; else cfg &= 
~CISCCTRL_SCALERSTART; camif_write(vp->camif, addr, cfg); } void camif_hw_set_lastirq(struct camif_vp *vp, int enable) { u32 addr = S3C_CAMIF_REG_CICTRL(vp->id, vp->offset); u32 cfg; cfg = camif_read(vp->camif, addr); if (enable) cfg |= CICTRL_LASTIRQ_ENABLE; else cfg &= ~CICTRL_LASTIRQ_ENABLE; camif_write(vp->camif, addr, cfg); } void camif_hw_enable_capture(struct camif_vp *vp) { struct camif_dev *camif = vp->camif; u32 cfg; cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset)); camif->stream_count++; if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) cfg |= CIIMGCPT_CPT_FREN_ENABLE(vp->id); if (vp->scaler.enable) cfg |= CIIMGCPT_IMGCPTEN_SC(vp->id); if (camif->stream_count == 1) cfg |= CIIMGCPT_IMGCPTEN; camif_write(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset), cfg); pr_debug("CIIMGCPT: %#x, camif->stream_count: %d\n", cfg, camif->stream_count); } void camif_hw_disable_capture(struct camif_vp *vp) { struct camif_dev *camif = vp->camif; u32 cfg; cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset)); cfg &= ~CIIMGCPT_IMGCPTEN_SC(vp->id); if (WARN_ON(--(camif->stream_count) < 0)) camif->stream_count = 0; if (camif->stream_count == 0) cfg &= ~CIIMGCPT_IMGCPTEN; pr_debug("CIIMGCPT: %#x, camif->stream_count: %d\n", cfg, camif->stream_count); camif_write(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset), cfg); } void camif_hw_dump_regs(struct camif_dev *camif, const char *label) { struct { u32 offset; const char * const name; } registers[] = { { S3C_CAMIF_REG_CISRCFMT, "CISRCFMT" }, { S3C_CAMIF_REG_CIWDOFST, "CIWDOFST" }, { S3C_CAMIF_REG_CIGCTRL, "CIGCTRL" }, { S3C_CAMIF_REG_CIWDOFST2, "CIWDOFST2" }, { S3C_CAMIF_REG_CIYSA(0, 0), "CICOYSA0" }, { S3C_CAMIF_REG_CICBSA(0, 0), "CICOCBSA0" }, { S3C_CAMIF_REG_CICRSA(0, 0), "CICOCRSA0" }, { S3C_CAMIF_REG_CIYSA(0, 1), "CICOYSA1" }, { S3C_CAMIF_REG_CICBSA(0, 1), "CICOCBSA1" }, { S3C_CAMIF_REG_CICRSA(0, 1), "CICOCRSA1" }, { S3C_CAMIF_REG_CIYSA(0, 2), "CICOYSA2" }, { S3C_CAMIF_REG_CICBSA(0, 2), "CICOCBSA2" }, { 
S3C_CAMIF_REG_CICRSA(0, 2), "CICOCRSA2" }, { S3C_CAMIF_REG_CIYSA(0, 3), "CICOYSA3" }, { S3C_CAMIF_REG_CICBSA(0, 3), "CICOCBSA3" }, { S3C_CAMIF_REG_CICRSA(0, 3), "CICOCRSA3" }, { S3C_CAMIF_REG_CIYSA(1, 0), "CIPRYSA0" }, { S3C_CAMIF_REG_CIYSA(1, 1), "CIPRYSA1" }, { S3C_CAMIF_REG_CIYSA(1, 2), "CIPRYSA2" }, { S3C_CAMIF_REG_CIYSA(1, 3), "CIPRYSA3" }, { S3C_CAMIF_REG_CITRGFMT(0, 0), "CICOTRGFMT" }, { S3C_CAMIF_REG_CITRGFMT(1, 0), "CIPRTRGFMT" }, { S3C_CAMIF_REG_CICTRL(0, 0), "CICOCTRL" }, { S3C_CAMIF_REG_CICTRL(1, 0), "CIPRCTRL" }, { S3C_CAMIF_REG_CISCPREDST(0, 0), "CICOSCPREDST" }, { S3C_CAMIF_REG_CISCPREDST(1, 0), "CIPRSCPREDST" }, { S3C_CAMIF_REG_CISCPRERATIO(0, 0), "CICOSCPRERATIO" }, { S3C_CAMIF_REG_CISCPRERATIO(1, 0), "CIPRSCPRERATIO" }, { S3C_CAMIF_REG_CISCCTRL(0, 0), "CICOSCCTRL" }, { S3C_CAMIF_REG_CISCCTRL(1, 0), "CIPRSCCTRL" }, { S3C_CAMIF_REG_CITAREA(0, 0), "CICOTAREA" }, { S3C_CAMIF_REG_CITAREA(1, 0), "CIPRTAREA" }, { S3C_CAMIF_REG_CISTATUS(0, 0), "CICOSTATUS" }, { S3C_CAMIF_REG_CISTATUS(1, 0), "CIPRSTATUS" }, { S3C_CAMIF_REG_CIIMGCPT(0), "CIIMGCPT" }, }; u32 i; pr_info("--- %s ---\n", label); for (i = 0; i < ARRAY_SIZE(registers); i++) { u32 cfg = readl(camif->io_base + registers[i].offset); dev_info(camif->dev, "%s:\t0x%08x\n", registers[i].name, cfg); } }
gpl-2.0
dabaol/linux
drivers/usb/host/ohci-omap3.c
2089
5413
/*
 * ohci-omap3.c - driver for OHCI on OMAP3 and later processors
 *
 * Bus Glue for OMAP3 USBHOST 3 port OHCI controller
 * This controller is also used in later OMAPs and AM35x chips
 *
 * Copyright (C) 2007-2010 Texas Instruments, Inc.
 * Author: Vikram Pandita <vikram.pandita@ti.com>
 * Author: Anand Gadiyar <gadiyar@ti.com>
 * Author: Keshava Munegowda <keshava_mgowda@ti.com>
 *
 * Based on ehci-omap.c and some other ohci glue layers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * TODO (last updated Feb 27, 2011):
 *	- add kernel-doc
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/usb/otg.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

#include "ohci.h"

#define DRIVER_DESC "OHCI OMAP3 driver"

static const char hcd_name[] = "ohci-omap3";
static struct hc_driver __read_mostly ohci_omap3_hc_driver;

/*
 * configure so an HC device and id are always provided
 * always called with process context; sleeping is OK
 */

/**
 * ohci_hcd_omap3_probe - initialize OMAP-based HCDs
 * @pdev: USB Host Controller being probed
 *
 * Allocates basic resources for this USB host controller, maps the
 * controller registers, enables runtime PM on the glue device and
 * registers the HCD with the USB core.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int ohci_hcd_omap3_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ohci_hcd	*ohci;
	struct usb_hcd *hcd = NULL;
	void __iomem *regs = NULL;
	struct resource *res;
	int ret;
	int irq;

	if (usb_disabled())
		return -ENODEV;

	if (!dev->parent) {
		dev_err(dev, "Missing parent device\n");
		return -ENODEV;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "OHCI irq failed\n");
		/*
		 * Propagate the real error code instead of flattening it
		 * to -ENODEV, so -EPROBE_DEFER keeps working.
		 */
		return irq;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "UHH OHCI get resource failed\n");
		return -ENOMEM;
	}

	regs = ioremap(res->start, resource_size(res));
	if (!regs) {
		dev_err(dev, "UHH OHCI ioremap failed\n");
		return -ENOMEM;
	}

	/*
	 * Right now device-tree probed devices don't get dma_mask set.
	 * Since shared usb code relies on it, set it here for now.
	 * Once we have dma capability bindings this can go away.
	 */
	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		goto err_io;

	ret = -ENODEV;
	hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev, dev_name(dev));
	if (!hcd) {
		dev_err(dev, "usb_create_hcd failed\n");
		goto err_io;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);
	hcd->regs = regs;

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	ohci = hcd_to_ohci(hcd);
	/*
	 * RemoteWakeupConnected has to be set explicitly before
	 * calling ohci_run. The reset value of RWC is 0.
	 */
	ohci->hc_control = OHCI_CTRL_RWC;

	ret = usb_add_hcd(hcd, irq, 0);
	if (ret) {
		dev_dbg(dev, "failed to add hcd with err %d\n", ret);
		goto err_add_hcd;
	}
	device_wakeup_enable(hcd->self.controller);

	return 0;

err_add_hcd:
	pm_runtime_put_sync(dev);
	/* undo pm_runtime_enable() too, or a re-probe unbalances PM */
	pm_runtime_disable(dev);
	usb_put_hcd(hcd);
err_io:
	iounmap(regs);
	return ret;
}

/*
 * may be called without controller electrically present
 * may be called with controller, bus, and devices active
 */

/**
 * ohci_hcd_omap3_remove - shutdown processing for OHCI HCDs
 * @pdev: USB Host Controller being removed
 *
 * Reverses the effect of ohci_hcd_omap3_probe(), first invoking
 * the HCD's stop() method.  It is always called from a thread
 * context, normally "rmmod", "apmd", or something similar.
 */
static int ohci_hcd_omap3_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct usb_hcd *hcd = dev_get_drvdata(dev);

	/*
	 * Stop the HC before tearing down its register mapping:
	 * usb_remove_hcd() (via ohci_stop()) still accesses hcd->regs,
	 * so iounmap() must come afterwards, not before.
	 */
	usb_remove_hcd(hcd);
	iounmap(hcd->regs);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	usb_put_hcd(hcd);
	return 0;
}

static const struct of_device_id omap_ohci_dt_ids[] = {
	{ .compatible = "ti,ohci-omap3" },
	{ }
};

MODULE_DEVICE_TABLE(of, omap_ohci_dt_ids);

static struct platform_driver ohci_hcd_omap3_driver = {
	.probe		= ohci_hcd_omap3_probe,
	.remove		= ohci_hcd_omap3_remove,
	.shutdown	= usb_hcd_platform_shutdown,
	.driver		= {
		.name	= "ohci-omap3",
		.of_match_table = omap_ohci_dt_ids,
	},
};

static int __init ohci_omap3_init(void)
{
	if (usb_disabled())
		return -ENODEV;

	pr_info("%s: " DRIVER_DESC "\n", hcd_name);

	ohci_init_driver(&ohci_omap3_hc_driver, NULL);
	return platform_driver_register(&ohci_hcd_omap3_driver);
}
module_init(ohci_omap3_init);

static void __exit ohci_omap3_cleanup(void)
{
	platform_driver_unregister(&ohci_hcd_omap3_driver);
}
module_exit(ohci_omap3_cleanup);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS("platform:ohci-omap3");
MODULE_AUTHOR("Anand Gadiyar <gadiyar@ti.com>");
MODULE_LICENSE("GPL");
gpl-2.0
CyanogenMod/android_kernel_motorola_msm8916
drivers/staging/comedi/drivers/addi_apci_3501.c
2089
11873
/*
 * addi_apci_3501.c
 * Copyright (C) 2004,2005  ADDI-DATA GmbH for the source code of this module.
 * Project manager: Eric Stolz
 *
 *	ADDI-DATA GmbH
 *	Dieselstrasse 3
 *	D-77833 Ottersweier
 *	Tel: +19(0)7223/9493-0
 *	Fax: +49(0)7223/9493-92
 *	http://www.addi-data.com
 *	info@addi-data.com
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * You should also find the complete GPL in the COPYING file accompanying
 * this source code.
 */

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>

#include "../comedidev.h"
#include "comedi_fc.h"
#include "amcc_s5933.h"

/*
 * PCI bar 1 register I/O map
 */
#define APCI3501_AO_CTRL_STATUS_REG		0x00
#define APCI3501_AO_CTRL_BIPOLAR		(1 << 0)
#define APCI3501_AO_STATUS_READY		(1 << 8)
#define APCI3501_AO_DATA_REG			0x04
#define APCI3501_AO_DATA_CHAN(x)		((x) << 0)
#define APCI3501_AO_DATA_VAL(x)			((x) << 8)
#define APCI3501_AO_DATA_BIPOLAR		(1 << 31)
#define APCI3501_AO_TRIG_SCS_REG		0x08
#define APCI3501_TIMER_SYNC_REG			0x20
#define APCI3501_TIMER_RELOAD_REG		0x24
#define APCI3501_TIMER_TIMEBASE_REG		0x28
#define APCI3501_TIMER_CTRL_REG			0x2c
#define APCI3501_TIMER_STATUS_REG		0x30
#define APCI3501_TIMER_IRQ_REG			0x34
#define APCI3501_TIMER_WARN_RELOAD_REG		0x38
#define APCI3501_TIMER_WARN_TIMEBASE_REG	0x3c
#define APCI3501_DO_REG				0x40
#define APCI3501_DI_REG				0x50

/*
 * AMCC S5933 NVRAM
 */
#define NVRAM_USER_DATA_START	0x100

#define NVCMD_BEGIN_READ	(0x7 << 5)
#define NVCMD_LOAD_LOW		(0x4 << 5)
#define NVCMD_LOAD_HIGH		(0x5 << 5)

/*
 * Function types stored in the eeprom
 */
#define EEPROM_DIGITALINPUT		0
#define EEPROM_DIGITALOUTPUT		1
#define EEPROM_ANALOGINPUT		2
#define EEPROM_ANALOGOUTPUT		3
#define EEPROM_TIMER			4
#define EEPROM_WATCHDOG			5
#define EEPROM_TIMER_WATCHDOG_COUNTER	10

/* Per-device private state. */
struct apci3501_private {
	int i_IobaseAmcc;		/* PCI bar 0 - AMCC S5933 I/O base */
	struct task_struct *tsk_Current; /* task to signal from the timer irq */
	unsigned char b_TimerSelectMode; /* timer/watchdog mode (legacy code) */
};

static struct comedi_lrange apci3501_ao_range = {
	2, {
		BIP_RANGE(10),
		UNI_RANGE(10)
	}
};

/*
 * Busy-wait until the DAC reports ready.
 * NOTE(review): this polls without a timeout; a hung board would spin
 * forever here.
 */
static int apci3501_wait_for_dac(struct comedi_device *dev)
{
	unsigned int status;

	do {
		status = inl(dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
	} while (!(status & APCI3501_AO_STATUS_READY));

	return 0;
}

/* Write one or more samples to an analog output channel. */
static int apci3501_ao_insn_write(struct comedi_device *dev,
				  struct comedi_subdevice *s,
				  struct comedi_insn *insn,
				  unsigned int *data)
{
	unsigned int chan = CR_CHAN(insn->chanspec);
	unsigned int range = CR_RANGE(insn->chanspec);
	unsigned int val = 0;
	int i;
	int ret;

	/*
	 * All analog output channels have the same output range.
	 *	14-bit bipolar: +/-10V (range 0)
	 *	13-bit unipolar: 0-10V (range 1)
	 * Changing the range of one channel changes all of them!
	 */
	if (range) {
		outl(0, dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
	} else {
		val |= APCI3501_AO_DATA_BIPOLAR;
		outl(APCI3501_AO_CTRL_BIPOLAR,
		     dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
	}

	val |= APCI3501_AO_DATA_CHAN(chan);

	for (i = 0; i < insn->n; i++) {
		if (range == 1) {
			if (data[i] > 0x1fff) {
				dev_err(dev->class_dev,
					"Unipolar resolution is only 13-bits\n");
				return -EINVAL;
			}
		}

		ret = apci3501_wait_for_dac(dev);
		if (ret)
			return ret;

		outl(val | APCI3501_AO_DATA_VAL(data[i]),
		     dev->iobase + APCI3501_AO_DATA_REG);
	}

	return insn->n;
}

/* Legacy timer/watchdog support lives in this included sub-driver. */
#include "addi-data/hwdrv_apci3501.c"

/* Read the two digital input lines. */
static int apci3501_di_insn_bits(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn,
				 unsigned int *data)
{
	data[1] = inl(dev->iobase + APCI3501_DI_REG) & 0x3;

	return insn->n;
}

/* Update/read back the two digital output lines. */
static int apci3501_do_insn_bits(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn,
				 unsigned int *data)
{
	unsigned int mask = data[0];
	unsigned int bits = data[1];

	s->state = inl(dev->iobase + APCI3501_DO_REG);
	if (mask) {
		s->state &= ~mask;
		s->state |= (bits & mask);

		outl(s->state, dev->iobase + APCI3501_DO_REG);
	}

	data[1] = s->state;

	return insn->n;
}

/* Poll until the S5933 NVRAM interface is no longer busy. */
static void apci3501_eeprom_wait(unsigned long iobase)
{
	unsigned char val;

	do {
		val = inb(iobase + AMCC_OP_REG_MCSR_NVCMD);
	} while (val & 0x80);
}

/* Read one 16-bit little-endian word from the on-board eeprom. */
static unsigned short apci3501_eeprom_readw(unsigned long iobase,
					    unsigned short addr)
{
	unsigned short val = 0;
	unsigned char tmp;
	unsigned char i;

	/* Add the offset to the start of the user data */
	addr += NVRAM_USER_DATA_START;

	for (i = 0; i < 2; i++) {
		/* Load the low 8 bit address */
		outb(NVCMD_LOAD_LOW, iobase + AMCC_OP_REG_MCSR_NVCMD);
		apci3501_eeprom_wait(iobase);
		outb((addr + i) & 0xff, iobase + AMCC_OP_REG_MCSR_NVDATA);
		apci3501_eeprom_wait(iobase);

		/* Load the high 8 bit address */
		outb(NVCMD_LOAD_HIGH, iobase + AMCC_OP_REG_MCSR_NVCMD);
		apci3501_eeprom_wait(iobase);
		outb(((addr + i) >> 8) & 0xff,
		     iobase + AMCC_OP_REG_MCSR_NVDATA);
		apci3501_eeprom_wait(iobase);

		/* Read the eeprom data byte */
		outb(NVCMD_BEGIN_READ, iobase + AMCC_OP_REG_MCSR_NVCMD);
		apci3501_eeprom_wait(iobase);
		tmp = inb(iobase + AMCC_OP_REG_MCSR_NVDATA);
		apci3501_eeprom_wait(iobase);

		if (i == 0)
			val |= tmp;
		else
			val |= (tmp << 8);
	}

	return val;
}

/*
 * Walk the eeprom function table and return the number of analog
 * output channels, or 0 if no AO function entry is found.
 */
static int apci3501_eeprom_get_ao_n_chan(struct comedi_device *dev)
{
	struct apci3501_private *devpriv = dev->private;
	unsigned long iobase = devpriv->i_IobaseAmcc;
	unsigned char nfuncs;
	int i;

	nfuncs = apci3501_eeprom_readw(iobase, 10) & 0xff;

	/* Read functionality details */
	for (i = 0; i < nfuncs; i++) {
		unsigned short offset = i * 4;
		unsigned short addr;
		unsigned char func;
		unsigned short val;

		func = apci3501_eeprom_readw(iobase, 12 + offset) & 0x3f;
		addr = apci3501_eeprom_readw(iobase, 14 + offset);

		if (func == EEPROM_ANALOGOUTPUT) {
			val = apci3501_eeprom_readw(iobase, addr + 10);
			return (val >> 4) & 0x3ff;
		}
	}
	return 0;
}

/* comedi (*insn_read) for the eeprom subdevice; channel = word address. */
static int apci3501_eeprom_insn_read(struct comedi_device *dev,
				     struct comedi_subdevice *s,
				     struct comedi_insn *insn,
				     unsigned int *data)
{
	struct apci3501_private *devpriv = dev->private;
	unsigned short addr = CR_CHAN(insn->chanspec);

	data[0] = apci3501_eeprom_readw(devpriv->i_IobaseAmcc, 2 * addr);

	return insn->n;
}

/*
 * Timer/watchdog interrupt handler: acknowledges the interrupt,
 * notifies the registered user task via SIGIO and re-enables the irq.
 */
static irqreturn_t apci3501_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct apci3501_private *devpriv = dev->private;
	unsigned int ui_Timer_AOWatchdog;
	unsigned long ul_Command1;
	int i_temp;

	/* Disable Interrupt */
	ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
	ul_Command1 = (ul_Command1 & 0xFFFFF9FDul);
	outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);

	ui_Timer_AOWatchdog = inl(dev->iobase + APCI3501_TIMER_IRQ_REG) & 0x1;
	if ((!ui_Timer_AOWatchdog)) {
		comedi_error(dev, "IRQ from unknown source");
		return IRQ_NONE;
	}

	/* Enable Interrupt Send a signal to from kernel to user space */
	send_sig(SIGIO, devpriv->tsk_Current, 0);
	ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
	ul_Command1 = ((ul_Command1 & 0xFFFFF9FDul) | 1 << 1);
	outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
	/* NOTE(review): status read presumably acks the timer; result unused */
	i_temp = inl(dev->iobase + APCI3501_TIMER_STATUS_REG) & 0x1;

	return IRQ_HANDLED;
}

/* Reset digital outputs to 0 and all analog outputs to 0V (bipolar). */
static int apci3501_reset(struct comedi_device *dev)
{
	unsigned int val;
	int chan;
	int ret;

	/* Reset all digital outputs to "0" */
	outl(0x0, dev->iobase + APCI3501_DO_REG);

	/* Default all analog outputs to 0V (bipolar) */
	outl(APCI3501_AO_CTRL_BIPOLAR,
	     dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
	val = APCI3501_AO_DATA_BIPOLAR | APCI3501_AO_DATA_VAL(0);

	/* Set all analog output channels */
	for (chan = 0; chan < 8; chan++) {
		ret = apci3501_wait_for_dac(dev);
		if (ret) {
			dev_warn(dev->class_dev,
				 "%s: DAC not-ready for channel %i\n",
				 __func__, chan);
		} else {
			outl(val | APCI3501_AO_DATA_CHAN(chan),
			     dev->iobase + APCI3501_AO_DATA_REG);
		}
	}

	return 0;
}

/* comedi (*auto_attach): enable the PCI device and set up subdevices. */
static int apci3501_auto_attach(struct comedi_device *dev,
				unsigned long context_unused)
{
	struct pci_dev *pcidev = comedi_to_pci_dev(dev);
	struct apci3501_private *devpriv;
	struct comedi_subdevice *s;
	int ao_n_chan;
	int ret;

	devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
	if (!devpriv)
		return -ENOMEM;
	dev->private = devpriv;

	ret = comedi_pci_enable(dev);
	if (ret)
		return ret;

	dev->iobase = pci_resource_start(pcidev, 1);
	devpriv->i_IobaseAmcc = pci_resource_start(pcidev, 0);

	ao_n_chan = apci3501_eeprom_get_ao_n_chan(dev);

	if (pcidev->irq > 0) {
		ret = request_irq(pcidev->irq, apci3501_interrupt, IRQF_SHARED,
				  dev->board_name, dev);
		if (ret == 0)
			dev->irq = pcidev->irq;
	}

	ret = comedi_alloc_subdevices(dev, 5);
	if (ret)
		return ret;

	/* Initialize the analog output subdevice */
	s = &dev->subdevices[0];
	if (ao_n_chan) {
		s->type		= COMEDI_SUBD_AO;
		s->subdev_flags	= SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
		s->n_chan	= ao_n_chan;
		s->maxdata	= 0x3fff;
		s->range_table	= &apci3501_ao_range;
		s->insn_write	= apci3501_ao_insn_write;
	} else {
		s->type		= COMEDI_SUBD_UNUSED;
	}

	/* Initialize the digital input subdevice */
	s = &dev->subdevices[1];
	s->type		= COMEDI_SUBD_DI;
	s->subdev_flags	= SDF_READABLE;
	s->n_chan	= 2;
	s->maxdata	= 1;
	s->range_table	= &range_digital;
	s->insn_bits	= apci3501_di_insn_bits;

	/* Initialize the digital output subdevice */
	s = &dev->subdevices[2];
	s->type		= COMEDI_SUBD_DO;
	s->subdev_flags	= SDF_WRITEABLE;
	s->n_chan	= 2;
	s->maxdata	= 1;
	s->range_table	= &range_digital;
	s->insn_bits	= apci3501_do_insn_bits;

	/* Initialize the timer/watchdog subdevice */
	s = &dev->subdevices[3];
	s->type		= COMEDI_SUBD_TIMER;
	s->subdev_flags	= SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
	s->n_chan	= 1;
	s->maxdata	= 0;
	s->len_chanlist	= 1;
	s->range_table	= &range_digital;
	s->insn_write	= i_APCI3501_StartStopWriteTimerCounterWatchdog;
	s->insn_read	= i_APCI3501_ReadTimerCounterWatchdog;
	s->insn_config	= i_APCI3501_ConfigTimerCounterWatchdog;

	/* Initialize the eeprom subdevice */
	s = &dev->subdevices[4];
	s->type		= COMEDI_SUBD_MEMORY;
	s->subdev_flags	= SDF_READABLE | SDF_INTERNAL;
	s->n_chan	= 256;
	s->maxdata	= 0xffff;
	s->insn_read	= apci3501_eeprom_insn_read;

	apci3501_reset(dev);
	return 0;
}

/* comedi (*detach): quiesce the hardware and release the irq/PCI device. */
static void apci3501_detach(struct comedi_device *dev)
{
	if (dev->iobase)
		apci3501_reset(dev);
	if (dev->irq)
		free_irq(dev->irq, dev);
	comedi_pci_disable(dev);
}

static struct comedi_driver apci3501_driver = {
	.driver_name	= "addi_apci_3501",
	.module		= THIS_MODULE,
	.auto_attach	= apci3501_auto_attach,
	.detach		= apci3501_detach,
};

static int apci3501_pci_probe(struct pci_dev *dev,
			      const struct pci_device_id *id)
{
	return comedi_pci_auto_config(dev, &apci3501_driver, id->driver_data);
}

static DEFINE_PCI_DEVICE_TABLE(apci3501_pci_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3001) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, apci3501_pci_table);

static struct pci_driver apci3501_pci_driver = {
	.name		= "addi_apci_3501",
	.id_table	= apci3501_pci_table,
	.probe		= apci3501_pci_probe,
	.remove		= comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci3501_driver, apci3501_pci_driver);

MODULE_DESCRIPTION("ADDI-DATA APCI-3501 Analog output board");
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_LICENSE("GPL");
gpl-2.0
hellsgod/hells-Core-N6P
drivers/zorro/proc.c
2089
3772
/*
 *	Procfs interface for the Zorro bus.
 *
 *	Copyright (C) 1998-2003 Geert Uytterhoeven
 *
 *	Heavily based on the procfs interface for the PCI bus, which is
 *
 *	Copyright (C) 1997, 1998 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#include <linux/types.h>
#include <linux/zorro.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/export.h>

#include <asm/uaccess.h>
#include <asm/amigahw.h>
#include <asm/setup.h>

/*
 * llseek for a per-slot /proc/bus/zorro/NN file.  The file exposes a
 * struct ConfigDev, so seeks are clamped to [0, sizeof(struct ConfigDev)].
 * The inode mutex serializes updates to f_pos.
 */
static loff_t
proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
{
	loff_t new = -1;
	struct inode *inode = file_inode(file);

	mutex_lock(&inode->i_mutex);
	switch (whence) {
	case 0:		/* SEEK_SET */
		new = off;
		break;
	case 1:		/* SEEK_CUR */
		new = file->f_pos + off;
		break;
	case 2:		/* SEEK_END */
		new = sizeof(struct ConfigDev) + off;
		break;
	}
	if (new < 0 || new > sizeof(struct ConfigDev))
		new = -EINVAL;
	else
		file->f_pos = new;
	mutex_unlock(&inode->i_mutex);
	return new;
}

/*
 * Read a synthesized struct ConfigDev for the zorro device attached to
 * this proc entry.  Only the fields visible in this function are filled
 * in; the rest of the struct reads back as zeroes.
 */
static ssize_t
proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes,
		    loff_t *ppos)
{
	struct zorro_dev *z = PDE_DATA(file_inode(file));
	struct ConfigDev cd;
	loff_t pos = *ppos;

	if (pos >= sizeof(struct ConfigDev))
		return 0;
	if (nbytes >= sizeof(struct ConfigDev))
		nbytes = sizeof(struct ConfigDev);
	if (pos + nbytes > sizeof(struct ConfigDev))
		nbytes = sizeof(struct ConfigDev) - pos;

	/* Construct a ConfigDev */
	memset(&cd, 0, sizeof(cd));
	cd.cd_Rom = z->rom;
	cd.cd_SlotAddr = z->slotaddr;
	cd.cd_SlotSize = z->slotsize;
	cd.cd_BoardAddr = (void *)zorro_resource_start(z);
	cd.cd_BoardSize = zorro_resource_len(z);

	if (copy_to_user(buf, (void *)&cd + pos, nbytes))
		return -EFAULT;
	*ppos += nbytes;

	return nbytes;
}

static const struct file_operations proc_bus_zorro_operations = {
	.owner		= THIS_MODULE,
	.llseek		= proc_bus_zorro_lseek,
	.read		= proc_bus_zorro_read,
};

/*
 * seq_file iterator over the autoconfigured zorro devices for
 * /proc/bus/zorro/devices.  The iterator value is the slot index
 * (a pointer to the loff_t position).
 */
static void * zorro_seq_start(struct seq_file *m, loff_t *pos)
{
	return (*pos < zorro_num_autocon) ? pos : NULL;
}

static void * zorro_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return (*pos < zorro_num_autocon) ? pos : NULL;
}

static void zorro_seq_stop(struct seq_file *m, void *v)
{
}

/* One line per device: slot, id, base address, size, board type. */
static int zorro_seq_show(struct seq_file *m, void *v)
{
	unsigned int slot = *(loff_t *)v;
	struct zorro_dev *z = &zorro_autocon[slot];

	seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id,
		   (unsigned long)zorro_resource_start(z),
		   (unsigned long)zorro_resource_len(z),
		   z->rom.er_Type);
	return 0;
}

static const struct seq_operations zorro_devices_seq_ops = {
	.start = zorro_seq_start,
	.next = zorro_seq_next,
	.stop = zorro_seq_stop,
	.show = zorro_seq_show,
};

static int zorro_devices_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zorro_devices_seq_ops);
}

static const struct file_operations zorro_devices_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= zorro_devices_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static struct proc_dir_entry *proc_bus_zorro_dir;

/* Create the /proc/bus/zorro/NN entry for one slot. */
static int __init zorro_proc_attach_device(unsigned int slot)
{
	struct proc_dir_entry *entry;
	char name[4];

	/* slot is bounded by zorro_num_autocon, so "%02x" fits in name[4] */
	sprintf(name, "%02x", slot);
	entry = proc_create_data(name, 0, proc_bus_zorro_dir,
				 &proc_bus_zorro_operations,
				 &zorro_autocon[slot]);
	if (!entry)
		return -ENOMEM;
	proc_set_size(entry, sizeof(struct zorro_dev));
	return 0;
}

/* Populate /proc/bus/zorro if this machine actually has a Zorro bus. */
static int __init zorro_proc_init(void)
{
	unsigned int slot;

	if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) {
		proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL);
		proc_create("devices", 0, proc_bus_zorro_dir,
			    &zorro_devices_proc_fops);
		for (slot = 0; slot < zorro_num_autocon; slot++)
			zorro_proc_attach_device(slot);
	}
	return 0;
}

device_initcall(zorro_proc_init);
gpl-2.0
Frontier314/kernel_ut4412
drivers/video/omap2/dss/dss_features.c
2089
13996
/* * linux/drivers/video/omap2/dss/dss_features.c * * Copyright (C) 2010 Texas Instruments * Author: Archit Taneja <archit@ti.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/err.h> #include <linux/slab.h> #include <video/omapdss.h> #include <plat/cpu.h> #include "dss.h" #include "dss_features.h" /* Defines a generic omap register field */ struct dss_reg_field { u8 start, end; }; struct dss_param_range { int min, max; }; struct omap_dss_features { const struct dss_reg_field *reg_fields; const int num_reg_fields; const u32 has_feature; const int num_mgrs; const int num_ovls; const enum omap_display_type *supported_displays; const enum omap_color_mode *supported_color_modes; const char * const *clksrc_names; const struct dss_param_range *dss_params; }; /* This struct is assigned to one of the below during initialization */ static const struct omap_dss_features *omap_current_dss_features; static const struct dss_reg_field omap2_dss_reg_fields[] = { [FEAT_REG_FIRHINC] = { 11, 0 }, [FEAT_REG_FIRVINC] = { 27, 16 }, [FEAT_REG_FIFOLOWTHRESHOLD] = { 8, 0 }, [FEAT_REG_FIFOHIGHTHRESHOLD] = { 24, 16 }, [FEAT_REG_FIFOSIZE] = { 8, 0 }, [FEAT_REG_HORIZONTALACCU] = { 9, 0 }, [FEAT_REG_VERTICALACCU] = { 25, 16 }, [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 }, [FEAT_REG_DSIPLL_REGN] = { 0, 0 }, [FEAT_REG_DSIPLL_REGM] = { 0, 0 }, [FEAT_REG_DSIPLL_REGM_DISPC] = { 0, 0 }, [FEAT_REG_DSIPLL_REGM_DSI] = { 0, 0 
},
};

/*
 * Per-SoC DSS capability tables.  dss_features_init() selects exactly one
 * omap_dss_features struct at boot, and every dss_feat_*() accessor below
 * simply indexes into the selected table.
 *
 * Each dss_reg_field entry is the { start, end } bit position of a
 * register field (handed back by dss_feat_get_reg_field()).
 */
static const struct dss_reg_field omap3_dss_reg_fields[] = {
	[FEAT_REG_FIRHINC] = { 12, 0 },
	[FEAT_REG_FIRVINC] = { 28, 16 },
	[FEAT_REG_FIFOLOWTHRESHOLD] = { 11, 0 },
	[FEAT_REG_FIFOHIGHTHRESHOLD] = { 27, 16 },
	[FEAT_REG_FIFOSIZE] = { 10, 0 },
	[FEAT_REG_HORIZONTALACCU] = { 9, 0 },
	[FEAT_REG_VERTICALACCU] = { 25, 16 },
	[FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 },
	[FEAT_REG_DSIPLL_REGN] = { 7, 1 },
	[FEAT_REG_DSIPLL_REGM] = { 18, 8 },
	[FEAT_REG_DSIPLL_REGM_DISPC] = { 22, 19 },
	[FEAT_REG_DSIPLL_REGM_DSI] = { 26, 23 },
};

static const struct dss_reg_field omap4_dss_reg_fields[] = {
	[FEAT_REG_FIRHINC] = { 12, 0 },
	[FEAT_REG_FIRVINC] = { 28, 16 },
	[FEAT_REG_FIFOLOWTHRESHOLD] = { 15, 0 },
	[FEAT_REG_FIFOHIGHTHRESHOLD] = { 31, 16 },
	[FEAT_REG_FIFOSIZE] = { 15, 0 },
	[FEAT_REG_HORIZONTALACCU] = { 10, 0 },
	[FEAT_REG_VERTICALACCU] = { 26, 16 },
	[FEAT_REG_DISPC_CLK_SWITCH] = { 9, 8 },
	[FEAT_REG_DSIPLL_REGN] = { 8, 1 },
	[FEAT_REG_DSIPLL_REGM] = { 20, 9 },
	[FEAT_REG_DSIPLL_REGM_DISPC] = { 25, 21 },
	[FEAT_REG_DSIPLL_REGM_DSI] = { 30, 26 },
};

/* Display types each overlay-manager channel can drive, per SoC. */
static const enum omap_display_type omap2_dss_supported_displays[] = {
	/* OMAP_DSS_CHANNEL_LCD */
	OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI,

	/* OMAP_DSS_CHANNEL_DIGIT */
	OMAP_DISPLAY_TYPE_VENC,
};

static const enum omap_display_type omap3430_dss_supported_displays[] = {
	/* OMAP_DSS_CHANNEL_LCD */
	OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
	OMAP_DISPLAY_TYPE_SDI | OMAP_DISPLAY_TYPE_DSI,

	/* OMAP_DSS_CHANNEL_DIGIT */
	OMAP_DISPLAY_TYPE_VENC,
};

/* Same as omap3430 but without SDI on the LCD channel. */
static const enum omap_display_type omap3630_dss_supported_displays[] = {
	/* OMAP_DSS_CHANNEL_LCD */
	OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
	OMAP_DISPLAY_TYPE_DSI,

	/* OMAP_DSS_CHANNEL_DIGIT */
	OMAP_DISPLAY_TYPE_VENC,
};

static const enum omap_display_type omap4_dss_supported_displays[] = {
	/* OMAP_DSS_CHANNEL_LCD */
	OMAP_DISPLAY_TYPE_DBI | OMAP_DISPLAY_TYPE_DSI,

	/* OMAP_DSS_CHANNEL_DIGIT */
	OMAP_DISPLAY_TYPE_VENC | OMAP_DISPLAY_TYPE_HDMI,

	/* OMAP_DSS_CHANNEL_LCD2 */
	OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
	OMAP_DISPLAY_TYPE_DSI,
};

/* Pixel formats each overlay (GFX/VIDEO1/VIDEO2) accepts, per SoC. */
static const enum omap_color_mode omap2_dss_supported_color_modes[] = {
	/* OMAP_DSS_GFX */
	OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
	OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
	OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
	OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P,

	/* OMAP_DSS_VIDEO1 */
	OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
	OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
	OMAP_DSS_COLOR_UYVY,

	/* OMAP_DSS_VIDEO2 */
	OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
	OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
	OMAP_DSS_COLOR_UYVY,
};

static const enum omap_color_mode omap3_dss_supported_color_modes[] = {
	/* OMAP_DSS_GFX */
	OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
	OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
	OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
	OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
	OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_ARGB32 |
	OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,

	/* OMAP_DSS_VIDEO1 */
	OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P |
	OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
	OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY,

	/* OMAP_DSS_VIDEO2 */
	OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
	OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
	OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
	OMAP_DSS_COLOR_UYVY | OMAP_DSS_COLOR_ARGB32 |
	OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
};

static const enum omap_color_mode omap4_dss_supported_color_modes[] = {
	/* OMAP_DSS_GFX */
	OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
	OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
	OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
	OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
	OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_ARGB32 |
	OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32 |
	OMAP_DSS_COLOR_ARGB16_1555,

	/* OMAP_DSS_VIDEO1 */
	OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U |
	OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 |
	OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 |
	OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U |
	OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY |
	OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 |
	OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 |
	OMAP_DSS_COLOR_RGBX32,

	/* OMAP_DSS_VIDEO2 */
	OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U |
	OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 |
	OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 |
	OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U |
	OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY |
	OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 |
	OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 |
	OMAP_DSS_COLOR_RGBX32,
};

/* Human-readable names for the DSS clock sources, per SoC. */
static const char * const omap2_dss_clk_source_names[] = {
	[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC]	= "N/A",
	[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI]	= "N/A",
	[OMAP_DSS_CLK_SRC_FCK]			= "DSS_FCLK1",
};

static const char * const omap3_dss_clk_source_names[] = {
	[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC]	= "DSI1_PLL_FCLK",
	[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI]	= "DSI2_PLL_FCLK",
	[OMAP_DSS_CLK_SRC_FCK]			= "DSS1_ALWON_FCLK",
};

static const char * const omap4_dss_clk_source_names[] = {
	[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC]	= "PLL1_CLK1",
	[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI]	= "PLL1_CLK2",
	[OMAP_DSS_CLK_SRC_FCK]			= "DSS_FCLK",
	[OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC]	= "PLL2_CLK1",
	[OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI]	= "PLL2_CLK2",
};

/*
 * Valid { min, max } ranges for clock/divider parameters, served by
 * dss_feat_get_param_min()/dss_feat_get_param_max().
 */
static const struct dss_param_range omap2_dss_param_range[] = {
	[FEAT_PARAM_DSS_FCK] = { 0, 173000000 },
	/* NOTE(review): DSI PLL entries are zeroed — presumably no DSI PLL
	 * on OMAP2; confirm before relying on these limits. */
	[FEAT_PARAM_DSIPLL_REGN] = { 0, 0 },
	[FEAT_PARAM_DSIPLL_REGM] = { 0, 0 },
	[FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, 0 },
	[FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, 0 },
	[FEAT_PARAM_DSIPLL_FINT] = { 0, 0 },
	[FEAT_PARAM_DSIPLL_LPDIV] = { 0, 0 },
};

static const struct dss_param_range omap3_dss_param_range[] = {
	[FEAT_PARAM_DSS_FCK] = { 0, 173000000 },
	[FEAT_PARAM_DSIPLL_REGN] = { 0, (1 << 7) - 1 },
	[FEAT_PARAM_DSIPLL_REGM] = { 0, (1 << 11) - 1 },
	[FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, (1 << 4) - 1 },
	[FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 4) - 1 },
	[FEAT_PARAM_DSIPLL_FINT] = { 750000, 2100000 },
	[FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1},
};

static const struct dss_param_range omap4_dss_param_range[] = {
	[FEAT_PARAM_DSS_FCK] = { 0, 186000000 },
	[FEAT_PARAM_DSIPLL_REGN] = { 0, (1 << 8) - 1 },
	[FEAT_PARAM_DSIPLL_REGM] = { 0, (1 << 12) - 1 },
	[FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, (1 << 5) - 1 },
	[FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 5) - 1 },
	[FEAT_PARAM_DSIPLL_FINT] = { 500000, 2500000 },
	[FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
};

/* OMAP2 DSS Features */
static const struct omap_dss_features omap2_dss_features = {
	.reg_fields = omap2_dss_reg_fields,
	.num_reg_fields = ARRAY_SIZE(omap2_dss_reg_fields),

	.has_feature	=
		FEAT_LCDENABLEPOL | FEAT_LCDENABLESIGNAL |
		FEAT_PCKFREEENABLE | FEAT_FUNCGATED |
		FEAT_ROWREPEATENABLE | FEAT_RESIZECONF,

	.num_mgrs = 2,
	.num_ovls = 3,
	.supported_displays = omap2_dss_supported_displays,
	.supported_color_modes = omap2_dss_supported_color_modes,
	.clksrc_names = omap2_dss_clk_source_names,
	.dss_params = omap2_dss_param_range,
};

/* OMAP3 DSS Features */
static const struct omap_dss_features omap3430_dss_features = {
	.reg_fields = omap3_dss_reg_fields,
	.num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),

	.has_feature	=
		FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL |
		FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
		FEAT_FUNCGATED | FEAT_ROWREPEATENABLE |
		FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF |
		FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC,

	.num_mgrs = 2,
	.num_ovls = 3,
	.supported_displays = omap3430_dss_supported_displays,
	.supported_color_modes = omap3_dss_supported_color_modes,
	.clksrc_names = omap3_dss_clk_source_names,
	.dss_params = omap3_dss_param_range,
};

static const struct omap_dss_features omap3630_dss_features = {
	.reg_fields = omap3_dss_reg_fields,
	.num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),

	.has_feature	=
		FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL |
		FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
		FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED |
		FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT |
		FEAT_RESIZECONF | FEAT_DSI_PLL_PWR_BUG |
		FEAT_DSI_PLL_FREQSEL,

	.num_mgrs = 2,
	.num_ovls = 3,
	.supported_displays = omap3630_dss_supported_displays,
	.supported_color_modes = omap3_dss_supported_color_modes,
	.clksrc_names = omap3_dss_clk_source_names,
	.dss_params = omap3_dss_param_range,
};

/* OMAP4 DSS Features */
/* For OMAP4430 ES 1.0 revision */
static const struct omap_dss_features omap4430_es1_0_dss_features = {
	.reg_fields = omap4_dss_reg_fields,
	.num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields),

	.has_feature	=
		FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA |
		FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 |
		FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
		FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
		FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2,

	.num_mgrs = 3,
	.num_ovls = 3,
	.supported_displays = omap4_dss_supported_displays,
	.supported_color_modes = omap4_dss_supported_color_modes,
	.clksrc_names = omap4_dss_clk_source_names,
	.dss_params = omap4_dss_param_range,
};

/* For all the other OMAP4 versions: the ES1.0 set plus HDMI CTS swmode */
static const struct omap_dss_features omap4_dss_features = {
	.reg_fields = omap4_dss_reg_fields,
	.num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields),

	.has_feature	=
		FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA |
		FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 |
		FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
		FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
		FEAT_DSI_GNQ | FEAT_HDMI_CTS_SWMODE |
		FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2,

	.num_mgrs = 3,
	.num_ovls = 3,
	.supported_displays = omap4_dss_supported_displays,
	.supported_color_modes = omap4_dss_supported_color_modes,
	.clksrc_names = omap4_dss_clk_source_names,
	.dss_params = omap4_dss_param_range,
};

/* Functions returning values related to a DSS feature */
int dss_feat_get_num_mgrs(void)
{
	return omap_current_dss_features->num_mgrs;
}

int dss_feat_get_num_ovls(void)
{
	return omap_current_dss_features->num_ovls;
}

unsigned long dss_feat_get_param_min(enum dss_range_param param)
{
	return omap_current_dss_features->dss_params[param].min;
}

unsigned long dss_feat_get_param_max(enum dss_range_param param)
{
	return omap_current_dss_features->dss_params[param].max;
}

enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel)
{
	return omap_current_dss_features->supported_displays[channel];
}

enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane)
{
	return omap_current_dss_features->supported_color_modes[plane];
}

bool dss_feat_color_mode_supported(enum omap_plane plane,
		enum omap_color_mode color_mode)
{
	return omap_current_dss_features->supported_color_modes[plane] &
			color_mode;
}

const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id)
{
	return omap_current_dss_features->clksrc_names[id];
}

/* DSS has_feature check */
bool dss_has_feature(enum dss_feat_id id)
{
	return omap_current_dss_features->has_feature & id;
}

/*
 * Look up the { start, end } bit positions of register field @id.
 * An out-of-range id is a programming error, hence BUG().
 */
void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end)
{
	if (id >= omap_current_dss_features->num_reg_fields)
		BUG();

	*start = omap_current_dss_features->reg_fields[id].start;
	*end = omap_current_dss_features->reg_fields[id].end;
}

/*
 * Select the feature table matching the running SoC.
 * NOTE(review): the order of these tests appears deliberate — the more
 * specific checks (omap3630, 4430 ES1.0) come before the generic family
 * checks (omap34xx, omap44xx), presumably because the generic predicates
 * also match those chips.  Confirm before reordering.
 */
void dss_features_init(void)
{
	if (cpu_is_omap24xx())
		omap_current_dss_features = &omap2_dss_features;
	else if (cpu_is_omap3630())
		omap_current_dss_features = &omap3630_dss_features;
	else if (cpu_is_omap34xx())
		omap_current_dss_features = &omap3430_dss_features;
	else if (omap_rev() == OMAP4430_REV_ES1_0)
		omap_current_dss_features = &omap4430_es1_0_dss_features;
	else if (cpu_is_omap44xx())
		omap_current_dss_features = &omap4_dss_features;
	else
		DSSWARN("Unsupported OMAP version");
}
gpl-2.0
spacex/kernel-centos7
arch/arm/mach-exynos/setup-spi.c
2345
1212
/* linux/arch/arm/mach-exynos4/setup-spi.c * * Copyright (C) 2011 Samsung Electronics Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <plat/gpio-cfg.h> #ifdef CONFIG_S3C64XX_DEV_SPI0 int s3c64xx_spi0_cfg_gpio(void) { s3c_gpio_cfgpin(EXYNOS4_GPB(0), S3C_GPIO_SFN(2)); s3c_gpio_setpull(EXYNOS4_GPB(0), S3C_GPIO_PULL_UP); s3c_gpio_cfgall_range(EXYNOS4_GPB(2), 2, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); return 0; } #endif #ifdef CONFIG_S3C64XX_DEV_SPI1 int s3c64xx_spi1_cfg_gpio(void) { s3c_gpio_cfgpin(EXYNOS4_GPB(4), S3C_GPIO_SFN(2)); s3c_gpio_setpull(EXYNOS4_GPB(4), S3C_GPIO_PULL_UP); s3c_gpio_cfgall_range(EXYNOS4_GPB(6), 2, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); return 0; } #endif #ifdef CONFIG_S3C64XX_DEV_SPI2 int s3c64xx_spi2_cfg_gpio(void) { s3c_gpio_cfgpin(EXYNOS4_GPC1(1), S3C_GPIO_SFN(5)); s3c_gpio_setpull(EXYNOS4_GPC1(1), S3C_GPIO_PULL_UP); s3c_gpio_cfgall_range(EXYNOS4_GPC1(3), 2, S3C_GPIO_SFN(5), S3C_GPIO_PULL_UP); return 0; } #endif
gpl-2.0
qqzwc/JBX_Kernel
drivers/platform/x86/asus-wmi.c
2345
41040
/* * Asus PC WMI hotkey driver * * Copyright(C) 2010 Intel Corporation. * Copyright(C) 2010-2011 Corentin Chary <corentin.chary@gmail.com> * * Portions based on wistron_btns.c: * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org> * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/leds.h> #include <linux/rfkill.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/platform_device.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include "asus-wmi.h" MODULE_AUTHOR("Corentin Chary <corentincj@iksaif.net>, " "Yong Wang <yong.y.wang@intel.com>"); MODULE_DESCRIPTION("Asus Generic WMI Driver"); MODULE_LICENSE("GPL"); #define to_platform_driver(drv) \ (container_of((drv), struct platform_driver, driver)) #define to_asus_wmi_driver(pdrv) \ (container_of((pdrv), struct asus_wmi_driver, 
platform_driver)) #define ASUS_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66" #define NOTIFY_BRNUP_MIN 0x11 #define NOTIFY_BRNUP_MAX 0x1f #define NOTIFY_BRNDOWN_MIN 0x20 #define NOTIFY_BRNDOWN_MAX 0x2e /* WMI Methods */ #define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */ #define ASUS_WMI_METHODID_SFBD 0x44424653 /* Set First Boot Device */ #define ASUS_WMI_METHODID_GLCD 0x44434C47 /* Get LCD status */ #define ASUS_WMI_METHODID_GPID 0x44495047 /* Get Panel ID?? (Resol) */ #define ASUS_WMI_METHODID_QMOD 0x444F4D51 /* Quiet MODe */ #define ASUS_WMI_METHODID_SPLV 0x4C425053 /* Set Panel Light Value */ #define ASUS_WMI_METHODID_SFUN 0x4E554653 /* FUNCtionalities */ #define ASUS_WMI_METHODID_SDSP 0x50534453 /* Set DiSPlay output */ #define ASUS_WMI_METHODID_GDSP 0x50534447 /* Get DiSPlay output */ #define ASUS_WMI_METHODID_DEVP 0x50564544 /* DEVice Policy */ #define ASUS_WMI_METHODID_OSVR 0x5256534F /* OS VeRsion */ #define ASUS_WMI_METHODID_DSTS 0x53544344 /* Device STatuS */ #define ASUS_WMI_METHODID_DSTS2 0x53545344 /* Device STatuS #2*/ #define ASUS_WMI_METHODID_BSTS 0x53545342 /* Bios STatuS ? */ #define ASUS_WMI_METHODID_DEVS 0x53564544 /* DEVice Set */ #define ASUS_WMI_METHODID_CFVS 0x53564643 /* CPU Frequency Volt Set */ #define ASUS_WMI_METHODID_KBFT 0x5446424B /* KeyBoard FilTer */ #define ASUS_WMI_METHODID_INIT 0x54494E49 /* INITialize */ #define ASUS_WMI_METHODID_HKEY 0x59454B48 /* Hot KEY ?? 
*/ #define ASUS_WMI_UNSUPPORTED_METHOD 0xFFFFFFFE /* Wireless */ #define ASUS_WMI_DEVID_HW_SWITCH 0x00010001 #define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002 #define ASUS_WMI_DEVID_WLAN 0x00010011 #define ASUS_WMI_DEVID_BLUETOOTH 0x00010013 #define ASUS_WMI_DEVID_GPS 0x00010015 #define ASUS_WMI_DEVID_WIMAX 0x00010017 #define ASUS_WMI_DEVID_WWAN3G 0x00010019 #define ASUS_WMI_DEVID_UWB 0x00010021 /* Leds */ /* 0x000200XX and 0x000400XX */ /* Backlight and Brightness */ #define ASUS_WMI_DEVID_BACKLIGHT 0x00050011 #define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012 #define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021 #define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */ /* Misc */ #define ASUS_WMI_DEVID_CAMERA 0x00060013 /* Storage */ #define ASUS_WMI_DEVID_CARDREADER 0x00080013 /* Input */ #define ASUS_WMI_DEVID_TOUCHPAD 0x00100011 #define ASUS_WMI_DEVID_TOUCHPAD_LED 0x00100012 /* Fan, Thermal */ #define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011 #define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 /* Power */ #define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012 /* DSTS masks */ #define ASUS_WMI_DSTS_STATUS_BIT 0x00000001 #define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002 #define ASUS_WMI_DSTS_PRESENCE_BIT 0x00010000 #define ASUS_WMI_DSTS_USER_BIT 0x00020000 #define ASUS_WMI_DSTS_BIOS_BIT 0x00040000 #define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF #define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00 struct bios_args { u32 arg0; u32 arg1; } __packed; /* * <platform>/ - debugfs root directory * dev_id - current dev_id * ctrl_param - current ctrl_param * method_id - current method_id * devs - call DEVS(dev_id, ctrl_param) and print result * dsts - call DSTS(dev_id) and print result * call - call method_id(dev_id, ctrl_param) and print result */ struct asus_wmi_debug { struct dentry *root; u32 method_id; u32 dev_id; u32 ctrl_param; }; struct asus_rfkill { struct asus_wmi *asus; struct rfkill *rfkill; u32 dev_id; }; struct asus_wmi { int dsts_id; int spec; int sfun; struct input_dev *inputdev; struct 
backlight_device *backlight_device; struct device *hwmon_device; struct platform_device *platform_device; struct led_classdev tpd_led; int tpd_led_wk; struct workqueue_struct *led_workqueue; struct work_struct tpd_led_work; struct asus_rfkill wlan; struct asus_rfkill bluetooth; struct asus_rfkill wimax; struct asus_rfkill wwan3g; struct hotplug_slot *hotplug_slot; struct mutex hotplug_lock; struct mutex wmi_lock; struct workqueue_struct *hotplug_workqueue; struct work_struct hotplug_work; struct asus_wmi_debug debug; struct asus_wmi_driver *driver; }; static int asus_wmi_input_init(struct asus_wmi *asus) { int err; asus->inputdev = input_allocate_device(); if (!asus->inputdev) return -ENOMEM; asus->inputdev->name = asus->driver->input_name; asus->inputdev->phys = asus->driver->input_phys; asus->inputdev->id.bustype = BUS_HOST; asus->inputdev->dev.parent = &asus->platform_device->dev; err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL); if (err) goto err_free_dev; err = input_register_device(asus->inputdev); if (err) goto err_free_keymap; return 0; err_free_keymap: sparse_keymap_free(asus->inputdev); err_free_dev: input_free_device(asus->inputdev); return err; } static void asus_wmi_input_exit(struct asus_wmi *asus) { if (asus->inputdev) { sparse_keymap_free(asus->inputdev); input_unregister_device(asus->inputdev); } asus->inputdev = NULL; } static int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval) { struct bios_args args = { .arg0 = arg0, .arg1 = arg1, }; struct acpi_buffer input = { (acpi_size) sizeof(args), &args }; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_status status; union acpi_object *obj; u32 tmp; status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 1, method_id, &input, &output); if (ACPI_FAILURE(status)) goto exit; obj = (union acpi_object *)output.pointer; if (obj && obj->type == ACPI_TYPE_INTEGER) tmp = (u32) obj->integer.value; else tmp = 0; if (retval) *retval = tmp; kfree(obj); exit: if 
(ACPI_FAILURE(status)) return -EIO; if (tmp == ASUS_WMI_UNSUPPORTED_METHOD) return -ENODEV; return 0; } static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval) { return asus_wmi_evaluate_method(asus->dsts_id, dev_id, 0, retval); } static int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, u32 *retval) { return asus_wmi_evaluate_method(ASUS_WMI_METHODID_DEVS, dev_id, ctrl_param, retval); } /* Helper for special devices with magic return codes */ static int asus_wmi_get_devstate_bits(struct asus_wmi *asus, u32 dev_id, u32 mask) { u32 retval = 0; int err; err = asus_wmi_get_devstate(asus, dev_id, &retval); if (err < 0) return err; if (!(retval & ASUS_WMI_DSTS_PRESENCE_BIT)) return -ENODEV; if (mask == ASUS_WMI_DSTS_STATUS_BIT) { if (retval & ASUS_WMI_DSTS_UNKNOWN_BIT) return -ENODEV; } return retval & mask; } static int asus_wmi_get_devstate_simple(struct asus_wmi *asus, u32 dev_id) { return asus_wmi_get_devstate_bits(asus, dev_id, ASUS_WMI_DSTS_STATUS_BIT); } /* * LEDs */ /* * These functions actually update the LED's, and are called from a * workqueue. By doing this as separate work rather than when the LED * subsystem asks, we avoid messing with the Asus ACPI stuff during a * potentially bad time, such as a timer interrupt. 
*/
/* Worker: push the cached touchpad-LED state to the firmware via WMI. */
static void tpd_led_update(struct work_struct *work)
{
	int ctrl_param;
	struct asus_wmi *asus;

	asus = container_of(work, struct asus_wmi, tpd_led_work);

	ctrl_param = asus->tpd_led_wk;
	asus_wmi_set_devstate(ASUS_WMI_DEVID_TOUCHPAD_LED, ctrl_param, NULL);
}

/*
 * LED-core brightness hook: cache the requested on/off state and defer
 * the real WMI call to the workqueue (see the comment above — avoids
 * touching the ACPI stuff from a bad context such as a timer interrupt).
 */
static void tpd_led_set(struct led_classdev *led_cdev,
			enum led_brightness value)
{
	struct asus_wmi *asus;

	asus = container_of(led_cdev, struct asus_wmi, tpd_led);

	asus->tpd_led_wk = !!value;
	queue_work(asus->led_workqueue, &asus->tpd_led_work);
}

/* Read the current touchpad-LED state from the firmware (<0 on error). */
static int read_tpd_led_state(struct asus_wmi *asus)
{
	return asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_TOUCHPAD_LED);
}

/* LED-core getter; reports the live firmware state, not the cached one. */
static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
{
	struct asus_wmi *asus;

	asus = container_of(led_cdev, struct asus_wmi, tpd_led);

	return read_tpd_led_state(asus);
}

/*
 * Register the touchpad LED, if the firmware exposes one.
 * Returns 0 when the LED is absent (treated as not an error) or when
 * registration succeeds; -ENOMEM or the classdev error otherwise.
 */
static int asus_wmi_led_init(struct asus_wmi *asus)
{
	int rv;

	if (read_tpd_led_state(asus) < 0)
		return 0;	/* no touchpad LED on this machine */

	asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
	if (!asus->led_workqueue)
		return -ENOMEM;
	INIT_WORK(&asus->tpd_led_work, tpd_led_update);

	asus->tpd_led.name = "asus::touchpad";
	asus->tpd_led.brightness_set = tpd_led_set;
	asus->tpd_led.brightness_get = tpd_led_get;
	asus->tpd_led.max_brightness = 1;

	rv = led_classdev_register(&asus->platform_device->dev,
				   &asus->tpd_led);
	if (rv) {
		destroy_workqueue(asus->led_workqueue);
		return rv;
	}

	return 0;
}

/* Tear down whatever asus_wmi_led_init() managed to set up. */
static void asus_wmi_led_exit(struct asus_wmi *asus)
{
	if (asus->tpd_led.dev)
		led_classdev_unregister(&asus->tpd_led);
	if (asus->led_workqueue)
		destroy_workqueue(asus->led_workqueue);
}

/*
 * PCI hotplug (for wlan rfkill)
 */

/* True when the WLAN device reports rfkill-blocked; false on query error. */
static bool asus_wlan_rfkill_blocked(struct asus_wmi *asus)
{
	int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN);

	if (result < 0)
		return false;
	return !result;
}

/*
 * Re-synchronise the wifi PCI device with the rfkill state (scan it in
 * when unblocked, remove it when blocked — body continues below).
 */
static void asus_rfkill_hotplug(struct asus_wmi *asus)
{
	struct pci_dev *dev;
	struct pci_bus *bus;
	bool blocked;
	bool absent;
	u32 l;

	mutex_lock(&asus->wmi_lock);
	blocked =
asus_wlan_rfkill_blocked(asus); mutex_unlock(&asus->wmi_lock); mutex_lock(&asus->hotplug_lock); if (asus->wlan.rfkill) rfkill_set_sw_state(asus->wlan.rfkill, blocked); if (asus->hotplug_slot) { bus = pci_find_bus(0, 1); if (!bus) { pr_warn("Unable to find PCI bus 1?\n"); goto out_unlock; } if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) { pr_err("Unable to read PCI config space?\n"); goto out_unlock; } absent = (l == 0xffffffff); if (blocked != absent) { pr_warn("BIOS says wireless lan is %s, " "but the pci device is %s\n", blocked ? "blocked" : "unblocked", absent ? "absent" : "present"); pr_warn("skipped wireless hotplug as probably " "inappropriate for this model\n"); goto out_unlock; } if (!blocked) { dev = pci_get_slot(bus, 0); if (dev) { /* Device already present */ pci_dev_put(dev); goto out_unlock; } dev = pci_scan_single_device(bus, 0); if (dev) { pci_bus_assign_resources(bus); if (pci_bus_add_device(dev)) pr_err("Unable to hotplug wifi\n"); } } else { dev = pci_get_slot(bus, 0); if (dev) { pci_remove_bus_device(dev); pci_dev_put(dev); } } } out_unlock: mutex_unlock(&asus->hotplug_lock); } static void asus_rfkill_notify(acpi_handle handle, u32 event, void *data) { struct asus_wmi *asus = data; if (event != ACPI_NOTIFY_BUS_CHECK) return; /* * We can't call directly asus_rfkill_hotplug because most * of the time WMBC is still being executed and not reetrant. * There is currently no way to tell ACPICA that we want this * method to be serialized, we schedule a asus_rfkill_hotplug * call later, in a safer context. 
*/ queue_work(asus->hotplug_workqueue, &asus->hotplug_work); } static int asus_register_rfkill_notifier(struct asus_wmi *asus, char *node) { acpi_status status; acpi_handle handle; status = acpi_get_handle(NULL, node, &handle); if (ACPI_SUCCESS(status)) { status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, asus_rfkill_notify, asus); if (ACPI_FAILURE(status)) pr_warn("Failed to register notify on %s\n", node); } else return -ENODEV; return 0; } static void asus_unregister_rfkill_notifier(struct asus_wmi *asus, char *node) { acpi_status status = AE_OK; acpi_handle handle; status = acpi_get_handle(NULL, node, &handle); if (ACPI_SUCCESS(status)) { status = acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY, asus_rfkill_notify); if (ACPI_FAILURE(status)) pr_err("Error removing rfkill notify handler %s\n", node); } } static int asus_get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct asus_wmi *asus = hotplug_slot->private; int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN); if (result < 0) return result; *value = !!result; return 0; } static void asus_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot) { kfree(hotplug_slot->info); kfree(hotplug_slot); } static struct hotplug_slot_ops asus_hotplug_slot_ops = { .owner = THIS_MODULE, .get_adapter_status = asus_get_adapter_status, .get_power_status = asus_get_adapter_status, }; static void asus_hotplug_work(struct work_struct *work) { struct asus_wmi *asus; asus = container_of(work, struct asus_wmi, hotplug_work); asus_rfkill_hotplug(asus); } static int asus_setup_pci_hotplug(struct asus_wmi *asus) { int ret = -ENOMEM; struct pci_bus *bus = pci_find_bus(0, 1); if (!bus) { pr_err("Unable to find wifi PCI bus\n"); return -ENODEV; } asus->hotplug_workqueue = create_singlethread_workqueue("hotplug_workqueue"); if (!asus->hotplug_workqueue) goto error_workqueue; INIT_WORK(&asus->hotplug_work, asus_hotplug_work); asus->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), 
GFP_KERNEL); if (!asus->hotplug_slot) goto error_slot; asus->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL); if (!asus->hotplug_slot->info) goto error_info; asus->hotplug_slot->private = asus; asus->hotplug_slot->release = &asus_cleanup_pci_hotplug; asus->hotplug_slot->ops = &asus_hotplug_slot_ops; asus_get_adapter_status(asus->hotplug_slot, &asus->hotplug_slot->info->adapter_status); ret = pci_hp_register(asus->hotplug_slot, bus, 0, "asus-wifi"); if (ret) { pr_err("Unable to register hotplug slot - %d\n", ret); goto error_register; } return 0; error_register: kfree(asus->hotplug_slot->info); error_info: kfree(asus->hotplug_slot); asus->hotplug_slot = NULL; error_slot: destroy_workqueue(asus->hotplug_workqueue); error_workqueue: return ret; } /* * Rfkill devices */ static int asus_rfkill_set(void *data, bool blocked) { struct asus_rfkill *priv = data; u32 ctrl_param = !blocked; return asus_wmi_set_devstate(priv->dev_id, ctrl_param, NULL); } static void asus_rfkill_query(struct rfkill *rfkill, void *data) { struct asus_rfkill *priv = data; int result; result = asus_wmi_get_devstate_simple(priv->asus, priv->dev_id); if (result < 0) return; rfkill_set_sw_state(priv->rfkill, !result); } static int asus_rfkill_wlan_set(void *data, bool blocked) { struct asus_rfkill *priv = data; struct asus_wmi *asus = priv->asus; int ret; /* * This handler is enabled only if hotplug is enabled. 
* In this case, the asus_wmi_set_devstate() will * trigger a wmi notification and we need to wait * this call to finish before being able to call * any wmi method */ mutex_lock(&asus->wmi_lock); ret = asus_rfkill_set(data, blocked); mutex_unlock(&asus->wmi_lock); return ret; } static const struct rfkill_ops asus_rfkill_wlan_ops = { .set_block = asus_rfkill_wlan_set, .query = asus_rfkill_query, }; static const struct rfkill_ops asus_rfkill_ops = { .set_block = asus_rfkill_set, .query = asus_rfkill_query, }; static int asus_new_rfkill(struct asus_wmi *asus, struct asus_rfkill *arfkill, const char *name, enum rfkill_type type, int dev_id) { int result = asus_wmi_get_devstate_simple(asus, dev_id); struct rfkill **rfkill = &arfkill->rfkill; if (result < 0) return result; arfkill->dev_id = dev_id; arfkill->asus = asus; if (dev_id == ASUS_WMI_DEVID_WLAN && asus->driver->hotplug_wireless) *rfkill = rfkill_alloc(name, &asus->platform_device->dev, type, &asus_rfkill_wlan_ops, arfkill); else *rfkill = rfkill_alloc(name, &asus->platform_device->dev, type, &asus_rfkill_ops, arfkill); if (!*rfkill) return -EINVAL; rfkill_init_sw_state(*rfkill, !result); result = rfkill_register(*rfkill); if (result) { rfkill_destroy(*rfkill); *rfkill = NULL; return result; } return 0; } static void asus_wmi_rfkill_exit(struct asus_wmi *asus) { asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5"); asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6"); asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7"); if (asus->wlan.rfkill) { rfkill_unregister(asus->wlan.rfkill); rfkill_destroy(asus->wlan.rfkill); asus->wlan.rfkill = NULL; } /* * Refresh pci hotplug in case the rfkill state was changed after * asus_unregister_rfkill_notifier() */ asus_rfkill_hotplug(asus); if (asus->hotplug_slot) pci_hp_deregister(asus->hotplug_slot); if (asus->hotplug_workqueue) destroy_workqueue(asus->hotplug_workqueue); if (asus->bluetooth.rfkill) { rfkill_unregister(asus->bluetooth.rfkill); 
rfkill_destroy(asus->bluetooth.rfkill); asus->bluetooth.rfkill = NULL; } if (asus->wimax.rfkill) { rfkill_unregister(asus->wimax.rfkill); rfkill_destroy(asus->wimax.rfkill); asus->wimax.rfkill = NULL; } if (asus->wwan3g.rfkill) { rfkill_unregister(asus->wwan3g.rfkill); rfkill_destroy(asus->wwan3g.rfkill); asus->wwan3g.rfkill = NULL; } } static int asus_wmi_rfkill_init(struct asus_wmi *asus) { int result = 0; mutex_init(&asus->hotplug_lock); mutex_init(&asus->wmi_lock); result = asus_new_rfkill(asus, &asus->wlan, "asus-wlan", RFKILL_TYPE_WLAN, ASUS_WMI_DEVID_WLAN); if (result && result != -ENODEV) goto exit; result = asus_new_rfkill(asus, &asus->bluetooth, "asus-bluetooth", RFKILL_TYPE_BLUETOOTH, ASUS_WMI_DEVID_BLUETOOTH); if (result && result != -ENODEV) goto exit; result = asus_new_rfkill(asus, &asus->wimax, "asus-wimax", RFKILL_TYPE_WIMAX, ASUS_WMI_DEVID_WIMAX); if (result && result != -ENODEV) goto exit; result = asus_new_rfkill(asus, &asus->wwan3g, "asus-wwan3g", RFKILL_TYPE_WWAN, ASUS_WMI_DEVID_WWAN3G); if (result && result != -ENODEV) goto exit; if (!asus->driver->hotplug_wireless) goto exit; result = asus_setup_pci_hotplug(asus); /* * If we get -EBUSY then something else is handling the PCI hotplug - * don't fail in this case */ if (result == -EBUSY) result = 0; asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P5"); asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P6"); asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P7"); /* * Refresh pci hotplug in case the rfkill state was changed during * setup. 
*/ asus_rfkill_hotplug(asus); exit: if (result && result != -ENODEV) asus_wmi_rfkill_exit(asus); if (result == -ENODEV) result = 0; return result; } /* * Hwmon device */ static ssize_t asus_hwmon_pwm1(struct device *dev, struct device_attribute *attr, char *buf) { struct asus_wmi *asus = dev_get_drvdata(dev); u32 value; int err; err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_CTRL, &value); if (err < 0) return err; value &= 0xFF; if (value == 1) /* Low Speed */ value = 85; else if (value == 2) value = 170; else if (value == 3) value = 255; else if (value != 0) { pr_err("Unknown fan speed %#x", value); value = -1; } return sprintf(buf, "%d\n", value); } static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0); static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "asus\n"); } static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0); static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_pwm1.dev_attr.attr, &sensor_dev_attr_name.dev_attr.attr, NULL }; static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { struct device *dev = container_of(kobj, struct device, kobj); struct platform_device *pdev = to_platform_device(dev->parent); struct asus_wmi *asus = platform_get_drvdata(pdev); bool ok = true; int dev_id = -1; u32 value = ASUS_WMI_UNSUPPORTED_METHOD; if (attr == &sensor_dev_attr_pwm1.dev_attr.attr) dev_id = ASUS_WMI_DEVID_FAN_CTRL; if (dev_id != -1) { int err = asus_wmi_get_devstate(asus, dev_id, &value); if (err < 0) return err; } if (dev_id == ASUS_WMI_DEVID_FAN_CTRL) { /* * We need to find a better way, probably using sfun, * bits or spec ... * Currently we disable it if: * - ASUS_WMI_UNSUPPORTED_METHOD is returned * - reverved bits are non-zero * - sfun and presence bit are not set */ if (value == ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000 || (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT))) ok = false; } return ok ? 
attr->mode : 0; } static struct attribute_group hwmon_attribute_group = { .is_visible = asus_hwmon_sysfs_is_visible, .attrs = hwmon_attributes }; static void asus_wmi_hwmon_exit(struct asus_wmi *asus) { struct device *hwmon; hwmon = asus->hwmon_device; if (!hwmon) return; sysfs_remove_group(&hwmon->kobj, &hwmon_attribute_group); hwmon_device_unregister(hwmon); asus->hwmon_device = NULL; } static int asus_wmi_hwmon_init(struct asus_wmi *asus) { struct device *hwmon; int result; hwmon = hwmon_device_register(&asus->platform_device->dev); if (IS_ERR(hwmon)) { pr_err("Could not register asus hwmon device\n"); return PTR_ERR(hwmon); } dev_set_drvdata(hwmon, asus); asus->hwmon_device = hwmon; result = sysfs_create_group(&hwmon->kobj, &hwmon_attribute_group); if (result) asus_wmi_hwmon_exit(asus); return result; } /* * Backlight */ static int read_backlight_power(struct asus_wmi *asus) { int ret = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_BACKLIGHT); if (ret < 0) return ret; return ret ? 
FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; } static int read_brightness_max(struct asus_wmi *asus) { u32 retval; int err; err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_BRIGHTNESS, &retval); if (err < 0) return err; retval = retval & ASUS_WMI_DSTS_MAX_BRIGTH_MASK; retval >>= 8; if (!retval) return -ENODEV; return retval; } static int read_brightness(struct backlight_device *bd) { struct asus_wmi *asus = bl_get_data(bd); u32 retval; int err; err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_BRIGHTNESS, &retval); if (err < 0) return err; return retval & ASUS_WMI_DSTS_BRIGHTNESS_MASK; } static int update_bl_status(struct backlight_device *bd) { struct asus_wmi *asus = bl_get_data(bd); u32 ctrl_param; int power, err; ctrl_param = bd->props.brightness; err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BRIGHTNESS, ctrl_param, NULL); if (err < 0) return err; power = read_backlight_power(asus); if (power != -ENODEV && bd->props.power != power) { ctrl_param = !!(bd->props.power == FB_BLANK_UNBLANK); err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, ctrl_param, NULL); } return err; } static const struct backlight_ops asus_wmi_bl_ops = { .get_brightness = read_brightness, .update_status = update_bl_status, }; static int asus_wmi_backlight_notify(struct asus_wmi *asus, int code) { struct backlight_device *bd = asus->backlight_device; int old = bd->props.brightness; int new = old; if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) new = code - NOTIFY_BRNUP_MIN + 1; else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX) new = code - NOTIFY_BRNDOWN_MIN; bd->props.brightness = new; backlight_update_status(bd); backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY); return old; } static int asus_wmi_backlight_init(struct asus_wmi *asus) { struct backlight_device *bd; struct backlight_properties props; int max; int power; max = read_brightness_max(asus); if (max == -ENODEV) max = 0; else if (max < 0) return max; power = read_backlight_power(asus); if (power == -ENODEV) 
power = FB_BLANK_UNBLANK; else if (power < 0) return power; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = max; bd = backlight_device_register(asus->driver->name, &asus->platform_device->dev, asus, &asus_wmi_bl_ops, &props); if (IS_ERR(bd)) { pr_err("Could not register backlight device\n"); return PTR_ERR(bd); } asus->backlight_device = bd; bd->props.brightness = read_brightness(bd); bd->props.power = power; backlight_update_status(bd); return 0; } static void asus_wmi_backlight_exit(struct asus_wmi *asus) { if (asus->backlight_device) backlight_device_unregister(asus->backlight_device); asus->backlight_device = NULL; } static void asus_wmi_notify(u32 value, void *context) { struct asus_wmi *asus = context; struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; acpi_status status; int code; int orig_code; status = wmi_get_event_data(value, &response); if (status != AE_OK) { pr_err("bad event status 0x%x\n", status); return; } obj = (union acpi_object *)response.pointer; if (!obj || obj->type != ACPI_TYPE_INTEGER) goto exit; code = obj->integer.value; orig_code = code; if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) code = NOTIFY_BRNUP_MIN; else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX) code = NOTIFY_BRNDOWN_MIN; if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) { if (!acpi_video_backlight_support()) asus_wmi_backlight_notify(asus, orig_code); } else if (!sparse_keymap_report_event(asus->inputdev, code, 1, true)) pr_info("Unknown key %x pressed\n", code); exit: kfree(obj); } /* * Sys helpers */ static int parse_arg(const char *buf, unsigned long count, int *val) { if (!count) return 0; if (sscanf(buf, "%i", val) != 1) return -EINVAL; return count; } static ssize_t store_sys_wmi(struct asus_wmi *asus, int devid, const char *buf, size_t count) { u32 retval; int rv, err, value; value = asus_wmi_get_devstate_simple(asus, devid); if 
(value == -ENODEV) /* Check device presence */ return value; rv = parse_arg(buf, count, &value); err = asus_wmi_set_devstate(devid, value, &retval); if (err < 0) return err; return rv; } static ssize_t show_sys_wmi(struct asus_wmi *asus, int devid, char *buf) { int value = asus_wmi_get_devstate_simple(asus, devid); if (value < 0) return value; return sprintf(buf, "%d\n", value); } #define ASUS_WMI_CREATE_DEVICE_ATTR(_name, _mode, _cm) \ static ssize_t show_##_name(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct asus_wmi *asus = dev_get_drvdata(dev); \ \ return show_sys_wmi(asus, _cm, buf); \ } \ static ssize_t store_##_name(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct asus_wmi *asus = dev_get_drvdata(dev); \ \ return store_sys_wmi(asus, _cm, buf, count); \ } \ static struct device_attribute dev_attr_##_name = { \ .attr = { \ .name = __stringify(_name), \ .mode = _mode }, \ .show = show_##_name, \ .store = store_##_name, \ } ASUS_WMI_CREATE_DEVICE_ATTR(touchpad, 0644, ASUS_WMI_DEVID_TOUCHPAD); ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA); ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER); static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int value, rv; if (!count || sscanf(buf, "%i", &value) != 1) return -EINVAL; if (value < 0 || value > 2) return -EINVAL; rv = asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL); if (rv < 0) return rv; return count; } static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv); static struct attribute *platform_attributes[] = { &dev_attr_cpufv.attr, &dev_attr_camera.attr, &dev_attr_cardr.attr, &dev_attr_touchpad.attr, NULL }; static mode_t asus_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { struct device *dev = container_of(kobj, struct device, kobj); struct platform_device *pdev = to_platform_device(dev); 
struct asus_wmi *asus = platform_get_drvdata(pdev); bool ok = true; int devid = -1; if (attr == &dev_attr_camera.attr) devid = ASUS_WMI_DEVID_CAMERA; else if (attr == &dev_attr_cardr.attr) devid = ASUS_WMI_DEVID_CARDREADER; else if (attr == &dev_attr_touchpad.attr) devid = ASUS_WMI_DEVID_TOUCHPAD; if (devid != -1) ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0); return ok ? attr->mode : 0; } static struct attribute_group platform_attribute_group = { .is_visible = asus_sysfs_is_visible, .attrs = platform_attributes }; static void asus_wmi_sysfs_exit(struct platform_device *device) { sysfs_remove_group(&device->dev.kobj, &platform_attribute_group); } static int asus_wmi_sysfs_init(struct platform_device *device) { return sysfs_create_group(&device->dev.kobj, &platform_attribute_group); } /* * Platform device */ static int asus_wmi_platform_init(struct asus_wmi *asus) { int rv; /* INIT enable hotkeys on some models */ if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_INIT, 0, 0, &rv)) pr_info("Initialization: %#x", rv); /* We don't know yet what to do with this version... */ if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SPEC, 0, 0x9, &rv)) { pr_info("BIOS WMI version: %d.%d", rv >> 8, rv & 0xFF); asus->spec = rv; } /* * The SFUN method probably allows the original driver to get the list * of features supported by a given model. For now, 0x0100 or 0x0800 * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card. * The significance of others is yet to be found. */ if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SFUN, 0, 0, &rv)) { pr_info("SFUN value: %#x", rv); asus->sfun = rv; } /* * Eee PC and Notebooks seems to have different method_id for DSTS, * but it may also be related to the BIOS's SPEC. * Note, on most Eeepc, there is no way to check if a method exist * or note, while on notebooks, they returns 0xFFFFFFFE on failure, * but once again, SPEC may probably be used for that kind of things. 
*/ if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, 0, 0, NULL)) asus->dsts_id = ASUS_WMI_METHODID_DSTS; else if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2, 0, 0, NULL)) asus->dsts_id = ASUS_WMI_METHODID_DSTS2; if (!asus->dsts_id) { pr_err("Can't find DSTS"); return -ENODEV; } return asus_wmi_sysfs_init(asus->platform_device); } static void asus_wmi_platform_exit(struct asus_wmi *asus) { asus_wmi_sysfs_exit(asus->platform_device); } /* * debugfs */ struct asus_wmi_debugfs_node { struct asus_wmi *asus; char *name; int (*show) (struct seq_file *m, void *data); }; static int show_dsts(struct seq_file *m, void *data) { struct asus_wmi *asus = m->private; int err; u32 retval = -1; err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval); if (err < 0) return err; seq_printf(m, "DSTS(%#x) = %#x\n", asus->debug.dev_id, retval); return 0; } static int show_devs(struct seq_file *m, void *data) { struct asus_wmi *asus = m->private; int err; u32 retval = -1; err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param, &retval); if (err < 0) return err; seq_printf(m, "DEVS(%#x, %#x) = %#x\n", asus->debug.dev_id, asus->debug.ctrl_param, retval); return 0; } static int show_call(struct seq_file *m, void *data) { struct asus_wmi *asus = m->private; struct bios_args args = { .arg0 = asus->debug.dev_id, .arg1 = asus->debug.ctrl_param, }; struct acpi_buffer input = { (acpi_size) sizeof(args), &args }; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; acpi_status status; status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 1, asus->debug.method_id, &input, &output); if (ACPI_FAILURE(status)) return -EIO; obj = (union acpi_object *)output.pointer; if (obj && obj->type == ACPI_TYPE_INTEGER) seq_printf(m, "%#x(%#x, %#x) = %#x\n", asus->debug.method_id, asus->debug.dev_id, asus->debug.ctrl_param, (u32) obj->integer.value); else seq_printf(m, "%#x(%#x, %#x) = t:%d\n", asus->debug.method_id, asus->debug.dev_id, 
asus->debug.ctrl_param, obj ? obj->type : -1); kfree(obj); return 0; } static struct asus_wmi_debugfs_node asus_wmi_debug_files[] = { {NULL, "devs", show_devs}, {NULL, "dsts", show_dsts}, {NULL, "call", show_call}, }; static int asus_wmi_debugfs_open(struct inode *inode, struct file *file) { struct asus_wmi_debugfs_node *node = inode->i_private; return single_open(file, node->show, node->asus); } static const struct file_operations asus_wmi_debugfs_io_ops = { .owner = THIS_MODULE, .open = asus_wmi_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void asus_wmi_debugfs_exit(struct asus_wmi *asus) { debugfs_remove_recursive(asus->debug.root); } static int asus_wmi_debugfs_init(struct asus_wmi *asus) { struct dentry *dent; int i; asus->debug.root = debugfs_create_dir(asus->driver->name, NULL); if (!asus->debug.root) { pr_err("failed to create debugfs directory"); goto error_debugfs; } dent = debugfs_create_x32("method_id", S_IRUGO | S_IWUSR, asus->debug.root, &asus->debug.method_id); if (!dent) goto error_debugfs; dent = debugfs_create_x32("dev_id", S_IRUGO | S_IWUSR, asus->debug.root, &asus->debug.dev_id); if (!dent) goto error_debugfs; dent = debugfs_create_x32("ctrl_param", S_IRUGO | S_IWUSR, asus->debug.root, &asus->debug.ctrl_param); if (!dent) goto error_debugfs; for (i = 0; i < ARRAY_SIZE(asus_wmi_debug_files); i++) { struct asus_wmi_debugfs_node *node = &asus_wmi_debug_files[i]; node->asus = asus; dent = debugfs_create_file(node->name, S_IFREG | S_IRUGO, asus->debug.root, node, &asus_wmi_debugfs_io_ops); if (!dent) { pr_err("failed to create debug file: %s\n", node->name); goto error_debugfs; } } return 0; error_debugfs: asus_wmi_debugfs_exit(asus); return -ENOMEM; } /* * WMI Driver */ static int asus_wmi_add(struct platform_device *pdev) { struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver); struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv); struct asus_wmi *asus; acpi_status status; int err; asus 
= kzalloc(sizeof(struct asus_wmi), GFP_KERNEL); if (!asus) return -ENOMEM; asus->driver = wdrv; asus->platform_device = pdev; wdrv->platform_device = pdev; platform_set_drvdata(asus->platform_device, asus); if (wdrv->quirks) wdrv->quirks(asus->driver); err = asus_wmi_platform_init(asus); if (err) goto fail_platform; err = asus_wmi_input_init(asus); if (err) goto fail_input; err = asus_wmi_hwmon_init(asus); if (err) goto fail_hwmon; err = asus_wmi_led_init(asus); if (err) goto fail_leds; err = asus_wmi_rfkill_init(asus); if (err) goto fail_rfkill; if (!acpi_video_backlight_support()) { err = asus_wmi_backlight_init(asus); if (err && err != -ENODEV) goto fail_backlight; } else pr_info("Backlight controlled by ACPI video driver\n"); status = wmi_install_notify_handler(asus->driver->event_guid, asus_wmi_notify, asus); if (ACPI_FAILURE(status)) { pr_err("Unable to register notify handler - %d\n", status); err = -ENODEV; goto fail_wmi_handler; } err = asus_wmi_debugfs_init(asus); if (err) goto fail_debugfs; return 0; fail_debugfs: wmi_remove_notify_handler(asus->driver->event_guid); fail_wmi_handler: asus_wmi_backlight_exit(asus); fail_backlight: asus_wmi_rfkill_exit(asus); fail_rfkill: asus_wmi_led_exit(asus); fail_leds: asus_wmi_hwmon_exit(asus); fail_hwmon: asus_wmi_input_exit(asus); fail_input: asus_wmi_platform_exit(asus); fail_platform: kfree(asus); return err; } static int asus_wmi_remove(struct platform_device *device) { struct asus_wmi *asus; asus = platform_get_drvdata(device); wmi_remove_notify_handler(asus->driver->event_guid); asus_wmi_backlight_exit(asus); asus_wmi_input_exit(asus); asus_wmi_hwmon_exit(asus); asus_wmi_led_exit(asus); asus_wmi_rfkill_exit(asus); asus_wmi_debugfs_exit(asus); asus_wmi_platform_exit(asus); kfree(asus); return 0; } /* * Platform driver - hibernate/resume callbacks */ static int asus_hotk_thaw(struct device *device) { struct asus_wmi *asus = dev_get_drvdata(device); if (asus->wlan.rfkill) { bool wlan; /* * Work around bios bug - 
acpi _PTS turns off the wireless led * during suspend. Normally it restores it on resume, but * we should kick it ourselves in case hibernation is aborted. */ wlan = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN); asus_wmi_set_devstate(ASUS_WMI_DEVID_WLAN, wlan, NULL); } return 0; } static int asus_hotk_restore(struct device *device) { struct asus_wmi *asus = dev_get_drvdata(device); int bl; /* Refresh both wlan rfkill state and pci hotplug */ if (asus->wlan.rfkill) asus_rfkill_hotplug(asus); if (asus->bluetooth.rfkill) { bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_BLUETOOTH); rfkill_set_sw_state(asus->bluetooth.rfkill, bl); } if (asus->wimax.rfkill) { bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WIMAX); rfkill_set_sw_state(asus->wimax.rfkill, bl); } if (asus->wwan3g.rfkill) { bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WWAN3G); rfkill_set_sw_state(asus->wwan3g.rfkill, bl); } return 0; } static const struct dev_pm_ops asus_pm_ops = { .thaw = asus_hotk_thaw, .restore = asus_hotk_restore, }; static int asus_wmi_probe(struct platform_device *pdev) { struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver); struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv); int ret; if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) { pr_warn("Management GUID not found\n"); return -ENODEV; } if (wdrv->event_guid && !wmi_has_guid(wdrv->event_guid)) { pr_warn("Event GUID not found\n"); return -ENODEV; } if (wdrv->probe) { ret = wdrv->probe(pdev); if (ret) return ret; } return asus_wmi_add(pdev); } static bool used; int asus_wmi_register_driver(struct asus_wmi_driver *driver) { struct platform_driver *platform_driver; struct platform_device *platform_device; if (used) return -EBUSY; platform_driver = &driver->platform_driver; platform_driver->remove = asus_wmi_remove; platform_driver->driver.owner = driver->owner; platform_driver->driver.name = driver->name; platform_driver->driver.pm = &asus_pm_ops; platform_device = 
platform_create_bundle(platform_driver, asus_wmi_probe, NULL, 0, NULL, 0); if (IS_ERR(platform_device)) return PTR_ERR(platform_device); used = true; return 0; } EXPORT_SYMBOL_GPL(asus_wmi_register_driver); void asus_wmi_unregister_driver(struct asus_wmi_driver *driver) { platform_device_unregister(driver->platform_device); platform_driver_unregister(&driver->platform_driver); used = false; } EXPORT_SYMBOL_GPL(asus_wmi_unregister_driver); static int __init asus_wmi_init(void) { if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) { pr_info("Asus Management GUID not found"); return -ENODEV; } pr_info("ASUS WMI generic driver loaded"); return 0; } static void __exit asus_wmi_exit(void) { pr_info("ASUS WMI generic driver unloaded"); } module_init(asus_wmi_init); module_exit(asus_wmi_exit);
gpl-2.0
voodik/android_kernel_hardkernel_odroidxu3
arch/arm/mach-exynos/setup-spi.c
2345
1212
/* linux/arch/arm/mach-exynos4/setup-spi.c * * Copyright (C) 2011 Samsung Electronics Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <plat/gpio-cfg.h> #ifdef CONFIG_S3C64XX_DEV_SPI0 int s3c64xx_spi0_cfg_gpio(void) { s3c_gpio_cfgpin(EXYNOS4_GPB(0), S3C_GPIO_SFN(2)); s3c_gpio_setpull(EXYNOS4_GPB(0), S3C_GPIO_PULL_UP); s3c_gpio_cfgall_range(EXYNOS4_GPB(2), 2, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); return 0; } #endif #ifdef CONFIG_S3C64XX_DEV_SPI1 int s3c64xx_spi1_cfg_gpio(void) { s3c_gpio_cfgpin(EXYNOS4_GPB(4), S3C_GPIO_SFN(2)); s3c_gpio_setpull(EXYNOS4_GPB(4), S3C_GPIO_PULL_UP); s3c_gpio_cfgall_range(EXYNOS4_GPB(6), 2, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); return 0; } #endif #ifdef CONFIG_S3C64XX_DEV_SPI2 int s3c64xx_spi2_cfg_gpio(void) { s3c_gpio_cfgpin(EXYNOS4_GPC1(1), S3C_GPIO_SFN(5)); s3c_gpio_setpull(EXYNOS4_GPC1(1), S3C_GPIO_PULL_UP); s3c_gpio_cfgall_range(EXYNOS4_GPC1(3), 2, S3C_GPIO_SFN(5), S3C_GPIO_PULL_UP); return 0; } #endif
gpl-2.0
VentureROM-L/android_kernel_lge_hammerhead
drivers/input/misc/dm355evm_keys.c
4905
8110
/* * dm355evm_keys.c - support buttons and IR remote on DM355 EVM board * * Copyright (c) 2008 by David Brownell * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/i2c/dm355evm_msp.h> #include <linux/module.h> /* * The MSP430 firmware on the DM355 EVM monitors on-board pushbuttons * and an IR receptor used for the remote control. When any key is * pressed, or its autorepeat kicks in, an event is sent. This driver * read those events from the small (32 event) queue and reports them. * * Note that physically there can only be one of these devices. * * This driver was tested with firmware revision A4. */ struct dm355evm_keys { struct input_dev *input; struct device *dev; int irq; }; /* These initial keycodes can be remapped */ static const struct key_entry dm355evm_keys[] = { /* * Pushbuttons on the EVM board ... note that the labels for these * are SW10/SW11/etc on the PC board. The left/right orientation * comes only from the firmware's documentation, and presumes the * power connector is immediately in front of you and the IR sensor * is to the right. (That is, rotate the board counter-clockwise * by 90 degrees from the SW10/etc and "DM355 EVM" labels.) */ { KE_KEY, 0x00d8, { KEY_OK } }, /* SW12 */ { KE_KEY, 0x00b8, { KEY_UP } }, /* SW13 */ { KE_KEY, 0x00e8, { KEY_DOWN } }, /* SW11 */ { KE_KEY, 0x0078, { KEY_LEFT } }, /* SW14 */ { KE_KEY, 0x00f0, { KEY_RIGHT } }, /* SW10 */ /* * IR buttons ... codes assigned to match the universal remote * provided with the EVM (Philips PM4S) using DVD code 0020. 
* * These event codes match firmware documentation, but other * remote controls could easily send more RC5-encoded events. * The PM4S manual was used in several cases to help select * a keycode reflecting the intended usage. * * RC5 codes are 14 bits, with two start bits (0x3 prefix) * and a toggle bit (masked out below). */ { KE_KEY, 0x300c, { KEY_POWER } }, /* NOTE: docs omit this */ { KE_KEY, 0x3000, { KEY_NUMERIC_0 } }, { KE_KEY, 0x3001, { KEY_NUMERIC_1 } }, { KE_KEY, 0x3002, { KEY_NUMERIC_2 } }, { KE_KEY, 0x3003, { KEY_NUMERIC_3 } }, { KE_KEY, 0x3004, { KEY_NUMERIC_4 } }, { KE_KEY, 0x3005, { KEY_NUMERIC_5 } }, { KE_KEY, 0x3006, { KEY_NUMERIC_6 } }, { KE_KEY, 0x3007, { KEY_NUMERIC_7 } }, { KE_KEY, 0x3008, { KEY_NUMERIC_8 } }, { KE_KEY, 0x3009, { KEY_NUMERIC_9 } }, { KE_KEY, 0x3022, { KEY_ENTER } }, { KE_KEY, 0x30ec, { KEY_MODE } }, /* "tv/vcr/..." */ { KE_KEY, 0x300f, { KEY_SELECT } }, /* "info" */ { KE_KEY, 0x3020, { KEY_CHANNELUP } }, /* "up" */ { KE_KEY, 0x302e, { KEY_MENU } }, /* "in/out" */ { KE_KEY, 0x3011, { KEY_VOLUMEDOWN } }, /* "left" */ { KE_KEY, 0x300d, { KEY_MUTE } }, /* "ok" */ { KE_KEY, 0x3010, { KEY_VOLUMEUP } }, /* "right" */ { KE_KEY, 0x301e, { KEY_SUBTITLE } }, /* "cc" */ { KE_KEY, 0x3021, { KEY_CHANNELDOWN } },/* "down" */ { KE_KEY, 0x3022, { KEY_PREVIOUS } }, { KE_KEY, 0x3026, { KEY_SLEEP } }, { KE_KEY, 0x3172, { KEY_REWIND } }, /* NOTE: docs wrongly say 0x30ca */ { KE_KEY, 0x3175, { KEY_PLAY } }, { KE_KEY, 0x3174, { KEY_FASTFORWARD } }, { KE_KEY, 0x3177, { KEY_RECORD } }, { KE_KEY, 0x3176, { KEY_STOP } }, { KE_KEY, 0x3169, { KEY_PAUSE } }, }; /* * Because we communicate with the MSP430 using I2C, and all I2C calls * in Linux sleep, we use a threaded IRQ handler. The IRQ itself is * active low, but we go through the GPIO controller so we can trigger * on falling edges and not worry about enabling/disabling the IRQ in * the keypress handling path. 
*/ static irqreturn_t dm355evm_keys_irq(int irq, void *_keys) { static u16 last_event; struct dm355evm_keys *keys = _keys; const struct key_entry *ke; unsigned int keycode; int status; u16 event; /* For simplicity we ignore INPUT_COUNT and just read * events until we get the "queue empty" indicator. * Reading INPUT_LOW decrements the count. */ for (;;) { status = dm355evm_msp_read(DM355EVM_MSP_INPUT_HIGH); if (status < 0) { dev_dbg(keys->dev, "input high err %d\n", status); break; } event = status << 8; status = dm355evm_msp_read(DM355EVM_MSP_INPUT_LOW); if (status < 0) { dev_dbg(keys->dev, "input low err %d\n", status); break; } event |= status; if (event == 0xdead) break; /* Press and release a button: two events, same code. * Press and hold (autorepeat), then release: N events * (N > 2), same code. For RC5 buttons the toggle bits * distinguish (for example) "1-autorepeat" from "1 1"; * but PCB buttons don't support that bit. * * So we must synthesize release events. We do that by * mapping events to a press/release event pair; then * to avoid adding extra events, skip the second event * of each pair. */ if (event == last_event) { last_event = 0; continue; } last_event = event; /* ignore the RC5 toggle bit */ event &= ~0x0800; /* find the key, or report it as unknown */ ke = sparse_keymap_entry_from_scancode(keys->input, event); keycode = ke ? 
ke->keycode : KEY_UNKNOWN; dev_dbg(keys->dev, "input event 0x%04x--> keycode %d\n", event, keycode); /* report press + release */ input_report_key(keys->input, keycode, 1); input_sync(keys->input); input_report_key(keys->input, keycode, 0); input_sync(keys->input); } return IRQ_HANDLED; } /*----------------------------------------------------------------------*/ static int __devinit dm355evm_keys_probe(struct platform_device *pdev) { struct dm355evm_keys *keys; struct input_dev *input; int status; /* allocate instance struct and input dev */ keys = kzalloc(sizeof *keys, GFP_KERNEL); input = input_allocate_device(); if (!keys || !input) { status = -ENOMEM; goto fail1; } keys->dev = &pdev->dev; keys->input = input; /* set up "threaded IRQ handler" */ status = platform_get_irq(pdev, 0); if (status < 0) goto fail1; keys->irq = status; input_set_drvdata(input, keys); input->name = "DM355 EVM Controls"; input->phys = "dm355evm/input0"; input->dev.parent = &pdev->dev; input->id.bustype = BUS_I2C; input->id.product = 0x0355; input->id.version = dm355evm_msp_read(DM355EVM_MSP_FIRMREV); status = sparse_keymap_setup(input, dm355evm_keys, NULL); if (status) goto fail1; /* REVISIT: flush the event queue? */ status = request_threaded_irq(keys->irq, NULL, dm355evm_keys_irq, IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), keys); if (status < 0) goto fail2; /* register */ status = input_register_device(input); if (status < 0) goto fail3; platform_set_drvdata(pdev, keys); return 0; fail3: free_irq(keys->irq, keys); fail2: sparse_keymap_free(input); fail1: input_free_device(input); kfree(keys); dev_err(&pdev->dev, "can't register, err %d\n", status); return status; } static int __devexit dm355evm_keys_remove(struct platform_device *pdev) { struct dm355evm_keys *keys = platform_get_drvdata(pdev); free_irq(keys->irq, keys); sparse_keymap_free(keys->input); input_unregister_device(keys->input); kfree(keys); return 0; } /* REVISIT: add suspend/resume when DaVinci supports it. 
The IRQ should * be able to wake up the system. When device_may_wakeup(&pdev->dev), call * enable_irq_wake() on suspend, and disable_irq_wake() on resume. */ /* * I2C is used to talk to the MSP430, but this platform device is * exposed by an MFD driver that manages I2C communications. */ static struct platform_driver dm355evm_keys_driver = { .probe = dm355evm_keys_probe, .remove = __devexit_p(dm355evm_keys_remove), .driver = { .owner = THIS_MODULE, .name = "dm355evm_keys", }, }; module_platform_driver(dm355evm_keys_driver); MODULE_LICENSE("GPL");
gpl-2.0
Orion116/kernel_samsung_lt03wifi_rebase
arch/powerpc/platforms/pseries/firmware.c
7721
2613
/* * pSeries firmware setup code. * * Portions from arch/powerpc/platforms/pseries/setup.c: * Copyright (C) 1995 Linus Torvalds * Adapted from 'alpha' version by Gary Thomas * Modified by Cort Dougan (cort@cs.nmt.edu) * Modified by PPC64 Team, IBM Corp * * Portions from arch/powerpc/kernel/firmware.c * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) * Modifications for ppc64: * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com> * Copyright (C) 2005 Stephen Rothwell, IBM Corporation * * Copyright 2006 IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <asm/firmware.h> #include <asm/prom.h> #include <asm/udbg.h> #include "pseries.h" typedef struct { unsigned long val; char * name; } firmware_feature_t; static __initdata firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = { {FW_FEATURE_PFT, "hcall-pft"}, {FW_FEATURE_TCE, "hcall-tce"}, {FW_FEATURE_SPRG0, "hcall-sprg0"}, {FW_FEATURE_DABR, "hcall-dabr"}, {FW_FEATURE_COPY, "hcall-copy"}, {FW_FEATURE_ASR, "hcall-asr"}, {FW_FEATURE_DEBUG, "hcall-debug"}, {FW_FEATURE_PERF, "hcall-perf"}, {FW_FEATURE_DUMP, "hcall-dump"}, {FW_FEATURE_INTERRUPT, "hcall-interrupt"}, {FW_FEATURE_MIGRATE, "hcall-migrate"}, {FW_FEATURE_PERFMON, "hcall-perfmon"}, {FW_FEATURE_CRQ, "hcall-crq"}, {FW_FEATURE_VIO, "hcall-vio"}, {FW_FEATURE_RDMA, "hcall-rdma"}, {FW_FEATURE_LLAN, "hcall-lLAN"}, {FW_FEATURE_BULK_REMOVE, "hcall-bulk"}, {FW_FEATURE_XDABR, "hcall-xdabr"}, {FW_FEATURE_MULTITCE, "hcall-multi-tce"}, {FW_FEATURE_SPLPAR, "hcall-splpar"}, {FW_FEATURE_VPHN, "hcall-vphn"}, }; /* Build up the firmware features bitmask using the contents of * device-tree/ibm,hypertas-functions. Ultimately this functionality may * be moved into prom.c prom_init(). 
*/ void __init fw_feature_init(const char *hypertas, unsigned long len) { const char *s; int i; pr_debug(" -> fw_feature_init()\n"); for (s = hypertas; s < hypertas + len; s += strlen(s) + 1) { for (i = 0; i < FIRMWARE_MAX_FEATURES; i++) { /* check value against table of strings */ if (!firmware_features_table[i].name || strcmp(firmware_features_table[i].name, s)) continue; /* we have a match */ powerpc_firmware_features |= firmware_features_table[i].val; break; } } pr_debug(" <- fw_feature_init()\n"); }
gpl-2.0
DESHONOR/android_kernel_huawei_msm8916_Blefish
drivers/staging/rtl8712/ieee80211.c
7977
11601
/****************************************************************************** * ieee80211.c * * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved. * Linux device driver for RTL8192SU * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved. * * Contact information: * WLAN FAE <wlanfae@realtek.com>. 
* Larry Finger <Larry.Finger@lwfinger.net> * ******************************************************************************/ #define _IEEE80211_C #include "drv_types.h" #include "ieee80211.h" #include "wifi.h" #include "osdep_service.h" #include "wlan_bssdef.h" static const u8 WPA_OUI_TYPE[] = {0x00, 0x50, 0xf2, 1}; static const u8 WPA_CIPHER_SUITE_NONE[] = {0x00, 0x50, 0xf2, 0}; static const u8 WPA_CIPHER_SUITE_WEP40[] = {0x00, 0x50, 0xf2, 1}; static const u8 WPA_CIPHER_SUITE_TKIP[] = {0x00, 0x50, 0xf2, 2}; static const u8 WPA_CIPHER_SUITE_CCMP[] = {0x00, 0x50, 0xf2, 4}; static const u8 WPA_CIPHER_SUITE_WEP104[] = {0x00, 0x50, 0xf2, 5}; static const u8 RSN_CIPHER_SUITE_NONE[] = {0x00, 0x0f, 0xac, 0}; static const u8 RSN_CIPHER_SUITE_WEP40[] = {0x00, 0x0f, 0xac, 1}; static const u8 RSN_CIPHER_SUITE_TKIP[] = {0x00, 0x0f, 0xac, 2}; static const u8 RSN_CIPHER_SUITE_CCMP[] = {0x00, 0x0f, 0xac, 4}; static const u8 RSN_CIPHER_SUITE_WEP104[] = {0x00, 0x0f, 0xac, 5}; /*----------------------------------------------------------- * for adhoc-master to generate ie and provide supported-rate to fw *----------------------------------------------------------- */ static u8 WIFI_CCKRATES[] = { (IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK), (IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK), (IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK), (IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK) }; static u8 WIFI_OFDMRATES[] = { (IEEE80211_OFDM_RATE_6MB), (IEEE80211_OFDM_RATE_9MB), (IEEE80211_OFDM_RATE_12MB), (IEEE80211_OFDM_RATE_18MB), (IEEE80211_OFDM_RATE_24MB), (IEEE80211_OFDM_RATE_36MB), (IEEE80211_OFDM_RATE_48MB), (IEEE80211_OFDM_RATE_54MB) }; uint r8712_is_cckrates_included(u8 *rate) { u32 i = 0; while (rate[i] != 0) { if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) || (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22)) return true; i++; } return false; } uint r8712_is_cckratesonly_included(u8 *rate) { u32 i = 0; while (rate[i] != 0) { if 
((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) && (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22)) return false; i++; } return true; } /* r8712_set_ie will update frame length */ u8 *r8712_set_ie(u8 *pbuf, sint index, uint len, u8 *source, uint *frlen) { *pbuf = (u8)index; *(pbuf + 1) = (u8)len; if (len > 0) memcpy((void *)(pbuf + 2), (void *)source, len); *frlen = *frlen + (len + 2); return pbuf + len + 2; } /*---------------------------------------------------------------------------- index: the information element id index, limit is the limit for search -----------------------------------------------------------------------------*/ u8 *r8712_get_ie(u8 *pbuf, sint index, sint *len, sint limit) { sint tmp, i; u8 *p; if (limit < 1) return NULL; p = pbuf; i = 0; *len = 0; while (1) { if (*p == index) { *len = *(p + 1); return p; } else { tmp = *(p + 1); p += (tmp + 2); i += (tmp + 2); } if (i >= limit) break; } return NULL; } static void set_supported_rate(u8 *SupportedRates, uint mode) { memset(SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX); switch (mode) { case WIRELESS_11B: memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN); break; case WIRELESS_11G: case WIRELESS_11A: memcpy(SupportedRates, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN); break; case WIRELESS_11BG: memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN); memcpy(SupportedRates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN); break; } } static uint r8712_get_rateset_len(u8 *rateset) { uint i = 0; while (1) { if ((rateset[i]) == 0) break; if (i > 12) break; i++; } return i; } int r8712_generate_ie(struct registry_priv *pregistrypriv) { int sz = 0, rateLen; struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network; u8 *ie = pdev_network->IEs; /*timestamp will be inserted by hardware*/ sz += 8; ie += sz; /*beacon interval : 2bytes*/ *(u16 *)ie = cpu_to_le16((u16)pdev_network->Configuration.BeaconPeriod); sz += 2; ie += 2; /*capability 
info*/ *(u16 *)ie = 0; *(u16 *)ie |= cpu_to_le16(cap_IBSS); if (pregistrypriv->preamble == PREAMBLE_SHORT) *(u16 *)ie |= cpu_to_le16(cap_ShortPremble); if (pdev_network->Privacy) *(u16 *)ie |= cpu_to_le16(cap_Privacy); sz += 2; ie += 2; /*SSID*/ ie = r8712_set_ie(ie, _SSID_IE_, pdev_network->Ssid.SsidLength, pdev_network->Ssid.Ssid, &sz); /*supported rates*/ set_supported_rate(pdev_network->SupportedRates, pregistrypriv->wireless_mode); rateLen = r8712_get_rateset_len(pdev_network->SupportedRates); if (rateLen > 8) { ie = r8712_set_ie(ie, _SUPPORTEDRATES_IE_, 8, pdev_network->SupportedRates, &sz); ie = r8712_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz); } else ie = r8712_set_ie(ie, _SUPPORTEDRATES_IE_, rateLen, pdev_network->SupportedRates, &sz); /*DS parameter set*/ ie = r8712_set_ie(ie, _DSSET_IE_, 1, (u8 *)&(pdev_network->Configuration.DSConfig), &sz); /*IBSS Parameter Set*/ ie = r8712_set_ie(ie, _IBSS_PARA_IE_, 2, (u8 *)&(pdev_network->Configuration.ATIMWindow), &sz); return sz; } unsigned char *r8712_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit) { int len; u16 val16; unsigned char wpa_oui_type[] = {0x00, 0x50, 0xf2, 0x01}; u8 *pbuf = pie; while (1) { pbuf = r8712_get_ie(pbuf, _WPA_IE_ID_, &len, limit); if (pbuf) { /*check if oui matches...*/ if (memcmp((pbuf + 2), wpa_oui_type, sizeof(wpa_oui_type))) goto check_next_ie; /*check version...*/ memcpy((u8 *)&val16, (pbuf + 6), sizeof(val16)); val16 = le16_to_cpu(val16); if (val16 != 0x0001) goto check_next_ie; *wpa_ie_len = *(pbuf + 1); return pbuf; } else { *wpa_ie_len = 0; return NULL; } check_next_ie: limit = limit - (pbuf - pie) - 2 - len; if (limit <= 0) break; pbuf += (2 + len); } *wpa_ie_len = 0; return NULL; } unsigned char *r8712_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit) { return r8712_get_ie(pie, _WPA2_IE_ID_, rsn_ie_len, limit); } static int r8712_get_wpa_cipher_suite(u8 *s) { if (!memcmp(s, (void *)WPA_CIPHER_SUITE_NONE, 
WPA_SELECTOR_LEN)) return WPA_CIPHER_NONE; if (!memcmp(s, (void *)WPA_CIPHER_SUITE_WEP40, WPA_SELECTOR_LEN)) return WPA_CIPHER_WEP40; if (!memcmp(s, (void *)WPA_CIPHER_SUITE_TKIP, WPA_SELECTOR_LEN)) return WPA_CIPHER_TKIP; if (!memcmp(s, (void *)WPA_CIPHER_SUITE_CCMP, WPA_SELECTOR_LEN)) return WPA_CIPHER_CCMP; if (!memcmp(s, (void *)WPA_CIPHER_SUITE_WEP104, WPA_SELECTOR_LEN)) return WPA_CIPHER_WEP104; return 0; } static int r8712_get_wpa2_cipher_suite(u8 *s) { if (!memcmp(s, (void *)RSN_CIPHER_SUITE_NONE, RSN_SELECTOR_LEN)) return WPA_CIPHER_NONE; if (!memcmp(s, (void *)RSN_CIPHER_SUITE_WEP40, RSN_SELECTOR_LEN)) return WPA_CIPHER_WEP40; if (!memcmp(s, (void *)RSN_CIPHER_SUITE_TKIP, RSN_SELECTOR_LEN)) return WPA_CIPHER_TKIP; if (!memcmp(s, (void *)RSN_CIPHER_SUITE_CCMP, RSN_SELECTOR_LEN)) return WPA_CIPHER_CCMP; if (!memcmp(s, (void *)RSN_CIPHER_SUITE_WEP104, RSN_SELECTOR_LEN)) return WPA_CIPHER_WEP104; return 0; } int r8712_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher) { int i, ret = _SUCCESS; int left, count; u8 *pos; if (wpa_ie_len <= 0) { /* No WPA IE - fail silently */ return _FAIL; } if ((*wpa_ie != _WPA_IE_ID_) || (*(wpa_ie + 1) != (u8)(wpa_ie_len - 2)) || (memcmp(wpa_ie + 2, (void *)WPA_OUI_TYPE, WPA_SELECTOR_LEN))) return _FAIL; pos = wpa_ie; pos += 8; left = wpa_ie_len - 8; /*group_cipher*/ if (left >= WPA_SELECTOR_LEN) { *group_cipher = r8712_get_wpa_cipher_suite(pos); pos += WPA_SELECTOR_LEN; left -= WPA_SELECTOR_LEN; } else if (left > 0) return _FAIL; /*pairwise_cipher*/ if (left >= 2) { count = le16_to_cpu(*(u16 *)pos); pos += 2; left -= 2; if (count == 0 || left < count * WPA_SELECTOR_LEN) return _FAIL; for (i = 0; i < count; i++) { *pairwise_cipher |= r8712_get_wpa_cipher_suite(pos); pos += WPA_SELECTOR_LEN; left -= WPA_SELECTOR_LEN; } } else if (left == 1) return _FAIL; return ret; } int r8712_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwise_cipher) { int i, ret = _SUCCESS; int left, 
count; u8 *pos; if (rsn_ie_len <= 0) { /* No RSN IE - fail silently */ return _FAIL; } if ((*rsn_ie != _WPA2_IE_ID_) || (*(rsn_ie+1) != (u8)(rsn_ie_len - 2))) return _FAIL; pos = rsn_ie; pos += 4; left = rsn_ie_len - 4; /*group_cipher*/ if (left >= RSN_SELECTOR_LEN) { *group_cipher = r8712_get_wpa2_cipher_suite(pos); pos += RSN_SELECTOR_LEN; left -= RSN_SELECTOR_LEN; } else if (left > 0) return _FAIL; /*pairwise_cipher*/ if (left >= 2) { count = le16_to_cpu(*(u16 *)pos); pos += 2; left -= 2; if (count == 0 || left < count * RSN_SELECTOR_LEN) return _FAIL; for (i = 0; i < count; i++) { *pairwise_cipher |= r8712_get_wpa2_cipher_suite(pos); pos += RSN_SELECTOR_LEN; left -= RSN_SELECTOR_LEN; } } else if (left == 1) return _FAIL; return ret; } int r8712_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie, u16 *wpa_len) { u8 authmode, sec_idx; u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01}; uint cnt; /*Search required WPA or WPA2 IE and copy to sec_ie[ ]*/ cnt = (_TIMESTAMP_ + _BEACON_ITERVAL_ + _CAPABILITY_); sec_idx = 0; while (cnt < in_len) { authmode = in_ie[cnt]; if ((authmode == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt + 2], &wpa_oui[0], 4))) { memcpy(wpa_ie, &in_ie[cnt], in_ie[cnt + 1] + 2); *wpa_len = in_ie[cnt+1]+2; cnt += in_ie[cnt + 1] + 2; /*get next */ } else { if (authmode == _WPA2_IE_ID_) { memcpy(rsn_ie, &in_ie[cnt], in_ie[cnt + 1] + 2); *rsn_len = in_ie[cnt+1] + 2; cnt += in_ie[cnt+1] + 2; /*get next*/ } else cnt += in_ie[cnt+1] + 2; /*get next*/ } } return *rsn_len + *wpa_len; } int r8712_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen) { int match; uint cnt; u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04}; cnt = 12; match = false; while (cnt < in_len) { eid = in_ie[cnt]; if ((eid == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt+2], wps_oui, 4))) { memcpy(wps_ie, &in_ie[cnt], in_ie[cnt+1]+2); *wps_ielen = in_ie[cnt+1]+2; cnt += in_ie[cnt+1]+2; match = true; break; } else cnt += in_ie[cnt+1]+2; /* goto next */ } return match; }
gpl-2.0
dianlujitao/android_kernel_huawei_c8813q
drivers/staging/rtl8192u/r819xU_phy.c
7977
59698
#include "r8192U.h" #include "r8192U_hw.h" #include "r819xU_phy.h" #include "r819xU_phyreg.h" #include "r8190_rtl8256.h" #include "r8192U_dm.h" #include "r819xU_firmware_img.h" #include "dot11d.h" static u32 RF_CHANNEL_TABLE_ZEBRA[] = { 0, 0x085c, //2412 1 0x08dc, //2417 2 0x095c, //2422 3 0x09dc, //2427 4 0x0a5c, //2432 5 0x0adc, //2437 6 0x0b5c, //2442 7 0x0bdc, //2447 8 0x0c5c, //2452 9 0x0cdc, //2457 10 0x0d5c, //2462 11 0x0ddc, //2467 12 0x0e5c, //2472 13 0x0f72, //2484 }; #define rtl819XPHY_REG_1T2RArray Rtl8192UsbPHY_REG_1T2RArray #define rtl819XMACPHY_Array_PG Rtl8192UsbMACPHY_Array_PG #define rtl819XMACPHY_Array Rtl8192UsbMACPHY_Array #define rtl819XRadioA_Array Rtl8192UsbRadioA_Array #define rtl819XRadioB_Array Rtl8192UsbRadioB_Array #define rtl819XRadioC_Array Rtl8192UsbRadioC_Array #define rtl819XRadioD_Array Rtl8192UsbRadioD_Array #define rtl819XAGCTAB_Array Rtl8192UsbAGCTAB_Array /****************************************************************************** *function: This function read BB parameters from Header file we gen, * and do register read/write * input: u32 dwBitMask //taget bit pos in the addr to be modified * output: none * return: u32 return the shift bit bit position of the mask * ****************************************************************************/ u32 rtl8192_CalculateBitShift(u32 dwBitMask) { u32 i; for (i=0; i<=31; i++) { if (((dwBitMask>>i)&0x1) == 1) break; } return i; } /****************************************************************************** *function: This function check different RF type to execute legal judgement. If RF Path is illegal, we will return false. 
* input: none * output: none * return: 0(illegal, false), 1(legal,true) * ***************************************************************************/ u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device* dev, u32 eRFPath) { u8 ret = 1; struct r8192_priv *priv = ieee80211_priv(dev); if (priv->rf_type == RF_2T4R) ret = 0; else if (priv->rf_type == RF_1T2R) { if (eRFPath == RF90_PATH_A || eRFPath == RF90_PATH_B) ret = 1; else if (eRFPath == RF90_PATH_C || eRFPath == RF90_PATH_D) ret = 0; } return ret; } /****************************************************************************** *function: This function set specific bits to BB register * input: net_device dev * u32 dwRegAddr //target addr to be modified * u32 dwBitMask //taget bit pos in the addr to be modified * u32 dwData //value to be write * output: none * return: none * notice: * ****************************************************************************/ void rtl8192_setBBreg(struct net_device* dev, u32 dwRegAddr, u32 dwBitMask, u32 dwData) { u32 OriginalValue, BitShift, NewValue; if(dwBitMask!= bMaskDWord) {//if not "double word" write OriginalValue = read_nic_dword(dev, dwRegAddr); BitShift = rtl8192_CalculateBitShift(dwBitMask); NewValue = (((OriginalValue) & (~dwBitMask)) | (dwData << BitShift)); write_nic_dword(dev, dwRegAddr, NewValue); }else write_nic_dword(dev, dwRegAddr, dwData); return; } /****************************************************************************** *function: This function reads specific bits from BB register * input: net_device dev * u32 dwRegAddr //target addr to be readback * u32 dwBitMask //taget bit pos in the addr to be readback * output: none * return: u32 Data //the readback register value * notice: * ****************************************************************************/ u32 rtl8192_QueryBBReg(struct net_device* dev, u32 dwRegAddr, u32 dwBitMask) { u32 Ret = 0, OriginalValue, BitShift; OriginalValue = read_nic_dword(dev, dwRegAddr); BitShift = 
rtl8192_CalculateBitShift(dwBitMask); Ret =(OriginalValue & dwBitMask) >> BitShift; return (Ret); } static u32 phy_FwRFSerialRead( struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Offset ); static void phy_FwRFSerialWrite( struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Offset, u32 Data); /****************************************************************************** *function: This function read register from RF chip * input: net_device dev * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D * u32 Offset //target address to be read * output: none * return: u32 readback value * notice: There are three types of serial operations:(1) Software serial write.(2)Hardware LSSI-Low Speed Serial Interface.(3)Hardware HSSI-High speed serial write. Driver here need to implement (1) and (2)---need more spec for this information. * ****************************************************************************/ u32 rtl8192_phy_RFSerialRead(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Offset) { struct r8192_priv *priv = ieee80211_priv(dev); u32 ret = 0; u32 NewOffset = 0; BB_REGISTER_DEFINITION_T* pPhyReg = &priv->PHYRegDef[eRFPath]; rtl8192_setBBreg(dev, pPhyReg->rfLSSIReadBack, bLSSIReadBackData, 0); //make sure RF register offset is correct Offset &= 0x3f; //switch page for 8256 RF IC if (priv->rf_chip == RF_8256) { if (Offset >= 31) { priv->RfReg0Value[eRFPath] |= 0x140; //Switch to Reg_Mode2 for Reg 31-45 rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath]<<16) ); //modify offset NewOffset = Offset -30; } else if (Offset >= 16) { priv->RfReg0Value[eRFPath] |= 0x100; priv->RfReg0Value[eRFPath] &= (~0x40); //Switch to Reg_Mode 1 for Reg16-30 rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath]<<16) ); NewOffset = Offset - 15; } else NewOffset = Offset; } else { RT_TRACE((COMP_PHY|COMP_ERR), "check RF type here, need to be 8256\n"); NewOffset = Offset; } //put desired read addr to LSSI 
control Register rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadAddress, NewOffset); //Issue a posedge trigger // rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x0); rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x1); // TODO: we should not delay such a long time. Ask help from SD3 msleep(1); ret = rtl8192_QueryBBReg(dev, pPhyReg->rfLSSIReadBack, bLSSIReadBackData); // Switch back to Reg_Mode0; if(priv->rf_chip == RF_8256) { priv->RfReg0Value[eRFPath] &= 0xebf; rtl8192_setBBreg( dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath] << 16)); } return ret; } /****************************************************************************** *function: This function write data to RF register * input: net_device dev * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D * u32 Offset //target address to be written * u32 Data //The new register data to be written * output: none * return: none * notice: For RF8256 only. =========================================================== *Reg Mode RegCTL[1] RegCTL[0] Note * (Reg00[12]) (Reg00[10]) *=========================================================== *Reg_Mode0 0 x Reg 0 ~15(0x0 ~ 0xf) *------------------------------------------------------------------ *Reg_Mode1 1 0 Reg 16 ~30(0x1 ~ 0xf) *------------------------------------------------------------------ * Reg_Mode2 1 1 Reg 31 ~ 45(0x1 ~ 0xf) *------------------------------------------------------------------ * ****************************************************************************/ void rtl8192_phy_RFSerialWrite(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Offset, u32 Data) { struct r8192_priv *priv = ieee80211_priv(dev); u32 DataAndAddr = 0, NewOffset = 0; BB_REGISTER_DEFINITION_T *pPhyReg = &priv->PHYRegDef[eRFPath]; Offset &= 0x3f; //spin_lock_irqsave(&priv->rf_lock, flags); // down(&priv->rf_sem); if (priv->rf_chip == RF_8256) { if (Offset >= 31) { priv->RfReg0Value[eRFPath] |= 0x140; rtl8192_setBBreg(dev, 
pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath] << 16)); NewOffset = Offset - 30; } else if (Offset >= 16) { priv->RfReg0Value[eRFPath] |= 0x100; priv->RfReg0Value[eRFPath] &= (~0x40); rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath]<<16)); NewOffset = Offset - 15; } else NewOffset = Offset; } else { RT_TRACE((COMP_PHY|COMP_ERR), "check RF type here, need to be 8256\n"); NewOffset = Offset; } // Put write addr in [5:0] and write data in [31:16] DataAndAddr = (Data<<16) | (NewOffset&0x3f); // Write Operation rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr); if(Offset==0x0) priv->RfReg0Value[eRFPath] = Data; // Switch back to Reg_Mode0; if(priv->rf_chip == RF_8256) { if(Offset != 0) { priv->RfReg0Value[eRFPath] &= 0xebf; rtl8192_setBBreg( dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath] << 16)); } } //spin_unlock_irqrestore(&priv->rf_lock, flags); // up(&priv->rf_sem); return; } /****************************************************************************** *function: This function set specific bits to RF register * input: net_device dev * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D * u32 RegAddr //target addr to be modified * u32 BitMask //taget bit pos in the addr to be modified * u32 Data //value to be write * output: none * return: none * notice: * ****************************************************************************/ void rtl8192_phy_SetRFReg(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 RegAddr, u32 BitMask, u32 Data) { struct r8192_priv *priv = ieee80211_priv(dev); u32 Original_Value, BitShift, New_Value; // u8 time = 0; if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath)) return; if (priv->Rf_Mode == RF_OP_By_FW) { if (BitMask != bMask12Bits) // RF data is 12 bits only { Original_Value = phy_FwRFSerialRead(dev, eRFPath, RegAddr); BitShift = rtl8192_CalculateBitShift(BitMask); New_Value = ((Original_Value) & (~BitMask)) | (Data<< BitShift); 
phy_FwRFSerialWrite(dev, eRFPath, RegAddr, New_Value); }else phy_FwRFSerialWrite(dev, eRFPath, RegAddr, Data); udelay(200); } else { if (BitMask != bMask12Bits) // RF data is 12 bits only { Original_Value = rtl8192_phy_RFSerialRead(dev, eRFPath, RegAddr); BitShift = rtl8192_CalculateBitShift(BitMask); New_Value = (((Original_Value) & (~BitMask)) | (Data<< BitShift)); rtl8192_phy_RFSerialWrite(dev, eRFPath, RegAddr, New_Value); }else rtl8192_phy_RFSerialWrite(dev, eRFPath, RegAddr, Data); } return; } /****************************************************************************** *function: This function reads specific bits from RF register * input: net_device dev * u32 RegAddr //target addr to be readback * u32 BitMask //taget bit pos in the addr to be readback * output: none * return: u32 Data //the readback register value * notice: * ****************************************************************************/ u32 rtl8192_phy_QueryRFReg(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 RegAddr, u32 BitMask) { u32 Original_Value, Readback_Value, BitShift; struct r8192_priv *priv = ieee80211_priv(dev); if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath)) return 0; if (priv->Rf_Mode == RF_OP_By_FW) { Original_Value = phy_FwRFSerialRead(dev, eRFPath, RegAddr); BitShift = rtl8192_CalculateBitShift(BitMask); Readback_Value = (Original_Value & BitMask) >> BitShift; udelay(200); return (Readback_Value); } else { Original_Value = rtl8192_phy_RFSerialRead(dev, eRFPath, RegAddr); BitShift = rtl8192_CalculateBitShift(BitMask); Readback_Value = (Original_Value & BitMask) >> BitShift; return (Readback_Value); } } /****************************************************************************** *function: We support firmware to execute RF-R/W. 
* input: dev * output: none * return: none * notice: * ***************************************************************************/ static u32 phy_FwRFSerialRead( struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Offset ) { u32 retValue = 0; u32 Data = 0; u8 time = 0; //DbgPrint("FW RF CTRL\n\r"); /* 2007/11/02 MH Firmware RF Write control. By Francis' suggestion, we can not execute the scheme in the initial step. Otherwise, RF-R/W will waste much time. This is only for site survey. */ // 1. Read operation need not insert data. bit 0-11 //Data &= bMask12Bits; // 2. Write RF register address. Bit 12-19 Data |= ((Offset&0xFF)<<12); // 3. Write RF path. bit 20-21 Data |= ((eRFPath&0x3)<<20); // 4. Set RF read indicator. bit 22=0 //Data |= 0x00000; // 5. Trigger Fw to operate the command. bit 31 Data |= 0x80000000; // 6. We can not execute read operation if bit 31 is 1. while (read_nic_dword(dev, QPNR)&0x80000000) { // If FW can not finish RF-R/W for more than ?? times. We must reset FW. if (time++ < 100) { //DbgPrint("FW not finish RF-R Time=%d\n\r", time); udelay(10); } else break; } // 7. Execute read operation. write_nic_dword(dev, QPNR, Data); // 8. Check if firmawre send back RF content. while (read_nic_dword(dev, QPNR)&0x80000000) { // If FW can not finish RF-R/W for more than ?? times. We must reset FW. if (time++ < 100) { //DbgPrint("FW not finish RF-W Time=%d\n\r", time); udelay(10); } else return (0); } retValue = read_nic_dword(dev, RF_DATA); return (retValue); } /* phy_FwRFSerialRead */ /****************************************************************************** *function: We support firmware to execute RF-R/W. 
* input: dev * output: none * return: none * notice: * ***************************************************************************/ static void phy_FwRFSerialWrite( struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Offset, u32 Data ) { u8 time = 0; //DbgPrint("N FW RF CTRL RF-%d OF%02x DATA=%03x\n\r", eRFPath, Offset, Data); /* 2007/11/02 MH Firmware RF Write control. By Francis' suggestion, we can not execute the scheme in the initial step. Otherwise, RF-R/W will waste much time. This is only for site survey. */ // 1. Set driver write bit and 12 bit data. bit 0-11 //Data &= bMask12Bits; // Done by uper layer. // 2. Write RF register address. bit 12-19 Data |= ((Offset&0xFF)<<12); // 3. Write RF path. bit 20-21 Data |= ((eRFPath&0x3)<<20); // 4. Set RF write indicator. bit 22=1 Data |= 0x400000; // 5. Trigger Fw to operate the command. bit 31=1 Data |= 0x80000000; // 6. Write operation. We can not write if bit 31 is 1. while (read_nic_dword(dev, QPNR)&0x80000000) { // If FW can not finish RF-R/W for more than ?? times. We must reset FW. if (time++ < 100) { //DbgPrint("FW not finish RF-W Time=%d\n\r", time); udelay(10); } else break; } // 7. No matter check bit. We always force the write. Because FW will // not accept the command. write_nic_dword(dev, QPNR, Data); /* 2007/11/02 MH Acoording to test, we must delay 20us to wait firmware to finish RF write operation. */ /* 2008/01/17 MH We support delay in firmware side now. */ //delay_us(20); } /* phy_FwRFSerialWrite */ /****************************************************************************** *function: This function read BB parameters from Header file we gen, * and do register read/write * input: dev * output: none * return: none * notice: BB parameters may change all the time, so please make * sure it has been synced with the newest. 
* ***************************************************************************/ void rtl8192_phy_configmac(struct net_device* dev) { u32 dwArrayLen = 0, i; u32* pdwArray = NULL; struct r8192_priv *priv = ieee80211_priv(dev); if(priv->btxpowerdata_readfromEEPORM) { RT_TRACE(COMP_PHY, "Rtl819XMACPHY_Array_PG\n"); dwArrayLen = MACPHY_Array_PGLength; pdwArray = rtl819XMACPHY_Array_PG; } else { RT_TRACE(COMP_PHY, "Rtl819XMACPHY_Array\n"); dwArrayLen = MACPHY_ArrayLength; pdwArray = rtl819XMACPHY_Array; } for(i = 0; i<dwArrayLen; i=i+3){ if(pdwArray[i] == 0x318) { pdwArray[i+2] = 0x00000800; //DbgPrint("ptrArray[i], ptrArray[i+1], ptrArray[i+2] = %x, %x, %x\n", // ptrArray[i], ptrArray[i+1], ptrArray[i+2]); } RT_TRACE(COMP_DBG, "The Rtl8190MACPHY_Array[0] is %x Rtl8190MACPHY_Array[1] is %x Rtl8190MACPHY_Array[2] is %x\n", pdwArray[i], pdwArray[i+1], pdwArray[i+2]); rtl8192_setBBreg(dev, pdwArray[i], pdwArray[i+1], pdwArray[i+2]); } return; } /****************************************************************************** *function: This function do dirty work * input: dev * output: none * return: none * notice: BB parameters may change all the time, so please make * sure it has been synced with the newest. 
* ***************************************************************************/ void rtl8192_phyConfigBB(struct net_device* dev, u8 ConfigType) { u32 i; #ifdef TO_DO_LIST u32 *rtl8192PhyRegArrayTable = NULL, *rtl8192AgcTabArrayTable = NULL; if(Adapter->bInHctTest) { PHY_REGArrayLen = PHY_REGArrayLengthDTM; AGCTAB_ArrayLen = AGCTAB_ArrayLengthDTM; Rtl8190PHY_REGArray_Table = Rtl819XPHY_REGArrayDTM; Rtl8190AGCTAB_Array_Table = Rtl819XAGCTAB_ArrayDTM; } #endif if (ConfigType == BaseBand_Config_PHY_REG) { for (i=0; i<PHY_REG_1T2RArrayLength; i+=2) { rtl8192_setBBreg(dev, rtl819XPHY_REG_1T2RArray[i], bMaskDWord, rtl819XPHY_REG_1T2RArray[i+1]); RT_TRACE(COMP_DBG, "i: %x, The Rtl819xUsbPHY_REGArray[0] is %x Rtl819xUsbPHY_REGArray[1] is %x \n",i, rtl819XPHY_REG_1T2RArray[i], rtl819XPHY_REG_1T2RArray[i+1]); } } else if (ConfigType == BaseBand_Config_AGC_TAB) { for (i=0; i<AGCTAB_ArrayLength; i+=2) { rtl8192_setBBreg(dev, rtl819XAGCTAB_Array[i], bMaskDWord, rtl819XAGCTAB_Array[i+1]); RT_TRACE(COMP_DBG, "i:%x, The rtl819XAGCTAB_Array[0] is %x rtl819XAGCTAB_Array[1] is %x \n",i, rtl819XAGCTAB_Array[i], rtl819XAGCTAB_Array[i+1]); } } return; } /****************************************************************************** *function: This function initialize Register definition offset for Radio Path * A/B/C/D * input: net_device dev * output: none * return: none * notice: Initialization value here is constant and it should never be changed * ***************************************************************************/ void rtl8192_InitBBRFRegDef(struct net_device* dev) { struct r8192_priv *priv = ieee80211_priv(dev); // RF Interface Sowrtware Control priv->PHYRegDef[RF90_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW; // 16 LSBs if read 32-bit from 0x870 priv->PHYRegDef[RF90_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW; // 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) priv->PHYRegDef[RF90_PATH_C].rfintfs = rFPGA0_XCD_RFInterfaceSW;// 16 LSBs if read 32-bit from 0x874 
priv->PHYRegDef[RF90_PATH_D].rfintfs = rFPGA0_XCD_RFInterfaceSW;// 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876) // RF Interface Readback Value priv->PHYRegDef[RF90_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB; // 16 LSBs if read 32-bit from 0x8E0 priv->PHYRegDef[RF90_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB;// 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2) priv->PHYRegDef[RF90_PATH_C].rfintfi = rFPGA0_XCD_RFInterfaceRB;// 16 LSBs if read 32-bit from 0x8E4 priv->PHYRegDef[RF90_PATH_D].rfintfi = rFPGA0_XCD_RFInterfaceRB;// 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6) // RF Interface Output (and Enable) priv->PHYRegDef[RF90_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE; // 16 LSBs if read 32-bit from 0x860 priv->PHYRegDef[RF90_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE; // 16 LSBs if read 32-bit from 0x864 priv->PHYRegDef[RF90_PATH_C].rfintfo = rFPGA0_XC_RFInterfaceOE;// 16 LSBs if read 32-bit from 0x868 priv->PHYRegDef[RF90_PATH_D].rfintfo = rFPGA0_XD_RFInterfaceOE;// 16 LSBs if read 32-bit from 0x86C // RF Interface (Output and) Enable priv->PHYRegDef[RF90_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE; // 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) priv->PHYRegDef[RF90_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE; // 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866) priv->PHYRegDef[RF90_PATH_C].rfintfe = rFPGA0_XC_RFInterfaceOE;// 16 MSBs if read 32-bit from 0x86A (16-bit for 0x86A) priv->PHYRegDef[RF90_PATH_D].rfintfe = rFPGA0_XD_RFInterfaceOE;// 16 MSBs if read 32-bit from 0x86C (16-bit for 0x86E) //Addr of LSSI. 
Wirte RF register by driver priv->PHYRegDef[RF90_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter; //LSSI Parameter priv->PHYRegDef[RF90_PATH_B].rf3wireOffset = rFPGA0_XB_LSSIParameter; priv->PHYRegDef[RF90_PATH_C].rf3wireOffset = rFPGA0_XC_LSSIParameter; priv->PHYRegDef[RF90_PATH_D].rf3wireOffset = rFPGA0_XD_LSSIParameter; // RF parameter priv->PHYRegDef[RF90_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter; //BB Band Select priv->PHYRegDef[RF90_PATH_B].rfLSSI_Select = rFPGA0_XAB_RFParameter; priv->PHYRegDef[RF90_PATH_C].rfLSSI_Select = rFPGA0_XCD_RFParameter; priv->PHYRegDef[RF90_PATH_D].rfLSSI_Select = rFPGA0_XCD_RFParameter; // Tx AGC Gain Stage (same for all path. Should we remove this?) priv->PHYRegDef[RF90_PATH_A].rfTxGainStage = rFPGA0_TxGainStage; //Tx gain stage priv->PHYRegDef[RF90_PATH_B].rfTxGainStage = rFPGA0_TxGainStage; //Tx gain stage priv->PHYRegDef[RF90_PATH_C].rfTxGainStage = rFPGA0_TxGainStage; //Tx gain stage priv->PHYRegDef[RF90_PATH_D].rfTxGainStage = rFPGA0_TxGainStage; //Tx gain stage // Tranceiver A~D HSSI Parameter-1 priv->PHYRegDef[RF90_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1; //wire control parameter1 priv->PHYRegDef[RF90_PATH_B].rfHSSIPara1 = rFPGA0_XB_HSSIParameter1; //wire control parameter1 priv->PHYRegDef[RF90_PATH_C].rfHSSIPara1 = rFPGA0_XC_HSSIParameter1; //wire control parameter1 priv->PHYRegDef[RF90_PATH_D].rfHSSIPara1 = rFPGA0_XD_HSSIParameter1; //wire control parameter1 // Tranceiver A~D HSSI Parameter-2 priv->PHYRegDef[RF90_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2; //wire control parameter2 priv->PHYRegDef[RF90_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2; //wire control parameter2 priv->PHYRegDef[RF90_PATH_C].rfHSSIPara2 = rFPGA0_XC_HSSIParameter2; //wire control parameter2 priv->PHYRegDef[RF90_PATH_D].rfHSSIPara2 = rFPGA0_XD_HSSIParameter2; //wire control parameter1 // RF switch Control priv->PHYRegDef[RF90_PATH_A].rfSwitchControl = rFPGA0_XAB_SwitchControl; //TR/Ant switch control 
priv->PHYRegDef[RF90_PATH_B].rfSwitchControl = rFPGA0_XAB_SwitchControl; priv->PHYRegDef[RF90_PATH_C].rfSwitchControl = rFPGA0_XCD_SwitchControl; priv->PHYRegDef[RF90_PATH_D].rfSwitchControl = rFPGA0_XCD_SwitchControl; // AGC control 1 priv->PHYRegDef[RF90_PATH_A].rfAGCControl1 = rOFDM0_XAAGCCore1; priv->PHYRegDef[RF90_PATH_B].rfAGCControl1 = rOFDM0_XBAGCCore1; priv->PHYRegDef[RF90_PATH_C].rfAGCControl1 = rOFDM0_XCAGCCore1; priv->PHYRegDef[RF90_PATH_D].rfAGCControl1 = rOFDM0_XDAGCCore1; // AGC control 2 priv->PHYRegDef[RF90_PATH_A].rfAGCControl2 = rOFDM0_XAAGCCore2; priv->PHYRegDef[RF90_PATH_B].rfAGCControl2 = rOFDM0_XBAGCCore2; priv->PHYRegDef[RF90_PATH_C].rfAGCControl2 = rOFDM0_XCAGCCore2; priv->PHYRegDef[RF90_PATH_D].rfAGCControl2 = rOFDM0_XDAGCCore2; // RX AFE control 1 priv->PHYRegDef[RF90_PATH_A].rfRxIQImbalance = rOFDM0_XARxIQImbalance; priv->PHYRegDef[RF90_PATH_B].rfRxIQImbalance = rOFDM0_XBRxIQImbalance; priv->PHYRegDef[RF90_PATH_C].rfRxIQImbalance = rOFDM0_XCRxIQImbalance; priv->PHYRegDef[RF90_PATH_D].rfRxIQImbalance = rOFDM0_XDRxIQImbalance; // RX AFE control 1 priv->PHYRegDef[RF90_PATH_A].rfRxAFE = rOFDM0_XARxAFE; priv->PHYRegDef[RF90_PATH_B].rfRxAFE = rOFDM0_XBRxAFE; priv->PHYRegDef[RF90_PATH_C].rfRxAFE = rOFDM0_XCRxAFE; priv->PHYRegDef[RF90_PATH_D].rfRxAFE = rOFDM0_XDRxAFE; // Tx AFE control 1 priv->PHYRegDef[RF90_PATH_A].rfTxIQImbalance = rOFDM0_XATxIQImbalance; priv->PHYRegDef[RF90_PATH_B].rfTxIQImbalance = rOFDM0_XBTxIQImbalance; priv->PHYRegDef[RF90_PATH_C].rfTxIQImbalance = rOFDM0_XCTxIQImbalance; priv->PHYRegDef[RF90_PATH_D].rfTxIQImbalance = rOFDM0_XDTxIQImbalance; // Tx AFE control 2 priv->PHYRegDef[RF90_PATH_A].rfTxAFE = rOFDM0_XATxAFE; priv->PHYRegDef[RF90_PATH_B].rfTxAFE = rOFDM0_XBTxAFE; priv->PHYRegDef[RF90_PATH_C].rfTxAFE = rOFDM0_XCTxAFE; priv->PHYRegDef[RF90_PATH_D].rfTxAFE = rOFDM0_XDTxAFE; // Tranceiver LSSI Readback priv->PHYRegDef[RF90_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack; priv->PHYRegDef[RF90_PATH_B].rfLSSIReadBack = 
rFPGA0_XB_LSSIReadBack; priv->PHYRegDef[RF90_PATH_C].rfLSSIReadBack = rFPGA0_XC_LSSIReadBack; priv->PHYRegDef[RF90_PATH_D].rfLSSIReadBack = rFPGA0_XD_LSSIReadBack; } /****************************************************************************** *function: This function is to write register and then readback to make sure whether BB and RF is OK * input: net_device dev * HW90_BLOCK_E CheckBlock * RF90_RADIO_PATH_E eRFPath //only used when checkblock is HW90_BLOCK_RF * output: none * return: return whether BB and RF is ok(0:OK; 1:Fail) * notice: This function may be removed in the ASIC * ***************************************************************************/ u8 rtl8192_phy_checkBBAndRF(struct net_device* dev, HW90_BLOCK_E CheckBlock, RF90_RADIO_PATH_E eRFPath) { // struct r8192_priv *priv = ieee80211_priv(dev); // BB_REGISTER_DEFINITION_T *pPhyReg = &priv->PHYRegDef[eRFPath]; u8 ret = 0; u32 i, CheckTimes = 4, dwRegRead = 0; u32 WriteAddr[4]; u32 WriteData[] = {0xfffff027, 0xaa55a02f, 0x00000027, 0x55aa502f}; // Initialize register address offset to be checked WriteAddr[HW90_BLOCK_MAC] = 0x100; WriteAddr[HW90_BLOCK_PHY0] = 0x900; WriteAddr[HW90_BLOCK_PHY1] = 0x800; WriteAddr[HW90_BLOCK_RF] = 0x3; RT_TRACE(COMP_PHY, "=======>%s(), CheckBlock:%d\n", __FUNCTION__, CheckBlock); for(i=0 ; i < CheckTimes ; i++) { // // Write Data to register and readback // switch(CheckBlock) { case HW90_BLOCK_MAC: RT_TRACE(COMP_ERR, "PHY_CheckBBRFOK(): Never Write 0x100 here!"); break; case HW90_BLOCK_PHY0: case HW90_BLOCK_PHY1: write_nic_dword(dev, WriteAddr[CheckBlock], WriteData[i]); dwRegRead = read_nic_dword(dev, WriteAddr[CheckBlock]); break; case HW90_BLOCK_RF: WriteData[i] &= 0xfff; rtl8192_phy_SetRFReg(dev, eRFPath, WriteAddr[HW90_BLOCK_RF], bMask12Bits, WriteData[i]); // TODO: we should not delay for such a long time. 
Ask SD3 msleep(1); dwRegRead = rtl8192_phy_QueryRFReg(dev, eRFPath, WriteAddr[HW90_BLOCK_RF], bMask12Bits); msleep(1); break; default: ret = 1; break; } // // Check whether readback data is correct // if(dwRegRead != WriteData[i]) { RT_TRACE((COMP_PHY|COMP_ERR), "====>error=====dwRegRead: %x, WriteData: %x \n", dwRegRead, WriteData[i]); ret = 1; break; } } return ret; } /****************************************************************************** *function: This function initialize BB&RF * input: net_device dev * output: none * return: none * notice: Initialization value may change all the time, so please make * sure it has been synced with the newest. * ***************************************************************************/ void rtl8192_BB_Config_ParaFile(struct net_device* dev) { struct r8192_priv *priv = ieee80211_priv(dev); u8 bRegValue = 0, eCheckItem = 0, rtStatus = 0; u32 dwRegValue = 0; /************************************** //<1>Initialize BaseBand **************************************/ /*--set BB Global Reset--*/ bRegValue = read_nic_byte(dev, BB_GLOBAL_RESET); write_nic_byte(dev, BB_GLOBAL_RESET,(bRegValue|BB_GLOBAL_RESET_BIT)); mdelay(50); /*---set BB reset Active---*/ dwRegValue = read_nic_dword(dev, CPU_GEN); write_nic_dword(dev, CPU_GEN, (dwRegValue&(~CPU_GEN_BB_RST))); /*----Ckeck FPGAPHY0 and PHY1 board is OK----*/ // TODO: this function should be removed on ASIC , Emily 2007.2.2 for(eCheckItem=(HW90_BLOCK_E)HW90_BLOCK_PHY0; eCheckItem<=HW90_BLOCK_PHY1; eCheckItem++) { rtStatus = rtl8192_phy_checkBBAndRF(dev, (HW90_BLOCK_E)eCheckItem, (RF90_RADIO_PATH_E)0); //don't care RF path if(rtStatus != 0) { RT_TRACE((COMP_ERR | COMP_PHY), "PHY_RF8256_Config():Check PHY%d Fail!!\n", eCheckItem-1); return ; } } /*---- Set CCK and OFDM Block "OFF"----*/ rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn|bOFDMEn, 0x0); /*----BB Register Initilazation----*/ //==m==>Set PHY REG From Header<==m== rtl8192_phyConfigBB(dev, BaseBand_Config_PHY_REG); /*----Set BB 
reset de-Active----*/ dwRegValue = read_nic_dword(dev, CPU_GEN); write_nic_dword(dev, CPU_GEN, (dwRegValue|CPU_GEN_BB_RST)); /*----BB AGC table Initialization----*/ //==m==>Set PHY REG From Header<==m== rtl8192_phyConfigBB(dev, BaseBand_Config_AGC_TAB); /*----Enable XSTAL ----*/ write_nic_byte_E(dev, 0x5e, 0x00); if (priv->card_8192_version == (u8)VERSION_819xU_A) { //Antenna gain offset from B/C/D to A dwRegValue = (priv->AntennaTxPwDiff[1]<<4 | priv->AntennaTxPwDiff[0]); rtl8192_setBBreg(dev, rFPGA0_TxGainStage, (bXBTxAGC|bXCTxAGC), dwRegValue); //XSTALLCap dwRegValue = priv->CrystalCap & 0xf; rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, bXtalCap, dwRegValue); } // Check if the CCK HighPower is turned ON. // This is used to calculate PWDB. priv->bCckHighPower = (u8)(rtl8192_QueryBBReg(dev, rFPGA0_XA_HSSIParameter2, 0x200)); return; } /****************************************************************************** *function: This function initialize BB&RF * input: net_device dev * output: none * return: none * notice: Initialization value may change all the time, so please make * sure it has been synced with the newest. * ***************************************************************************/ void rtl8192_BBConfig(struct net_device* dev) { rtl8192_InitBBRFRegDef(dev); //config BB&RF. As hardCode based initialization has not been well //implemented, so use file first.FIXME:should implement it for hardcode? 
rtl8192_BB_Config_ParaFile(dev); return; } /****************************************************************************** *function: This function obtains the initialization value of Tx power Level offset * input: net_device dev * output: none * return: none * ***************************************************************************/ void rtl8192_phy_getTxPower(struct net_device* dev) { struct r8192_priv *priv = ieee80211_priv(dev); priv->MCSTxPowerLevelOriginalOffset[0] = read_nic_dword(dev, rTxAGC_Rate18_06); priv->MCSTxPowerLevelOriginalOffset[1] = read_nic_dword(dev, rTxAGC_Rate54_24); priv->MCSTxPowerLevelOriginalOffset[2] = read_nic_dword(dev, rTxAGC_Mcs03_Mcs00); priv->MCSTxPowerLevelOriginalOffset[3] = read_nic_dword(dev, rTxAGC_Mcs07_Mcs04); priv->MCSTxPowerLevelOriginalOffset[4] = read_nic_dword(dev, rTxAGC_Mcs11_Mcs08); priv->MCSTxPowerLevelOriginalOffset[5] = read_nic_dword(dev, rTxAGC_Mcs15_Mcs12); // read rx initial gain priv->DefaultInitialGain[0] = read_nic_byte(dev, rOFDM0_XAAGCCore1); priv->DefaultInitialGain[1] = read_nic_byte(dev, rOFDM0_XBAGCCore1); priv->DefaultInitialGain[2] = read_nic_byte(dev, rOFDM0_XCAGCCore1); priv->DefaultInitialGain[3] = read_nic_byte(dev, rOFDM0_XDAGCCore1); RT_TRACE(COMP_INIT, "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x) \n", priv->DefaultInitialGain[0], priv->DefaultInitialGain[1], priv->DefaultInitialGain[2], priv->DefaultInitialGain[3]); // read framesync priv->framesync = read_nic_byte(dev, rOFDM0_RxDetector3); priv->framesyncC34 = read_nic_byte(dev, rOFDM0_RxDetector2); RT_TRACE(COMP_INIT, "Default framesync (0x%x) = 0x%x \n", rOFDM0_RxDetector3, priv->framesync); // read SIFS (save the value read fome MACPHY_REG.txt) priv->SifsTime = read_nic_word(dev, SIFS); return; } /****************************************************************************** *function: This function obtains the initialization value of Tx power Level offset * input: net_device dev * output: none * return: none * 
***************************************************************************/ void rtl8192_phy_setTxPower(struct net_device* dev, u8 channel) { struct r8192_priv *priv = ieee80211_priv(dev); u8 powerlevel = priv->TxPowerLevelCCK[channel-1]; u8 powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1]; switch(priv->rf_chip) { case RF_8256: PHY_SetRF8256CCKTxPower(dev, powerlevel); //need further implement PHY_SetRF8256OFDMTxPower(dev, powerlevelOFDM24G); break; default: // case RF_8225: // case RF_8258: RT_TRACE((COMP_PHY|COMP_ERR), "error RF chipID(8225 or 8258) in function %s()\n", __FUNCTION__); break; } return; } /****************************************************************************** *function: This function check Rf chip to do RF config * input: net_device dev * output: none * return: only 8256 is supported * ***************************************************************************/ void rtl8192_phy_RFConfig(struct net_device* dev) { struct r8192_priv *priv = ieee80211_priv(dev); switch(priv->rf_chip) { case RF_8256: PHY_RF8256_Config(dev); break; // case RF_8225: // case RF_8258: default: RT_TRACE(COMP_ERR, "error chip id\n"); break; } return; } /****************************************************************************** *function: This function update Initial gain * input: net_device dev * output: none * return: As Windows has not implemented this, wait for complement * ***************************************************************************/ void rtl8192_phy_updateInitGain(struct net_device* dev) { return; } /****************************************************************************** *function: This function read RF parameters from general head file, and do RF 3-wire * input: net_device dev * output: none * return: return code show if RF configuration is successful(0:pass, 1:fail) * Note: Delay may be required for RF configuration * ***************************************************************************/ u8 
rtl8192_phy_ConfigRFWithHeaderFile(struct net_device* dev, RF90_RADIO_PATH_E eRFPath) { int i; //u32* pRFArray; u8 ret = 0; switch(eRFPath){ case RF90_PATH_A: for(i = 0;i<RadioA_ArrayLength; i=i+2){ if(rtl819XRadioA_Array[i] == 0xfe){ mdelay(100); continue; } rtl8192_phy_SetRFReg(dev, eRFPath, rtl819XRadioA_Array[i], bMask12Bits, rtl819XRadioA_Array[i+1]); mdelay(1); } break; case RF90_PATH_B: for(i = 0;i<RadioB_ArrayLength; i=i+2){ if(rtl819XRadioB_Array[i] == 0xfe){ mdelay(100); continue; } rtl8192_phy_SetRFReg(dev, eRFPath, rtl819XRadioB_Array[i], bMask12Bits, rtl819XRadioB_Array[i+1]); mdelay(1); } break; case RF90_PATH_C: for(i = 0;i<RadioC_ArrayLength; i=i+2){ if(rtl819XRadioC_Array[i] == 0xfe){ mdelay(100); continue; } rtl8192_phy_SetRFReg(dev, eRFPath, rtl819XRadioC_Array[i], bMask12Bits, rtl819XRadioC_Array[i+1]); mdelay(1); } break; case RF90_PATH_D: for(i = 0;i<RadioD_ArrayLength; i=i+2){ if(rtl819XRadioD_Array[i] == 0xfe){ mdelay(100); continue; } rtl8192_phy_SetRFReg(dev, eRFPath, rtl819XRadioD_Array[i], bMask12Bits, rtl819XRadioD_Array[i+1]); mdelay(1); } break; default: break; } return ret; } /****************************************************************************** *function: This function set Tx Power of the channel * input: struct net_device *dev * u8 channel * output: none * return: none * Note: * ***************************************************************************/ void rtl8192_SetTxPowerLevel(struct net_device *dev, u8 channel) { struct r8192_priv *priv = ieee80211_priv(dev); u8 powerlevel = priv->TxPowerLevelCCK[channel-1]; u8 powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1]; switch(priv->rf_chip) { case RF_8225: #ifdef TO_DO_LIST PHY_SetRF8225CckTxPower(Adapter, powerlevel); PHY_SetRF8225OfdmTxPower(Adapter, powerlevelOFDM24G); #endif break; case RF_8256: PHY_SetRF8256CCKTxPower(dev, powerlevel); PHY_SetRF8256OFDMTxPower(dev, powerlevelOFDM24G); break; case RF_8258: break; default: RT_TRACE(COMP_ERR, "unknown rf chip ID in 
rtl8192_SetTxPowerLevel()\n"); break; } return; } /****************************************************************************** *function: This function set RF state on or off * input: struct net_device *dev * RT_RF_POWER_STATE eRFPowerState //Power State to set * output: none * return: none * Note: * ***************************************************************************/ bool rtl8192_SetRFPowerState(struct net_device *dev, RT_RF_POWER_STATE eRFPowerState) { bool bResult = true; // u8 eRFPath; struct r8192_priv *priv = ieee80211_priv(dev); if(eRFPowerState == priv->ieee80211->eRFPowerState) return false; if(priv->SetRFPowerStateInProgress == true) return false; priv->SetRFPowerStateInProgress = true; switch(priv->rf_chip) { case RF_8256: switch( eRFPowerState ) { case eRfOn: //RF-A, RF-B //enable RF-Chip A/B rtl8192_setBBreg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x1); // 0x860[4] //analog to digital on rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);// 0x88c[9:8] //digital to analog on rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x18, 0x3); // 0x880[4:3] //rx antenna on rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x3, 0x3);// 0xc04[1:0] //rx antenna on rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x3, 0x3);// 0xd04[1:0] //analog to digital part2 on rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x60, 0x3); // 0x880[6:5] break; case eRfSleep: break; case eRfOff: //RF-A, RF-B //disable RF-Chip A/B rtl8192_setBBreg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x0); // 0x860[4] //analog to digital off, for power save rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);// 0x88c[11:8] //digital to analog off, for power save rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x18, 0x0); // 0x880[4:3] //rx antenna off rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0xf, 0x0);// 0xc04[3:0] //rx antenna off rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0xf, 0x0);// 0xd04[3:0] //analog to digital part2 off, for power save rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 
0x60, 0x0); // 0x880[6:5] break; default: bResult = false; RT_TRACE(COMP_ERR, "SetRFPowerState819xUsb(): unknow state to set: 0x%X!!!\n", eRFPowerState); break; } break; default: RT_TRACE(COMP_ERR, "Not support rf_chip(%x)\n", priv->rf_chip); break; } #ifdef TO_DO_LIST if(bResult) { // Update current RF state variable. pHalData->eRFPowerState = eRFPowerState; switch(pHalData->RFChipID ) { case RF_8256: switch(pHalData->eRFPowerState) { case eRfOff: // //If Rf off reason is from IPS, Led should blink with no link, by Maddest 071015 // if(pMgntInfo->RfOffReason==RF_CHANGE_BY_IPS ) { Adapter->HalFunc.LedControlHandler(Adapter,LED_CTL_NO_LINK); } else { // Turn off LED if RF is not ON. Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_POWER_OFF); } break; case eRfOn: // Turn on RF we are still linked, which might happen when // we quickly turn off and on HW RF. 2006.05.12, by rcnjko. if( pMgntInfo->bMediaConnect == TRUE ) { Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_LINK); } else { // Turn off LED if RF is not ON. Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_NO_LINK); } break; default: // do nothing. break; }// Switch RF state break; default: RT_TRACE(COMP_RF, DBG_LOUD, ("SetRFPowerState8190(): Unknown RF type\n")); break; } } #endif priv->SetRFPowerStateInProgress = false; return bResult; } /**************************************************************************************** *function: This function set command table variable(struct SwChnlCmd). * input: SwChnlCmd* CmdTable //table to be set. * u32 CmdTableIdx //variable index in table to be set * u32 CmdTableSz //table size. * SwChnlCmdID CmdID //command ID to set. 
* u32 Para1 * u32 Para2 * u32 msDelay * output: * return: true if finished, false otherwise * Note: * ************************************************************************************/ u8 rtl8192_phy_SetSwChnlCmdArray( SwChnlCmd* CmdTable, u32 CmdTableIdx, u32 CmdTableSz, SwChnlCmdID CmdID, u32 Para1, u32 Para2, u32 msDelay ) { SwChnlCmd* pCmd; if(CmdTable == NULL) { RT_TRACE(COMP_ERR, "phy_SetSwChnlCmdArray(): CmdTable cannot be NULL.\n"); return false; } if(CmdTableIdx >= CmdTableSz) { RT_TRACE(COMP_ERR, "phy_SetSwChnlCmdArray(): Access invalid index, please check size of the table, CmdTableIdx:%d, CmdTableSz:%d\n", CmdTableIdx, CmdTableSz); return false; } pCmd = CmdTable + CmdTableIdx; pCmd->CmdID = CmdID; pCmd->Para1 = Para1; pCmd->Para2 = Para2; pCmd->msDelay = msDelay; return true; } /****************************************************************************** *function: This function set channel step by step * input: struct net_device *dev * u8 channel * u8* stage //3 stages * u8* step // * u32* delay //whether need to delay * output: store new stage, step and delay for next step(combine with function above) * return: true if finished, false otherwise * Note: Wait for simpler function to replace it //wb * ***************************************************************************/ u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel, u8* stage, u8* step, u32* delay) { struct r8192_priv *priv = ieee80211_priv(dev); // PCHANNEL_ACCESS_SETTING pChnlAccessSetting; SwChnlCmd PreCommonCmd[MAX_PRECMD_CNT]; u32 PreCommonCmdCnt; SwChnlCmd PostCommonCmd[MAX_POSTCMD_CNT]; u32 PostCommonCmdCnt; SwChnlCmd RfDependCmd[MAX_RFDEPENDCMD_CNT]; u32 RfDependCmdCnt; SwChnlCmd *CurrentCmd = NULL; //RF90_RADIO_PATH_E eRFPath; u8 eRFPath; // u32 RfRetVal; // u8 RetryCnt; RT_TRACE(COMP_CH, "====>%s()====stage:%d, step:%d, channel:%d\n", __FUNCTION__, *stage, *step, channel); // RT_ASSERT(IsLegalChannel(Adapter, channel), ("illegal channel: %d\n", channel)); if 
(!IsLegalChannel(priv->ieee80211, channel)) { RT_TRACE(COMP_ERR, "=============>set to illegal channel:%d\n", channel); return true; //return true to tell upper caller function this channel setting is finished! Or it will in while loop. } //FIXME:need to check whether channel is legal or not here.WB //for(eRFPath = RF90_PATH_A; eRFPath <pHalData->NumTotalRFPath; eRFPath++) // for(eRFPath = 0; eRFPath <RF90_PATH_MAX; eRFPath++) // { // if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath)) // continue; // <1> Fill up pre common command. PreCommonCmdCnt = 0; rtl8192_phy_SetSwChnlCmdArray(PreCommonCmd, PreCommonCmdCnt++, MAX_PRECMD_CNT, CmdID_SetTxPowerLevel, 0, 0, 0); rtl8192_phy_SetSwChnlCmdArray(PreCommonCmd, PreCommonCmdCnt++, MAX_PRECMD_CNT, CmdID_End, 0, 0, 0); // <2> Fill up post common command. PostCommonCmdCnt = 0; rtl8192_phy_SetSwChnlCmdArray(PostCommonCmd, PostCommonCmdCnt++, MAX_POSTCMD_CNT, CmdID_End, 0, 0, 0); // <3> Fill up RF dependent command. RfDependCmdCnt = 0; switch( priv->rf_chip ) { case RF_8225: if (!(channel >= 1 && channel <= 14)) { RT_TRACE(COMP_ERR, "illegal channel for Zebra 8225: %d\n", channel); return true; } rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, CmdID_RF_WriteReg, rZebra1_Channel, RF_CHANNEL_TABLE_ZEBRA[channel], 10); rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, CmdID_End, 0, 0, 0); break; case RF_8256: // TEST!! This is not the table for 8256!! 
if (!(channel >= 1 && channel <= 14)) { RT_TRACE(COMP_ERR, "illegal channel for Zebra 8256: %d\n", channel); return true; } rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, CmdID_RF_WriteReg, rZebra1_Channel, channel, 10); rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, CmdID_End, 0, 0, 0); break; case RF_8258: break; default: RT_TRACE(COMP_ERR, "Unknown RFChipID: %d\n", priv->rf_chip); return true; break; } do{ switch(*stage) { case 0: CurrentCmd=&PreCommonCmd[*step]; break; case 1: CurrentCmd=&RfDependCmd[*step]; break; case 2: CurrentCmd=&PostCommonCmd[*step]; break; } if(CurrentCmd->CmdID==CmdID_End) { if((*stage)==2) { (*delay)=CurrentCmd->msDelay; return true; } else { (*stage)++; (*step)=0; continue; } } switch(CurrentCmd->CmdID) { case CmdID_SetTxPowerLevel: if(priv->card_8192_version == (u8)VERSION_819xU_A) //xiong: consider it later! rtl8192_SetTxPowerLevel(dev,channel); break; case CmdID_WritePortUlong: write_nic_dword(dev, CurrentCmd->Para1, CurrentCmd->Para2); break; case CmdID_WritePortUshort: write_nic_word(dev, CurrentCmd->Para1, (u16)CurrentCmd->Para2); break; case CmdID_WritePortUchar: write_nic_byte(dev, CurrentCmd->Para1, (u8)CurrentCmd->Para2); break; case CmdID_RF_WriteReg: for(eRFPath = 0; eRFPath < RF90_PATH_MAX; eRFPath++) { rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, CurrentCmd->Para1, bZebra1_ChannelNum, CurrentCmd->Para2); } break; default: break; } break; }while(true); // }/*for(Number of RF paths)*/ (*delay)=CurrentCmd->msDelay; (*step)++; return false; } /****************************************************************************** *function: This function does acturally set channel work * input: struct net_device *dev * u8 channel * output: none * return: noin * Note: We should not call this function directly * ***************************************************************************/ void rtl8192_phy_FinishSwChnlNow(struct net_device *dev, u8 channel) { 
struct r8192_priv *priv = ieee80211_priv(dev); u32 delay = 0; while(!rtl8192_phy_SwChnlStepByStep(dev,channel,&priv->SwChnlStage,&priv->SwChnlStep,&delay)) { // if(delay>0) // msleep(delay);//or mdelay? need further consideration if(!priv->up) break; } } /****************************************************************************** *function: Callback routine of the work item for switch channel. * input: * * output: none * return: noin * ***************************************************************************/ void rtl8192_SwChnl_WorkItem(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); RT_TRACE(COMP_CH, "==> SwChnlCallback819xUsbWorkItem(), chan:%d\n", priv->chan); rtl8192_phy_FinishSwChnlNow(dev , priv->chan); RT_TRACE(COMP_CH, "<== SwChnlCallback819xUsbWorkItem()\n"); } /****************************************************************************** *function: This function scheduled actural workitem to set channel * input: net_device dev * u8 channel //channel to set * output: none * return: return code show if workitem is scheduled(1:pass, 0:fail) * Note: Delay may be required for RF configuration * ***************************************************************************/ u8 rtl8192_phy_SwChnl(struct net_device* dev, u8 channel) { struct r8192_priv *priv = ieee80211_priv(dev); RT_TRACE(COMP_CH, "=====>%s(), SwChnlInProgress:%d\n", __FUNCTION__, priv->SwChnlInProgress); if(!priv->up) return false; if(priv->SwChnlInProgress) return false; // if(pHalData->SetBWModeInProgress) // return; if (0) //to test current channel from RF reg 0x7. 
{ u8 eRFPath; for(eRFPath = 0; eRFPath < 2; eRFPath++){ printk("====>set channel:%x\n",rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x7, bZebra1_ChannelNum)); udelay(10); } } //-------------------------------------------- switch(priv->ieee80211->mode) { case WIRELESS_MODE_A: case WIRELESS_MODE_N_5G: if (channel<=14){ RT_TRACE(COMP_ERR, "WIRELESS_MODE_A but channel<=14"); return false; } break; case WIRELESS_MODE_B: if (channel>14){ RT_TRACE(COMP_ERR, "WIRELESS_MODE_B but channel>14"); return false; } break; case WIRELESS_MODE_G: case WIRELESS_MODE_N_24G: if (channel>14){ RT_TRACE(COMP_ERR, "WIRELESS_MODE_G but channel>14"); return false; } break; } //-------------------------------------------- priv->SwChnlInProgress = true; if(channel == 0) channel = 1; priv->chan=channel; priv->SwChnlStage=0; priv->SwChnlStep=0; // schedule_work(&(priv->SwChnlWorkItem)); // rtl8192_SwChnl_WorkItem(dev); if(priv->up) { // queue_work(priv->priv_wq,&(priv->SwChnlWorkItem)); rtl8192_SwChnl_WorkItem(dev); } priv->SwChnlInProgress = false; return true; } // /****************************************************************************** *function: Callback routine of the work item for set bandwidth mode. * input: struct net_device *dev * HT_CHANNEL_WIDTH Bandwidth //20M or 40M * HT_EXTCHNL_OFFSET Offset //Upper, Lower, or Don't care * output: none * return: none * Note: I doubt whether SetBWModeInProgress flag is necessary as we can * test whether current work in the queue or not.//do I? 
* ***************************************************************************/ void rtl8192_SetBWModeWorkItem(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); u8 regBwOpMode; RT_TRACE(COMP_SWBW, "==>rtl8192_SetBWModeWorkItem() Switch to %s bandwidth\n", \ priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20?"20MHz":"40MHz") if(priv->rf_chip == RF_PSEUDO_11N) { priv->SetBWModeInProgress= false; return; } //<1>Set MAC register regBwOpMode = read_nic_byte(dev, BW_OPMODE); switch(priv->CurrentChannelBW) { case HT_CHANNEL_WIDTH_20: regBwOpMode |= BW_OPMODE_20MHZ; // 2007/02/07 Mark by Emily because we have not verify whether this register works write_nic_byte(dev, BW_OPMODE, regBwOpMode); break; case HT_CHANNEL_WIDTH_20_40: regBwOpMode &= ~BW_OPMODE_20MHZ; // 2007/02/07 Mark by Emily because we have not verify whether this register works write_nic_byte(dev, BW_OPMODE, regBwOpMode); break; default: RT_TRACE(COMP_ERR, "SetChannelBandwidth819xUsb(): unknown Bandwidth: %#X\n",priv->CurrentChannelBW); break; } //<2>Set PHY related register switch(priv->CurrentChannelBW) { case HT_CHANNEL_WIDTH_20: // Add by Vivi 20071119 rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x0); rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x0); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 1); // Correct the tx power for CCK rate in 20M. 
Suggest by YN, 20071207 priv->cck_present_attentuation = priv->cck_present_attentuation_20Mdefault + priv->cck_present_attentuation_difference; if(priv->cck_present_attentuation > 22) priv->cck_present_attentuation= 22; if(priv->cck_present_attentuation< 0) priv->cck_present_attentuation = 0; RT_TRACE(COMP_INIT, "20M, pHalData->CCKPresentAttentuation = %d\n", priv->cck_present_attentuation); if(priv->chan == 14 && !priv->bcck_in_ch14) { priv->bcck_in_ch14 = TRUE; dm_cck_txpower_adjust(dev,priv->bcck_in_ch14); } else if(priv->chan != 14 && priv->bcck_in_ch14) { priv->bcck_in_ch14 = FALSE; dm_cck_txpower_adjust(dev,priv->bcck_in_ch14); } else dm_cck_txpower_adjust(dev,priv->bcck_in_ch14); break; case HT_CHANNEL_WIDTH_20_40: // Add by Vivi 20071119 rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x1); rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x1); rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand, (priv->nCur40MhzPrimeSC>>1)); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0); rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00, priv->nCur40MhzPrimeSC); priv->cck_present_attentuation = priv->cck_present_attentuation_40Mdefault + priv->cck_present_attentuation_difference; if(priv->cck_present_attentuation > 22) priv->cck_present_attentuation = 22; if(priv->cck_present_attentuation < 0) priv->cck_present_attentuation = 0; RT_TRACE(COMP_INIT, "40M, pHalData->CCKPresentAttentuation = %d\n", priv->cck_present_attentuation); if(priv->chan == 14 && !priv->bcck_in_ch14) { priv->bcck_in_ch14 = true; dm_cck_txpower_adjust(dev,priv->bcck_in_ch14); } else if(priv->chan!= 14 && priv->bcck_in_ch14) { priv->bcck_in_ch14 = false; dm_cck_txpower_adjust(dev,priv->bcck_in_ch14); } else dm_cck_txpower_adjust(dev,priv->bcck_in_ch14); break; default: RT_TRACE(COMP_ERR, "SetChannelBandwidth819xUsb(): unknown Bandwidth: %#X\n" ,priv->CurrentChannelBW); break; } //Skip over setting of J-mode in BB register here. Default value is "None J mode". 
Emily 20070315 //<3>Set RF related register switch( priv->rf_chip ) { case RF_8225: #ifdef TO_DO_LIST PHY_SetRF8225Bandwidth(Adapter, pHalData->CurrentChannelBW); #endif break; case RF_8256: PHY_SetRF8256Bandwidth(dev, priv->CurrentChannelBW); break; case RF_8258: // PHY_SetRF8258Bandwidth(); break; case RF_PSEUDO_11N: // Do Nothing break; default: RT_TRACE(COMP_ERR, "Unknown RFChipID: %d\n", priv->rf_chip); break; } priv->SetBWModeInProgress= false; RT_TRACE(COMP_SWBW, "<==SetBWMode819xUsb(), %d", atomic_read(&(priv->ieee80211->atm_swbw)) ); } /****************************************************************************** *function: This function schedules bandwidth switch work. * input: struct net_device *dev * HT_CHANNEL_WIDTH Bandwidth //20M or 40M * HT_EXTCHNL_OFFSET Offset //Upper, Lower, or Don't care * output: none * return: none * Note: I doubt whether SetBWModeInProgress flag is necessary as we can * test whether current work in the queue or not.//do I? * ***************************************************************************/ void rtl8192_SetBWMode(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset) { struct r8192_priv *priv = ieee80211_priv(dev); if(priv->SetBWModeInProgress) return; priv->SetBWModeInProgress= true; priv->CurrentChannelBW = Bandwidth; if(Offset==HT_EXTCHNL_OFFSET_LOWER) priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_UPPER; else if(Offset==HT_EXTCHNL_OFFSET_UPPER) priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_LOWER; else priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_DONT_CARE; //queue_work(priv->priv_wq, &(priv->SetBWModeWorkItem)); // schedule_work(&(priv->SetBWModeWorkItem)); rtl8192_SetBWModeWorkItem(dev); } void InitialGain819xUsb(struct net_device *dev, u8 Operation) { struct r8192_priv *priv = ieee80211_priv(dev); priv->InitialGainOperateType = Operation; if(priv->up) { queue_delayed_work(priv->priv_wq,&priv->initialgain_operate_wq,0); } } extern void InitialGainOperateWorkItemCallBack(struct 
work_struct *work) { struct delayed_work *dwork = container_of(work,struct delayed_work,work); struct r8192_priv *priv = container_of(dwork,struct r8192_priv,initialgain_operate_wq); struct net_device *dev = priv->ieee80211->dev; #define SCAN_RX_INITIAL_GAIN 0x17 #define POWER_DETECTION_TH 0x08 u32 BitMask; u8 initial_gain; u8 Operation; Operation = priv->InitialGainOperateType; switch(Operation) { case IG_Backup: RT_TRACE(COMP_SCAN, "IG_Backup, backup the initial gain.\n"); initial_gain = SCAN_RX_INITIAL_GAIN;//priv->DefaultInitialGain[0];// BitMask = bMaskByte0; if(dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM) rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); // FW DIG OFF priv->initgain_backup.xaagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1, BitMask); priv->initgain_backup.xbagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1, BitMask); priv->initgain_backup.xcagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1, BitMask); priv->initgain_backup.xdagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1, BitMask); BitMask = bMaskByte2; priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(dev, rCCK0_CCA, BitMask); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc50 is %x\n",priv->initgain_backup.xaagccore1); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc58 is %x\n",priv->initgain_backup.xbagccore1); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc60 is %x\n",priv->initgain_backup.xcagccore1); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc68 is %x\n",priv->initgain_backup.xdagccore1); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xa0a is %x\n",priv->initgain_backup.cca); RT_TRACE(COMP_SCAN, "Write scan initial gain = 0x%x \n", initial_gain); write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain); write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain); write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain); write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain); RT_TRACE(COMP_SCAN, "Write scan 0xa0a = 0x%x \n", POWER_DETECTION_TH); write_nic_byte(dev, 
0xa0a, POWER_DETECTION_TH); break; case IG_Restore: RT_TRACE(COMP_SCAN, "IG_Restore, restore the initial gain.\n"); BitMask = 0x7f; //Bit0~ Bit6 if(dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM) rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); // FW DIG OFF rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, BitMask, (u32)priv->initgain_backup.xaagccore1); rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, BitMask, (u32)priv->initgain_backup.xbagccore1); rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, BitMask, (u32)priv->initgain_backup.xcagccore1); rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, BitMask, (u32)priv->initgain_backup.xdagccore1); BitMask = bMaskByte2; rtl8192_setBBreg(dev, rCCK0_CCA, BitMask, (u32)priv->initgain_backup.cca); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc50 is %x\n",priv->initgain_backup.xaagccore1); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc58 is %x\n",priv->initgain_backup.xbagccore1); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc60 is %x\n",priv->initgain_backup.xcagccore1); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc68 is %x\n",priv->initgain_backup.xdagccore1); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xa0a is %x\n",priv->initgain_backup.cca); #ifdef RTL8190P SetTxPowerLevel8190(Adapter,priv->CurrentChannel); #endif #ifdef RTL8192E SetTxPowerLevel8190(Adapter,priv->CurrentChannel); #endif //#ifdef RTL8192U rtl8192_phy_setTxPower(dev,priv->ieee80211->current_network.channel); //#endif if(dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM) rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); // FW DIG ON break; default: RT_TRACE(COMP_SCAN, "Unknown IG Operation. \n"); break; } }
gpl-2.0
erikcas/kernel-tianchi
drivers/isdn/hardware/eicon/debug.c
9257
63069
#include "platform.h"
#include "pc.h"
#include "di_defs.h"
#include "debug_if.h"
#include "divasync.h"
#include "kst_ifc.h"
#include "maintidi.h"
#include "man_defs.h"

/* LOCALS */

/* Magic value stored at the head of the debug buffer and used to mark
   a registered debug handle (hDbg->next) — see DI_register(). */
#define DBG_MAGIC (0x47114711L)

/* Forward declarations. */
static void DI_register(void *arg);
static void DI_deregister(pDbgHandle hDbg);
static void DI_format(int do_lock, word id, int type, char *format, va_list argument_list);
static void DI_format_locked(word id, int type, char *format, va_list argument_list);
/* Intentional no-op stubs installed into client debug handles. */
static void DI_format_old(word id, char *format, va_list ap) { }
static void DiProcessEventLog(unsigned short id, unsigned long msgID, va_list ap) { }
static void single_p(byte *P, word *PLength, byte Id);
static void diva_maint_xdi_cb(ENTITY *e);
static word SuperTraceCreateReadReq(byte *P, const char *path);
static int diva_mnt_cmp_nmbr(const char *nmbr);
static void diva_free_dma_descriptor(IDI_CALL request, int nr);
static int diva_get_dma_descriptor(IDI_CALL request, dword *dma_magic);
void diva_mnt_internal_dprintf(dword drv_id, dword type, char *p, ...);

/* Upper bounds for binary dump / xlog messages placed in the queue. */
static dword MaxDumpSize = 256;
static dword MaxXlogSize = 2 + 128;

/* Selective trace filter state: filter string plus the client ident and
   channel currently matched (-1 = no active selective trace). */
static char TraceFilter[DIVA_MAX_SELECTIVE_FILTER_LENGTH + 1];
static int TraceFilterIdent = -1;
static int TraceFilterChannel = -1;

/* Per-client bookkeeping for every registered debug driver / XDI adapter. */
typedef struct _diva_maint_client {
	dword sec;		/* registration time (seconds) */
	dword usec;		/* registration time (microseconds) */
	pDbgHandle hDbg;	/* debug handle; non-NULL while slot is in use */
	char drvName[128];	/* driver name, kept even after de-registration so a slot can be reused */
	dword dbgMask;
	dword last_dbgMask;	/* mask to restore if this driver re-registers */
	IDI_CALL request;	/* XDI request entry point (adapters only) */
	_DbgHandle_ Dbg;	/* in-place debug handle used for XDI adapters */
	int logical;		/* logical adapter number */
	int channels;
	diva_strace_library_interface_t *pIdiLib; /* management/trace library instance */
	BUFFERS XData;
	char xbuffer[2048 + 512]; /* request data buffer for management I/O */
	byte *pmem;		/* memory owned by pIdiLib; freed on de-registration */
	int request_pending;
	int dma_handle;		/* DMA descriptor, -1 when DMA is not used */
} diva_maint_client_t;
static diva_maint_client_t clients[MAX_DESCRIPTORS];

static void diva_change_management_debug_mask(diva_maint_client_t *pC, dword old_mask);
static void diva_maint_error(void *user_context, diva_strace_library_interface_t *hLib, int Adapter, int error, const char *file, int line);
static void diva_maint_state_change_notify(void *user_context, diva_strace_library_interface_t *hLib, int Adapter,
					   diva_trace_line_state_t *channel, int notify_subject);
static void diva_maint_trace_notify(void *user_context, diva_strace_library_interface_t *hLib, int Adapter, void *xlog_buffer, int length);

/* Single-producer message ring with wraparound: messages are appended at
   Tail and consumed at Head; Wrap marks where the last lap ended so a
   message is never split across the buffer boundary. */
typedef struct MSG_QUEUE {
	dword Size;	/* total size of queue (constant) */
	byte *Base;	/* lowest address (constant) */
	byte *High;	/* Base + Size (constant) */
	byte *Head;	/* first message in queue (if any) */
	byte *Tail;	/* first free position */
	byte *Wrap;	/* current wraparound position */
	dword Count;	/* current no of bytes in queue */
} MSG_QUEUE;

typedef struct MSG_HEAD {
	volatile dword Size;	/* size of data following MSG_HEAD */
#define MSG_INCOMPLETE 0x8000	/* ored to Size until queueCompleteMsg */
} MSG_HEAD;

/* Clear MSG_INCOMPLETE on the header preceding message body 'p',
   making the message visible to queuePeekMsg(). */
#define queueCompleteMsg(p) do { ((MSG_HEAD *)p - 1)->Size &= ~MSG_INCOMPLETE; } while (0)
#define queueCount(q) ((q)->Count)
/* Bytes needed for a message of 'size' data bytes: header + data,
   rounded up to dword alignment. */
#define MSG_NEED(size) \
	((sizeof(MSG_HEAD) + size + sizeof(dword) - 1) & ~(sizeof(dword) - 1))

/* Reset queue bookkeeping over the caller-supplied buffer. */
static void queueInit(MSG_QUEUE *Q, byte *Buffer, dword sizeBuffer) {
	Q->Size = sizeBuffer;
	Q->Base = Q->Head = Q->Tail = Buffer;
	Q->High = Buffer + sizeBuffer;
	Q->Wrap = NULL;
	Q->Count = 0;
}

static byte *queueAllocMsg(MSG_QUEUE *Q, word size) {
	/* Allocate 'size' bytes at tail of queue which will be filled later
	 * directly with callers own message header info and/or message.
	 * An 'alloced' message is marked incomplete by oring the 'Size' field
	 * with MSG_INCOMPLETE.
	 * This must be reset via queueCompleteMsg() after the message is filled.
	 * As long as a message is marked incomplete queuePeekMsg() will return
	 * a 'queue empty' condition when it reaches such a message.
	 */
	MSG_HEAD *Msg;
	word need = MSG_NEED(size);

	if (Q->Tail == Q->Head) {
		if (Q->Wrap || need > Q->Size) {
			return NULL; /* full */
		}
		goto alloc; /* empty */
	}

	if (Q->Tail > Q->Head) {
		if (Q->Tail + need <= Q->High) goto alloc; /* append */
		if (Q->Base + need > Q->Head) {
			return NULL; /* too much */
		}
		/* wraparound the queue (but not the message) */
		Q->Wrap = Q->Tail;
		Q->Tail = Q->Base;
		goto alloc;
	}

	if (Q->Tail + need > Q->Head) {
		return NULL; /* too much */
	}

alloc:
	Msg = (MSG_HEAD *)Q->Tail;
	Msg->Size = size | MSG_INCOMPLETE;

	Q->Tail += need;
	Q->Count += size;

	return ((byte *)(Msg + 1));
}

static void queueFreeMsg(MSG_QUEUE *Q) {
	/* Free the message at head of queue */
	word size = ((MSG_HEAD *)Q->Head)->Size & ~MSG_INCOMPLETE;

	Q->Head += MSG_NEED(size);
	Q->Count -= size;

	if (Q->Wrap) {
		/* Head reached the wrap point: continue at buffer base. */
		if (Q->Head >= Q->Wrap) {
			Q->Head = Q->Base;
			Q->Wrap = NULL;
		}
	} else if (Q->Head >= Q->Tail) {
		/* Queue drained: collapse pointers back to the base. */
		Q->Head = Q->Tail = Q->Base;
	}
}

static byte *queuePeekMsg(MSG_QUEUE *Q, word *size) {
	/* Show the first valid message in queue BUT DON'T free the message.
	 * After looking on the message contents it can be freed queueFreeMsg()
	 * or simply remain in message queue.
	 */
	MSG_HEAD *Msg = (MSG_HEAD *)Q->Head;

	if (((byte *)Msg == Q->Tail && !Q->Wrap) ||
	    (Msg->Size & MSG_INCOMPLETE)) {
		return NULL; /* empty, or head message still being filled */
	} else {
		*size = Msg->Size;
		return ((byte *)(Msg + 1));
	}
}

/* Message queue header */
static MSG_QUEUE *dbg_queue;
static byte *dbg_base;
static int external_dbg_queue;
static diva_os_spin_lock_t dbg_q_lock;		/* protects dbg_queue and clients[] */
static diva_os_spin_lock_t dbg_adapter_lock;	/* serializes adapter (XDI) operations */
static int dbg_q_busy;
static volatile dword dbg_sequence;
static dword start_sec;
static dword start_usec;

/* INTERFACE: Initialize run time queue structures.
   base: base of the message queue
   length: length of the message queue
   do_init: perform queue reset
   return: zero on success, -1 on error
*/
int diva_maint_init(byte *base, unsigned long length, int do_init) {
	if (dbg_queue || (!base) || (length < (4096 * 4))) {
		return (-1);
	}

	TraceFilter[0] = 0;
	TraceFilterIdent = -1;
	TraceFilterChannel = -1;

	dbg_base = base;

	diva_os_get_time(&start_sec, &start_usec);

	/* Lay out the buffer header: magic, a 2048-byte extension field,
	   a terminating zero dword and a self-pointer, then the queue. */
	*(dword *)base = (dword)DBG_MAGIC; /* Store Magic */
	base += sizeof(dword);
	length -= sizeof(dword);

	*(dword *)base = 2048; /* Extension Field Length */
	base += sizeof(dword);
	length -= sizeof(dword);

	strcpy(base, "KERNEL MODE BUFFER\n");
	base += 2048;
	length -= 2048;

	*(dword *)base = 0; /* Terminate extension */
	base += sizeof(dword);
	length -= sizeof(dword);

	*(void **)base = (void *)(base + sizeof(void *)); /* Store Base */
	base += sizeof(void *);
	length -= sizeof(void *);

	dbg_queue = (MSG_QUEUE *)base;
	queueInit(dbg_queue, base + sizeof(MSG_QUEUE), length - sizeof(MSG_QUEUE) - 512);
	external_dbg_queue = 0;

	if (!do_init) {
		external_dbg_queue = 1; /* memory was located on the external device */
	}

	if (diva_os_initialize_spin_lock(&dbg_q_lock, "dbg_init")) {
		dbg_queue = NULL;
		dbg_base = NULL;
		external_dbg_queue = 0;
		return (-1);
	}

	if (diva_os_initialize_spin_lock(&dbg_adapter_lock, "dbg_init")) {
		diva_os_destroy_spin_lock(&dbg_q_lock, "dbg_init");
		dbg_queue = NULL;
		dbg_base = NULL;
		external_dbg_queue = 0;
		return (-1);
	}

	return (0);
}

/* INTERFACE: Finit at unload time
   return address of internal queue or zero if queue
   was external */
void *diva_maint_finit(void) {
	void *ret = (void *)dbg_base;
	int i;

	dbg_queue = NULL;
	dbg_base = NULL;

	if (ret) {
		diva_os_destroy_spin_lock(&dbg_q_lock, "dbg_finit");
		diva_os_destroy_spin_lock(&dbg_adapter_lock, "dbg_finit");
	}

	if (external_dbg_queue) {
		/* externally provided memory is not ours to return */
		ret = NULL;
	}
	external_dbg_queue = 0;

	/* Release any trace-library memory still owned by client slots. */
	for (i = 1; i < ARRAY_SIZE(clients); i++) {
		if (clients[i].pmem) {
			diva_os_free(0, clients[i].pmem);
		}
	}

	return (ret);
}

/* INTERFACE: Return amount of
   messages in debug queue */
dword diva_dbg_q_length(void) {
	return (dbg_queue ? queueCount(dbg_queue) : 0);
}

/* INTERFACE: Lock message queue and return the pointer to the first
   entry. */
diva_dbg_entry_head_t *diva_maint_get_message(word *size, diva_os_spin_lock_magic_t *old_irql) {
	diva_dbg_entry_head_t *pmsg = NULL;

	diva_os_enter_spin_lock(&dbg_q_lock, old_irql, "read");
	if (dbg_q_busy) {
		/* another reader holds the queue */
		diva_os_leave_spin_lock(&dbg_q_lock, old_irql, "read_busy");
		return NULL;
	}
	dbg_q_busy = 1;

	if (!(pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, size))) {
		/* nothing to read — release busy flag and lock again */
		dbg_q_busy = 0;
		diva_os_leave_spin_lock(&dbg_q_lock, old_irql, "read_empty");
	}
	/* on success the spin lock stays held until diva_maint_ack_message() */
	return (pmsg);
}

/* INTERFACE: acknowledge last message and unlock queue */
void diva_maint_ack_message(int do_release, diva_os_spin_lock_magic_t *old_irql) {
	if (!dbg_q_busy) {
		return;
	}
	if (do_release) {
		queueFreeMsg(dbg_queue);
	}
	dbg_q_busy = 0;
	diva_os_leave_spin_lock(&dbg_q_lock, old_irql, "read_ack");
}

/* INTERFACE: PRT COMP function used to register
   with MAINT adapter or log in compatibility
   mode in case older driver version is connected too */
void diva_maint_prtComp(char *format, ...)
{
	void *hDbg;
	va_list ap;

	if (!format)
		return;

	va_start(ap, format);

	/* register to new log driver functions:
	   the magic prefix { 0x00, 0xff } marks a registration request,
	   the first vararg then carries the debug handle */
	if ((format[0] == 0) && ((unsigned char)format[1] == 255)) {
		hDbg = va_arg(ap, void *); /* ptr to DbgHandle */
		DI_register(hDbg);
	}

	va_end(ap);
}

/* Register a client debug handle in a free clients[] slot, install the
   DI_* callbacks in it and log the registration into the queue. */
static void DI_register(void *arg) {
	diva_os_spin_lock_magic_t old_irql;
	dword sec, usec;
	pDbgHandle hDbg;
	int id, free_id = -1, best_id = 0;

	diva_os_get_time(&sec, &usec);

	hDbg = (pDbgHandle)arg;
	/* Check for bad args, specially for the old obsolete debug handle */
	if ((hDbg == NULL) ||
	    ((hDbg->id == 0) && (((_OldDbgHandle_ *)hDbg)->id == -1)) ||
	    (hDbg->Registered != 0)) {
		return;
	}

	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "register");

	for (id = 1; id < ARRAY_SIZE(clients); id++) {
		if (clients[id].hDbg == hDbg) {
			/* driver already registered */
			diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
			return;
		}
		if (clients[id].hDbg) { /* slot is busy */
			continue;
		}
		free_id = id;
		if (!strcmp(clients[id].drvName, hDbg->drvName)) {
			/*
			  This driver was already registered with this name
			  and slot is still free - reuse it
			*/
			best_id = 1;
			break;
		}
		if (!clients[id].hDbg) { /* slot is free - take the first free slot */
			break;
		}
	}

	if (free_id != -1) {
		diva_dbg_entry_head_t *pmsg = NULL;
		int len;
		char tmp[256];
		word size;

		/* Register new driver with id == free_id */
		clients[free_id].hDbg = hDbg;
		clients[free_id].sec = sec;
		clients[free_id].usec = usec;
		strcpy(clients[free_id].drvName, hDbg->drvName);
		clients[free_id].dbgMask = hDbg->dbgMask;
		if (best_id) {
			/* reused slot: restore the mask from the previous registration */
			hDbg->dbgMask |= clients[free_id].last_dbgMask;
		} else {
			clients[free_id].last_dbgMask = 0;
		}
		hDbg->Registered = DBG_HANDLE_REG_NEW;
		hDbg->id = (byte)free_id;
		hDbg->dbg_end = DI_deregister;
		hDbg->dbg_prt = DI_format_locked;
		hDbg->dbg_ev = DiProcessEventLog;
		hDbg->dbg_irq = DI_format_locked;
		if (hDbg->Version > 0) {
			hDbg->dbg_old = DI_format_old;
		}
		hDbg->next = (pDbgHandle)DBG_MAGIC;

		/* Log driver register, MAINT driver ID is '0' */
		len = sprintf(tmp, "DIMAINT - drv # %d = '%s' registered",
			      free_id, hDbg->drvName);

		/* retry allocation, dropping the oldest queued message each time */
		while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
								       (word)(len + 1 + sizeof(*pmsg))))) {
			if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
				queueFreeMsg(dbg_queue);
			} else {
				break;
			}
		}

		if (pmsg) {
			pmsg->sequence = dbg_sequence++;
			pmsg->time_sec = sec;
			pmsg->time_usec = usec;
			pmsg->facility = MSG_TYPE_STRING;
			pmsg->dli = DLI_REG;
			pmsg->drv_id = 0; /* id 0 - DIMAINT */
			pmsg->di_cpu = 0;
			pmsg->data_length = len + 1;

			memcpy(&pmsg[1], tmp, len + 1);
			queueCompleteMsg(pmsg);
			diva_maint_wakeup_read();
		}
	}

	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
}

/* Undo DI_register(): clear the client slot and the handle's callbacks,
   free any attached trace-library instance and log the de-registration. */
static void DI_deregister(pDbgHandle hDbg) {
	diva_os_spin_lock_magic_t old_irql, old_irql1;
	dword sec, usec;
	int i;
	word size;
	byte *pmem = NULL;

	diva_os_get_time(&sec, &usec);

	diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "read");
	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "read");

	for (i = 1; i < ARRAY_SIZE(clients); i++) {
		if (clients[i].hDbg == hDbg) {
			diva_dbg_entry_head_t *pmsg;
			char tmp[256];
			int len;

			clients[i].hDbg = NULL;

			hDbg->id = -1;
			hDbg->dbgMask = 0;
			hDbg->dbg_end = NULL;
			hDbg->dbg_prt = NULL;
			hDbg->dbg_irq = NULL;
			if (hDbg->Version > 0)
				hDbg->dbg_old = NULL;
			hDbg->Registered = 0;
			hDbg->next = NULL;

			if (clients[i].pIdiLib) {
				(*(clients[i].pIdiLib->DivaSTraceLibraryFinit))(clients[i].pIdiLib->hLib);
				clients[i].pIdiLib = NULL;
				/* defer the free until the locks are dropped */
				pmem = clients[i].pmem;
				clients[i].pmem = NULL;
			}

			/* Log driver de-register, MAINT driver ID is '0' */
			len = sprintf(tmp, "DIMAINT - drv # %d = '%s' de-registered",
				      i, hDbg->drvName);

			while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
									       (word)(len + 1 + sizeof(*pmsg))))) {
				if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
					queueFreeMsg(dbg_queue);
				} else {
					break;
				}
			}

			if (pmsg) {
				pmsg->sequence = dbg_sequence++;
				pmsg->time_sec = sec;
				pmsg->time_usec = usec;
				pmsg->facility = MSG_TYPE_STRING;
				pmsg->dli = DLI_REG;
				pmsg->drv_id = 0; /* id 0 - DIMAINT */
				pmsg->di_cpu = 0;
				pmsg->data_length = len + 1;

				memcpy(&pmsg[1], tmp, len + 1);
				queueCompleteMsg(pmsg);
				diva_maint_wakeup_read();
			}
			break;
		}
	}

	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "read_ack");
	diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "read_ack");

	if (pmem) {
		diva_os_free(0, pmem);
	}
}

/* Locking wrapper around DI_format() — this is the callback installed
   as dbg_prt/dbg_irq in registered debug handles. */
static void DI_format_locked(unsigned short id, int type, char *format, va_list argument_list) {
	DI_format(1, id, type, format, argument_list);
}

/* Format one debug event of the given DLI_* type and append it to the
   message queue (binary dump, xlog record, or printf-style string). */
static void DI_format(int do_lock, unsigned short id, int type, char *format, va_list ap) {
	diva_os_spin_lock_magic_t old_irql;
	dword sec, usec;
	diva_dbg_entry_head_t *pmsg = NULL;
	dword length;
	word size;
	static char fmtBuf[MSG_FRAME_MAX_SIZE + sizeof(*pmsg) + 1];
	char *data;
	unsigned short code;

	if (diva_os_in_irq()) {
		/* not callable from IRQ context; count the event and drop it */
		dbg_sequence++;
		return;
	}

	if ((!format) ||
	    ((TraceFilter[0] != 0) && ((TraceFilterIdent < 0) || (TraceFilterChannel < 0)))) {
		return;
	}

	diva_os_get_time(&sec, &usec);

	if (do_lock) {
		diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "format");
	}

	switch (type) {
	case DLI_MXLOG:
	case DLI_BLK:
	case DLI_SEND:
	case DLI_RECV:
		/* binary dump: 'format' points at the data, first vararg is its length */
		if (!(length = va_arg(ap, unsigned long))) {
			break;
		}
		if (length > MaxDumpSize) {
			length = MaxDumpSize;
		}
		while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
								       (word)length + sizeof(*pmsg)))) {
			if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
				queueFreeMsg(dbg_queue);
			} else {
				break;
			}
		}
		if (pmsg) {
			memcpy(&pmsg[1], format, length);
			pmsg->sequence = dbg_sequence++;
			pmsg->time_sec = sec;
			pmsg->time_usec = usec;
			pmsg->facility = MSG_TYPE_BINARY;
			pmsg->dli = type; /* DLI_XXX */
			pmsg->drv_id = id; /* driver MAINT id */
			pmsg->di_cpu = 0;
			pmsg->data_length = length;
			queueCompleteMsg(pmsg);
		}
		break;

	case DLI_XLOG: {
		/* xlog record: varargs are (data, code, length); the 16-bit
		   code is stored little-endian in front of the data */
		byte *p;
		data = va_arg(ap, char *);
		code = (unsigned short)va_arg(ap, unsigned int);
		length = (unsigned long)va_arg(ap, unsigned int);
		if (length > MaxXlogSize)
			length = MaxXlogSize;
		while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
(word)length + sizeof(*pmsg) + 2))) { if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) { queueFreeMsg(dbg_queue); } else { break; } } if (pmsg) { p = (byte *)&pmsg[1]; p[0] = (char)(code); p[1] = (char)(code >> 8); if (data && length) { memcpy(&p[2], &data[0], length); } length += 2; pmsg->sequence = dbg_sequence++; pmsg->time_sec = sec; pmsg->time_usec = usec; pmsg->facility = MSG_TYPE_BINARY; pmsg->dli = type; /* DLI_XXX */ pmsg->drv_id = id; /* driver MAINT id */ pmsg->di_cpu = 0; pmsg->data_length = length; queueCompleteMsg(pmsg); } } break; case DLI_LOG: case DLI_FTL: case DLI_ERR: case DLI_TRC: case DLI_REG: case DLI_MEM: case DLI_SPL: case DLI_IRP: case DLI_TIM: case DLI_TAPI: case DLI_NDIS: case DLI_CONN: case DLI_STAT: case DLI_PRV0: case DLI_PRV1: case DLI_PRV2: case DLI_PRV3: if ((length = (unsigned long)vsprintf(&fmtBuf[0], format, ap)) > 0) { length += (sizeof(*pmsg) + 1); while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue, (word)length))) { if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) { queueFreeMsg(dbg_queue); } else { break; } } pmsg->sequence = dbg_sequence++; pmsg->time_sec = sec; pmsg->time_usec = usec; pmsg->facility = MSG_TYPE_STRING; pmsg->dli = type; /* DLI_XXX */ pmsg->drv_id = id; /* driver MAINT id */ pmsg->di_cpu = 0; pmsg->data_length = length - sizeof(*pmsg); memcpy(&pmsg[1], fmtBuf, pmsg->data_length); queueCompleteMsg(pmsg); } break; } /* switch type */ if (queueCount(dbg_queue)) { diva_maint_wakeup_read(); } if (do_lock) { diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "format"); } } /* Write driver ID and driver revision to callers buffer */ int diva_get_driver_info(dword id, byte *data, int data_length) { diva_os_spin_lock_magic_t old_irql; byte *p = data; int to_copy; if (!data || !id || (data_length < 17) || (id >= ARRAY_SIZE(clients))) { return (-1); } diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "driver info"); if (clients[id].hDbg) { *p++ = 1; *p++ = 
			(byte)clients[id].sec; /* save seconds */
		*p++ = (byte)(clients[id].sec >> 8);
		*p++ = (byte)(clients[id].sec >> 16);
		*p++ = (byte)(clients[id].sec >> 24);

		*p++ = (byte)(clients[id].usec / 1000); /* save mseconds */
		*p++ = (byte)((clients[id].usec / 1000) >> 8);
		*p++ = (byte)((clients[id].usec / 1000) >> 16);
		*p++ = (byte)((clients[id].usec / 1000) >> 24);

		data_length -= 9;

		/* append the driver name, truncated to the remaining space */
		if ((to_copy = min(strlen(clients[id].drvName), (size_t)(data_length - 1)))) {
			memcpy(p, clients[id].drvName, to_copy);
			p += to_copy;
			data_length -= to_copy;
			/* append "(drvTag)" if a tag is present and there is room */
			if ((data_length >= 4) && clients[id].hDbg->drvTag[0]) {
				*p++ = '(';
				data_length -= 1;
				if ((to_copy = min(strlen(clients[id].hDbg->drvTag), (size_t)(data_length - 2)))) {
					memcpy(p, clients[id].hDbg->drvTag, to_copy);
					p += to_copy;
					data_length -= to_copy;
					if (data_length >= 2) {
						*p++ = ')';
						data_length--;
					}
				}
			}
		}
	}
	*p++ = 0;

	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "driver info");

	return (p - data);
}

/* Copy the driver's 32-bit debug mask into 'data' (little-endian).
   Returns 4 (bytes written) or -1 if the slot is invalid/empty. */
int diva_get_driver_dbg_mask(dword id, byte *data) {
	diva_os_spin_lock_magic_t old_irql;
	int ret = -1;

	if (!data || !id || (id >= ARRAY_SIZE(clients))) {
		return (-1);
	}

	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "driver info");

	if (clients[id].hDbg) {
		ret = 4;
		*data++ = (byte)(clients[id].hDbg->dbgMask);
		*data++ = (byte)(clients[id].hDbg->dbgMask >> 8);
		*data++ = (byte)(clients[id].hDbg->dbgMask >> 16);
		*data++ = (byte)(clients[id].hDbg->dbgMask >> 24);
	}

	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "driver info");

	return (ret);
}

/* Set the driver's debug mask (bit 31 is reserved and masked off) and
   propagate the change to the management interface.
   Returns 4 on success or -1 if the slot is invalid/empty. */
int diva_set_driver_dbg_mask(dword id, dword mask) {
	diva_os_spin_lock_magic_t old_irql, old_irql1;
	int ret = -1;

	if (!id || (id >= ARRAY_SIZE(clients))) {
		return (-1);
	}

	diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "dbg mask");
	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "dbg mask");

	if (clients[id].hDbg) {
		dword old_mask = clients[id].hDbg->dbgMask;
		mask &= 0x7fffffff;
		clients[id].hDbg->dbgMask = mask;
		clients[id].last_dbgMask = (clients[id].hDbg->dbgMask | clients[id].dbgMask);
		ret = 4;
		diva_change_management_debug_mask(&clients[id], old_mask);
	}

	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "dbg mask");

	/* issue any request raised by the mask change outside the queue lock */
	if (clients[id].request_pending) {
		clients[id].request_pending = 0;
		(*(clients[id].request))((ENTITY *)(*(clients[id].pIdiLib->DivaSTraceGetHandle))(clients[id].pIdiLib->hLib));
	}

	diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "dbg mask");

	return (ret);
}

/* Query logical adapter number and serial number via two synchronous
   IDI requests against the adapter's request entry point. */
static int diva_get_idi_adapter_info(IDI_CALL request, dword *serial, dword *logical) {
	IDI_SYNC_REQ sync_req;

	sync_req.xdi_logical_adapter_number.Req = 0;
	sync_req.xdi_logical_adapter_number.Rc = IDI_SYNC_REQ_XDI_GET_LOGICAL_ADAPTER_NUMBER;
	(*request)((ENTITY *)&sync_req);
	*logical = sync_req.xdi_logical_adapter_number.info.logical_adapter_number;

	sync_req.GetSerial.Req = 0;
	sync_req.GetSerial.Rc = IDI_SYNC_REQ_GET_SERIAL;
	sync_req.GetSerial.serial = 0;
	(*request)((ENTITY *)&sync_req);
	*serial = sync_req.GetSerial.serial;

	return (0);
}

/* Register XDI adapter as MAINT compatible driver */
void diva_mnt_add_xdi_adapter(const DESCRIPTOR *d) {
	diva_os_spin_lock_magic_t old_irql, old_irql1;
	dword sec, usec, logical, serial, org_mask;
	int id, free_id = -1;
	char tmp[128];
	diva_dbg_entry_head_t *pmsg = NULL;
	int len;
	word size;
	byte *pmem;

	diva_os_get_time(&sec, &usec);
	diva_get_idi_adapter_info(d->request, &serial, &logical);

	/* build the slot name; high serial byte (if set) encodes a unit number */
	if (serial & 0xff000000) {
		sprintf(tmp, "ADAPTER:%d SN:%u-%d",
			(int)logical,
			serial & 0x00ffffff,
			(byte)(((serial & 0xff000000) >> 24) + 1));
	} else {
		sprintf(tmp, "ADAPTER:%d SN:%u", (int)logical, serial);
	}

	/* memory for the trace library instance, freed on de-registration */
	if (!(pmem = diva_os_malloc(0, DivaSTraceGetMemotyRequirement(d->channels)))) {
		return;
	}
	memset(pmem, 0x00, DivaSTraceGetMemotyRequirement(d->channels));

	diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "register");
	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "register");

	for (id = 1; id < ARRAY_SIZE(clients); id++) {
		if (clients[id].hDbg && (clients[id].request == d->request)) {
			/* adapter already registered */
			diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
			diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "register");
			diva_os_free(0, pmem);
			return;
		}
		if (clients[id].hDbg) { /* slot is busy */
			continue;
		}
		if (free_id < 0) {
			free_id = id;
		}
		if (!strcmp(clients[id].drvName, tmp)) {
			/*
			  This driver was already registered with this name
			  and slot is still free - reuse it
			*/
			free_id = id;
			break;
		}
	}

	if (free_id < 0) {
		/* no free slot */
		diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
		diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "register");
		diva_os_free(0, pmem);
		return;
	}

	id = free_id;
	clients[id].request = d->request;
	clients[id].request_pending = 0;
	clients[id].hDbg = &clients[id].Dbg;
	clients[id].sec = sec;
	clients[id].usec = usec;
	strcpy(clients[id].drvName, tmp);
	strcpy(clients[id].Dbg.drvName, tmp);
	clients[id].Dbg.drvTag[0] = 0;
	clients[id].logical = (int)logical;
	clients[id].channels = (int)d->channels;
	clients[id].dma_handle = -1;

	clients[id].Dbg.dbgMask = 0;
	clients[id].dbgMask = clients[id].Dbg.dbgMask;
	if (id) {
		/* restore the mask from a previous registration of this slot */
		clients[id].Dbg.dbgMask |= clients[free_id].last_dbgMask;
	} else {
		clients[id].last_dbgMask = 0;
	}
	clients[id].Dbg.Registered = DBG_HANDLE_REG_NEW;
	clients[id].Dbg.id = (byte)id;
	clients[id].Dbg.dbg_end = DI_deregister;
	clients[id].Dbg.dbg_prt = DI_format_locked;
	clients[id].Dbg.dbg_ev = DiProcessEventLog;
	clients[id].Dbg.dbg_irq = DI_format_locked;
	clients[id].Dbg.next = (pDbgHandle)DBG_MAGIC;

	{
		diva_trace_library_user_interface_t diva_maint_user_ifc = {
			&clients[id],
			diva_maint_state_change_notify,
			diva_maint_trace_notify,
			diva_maint_error };

		/* Attach to adapter management interface */
		if ((clients[id].pIdiLib =
		     DivaSTraceLibraryCreateInstance((int)logical, &diva_maint_user_ifc, pmem))) {
			if (((*(clients[id].pIdiLib->DivaSTraceLibraryStart))(clients[id].pIdiLib->hLib))) {
				diva_mnt_internal_dprintf(0, DLI_ERR, "Adapter(%d) Start failed", (int)logical);
				(*(clients[id].pIdiLib->DivaSTraceLibraryFinit))(clients[id].pIdiLib->hLib);
				clients[id].pIdiLib = NULL;
			}
		} else {
			diva_mnt_internal_dprintf(0, DLI_ERR, "A(%d) management init failed", (int)logical);
		}
	}

	if (!clients[id].pIdiLib) {
		/* management attach failed: roll the slot back */
		clients[id].request = NULL;
		clients[id].request_pending = 0;
		clients[id].hDbg = NULL;
		diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
		diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "register");
		diva_os_free(0, pmem);
		return;
	}

	/* Log driver register, MAINT driver ID is '0' */
	len = sprintf(tmp, "DIMAINT - drv # %d = '%s' registered",
		      id, clients[id].Dbg.drvName);

	while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
							       (word)(len + 1 + sizeof(*pmsg))))) {
		if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
			queueFreeMsg(dbg_queue);
		} else {
			break;
		}
	}

	if (pmsg) {
		pmsg->sequence = dbg_sequence++;
		pmsg->time_sec = sec;
		pmsg->time_usec = usec;
		pmsg->facility = MSG_TYPE_STRING;
		pmsg->dli = DLI_REG;
		pmsg->drv_id = 0; /* id 0 - DIMAINT */
		pmsg->di_cpu = 0;
		pmsg->data_length = len + 1;

		memcpy(&pmsg[1], tmp, len + 1);
		queueCompleteMsg(pmsg);
		diva_maint_wakeup_read();
	}

	/* apply the saved mask via diva_set_driver_dbg_mask() after unlock */
	org_mask = clients[id].Dbg.dbgMask;
	clients[id].Dbg.dbgMask = 0;

	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");

	if (clients[id].request_pending) {
		clients[id].request_pending = 0;
		(*(clients[id].request))((ENTITY *)(*(clients[id].pIdiLib->DivaSTraceGetHandle))(clients[id].pIdiLib->hLib));
	}

	diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "register");

	diva_set_driver_dbg_mask(id, org_mask);
}

/* De-Register XDI adapter */
void diva_mnt_remove_xdi_adapter(const DESCRIPTOR *d) {
	diva_os_spin_lock_magic_t old_irql, old_irql1;
	dword sec, usec;
	int i;
	word size;
	byte *pmem = NULL;

	diva_os_get_time(&sec, &usec);

	diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "read");
	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "read");

	for (i = 1; i < ARRAY_SIZE(clients); i++) {
		if (clients[i].hDbg && (clients[i].request == d->request)) {
			diva_dbg_entry_head_t *pmsg;
			char tmp[256];
			int len;

			if (clients[i].pIdiLib) {
				(*(clients[i].pIdiLib->DivaSTraceLibraryFinit))(clients[i].pIdiLib->hLib);
				clients[i].pIdiLib = NULL;
				/* defer the free until the locks are dropped */
				pmem = clients[i].pmem;
				clients[i].pmem = NULL;
			}

			clients[i].hDbg = NULL;
			clients[i].request_pending = 0;
			if (clients[i].dma_handle >= 0) {
				/* Free DMA handle */
				diva_free_dma_descriptor(clients[i].request, clients[i].dma_handle);
				clients[i].dma_handle = -1;
			}
			clients[i].request = NULL;

			/* Log driver de-register, MAINT driver ID is '0' */
			len = sprintf(tmp, "DIMAINT - drv # %d = '%s' de-registered",
				      i, clients[i].Dbg.drvName);

			memset(&clients[i].Dbg, 0x00, sizeof(clients[i].Dbg));

			while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
									       (word)(len + 1 + sizeof(*pmsg))))) {
				if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
					queueFreeMsg(dbg_queue);
				} else {
					break;
				}
			}

			if (pmsg) {
				pmsg->sequence = dbg_sequence++;
				pmsg->time_sec = sec;
				pmsg->time_usec = usec;
				pmsg->facility = MSG_TYPE_STRING;
				pmsg->dli = DLI_REG;
				pmsg->drv_id = 0; /* id 0 - DIMAINT */
				pmsg->di_cpu = 0;
				pmsg->data_length = len + 1;

				memcpy(&pmsg[1], tmp, len + 1);
				queueCompleteMsg(pmsg);
				diva_maint_wakeup_read();
			}
			break;
		}
	}

	diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "read_ack");
	diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "read_ack");

	if (pmem) {
		diva_os_free(0, pmem);
	}
}

/* ----------------------------------------------------------------
   Low level interface for management interface client
   ---------------------------------------------------------------- */
/* Return handle to client structure */
void *SuperTraceOpenAdapter(int AdapterNumber) {
	int i;

	for (i = 1; i < ARRAY_SIZE(clients); i++) {
		if (clients[i].hDbg && clients[i].request &&
		    (clients[i].logical == AdapterNumber)) {
			return (&clients[i]);
		}
	}

	return NULL;
}

int SuperTraceCloseAdapter(void *AdapterHandle) {
	/* nothing to release; the handle is a clients[] slot */
	return (0);
}

/* Build and issue a MAN_READ request for management variable 'name'.
   Returns 0 if the request was issued, -1 on an invalid handle. */
int SuperTraceReadRequest(void *AdapterHandle, const char *name, byte *data) {
	diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;

	if (pC && pC->pIdiLib && pC->request)
	{
		ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
		byte *xdata = (byte *)&pC->xbuffer[0];
		char tmp = 0;
		word length;

		if (!strcmp(name, "\\")) { /* Read ROOT */
			name = &tmp;
		}
		length = SuperTraceCreateReadReq(xdata, name);
		single_p(xdata, &length, 0); /* End Of Message */

		e->Req = MAN_READ;
		e->ReqCh = 0;
		e->X->PLength = length;
		e->X->P = (byte *)xdata;

		pC->request_pending = 1;

		return (0);
	}

	return (-1);
}

int SuperTraceGetNumberOfChannels(void *AdapterHandle) {
	if (AdapterHandle) {
		diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;
		return (pC->channels);
	}
	return (0);
}

/* Issue an ASSIGN for the management entity (MAN_ID). If the adapter
   advertises management DMA, a DMA descriptor is allocated and its
   parameters are passed in the assign data (LLI information element). */
int SuperTraceASSIGN(void *AdapterHandle, byte *data) {
	diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;

	if (pC && pC->pIdiLib && pC->request) {
		ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
		IDI_SYNC_REQ *preq;
		char buffer[((sizeof(preq->xdi_extended_features) + 4) > sizeof(ENTITY)) ? (sizeof(preq->xdi_extended_features) + 4) : sizeof(ENTITY)];
		char features[4];
		word assign_data_length = 1;

		features[0] = 0;
		pC->xbuffer[0] = 0;
		preq = (IDI_SYNC_REQ *)&buffer[0];
		preq->xdi_extended_features.Req = 0;
		preq->xdi_extended_features.Rc = IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES;
		preq->xdi_extended_features.info.buffer_length_in_bytes = sizeof(features);
		preq->xdi_extended_features.info.features = &features[0];

		(*(pC->request))((ENTITY *)preq);

		if ((features[0] & DIVA_XDI_EXTENDED_FEATURES_VALID) &&
		    (features[0] & DIVA_XDI_EXTENDED_FEATURE_MANAGEMENT_DMA)) {
			dword uninitialized_var(rx_dma_magic);
			if ((pC->dma_handle = diva_get_dma_descriptor(pC->request, &rx_dma_magic)) >= 0) {
				/* LLI element: handle, 32-bit magic, max transfer size */
				pC->xbuffer[0] = LLI;
				pC->xbuffer[1] = 8;
				pC->xbuffer[2] = 0x40;
				pC->xbuffer[3] = (byte)pC->dma_handle;
				pC->xbuffer[4] = (byte)rx_dma_magic;
				pC->xbuffer[5] = (byte)(rx_dma_magic >>  8);
				pC->xbuffer[6] = (byte)(rx_dma_magic >> 16);
				pC->xbuffer[7] = (byte)(rx_dma_magic >> 24);
				pC->xbuffer[8] = (byte)(DIVA_MAX_MANAGEMENT_TRANSFER_SIZE & 0xFF);
				pC->xbuffer[9]  = (byte)(DIVA_MAX_MANAGEMENT_TRANSFER_SIZE >> 8);
				pC->xbuffer[10] = 0;

				assign_data_length = 11;
			}
		} else {
			pC->dma_handle = -1;
		}

		e->Id = MAN_ID;
		e->callback = diva_maint_xdi_cb;
		e->XNum = 1;
		e->X = &pC->XData;
		e->Req = ASSIGN;
		e->ReqCh = 0;
		e->X->PLength = assign_data_length;
		e->X->P = (byte *)&pC->xbuffer[0];

		pC->request_pending = 1;

		return (0);
	}

	return (-1);
}

/* Issue a REMOVE request for the management entity. */
int SuperTraceREMOVE(void *AdapterHandle) {
	diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;

	if (pC && pC->pIdiLib && pC->request) {
		ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);

		e->XNum = 1;
		e->X = &pC->XData;
		e->Req = REMOVE;
		e->ReqCh = 0;
		e->X->PLength = 1;
		e->X->P = (byte *)&pC->xbuffer[0];
		pC->xbuffer[0] = 0;

		pC->request_pending = 1;

		return (0);
	}

	return (-1);
}

/* Enable event notification (MAN_EVENT_ON) for management variable 'name'. */
int SuperTraceTraceOnRequest(void *hAdapter, const char *name, byte *data) {
	diva_maint_client_t *pC = (diva_maint_client_t *)hAdapter;

	if (pC && pC->pIdiLib && pC->request) {
		ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
		byte *xdata = (byte *)&pC->xbuffer[0];
		char tmp = 0;
		word length;

		if (!strcmp(name, "\\")) { /* Read ROOT */
			name = &tmp;
		}
		length = SuperTraceCreateReadReq(xdata, name);
		single_p(xdata, &length, 0); /* End Of Message */
		e->Req = MAN_EVENT_ON;
		e->ReqCh = 0;
		e->X->PLength = length;
		e->X->P = (byte *)xdata;

		pC->request_pending = 1;

		return (0);
	}

	return (-1);
}

/* Write 'var_length' bytes of 'var' (of management type 'type') to the
   management variable 'name' via MAN_WRITE. */
int SuperTraceWriteVar(void *AdapterHandle, byte *data, const char *name, void *var, byte type, byte var_length) {
	diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;

	if (pC && pC->pIdiLib && pC->request) {
		ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
		diva_man_var_header_t *pVar = (diva_man_var_header_t *)&pC->xbuffer[0];
		word length = SuperTraceCreateReadReq((byte *)pVar, name);

		/* append the value and patch the variable header accordingly */
		memcpy(&pC->xbuffer[length], var, var_length);
		length += var_length;
		pVar->length += var_length;
		pVar->value_length = var_length;
		pVar->type = type;
		single_p((byte *)pVar, &length, 0); /* End Of Message */

		e->Req = MAN_WRITE;
		e->ReqCh = 0;
		e->X->PLength = length;
		e->X->P = (byte *)pVar;

		pC->request_pending = 1;

		return (0);
	}

	return (-1);
}

/* Invoke a management procedure 'name' via MAN_EXECUTE. */
int SuperTraceExecuteRequest(void *AdapterHandle, const char *name, byte *data) {
	diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;

	if (pC && pC->pIdiLib && pC->request) {
		ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
		byte *xdata = (byte *)&pC->xbuffer[0];
		word length;

		length = SuperTraceCreateReadReq(xdata, name);
		single_p(xdata, &length, 0); /* End Of Message */

		e->Req = MAN_EXECUTE;
		e->ReqCh = 0;
		e->X->PLength = length;
		e->X->P = (byte *)xdata;

		pC->request_pending = 1;

		return (0);
	}

	return (-1);
}

/* Serialize a MAN_IE read request for 'path' into P.
   Writes ESC, total IE length, the fixed MAN_IE header bytes, then the
   path; returns the total number of bytes written (path + 8). */
static word SuperTraceCreateReadReq(byte *P, const char *path) {
	byte var_length;
	byte *plen;

	var_length = (byte)strlen(path);

	*P++ = ESC;
	plen = P++;		/* length byte, patched below */
	*P++ = 0x80;		/* MAN_IE */
	*P++ = 0x00;		/* Type */
	*P++ = 0x00;		/* Attribute */
	*P++ = 0x00;		/* Status */
	*P++ = 0x00;		/* Variable Length */
	*P++ = var_length;
	memcpy(P, path, var_length);
	P += var_length;
	*plen = var_length + 0x06;

	return ((word)(var_length + 0x08));
}

/* Append a single byte 'Id' to P and bump *PLength. */
static void single_p(byte *P, word *PLength, byte Id) {
	P[(*PLength)++] = Id;
}

/* IDI callback for the management entity: feed the indication into the
   trace library, then issue any request it raised. */
static void diva_maint_xdi_cb(ENTITY *e) {
	diva_strace_context_t *pLib = DIVAS_CONTAINING_RECORD(e, diva_strace_context_t, e);
	diva_maint_client_t *pC;
	diva_os_spin_lock_magic_t old_irql, old_irql1;

	diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "xdi_cb");
	diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "xdi_cb");

	pC = (diva_maint_client_t *)pLib->hAdapter;

	if ((e->complete == 255) || (pC->dma_handle < 0)) {
		if ((*(pLib->instance.DivaSTraceMessageInput))(&pLib->instance)) {
			diva_mnt_internal_dprintf(0, DLI_ERR, "Trace internal library error");
		}
	} else {
		/* Process combined management interface indication */
		if ((*(pLib->instance.DivaSTraceMessageInput))(&pLib->instance)) {
			diva_mnt_internal_dprintf(0, DLI_ERR, "Trace internal
library error (DMA mode)"); } } diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "xdi_cb"); if (pC->request_pending) { pC->request_pending = 0; (*(pC->request))(e); } diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "xdi_cb"); } static void diva_maint_error(void *user_context, diva_strace_library_interface_t *hLib, int Adapter, int error, const char *file, int line) { diva_mnt_internal_dprintf(0, DLI_ERR, "Trace library error(%d) A(%d) %s %d", error, Adapter, file, line); } static void print_ie(diva_trace_ie_t *ie, char *buffer, int length) { int i; buffer[0] = 0; if (length > 32) { for (i = 0; ((i < ie->length) && (length > 3)); i++) { sprintf(buffer, "%02x", ie->data[i]); buffer += 2; length -= 2; if (i < (ie->length - 1)) { strcpy(buffer, " "); buffer++; length--; } } } } static void diva_maint_state_change_notify(void *user_context, diva_strace_library_interface_t *hLib, int Adapter, diva_trace_line_state_t *channel, int notify_subject) { diva_maint_client_t *pC = (diva_maint_client_t *)user_context; diva_trace_fax_state_t *fax = &channel->fax; diva_trace_modem_state_t *modem = &channel->modem; char tmp[256]; if (!pC->hDbg) { return; } switch (notify_subject) { case DIVA_SUPER_TRACE_NOTIFY_LINE_CHANGE: { int view = (TraceFilter[0] == 0); /* Process selective Trace */ if (channel->Line[0] == 'I' && channel->Line[1] == 'd' && channel->Line[2] == 'l' && channel->Line[3] == 'e') { if ((TraceFilterIdent == pC->hDbg->id) && (TraceFilterChannel == (int)channel->ChannelNumber)) { (*(hLib->DivaSTraceSetBChannel))(hLib, (int)channel->ChannelNumber, 0); (*(hLib->DivaSTraceSetAudioTap))(hLib, (int)channel->ChannelNumber, 0); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Selective Trace OFF for Ch=%d", (int)channel->ChannelNumber); TraceFilterIdent = -1; TraceFilterChannel = -1; view = 1; } } else if (TraceFilter[0] && (TraceFilterIdent < 0) && !(diva_mnt_cmp_nmbr(&channel->RemoteAddress[0]) && diva_mnt_cmp_nmbr(&channel->LocalAddress[0]))) { if ((pC->hDbg->dbgMask 
& DIVA_MGT_DBG_IFC_BCHANNEL) != 0) { /* Activate B-channel trace */ (*(hLib->DivaSTraceSetBChannel))(hLib, (int)channel->ChannelNumber, 1); } if ((pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_AUDIO) != 0) { /* Activate AudioTap Trace */ (*(hLib->DivaSTraceSetAudioTap))(hLib, (int)channel->ChannelNumber, 1); } TraceFilterIdent = pC->hDbg->id; TraceFilterChannel = (int)channel->ChannelNumber; if (TraceFilterIdent >= 0) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Selective Trace ON for Ch=%d", (int)channel->ChannelNumber); view = 1; } } if (view && (pC->hDbg->dbgMask & DIVA_MGT_DBG_LINE_EVENTS)) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Ch = %d", (int)channel->ChannelNumber); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Status = <%s>", &channel->Line[0]); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Layer1 = <%s>", &channel->Framing[0]); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Layer2 = <%s>", &channel->Layer2[0]); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Layer3 = <%s>", &channel->Layer3[0]); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L RAddr = <%s>", &channel->RemoteAddress[0]); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L RSAddr = <%s>", &channel->RemoteSubAddress[0]); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L LAddr = <%s>", &channel->LocalAddress[0]); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L LSAddr = <%s>", &channel->LocalSubAddress[0]); print_ie(&channel->call_BC, tmp, sizeof(tmp)); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L BC = <%s>", tmp); print_ie(&channel->call_HLC, tmp, sizeof(tmp)); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L HLC = <%s>", tmp); print_ie(&channel->call_LLC, tmp, sizeof(tmp)); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L LLC = <%s>", tmp); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L CR = 0x%x", channel->CallReference); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Disc = 0x%x", channel->LastDisconnecCause); 
diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Owner = <%s>", &channel->UserID[0]); } } break; case DIVA_SUPER_TRACE_NOTIFY_MODEM_CHANGE: if (pC->hDbg->dbgMask & DIVA_MGT_DBG_MDM_PROGRESS) { { int ch = TraceFilterChannel; int id = TraceFilterIdent; if ((id >= 0) && (ch >= 0) && (id < ARRAY_SIZE(clients)) && (clients[id].Dbg.id == (byte)id) && (clients[id].pIdiLib == hLib)) { if (ch != (int)modem->ChannelNumber) { break; } } else if (TraceFilter[0] != 0) { break; } } diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Ch = %lu", (int)modem->ChannelNumber); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Event = %lu", modem->Event); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Norm = %lu", modem->Norm); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Opts. = 0x%08x", modem->Options); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Tx = %lu Bps", modem->TxSpeed); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Rx = %lu Bps", modem->RxSpeed); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM RT = %lu mSec", modem->RoundtripMsec); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Sr = %lu", modem->SymbolRate); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Rxl = %d dBm", modem->RxLeveldBm); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM El = %d dBm", modem->EchoLeveldBm); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM SNR = %lu dB", modem->SNRdb); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM MAE = %lu", modem->MAE); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM LRet = %lu", modem->LocalRetrains); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM RRet = %lu", modem->RemoteRetrains); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM LRes = %lu", modem->LocalResyncs); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM RRes = %lu", modem->RemoteResyncs); if (modem->Event == 3) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Disc = %lu", 
modem->DiscReason); } } if ((modem->Event == 3) && (pC->hDbg->dbgMask & DIVA_MGT_DBG_MDM_STATISTICS)) { (*(pC->pIdiLib->DivaSTraceGetModemStatistics))(pC->pIdiLib); } break; case DIVA_SUPER_TRACE_NOTIFY_FAX_CHANGE: if (pC->hDbg->dbgMask & DIVA_MGT_DBG_FAX_PROGRESS) { { int ch = TraceFilterChannel; int id = TraceFilterIdent; if ((id >= 0) && (ch >= 0) && (id < ARRAY_SIZE(clients)) && (clients[id].Dbg.id == (byte)id) && (clients[id].pIdiLib == hLib)) { if (ch != (int)fax->ChannelNumber) { break; } } else if (TraceFilter[0] != 0) { break; } } diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Ch = %lu", (int)fax->ChannelNumber); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Event = %lu", fax->Event); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Pages = %lu", fax->Page_Counter); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Feat. = 0x%08x", fax->Features); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX ID = <%s>", &fax->Station_ID[0]); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Saddr = <%s>", &fax->Subaddress[0]); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Pwd = <%s>", &fax->Password[0]); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Speed = %lu", fax->Speed); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Res. 
= 0x%08x", fax->Resolution); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Width = %lu", fax->Paper_Width); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Length= %lu", fax->Paper_Length); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX SLT = %lu", fax->Scanline_Time); if (fax->Event == 3) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Disc = %lu", fax->Disc_Reason); } } if ((fax->Event == 3) && (pC->hDbg->dbgMask & DIVA_MGT_DBG_FAX_STATISTICS)) { (*(pC->pIdiLib->DivaSTraceGetFaxStatistics))(pC->pIdiLib); } break; case DIVA_SUPER_TRACE_INTERFACE_CHANGE: if (pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_EVENTS) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "Layer 1 -> [%s]", channel->pInterface->Layer1); diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "Layer 2 -> [%s]", channel->pInterface->Layer2); } break; case DIVA_SUPER_TRACE_NOTIFY_STAT_CHANGE: if (pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_STATISTICS) { /* Incoming Statistics */ if (channel->pInterfaceStat->inc.Calls) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Inc Calls =%lu", channel->pInterfaceStat->inc.Calls); } if (channel->pInterfaceStat->inc.Connected) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Inc Connected =%lu", channel->pInterfaceStat->inc.Connected); } if (channel->pInterfaceStat->inc.User_Busy) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Inc Busy =%lu", channel->pInterfaceStat->inc.User_Busy); } if (channel->pInterfaceStat->inc.Call_Rejected) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Inc Rejected =%lu", channel->pInterfaceStat->inc.Call_Rejected); } if (channel->pInterfaceStat->inc.Wrong_Number) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Inc Wrong Nr =%lu", channel->pInterfaceStat->inc.Wrong_Number); } if (channel->pInterfaceStat->inc.Incompatible_Dst) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Inc Incomp. 
Dest =%lu", channel->pInterfaceStat->inc.Incompatible_Dst); } if (channel->pInterfaceStat->inc.Out_of_Order) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Inc Out of Order =%lu", channel->pInterfaceStat->inc.Out_of_Order); } if (channel->pInterfaceStat->inc.Ignored) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Inc Ignored =%lu", channel->pInterfaceStat->inc.Ignored); } /* Outgoing Statistics */ if (channel->pInterfaceStat->outg.Calls) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Outg Calls =%lu", channel->pInterfaceStat->outg.Calls); } if (channel->pInterfaceStat->outg.Connected) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Outg Connected =%lu", channel->pInterfaceStat->outg.Connected); } if (channel->pInterfaceStat->outg.User_Busy) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Outg Busy =%lu", channel->pInterfaceStat->outg.User_Busy); } if (channel->pInterfaceStat->outg.No_Answer) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Outg No Answer =%lu", channel->pInterfaceStat->outg.No_Answer); } if (channel->pInterfaceStat->outg.Wrong_Number) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Outg Wrong Nr =%lu", channel->pInterfaceStat->outg.Wrong_Number); } if (channel->pInterfaceStat->outg.Call_Rejected) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Outg Rejected =%lu", channel->pInterfaceStat->outg.Call_Rejected); } if (channel->pInterfaceStat->outg.Other_Failures) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Outg Other Failures =%lu", channel->pInterfaceStat->outg.Other_Failures); } } break; case DIVA_SUPER_TRACE_NOTIFY_MDM_STAT_CHANGE: if (channel->pInterfaceStat->mdm.Disc_Normal) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "MDM Disc Normal = %lu", channel->pInterfaceStat->mdm.Disc_Normal); } if (channel->pInterfaceStat->mdm.Disc_Unspecified) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "MDM Disc Unsp. 
= %lu", channel->pInterfaceStat->mdm.Disc_Unspecified); } if (channel->pInterfaceStat->mdm.Disc_Busy_Tone) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "MDM Disc Busy Tone = %lu", channel->pInterfaceStat->mdm.Disc_Busy_Tone); } if (channel->pInterfaceStat->mdm.Disc_Congestion) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "MDM Disc Congestion = %lu", channel->pInterfaceStat->mdm.Disc_Congestion); } if (channel->pInterfaceStat->mdm.Disc_Carr_Wait) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "MDM Disc Carrier Wait = %lu", channel->pInterfaceStat->mdm.Disc_Carr_Wait); } if (channel->pInterfaceStat->mdm.Disc_Trn_Timeout) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "MDM Disc Trn. T.o. = %lu", channel->pInterfaceStat->mdm.Disc_Trn_Timeout); } if (channel->pInterfaceStat->mdm.Disc_Incompat) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "MDM Disc Incompatible = %lu", channel->pInterfaceStat->mdm.Disc_Incompat); } if (channel->pInterfaceStat->mdm.Disc_Frame_Rej) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "MDM Disc Frame Reject = %lu", channel->pInterfaceStat->mdm.Disc_Frame_Rej); } if (channel->pInterfaceStat->mdm.Disc_V42bis) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "MDM Disc V.42bis = %lu", channel->pInterfaceStat->mdm.Disc_V42bis); } break; case DIVA_SUPER_TRACE_NOTIFY_FAX_STAT_CHANGE: if (channel->pInterfaceStat->fax.Disc_Normal) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc Normal = %lu", channel->pInterfaceStat->fax.Disc_Normal); } if (channel->pInterfaceStat->fax.Disc_Not_Ident) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc Not Ident. 
= %lu", channel->pInterfaceStat->fax.Disc_Not_Ident); } if (channel->pInterfaceStat->fax.Disc_No_Response) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc No Response = %lu", channel->pInterfaceStat->fax.Disc_No_Response); } if (channel->pInterfaceStat->fax.Disc_Retries) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc Max Retries = %lu", channel->pInterfaceStat->fax.Disc_Retries); } if (channel->pInterfaceStat->fax.Disc_Unexp_Msg) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Unexp. Msg. = %lu", channel->pInterfaceStat->fax.Disc_Unexp_Msg); } if (channel->pInterfaceStat->fax.Disc_No_Polling) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc No Polling = %lu", channel->pInterfaceStat->fax.Disc_No_Polling); } if (channel->pInterfaceStat->fax.Disc_Training) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc Training = %lu", channel->pInterfaceStat->fax.Disc_Training); } if (channel->pInterfaceStat->fax.Disc_Unexpected) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc Unexpected = %lu", channel->pInterfaceStat->fax.Disc_Unexpected); } if (channel->pInterfaceStat->fax.Disc_Application) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc Application = %lu", channel->pInterfaceStat->fax.Disc_Application); } if (channel->pInterfaceStat->fax.Disc_Incompat) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc Incompatible = %lu", channel->pInterfaceStat->fax.Disc_Incompat); } if (channel->pInterfaceStat->fax.Disc_No_Command) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc No Command = %lu", channel->pInterfaceStat->fax.Disc_No_Command); } if (channel->pInterfaceStat->fax.Disc_Long_Msg) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc Long Msg. 
= %lu", channel->pInterfaceStat->fax.Disc_Long_Msg); } if (channel->pInterfaceStat->fax.Disc_Supervisor) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc Supervisor = %lu", channel->pInterfaceStat->fax.Disc_Supervisor); } if (channel->pInterfaceStat->fax.Disc_SUB_SEP_PWD) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc SUP SEP PWD = %lu", channel->pInterfaceStat->fax.Disc_SUB_SEP_PWD); } if (channel->pInterfaceStat->fax.Disc_Invalid_Msg) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc Invalid Msg. = %lu", channel->pInterfaceStat->fax.Disc_Invalid_Msg); } if (channel->pInterfaceStat->fax.Disc_Page_Coding) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc Page Coding = %lu", channel->pInterfaceStat->fax.Disc_Page_Coding); } if (channel->pInterfaceStat->fax.Disc_App_Timeout) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc Appl. T.o. = %lu", channel->pInterfaceStat->fax.Disc_App_Timeout); } if (channel->pInterfaceStat->fax.Disc_Unspecified) { diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "FAX Disc Unspec. = %lu", channel->pInterfaceStat->fax.Disc_Unspecified); } break; } } /* Receive trace information from the Management Interface and store it in the internal trace buffer with MSG_TYPE_MLOG as is, without any filtering. Event Filtering and formatting is done in Management Interface self. 
*/ static void diva_maint_trace_notify(void *user_context, diva_strace_library_interface_t *hLib, int Adapter, void *xlog_buffer, int length) { diva_maint_client_t *pC = (diva_maint_client_t *)user_context; diva_dbg_entry_head_t *pmsg; word size; dword sec, usec; int ch = TraceFilterChannel; int id = TraceFilterIdent; /* Selective trace */ if ((id >= 0) && (ch >= 0) && (id < ARRAY_SIZE(clients)) && (clients[id].Dbg.id == (byte)id) && (clients[id].pIdiLib == hLib)) { const char *p = NULL; int ch_value = -1; MI_XLOG_HDR *TrcData = (MI_XLOG_HDR *)xlog_buffer; if (Adapter != clients[id].logical) { return; /* Ignore all trace messages from other adapters */ } if (TrcData->code == 24) { p = (char *)&TrcData->code; p += 2; } /* All L1 messages start as [dsp,ch], so we can filter this information and filter out all messages that use different channel */ if (p && p[0] == '[') { if (p[2] == ',') { p += 3; ch_value = *p - '0'; } else if (p[3] == ',') { p += 4; ch_value = *p - '0'; } if (ch_value >= 0) { if (p[2] == ']') { ch_value = ch_value * 10 + p[1] - '0'; } if (ch_value != ch) { return; /* Ignore other channels */ } } } } else if (TraceFilter[0] != 0) { return; /* Ignore trace if trace filter is activated, but idle */ } diva_os_get_time(&sec, &usec); while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue, (word)length + sizeof(*pmsg)))) { if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) { queueFreeMsg(dbg_queue); } else { break; } } if (pmsg) { memcpy(&pmsg[1], xlog_buffer, length); pmsg->sequence = dbg_sequence++; pmsg->time_sec = sec; pmsg->time_usec = usec; pmsg->facility = MSG_TYPE_MLOG; pmsg->dli = pC->logical; pmsg->drv_id = pC->hDbg->id; pmsg->di_cpu = 0; pmsg->data_length = length; queueCompleteMsg(pmsg); if (queueCount(dbg_queue)) { diva_maint_wakeup_read(); } } } /* Convert MAINT trace mask to management interface trace mask/work/facility and issue command to management interface */ static void 
diva_change_management_debug_mask(diva_maint_client_t *pC, dword old_mask) { if (pC->request && pC->hDbg && pC->pIdiLib) { dword changed = pC->hDbg->dbgMask ^ old_mask; if (changed & DIVA_MGT_DBG_TRACE) { (*(pC->pIdiLib->DivaSTraceSetInfo))(pC->pIdiLib, (pC->hDbg->dbgMask & DIVA_MGT_DBG_TRACE) != 0); } if (changed & DIVA_MGT_DBG_DCHAN) { (*(pC->pIdiLib->DivaSTraceSetDChannel))(pC->pIdiLib, (pC->hDbg->dbgMask & DIVA_MGT_DBG_DCHAN) != 0); } if (!TraceFilter[0]) { if (changed & DIVA_MGT_DBG_IFC_BCHANNEL) { int i, state = ((pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_BCHANNEL) != 0); for (i = 0; i < pC->channels; i++) { (*(pC->pIdiLib->DivaSTraceSetBChannel))(pC->pIdiLib, i + 1, state); } } if (changed & DIVA_MGT_DBG_IFC_AUDIO) { int i, state = ((pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_AUDIO) != 0); for (i = 0; i < pC->channels; i++) { (*(pC->pIdiLib->DivaSTraceSetAudioTap))(pC->pIdiLib, i + 1, state); } } } } } void diva_mnt_internal_dprintf(dword drv_id, dword type, char *fmt, ...) { va_list ap; va_start(ap, fmt); DI_format(0, (word)drv_id, (int)type, fmt, ap); va_end(ap); } /* Shutdown all adapters before driver removal */ int diva_mnt_shutdown_xdi_adapters(void) { diva_os_spin_lock_magic_t old_irql, old_irql1; int i, fret = 0; byte *pmem; for (i = 1; i < ARRAY_SIZE(clients); i++) { pmem = NULL; diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "unload"); diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "unload"); if (clients[i].hDbg && clients[i].pIdiLib && clients[i].request) { if ((*(clients[i].pIdiLib->DivaSTraceLibraryStop))(clients[i].pIdiLib) == 1) { /* Adapter removal complete */ if (clients[i].pIdiLib) { (*(clients[i].pIdiLib->DivaSTraceLibraryFinit))(clients[i].pIdiLib->hLib); clients[i].pIdiLib = NULL; pmem = clients[i].pmem; clients[i].pmem = NULL; } clients[i].hDbg = NULL; clients[i].request_pending = 0; if (clients[i].dma_handle >= 0) { /* Free DMA handle */ diva_free_dma_descriptor(clients[i].request, clients[i].dma_handle); clients[i].dma_handle = -1; } 
clients[i].request = NULL; } else { fret = -1; } } diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "unload"); if (clients[i].hDbg && clients[i].pIdiLib && clients[i].request && clients[i].request_pending) { clients[i].request_pending = 0; (*(clients[i].request))((ENTITY *)(*(clients[i].pIdiLib->DivaSTraceGetHandle))(clients[i].pIdiLib->hLib)); if (clients[i].dma_handle >= 0) { diva_free_dma_descriptor(clients[i].request, clients[i].dma_handle); clients[i].dma_handle = -1; } } diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "unload"); if (pmem) { diva_os_free(0, pmem); } } return (fret); } /* Set/Read the trace filter used for selective tracing. Affects B- and Audio Tap trace mask at run time */ int diva_set_trace_filter(int filter_length, const char *filter) { diva_os_spin_lock_magic_t old_irql, old_irql1; int i, ch, on, client_b_on, client_atap_on; diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "dbg mask"); diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "write_filter"); if (filter_length <= DIVA_MAX_SELECTIVE_FILTER_LENGTH) { memcpy(&TraceFilter[0], filter, filter_length); if (TraceFilter[filter_length]) { TraceFilter[filter_length] = 0; } if (TraceFilter[0] == '*') { TraceFilter[0] = 0; } } else { filter_length = -1; } TraceFilterIdent = -1; TraceFilterChannel = -1; on = (TraceFilter[0] == 0); for (i = 1; i < ARRAY_SIZE(clients); i++) { if (clients[i].hDbg && clients[i].pIdiLib && clients[i].request) { client_b_on = on && ((clients[i].hDbg->dbgMask & DIVA_MGT_DBG_IFC_BCHANNEL) != 0); client_atap_on = on && ((clients[i].hDbg->dbgMask & DIVA_MGT_DBG_IFC_AUDIO) != 0); for (ch = 0; ch < clients[i].channels; ch++) { (*(clients[i].pIdiLib->DivaSTraceSetBChannel))(clients[i].pIdiLib->hLib, ch + 1, client_b_on); (*(clients[i].pIdiLib->DivaSTraceSetAudioTap))(clients[i].pIdiLib->hLib, ch + 1, client_atap_on); } } } for (i = 1; i < ARRAY_SIZE(clients); i++) { if (clients[i].hDbg && clients[i].pIdiLib && clients[i].request && clients[i].request_pending) { 
diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "write_filter"); clients[i].request_pending = 0; (*(clients[i].request))((ENTITY *)(*(clients[i].pIdiLib->DivaSTraceGetHandle))(clients[i].pIdiLib->hLib)); diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "write_filter"); } } diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "write_filter"); diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "dbg mask"); return (filter_length); } int diva_get_trace_filter(int max_length, char *filter) { diva_os_spin_lock_magic_t old_irql; int len; diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "read_filter"); len = strlen(&TraceFilter[0]) + 1; if (max_length >= len) { memcpy(filter, &TraceFilter[0], len); } diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "read_filter"); return (len); } static int diva_dbg_cmp_key(const char *ref, const char *key) { while (*key && (*ref++ == *key++)); return (!*key && !*ref); } /* In case trace filter starts with "C" character then all following characters are interpreted as command. 
Followings commands are available: - single, trace single call at time, independent from CPN/CiPN */ static int diva_mnt_cmp_nmbr(const char *nmbr) { const char *ref = &TraceFilter[0]; int ref_len = strlen(&TraceFilter[0]), nmbr_len = strlen(nmbr); if (ref[0] == 'C') { if (diva_dbg_cmp_key(&ref[1], "single")) { return (0); } return (-1); } if (!ref_len || (ref_len > nmbr_len)) { return (-1); } nmbr = nmbr + nmbr_len - 1; ref = ref + ref_len - 1; while (ref_len--) { if (*nmbr-- != *ref--) { return (-1); } } return (0); } static int diva_get_dma_descriptor(IDI_CALL request, dword *dma_magic) { ENTITY e; IDI_SYNC_REQ *pReq = (IDI_SYNC_REQ *)&e; if (!request) { return (-1); } pReq->xdi_dma_descriptor_operation.Req = 0; pReq->xdi_dma_descriptor_operation.Rc = IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION; pReq->xdi_dma_descriptor_operation.info.operation = IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC; pReq->xdi_dma_descriptor_operation.info.descriptor_number = -1; pReq->xdi_dma_descriptor_operation.info.descriptor_address = NULL; pReq->xdi_dma_descriptor_operation.info.descriptor_magic = 0; (*request)((ENTITY *)pReq); if (!pReq->xdi_dma_descriptor_operation.info.operation && (pReq->xdi_dma_descriptor_operation.info.descriptor_number >= 0) && pReq->xdi_dma_descriptor_operation.info.descriptor_magic) { *dma_magic = pReq->xdi_dma_descriptor_operation.info.descriptor_magic; return (pReq->xdi_dma_descriptor_operation.info.descriptor_number); } else { return (-1); } } static void diva_free_dma_descriptor(IDI_CALL request, int nr) { ENTITY e; IDI_SYNC_REQ *pReq = (IDI_SYNC_REQ *)&e; if (!request || (nr < 0)) { return; } pReq->xdi_dma_descriptor_operation.Req = 0; pReq->xdi_dma_descriptor_operation.Rc = IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION; pReq->xdi_dma_descriptor_operation.info.operation = IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE; pReq->xdi_dma_descriptor_operation.info.descriptor_number = nr; pReq->xdi_dma_descriptor_operation.info.descriptor_address = NULL; 
pReq->xdi_dma_descriptor_operation.info.descriptor_magic = 0; (*request)((ENTITY *)pReq); }
gpl-2.0
acuicultor/android_kernel_oneplus_msm8974-1
drivers/media/common/saa7146_i2c.c
9513
12683
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <media/saa7146_vv.h> static u32 saa7146_i2c_func(struct i2c_adapter *adapter) { /* DEB_I2C("'%s'\n", adapter->name); */ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE | I2C_FUNC_SMBUS_READ_BYTE_DATA | I2C_FUNC_SMBUS_WRITE_BYTE_DATA; } /* this function returns the status-register of our i2c-device */ static inline u32 saa7146_i2c_status(struct saa7146_dev *dev) { u32 iicsta = saa7146_read(dev, I2C_STATUS); /* DEB_I2C("status: 0x%08x\n", iicsta); */ return iicsta; } /* this function runs through the i2c-messages and prepares the data to be sent through the saa7146. have a look at the specifications p. 122 ff to understand this. it returns the number of u32s to send, or -1 in case of an error. */ static int saa7146_i2c_msg_prepare(const struct i2c_msg *m, int num, __le32 *op) { int h1, h2; int i, j, addr; int mem = 0, op_count = 0; /* first determine size of needed memory */ for(i = 0; i < num; i++) { mem += m[i].len + 1; } /* worst case: we need one u32 for three bytes to be send plus one extra byte to address the device */ mem = 1 + ((mem-1) / 3); /* we assume that op points to a memory of at least * SAA7146_I2C_MEM bytes size. if we exceed this limit... */ if ((4 * mem) > SAA7146_I2C_MEM) { /* DEB_I2C("cannot prepare i2c-message\n"); */ return -ENOMEM; } /* be careful: clear out the i2c-mem first */ memset(op,0,sizeof(__le32)*mem); /* loop through all messages */ for(i = 0; i < num; i++) { /* insert the address of the i2c-slave. note: we get 7 bit i2c-addresses, so we have to perform a translation */ addr = (m[i].addr*2) + ( (0 != (m[i].flags & I2C_M_RD)) ? 
1 : 0); h1 = op_count/3; h2 = op_count%3; op[h1] |= cpu_to_le32( (u8)addr << ((3-h2)*8)); op[h1] |= cpu_to_le32(SAA7146_I2C_START << ((3-h2)*2)); op_count++; /* loop through all bytes of message i */ for(j = 0; j < m[i].len; j++) { /* insert the data bytes */ h1 = op_count/3; h2 = op_count%3; op[h1] |= cpu_to_le32( (u32)((u8)m[i].buf[j]) << ((3-h2)*8)); op[h1] |= cpu_to_le32( SAA7146_I2C_CONT << ((3-h2)*2)); op_count++; } } /* have a look at the last byte inserted: if it was: ...CONT change it to ...STOP */ h1 = (op_count-1)/3; h2 = (op_count-1)%3; if ( SAA7146_I2C_CONT == (0x3 & (le32_to_cpu(op[h1]) >> ((3-h2)*2))) ) { op[h1] &= ~cpu_to_le32(0x2 << ((3-h2)*2)); op[h1] |= cpu_to_le32(SAA7146_I2C_STOP << ((3-h2)*2)); } /* return the number of u32s to send */ return mem; } /* this functions loops through all i2c-messages. normally, it should determine which bytes were read through the adapter and write them back to the corresponding i2c-message. but instead, we simply write back all bytes. fixme: this could be improved. 
*/ static int saa7146_i2c_msg_cleanup(const struct i2c_msg *m, int num, __le32 *op) { int i, j; int op_count = 0; /* loop through all messages */ for(i = 0; i < num; i++) { op_count++; /* loop through all bytes of message i */ for(j = 0; j < m[i].len; j++) { /* write back all bytes that could have been read */ m[i].buf[j] = (le32_to_cpu(op[op_count/3]) >> ((3-(op_count%3))*8)); op_count++; } } return 0; } /* this functions resets the i2c-device and returns 0 if everything was fine, otherwise -1 */ static int saa7146_i2c_reset(struct saa7146_dev *dev) { /* get current status */ u32 status = saa7146_i2c_status(dev); /* clear registers for sure */ saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate); saa7146_write(dev, I2C_TRANSFER, 0); /* check if any operation is still in progress */ if ( 0 != ( status & SAA7146_I2C_BUSY) ) { /* yes, kill ongoing operation */ DEB_I2C("busy_state detected\n"); /* set "ABORT-OPERATION"-bit (bit 7)*/ saa7146_write(dev, I2C_STATUS, (dev->i2c_bitrate | MASK_07)); saa7146_write(dev, MC2, (MASK_00 | MASK_16)); msleep(SAA7146_I2C_DELAY); /* clear all error-bits pending; this is needed because p.123, note 1 */ saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate); saa7146_write(dev, MC2, (MASK_00 | MASK_16)); msleep(SAA7146_I2C_DELAY); } /* check if any error is (still) present. (this can be necessary because p.123, note 1) */ status = saa7146_i2c_status(dev); if ( dev->i2c_bitrate != status ) { DEB_I2C("error_state detected. status:0x%08x\n", status); /* Repeat the abort operation. This seems to be necessary after serious protocol errors caused by e.g. 
the SAA7740 */ saa7146_write(dev, I2C_STATUS, (dev->i2c_bitrate | MASK_07)); saa7146_write(dev, MC2, (MASK_00 | MASK_16)); msleep(SAA7146_I2C_DELAY); /* clear all error-bits pending */ saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate); saa7146_write(dev, MC2, (MASK_00 | MASK_16)); msleep(SAA7146_I2C_DELAY); /* the data sheet says it might be necessary to clear the status twice after an abort */ saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate); saa7146_write(dev, MC2, (MASK_00 | MASK_16)); msleep(SAA7146_I2C_DELAY); } /* if any error is still present, a fatal error has occurred ... */ status = saa7146_i2c_status(dev); if ( dev->i2c_bitrate != status ) { DEB_I2C("fatal error. status:0x%08x\n", status); return -1; } return 0; } /* this functions writes out the data-byte 'dword' to the i2c-device. it returns 0 if ok, -1 if the transfer failed, -2 if the transfer failed badly (e.g. address error) */ static int saa7146_i2c_writeout(struct saa7146_dev *dev, __le32 *dword, int short_delay) { u32 status = 0, mc2 = 0; int trial = 0; unsigned long timeout; /* write out i2c-command */ DEB_I2C("before: 0x%08x (status: 0x%08x), %d\n", *dword, saa7146_read(dev, I2C_STATUS), dev->i2c_op); if( 0 != (SAA7146_USE_I2C_IRQ & dev->ext->flags)) { saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate); saa7146_write(dev, I2C_TRANSFER, le32_to_cpu(*dword)); dev->i2c_op = 1; SAA7146_ISR_CLEAR(dev, MASK_16|MASK_17); SAA7146_IER_ENABLE(dev, MASK_16|MASK_17); saa7146_write(dev, MC2, (MASK_00 | MASK_16)); timeout = HZ/100 + 1; /* 10ms */ timeout = wait_event_interruptible_timeout(dev->i2c_wq, dev->i2c_op == 0, timeout); if (timeout == -ERESTARTSYS || dev->i2c_op) { SAA7146_IER_DISABLE(dev, MASK_16|MASK_17); SAA7146_ISR_CLEAR(dev, MASK_16|MASK_17); if (timeout == -ERESTARTSYS) /* a signal arrived */ return -ERESTARTSYS; pr_warn("%s %s [irq]: timed out waiting for end of xfer\n", dev->name, __func__); return -EIO; } status = saa7146_read(dev, I2C_STATUS); } else { saa7146_write(dev, I2C_STATUS, 
dev->i2c_bitrate); saa7146_write(dev, I2C_TRANSFER, le32_to_cpu(*dword)); saa7146_write(dev, MC2, (MASK_00 | MASK_16)); /* do not poll for i2c-status before upload is complete */ timeout = jiffies + HZ/100 + 1; /* 10ms */ while(1) { mc2 = (saa7146_read(dev, MC2) & 0x1); if( 0 != mc2 ) { break; } if (time_after(jiffies,timeout)) { pr_warn("%s %s: timed out waiting for MC2\n", dev->name, __func__); return -EIO; } } /* wait until we get a transfer done or error */ timeout = jiffies + HZ/100 + 1; /* 10ms */ /* first read usually delivers bogus results... */ saa7146_i2c_status(dev); while(1) { status = saa7146_i2c_status(dev); if ((status & 0x3) != 1) break; if (time_after(jiffies,timeout)) { /* this is normal when probing the bus * (no answer from nonexisistant device...) */ pr_warn("%s %s [poll]: timed out waiting for end of xfer\n", dev->name, __func__); return -EIO; } if (++trial < 50 && short_delay) udelay(10); else msleep(1); } } /* give a detailed status report */ if ( 0 != (status & (SAA7146_I2C_SPERR | SAA7146_I2C_APERR | SAA7146_I2C_DTERR | SAA7146_I2C_DRERR | SAA7146_I2C_AL | SAA7146_I2C_ERR | SAA7146_I2C_BUSY)) ) { if ( 0 == (status & SAA7146_I2C_ERR) || 0 == (status & SAA7146_I2C_BUSY) ) { /* it may take some time until ERR goes high - ignore */ DEB_I2C("unexpected i2c status %04x\n", status); } if( 0 != (status & SAA7146_I2C_SPERR) ) { DEB_I2C("error due to invalid start/stop condition\n"); } if( 0 != (status & SAA7146_I2C_DTERR) ) { DEB_I2C("error in data transmission\n"); } if( 0 != (status & SAA7146_I2C_DRERR) ) { DEB_I2C("error when receiving data\n"); } if( 0 != (status & SAA7146_I2C_AL) ) { DEB_I2C("error because arbitration lost\n"); } /* we handle address-errors here */ if( 0 != (status & SAA7146_I2C_APERR) ) { DEB_I2C("error in address phase\n"); return -EREMOTEIO; } return -EIO; } /* read back data, just in case we were reading ... 
*/ *dword = cpu_to_le32(saa7146_read(dev, I2C_TRANSFER)); DEB_I2C("after: 0x%08x\n", *dword); return 0; } static int saa7146_i2c_transfer(struct saa7146_dev *dev, const struct i2c_msg *msgs, int num, int retries) { int i = 0, count = 0; __le32 *buffer = dev->d_i2c.cpu_addr; int err = 0; int short_delay = 0; if (mutex_lock_interruptible(&dev->i2c_lock)) return -ERESTARTSYS; for(i=0;i<num;i++) { DEB_I2C("msg:%d/%d\n", i+1, num); } /* prepare the message(s), get number of u32s to transfer */ count = saa7146_i2c_msg_prepare(msgs, num, buffer); if ( 0 > count ) { err = -1; goto out; } if ( count > 3 || 0 != (SAA7146_I2C_SHORT_DELAY & dev->ext->flags) ) short_delay = 1; do { /* reset the i2c-device if necessary */ err = saa7146_i2c_reset(dev); if ( 0 > err ) { DEB_I2C("could not reset i2c-device\n"); goto out; } /* write out the u32s one after another */ for(i = 0; i < count; i++) { err = saa7146_i2c_writeout(dev, &buffer[i], short_delay); if ( 0 != err) { /* this one is unsatisfying: some i2c slaves on some dvb cards don't acknowledge correctly, so the saa7146 thinks that an address error occurred. in that case, the transaction should be retrying, even if an address error occurred. analog saa7146 based cards extensively rely on i2c address probing, however, and address errors indicate that a device is really *not* there. retrying in that case increases the time the device needs to probe greatly, so it should be avoided. So we bail out in irq mode after an address error and trust the saa7146 address error detection. */ if (-EREMOTEIO == err && 0 != (SAA7146_USE_I2C_IRQ & dev->ext->flags)) goto out; DEB_I2C("error while sending message(s). 
starting again\n"); break; } } if( 0 == err ) { err = num; break; } /* delay a bit before retrying */ msleep(10); } while (err != num && retries--); /* quit if any error occurred */ if (err != num) goto out; /* if any things had to be read, get the results */ if ( 0 != saa7146_i2c_msg_cleanup(msgs, num, buffer)) { DEB_I2C("could not cleanup i2c-message\n"); err = -1; goto out; } /* return the number of delivered messages */ DEB_I2C("transmission successful. (msg:%d)\n", err); out: /* another bug in revision 0: the i2c-registers get uploaded randomly by other uploads, so we better clear them out before continuing */ if( 0 == dev->revision ) { __le32 zero = 0; saa7146_i2c_reset(dev); if( 0 != saa7146_i2c_writeout(dev, &zero, short_delay)) { pr_info("revision 0 error. this should never happen\n"); } } mutex_unlock(&dev->i2c_lock); return err; } /* utility functions */ static int saa7146_i2c_xfer(struct i2c_adapter* adapter, struct i2c_msg *msg, int num) { struct v4l2_device *v4l2_dev = i2c_get_adapdata(adapter); struct saa7146_dev *dev = to_saa7146_dev(v4l2_dev); /* use helper function to transfer data */ return saa7146_i2c_transfer(dev, msg, num, adapter->retries); } /*****************************************************************************/ /* i2c-adapter helper functions */ /* exported algorithm data */ static struct i2c_algorithm saa7146_algo = { .master_xfer = saa7146_i2c_xfer, .functionality = saa7146_i2c_func, }; int saa7146_i2c_adapter_prepare(struct saa7146_dev *dev, struct i2c_adapter *i2c_adapter, u32 bitrate) { DEB_EE("bitrate: 0x%08x\n", bitrate); /* enable i2c-port pins */ saa7146_write(dev, MC1, (MASK_08 | MASK_24)); dev->i2c_bitrate = bitrate; saa7146_i2c_reset(dev); if (i2c_adapter) { i2c_set_adapdata(i2c_adapter, &dev->v4l2_dev); i2c_adapter->dev.parent = &dev->pci->dev; i2c_adapter->algo = &saa7146_algo; i2c_adapter->algo_data = NULL; i2c_adapter->timeout = SAA7146_I2C_TIMEOUT; i2c_adapter->retries = SAA7146_I2C_RETRIES; } return 0; }
gpl-2.0
KonstaT/android_kernel_zte_msm7x27a
drivers/macintosh/ans-lcd.c
12073
4048
/* * /dev/lcd driver for Apple Network Servers. */ #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/fs.h> #include <asm/uaccess.h> #include <asm/sections.h> #include <asm/prom.h> #include <asm/io.h> #include "ans-lcd.h" #define ANSLCD_ADDR 0xf301c000 #define ANSLCD_CTRL_IX 0x00 #define ANSLCD_DATA_IX 0x10 static unsigned long anslcd_short_delay = 80; static unsigned long anslcd_long_delay = 3280; static volatile unsigned char __iomem *anslcd_ptr; static DEFINE_MUTEX(anslcd_mutex); #undef DEBUG static void anslcd_write_byte_ctrl ( unsigned char c ) { #ifdef DEBUG printk(KERN_DEBUG "LCD: CTRL byte: %02x\n",c); #endif out_8(anslcd_ptr + ANSLCD_CTRL_IX, c); switch(c) { case 1: case 2: case 3: udelay(anslcd_long_delay); break; default: udelay(anslcd_short_delay); } } static void anslcd_write_byte_data ( unsigned char c ) { out_8(anslcd_ptr + ANSLCD_DATA_IX, c); udelay(anslcd_short_delay); } static ssize_t anslcd_write( struct file * file, const char __user * buf, size_t count, loff_t *ppos ) { const char __user *p = buf; int i; #ifdef DEBUG printk(KERN_DEBUG "LCD: write\n"); #endif if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; mutex_lock(&anslcd_mutex); for ( i = *ppos; count > 0; ++i, ++p, --count ) { char c; __get_user(c, p); anslcd_write_byte_data( c ); } mutex_unlock(&anslcd_mutex); *ppos = i; return p - buf; } static long anslcd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { char ch, __user *temp; long ret = 0; #ifdef DEBUG printk(KERN_DEBUG "LCD: ioctl(%d,%d)\n",cmd,arg); #endif mutex_lock(&anslcd_mutex); switch ( cmd ) { case ANSLCD_CLEAR: anslcd_write_byte_ctrl ( 0x38 ); anslcd_write_byte_ctrl ( 0x0f ); anslcd_write_byte_ctrl ( 0x06 ); anslcd_write_byte_ctrl ( 0x01 ); anslcd_write_byte_ctrl ( 0x02 ); break; case ANSLCD_SENDCTRL: temp = (char __user *) arg; __get_user(ch, temp); for (; ch; 
temp++) { /* FIXME: This is ugly, but should work, as a \0 byte is not a valid command code */ anslcd_write_byte_ctrl ( ch ); __get_user(ch, temp); } break; case ANSLCD_SETSHORTDELAY: if (!capable(CAP_SYS_ADMIN)) ret =-EACCES; else anslcd_short_delay=arg; break; case ANSLCD_SETLONGDELAY: if (!capable(CAP_SYS_ADMIN)) ret = -EACCES; else anslcd_long_delay=arg; break; default: ret = -EINVAL; } mutex_unlock(&anslcd_mutex); return ret; } static int anslcd_open( struct inode * inode, struct file * file ) { return 0; } const struct file_operations anslcd_fops = { .write = anslcd_write, .unlocked_ioctl = anslcd_ioctl, .open = anslcd_open, .llseek = default_llseek, }; static struct miscdevice anslcd_dev = { ANSLCD_MINOR, "anslcd", &anslcd_fops }; const char anslcd_logo[] = "********************" /* Line #1 */ "* LINUX! *" /* Line #3 */ "* Welcome to *" /* Line #2 */ "********************"; /* Line #4 */ static int __init anslcd_init(void) { int a; int retval; struct device_node* node; node = of_find_node_by_name(NULL, "lcd"); if (!node || !node->parent || strcmp(node->parent->name, "gc")) { of_node_put(node); return -ENODEV; } of_node_put(node); anslcd_ptr = ioremap(ANSLCD_ADDR, 0x20); retval = misc_register(&anslcd_dev); if(retval < 0){ printk(KERN_INFO "LCD: misc_register failed\n"); iounmap(anslcd_ptr); return retval; } #ifdef DEBUG printk(KERN_DEBUG "LCD: init\n"); #endif mutex_lock(&anslcd_mutex); anslcd_write_byte_ctrl ( 0x38 ); anslcd_write_byte_ctrl ( 0x0c ); anslcd_write_byte_ctrl ( 0x06 ); anslcd_write_byte_ctrl ( 0x01 ); anslcd_write_byte_ctrl ( 0x02 ); for(a=0;a<80;a++) { anslcd_write_byte_data(anslcd_logo[a]); } mutex_unlock(&anslcd_mutex); return 0; } static void __exit anslcd_exit(void) { misc_deregister(&anslcd_dev); iounmap(anslcd_ptr); } module_init(anslcd_init); module_exit(anslcd_exit);
gpl-2.0
boype/kernel_tuna_jb43
arch/sh/drivers/pci/common.c
13097
3996
#include <linux/pci.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/kernel.h> /* * These functions are used early on before PCI scanning is done * and all of the pci_dev and pci_bus structures have been created. */ static struct pci_dev *fake_pci_dev(struct pci_channel *hose, int top_bus, int busnr, int devfn) { static struct pci_dev dev; static struct pci_bus bus; dev.bus = &bus; dev.sysdata = hose; dev.devfn = devfn; bus.number = busnr; bus.sysdata = hose; bus.ops = hose->pci_ops; if(busnr != top_bus) /* Fake a parent bus structure. */ bus.parent = &bus; else bus.parent = NULL; return &dev; } #define EARLY_PCI_OP(rw, size, type) \ int __init early_##rw##_config_##size(struct pci_channel *hose, \ int top_bus, int bus, int devfn, int offset, type value) \ { \ return pci_##rw##_config_##size( \ fake_pci_dev(hose, top_bus, bus, devfn), \ offset, value); \ } EARLY_PCI_OP(read, byte, u8 *) EARLY_PCI_OP(read, word, u16 *) EARLY_PCI_OP(read, dword, u32 *) EARLY_PCI_OP(write, byte, u8) EARLY_PCI_OP(write, word, u16) EARLY_PCI_OP(write, dword, u32) int __init pci_is_66mhz_capable(struct pci_channel *hose, int top_bus, int current_bus) { u32 pci_devfn; unsigned short vid; int cap66 = -1; u16 stat; printk(KERN_INFO "PCI: Checking 66MHz capabilities...\n"); for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) { if (PCI_FUNC(pci_devfn)) continue; if (early_read_config_word(hose, top_bus, current_bus, pci_devfn, PCI_VENDOR_ID, &vid) != PCIBIOS_SUCCESSFUL) continue; if (vid == 0xffff) continue; /* check 66MHz capability */ if (cap66 < 0) cap66 = 1; if (cap66) { early_read_config_word(hose, top_bus, current_bus, pci_devfn, PCI_STATUS, &stat); if (!(stat & PCI_STATUS_66MHZ)) { printk(KERN_DEBUG "PCI: %02x:%02x not 66MHz capable.\n", current_bus, pci_devfn); cap66 = 0; break; } } } return cap66 > 0; } static void pcibios_enable_err(unsigned long __data) { struct pci_channel *hose = (struct pci_channel *)__data; del_timer(&hose->err_timer); printk(KERN_DEBUG 
"PCI: re-enabling error IRQ.\n"); enable_irq(hose->err_irq); } static void pcibios_enable_serr(unsigned long __data) { struct pci_channel *hose = (struct pci_channel *)__data; del_timer(&hose->serr_timer); printk(KERN_DEBUG "PCI: re-enabling system error IRQ.\n"); enable_irq(hose->serr_irq); } void pcibios_enable_timers(struct pci_channel *hose) { if (hose->err_irq) { init_timer(&hose->err_timer); hose->err_timer.data = (unsigned long)hose; hose->err_timer.function = pcibios_enable_err; } if (hose->serr_irq) { init_timer(&hose->serr_timer); hose->serr_timer.data = (unsigned long)hose; hose->serr_timer.function = pcibios_enable_serr; } } /* * A simple handler for the regular PCI status errors, called from IRQ * context. */ unsigned int pcibios_handle_status_errors(unsigned long addr, unsigned int status, struct pci_channel *hose) { unsigned int cmd = 0; if (status & PCI_STATUS_REC_MASTER_ABORT) { printk(KERN_DEBUG "PCI: master abort, pc=0x%08lx\n", addr); cmd |= PCI_STATUS_REC_MASTER_ABORT; } if (status & PCI_STATUS_REC_TARGET_ABORT) { printk(KERN_DEBUG "PCI: target abort: "); pcibios_report_status(PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT, 1); printk("\n"); cmd |= PCI_STATUS_REC_TARGET_ABORT; } if (status & (PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY)) { printk(KERN_DEBUG "PCI: parity error detected: "); pcibios_report_status(PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY, 1); printk("\n"); cmd |= PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY; /* Now back off of the IRQ for awhile */ if (hose->err_irq) { disable_irq_nosync(hose->err_irq); hose->err_timer.expires = jiffies + HZ; add_timer(&hose->err_timer); } } return cmd; }
gpl-2.0
Pivosgroup/buildroot-linux-kernel-m3
drivers/i2c/busses/i2c-nforce2.c
42
13421
/* SMBus driver for nVidia nForce2 MCP Added nForce3 Pro 150 Thomas Leibold <thomas@plx.com>, Ported to 2.5 Patrick Dreker <patrick@dreker.de>, Copyright (c) 2003 Hans-Frieder Vogt <hfvogt@arcor.de>, Based on SMBus 2.0 driver for AMD-8111 IO-Hub Copyright (c) 2002 Vojtech Pavlik This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* SUPPORTED DEVICES PCI ID nForce2 MCP 0064 nForce2 Ultra 400 MCP 0084 nForce3 Pro150 MCP 00D4 nForce3 250Gb MCP 00E4 nForce4 MCP 0052 nForce4 MCP-04 0034 nForce MCP51 0264 nForce MCP55 0368 nForce MCP61 03EB nForce MCP65 0446 nForce MCP67 0542 nForce MCP73 07D8 nForce MCP78S 0752 nForce MCP79 0AA2 This driver supports the 2 SMBuses that are included in the MCP of the nForce2/3/4/5xx chipsets. 
*/ /* Note: we assume there can only be one nForce2, with two SMBus interfaces */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/dmi.h> #include <linux/acpi.h> #include <linux/slab.h> #include <asm/io.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR ("Hans-Frieder Vogt <hfvogt@gmx.net>"); MODULE_DESCRIPTION("nForce2/3/4/5xx SMBus driver"); struct nforce2_smbus { struct i2c_adapter adapter; int base; int size; int blockops; int can_abort; }; /* * nVidia nForce2 SMBus control register definitions * (Newer incarnations use standard BARs 4 and 5 instead) */ #define NFORCE_PCI_SMB1 0x50 #define NFORCE_PCI_SMB2 0x54 /* * ACPI 2.0 chapter 13 SMBus 2.0 EC register model */ #define NVIDIA_SMB_PRTCL (smbus->base + 0x00) /* protocol, PEC */ #define NVIDIA_SMB_STS (smbus->base + 0x01) /* status */ #define NVIDIA_SMB_ADDR (smbus->base + 0x02) /* address */ #define NVIDIA_SMB_CMD (smbus->base + 0x03) /* command */ #define NVIDIA_SMB_DATA (smbus->base + 0x04) /* 32 data registers */ #define NVIDIA_SMB_BCNT (smbus->base + 0x24) /* number of data bytes */ #define NVIDIA_SMB_STATUS_ABRT (smbus->base + 0x3c) /* register used to check the status of the abort command */ #define NVIDIA_SMB_CTRL (smbus->base + 0x3e) /* control register */ #define NVIDIA_SMB_STATUS_ABRT_STS 0x01 /* Bit to notify that abort succeeded */ #define NVIDIA_SMB_CTRL_ABORT 0x20 #define NVIDIA_SMB_STS_DONE 0x80 #define NVIDIA_SMB_STS_ALRM 0x40 #define NVIDIA_SMB_STS_RES 0x20 #define NVIDIA_SMB_STS_STATUS 0x1f #define NVIDIA_SMB_PRTCL_WRITE 0x00 #define NVIDIA_SMB_PRTCL_READ 0x01 #define NVIDIA_SMB_PRTCL_QUICK 0x02 #define NVIDIA_SMB_PRTCL_BYTE 0x04 #define NVIDIA_SMB_PRTCL_BYTE_DATA 0x06 #define NVIDIA_SMB_PRTCL_WORD_DATA 0x08 #define NVIDIA_SMB_PRTCL_BLOCK_DATA 0x0a #define NVIDIA_SMB_PRTCL_PEC 0x80 /* Misc definitions */ #define MAX_TIMEOUT 100 /* We 
disable the second SMBus channel on these boards */ static struct dmi_system_id __devinitdata nforce2_dmi_blacklist2[] = { { .ident = "DFI Lanparty NF4 Expert", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "DFI Corp,LTD"), DMI_MATCH(DMI_BOARD_NAME, "LP UT NF4 Expert"), }, }, { } }; static struct pci_driver nforce2_driver; /* For multiplexing support, we need a global reference to the 1st SMBus channel */ #if defined CONFIG_I2C_NFORCE2_S4985 || defined CONFIG_I2C_NFORCE2_S4985_MODULE struct i2c_adapter *nforce2_smbus; EXPORT_SYMBOL_GPL(nforce2_smbus); static void nforce2_set_reference(struct i2c_adapter *adap) { nforce2_smbus = adap; } #else static inline void nforce2_set_reference(struct i2c_adapter *adap) { } #endif static void nforce2_abort(struct i2c_adapter *adap) { struct nforce2_smbus *smbus = adap->algo_data; int timeout = 0; unsigned char temp; dev_dbg(&adap->dev, "Aborting current transaction\n"); outb_p(NVIDIA_SMB_CTRL_ABORT, NVIDIA_SMB_CTRL); do { msleep(1); temp = inb_p(NVIDIA_SMB_STATUS_ABRT); } while (!(temp & NVIDIA_SMB_STATUS_ABRT_STS) && (timeout++ < MAX_TIMEOUT)); if (!(temp & NVIDIA_SMB_STATUS_ABRT_STS)) dev_err(&adap->dev, "Can't reset the smbus\n"); outb_p(NVIDIA_SMB_STATUS_ABRT_STS, NVIDIA_SMB_STATUS_ABRT); } static int nforce2_check_status(struct i2c_adapter *adap) { struct nforce2_smbus *smbus = adap->algo_data; int timeout = 0; unsigned char temp; do { msleep(1); temp = inb_p(NVIDIA_SMB_STS); } while ((!temp) && (timeout++ < MAX_TIMEOUT)); if (timeout > MAX_TIMEOUT) { dev_dbg(&adap->dev, "SMBus Timeout!\n"); if (smbus->can_abort) nforce2_abort(adap); return -ETIMEDOUT; } if (!(temp & NVIDIA_SMB_STS_DONE) || (temp & NVIDIA_SMB_STS_STATUS)) { dev_dbg(&adap->dev, "Transaction failed (0x%02x)!\n", temp); return -EIO; } return 0; } /* Return negative errno on error */ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { struct nforce2_smbus *smbus = 
adap->algo_data; unsigned char protocol, pec; u8 len; int i, status; protocol = (read_write == I2C_SMBUS_READ) ? NVIDIA_SMB_PRTCL_READ : NVIDIA_SMB_PRTCL_WRITE; pec = (flags & I2C_CLIENT_PEC) ? NVIDIA_SMB_PRTCL_PEC : 0; switch (size) { case I2C_SMBUS_QUICK: protocol |= NVIDIA_SMB_PRTCL_QUICK; read_write = I2C_SMBUS_WRITE; break; case I2C_SMBUS_BYTE: if (read_write == I2C_SMBUS_WRITE) outb_p(command, NVIDIA_SMB_CMD); protocol |= NVIDIA_SMB_PRTCL_BYTE; break; case I2C_SMBUS_BYTE_DATA: outb_p(command, NVIDIA_SMB_CMD); if (read_write == I2C_SMBUS_WRITE) outb_p(data->byte, NVIDIA_SMB_DATA); protocol |= NVIDIA_SMB_PRTCL_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: outb_p(command, NVIDIA_SMB_CMD); if (read_write == I2C_SMBUS_WRITE) { outb_p(data->word, NVIDIA_SMB_DATA); outb_p(data->word >> 8, NVIDIA_SMB_DATA+1); } protocol |= NVIDIA_SMB_PRTCL_WORD_DATA | pec; break; case I2C_SMBUS_BLOCK_DATA: outb_p(command, NVIDIA_SMB_CMD); if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if ((len == 0) || (len > I2C_SMBUS_BLOCK_MAX)) { dev_err(&adap->dev, "Transaction failed " "(requested block size: %d)\n", len); return -EINVAL; } outb_p(len, NVIDIA_SMB_BCNT); for (i = 0; i < I2C_SMBUS_BLOCK_MAX; i++) outb_p(data->block[i + 1], NVIDIA_SMB_DATA+i); } protocol |= NVIDIA_SMB_PRTCL_BLOCK_DATA | pec; break; default: dev_err(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } outb_p((addr & 0x7f) << 1, NVIDIA_SMB_ADDR); outb_p(protocol, NVIDIA_SMB_PRTCL); status = nforce2_check_status(adap); if (status) return status; if (read_write == I2C_SMBUS_WRITE) return 0; switch (size) { case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: data->byte = inb_p(NVIDIA_SMB_DATA); break; case I2C_SMBUS_WORD_DATA: data->word = inb_p(NVIDIA_SMB_DATA) | (inb_p(NVIDIA_SMB_DATA+1) << 8); break; case I2C_SMBUS_BLOCK_DATA: len = inb_p(NVIDIA_SMB_BCNT); if ((len <= 0) || (len > I2C_SMBUS_BLOCK_MAX)) { dev_err(&adap->dev, "Transaction failed " "(received block size: 0x%02x)\n", len); return 
-EPROTO; } for (i = 0; i < len; i++) data->block[i+1] = inb_p(NVIDIA_SMB_DATA + i); data->block[0] = len; break; } return 0; } static u32 nforce2_func(struct i2c_adapter *adapter) { /* other functionality might be possible, but is not tested */ return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PEC | (((struct nforce2_smbus*)adapter->algo_data)->blockops ? I2C_FUNC_SMBUS_BLOCK_DATA : 0); } static struct i2c_algorithm smbus_algorithm = { .smbus_xfer = nforce2_access, .functionality = nforce2_func, }; static const struct pci_device_id nforce2_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE4_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS) }, { 0 } }; MODULE_DEVICE_TABLE (pci, nforce2_ids); static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar, int alt_reg, struct nforce2_smbus *smbus, const char *name) { int error; smbus->base = pci_resource_start(dev, bar); if (smbus->base) { smbus->size = 
pci_resource_len(dev, bar); } else { /* Older incarnations of the device used non-standard BARs */ u16 iobase; if (pci_read_config_word(dev, alt_reg, &iobase) != PCIBIOS_SUCCESSFUL) { dev_err(&dev->dev, "Error reading PCI config for %s\n", name); return -EIO; } smbus->base = iobase & PCI_BASE_ADDRESS_IO_MASK; smbus->size = 64; } error = acpi_check_region(smbus->base, smbus->size, nforce2_driver.name); if (error) return -1; if (!request_region(smbus->base, smbus->size, nforce2_driver.name)) { dev_err(&smbus->adapter.dev, "Error requesting region %02x .. %02X for %s\n", smbus->base, smbus->base+smbus->size-1, name); return -EBUSY; } smbus->adapter.owner = THIS_MODULE; smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; smbus->adapter.algo = &smbus_algorithm; smbus->adapter.algo_data = smbus; smbus->adapter.dev.parent = &dev->dev; snprintf(smbus->adapter.name, sizeof(smbus->adapter.name), "SMBus nForce2 adapter at %04x", smbus->base); error = i2c_add_adapter(&smbus->adapter); if (error) { dev_err(&smbus->adapter.dev, "Failed to register adapter.\n"); release_region(smbus->base, smbus->size); return error; } dev_info(&smbus->adapter.dev, "nForce2 SMBus adapter at %#x\n", smbus->base); return 0; } static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct nforce2_smbus *smbuses; int res1, res2; /* we support 2 SMBus adapters */ if (!(smbuses = kzalloc(2*sizeof(struct nforce2_smbus), GFP_KERNEL))) return -ENOMEM; pci_set_drvdata(dev, smbuses); switch(dev->device) { case PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS: case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS: case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS: smbuses[0].blockops = 1; smbuses[1].blockops = 1; smbuses[0].can_abort = 1; smbuses[1].can_abort = 1; } /* SMBus adapter 1 */ res1 = nforce2_probe_smb(dev, 4, NFORCE_PCI_SMB1, &smbuses[0], "SMB1"); if (res1 < 0) { dev_err(&dev->dev, "Error probing SMB1.\n"); smbuses[0].base = 0; /* to have a check value */ } /* SMBus adapter 2 */ if 
(dmi_check_system(nforce2_dmi_blacklist2)) { dev_err(&dev->dev, "Disabling SMB2 for safety reasons.\n"); res2 = -EPERM; smbuses[1].base = 0; } else { res2 = nforce2_probe_smb(dev, 5, NFORCE_PCI_SMB2, &smbuses[1], "SMB2"); if (res2 < 0) { dev_err(&dev->dev, "Error probing SMB2.\n"); smbuses[1].base = 0; /* to have a check value */ } } if ((res1 < 0) && (res2 < 0)) { /* we did not find even one of the SMBuses, so we give up */ kfree(smbuses); return -ENODEV; } nforce2_set_reference(&smbuses[0].adapter); return 0; } static void __devexit nforce2_remove(struct pci_dev *dev) { struct nforce2_smbus *smbuses = (void*) pci_get_drvdata(dev); nforce2_set_reference(NULL); if (smbuses[0].base) { i2c_del_adapter(&smbuses[0].adapter); release_region(smbuses[0].base, smbuses[0].size); } if (smbuses[1].base) { i2c_del_adapter(&smbuses[1].adapter); release_region(smbuses[1].base, smbuses[1].size); } kfree(smbuses); } static struct pci_driver nforce2_driver = { .name = "nForce2_smbus", .id_table = nforce2_ids, .probe = nforce2_probe, .remove = __devexit_p(nforce2_remove), }; static int __init nforce2_init(void) { return pci_register_driver(&nforce2_driver); } static void __exit nforce2_exit(void) { pci_unregister_driver(&nforce2_driver); } module_init(nforce2_init); module_exit(nforce2_exit);
gpl-2.0
NicholasPace/android_kernel_motorola_msm8226
fs/ecryptfs/main.c
42
26111
/** * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 1997-2003 Erez Zadok * Copyright (C) 2001-2003 Stony Brook University * Copyright (C) 2004-2007 International Business Machines Corp. * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com> * Michael C. Thompson <mcthomps@us.ibm.com> * Tyler Hicks <tyhicks@ou.edu> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <linux/dcache.h> #include <linux/file.h> #include <linux/module.h> #include <linux/namei.h> #include <linux/skbuff.h> #include <linux/crypto.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/key.h> #include <linux/parser.h> #include <linux/fs_stack.h> #include <linux/slab.h> #include <linux/magic.h> #include "ecryptfs_kernel.h" /** * Module parameter that defines the ecryptfs_verbosity level. 
*/ int ecryptfs_verbosity = 0; module_param(ecryptfs_verbosity, int, 0); MODULE_PARM_DESC(ecryptfs_verbosity, "Initial verbosity level (0 or 1; defaults to " "0, which is Quiet)"); /** * Module parameter that defines the number of message buffer elements */ unsigned int ecryptfs_message_buf_len = ECRYPTFS_DEFAULT_MSG_CTX_ELEMS; module_param(ecryptfs_message_buf_len, uint, 0); MODULE_PARM_DESC(ecryptfs_message_buf_len, "Number of message buffer elements"); /** * Module parameter that defines the maximum guaranteed amount of time to wait * for a response from ecryptfsd. The actual sleep time will be, more than * likely, a small amount greater than this specified value, but only less if * the message successfully arrives. */ signed long ecryptfs_message_wait_timeout = ECRYPTFS_MAX_MSG_CTX_TTL / HZ; module_param(ecryptfs_message_wait_timeout, long, 0); MODULE_PARM_DESC(ecryptfs_message_wait_timeout, "Maximum number of seconds that an operation will " "sleep while waiting for a message response from " "userspace"); /** * Module parameter that is an estimate of the maximum number of users * that will be concurrently using eCryptfs. Set this to the right * value to balance performance and memory use. */ unsigned int ecryptfs_number_of_users = ECRYPTFS_DEFAULT_NUM_USERS; module_param(ecryptfs_number_of_users, uint, 0); MODULE_PARM_DESC(ecryptfs_number_of_users, "An estimate of the number of " "concurrent users of eCryptfs"); void __ecryptfs_printk(const char *fmt, ...) { va_list args; va_start(args, fmt); if (fmt[1] == '7') { /* KERN_DEBUG */ if (ecryptfs_verbosity >= 1) vprintk(fmt, args); } else vprintk(fmt, args); va_end(args); } /** * ecryptfs_init_lower_file * @ecryptfs_dentry: Fully initialized eCryptfs dentry object, with * the lower dentry and the lower mount set * * eCryptfs only ever keeps a single open file for every lower * inode. All I/O operations to the lower inode occur through that * file. 
When the first eCryptfs dentry that interposes with the first * lower dentry for that inode is created, this function creates the * lower file struct and associates it with the eCryptfs * inode. When all eCryptfs files associated with the inode are released, the * file is closed. * * The lower file will be opened with read/write permissions, if * possible. Otherwise, it is opened read-only. * * This function does nothing if a lower file is already * associated with the eCryptfs inode. * * Returns zero on success; non-zero otherwise */ static int ecryptfs_init_lower_file(struct dentry *dentry, struct file **lower_file) { const struct cred *cred = current_cred(); struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); int rc; rc = ecryptfs_privileged_open(lower_file, lower_dentry, lower_mnt, cred); if (rc) { printk(KERN_ERR "Error opening lower file " "for lower_dentry [0x%p] and lower_mnt [0x%p]; " "rc = [%d]\n", lower_dentry, lower_mnt, rc); (*lower_file) = NULL; } return rc; } int ecryptfs_get_lower_file(struct dentry *dentry, struct inode *inode) { struct ecryptfs_inode_info *inode_info; int count, rc = 0; inode_info = ecryptfs_inode_to_private(inode); mutex_lock(&inode_info->lower_file_mutex); count = atomic_inc_return(&inode_info->lower_file_count); if (WARN_ON_ONCE(count < 1)) rc = -EINVAL; else if (count == 1) { rc = ecryptfs_init_lower_file(dentry, &inode_info->lower_file); if (rc) atomic_set(&inode_info->lower_file_count, 0); } mutex_unlock(&inode_info->lower_file_mutex); return rc; } void ecryptfs_put_lower_file(struct inode *inode) { struct ecryptfs_inode_info *inode_info; inode_info = ecryptfs_inode_to_private(inode); if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count, &inode_info->lower_file_mutex)) { filemap_write_and_wait(inode->i_mapping); fput(inode_info->lower_file); inode_info->lower_file = NULL; mutex_unlock(&inode_info->lower_file_mutex); } } enum { 
ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig, ecryptfs_opt_cipher, ecryptfs_opt_ecryptfs_cipher, ecryptfs_opt_ecryptfs_key_bytes, ecryptfs_opt_passthrough, ecryptfs_opt_xattr_metadata, ecryptfs_opt_encrypted_view, ecryptfs_opt_fnek_sig, ecryptfs_opt_fn_cipher, ecryptfs_opt_fn_cipher_key_bytes, ecryptfs_opt_unlink_sigs, ecryptfs_opt_mount_auth_tok_only, ecryptfs_opt_check_dev_ruid, ecryptfs_opt_err }; static const match_table_t tokens = { {ecryptfs_opt_sig, "sig=%s"}, {ecryptfs_opt_ecryptfs_sig, "ecryptfs_sig=%s"}, {ecryptfs_opt_cipher, "cipher=%s"}, {ecryptfs_opt_ecryptfs_cipher, "ecryptfs_cipher=%s"}, {ecryptfs_opt_ecryptfs_key_bytes, "ecryptfs_key_bytes=%u"}, {ecryptfs_opt_passthrough, "ecryptfs_passthrough"}, {ecryptfs_opt_xattr_metadata, "ecryptfs_xattr_metadata"}, {ecryptfs_opt_encrypted_view, "ecryptfs_encrypted_view"}, {ecryptfs_opt_fnek_sig, "ecryptfs_fnek_sig=%s"}, {ecryptfs_opt_fn_cipher, "ecryptfs_fn_cipher=%s"}, {ecryptfs_opt_fn_cipher_key_bytes, "ecryptfs_fn_key_bytes=%u"}, {ecryptfs_opt_unlink_sigs, "ecryptfs_unlink_sigs"}, {ecryptfs_opt_mount_auth_tok_only, "ecryptfs_mount_auth_tok_only"}, {ecryptfs_opt_check_dev_ruid, "ecryptfs_check_dev_ruid"}, {ecryptfs_opt_err, NULL} }; static int ecryptfs_init_global_auth_toks( struct ecryptfs_mount_crypt_stat *mount_crypt_stat) { struct ecryptfs_global_auth_tok *global_auth_tok; struct ecryptfs_auth_tok *auth_tok; int rc = 0; list_for_each_entry(global_auth_tok, &mount_crypt_stat->global_auth_tok_list, mount_crypt_stat_list) { rc = ecryptfs_keyring_auth_tok_for_sig( &global_auth_tok->global_auth_tok_key, &auth_tok, global_auth_tok->sig); if (rc) { printk(KERN_ERR "Could not find valid key in user " "session keyring for sig specified in mount " "option: [%s]\n", global_auth_tok->sig); global_auth_tok->flags |= ECRYPTFS_AUTH_TOK_INVALID; goto out; } else { global_auth_tok->flags &= ~ECRYPTFS_AUTH_TOK_INVALID; up_write(&(global_auth_tok->global_auth_tok_key)->sem); } } out: return rc; } static void 
ecryptfs_init_mount_crypt_stat( struct ecryptfs_mount_crypt_stat *mount_crypt_stat) { memset((void *)mount_crypt_stat, 0, sizeof(struct ecryptfs_mount_crypt_stat)); INIT_LIST_HEAD(&mount_crypt_stat->global_auth_tok_list); mutex_init(&mount_crypt_stat->global_auth_tok_list_mutex); mount_crypt_stat->flags |= ECRYPTFS_MOUNT_CRYPT_STAT_INITIALIZED; } /** * ecryptfs_parse_options * @sb: The ecryptfs super block * @options: The options passed to the kernel * @check_ruid: set to 1 if device uid should be checked against the ruid * * Parse mount options: * debug=N - ecryptfs_verbosity level for debug output * sig=XXX - description(signature) of the key to use * * Returns the dentry object of the lower-level (lower/interposed) * directory; We want to mount our stackable file system on top of * that lower directory. * * The signature of the key to use must be the description of a key * already in the keyring. Mounting will fail if the key can not be * found. * * Returns zero on success; non-zero on error */ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options, uid_t *check_ruid) { char *p; int rc = 0; int sig_set = 0; int cipher_name_set = 0; int fn_cipher_name_set = 0; int cipher_key_bytes; int cipher_key_bytes_set = 0; int fn_cipher_key_bytes; int fn_cipher_key_bytes_set = 0; struct ecryptfs_mount_crypt_stat *mount_crypt_stat = &sbi->mount_crypt_stat; substring_t args[MAX_OPT_ARGS]; int token; char *sig_src; char *cipher_name_dst; char *cipher_name_src; char *fn_cipher_name_dst; char *fn_cipher_name_src; char *fnek_dst; char *fnek_src; char *cipher_key_bytes_src; char *fn_cipher_key_bytes_src; u8 cipher_code; *check_ruid = 0; if (!options) { rc = -EINVAL; goto out; } ecryptfs_init_mount_crypt_stat(mount_crypt_stat); while ((p = strsep(&options, ",")) != NULL) { if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case ecryptfs_opt_sig: case ecryptfs_opt_ecryptfs_sig: sig_src = args[0].from; rc = 
ecryptfs_add_global_auth_tok(mount_crypt_stat, sig_src, 0); if (rc) { printk(KERN_ERR "Error attempting to register " "global sig; rc = [%d]\n", rc); goto out; } sig_set = 1; break; case ecryptfs_opt_cipher: case ecryptfs_opt_ecryptfs_cipher: cipher_name_src = args[0].from; cipher_name_dst = mount_crypt_stat-> global_default_cipher_name; strncpy(cipher_name_dst, cipher_name_src, ECRYPTFS_MAX_CIPHER_NAME_SIZE); cipher_name_dst[ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0'; cipher_name_set = 1; break; case ecryptfs_opt_ecryptfs_key_bytes: cipher_key_bytes_src = args[0].from; cipher_key_bytes = (int)simple_strtol(cipher_key_bytes_src, &cipher_key_bytes_src, 0); mount_crypt_stat->global_default_cipher_key_size = cipher_key_bytes; cipher_key_bytes_set = 1; break; case ecryptfs_opt_passthrough: mount_crypt_stat->flags |= ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED; break; case ecryptfs_opt_xattr_metadata: mount_crypt_stat->flags |= ECRYPTFS_XATTR_METADATA_ENABLED; break; case ecryptfs_opt_encrypted_view: mount_crypt_stat->flags |= ECRYPTFS_XATTR_METADATA_ENABLED; mount_crypt_stat->flags |= ECRYPTFS_ENCRYPTED_VIEW_ENABLED; break; case ecryptfs_opt_fnek_sig: fnek_src = args[0].from; fnek_dst = mount_crypt_stat->global_default_fnek_sig; strncpy(fnek_dst, fnek_src, ECRYPTFS_SIG_SIZE_HEX); mount_crypt_stat->global_default_fnek_sig[ ECRYPTFS_SIG_SIZE_HEX] = '\0'; rc = ecryptfs_add_global_auth_tok( mount_crypt_stat, mount_crypt_stat->global_default_fnek_sig, ECRYPTFS_AUTH_TOK_FNEK); if (rc) { printk(KERN_ERR "Error attempting to register " "global fnek sig [%s]; rc = [%d]\n", mount_crypt_stat->global_default_fnek_sig, rc); goto out; } mount_crypt_stat->flags |= (ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES | ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK); break; case ecryptfs_opt_fn_cipher: fn_cipher_name_src = args[0].from; fn_cipher_name_dst = mount_crypt_stat->global_default_fn_cipher_name; strncpy(fn_cipher_name_dst, fn_cipher_name_src, ECRYPTFS_MAX_CIPHER_NAME_SIZE); 
mount_crypt_stat->global_default_fn_cipher_name[ ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0'; fn_cipher_name_set = 1; break; case ecryptfs_opt_fn_cipher_key_bytes: fn_cipher_key_bytes_src = args[0].from; fn_cipher_key_bytes = (int)simple_strtol(fn_cipher_key_bytes_src, &fn_cipher_key_bytes_src, 0); mount_crypt_stat->global_default_fn_cipher_key_bytes = fn_cipher_key_bytes; fn_cipher_key_bytes_set = 1; break; case ecryptfs_opt_unlink_sigs: mount_crypt_stat->flags |= ECRYPTFS_UNLINK_SIGS; break; case ecryptfs_opt_mount_auth_tok_only: mount_crypt_stat->flags |= ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY; break; case ecryptfs_opt_check_dev_ruid: *check_ruid = 1; break; case ecryptfs_opt_err: default: printk(KERN_WARNING "%s: eCryptfs: unrecognized option [%s]\n", __func__, p); } } if (!sig_set) { rc = -EINVAL; ecryptfs_printk(KERN_ERR, "You must supply at least one valid " "auth tok signature as a mount " "parameter; see the eCryptfs README\n"); goto out; } if (!cipher_name_set) { int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER); BUG_ON(cipher_name_len >= ECRYPTFS_MAX_CIPHER_NAME_SIZE); strcpy(mount_crypt_stat->global_default_cipher_name, ECRYPTFS_DEFAULT_CIPHER); } if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !fn_cipher_name_set) strcpy(mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_cipher_name); if (!cipher_key_bytes_set) mount_crypt_stat->global_default_cipher_key_size = 0; if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !fn_cipher_key_bytes_set) mount_crypt_stat->global_default_fn_cipher_key_bytes = mount_crypt_stat->global_default_cipher_key_size; cipher_code = ecryptfs_code_for_cipher_string( mount_crypt_stat->global_default_cipher_name, mount_crypt_stat->global_default_cipher_key_size); if (!cipher_code) { ecryptfs_printk(KERN_ERR, "eCryptfs doesn't support cipher: %s", mount_crypt_stat->global_default_cipher_name); rc = -EINVAL; goto out; } mutex_lock(&key_tfm_list_mutex); if 
(!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name, NULL)) { rc = ecryptfs_add_new_key_tfm( NULL, mount_crypt_stat->global_default_cipher_name, mount_crypt_stat->global_default_cipher_key_size); if (rc) { printk(KERN_ERR "Error attempting to initialize " "cipher with name = [%s] and key size = [%td]; " "rc = [%d]\n", mount_crypt_stat->global_default_cipher_name, mount_crypt_stat->global_default_cipher_key_size, rc); rc = -EINVAL; mutex_unlock(&key_tfm_list_mutex); goto out; } } if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !ecryptfs_tfm_exists( mount_crypt_stat->global_default_fn_cipher_name, NULL)) { rc = ecryptfs_add_new_key_tfm( NULL, mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_fn_cipher_key_bytes); if (rc) { printk(KERN_ERR "Error attempting to initialize " "cipher with name = [%s] and key size = [%td]; " "rc = [%d]\n", mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_fn_cipher_key_bytes, rc); rc = -EINVAL; mutex_unlock(&key_tfm_list_mutex); goto out; } } mutex_unlock(&key_tfm_list_mutex); rc = ecryptfs_init_global_auth_toks(mount_crypt_stat); if (rc) printk(KERN_WARNING "One or more global auth toks could not " "properly register; rc = [%d]\n", rc); out: return rc; } struct kmem_cache *ecryptfs_sb_info_cache; static struct file_system_type ecryptfs_fs_type; /** * ecryptfs_get_sb * @fs_type * @flags * @dev_name: The path to mount over * @raw_data: The options passed into the kernel */ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data) { struct super_block *s; struct ecryptfs_sb_info *sbi; struct ecryptfs_mount_crypt_stat *mount_crypt_stat; struct ecryptfs_dentry_info *root_info; const char *err = "Getting sb failed"; struct inode *inode; struct path path; uid_t check_ruid; int rc; sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL); if (!sbi) { rc = -ENOMEM; goto out; } rc = 
ecryptfs_parse_options(sbi, raw_data, &check_ruid); if (rc) { err = "Error parsing options"; goto out; } mount_crypt_stat = &sbi->mount_crypt_stat; s = sget(fs_type, NULL, set_anon_super, NULL); if (IS_ERR(s)) { rc = PTR_ERR(s); goto out; } rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY); if (rc) goto out1; ecryptfs_set_superblock_private(s, sbi); s->s_bdi = &sbi->bdi; /* ->kill_sb() will take care of sbi after that point */ sbi = NULL; s->s_op = &ecryptfs_sops; s->s_d_op = &ecryptfs_dops; err = "Reading sb failed"; rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path); if (rc) { ecryptfs_printk(KERN_WARNING, "kern_path() failed\n"); goto out1; } if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) { rc = -EINVAL; printk(KERN_ERR "Mount on filesystem of type " "eCryptfs explicitly disallowed due to " "known incompatibilities\n"); goto out_free; } if (check_ruid && path.dentry->d_inode->i_uid != current_uid()) { rc = -EPERM; printk(KERN_ERR "Mount of device (uid: %d) not owned by " "requested user (uid: %d)\n", path.dentry->d_inode->i_uid, current_uid()); goto out_free; } ecryptfs_set_superblock_lower(s, path.dentry->d_sb); /** * Set the POSIX ACL flag based on whether they're enabled in the lower * mount. 
*/ s->s_flags = flags & ~MS_POSIXACL; s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL; /** * Force a read-only eCryptfs mount when: * 1) The lower mount is ro * 2) The ecryptfs_encrypted_view mount option is specified */ if (path.dentry->d_sb->s_flags & MS_RDONLY || mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) s->s_flags |= MS_RDONLY; s->s_maxbytes = path.dentry->d_sb->s_maxbytes; s->s_blocksize = path.dentry->d_sb->s_blocksize; s->s_magic = ECRYPTFS_SUPER_MAGIC; s->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1; rc = -EINVAL; if (s->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { printk(KERN_ERR "eCryptfs: maximum fs stacking depth exceeded\n"); goto out_free; } inode = ecryptfs_get_inode(path.dentry->d_inode, s); rc = PTR_ERR(inode); if (IS_ERR(inode)) goto out_free; s->s_root = d_make_root(inode); if (!s->s_root) { rc = -ENOMEM; goto out_free; } rc = -ENOMEM; root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL); if (!root_info) goto out_free; /* ->kill_sb() will take care of root_info */ ecryptfs_set_dentry_private(s->s_root, root_info); ecryptfs_set_dentry_lower(s->s_root, path.dentry); ecryptfs_set_dentry_lower_mnt(s->s_root, path.mnt); s->s_flags |= MS_ACTIVE; return dget(s->s_root); out_free: path_put(&path); out1: deactivate_locked_super(s); out: if (sbi) { ecryptfs_destroy_mount_crypt_stat(&sbi->mount_crypt_stat); kmem_cache_free(ecryptfs_sb_info_cache, sbi); } printk(KERN_ERR "%s; rc = [%d]\n", err, rc); return ERR_PTR(rc); } /** * ecryptfs_kill_block_super * @sb: The ecryptfs super block * * Used to bring the superblock down and free the private data. 
*/ static void ecryptfs_kill_block_super(struct super_block *sb) { struct ecryptfs_sb_info *sb_info = ecryptfs_superblock_to_private(sb); kill_anon_super(sb); if (!sb_info) return; ecryptfs_destroy_mount_crypt_stat(&sb_info->mount_crypt_stat); bdi_destroy(&sb_info->bdi); kmem_cache_free(ecryptfs_sb_info_cache, sb_info); } static struct file_system_type ecryptfs_fs_type = { .owner = THIS_MODULE, .name = "ecryptfs", .mount = ecryptfs_mount, .kill_sb = ecryptfs_kill_block_super, .fs_flags = 0 }; /** * inode_info_init_once * * Initializes the ecryptfs_inode_info_cache when it is created */ static void inode_info_init_once(void *vptr) { struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr; inode_init_once(&ei->vfs_inode); } static struct ecryptfs_cache_info { struct kmem_cache **cache; const char *name; size_t size; void (*ctor)(void *obj); } ecryptfs_cache_infos[] = { { .cache = &ecryptfs_auth_tok_list_item_cache, .name = "ecryptfs_auth_tok_list_item", .size = sizeof(struct ecryptfs_auth_tok_list_item), }, { .cache = &ecryptfs_file_info_cache, .name = "ecryptfs_file_cache", .size = sizeof(struct ecryptfs_file_info), }, { .cache = &ecryptfs_dentry_info_cache, .name = "ecryptfs_dentry_info_cache", .size = sizeof(struct ecryptfs_dentry_info), }, { .cache = &ecryptfs_inode_info_cache, .name = "ecryptfs_inode_cache", .size = sizeof(struct ecryptfs_inode_info), .ctor = inode_info_init_once, }, { .cache = &ecryptfs_sb_info_cache, .name = "ecryptfs_sb_cache", .size = sizeof(struct ecryptfs_sb_info), }, { .cache = &ecryptfs_header_cache, .name = "ecryptfs_headers", .size = PAGE_CACHE_SIZE, }, { .cache = &ecryptfs_xattr_cache, .name = "ecryptfs_xattr_cache", .size = PAGE_CACHE_SIZE, }, { .cache = &ecryptfs_key_record_cache, .name = "ecryptfs_key_record_cache", .size = sizeof(struct ecryptfs_key_record), }, { .cache = &ecryptfs_key_sig_cache, .name = "ecryptfs_key_sig_cache", .size = sizeof(struct ecryptfs_key_sig), }, { .cache = &ecryptfs_global_auth_tok_cache, 
.name = "ecryptfs_global_auth_tok_cache", .size = sizeof(struct ecryptfs_global_auth_tok), }, { .cache = &ecryptfs_key_tfm_cache, .name = "ecryptfs_key_tfm_cache", .size = sizeof(struct ecryptfs_key_tfm), }, { .cache = &ecryptfs_open_req_cache, .name = "ecryptfs_open_req_cache", .size = sizeof(struct ecryptfs_open_req), }, }; static void ecryptfs_free_kmem_caches(void) { int i; for (i = 0; i < ARRAY_SIZE(ecryptfs_cache_infos); i++) { struct ecryptfs_cache_info *info; info = &ecryptfs_cache_infos[i]; if (*(info->cache)) kmem_cache_destroy(*(info->cache)); } } /** * ecryptfs_init_kmem_caches * * Returns zero on success; non-zero otherwise */ static int ecryptfs_init_kmem_caches(void) { int i; for (i = 0; i < ARRAY_SIZE(ecryptfs_cache_infos); i++) { struct ecryptfs_cache_info *info; info = &ecryptfs_cache_infos[i]; *(info->cache) = kmem_cache_create(info->name, info->size, 0, SLAB_HWCACHE_ALIGN, info->ctor); if (!*(info->cache)) { ecryptfs_free_kmem_caches(); ecryptfs_printk(KERN_WARNING, "%s: " "kmem_cache_create failed\n", info->name); return -ENOMEM; } } return 0; } static struct kobject *ecryptfs_kobj; static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr, char *buff) { return snprintf(buff, PAGE_SIZE, "%d\n", ECRYPTFS_VERSIONING_MASK); } static struct kobj_attribute version_attr = __ATTR_RO(version); static struct attribute *attributes[] = { &version_attr.attr, NULL, }; static struct attribute_group attr_group = { .attrs = attributes, }; static int do_sysfs_registration(void) { int rc; ecryptfs_kobj = kobject_create_and_add("ecryptfs", fs_kobj); if (!ecryptfs_kobj) { printk(KERN_ERR "Unable to create ecryptfs kset\n"); rc = -ENOMEM; goto out; } rc = sysfs_create_group(ecryptfs_kobj, &attr_group); if (rc) { printk(KERN_ERR "Unable to create ecryptfs version attributes\n"); kobject_put(ecryptfs_kobj); } out: return rc; } static void do_sysfs_unregistration(void) { sysfs_remove_group(ecryptfs_kobj, &attr_group); kobject_put(ecryptfs_kobj); } 
static int __init ecryptfs_init(void) { int rc; if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_CACHE_SIZE) { rc = -EINVAL; ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is " "larger than the host's page size, and so " "eCryptfs cannot run on this system. The " "default eCryptfs extent size is [%u] bytes; " "the page size is [%lu] bytes.\n", ECRYPTFS_DEFAULT_EXTENT_SIZE, (unsigned long)PAGE_CACHE_SIZE); goto out; } rc = ecryptfs_init_kmem_caches(); if (rc) { printk(KERN_ERR "Failed to allocate one or more kmem_cache objects\n"); goto out; } rc = do_sysfs_registration(); if (rc) { printk(KERN_ERR "sysfs registration failed\n"); goto out_free_kmem_caches; } rc = ecryptfs_init_kthread(); if (rc) { printk(KERN_ERR "%s: kthread initialization failed; " "rc = [%d]\n", __func__, rc); goto out_do_sysfs_unregistration; } rc = ecryptfs_init_messaging(); if (rc) { printk(KERN_ERR "Failure occurred while attempting to " "initialize the communications channel to " "ecryptfsd\n"); goto out_destroy_kthread; } rc = ecryptfs_init_crypto(); if (rc) { printk(KERN_ERR "Failure whilst attempting to init crypto; " "rc = [%d]\n", rc); goto out_release_messaging; } rc = register_filesystem(&ecryptfs_fs_type); if (rc) { printk(KERN_ERR "Failed to register filesystem\n"); goto out_destroy_crypto; } if (ecryptfs_verbosity > 0) printk(KERN_CRIT "eCryptfs verbosity set to %d. 
Secret values " "will be written to the syslog!\n", ecryptfs_verbosity); goto out; out_destroy_crypto: ecryptfs_destroy_crypto(); out_release_messaging: ecryptfs_release_messaging(); out_destroy_kthread: ecryptfs_destroy_kthread(); out_do_sysfs_unregistration: do_sysfs_unregistration(); out_free_kmem_caches: ecryptfs_free_kmem_caches(); out: return rc; } static void __exit ecryptfs_exit(void) { int rc; rc = ecryptfs_destroy_crypto(); if (rc) printk(KERN_ERR "Failure whilst attempting to destroy crypto; " "rc = [%d]\n", rc); ecryptfs_release_messaging(); ecryptfs_destroy_kthread(); do_sysfs_unregistration(); unregister_filesystem(&ecryptfs_fs_type); ecryptfs_free_kmem_caches(); } MODULE_AUTHOR("Michael A. Halcrow <mhalcrow@us.ibm.com>"); MODULE_DESCRIPTION("eCryptfs"); MODULE_LICENSE("GPL"); module_init(ecryptfs_init) module_exit(ecryptfs_exit)
gpl-2.0
diverger/linux-2.6.34-lpc32xx
drivers/staging/go7007/go7007-v4l2.c
42
45873
/* * Copyright (C) 2005-2006 Micronas USA Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/version.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/unistd.h> #include <linux/time.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/videodev2.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-subdev.h> #include <linux/i2c.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <asm/system.h> #include "go7007.h" #include "go7007-priv.h" #include "wis-i2c.h" /* Temporary defines until accepted in v4l-dvb */ #ifndef V4L2_MPEG_STREAM_TYPE_MPEG_ELEM #define V4L2_MPEG_STREAM_TYPE_MPEG_ELEM 6 /* MPEG elementary stream */ #endif #ifndef V4L2_MPEG_VIDEO_ENCODING_MPEG_4 #define V4L2_MPEG_VIDEO_ENCODING_MPEG_4 3 #endif #define call_all(dev, o, f, args...) 
\ v4l2_device_call_until_err(dev, 0, o, f, ##args) static void deactivate_buffer(struct go7007_buffer *gobuf) { int i; if (gobuf->state != BUF_STATE_IDLE) { list_del(&gobuf->stream); gobuf->state = BUF_STATE_IDLE; } if (gobuf->page_count > 0) { for (i = 0; i < gobuf->page_count; ++i) page_cache_release(gobuf->pages[i]); gobuf->page_count = 0; } } static void abort_queued(struct go7007 *go) { struct go7007_buffer *gobuf, *next; list_for_each_entry_safe(gobuf, next, &go->stream, stream) { deactivate_buffer(gobuf); } } static int go7007_streamoff(struct go7007 *go) { int retval = -EINVAL; unsigned long flags; mutex_lock(&go->hw_lock); if (go->streaming) { go->streaming = 0; go7007_stream_stop(go); spin_lock_irqsave(&go->spinlock, flags); abort_queued(go); spin_unlock_irqrestore(&go->spinlock, flags); go7007_reset_encoder(go); retval = 0; } mutex_unlock(&go->hw_lock); return 0; } static int go7007_open(struct file *file) { struct go7007 *go = video_get_drvdata(video_devdata(file)); struct go7007_file *gofh; if (go->status != STATUS_ONLINE) return -EBUSY; gofh = kmalloc(sizeof(struct go7007_file), GFP_KERNEL); if (gofh == NULL) return -ENOMEM; ++go->ref_count; gofh->go = go; mutex_init(&gofh->lock); gofh->buf_count = 0; file->private_data = gofh; return 0; } static int go7007_release(struct file *file) { struct go7007_file *gofh = file->private_data; struct go7007 *go = gofh->go; if (gofh->buf_count > 0) { go7007_streamoff(go); go->in_use = 0; kfree(gofh->bufs); gofh->buf_count = 0; } kfree(gofh); if (--go->ref_count == 0) kfree(go); file->private_data = NULL; return 0; } static u32 get_frame_type_flag(struct go7007_buffer *gobuf, int format) { u8 *f = page_address(gobuf->pages[0]); switch (format) { case GO7007_FORMAT_MJPEG: return V4L2_BUF_FLAG_KEYFRAME; case GO7007_FORMAT_MPEG4: switch ((f[gobuf->frame_offset + 4] >> 6) & 0x3) { case 0: return V4L2_BUF_FLAG_KEYFRAME; case 1: return V4L2_BUF_FLAG_PFRAME; case 2: return V4L2_BUF_FLAG_BFRAME; default: return 0; } case 
GO7007_FORMAT_MPEG1: case GO7007_FORMAT_MPEG2: switch ((f[gobuf->frame_offset + 5] >> 3) & 0x7) { case 1: return V4L2_BUF_FLAG_KEYFRAME; case 2: return V4L2_BUF_FLAG_PFRAME; case 3: return V4L2_BUF_FLAG_BFRAME; default: return 0; } } return 0; } static int set_capture_size(struct go7007 *go, struct v4l2_format *fmt, int try) { int sensor_height = 0, sensor_width = 0; int width, height, i; if (fmt != NULL && fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG && fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MPEG && fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MPEG4) return -EINVAL; switch (go->standard) { case GO7007_STD_NTSC: sensor_width = 720; sensor_height = 480; break; case GO7007_STD_PAL: sensor_width = 720; sensor_height = 576; break; case GO7007_STD_OTHER: sensor_width = go->board_info->sensor_width; sensor_height = go->board_info->sensor_height; break; } if (fmt == NULL) { width = sensor_width; height = sensor_height; } else if (go->board_info->sensor_flags & GO7007_SENSOR_SCALING) { if (fmt->fmt.pix.width > sensor_width) width = sensor_width; else if (fmt->fmt.pix.width < 144) width = 144; else width = fmt->fmt.pix.width & ~0x0f; if (fmt->fmt.pix.height > sensor_height) height = sensor_height; else if (fmt->fmt.pix.height < 96) height = 96; else height = fmt->fmt.pix.height & ~0x0f; } else { int requested_size = fmt->fmt.pix.width * fmt->fmt.pix.height; int sensor_size = sensor_width * sensor_height; if (64 * requested_size < 9 * sensor_size) { width = sensor_width / 4; height = sensor_height / 4; } else if (64 * requested_size < 36 * sensor_size) { width = sensor_width / 2; height = sensor_height / 2; } else { width = sensor_width; height = sensor_height; } width &= ~0xf; height &= ~0xf; } if (fmt != NULL) { u32 pixelformat = fmt->fmt.pix.pixelformat; memset(fmt, 0, sizeof(*fmt)); fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; fmt->fmt.pix.width = width; fmt->fmt.pix.height = height; fmt->fmt.pix.pixelformat = pixelformat; fmt->fmt.pix.field = V4L2_FIELD_NONE; 
fmt->fmt.pix.bytesperline = 0; fmt->fmt.pix.sizeimage = GO7007_BUF_SIZE; fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; /* ?? */ } if (try) return 0; go->width = width; go->height = height; go->encoder_h_offset = go->board_info->sensor_h_offset; go->encoder_v_offset = go->board_info->sensor_v_offset; for (i = 0; i < 4; ++i) go->modet[i].enable = 0; for (i = 0; i < 1624; ++i) go->modet_map[i] = 0; if (go->board_info->sensor_flags & GO7007_SENSOR_SCALING) { struct v4l2_format res; if (fmt != NULL) { res = *fmt; } else { res.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; res.fmt.pix.width = width; } if (height > sensor_height / 2) { res.fmt.pix.height = height / 2; go->encoder_v_halve = 0; } else { res.fmt.pix.height = height; go->encoder_v_halve = 1; } call_all(&go->v4l2_dev, video, s_fmt, &res); } else { if (width <= sensor_width / 4) { go->encoder_h_halve = 1; go->encoder_v_halve = 1; go->encoder_subsample = 1; } else if (width <= sensor_width / 2) { go->encoder_h_halve = 1; go->encoder_v_halve = 1; go->encoder_subsample = 0; } else { go->encoder_h_halve = 0; go->encoder_v_halve = 0; go->encoder_subsample = 0; } } if (fmt == NULL) return 0; switch (fmt->fmt.pix.pixelformat) { case V4L2_PIX_FMT_MPEG: if (go->format == GO7007_FORMAT_MPEG1 || go->format == GO7007_FORMAT_MPEG2 || go->format == GO7007_FORMAT_MPEG4) break; go->format = GO7007_FORMAT_MPEG1; go->pali = 0; go->aspect_ratio = GO7007_RATIO_1_1; go->gop_size = go->sensor_framerate / 1000; go->ipb = 0; go->closed_gop = 1; go->repeat_seqhead = 1; go->seq_header_enable = 1; go->gop_header_enable = 1; go->dvd_mode = 0; break; /* Backwards compatibility only! 
*/ case V4L2_PIX_FMT_MPEG4: if (go->format == GO7007_FORMAT_MPEG4) break; go->format = GO7007_FORMAT_MPEG4; go->pali = 0xf5; go->aspect_ratio = GO7007_RATIO_1_1; go->gop_size = go->sensor_framerate / 1000; go->ipb = 0; go->closed_gop = 1; go->repeat_seqhead = 1; go->seq_header_enable = 1; go->gop_header_enable = 1; go->dvd_mode = 0; break; case V4L2_PIX_FMT_MJPEG: go->format = GO7007_FORMAT_MJPEG; go->pali = 0; go->aspect_ratio = GO7007_RATIO_1_1; go->gop_size = 0; go->ipb = 0; go->closed_gop = 0; go->repeat_seqhead = 0; go->seq_header_enable = 0; go->gop_header_enable = 0; go->dvd_mode = 0; break; } return 0; } #if 0 static int clip_to_modet_map(struct go7007 *go, int region, struct v4l2_clip *clip_list) { struct v4l2_clip clip, *clip_ptr; int x, y, mbnum; /* Check if coordinates are OK and if any macroblocks are already * used by other regions (besides 0) */ clip_ptr = clip_list; while (clip_ptr) { if (copy_from_user(&clip, clip_ptr, sizeof(clip))) return -EFAULT; if (clip.c.left < 0 || (clip.c.left & 0xF) || clip.c.width <= 0 || (clip.c.width & 0xF)) return -EINVAL; if (clip.c.left + clip.c.width > go->width) return -EINVAL; if (clip.c.top < 0 || (clip.c.top & 0xF) || clip.c.height <= 0 || (clip.c.height & 0xF)) return -EINVAL; if (clip.c.top + clip.c.height > go->height) return -EINVAL; for (y = 0; y < clip.c.height; y += 16) for (x = 0; x < clip.c.width; x += 16) { mbnum = (go->width >> 4) * ((clip.c.top + y) >> 4) + ((clip.c.left + x) >> 4); if (go->modet_map[mbnum] != 0 && go->modet_map[mbnum] != region) return -EBUSY; } clip_ptr = clip.next; } /* Clear old region macroblocks */ for (mbnum = 0; mbnum < 1624; ++mbnum) if (go->modet_map[mbnum] == region) go->modet_map[mbnum] = 0; /* Claim macroblocks in this list */ clip_ptr = clip_list; while (clip_ptr) { if (copy_from_user(&clip, clip_ptr, sizeof(clip))) return -EFAULT; for (y = 0; y < clip.c.height; y += 16) for (x = 0; x < clip.c.width; x += 16) { mbnum = (go->width >> 4) * ((clip.c.top + y) >> 4) + 
					((clip.c.left + x) >> 4);
				go->modet_map[mbnum] = region;
			}
		clip_ptr = clip.next;
	}
	return 0;
}
#endif

/*
 * Emulated VIDIOC_QUERYCTRL for the MPEG encoder controls handled by the
 * bridge itself.  v4l2_ctrl_next() maps the requested id onto the next
 * supported control; each case fills in the range/step/default.
 */
static int mpeg_query_ctrl(struct v4l2_queryctrl *ctrl)
{
	/* Controls implemented by this driver, terminated by 0. */
	static const u32 mpeg_ctrls[] = {
		V4L2_CID_MPEG_CLASS,
		V4L2_CID_MPEG_STREAM_TYPE,
		V4L2_CID_MPEG_VIDEO_ENCODING,
		V4L2_CID_MPEG_VIDEO_ASPECT,
		V4L2_CID_MPEG_VIDEO_GOP_SIZE,
		V4L2_CID_MPEG_VIDEO_GOP_CLOSURE,
		V4L2_CID_MPEG_VIDEO_BITRATE,
		0
	};
	static const u32 *ctrl_classes[] = {
		mpeg_ctrls,
		NULL
	};

	/* Advance to the next control id we actually support. */
	ctrl->id = v4l2_ctrl_next(ctrl_classes, ctrl->id);

	switch (ctrl->id) {
	case V4L2_CID_MPEG_CLASS:
		return v4l2_ctrl_query_fill(ctrl, 0, 0, 0, 0);
	case V4L2_CID_MPEG_STREAM_TYPE:
		return v4l2_ctrl_query_fill(ctrl,
				V4L2_MPEG_STREAM_TYPE_MPEG2_DVD,
				V4L2_MPEG_STREAM_TYPE_MPEG_ELEM,
				1, V4L2_MPEG_STREAM_TYPE_MPEG_ELEM);
	case V4L2_CID_MPEG_VIDEO_ENCODING:
		return v4l2_ctrl_query_fill(ctrl,
				V4L2_MPEG_VIDEO_ENCODING_MPEG_1,
				V4L2_MPEG_VIDEO_ENCODING_MPEG_4,
				1, V4L2_MPEG_VIDEO_ENCODING_MPEG_2);
	case V4L2_CID_MPEG_VIDEO_ASPECT:
		return v4l2_ctrl_query_fill(ctrl,
				V4L2_MPEG_VIDEO_ASPECT_1x1,
				V4L2_MPEG_VIDEO_ASPECT_16x9,
				1, V4L2_MPEG_VIDEO_ASPECT_1x1);
	case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
		return v4l2_ctrl_query_fill(ctrl, 0, 34, 1, 15);
	case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
		return v4l2_ctrl_query_fill(ctrl, 0, 1, 1, 0);
	case V4L2_CID_MPEG_VIDEO_BITRATE:
		return v4l2_ctrl_query_fill(ctrl, 64000, 10000000, 1, 1500000);
	default:
		return -EINVAL;
	}
	/* NOTE(review): unreachable -- every case above returns. */
	return 0;
}

/*
 * Emulated VIDIOC_S_CTRL for the bridge's MPEG encoder controls.
 * Rejected with -EBUSY while streaming, since the encoder parameters are
 * latched when the stream starts.
 */
static int mpeg_s_ctrl(struct v4l2_control *ctrl, struct go7007 *go)
{
	/* pretty sure we can't change any of these while streaming */
	if (go->streaming)
		return -EBUSY;
	switch (ctrl->id) {
	case V4L2_CID_MPEG_STREAM_TYPE:
		switch (ctrl->value) {
		case V4L2_MPEG_STREAM_TYPE_MPEG2_DVD:
			/* DVD mode implies a fixed MPEG-2 parameter set. */
			go->format = GO7007_FORMAT_MPEG2;
			go->bitrate = 9800000;
			go->gop_size = 15;
			go->pali = 0x48;
			go->closed_gop = 1;
			go->repeat_seqhead = 0;
			go->seq_header_enable = 1;
			go->gop_header_enable = 1;
			go->dvd_mode = 1;
			break;
		case V4L2_MPEG_STREAM_TYPE_MPEG_ELEM:
			/* todo: */
			break;
		default:
			return -EINVAL;
		}
		break;
	case V4L2_CID_MPEG_VIDEO_ENCODING:
		switch (ctrl->value) {
		case V4L2_MPEG_VIDEO_ENCODING_MPEG_1:
			go->format = GO7007_FORMAT_MPEG1;
			go->pali = 0;
			break;
		case V4L2_MPEG_VIDEO_ENCODING_MPEG_2:
			go->format = GO7007_FORMAT_MPEG2;
			/* pali = profile-and-level id for the firmware */
			/*if (mpeg->pali >> 24 == 2)
				go->pali = mpeg->pali & 0xff;
			else*/
				go->pali = 0x48;
			break;
		case V4L2_MPEG_VIDEO_ENCODING_MPEG_4:
			go->format = GO7007_FORMAT_MPEG4;
			/*if (mpeg->pali >> 24 == 4)
				go->pali = mpeg->pali & 0xff;
			else*/
				go->pali = 0xf5;
			break;
		default:
			return -EINVAL;
		}
		go->gop_header_enable =
			/*mpeg->flags & GO7007_MPEG_OMIT_GOP_HEADER
			? 0 :*/ 1;
		/*if (mpeg->flags & GO7007_MPEG_REPEAT_SEQHEADER)
			go->repeat_seqhead = 1;
		else*/
			go->repeat_seqhead = 0;
		go->dvd_mode = 0;
		break;
	case V4L2_CID_MPEG_VIDEO_ASPECT:
		/* MJPEG has no aspect-ratio signalling in this driver. */
		if (go->format == GO7007_FORMAT_MJPEG)
			return -EINVAL;
		switch (ctrl->value) {
		case V4L2_MPEG_VIDEO_ASPECT_1x1:
			go->aspect_ratio = GO7007_RATIO_1_1;
			break;
		case V4L2_MPEG_VIDEO_ASPECT_4x3:
			go->aspect_ratio = GO7007_RATIO_4_3;
			break;
		case V4L2_MPEG_VIDEO_ASPECT_16x9:
			go->aspect_ratio = GO7007_RATIO_16_9;
			break;
		case V4L2_MPEG_VIDEO_ASPECT_221x100:
		default:
			return -EINVAL;
		}
		break;
	case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
		if (ctrl->value < 0 || ctrl->value > 34)
			return -EINVAL;
		go->gop_size = ctrl->value;
		break;
	case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
		if (ctrl->value != 0 && ctrl->value != 1)
			return -EINVAL;
		go->closed_gop = ctrl->value;
		break;
	case V4L2_CID_MPEG_VIDEO_BITRATE:
		/* Upper bound is kind of arbitrary here */
		if (ctrl->value < 64000 || ctrl->value > 10000000)
			return -EINVAL;
		go->bitrate = ctrl->value;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * Emulated VIDIOC_G_CTRL: report the current encoder state back through
 * the standard MPEG control ids.
 */
static int mpeg_g_ctrl(struct v4l2_control *ctrl, struct go7007 *go)
{
	switch (ctrl->id) {
	case V4L2_CID_MPEG_STREAM_TYPE:
		if (go->dvd_mode)
			ctrl->value = V4L2_MPEG_STREAM_TYPE_MPEG2_DVD;
		else
			ctrl->value = V4L2_MPEG_STREAM_TYPE_MPEG_ELEM;
		break;
	case V4L2_CID_MPEG_VIDEO_ENCODING:
		switch (go->format) {
		case GO7007_FORMAT_MPEG1:
			ctrl->value = V4L2_MPEG_VIDEO_ENCODING_MPEG_1;
			break;
		case
GO7007_FORMAT_MPEG2: ctrl->value = V4L2_MPEG_VIDEO_ENCODING_MPEG_2; break; case GO7007_FORMAT_MPEG4: ctrl->value = V4L2_MPEG_VIDEO_ENCODING_MPEG_4; break; default: return -EINVAL; } break; case V4L2_CID_MPEG_VIDEO_ASPECT: switch (go->aspect_ratio) { case GO7007_RATIO_1_1: ctrl->value = V4L2_MPEG_VIDEO_ASPECT_1x1; break; case GO7007_RATIO_4_3: ctrl->value = V4L2_MPEG_VIDEO_ASPECT_4x3; break; case GO7007_RATIO_16_9: ctrl->value = V4L2_MPEG_VIDEO_ASPECT_16x9; break; default: return -EINVAL; } break; case V4L2_CID_MPEG_VIDEO_GOP_SIZE: ctrl->value = go->gop_size; break; case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: ctrl->value = go->closed_gop; break; case V4L2_CID_MPEG_VIDEO_BITRATE: ctrl->value = go->bitrate; break; default: return -EINVAL; } return 0; } static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct go7007 *go = ((struct go7007_file *) priv)->go; strlcpy(cap->driver, "go7007", sizeof(cap->driver)); strlcpy(cap->card, go->name, sizeof(cap->card)); #if 0 strlcpy(cap->bus_info, dev_name(&dev->udev->dev), sizeof(cap->bus_info)); #endif cap->version = KERNEL_VERSION(0, 9, 8); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; /* | V4L2_CAP_AUDIO; */ if (go->board_info->flags & GO7007_BOARD_HAS_TUNER) cap->capabilities |= V4L2_CAP_TUNER; return 0; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *fmt) { char *desc = NULL; switch (fmt->index) { case 0: fmt->pixelformat = V4L2_PIX_FMT_MJPEG; desc = "Motion-JPEG"; break; case 1: fmt->pixelformat = V4L2_PIX_FMT_MPEG; desc = "MPEG1/MPEG2/MPEG4"; break; default: return -EINVAL; } fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; fmt->flags = V4L2_FMT_FLAG_COMPRESSED; strncpy(fmt->description, desc, sizeof(fmt->description)); return 0; } static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct go7007 *go = ((struct go7007_file *) priv)->go; fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; fmt->fmt.pix.width = 
go->width; fmt->fmt.pix.height = go->height; fmt->fmt.pix.pixelformat = (go->format == GO7007_FORMAT_MJPEG) ? V4L2_PIX_FMT_MJPEG : V4L2_PIX_FMT_MPEG; fmt->fmt.pix.field = V4L2_FIELD_NONE; fmt->fmt.pix.bytesperline = 0; fmt->fmt.pix.sizeimage = GO7007_BUF_SIZE; fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; return 0; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct go7007 *go = ((struct go7007_file *) priv)->go; return set_capture_size(go, fmt, 1); } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct go7007 *go = ((struct go7007_file *) priv)->go; if (go->streaming) return -EBUSY; return set_capture_size(go, fmt, 0); } static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *req) { struct go7007_file *gofh = priv; struct go7007 *go = gofh->go; int retval = -EBUSY; unsigned int count, i; if (go->streaming) return retval; if (req->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || req->memory != V4L2_MEMORY_MMAP) return -EINVAL; mutex_lock(&gofh->lock); for (i = 0; i < gofh->buf_count; ++i) if (gofh->bufs[i].mapped > 0) goto unlock_and_return; mutex_lock(&go->hw_lock); if (go->in_use > 0 && gofh->buf_count == 0) { mutex_unlock(&go->hw_lock); goto unlock_and_return; } if (gofh->buf_count > 0) kfree(gofh->bufs); retval = -ENOMEM; count = req->count; if (count > 0) { if (count < 2) count = 2; if (count > 32) count = 32; gofh->bufs = kmalloc(count * sizeof(struct go7007_buffer), GFP_KERNEL); if (!gofh->bufs) { mutex_unlock(&go->hw_lock); goto unlock_and_return; } memset(gofh->bufs, 0, count * sizeof(struct go7007_buffer)); for (i = 0; i < count; ++i) { gofh->bufs[i].go = go; gofh->bufs[i].index = i; gofh->bufs[i].state = BUF_STATE_IDLE; gofh->bufs[i].mapped = 0; } go->in_use = 1; } else { go->in_use = 0; } gofh->buf_count = count; mutex_unlock(&go->hw_lock); mutex_unlock(&gofh->lock); memset(req, 0, sizeof(*req)); req->count = count; req->type = 
V4L2_BUF_TYPE_VIDEO_CAPTURE; req->memory = V4L2_MEMORY_MMAP; return 0; unlock_and_return: mutex_unlock(&gofh->lock); return retval; } static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct go7007_file *gofh = priv; int retval = -EINVAL; unsigned int index; if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return retval; index = buf->index; mutex_lock(&gofh->lock); if (index >= gofh->buf_count) goto unlock_and_return; memset(buf, 0, sizeof(*buf)); buf->index = index; buf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; switch (gofh->bufs[index].state) { case BUF_STATE_QUEUED: buf->flags = V4L2_BUF_FLAG_QUEUED; break; case BUF_STATE_DONE: buf->flags = V4L2_BUF_FLAG_DONE; break; default: buf->flags = 0; } if (gofh->bufs[index].mapped) buf->flags |= V4L2_BUF_FLAG_MAPPED; buf->memory = V4L2_MEMORY_MMAP; buf->m.offset = index * GO7007_BUF_SIZE; buf->length = GO7007_BUF_SIZE; mutex_unlock(&gofh->lock); return 0; unlock_and_return: mutex_unlock(&gofh->lock); return retval; } static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct go7007_file *gofh = priv; struct go7007 *go = gofh->go; struct go7007_buffer *gobuf; unsigned long flags; int retval = -EINVAL; int ret; if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || buf->memory != V4L2_MEMORY_MMAP) return retval; mutex_lock(&gofh->lock); if (buf->index < 0 || buf->index >= gofh->buf_count) goto unlock_and_return; gobuf = &gofh->bufs[buf->index]; if (!gobuf->mapped) goto unlock_and_return; retval = -EBUSY; if (gobuf->state != BUF_STATE_IDLE) goto unlock_and_return; /* offset will be 0 until we really support USERPTR streaming */ gobuf->offset = gobuf->user_addr & ~PAGE_MASK; gobuf->bytesused = 0; gobuf->frame_offset = 0; gobuf->modet_active = 0; if (gobuf->offset > 0) gobuf->page_count = GO7007_BUF_PAGES + 1; else gobuf->page_count = GO7007_BUF_PAGES; retval = -ENOMEM; down_read(&current->mm->mmap_sem); ret = get_user_pages(current, current->mm, gobuf->user_addr & PAGE_MASK, 
gobuf->page_count, 1, 1, gobuf->pages, NULL); up_read(&current->mm->mmap_sem); if (ret != gobuf->page_count) { int i; for (i = 0; i < ret; ++i) page_cache_release(gobuf->pages[i]); gobuf->page_count = 0; goto unlock_and_return; } gobuf->state = BUF_STATE_QUEUED; spin_lock_irqsave(&go->spinlock, flags); list_add_tail(&gobuf->stream, &go->stream); spin_unlock_irqrestore(&go->spinlock, flags); mutex_unlock(&gofh->lock); return 0; unlock_and_return: mutex_unlock(&gofh->lock); return retval; } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct go7007_file *gofh = priv; struct go7007 *go = gofh->go; struct go7007_buffer *gobuf; int retval = -EINVAL; unsigned long flags; u32 frame_type_flag; DEFINE_WAIT(wait); if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return retval; if (buf->memory != V4L2_MEMORY_MMAP) return retval; mutex_lock(&gofh->lock); if (list_empty(&go->stream)) goto unlock_and_return; gobuf = list_entry(go->stream.next, struct go7007_buffer, stream); retval = -EAGAIN; if (gobuf->state != BUF_STATE_DONE && !(file->f_flags & O_NONBLOCK)) { for (;;) { prepare_to_wait(&go->frame_waitq, &wait, TASK_INTERRUPTIBLE); if (gobuf->state == BUF_STATE_DONE) break; if (signal_pending(current)) { retval = -ERESTARTSYS; break; } schedule(); } finish_wait(&go->frame_waitq, &wait); } if (gobuf->state != BUF_STATE_DONE) goto unlock_and_return; spin_lock_irqsave(&go->spinlock, flags); deactivate_buffer(gobuf); spin_unlock_irqrestore(&go->spinlock, flags); frame_type_flag = get_frame_type_flag(gobuf, go->format); gobuf->state = BUF_STATE_IDLE; memset(buf, 0, sizeof(*buf)); buf->index = gobuf->index; buf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf->bytesused = gobuf->bytesused; buf->flags = V4L2_BUF_FLAG_MAPPED | frame_type_flag; buf->field = V4L2_FIELD_NONE; buf->timestamp = gobuf->timestamp; buf->sequence = gobuf->seq; buf->memory = V4L2_MEMORY_MMAP; buf->m.offset = gobuf->index * GO7007_BUF_SIZE; buf->length = GO7007_BUF_SIZE; buf->reserved = 
		       gobuf->modet_active;
	mutex_unlock(&gofh->lock);
	return 0;

unlock_and_return:
	mutex_unlock(&gofh->lock);
	return retval;
}

/*
 * VIDIOC_STREAMON: kick the encoder.  Idempotent -- a second call while
 * already streaming succeeds without touching the hardware.
 */
static int vidioc_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct go7007_file *gofh = priv;
	struct go7007 *go = gofh->go;
	int retval = 0;

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	/* Lock order: per-file lock first, then hardware lock. */
	mutex_lock(&gofh->lock);
	mutex_lock(&go->hw_lock);

	if (!go->streaming) {
		go->streaming = 1;
		go->next_seq = 0;
		go->active_buf = NULL;
		if (go7007_start_encoder(go) < 0)
			retval = -EIO;
		else
			retval = 0;
	}
	mutex_unlock(&go->hw_lock);
	mutex_unlock(&gofh->lock);

	return retval;
}

/* VIDIOC_STREAMOFF: stop the encoder and abort queued buffers. */
static int vidioc_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct go7007_file *gofh = priv;
	struct go7007 *go = gofh->go;

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	mutex_lock(&gofh->lock);
	go7007_streamoff(go);
	mutex_unlock(&gofh->lock);

	return 0;
}

/*
 * VIDIOC_QUERYCTRL: give the sensor/tuner subdevices first crack; fall
 * back to the bridge's emulated MPEG controls.  query->id is restored
 * because a failed subdev call may have advanced it.
 */
static int vidioc_queryctrl(struct file *file, void *priv,
			   struct v4l2_queryctrl *query)
{
	struct go7007 *go = ((struct go7007_file *) priv)->go;
	int id = query->id;

	if (0 == call_all(&go->v4l2_dev, core, queryctrl, query))
		return 0;

	query->id = id;
	return mpeg_query_ctrl(query);
}

/* VIDIOC_G_CTRL: subdevices first, then the bridge MPEG controls. */
static int vidioc_g_ctrl(struct file *file, void *priv,
				struct v4l2_control *ctrl)
{
	struct go7007 *go = ((struct go7007_file *) priv)->go;

	if (0 == call_all(&go->v4l2_dev, core, g_ctrl, ctrl))
		return 0;

	return mpeg_g_ctrl(ctrl, go);
}

/* VIDIOC_S_CTRL: subdevices first, then the bridge MPEG controls. */
static int vidioc_s_ctrl(struct file *file, void *priv,
				struct v4l2_control *ctrl)
{
	struct go7007 *go = ((struct go7007_file *) priv)->go;

	if (0 == call_all(&go->v4l2_dev, core, s_ctrl, ctrl))
		return 0;

	return mpeg_s_ctrl(ctrl, go);
}

/*
 * VIDIOC_G_PARM: report the frame period as
 * (1001 * fps_scale) / sensor_framerate.
 */
static int vidioc_g_parm(struct file *filp, void *priv,
		struct v4l2_streamparm *parm)
{
	struct go7007 *go = ((struct go7007_file *) priv)->go;
	struct v4l2_fract timeperframe = {
		.numerator = 1001 *  go->fps_scale,
		.denominator = go->sensor_framerate,
	};

	if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	parm->parm.capture.capability |= V4L2_CAP_TIMEPERFRAME;
	parm->parm.capture.timeperframe = timeperframe;

	return 0;
}

/*
 * VIDIOC_S_PARM: derive an integer frame-skip factor (fps_scale) from the
 * requested time-per-frame, rounding to the nearest ratio of the sensor
 * frame rate; degenerate requests fall back to full rate (scale 1).
 */
static int vidioc_s_parm(struct file *filp, void *priv,
		struct v4l2_streamparm *parm)
{
	struct go7007 *go = ((struct go7007_file *) priv)->go;
	unsigned int n, d;

	if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (parm->parm.capture.capturemode != 0)
		return -EINVAL;

	n = go->sensor_framerate *
		parm->parm.capture.timeperframe.numerator;
	d = 1001 * parm->parm.capture.timeperframe.denominator;
	if (n != 0 && d != 0 && n > d)
		go->fps_scale = (n + d/2) / d;
	else
		go->fps_scale = 1;

	return 0;
}

/* VIDIOC_ENUMSTD on go7007 was used for enumerating the supported fps and
   its resolution, when the device is not connected to TV.
   This was an API abuse, probably caused by the lack of specific IOCTLs
   to enumerate it, at the time the driver was written.

   However, since kernel 2.6.19, two new ioctls (VIDIOC_ENUM_FRAMEINTERVALS
   and VIDIOC_ENUM_FRAMESIZES) were added for this purpose.
The two functions bellow implements the newer ioctls */ static int vidioc_enum_framesizes(struct file *filp, void *priv, struct v4l2_frmsizeenum *fsize) { struct go7007 *go = ((struct go7007_file *) priv)->go; /* Return -EINVAL, if it is a TV board */ if ((go->board_info->flags & GO7007_BOARD_HAS_TUNER) || (go->board_info->sensor_flags & GO7007_SENSOR_TV)) return -EINVAL; if (fsize->index > 0) return -EINVAL; fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; fsize->discrete.width = go->board_info->sensor_width; fsize->discrete.height = go->board_info->sensor_height; return 0; } static int vidioc_enum_frameintervals(struct file *filp, void *priv, struct v4l2_frmivalenum *fival) { struct go7007 *go = ((struct go7007_file *) priv)->go; /* Return -EINVAL, if it is a TV board */ if ((go->board_info->flags & GO7007_BOARD_HAS_TUNER) || (go->board_info->sensor_flags & GO7007_SENSOR_TV)) return -EINVAL; if (fival->index > 0) return -EINVAL; fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; fival->discrete.numerator = 1001; fival->discrete.denominator = go->board_info->sensor_framerate; return 0; } static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std) { struct go7007 *go = ((struct go7007_file *) priv)->go; switch (go->standard) { case GO7007_STD_NTSC: *std = V4L2_STD_NTSC; break; case GO7007_STD_PAL: *std = V4L2_STD_PAL; break; default: return -EINVAL; } return 0; } static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *std) { struct go7007 *go = ((struct go7007_file *) priv)->go; if (go->streaming) return -EBUSY; if (!(go->board_info->sensor_flags & GO7007_SENSOR_TV) && *std != 0) return -EINVAL; if (*std == 0) return -EINVAL; if ((go->board_info->flags & GO7007_BOARD_HAS_TUNER) && go->input == go->board_info->num_inputs - 1) { if (!go->i2c_adapter_online) return -EIO; if (call_all(&go->v4l2_dev, core, s_std, *std) < 0) return -EINVAL; } if (*std & V4L2_STD_NTSC) { go->standard = GO7007_STD_NTSC; go->sensor_framerate = 30000; } else if (*std & V4L2_STD_PAL) 
{ go->standard = GO7007_STD_PAL; go->sensor_framerate = 25025; } else if (*std & V4L2_STD_SECAM) { go->standard = GO7007_STD_PAL; go->sensor_framerate = 25025; } else return -EINVAL; call_all(&go->v4l2_dev, core, s_std, *std); set_capture_size(go, NULL, 0); return 0; } static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *std) { struct go7007 *go = ((struct go7007_file *) priv)->go; if ((go->board_info->flags & GO7007_BOARD_HAS_TUNER) && go->input == go->board_info->num_inputs - 1) { if (!go->i2c_adapter_online) return -EIO; return call_all(&go->v4l2_dev, video, querystd, std); } else if (go->board_info->sensor_flags & GO7007_SENSOR_TV) *std = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM; else *std = 0; return 0; } static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *inp) { struct go7007 *go = ((struct go7007_file *) priv)->go; if (inp->index >= go->board_info->num_inputs) return -EINVAL; strncpy(inp->name, go->board_info->inputs[inp->index].name, sizeof(inp->name)); /* If this board has a tuner, it will be the last input */ if ((go->board_info->flags & GO7007_BOARD_HAS_TUNER) && inp->index == go->board_info->num_inputs - 1) inp->type = V4L2_INPUT_TYPE_TUNER; else inp->type = V4L2_INPUT_TYPE_CAMERA; inp->audioset = 0; inp->tuner = 0; if (go->board_info->sensor_flags & GO7007_SENSOR_TV) inp->std = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM; else inp->std = 0; return 0; } static int vidioc_g_input(struct file *file, void *priv, unsigned int *input) { struct go7007 *go = ((struct go7007_file *) priv)->go; *input = go->input; return 0; } static int vidioc_s_input(struct file *file, void *priv, unsigned int input) { struct go7007 *go = ((struct go7007_file *) priv)->go; if (input >= go->board_info->num_inputs) return -EINVAL; if (go->streaming) return -EBUSY; go->input = input; return call_all(&go->v4l2_dev, video, s_routing, input, 0, 0); } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { 
	struct go7007 *go = ((struct go7007_file *) priv)->go;

	if (!(go->board_info->flags & GO7007_BOARD_HAS_TUNER))
		return -EINVAL;
	if (t->index != 0)
		return -EINVAL;
	if (!go->i2c_adapter_online)
		return -EIO;

	return call_all(&go->v4l2_dev, tuner, g_tuner, t);
}

/*
 * VIDIOC_S_TUNER: forwarded to the tuner subdevice; the TV402U boards
 * only accept stereo audmode.
 */
static int vidioc_s_tuner(struct file *file, void *priv,
				struct v4l2_tuner *t)
{
	struct go7007 *go = ((struct go7007_file *) priv)->go;

	if (!(go->board_info->flags & GO7007_BOARD_HAS_TUNER))
		return -EINVAL;
	if (t->index != 0)
		return -EINVAL;
	if (!go->i2c_adapter_online)
		return -EIO;

	switch (go->board_id) {
	case GO7007_BOARDID_PX_TV402U_NA:
	case GO7007_BOARDID_PX_TV402U_JP:
		/* No selectable options currently */
		if (t->audmode != V4L2_TUNER_MODE_STEREO)
			return -EINVAL;
		break;
	}

	return call_all(&go->v4l2_dev, tuner, s_tuner, t);
}

/* VIDIOC_G_FREQUENCY: forwarded to the tuner subdevice. */
static int vidioc_g_frequency(struct file *file, void *priv,
				struct v4l2_frequency *f)
{
	struct go7007 *go = ((struct go7007_file *) priv)->go;

	if (!(go->board_info->flags & GO7007_BOARD_HAS_TUNER))
		return -EINVAL;
	if (!go->i2c_adapter_online)
		return -EIO;

	f->type = V4L2_TUNER_ANALOG_TV;

	return call_all(&go->v4l2_dev, tuner, g_frequency, f);
}

/* VIDIOC_S_FREQUENCY: forwarded to the tuner subdevice. */
static int vidioc_s_frequency(struct file *file, void *priv,
				struct v4l2_frequency *f)
{
	struct go7007 *go = ((struct go7007_file *) priv)->go;

	if (!(go->board_info->flags & GO7007_BOARD_HAS_TUNER))
		return -EINVAL;
	if (!go->i2c_adapter_online)
		return -EIO;

	return call_all(&go->v4l2_dev, tuner, s_frequency, f);
}

/*
 * VIDIOC_CROPCAP: report the raw sensor window for the active standard;
 * bounds and default rectangle are identical (no real cropping support).
 */
static int vidioc_cropcap(struct file *file, void *priv,
					struct v4l2_cropcap *cropcap)
{
	struct go7007 *go = ((struct go7007_file *) priv)->go;

	if (cropcap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	/* These specify the raw input of the sensor */
	switch (go->standard) {
	case GO7007_STD_NTSC:
		cropcap->bounds.top = 0;
		cropcap->bounds.left = 0;
		cropcap->bounds.width = 720;
		cropcap->bounds.height = 480;
		cropcap->defrect.top = 0;
		cropcap->defrect.left = 0;
		cropcap->defrect.width = 720;
		cropcap->defrect.height = 480;
		break;
	case GO7007_STD_PAL:
		cropcap->bounds.top = 0;
		cropcap->bounds.left = 0;
		cropcap->bounds.width = 720;
		cropcap->bounds.height = 576;
		cropcap->defrect.top = 0;
		cropcap->defrect.left = 0;
		cropcap->defrect.width = 720;
		cropcap->defrect.height = 576;
		break;
	case GO7007_STD_OTHER:
		cropcap->bounds.top = 0;
		cropcap->bounds.left = 0;
		cropcap->bounds.width = go->board_info->sensor_width;
		cropcap->bounds.height = go->board_info->sensor_height;
		cropcap->defrect.top = 0;
		cropcap->defrect.left = 0;
		cropcap->defrect.width = go->board_info->sensor_width;
		cropcap->defrect.height = go->board_info->sensor_height;
		break;
	}

	return 0;
}

/* VIDIOC_G_CROP: the crop rectangle is always the full raw sensor window. */
static int vidioc_g_crop(struct file *file, void *priv, struct v4l2_crop *crop)
{
	struct go7007 *go = ((struct go7007_file *) priv)->go;

	if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	/* These specify the raw input of the sensor */
	switch (go->standard) {
	case GO7007_STD_NTSC:
		crop->c.top = 0;
		crop->c.left = 0;
		crop->c.width = 720;
		crop->c.height = 480;
		break;
	case GO7007_STD_PAL:
		crop->c.top = 0;
		crop->c.left = 0;
		crop->c.width = 720;
		crop->c.height = 576;
		break;
	case GO7007_STD_OTHER:
		crop->c.top = 0;
		crop->c.left = 0;
		crop->c.width = go->board_info->sensor_width;
		crop->c.height = go->board_info->sensor_height;
		break;
	}

	return 0;
}

/* FIXME: vidioc_s_crop is not really implemented!!!
 */
static int vidioc_s_crop(struct file *file, void *priv, struct v4l2_crop *crop)
{
	if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	return 0;
}

/* VIDIOC_G_JPEGCOMP: fixed JPEG parameters (see vidioc_s_jpegcomp). */
static int vidioc_g_jpegcomp(struct file *file, void *priv,
			struct v4l2_jpegcompression *params)
{
	memset(params, 0, sizeof(*params));
	params->quality = 50; /* ??
*/ params->jpeg_markers = V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT; return 0; } static int vidioc_s_jpegcomp(struct file *file, void *priv, struct v4l2_jpegcompression *params) { if (params->quality != 50 || params->jpeg_markers != (V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT)) return -EINVAL; return 0; } /* FIXME: Those ioctls are private, and not needed, since several standard extended controls already provide streaming control. So, those ioctls should be converted into vidioc_g_ext_ctrls() and vidioc_s_ext_ctrls() */ #if 0 /* Temporary ioctls for controlling compression characteristics */ case GO7007IOC_S_BITRATE: { int *bitrate = arg; if (go->streaming) return -EINVAL; /* Upper bound is kind of arbitrary here */ if (*bitrate < 64000 || *bitrate > 10000000) return -EINVAL; go->bitrate = *bitrate; return 0; } case GO7007IOC_G_BITRATE: { int *bitrate = arg; *bitrate = go->bitrate; return 0; } case GO7007IOC_S_COMP_PARAMS: { struct go7007_comp_params *comp = arg; if (go->format == GO7007_FORMAT_MJPEG) return -EINVAL; if (comp->gop_size > 0) go->gop_size = comp->gop_size; else go->gop_size = go->sensor_framerate / 1000; if (go->gop_size != 15) go->dvd_mode = 0; /*go->ipb = comp->max_b_frames > 0;*/ /* completely untested */ if (go->board_info->sensor_flags & GO7007_SENSOR_TV) { switch (comp->aspect_ratio) { case GO7007_ASPECT_RATIO_4_3_NTSC: case GO7007_ASPECT_RATIO_4_3_PAL: go->aspect_ratio = GO7007_RATIO_4_3; break; case GO7007_ASPECT_RATIO_16_9_NTSC: case GO7007_ASPECT_RATIO_16_9_PAL: go->aspect_ratio = GO7007_RATIO_16_9; break; default: go->aspect_ratio = GO7007_RATIO_1_1; break; } } if (comp->flags & GO7007_COMP_OMIT_SEQ_HEADER) { go->dvd_mode = 0; go->seq_header_enable = 0; } else { go->seq_header_enable = 1; } /* fall-through */ } case GO7007IOC_G_COMP_PARAMS: { struct go7007_comp_params *comp = arg; if (go->format == GO7007_FORMAT_MJPEG) return -EINVAL; memset(comp, 0, sizeof(*comp)); comp->gop_size = go->gop_size; comp->max_b_frames = go->ipb ? 
2 : 0; switch (go->aspect_ratio) { case GO7007_RATIO_4_3: if (go->standard == GO7007_STD_NTSC) comp->aspect_ratio = GO7007_ASPECT_RATIO_4_3_NTSC; else comp->aspect_ratio = GO7007_ASPECT_RATIO_4_3_PAL; break; case GO7007_RATIO_16_9: if (go->standard == GO7007_STD_NTSC) comp->aspect_ratio = GO7007_ASPECT_RATIO_16_9_NTSC; else comp->aspect_ratio = GO7007_ASPECT_RATIO_16_9_PAL; break; default: comp->aspect_ratio = GO7007_ASPECT_RATIO_1_1; break; } if (go->closed_gop) comp->flags |= GO7007_COMP_CLOSED_GOP; if (!go->seq_header_enable) comp->flags |= GO7007_COMP_OMIT_SEQ_HEADER; return 0; } case GO7007IOC_S_MPEG_PARAMS: { struct go7007_mpeg_params *mpeg = arg; if (go->format != GO7007_FORMAT_MPEG1 && go->format != GO7007_FORMAT_MPEG2 && go->format != GO7007_FORMAT_MPEG4) return -EINVAL; if (mpeg->flags & GO7007_MPEG_FORCE_DVD_MODE) { go->format = GO7007_FORMAT_MPEG2; go->bitrate = 9800000; go->gop_size = 15; go->pali = 0x48; go->closed_gop = 1; go->repeat_seqhead = 0; go->seq_header_enable = 1; go->gop_header_enable = 1; go->dvd_mode = 1; } else { switch (mpeg->mpeg_video_standard) { case GO7007_MPEG_VIDEO_MPEG1: go->format = GO7007_FORMAT_MPEG1; go->pali = 0; break; case GO7007_MPEG_VIDEO_MPEG2: go->format = GO7007_FORMAT_MPEG2; if (mpeg->pali >> 24 == 2) go->pali = mpeg->pali & 0xff; else go->pali = 0x48; break; case GO7007_MPEG_VIDEO_MPEG4: go->format = GO7007_FORMAT_MPEG4; if (mpeg->pali >> 24 == 4) go->pali = mpeg->pali & 0xff; else go->pali = 0xf5; break; default: return -EINVAL; } go->gop_header_enable = mpeg->flags & GO7007_MPEG_OMIT_GOP_HEADER ? 
0 : 1; if (mpeg->flags & GO7007_MPEG_REPEAT_SEQHEADER) go->repeat_seqhead = 1; else go->repeat_seqhead = 0; go->dvd_mode = 0; } /* fall-through */ } case GO7007IOC_G_MPEG_PARAMS: { struct go7007_mpeg_params *mpeg = arg; memset(mpeg, 0, sizeof(*mpeg)); switch (go->format) { case GO7007_FORMAT_MPEG1: mpeg->mpeg_video_standard = GO7007_MPEG_VIDEO_MPEG1; mpeg->pali = 0; break; case GO7007_FORMAT_MPEG2: mpeg->mpeg_video_standard = GO7007_MPEG_VIDEO_MPEG2; mpeg->pali = GO7007_MPEG_PROFILE(2, go->pali); break; case GO7007_FORMAT_MPEG4: mpeg->mpeg_video_standard = GO7007_MPEG_VIDEO_MPEG4; mpeg->pali = GO7007_MPEG_PROFILE(4, go->pali); break; default: return -EINVAL; } if (!go->gop_header_enable) mpeg->flags |= GO7007_MPEG_OMIT_GOP_HEADER; if (go->repeat_seqhead) mpeg->flags |= GO7007_MPEG_REPEAT_SEQHEADER; if (go->dvd_mode) mpeg->flags |= GO7007_MPEG_FORCE_DVD_MODE; return 0; } case GO7007IOC_S_MD_PARAMS: { struct go7007_md_params *mdp = arg; if (mdp->region > 3) return -EINVAL; if (mdp->trigger > 0) { go->modet[mdp->region].pixel_threshold = mdp->pixel_threshold >> 1; go->modet[mdp->region].motion_threshold = mdp->motion_threshold >> 1; go->modet[mdp->region].mb_threshold = mdp->trigger >> 1; go->modet[mdp->region].enable = 1; } else go->modet[mdp->region].enable = 0; /* fall-through */ } case GO7007IOC_G_MD_PARAMS: { struct go7007_md_params *mdp = arg; int region = mdp->region; if (mdp->region > 3) return -EINVAL; memset(mdp, 0, sizeof(struct go7007_md_params)); mdp->region = region; if (!go->modet[region].enable) return 0; mdp->pixel_threshold = (go->modet[region].pixel_threshold << 1) + 1; mdp->motion_threshold = (go->modet[region].motion_threshold << 1) + 1; mdp->trigger = (go->modet[region].mb_threshold << 1) + 1; return 0; } case GO7007IOC_S_MD_REGION: { struct go7007_md_region *region = arg; if (region->region < 1 || region->region > 3) return -EINVAL; return clip_to_modet_map(go, region->region, region->clips); } #endif static ssize_t go7007_read(struct file 
			   *file, char __user *data,
		size_t count, loff_t *ppos)
{
	/* read() is not supported; frames are delivered via mmap+DQBUF. */
	return -EINVAL;
}

/* vm open: count one more mapping of this buffer. */
static void go7007_vm_open(struct vm_area_struct *vma)
{
	struct go7007_buffer *gobuf = vma->vm_private_data;

	++gobuf->mapped;
}

/* vm close: on the last unmap, pull the buffer off the active queue. */
static void go7007_vm_close(struct vm_area_struct *vma)
{
	struct go7007_buffer *gobuf = vma->vm_private_data;
	unsigned long flags;

	if (--gobuf->mapped == 0) {
		spin_lock_irqsave(&gobuf->go->spinlock, flags);
		deactivate_buffer(gobuf);
		spin_unlock_irqrestore(&gobuf->go->spinlock, flags);
	}
}

/* Copied from videobuf-dma-sg.c */
static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	/* Hand out a fresh zeroed page for each faulting address. */
	page = alloc_page(GFP_USER | __GFP_DMA32);
	if (!page)
		return VM_FAULT_OOM;
	clear_user_highpage(page, (unsigned long)vmf->virtual_address);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct go7007_vm_ops = {
	.open	= go7007_vm_open,
	.close	= go7007_vm_close,
	.fault	= go7007_vm_fault,
};

/*
 * mmap: map exactly one whole capture buffer; the pgoff encodes the
 * buffer index in units of GO7007_BUF_PAGES (see vidioc_querybuf()).
 * Each buffer may be mapped at most once at a time.
 */
static int go7007_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct go7007_file *gofh = file->private_data;
	unsigned int index;

	if (gofh->go->status != STATUS_ONLINE)
		return -EIO;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL; /* only support VM_SHARED mapping */
	if (vma->vm_end - vma->vm_start != GO7007_BUF_SIZE)
		return -EINVAL; /* must map exactly one full buffer */
	mutex_lock(&gofh->lock);
	index = vma->vm_pgoff / GO7007_BUF_PAGES;
	if (index >= gofh->buf_count) {
		mutex_unlock(&gofh->lock);
		return -EINVAL; /* trying to map beyond requested buffers */
	}
	if (index * GO7007_BUF_PAGES != vma->vm_pgoff) {
		mutex_unlock(&gofh->lock);
		return -EINVAL; /* offset is not aligned on buffer boundary */
	}
	if (gofh->bufs[index].mapped > 0) {
		mutex_unlock(&gofh->lock);
		return -EBUSY;
	}
	gofh->bufs[index].mapped = 1;
	gofh->bufs[index].user_addr = vma->vm_start;
	vma->vm_ops = &go7007_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_flags &= ~VM_IO;
	vma->vm_private_data = &gofh->bufs[index];
	mutex_unlock(&gofh->lock);
	return 0;
}

/*
 * poll: readable when the buffer at the head of the stream list has been
 * filled by the encoder.  POLLERR when nothing is queued at all.
 */
static unsigned int go7007_poll(struct file *file, poll_table *wait)
{
	struct go7007_file *gofh = file->private_data;
	struct go7007_buffer *gobuf;

	if (list_empty(&gofh->go->stream))
		return POLLERR;
	gobuf = list_entry(gofh->go->stream.next, struct go7007_buffer,
			stream);
	poll_wait(file, &gofh->go->frame_waitq, wait);
	if (gobuf->state == BUF_STATE_DONE)
		return POLLIN | POLLRDNORM;
	return 0;
}

/*
 * Release callback for the video_device: frees the go7007 state when the
 * last reference is dropped.
 */
static void go7007_vfl_release(struct video_device *vfd)
{
	struct go7007 *go = video_get_drvdata(vfd);

	video_device_release(vfd);
	if (--go->ref_count == 0)
		kfree(go);
}

static struct v4l2_file_operations go7007_fops = {
	.owner		= THIS_MODULE,
	.open		= go7007_open,
	.release	= go7007_release,
	.ioctl		= video_ioctl2,
	.read		= go7007_read,
	.mmap		= go7007_mmap,
	.poll		= go7007_poll,
};

static const struct v4l2_ioctl_ops video_ioctl_ops = {
	.vidioc_querycap          = vidioc_querycap,
	.vidioc_enum_fmt_vid_cap  = vidioc_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap     = vidioc_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap   = vidioc_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap     = vidioc_s_fmt_vid_cap,
	.vidioc_reqbufs           = vidioc_reqbufs,
	.vidioc_querybuf          = vidioc_querybuf,
	.vidioc_qbuf              = vidioc_qbuf,
	.vidioc_dqbuf             = vidioc_dqbuf,
	.vidioc_g_std             = vidioc_g_std,
	.vidioc_s_std             = vidioc_s_std,
	.vidioc_querystd          = vidioc_querystd,
	.vidioc_enum_input        = vidioc_enum_input,
	.vidioc_g_input           = vidioc_g_input,
	.vidioc_s_input           = vidioc_s_input,
	.vidioc_queryctrl         = vidioc_queryctrl,
	.vidioc_g_ctrl            = vidioc_g_ctrl,
	.vidioc_s_ctrl            = vidioc_s_ctrl,
	.vidioc_streamon          = vidioc_streamon,
	.vidioc_streamoff         = vidioc_streamoff,
	.vidioc_g_tuner           = vidioc_g_tuner,
	.vidioc_s_tuner           = vidioc_s_tuner,
	.vidioc_g_frequency       = vidioc_g_frequency,
	.vidioc_s_frequency       = vidioc_s_frequency,
	.vidioc_g_parm            = vidioc_g_parm,
	.vidioc_s_parm            = vidioc_s_parm,
	.vidioc_enum_framesizes   = vidioc_enum_framesizes,
	.vidioc_enum_frameintervals = vidioc_enum_frameintervals,
	.vidioc_cropcap           = vidioc_cropcap,
	.vidioc_g_crop            = vidioc_g_crop,
	.vidioc_s_crop            = vidioc_s_crop,
	.vidioc_g_jpegcomp =
vidioc_g_jpegcomp, .vidioc_s_jpegcomp = vidioc_s_jpegcomp, }; static struct video_device go7007_template = { .name = "go7007", .fops = &go7007_fops, .release = go7007_vfl_release, .ioctl_ops = &video_ioctl_ops, .tvnorms = V4L2_STD_ALL, .current_norm = V4L2_STD_NTSC, }; int go7007_v4l2_init(struct go7007 *go) { int rv; go->video_dev = video_device_alloc(); if (go->video_dev == NULL) return -ENOMEM; *go->video_dev = go7007_template; go->video_dev->parent = go->dev; rv = video_register_device(go->video_dev, VFL_TYPE_GRABBER, -1); if (rv < 0) { video_device_release(go->video_dev); go->video_dev = NULL; return rv; } rv = v4l2_device_register(go->dev, &go->v4l2_dev); if (rv < 0) { video_device_release(go->video_dev); go->video_dev = NULL; return rv; } video_set_drvdata(go->video_dev, go); ++go->ref_count; printk(KERN_INFO "%s: registered device %s [v4l2]\n", go->video_dev->name, video_device_node_name(go->video_dev)); return 0; } void go7007_v4l2_remove(struct go7007 *go) { unsigned long flags; mutex_lock(&go->hw_lock); if (go->streaming) { go->streaming = 0; go7007_stream_stop(go); spin_lock_irqsave(&go->spinlock, flags); abort_queued(go); spin_unlock_irqrestore(&go->spinlock, flags); } mutex_unlock(&go->hw_lock); if (go->video_dev) video_unregister_device(go->video_dev); v4l2_device_unregister(&go->v4l2_dev); }
gpl-2.0
KingLiuDao/linux
drivers/staging/lustre/lustre/llite/dcache.c
42
9696
/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. * * GPL HEADER END */ /* * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * * Copyright (c) 2011, 2015, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. 
*/ #include <linux/fs.h> #include <linux/sched.h> #include <linux/quotaops.h> #define DEBUG_SUBSYSTEM S_LLITE #include "../include/obd_support.h" #include "../include/lustre_lite.h" #include "../include/lustre/lustre_idl.h" #include "../include/lustre_dlm.h" #include "llite_internal.h" static void free_dentry_data(struct rcu_head *head) { struct ll_dentry_data *lld; lld = container_of(head, struct ll_dentry_data, lld_rcu_head); kfree(lld); } /* should NOT be called with the dcache lock, see fs/dcache.c */ static void ll_release(struct dentry *de) { struct ll_dentry_data *lld; LASSERT(de); lld = ll_d2d(de); if (!lld) /* NFS copies the de->d_op methods (bug 4655) */ return; if (lld->lld_it) { ll_intent_release(lld->lld_it); kfree(lld->lld_it); } de->d_fsdata = NULL; call_rcu(&lld->lld_rcu_head, free_dentry_data); } /* Compare if two dentries are the same. Don't match if the existing dentry * is marked invalid. Returns 1 if different, 0 if the same. * * This avoids a race where ll_lookup_it() instantiates a dentry, but we get * an AST before calling d_revalidate_it(). The dentry still exists (marked * INVALID) so d_lookup() matches it, but we have no lock on it (so * lock_match() fails) and we spin around real_lookup(). */ static int ll_dcompare(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { if (len != name->len) return 1; if (memcmp(str, name->name, len)) return 1; CDEBUG(D_DENTRY, "found name %.*s(%p) flags %#x refc %d\n", name->len, name->name, dentry, dentry->d_flags, d_count(dentry)); /* mountpoint is always valid */ if (d_mountpoint((struct dentry *)dentry)) return 0; if (d_lustre_invalid(dentry)) return 1; return 0; } static inline int return_if_equal(struct ldlm_lock *lock, void *data) { return (ldlm_is_canceling(lock) && ldlm_is_discard_data(lock)) ? 
LDLM_ITER_CONTINUE : LDLM_ITER_STOP; } /* find any ldlm lock of the inode in mdc and lov * return 0 not find * 1 find one * < 0 error */ static int find_cbdata(struct inode *inode) { struct ll_sb_info *sbi = ll_i2sbi(inode); struct lov_stripe_md *lsm; int rc = 0; LASSERT(inode); rc = md_find_cbdata(sbi->ll_md_exp, ll_inode2fid(inode), return_if_equal, NULL); if (rc != 0) return rc; lsm = ccc_inode_lsm_get(inode); if (!lsm) return rc; rc = obd_find_cbdata(sbi->ll_dt_exp, lsm, return_if_equal, NULL); ccc_inode_lsm_put(inode, lsm); return rc; } /** * Called when last reference to a dentry is dropped and dcache wants to know * whether or not it should cache it: * - return 1 to delete the dentry immediately * - return 0 to cache the dentry * Should NOT be called with the dcache lock, see fs/dcache.c */ static int ll_ddelete(const struct dentry *de) { LASSERT(de); CDEBUG(D_DENTRY, "%s dentry %pd (%p, parent %p, inode %p) %s%s\n", d_lustre_invalid((struct dentry *)de) ? "deleting" : "keeping", de, de, de->d_parent, d_inode(de), d_unhashed(de) ? "" : "hashed,", list_empty(&de->d_subdirs) ? "" : "subdirs"); /* kernel >= 2.6.38 last refcount is decreased after this function. */ LASSERT(d_count(de) == 1); /* Disable this piece of code temporarily because this is called * inside dcache_lock so it's not appropriate to do lots of work * here. ATTENTION: Before this piece of code enabling, LU-2487 must be * resolved. 
*/ #if 0 /* if not ldlm lock for this inode, set i_nlink to 0 so that * this inode can be recycled later b=20433 */ if (d_really_is_positive(de) && !find_cbdata(d_inode(de))) clear_nlink(d_inode(de)); #endif if (d_lustre_invalid((struct dentry *)de)) return 1; return 0; } int ll_d_init(struct dentry *de) { CDEBUG(D_DENTRY, "ldd on dentry %pd (%p) parent %p inode %p refc %d\n", de, de, de->d_parent, d_inode(de), d_count(de)); if (!de->d_fsdata) { struct ll_dentry_data *lld; lld = kzalloc(sizeof(*lld), GFP_NOFS); if (likely(lld)) { spin_lock(&de->d_lock); if (likely(!de->d_fsdata)) { de->d_fsdata = lld; __d_lustre_invalidate(de); } else { kfree(lld); } spin_unlock(&de->d_lock); } else { return -ENOMEM; } } LASSERT(de->d_op == &ll_d_ops); return 0; } void ll_intent_drop_lock(struct lookup_intent *it) { if (it->it_op && it->d.lustre.it_lock_mode) { struct lustre_handle handle; handle.cookie = it->d.lustre.it_lock_handle; CDEBUG(D_DLMTRACE, "releasing lock with cookie %#llx from it %p\n", handle.cookie, it); ldlm_lock_decref(&handle, it->d.lustre.it_lock_mode); /* bug 494: intent_release may be called multiple times, from * this thread and we don't want to double-decref this lock */ it->d.lustre.it_lock_mode = 0; if (it->d.lustre.it_remote_lock_mode != 0) { handle.cookie = it->d.lustre.it_remote_lock_handle; CDEBUG(D_DLMTRACE, "releasing remote lock with cookie%#llx from it %p\n", handle.cookie, it); ldlm_lock_decref(&handle, it->d.lustre.it_remote_lock_mode); it->d.lustre.it_remote_lock_mode = 0; } } } void ll_intent_release(struct lookup_intent *it) { CDEBUG(D_INFO, "intent %p released\n", it); ll_intent_drop_lock(it); /* We are still holding extra reference on a request, need to free it */ if (it_disposition(it, DISP_ENQ_OPEN_REF)) ptlrpc_req_finished(it->d.lustre.it_data); /* ll_file_open */ if (it_disposition(it, DISP_ENQ_CREATE_REF)) /* create rec */ ptlrpc_req_finished(it->d.lustre.it_data); it->d.lustre.it_disposition = 0; it->d.lustre.it_data = NULL; } void 
ll_invalidate_aliases(struct inode *inode) { struct dentry *dentry; CDEBUG(D_INODE, "marking dentries for ino "DFID"(%p) invalid\n", PFID(ll_inode2fid(inode)), inode); ll_lock_dcache(inode); hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { CDEBUG(D_DENTRY, "dentry in drop %pd (%p) parent %p inode %p flags %d\n", dentry, dentry, dentry->d_parent, d_inode(dentry), dentry->d_flags); d_lustre_invalidate(dentry, 0); } ll_unlock_dcache(inode); } int ll_revalidate_it_finish(struct ptlrpc_request *request, struct lookup_intent *it, struct inode *inode) { int rc = 0; if (!request) return 0; if (it_disposition(it, DISP_LOOKUP_NEG)) return -ENOENT; rc = ll_prep_inode(&inode, request, NULL, it); return rc; } void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode) { if (it->d.lustre.it_lock_mode && inode) { struct ll_sb_info *sbi = ll_i2sbi(inode); CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"(%p)\n", PFID(ll_inode2fid(inode)), inode); ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL); } /* drop lookup or getattr locks immediately */ if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR) { /* on 2.6 there are situation when several lookups and * revalidations may be requested during single operation. * therefore, we don't release intent here -bzzz */ ll_intent_drop_lock(it); } } static int ll_revalidate_dentry(struct dentry *dentry, unsigned int lookup_flags) { struct inode *dir = d_inode(dentry->d_parent); /* * if open&create is set, talk to MDS to make sure file is created if * necessary, because we can't do this in ->open() later since that's * called on an inode. return 0 here to let lookup to handle this. 
*/ if ((lookup_flags & (LOOKUP_OPEN | LOOKUP_CREATE)) == (LOOKUP_OPEN | LOOKUP_CREATE)) return 0; if (lookup_flags & (LOOKUP_PARENT | LOOKUP_OPEN | LOOKUP_CREATE)) return 1; if (d_need_statahead(dir, dentry) <= 0) return 1; if (lookup_flags & LOOKUP_RCU) return -ECHILD; do_statahead_enter(dir, &dentry, !d_inode(dentry)); ll_statahead_mark(dir, dentry); return 1; } /* * Always trust cached dentries. Update statahead window if necessary. */ static int ll_revalidate_nd(struct dentry *dentry, unsigned int flags) { CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, flags=%u\n", dentry, flags); return ll_revalidate_dentry(dentry, flags); } static void ll_d_iput(struct dentry *de, struct inode *inode) { LASSERT(inode); if (!find_cbdata(inode)) clear_nlink(inode); iput(inode); } const struct dentry_operations ll_d_ops = { .d_revalidate = ll_revalidate_nd, .d_release = ll_release, .d_delete = ll_ddelete, .d_iput = ll_d_iput, .d_compare = ll_dcompare, };
gpl-2.0
suihkulokki/systemd
src/network/networkd-ipv4ll.c
42
7267
/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ /*** This file is part of systemd. Copyright 2013-2014 Tom Gundersen <teg@jklm.no> systemd is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. systemd is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with systemd; If not, see <http://www.gnu.org/licenses/>. ***/ #include <netinet/ether.h> #include <linux/if.h> #include "networkd-link.h" #include "network-internal.h" static int ipv4ll_address_lost(Link *link) { _cleanup_address_free_ Address *address = NULL; _cleanup_route_free_ Route *route = NULL; struct in_addr addr; int r; assert(link); link->ipv4ll_route = false; link->ipv4ll_address = false; r = sd_ipv4ll_get_address(link->ipv4ll, &addr); if (r < 0) return 0; log_link_debug(link, "IPv4 link-local release %u.%u.%u.%u", ADDRESS_FMT_VAL(addr)); r = address_new_dynamic(&address); if (r < 0) { log_link_error(link, "Could not allocate address: %s", strerror(-r)); return r; } address->family = AF_INET; address->in_addr.in = addr; address->prefixlen = 16; address->scope = RT_SCOPE_LINK; address_drop(address, link, &link_address_drop_handler); r = route_new_dynamic(&route, RTPROT_UNSPEC); if (r < 0) { log_link_error(link, "Could not allocate route: %s", strerror(-r)); return r; } route->family = AF_INET; route->scope = RT_SCOPE_LINK; route->metrics = IPV4LL_ROUTE_METRIC; route_drop(route, link, &link_route_drop_handler); link_client_handler(link); return 0; } static int ipv4ll_route_handler(sd_netlink *rtnl, sd_netlink_message *m, void *userdata) { _cleanup_link_unref_ Link 
*link = userdata; int r; assert(link); assert(!link->ipv4ll_route); r = sd_netlink_message_get_errno(m); if (r < 0 && r != -EEXIST) { log_link_error(link, "could not set ipv4ll route: %s", strerror(-r)); link_enter_failed(link); } link->ipv4ll_route = true; if (link->ipv4ll_address == true) link_client_handler(link); return 1; } static int ipv4ll_address_handler(sd_netlink *rtnl, sd_netlink_message *m, void *userdata) { _cleanup_link_unref_ Link *link = userdata; int r; assert(link); assert(!link->ipv4ll_address); r = sd_netlink_message_get_errno(m); if (r < 0 && r != -EEXIST) { log_link_error(link, "could not set ipv4ll address: %s", strerror(-r)); link_enter_failed(link); } else if (r >= 0) link_rtnl_process_address(rtnl, m, link->manager); link->ipv4ll_address = true; if (link->ipv4ll_route == true) link_client_handler(link); return 1; } static int ipv4ll_address_claimed(sd_ipv4ll *ll, Link *link) { _cleanup_address_free_ Address *ll_addr = NULL; _cleanup_route_free_ Route *route = NULL; struct in_addr address; int r; assert(ll); assert(link); r = sd_ipv4ll_get_address(ll, &address); if (r == -ENOENT) return 0; else if (r < 0) return r; log_link_debug(link, "IPv4 link-local claim %u.%u.%u.%u", ADDRESS_FMT_VAL(address)); r = address_new_dynamic(&ll_addr); if (r < 0) return r; ll_addr->family = AF_INET; ll_addr->in_addr.in = address; ll_addr->prefixlen = 16; ll_addr->broadcast.s_addr = ll_addr->in_addr.in.s_addr | htonl(0xfffffffflu >> ll_addr->prefixlen); ll_addr->scope = RT_SCOPE_LINK; r = address_configure(ll_addr, link, ipv4ll_address_handler); if (r < 0) return r; link->ipv4ll_address = false; r = route_new_dynamic(&route, RTPROT_STATIC); if (r < 0) return r; route->family = AF_INET; route->scope = RT_SCOPE_LINK; route->metrics = IPV4LL_ROUTE_METRIC; r = route_configure(route, link, ipv4ll_route_handler); if (r < 0) return r; link->ipv4ll_route = false; return 0; } static void ipv4ll_handler(sd_ipv4ll *ll, int event, void *userdata){ Link *link = userdata; 
int r; assert(link); assert(link->network); assert(link->manager); if (IN_SET(link->state, LINK_STATE_FAILED, LINK_STATE_LINGER)) return; switch(event) { case IPV4LL_EVENT_STOP: case IPV4LL_EVENT_CONFLICT: r = ipv4ll_address_lost(link); if (r < 0) { link_enter_failed(link); return; } break; case IPV4LL_EVENT_BIND: r = ipv4ll_address_claimed(ll, link); if (r < 0) { link_enter_failed(link); return; } break; default: if (event < 0) log_link_warning(link, "IPv4 link-local error: %s", strerror(-event)); else log_link_warning(link, "IPv4 link-local unknown event: %d", event); break; } } int ipv4ll_configure(Link *link) { uint8_t seed[8]; int r; assert(link); assert(link->network); assert(link->network->link_local & ADDRESS_FAMILY_IPV4); r = sd_ipv4ll_new(&link->ipv4ll); if (r < 0) return r; if (link->udev_device) { r = net_get_unique_predictable_data(link->udev_device, seed); if (r >= 0) { r = sd_ipv4ll_set_address_seed(link->ipv4ll, seed); if (r < 0) return r; } } r = sd_ipv4ll_attach_event(link->ipv4ll, NULL, 0); if (r < 0) return r; r = sd_ipv4ll_set_mac(link->ipv4ll, &link->mac); if (r < 0) return r; r = sd_ipv4ll_set_index(link->ipv4ll, link->ifindex); if (r < 0) return r; r = sd_ipv4ll_set_callback(link->ipv4ll, ipv4ll_handler, link); if (r < 0) return r; return 0; }
gpl-2.0
AuzOne/auzone_kenel
arch/arm/mach-msm/sec_debug.c
42
37218
/* * sec_debug.c * * driver supporting debug functions for Samsung device * * COPYRIGHT(C) Samsung Electronics Co., Ltd. 2006-2011 All Right Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/errno.h> #include <linux/ctype.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/input.h> #include <linux/delay.h> #include <linux/sysrq.h> #include <asm/cacheflush.h> #include <linux/io.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/proc_fs.h> #include <linux/sec_param.h> #include <mach/system.h> #include <mach/sec_debug.h> #include <mach/msm_iomap.h> #include <mach/msm_smsm.h> #ifdef CONFIG_SEC_DEBUG_LOW_LOG #include <linux/seq_file.h> #include <linux/fcntl.h> #include <linux/fs.h> #endif #include <linux/module.h> #include <mach/restart.h> #include <asm/system_info.h> enum sec_debug_upload_cause_t { UPLOAD_CAUSE_INIT = 0xCAFEBABE, UPLOAD_CAUSE_KERNEL_PANIC = 0x000000C8, UPLOAD_CAUSE_FORCED_UPLOAD = 0x00000022, UPLOAD_CAUSE_CP_ERROR_FATAL = 0x000000CC, UPLOAD_CAUSE_MDM_ERROR_FATAL = 0x000000EE, UPLOAD_CAUSE_USER_FAULT = 0x0000002F, UPLOAD_CAUSE_HSIC_DISCONNECTED = 0x000000DD, UPLOAD_CAUSE_MODEM_RST_ERR = 0x000000FC, UPLOAD_CAUSE_RIVA_RST_ERR = 0x000000FB, UPLOAD_CAUSE_LPASS_RST_ERR = 
0x000000FA, UPLOAD_CAUSE_DSPS_RST_ERR = 0x000000FD, UPLOAD_CAUSE_PERIPHERAL_ERR = 0x000000FF, }; struct sec_debug_mmu_reg_t { int SCTLR; int TTBR0; int TTBR1; int TTBCR; int DACR; int DFSR; int DFAR; int IFSR; int IFAR; int DAFSR; int IAFSR; int PMRRR; int NMRRR; int FCSEPID; int CONTEXT; int URWTPID; int UROTPID; int POTPIDR; }; /* ARM CORE regs mapping structure */ struct sec_debug_core_t { /* COMMON */ unsigned int r0; unsigned int r1; unsigned int r2; unsigned int r3; unsigned int r4; unsigned int r5; unsigned int r6; unsigned int r7; unsigned int r8; unsigned int r9; unsigned int r10; unsigned int r11; unsigned int r12; /* SVC */ unsigned int r13_svc; unsigned int r14_svc; unsigned int spsr_svc; /* PC & CPSR */ unsigned int pc; unsigned int cpsr; /* USR/SYS */ unsigned int r13_usr; unsigned int r14_usr; /* FIQ */ unsigned int r8_fiq; unsigned int r9_fiq; unsigned int r10_fiq; unsigned int r11_fiq; unsigned int r12_fiq; unsigned int r13_fiq; unsigned int r14_fiq; unsigned int spsr_fiq; /* IRQ */ unsigned int r13_irq; unsigned int r14_irq; unsigned int spsr_irq; /* MON */ unsigned int r13_mon; unsigned int r14_mon; unsigned int spsr_mon; /* ABT */ unsigned int r13_abt; unsigned int r14_abt; unsigned int spsr_abt; /* UNDEF */ unsigned int r13_und; unsigned int r14_und; unsigned int spsr_und; }; /* enable sec_debug feature */ static int enable = 1; static int enable_user = 1; static int reset_reason = 0xFFEEFFEE; static char sec_build_info[100]; static unsigned int secdbg_paddr; static unsigned int secdbg_size; #ifdef CONFIG_SEC_SSR_DEBUG_LEVEL_CHK static int enable_cp_debug = 1; #endif uint runtime_debug_val; module_param_named(enable, enable, int, S_IRUGO | S_IWUSR | S_IWGRP); module_param_named(enable_user, enable_user, int, S_IRUGO | S_IWUSR | S_IWGRP); module_param_named(reset_reason, reset_reason, int, S_IRUGO | S_IWUSR | S_IWGRP); module_param_named(runtime_debug_val, runtime_debug_val, int, S_IRUGO | S_IWUSR | S_IWGRP); #ifdef 
CONFIG_SEC_SSR_DEBUG_LEVEL_CHK module_param_named(enable_cp_debug, enable_cp_debug, int, S_IRUGO | S_IWUSR | S_IWGRP); #endif static int force_error(const char *val, struct kernel_param *kp); module_param_call(force_error, force_error, NULL, NULL, S_IRUGO | S_IWUSR | S_IWGRP); static int sec_debug_set_cpu_affinity(const char *val, struct kernel_param *kp); module_param_call(setcpuaff, sec_debug_set_cpu_affinity, NULL, NULL, S_IRUGO | S_IWUSR | S_IWGRP); static char *sec_build_time[] = { __DATE__, __TIME__ }; /* klaatu - schedule log */ struct sec_debug_log { atomic_t idx_sched[CONFIG_NR_CPUS]; struct sched_log sched[CONFIG_NR_CPUS][SCHED_LOG_MAX]; atomic_t idx_irq[CONFIG_NR_CPUS]; struct irq_log irq[CONFIG_NR_CPUS][SCHED_LOG_MAX]; atomic_t idx_irq_exit[CONFIG_NR_CPUS]; struct irq_exit_log irq_exit[CONFIG_NR_CPUS][SCHED_LOG_MAX]; atomic_t idx_timer[CONFIG_NR_CPUS]; struct timer_log timer_log[CONFIG_NR_CPUS][SCHED_LOG_MAX]; #ifdef CONFIG_SEC_DEBUG_MSG_LOG atomic_t idx_secmsg[CONFIG_NR_CPUS]; struct secmsg_log secmsg[CONFIG_NR_CPUS][MSG_LOG_MAX]; #endif #ifdef CONFIG_SEC_DEBUG_DCVS_LOG atomic_t dcvs_log_idx ; struct dcvs_debug dcvs_log[DCVS_LOG_MAX]; #endif #ifdef CONFIG_SEC_DEBUG_FUELGAUGE_LOG atomic_t fg_log_idx; struct fuelgauge_debug fg_log[FG_LOG_MAX]; #endif }; struct sec_debug_log *secdbg_log; struct sec_debug_subsys *secdbg_subsys; struct sec_debug_subsys_data_krait *secdbg_krait; struct sec_debug_subsys_data_modem *secdbg_modem; /* klaatu - semaphore log */ #ifdef CONFIG_SEC_DEBUG_SEMAPHORE_LOG static struct sem_debug sem_debug_free_head; static struct sem_debug sem_debug_done_head; static int sem_debug_free_head_cnt; static int sem_debug_done_head_cnt; static int sem_debug_init; static spinlock_t sem_debug_lock; /* rwsemaphore logging */ static struct rwsem_debug rwsem_debug_free_head; static struct rwsem_debug rwsem_debug_done_head; static int rwsem_debug_free_head_cnt; static int rwsem_debug_done_head_cnt; static int rwsem_debug_init; static spinlock_t 
rwsem_debug_lock; #endif /* CONFIG_SEC_DEBUG_SEMAPHORE_LOG */ /* onlyjazz.ed26 : make the restart_reason global to enable it early in sec_debug_init and share with restart functions */ void *restart_reason; DEFINE_PER_CPU(struct sec_debug_core_t, sec_debug_core_reg); DEFINE_PER_CPU(struct sec_debug_mmu_reg_t, sec_debug_mmu_reg); DEFINE_PER_CPU(enum sec_debug_upload_cause_t, sec_debug_upload_cause); static int force_error(const char *val, struct kernel_param *kp) { pr_emerg("!!!WARN forced error : %s\n", val); if (!strncmp(val, "wdog", 4)) { pr_emerg("Generating a wdog bark!\n"); raw_local_irq_disable(); while (1) ; } else if (!strncmp(val, "dabort", 6)) { pr_emerg("Generating a data abort exception!\n"); *(unsigned int *)0x0 = 0x0; } else if (!strncmp(val, "pabort", 6)) { pr_emerg("Generating a prefetch abort exception!\n"); ((void (*)(void))0x0)(); } else if (!strncmp(val, "undef", 5)) { pr_emerg("Generating a undefined instruction exception!\n"); BUG(); #ifdef CONFIG_SEC_L1_DCACHE_PANIC_CHK } else if (!strncmp(val, "ldcache", 7)) { pr_emerg("Generating a sec_l1_dcache_check_fail!\n"); sec_l1_dcache_check_fail(); #endif } else if (!strncmp(val, "bushang", 7)) { void __iomem *p; pr_emerg("Generating Bus Hang!\n"); p = ioremap_nocache(0x04300000, 32); *(unsigned int *)p = *(unsigned int *)p; mb(); pr_info("*p = %x\n", *(unsigned int *)p); pr_emerg("Clk may be enabled.Try again if it reaches here!\n"); } else { pr_emerg("No such error defined for now!\n"); } return 0; } static int sec_debug_set_cpu_affinity(const char *val, struct kernel_param *kp) { char *endptr; pid_t pid; int cpu; struct cpumask mask; long ret; pid = (pid_t)memparse(val, &endptr); if (*endptr != '@') { pr_info("%s: invalid input strin: %s\n", __func__, val); return -EINVAL; } cpu = memparse(++endptr, &endptr); cpumask_clear(&mask); cpumask_set_cpu(cpu, &mask); pr_info("%s: Setting %d cpu affinity to cpu%d\n", __func__, pid, cpu); ret = sched_setaffinity(pid, &mask); pr_info("%s: sched_setaffinity 
returned %ld\n", __func__, ret); return 0; } /* for sec debug level */ unsigned int sec_dbg_level; static int __init sec_debug_level(char *str) { get_option(&str, &sec_dbg_level); return 0; } early_param("level", sec_debug_level); bool kernel_sec_set_debug_level(int level) { if (!(level == KERNEL_SEC_DEBUG_LEVEL_LOW || level == KERNEL_SEC_DEBUG_LEVEL_MID || level == KERNEL_SEC_DEBUG_LEVEL_HIGH)) { pr_notice(KERN_NOTICE "(kernel_sec_set_debug_level) The debug" "value is invalid(0x%x)!! Set default" "level(LOW)\n", level); sec_dbg_level = KERNEL_SEC_DEBUG_LEVEL_LOW; return -EINVAL; } sec_dbg_level = level; switch (level) { case KERNEL_SEC_DEBUG_LEVEL_LOW: enable = 0; enable_user = 0; break; case KERNEL_SEC_DEBUG_LEVEL_MID: enable = 1; enable_user = 0; break; case KERNEL_SEC_DEBUG_LEVEL_HIGH: enable = 1; enable_user = 1; break; default: enable = 1; enable_user = 1; } /* write to param */ sec_set_param(param_index_debuglevel, &sec_dbg_level); pr_notice(KERN_NOTICE "(kernel_sec_set_debug_level)" "The debug value is 0x%x !!\n", level); return 1; } EXPORT_SYMBOL(kernel_sec_set_debug_level); int kernel_sec_get_debug_level(void) { sec_get_param(param_index_debuglevel, &sec_dbg_level); if (!(sec_dbg_level == KERNEL_SEC_DEBUG_LEVEL_LOW || sec_dbg_level == KERNEL_SEC_DEBUG_LEVEL_MID || sec_dbg_level == KERNEL_SEC_DEBUG_LEVEL_HIGH)) { /*In case of invalid debug level, default (debug level low)*/ pr_notice(KERN_NOTICE "(%s) The debug value is" "invalid(0x%x)!! Set default level(LOW)\n", __func__, sec_dbg_level); sec_dbg_level = KERNEL_SEC_DEBUG_LEVEL_LOW; sec_set_param(param_index_debuglevel, &sec_dbg_level); } return sec_dbg_level; } EXPORT_SYMBOL(kernel_sec_get_debug_level); /* core reg dump function*/ static void sec_debug_save_core_reg(struct sec_debug_core_t *core_reg) { /* we will be in SVC mode when we enter this function. Collect SVC registers along with cmn registers. 
*/ asm("str r0, [%0,#0]\n\t" /* R0 is pushed first to core_reg */ "mov r0, %0\n\t" /* R0 will be alias for core_reg */ "str r1, [r0,#4]\n\t" /* R1 */ "str r2, [r0,#8]\n\t" /* R2 */ "str r3, [r0,#12]\n\t" /* R3 */ "str r4, [r0,#16]\n\t" /* R4 */ "str r5, [r0,#20]\n\t" /* R5 */ "str r6, [r0,#24]\n\t" /* R6 */ "str r7, [r0,#28]\n\t" /* R7 */ "str r8, [r0,#32]\n\t" /* R8 */ "str r9, [r0,#36]\n\t" /* R9 */ "str r10, [r0,#40]\n\t" /* R10 */ "str r11, [r0,#44]\n\t" /* R11 */ "str r12, [r0,#48]\n\t" /* R12 */ /* SVC */ "str r13, [r0,#52]\n\t" /* R13_SVC */ "str r14, [r0,#56]\n\t" /* R14_SVC */ "mrs r1, spsr\n\t" /* SPSR_SVC */ "str r1, [r0,#60]\n\t" /* PC and CPSR */ "sub r1, r15, #0x4\n\t" /* PC */ "str r1, [r0,#64]\n\t" "mrs r1, cpsr\n\t" /* CPSR */ "str r1, [r0,#68]\n\t" /* SYS/USR */ "mrs r1, cpsr\n\t" /* switch to SYS mode */ "and r1, r1, #0xFFFFFFE0\n\t" "orr r1, r1, #0x1f\n\t" "msr cpsr,r1\n\t" "str r13, [r0,#72]\n\t" /* R13_USR */ "str r14, [r0,#76]\n\t" /* R14_USR */ /* FIQ */ "mrs r1, cpsr\n\t" /* switch to FIQ mode */ "and r1,r1,#0xFFFFFFE0\n\t" "orr r1,r1,#0x11\n\t" "msr cpsr,r1\n\t" "str r8, [r0,#80]\n\t" /* R8_FIQ */ "str r9, [r0,#84]\n\t" /* R9_FIQ */ "str r10, [r0,#88]\n\t" /* R10_FIQ */ "str r11, [r0,#92]\n\t" /* R11_FIQ */ "str r12, [r0,#96]\n\t" /* R12_FIQ */ "str r13, [r0,#100]\n\t" /* R13_FIQ */ "str r14, [r0,#104]\n\t" /* R14_FIQ */ "mrs r1, spsr\n\t" /* SPSR_FIQ */ "str r1, [r0,#108]\n\t" /* IRQ */ "mrs r1, cpsr\n\t" /* switch to IRQ mode */ "and r1, r1, #0xFFFFFFE0\n\t" "orr r1, r1, #0x12\n\t" "msr cpsr,r1\n\t" "str r13, [r0,#112]\n\t" /* R13_IRQ */ "str r14, [r0,#116]\n\t" /* R14_IRQ */ "mrs r1, spsr\n\t" /* SPSR_IRQ */ "str r1, [r0,#120]\n\t" /* MON */ "mrs r1, cpsr\n\t" /* switch to monitor mode */ "and r1, r1, #0xFFFFFFE0\n\t" "orr r1, r1, #0x16\n\t" "msr cpsr,r1\n\t" "str r13, [r0,#124]\n\t" /* R13_MON */ "str r14, [r0,#128]\n\t" /* R14_MON */ "mrs r1, spsr\n\t" /* SPSR_MON */ "str r1, [r0,#132]\n\t" /* ABT */ "mrs r1, cpsr\n\t" /* switch to 
Abort mode */ "and r1, r1, #0xFFFFFFE0\n\t" "orr r1, r1, #0x17\n\t" "msr cpsr,r1\n\t" "str r13, [r0,#136]\n\t" /* R13_ABT */ "str r14, [r0,#140]\n\t" /* R14_ABT */ "mrs r1, spsr\n\t" /* SPSR_ABT */ "str r1, [r0,#144]\n\t" /* UND */ "mrs r1, cpsr\n\t" /* switch to undef mode */ "and r1, r1, #0xFFFFFFE0\n\t" "orr r1, r1, #0x1B\n\t" "msr cpsr,r1\n\t" "str r13, [r0,#148]\n\t" /* R13_UND */ "str r14, [r0,#152]\n\t" /* R14_UND */ "mrs r1, spsr\n\t" /* SPSR_UND */ "str r1, [r0,#156]\n\t" /* restore to SVC mode */ "mrs r1, cpsr\n\t" /* switch to SVC mode */ "and r1, r1, #0xFFFFFFE0\n\t" "orr r1, r1, #0x13\n\t" "msr cpsr,r1\n\t" : /* output */ : "r"(core_reg) /* input */ : "%r0", "%r1" /* clobbered registers */ ); return; } static void sec_debug_save_mmu_reg(struct sec_debug_mmu_reg_t *mmu_reg) { asm("mrc p15, 0, r1, c1, c0, 0\n\t" /* SCTLR */ "str r1, [%0]\n\t" "mrc p15, 0, r1, c2, c0, 0\n\t" /* TTBR0 */ "str r1, [%0,#4]\n\t" "mrc p15, 0, r1, c2, c0,1\n\t" /* TTBR1 */ "str r1, [%0,#8]\n\t" "mrc p15, 0, r1, c2, c0,2\n\t" /* TTBCR */ "str r1, [%0,#12]\n\t" "mrc p15, 0, r1, c3, c0,0\n\t" /* DACR */ "str r1, [%0,#16]\n\t" "mrc p15, 0, r1, c5, c0,0\n\t" /* DFSR */ "str r1, [%0,#20]\n\t" "mrc p15, 0, r1, c6, c0,0\n\t" /* DFAR */ "str r1, [%0,#24]\n\t" "mrc p15, 0, r1, c5, c0,1\n\t" /* IFSR */ "str r1, [%0,#28]\n\t" "mrc p15, 0, r1, c6, c0,2\n\t" /* IFAR */ "str r1, [%0,#32]\n\t" /* Don't populate DAFSR and RAFSR */ "mrc p15, 0, r1, c10, c2,0\n\t" /* PMRRR */ "str r1, [%0,#44]\n\t" "mrc p15, 0, r1, c10, c2,1\n\t" /* NMRRR */ "str r1, [%0,#48]\n\t" "mrc p15, 0, r1, c13, c0,0\n\t" /* FCSEPID */ "str r1, [%0,#52]\n\t" "mrc p15, 0, r1, c13, c0,1\n\t" /* CONTEXT */ "str r1, [%0,#56]\n\t" "mrc p15, 0, r1, c13, c0,2\n\t" /* URWTPID */ "str r1, [%0,#60]\n\t" "mrc p15, 0, r1, c13, c0,3\n\t" /* UROTPID */ "str r1, [%0,#64]\n\t" "mrc p15, 0, r1, c13, c0,4\n\t" /* POTPIDR */ "str r1, [%0,#68]\n\t" : /* output */ : "r"(mmu_reg) /* input */ : "%r1", "memory" /* clobbered register */ ); } 
/*
 * Snapshot this CPU's core and MMU registers into the per-cpu save areas.
 * Runs with local interrupts disabled so the captured state is consistent.
 */
static void sec_debug_save_context(void)
{
	unsigned long flags;
	local_irq_save(flags);
	sec_debug_save_mmu_reg(&per_cpu
			       (sec_debug_mmu_reg, smp_processor_id()));
	sec_debug_save_core_reg(&per_cpu
				(sec_debug_core_reg, smp_processor_id()));
	pr_emerg("(%s) context saved(CPU:%d)\n", __func__, smp_processor_id());
	local_irq_restore(flags);
}

/* Offset of the restart-reason word inside MSM IMEM. */
#define RESTART_REASON_ADDR 0x65C

/*
 * Write the upload "magic" value into the IMEM restart-reason word and
 * flush caches so the bootloader sees it after a reset.
 */
static void sec_debug_set_upload_magic(unsigned magic)
{
	pr_emerg("(%s) %x\n", __func__, magic);
	restart_reason = MSM_IMEM_BASE + RESTART_REASON_ADDR;
	__raw_writel(magic, restart_reason);
	flush_cache_all();
	outer_flush_all();
}

/* Reboot notifier: clear the upload magic on a normal (clean) reboot. */
static int sec_debug_normal_reboot_handler(struct notifier_block *nb,
					   unsigned long l, void *p)
{
	sec_debug_set_upload_magic(0x0);
	return 0;
}

/*
 * Record why an upload (ramdump) is being taken, both per-cpu and at a
 * fixed virtual address.
 * NOTE(review): the raw store to 0xc0000004 is a hard-coded virtual
 * address with no visible mapping here — presumably a fixed debug cookie
 * location read by the bootloader; confirm the mapping is always valid.
 */
static void sec_debug_set_upload_cause(enum sec_debug_upload_cause_t type)
{
	per_cpu(sec_debug_upload_cause, smp_processor_id()) = type;
	*(unsigned int *)0xc0000004 = type;
	pr_emerg("(%s) %x\n", __func__, type);
}

/*
 * Flush caches and force a hardware reset; spins forever afterwards
 * waiting for the reset to take effect.
 */
void sec_debug_hw_reset(void)
{
	pr_emerg("(%s) %s\n", __func__, sec_build_info);
	pr_emerg("(%s) rebooting...\n", __func__);
	flush_cache_all();
	outer_flush_all();
	msm_restart(0, "sec_debug_hw_reset");
	/* msm_restart() should not return; spin until the reset hits. */
	while (1)
		;
}
EXPORT_SYMBOL(sec_debug_hw_reset);

#ifdef CONFIG_SEC_PERIPHERAL_SECURE_CHK
/* Force an upload-mode reset when a peripheral secure check fails. */
void sec_peripheral_secure_check_fail(void)
{
	sec_debug_set_qc_dload_magic(0);
	sec_debug_set_upload_magic(0x77665507);
	pr_emerg("(%s) %s\n", __func__, sec_build_info);
	pr_emerg("(%s) rebooting...\n", __func__);
	flush_cache_all();
	outer_flush_all();
	msm_restart(0, "peripheral_hw_reset");
	while (1)
		;
}
EXPORT_SYMBOL(sec_peripheral_secure_check_fail);
#endif

#ifdef CONFIG_SEC_L1_DCACHE_PANIC_CHK
/* Force an upload-mode reset when the L1 D-cache check fails. */
void sec_l1_dcache_check_fail(void)
{
	sec_debug_set_qc_dload_magic(0);
	sec_debug_set_upload_magic(0x77665588);
	pr_emerg("(%s) %s\n", __func__, sec_build_info);
	pr_emerg("(%s) rebooting...\n", __func__);
	flush_cache_all();
	outer_flush_all();
	msm_restart(0, "l1_dcache_reset");
	while (1)
		;
}
EXPORT_SYMBOL(sec_l1_dcache_check_fail);
#endif

#ifdef CONFIG_SEC_DEBUG_LOW_LOG
/* Accessor for the reset reason recorded elsewhere in this file. */
unsigned sec_debug_get_reset_reason(void)
{
	return reset_reason;
}
#endif

/*
 * Panic notifier: classify the panic message into an upload cause, save
 * CPU context, and reset into upload mode.  @buf is the panic string.
 * NOTE(review): strncmp(buf, "...", strlen(buf)) matches when buf is a
 * *prefix* of the literal too (e.g. an empty buf matches everything) —
 * presumably intentional for these fixed strings, but worth confirming.
 */
static int sec_debug_panic_handler(struct notifier_block *nb,
				   unsigned long l, void *buf)
{
	unsigned int len;
	emerg_pet_watchdog();
	sec_debug_set_qc_dload_magic(1);
	sec_debug_set_upload_magic(0x776655ee);
#ifdef CONFIG_SEC_SSR_DEBUG_LEVEL_CHK
	if (!enable && !enable_cp_debug) {
#else
	if (!enable) {
#endif
		/* Debug disabled: plain reset (low-log builds) or refuse. */
#ifdef CONFIG_SEC_DEBUG_LOW_LOG
		sec_debug_hw_reset();
#endif
		return -EPERM;
	}
	len = strlen(buf);
	if (!strncmp(buf, "User Fault", len))
		sec_debug_set_upload_cause(UPLOAD_CAUSE_USER_FAULT);
	else if (!strncmp(buf, "Crash Key", len))
		sec_debug_set_upload_cause(UPLOAD_CAUSE_FORCED_UPLOAD);
	else if (!strncmp(buf, "CP Crash", len))
		sec_debug_set_upload_cause(UPLOAD_CAUSE_CP_ERROR_FATAL);
	else if (!strncmp(buf, "MDM Crash", len))
		sec_debug_set_upload_cause(UPLOAD_CAUSE_MDM_ERROR_FATAL);
	else if (strnstr(buf, "modem", len) != NULL)
		sec_debug_set_upload_cause(UPLOAD_CAUSE_MODEM_RST_ERR);
	else if (strnstr(buf, "riva", len) != NULL)
		sec_debug_set_upload_cause(UPLOAD_CAUSE_RIVA_RST_ERR);
	else if (strnstr(buf, "lpass", len) != NULL)
		sec_debug_set_upload_cause(UPLOAD_CAUSE_LPASS_RST_ERR);
	else if (strnstr(buf, "dsps", len) != NULL)
		sec_debug_set_upload_cause(UPLOAD_CAUSE_DSPS_RST_ERR);
	else if (!strnicmp(buf, "subsys", len))
		sec_debug_set_upload_cause(UPLOAD_CAUSE_PERIPHERAL_ERR);
	else
		sec_debug_set_upload_cause(UPLOAD_CAUSE_KERNEL_PANIC);
	ssr_panic_handler_for_sec_dbg();
	sec_debug_dump_stack();
	sec_debug_hw_reset();
	return 0;
}

/*
 * Called from dump_stack()
 * This function call does not necessarily mean that a fatal error
 * had occurred. It may be just a warning.
 * Saves CPU context and flushes L1 so the saved state survives a reset.
 */
int sec_debug_dump_stack(void)
{
	if (!enable)
		return -EPERM;
	sec_debug_save_context();
	/* flush L1 from each core.
	   L2 will be flushed later before reset. */
	flush_cache_all();
	return 0;
}
EXPORT_SYMBOL(sec_debug_dump_stack);

/*
 * Key-combo watcher: VolUp -> VolDown -> Power triggers a forced panic
 * ("Crash Key").  Any other key resets the state machine.
 */
void sec_debug_check_crash_key(unsigned int code, int value)
{
	static enum { NONE, STEP1, STEP2} state = NONE;
	if (!enable)
		return;
	switch (state) {
	case NONE:
		if (code == KEY_VOLUMEUP && value)
			state = STEP1;
		else
			state = NONE;
		break;
	case STEP1:
		if (code == KEY_VOLUMEDOWN && value)
			state = STEP2;
		else
			state = NONE;
		break;
	case STEP2:
		if (code == KEY_POWER && value) {
			dump_all_task_info();
			dump_cpu_stat();
			panic("Crash Key");
		} else {
			state = NONE;
		}
		break;
	}
}

/* Notifier registered on the reboot chain (clears upload magic). */
static struct notifier_block nb_reboot_block = {
	.notifier_call = sec_debug_normal_reboot_handler
};

/* Notifier registered on the panic chain (records cause, resets). */
static struct notifier_block nb_panic_block = {
	.notifier_call = sec_debug_panic_handler,
};

/*
 * Assemble the human-readable build banner in sec_build_info.
 * NOTE(review): the strncat bounds here are the lengths of the appended
 * pieces, not the remaining space in sec_build_info — this relies on the
 * destination buffer being large enough for all five pieces; confirm its
 * declared size.
 */
static void sec_debug_set_build_info(void)
{
	char *p = sec_build_info;
	strncat(p, "Kernel Build Info : ", 20);
	strncat(p, "Date:", 5);
	strncat(p, sec_build_time[0], 12);
	strncat(p, "Time:", 5);
	strncat(p, sec_build_time[1], 9);
}

/*
 * Map (or kmalloc, as a fallback) the sec_debug log buffer and reset all
 * ring-buffer indices to -1.  Returns 0 on success, -EFAULT when the
 * buffer is missing or too small.
 */
static int __init __init_sec_debug_log(void)
{
	int i;
	struct sec_debug_log *vaddr;
	int size;
	if (secdbg_paddr == 0 || secdbg_size == 0) {
		pr_info("%s: sec debug buffer not provided. Using kmalloc..\n",
			__func__);
		size = sizeof(struct sec_debug_log);
		vaddr = kmalloc(size, GFP_KERNEL);
	} else {
		size = secdbg_size;
		vaddr = ioremap_nocache(secdbg_paddr, secdbg_size);
	}
	pr_info("%s: vaddr=0x%x paddr=0x%x size=0x%x "
		"sizeof(struct sec_debug_log)=0x%x\n", __func__,
		(unsigned int)vaddr, secdbg_paddr, secdbg_size,
		sizeof(struct sec_debug_log));
	if ((vaddr == NULL) || (sizeof(struct sec_debug_log) > size)) {
		pr_info("%s: ERROR! init failed!\n", __func__);
		return -EFAULT;
	}
	/* Indices start at -1 so the first atomic_inc_return() yields 0. */
	for (i = 0; i < CONFIG_NR_CPUS; i++) {
		atomic_set(&(vaddr->idx_sched[i]), -1);
		atomic_set(&(vaddr->idx_irq[i]), -1);
		atomic_set(&(vaddr->idx_irq_exit[i]), -1);
		atomic_set(&(vaddr->idx_timer[i]), -1);
#ifdef CONFIG_SEC_DEBUG_MSG_LOG
		atomic_set(&(vaddr->idx_secmsg[i]), -1);
#endif
	}
#ifdef CONFIG_SEC_DEBUG_DCVS_LOG
	atomic_set(&(vaddr->dcvs_log_idx), -1);
#endif
#ifdef CONFIG_SEC_DEBUG_FUELGAUGE_LOG
	atomic_set(&(vaddr->fg_log_idx), -1);
#endif
	secdbg_log = vaddr;
	pr_info("%s: init done\n", __func__);
	return 0;
}

#ifdef CONFIG_SEC_DEBUG_SUBSYS
/* Record symbolized PC/LR of a die() into the krait subsys area. */
int sec_debug_save_die_info(const char *str, struct pt_regs *regs)
{
	if (!secdbg_krait)
		return -ENOMEM;
	snprintf(secdbg_krait->excp.pc_sym, sizeof(secdbg_krait->excp.pc_sym),
		 "%pS", (void *)regs->ARM_pc);
	snprintf(secdbg_krait->excp.lr_sym, sizeof(secdbg_krait->excp.lr_sym),
		 "%pS", (void *)regs->ARM_lr);
	return 0;
}

/* Record panic caller, message and current thread into the subsys area. */
int sec_debug_save_panic_info(const char *str, unsigned int caller)
{
	if (!secdbg_krait)
		return -ENOMEM;
	snprintf(secdbg_krait->excp.panic_caller,
		 sizeof(secdbg_krait->excp.panic_caller), "%pS",
		 (void *)caller);
	snprintf(secdbg_krait->excp.panic_msg,
		 sizeof(secdbg_krait->excp.panic_msg), "%s", str);
	snprintf(secdbg_krait->excp.thread, sizeof(secdbg_krait->excp.thread),
		 "%s:%d", current->comm, task_pid_nr(current));
	return 0;
}

/*
 * Register a named variable (size + physical address) for post-mortem
 * monitoring.
 * NOTE(review): the bounds check uses '>' — when idx == ARRAY_SIZE(var)
 * the write below lands one slot past the end of the array.  Looks like
 * an off-by-one ('>=' intended); confirm against the struct definition.
 */
int sec_debug_subsys_add_var_mon(char *name, unsigned int size,
	unsigned int pa)
{
	if (!secdbg_krait)
		return -ENOMEM;
	if (secdbg_krait->var_mon.idx > ARRAY_SIZE(secdbg_krait->var_mon.var))
		return -ENOMEM;
	strlcpy(secdbg_krait->var_mon.var[secdbg_krait->var_mon.idx].name,
		name, sizeof(secdbg_krait->var_mon.var[0].name));
	secdbg_krait->var_mon.var[secdbg_krait->var_mon.idx].sizeof_type =
		size;
	secdbg_krait->var_mon.var[secdbg_krait->var_mon.idx].var_paddr = pa;
	secdbg_krait->var_mon.idx++;
	return 0;
}

/* Dump the modem exception record and its core registers to the log. */
void print_modem_dump_info(void)
{
	int i = 0;
	char modem_exception_info[1024];
	if (!secdbg_subsys)
		return;
	secdbg_modem = &secdbg_subsys->priv.modem;
	/* &x->field is never NULL; this check is vestigial but harmless. */
	if (!secdbg_modem)
		return;
	pr_info("secdbg_modem address : 0x%x", (unsigned int)secdbg_modem);
	snprintf(modem_exception_info, ARRAY_SIZE(modem_exception_info)-1,
		"Task: %s\nFile name: %s\nLine: %d\nError msg: %s\n",
		secdbg_modem->excp.task, secdbg_modem->excp.file,
		secdbg_modem->excp.line, secdbg_modem->excp.msg);
	pr_info("*******************************************************\n");
	pr_info("modem exception information : %s\n", modem_exception_info);
	pr_info("Register information:\n");
	for (i = 0; i < ARRAY_SIZE(secdbg_modem->excp.core_reg); i++) {
		snprintf(modem_exception_info,
			ARRAY_SIZE(modem_exception_info)-1,
			"\t%s: 0x%08x\n",
			secdbg_modem->excp.core_reg[i].name,
			secdbg_modem->excp.core_reg[i].value);
		pr_info(" %s\n", modem_exception_info);
	}
	pr_info("*******************************************************\n");
}

/*
 * Allocate the shared-memory debug area (SMEM_ID_VENDOR2) and publish
 * physical-address views of each subsystem's slice, kernel log info,
 * framebuffer geometry and ring-buffer layout.  The magic words are
 * written last so readers only trust a fully initialized area.
 */
int sec_debug_subsys_init(void)
{
	pr_info("%s: msm_shared_ram_phys=%x SMEM_ID_VENDOR2=%d size=%d\n",
		__func__, msm_shared_ram_phys, SMEM_ID_VENDOR2,
		sizeof(struct sec_debug_subsys));
	secdbg_subsys = (struct sec_debug_subsys *)smem_alloc2(
		SMEM_ID_VENDOR2, sizeof(struct sec_debug_subsys));
	if (secdbg_subsys == NULL) {
		pr_info("%s: smem alloc failed!\n", __func__);
		return -ENOMEM;
	}
	secdbg_krait = &secdbg_subsys->priv.krait;
	/* Convert kernel virtual addresses to SMEM physical addresses. */
	secdbg_subsys->krait = (struct sec_debug_subsys_data_krait *)(
		(unsigned int)&secdbg_subsys->priv.krait -
		(unsigned int)MSM_SHARED_RAM_BASE + msm_shared_ram_phys);
	secdbg_subsys->rpm = (struct sec_debug_subsys_data *)(
		(unsigned int)&secdbg_subsys->priv.rpm -
		(unsigned int)MSM_SHARED_RAM_BASE + msm_shared_ram_phys);
	secdbg_subsys->modem = (struct sec_debug_subsys_data_modem *)(
		(unsigned int)&secdbg_subsys->priv.modem -
		(unsigned int)MSM_SHARED_RAM_BASE + msm_shared_ram_phys);
	secdbg_subsys->dsps = (struct sec_debug_subsys_data *)(
		(unsigned int)&secdbg_subsys->priv.dsps -
		(unsigned int)MSM_SHARED_RAM_BASE + msm_shared_ram_phys);
	pr_info("%s: krait(%x) rpm(%x) modem(%x) dsps(%x)\n", __func__,
		(unsigned int)secdbg_subsys->krait,
		(unsigned int)secdbg_subsys->rpm,
		(unsigned int)secdbg_subsys->modem,
		(unsigned int)secdbg_subsys->dsps);
	strncpy(secdbg_krait->name, "Krait", sizeof(secdbg_krait->name));
	strncpy(secdbg_krait->state, "Init", sizeof(secdbg_krait->state));
	secdbg_krait->nr_cpus = CONFIG_NR_CPUS;
	sec_debug_subsys_set_kloginfo(&secdbg_krait->log.idx_paddr,
		&secdbg_krait->log.log_paddr, &secdbg_krait->log.size);
	sec_debug_subsys_set_logger_info(&secdbg_krait->logger_log);
	secdbg_krait->tz_core_dump =
		(struct tzbsp_dump_buf_s **)get_wdog_regsave_paddr();
	get_fbinfo(0, &secdbg_krait->fb_info.fb_paddr,
		&secdbg_krait->fb_info.xres,
		&secdbg_krait->fb_info.yres,
		&secdbg_krait->fb_info.bpp,
		&secdbg_krait->fb_info.rgb_bitinfo.r_off,
		&secdbg_krait->fb_info.rgb_bitinfo.r_len,
		&secdbg_krait->fb_info.rgb_bitinfo.g_off,
		&secdbg_krait->fb_info.rgb_bitinfo.g_len,
		&secdbg_krait->fb_info.rgb_bitinfo.b_off,
		&secdbg_krait->fb_info.rgb_bitinfo.b_len,
		&secdbg_krait->fb_info.rgb_bitinfo.a_off,
		&secdbg_krait->fb_info.rgb_bitinfo.a_len);
	SEC_DEBUG_SUBSYS_ADD_STR_TO_MONITOR(unit_name);
	SEC_DEBUG_SUBSYS_ADD_STR_TO_MONITOR(linux_banner);
	//SEC_DEBUG_SUBSYS_ADD_VAR_TO_MONITOR(global_pvs);
	if (secdbg_paddr) {
		secdbg_krait->sched_log.sched_idx_paddr = secdbg_paddr +
			offsetof(struct sec_debug_log, idx_sched);
		secdbg_krait->sched_log.sched_buf_paddr = secdbg_paddr +
			offsetof(struct sec_debug_log, sched);
		secdbg_krait->sched_log.sched_struct_sz =
			sizeof(struct sched_log);
		secdbg_krait->sched_log.sched_array_cnt = SCHED_LOG_MAX;
		secdbg_krait->sched_log.irq_idx_paddr = secdbg_paddr +
			offsetof(struct sec_debug_log, idx_irq);
		secdbg_krait->sched_log.irq_buf_paddr = secdbg_paddr +
			offsetof(struct sec_debug_log, irq);
		secdbg_krait->sched_log.irq_struct_sz =
			sizeof(struct irq_log);
		secdbg_krait->sched_log.irq_array_cnt = SCHED_LOG_MAX;
		secdbg_krait->sched_log.irq_exit_idx_paddr = secdbg_paddr +
			offsetof(struct sec_debug_log, idx_irq_exit);
		secdbg_krait->sched_log.irq_exit_buf_paddr = secdbg_paddr +
			offsetof(struct sec_debug_log, irq_exit);
		secdbg_krait->sched_log.irq_exit_struct_sz =
			sizeof(struct irq_exit_log);
		secdbg_krait->sched_log.irq_exit_array_cnt = SCHED_LOG_MAX;
	}
	/* fill magic number last to ensure data integrity when the magic
	 * numbers are written */
	secdbg_subsys->magic[0] = SEC_DEBUG_SUBSYS_MAGIC0;
	secdbg_subsys->magic[1] = SEC_DEBUG_SUBSYS_MAGIC1;
	secdbg_subsys->magic[2] = SEC_DEBUG_SUBSYS_MAGIC2;
	secdbg_subsys->magic[3] = SEC_DEBUG_SUBSYS_MAGIC3;
	return 0;
}
late_initcall(sec_debug_subsys_init);
#endif

/*
 * Main init: map the restart-reason word, register reboot/panic
 * notifiers, and (when debug is enabled) set up log buffers, build info
 * and the initial upload magic/cause.
 */
int __init sec_debug_init(void)
{
	restart_reason = MSM_IMEM_BASE + RESTART_REASON_ADDR;
	pr_emerg("%s: enable=%d\n", __func__, enable);
	restart_reason = ioremap_nocache((unsigned long)restart_reason,
		SZ_4K);
	/* check restart_reason here */
	pr_emerg("%s: restart_reason : 0x%x\n", __func__,
		(unsigned int)restart_reason);
	register_reboot_notifier(&nb_reboot_block);
	atomic_notifier_chain_register(&panic_notifier_list, &nb_panic_block);
	if (!enable)
		return -EPERM;
	__init_sec_debug_log();
#ifdef CONFIG_SEC_DEBUG_SEMAPHORE_LOG
	debug_semaphore_init();
#endif
	sec_debug_set_build_info();
	sec_debug_set_upload_magic(0x776655ee);
	sec_debug_set_upload_cause(UPLOAD_CAUSE_INIT);
	return 0;
}

/* Whether sec_debug features are active (set from boot parameters). */
int sec_debug_is_enabled(void)
{
	return enable;
}

#ifdef CONFIG_SEC_SSR_DEBUG_LEVEL_CHK
/* Whether CP (modem) debug is active for subsystem-restart decisions. */
int sec_debug_is_enabled_for_ssr(void)
{
	return enable_cp_debug;
}
#endif

/* klaatu - schedule log */
#ifdef CONFIG_SEC_DEBUG_SCHED_LOG
/*
 * Append one entry to the per-cpu scheduler ring buffer: either a task
 * switch (task != NULL) or a free-form marker string (msg != NULL).
 * SCHED_LOG_MAX must be a power of two for the index mask to work.
 */
void __sec_debug_task_sched_log(int cpu, struct task_struct *task,
	char *msg)
{
	unsigned i;
	if (!secdbg_log)
		return;
	if (!task && !msg)
		return;
	i = atomic_inc_return(&(secdbg_log->idx_sched[cpu])) &
		(SCHED_LOG_MAX - 1);
	secdbg_log->sched[cpu][i].time = cpu_clock(cpu);
	if (task) {
		strncpy(secdbg_log->sched[cpu][i].comm, task->comm,
			sizeof(secdbg_log->sched[cpu][i].comm));
		secdbg_log->sched[cpu][i].pid = task->pid;
	} else {
		strncpy(secdbg_log->sched[cpu][i].comm, msg,
			sizeof(secdbg_log->sched[cpu][i].comm));
		secdbg_log->sched[cpu][i].pid = -1;
	}
}

/* Log a marker string on the current CPU (preemption-safe wrapper). */
void sec_debug_task_sched_log_short_msg(char *msg)
{
	preempt_disable();
	__sec_debug_task_sched_log(smp_processor_id(), NULL, msg);
	preempt_enable();
}

/* Log a task switch on @cpu. */
void sec_debug_task_sched_log(int cpu, struct task_struct *task)
{
	__sec_debug_task_sched_log(cpu, task, NULL);
}

/* Append a timer event to the current CPU's timer ring buffer. */
void sec_debug_timer_log(unsigned int type, int int_lock, void *fn)
{
	int cpu = smp_processor_id();
	unsigned i;
	if (!secdbg_log)
		return;
	i = atomic_inc_return(&(secdbg_log->idx_timer[cpu])) &
		(SCHED_LOG_MAX - 1);
	secdbg_log->timer_log[cpu][i].time = cpu_clock(cpu);
	secdbg_log->timer_log[cpu][i].type = type;
	secdbg_log->timer_log[cpu][i].int_lock = int_lock;
	secdbg_log->timer_log[cpu][i].fn = (void *)fn;
}

/*
 * Append an IRQ entry/exit event to the current CPU's IRQ ring buffer.
 * NOTE(review): .context stores &cpu, the address of a stack local that
 * dies when this function returns — the stored pointer is only useful
 * as an approximate stack-position marker, never to be dereferenced.
 */
void sec_debug_irq_sched_log(unsigned int irq, void *fn, int en)
{
	int cpu = smp_processor_id();
	unsigned i;
	if (!secdbg_log)
		return;
	i = atomic_inc_return(&(secdbg_log->idx_irq[cpu])) &
		(SCHED_LOG_MAX - 1);
	secdbg_log->irq[cpu][i].time = cpu_clock(cpu);
	secdbg_log->irq[cpu][i].irq = irq;
	secdbg_log->irq[cpu][i].fn = (void *)fn;
	secdbg_log->irq[cpu][i].en = en;
	secdbg_log->irq[cpu][i].preempt_count = preempt_count();
	secdbg_log->irq[cpu][i].context = &cpu;
}

#ifdef CONFIG_SEC_DEBUG_IRQ_EXIT_LOG
/* Record an IRQ handler's start/end timestamps and elapsed time. */
void sec_debug_irq_enterexit_log(unsigned int irq,
	unsigned long long start_time)
{
	int cpu = smp_processor_id();
	unsigned i;
	if (!secdbg_log)
		return;
	i = atomic_inc_return(&(secdbg_log->idx_irq_exit[cpu])) &
		(SCHED_LOG_MAX - 1);
	secdbg_log->irq_exit[cpu][i].time = start_time;
	secdbg_log->irq_exit[cpu][i].end_time = cpu_clock(cpu);
	secdbg_log->irq_exit[cpu][i].irq = irq;
	secdbg_log->irq_exit[cpu][i].elapsed_time =
		secdbg_log->irq_exit[cpu][i].end_time - start_time;
}
#endif

#ifdef CONFIG_SEC_DEBUG_MSG_LOG
/*
 * printf-style message logging into the per-cpu secmsg ring buffer;
 * records both the immediate and supplied caller addresses.
 * Returns the vsnprintf result (formatted length), 0 when disabled.
 */
asmlinkage int sec_debug_msg_log(void *caller, const char *fmt, ...)
{
	int cpu = smp_processor_id();
	int r = 0;
	int i;
	va_list args;
	if (!secdbg_log)
		return 0;
	i = atomic_inc_return(&(secdbg_log->idx_secmsg[cpu])) &
		(MSG_LOG_MAX - 1);
	secdbg_log->secmsg[cpu][i].time = cpu_clock(cpu);
	va_start(args, fmt);
	r = vsnprintf(secdbg_log->secmsg[cpu][i].msg,
		      sizeof(secdbg_log->secmsg[cpu][i].msg), fmt, args);
	va_end(args);
	secdbg_log->secmsg[cpu][i].caller0 = __builtin_return_address(0);
	secdbg_log->secmsg[cpu][i].caller1 = caller;
	secdbg_log->secmsg[cpu][i].task = current->comm;
	return r;
}
#endif
#endif /* CONFIG_SEC_DEBUG_SCHED_LOG */

/* klaatu - semaphore log */
#ifdef CONFIG_SEC_DEBUG_SEMAPHORE_LOG
/*
 * Pre-allocate SEMAPHORE_LOG_MAX tracking records onto the free list.
 * NOTE(review): the kmalloc return is not checked before list_add;
 * an allocation failure here would oops — confirm acceptable at init.
 */
void debug_semaphore_init(void)
{
	int i = 0;
	struct sem_debug *sem_debug = NULL;
	spin_lock_init(&sem_debug_lock);
	sem_debug_free_head_cnt = 0;
	sem_debug_done_head_cnt = 0;
	/* initialize list head of sem_debug */
	INIT_LIST_HEAD(&sem_debug_free_head.list);
	INIT_LIST_HEAD(&sem_debug_done_head.list);
	for (i = 0; i < SEMAPHORE_LOG_MAX; i++) {
		/* malloc semaphore */
		sem_debug = kmalloc(sizeof(struct sem_debug), GFP_KERNEL);
		/* add list */
		list_add(&sem_debug->list, &sem_debug_free_head.list);
		sem_debug_free_head_cnt++;
	}
	sem_debug_init = 1;
}

/* Move a record from the free list to the done list for this down(). */
void debug_semaphore_down_log(struct semaphore *sem)
{
	struct list_head *tmp;
	struct sem_debug *sem_dbg;
	unsigned long flags;
	if (!sem_debug_init)
		return;
	spin_lock_irqsave(&sem_debug_lock, flags);
	list_for_each(tmp, &sem_debug_free_head.list) {
		sem_dbg = list_entry(tmp, struct sem_debug, list);
		sem_dbg->task = current;
		sem_dbg->sem = sem;
		sem_dbg->pid = current->pid;
		sem_dbg->cpu = smp_processor_id();
		list_del(&sem_dbg->list);
		list_add(&sem_dbg->list, &sem_debug_done_head.list);
		sem_debug_free_head_cnt--;
		sem_debug_done_head_cnt++;
		break;
	}
	spin_unlock_irqrestore(&sem_debug_lock, flags);
}

/* Return the matching record to the free list on up(). */
void debug_semaphore_up_log(struct semaphore *sem)
{
	struct list_head *tmp;
	struct sem_debug *sem_dbg;
	unsigned long flags;
	if (!sem_debug_init)
		return;
	spin_lock_irqsave(&sem_debug_lock, flags);
	list_for_each(tmp, &sem_debug_done_head.list) {
		sem_dbg = list_entry(tmp, struct sem_debug, list);
		if (sem_dbg->sem == sem && sem_dbg->pid == current->pid) {
			list_del(&sem_dbg->list);
			list_add(&sem_dbg->list, &sem_debug_free_head.list);
			sem_debug_free_head_cnt++;
			sem_debug_done_head_cnt--;
			break;
		}
	}
	spin_unlock_irqrestore(&sem_debug_lock, flags);
}

/* rwsemaphore logging */
/* Same pattern as above, for rw_semaphores; kmalloc also unchecked. */
void debug_rwsemaphore_init(void)
{
	int i = 0;
	struct rwsem_debug *rwsem_debug = NULL;
	spin_lock_init(&rwsem_debug_lock);
	rwsem_debug_free_head_cnt = 0;
	rwsem_debug_done_head_cnt = 0;
	/* initialize list head of sem_debug */
	INIT_LIST_HEAD(&rwsem_debug_free_head.list);
	INIT_LIST_HEAD(&rwsem_debug_done_head.list);
	for (i = 0; i < RWSEMAPHORE_LOG_MAX; i++) {
		/* malloc semaphore */
		rwsem_debug = kmalloc(sizeof(struct rwsem_debug), GFP_KERNEL);
		/* add list */
		list_add(&rwsem_debug->list, &rwsem_debug_free_head.list);
		rwsem_debug_free_head_cnt++;
	}
	rwsem_debug_init = 1;
}

/* Track a down_read/down_write; @dir records the direction. */
void debug_rwsemaphore_down_log(struct rw_semaphore *sem, int dir)
{
	struct list_head *tmp;
	struct rwsem_debug *sem_dbg;
	unsigned long flags;
	if (!rwsem_debug_init)
		return;
	spin_lock_irqsave(&rwsem_debug_lock, flags);
	list_for_each(tmp, &rwsem_debug_free_head.list) {
		sem_dbg = list_entry(tmp, struct rwsem_debug, list);
		sem_dbg->task = current;
		sem_dbg->sem = sem;
		sem_dbg->pid = current->pid;
		sem_dbg->cpu = smp_processor_id();
		sem_dbg->direction = dir;
		list_del(&sem_dbg->list);
		list_add(&sem_dbg->list, &rwsem_debug_done_head.list);
		rwsem_debug_free_head_cnt--;
		rwsem_debug_done_head_cnt++;
		break;
	}
	spin_unlock_irqrestore(&rwsem_debug_lock, flags);
}

/* Return the matching rwsem record to the free list on up(). */
void debug_rwsemaphore_up_log(struct rw_semaphore *sem)
{
	struct list_head *tmp;
	struct rwsem_debug *sem_dbg;
	unsigned long flags;
	if (!rwsem_debug_init)
		return;
	spin_lock_irqsave(&rwsem_debug_lock, flags);
	list_for_each(tmp, &rwsem_debug_done_head.list) {
		sem_dbg = list_entry(tmp, struct rwsem_debug, list);
		if (sem_dbg->sem == sem && sem_dbg->pid == current->pid) {
			list_del(&sem_dbg->list);
			list_add(&sem_dbg->list, &rwsem_debug_free_head.list);
			rwsem_debug_free_head_cnt++;
			rwsem_debug_done_head_cnt--;
			break;
		}
	}
	spin_unlock_irqrestore(&rwsem_debug_lock, flags);
}
#endif /* CONFIG_SEC_DEBUG_SEMAPHORE_LOG */

/*
 * Parse the "sec_dbg=<size>@<paddr>" boot parameter; size must be a
 * power of two for the ring-index masking to be valid.
 */
static int __init sec_dbg_setup(char *str)
{
	unsigned size = memparse(str, &str);
	pr_emerg("%s: str=%s\n", __func__, str);
	if (size && (size == roundup_pow_of_two(size)) && (*str == '@')) {
		secdbg_paddr = (unsigned int)memparse(++str, NULL);
		secdbg_size = size;
	}
	pr_emerg("%s: secdbg_paddr = 0x%x\n", __func__, secdbg_paddr);
	pr_emerg("%s: secdbg_size = 0x%x\n", __func__, secdbg_size);
	return 1;
}
__setup("sec_dbg=", sec_dbg_setup);

/* Panic on demand when both debug and user-fault dumping are enabled. */
static void sec_user_fault_dump(void)
{
	if (enable == 1 && enable_user == 1)
		panic("User Fault");
}

/*
 * /proc/user_fault write handler: writing "dump_user_fault" triggers a
 * user-fault panic dump.  Returns @count, or -EINVAL/-EFAULT on error.
 */
static int sec_user_fault_write(struct file *file, const char __user *buffer,
	size_t count, loff_t *offs)
{
	char buf[100];
	if (count > sizeof(buf) - 1)
		return -EINVAL;
	if (copy_from_user(buf, buffer, count))
		return -EFAULT;
	buf[count] = '\0';
	if (strncmp(buf, "dump_user_fault", 15) == 0)
		sec_user_fault_dump();
	return count;
}

/* Write-only proc interface for triggering a user-fault dump. */
static const struct file_operations sec_user_fault_proc_fops = {
	.write = sec_user_fault_write,
};

/* Create /proc/user_fault (write-only for owner/group). */
static int __init sec_debug_user_fault_init(void)
{
	struct proc_dir_entry *entry;
	entry = proc_create("user_fault", S_IWUSR|S_IWGRP, NULL,
			    &sec_user_fault_proc_fops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(sec_debug_user_fault_init);

#ifdef CONFIG_SEC_DEBUG_DCVS_LOG
/* Record a CPU-frequency (DCVS) transition in the shared ring buffer. */
void sec_debug_dcvs_log(int cpu_no, unsigned int prev_freq,
	unsigned int new_freq)
{
	unsigned int i;
	if (!secdbg_log)
		return;
	i = atomic_inc_return(&(secdbg_log->dcvs_log_idx)) &
		(DCVS_LOG_MAX - 1);
	secdbg_log->dcvs_log[i].cpu_no = cpu_no;
	secdbg_log->dcvs_log[i].prev_freq = prev_freq;
	secdbg_log->dcvs_log[i].new_freq = new_freq;
	secdbg_log->dcvs_log[i].time = cpu_clock(cpu_no);
}
#endif

#ifdef CONFIG_SEC_DEBUG_FUELGAUGE_LOG
/* Record a fuel-gauge sample (voltage/SoC/charging) in the ring buffer. */
void sec_debug_fuelgauge_log(unsigned int voltage, unsigned short soc,
	unsigned short charging_status)
{
	unsigned int i;
	int cpu = smp_processor_id();
	if (!secdbg_log)
		return;
	i = atomic_inc_return(&(secdbg_log->fg_log_idx)) &
		(FG_LOG_MAX - 1);
	secdbg_log->fg_log[i].time = cpu_clock(cpu);
	secdbg_log->fg_log[i].voltage = voltage;
	secdbg_log->fg_log[i].soc = soc;
	secdbg_log->fg_log[i].charging_status = charging_status;
}
#endif
gpl-2.0
smarkwell/asuswrt-merlin
release/src/router/db-4.8.30/build_vxworks/test_micro/b_util.c
42
3403
/*
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 2005-2009 Oracle.  All rights reserved.
 *
 * $Id$
 */
#include "bench.h"

static int b_util_testdir_remove __P((char *));

/*
 * b_util_have_hash --
 *	Return 0 if this library build supports the Hash access method
 *	(always supported before DB 4.2), else print a diagnostic and
 *	return 1.
 */
int
b_util_have_hash()
{
#if defined(HAVE_HASH) ||\
	DB_VERSION_MAJOR < 4 || DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR < 2
	return (0);
#else
	fprintf(stderr,
	    "library build did not include support for the Hash access method\n");
	return (1);
#endif
}

/*
 * b_util_have_queue --
 *	Return 0 if this library build supports the Queue access method
 *	(always supported before DB 4.2), else print a diagnostic and
 *	return 1.
 */
int
b_util_have_queue()
{
#if defined(HAVE_QUEUE) ||\
	DB_VERSION_MAJOR < 4 || DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR < 2
	return (0);
#else
	fprintf(stderr,
	    "library build did not include support for the Queue access method\n");
	return (1);
#endif
}

/*
 * b_util_dir_setup --
 *	Create the test directory.  Returns 0 on success, 1 on failure
 *	(after printing the error).  Uses the internal __os_mkdir on
 *	DB >= 4.4, plain mkdir otherwise.
 */
int
b_util_dir_setup()
{
	int ret;

#if DB_VERSION_MAJOR > 4 || DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 3
	if ((ret = __os_mkdir(NULL, TESTDIR, 0755)) != 0) {
#else
	if ((ret = mkdir(TESTDIR, 0755)) != 0) {
#endif
		fprintf(stderr,
		    "%s: %s: %s\n", progname, TESTDIR, db_strerror(ret));
		return (1);
	}
	return (0);
}

/*
 * __os_exists grew an ENV argument in DB 4.5; hide the difference.
 */
#if DB_VERSION_MAJOR > 4 || DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 4
#define	OS_EXISTS(a, b, c)	__os_exists(a, b, c)
#else
#define	OS_EXISTS(a, b, c)	__os_exists(b, c)
#endif

/*
 * b_util_dir_teardown
 *	Clean up the test directory.  Removes TESTFILE if present, then
 *	the directory and its contents.  Returns 0 on success, 1 on error.
 */
int
b_util_dir_teardown()
{
	int ret;

	if (OS_EXISTS(NULL, TESTFILE, NULL) == 0 &&
	    (ret = b_util_unlink(TESTFILE)) != 0) {
		fprintf(stderr,
		    "%s: %s: %s\n", progname, TESTFILE, db_strerror(ret));
		return (1);
	}
	return (b_util_testdir_remove(TESTDIR) ? 1 : 0);
}

/*
 * testdir_remove --
 *	Remove a directory and all its contents, the "dir" must contain no
 *	subdirectories, because testdir_remove will not recursively delete
 *	all subdirectories.
 *	Returns 0 on success or when the directory does not exist; a DB
 *	error code (already reported to stderr) otherwise.
 */
static int
b_util_testdir_remove(dir)
	char *dir;
{
	int cnt, i, isdir, ret;
	char buf[1024], **names;

	ret = 0;

	/* If the directory doesn't exist, we're done. */
	if (OS_EXISTS(NULL, dir, &isdir) != 0)
		return (0);

	/* Get a list of the directory contents. */
#if DB_VERSION_MAJOR > 4 || DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 6
	if ((ret = __os_dirlist(NULL, dir, 0, &names, &cnt)) != 0)
		return (ret);
#else
	if ((ret = __os_dirlist(NULL, dir, &names, &cnt)) != 0)
		return (ret);
#endif
	/* Go through the file name list, remove each file in the list */
	for (i = 0; i < cnt; ++i) {
		(void)snprintf(buf, sizeof(buf),
		    "%s%c%s", dir, PATH_SEPARATOR[0], names[i]);
		if ((ret = OS_EXISTS(NULL, buf, &isdir)) != 0)
			goto file_err;
		if (!isdir && (ret = b_util_unlink(buf)) != 0) {
file_err:		fprintf(stderr, "%s: %s: %s\n",
			    progname, buf, db_strerror(ret));
			break;
		}
	}
	__os_dirfree(NULL, names, cnt);

	/*
	 * If we removed the contents of the directory, remove the directory
	 * itself.
	 */
	if (i == cnt && (ret = rmdir(dir)) != 0)
		fprintf(stderr,
		    "%s: %s: %s\n", progname, dir, db_strerror(errno));
	return (ret);
}

/*
 * b_util_abort --
 *	Version-portable wrapper for aborting the process; the internal
 *	__os_abort appeared in DB 4.6 and gained an ENV arg in 4.7.
 */
void
b_util_abort()
{
#if DB_VERSION_MAJOR < 4 || DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR < 6
	abort();
#elif DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 6
	__os_abort();
#else
	__os_abort(NULL);
#endif
}

/*
 * b_util_unlink --
 *	Version-portable wrapper for __os_unlink, which grew a "durable"
 *	flag argument in DB 4.7.  Returns a DB error code or 0.
 */
int
b_util_unlink(path)
	char *path;
{
#if DB_VERSION_MAJOR < 4 || DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR < 7
	return (__os_unlink(NULL, path));
#else
	return (__os_unlink(NULL, path, 0));
#endif
}
gpl-2.0
csimmonds/rowboat-kernel
drivers/pcmcia/vrc4173_cardu.c
298
15252
/* * FILE NAME * drivers/pcmcia/vrc4173_cardu.c * * BRIEF MODULE DESCRIPTION * NEC VRC4173 CARDU driver for Socket Services * (This device doesn't support CardBus. it is supporting only 16bit PC Card.) * * Copyright 2002,2003 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/types.h> #include <asm/io.h> #include <pcmcia/ss.h> #include "vrc4173_cardu.h" MODULE_DESCRIPTION("NEC VRC4173 CARDU driver for Socket Services"); MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); MODULE_LICENSE("GPL"); static int vrc4173_cardu_slots; static vrc4173_socket_t cardu_sockets[CARDU_MAX_SOCKETS]; extern struct socket_info_t *pcmcia_register_socket (int slot, struct pccard_operations *vtable, int use_bus_pm); extern void pcmcia_unregister_socket(struct socket_info_t *s); static inline uint8_t exca_readb(vrc4173_socket_t *socket, uint16_t offset) { return readb(socket->base + EXCA_REGS_BASE + offset); } static inline uint16_t exca_readw(vrc4173_socket_t *socket, uint16_t offset) { uint16_t val; val = readb(socket->base + EXCA_REGS_BASE + offset); val |= (u16)readb(socket->base + EXCA_REGS_BASE + offset + 1) << 8; return val; } static inline void exca_writeb(vrc4173_socket_t *socket, uint16_t offset, uint8_t val) { writeb(val, socket->base + EXCA_REGS_BASE + offset); } static inline void exca_writew(vrc4173_socket_t *socket, uint8_t offset, uint16_t val) { writeb((u8)val, socket->base + EXCA_REGS_BASE + offset); writeb((u8)(val >> 8), socket->base + EXCA_REGS_BASE + offset + 1); } static inline uint32_t cardbus_socket_readl(vrc4173_socket_t *socket, u16 offset) { return readl(socket->base + CARDBUS_SOCKET_REGS_BASE + offset); } static inline void cardbus_socket_writel(vrc4173_socket_t *socket, u16 offset, uint32_t val) { writel(val, socket->base + CARDBUS_SOCKET_REGS_BASE + offset); } static void cardu_pciregs_init(struct pci_dev *dev) { u32 syscnt; u16 brgcnt; u8 devcnt; pci_write_config_dword(dev, 0x1c, 0x10000000); pci_write_config_dword(dev, 0x20, 0x17fff000); pci_write_config_dword(dev, 0x2c, 0); pci_write_config_dword(dev, 0x30, 0xfffc); pci_read_config_word(dev, BRGCNT, &brgcnt); brgcnt &= ~IREQ_INT; 
pci_write_config_word(dev, BRGCNT, brgcnt); pci_read_config_dword(dev, SYSCNT, &syscnt); syscnt &= ~(BAD_VCC_REQ_DISB|PCPCI_EN|CH_ASSIGN_MASK|SUB_ID_WR_EN|PCI_CLK_RIN); syscnt |= (CH_ASSIGN_NODMA|ASYN_INT_MODE); pci_write_config_dword(dev, SYSCNT, syscnt); pci_read_config_byte(dev, DEVCNT, &devcnt); devcnt &= ~(ZOOM_VIDEO_EN|SR_PCI_INT_SEL_MASK|PCI_INT_MODE|IRQ_MODE); devcnt |= (SR_PCI_INT_SEL_NONE|IFG); pci_write_config_byte(dev, DEVCNT, devcnt); pci_write_config_byte(dev, CHIPCNT, S_PREF_DISB); pci_write_config_byte(dev, SERRDIS, 0); } static int cardu_init(unsigned int slot) { vrc4173_socket_t *socket = &cardu_sockets[slot]; cardu_pciregs_init(socket->dev); /* CARD_SC bits are cleared by reading CARD_SC. */ exca_writeb(socket, GLO_CNT, 0); socket->cap.features |= SS_CAP_PCCARD | SS_CAP_PAGE_REGS; socket->cap.irq_mask = 0; socket->cap.map_size = 0x1000; socket->cap.pci_irq = socket->dev->irq; socket->events = 0; spin_lock_init(socket->event_lock); /* Enable PC Card status interrupts */ exca_writeb(socket, CARD_SCI, CARD_DT_EN|RDY_EN|BAT_WAR_EN|BAT_DEAD_EN); return 0; } static int cardu_register_callback(unsigned int sock, void (*handler)(void *, unsigned int), void * info) { vrc4173_socket_t *socket = &cardu_sockets[sock]; socket->handler = handler; socket->info = info; return 0; } static int cardu_inquire_socket(unsigned int sock, socket_cap_t *cap) { vrc4173_socket_t *socket = &cardu_sockets[sock]; *cap = socket->cap; return 0; } static int cardu_get_status(unsigned int sock, u_int *value) { vrc4173_socket_t *socket = &cardu_sockets[sock]; uint32_t state; uint8_t status; u_int val = 0; status = exca_readb(socket, IF_STATUS); if (status & CARD_PWR) val |= SS_POWERON; if (status & READY) val |= SS_READY; if (status & CARD_WP) val |= SS_WRPROT; if ((status & (CARD_DETECT1|CARD_DETECT2)) == (CARD_DETECT1|CARD_DETECT2)) val |= SS_DETECT; if (exca_readb(socket, INT_GEN_CNT) & CARD_TYPE_IO) { if (status & STSCHG) val |= SS_STSCHG; } else { status &= BV_DETECT_MASK; if 
(status != BV_DETECT_GOOD) { if (status == BV_DETECT_WARN) val |= SS_BATWARN; else val |= SS_BATDEAD; } } state = cardbus_socket_readl(socket, SKT_PRE_STATE); if (state & VOL_3V_CARD_DT) val |= SS_3VCARD; if (state & VOL_XV_CARD_DT) val |= SS_XVCARD; if (state & CB_CARD_DT) val |= SS_CARDBUS; if (!(state & (VOL_YV_CARD_DT|VOL_XV_CARD_DT|VOL_3V_CARD_DT|VOL_5V_CARD_DT|CCD20|CCD10))) val |= SS_PENDING; *value = val; return 0; } static inline uint8_t set_Vcc_value(u_char Vcc) { switch (Vcc) { case 33: return VCC_3V; case 50: return VCC_5V; } return VCC_0V; } static inline uint8_t set_Vpp_value(u_char Vpp) { switch (Vpp) { case 33: case 50: return VPP_VCC; case 120: return VPP_12V; } return VPP_0V; } static int cardu_set_socket(unsigned int sock, socket_state_t *state) { vrc4173_socket_t *socket = &cardu_sockets[sock]; uint8_t val; if (((state->Vpp == 33) || (state->Vpp == 50)) && (state->Vpp != state->Vcc)) return -EINVAL; val = set_Vcc_value(state->Vcc); val |= set_Vpp_value(state->Vpp); if (state->flags & SS_OUTPUT_ENA) val |= CARD_OUT_EN; exca_writeb(socket, PWR_CNT, val); val = exca_readb(socket, INT_GEN_CNT) & CARD_REST0; if (state->flags & SS_RESET) val &= ~CARD_REST0; else val |= CARD_REST0; if (state->flags & SS_IOCARD) val |= CARD_TYPE_IO; exca_writeb(socket, INT_GEN_CNT, val); return 0; } static int cardu_get_io_map(unsigned int sock, struct pccard_io_map *io) { vrc4173_socket_t *socket = &cardu_sockets[sock]; uint8_t ioctl, window; u_char map; map = io->map; if (map > 1) return -EINVAL; io->start = exca_readw(socket, IO_WIN_SA(map)); io->stop = exca_readw(socket, IO_WIN_EA(map)); ioctl = exca_readb(socket, IO_WIN_CNT); window = exca_readb(socket, ADR_WIN_EN); io->flags = (window & IO_WIN_EN(map)) ? 
MAP_ACTIVE : 0; if (ioctl & IO_WIN_DATA_AUTOSZ(map)) io->flags |= MAP_AUTOSZ; else if (ioctl & IO_WIN_DATA_16BIT(map)) io->flags |= MAP_16BIT; return 0; } static int cardu_set_io_map(unsigned int sock, struct pccard_io_map *io) { vrc4173_socket_t *socket = &cardu_sockets[sock]; uint16_t ioctl; uint8_t window, enable; u_char map; map = io->map; if (map > 1) return -EINVAL; window = exca_readb(socket, ADR_WIN_EN); enable = IO_WIN_EN(map); if (window & enable) { window &= ~enable; exca_writeb(socket, ADR_WIN_EN, window); } exca_writew(socket, IO_WIN_SA(map), io->start); exca_writew(socket, IO_WIN_EA(map), io->stop); ioctl = exca_readb(socket, IO_WIN_CNT) & ~IO_WIN_CNT_MASK(map); if (io->flags & MAP_AUTOSZ) ioctl |= IO_WIN_DATA_AUTOSZ(map); else if (io->flags & MAP_16BIT) ioctl |= IO_WIN_DATA_16BIT(map); exca_writeb(socket, IO_WIN_CNT, ioctl); if (io->flags & MAP_ACTIVE) exca_writeb(socket, ADR_WIN_EN, window | enable); return 0; } static int cardu_get_mem_map(unsigned int sock, struct pccard_mem_map *mem) { vrc4173_socket_t *socket = &cardu_sockets[sock]; uint32_t start, stop, offset, page; uint8_t window; u_char map; map = mem->map; if (map > 4) return -EINVAL; window = exca_readb(socket, ADR_WIN_EN); mem->flags = (window & MEM_WIN_EN(map)) ? MAP_ACTIVE : 0; start = exca_readw(socket, MEM_WIN_SA(map)); mem->flags |= (start & MEM_WIN_DSIZE) ? MAP_16BIT : 0; start = (start & 0x0fff) << 12; stop = exca_readw(socket, MEM_WIN_EA(map)); stop = ((stop & 0x0fff) << 12) + 0x0fff; offset = exca_readw(socket, MEM_WIN_OA(map)); mem->flags |= (offset & MEM_WIN_WP) ? MAP_WRPROT : 0; mem->flags |= (offset & MEM_WIN_REGSET) ? 
MAP_ATTRIB : 0; offset = ((offset & 0x3fff) << 12) + start; mem->card_start = offset & 0x03ffffff; page = exca_readb(socket, MEM_WIN_SAU(map)) << 24; mem->sys_start = start + page; mem->sys_stop = start + page; return 0; } static int cardu_set_mem_map(unsigned int sock, struct pccard_mem_map *mem) { vrc4173_socket_t *socket = &cardu_sockets[sock]; uint16_t value; uint8_t window, enable; u_long sys_start, sys_stop, card_start; u_char map; map = mem->map; sys_start = mem->sys_start; sys_stop = mem->sys_stop; card_start = mem->card_start; if (map > 4 || sys_start > sys_stop || ((sys_start ^ sys_stop) >> 24) || (card_start >> 26)) return -EINVAL; window = exca_readb(socket, ADR_WIN_EN); enable = MEM_WIN_EN(map); if (window & enable) { window &= ~enable; exca_writeb(socket, ADR_WIN_EN, window); } exca_writeb(socket, MEM_WIN_SAU(map), sys_start >> 24); value = (sys_start >> 12) & 0x0fff; if (mem->flags & MAP_16BIT) value |= MEM_WIN_DSIZE; exca_writew(socket, MEM_WIN_SA(map), value); value = (sys_stop >> 12) & 0x0fff; exca_writew(socket, MEM_WIN_EA(map), value); value = ((card_start - sys_start) >> 12) & 0x3fff; if (mem->flags & MAP_WRPROT) value |= MEM_WIN_WP; if (mem->flags & MAP_ATTRIB) value |= MEM_WIN_REGSET; exca_writew(socket, MEM_WIN_OA(map), value); if (mem->flags & MAP_ACTIVE) exca_writeb(socket, ADR_WIN_EN, window | enable); return 0; } static void cardu_proc_setup(unsigned int sock, struct proc_dir_entry *base) { } static struct pccard_operations cardu_operations = { .init = cardu_init, .register_callback = cardu_register_callback, .inquire_socket = cardu_inquire_socket, .get_status = cardu_get_status, .set_socket = cardu_set_socket, .get_io_map = cardu_get_io_map, .set_io_map = cardu_set_io_map, .get_mem_map = cardu_get_mem_map, .set_mem_map = cardu_set_mem_map, .proc_setup = cardu_proc_setup, }; static void cardu_bh(void *data) { vrc4173_socket_t *socket = (vrc4173_socket_t *)data; uint16_t events; spin_lock_irq(&socket->event_lock); events = socket->events; 
socket->events = 0; spin_unlock_irq(&socket->event_lock); if (socket->handler) socket->handler(socket->info, events); } static uint16_t get_events(vrc4173_socket_t *socket) { uint16_t events = 0; uint8_t csc, status; status = exca_readb(socket, IF_STATUS); csc = exca_readb(socket, CARD_SC); if ((csc & CARD_DT_CHG) && ((status & (CARD_DETECT1|CARD_DETECT2)) == (CARD_DETECT1|CARD_DETECT2))) events |= SS_DETECT; if ((csc & RDY_CHG) && (status & READY)) events |= SS_READY; if (exca_readb(socket, INT_GEN_CNT) & CARD_TYPE_IO) { if ((csc & BAT_DEAD_ST_CHG) && (status & STSCHG)) events |= SS_STSCHG; } else { if (csc & (BAT_WAR_CHG|BAT_DEAD_ST_CHG)) { if ((status & BV_DETECT_MASK) != BV_DETECT_GOOD) { if (status == BV_DETECT_WARN) events |= SS_BATWARN; else events |= SS_BATDEAD; } } } return events; } static void cardu_interrupt(int irq, void *dev_id) { vrc4173_socket_t *socket = (vrc4173_socket_t *)dev_id; uint16_t events; INIT_WORK(&socket->tq_work, cardu_bh, socket); events = get_events(socket); if (events) { spin_lock(&socket->event_lock); socket->events |= events; spin_unlock(&socket->event_lock); schedule_work(&socket->tq_work); } } static int __devinit vrc4173_cardu_probe(struct pci_dev *dev, const struct pci_device_id *ent) { vrc4173_socket_t *socket; unsigned long start, len, flags; int slot, err; slot = vrc4173_cardu_slots++; socket = &cardu_sockets[slot]; if (socket->noprobe != 0) return -EBUSY; sprintf(socket->name, "NEC VRC4173 CARDU%1d", slot+1); if ((err = pci_enable_device(dev)) < 0) return err; start = pci_resource_start(dev, 0); if (start == 0) return -ENODEV; len = pci_resource_len(dev, 0); if (len == 0) return -ENODEV; if (((flags = pci_resource_flags(dev, 0)) & IORESOURCE_MEM) == 0) return -EBUSY; if ((err = pci_request_regions(dev, socket->name)) < 0) return err; socket->base = ioremap(start, len); if (socket->base == NULL) return -ENODEV; socket->dev = dev; socket->pcmcia_socket = pcmcia_register_socket(slot, &cardu_operations, 1); if 
(socket->pcmcia_socket == NULL) { iounmap(socket->base); socket->base = NULL; return -ENOMEM; } if (request_irq(dev->irq, cardu_interrupt, IRQF_SHARED, socket->name, socket) < 0) { pcmcia_unregister_socket(socket->pcmcia_socket); socket->pcmcia_socket = NULL; iounmap(socket->base); socket->base = NULL; return -EBUSY; } printk(KERN_INFO "%s at %#08lx, IRQ %d\n", socket->name, start, dev->irq); return 0; } static int __devinit vrc4173_cardu_setup(char *options) { if (options == NULL || *options == '\0') return 1; if (strncmp(options, "cardu1:", 7) == 0) { options += 7; if (*options != '\0') { if (strncmp(options, "noprobe", 7) == 0) { cardu_sockets[CARDU1].noprobe = 1; options += 7; } if (*options != ',') return 1; } else return 1; } if (strncmp(options, "cardu2:", 7) == 0) { options += 7; if ((*options != '\0') && (strncmp(options, "noprobe", 7) == 0)) cardu_sockets[CARDU2].noprobe = 1; } return 1; } __setup("vrc4173_cardu=", vrc4173_cardu_setup); static struct pci_device_id vrc4173_cardu_id_table[] __devinitdata = { { .vendor = PCI_VENDOR_ID_NEC, .device = PCI_DEVICE_ID_NEC_NAPCCARD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, {0, } }; static struct pci_driver vrc4173_cardu_driver = { .name = "NEC VRC4173 CARDU", .probe = vrc4173_cardu_probe, .id_table = vrc4173_cardu_id_table, }; static int __devinit vrc4173_cardu_init(void) { vrc4173_cardu_slots = 0; return pci_register_driver(&vrc4173_cardu_driver); } static void __devexit vrc4173_cardu_exit(void) { pci_unregister_driver(&vrc4173_cardu_driver); } module_init(vrc4173_cardu_init); module_exit(vrc4173_cardu_exit); MODULE_DEVICE_TABLE(pci, vrc4173_cardu_id_table);
gpl-2.0
freddy77/linux
drivers/staging/vt6655/channel.c
298
78470
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: channel.c * */ #include "baseband.h" #include "country.h" #include "channel.h" #include "device.h" #include "rf.h" /*--------------------- Static Definitions -------------------------*/ #define CARD_MAX_CHANNEL_TBL 56 //static int msglevel = MSG_LEVEL_DEBUG; static int msglevel = MSG_LEVEL_INFO; /*--------------------- Static Variables --------------------------*/ static SChannelTblElement sChannelTbl[CARD_MAX_CHANNEL_TBL + 1] = { {0, 0, false, 0}, {1, 2412, true, 0}, {2, 2417, true, 0}, {3, 2422, true, 0}, {4, 2427, true, 0}, {5, 2432, true, 0}, {6, 2437, true, 0}, {7, 2442, true, 0}, {8, 2447, true, 0}, {9, 2452, true, 0}, {10, 2457, true, 0}, {11, 2462, true, 0}, {12, 2467, true, 0}, {13, 2472, true, 0}, {14, 2484, true, 0}, {183, 4915, true, 0}, {184, 4920, true, 0}, {185, 4925, true, 0}, {187, 4935, true, 0}, {188, 4940, true, 0}, {189, 4945, true, 0}, {192, 4960, true, 0}, {196, 4980, true, 0}, {7, 5035, true, 0}, {8, 5040, true, 0}, {9, 5045, true, 0}, {11, 5055, true, 0}, {12, 5060, true, 0}, {16, 5080, true, 0}, {34, 5170, true, 0}, {36, 5180, true, 0}, {38, 5190, true, 0}, {40, 5200, true, 0}, {42, 5210, true, 0}, {44, 5220, true, 0}, {46, 5230, true, 0}, {48, 5240, 
true, 0}, {52, 5260, true, 0}, {56, 5280, true, 0}, {60, 5300, true, 0}, {64, 5320, true, 0}, {100, 5500, true, 0}, {104, 5520, true, 0}, {108, 5540, true, 0}, {112, 5560, true, 0}, {116, 5580, true, 0}, {120, 5600, true, 0}, {124, 5620, true, 0}, {128, 5640, true, 0}, {132, 5660, true, 0}, {136, 5680, true, 0}, {140, 5700, true, 0}, {149, 5745, true, 0}, {153, 5765, true, 0}, {157, 5785, true, 0}, {161, 5805, true, 0}, {165, 5825, true, 0} }; /************************************************************************ * The Radar regulation rules for each country ************************************************************************/ static struct { unsigned char byChannelCountryCode; /* The country code */ char chCountryCode[2]; unsigned char bChannelIdxList[CB_MAX_CHANNEL]; /* Available channels Index */ unsigned char byPower[CB_MAX_CHANNEL]; } ChannelRuleTab[] = { /************************************************************************ * This table is based on Athero driver rules ************************************************************************/ /* Country Available channels, ended with 0 */ /* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 */ {CCODE_FCC, {'U' , 'S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_TELEC, {'J' , 'P'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 23, 0, 0, 23, 0, 23, 23, 0, 23, 0, 0, 23, 23, 23, 0, 23, 0, 
23, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_ETSI, {'E' , 'U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_RESV3, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_RESV4, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_RESV5, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_RESV6, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_RESV7, {' ' , ' '}, { 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_RESV8, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_RESV9, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_RESVa, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_RESVb, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_RESVc, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_RESVd, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_RESVe, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_ALLBAND, {' ' , ' '}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_ALBANIA, {'A' , 'L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_ALGERIA, {'D' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_ARGENTINA, {'A' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 17, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 0} }, {CCODE_ARMENIA, {'A' , 'M'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_AUSTRALIA, {'A' , 'U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_AUSTRIA, {'A' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 15, 0, 15, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_AZERBAIJAN, {'A' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_BAHRAIN, {'B' , 'H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_BELARUS, {'B' , 'Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_BELGIUM, {'B' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_BELIZE, {'B' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_BOLIVIA, {'B' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_BRAZIL, {'B' , 
'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_BRUNEI_DARUSSALAM, {'B' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_BULGARIA, {'B' , 'G'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 0, 0, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0} }, {CCODE_CANADA, {'C' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_CHILE, {'C' , 'L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 17, 17, 17, 17} }, {CCODE_CHINA, {'C' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_COLOMBIA, {'C' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_COSTA_RICA, {'C' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_CROATIA, {'H' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_CYPRUS, {'C' , 'Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_CZECH, {'C' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_DENMARK, {'D' , 'K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_DOMINICAN_REPUBLIC, {'D' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_ECUADOR, {'E' , 'C'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_EGYPT, {'E' , 'G'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_EL_SALVADOR, {'S' , 'V'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_ESTONIA, {'E' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_FINLAND, {'F' , 'I'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_FRANCE, {'F' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_GERMANY, {'D' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_GREECE, {'G' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_GEORGIA, {'G' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_GUATEMALA, {'G' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_HONDURAS, {'H' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_HONG_KONG, {'H' , 'K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_HUNGARY, {'H' , 'U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_ICELAND, {'I' , 'S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_INDIA, {'I' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_INDONESIA, {'I' , 'D'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_IRAN, {'I' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_IRELAND, {'I' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_ITALY, {'I' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_ISRAEL, {'I' , 'L'}, { 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_JAPAN, {'J' , 'P'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_JORDAN, {'J' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_KAZAKHSTAN, {'K' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_KUWAIT, {'K' , 'W'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_LATVIA, {'L' , 'V'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_LEBANON, {'L' , 'B'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_LEICHTENSTEIN, {'L' , 'I'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_LITHUANIA, {'L' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, 
{CCODE_LUXEMBURG, {'L' , 'U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_MACAU, {'M' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_MACEDONIA, {'M' , 'K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_MALTA, {'M' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0} , { 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 16, 0, 16, 0, 16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0} }, {CCODE_MALAYSIA, {'M' , 'Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_MEXICO, {'M' , 'X'}, { 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_MONACO, {'M' , 'C'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_MOROCCO, {'M' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_NETHERLANDS, {'N' , 'L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_NEW_ZEALAND, {'N' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_NORTH_KOREA, {'K' , 'P'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} }, {CCODE_NORWAY, {'N' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_OMAN, {'O' , 'M'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_PAKISTAN, {'P' , 'K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_PANAMA, {'P' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_PERU, {'P' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_PHILIPPINES, {'P' , 'H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_POLAND, {'P' , 'L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_PORTUGAL, {'P' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_PUERTO_RICO, {'P' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_QATAR, {'Q' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_ROMANIA, {'R' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_RUSSIA, {'R' , 'U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_SAUDI_ARABIA, {'S' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_SINGAPORE, {'S' , 'G'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 20, 20, 20, 20} }, {CCODE_SLOVAKIA, {'S' , 'K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0} , { 23, 23, 23, 23, 23, 23, 23, 23, 
23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 16, 0, 16, 0, 16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0} }, {CCODE_SLOVENIA, {'S' , 'I'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_SOUTH_AFRICA, {'Z' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_SOUTH_KOREA, {'K' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} }, {CCODE_SPAIN, {'E' , 'S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0} , { 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 16, 0, 16, 0, 16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0} }, {CCODE_SWEDEN, {'S' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 
20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_SWITZERLAND, {'C' , 'H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_SYRIA, {'S' , 'Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_TAIWAN, {'T' , 'W'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 17, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 0} }, {CCODE_THAILAND, {'T' , 'H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} }, {CCODE_TRINIDAD_TOBAGO, {'T' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_TUNISIA, {'T' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_TURKEY, {'T' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_UK, {'G' , 'B'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} }, {CCODE_UKRAINE, {'U' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_UNITED_ARAB_EMIRATES, {'A' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_UNITED_STATES, {'U' , 'S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1} , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} }, {CCODE_URUGUAY, {'U' , 'Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} }, {CCODE_UZBEKISTAN, {'U' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_VENEZUELA, {'V' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0} , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} }, {CCODE_VIETNAM, {'V' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_YEMEN, {'Y' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_ZIMBABWE, {'Z' , 'W'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_JAPAN_W52_W53, {'J' , 'J'}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {CCODE_MAX, {'U' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} } /* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 */ }; /*--------------------- Export Functions --------------------------*/ /** * is_channel_valid() - Is Country Channel Valid * @ChanneIndex: defined as VT3253 MAC channel: * 1 = 2.4G channel 1 * 2 = 2.4G channel 2 * ... * 14 = 2.4G channel 14 * 15 = 4.9G channel 183 * 16 = 4.9G channel 184 * ..... 
 * Output: true if the specified 5GHz band is allowed to be used,
 * false otherwise.
 * 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22)
 *
 * 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
 * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
 */
bool is_channel_valid(unsigned int ChannelIndex)
{
	bool bValid;

	bValid = false;
	/*
	 * If Channel Index is invalid, return invalid
	 */
	if ((ChannelIndex > CB_MAX_CHANNEL) || (ChannelIndex == 0)) {
		bValid = false;
		goto exit;
	}
	/* sChannelTbl is 1-based: entry 0 is unused, entries 1..CB_MAX_CHANNEL
	 * hold the per-channel validity computed by init_channel_table(). */
	bValid = sChannelTbl[ChannelIndex].bValid;

exit:
	return bValid;
}

/**
 * channel_get_list() - Get Available Channel List for a given country
 * @uCountryCodeIdx: index into ChannelRuleTab (one of the CCODE_* values)
 * @pbyChannelTable: output buffer, must hold at least CB_MAX_CHANNEL bytes
 *
 * Copies the raw per-channel allow flags (one byte per channel index,
 * non-zero = allowed) for the given country into @pbyChannelTable.
 *
 * Return: false if @uCountryCodeIdx is out of range, true otherwise.
 *
 * NOTE(review): the original comment described a QWORD bit mask, but the
 * code actually copies CB_MAX_CHANNEL bytes (one flag per channel).
 */
bool channel_get_list(unsigned int uCountryCodeIdx, unsigned char *pbyChannelTable)
{
	if (uCountryCodeIdx >= CCODE_MAX)
		return false;

	memcpy(pbyChannelTable, ChannelRuleTab[uCountryCodeIdx].bChannelIdxList, CB_MAX_CHANNEL);
	return true;
}

/**
 * init_channel_table() - Build the channel validity/power tables for a device
 * @pDeviceHandler: opaque device pointer, actually a PSDevice
 *
 * Marks every channel invalid, decides from the RF chip type whether the
 * hardware is multi-band (2.4G + 5G) or 2.4G only, then re-enables channels
 * and fills abyRegPwr/abyLocalPwr either unconditionally (diagnostic mode or
 * 802.11h enabled) or according to the regulatory rules for the device's
 * zone type.  Finally, any power entry left at 0 falls back to the OFDM
 * default so no enabled channel transmits with power 0.
 *
 * Note the index offset throughout: bChannelIdxList/byPower are 0-based
 * while sChannelTbl/abyRegPwr/abyLocalPwr are 1-based.
 */
void init_channel_table(void *pDeviceHandler)
{
	PSDevice pDevice = (PSDevice) pDeviceHandler;
	bool bMultiBand = false;
	unsigned int ii;

	/* Start from a clean slate: no channel is usable until proven so. */
	for (ii = 1; ii <= CARD_MAX_CHANNEL_TBL; ii++) {
		sChannelTbl[ii].bValid = false;
	}

	/* Only some RF front-ends support the 5GHz band.  Unknown chips
	 * (RF_NOTHING/default) are treated as multi-band. */
	switch (pDevice->byRFType) {
	case RF_RFMD2959:
	case RF_AIROHA:
	case RF_AL2230S:
	case RF_UW2451:
	case RF_VT3226:
		bMultiBand = false;
		break;
	case RF_AIROHA7230:
	case RF_UW2452:
	case RF_NOTHING:
	default:
		bMultiBand = true;
		break;
	}

	if ((pDevice->dwDiagRefCount != 0) || pDevice->b11hEnable) {
		/* Diagnostic mode or 802.11h: open up channels without
		 * consulting the country rule table. */
		if (bMultiBand) {
			for (ii = 0; ii < CARD_MAX_CHANNEL_TBL; ii++) {
				sChannelTbl[ii + 1].bValid = true;
				pDevice->abyRegPwr[ii + 1] = pDevice->abyOFDMDefaultPwr[ii + 1];
				pDevice->abyLocalPwr[ii + 1] = pDevice->abyOFDMDefaultPwr[ii + 1];
			}
			/* 2.4G channels then get overwritten with the CCK defaults. */
			for (ii = 0; ii < CHANNEL_MAX_24G; ii++) {
				pDevice->abyRegPwr[ii + 1] = pDevice->abyCCKDefaultPwr[ii + 1];
				pDevice->abyLocalPwr[ii + 1] = pDevice->abyCCKDefaultPwr[ii + 1];
			}
		} else {
			for (ii = 0; ii < CHANNEL_MAX_24G; ii++) {
				//2008-8-4 <add> by chester
				/* Single-band: still honor the zone's 2.4G allow list. */
				if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
					sChannelTbl[ii + 1].bValid = true;
					pDevice->abyRegPwr[ii + 1] = pDevice->abyCCKDefaultPwr[ii + 1];
					pDevice->abyLocalPwr[ii + 1] = pDevice->abyCCKDefaultPwr[ii + 1];
				}
			}
		}
	} else if (pDevice->byZoneType <= CCODE_MAX) {
		/* Normal operation: enable only what the country rule table
		 * allows, using the table's per-channel power values.
		 * NOTE(review): "<= CCODE_MAX" is valid here because
		 * ChannelRuleTab contains a catch-all entry at index CCODE_MAX. */
		if (bMultiBand) {
			for (ii = 0; ii < CARD_MAX_CHANNEL_TBL; ii++) {
				if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
					sChannelTbl[ii + 1].bValid = true;
					pDevice->abyRegPwr[ii + 1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
					pDevice->abyLocalPwr[ii + 1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
				}
			}
		} else {
			for (ii = 0; ii < CHANNEL_MAX_24G; ii++) {
				if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
					sChannelTbl[ii + 1].bValid = true;
					pDevice->abyRegPwr[ii + 1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
					pDevice->abyLocalPwr[ii + 1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
				}
			}
		}
	}

	DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Zone=[%d][%c][%c]!!\n", pDevice->byZoneType, ChannelRuleTab[pDevice->byZoneType].chCountryCode[0], ChannelRuleTab[pDevice->byZoneType].chCountryCode[1]);

	/* A power value of 0 means "unset" in the rule table; fall back to the
	 * device's OFDM default so enabled channels always have a power level. */
	for (ii = 0; ii < CARD_MAX_CHANNEL_TBL; ii++) {
		if (pDevice->abyRegPwr[ii + 1] == 0)
			pDevice->abyRegPwr[ii + 1] = pDevice->abyOFDMDefaultPwr[ii + 1];
		if (pDevice->abyLocalPwr[ii + 1] == 0)
			pDevice->abyLocalPwr[ii + 1] = pDevice->abyOFDMDefaultPwr[ii + 1];
	}
}

/**
 * get_channel_mapping() - Map an IEEE channel number to a table index
 * @pDeviceHandler: opaque device pointer (unused)
 * @byChannelNumber: IEEE channel number (e.g. 36, 149, ...)
 * @ePhyType: PHY type the number belongs to
 *
 * For 11B/11G the channel number IS the table index (1..14), so it is
 * returned unchanged.  For 5GHz the 5G portion of sChannelTbl is searched
 * linearly for a matching byChannelNumber.
 *
 * Return: the 1-based sChannelTbl index, or 0 if the number is unknown.
 */
unsigned char get_channel_mapping(void *pDeviceHandler, unsigned char byChannelNumber, CARD_PHY_TYPE ePhyType)
{
	unsigned int ii;

	if ((ePhyType == PHY_TYPE_11B) || (ePhyType == PHY_TYPE_11G))
		return byChannelNumber;

	for (ii = (CB_MAX_CHANNEL_24G + 1); ii <= CB_MAX_CHANNEL;) {
		if (sChannelTbl[ii].byChannelNumber == byChannelNumber)
			return (unsigned char) ii;
		ii++;
	}
	return 0;
}

/**
 * get_channel_number() - Inverse of get_channel_mapping()
 * @pDeviceHandler: opaque device pointer (unused)
 * @byChannelIndex: 1-based sChannelTbl index
 *
 * Return: the IEEE channel number stored for that index.
 * NOTE(review): @byChannelIndex is not range-checked; callers must pass
 * a valid index.
 */
unsigned char get_channel_number(void *pDeviceHandler, unsigned char byChannelIndex)
{
	//PSDevice pDevice = (PSDevice) pDeviceHandler;
	return sChannelTbl[byChannelIndex].byChannelNumber;
}

/**
 * set_channel() - Set NIC media channel
 *
 * @pDeviceHandler: The adapter to be set
 * @uConnectionChannel: Channel to be set (1-based sChannelTbl index)
 *
 * Switches PHY type if the target channel crosses the 2.4G/5G boundary,
 * clears NAV, programs the RF synthesizer, and reloads the per-rate power
 * registers for the new channel.
 *
 * Return Value: true if succeeded; false if failed.
 *
 */
bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
{
	PSDevice pDevice = (PSDevice) pDeviceHandler;
	bool bResult = true;

	/* Already on the requested channel: nothing to do. */
	if (pDevice->byCurrentCh == uConnectionChannel) {
		return bResult;
	}

	/* Refuse channels disabled by init_channel_table()/regulatory rules. */
	if (!sChannelTbl[uConnectionChannel].bValid) {
		return false;
	}

	/* Crossing the band boundary requires reprogramming the PHY:
	 * >14 means 5GHz (11A), <=14 means 2.4GHz (11G). */
	if ((uConnectionChannel > CB_MAX_CHANNEL_24G) && (pDevice->eCurrentPHYType != PHY_TYPE_11A)) {
		CARDbSetPhyParameter(pDevice, PHY_TYPE_11A, 0, 0, NULL, NULL);
	} else if ((uConnectionChannel <= CB_MAX_CHANNEL_24G) && (pDevice->eCurrentPHYType == PHY_TYPE_11A)) {
		CARDbSetPhyParameter(pDevice, PHY_TYPE_11G, 0, 0, NULL, NULL);
	}

	// clear NAV
	MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);

	//{{ RobertYu: 20041202
	//// TX_PE will reserve 3 us for MAX2829 A mode only, it is for better TX throughput
	if (pDevice->byRFType == RF_AIROHA7230) {
		RFbAL7230SelectChannelPostProcess(pDevice->PortOffset, pDevice->byCurrentCh, (unsigned char)uConnectionChannel);
	}
	//}} RobertYu

	pDevice->byCurrentCh = (unsigned char)uConnectionChannel;
	bResult &= RFbSelectChannel(pDevice->PortOffset, pDevice->byRFType, (unsigned char)uConnectionChannel);

	// Init Synthesizer Table
	if (pDevice->bEnablePSMode)
		RFvWriteWakeProgSyn(pDevice->PortOffset, pDevice->byRFType, uConnectionChannel);

	//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "CARDbSetMediaChannel: %d\n", (unsigned char)uConnectionChannel);
	BBvSoftwareReset(pDevice->PortOffset);

	if (pDevice->byLocalID > REV_ID_VT3253_B1) {
		// set HW default power register
		/* Page 1 holds the power registers; restore page 0 afterwards
		 * so later MAC accesses hit the normal register set. */
		MACvSelectPage1(pDevice->PortOffset);
		RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh);
		VNSvOutPortB(pDevice->PortOffset + MAC_REG_PWRCCK, pDevice->byCurPwr);
		RFbSetPower(pDevice, RATE_6M, pDevice->byCurrentCh);
		VNSvOutPortB(pDevice->PortOffset + MAC_REG_PWROFDM, pDevice->byCurPwr);
		MACvSelectPage0(pDevice->PortOffset);
	}

	/* Apply the TX power appropriate to the current modulation family:
	 * CCK (RATE_1M) for 11B, OFDM (RATE_6M) otherwise. */
	if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
		RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh);
	} else {
		RFbSetPower(pDevice, RATE_6M, pDevice->byCurrentCh);
	}

	return bResult;
}

/**
 * set_country_info() - Set Channel Info of Country
 * @pDeviceHandler: opaque device pointer, actually a PSDevice
 * @ePHYType: band the Country IE applies to (PHY_TYPE_11A => 5GHz)
 * @pIE: pointer to a received 802.11d Country information element
 *
 * Parses the Country IE's (first channel, number of channels, max power)
 * triplets and re-enables exactly the advertised channels for the given
 * band, recording the regulatory TX power per channel.  5GHz channels are
 * spaced 4 numbers apart, 2.4GHz channels 1 apart (hence 'step').
 *
 * NOTE(review): assumes the IE is well-formed; channel counts are not
 * validated against the table bounds here.
 *
 * Return Value: none.
 *
 */
void set_country_info(void *pDeviceHandler, CARD_PHY_TYPE ePHYType, void *pIE)
{
	PSDevice pDevice = (PSDevice) pDeviceHandler;
	unsigned int ii = 0;
	unsigned int uu = 0;
	unsigned int step = 0;
	unsigned int uNumOfCountryInfo = 0;
	unsigned char byCh = 0;
	PWLAN_IE_COUNTRY pIE_Country = (PWLAN_IE_COUNTRY) pIE;

	/* len covers the 3-byte country string plus 3-byte triplets. */
	uNumOfCountryInfo = (pIE_Country->len - 3);
	uNumOfCountryInfo /= 3;

	if (ePHYType == PHY_TYPE_11A) {
		pDevice->bCountryInfo5G = true;
		/* Invalidate all 5G entries; the IE re-enables the legal ones. */
		for (ii = CB_MAX_CHANNEL_24G + 1; ii <= CARD_MAX_CHANNEL_TBL; ii++) {
			sChannelTbl[ii].bValid = false;
		}
		step = 4;
	} else {
		pDevice->bCountryInfo24G = true;
		for (ii = 1; ii <= CB_MAX_CHANNEL_24G; ii++) {
			sChannelTbl[ii].bValid = false;
		}
		step = 1;
	}
	pDevice->abyCountryCode[0] = pIE_Country->abyCountryString[0];
	pDevice->abyCountryCode[1] = pIE_Country->abyCountryString[1];
	pDevice->abyCountryCode[2] = pIE_Country->abyCountryString[2];

	for (ii = 0; ii < uNumOfCountryInfo; ii++) {
		/* Triplet layout: [first channel][channel count][max power]. */
		for (uu = 0; uu < pIE_Country->abyCountryInfo[ii*3+1]; uu++) {
			byCh = get_channel_mapping(pDevice, (unsigned char)(pIE_Country->abyCountryInfo[ii*3]+step*uu), ePHYType);
			sChannelTbl[byCh].bValid = true;
			pDevice->abyRegPwr[byCh] = pIE_Country->abyCountryInfo[ii*3+2];
		}
	}
}

/**
 *
 * set_support_channels() - Set Support Channels IE defined in 802.11h
 *
 * @pDeviceHandler: device structure pointer
 * @pbyIEs: output buffer that receives the Supported Channels IE
 *
 * Builds (first channel, channel count) tuples for the 5GHz sub-bands the
 * device's zone allows.  The magic indices (28, 29, 36, 40, 51) are the
 * 0-based bChannelIdxList positions of channels 34/36/52/100/149; presence
 * of the first channel of a sub-band selects that sub-band's tuple.
 *
 * Return Value: total IE length in bytes (2-byte header + tuples).
 *
 */
unsigned char set_support_channels(void *pDeviceHandler, unsigned char *pbyIEs)
{
	PSDevice pDevice = (PSDevice) pDeviceHandler;
	unsigned int ii;
	unsigned char byCount;
	PWLAN_IE_SUPP_CH pIE = (PWLAN_IE_SUPP_CH) pbyIEs;
	unsigned char *pbyChTupple;
	unsigned char byLen = 0;

	pIE->byElementID = WLAN_EID_SUPP_CH;
	pIE->len = 0;
	pbyChTupple = pIE->abyChannelTuple;
	byLen = 2;	/* element ID + length byte */
	// lower band
	byCount = 0;
	if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[28] == true) {
		/* Sub-band starting at channel 34 (even-numbered channels). */
		for (ii = 28; ii < 36; ii += 2) {
			if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true) {
				byCount++;
			}
		}
		*pbyChTupple++ = 34;
		*pbyChTupple++ = byCount;
		byLen += 2;
	} else if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[29] == true) {
		/* Sub-band starting at channel 36 instead. */
		for (ii = 29; ii < 36; ii += 2) {
			if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true) {
				byCount++;
			}
		}
		*pbyChTupple++ = 36;
		*pbyChTupple++ = byCount;
		byLen += 2;
	}
	// middle band
	byCount = 0;
	if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[36] == true) {
		/* Sub-band starting at channel 52. */
		for (ii = 36; ii < 40; ii++) {
			if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true) {
				byCount++;
			}
		}
		*pbyChTupple++ = 52;
		*pbyChTupple++ = byCount;
		byLen += 2;
	}
	// higher band
	byCount = 0;
	if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[40] == true) {
		/* Sub-band starting at channel 100. */
		for (ii = 40; ii < 51; ii++) {
			if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true) {
				byCount++;
			}
		}
		*pbyChTupple++ = 100;
		*pbyChTupple++ = byCount;
		byLen += 2;
	} else if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[51] == true) {
		/* Sub-band starting at channel 149. */
		for (ii = 51; ii < 56; ii++) {
			if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true) {
				byCount++;
			}
		}
		*pbyChTupple++ = 149;
		*pbyChTupple++ = byCount;
		byLen += 2;
	}
	pIE->len += (byLen - 2);
	return byLen;
}

/**
 * set_country_IE() - Fill an 802.11d Country IE for transmission
 * @pDeviceHandler: opaque device pointer, actually a PSDevice
 * @pIE: buffer to receive the Country information element
 *
 * Writes the 2-letter country code (padded with a space) and one
 * (channel, 1, power) triplet per allowed 5GHz channel of the device's
 * zone, then adds 3 to len for the country-string bytes.
 *
 * NOTE(review): only the 5GHz portion of the rule list is emitted
 * (loop starts at CB_MAX_CHANNEL_24G, i.e. the first 5G list index).
 */
void set_country_IE(void *pDeviceHandler, void *pIE)
{
	PSDevice pDevice = (PSDevice) pDeviceHandler;
	unsigned int ii;
	PWLAN_IE_COUNTRY pIECountry = (PWLAN_IE_COUNTRY) pIE;

	pIECountry->byElementID = WLAN_EID_COUNTRY;
	pIECountry->len = 0;
	pIECountry->abyCountryString[0] = ChannelRuleTab[pDevice->byZoneType].chCountryCode[0];
	pIECountry->abyCountryString[1] = ChannelRuleTab[pDevice->byZoneType].chCountryCode[1];
	pIECountry->abyCountryString[2] = ' ';
	for (ii = CB_MAX_CHANNEL_24G; ii < CB_MAX_CHANNEL; ii++) {
		if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
			/* bChannelIdxList index ii maps to sChannelTbl index ii+1. */
			pIECountry->abyCountryInfo[pIECountry->len++] = sChannelTbl[ii + 1].byChannelNumber;
			pIECountry->abyCountryInfo[pIECountry->len++] = 1;
			pIECountry->abyCountryInfo[pIECountry->len++] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
		}
	}
	/* Account for the 3-byte country string written above. */
	pIECountry->len += 3;
}

/**
 * get_channel_map_info() - Read a channel's number, busy map, and validity
 * @pDeviceHandler: opaque device pointer (unused)
 * @uChannelIndex: 1-based sChannelTbl index
 * @pbyChannelNumber: out: IEEE channel number
 * @pbyMap: out: accumulated scan/busy map bits for the channel
 *
 * Return: the channel's validity flag, or false if the index is out of range
 * (in which case the output parameters are not written).
 */
bool get_channel_map_info(void *pDeviceHandler, unsigned int uChannelIndex, unsigned char *pbyChannelNumber, unsigned char *pbyMap)
{
	if (uChannelIndex > CB_MAX_CHANNEL)
		return false;

	*pbyChannelNumber = sChannelTbl[uChannelIndex].byChannelNumber;
	*pbyMap = sChannelTbl[uChannelIndex].byMAP;
	return sChannelTbl[uChannelIndex].bValid;
}

/**
 * set_channel_map_info() - OR additional map bits into a channel's entry
 * @pDeviceHandler: opaque device pointer (unused)
 * @uChannelIndex: 1-based sChannelTbl index (silently ignored if out of range)
 * @byMap: bits to set in the channel's byMAP field
 */
void set_channel_map_info(void *pDeviceHandler, unsigned int uChannelIndex, unsigned char byMap)
{
	if (uChannelIndex > CB_MAX_CHANNEL)
		return;

	sChannelTbl[uChannelIndex].byMAP |= byMap;
}

/**
 * clear_channel_map_info() - Reset the map bits of every channel to 0
 * @pDeviceHandler: opaque device pointer (unused)
 *
 * Typically called before a fresh scan so auto_channel_select() starts
 * from a clean map.
 */
void clear_channel_map_info(void *pDeviceHandler)
{
	unsigned int ii = 0;

	for (ii = 1; ii <= CB_MAX_CHANNEL; ii++)
		sChannelTbl[ii].byMAP = 0;
}

/**
 * auto_channel_select() - Pick the least congested valid channel
 * @pDeviceHandler: opaque device pointer (unused)
 * @ePHYType: PHY_TYPE_11A selects from the 5GHz range, anything else 2.4GHz
 *
 * 5GHz: returns the first valid channel whose map is completely clear;
 * otherwise remembers the last valid channel without bit 0x08 set and
 * returns that (or the first valid channel as a fallback).
 *
 * 2.4GHz: scores channels with a weight array.  A clear channel gains 100;
 * a channel with bit 0x01 set (busy) penalizes itself (-80) and its
 * neighbors up to 3 channels away (-40/-20/-10), modeling 2.4GHz overlap.
 * aiWeight[0] = -1000 is a sentinel so the initial byOptionChannel of 0
 * always loses the comparison.
 *
 * Return: the selected channel index, or 0 if no valid channel exists.
 */
unsigned char auto_channel_select(void *pDeviceHandler, CARD_PHY_TYPE ePHYType)
{
	unsigned int ii = 0;
	unsigned char byOptionChannel = 0;
	int aiWeight[CB_MAX_CHANNEL_24G + 1] = {-1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	if (ePHYType == PHY_TYPE_11A) {
		for (ii = CB_MAX_CHANNEL_24G + 1; ii <= CB_MAX_CHANNEL; ii++) {
			if (sChannelTbl[ii].bValid) {
				if (byOptionChannel == 0) {
					byOptionChannel = (unsigned char) ii;
				}
				if (sChannelTbl[ii].byMAP == 0) {
					/* Completely clear channel: take it immediately. */
					return (unsigned char) ii;
				} else if (!(sChannelTbl[ii].byMAP & 0x08)) {
					byOptionChannel = (unsigned char) ii;
				}
			}
		}
	} else {
		byOptionChannel = 0;
		for (ii = 1; ii <= CB_MAX_CHANNEL_24G; ii++) {
			if (sChannelTbl[ii].bValid) {
				if (sChannelTbl[ii].byMAP == 0) {
					aiWeight[ii] += 100;
				} else if (sChannelTbl[ii].byMAP & 0x01) {
					/* Busy channel: penalize neighbors by distance. */
					if (ii > 3) {
						aiWeight[ii - 3] -= 10;
					}
					if (ii > 2) {
						aiWeight[ii - 2] -= 20;
					}
					if (ii > 1) {
						aiWeight[ii - 1] -= 40;
					}
					aiWeight[ii] -= 80;
					if (ii < CB_MAX_CHANNEL_24G) {
						aiWeight[ii + 1] -= 40;
					}
					if (ii < (CB_MAX_CHANNEL_24G - 1)) {
						aiWeight[ii+2] -= 20;
					}
					if (ii < (CB_MAX_CHANNEL_24G - 2)) {
						aiWeight[ii+3] -= 10;
					}
				}
			}
		}
		/* Pick the highest-weight valid channel. */
		for (ii = 1; ii <= CB_MAX_CHANNEL_24G; ii++) {
			if (sChannelTbl[ii].bValid && (aiWeight[ii] > aiWeight[byOptionChannel])) {
				byOptionChannel = (unsigned char) ii;
			}
		}
	}
	return byOptionChannel;
}
gpl-2.0
koksneo/MaNgOs
dep/ACE_wrappers/ace/Cached_Connect_Strategy_T.cpp
298
22099
//$Id: Cached_Connect_Strategy_T.cpp 92097 2010-09-30 05:41:49Z msmit $ #ifndef ACE_CACHED_CONNECT_STRATEGY_T_CPP #define ACE_CACHED_CONNECT_STRATEGY_T_CPP #include "ace/Cached_Connect_Strategy_T.h" #if !defined (ACE_LACKS_PRAGMA_ONCE) #pragma once #endif /* ACE_LACKS_PRAGMA_ONCE */ #include "ace/ACE.h" #include "ace/Service_Repository.h" #include "ace/Service_Types.h" #include "ace/Thread_Manager.h" #include "ace/WFMO_Reactor.h" #define ACE_T1 class SVC_HANDLER, ACE_PEER_CONNECTOR_1, class CACHING_STRATEGY, class ATTRIBUTES, class MUTEX #define ACE_T2 SVC_HANDLER, ACE_PEER_CONNECTOR_2, CACHING_STRATEGY, ATTRIBUTES, MUTEX ACE_BEGIN_VERSIONED_NAMESPACE_DECL template <ACE_T1> ACE_Cached_Connect_Strategy_Ex<ACE_T2>::ACE_Cached_Connect_Strategy_Ex (CACHING_STRATEGY &caching_s, ACE_Creation_Strategy<SVC_HANDLER> *cre_s, ACE_Concurrency_Strategy<SVC_HANDLER> *con_s, ACE_Recycling_Strategy<SVC_HANDLER> *rec_s, MUTEX *lock, int delete_lock) : CCSBASE (cre_s, con_s, rec_s, lock, delete_lock), connection_cache_ (caching_s) { if (this->open (cre_s, con_s, rec_s) == -1) ACE_ERROR ((LM_ERROR, ACE_TEXT ("%p\n"), ACE_TEXT ("ACE_Cached_Connect_Strategy_Ex<ACE_T2>\n"))); } template <ACE_T1> ACE_Cached_Connect_Strategy_Ex<ACE_T2>::~ACE_Cached_Connect_Strategy_Ex (void) { cleanup (); } template <ACE_T1> int ACE_Cached_Connect_Strategy_Ex<ACE_T2>::check_hint_i (SVC_HANDLER *&sh, const ACE_PEER_CONNECTOR_ADDR &remote_addr, ACE_Time_Value *timeout, const ACE_PEER_CONNECTOR_ADDR &local_addr, bool reuse_addr, int flags, int perms, ACE_Hash_Map_Entry<ACE_Refcounted_Hash_Recyclable<ACE_PEER_CONNECTOR_ADDR>, std::pair<SVC_HANDLER *, ATTRIBUTES> > *&entry, int &found) { ACE_UNUSED_ARG (remote_addr); ACE_UNUSED_ARG (timeout); ACE_UNUSED_ARG (local_addr); ACE_UNUSED_ARG (reuse_addr); ACE_UNUSED_ARG (flags); ACE_UNUSED_ARG (perms); found = 0; // Get the recycling act for the svc_handler CONNECTION_CACHE_ENTRY *possible_entry = (CONNECTION_CACHE_ENTRY *) sh->recycling_act (); // Check to see if 
the hint svc_handler has been closed down if (possible_entry->ext_id_.recycle_state () == ACE_RECYCLABLE_CLOSED) { // If close, decrement refcount if (possible_entry->ext_id_.decrement () == 0) { // If refcount goes to zero, close down the svc_handler possible_entry->int_id_.first->recycler (0, 0); possible_entry->int_id_.first->close (); this->purge_i (possible_entry); } // Hint not successful found = 0; // Reset hint sh = 0; } // If hint is not closed, see if it is connected to the correct // address and is recyclable else if ((possible_entry->ext_id_.recycle_state () == ACE_RECYCLABLE_IDLE_AND_PURGABLE || possible_entry->ext_id_.recycle_state () == ACE_RECYCLABLE_IDLE_BUT_NOT_PURGABLE) && possible_entry->ext_id_.subject () == remote_addr) { // Hint successful found = 1; // Tell the <svc_handler> that it should prepare itself for // being recycled. this->prepare_for_recycling (sh); // // Update the caching attributes directly since we don't do a // find() on the cache map. // // Indicates successful find. int find_result = 0; int result = this->caching_strategy ().notify_find (find_result, possible_entry->int_id_.second); if (result == -1) return result; } else { // This hint will not be used. possible_entry->ext_id_.decrement (); // Hint not successful found = 0; // If <sh> is not connected to the correct address or is busy, // we will not use it. sh = 0; } if (found) entry = possible_entry; return 0; } template <ACE_T1> int ACE_Cached_Connect_Strategy_Ex<ACE_T2>::find_or_create_svc_handler_i (SVC_HANDLER *&sh, const ACE_PEER_CONNECTOR_ADDR &remote_addr, ACE_Time_Value *timeout, const ACE_PEER_CONNECTOR_ADDR &local_addr, bool reuse_addr, int flags, int perms, ACE_Hash_Map_Entry<ACE_Refcounted_Hash_Recyclable<ACE_PEER_CONNECTOR_ADDR>, std::pair<SVC_HANDLER *, ATTRIBUTES> > *&entry, int &found) { REFCOUNTED_HASH_RECYCLABLE_ADDRESS search_addr (remote_addr); // Try to find the address in the cache. 
Only if we don't find it // do we create a new <SVC_HANDLER> and connect it with the server. while (this->find (search_addr, entry) != -1) { // We found a cached svc_handler. // Get the cached <svc_handler> sh = entry->int_id_.first; // Is the connection clean? int state_result = ACE::handle_ready (sh->peer ().get_handle (), &ACE_Time_Value::zero, 1, // read ready 0, // write ready 1);// exception ready if (state_result == 1) { if (sh->close () == -1) return -1; sh = 0; // Cycle it once again.. } else if ((state_result == -1) && (errno == ETIME)) { // Found!!! // Set the flag found = 1; // Tell the <svc_handler> that it should prepare itself for // being recycled. if (this->prepare_for_recycling (sh) == -1) return -1; return 0; } else { return -1; } } // Not found... // Set the flag found = 0; // We need to use a temporary variable here since we are not // allowed to change <sh> because other threads may use this // when we let go of the lock during the OS level connect. // // Note that making a new svc_handler, connecting remotely, // binding to the map, and assigning of the hint and recycler // should be atomic to the outside world. SVC_HANDLER *potential_handler = 0; // Create a new svc_handler if (this->make_svc_handler (potential_handler) == -1) return -1; // Connect using the svc_handler. if (this->cached_connect (potential_handler, remote_addr, timeout, local_addr, reuse_addr, flags, perms) == -1) { // Close the svc handler. potential_handler->close (0); return -1; } else { // Insert the new SVC_HANDLER instance into the cache. if (this->connection_cache_.bind (search_addr, potential_handler, entry) == -1) { // Close the svc handler and reset <sh>. potential_handler->close (0); return -1; } // Everything succeeded as planned. Assign <sh> to // <potential_handler>. 
sh = potential_handler; // Set the recycler and the recycling act this->assign_recycler (sh, this, entry); } return 0; } template <ACE_T1> int ACE_Cached_Connect_Strategy_Ex<ACE_T2>::cached_connect (SVC_HANDLER *&sh, const ACE_PEER_CONNECTOR_ADDR &remote_addr, ACE_Time_Value *timeout, const ACE_PEER_CONNECTOR_ADDR &local_addr, bool reuse_addr, int flags, int perms) { // Actively establish the connection. This is a timed blocking // connect. if (this->new_connection (sh, remote_addr, timeout, local_addr, reuse_addr, flags, perms) == -1) { // If connect() failed because of timeouts, we have to reject // the connection entirely. This is necessary since currently // there is no way for the non-blocking connects to complete and // for the <Connector> to notify the cache of the completion of // connect(). if (errno == EWOULDBLOCK || errno == ETIMEDOUT) errno = ENOTSUP; else if (ACE::out_of_handles (errno) || errno == EADDRINUSE) { // If the connect failed due to the process running out of // file descriptors then, auto_purging of some connections // are done from the CONNECTION_CACHE. This frees the // descriptors which get used in the connect process and // hence the same method is called again! if (this->purge_connections () == -1) return -1; // Try connecting again. 
if (this->new_connection (sh, remote_addr, timeout, local_addr, reuse_addr, flags, perms) == -1) { if (errno == EWOULDBLOCK || errno == ETIMEDOUT) errno = ENOTSUP; return -1; } } else { return -1; } } return 0; } template <ACE_T1> int ACE_Cached_Connect_Strategy_Ex<ACE_T2>::connect_svc_handler_i (SVC_HANDLER *&sh, const ACE_PEER_CONNECTOR_ADDR &remote_addr, ACE_Time_Value *timeout, const ACE_PEER_CONNECTOR_ADDR &local_addr, bool reuse_addr, int flags, int perms, int& found) { CONNECTION_CACHE_ENTRY *entry = 0; // Check if the user passed a hint svc_handler if (sh != 0) { int result = this->check_hint_i (sh, remote_addr, timeout, local_addr, reuse_addr, flags, perms, entry, found); if (result != 0) return result; } // If not found if (!found) { int result = this->find_or_create_svc_handler_i (sh, remote_addr, timeout, local_addr, reuse_addr, flags, perms, entry, found); if (result != 0) return result; // Increment the refcount entry->ext_id_.increment (); } // For all successful cases: mark the <svc_handler> in the cache // as being <in_use>. Therefore recyclable is BUSY. entry->ext_id_.recycle_state (ACE_RECYCLABLE_BUSY); return 0; } template <ACE_T1> int ACE_Cached_Connect_Strategy_Ex<ACE_T2>::cache_i (const void *recycling_act) { // The wonders and perils of ACT CONNECTION_CACHE_ENTRY *entry = (CONNECTION_CACHE_ENTRY *) recycling_act; // Mark the <svc_handler> in the cache as not being <in_use>. // Therefore recyclable is IDLE. entry->ext_id_.recycle_state (ACE_RECYCLABLE_IDLE_AND_PURGABLE); return 0; } template<ACE_T1> int ACE_Cached_Connect_Strategy_Ex<ACE_T2>::recycle_state_i (const void *recycling_act, ACE_Recyclable_State new_state) { // The wonders and perils of ACT CONNECTION_CACHE_ENTRY *entry = (CONNECTION_CACHE_ENTRY *) recycling_act; // Mark the <svc_handler> in the cache as not being <in_use>. // Therefore recyclable is IDLE. 
entry->ext_id_.recycle_state (new_state); return 0; } template<ACE_T1> ACE_Recyclable_State ACE_Cached_Connect_Strategy_Ex<ACE_T2>::recycle_state_i (const void *recycling_act) const { // The wonders and perils of ACT CONNECTION_CACHE_ENTRY *entry = (CONNECTION_CACHE_ENTRY *) recycling_act; // Mark the <svc_handler> in the cache as not being <in_use>. // Therefore recyclable is IDLE. return entry->ext_id_.recycle_state (); } template <ACE_T1> int ACE_Cached_Connect_Strategy_Ex<ACE_T2>::purge_i (const void *recycling_act) { // The wonders and perils of ACT CONNECTION_CACHE_ENTRY *entry = (CONNECTION_CACHE_ENTRY *) recycling_act; return this->connection_cache_.unbind (entry); } template <ACE_T1> int ACE_Cached_Connect_Strategy_Ex<ACE_T2>::mark_as_closed_i (const void *recycling_act) { // The wonders and perils of ACT CONNECTION_CACHE_ENTRY *entry = (CONNECTION_CACHE_ENTRY *) recycling_act; // Mark the <svc_handler> in the cache as CLOSED. entry->ext_id_.recycle_state (ACE_RECYCLABLE_CLOSED); return 0; } template <ACE_T1> int ACE_Cached_Connect_Strategy_Ex<ACE_T2>::cleanup_hint_i (const void *recycling_act, void **act_holder) { // Reset the <*act_holder> in the confines and protection of the // lock. if (act_holder) *act_holder = 0; // The wonders and perils of ACT CONNECTION_CACHE_ENTRY *entry = (CONNECTION_CACHE_ENTRY *) recycling_act; // Decrement the refcount on the <svc_handler>. int refcount = entry->ext_id_.decrement (); // If the svc_handler state is closed and the refcount == 0, call // close() on svc_handler. 
if (entry->ext_id_.recycle_state () == ACE_RECYCLABLE_CLOSED && refcount == 0) { entry->int_id_.first->recycler (0, 0); entry->int_id_.first->close (); this->purge_i (entry); } return 0; } template <ACE_T1> int ACE_Cached_Connect_Strategy_Ex<ACE_T2>::purge_connections (void) { return this->connection_cache_.purge (); } template <ACE_T1> CACHING_STRATEGY & ACE_Cached_Connect_Strategy_Ex<ACE_T2>::caching_strategy (void) { return this->connection_cache_.caching_strategy (); } template <ACE_T1> int ACE_Cached_Connect_Strategy_Ex<ACE_T2>::find (ACE_Refcounted_Hash_Recyclable<ACE_PEER_CONNECTOR_ADDR> &search_addr, ACE_Hash_Map_Entry<ACE_Refcounted_Hash_Recyclable<ACE_PEER_CONNECTOR_ADDR>, std::pair<SVC_HANDLER *, ATTRIBUTES> > *&entry) { typedef ACE_Hash_Map_Bucket_Iterator<REFCOUNTED_HASH_RECYCLABLE_ADDRESS, std::pair<SVC_HANDLER *, ATTRIBUTES>, ACE_Hash<REFCOUNTED_HASH_RECYCLABLE_ADDRESS>, ACE_Equal_To<REFCOUNTED_HASH_RECYCLABLE_ADDRESS>, ACE_Null_Mutex> CONNECTION_CACHE_BUCKET_ITERATOR; CONNECTION_CACHE_BUCKET_ITERATOR iterator (this->connection_cache_.map (), search_addr); CONNECTION_CACHE_BUCKET_ITERATOR end (this->connection_cache_.map (), search_addr, 1); for (; iterator != end; ++iterator) { REFCOUNTED_HASH_RECYCLABLE_ADDRESS &addr = (*iterator).ext_id_; if (addr.recycle_state () != ACE_RECYCLABLE_IDLE_AND_PURGABLE && addr.recycle_state () != ACE_RECYCLABLE_IDLE_BUT_NOT_PURGABLE) continue; if (addr.subject () != search_addr.subject ()) continue; entry = &(*iterator); // // Update the caching attributes directly since we don't do a // find() on the cache map. // // Indicates successful find. 
int find_result = 0; int result = this->caching_strategy ().notify_find (find_result, entry->int_id_.second); if (result == -1) return result; return 0; } return -1; } template <ACE_T1> void ACE_Cached_Connect_Strategy_Ex<ACE_T2>::cleanup (void) { // Excluded other threads from changing the cache while we cleanup ACE_GUARD (MUTEX, ace_mon, *this->lock_); // Close down all cached service handlers. typename CONNECTION_CACHE::ITERATOR iter = this->connection_cache_.begin (); while (iter != this->connection_cache_.end ()) { if ((*iter).second () != 0) { // save entry for future use CONNECTION_CACHE_ENTRY *entry = (CONNECTION_CACHE_ENTRY *) (*iter).second ()->recycling_act (); // close handler (*iter).second ()->recycler (0, 0); (*iter).second ()->close (); // remember next iter typename CONNECTION_CACHE::ITERATOR next_iter = iter; ++next_iter; // purge the item from the hash this->purge_i (entry); // assign next iter iter = next_iter; } else ++iter; } } ACE_ALLOC_HOOK_DEFINE(ACE_Cached_Connect_Strategy_Ex) ///////////////////////////////////////////////////////////////////////// template <ACE_T1> ACE_Bounded_Cached_Connect_Strategy<ACE_T2>::ACE_Bounded_Cached_Connect_Strategy (size_t max_size, CACHING_STRATEGY &caching_s, ACE_Creation_Strategy<SVC_HANDLER> *cre_s, ACE_Concurrency_Strategy<SVC_HANDLER> *con_s, ACE_Recycling_Strategy<SVC_HANDLER> *rec_s, MUTEX *lock, int delete_lock) : CCSEBASE (caching_s, cre_s, con_s, rec_s, lock, delete_lock), max_size_ (max_size) { } template <ACE_T1> ACE_Bounded_Cached_Connect_Strategy<ACE_T2>::~ACE_Bounded_Cached_Connect_Strategy(void) { } template <ACE_T1> int ACE_Bounded_Cached_Connect_Strategy<ACE_T2>::find_or_create_svc_handler_i (SVC_HANDLER *&sh, const ACE_PEER_CONNECTOR_ADDR &remote_addr, ACE_Time_Value *timeout, const ACE_PEER_CONNECTOR_ADDR &local_addr, bool reuse_addr, int flags, int perms, ACE_Hash_Map_Entry<ACE_Refcounted_Hash_Recyclable<ACE_PEER_CONNECTOR_ADDR>, std::pair<SVC_HANDLER *, ATTRIBUTES> > *&entry, int 
&found) { REFCOUNTED_HASH_RECYCLABLE_ADDRESS search_addr (remote_addr); // Try to find the address in the cache. Only if we don't find it // do we create a new <SVC_HANDLER> and connect it with the server. while (this->find (search_addr, entry) != -1) { // We found a cached svc_handler. // Get the cached <svc_handler> sh = entry->int_id_.first (); // Is the connection clean? int state_result= ACE::handle_ready (sh->peer ().get_handle (), &ACE_Time_Value::zero, 1, // read ready 0, // write ready 1);// exception ready if (state_result == 1) { // The connection was disconnected during idle. // close the svc_handler down. if (sh->close () == -1) { ACE_ASSERT (0); return -1; } sh = 0; // and rotate once more... } else if ((state_result == -1) && (errno == ETIME)) { // Found!!! // Set the flag found = 1; // Tell the <svc_handler> that it should prepare itself for // being recycled. if (this->prepare_for_recycling (sh) == -1) { ACE_ASSERT (0); return -1; } return 0; } else // some other return value or error... { ACE_ASSERT (0); // just to see it coming ACE_ERROR ((LM_ERROR, ACE_TEXT ("(%t)ACE_Bounded_Cached_Connect_Strategy<>::") ACE_TEXT ("find_or_create_svc_handler_i - ") ACE_TEXT ("error polling server socket state.\n"))); return -1; } } // Not found... // Set the flag found = 0; // Check the limit of handlers... if ((this->max_size_ > 0) && (this->connection_cache_.current_size () >= this->max_size_)) { // Try to purge idle connections if (this->purge_connections () == -1) return -1; // Check limit again. if (this->connection_cache_.current_size () >= this->max_size_) // still too much! return -1; // OK, we have room now... } // We need to use a temporary variable here since we are not // allowed to change <sh> because other threads may use this // when we let go of the lock during the OS level connect. // // Note that making a new svc_handler, connecting remotely, // binding to the map, and assigning of the hint and recycler // should be atomic to the outside world. 
SVC_HANDLER *potential_handler = 0; // Create a new svc_handler if (this->make_svc_handler (potential_handler) == -1) return -1; // Connect using the svc_handler. if (this->cached_connect (potential_handler, remote_addr, timeout, local_addr, reuse_addr, flags, perms) == -1) { // Close the svc handler. potential_handler->close (0); return -1; } else { // Insert the new SVC_HANDLER instance into the cache. if (this->connection_cache_.bind (search_addr, potential_handler, entry) == -1) { // Close the svc handler and reset <sh>. potential_handler->close (0); return -1; } // Everything succeeded as planned. Assign <sh> to // <potential_handler>. sh = potential_handler; // Set the recycler and the recycling act this->assign_recycler (sh, this, entry); } return 0; } ACE_ALLOC_HOOK_DEFINE(ACE_Bounded_Cached_Connect_Strategy) ACE_END_VERSIONED_NAMESPACE_DECL #undef ACE_T1 #undef ACE_T2 #endif /* ACE_CACHED_CONNECT_STRATEGY_T_CPP */
gpl-2.0
loganakamatsu/kernel_asus_grouper
kernel/debug/kdb/kdb_main.c
554
71300
/* * Kernel Debugger Architecture Independent Main Code * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com> * Xscale (R) modifications copyright (C) 2003 Intel Corporation. * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved. */ #include <linux/ctype.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/reboot.h> #include <linux/sched.h> #include <linux/sysrq.h> #include <linux/smp.h> #include <linux/utsname.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/kallsyms.h> #include <linux/kgdb.h> #include <linux/kdb.h> #include <linux/notifier.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/nmi.h> #include <linux/time.h> #include <linux/ptrace.h> #include <linux/sysctl.h> #include <linux/cpu.h> #include <linux/kdebug.h> #include <linux/proc_fs.h> #include <linux/uaccess.h> #include <linux/slab.h> #include "kdb_private.h" #define GREP_LEN 256 char kdb_grep_string[GREP_LEN]; int kdb_grepping_flag; EXPORT_SYMBOL(kdb_grepping_flag); int kdb_grep_leading; int kdb_grep_trailing; /* * Kernel debugger state flags */ int kdb_flags; atomic_t kdb_event; /* * kdb_lock protects updates to kdb_initial_cpu. Used to * single thread processors through the kernel debugger. 
*/ int kdb_initial_cpu = -1; /* cpu number that owns kdb */ int kdb_nextline = 1; int kdb_state; /* General KDB state */ struct task_struct *kdb_current_task; EXPORT_SYMBOL(kdb_current_task); struct pt_regs *kdb_current_regs; const char *kdb_diemsg; static int kdb_go_count; #ifdef CONFIG_KDB_CONTINUE_CATASTROPHIC static unsigned int kdb_continue_catastrophic = CONFIG_KDB_CONTINUE_CATASTROPHIC; #else static unsigned int kdb_continue_catastrophic; #endif /* kdb_commands describes the available commands. */ static kdbtab_t *kdb_commands; #define KDB_BASE_CMD_MAX 50 static int kdb_max_commands = KDB_BASE_CMD_MAX; static kdbtab_t kdb_base_commands[KDB_BASE_CMD_MAX]; #define for_each_kdbcmd(cmd, num) \ for ((cmd) = kdb_base_commands, (num) = 0; \ num < kdb_max_commands; \ num++, num == KDB_BASE_CMD_MAX ? cmd = kdb_commands : cmd++) typedef struct _kdbmsg { int km_diag; /* kdb diagnostic */ char *km_msg; /* Corresponding message text */ } kdbmsg_t; #define KDBMSG(msgnum, text) \ { KDB_##msgnum, text } static kdbmsg_t kdbmsgs[] = { KDBMSG(NOTFOUND, "Command Not Found"), KDBMSG(ARGCOUNT, "Improper argument count, see usage."), KDBMSG(BADWIDTH, "Illegal value for BYTESPERWORD use 1, 2, 4 or 8, " "8 is only allowed on 64 bit systems"), KDBMSG(BADRADIX, "Illegal value for RADIX use 8, 10 or 16"), KDBMSG(NOTENV, "Cannot find environment variable"), KDBMSG(NOENVVALUE, "Environment variable should have value"), KDBMSG(NOTIMP, "Command not implemented"), KDBMSG(ENVFULL, "Environment full"), KDBMSG(ENVBUFFULL, "Environment buffer full"), KDBMSG(TOOMANYBPT, "Too many breakpoints defined"), #ifdef CONFIG_CPU_XSCALE KDBMSG(TOOMANYDBREGS, "More breakpoints than ibcr registers defined"), #else KDBMSG(TOOMANYDBREGS, "More breakpoints than db registers defined"), #endif KDBMSG(DUPBPT, "Duplicate breakpoint address"), KDBMSG(BPTNOTFOUND, "Breakpoint not found"), KDBMSG(BADMODE, "Invalid IDMODE"), KDBMSG(BADINT, "Illegal numeric value"), KDBMSG(INVADDRFMT, "Invalid symbolic address 
format"), KDBMSG(BADREG, "Invalid register name"), KDBMSG(BADCPUNUM, "Invalid cpu number"), KDBMSG(BADLENGTH, "Invalid length field"), KDBMSG(NOBP, "No Breakpoint exists"), KDBMSG(BADADDR, "Invalid address"), }; #undef KDBMSG static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t); /* * Initial environment. This is all kept static and local to * this file. We don't want to rely on the memory allocation * mechanisms in the kernel, so we use a very limited allocate-only * heap for new and altered environment variables. The entire * environment is limited to a fixed number of entries (add more * to __env[] if required) and a fixed amount of heap (add more to * KDB_ENVBUFSIZE if required). */ static char *__env[] = { #if defined(CONFIG_SMP) "PROMPT=[%d]kdb> ", "MOREPROMPT=[%d]more> ", #else "PROMPT=kdb> ", "MOREPROMPT=more> ", #endif "RADIX=16", "MDCOUNT=8", /* lines of md output */ KDB_PLATFORM_ENV, "DTABCOUNT=30", "NOSECT=1", (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, }; static const int __nenv = (sizeof(__env) / sizeof(char *)); struct task_struct *kdb_curr_task(int cpu) { struct task_struct *p = curr_task(cpu); #ifdef _TIF_MCA_INIT if ((task_thread_info(p)->flags & _TIF_MCA_INIT) && KDB_TSK(cpu)) p = krp->p; #endif return p; } /* * kdbgetenv - This function will return the character string value of * an environment variable. * Parameters: * match A character string representing an environment variable. * Returns: * NULL No environment variable matches 'match' * char* Pointer to string value of environment variable. 
*/ char *kdbgetenv(const char *match) { char **ep = __env; int matchlen = strlen(match); int i; for (i = 0; i < __nenv; i++) { char *e = *ep++; if (!e) continue; if ((strncmp(match, e, matchlen) == 0) && ((e[matchlen] == '\0') || (e[matchlen] == '='))) { char *cp = strchr(e, '='); return cp ? ++cp : ""; } } return NULL; } /* * kdballocenv - This function is used to allocate bytes for * environment entries. * Parameters: * match A character string representing a numeric value * Outputs: * *value the unsigned long representation of the env variable 'match' * Returns: * Zero on success, a kdb diagnostic on failure. * Remarks: * We use a static environment buffer (envbuffer) to hold the values * of dynamically generated environment variables (see kdb_set). Buffer * space once allocated is never free'd, so over time, the amount of space * (currently 512 bytes) will be exhausted if env variables are changed * frequently. */ static char *kdballocenv(size_t bytes) { #define KDB_ENVBUFSIZE 512 static char envbuffer[KDB_ENVBUFSIZE]; static int envbufsize; char *ep = NULL; if ((KDB_ENVBUFSIZE - envbufsize) >= bytes) { ep = &envbuffer[envbufsize]; envbufsize += bytes; } return ep; } /* * kdbgetulenv - This function will return the value of an unsigned * long-valued environment variable. * Parameters: * match A character string representing a numeric value * Outputs: * *value the unsigned long represntation of the env variable 'match' * Returns: * Zero on success, a kdb diagnostic on failure. */ static int kdbgetulenv(const char *match, unsigned long *value) { char *ep; ep = kdbgetenv(match); if (!ep) return KDB_NOTENV; if (strlen(ep) == 0) return KDB_NOENVVALUE; *value = simple_strtoul(ep, NULL, 0); return 0; } /* * kdbgetintenv - This function will return the value of an * integer-valued environment variable. 
* Parameters: * match A character string representing an integer-valued env variable * Outputs: * *value the integer representation of the environment variable 'match' * Returns: * Zero on success, a kdb diagnostic on failure. */ int kdbgetintenv(const char *match, int *value) { unsigned long val; int diag; diag = kdbgetulenv(match, &val); if (!diag) *value = (int) val; return diag; } /* * kdbgetularg - This function will convert a numeric string into an * unsigned long value. * Parameters: * arg A character string representing a numeric value * Outputs: * *value the unsigned long represntation of arg. * Returns: * Zero on success, a kdb diagnostic on failure. */ int kdbgetularg(const char *arg, unsigned long *value) { char *endp; unsigned long val; val = simple_strtoul(arg, &endp, 0); if (endp == arg) { /* * Also try base 16, for us folks too lazy to type the * leading 0x... */ val = simple_strtoul(arg, &endp, 16); if (endp == arg) return KDB_BADINT; } *value = val; return 0; } int kdbgetu64arg(const char *arg, u64 *value) { char *endp; u64 val; val = simple_strtoull(arg, &endp, 0); if (endp == arg) { val = simple_strtoull(arg, &endp, 16); if (endp == arg) return KDB_BADINT; } *value = val; return 0; } /* * kdb_set - This function implements the 'set' command. Alter an * existing environment variable or create a new one. */ int kdb_set(int argc, const char **argv) { int i; char *ep; size_t varlen, vallen; /* * we can be invoked two ways: * set var=value argv[1]="var", argv[2]="value" * set var = value argv[1]="var", argv[2]="=", argv[3]="value" * - if the latter, shift 'em down. 
*/ if (argc == 3) { argv[2] = argv[3]; argc--; } if (argc != 2) return KDB_ARGCOUNT; /* * Check for internal variables */ if (strcmp(argv[1], "KDBDEBUG") == 0) { unsigned int debugflags; char *cp; debugflags = simple_strtoul(argv[2], &cp, 0); if (cp == argv[2] || debugflags & ~KDB_DEBUG_FLAG_MASK) { kdb_printf("kdb: illegal debug flags '%s'\n", argv[2]); return 0; } kdb_flags = (kdb_flags & ~(KDB_DEBUG_FLAG_MASK << KDB_DEBUG_FLAG_SHIFT)) | (debugflags << KDB_DEBUG_FLAG_SHIFT); return 0; } /* * Tokenizer squashed the '=' sign. argv[1] is variable * name, argv[2] = value. */ varlen = strlen(argv[1]); vallen = strlen(argv[2]); ep = kdballocenv(varlen + vallen + 2); if (ep == (char *)0) return KDB_ENVBUFFULL; sprintf(ep, "%s=%s", argv[1], argv[2]); ep[varlen+vallen+1] = '\0'; for (i = 0; i < __nenv; i++) { if (__env[i] && ((strncmp(__env[i], argv[1], varlen) == 0) && ((__env[i][varlen] == '\0') || (__env[i][varlen] == '=')))) { __env[i] = ep; return 0; } } /* * Wasn't existing variable. Fit into slot. */ for (i = 0; i < __nenv-1; i++) { if (__env[i] == (char *)0) { __env[i] = ep; return 0; } } return KDB_ENVFULL; } static int kdb_check_regs(void) { if (!kdb_current_regs) { kdb_printf("No current kdb registers." " You may need to select another task\n"); return KDB_BADREG; } return 0; } /* * kdbgetaddrarg - This function is responsible for parsing an * address-expression and returning the value of the expression, * symbol name, and offset to the caller. * * The argument may consist of a numeric value (decimal or * hexidecimal), a symbol name, a register name (preceded by the * percent sign), an environment variable with a numeric value * (preceded by a dollar sign) or a simple arithmetic expression * consisting of a symbol name, +/-, and a numeric constant value * (offset). 
* Parameters: * argc - count of arguments in argv * argv - argument vector * *nextarg - index to next unparsed argument in argv[] * regs - Register state at time of KDB entry * Outputs: * *value - receives the value of the address-expression * *offset - receives the offset specified, if any * *name - receives the symbol name, if any * *nextarg - index to next unparsed argument in argv[] * Returns: * zero is returned on success, a kdb diagnostic code is * returned on error. */ int kdbgetaddrarg(int argc, const char **argv, int *nextarg, unsigned long *value, long *offset, char **name) { unsigned long addr; unsigned long off = 0; int positive; int diag; int found = 0; char *symname; char symbol = '\0'; char *cp; kdb_symtab_t symtab; /* * Process arguments which follow the following syntax: * * symbol | numeric-address [+/- numeric-offset] * %register * $environment-variable */ if (*nextarg > argc) return KDB_ARGCOUNT; symname = (char *)argv[*nextarg]; /* * If there is no whitespace between the symbol * or address and the '+' or '-' symbols, we * remember the character and replace it with a * null so the symbol/value can be properly parsed */ cp = strpbrk(symname, "+-"); if (cp != NULL) { symbol = *cp; *cp++ = '\0'; } if (symname[0] == '$') { diag = kdbgetulenv(&symname[1], &addr); if (diag) return diag; } else if (symname[0] == '%') { diag = kdb_check_regs(); if (diag) return diag; /* Implement register values with % at a later time as it is * arch optional. 
*/ return KDB_NOTIMP; } else { found = kdbgetsymval(symname, &symtab); if (found) { addr = symtab.sym_start; } else { diag = kdbgetularg(argv[*nextarg], &addr); if (diag) return diag; } } if (!found) found = kdbnearsym(addr, &symtab); (*nextarg)++; if (name) *name = symname; if (value) *value = addr; if (offset && name && *name) *offset = addr - symtab.sym_start; if ((*nextarg > argc) && (symbol == '\0')) return 0; /* * check for +/- and offset */ if (symbol == '\0') { if ((argv[*nextarg][0] != '+') && (argv[*nextarg][0] != '-')) { /* * Not our argument. Return. */ return 0; } else { positive = (argv[*nextarg][0] == '+'); (*nextarg)++; } } else positive = (symbol == '+'); /* * Now there must be an offset! */ if ((*nextarg > argc) && (symbol == '\0')) { return KDB_INVADDRFMT; } if (!symbol) { cp = (char *)argv[*nextarg]; (*nextarg)++; } diag = kdbgetularg(cp, &off); if (diag) return diag; if (!positive) off = -off; if (offset) *offset += off; if (value) *value += off; return 0; } static void kdb_cmderror(int diag) { int i; if (diag >= 0) { kdb_printf("no error detected (diagnostic is %d)\n", diag); return; } for (i = 0; i < __nkdb_err; i++) { if (kdbmsgs[i].km_diag == diag) { kdb_printf("diag: %d: %s\n", diag, kdbmsgs[i].km_msg); return; } } kdb_printf("Unknown diag %d\n", -diag); } /* * kdb_defcmd, kdb_defcmd2 - This function implements the 'defcmd' * command which defines one command as a set of other commands, * terminated by endefcmd. kdb_defcmd processes the initial * 'defcmd' command, kdb_defcmd2 is invoked from kdb_parse for * the following commands until 'endefcmd'. 
* Inputs: * argc argument count * argv argument vector * Returns: * zero for success, a kdb diagnostic if error */ struct defcmd_set { int count; int usable; char *name; char *usage; char *help; char **command; }; static struct defcmd_set *defcmd_set; static int defcmd_set_count; static int defcmd_in_progress; /* Forward references */ static int kdb_exec_defcmd(int argc, const char **argv); static int kdb_defcmd2(const char *cmdstr, const char *argv0) { struct defcmd_set *s = defcmd_set + defcmd_set_count - 1; char **save_command = s->command; if (strcmp(argv0, "endefcmd") == 0) { defcmd_in_progress = 0; if (!s->count) s->usable = 0; if (s->usable) kdb_register(s->name, kdb_exec_defcmd, s->usage, s->help, 0); return 0; } if (!s->usable) return KDB_NOTIMP; s->command = kzalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB); if (!s->command) { kdb_printf("Could not allocate new kdb_defcmd table for %s\n", cmdstr); s->usable = 0; return KDB_NOTIMP; } memcpy(s->command, save_command, s->count * sizeof(*(s->command))); s->command[s->count++] = kdb_strdup(cmdstr, GFP_KDB); kfree(save_command); return 0; } static int kdb_defcmd(int argc, const char **argv) { struct defcmd_set *save_defcmd_set = defcmd_set, *s; if (defcmd_in_progress) { kdb_printf("kdb: nested defcmd detected, assuming missing " "endefcmd\n"); kdb_defcmd2("endefcmd", "endefcmd"); } if (argc == 0) { int i; for (s = defcmd_set; s < defcmd_set + defcmd_set_count; ++s) { kdb_printf("defcmd %s \"%s\" \"%s\"\n", s->name, s->usage, s->help); for (i = 0; i < s->count; ++i) kdb_printf("%s", s->command[i]); kdb_printf("endefcmd\n"); } return 0; } if (argc != 3) return KDB_ARGCOUNT; defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set), GFP_KDB); if (!defcmd_set) { kdb_printf("Could not allocate new defcmd_set entry for %s\n", argv[1]); defcmd_set = save_defcmd_set; return KDB_NOTIMP; } memcpy(defcmd_set, save_defcmd_set, defcmd_set_count * sizeof(*defcmd_set)); kfree(save_defcmd_set); s = defcmd_set + 
defcmd_set_count; memset(s, 0, sizeof(*s)); s->usable = 1; s->name = kdb_strdup(argv[1], GFP_KDB); s->usage = kdb_strdup(argv[2], GFP_KDB); s->help = kdb_strdup(argv[3], GFP_KDB); if (s->usage[0] == '"') { strcpy(s->usage, s->usage+1); s->usage[strlen(s->usage)-1] = '\0'; } if (s->help[0] == '"') { strcpy(s->help, s->help+1); s->help[strlen(s->help)-1] = '\0'; } ++defcmd_set_count; defcmd_in_progress = 1; return 0; } /* * kdb_exec_defcmd - Execute the set of commands associated with this * defcmd name. * Inputs: * argc argument count * argv argument vector * Returns: * zero for success, a kdb diagnostic if error */ static int kdb_exec_defcmd(int argc, const char **argv) { int i, ret; struct defcmd_set *s; if (argc != 0) return KDB_ARGCOUNT; for (s = defcmd_set, i = 0; i < defcmd_set_count; ++i, ++s) { if (strcmp(s->name, argv[0]) == 0) break; } if (i == defcmd_set_count) { kdb_printf("kdb_exec_defcmd: could not find commands for %s\n", argv[0]); return KDB_NOTIMP; } for (i = 0; i < s->count; ++i) { /* Recursive use of kdb_parse, do not use argv after * this point */ argv = NULL; kdb_printf("[%s]kdb> %s\n", s->name, s->command[i]); ret = kdb_parse(s->command[i]); if (ret) return ret; } return 0; } /* Command history */ #define KDB_CMD_HISTORY_COUNT 32 #define CMD_BUFLEN 200 /* kdb_printf: max printline * size == 256 */ static unsigned int cmd_head, cmd_tail; static unsigned int cmdptr; static char cmd_hist[KDB_CMD_HISTORY_COUNT][CMD_BUFLEN]; static char cmd_cur[CMD_BUFLEN]; /* * The "str" argument may point to something like | grep xyz */ static void parse_grep(const char *str) { int len; char *cp = (char *)str, *cp2; /* sanity check: we should have been called with the \ first */ if (*cp != '|') return; cp++; while (isspace(*cp)) cp++; if (strncmp(cp, "grep ", 5)) { kdb_printf("invalid 'pipe', see grephelp\n"); return; } cp += 5; while (isspace(*cp)) cp++; cp2 = strchr(cp, '\n'); if (cp2) *cp2 = '\0'; /* remove the trailing newline */ len = strlen(cp); if (len == 
0) { kdb_printf("invalid 'pipe', see grephelp\n"); return; } /* now cp points to a nonzero length search string */ if (*cp == '"') { /* allow it be "x y z" by removing the "'s - there must be two of them */ cp++; cp2 = strchr(cp, '"'); if (!cp2) { kdb_printf("invalid quoted string, see grephelp\n"); return; } *cp2 = '\0'; /* end the string where the 2nd " was */ } kdb_grep_leading = 0; if (*cp == '^') { kdb_grep_leading = 1; cp++; } len = strlen(cp); kdb_grep_trailing = 0; if (*(cp+len-1) == '$') { kdb_grep_trailing = 1; *(cp+len-1) = '\0'; } len = strlen(cp); if (!len) return; if (len >= GREP_LEN) { kdb_printf("search string too long\n"); return; } strcpy(kdb_grep_string, cp); kdb_grepping_flag++; return; } /* * kdb_parse - Parse the command line, search the command table for a * matching command and invoke the command function. This * function may be called recursively, if it is, the second call * will overwrite argv and cbuf. It is the caller's * responsibility to save their argv if they recursively call * kdb_parse(). * Parameters: * cmdstr The input command line to be parsed. * regs The registers at the time kdb was entered. * Returns: * Zero for success, a kdb diagnostic if failure. * Remarks: * Limited to 20 tokens. * * Real rudimentary tokenization. Basically only whitespace * is considered a token delimeter (but special consideration * is taken of the '=' sign as used by the 'set' command). * * The algorithm used to tokenize the input string relies on * there being at least one whitespace (or otherwise useless) * character between tokens as the character immediately following * the token is altered in-place to a null-byte to terminate the * token string. */ #define MAXARGC 20 int kdb_parse(const char *cmdstr) { static char *argv[MAXARGC]; static int argc; static char cbuf[CMD_BUFLEN+2]; char *cp; char *cpp, quoted; kdbtab_t *tp; int i, escaped, ignore_errors = 0, check_grep; /* * First tokenize the command string. 
*/ cp = (char *)cmdstr; kdb_grepping_flag = check_grep = 0; if (KDB_FLAG(CMD_INTERRUPT)) { /* Previous command was interrupted, newline must not * repeat the command */ KDB_FLAG_CLEAR(CMD_INTERRUPT); KDB_STATE_SET(PAGER); argc = 0; /* no repeat */ } if (*cp != '\n' && *cp != '\0') { argc = 0; cpp = cbuf; while (*cp) { /* skip whitespace */ while (isspace(*cp)) cp++; if ((*cp == '\0') || (*cp == '\n') || (*cp == '#' && !defcmd_in_progress)) break; /* special case: check for | grep pattern */ if (*cp == '|') { check_grep++; break; } if (cpp >= cbuf + CMD_BUFLEN) { kdb_printf("kdb_parse: command buffer " "overflow, command ignored\n%s\n", cmdstr); return KDB_NOTFOUND; } if (argc >= MAXARGC - 1) { kdb_printf("kdb_parse: too many arguments, " "command ignored\n%s\n", cmdstr); return KDB_NOTFOUND; } argv[argc++] = cpp; escaped = 0; quoted = '\0'; /* Copy to next unquoted and unescaped * whitespace or '=' */ while (*cp && *cp != '\n' && (escaped || quoted || !isspace(*cp))) { if (cpp >= cbuf + CMD_BUFLEN) break; if (escaped) { escaped = 0; *cpp++ = *cp++; continue; } if (*cp == '\\') { escaped = 1; ++cp; continue; } if (*cp == quoted) quoted = '\0'; else if (*cp == '\'' || *cp == '"') quoted = *cp; *cpp = *cp++; if (*cpp == '=' && !quoted) break; ++cpp; } *cpp++ = '\0'; /* Squash a ws or '=' character */ } } if (!argc) return 0; if (check_grep) parse_grep(cp); if (defcmd_in_progress) { int result = kdb_defcmd2(cmdstr, argv[0]); if (!defcmd_in_progress) { argc = 0; /* avoid repeat on endefcmd */ *(argv[0]) = '\0'; } return result; } if (argv[0][0] == '-' && argv[0][1] && (argv[0][1] < '0' || argv[0][1] > '9')) { ignore_errors = 1; ++argv[0]; } for_each_kdbcmd(tp, i) { if (tp->cmd_name) { /* * If this command is allowed to be abbreviated, * check to see if this is it. 
*/ if (tp->cmd_minlen && (strlen(argv[0]) <= tp->cmd_minlen)) { if (strncmp(argv[0], tp->cmd_name, tp->cmd_minlen) == 0) { break; } } if (strcmp(argv[0], tp->cmd_name) == 0) break; } } /* * If we don't find a command by this name, see if the first * few characters of this match any of the known commands. * e.g., md1c20 should match md. */ if (i == kdb_max_commands) { for_each_kdbcmd(tp, i) { if (tp->cmd_name) { if (strncmp(argv[0], tp->cmd_name, strlen(tp->cmd_name)) == 0) { break; } } } } if (i < kdb_max_commands) { int result; KDB_STATE_SET(CMD); result = (*tp->cmd_func)(argc-1, (const char **)argv); if (result && ignore_errors && result > KDB_CMD_GO) result = 0; KDB_STATE_CLEAR(CMD); switch (tp->cmd_repeat) { case KDB_REPEAT_NONE: argc = 0; if (argv[0]) *(argv[0]) = '\0'; break; case KDB_REPEAT_NO_ARGS: argc = 1; if (argv[1]) *(argv[1]) = '\0'; break; case KDB_REPEAT_WITH_ARGS: break; } return result; } /* * If the input with which we were presented does not * map to an existing command, attempt to parse it as an * address argument and display the result. Useful for * obtaining the address of a variable, or the nearest symbol * to an address contained in a register. */ { unsigned long value; char *name = NULL; long offset; int nextarg = 0; if (kdbgetaddrarg(0, (const char **)argv, &nextarg, &value, &offset, &name)) { return KDB_NOTFOUND; } kdb_printf("%s = ", argv[0]); kdb_symbol_print(value, NULL, KDB_SP_DEFAULT); kdb_printf("\n"); return 0; } } static int handle_ctrl_cmd(char *cmd) { #define CTRL_P 16 #define CTRL_N 14 /* initial situation */ if (cmd_head == cmd_tail) return 0; switch (*cmd) { case CTRL_P: if (cmdptr != cmd_tail) cmdptr = (cmdptr-1) % KDB_CMD_HISTORY_COUNT; strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN); return 1; case CTRL_N: if (cmdptr != cmd_head) cmdptr = (cmdptr+1) % KDB_CMD_HISTORY_COUNT; strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN); return 1; } return 0; } /* * kdb_reboot - This function implements the 'reboot' command. 
Reboot * the system immediately, or loop for ever on failure. */ static int kdb_reboot(int argc, const char **argv) { emergency_restart(); kdb_printf("Hmm, kdb_reboot did not reboot, spinning here\n"); while (1) cpu_relax(); /* NOTREACHED */ return 0; } static void kdb_dumpregs(struct pt_regs *regs) { int old_lvl = console_loglevel; console_loglevel = 15; kdb_trap_printk++; show_regs(regs); kdb_trap_printk--; kdb_printf("\n"); console_loglevel = old_lvl; } void kdb_set_current_task(struct task_struct *p) { kdb_current_task = p; if (kdb_task_has_cpu(p)) { kdb_current_regs = KDB_TSKREGS(kdb_process_cpu(p)); return; } kdb_current_regs = NULL; } /* * kdb_local - The main code for kdb. This routine is invoked on a * specific processor, it is not global. The main kdb() routine * ensures that only one processor at a time is in this routine. * This code is called with the real reason code on the first * entry to a kdb session, thereafter it is called with reason * SWITCH, even if the user goes back to the original cpu. * Inputs: * reason The reason KDB was invoked * error The hardware-defined error code * regs The exception frame at time of fault/breakpoint. * db_result Result code from the break or debug point. * Returns: * 0 KDB was invoked for an event which it wasn't responsible * 1 KDB handled the event for which it was invoked. * KDB_CMD_GO User typed 'go'. * KDB_CMD_CPU User switched to another cpu. * KDB_CMD_SS Single step. * KDB_CMD_SSB Single step until branch. */ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, kdb_dbtrap_t db_result) { char *cmdbuf; int diag; struct task_struct *kdb_current = kdb_curr_task(raw_smp_processor_id()); KDB_DEBUG_STATE("kdb_local 1", reason); kdb_go_count = 0; if (reason == KDB_REASON_DEBUG) { /* special case below */ } else { kdb_printf("\nEntering kdb (current=0x%p, pid %d) ", kdb_current, kdb_current ? 
kdb_current->pid : 0); #if defined(CONFIG_SMP) kdb_printf("on processor %d ", raw_smp_processor_id()); #endif } switch (reason) { case KDB_REASON_DEBUG: { /* * If re-entering kdb after a single step * command, don't print the message. */ switch (db_result) { case KDB_DB_BPT: kdb_printf("\nEntering kdb (0x%p, pid %d) ", kdb_current, kdb_current->pid); #if defined(CONFIG_SMP) kdb_printf("on processor %d ", raw_smp_processor_id()); #endif kdb_printf("due to Debug @ " kdb_machreg_fmt "\n", instruction_pointer(regs)); break; case KDB_DB_SSB: /* * In the midst of ssb command. Just return. */ KDB_DEBUG_STATE("kdb_local 3", reason); return KDB_CMD_SSB; /* Continue with SSB command */ break; case KDB_DB_SS: break; case KDB_DB_SSBPT: KDB_DEBUG_STATE("kdb_local 4", reason); return 1; /* kdba_db_trap did the work */ default: kdb_printf("kdb: Bad result from kdba_db_trap: %d\n", db_result); break; } } break; case KDB_REASON_ENTER: if (KDB_STATE(KEYBOARD)) kdb_printf("due to Keyboard Entry\n"); else kdb_printf("due to KDB_ENTER()\n"); break; case KDB_REASON_KEYBOARD: KDB_STATE_SET(KEYBOARD); kdb_printf("due to Keyboard Entry\n"); break; case KDB_REASON_ENTER_SLAVE: /* drop through, slaves only get released via cpu switch */ case KDB_REASON_SWITCH: kdb_printf("due to cpu switch\n"); break; case KDB_REASON_OOPS: kdb_printf("Oops: %s\n", kdb_diemsg); kdb_printf("due to oops @ " kdb_machreg_fmt "\n", instruction_pointer(regs)); kdb_dumpregs(regs); break; case KDB_REASON_NMI: kdb_printf("due to NonMaskable Interrupt @ " kdb_machreg_fmt "\n", instruction_pointer(regs)); kdb_dumpregs(regs); break; case KDB_REASON_SSTEP: case KDB_REASON_BREAK: kdb_printf("due to %s @ " kdb_machreg_fmt "\n", reason == KDB_REASON_BREAK ? "Breakpoint" : "SS trap", instruction_pointer(regs)); /* * Determine if this breakpoint is one that we * are interested in. 
*/ if (db_result != KDB_DB_BPT) { kdb_printf("kdb: error return from kdba_bp_trap: %d\n", db_result); KDB_DEBUG_STATE("kdb_local 6", reason); return 0; /* Not for us, dismiss it */ } break; case KDB_REASON_RECURSE: kdb_printf("due to Recursion @ " kdb_machreg_fmt "\n", instruction_pointer(regs)); break; default: kdb_printf("kdb: unexpected reason code: %d\n", reason); KDB_DEBUG_STATE("kdb_local 8", reason); return 0; /* Not for us, dismiss it */ } while (1) { /* * Initialize pager context. */ kdb_nextline = 1; KDB_STATE_CLEAR(SUPPRESS); cmdbuf = cmd_cur; *cmdbuf = '\0'; *(cmd_hist[cmd_head]) = '\0'; if (KDB_FLAG(ONLY_DO_DUMP)) { /* kdb is off but a catastrophic error requires a dump. * Take the dump and reboot. * Turn on logging so the kdb output appears in the log * buffer in the dump. */ const char *setargs[] = { "set", "LOGGING", "1" }; kdb_set(2, setargs); kdb_reboot(0, NULL); /*NOTREACHED*/ } do_full_getstr: #if defined(CONFIG_SMP) snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"), raw_smp_processor_id()); #else snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT")); #endif if (defcmd_in_progress) strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN); /* * Fetch command from keyboard */ cmdbuf = kdb_getstr(cmdbuf, CMD_BUFLEN, kdb_prompt_str); if (*cmdbuf != '\n') { if (*cmdbuf < 32) { if (cmdptr == cmd_head) { strncpy(cmd_hist[cmd_head], cmd_cur, CMD_BUFLEN); *(cmd_hist[cmd_head] + strlen(cmd_hist[cmd_head])-1) = '\0'; } if (!handle_ctrl_cmd(cmdbuf)) *(cmd_cur+strlen(cmd_cur)-1) = '\0'; cmdbuf = cmd_cur; goto do_full_getstr; } else { strncpy(cmd_hist[cmd_head], cmd_cur, CMD_BUFLEN); } cmd_head = (cmd_head+1) % KDB_CMD_HISTORY_COUNT; if (cmd_head == cmd_tail) cmd_tail = (cmd_tail+1) % KDB_CMD_HISTORY_COUNT; } cmdptr = cmd_head; diag = kdb_parse(cmdbuf); if (diag == KDB_NOTFOUND) { kdb_printf("Unknown kdb command: '%s'\n", cmdbuf); diag = 0; } if (diag == KDB_CMD_GO || diag == KDB_CMD_CPU || diag == KDB_CMD_SS || diag == KDB_CMD_SSB || diag == KDB_CMD_KGDB) 
break; if (diag) kdb_cmderror(diag); } KDB_DEBUG_STATE("kdb_local 9", diag); return diag; } /* * kdb_print_state - Print the state data for the current processor * for debugging. * Inputs: * text Identifies the debug point * value Any integer value to be printed, e.g. reason code. */ void kdb_print_state(const char *text, int value) { kdb_printf("state: %s cpu %d value %d initial %d state %x\n", text, raw_smp_processor_id(), value, kdb_initial_cpu, kdb_state); } /* * kdb_main_loop - After initial setup and assignment of the * controlling cpu, all cpus are in this loop. One cpu is in * control and will issue the kdb prompt, the others will spin * until 'go' or cpu switch. * * To get a consistent view of the kernel stacks for all * processes, this routine is invoked from the main kdb code via * an architecture specific routine. kdba_main_loop is * responsible for making the kernel stacks consistent for all * processes, there should be no difference between a blocked * process and a running process as far as kdb is concerned. * Inputs: * reason The reason KDB was invoked * error The hardware-defined error code * reason2 kdb's current reason code. * Initially error but can change * according to kdb state. * db_result Result code from break or debug point. * regs The exception frame at time of fault/breakpoint. * should always be valid. * Returns: * 0 KDB was invoked for an event which it wasn't responsible * 1 KDB handled the event for which it was invoked. */ int kdb_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error, kdb_dbtrap_t db_result, struct pt_regs *regs) { int result = 1; /* Stay in kdb() until 'go', 'ss[b]' or an error */ while (1) { /* * All processors except the one that is in control * will spin here. */ KDB_DEBUG_STATE("kdb_main_loop 1", reason); while (KDB_STATE(HOLD_CPU)) { /* state KDB is turned off by kdb_cpu to see if the * other cpus are still live, each cpu in this loop * turns it back on. 
*/
			if (!KDB_STATE(KDB))
				KDB_STATE_SET(KDB);
		}
		KDB_STATE_CLEAR(SUPPRESS);
		KDB_DEBUG_STATE("kdb_main_loop 2", reason);
		if (KDB_STATE(LEAVING))
			break;	/* Another cpu said 'go' */
		/* Still using kdb, this processor is in control */
		result = kdb_local(reason2, error, regs, db_result);
		KDB_DEBUG_STATE("kdb_main_loop 3", result);

		if (result == KDB_CMD_CPU)
			break;

		if (result == KDB_CMD_SS) {
			KDB_STATE_SET(DOING_SS);
			break;
		}

		if (result == KDB_CMD_SSB) {
			/* ssb implies ss: set both states */
			KDB_STATE_SET(DOING_SS);
			KDB_STATE_SET(DOING_SSB);
			break;
		}

		if (result == KDB_CMD_KGDB) {
			if (!KDB_STATE(DOING_KGDB))
				kdb_printf("Entering please attach debugger "
					   "or use $D#44+ or $3#33\n");
			break;
		}
		if (result && result != 1 && result != KDB_CMD_GO)
			kdb_printf("\nUnexpected kdb_local return code %d\n",
				   result);
		KDB_DEBUG_STATE("kdb_main_loop 4", reason);
		break;
	}
	if (KDB_STATE(DOING_SS))
		KDB_STATE_CLEAR(SSBPT);
	return result;
}

/*
 * kdb_mdr - This function implements the guts of the 'mdr', memory
 * read command.
 *	mdr  <addr arg>,<byte count>
 * Inputs:
 *	addr	Start address
 *	count	Number of bytes
 * Returns:
 *	Always 0.  Any errors are detected and printed by kdb_getarea.
 */
static int kdb_mdr(unsigned long addr, unsigned int count)
{
	unsigned char c;
	while (count--) {
		/* kdb_getarea prints its own diagnostic on a fault; the
		 * dump simply stops (possibly mid-line, without a
		 * trailing newline). */
		if (kdb_getarea(c, addr))
			return 0;
		kdb_printf("%02x", c);
		addr++;
	}
	kdb_printf("\n");
	return 0;
}

/*
 * kdb_md - This function implements the 'md', 'md1', 'md2', 'md4',
 * 'md8' 'mdr' and 'mds' commands.
 *
 *	md|mds  [<addr arg> [<line count> [<radix>]]]
 *	mdWcN	[<addr arg> [<line count> [<radix>]]]
 *		where W = is the width (1, 2, 4 or 8) and N is the count.
 *		for eg., md1c20 reads 20 bytes, 1 at a time.
* mdr <addr arg>,<byte count> */ static void kdb_md_line(const char *fmtstr, unsigned long addr, int symbolic, int nosect, int bytesperword, int num, int repeat, int phys) { /* print just one line of data */ kdb_symtab_t symtab; char cbuf[32]; char *c = cbuf; int i; unsigned long word; memset(cbuf, '\0', sizeof(cbuf)); if (phys) kdb_printf("phys " kdb_machreg_fmt0 " ", addr); else kdb_printf(kdb_machreg_fmt0 " ", addr); for (i = 0; i < num && repeat--; i++) { if (phys) { if (kdb_getphysword(&word, addr, bytesperword)) break; } else if (kdb_getword(&word, addr, bytesperword)) break; kdb_printf(fmtstr, word); if (symbolic) kdbnearsym(word, &symtab); else memset(&symtab, 0, sizeof(symtab)); if (symtab.sym_name) { kdb_symbol_print(word, &symtab, 0); if (!nosect) { kdb_printf("\n"); kdb_printf(" %s %s " kdb_machreg_fmt " " kdb_machreg_fmt " " kdb_machreg_fmt, symtab.mod_name, symtab.sec_name, symtab.sec_start, symtab.sym_start, symtab.sym_end); } addr += bytesperword; } else { union { u64 word; unsigned char c[8]; } wc; unsigned char *cp; #ifdef __BIG_ENDIAN cp = wc.c + 8 - bytesperword; #else cp = wc.c; #endif wc.word = word; #define printable_char(c) \ ({unsigned char __c = c; isascii(__c) && isprint(__c) ? 
__c : '.'; }) switch (bytesperword) { case 8: *c++ = printable_char(*cp++); *c++ = printable_char(*cp++); *c++ = printable_char(*cp++); *c++ = printable_char(*cp++); addr += 4; case 4: *c++ = printable_char(*cp++); *c++ = printable_char(*cp++); addr += 2; case 2: *c++ = printable_char(*cp++); addr++; case 1: *c++ = printable_char(*cp++); addr++; break; } #undef printable_char } } kdb_printf("%*s %s\n", (int)((num-i)*(2*bytesperword + 1)+1), " ", cbuf); } static int kdb_md(int argc, const char **argv) { static unsigned long last_addr; static int last_radix, last_bytesperword, last_repeat; int radix = 16, mdcount = 8, bytesperword = KDB_WORD_SIZE, repeat; int nosect = 0; char fmtchar, fmtstr[64]; unsigned long addr; unsigned long word; long offset = 0; int symbolic = 0; int valid = 0; int phys = 0; kdbgetintenv("MDCOUNT", &mdcount); kdbgetintenv("RADIX", &radix); kdbgetintenv("BYTESPERWORD", &bytesperword); /* Assume 'md <addr>' and start with environment values */ repeat = mdcount * 16 / bytesperword; if (strcmp(argv[0], "mdr") == 0) { if (argc != 2) return KDB_ARGCOUNT; valid = 1; } else if (isdigit(argv[0][2])) { bytesperword = (int)(argv[0][2] - '0'); if (bytesperword == 0) { bytesperword = last_bytesperword; if (bytesperword == 0) bytesperword = 4; } last_bytesperword = bytesperword; repeat = mdcount * 16 / bytesperword; if (!argv[0][3]) valid = 1; else if (argv[0][3] == 'c' && argv[0][4]) { char *p; repeat = simple_strtoul(argv[0] + 4, &p, 10); mdcount = ((repeat * bytesperword) + 15) / 16; valid = !*p; } last_repeat = repeat; } else if (strcmp(argv[0], "md") == 0) valid = 1; else if (strcmp(argv[0], "mds") == 0) valid = 1; else if (strcmp(argv[0], "mdp") == 0) { phys = valid = 1; } if (!valid) return KDB_NOTFOUND; if (argc == 0) { if (last_addr == 0) return KDB_ARGCOUNT; addr = last_addr; radix = last_radix; bytesperword = last_bytesperword; repeat = last_repeat; mdcount = ((repeat * bytesperword) + 15) / 16; } if (argc) { unsigned long val; int diag, nextarg 
= 1; diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); if (diag) return diag; if (argc > nextarg+2) return KDB_ARGCOUNT; if (argc >= nextarg) { diag = kdbgetularg(argv[nextarg], &val); if (!diag) { mdcount = (int) val; repeat = mdcount * 16 / bytesperword; } } if (argc >= nextarg+1) { diag = kdbgetularg(argv[nextarg+1], &val); if (!diag) radix = (int) val; } } if (strcmp(argv[0], "mdr") == 0) return kdb_mdr(addr, mdcount); switch (radix) { case 10: fmtchar = 'd'; break; case 16: fmtchar = 'x'; break; case 8: fmtchar = 'o'; break; default: return KDB_BADRADIX; } last_radix = radix; if (bytesperword > KDB_WORD_SIZE) return KDB_BADWIDTH; switch (bytesperword) { case 8: sprintf(fmtstr, "%%16.16l%c ", fmtchar); break; case 4: sprintf(fmtstr, "%%8.8l%c ", fmtchar); break; case 2: sprintf(fmtstr, "%%4.4l%c ", fmtchar); break; case 1: sprintf(fmtstr, "%%2.2l%c ", fmtchar); break; default: return KDB_BADWIDTH; } last_repeat = repeat; last_bytesperword = bytesperword; if (strcmp(argv[0], "mds") == 0) { symbolic = 1; /* Do not save these changes as last_*, they are temporary mds * overrides. */ bytesperword = KDB_WORD_SIZE; repeat = mdcount; kdbgetintenv("NOSECT", &nosect); } /* Round address down modulo BYTESPERWORD */ addr &= ~(bytesperword-1); while (repeat > 0) { unsigned long a; int n, z, num = (symbolic ? 
1 : (16 / bytesperword)); if (KDB_FLAG(CMD_INTERRUPT)) return 0; for (a = addr, z = 0; z < repeat; a += bytesperword, ++z) { if (phys) { if (kdb_getphysword(&word, a, bytesperword) || word) break; } else if (kdb_getword(&word, a, bytesperword) || word) break; } n = min(num, repeat); kdb_md_line(fmtstr, addr, symbolic, nosect, bytesperword, num, repeat, phys); addr += bytesperword * n; repeat -= n; z = (z + num - 1) / num; if (z > 2) { int s = num * (z-2); kdb_printf(kdb_machreg_fmt0 "-" kdb_machreg_fmt0 " zero suppressed\n", addr, addr + bytesperword * s - 1); addr += bytesperword * s; repeat -= s; } } last_addr = addr; return 0; } /* * kdb_mm - This function implements the 'mm' command. * mm address-expression new-value * Remarks: * mm works on machine words, mmW works on bytes. */ static int kdb_mm(int argc, const char **argv) { int diag; unsigned long addr; long offset = 0; unsigned long contents; int nextarg; int width; if (argv[0][2] && !isdigit(argv[0][2])) return KDB_NOTFOUND; if (argc < 2) return KDB_ARGCOUNT; nextarg = 1; diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); if (diag) return diag; if (nextarg > argc) return KDB_ARGCOUNT; diag = kdbgetaddrarg(argc, argv, &nextarg, &contents, NULL, NULL); if (diag) return diag; if (nextarg != argc + 1) return KDB_ARGCOUNT; width = argv[0][2] ? (argv[0][2] - '0') : (KDB_WORD_SIZE); diag = kdb_putword(addr, contents, width); if (diag) return diag; kdb_printf(kdb_machreg_fmt " = " kdb_machreg_fmt "\n", addr, contents); return 0; } /* * kdb_go - This function implements the 'go' command. 
* go [address-expression] */ static int kdb_go(int argc, const char **argv) { unsigned long addr; int diag; int nextarg; long offset; if (raw_smp_processor_id() != kdb_initial_cpu) { kdb_printf("go must execute on the entry cpu, " "please use \"cpu %d\" and then execute go\n", kdb_initial_cpu); return KDB_BADCPUNUM; } if (argc == 1) { nextarg = 1; diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); if (diag) return diag; } else if (argc) { return KDB_ARGCOUNT; } diag = KDB_CMD_GO; if (KDB_FLAG(CATASTROPHIC)) { kdb_printf("Catastrophic error detected\n"); kdb_printf("kdb_continue_catastrophic=%d, ", kdb_continue_catastrophic); if (kdb_continue_catastrophic == 0 && kdb_go_count++ == 0) { kdb_printf("type go a second time if you really want " "to continue\n"); return 0; } if (kdb_continue_catastrophic == 2) { kdb_printf("forcing reboot\n"); kdb_reboot(0, NULL); } kdb_printf("attempting to continue\n"); } return diag; } /* * kdb_rd - This function implements the 'rd' command. */ static int kdb_rd(int argc, const char **argv) { int len = kdb_check_regs(); #if DBG_MAX_REG_NUM > 0 int i; char *rname; int rsize; u64 reg64; u32 reg32; u16 reg16; u8 reg8; if (len) return len; for (i = 0; i < DBG_MAX_REG_NUM; i++) { rsize = dbg_reg_def[i].size * 2; if (rsize > 16) rsize = 2; if (len + strlen(dbg_reg_def[i].name) + 4 + rsize > 80) { len = 0; kdb_printf("\n"); } if (len) len += kdb_printf(" "); switch(dbg_reg_def[i].size * 8) { case 8: rname = dbg_get_reg(i, &reg8, kdb_current_regs); if (!rname) break; len += kdb_printf("%s: %02x", rname, reg8); break; case 16: rname = dbg_get_reg(i, &reg16, kdb_current_regs); if (!rname) break; len += kdb_printf("%s: %04x", rname, reg16); break; case 32: rname = dbg_get_reg(i, &reg32, kdb_current_regs); if (!rname) break; len += kdb_printf("%s: %08x", rname, reg32); break; case 64: rname = dbg_get_reg(i, &reg64, kdb_current_regs); if (!rname) break; len += kdb_printf("%s: %016llx", rname, reg64); break; default: len += 
kdb_printf("%s: ??", dbg_reg_def[i].name); } } kdb_printf("\n"); #else if (len) return len; kdb_dumpregs(kdb_current_regs); #endif return 0; } /* * kdb_rm - This function implements the 'rm' (register modify) command. * rm register-name new-contents * Remarks: * Allows register modification with the same restrictions as gdb */ static int kdb_rm(int argc, const char **argv) { #if DBG_MAX_REG_NUM > 0 int diag; const char *rname; int i; u64 reg64; u32 reg32; u16 reg16; u8 reg8; if (argc != 2) return KDB_ARGCOUNT; /* * Allow presence or absence of leading '%' symbol. */ rname = argv[1]; if (*rname == '%') rname++; diag = kdbgetu64arg(argv[2], &reg64); if (diag) return diag; diag = kdb_check_regs(); if (diag) return diag; diag = KDB_BADREG; for (i = 0; i < DBG_MAX_REG_NUM; i++) { if (strcmp(rname, dbg_reg_def[i].name) == 0) { diag = 0; break; } } if (!diag) { switch(dbg_reg_def[i].size * 8) { case 8: reg8 = reg64; dbg_set_reg(i, &reg8, kdb_current_regs); break; case 16: reg16 = reg64; dbg_set_reg(i, &reg16, kdb_current_regs); break; case 32: reg32 = reg64; dbg_set_reg(i, &reg32, kdb_current_regs); break; case 64: dbg_set_reg(i, &reg64, kdb_current_regs); break; } } return diag; #else kdb_printf("ERROR: Register set currently not implemented\n"); return 0; #endif } #if defined(CONFIG_MAGIC_SYSRQ) /* * kdb_sr - This function implements the 'sr' (SYSRQ key) command * which interfaces to the soi-disant MAGIC SYSRQ functionality. * sr <magic-sysrq-code> */ static int kdb_sr(int argc, const char **argv) { if (argc != 1) return KDB_ARGCOUNT; kdb_trap_printk++; __handle_sysrq(*argv[1], false); kdb_trap_printk--; return 0; } #endif /* CONFIG_MAGIC_SYSRQ */ /* * kdb_ef - This function implements the 'regs' (display exception * frame) command. This command takes an address and expects to * find an exception frame at that address, formats and prints * it. * regs address-expression * Remarks: * Not done yet. 
*/
static int kdb_ef(int argc, const char **argv)
{
	int diag;
	unsigned long addr;
	long offset;
	int nextarg;

	if (argc != 1)
		return KDB_ARGCOUNT;

	nextarg = 1;
	diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
	if (diag)
		return diag;
	/* NOTE(review): addr is not validated as a pt_regs frame before
	 * being handed to show_regs - presumably the operator is trusted
	 * to supply a real exception frame; confirm. */
	show_regs((struct pt_regs *)addr);
	return 0;
}

#if defined(CONFIG_MODULES)
/*
 * kdb_lsmod - This function implements the 'lsmod' command.  Lists
 *	currently loaded kernel modules.
 *	Mostly taken from userland lsmod.
 */
static int kdb_lsmod(int argc, const char **argv)
{
	struct module *mod;

	if (argc != 0)
		return KDB_ARGCOUNT;

	kdb_printf("Module Size modstruct Used by\n");
	list_for_each_entry(mod, kdb_modules, list) {

		kdb_printf("%-20s%8u 0x%p ", mod->name,
			   mod->core_size, (void *)mod);
#ifdef CONFIG_MODULE_UNLOAD
		kdb_printf("%4d ", module_refcount(mod));
#endif
		if (mod->state == MODULE_STATE_GOING)
			kdb_printf(" (Unloading)");
		else if (mod->state == MODULE_STATE_COMING)
			kdb_printf(" (Loading)");
		else
			kdb_printf(" (Live)");
		kdb_printf(" 0x%p", mod->module_core);

#ifdef CONFIG_MODULE_UNLOAD
		/* NOTE(review): the per-module newline is only printed
		 * inside this CONFIG_MODULE_UNLOAD block ("]\n"); without
		 * that option the output appears to run together -
		 * confirm whether that is intended. */
		{
			struct module_use *use;
			kdb_printf(" [ ");
			list_for_each_entry(use, &mod->source_list,
					    source_list)
				kdb_printf("%s ", use->target->name);
			kdb_printf("]\n");
		}
#endif
	}

	return 0;
}
#endif	/* CONFIG_MODULES */

/*
 * kdb_env - This function implements the 'env' command.  Display the
 *	current environment variables.
 */
static int kdb_env(int argc, const char **argv)
{
	int i;

	for (i = 0; i < __nenv; i++) {
		if (__env[i])
			kdb_printf("%s\n", __env[i]);
	}

	if (KDB_DEBUG(MASK))
		kdb_printf("KDBFLAGS=0x%x\n", kdb_flags);

	return 0;
}

#ifdef CONFIG_PRINTK
/*
 * kdb_dmesg - This function implements the 'dmesg' command to display
 *	the contents of the syslog buffer.
* dmesg [lines] [adjust] */ static int kdb_dmesg(int argc, const char **argv) { char *syslog_data[4], *start, *end, c = '\0', *p; int diag, logging, logsize, lines = 0, adjust = 0, n; if (argc > 2) return KDB_ARGCOUNT; if (argc) { char *cp; lines = simple_strtol(argv[1], &cp, 0); if (*cp) lines = 0; if (argc > 1) { adjust = simple_strtoul(argv[2], &cp, 0); if (*cp || adjust < 0) adjust = 0; } } /* disable LOGGING if set */ diag = kdbgetintenv("LOGGING", &logging); if (!diag && logging) { const char *setargs[] = { "set", "LOGGING", "0" }; kdb_set(2, setargs); } /* syslog_data[0,1] physical start, end+1. syslog_data[2,3] * logical start, end+1. */ kdb_syslog_data(syslog_data); if (syslog_data[2] == syslog_data[3]) return 0; logsize = syslog_data[1] - syslog_data[0]; start = syslog_data[2]; end = syslog_data[3]; #define KDB_WRAP(p) (((p - syslog_data[0]) % logsize) + syslog_data[0]) for (n = 0, p = start; p < end; ++p) { c = *KDB_WRAP(p); if (c == '\n') ++n; } if (c != '\n') ++n; if (lines < 0) { if (adjust >= n) kdb_printf("buffer only contains %d lines, nothing " "printed\n", n); else if (adjust - lines >= n) kdb_printf("buffer only contains %d lines, last %d " "lines printed\n", n, n - adjust); if (adjust) { for (; start < end && adjust; ++start) { if (*KDB_WRAP(start) == '\n') --adjust; } if (start < end) ++start; } for (p = start; p < end && lines; ++p) { if (*KDB_WRAP(p) == '\n') ++lines; } end = p; } else if (lines > 0) { int skip = n - (adjust + lines); if (adjust >= n) { kdb_printf("buffer only contains %d lines, " "nothing printed\n", n); skip = n; } else if (skip < 0) { lines += skip; skip = 0; kdb_printf("buffer only contains %d lines, first " "%d lines printed\n", n, lines); } for (; start < end && skip; ++start) { if (*KDB_WRAP(start) == '\n') --skip; } for (p = start; p < end && lines; ++p) { if (*KDB_WRAP(p) == '\n') --lines; } end = p; } /* Do a line at a time (max 200 chars) to reduce protocol overhead */ c = '\n'; while (start != end) { char 
buf[201]; p = buf; if (KDB_FLAG(CMD_INTERRUPT)) return 0; while (start < end && (c = *KDB_WRAP(start)) && (p - buf) < sizeof(buf)-1) { ++start; *p++ = c; if (c == '\n') break; } *p = '\0'; kdb_printf("%s", buf); } if (c != '\n') kdb_printf("\n"); return 0; } #endif /* CONFIG_PRINTK */ /* * kdb_cpu - This function implements the 'cpu' command. * cpu [<cpunum>] * Returns: * KDB_CMD_CPU for success, a kdb diagnostic if error */ static void kdb_cpu_status(void) { int i, start_cpu, first_print = 1; char state, prev_state = '?'; kdb_printf("Currently on cpu %d\n", raw_smp_processor_id()); kdb_printf("Available cpus: "); for (start_cpu = -1, i = 0; i < NR_CPUS; i++) { if (!cpu_online(i)) { state = 'F'; /* cpu is offline */ } else { state = ' '; /* cpu is responding to kdb */ if (kdb_task_state_char(KDB_TSK(i)) == 'I') state = 'I'; /* idle task */ } if (state != prev_state) { if (prev_state != '?') { if (!first_print) kdb_printf(", "); first_print = 0; kdb_printf("%d", start_cpu); if (start_cpu < i-1) kdb_printf("-%d", i-1); if (prev_state != ' ') kdb_printf("(%c)", prev_state); } prev_state = state; start_cpu = i; } } /* print the trailing cpus, ignoring them if they are all offline */ if (prev_state != 'F') { if (!first_print) kdb_printf(", "); kdb_printf("%d", start_cpu); if (start_cpu < i-1) kdb_printf("-%d", i-1); if (prev_state != ' ') kdb_printf("(%c)", prev_state); } kdb_printf("\n"); } static int kdb_cpu(int argc, const char **argv) { unsigned long cpunum; int diag; if (argc == 0) { kdb_cpu_status(); return 0; } if (argc != 1) return KDB_ARGCOUNT; diag = kdbgetularg(argv[1], &cpunum); if (diag) return diag; /* * Validate cpunum */ if ((cpunum > NR_CPUS) || !cpu_online(cpunum)) return KDB_BADCPUNUM; dbg_switch_cpu = cpunum; /* * Switch to other cpu */ return KDB_CMD_CPU; } /* The user may not realize that ps/bta with no parameters does not print idle * or sleeping system daemon processes, so tell them how many were suppressed. 
 */
void kdb_ps_suppressed(void)
{
	int idle = 0, daemon = 0;
	unsigned long mask_I = kdb_task_state_string("I"),
		      mask_M = kdb_task_state_string("M");
	unsigned long cpu;
	const struct task_struct *p, *g;

	/* Count the per-cpu idle tasks that a bare 'ps' would hide */
	for_each_online_cpu(cpu) {
		p = kdb_curr_task(cpu);
		if (kdb_task_state(p, mask_I))
			++idle;
	}
	/* ... and the sleeping system daemons (state M) */
	kdb_do_each_thread(g, p) {
		if (kdb_task_state(p, mask_M))
			++daemon;
	} kdb_while_each_thread(g, p);
	if (idle || daemon) {
		if (idle)
			kdb_printf("%d idle process%s (state I)%s\n",
				   idle, idle == 1 ? "" : "es",
				   daemon ? " and " : "");
		if (daemon)
			kdb_printf("%d sleeping system daemon (state M) "
				   "process%s", daemon,
				   daemon == 1 ? "" : "es");
		kdb_printf(" suppressed,\nuse 'ps A' to see all.\n");
	}
}

/*
 * kdb_ps - This function implements the 'ps' command which shows a
 *	list of the active processes.
 *	ps [DRSTCZEUIMA]	All processes, optionally filtered by state
 */
/* Print one 'ps' line for task p; also cross-checks the saved per-cpu
 * task against the running process table when p holds a cpu. */
void kdb_ps1(const struct task_struct *p)
{
	int cpu;
	unsigned long tmp;

	/* probe first: p may be a stale/garbage pointer in a crashed kernel */
	if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long)))
		return;

	cpu = kdb_process_cpu(p);
	/* '*' marks the task kdb is currently focused on */
	kdb_printf("0x%p %8d %8d %d %4d %c 0x%p %c%s\n",
		   (void *)p, p->pid, p->parent->pid,
		   kdb_task_has_cpu(p), kdb_process_cpu(p),
		   kdb_task_state_char(p),
		   (void *)(&p->thread),
		   p == kdb_curr_task(raw_smp_processor_id()) ? '*' : ' ',
		   p->comm);
	if (kdb_task_has_cpu(p)) {
		if (!KDB_TSK(cpu)) {
			kdb_printf("  Error: no saved data for this cpu\n");
		} else {
			if (KDB_TSK(cpu) != p)
				kdb_printf("  Error: does not match running "
					   "process table (0x%p)\n", KDB_TSK(cpu));
		}
	}
}

static int kdb_ps(int argc, const char **argv)
{
	struct task_struct *g, *p;
	unsigned long mask, cpu;

	if (argc == 0)
		kdb_ps_suppressed();
	kdb_printf("%-*s      Pid   Parent [*] cpu State %-*s Command\n",
		   (int)(2*sizeof(void *))+2, "Task Addr",
		   (int)(2*sizeof(void *))+2, "Thread");
	/* no argument: use the default state filter */
	mask = kdb_task_state_string(argc ? argv[1] : NULL);
	/* Run the active tasks first */
	for_each_online_cpu(cpu) {
		if (KDB_FLAG(CMD_INTERRUPT))
			return 0;
		p = kdb_curr_task(cpu);
		if (kdb_task_state(p, mask))
			kdb_ps1(p);
	}
	kdb_printf("\n");
	/* Now the real tasks */
	kdb_do_each_thread(g, p) {
		if (KDB_FLAG(CMD_INTERRUPT))
			return 0;
		if (kdb_task_state(p, mask))
			kdb_ps1(p);
	} kdb_while_each_thread(g, p);

	return 0;
}

/*
 * kdb_pid - This function implements the 'pid' command which switches
 *	the currently active process.
 *	pid [<pid> | R]
 */
static int kdb_pid(int argc, const char **argv)
{
	struct task_struct *p;
	unsigned long val;
	int diag;

	if (argc > 1)
		return KDB_ARGCOUNT;

	if (argc) {
		/* "R" resets back to the task kdb originally entered on */
		if (strcmp(argv[1], "R") == 0) {
			p = KDB_TSK(kdb_initial_cpu);
		} else {
			diag = kdbgetularg(argv[1], &val);
			if (diag)
				return KDB_BADINT;

			p = find_task_by_pid_ns((pid_t)val,	&init_pid_ns);
			if (!p) {
				kdb_printf("No task with pid=%d\n", (pid_t)val);
				return 0;
			}
		}
		kdb_set_current_task(p);
	}
	kdb_printf("KDB current process is %s(pid=%d)\n",
		   kdb_current_task->comm,
		   kdb_current_task->pid);

	return 0;
}

/*
 * kdb_ll - This function implements the 'll' command which follows a
 *	linked list and executes an arbitrary command for each
 *	element.
 */
static int kdb_ll(int argc, const char **argv)
{
	int diag = 0;
	unsigned long addr;
	long offset = 0;
	unsigned long va;
	unsigned long linkoffset;
	int nextarg;
	const char *command;

	if (argc != 3)
		return KDB_ARGCOUNT;

	nextarg = 1;
	diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
	if (diag)
		return diag;

	/* argv[2] is the byte offset of the 'next' pointer in each node */
	diag = kdbgetularg(argv[2], &linkoffset);
	if (diag)
		return diag;

	/*
	 * Using the starting address as
	 * the first element in the list, and assuming that
	 * the list ends with a null pointer.
*/ va = addr; command = kdb_strdup(argv[3], GFP_KDB); if (!command) { kdb_printf("%s: cannot duplicate command\n", __func__); return 0; } /* Recursive use of kdb_parse, do not use argv after this point */ argv = NULL; while (va) { char buf[80]; if (KDB_FLAG(CMD_INTERRUPT)) goto out; sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va); diag = kdb_parse(buf); if (diag) goto out; addr = va + linkoffset; if (kdb_getword(&va, addr, sizeof(va))) goto out; } out: kfree(command); return diag; } static int kdb_kgdb(int argc, const char **argv) { return KDB_CMD_KGDB; } /* * kdb_help - This function implements the 'help' and '?' commands. */ static int kdb_help(int argc, const char **argv) { kdbtab_t *kt; int i; kdb_printf("%-15.15s %-20.20s %s\n", "Command", "Usage", "Description"); kdb_printf("-----------------------------" "-----------------------------\n"); for_each_kdbcmd(kt, i) { if (kt->cmd_name) kdb_printf("%-15.15s %-20.20s %s\n", kt->cmd_name, kt->cmd_usage, kt->cmd_help); if (KDB_FLAG(CMD_INTERRUPT)) return 0; } return 0; } /* * kdb_kill - This function implements the 'kill' commands. */ static int kdb_kill(int argc, const char **argv) { long sig, pid; char *endp; struct task_struct *p; struct siginfo info; if (argc != 2) return KDB_ARGCOUNT; sig = simple_strtol(argv[1], &endp, 0); if (*endp) return KDB_BADINT; if (sig >= 0) { kdb_printf("Invalid signal parameter.<-signal>\n"); return 0; } sig = -sig; pid = simple_strtol(argv[2], &endp, 0); if (*endp) return KDB_BADINT; if (pid <= 0) { kdb_printf("Process ID must be large than 0.\n"); return 0; } /* Find the process. 
*/ p = find_task_by_pid_ns(pid, &init_pid_ns); if (!p) { kdb_printf("The specified process isn't found.\n"); return 0; } p = p->group_leader; info.si_signo = sig; info.si_errno = 0; info.si_code = SI_USER; info.si_pid = pid; /* same capabilities as process being signalled */ info.si_uid = 0; /* kdb has root authority */ kdb_send_sig_info(p, &info); return 0; } struct kdb_tm { int tm_sec; /* seconds */ int tm_min; /* minutes */ int tm_hour; /* hours */ int tm_mday; /* day of the month */ int tm_mon; /* month */ int tm_year; /* year */ }; static void kdb_gmtime(struct timespec *tv, struct kdb_tm *tm) { /* This will work from 1970-2099, 2100 is not a leap year */ static int mon_day[] = { 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; memset(tm, 0, sizeof(*tm)); tm->tm_sec = tv->tv_sec % (24 * 60 * 60); tm->tm_mday = tv->tv_sec / (24 * 60 * 60) + (2 * 365 + 1); /* shift base from 1970 to 1968 */ tm->tm_min = tm->tm_sec / 60 % 60; tm->tm_hour = tm->tm_sec / 60 / 60; tm->tm_sec = tm->tm_sec % 60; tm->tm_year = 68 + 4*(tm->tm_mday / (4*365+1)); tm->tm_mday %= (4*365+1); mon_day[1] = 29; while (tm->tm_mday >= mon_day[tm->tm_mon]) { tm->tm_mday -= mon_day[tm->tm_mon]; if (++tm->tm_mon == 12) { tm->tm_mon = 0; ++tm->tm_year; mon_day[1] = 28; } } ++tm->tm_mday; } /* * Most of this code has been lifted from kernel/timer.c::sys_sysinfo(). * I cannot call that code directly from kdb, it has an unconditional * cli()/sti() and calls routines that take locks which can stop the debugger. */ static void kdb_sysinfo(struct sysinfo *val) { struct timespec uptime; do_posix_clock_monotonic_gettime(&uptime); memset(val, 0, sizeof(*val)); val->uptime = uptime.tv_sec; val->loads[0] = avenrun[0]; val->loads[1] = avenrun[1]; val->loads[2] = avenrun[2]; val->procs = nr_threads-1; si_meminfo(val); return; } /* * kdb_summary - This function implements the 'summary' command. 
*/ static int kdb_summary(int argc, const char **argv) { struct timespec now; struct kdb_tm tm; struct sysinfo val; if (argc) return KDB_ARGCOUNT; kdb_printf("sysname %s\n", init_uts_ns.name.sysname); kdb_printf("release %s\n", init_uts_ns.name.release); kdb_printf("version %s\n", init_uts_ns.name.version); kdb_printf("machine %s\n", init_uts_ns.name.machine); kdb_printf("nodename %s\n", init_uts_ns.name.nodename); kdb_printf("domainname %s\n", init_uts_ns.name.domainname); kdb_printf("ccversion %s\n", __stringify(CCVERSION)); now = __current_kernel_time(); kdb_gmtime(&now, &tm); kdb_printf("date %04d-%02d-%02d %02d:%02d:%02d " "tz_minuteswest %d\n", 1900+tm.tm_year, tm.tm_mon+1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, sys_tz.tz_minuteswest); kdb_sysinfo(&val); kdb_printf("uptime "); if (val.uptime > (24*60*60)) { int days = val.uptime / (24*60*60); val.uptime %= (24*60*60); kdb_printf("%d day%s ", days, days == 1 ? "" : "s"); } kdb_printf("%02ld:%02ld\n", val.uptime/(60*60), (val.uptime/60)%60); /* lifted from fs/proc/proc_misc.c::loadavg_read_proc() */ #define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) kdb_printf("load avg %ld.%02ld %ld.%02ld %ld.%02ld\n", LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]), LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]), LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2])); #undef LOAD_INT #undef LOAD_FRAC /* Display in kilobytes */ #define K(x) ((x) << (PAGE_SHIFT - 10)) kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n" "Buffers: %8lu kB\n", val.totalram, val.freeram, val.bufferram); return 0; } /* * kdb_per_cpu - This function implements the 'per_cpu' command. 
*/ static int kdb_per_cpu(int argc, const char **argv) { char fmtstr[64]; int cpu, diag, nextarg = 1; unsigned long addr, symaddr, val, bytesperword = 0, whichcpu = ~0UL; if (argc < 1 || argc > 3) return KDB_ARGCOUNT; diag = kdbgetaddrarg(argc, argv, &nextarg, &symaddr, NULL, NULL); if (diag) return diag; if (argc >= 2) { diag = kdbgetularg(argv[2], &bytesperword); if (diag) return diag; } if (!bytesperword) bytesperword = KDB_WORD_SIZE; else if (bytesperword > KDB_WORD_SIZE) return KDB_BADWIDTH; sprintf(fmtstr, "%%0%dlx ", (int)(2*bytesperword)); if (argc >= 3) { diag = kdbgetularg(argv[3], &whichcpu); if (diag) return diag; if (!cpu_online(whichcpu)) { kdb_printf("cpu %ld is not online\n", whichcpu); return KDB_BADCPUNUM; } } /* Most architectures use __per_cpu_offset[cpu], some use * __per_cpu_offset(cpu), smp has no __per_cpu_offset. */ #ifdef __per_cpu_offset #define KDB_PCU(cpu) __per_cpu_offset(cpu) #else #ifdef CONFIG_SMP #define KDB_PCU(cpu) __per_cpu_offset[cpu] #else #define KDB_PCU(cpu) 0 #endif #endif for_each_online_cpu(cpu) { if (KDB_FLAG(CMD_INTERRUPT)) return 0; if (whichcpu != ~0UL && whichcpu != cpu) continue; addr = symaddr + KDB_PCU(cpu); diag = kdb_getword(&val, addr, bytesperword); if (diag) { kdb_printf("%5d " kdb_bfd_vma_fmt0 " - unable to " "read, diag=%d\n", cpu, addr, diag); continue; } kdb_printf("%5d ", cpu); kdb_md_line(fmtstr, addr, bytesperword == KDB_WORD_SIZE, 1, bytesperword, 1, 1, 0); } #undef KDB_PCU return 0; } /* * display help for the use of cmd | grep pattern */ static int kdb_grep_help(int argc, const char **argv) { kdb_printf("Usage of cmd args | grep pattern:\n"); kdb_printf(" Any command's output may be filtered through an "); kdb_printf("emulated 'pipe'.\n"); kdb_printf(" 'grep' is just a key word.\n"); kdb_printf(" The pattern may include a very limited set of " "metacharacters:\n"); kdb_printf(" pattern or ^pattern or pattern$ or ^pattern$\n"); kdb_printf(" And if there are spaces in the pattern, you may " "quote 
it:\n"); kdb_printf(" \"pat tern\" or \"^pat tern\" or \"pat tern$\"" " or \"^pat tern$\"\n"); return 0; } /* * kdb_register_repeat - This function is used to register a kernel * debugger command. * Inputs: * cmd Command name * func Function to execute the command * usage A simple usage string showing arguments * help A simple help string describing command * repeat Does the command auto repeat on enter? * Returns: * zero for success, one if a duplicate command. */ #define kdb_command_extend 50 /* arbitrary */ int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage, char *help, short minlen, kdb_repeat_t repeat) { int i; kdbtab_t *kp; /* * Brute force method to determine duplicates */ for_each_kdbcmd(kp, i) { if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) { kdb_printf("Duplicate kdb command registered: " "%s, func %p help %s\n", cmd, func, help); return 1; } } /* * Insert command into first available location in table */ for_each_kdbcmd(kp, i) { if (kp->cmd_name == NULL) break; } if (i >= kdb_max_commands) { kdbtab_t *new = kmalloc((kdb_max_commands - KDB_BASE_CMD_MAX + kdb_command_extend) * sizeof(*new), GFP_KDB); if (!new) { kdb_printf("Could not allocate new kdb_command " "table\n"); return 1; } if (kdb_commands) { memcpy(new, kdb_commands, (kdb_max_commands - KDB_BASE_CMD_MAX) * sizeof(*new)); kfree(kdb_commands); } memset(new + kdb_max_commands, 0, kdb_command_extend * sizeof(*new)); kdb_commands = new; kp = kdb_commands + kdb_max_commands - KDB_BASE_CMD_MAX; kdb_max_commands += kdb_command_extend; } kp->cmd_name = cmd; kp->cmd_func = func; kp->cmd_usage = usage; kp->cmd_help = help; kp->cmd_flags = 0; kp->cmd_minlen = minlen; kp->cmd_repeat = repeat; return 0; } EXPORT_SYMBOL_GPL(kdb_register_repeat); /* * kdb_register - Compatibility register function for commands that do * not need to specify a repeat state. Equivalent to * kdb_register_repeat with KDB_REPEAT_NONE. 
* Inputs: * cmd Command name * func Function to execute the command * usage A simple usage string showing arguments * help A simple help string describing command * Returns: * zero for success, one if a duplicate command. */ int kdb_register(char *cmd, kdb_func_t func, char *usage, char *help, short minlen) { return kdb_register_repeat(cmd, func, usage, help, minlen, KDB_REPEAT_NONE); } EXPORT_SYMBOL_GPL(kdb_register); /* * kdb_unregister - This function is used to unregister a kernel * debugger command. It is generally called when a module which * implements kdb commands is unloaded. * Inputs: * cmd Command name * Returns: * zero for success, one command not registered. */ int kdb_unregister(char *cmd) { int i; kdbtab_t *kp; /* * find the command. */ for_each_kdbcmd(kp, i) { if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) { kp->cmd_name = NULL; return 0; } } /* Couldn't find it. */ return 1; } EXPORT_SYMBOL_GPL(kdb_unregister); /* Initialize the kdb command table. */ static void __init kdb_inittab(void) { int i; kdbtab_t *kp; for_each_kdbcmd(kp, i) kp->cmd_name = NULL; kdb_register_repeat("md", kdb_md, "<vaddr>", "Display Memory Contents, also mdWcN, e.g. 
md8c1", 1, KDB_REPEAT_NO_ARGS); kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>", "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS); kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>", "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS); kdb_register_repeat("mds", kdb_md, "<vaddr>", "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS); kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>", "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS); kdb_register_repeat("go", kdb_go, "[<vaddr>]", "Continue Execution", 1, KDB_REPEAT_NONE); kdb_register_repeat("rd", kdb_rd, "", "Display Registers", 0, KDB_REPEAT_NONE); kdb_register_repeat("rm", kdb_rm, "<reg> <contents>", "Modify Registers", 0, KDB_REPEAT_NONE); kdb_register_repeat("ef", kdb_ef, "<vaddr>", "Display exception frame", 0, KDB_REPEAT_NONE); kdb_register_repeat("bt", kdb_bt, "[<vaddr>]", "Stack traceback", 1, KDB_REPEAT_NONE); kdb_register_repeat("btp", kdb_bt, "<pid>", "Display stack for process <pid>", 0, KDB_REPEAT_NONE); kdb_register_repeat("bta", kdb_bt, "[DRSTCZEUIMA]", "Display stack all processes", 0, KDB_REPEAT_NONE); kdb_register_repeat("btc", kdb_bt, "", "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE); kdb_register_repeat("btt", kdb_bt, "<vaddr>", "Backtrace process given its struct task address", 0, KDB_REPEAT_NONE); kdb_register_repeat("ll", kdb_ll, "<first-element> <linkoffset> <cmd>", "Execute cmd for each element in linked list", 0, KDB_REPEAT_NONE); kdb_register_repeat("env", kdb_env, "", "Show environment variables", 0, KDB_REPEAT_NONE); kdb_register_repeat("set", kdb_set, "", "Set environment variables", 0, KDB_REPEAT_NONE); kdb_register_repeat("help", kdb_help, "", "Display Help Message", 1, KDB_REPEAT_NONE); kdb_register_repeat("?", kdb_help, "", "Display Help Message", 0, KDB_REPEAT_NONE); kdb_register_repeat("cpu", kdb_cpu, "<cpunum>", "Switch to new cpu", 0, KDB_REPEAT_NONE); kdb_register_repeat("kgdb", kdb_kgdb, "", "Enter kgdb mode", 0, KDB_REPEAT_NONE); 
kdb_register_repeat("ps", kdb_ps, "[<flags>|A]", "Display active task list", 0, KDB_REPEAT_NONE); kdb_register_repeat("pid", kdb_pid, "<pidnum>", "Switch to another task", 0, KDB_REPEAT_NONE); kdb_register_repeat("reboot", kdb_reboot, "", "Reboot the machine immediately", 0, KDB_REPEAT_NONE); #if defined(CONFIG_MODULES) kdb_register_repeat("lsmod", kdb_lsmod, "", "List loaded kernel modules", 0, KDB_REPEAT_NONE); #endif #if defined(CONFIG_MAGIC_SYSRQ) kdb_register_repeat("sr", kdb_sr, "<key>", "Magic SysRq key", 0, KDB_REPEAT_NONE); #endif #if defined(CONFIG_PRINTK) kdb_register_repeat("dmesg", kdb_dmesg, "[lines]", "Display syslog buffer", 0, KDB_REPEAT_NONE); #endif kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"", "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE); kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>", "Send a signal to a process", 0, KDB_REPEAT_NONE); kdb_register_repeat("summary", kdb_summary, "", "Summarize the system", 4, KDB_REPEAT_NONE); kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]", "Display per_cpu variables", 3, KDB_REPEAT_NONE); kdb_register_repeat("grephelp", kdb_grep_help, "", "Display help on | grep", 0, KDB_REPEAT_NONE); } /* Execute any commands defined in kdb_cmds. 
 */
static void __init kdb_cmd_init(void)
{
	int i, diag;
	/* kdb_cmds is a NULL-terminated table of built-in command strings */
	for (i = 0; kdb_cmds[i]; ++i) {
		diag = kdb_parse(kdb_cmds[i]);
		if (diag)
			kdb_printf("kdb command %s failed, kdb diag %d\n",
				kdb_cmds[i], diag);
	}
	/* a dangling defcmd would swallow every later command; close it */
	if (defcmd_in_progress) {
		kdb_printf("Incomplete 'defcmd' set, forcing endefcmd\n");
		kdb_parse("endefcmd");
	}
}

/* Initialize kdb_printf, breakpoint tables and kdb state */
void __init kdb_init(int lvl)
{
	static int kdb_init_lvl = KDB_NOT_INITIALIZED;
	int i;

	/* nothing to do if already initialized to (or past) this level */
	if (kdb_init_lvl == KDB_INIT_FULL || lvl <= kdb_init_lvl)
		return;
	/* run each initialization stage between the current and the
	 * requested level exactly once */
	for (i = kdb_init_lvl; i < lvl; i++) {
		switch (i) {
		case KDB_NOT_INITIALIZED:
			kdb_inittab();		/* Initialize Command Table */
			kdb_initbptab();	/* Initialize Breakpoints */
			break;
		case KDB_INIT_EARLY:
			kdb_cmd_init();	/* Build kdb_cmds tables */
			break;
		}
	}
	kdb_init_lvl = lvl;
}
gpl-2.0
fards/DellStreak5-GingerBread
drivers/staging/comedi/drivers/s626.c
810
107926
/*
   comedi/drivers/s626.c
   Sensoray s626 Comedi driver

   COMEDI - Linux Control and Measurement Device Interface
   Copyright (C) 2000 David A. Schleef <ds@schleef.org>

   Based on Sensoray Model 626 Linux driver Version 0.2
   Copyright (C) 2002-2004 Sensoray Co., Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

/*
   Driver: s626
   Description: Sensoray 626 driver
   Devices: [Sensoray] 626 (s626)
   Authors: Gianluca Palli <gpalli@deis.unibo.it>,
   Updated: Fri, 15 Feb 2008 10:28:42 +0000
   Status: experimental

   Configuration options:
   [0] - PCI bus of device (optional)
   [1] - PCI slot of device (optional)
   If bus/slot is not specified, the first supported
   PCI device found will be used.

   INSN_CONFIG instructions:
   analog input:
    none

   analog output:
    none

   digital channel:
    s626 has 3 dio subdevices (2,3 and 4) each with 16 i/o channels
    supported configuration options:
    INSN_CONFIG_DIO_QUERY
    COMEDI_INPUT
    COMEDI_OUTPUT

   encoder:
    Every channel must be configured before reading.

   Example code

    insn.insn=INSN_CONFIG;   //configuration instruction
    insn.n=1;                //number of operation (must be 1)
    insn.data=&initialvalue; //initial value loaded into encoder
                             //during configuration
    insn.subdev=5;           //encoder subdevice
    insn.chanspec=CR_PACK(encoder_channel,0,AREF_OTHER); //encoder_channel
                             //to configure

    comedi_do_insn(cf,&insn); //executing configuration
*/

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "../comedidev.h"

#include "comedi_pci.h"

#include "comedi_fc.h"
#include "s626.h"

MODULE_AUTHOR("Gianluca Palli <gpalli@deis.unibo.it>");
MODULE_DESCRIPTION("Sensoray 626 Comedi driver module");
MODULE_LICENSE("GPL");

/* Static capability description for one supported board model. */
struct s626_board {
	const char *name;	/* board name used for device matching */
	int ai_chans;		/* number of analog input channels */
	int ai_bits;		/* analog input resolution, in bits */
	int ao_chans;		/* number of analog output channels */
	int ao_bits;		/* analog output resolution, in bits */
	int dio_chans;		/* total digital i/o channels */
	int dio_banks;		/* number of 16-channel dio banks */
	int enc_chans;		/* number of encoder channels */
};

static const struct s626_board s626_boards[] = {
	{
	 .name = "s626",
	 .ai_chans = S626_ADC_CHANNELS,
	 .ai_bits = 14,
	 .ao_chans = S626_DAC_CHANNELS,
	 .ao_bits = 13,
	 .dio_chans = S626_DIO_CHANNELS,
	 .dio_banks = S626_DIO_BANKS,
	 .enc_chans = S626_ENCODER_CHANNELS,
	 }
};

/* shorthand for the board entry attached to a comedi device */
#define thisboard ((const struct s626_board *)dev->board_ptr)

#define PCI_VENDOR_ID_S626 0x1131
#define PCI_DEVICE_ID_S626 0x7146

/*
 * For devices with vendor:device id == 0x1131:0x7146 you must specify
 * also subvendor:subdevice ids, because otherwise it will conflict with
 * Philips SAA7146 media/dvb based cards.
 */
static DEFINE_PCI_DEVICE_TABLE(s626_pci_table) = {
	{PCI_VENDOR_ID_S626, PCI_DEVICE_ID_S626, 0x6000, 0x0272, 0, 0, 0},
	{0}
};

MODULE_DEVICE_TABLE(pci, s626_pci_table);

static int s626_attach(struct comedi_device *dev, struct comedi_devconfig *it);
static int s626_detach(struct comedi_device *dev);

static struct comedi_driver driver_s626 = {
	.driver_name = "s626",
	.module = THIS_MODULE,
	.attach = s626_attach,
	.detach = s626_detach,
};

/* Per-device driver state, hung off comedi_device->private. */
struct s626_private {
	struct pci_dev *pdev;
	void *base_addr;	/* mapped PCI register window */
	int got_regions;
	short allocatedBuf;
	uint8_t ai_cmd_running;	/* ai_cmd is running */
	uint8_t ai_continous;	/* continuous acquisition */
	int ai_sample_count;	/* number of samples to acquire */
	unsigned int ai_sample_timer;
	/* time between samples in  units of the timer */
	int ai_convert_count;	/* conversion counter */
	unsigned int ai_convert_timer;
	/* time between conversion in  units of the timer */
	uint16_t CounterIntEnabs;
	/* Counter interrupt enable  mask for MISC2 register. */
	uint8_t AdcItems;	/* Number of items in ADC poll  list. */
	struct bufferDMA RPSBuf;	/* DMA buffer used to hold ADC (RPS1) program. */
	struct bufferDMA ANABuf;
	/* DMA buffer used to receive ADC data and hold DAC data. */
	uint32_t *pDacWBuf;
	/* Pointer to logical adrs (kernel virtual address) of DMA buffer
	 * used to hold DAC  data. */
	uint16_t Dacpol;	/* Image of DAC polarity register. */
	uint8_t TrimSetpoint[12];	/* Images of TrimDAC setpoints */
	uint16_t ChargeEnabled;	/* Image of MISC2 Battery */
	/* Charge Enabled (0 or WRMISC2_CHARGE_ENABLE). */
	uint16_t WDInterval;	/* Image of MISC2 watchdog interval control bits. */
	uint32_t I2CAdrs;
	/* I2C device address for onboard EEPROM (board rev dependent). */
	/*   short         I2Cards; */
	unsigned int ao_readback[S626_DAC_CHANNELS];	/* cached AO values for readback */
};

/* Register-address set for one 16-bit dio bank; one instance per bank
 * (A/B/C) below so the dio code can be bank-agnostic. */
struct dio_private {
	uint16_t RDDIn;		/* read digital input */
	uint16_t WRDOut;	/* write digital output */
	uint16_t RDEdgSel;	/* read edge select */
	uint16_t WREdgSel;	/* write edge select */
	uint16_t RDCapSel;	/* read capture select */
	uint16_t WRCapSel;	/* write capture select */
	uint16_t RDCapFlg;	/* read capture flags */
	uint16_t RDIntSel;	/* read interrupt select */
	uint16_t WRIntSel;	/* write interrupt select */
};

static struct dio_private dio_private_A = {
	.RDDIn = LP_RDDINA,
	.WRDOut = LP_WRDOUTA,
	.RDEdgSel = LP_RDEDGSELA,
	.WREdgSel = LP_WREDGSELA,
	.RDCapSel = LP_RDCAPSELA,
	.WRCapSel = LP_WRCAPSELA,
	.RDCapFlg = LP_RDCAPFLGA,
	.RDIntSel = LP_RDINTSELA,
	.WRIntSel = LP_WRINTSELA,
};

static struct dio_private dio_private_B = {
	.RDDIn = LP_RDDINB,
	.WRDOut = LP_WRDOUTB,
	.RDEdgSel = LP_RDEDGSELB,
	.WREdgSel = LP_WREDGSELB,
	.RDCapSel = LP_RDCAPSELB,
	.WRCapSel = LP_WRCAPSELB,
	.RDCapFlg = LP_RDCAPFLGB,
	.RDIntSel = LP_RDINTSELB,
	.WRIntSel = LP_WRINTSELB,
};

static struct dio_private dio_private_C = {
	.RDDIn = LP_RDDINC,
	.WRDOut = LP_WRDOUTC,
	.RDEdgSel = LP_RDEDGSELC,
	.WREdgSel = LP_WREDGSELC,
	.RDCapSel = LP_RDCAPSELC,
	.WRCapSel = LP_WRCAPSELC,
	.RDCapFlg = LP_RDCAPFLGC,
	.RDIntSel = LP_RDINTSELC,
	.WRIntSel = LP_WRINTSELC,
};

/* to group dio devices (48 bits mask and data are not allowed ???)
static struct dio_private *dio_private_word[]={ &dio_private_A, &dio_private_B, &dio_private_C, }; */ #define devpriv ((struct s626_private *)dev->private) #define diopriv ((struct dio_private *)s->private) COMEDI_PCI_INITCLEANUP_NOMODULE(driver_s626, s626_pci_table); /* ioctl routines */ static int s626_ai_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); /* static int s626_ai_rinsn(struct comedi_device *dev,struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data); */ static int s626_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int s626_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int s626_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static int s626_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int s626_ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int s626_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int s626_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int s626_dio_set_irq(struct comedi_device *dev, unsigned int chan); static int s626_dio_reset_irq(struct comedi_device *dev, unsigned int gruop, unsigned int mask); static int s626_dio_clear_irq(struct comedi_device *dev); static int s626_enc_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int s626_enc_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int 
s626_enc_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int s626_ns_to_timer(int *nanosec, int round_mode); static int s626_ai_load_polllist(uint8_t * ppl, struct comedi_cmd *cmd); static int s626_ai_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trignum); static irqreturn_t s626_irq_handler(int irq, void *d); static unsigned int s626_ai_reg_to_uint(int data); /* static unsigned int s626_uint_to_reg(struct comedi_subdevice *s, int data); */ /* end ioctl routines */ /* internal routines */ static void s626_dio_init(struct comedi_device *dev); static void ResetADC(struct comedi_device *dev, uint8_t * ppl); static void LoadTrimDACs(struct comedi_device *dev); static void WriteTrimDAC(struct comedi_device *dev, uint8_t LogicalChan, uint8_t DacData); static uint8_t I2Cread(struct comedi_device *dev, uint8_t addr); static uint32_t I2Chandshake(struct comedi_device *dev, uint32_t val); static void SetDAC(struct comedi_device *dev, uint16_t chan, short dacdata); static void SendDAC(struct comedi_device *dev, uint32_t val); static void WriteMISC2(struct comedi_device *dev, uint16_t NewImage); static void DEBItransfer(struct comedi_device *dev); static uint16_t DEBIread(struct comedi_device *dev, uint16_t addr); static void DEBIwrite(struct comedi_device *dev, uint16_t addr, uint16_t wdata); static void DEBIreplace(struct comedi_device *dev, uint16_t addr, uint16_t mask, uint16_t wdata); static void CloseDMAB(struct comedi_device *dev, struct bufferDMA *pdma, size_t bsize); /* COUNTER OBJECT ------------------------------------------------ */ struct enc_private { /* Pointers to functions that differ for A and B counters: */ uint16_t(*GetEnable) (struct comedi_device * dev, struct enc_private *); /* Return clock enable. */ uint16_t(*GetIntSrc) (struct comedi_device * dev, struct enc_private *); /* Return interrupt source. 
*/ uint16_t(*GetLoadTrig) (struct comedi_device * dev, struct enc_private *); /* Return preload trigger source. */ uint16_t(*GetMode) (struct comedi_device * dev, struct enc_private *); /* Return standardized operating mode. */ void (*PulseIndex) (struct comedi_device * dev, struct enc_private *); /* Generate soft index strobe. */ void (*SetEnable) (struct comedi_device * dev, struct enc_private *, uint16_t enab); /* Program clock enable. */ void (*SetIntSrc) (struct comedi_device * dev, struct enc_private *, uint16_t IntSource); /* Program interrupt source. */ void (*SetLoadTrig) (struct comedi_device * dev, struct enc_private *, uint16_t Trig); /* Program preload trigger source. */ void (*SetMode) (struct comedi_device * dev, struct enc_private *, uint16_t Setup, uint16_t DisableIntSrc); /* Program standardized operating mode. */ void (*ResetCapFlags) (struct comedi_device * dev, struct enc_private *); /* Reset event capture flags. */ uint16_t MyCRA; /* Address of CRA register. */ uint16_t MyCRB; /* Address of CRB register. */ uint16_t MyLatchLsw; /* Address of Latch least-significant-word */ /* register. */ uint16_t MyEventBits[4]; /* Bit translations for IntSrc -->RDMISC2. 
*/ };

/* Shorthand for the private data of the encoder (counter) subdevice,
 * which s626_attach() sets up as subdevice number 5. */
#define encpriv ((struct enc_private *)(dev->subdevices+5)->private)

/* counters routines */
/* Forward declarations for the counter helper routines.  The _A variants
 * serve encoder channels 0-2 and the _B variants channels 3-5 (see the
 * enc_private_data[] constructor table, which binds them per channel). */
static void s626_timer_load(struct comedi_device *dev, struct enc_private *k,
			    int tick);
static uint32_t ReadLatch(struct comedi_device *dev, struct enc_private *k);
static void ResetCapFlags_A(struct comedi_device *dev, struct enc_private *k);
static void ResetCapFlags_B(struct comedi_device *dev, struct enc_private *k);
static uint16_t GetMode_A(struct comedi_device *dev, struct enc_private *k);
static uint16_t GetMode_B(struct comedi_device *dev, struct enc_private *k);
static void SetMode_A(struct comedi_device *dev, struct enc_private *k,
		      uint16_t Setup, uint16_t DisableIntSrc);
static void SetMode_B(struct comedi_device *dev, struct enc_private *k,
		      uint16_t Setup, uint16_t DisableIntSrc);
static void SetEnable_A(struct comedi_device *dev, struct enc_private *k,
			uint16_t enab);
static void SetEnable_B(struct comedi_device *dev, struct enc_private *k,
			uint16_t enab);
static uint16_t GetEnable_A(struct comedi_device *dev, struct enc_private *k);
static uint16_t GetEnable_B(struct comedi_device *dev, struct enc_private *k);
static void SetLatchSource(struct comedi_device *dev, struct enc_private *k,
			   uint16_t value);
/* static uint16_t GetLatchSource(struct comedi_device *dev, struct enc_private *k ); */
static void SetLoadTrig_A(struct comedi_device *dev, struct enc_private *k,
			  uint16_t Trig);
static void SetLoadTrig_B(struct comedi_device *dev, struct enc_private *k,
			  uint16_t Trig);
static uint16_t GetLoadTrig_A(struct comedi_device *dev, struct enc_private *k);
static uint16_t GetLoadTrig_B(struct comedi_device *dev, struct enc_private *k);
static void SetIntSrc_B(struct comedi_device *dev, struct enc_private *k,
			uint16_t IntSource);
static void SetIntSrc_A(struct comedi_device *dev, struct enc_private *k,
			uint16_t IntSource);
static uint16_t GetIntSrc_A(struct comedi_device *dev, struct enc_private *k);
static uint16_t GetIntSrc_B(struct comedi_device *dev,
			    struct enc_private *k);
/* static void SetClkMult(struct comedi_device *dev, struct enc_private *k, uint16_t value ) ; */
/* static uint16_t GetClkMult(struct comedi_device *dev, struct enc_private *k ) ; */
/* static void SetIndexPol(struct comedi_device *dev, struct enc_private *k, uint16_t value ); */
/* static uint16_t GetClkPol(struct comedi_device *dev, struct enc_private *k ) ; */
/* static void SetIndexSrc( struct comedi_device *dev,struct enc_private *k, uint16_t value ); */
/* static uint16_t GetClkSrc( struct comedi_device *dev,struct enc_private *k ); */
/* static void SetIndexSrc( struct comedi_device *dev,struct enc_private *k, uint16_t value ); */
/* static uint16_t GetIndexSrc( struct comedi_device *dev,struct enc_private *k ); */
static void PulseIndex_A(struct comedi_device *dev, struct enc_private *k);
static void PulseIndex_B(struct comedi_device *dev, struct enc_private *k);
static void Preload(struct comedi_device *dev, struct enc_private *k,
		    uint32_t value);
static void CountersInit(struct comedi_device *dev);
/* end internal routines */

/* Counter objects constructor. */

/* Counter overflow/index event flag masks for RDMISC2.
 * Channels 0-2 and channels >2 occupy different bit positions, hence the
 * ternary selection on C in each mask. */
#define INDXMASK(C) (1 << (((C) > 2) ? ((C) * 2 - 1) : ((C) * 2 + 4)))
#define OVERMASK(C) (1 << (((C) > 2) ? ((C) * 2 + 5) : ((C) * 2 + 10)))
/* Initializer list for a channel's 4-entry MyEventBits[] table:
 * { none, overflow only, index only, overflow | index }. */
#define EVBITS(C) { 0, OVERMASK(C), INDXMASK(C), OVERMASK(C) | INDXMASK(C) }

/* Translation table to map IntSrc into equivalent RDMISC2 event flag bits.
*/ /* static const uint16_t EventBits[][4] = { EVBITS(0), EVBITS(1), EVBITS(2), EVBITS(3), EVBITS(4), EVBITS(5) }; */ /* struct enc_private; */ static struct enc_private enc_private_data[] = { { .GetEnable = GetEnable_A, .GetIntSrc = GetIntSrc_A, .GetLoadTrig = GetLoadTrig_A, .GetMode = GetMode_A, .PulseIndex = PulseIndex_A, .SetEnable = SetEnable_A, .SetIntSrc = SetIntSrc_A, .SetLoadTrig = SetLoadTrig_A, .SetMode = SetMode_A, .ResetCapFlags = ResetCapFlags_A, .MyCRA = LP_CR0A, .MyCRB = LP_CR0B, .MyLatchLsw = LP_CNTR0ALSW, .MyEventBits = EVBITS(0), }, { .GetEnable = GetEnable_A, .GetIntSrc = GetIntSrc_A, .GetLoadTrig = GetLoadTrig_A, .GetMode = GetMode_A, .PulseIndex = PulseIndex_A, .SetEnable = SetEnable_A, .SetIntSrc = SetIntSrc_A, .SetLoadTrig = SetLoadTrig_A, .SetMode = SetMode_A, .ResetCapFlags = ResetCapFlags_A, .MyCRA = LP_CR1A, .MyCRB = LP_CR1B, .MyLatchLsw = LP_CNTR1ALSW, .MyEventBits = EVBITS(1), }, { .GetEnable = GetEnable_A, .GetIntSrc = GetIntSrc_A, .GetLoadTrig = GetLoadTrig_A, .GetMode = GetMode_A, .PulseIndex = PulseIndex_A, .SetEnable = SetEnable_A, .SetIntSrc = SetIntSrc_A, .SetLoadTrig = SetLoadTrig_A, .SetMode = SetMode_A, .ResetCapFlags = ResetCapFlags_A, .MyCRA = LP_CR2A, .MyCRB = LP_CR2B, .MyLatchLsw = LP_CNTR2ALSW, .MyEventBits = EVBITS(2), }, { .GetEnable = GetEnable_B, .GetIntSrc = GetIntSrc_B, .GetLoadTrig = GetLoadTrig_B, .GetMode = GetMode_B, .PulseIndex = PulseIndex_B, .SetEnable = SetEnable_B, .SetIntSrc = SetIntSrc_B, .SetLoadTrig = SetLoadTrig_B, .SetMode = SetMode_B, .ResetCapFlags = ResetCapFlags_B, .MyCRA = LP_CR0A, .MyCRB = LP_CR0B, .MyLatchLsw = LP_CNTR0BLSW, .MyEventBits = EVBITS(3), }, { .GetEnable = GetEnable_B, .GetIntSrc = GetIntSrc_B, .GetLoadTrig = GetLoadTrig_B, .GetMode = GetMode_B, .PulseIndex = PulseIndex_B, .SetEnable = SetEnable_B, .SetIntSrc = SetIntSrc_B, .SetLoadTrig = SetLoadTrig_B, .SetMode = SetMode_B, .ResetCapFlags = ResetCapFlags_B, .MyCRA = LP_CR1A, .MyCRB = LP_CR1B, .MyLatchLsw = LP_CNTR1BLSW, 
.MyEventBits = EVBITS(4), }, { .GetEnable = GetEnable_B, .GetIntSrc = GetIntSrc_B, .GetLoadTrig = GetLoadTrig_B, .GetMode = GetMode_B, .PulseIndex = PulseIndex_B, .SetEnable = SetEnable_B, .SetIntSrc = SetIntSrc_B, .SetLoadTrig = SetLoadTrig_B, .SetMode = SetMode_B, .ResetCapFlags = ResetCapFlags_B, .MyCRA = LP_CR2A, .MyCRB = LP_CR2B, .MyLatchLsw = LP_CNTR2BLSW, .MyEventBits = EVBITS(5), }, }; /* enab/disable a function or test status bit(s) that are accessed */ /* through Main Control Registers 1 or 2. */ #define MC_ENABLE(REGADRS, CTRLWORD) writel(((uint32_t)(CTRLWORD) << 16) | (uint32_t)(CTRLWORD), devpriv->base_addr+(REGADRS)) #define MC_DISABLE(REGADRS, CTRLWORD) writel((uint32_t)(CTRLWORD) << 16 , devpriv->base_addr+(REGADRS)) #define MC_TEST(REGADRS, CTRLWORD) ((readl(devpriv->base_addr+(REGADRS)) & CTRLWORD) != 0) /* #define WR7146(REGARDS,CTRLWORD) writel(CTRLWORD,(uint32_t)(devpriv->base_addr+(REGARDS))) */ #define WR7146(REGARDS, CTRLWORD) writel(CTRLWORD, devpriv->base_addr+(REGARDS)) /* #define RR7146(REGARDS) readl((uint32_t)(devpriv->base_addr+(REGARDS))) */ #define RR7146(REGARDS) readl(devpriv->base_addr+(REGARDS)) #define BUGFIX_STREG(REGADRS) (REGADRS - 4) /* Write a time slot control record to TSL2. */ #define VECTPORT(VECTNUM) (P_TSL2 + ((VECTNUM) << 2)) #define SETVECT(VECTNUM, VECTVAL) WR7146(VECTPORT(VECTNUM), (VECTVAL)) /* Code macros used for constructing I2C command bytes. 
*/ #define I2C_B2(ATTR, VAL) (((ATTR) << 6) | ((VAL) << 24)) #define I2C_B1(ATTR, VAL) (((ATTR) << 4) | ((VAL) << 16)) #define I2C_B0(ATTR, VAL) (((ATTR) << 2) | ((VAL) << 8)) static const struct comedi_lrange s626_range_table = { 2, { RANGE(-5, 5), RANGE(-10, 10), } }; static int s626_attach(struct comedi_device *dev, struct comedi_devconfig *it) { /* uint8_t PollList; */ /* uint16_t AdcData; */ /* uint16_t StartVal; */ /* uint16_t index; */ /* unsigned int data[16]; */ int result; int i; int ret; resource_size_t resourceStart; dma_addr_t appdma; struct comedi_subdevice *s; const struct pci_device_id *ids; struct pci_dev *pdev = NULL; if (alloc_private(dev, sizeof(struct s626_private)) < 0) return -ENOMEM; for (i = 0; i < (ARRAY_SIZE(s626_pci_table) - 1) && !pdev; i++) { ids = &s626_pci_table[i]; do { pdev = pci_get_subsys(ids->vendor, ids->device, ids->subvendor, ids->subdevice, pdev); if ((it->options[0] || it->options[1]) && pdev) { /* matches requested bus/slot */ if (pdev->bus->number == it->options[0] && PCI_SLOT(pdev->devfn) == it->options[1]) break; } else break; } while (1); } devpriv->pdev = pdev; if (pdev == NULL) { printk("s626_attach: Board not present!!!\n"); return -ENODEV; } result = comedi_pci_enable(pdev, "s626"); if (result < 0) { printk("s626_attach: comedi_pci_enable fails\n"); return -ENODEV; } devpriv->got_regions = 1; resourceStart = pci_resource_start(devpriv->pdev, 0); devpriv->base_addr = ioremap(resourceStart, SIZEOF_ADDRESS_SPACE); if (devpriv->base_addr == NULL) { printk("s626_attach: IOREMAP failed\n"); return -ENODEV; } if (devpriv->base_addr) { /* disable master interrupt */ writel(0, devpriv->base_addr + P_IER); /* soft reset */ writel(MC1_SOFT_RESET, devpriv->base_addr + P_MC1); /* DMA FIXME DMA// */ DEBUG("s626_attach: DMA ALLOCATION\n"); /* adc buffer allocation */ devpriv->allocatedBuf = 0; devpriv->ANABuf.LogicalBase = pci_alloc_consistent(devpriv->pdev, DMABUF_SIZE, &appdma); if (devpriv->ANABuf.LogicalBase == NULL) { 
printk("s626_attach: DMA Memory mapping error\n"); return -ENOMEM; } devpriv->ANABuf.PhysicalBase = appdma; DEBUG ("s626_attach: AllocDMAB ADC Logical=%p, bsize=%d, Physical=0x%x\n", devpriv->ANABuf.LogicalBase, DMABUF_SIZE, (uint32_t) devpriv->ANABuf.PhysicalBase); devpriv->allocatedBuf++; devpriv->RPSBuf.LogicalBase = pci_alloc_consistent(devpriv->pdev, DMABUF_SIZE, &appdma); if (devpriv->RPSBuf.LogicalBase == NULL) { printk("s626_attach: DMA Memory mapping error\n"); return -ENOMEM; } devpriv->RPSBuf.PhysicalBase = appdma; DEBUG ("s626_attach: AllocDMAB RPS Logical=%p, bsize=%d, Physical=0x%x\n", devpriv->RPSBuf.LogicalBase, DMABUF_SIZE, (uint32_t) devpriv->RPSBuf.PhysicalBase); devpriv->allocatedBuf++; } dev->board_ptr = s626_boards; dev->board_name = thisboard->name; if (alloc_subdevices(dev, 6) < 0) return -ENOMEM; dev->iobase = (unsigned long)devpriv->base_addr; dev->irq = devpriv->pdev->irq; /* set up interrupt handler */ if (dev->irq == 0) { printk(" unknown irq (bad)\n"); } else { ret = request_irq(dev->irq, s626_irq_handler, IRQF_SHARED, "s626", dev); if (ret < 0) { printk(" irq not available\n"); dev->irq = 0; } } DEBUG("s626_attach: -- it opts %d,%d -- \n", it->options[0], it->options[1]); s = dev->subdevices + 0; /* analog input subdevice */ dev->read_subdev = s; /* we support single-ended (ground) and differential */ s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_CMD_READ; s->n_chan = thisboard->ai_chans; s->maxdata = (0xffff >> 2); s->range_table = &s626_range_table; s->len_chanlist = thisboard->ai_chans; /* This is the maximum chanlist length that the board can handle */ s->insn_config = s626_ai_insn_config; s->insn_read = s626_ai_insn_read; s->do_cmd = s626_ai_cmd; s->do_cmdtest = s626_ai_cmdtest; s->cancel = s626_ai_cancel; s = dev->subdevices + 1; /* analog output subdevice */ s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = thisboard->ao_chans; s->maxdata = (0x3fff); s->range_table 
= &range_bipolar10; s->insn_write = s626_ao_winsn; s->insn_read = s626_ao_rinsn; s = dev->subdevices + 2; /* digital I/O subdevice */ s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = S626_DIO_CHANNELS; s->maxdata = 1; s->io_bits = 0xffff; s->private = &dio_private_A; s->range_table = &range_digital; s->insn_config = s626_dio_insn_config; s->insn_bits = s626_dio_insn_bits; s = dev->subdevices + 3; /* digital I/O subdevice */ s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = 16; s->maxdata = 1; s->io_bits = 0xffff; s->private = &dio_private_B; s->range_table = &range_digital; s->insn_config = s626_dio_insn_config; s->insn_bits = s626_dio_insn_bits; s = dev->subdevices + 4; /* digital I/O subdevice */ s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = 16; s->maxdata = 1; s->io_bits = 0xffff; s->private = &dio_private_C; s->range_table = &range_digital; s->insn_config = s626_dio_insn_config; s->insn_bits = s626_dio_insn_bits; s = dev->subdevices + 5; /* encoder (counter) subdevice */ s->type = COMEDI_SUBD_COUNTER; s->subdev_flags = SDF_WRITABLE | SDF_READABLE | SDF_LSAMPL; s->n_chan = thisboard->enc_chans; s->private = enc_private_data; s->insn_config = s626_enc_insn_config; s->insn_read = s626_enc_insn_read; s->insn_write = s626_enc_insn_write; s->maxdata = 0xffffff; s->range_table = &range_unknown; /* stop ai_command */ devpriv->ai_cmd_running = 0; if (devpriv->base_addr && (devpriv->allocatedBuf == 2)) { dma_addr_t pPhysBuf; uint16_t chan; /* enab DEBI and audio pins, enable I2C interface. */ MC_ENABLE(P_MC1, MC1_DEBI | MC1_AUDIO | MC1_I2C); /* Configure DEBI operating mode. */ WR7146(P_DEBICFG, DEBI_CFG_SLAVE16 /* Local bus is 16 */ /* bits wide. */ | (DEBI_TOUT << DEBI_CFG_TOUT_BIT) /* Declare DEBI */ /* transfer timeout */ /* interval. */ |DEBI_SWAP /* Set up byte lane */ /* steering. 
*/ | DEBI_CFG_INTEL); /* Intel-compatible */ /* local bus (DEBI */ /* never times out). */ DEBUG("s626_attach: %d debi init -- %d\n", DEBI_CFG_SLAVE16 | (DEBI_TOUT << DEBI_CFG_TOUT_BIT) | DEBI_SWAP | DEBI_CFG_INTEL, DEBI_CFG_INTEL | DEBI_CFG_TOQ | DEBI_CFG_INCQ | DEBI_CFG_16Q); /* DEBI INIT S626 WR7146( P_DEBICFG, DEBI_CFG_INTEL | DEBI_CFG_TOQ */ /* | DEBI_CFG_INCQ| DEBI_CFG_16Q); //end */ /* Paging is disabled. */ WR7146(P_DEBIPAGE, DEBI_PAGE_DISABLE); /* Disable MMU paging. */ /* Init GPIO so that ADC Start* is negated. */ WR7146(P_GPIO, GPIO_BASE | GPIO1_HI); /* IsBoardRevA is a boolean that indicates whether the board is RevA. * * VERSION 2.01 CHANGE: REV A & B BOARDS NOW SUPPORTED BY DYNAMIC * EEPROM ADDRESS SELECTION. Initialize the I2C interface, which * is used to access the onboard serial EEPROM. The EEPROM's I2C * DeviceAddress is hardwired to a value that is dependent on the * 626 board revision. On all board revisions, the EEPROM stores * TrimDAC calibration constants for analog I/O. On RevB and * higher boards, the DeviceAddress is hardwired to 0 to enable * the EEPROM to also store the PCI SubVendorID and SubDeviceID; * this is the address at which the SAA7146 expects a * configuration EEPROM to reside. On RevA boards, the EEPROM * device address, which is hardwired to 4, prevents the SAA7146 * from retrieving PCI sub-IDs, so the SAA7146 uses its built-in * default values, instead. */ /* devpriv->I2Cards= IsBoardRevA ? 0xA8 : 0xA0; // Set I2C EEPROM */ /* DeviceType (0xA0) */ /* and DeviceAddress<<1. */ devpriv->I2CAdrs = 0xA0; /* I2C device address for onboard */ /* eeprom(revb) */ /* Issue an I2C ABORT command to halt any I2C operation in */ /* progress and reset BUSY flag. */ WR7146(P_I2CSTAT, I2C_CLKSEL | I2C_ABORT); /* Write I2C control: abort any I2C activity. */ MC_ENABLE(P_MC2, MC2_UPLD_IIC); /* Invoke command upload */ while ((RR7146(P_MC2) & MC2_UPLD_IIC) == 0) ; /* and wait for upload to complete. 
*/ /* Per SAA7146 data sheet, write to STATUS reg twice to * reset all I2C error flags. */ for (i = 0; i < 2; i++) { WR7146(P_I2CSTAT, I2C_CLKSEL); /* Write I2C control: reset error flags. */ MC_ENABLE(P_MC2, MC2_UPLD_IIC); /* Invoke command upload */ while (!MC_TEST(P_MC2, MC2_UPLD_IIC)) ; /* and wait for upload to complete. */ } /* Init audio interface functional attributes: set DAC/ADC * serial clock rates, invert DAC serial clock so that * DAC data setup times are satisfied, enable DAC serial * clock out. */ WR7146(P_ACON2, ACON2_INIT); /* Set up TSL1 slot list, which is used to control the * accumulation of ADC data: RSD1 = shift data in on SD1. * SIB_A1 = store data uint8_t at next available location in * FB BUFFER1 register. */ WR7146(P_TSL1, RSD1 | SIB_A1); /* Fetch ADC high data uint8_t. */ WR7146(P_TSL1 + 4, RSD1 | SIB_A1 | EOS); /* Fetch ADC low data uint8_t; end of TSL1. */ /* enab TSL1 slot list so that it executes all the time. */ WR7146(P_ACON1, ACON1_ADCSTART); /* Initialize RPS registers used for ADC. */ /* Physical start of RPS program. */ WR7146(P_RPSADDR1, (uint32_t) devpriv->RPSBuf.PhysicalBase); WR7146(P_RPSPAGE1, 0); /* RPS program performs no explicit mem writes. */ WR7146(P_RPS1_TOUT, 0); /* Disable RPS timeouts. */ /* SAA7146 BUG WORKAROUND. Initialize SAA7146 ADC interface * to a known state by invoking ADCs until FB BUFFER 1 * register shows that it is correctly receiving ADC data. * This is necessary because the SAA7146 ADC interface does * not start up in a defined state after a PCI reset. */ /* PollList = EOPL; // Create a simple polling */ /* // list for analog input */ /* // channel 0. */ /* ResetADC( dev, &PollList ); */ /* s626_ai_rinsn(dev,dev->subdevices,NULL,data); //( &AdcData ); // */ /* //Get initial ADC */ /* //value. */ /* StartVal = data[0]; */ /* // VERSION 2.01 CHANGE: TIMEOUT ADDED TO PREVENT HANGED EXECUTION. */ /* // Invoke ADCs until the new ADC value differs from the initial */ /* // value or a timeout occurs. 
The timeout protects against the */ /* // possibility that the driver is restarting and the ADC data is a */ /* // fixed value resulting from the applied ADC analog input being */ /* // unusually quiet or at the rail. */ /* for ( index = 0; index < 500; index++ ) */ /* { */ /* s626_ai_rinsn(dev,dev->subdevices,NULL,data); */ /* AdcData = data[0]; //ReadADC( &AdcData ); */ /* if ( AdcData != StartVal ) */ /* break; */ /* } */ /* end initADC */ /* init the DAC interface */ /* Init Audio2's output DMAC attributes: burst length = 1 * DWORD, threshold = 1 DWORD. */ WR7146(P_PCI_BT_A, 0); /* Init Audio2's output DMA physical addresses. The protection * address is set to 1 DWORD past the base address so that a * single DWORD will be transferred each time a DMA transfer is * enabled. */ pPhysBuf = devpriv->ANABuf.PhysicalBase + (DAC_WDMABUF_OS * sizeof(uint32_t)); WR7146(P_BASEA2_OUT, (uint32_t) pPhysBuf); /* Buffer base adrs. */ WR7146(P_PROTA2_OUT, (uint32_t) (pPhysBuf + sizeof(uint32_t))); /* Protection address. */ /* Cache Audio2's output DMA buffer logical address. This is * where DAC data is buffered for A2 output DMA transfers. */ devpriv->pDacWBuf = (uint32_t *) devpriv->ANABuf.LogicalBase + DAC_WDMABUF_OS; /* Audio2's output channels does not use paging. The protection * violation handling bit is set so that the DMAC will * automatically halt and its PCI address pointer will be reset * when the protection address is reached. */ WR7146(P_PAGEA2_OUT, 8); /* Initialize time slot list 2 (TSL2), which is used to control * the clock generation for and serialization of data to be sent * to the DAC devices. Slot 0 is a NOP that is used to trap TSL * execution; this permits other slots to be safely modified * without first turning off the TSL sequencer (which is * apparently impossible to do). 
Also, SD3 (which is driven by a * pull-up resistor) is shifted in and stored to the MSB of * FB_BUFFER2 to be used as evidence that the slot sequence has * not yet finished executing. */ SETVECT(0, XSD2 | RSD3 | SIB_A2 | EOS); /* Slot 0: Trap TSL execution, shift 0xFF into FB_BUFFER2. */ /* Initialize slot 1, which is constant. Slot 1 causes a * DWORD to be transferred from audio channel 2's output FIFO * to the FIFO's output buffer so that it can be serialized * and sent to the DAC during subsequent slots. All remaining * slots are dynamically populated as required by the target * DAC device. */ SETVECT(1, LF_A2); /* Slot 1: Fetch DWORD from Audio2's output FIFO. */ /* Start DAC's audio interface (TSL2) running. */ WR7146(P_ACON1, ACON1_DACSTART); /* end init DAC interface */ /* Init Trim DACs to calibrated values. Do it twice because the * SAA7146 audio channel does not always reset properly and * sometimes causes the first few TrimDAC writes to malfunction. */ LoadTrimDACs(dev); LoadTrimDACs(dev); /* Insurance. */ /* Manually init all gate array hardware in case this is a soft * reset (we have no way of determining whether this is a warm * or cold start). This is necessary because the gate array will * reset only in response to a PCI hard reset; there is no soft * reset function. */ /* Init all DAC outputs to 0V and init all DAC setpoint and * polarity images. */ for (chan = 0; chan < S626_DAC_CHANNELS; chan++) SetDAC(dev, chan, 0); /* Init image of WRMISC2 Battery Charger Enabled control bit. * This image is used when the state of the charger control bit, * which has no direct hardware readback mechanism, is queried. */ devpriv->ChargeEnabled = 0; /* Init image of watchdog timer interval in WRMISC2. This image * maintains the value of the control bits of MISC2 are * continuously reset to zero as long as the WD timer is disabled. */ devpriv->WDInterval = 0; /* Init Counter Interrupt enab mask for RDMISC2. 
This mask is * applied against MISC2 when testing to determine which timer * events are requesting interrupt service. */ devpriv->CounterIntEnabs = 0; /* Init counters. */ CountersInit(dev); /* Without modifying the state of the Battery Backup enab, disable * the watchdog timer, set DIO channels 0-5 to operate in the * standard DIO (vs. counter overflow) mode, disable the battery * charger, and reset the watchdog interval selector to zero. */ WriteMISC2(dev, (uint16_t) (DEBIread(dev, LP_RDMISC2) & MISC2_BATT_ENABLE)); /* Initialize the digital I/O subsystem. */ s626_dio_init(dev); /* enable interrupt test */ /* writel(IRQ_GPIO3 | IRQ_RPS1,devpriv->base_addr+P_IER); */ } DEBUG("s626_attach: comedi%d s626 attached %04x\n", dev->minor, (uint32_t) devpriv->base_addr); return 1; } static unsigned int s626_ai_reg_to_uint(int data) { unsigned int tempdata; tempdata = (data >> 18); if (tempdata & 0x2000) tempdata &= 0x1fff; else tempdata += (1 << 13); return tempdata; } /* static unsigned int s626_uint_to_reg(struct comedi_subdevice *s, int data){ */ /* return 0; */ /* } */ static irqreturn_t s626_irq_handler(int irq, void *d) { struct comedi_device *dev = d; struct comedi_subdevice *s; struct comedi_cmd *cmd; struct enc_private *k; unsigned long flags; int32_t *readaddr; uint32_t irqtype, irqstatus; int i = 0; short tempdata; uint8_t group; uint16_t irqbit; DEBUG("s626_irq_handler: interrupt request recieved!!!\n"); if (dev->attached == 0) return IRQ_NONE; /* lock to avoid race with comedi_poll */ spin_lock_irqsave(&dev->spinlock, flags); /* save interrupt enable register state */ irqstatus = readl(devpriv->base_addr + P_IER); /* read interrupt type */ irqtype = readl(devpriv->base_addr + P_ISR); /* disable master interrupt */ writel(0, devpriv->base_addr + P_IER); /* clear interrupt */ writel(irqtype, devpriv->base_addr + P_ISR); /* do somethings */ DEBUG("s626_irq_handler: interrupt type %d\n", irqtype); switch (irqtype) { case IRQ_RPS1: /* end_of_scan occurs */ 
DEBUG("s626_irq_handler: RPS1 irq detected\n"); /* manage ai subdevice */ s = dev->subdevices; cmd = &(s->async->cmd); /* Init ptr to DMA buffer that holds new ADC data. We skip the * first uint16_t in the buffer because it contains junk data from * the final ADC of the previous poll list scan. */ readaddr = (int32_t *) devpriv->ANABuf.LogicalBase + 1; /* get the data and hand it over to comedi */ for (i = 0; i < (s->async->cmd.chanlist_len); i++) { /* Convert ADC data to 16-bit integer values and copy to application */ /* buffer. */ tempdata = s626_ai_reg_to_uint((int)*readaddr); readaddr++; /* put data into read buffer */ /* comedi_buf_put(s->async, tempdata); */ if (cfc_write_to_buffer(s, tempdata) == 0) printk ("s626_irq_handler: cfc_write_to_buffer error!\n"); DEBUG("s626_irq_handler: ai channel %d acquired: %d\n", i, tempdata); } /* end of scan occurs */ s->async->events |= COMEDI_CB_EOS; if (!(devpriv->ai_continous)) devpriv->ai_sample_count--; if (devpriv->ai_sample_count <= 0) { devpriv->ai_cmd_running = 0; /* Stop RPS program. 
*/ MC_DISABLE(P_MC1, MC1_ERPS1); /* send end of acquisition */ s->async->events |= COMEDI_CB_EOA; /* disable master interrupt */ irqstatus = 0; } if (devpriv->ai_cmd_running && cmd->scan_begin_src == TRIG_EXT) { DEBUG ("s626_irq_handler: enable interrupt on dio channel %d\n", cmd->scan_begin_arg); s626_dio_set_irq(dev, cmd->scan_begin_arg); DEBUG("s626_irq_handler: External trigger is set!!!\n"); } /* tell comedi that data is there */ DEBUG("s626_irq_handler: events %d\n", s->async->events); comedi_event(dev, s); break; case IRQ_GPIO3: /* check dio and conter interrupt */ DEBUG("s626_irq_handler: GPIO3 irq detected\n"); /* manage ai subdevice */ s = dev->subdevices; cmd = &(s->async->cmd); /* s626_dio_clear_irq(dev); */ for (group = 0; group < S626_DIO_BANKS; group++) { irqbit = 0; /* read interrupt type */ irqbit = DEBIread(dev, ((struct dio_private *)(dev-> subdevices + 2 + group)-> private)->RDCapFlg); /* check if interrupt is generated from dio channels */ if (irqbit) { s626_dio_reset_irq(dev, group, irqbit); DEBUG ("s626_irq_handler: check interrupt on dio group %d %d\n", group, i); if (devpriv->ai_cmd_running) { /* check if interrupt is an ai acquisition start trigger */ if ((irqbit >> (cmd->start_arg - (16 * group))) == 1 && cmd->start_src == TRIG_EXT) { DEBUG ("s626_irq_handler: Edge capture interrupt recieved from channel %d\n", cmd->start_arg); /* Start executing the RPS program. */ MC_ENABLE(P_MC1, MC1_ERPS1); DEBUG ("s626_irq_handler: aquisition start triggered!!!\n"); if (cmd->scan_begin_src == TRIG_EXT) { DEBUG ("s626_ai_cmd: enable interrupt on dio channel %d\n", cmd-> scan_begin_arg); s626_dio_set_irq(dev, cmd->scan_begin_arg); DEBUG ("s626_irq_handler: External scan trigger is set!!!\n"); } } if ((irqbit >> (cmd->scan_begin_arg - (16 * group))) == 1 && cmd->scan_begin_src == TRIG_EXT) { DEBUG ("s626_irq_handler: Edge capture interrupt recieved from channel %d\n", cmd->scan_begin_arg); /* Trigger ADC scan loop start by setting RPS Signal 0. 
*/ MC_ENABLE(P_MC2, MC2_ADC_RPS); DEBUG ("s626_irq_handler: scan triggered!!! %d\n", devpriv->ai_sample_count); if (cmd->convert_src == TRIG_EXT) { DEBUG ("s626_ai_cmd: enable interrupt on dio channel %d group %d\n", cmd->convert_arg - (16 * group), group); devpriv->ai_convert_count = cmd->chanlist_len; s626_dio_set_irq(dev, cmd->convert_arg); DEBUG ("s626_irq_handler: External convert trigger is set!!!\n"); } if (cmd->convert_src == TRIG_TIMER) { k = &encpriv[5]; devpriv->ai_convert_count = cmd->chanlist_len; k->SetEnable(dev, k, CLKENAB_ALWAYS); } } if ((irqbit >> (cmd->convert_arg - (16 * group))) == 1 && cmd->convert_src == TRIG_EXT) { DEBUG ("s626_irq_handler: Edge capture interrupt recieved from channel %d\n", cmd->convert_arg); /* Trigger ADC scan loop start by setting RPS Signal 0. */ MC_ENABLE(P_MC2, MC2_ADC_RPS); DEBUG ("s626_irq_handler: adc convert triggered!!!\n"); devpriv->ai_convert_count--; if (devpriv->ai_convert_count > 0) { DEBUG ("s626_ai_cmd: enable interrupt on dio channel %d group %d\n", cmd->convert_arg - (16 * group), group); s626_dio_set_irq(dev, cmd->convert_arg); DEBUG ("s626_irq_handler: External trigger is set!!!\n"); } } } break; } } /* read interrupt type */ irqbit = DEBIread(dev, LP_RDMISC2); /* check interrupt on counters */ DEBUG("s626_irq_handler: check counters interrupt %d\n", irqbit); if (irqbit & IRQ_COINT1A) { DEBUG ("s626_irq_handler: interrupt on counter 1A overflow\n"); k = &encpriv[0]; /* clear interrupt capture flag */ k->ResetCapFlags(dev, k); } if (irqbit & IRQ_COINT2A) { DEBUG ("s626_irq_handler: interrupt on counter 2A overflow\n"); k = &encpriv[1]; /* clear interrupt capture flag */ k->ResetCapFlags(dev, k); } if (irqbit & IRQ_COINT3A) { DEBUG ("s626_irq_handler: interrupt on counter 3A overflow\n"); k = &encpriv[2]; /* clear interrupt capture flag */ k->ResetCapFlags(dev, k); } if (irqbit & IRQ_COINT1B) { DEBUG ("s626_irq_handler: interrupt on counter 1B overflow\n"); k = &encpriv[3]; /* clear interrupt capture 
flag */ k->ResetCapFlags(dev, k); } if (irqbit & IRQ_COINT2B) { DEBUG ("s626_irq_handler: interrupt on counter 2B overflow\n"); k = &encpriv[4]; /* clear interrupt capture flag */ k->ResetCapFlags(dev, k); if (devpriv->ai_convert_count > 0) { devpriv->ai_convert_count--; if (devpriv->ai_convert_count == 0) k->SetEnable(dev, k, CLKENAB_INDEX); if (cmd->convert_src == TRIG_TIMER) { DEBUG ("s626_irq_handler: conver timer trigger!!! %d\n", devpriv->ai_convert_count); /* Trigger ADC scan loop start by setting RPS Signal 0. */ MC_ENABLE(P_MC2, MC2_ADC_RPS); } } } if (irqbit & IRQ_COINT3B) { DEBUG ("s626_irq_handler: interrupt on counter 3B overflow\n"); k = &encpriv[5]; /* clear interrupt capture flag */ k->ResetCapFlags(dev, k); if (cmd->scan_begin_src == TRIG_TIMER) { DEBUG ("s626_irq_handler: scan timer trigger!!!\n"); /* Trigger ADC scan loop start by setting RPS Signal 0. */ MC_ENABLE(P_MC2, MC2_ADC_RPS); } if (cmd->convert_src == TRIG_TIMER) { DEBUG ("s626_irq_handler: convert timer trigger is set\n"); k = &encpriv[4]; devpriv->ai_convert_count = cmd->chanlist_len; k->SetEnable(dev, k, CLKENAB_ALWAYS); } } } /* enable interrupt */ writel(irqstatus, devpriv->base_addr + P_IER); DEBUG("s626_irq_handler: exit interrupt service routine.\n"); spin_unlock_irqrestore(&dev->spinlock, flags); return IRQ_HANDLED; } static int s626_detach(struct comedi_device *dev) { if (devpriv) { /* stop ai_command */ devpriv->ai_cmd_running = 0; if (devpriv->base_addr) { /* interrupt mask */ WR7146(P_IER, 0); /* Disable master interrupt. */ WR7146(P_ISR, IRQ_GPIO3 | IRQ_RPS1); /* Clear board's IRQ status flag. */ /* Disable the watchdog timer and battery charger. */ WriteMISC2(dev, 0); /* Close all interfaces on 7146 device. 
*/ WR7146(P_MC1, MC1_SHUTDOWN); WR7146(P_ACON1, ACON1_BASE); CloseDMAB(dev, &devpriv->RPSBuf, DMABUF_SIZE); CloseDMAB(dev, &devpriv->ANABuf, DMABUF_SIZE); } if (dev->irq) free_irq(dev->irq, dev); if (devpriv->base_addr) iounmap(devpriv->base_addr); if (devpriv->pdev) { if (devpriv->got_regions) comedi_pci_disable(devpriv->pdev); pci_dev_put(devpriv->pdev); } } DEBUG("s626_detach: S626 detached!\n"); return 0; } /* * this functions build the RPS program for hardware driven acquistion */ void ResetADC(struct comedi_device *dev, uint8_t * ppl) { register uint32_t *pRPS; uint32_t JmpAdrs; uint16_t i; uint16_t n; uint32_t LocalPPL; struct comedi_cmd *cmd = &(dev->subdevices->async->cmd); /* Stop RPS program in case it is currently running. */ MC_DISABLE(P_MC1, MC1_ERPS1); /* Set starting logical address to write RPS commands. */ pRPS = (uint32_t *) devpriv->RPSBuf.LogicalBase; /* Initialize RPS instruction pointer. */ WR7146(P_RPSADDR1, (uint32_t) devpriv->RPSBuf.PhysicalBase); /* Construct RPS program in RPSBuf DMA buffer */ if (cmd != NULL && cmd->scan_begin_src != TRIG_FOLLOW) { DEBUG("ResetADC: scan_begin pause inserted\n"); /* Wait for Start trigger. */ *pRPS++ = RPS_PAUSE | RPS_SIGADC; *pRPS++ = RPS_CLRSIGNAL | RPS_SIGADC; } /* SAA7146 BUG WORKAROUND Do a dummy DEBI Write. This is necessary * because the first RPS DEBI Write following a non-RPS DEBI write * seems to always fail. If we don't do this dummy write, the ADC * gain might not be set to the value required for the first slot in * the poll list; the ADC gain would instead remain unchanged from * the previously programmed value. */ *pRPS++ = RPS_LDREG | (P_DEBICMD >> 2); /* Write DEBI Write command and address to shadow RAM. */ *pRPS++ = DEBI_CMD_WRWORD | LP_GSEL; *pRPS++ = RPS_LDREG | (P_DEBIAD >> 2); /* Write DEBI immediate data to shadow RAM: */ *pRPS++ = GSEL_BIPOLAR5V; /* arbitrary immediate data value. */ *pRPS++ = RPS_CLRSIGNAL | RPS_DEBI; /* Reset "shadow RAM uploaded" flag. 
*/ *pRPS++ = RPS_UPLOAD | RPS_DEBI; /* Invoke shadow RAM upload. */ *pRPS++ = RPS_PAUSE | RPS_DEBI; /* Wait for shadow upload to finish. */ /* Digitize all slots in the poll list. This is implemented as a * for loop to limit the slot count to 16 in case the application * forgot to set the EOPL flag in the final slot. */ for (devpriv->AdcItems = 0; devpriv->AdcItems < 16; devpriv->AdcItems++) { /* Convert application's poll list item to private board class * format. Each app poll list item is an uint8_t with form * (EOPL,x,x,RANGE,CHAN<3:0>), where RANGE code indicates 0 = * +-10V, 1 = +-5V, and EOPL = End of Poll List marker. */ LocalPPL = (*ppl << 8) | (*ppl & 0x10 ? GSEL_BIPOLAR5V : GSEL_BIPOLAR10V); /* Switch ADC analog gain. */ *pRPS++ = RPS_LDREG | (P_DEBICMD >> 2); /* Write DEBI command */ /* and address to */ /* shadow RAM. */ *pRPS++ = DEBI_CMD_WRWORD | LP_GSEL; *pRPS++ = RPS_LDREG | (P_DEBIAD >> 2); /* Write DEBI */ /* immediate data to */ /* shadow RAM. */ *pRPS++ = LocalPPL; *pRPS++ = RPS_CLRSIGNAL | RPS_DEBI; /* Reset "shadow RAM uploaded" */ /* flag. */ *pRPS++ = RPS_UPLOAD | RPS_DEBI; /* Invoke shadow RAM upload. */ *pRPS++ = RPS_PAUSE | RPS_DEBI; /* Wait for shadow upload to */ /* finish. */ /* Select ADC analog input channel. */ *pRPS++ = RPS_LDREG | (P_DEBICMD >> 2); /* Write DEBI command and address to shadow RAM. */ *pRPS++ = DEBI_CMD_WRWORD | LP_ISEL; *pRPS++ = RPS_LDREG | (P_DEBIAD >> 2); /* Write DEBI immediate data to shadow RAM. */ *pRPS++ = LocalPPL; *pRPS++ = RPS_CLRSIGNAL | RPS_DEBI; /* Reset "shadow RAM uploaded" flag. */ *pRPS++ = RPS_UPLOAD | RPS_DEBI; /* Invoke shadow RAM upload. */ *pRPS++ = RPS_PAUSE | RPS_DEBI; /* Wait for shadow upload to finish. */ /* Delay at least 10 microseconds for analog input settling. * Instead of padding with NOPs, we use RPS_JUMP instructions * here; this allows us to produce a longer delay than is * possible with NOPs because each RPS_JUMP flushes the RPS' * instruction prefetch pipeline. 
*/ JmpAdrs = (uint32_t) devpriv->RPSBuf.PhysicalBase + (uint32_t) ((unsigned long)pRPS - (unsigned long)devpriv->RPSBuf.LogicalBase); for (i = 0; i < (10 * RPSCLK_PER_US / 2); i++) { JmpAdrs += 8; /* Repeat to implement time delay: */ *pRPS++ = RPS_JUMP; /* Jump to next RPS instruction. */ *pRPS++ = JmpAdrs; } if (cmd != NULL && cmd->convert_src != TRIG_NOW) { DEBUG("ResetADC: convert pause inserted\n"); /* Wait for Start trigger. */ *pRPS++ = RPS_PAUSE | RPS_SIGADC; *pRPS++ = RPS_CLRSIGNAL | RPS_SIGADC; } /* Start ADC by pulsing GPIO1. */ *pRPS++ = RPS_LDREG | (P_GPIO >> 2); /* Begin ADC Start pulse. */ *pRPS++ = GPIO_BASE | GPIO1_LO; *pRPS++ = RPS_NOP; /* VERSION 2.03 CHANGE: STRETCH OUT ADC START PULSE. */ *pRPS++ = RPS_LDREG | (P_GPIO >> 2); /* End ADC Start pulse. */ *pRPS++ = GPIO_BASE | GPIO1_HI; /* Wait for ADC to complete (GPIO2 is asserted high when ADC not * busy) and for data from previous conversion to shift into FB * BUFFER 1 register. */ *pRPS++ = RPS_PAUSE | RPS_GPIO2; /* Wait for ADC done. */ /* Transfer ADC data from FB BUFFER 1 register to DMA buffer. */ *pRPS++ = RPS_STREG | (BUGFIX_STREG(P_FB_BUFFER1) >> 2); *pRPS++ = (uint32_t) devpriv->ANABuf.PhysicalBase + (devpriv->AdcItems << 2); /* If this slot's EndOfPollList flag is set, all channels have */ /* now been processed. */ if (*ppl++ & EOPL) { devpriv->AdcItems++; /* Adjust poll list item count. */ break; /* Exit poll list processing loop. */ } } DEBUG("ResetADC: ADC items %d \n", devpriv->AdcItems); /* VERSION 2.01 CHANGE: DELAY CHANGED FROM 250NS to 2US. Allow the * ADC to stabilize for 2 microseconds before starting the final * (dummy) conversion. This delay is necessary to allow sufficient * time between last conversion finished and the start of the dummy * conversion. Without this delay, the last conversion's data value * is sometimes set to the previous conversion's data value. 
*/
        /* Pad with NOPs: allow the ADC to stabilize for 2 us before the
         * final (dummy) conversion (see the version note above). */
        for (n = 0; n < (2 * RPSCLK_PER_US); n++)
                *pRPS++ = RPS_NOP;

        /* Start a dummy conversion to cause the data from the last
         * conversion of interest to be shifted in.
         */
        *pRPS++ = RPS_LDREG | (P_GPIO >> 2);    /* Begin ADC Start pulse. */
        *pRPS++ = GPIO_BASE | GPIO1_LO;
        *pRPS++ = RPS_NOP;
        /* VERSION 2.03 CHANGE: STRETCH OUT ADC START PULSE. */
        *pRPS++ = RPS_LDREG | (P_GPIO >> 2);    /* End ADC Start pulse. */
        *pRPS++ = GPIO_BASE | GPIO1_HI;

        /* Wait for the data from the last conversion of interest to arrive
         * in FB BUFFER 1 register.
         */
        *pRPS++ = RPS_PAUSE | RPS_GPIO2;        /* Wait for ADC done. */

        /* Transfer final ADC data from FB BUFFER 1 register to DMA buffer. */
        *pRPS++ = RPS_STREG | (BUGFIX_STREG(P_FB_BUFFER1) >> 2);        /*  */
        *pRPS++ = (uint32_t) devpriv->ANABuf.PhysicalBase + (devpriv->AdcItems << 2);

        /* Indicate ADC scan loop is finished. */
        /* *pRPS++= RPS_CLRSIGNAL | RPS_SIGADC ;  // Signal ReadADC() that scan is done. */

        /* Raise an interrupt at the end of each scan, but only when a
         * streaming command is active. */
        if (devpriv->ai_cmd_running == 1) {
                DEBUG("ResetADC: insert irq in ADC RPS task\n");
                *pRPS++ = RPS_IRQ;
        }
        /* Restart RPS program at its beginning. */
        *pRPS++ = RPS_JUMP;     /* Branch to start of RPS program. */
        *pRPS++ = (uint32_t) devpriv->RPSBuf.PhysicalBase;

        /* End of RPS program build */
}

/* TO COMPLETE, IF NECESSARY */
/* INSN_CONFIG handler for the analog-input subdevice: nothing is
 * configurable yet, so every request is rejected. */
static int s626_ai_insn_config(struct comedi_device *dev,
                               struct comedi_subdevice *s,
                               struct comedi_insn *insn, unsigned int *data)
{
        return -EINVAL;
}

/* static int s626_ai_rinsn(struct comedi_device *dev,struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data) */
/* { */
/*   register uint8_t i; */
/*   register int32_t *readaddr; */

/*   DEBUG("as626_ai_rinsn: ai_rinsn enter \n"); */

/*   Trigger ADC scan loop start by setting RPS Signal 0. */
/*   MC_ENABLE( P_MC2, MC2_ADC_RPS ); */

/*   Wait until ADC scan loop is finished (RPS Signal 0 reset). */
/*   while ( MC_TEST( P_MC2, MC2_ADC_RPS ) ); */

/* Init ptr to DMA buffer that holds new ADC data.
We skip the
 * first uint16_t in the buffer because it contains junk data from
 * the final ADC of the previous poll list scan.
 */
/* readaddr = (uint32_t *)devpriv->ANABuf.LogicalBase + 1; */

/* Convert ADC data to 16-bit integer values and copy to application buffer. */
/* for ( i = 0; i < devpriv->AdcItems; i++ ) { */
/*   *data = s626_ai_reg_to_uint( *readaddr++ ); */
/*   DEBUG("s626_ai_rinsn: data %d \n",*data); */
/*   data++; */
/* } */

/* DEBUG("s626_ai_rinsn: ai_rinsn escape \n"); */
/* return i; */
/* } */

/*
 * INSN_READ handler: perform insn->n single A/D conversions on the
 * channel/range encoded in insn->chanspec and store the results in data[].
 *
 * The ADC is pipelined: the value read from FB_BUFFER1 after conversion n
 * belongs to conversion n-1, so each sample is stored into data[n - 1] and
 * one extra dummy conversion is run after the loop to flush out the final
 * sample of interest.
 */
static int s626_ai_insn_read(struct comedi_device *dev,
                             struct comedi_subdevice *s,
                             struct comedi_insn *insn, unsigned int *data)
{
        uint16_t chan = CR_CHAN(insn->chanspec);
        uint16_t range = CR_RANGE(insn->chanspec);
        uint16_t AdcSpec = 0;
        uint32_t GpioImage;
        int n;

        /* interrupt call test */
        /* writel(IRQ_GPIO3,devpriv->base_addr+P_PSR); */
        /* Writing a logical 1 into any of the RPS_PSR bits causes the
         * corresponding interrupt to be generated if enabled
         */

        DEBUG("s626_ai_insn_read: entering\n");

        /* Convert application's ADC specification into form
         * appropriate for register programming: range code 0 selects
         * the +/-5V gain, anything else the +/-10V gain.
         */
        if (range == 0)
                AdcSpec = (chan << 8) | (GSEL_BIPOLAR5V);
        else
                AdcSpec = (chan << 8) | (GSEL_BIPOLAR10V);

        /* Switch ADC analog gain. */
        DEBIwrite(dev, LP_GSEL, AdcSpec);       /* Set gain. */

        /* Select ADC analog input channel. */
        DEBIwrite(dev, LP_ISEL, AdcSpec);       /* Select channel. */

        for (n = 0; n < insn->n; n++) {

                /* Delay 10 microseconds for analog input settling. */
                udelay(10);

                /* Start ADC by pulsing GPIO1 low. */
                GpioImage = RR7146(P_GPIO);
                /* Assert ADC Start command */
                WR7146(P_GPIO, GpioImage & ~GPIO1_HI);
                /* and stretch it out. */
                WR7146(P_GPIO, GpioImage & ~GPIO1_HI);
                WR7146(P_GPIO, GpioImage & ~GPIO1_HI);
                /* Negate ADC Start command. */
                WR7146(P_GPIO, GpioImage | GPIO1_HI);

                /* Wait for ADC to complete (GPIO2 is asserted high when
                 * ADC not busy) and for data from previous conversion to
                 * shift into FB BUFFER 1 register.
                 */

                /* Wait for ADC done. */
                while (!(RR7146(P_PSR) & PSR_GPIO2))
                        ;

                /* Fetch ADC data: this is the result of the PREVIOUS
                 * conversion, hence the n - 1 index. */
                if (n != 0)
                        data[n - 1] =
                            s626_ai_reg_to_uint(RR7146(P_FB_BUFFER1));

                /* Allow the ADC to stabilize for 4 microseconds before
                 * starting the next (final) conversion.  This delay is
                 * necessary to allow sufficient time between last
                 * conversion finished and the start of the next
                 * conversion.  Without this delay, the last conversion's
                 * data value is sometimes set to the previous
                 * conversion's data value.
                 */
                udelay(4);
        }

        /* Start a dummy conversion to cause the data from the
         * previous conversion to be shifted in.
         */
        GpioImage = RR7146(P_GPIO);     /* Assert ADC Start command */
        WR7146(P_GPIO, GpioImage & ~GPIO1_HI);
        /* and stretch it out. */
        WR7146(P_GPIO, GpioImage & ~GPIO1_HI);
        WR7146(P_GPIO, GpioImage & ~GPIO1_HI);
        /* Negate ADC Start command. */
        WR7146(P_GPIO, GpioImage | GPIO1_HI);

        /* Wait for the data to arrive in FB BUFFER 1 register. */

        /* Wait for ADC done. */
        while (!(RR7146(P_PSR) & PSR_GPIO2))
                ;

        /* Fetch ADC data from audio interface's input shift register. */

        /* Fetch ADC data. */
        if (n != 0)
                data[n - 1] = s626_ai_reg_to_uint(RR7146(P_FB_BUFFER1));

        /* NOTE(review): if insn->n were 0 this DEBUG would read data[-1];
         * presumably the comedi core guarantees insn->n >= 1 — verify. */
        DEBUG("s626_ai_insn_read: samples %d, data %d\n", n, data[n - 1]);

        return n;
}

/*
 * Translate the command's channel list into the board's private poll-list
 * format: one byte per entry, (channel | range-gain code), with the EOPL
 * (end-of-poll-list) flag set on the final entry.  Returns the number of
 * entries written.
 */
static int s626_ai_load_polllist(uint8_t * ppl, struct comedi_cmd *cmd)
{
        int n;

        for (n = 0; n < cmd->chanlist_len; n++) {
                if (CR_RANGE((cmd->chanlist)[n]) == 0)
                        ppl[n] = (CR_CHAN((cmd->chanlist)[n])) | (RANGE_5V);
                else
                        ppl[n] = (CR_CHAN((cmd->chanlist)[n])) | (RANGE_10V);
        }
        if (n != 0)
                ppl[n - 1] |= EOPL;

        return n;
}

/* Internal-trigger handler (TRIG_INT start source): kick off an
 * acquisition that was previously set up by s626_ai_cmd(). */
static int s626_ai_inttrig(struct comedi_device *dev,
                           struct comedi_subdevice *s, unsigned int trignum)
{
        if (trignum != 0)
                return -EINVAL;

        DEBUG("s626_ai_inttrig: trigger adc start...");

        /* Start executing the RPS program.
*/
        MC_ENABLE(P_MC1, MC1_ERPS1);

        /* One-shot trigger: disarm so a second internal trigger is refused. */
        s->async->inttrig = NULL;

        DEBUG(" done\n");

        return 1;
}

/* TO COMPLETE */
/*
 * Set up a streaming acquisition: program the scan/convert trigger
 * counters or DIO edge interrupts, build the RPS program via ResetADC(),
 * and either start immediately (TRIG_NOW), arm an external DIO trigger
 * (TRIG_EXT), or wait for an internal trigger (TRIG_INT).
 */
static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
        uint8_t ppl[16];
        struct comedi_cmd *cmd = &s->async->cmd;
        struct enc_private *k;
        int tick;

        DEBUG("s626_ai_cmd: entering command function\n");

        if (devpriv->ai_cmd_running) {
                printk("s626_ai_cmd: Another ai_cmd is running %d\n",
                       dev->minor);
                return -EBUSY;
        }
        /* disable interrupt */
        writel(0, devpriv->base_addr + P_IER);

        /* clear interrupt request */
        writel(IRQ_RPS1 | IRQ_GPIO3, devpriv->base_addr + P_ISR);

        /* clear any pending interrupt */
        s626_dio_clear_irq(dev);
        /* s626_enc_clear_irq(dev); */

        /* reset ai_cmd_running flag */
        devpriv->ai_cmd_running = 0;

        /* test if cmd is valid */
        /* NOTE(review): cmd points into s->async and can never be NULL
         * here; this check looks vestigial. */
        if (cmd == NULL) {
                DEBUG("s626_ai_cmd: NULL command\n");
                return -EINVAL;
        } else {
                DEBUG("s626_ai_cmd: command recieved!!!\n");
        }

        if (dev->irq == 0) {
                comedi_error(dev,
                             "s626_ai_cmd: cannot run command without an irq");
                return -EIO;
        }

        s626_ai_load_polllist(ppl, cmd);
        devpriv->ai_cmd_running = 1;
        devpriv->ai_convert_count = 0;

        switch (cmd->scan_begin_src) {
        case TRIG_FOLLOW:
                break;
        case TRIG_TIMER:
                /* set a counter to generate adc trigger at scan_begin_arg
                 * interval; counter channel 5 is dedicated to scan timing */
                k = &encpriv[5];
                tick = s626_ns_to_timer((int *)&cmd->scan_begin_arg,
                                        cmd->flags & TRIG_ROUND_MASK);

                /* load timer value and enable interrupt */
                s626_timer_load(dev, k, tick);
                k->SetEnable(dev, k, CLKENAB_ALWAYS);

                DEBUG("s626_ai_cmd: scan trigger timer is set with value %d\n",
                      tick);
                break;
        case TRIG_EXT:
                /* set the digital line and interrupt for scan trigger;
                 * skipped when the same line is already armed as the
                 * acquisition start trigger */
                if (cmd->start_src != TRIG_EXT)
                        s626_dio_set_irq(dev, cmd->scan_begin_arg);

                DEBUG("s626_ai_cmd: External scan trigger is set!!!\n");
                break;
        }

        switch (cmd->convert_src) {
        case TRIG_NOW:
                break;
        case TRIG_TIMER:
                /* set a counter to generate adc trigger at convert_arg
                 * interval; counter channel 4 is dedicated to convert timing */
                k = &encpriv[4];
                tick = s626_ns_to_timer((int *)&cmd->convert_arg,
                                        cmd->flags & TRIG_ROUND_MASK);

                /* load timer value and enable interrupt */
                s626_timer_load(dev, k, tick);
                k->SetEnable(dev, k, CLKENAB_INDEX);

                DEBUG
                    ("s626_ai_cmd: convert trigger timer is set with value %d\n",
                     tick);
                break;
        case TRIG_EXT:
                /* set the digital line and interrupt for convert trigger;
                 * skipped when that line already drives the scan or start
                 * trigger */
                if (cmd->scan_begin_src != TRIG_EXT
                    && cmd->start_src == TRIG_EXT)
                        s626_dio_set_irq(dev, cmd->convert_arg);

                DEBUG("s626_ai_cmd: External convert trigger is set!!!\n");
                break;
        }

        switch (cmd->stop_src) {
        case TRIG_COUNT:
                /* data arrives as one packet */
                devpriv->ai_sample_count = cmd->stop_arg;
                devpriv->ai_continous = 0;
                break;
        case TRIG_NONE:
                /* continuous acquisition */
                devpriv->ai_continous = 1;
                devpriv->ai_sample_count = 0;
                break;
        }

        /* Build the RPS program for the requested poll list. */
        ResetADC(dev, ppl);

        switch (cmd->start_src) {
        case TRIG_NOW:
                /* Trigger ADC scan loop start by setting RPS Signal 0. */
                /* MC_ENABLE( P_MC2, MC2_ADC_RPS ); */

                /* Start executing the RPS program. */
                MC_ENABLE(P_MC1, MC1_ERPS1);

                DEBUG("s626_ai_cmd: ADC triggered\n");
                s->async->inttrig = NULL;
                break;
        case TRIG_EXT:
                /* configure DIO channel for acquisition trigger */
                s626_dio_set_irq(dev, cmd->start_arg);

                DEBUG("s626_ai_cmd: External start trigger is set!!!\n");
                s->async->inttrig = NULL;
                break;
        case TRIG_INT:
                /* defer the start until the application triggers it */
                s->async->inttrig = s626_ai_inttrig;
                break;
        }

        /* enable interrupt */
        writel(IRQ_GPIO3 | IRQ_RPS1, devpriv->base_addr + P_IER);

        DEBUG("s626_ai_cmd: command function terminated\n");

        return 0;
}

static int s626_ai_cmdtest(struct comedi_device *dev,
                           struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
        int err = 0;
        int tmp;

        /* cmdtest tests a particular command to see if it is valid.  Using
         * the cmdtest ioctl, a user can create a valid cmd and then have it
         * executed by the cmd ioctl.
         *
         * cmdtest returns 1,2,3,4 or 0, depending on which tests the
         * command passes.
*/

        /* step 1: make sure trigger sources are trivially valid */

        tmp = cmd->start_src;
        cmd->start_src &= TRIG_NOW | TRIG_INT | TRIG_EXT;
        if (!cmd->start_src || tmp != cmd->start_src)
                err++;

        tmp = cmd->scan_begin_src;
        cmd->scan_begin_src &= TRIG_TIMER | TRIG_EXT | TRIG_FOLLOW;
        if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
                err++;

        tmp = cmd->convert_src;
        cmd->convert_src &= TRIG_TIMER | TRIG_EXT | TRIG_NOW;
        if (!cmd->convert_src || tmp != cmd->convert_src)
                err++;

        tmp = cmd->scan_end_src;
        cmd->scan_end_src &= TRIG_COUNT;
        if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
                err++;

        tmp = cmd->stop_src;
        cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
        if (!cmd->stop_src || tmp != cmd->stop_src)
                err++;

        if (err)
                return 1;

        /* step 2: make sure trigger sources are unique and mutually
         * compatible */

        /* note that mutual compatibility is not an issue here */
        if (cmd->scan_begin_src != TRIG_TIMER &&
            cmd->scan_begin_src != TRIG_EXT
            && cmd->scan_begin_src != TRIG_FOLLOW)
                err++;
        if (cmd->convert_src != TRIG_TIMER &&
            cmd->convert_src != TRIG_EXT && cmd->convert_src != TRIG_NOW)
                err++;
        if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
                err++;

        if (err)
                return 2;

        /* step 3: make sure arguments are trivially compatible */

        if (cmd->start_src != TRIG_EXT && cmd->start_arg != 0) {
                cmd->start_arg = 0;
                err++;
        }

        /* external trigger arguments name a DIO line; the board has 40 */
        if (cmd->start_src == TRIG_EXT && cmd->start_arg > 39) {
                cmd->start_arg = 39;
                err++;
        }

        if (cmd->scan_begin_src == TRIG_EXT && cmd->scan_begin_arg > 39) {
                cmd->scan_begin_arg = 39;
                err++;
        }

        if (cmd->convert_src == TRIG_EXT && cmd->convert_arg > 39) {
                cmd->convert_arg = 39;
                err++;
        }
#define MAX_SPEED 200000        /* in nanoseconds */
#define MIN_SPEED 2000000000    /* in nanoseconds */

        if (cmd->scan_begin_src == TRIG_TIMER) {
                if (cmd->scan_begin_arg < MAX_SPEED) {
                        cmd->scan_begin_arg = MAX_SPEED;
                        err++;
                }
                if (cmd->scan_begin_arg > MIN_SPEED) {
                        cmd->scan_begin_arg = MIN_SPEED;
                        err++;
                }
        } else {
                /* external trigger */
                /* should be level/edge, hi/lo specification here */
                /* should specify multiple external triggers */
/* 		if(cmd->scan_begin_arg>9){ */
/* 			cmd->scan_begin_arg=9; */
/* 			err++; */
/* 		} */
        }
        if (cmd->convert_src == TRIG_TIMER) {
                if (cmd->convert_arg < MAX_SPEED) {
                        cmd->convert_arg = MAX_SPEED;
                        err++;
                }
                if (cmd->convert_arg > MIN_SPEED) {
                        cmd->convert_arg = MIN_SPEED;
                        err++;
                }
        } else {
                /* external trigger */
                /* see above */
/* 		if(cmd->convert_arg>9){ */
/* 			cmd->convert_arg=9; */
/* 			err++; */
/* 		} */
        }

        if (cmd->scan_end_arg != cmd->chanlist_len) {
                cmd->scan_end_arg = cmd->chanlist_len;
                err++;
        }
        if (cmd->stop_src == TRIG_COUNT) {
                if (cmd->stop_arg > 0x00ffffff) {
                        cmd->stop_arg = 0x00ffffff;
                        err++;
                }
        } else {
                /* TRIG_NONE */
                if (cmd->stop_arg != 0) {
                        cmd->stop_arg = 0;
                        err++;
                }
        }

        if (err)
                return 3;

        /* step 4: fix up any arguments */

        if (cmd->scan_begin_src == TRIG_TIMER) {
                tmp = cmd->scan_begin_arg;
                s626_ns_to_timer((int *)&cmd->scan_begin_arg,
                                 cmd->flags & TRIG_ROUND_MASK);
                if (tmp != cmd->scan_begin_arg)
                        err++;
        }
        if (cmd->convert_src == TRIG_TIMER) {
                tmp = cmd->convert_arg;
                s626_ns_to_timer((int *)&cmd->convert_arg,
                                 cmd->flags & TRIG_ROUND_MASK);
                if (tmp != cmd->convert_arg)
                        err++;
                /* the scan period must fit all the conversions it contains */
                if (cmd->scan_begin_src == TRIG_TIMER &&
                    cmd->scan_begin_arg <
                    cmd->convert_arg * cmd->scan_end_arg) {
                        cmd->scan_begin_arg =
                            cmd->convert_arg * cmd->scan_end_arg;
                        err++;
                }
        }

        if (err)
                return 4;

        return 0;
}

/* Abort a running acquisition: halt the RPS program and mask all
 * board interrupts. */
static int s626_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
        /* Stop RPS program in case it is currently running. */
        MC_DISABLE(P_MC1, MC1_ERPS1);

        /* disable master interrupt */
        writel(0, devpriv->base_addr + P_IER);

        devpriv->ai_cmd_running = 0;

        return 0;
}

/* This function doesn't require a particular form, this is just what
 * happens to be used in some of the drivers.  It should convert ns
 * nanoseconds to a counter value suitable for programming the device.
 * Also, it should adjust ns so that it corresponds to the actual time
 * that the device will use.
*/
static int s626_ns_to_timer(int *nanosec, int round_mode)
{
        int divider, base;

        base = 500;             /* 2MHz internal clock: 500 ns per tick */

        switch (round_mode) {
        case TRIG_ROUND_NEAREST:
        default:
                divider = (*nanosec + base / 2) / base;
                break;
        case TRIG_ROUND_DOWN:
                divider = (*nanosec) / base;
                break;
        case TRIG_ROUND_UP:
                divider = (*nanosec + base - 1) / base;
                break;
        }

        /* Write back the period the hardware will actually use. */
        *nanosec = base * divider;
        /* The counter reloads on the tick after it reaches zero. */
        return divider - 1;
}

/* INSN_WRITE handler for the analog-output subdevice: write insn->n
 * setpoints to one DAC channel, caching each raw value for readback. */
static int s626_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
                         struct comedi_insn *insn, unsigned int *data)
{

        int i;
        uint16_t chan = CR_CHAN(insn->chanspec);
        int16_t dacdata;

        for (i = 0; i < insn->n; i++) {
                dacdata = (int16_t) data[i];
                devpriv->ao_readback[CR_CHAN(insn->chanspec)] = data[i];
                /* Re-center the unsigned comedi sample around 0 so that
                 * SetDAC() receives a signed bipolar setpoint.
                 * NOTE(review): assumes full range is 0..0x3ffe — confirm. */
                dacdata -= (0x1fff);

                SetDAC(dev, chan, dacdata);
        }

        return i;
}

/* INSN_READ handler for the analog-output subdevice: return the cached
 * value(s) last written to the channel (the hardware is write-only). */
static int s626_ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
                         struct comedi_insn *insn, unsigned int *data)
{
        int i;

        for (i = 0; i < insn->n; i++)
                data[i] = devpriv->ao_readback[CR_CHAN(insn->chanspec)];

        return i;
}

/* *************** DIGITAL I/O FUNCTIONS ***************
 * All DIO functions address a group of DIO channels by means of
 * "group" argument.  group may be 0, 1 or 2, which correspond to DIO
 * ports A, B and C, respectively.
 */

/* Quiesce all three DIO banks: no interrupts, no event captures,
 * default edge polarity, all outputs inactive. */
static void s626_dio_init(struct comedi_device *dev)
{
        uint16_t group;
        struct comedi_subdevice *s;

        /* Prepare to treat writes to WRCapSel as capture disables. */
        DEBIwrite(dev, LP_MISC1, MISC1_NOEDCAP);

        /* For each group of sixteen channels ... */
        for (group = 0; group < S626_DIO_BANKS; group++) {
                /* diopriv below resolves against s, set here per bank */
                s = dev->subdevices + 2 + group;
                DEBIwrite(dev, diopriv->WRIntSel, 0);   /* Disable all interrupts. */
                DEBIwrite(dev, diopriv->WRCapSel, 0xFFFF);      /* Disable all event captures. */
                DEBIwrite(dev, diopriv->WREdgSel, 0);   /* Init all DIOs to default edge polarity. */
                DEBIwrite(dev, diopriv->WRDOut, 0);     /* Program all outputs to inactive state. */
        }
        DEBUG("s626_dio_init: DIO initialized \n");
}

/* DIO devices are slightly special.
Although it is possible to
 * implement the insn_read/insn_write interface, it is much more
 * useful to applications if you implement the insn_bits interface.
 * This allows packed reading/writing of the DIO channels.  The comedi
 * core can convert between insn_bits and insn_read/write */

static int s626_dio_insn_bits(struct comedi_device *dev,
                              struct comedi_subdevice *s,
                              struct comedi_insn *insn, unsigned int *data)
{

        /* Length of data must be 2 (mask and new data, see below) */

        if (insn->n == 0)
                return 0;

        if (insn->n != 2) {
                printk
                    ("comedi%d: s626: s626_dio_insn_bits(): Invalid instruction length\n",
                     dev->minor);
                return -EINVAL;
        }

        /*
         * The insn data consists of a mask in data[0] and the new data in
         * data[1].  The mask defines which bits we are concerning about.
         * The new data must be anded with the mask.  Each channel
         * corresponds to a bit.
         */
        if (data[0]) {
                /* Check if requested ports are configured for output */
                if ((s->io_bits & data[0]) != data[0])
                        return -EIO;

                s->state &= ~data[0];
                s->state |= data[0] & data[1];

                /* Write out the new digital output lines */
                DEBIwrite(dev, diopriv->WRDOut, s->state);
        }
        /* Always return the current input readback in data[1]. */
        data[1] = DEBIread(dev, diopriv->RDDIn);

        return 2;
}

/* INSN_CONFIG handler: query or set the input/output direction of one
 * DIO channel. */
static int s626_dio_insn_config(struct comedi_device *dev,
                                struct comedi_subdevice *s,
                                struct comedi_insn *insn, unsigned int *data)
{

        switch (data[0]) {
        case INSN_CONFIG_DIO_QUERY:
                data[1] =
                    (s->
                     io_bits & (1 << CR_CHAN(insn->chanspec))) ?
                    COMEDI_OUTPUT : COMEDI_INPUT;
                return insn->n;
                break;
        case COMEDI_INPUT:
                s->io_bits &= ~(1 << CR_CHAN(insn->chanspec));
                break;
        case COMEDI_OUTPUT:
                s->io_bits |= 1 << CR_CHAN(insn->chanspec);
                break;
        default:
                return -EINVAL;
                break;
        }
        /* NOTE(review): this writes the direction mask to the data-out
         * register (WRDOut) — looks suspicious; confirm against the s626
         * register map. */
        DEBIwrite(dev, diopriv->WRDOut, s->io_bits);

        return 1;
}

/* Arm positive-edge capture and interrupt generation on one DIO line
 * (used to implement external triggers). */
static int s626_dio_set_irq(struct comedi_device *dev, unsigned int chan)
{
        unsigned int group;
        unsigned int bitmask;
        unsigned int status;

        /* select dio bank */
        group = chan / 16;
        bitmask = 1 << (chan - (16 * group));
        DEBUG("s626_dio_set_irq: enable interrupt on dio channel %d group %d\n",
              chan - (16 * group), group);

        /* set channel to capture positive edge */
        status = DEBIread(dev,
                          ((struct dio_private *)(dev->subdevices + 2 +
                                                  group)->private)->RDEdgSel);
        DEBIwrite(dev,
                  ((struct dio_private *)(dev->subdevices + 2 +
                                          group)->private)->WREdgSel,
                  bitmask | status);

        /* enable interrupt on selected channel */
        status = DEBIread(dev,
                          ((struct dio_private *)(dev->subdevices + 2 +
                                                  group)->private)->RDIntSel);
        DEBIwrite(dev,
                  ((struct dio_private *)(dev->subdevices + 2 +
                                          group)->private)->WRIntSel,
                  bitmask | status);

        /* enable edge capture write command */
        DEBIwrite(dev, LP_MISC1, MISC1_EDCAP);

        /* enable edge capture on selected channel */
        status = DEBIread(dev,
                          ((struct dio_private *)(dev->subdevices + 2 +
                                                  group)->private)->RDCapSel);
        DEBIwrite(dev,
                  ((struct dio_private *)(dev->subdevices + 2 +
                                          group)->private)->WRCapSel,
                  bitmask | status);

        return 0;
}

/* Disable edge capture for the lines in `mask` within one DIO bank. */
static int s626_dio_reset_irq(struct comedi_device *dev, unsigned int group,
                              unsigned int mask)
{
        DEBUG
            ("s626_dio_reset_irq: disable interrupt on dio channel %d group %d\n",
             mask, group);

        /* disable edge capture write command */
        DEBIwrite(dev, LP_MISC1, MISC1_NOEDCAP);

        /* enable edge capture on selected channel */
        DEBIwrite(dev,
                  ((struct dio_private *)(dev->subdevices + 2 +
                                          group)->private)->WRCapSel, mask);

        return 0;
}

/* Clear all pending DIO edge-capture events across every bank. */
static int s626_dio_clear_irq(struct comedi_device *dev)
{
        unsigned int group;

        /* disable edge capture write command */
        DEBIwrite(dev, LP_MISC1, MISC1_NOEDCAP);

        for (group = 0; group < S626_DIO_BANKS; group++) {
                /* clear pending events and interrupt */
                DEBIwrite(dev,
                          ((struct dio_private *)(dev->subdevices + 2 +
                                                  group)->private)->WRCapSel,
                          0xffff);
        }

        return 0;
}

/* Now this function initializes the value of the counter (data[0])
 * and set the subdevice.  To complete with trigger and interrupt
 * configuration.
 */
static int s626_enc_insn_config(struct comedi_device *dev,
                                struct comedi_subdevice *s,
                                struct comedi_insn *insn, unsigned int *data)
{
        uint16_t Setup = (LOADSRC_INDX << BF_LOADSRC) | /* Preload upon index. */
            (INDXSRC_SOFT << BF_INDXSRC) |      /* Disable hardware index. */
            (CLKSRC_COUNTER << BF_CLKSRC) |     /* Operating mode is Counter. */
            (CLKPOL_POS << BF_CLKPOL) | /* Active high clock. */
            /* ( CNTDIR_UP << BF_CLKPOL ) | // Count direction is Down. */
            (CLKMULT_1X << BF_CLKMULT) |        /* Clock multiplier is 1x. */
            (CLKENAB_INDEX << BF_CLKENAB);
        /* uint16_t DisableIntSrc=TRUE; */
        /* uint32_t Preloadvalue;              //Counter initial value */
        uint16_t valueSrclatch = LATCHSRC_AB_READ;
        uint16_t enab = CLKENAB_ALWAYS;
        struct enc_private *k = &encpriv[CR_CHAN(insn->chanspec)];

        DEBUG("s626_enc_insn_config: encoder config\n");

        /*   (data==NULL) ? (Preloadvalue=0) : (Preloadvalue=data[0]);  */

        k->SetMode(dev, k, Setup, TRUE);
        /* Preload the counter with the caller-supplied initial value. */
        Preload(dev, k, *(insn->data));
        /* A software index pulse transfers the preload into the counter. */
        k->PulseIndex(dev, k);
        SetLatchSource(dev, k, valueSrclatch);
        k->SetEnable(dev, k, (uint16_t) (enab != 0));

        return insn->n;
}

/* INSN_READ handler: latch and return insn->n snapshots of one
 * encoder counter. */
static int s626_enc_insn_read(struct comedi_device *dev,
                              struct comedi_subdevice *s,
                              struct comedi_insn *insn, unsigned int *data)
{

        int n;
        struct enc_private *k = &encpriv[CR_CHAN(insn->chanspec)];

        DEBUG("s626_enc_insn_read: encoder read channel %d \n",
              CR_CHAN(insn->chanspec));

        for (n = 0; n < insn->n; n++)
                data[n] = ReadLatch(dev, k);

        /* NOTE(review): data[n] here reads one element past the last
         * sample written (index n == insn->n) — looks off by one. */
        DEBUG("s626_enc_insn_read: encoder sample %d\n", data[n]);

        return n;
}

/* INSN_WRITE handler: load a new count value into one encoder counter
 * via the preload register and a software index pulse. */
static int s626_enc_insn_write(struct comedi_device *dev,
                               struct comedi_subdevice *s,
                               struct comedi_insn *insn, unsigned int *data)
{

        struct enc_private *k = &encpriv[CR_CHAN(insn->chanspec)];

        DEBUG("s626_enc_insn_write: encoder write channel %d \n",
              CR_CHAN(insn->chanspec));

        /* Set the preload register */
        Preload(dev, k, data[0]);

        /* Software index pulse forces the preload register to load */
        /* into the counter */
        k->SetLoadTrig(dev, k, 0);
        k->PulseIndex(dev, k);
        k->SetLoadTrig(dev, k, 2);

        DEBUG("s626_enc_insn_write: End encoder write\n");

        return 1;
}

/* Program one counter channel as a periodic down-counting timer that
 * interrupts on overflow (used for scan/convert pacing). */
static void s626_timer_load(struct comedi_device *dev, struct enc_private *k,
                            int tick)
{
        uint16_t Setup = (LOADSRC_INDX << BF_LOADSRC) | /* Preload upon index. */
            (INDXSRC_SOFT << BF_INDXSRC) |      /* Disable hardware index. */
            (CLKSRC_TIMER << BF_CLKSRC) |       /* Operating mode is Timer. */
            (CLKPOL_POS << BF_CLKPOL) | /* Active high clock. */
            (CNTDIR_DOWN << BF_CLKPOL) |        /* Count direction is Down. */
            (CLKMULT_1X << BF_CLKMULT) |        /* Clock multiplier is 1x.
*/
            (CLKENAB_INDEX << BF_CLKENAB);
        uint16_t valueSrclatch = LATCHSRC_A_INDXA;
        /* uint16_t enab=CLKENAB_ALWAYS; */

        k->SetMode(dev, k, Setup, FALSE);

        /* Set the preload register */
        Preload(dev, k, tick);

        /* Software index pulse forces the preload register to load */
        /* into the counter */
        k->SetLoadTrig(dev, k, 0);
        k->PulseIndex(dev, k);

        /* set reload on counter overflow */
        k->SetLoadTrig(dev, k, 1);

        /* set interrupt on overflow */
        k->SetIntSrc(dev, k, INTSRC_OVER);

        SetLatchSource(dev, k, valueSrclatch);
        /* k->SetEnable(dev,k,(uint16_t)(enab != 0)); */
}

/* ***********  DAC FUNCTIONS *********** */

/* Slot 0 base settings. */
#define VECT0	(XSD2 | RSD3 | SIB_A2)
/* Slot 0 always shifts in 0xFF and store it to FB_BUFFER2. */

/* TrimDac LogicalChan-to-PhysicalChan mapping table. */
static uint8_t trimchan[] = { 10, 9, 8, 3, 2, 7, 6, 1, 0, 5, 4 };

/* TrimDac LogicalChan-to-EepromAdrs mapping table. */
static uint8_t trimadrs[] =
    { 0x40, 0x41, 0x42, 0x50, 0x51, 0x52, 0x53, 0x60, 0x61, 0x62, 0x63 };

/* Restore every TrimDac channel from its calibration value stored in
 * the board's EEPROM. */
static void LoadTrimDACs(struct comedi_device *dev)
{
        register uint8_t i;

        /* Copy TrimDac setpoint values from EEPROM to TrimDacs. */
        for (i = 0; i < ARRAY_SIZE(trimchan); i++)
                WriteTrimDAC(dev, i, I2Cread(dev, trimadrs[i]));
}

/* Write one 8-bit setpoint to a TrimDac channel (logical numbering). */
static void WriteTrimDAC(struct comedi_device *dev, uint8_t LogicalChan,
                         uint8_t DacData)
{
        uint32_t chan;

        /* Save the new setpoint in case the application needs to read it
         * back later. */
        devpriv->TrimSetpoint[LogicalChan] = (uint8_t) DacData;

        /* Map logical channel number to physical channel number. */
        chan = (uint32_t) trimchan[LogicalChan];

        /* Set up TSL2 records for TrimDac write operation.  All slots shift
         * 0xFF in from pulled-up SD3 so that the end of the slot sequence
         * can be detected.
         */
        SETVECT(2, XSD2 | XFIFO_1 | WS3);
        /* Slot 2: Send high uint8_t to target TrimDac. */
        SETVECT(3, XSD2 | XFIFO_0 | WS3);
        /* Slot 3: Send low uint8_t to target TrimDac. */
        SETVECT(4, XSD2 | XFIFO_3 | WS1);
        /* Slot 4: Send NOP high uint8_t to DAC0 to keep clock running. */
        SETVECT(5, XSD2 | XFIFO_2 | WS1 | EOS);
        /* Slot 5: Send NOP low uint8_t to DAC0. */

        /* Construct and transmit target DAC's serial packet:
         * ( 0000 AAAA ), ( DDDD DDDD ),( 0x00 ),( 0x00 ) where A<3:0> is the
         * DAC channel's address, and D<7:0> is the DAC setpoint.  Append a
         * WORD value (that writes a channel 0 NOP command to a non-existent
         * main DAC channel) that serves to keep the clock running after the
         * packet has been sent to the target DAC.
         */

        /* Address the DAC channel within the trimdac device. */
        SendDAC(dev, ((uint32_t) chan << 8)
                | (uint32_t) DacData);  /* Include DAC setpoint data. */
}

/* **************  EEPROM ACCESS FUNCTIONS  ************** */

/* Read uint8_t from EEPROM. */
static uint8_t I2Cread(struct comedi_device *dev, uint8_t addr)
{
        uint8_t rtnval;

        /* Send EEPROM target address. */
        if (I2Chandshake(dev, I2C_B2(I2C_ATTRSTART, I2CW)
                         /* Byte2 = I2C command: write to I2C EEPROM device. */
                         | I2C_B1(I2C_ATTRSTOP, addr)
                         /* Byte1 = EEPROM internal target address. */
                         | I2C_B0(I2C_ATTRNOP, 0))) {
                /* Byte0 = Not sent. */
                /* Abort function and declare error if handshake failed. */
                DEBUG("I2Cread: error handshake I2Cread a\n");
                return 0;
        }
        /* Execute EEPROM read. */
        if (I2Chandshake(dev, I2C_B2(I2C_ATTRSTART, I2CR)
                         /* Byte2 = I2C command: read from I2C EEPROM device. */
                         |I2C_B1(I2C_ATTRSTOP, 0)
                         /* Byte1 receives uint8_t from EEPROM. */
                         |I2C_B0(I2C_ATTRNOP, 0))) {
                /* Byte0 = Not sent. */
                /* Abort function and declare error if handshake failed. */
                DEBUG("I2Cread: error handshake I2Cread b\n");
                return 0;
        }
        /* Return copy of EEPROM value. */
        rtnval = (uint8_t) (RR7146(P_I2CCTRL) >> 16);
        return rtnval;
}

/* Issue one I2C transfer and busy-wait for completion; returns non-zero
 * on I2C error. */
static uint32_t I2Chandshake(struct comedi_device *dev, uint32_t val)
{
        /* Write I2C command to I2C Transfer Control shadow register.
*/
        WR7146(P_I2CCTRL, val);

        /* Upload I2C shadow registers into working registers and wait for */
        /* upload confirmation. */
        MC_ENABLE(P_MC2, MC2_UPLD_IIC);
        while (!MC_TEST(P_MC2, MC2_UPLD_IIC))
                ;

        /* Wait until I2C bus transfer is finished or an error occurs. */
        while ((RR7146(P_I2CCTRL) & (I2C_BUSY | I2C_ERR)) == I2C_BUSY)
                ;

        /* Return non-zero if I2C error occurred. */
        return RR7146(P_I2CCTRL) & I2C_ERR;
}

/* Private helper function: Write setpoint to an application DAC channel. */
static void SetDAC(struct comedi_device *dev, uint16_t chan, short dacdata)
{
        register uint16_t signmask;
        register uint32_t WSImage;

        /* Adjust DAC data polarity and set up Polarity Control Register */
        /* image. */
        signmask = 1 << chan;
        if (dacdata < 0) {
                dacdata = -dacdata;
                devpriv->Dacpol |= signmask;
        } else
                devpriv->Dacpol &= ~signmask;

        /* Limit DAC setpoint value to valid range. */
        if ((uint16_t) dacdata > 0x1FFF)
                dacdata = 0x1FFF;

        /* Set up TSL2 records (aka "vectors") for DAC update.  Vectors V2
         * and V3 transmit the setpoint to the target DAC.  V4 and V5 send
         * data to a non-existent TrimDac channel just to keep the clock
         * running after sending data to the target DAC.  This is necessary
         * to eliminate the clock glitch that would otherwise occur at the
         * end of the target DAC's serial data stream.  When the sequence
         * restarts at V0 (after executing V5), the gate array automatically
         * disables gating for the DAC clock and all DAC chip selects.
         */

        /* Choose DAC chip select to be asserted. */
        WSImage = (chan & 2) ? WS1 : WS2;
        SETVECT(2, XSD2 | XFIFO_1 | WSImage);
        /* Slot 2: Transmit high data byte to target DAC. */
        SETVECT(3, XSD2 | XFIFO_0 | WSImage);
        /* Slot 3: Transmit low data byte to target DAC. */
        SETVECT(4, XSD2 | XFIFO_3 | WS3);
        /* Slot 4: Transmit to non-existent TrimDac channel to keep clock */
        SETVECT(5, XSD2 | XFIFO_2 | WS3 | EOS);
        /* Slot 5: running after writing target DAC's low data byte.
         */

        /* Construct and transmit target DAC's serial packet:
         * ( A10D DDDD ),( DDDD DDDD ),( 0x0F ),( 0x00 ) where A is chan<0>,
         * and D<12:0> is the DAC setpoint.  Append a WORD value (that writes
         * to a non-existent TrimDac channel) that serves to keep the clock
         * running after the packet has been sent to the target DAC.
         */
        SendDAC(dev, 0x0F000000
                /* Continue clock after target DAC data (write to non-existent
                 * trimdac). */
                | 0x00004000
                /* Address the two main dual-DAC devices (TSL's chip select
                 * enables target device). */
                | ((uint32_t) (chan & 1) << 15)
                /* Address the DAC channel within the device. */
                | (uint32_t) dacdata);  /* Include DAC setpoint data. */
}

/* Private helper function: Transmit serial data to DAC via Audio
 * channel 2.  Assumes: (1) TSL2 slot records initialized, and (2)
 * Dacpol contains valid target image.
 */
static void SendDAC(struct comedi_device *dev, uint32_t val)
{

        /* START THE SERIAL CLOCK RUNNING ------------- */

        /* Assert DAC polarity control and enable gating of DAC serial clock
         * and audio bit stream signals.  At this point in time we must be
         * assured of being in time slot 0.  If we are not in slot 0, the
         * serial clock and audio stream signals will be disabled; this is
         * because the following DEBIwrite statement (which enables signals
         * to be passed through the gate array) would execute before the
         * trailing edge of WS1/WS3 (which turns off the signals), thus
         * causing the signals to be inactive during the DAC write.
         */
        DEBIwrite(dev, LP_DACPOL, devpriv->Dacpol);

        /* TRANSFER OUTPUT DWORD VALUE INTO A2'S OUTPUT FIFO ---------------- */

        /* Copy DAC setpoint value to DAC's output DMA buffer. */

        /* WR7146( (uint32_t)devpriv->pDacWBuf, val ); */
        *devpriv->pDacWBuf = val;

        /* Enable the output DMA transfer.  This will cause the DMAC to copy
         * the DAC's data value to A2's output FIFO.  The DMA transfer will
         * then immediately terminate because the protection address is
         * reached upon transfer of the first DWORD value.
*/
        MC_ENABLE(P_MC1, MC1_A2OUT);

        /* While the DMA transfer is executing ... */

        /* Reset Audio2 output FIFO's underflow flag (along with any other
         * FIFO underflow/overflow flags).  When set, this flag will
         * indicate that we have emerged from slot 0.
         */
        WR7146(P_ISR, ISR_AFOU);

        /* Wait for the DMA transfer to finish so that there will be data
         * available in the FIFO when time slot 1 tries to transfer a DWORD
         * from the FIFO to the output buffer register.  We test for DMA
         * Done by polling the DMAC enable flag; this flag is automatically
         * cleared when the transfer has finished.
         */
        while ((RR7146(P_MC1) & MC1_A2OUT) != 0)
                ;

        /* START THE OUTPUT STREAM TO THE TARGET DAC -------------------- */

        /* FIFO data is now available, so we enable execution of time slots
         * 1 and higher by clearing the EOS flag in slot 0.  Note that SD3
         * will be shifted in and stored in FB_BUFFER2 for end-of-slot-list
         * detection.
         */
        SETVECT(0, XSD2 | RSD3 | SIB_A2);

        /* Wait for slot 1 to execute to ensure that the Packet will be
         * transmitted.  This is detected by polling the Audio2 output FIFO
         * underflow flag, which will be set when slot 1 execution has
         * finished transferring the DAC's data DWORD from the output FIFO
         * to the output buffer register.
         */
        while ((RR7146(P_SSR) & SSR_AF2_OUT) == 0)
                ;

        /* Set up to trap execution at slot 0 when the TSL sequencer cycles
         * back to slot 0 after executing the EOS in slot 5.  Also,
         * simultaneously shift out and in the 0x00 that is ALWAYS the value
         * stored in the last byte to be shifted out of the FIFO's DWORD
         * buffer register.
         */
        SETVECT(0, XSD2 | XFIFO_2 | RSD2 | SIB_A2 | EOS);

        /* WAIT FOR THE TRANSACTION TO FINISH ----------------------- */

        /* Wait for the TSL to finish executing all time slots before
         * exiting this function.  We must do this so that the next DAC
         * write doesn't start, thereby enabling clock/chip select signals:
         *
         * 1. Before the TSL sequence cycles back to slot 0, which disables
         *    the clock/cs signal gating and traps slot list execution.  If
         *    we have not yet finished slot 5 then the clock/cs signals are
         *    still gated and we have not finished transmitting the stream.
         *
         * 2. While slots 2-5 are executing due to a late slot 0 trap.  In
         *    this case, the slot sequence is currently repeating, but with
         *    clock/cs signals disabled.  We must wait for slot 0 to trap
         *    execution before setting up the next DAC setpoint DMA transfer
         *    and enabling the clock/cs signals.  To detect the end of slot 5,
         *    we test for the FB_BUFFER2 MSB contents to be equal to 0xFF.  If
         *    the TSL has not yet finished executing slot 5 ...
         */
        if ((RR7146(P_FB_BUFFER2) & 0xFF000000) != 0) {
                /* The trap was set on time and we are still executing somewhere
                 * in slots 2-5, so we now wait for slot 0 to execute and trap
                 * TSL execution.  This is detected when FB_BUFFER2 MSB changes
                 * from 0xFF to 0x00, which slot 0 causes to happen by shifting
                 * out/in on SD2 the 0x00 that is always referenced by slot 5.
                 */
                while ((RR7146(P_FB_BUFFER2) & 0xFF000000) != 0)
                        ;
        }
        /* Either (1) we were too late setting the slot 0 trap; the TSL
         * sequencer restarted slot 0 before we could set the EOS trap flag,
         * or (2) we were not late and execution is now trapped at slot 0.
         * In either case, we must now change slot 0 so that it will store
         * value 0xFF (instead of 0x00) to FB_BUFFER2 next time it executes.
         * In order to do this, we reprogram slot 0 so that it will shift in
         * SD3, which is driven only by a pull-up resistor.
         */
        SETVECT(0, RSD3 | SIB_A2 | EOS);

        /* Wait for slot 0 to execute, at which time the TSL is setup for
         * the next DAC write.  This is detected when FB_BUFFER2 MSB changes
         * from 0x00 to 0xFF.
         */
        while ((RR7146(P_FB_BUFFER2) & 0xFF000000) == 0)
                ;
}

/* Update the write-protected MISC2 register: unlock, write, re-lock. */
static void WriteMISC2(struct comedi_device *dev, uint16_t NewImage)
{
        DEBIwrite(dev, LP_MISC1, MISC1_WENABLE);        /* Enable writes to MISC2 register.
*/ DEBIwrite(dev, LP_WRMISC2, NewImage); /* Write new image to MISC2. */ DEBIwrite(dev, LP_MISC1, MISC1_WDISABLE); /* Disable writes to MISC2. */ } /* Initialize the DEBI interface for all transfers. */ static uint16_t DEBIread(struct comedi_device *dev, uint16_t addr) { uint16_t retval; /* Set up DEBI control register value in shadow RAM. */ WR7146(P_DEBICMD, DEBI_CMD_RDWORD | addr); /* Execute the DEBI transfer. */ DEBItransfer(dev); /* Fetch target register value. */ retval = (uint16_t) RR7146(P_DEBIAD); /* Return register value. */ return retval; } /* Execute a DEBI transfer. This must be called from within a */ /* critical section. */ static void DEBItransfer(struct comedi_device *dev) { /* Initiate upload of shadow RAM to DEBI control register. */ MC_ENABLE(P_MC2, MC2_UPLD_DEBI); /* Wait for completion of upload from shadow RAM to DEBI control */ /* register. */ while (!MC_TEST(P_MC2, MC2_UPLD_DEBI)) ; /* Wait until DEBI transfer is done. */ while (RR7146(P_PSR) & PSR_DEBI_S) ; } /* Write a value to a gate array register. */ static void DEBIwrite(struct comedi_device *dev, uint16_t addr, uint16_t wdata) { /* Set up DEBI control register value in shadow RAM. */ WR7146(P_DEBICMD, DEBI_CMD_WRWORD | addr); WR7146(P_DEBIAD, wdata); /* Execute the DEBI transfer. */ DEBItransfer(dev); } /* Replace the specified bits in a gate array register. Imports: mask * specifies bits that are to be preserved, wdata is new value to be * or'd with the masked original. */ static void DEBIreplace(struct comedi_device *dev, uint16_t addr, uint16_t mask, uint16_t wdata) { /* Copy target gate array register into P_DEBIAD register. */ WR7146(P_DEBICMD, DEBI_CMD_RDWORD | addr); /* Set up DEBI control reg value in shadow RAM. */ DEBItransfer(dev); /* Execute the DEBI Read transfer. */ /* Write back the modified image. */ WR7146(P_DEBICMD, DEBI_CMD_WRWORD | addr); /* Set up DEBI control reg value in shadow RAM. 
*/ WR7146(P_DEBIAD, wdata | ((uint16_t) RR7146(P_DEBIAD) & mask)); /* Modify the register image. */ DEBItransfer(dev); /* Execute the DEBI Write transfer. */ } static void CloseDMAB(struct comedi_device *dev, struct bufferDMA *pdma, size_t bsize) { void *vbptr; dma_addr_t vpptr; DEBUG("CloseDMAB: Entering S626DRV_CloseDMAB():\n"); if (pdma == NULL) return; /* find the matching allocation from the board struct */ vbptr = pdma->LogicalBase; vpptr = pdma->PhysicalBase; if (vbptr) { pci_free_consistent(devpriv->pdev, bsize, vbptr, vpptr); pdma->LogicalBase = 0; pdma->PhysicalBase = 0; DEBUG("CloseDMAB(): Logical=%p, bsize=%d, Physical=0x%x\n", vbptr, bsize, (uint32_t) vpptr); } } /* ****** COUNTER FUNCTIONS ******* */ /* All counter functions address a specific counter by means of the * "Counter" argument, which is a logical counter number. The Counter * argument may have any of the following legal values: 0=0A, 1=1A, * 2=2A, 3=0B, 4=1B, 5=2B. */ /* Forward declarations for functions that are common to both A and B counters: */ /* ****** PRIVATE COUNTER FUNCTIONS ****** */ /* Read a counter's output latch. */ static uint32_t ReadLatch(struct comedi_device *dev, struct enc_private *k) { register uint32_t value; /* DEBUG FIXME DEBUG("ReadLatch: Read Latch enter\n"); */ /* Latch counts and fetch LSW of latched counts value. */ value = (uint32_t) DEBIread(dev, k->MyLatchLsw); /* Fetch MSW of latched counts and combine with LSW. */ value |= ((uint32_t) DEBIread(dev, k->MyLatchLsw + 2) << 16); /* DEBUG FIXME DEBUG("ReadLatch: Read Latch exit\n"); */ /* Return latched counts. */ return value; } /* Reset a counter's index and overflow event capture flags. 
*/ static void ResetCapFlags_A(struct comedi_device *dev, struct enc_private *k) { DEBIreplace(dev, k->MyCRB, (uint16_t) (~CRBMSK_INTCTRL), CRBMSK_INTRESETCMD | CRBMSK_INTRESET_A); } static void ResetCapFlags_B(struct comedi_device *dev, struct enc_private *k) { DEBIreplace(dev, k->MyCRB, (uint16_t) (~CRBMSK_INTCTRL), CRBMSK_INTRESETCMD | CRBMSK_INTRESET_B); } /* Return counter setup in a format (COUNTER_SETUP) that is consistent */ /* for both A and B counters. */ static uint16_t GetMode_A(struct comedi_device *dev, struct enc_private *k) { register uint16_t cra; register uint16_t crb; register uint16_t setup; /* Fetch CRA and CRB register images. */ cra = DEBIread(dev, k->MyCRA); crb = DEBIread(dev, k->MyCRB); /* Populate the standardized counter setup bit fields. Note: */ /* IndexSrc is restricted to ENC_X or IndxPol. */ setup = ((cra & STDMSK_LOADSRC) /* LoadSrc = LoadSrcA. */ |((crb << (STDBIT_LATCHSRC - CRBBIT_LATCHSRC)) & STDMSK_LATCHSRC) /* LatchSrc = LatchSrcA. */ |((cra << (STDBIT_INTSRC - CRABIT_INTSRC_A)) & STDMSK_INTSRC) /* IntSrc = IntSrcA. */ |((cra << (STDBIT_INDXSRC - (CRABIT_INDXSRC_A + 1))) & STDMSK_INDXSRC) /* IndxSrc = IndxSrcA<1>. */ |((cra >> (CRABIT_INDXPOL_A - STDBIT_INDXPOL)) & STDMSK_INDXPOL) /* IndxPol = IndxPolA. */ |((crb >> (CRBBIT_CLKENAB_A - STDBIT_CLKENAB)) & STDMSK_CLKENAB)); /* ClkEnab = ClkEnabA. */ /* Adjust mode-dependent parameters. */ if (cra & (2 << CRABIT_CLKSRC_A)) /* If Timer mode (ClkSrcA<1> == 1): */ setup |= ((CLKSRC_TIMER << STDBIT_CLKSRC) /* Indicate Timer mode. */ |((cra << (STDBIT_CLKPOL - CRABIT_CLKSRC_A)) & STDMSK_CLKPOL) /* Set ClkPol to indicate count direction (ClkSrcA<0>). */ |(MULT_X1 << STDBIT_CLKMULT)); /* ClkMult must be 1x in Timer mode. */ else /* If Counter mode (ClkSrcA<1> == 0): */ setup |= ((CLKSRC_COUNTER << STDBIT_CLKSRC) /* Indicate Counter mode. */ |((cra >> (CRABIT_CLKPOL_A - STDBIT_CLKPOL)) & STDMSK_CLKPOL) /* Pass through ClkPol. 
*/ |(((cra & CRAMSK_CLKMULT_A) == (MULT_X0 << CRABIT_CLKMULT_A)) ? /* Force ClkMult to 1x if not legal, else pass through. */ (MULT_X1 << STDBIT_CLKMULT) : ((cra >> (CRABIT_CLKMULT_A - STDBIT_CLKMULT)) & STDMSK_CLKMULT))); /* Return adjusted counter setup. */ return setup; } static uint16_t GetMode_B(struct comedi_device *dev, struct enc_private *k) { register uint16_t cra; register uint16_t crb; register uint16_t setup; /* Fetch CRA and CRB register images. */ cra = DEBIread(dev, k->MyCRA); crb = DEBIread(dev, k->MyCRB); /* Populate the standardized counter setup bit fields. Note: */ /* IndexSrc is restricted to ENC_X or IndxPol. */ setup = (((crb << (STDBIT_INTSRC - CRBBIT_INTSRC_B)) & STDMSK_INTSRC) /* IntSrc = IntSrcB. */ |((crb << (STDBIT_LATCHSRC - CRBBIT_LATCHSRC)) & STDMSK_LATCHSRC) /* LatchSrc = LatchSrcB. */ |((crb << (STDBIT_LOADSRC - CRBBIT_LOADSRC_B)) & STDMSK_LOADSRC) /* LoadSrc = LoadSrcB. */ |((crb << (STDBIT_INDXPOL - CRBBIT_INDXPOL_B)) & STDMSK_INDXPOL) /* IndxPol = IndxPolB. */ |((crb >> (CRBBIT_CLKENAB_B - STDBIT_CLKENAB)) & STDMSK_CLKENAB) /* ClkEnab = ClkEnabB. */ |((cra >> ((CRABIT_INDXSRC_B + 1) - STDBIT_INDXSRC)) & STDMSK_INDXSRC)); /* IndxSrc = IndxSrcB<1>. */ /* Adjust mode-dependent parameters. */ if ((crb & CRBMSK_CLKMULT_B) == (MULT_X0 << CRBBIT_CLKMULT_B)) /* If Extender mode (ClkMultB == MULT_X0): */ setup |= ((CLKSRC_EXTENDER << STDBIT_CLKSRC) /* Indicate Extender mode. */ |(MULT_X1 << STDBIT_CLKMULT) /* Indicate multiplier is 1x. */ |((cra >> (CRABIT_CLKSRC_B - STDBIT_CLKPOL)) & STDMSK_CLKPOL)); /* Set ClkPol equal to Timer count direction (ClkSrcB<0>). */ else if (cra & (2 << CRABIT_CLKSRC_B)) /* If Timer mode (ClkSrcB<1> == 1): */ setup |= ((CLKSRC_TIMER << STDBIT_CLKSRC) /* Indicate Timer mode. */ |(MULT_X1 << STDBIT_CLKMULT) /* Indicate multiplier is 1x. */ |((cra >> (CRABIT_CLKSRC_B - STDBIT_CLKPOL)) & STDMSK_CLKPOL)); /* Set ClkPol equal to Timer count direction (ClkSrcB<0>). 
*/ else /* If Counter mode (ClkSrcB<1> == 0): */ setup |= ((CLKSRC_COUNTER << STDBIT_CLKSRC) /* Indicate Timer mode. */ |((crb >> (CRBBIT_CLKMULT_B - STDBIT_CLKMULT)) & STDMSK_CLKMULT) /* Clock multiplier is passed through. */ |((crb << (STDBIT_CLKPOL - CRBBIT_CLKPOL_B)) & STDMSK_CLKPOL)); /* Clock polarity is passed through. */ /* Return adjusted counter setup. */ return setup; } /* * Set the operating mode for the specified counter. The setup * parameter is treated as a COUNTER_SETUP data type. The following * parameters are programmable (all other parms are ignored): ClkMult, * ClkPol, ClkEnab, IndexSrc, IndexPol, LoadSrc. */ static void SetMode_A(struct comedi_device *dev, struct enc_private *k, uint16_t Setup, uint16_t DisableIntSrc) { register uint16_t cra; register uint16_t crb; register uint16_t setup = Setup; /* Cache the Standard Setup. */ /* Initialize CRA and CRB images. */ cra = ((setup & CRAMSK_LOADSRC_A) /* Preload trigger is passed through. */ |((setup & STDMSK_INDXSRC) >> (STDBIT_INDXSRC - (CRABIT_INDXSRC_A + 1)))); /* IndexSrc is restricted to ENC_X or IndxPol. */ crb = (CRBMSK_INTRESETCMD | CRBMSK_INTRESET_A /* Reset any pending CounterA event captures. */ | ((setup & STDMSK_CLKENAB) << (CRBBIT_CLKENAB_A - STDBIT_CLKENAB))); /* Clock enable is passed through. */ /* Force IntSrc to Disabled if DisableIntSrc is asserted. */ if (!DisableIntSrc) cra |= ((setup & STDMSK_INTSRC) >> (STDBIT_INTSRC - CRABIT_INTSRC_A)); /* Populate all mode-dependent attributes of CRA & CRB images. */ switch ((setup & STDMSK_CLKSRC) >> STDBIT_CLKSRC) { case CLKSRC_EXTENDER: /* Extender Mode: Force to Timer mode */ /* (Extender valid only for B counters). */ case CLKSRC_TIMER: /* Timer Mode: */ cra |= ((2 << CRABIT_CLKSRC_A) /* ClkSrcA<1> selects system clock */ |((setup & STDMSK_CLKPOL) >> (STDBIT_CLKPOL - CRABIT_CLKSRC_A)) /* with count direction (ClkSrcA<0>) obtained from ClkPol. */ |(1 << CRABIT_CLKPOL_A) /* ClkPolA behaves as always-on clock enable. 
*/ |(MULT_X1 << CRABIT_CLKMULT_A)); /* ClkMult must be 1x. */ break; default: /* Counter Mode: */ cra |= (CLKSRC_COUNTER /* Select ENC_C and ENC_D as clock/direction inputs. */ | ((setup & STDMSK_CLKPOL) << (CRABIT_CLKPOL_A - STDBIT_CLKPOL)) /* Clock polarity is passed through. */ |(((setup & STDMSK_CLKMULT) == (MULT_X0 << STDBIT_CLKMULT)) ? /* Force multiplier to x1 if not legal, otherwise pass through. */ (MULT_X1 << CRABIT_CLKMULT_A) : ((setup & STDMSK_CLKMULT) << (CRABIT_CLKMULT_A - STDBIT_CLKMULT)))); } /* Force positive index polarity if IndxSrc is software-driven only, */ /* otherwise pass it through. */ if (~setup & STDMSK_INDXSRC) cra |= ((setup & STDMSK_INDXPOL) << (CRABIT_INDXPOL_A - STDBIT_INDXPOL)); /* If IntSrc has been forced to Disabled, update the MISC2 interrupt */ /* enable mask to indicate the counter interrupt is disabled. */ if (DisableIntSrc) devpriv->CounterIntEnabs &= ~k->MyEventBits[3]; /* While retaining CounterB and LatchSrc configurations, program the */ /* new counter operating mode. */ DEBIreplace(dev, k->MyCRA, CRAMSK_INDXSRC_B | CRAMSK_CLKSRC_B, cra); DEBIreplace(dev, k->MyCRB, (uint16_t) (~(CRBMSK_INTCTRL | CRBMSK_CLKENAB_A)), crb); } static void SetMode_B(struct comedi_device *dev, struct enc_private *k, uint16_t Setup, uint16_t DisableIntSrc) { register uint16_t cra; register uint16_t crb; register uint16_t setup = Setup; /* Cache the Standard Setup. */ /* Initialize CRA and CRB images. */ cra = ((setup & STDMSK_INDXSRC) << ((CRABIT_INDXSRC_B + 1) - STDBIT_INDXSRC)); /* IndexSrc field is restricted to ENC_X or IndxPol. */ crb = (CRBMSK_INTRESETCMD | CRBMSK_INTRESET_B /* Reset event captures and disable interrupts. */ | ((setup & STDMSK_CLKENAB) << (CRBBIT_CLKENAB_B - STDBIT_CLKENAB)) /* Clock enable is passed through. */ |((setup & STDMSK_LOADSRC) >> (STDBIT_LOADSRC - CRBBIT_LOADSRC_B))); /* Preload trigger source is passed through. */ /* Force IntSrc to Disabled if DisableIntSrc is asserted. 
*/ if (!DisableIntSrc) crb |= ((setup & STDMSK_INTSRC) >> (STDBIT_INTSRC - CRBBIT_INTSRC_B)); /* Populate all mode-dependent attributes of CRA & CRB images. */ switch ((setup & STDMSK_CLKSRC) >> STDBIT_CLKSRC) { case CLKSRC_TIMER: /* Timer Mode: */ cra |= ((2 << CRABIT_CLKSRC_B) /* ClkSrcB<1> selects system clock */ |((setup & STDMSK_CLKPOL) << (CRABIT_CLKSRC_B - STDBIT_CLKPOL))); /* with direction (ClkSrcB<0>) obtained from ClkPol. */ crb |= ((1 << CRBBIT_CLKPOL_B) /* ClkPolB behaves as always-on clock enable. */ |(MULT_X1 << CRBBIT_CLKMULT_B)); /* ClkMultB must be 1x. */ break; case CLKSRC_EXTENDER: /* Extender Mode: */ cra |= ((2 << CRABIT_CLKSRC_B) /* ClkSrcB source is OverflowA (same as "timer") */ |((setup & STDMSK_CLKPOL) << (CRABIT_CLKSRC_B - STDBIT_CLKPOL))); /* with direction obtained from ClkPol. */ crb |= ((1 << CRBBIT_CLKPOL_B) /* ClkPolB controls IndexB -- always set to active. */ |(MULT_X0 << CRBBIT_CLKMULT_B)); /* ClkMultB selects OverflowA as the clock source. */ break; default: /* Counter Mode: */ cra |= (CLKSRC_COUNTER << CRABIT_CLKSRC_B); /* Select ENC_C and ENC_D as clock/direction inputs. */ crb |= (((setup & STDMSK_CLKPOL) >> (STDBIT_CLKPOL - CRBBIT_CLKPOL_B)) /* ClkPol is passed through. */ |(((setup & STDMSK_CLKMULT) == (MULT_X0 << STDBIT_CLKMULT)) ? /* Force ClkMult to x1 if not legal, otherwise pass through. */ (MULT_X1 << CRBBIT_CLKMULT_B) : ((setup & STDMSK_CLKMULT) << (CRBBIT_CLKMULT_B - STDBIT_CLKMULT)))); } /* Force positive index polarity if IndxSrc is software-driven only, */ /* otherwise pass it through. */ if (~setup & STDMSK_INDXSRC) crb |= ((setup & STDMSK_INDXPOL) >> (STDBIT_INDXPOL - CRBBIT_INDXPOL_B)); /* If IntSrc has been forced to Disabled, update the MISC2 interrupt */ /* enable mask to indicate the counter interrupt is disabled. */ if (DisableIntSrc) devpriv->CounterIntEnabs &= ~k->MyEventBits[3]; /* While retaining CounterA and LatchSrc configurations, program the */ /* new counter operating mode. 
*/ DEBIreplace(dev, k->MyCRA, (uint16_t) (~(CRAMSK_INDXSRC_B | CRAMSK_CLKSRC_B)), cra); DEBIreplace(dev, k->MyCRB, CRBMSK_CLKENAB_A | CRBMSK_LATCHSRC, crb); } /* Return/set a counter's enable. enab: 0=always enabled, 1=enabled by index. */ static void SetEnable_A(struct comedi_device *dev, struct enc_private *k, uint16_t enab) { DEBUG("SetEnable_A: SetEnable_A enter 3541\n"); DEBIreplace(dev, k->MyCRB, (uint16_t) (~(CRBMSK_INTCTRL | CRBMSK_CLKENAB_A)), (uint16_t) (enab << CRBBIT_CLKENAB_A)); } static void SetEnable_B(struct comedi_device *dev, struct enc_private *k, uint16_t enab) { DEBIreplace(dev, k->MyCRB, (uint16_t) (~(CRBMSK_INTCTRL | CRBMSK_CLKENAB_B)), (uint16_t) (enab << CRBBIT_CLKENAB_B)); } static uint16_t GetEnable_A(struct comedi_device *dev, struct enc_private *k) { return (DEBIread(dev, k->MyCRB) >> CRBBIT_CLKENAB_A) & 1; } static uint16_t GetEnable_B(struct comedi_device *dev, struct enc_private *k) { return (DEBIread(dev, k->MyCRB) >> CRBBIT_CLKENAB_B) & 1; } /* Return/set a counter pair's latch trigger source. 0: On read * access, 1: A index latches A, 2: B index latches B, 3: A overflow * latches B. */ static void SetLatchSource(struct comedi_device *dev, struct enc_private *k, uint16_t value) { DEBUG("SetLatchSource: SetLatchSource enter 3550 \n"); DEBIreplace(dev, k->MyCRB, (uint16_t) (~(CRBMSK_INTCTRL | CRBMSK_LATCHSRC)), (uint16_t) (value << CRBBIT_LATCHSRC)); DEBUG("SetLatchSource: SetLatchSource exit \n"); } /* * static uint16_t GetLatchSource(struct comedi_device *dev, struct enc_private *k ) * { * return ( DEBIread( dev, k->MyCRB) >> CRBBIT_LATCHSRC ) & 3; * } */ /* * Return/set the event that will trigger transfer of the preload * register into the counter. 0=ThisCntr_Index, 1=ThisCntr_Overflow, * 2=OverflowA (B counters only), 3=disabled. 
*/ static void SetLoadTrig_A(struct comedi_device *dev, struct enc_private *k, uint16_t Trig) { DEBIreplace(dev, k->MyCRA, (uint16_t) (~CRAMSK_LOADSRC_A), (uint16_t) (Trig << CRABIT_LOADSRC_A)); } static void SetLoadTrig_B(struct comedi_device *dev, struct enc_private *k, uint16_t Trig) { DEBIreplace(dev, k->MyCRB, (uint16_t) (~(CRBMSK_LOADSRC_B | CRBMSK_INTCTRL)), (uint16_t) (Trig << CRBBIT_LOADSRC_B)); } static uint16_t GetLoadTrig_A(struct comedi_device *dev, struct enc_private *k) { return (DEBIread(dev, k->MyCRA) >> CRABIT_LOADSRC_A) & 3; } static uint16_t GetLoadTrig_B(struct comedi_device *dev, struct enc_private *k) { return (DEBIread(dev, k->MyCRB) >> CRBBIT_LOADSRC_B) & 3; } /* Return/set counter interrupt source and clear any captured * index/overflow events. IntSource: 0=Disabled, 1=OverflowOnly, * 2=IndexOnly, 3=IndexAndOverflow. */ static void SetIntSrc_A(struct comedi_device *dev, struct enc_private *k, uint16_t IntSource) { /* Reset any pending counter overflow or index captures. */ DEBIreplace(dev, k->MyCRB, (uint16_t) (~CRBMSK_INTCTRL), CRBMSK_INTRESETCMD | CRBMSK_INTRESET_A); /* Program counter interrupt source. */ DEBIreplace(dev, k->MyCRA, ~CRAMSK_INTSRC_A, (uint16_t) (IntSource << CRABIT_INTSRC_A)); /* Update MISC2 interrupt enable mask. */ devpriv->CounterIntEnabs = (devpriv->CounterIntEnabs & ~k-> MyEventBits[3]) | k->MyEventBits[IntSource]; } static void SetIntSrc_B(struct comedi_device *dev, struct enc_private *k, uint16_t IntSource) { uint16_t crb; /* Cache writeable CRB register image. */ crb = DEBIread(dev, k->MyCRB) & ~CRBMSK_INTCTRL; /* Reset any pending counter overflow or index captures. */ DEBIwrite(dev, k->MyCRB, (uint16_t) (crb | CRBMSK_INTRESETCMD | CRBMSK_INTRESET_B)); /* Program counter interrupt source. */ DEBIwrite(dev, k->MyCRB, (uint16_t) ((crb & ~CRBMSK_INTSRC_B) | (IntSource << CRBBIT_INTSRC_B))); /* Update MISC2 interrupt enable mask. 
*/ devpriv->CounterIntEnabs = (devpriv->CounterIntEnabs & ~k-> MyEventBits[3]) | k->MyEventBits[IntSource]; } static uint16_t GetIntSrc_A(struct comedi_device *dev, struct enc_private *k) { return (DEBIread(dev, k->MyCRA) >> CRABIT_INTSRC_A) & 3; } static uint16_t GetIntSrc_B(struct comedi_device *dev, struct enc_private *k) { return (DEBIread(dev, k->MyCRB) >> CRBBIT_INTSRC_B) & 3; } /* Return/set the clock multiplier. */ /* static void SetClkMult(struct comedi_device *dev, struct enc_private *k, uint16_t value ) */ /* { */ /* k->SetMode(dev, k, (uint16_t)( ( k->GetMode(dev, k ) & ~STDMSK_CLKMULT ) | ( value << STDBIT_CLKMULT ) ), FALSE ); */ /* } */ /* static uint16_t GetClkMult(struct comedi_device *dev, struct enc_private *k ) */ /* { */ /* return ( k->GetMode(dev, k ) >> STDBIT_CLKMULT ) & 3; */ /* } */ /* Return/set the clock polarity. */ /* static void SetClkPol( struct comedi_device *dev,struct enc_private *k, uint16_t value ) */ /* { */ /* k->SetMode(dev, k, (uint16_t)( ( k->GetMode(dev, k ) & ~STDMSK_CLKPOL ) | ( value << STDBIT_CLKPOL ) ), FALSE ); */ /* } */ /* static uint16_t GetClkPol(struct comedi_device *dev, struct enc_private *k ) */ /* { */ /* return ( k->GetMode(dev, k ) >> STDBIT_CLKPOL ) & 1; */ /* } */ /* Return/set the clock source. */ /* static void SetClkSrc( struct comedi_device *dev,struct enc_private *k, uint16_t value ) */ /* { */ /* k->SetMode(dev, k, (uint16_t)( ( k->GetMode(dev, k ) & ~STDMSK_CLKSRC ) | ( value << STDBIT_CLKSRC ) ), FALSE ); */ /* } */ /* static uint16_t GetClkSrc( struct comedi_device *dev,struct enc_private *k ) */ /* { */ /* return ( k->GetMode(dev, k ) >> STDBIT_CLKSRC ) & 3; */ /* } */ /* Return/set the index polarity. 
*/ /* static void SetIndexPol(struct comedi_device *dev, struct enc_private *k, uint16_t value ) */ /* { */ /* k->SetMode(dev, k, (uint16_t)( ( k->GetMode(dev, k ) & ~STDMSK_INDXPOL ) | ( (value != 0) << STDBIT_INDXPOL ) ), FALSE ); */ /* } */ /* static uint16_t GetIndexPol(struct comedi_device *dev, struct enc_private *k ) */ /* { */ /* return ( k->GetMode(dev, k ) >> STDBIT_INDXPOL ) & 1; */ /* } */ /* Return/set the index source. */ /* static void SetIndexSrc(struct comedi_device *dev, struct enc_private *k, uint16_t value ) */ /* { */ /* DEBUG("SetIndexSrc: set index src enter 3700\n"); */ /* k->SetMode(dev, k, (uint16_t)( ( k->GetMode(dev, k ) & ~STDMSK_INDXSRC ) | ( (value != 0) << STDBIT_INDXSRC ) ), FALSE ); */ /* } */ /* static uint16_t GetIndexSrc(struct comedi_device *dev, struct enc_private *k ) */ /* { */ /* return ( k->GetMode(dev, k ) >> STDBIT_INDXSRC ) & 1; */ /* } */ /* Generate an index pulse. */ static void PulseIndex_A(struct comedi_device *dev, struct enc_private *k) { register uint16_t cra; DEBUG("PulseIndex_A: pulse index enter\n"); cra = DEBIread(dev, k->MyCRA); /* Pulse index. */ DEBIwrite(dev, k->MyCRA, (uint16_t) (cra ^ CRAMSK_INDXPOL_A)); DEBUG("PulseIndex_A: pulse index step1\n"); DEBIwrite(dev, k->MyCRA, cra); } static void PulseIndex_B(struct comedi_device *dev, struct enc_private *k) { register uint16_t crb; crb = DEBIread(dev, k->MyCRB) & ~CRBMSK_INTCTRL; /* Pulse index. */ DEBIwrite(dev, k->MyCRB, (uint16_t) (crb ^ CRBMSK_INDXPOL_B)); DEBIwrite(dev, k->MyCRB, crb); } /* Write value into counter preload register. */ static void Preload(struct comedi_device *dev, struct enc_private *k, uint32_t value) { DEBUG("Preload: preload enter\n"); DEBIwrite(dev, (uint16_t) (k->MyLatchLsw), (uint16_t) value); /* Write value to preload register. 
*/ DEBUG("Preload: preload step 1\n"); DEBIwrite(dev, (uint16_t) (k->MyLatchLsw + 2), (uint16_t) (value >> 16)); } static void CountersInit(struct comedi_device *dev) { int chan; struct enc_private *k; uint16_t Setup = (LOADSRC_INDX << BF_LOADSRC) | /* Preload upon */ /* index. */ (INDXSRC_SOFT << BF_INDXSRC) | /* Disable hardware index. */ (CLKSRC_COUNTER << BF_CLKSRC) | /* Operating mode is counter. */ (CLKPOL_POS << BF_CLKPOL) | /* Active high clock. */ (CNTDIR_UP << BF_CLKPOL) | /* Count direction is up. */ (CLKMULT_1X << BF_CLKMULT) | /* Clock multiplier is 1x. */ (CLKENAB_INDEX << BF_CLKENAB); /* Enabled by index */ /* Disable all counter interrupts and clear any captured counter events. */ for (chan = 0; chan < S626_ENCODER_CHANNELS; chan++) { k = &encpriv[chan]; k->SetMode(dev, k, Setup, TRUE); k->SetIntSrc(dev, k, 0); k->ResetCapFlags(dev, k); k->SetEnable(dev, k, CLKENAB_ALWAYS); } DEBUG("CountersInit: counters initialized \n"); }
gpl-2.0
ggsamsa/sched_casio
drivers/md/dm-stripe.c
810
8339
/* * Copyright (C) 2001-2003 Sistina Software (UK) Limited. * * This file is released under the GPL. */ #include <linux/device-mapper.h> #include <linux/module.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/bio.h> #include <linux/slab.h> #include <linux/log2.h> #define DM_MSG_PREFIX "striped" #define DM_IO_ERROR_THRESHOLD 15 struct stripe { struct dm_dev *dev; sector_t physical_start; atomic_t error_count; }; struct stripe_c { uint32_t stripes; /* The size of this target / num. stripes */ sector_t stripe_width; /* stripe chunk size */ uint32_t chunk_shift; sector_t chunk_mask; /* Needed for handling events */ struct dm_target *ti; /* Work struct used for triggering events*/ struct work_struct kstriped_ws; struct stripe stripe[0]; }; static struct workqueue_struct *kstriped; /* * An event is triggered whenever a drive * drops out of a stripe volume. */ static void trigger_event(struct work_struct *work) { struct stripe_c *sc = container_of(work, struct stripe_c, kstriped_ws); dm_table_event(sc->ti->table); } static inline struct stripe_c *alloc_context(unsigned int stripes) { size_t len; if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe), stripes)) return NULL; len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes); return kmalloc(len, GFP_KERNEL); } /* * Parse a single <dev> <sector> pair */ static int get_stripe(struct dm_target *ti, struct stripe_c *sc, unsigned int stripe, char **argv) { unsigned long long start; if (sscanf(argv[1], "%llu", &start) != 1) return -EINVAL; if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &sc->stripe[stripe].dev)) return -ENXIO; sc->stripe[stripe].physical_start = start; return 0; } /* * Construct a striped mapping. 
* <number of stripes> <chunk size (2^^n)> [<dev_path> <offset>]+ */ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct stripe_c *sc; sector_t width; uint32_t stripes; uint32_t chunk_size; char *end; int r; unsigned int i; if (argc < 2) { ti->error = "Not enough arguments"; return -EINVAL; } stripes = simple_strtoul(argv[0], &end, 10); if (!stripes || *end) { ti->error = "Invalid stripe count"; return -EINVAL; } chunk_size = simple_strtoul(argv[1], &end, 10); if (*end) { ti->error = "Invalid chunk_size"; return -EINVAL; } /* * chunk_size is a power of two */ if (!is_power_of_2(chunk_size) || (chunk_size < (PAGE_SIZE >> SECTOR_SHIFT))) { ti->error = "Invalid chunk size"; return -EINVAL; } if (ti->len & (chunk_size - 1)) { ti->error = "Target length not divisible by " "chunk size"; return -EINVAL; } width = ti->len; if (sector_div(width, stripes)) { ti->error = "Target length not divisible by " "number of stripes"; return -EINVAL; } /* * Do we have enough arguments for that many stripes ? */ if (argc != (2 + 2 * stripes)) { ti->error = "Not enough destinations " "specified"; return -EINVAL; } sc = alloc_context(stripes); if (!sc) { ti->error = "Memory allocation for striped context " "failed"; return -ENOMEM; } INIT_WORK(&sc->kstriped_ws, trigger_event); /* Set pointer to dm target; used in trigger_event */ sc->ti = ti; sc->stripes = stripes; sc->stripe_width = width; ti->split_io = chunk_size; ti->num_flush_requests = stripes; sc->chunk_mask = ((sector_t) chunk_size) - 1; for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++) chunk_size >>= 1; sc->chunk_shift--; /* * Get the stripe destinations. 
*/ for (i = 0; i < stripes; i++) { argv += 2; r = get_stripe(ti, sc, i, argv); if (r < 0) { ti->error = "Couldn't parse stripe destination"; while (i--) dm_put_device(ti, sc->stripe[i].dev); kfree(sc); return r; } atomic_set(&(sc->stripe[i].error_count), 0); } ti->private = sc; return 0; } static void stripe_dtr(struct dm_target *ti) { unsigned int i; struct stripe_c *sc = (struct stripe_c *) ti->private; for (i = 0; i < sc->stripes; i++) dm_put_device(ti, sc->stripe[i].dev); flush_workqueue(kstriped); kfree(sc); } static int stripe_map(struct dm_target *ti, struct bio *bio, union map_info *map_context) { struct stripe_c *sc = (struct stripe_c *) ti->private; sector_t offset, chunk; uint32_t stripe; if (unlikely(bio_empty_barrier(bio))) { BUG_ON(map_context->flush_request >= sc->stripes); bio->bi_bdev = sc->stripe[map_context->flush_request].dev->bdev; return DM_MAPIO_REMAPPED; } offset = bio->bi_sector - ti->begin; chunk = offset >> sc->chunk_shift; stripe = sector_div(chunk, sc->stripes); bio->bi_bdev = sc->stripe[stripe].dev->bdev; bio->bi_sector = sc->stripe[stripe].physical_start + (chunk << sc->chunk_shift) + (offset & sc->chunk_mask); return DM_MAPIO_REMAPPED; } /* * Stripe status: * * INFO * #stripes [stripe_name <stripe_name>] [group word count] * [error count 'A|D' <error count 'A|D'>] * * TABLE * #stripes [stripe chunk size] * [stripe_name physical_start <stripe_name physical_start>] * */ static int stripe_status(struct dm_target *ti, status_type_t type, char *result, unsigned int maxlen) { struct stripe_c *sc = (struct stripe_c *) ti->private; char buffer[sc->stripes + 1]; unsigned int sz = 0; unsigned int i; switch (type) { case STATUSTYPE_INFO: DMEMIT("%d ", sc->stripes); for (i = 0; i < sc->stripes; i++) { DMEMIT("%s ", sc->stripe[i].dev->name); buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ? 
'D' : 'A'; } buffer[i] = '\0'; DMEMIT("1 %s", buffer); break; case STATUSTYPE_TABLE: DMEMIT("%d %llu", sc->stripes, (unsigned long long)sc->chunk_mask + 1); for (i = 0; i < sc->stripes; i++) DMEMIT(" %s %llu", sc->stripe[i].dev->name, (unsigned long long)sc->stripe[i].physical_start); break; } return 0; } static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error, union map_info *map_context) { unsigned i; char major_minor[16]; struct stripe_c *sc = ti->private; if (!error) return 0; /* I/O complete */ if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD)) return error; if (error == -EOPNOTSUPP) return error; memset(major_minor, 0, sizeof(major_minor)); sprintf(major_minor, "%d:%d", MAJOR(disk_devt(bio->bi_bdev->bd_disk)), MINOR(disk_devt(bio->bi_bdev->bd_disk))); /* * Test to see which stripe drive triggered the event * and increment error count for all stripes on that device. * If the error count for a given device exceeds the threshold * value we will no longer trigger any further events. 
*/ for (i = 0; i < sc->stripes; i++) if (!strcmp(sc->stripe[i].dev->name, major_minor)) { atomic_inc(&(sc->stripe[i].error_count)); if (atomic_read(&(sc->stripe[i].error_count)) < DM_IO_ERROR_THRESHOLD) queue_work(kstriped, &sc->kstriped_ws); } return error; } static int stripe_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct stripe_c *sc = ti->private; int ret = 0; unsigned i = 0; do { ret = fn(ti, sc->stripe[i].dev, sc->stripe[i].physical_start, sc->stripe_width, data); } while (!ret && ++i < sc->stripes); return ret; } static void stripe_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct stripe_c *sc = ti->private; unsigned chunk_size = (sc->chunk_mask + 1) << 9; blk_limits_io_min(limits, chunk_size); blk_limits_io_opt(limits, chunk_size * sc->stripes); } static struct target_type stripe_target = { .name = "striped", .version = {1, 3, 0}, .module = THIS_MODULE, .ctr = stripe_ctr, .dtr = stripe_dtr, .map = stripe_map, .end_io = stripe_end_io, .status = stripe_status, .iterate_devices = stripe_iterate_devices, .io_hints = stripe_io_hints, }; int __init dm_stripe_init(void) { int r; r = dm_register_target(&stripe_target); if (r < 0) { DMWARN("target registration failed"); return r; } kstriped = create_singlethread_workqueue("kstriped"); if (!kstriped) { DMERR("failed to create workqueue kstriped"); dm_unregister_target(&stripe_target); return -ENOMEM; } return r; } void dm_stripe_exit(void) { dm_unregister_target(&stripe_target); destroy_workqueue(kstriped); return; }
gpl-2.0
pawitp/android_kernel_samsung_aries
drivers/staging/comedi/drivers/cb_pcidio.c
810
9430
/* comedi/drivers/cb_pcidio.c A Comedi driver for PCI-DIO24H & PCI-DIO48H of ComputerBoards (currently MeasurementComputing) COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: cb_pcidio Description: ComputerBoards' DIO boards with PCI interface Devices: [Measurement Computing] PCI-DIO24 (cb_pcidio), PCI-DIO24H, PCI-DIO48H Author: Yoshiya Matsuzaka Updated: Mon, 29 Oct 2007 15:40:47 +0000 Status: experimental This driver has been modified from skel.c of comedi-0.7.70. Configuration Options: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) If bus/slot is not specified, the first available PCI device will be used. Passing a zero for an option is the same as leaving it unspecified. */ /*------------------------------ HEADER FILES ---------------------------------*/ #include "../comedidev.h" #include "comedi_pci.h" #include "8255.h" /*-------------------------- MACROS and DATATYPES -----------------------------*/ #define PCI_VENDOR_ID_CB 0x1307 /* * Board descriptions for two imaginary boards. Describing the * boards in this way is optional, and completely driver-dependent. * Some drivers use arrays such as this, other do not. 
*/ struct pcidio_board { const char *name; /* name of the board */ int dev_id; int n_8255; /* number of 8255 chips on board */ /* indices of base address regions */ int pcicontroler_badrindex; int dioregs_badrindex; }; static const struct pcidio_board pcidio_boards[] = { { .name = "pci-dio24", .dev_id = 0x0028, .n_8255 = 1, .pcicontroler_badrindex = 1, .dioregs_badrindex = 2, }, { .name = "pci-dio24h", .dev_id = 0x0014, .n_8255 = 1, .pcicontroler_badrindex = 1, .dioregs_badrindex = 2, }, { .name = "pci-dio48h", .dev_id = 0x000b, .n_8255 = 2, .pcicontroler_badrindex = 0, .dioregs_badrindex = 1, }, }; /* This is used by modprobe to translate PCI IDs to drivers. Should * only be used for PCI and ISA-PnP devices */ /* Please add your PCI vendor ID to comedidev.h, and it will be forwarded * upstream. */ static DEFINE_PCI_DEVICE_TABLE(pcidio_pci_table) = { { PCI_VENDOR_ID_CB, 0x0028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_CB, 0x0014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_CB, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { 0} }; MODULE_DEVICE_TABLE(pci, pcidio_pci_table); /* * Useful for shorthand access to the particular board structure */ #define thisboard ((const struct pcidio_board *)dev->board_ptr) /* this structure is for data unique to this hardware driver. If several hardware drivers keep similar information in this structure, feel free to suggest moving the variable to the struct comedi_device struct. */ struct pcidio_private { int data; /* currently unused */ /* would be useful for a PCI device */ struct pci_dev *pci_dev; /* used for DO readback, currently unused */ unsigned int do_readback[4]; /* up to 4 unsigned int suffice to hold 96 bits for PCI-DIO96 */ unsigned long dio_reg_base; /* address of port A of the first 8255 chip on board */ }; /* * most drivers define the following macro to make it easy to * access the private structure. 
*/ #define devpriv ((struct pcidio_private *)dev->private) /* * The struct comedi_driver structure tells the Comedi core module * which functions to call to configure/deconfigure (attach/detach) * the board, and also about the kernel module that contains * the device code. */ static int pcidio_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int pcidio_detach(struct comedi_device *dev); static struct comedi_driver driver_cb_pcidio = { .driver_name = "cb_pcidio", .module = THIS_MODULE, .attach = pcidio_attach, .detach = pcidio_detach, /* It is not necessary to implement the following members if you are * writing a driver for a ISA PnP or PCI card */ /* Most drivers will support multiple types of boards by * having an array of board structures. These were defined * in pcidio_boards[] above. Note that the element 'name' * was first in the structure -- Comedi uses this fact to * extract the name of the board without knowing any details * about the structure except for its length. * When a device is attached (by comedi_config), the name * of the device is given to Comedi, and Comedi tries to * match it by going through the list of board names. If * there is a match, the address of the pointer is put * into dev->board_ptr and driver->attach() is called. * * Note that these are not necessary if you can determine * the type of board in software. ISA PnP, PCI, and PCMCIA * devices are such boards. */ /* The following fields should NOT be initialized if you are dealing * with PCI devices * * .board_name = pcidio_boards, * .offset = sizeof(struct pcidio_board), * .num_names = sizeof(pcidio_boards) / sizeof(structpcidio_board), */ }; /*------------------------------- FUNCTIONS -----------------------------------*/ /* * Attach is called by the Comedi core to configure the driver * for a particular board. If you specified a board_name array * in the driver structure, dev->board_ptr contains that * address. 
*/ static int pcidio_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct pci_dev *pcidev = NULL; int index; int i; printk("comedi%d: cb_pcidio: \n", dev->minor); /* * Allocate the private structure area. alloc_private() is a * convenient macro defined in comedidev.h. */ if (alloc_private(dev, sizeof(struct pcidio_private)) < 0) return -ENOMEM; /* * If you can probe the device to determine what device in a series * it is, this is the place to do it. Otherwise, dev->board_ptr * should already be initialized. */ /* * Probe the device to determine what device in the series it is. */ for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL); pcidev != NULL; pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) { /* is it not a computer boards card? */ if (pcidev->vendor != PCI_VENDOR_ID_CB) continue; /* loop through cards supported by this driver */ for (index = 0; index < ARRAY_SIZE(pcidio_boards); index++) { if (pcidio_boards[index].dev_id != pcidev->device) continue; /* was a particular bus/slot requested? */ if (it->options[0] || it->options[1]) { /* are we on the wrong bus/slot? */ if (pcidev->bus->number != it->options[0] || PCI_SLOT(pcidev->devfn) != it->options[1]) { continue; } } dev->board_ptr = pcidio_boards + index; goto found; } } printk("No supported ComputerBoards/MeasurementComputing card found on " "requested position\n"); return -EIO; found: /* * Initialize dev->board_name. Note that we can use the "thisboard" * macro now, since we just initialized it in the last line. */ dev->board_name = thisboard->name; devpriv->pci_dev = pcidev; printk("Found %s on bus %i, slot %i\n", thisboard->name, devpriv->pci_dev->bus->number, PCI_SLOT(devpriv->pci_dev->devfn)); if (comedi_pci_enable(pcidev, thisboard->name)) { printk ("cb_pcidio: failed to enable PCI device and request regions\n"); return -EIO; } devpriv->dio_reg_base = pci_resource_start(devpriv->pci_dev, pcidio_boards[index].dioregs_badrindex); /* * Allocate the subdevice structures. 
alloc_subdevice() is a * convenient macro defined in comedidev.h. */ if (alloc_subdevices(dev, thisboard->n_8255) < 0) return -ENOMEM; for (i = 0; i < thisboard->n_8255; i++) { subdev_8255_init(dev, dev->subdevices + i, NULL, devpriv->dio_reg_base + i * 4); printk(" subdev %d: base = 0x%lx\n", i, devpriv->dio_reg_base + i * 4); } printk("attached\n"); return 1; } /* * _detach is called to deconfigure a device. It should deallocate * resources. * This function is also called when _attach() fails, so it should be * careful not to release resources that were not necessarily * allocated by _attach(). dev->private and dev->subdevices are * deallocated automatically by the core. */ static int pcidio_detach(struct comedi_device *dev) { printk("comedi%d: cb_pcidio: remove\n", dev->minor); if (devpriv) { if (devpriv->pci_dev) { if (devpriv->dio_reg_base) comedi_pci_disable(devpriv->pci_dev); pci_dev_put(devpriv->pci_dev); } } if (dev->subdevices) { int i; for (i = 0; i < thisboard->n_8255; i++) subdev_8255_cleanup(dev, dev->subdevices + i); } return 0; } /* * A convenient macro that defines init_module() and cleanup_module(), * as necessary. */ COMEDI_PCI_INITCLEANUP(driver_cb_pcidio, pcidio_pci_table);
gpl-2.0
spacecaker/CM7_Space_Kernel_Cooper
drivers/scsi/bfa/bfa_hw_cb.c
810
3918
/* * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <bfa_priv.h> #include <bfi/bfi_cbreg.h> void bfa_hwcb_reginit(struct bfa_s *bfa) { struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc); int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); if (fn == 0) { bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); bfa_regs->intr_mask = (kva + HOSTFN0_INT_MSK); } else { bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS); bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK); } for (i = 0; i < BFI_IOC_MAX_CQS; i++) { /* * CPE registers */ q = CPE_Q_NUM(fn, i); bfa_regs->cpe_q_pi[i] = (kva + CPE_Q_PI(q)); bfa_regs->cpe_q_ci[i] = (kva + CPE_Q_CI(q)); bfa_regs->cpe_q_depth[i] = (kva + CPE_Q_DEPTH(q)); /* * RME registers */ q = CPE_Q_NUM(fn, i); bfa_regs->rme_q_pi[i] = (kva + RME_Q_PI(q)); bfa_regs->rme_q_ci[i] = (kva + RME_Q_CI(q)); bfa_regs->rme_q_depth[i] = (kva + RME_Q_DEPTH(q)); } } void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq) { } static void bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq) { bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, __HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq)); } void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq) { } static void bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq) { bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, __HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq)); } void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 
*msix_vecs_bmap, u32 *num_vecs, u32 *max_vec_bit) { #define __HFN_NUMINTS 13 if (bfa_ioc_pcifn(&bfa->ioc) == 0) { *msix_vecs_bmap = (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0); *max_vec_bit = __HFN_INT_MBOX_LPU0; } else { *msix_vecs_bmap = (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1); *max_vec_bit = __HFN_INT_MBOX_LPU1; } *msix_vecs_bmap |= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS); *num_vecs = __HFN_NUMINTS; } /** * No special setup required for crossbow -- vector assignments are implicit. */ void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs) { int i; bfa_assert((nvecs == 1) || (nvecs == __HFN_NUMINTS)); bfa->msix.nvecs = nvecs; if (nvecs == 1) { for (i = 0; i < BFA_MSIX_CB_MAX; i++) bfa->msix.handler[i] = bfa_msix_all; return; } for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q7; i++) bfa->msix.handler[i] = bfa_msix_reqq; for (i = BFA_MSIX_RME_Q0; i <= BFA_MSIX_RME_Q7; i++) bfa->msix.handler[i] = bfa_msix_rspq; for (; i < BFA_MSIX_CB_MAX; i++) bfa->msix.handler[i] = bfa_msix_lpu_err; } /** * Crossbow -- dummy, interrupts are masked */ void bfa_hwcb_msix_install(struct bfa_s *bfa) { } void bfa_hwcb_msix_uninstall(struct bfa_s *bfa) { } /** * No special enable/disable -- vector assignments are implicit. */ void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) { bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix; bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix; }
gpl-2.0
androidarmv6/android_kernel_lge_msm7x27
drivers/staging/comedi/drivers/aio_iiro_16.c
810
4652
/* comedi/drivers/aio_iiro_16.c Driver for Acces I/O Products PC-104 AIO-IIRO-16 Digital I/O board Copyright (C) 2006 C&C Technologies, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: aio_iiro_16 Description: Acces I/O Products PC-104 IIRO16 Relay And Isolated Input Board Author: Zachary Ware <zach.ware@cctechnol.com> Devices: [Acces I/O] PC-104 AIO12-8 Status: experimental Configuration Options: [0] - I/O port base address */ #include "../comedidev.h" #include <linux/ioport.h> #define AIO_IIRO_16_SIZE 0x08 #define AIO_IIRO_16_RELAY_0_7 0x00 #define AIO_IIRO_16_INPUT_0_7 0x01 #define AIO_IIRO_16_IRQ 0x02 #define AIO_IIRO_16_RELAY_8_15 0x04 #define AIO_IIRO_16_INPUT_8_15 0x05 struct aio_iiro_16_board { const char *name; int do_; int di; }; static const struct aio_iiro_16_board aio_iiro_16_boards[] = { { .name = "aio_iiro_16", .di = 16, .do_ = 16}, }; #define thisboard ((const struct aio_iiro_16_board *) dev->board_ptr) struct aio_iiro_16_private { int data; struct pci_dev *pci_dev; unsigned int ao_readback[2]; }; #define devpriv ((struct aio_iiro_16_private *) dev->private) static int aio_iiro_16_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int aio_iiro_16_detach(struct comedi_device *dev); static struct comedi_driver driver_aio_iiro_16 = { .driver_name = "aio_iiro_16", .module = THIS_MODULE, .attach = aio_iiro_16_attach, 
.detach = aio_iiro_16_detach, .board_name = &aio_iiro_16_boards[0].name, .offset = sizeof(struct aio_iiro_16_board), .num_names = ARRAY_SIZE(aio_iiro_16_boards), }; static int aio_iiro_16_dio_insn_bits_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int aio_iiro_16_dio_insn_bits_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int aio_iiro_16_attach(struct comedi_device *dev, struct comedi_devconfig *it) { int iobase; struct comedi_subdevice *s; printk(KERN_INFO "comedi%d: aio_iiro_16: ", dev->minor); dev->board_name = thisboard->name; iobase = it->options[0]; if (!request_region(iobase, AIO_IIRO_16_SIZE, dev->board_name)) { printk("I/O port conflict"); return -EIO; } dev->iobase = iobase; if (alloc_private(dev, sizeof(struct aio_iiro_16_private)) < 0) return -ENOMEM; if (alloc_subdevices(dev, 2) < 0) return -ENOMEM; s = dev->subdevices + 0; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_WRITABLE; s->n_chan = 16; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = aio_iiro_16_dio_insn_bits_write; s = dev->subdevices + 1; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE; s->n_chan = 16; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = aio_iiro_16_dio_insn_bits_read; printk("attached\n"); return 1; } static int aio_iiro_16_detach(struct comedi_device *dev) { printk(KERN_INFO "comedi%d: aio_iiro_16: remove\n", dev->minor); if (dev->iobase) release_region(dev->iobase, AIO_IIRO_16_SIZE); return 0; } static int aio_iiro_16_dio_insn_bits_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (insn->n != 2) return -EINVAL; if (data[0]) { s->state &= ~data[0]; s->state |= data[0] & data[1]; outb(s->state & 0xff, dev->iobase + AIO_IIRO_16_RELAY_0_7); outb((s->state >> 8) & 0xff, dev->iobase + AIO_IIRO_16_RELAY_8_15); } data[1] = s->state; return 2; } 
static int aio_iiro_16_dio_insn_bits_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (insn->n != 2) return -EINVAL; data[1] = 0; data[1] |= inb(dev->iobase + AIO_IIRO_16_INPUT_0_7); data[1] |= inb(dev->iobase + AIO_IIRO_16_INPUT_8_15) << 8; return 2; } COMEDI_INITCLEANUP(driver_aio_iiro_16);
gpl-2.0
assusdan/cyanogenmod_kernel_prestigio_muzed3
drivers/staging/comedi/drivers/pcm3724.c
2090
5824
/* comedi/drivers/pcm724.c Drew Csillag <drew_csillag@yahoo.com> hardware driver for Advantech card: card: PCM-3724 driver: pcm3724 Options for PCM-3724 [0] - IO Base */ /* Driver: pcm3724 Description: Advantech PCM-3724 Author: Drew Csillag <drew_csillag@yahoo.com> Devices: [Advantech] PCM-3724 (pcm724) Status: tested This is driver for digital I/O boards PCM-3724 with 48 DIO. It needs 8255.o for operations and only immediate mode is supported. See the source for configuration details. Copy/pasted/hacked from pcm724.c */ /* * check_driver overrides: * struct comedi_insn */ #include "../comedidev.h" #include <linux/ioport.h> #include <linux/delay.h> #include "8255.h" #define PCM3724_SIZE 16 #define SIZE_8255 4 #define BUF_C0 0x1 #define BUF_B0 0x2 #define BUF_A0 0x4 #define BUF_C1 0x8 #define BUF_B1 0x10 #define BUF_A1 0x20 #define GATE_A0 0x4 #define GATE_B0 0x2 #define GATE_C0 0x1 #define GATE_A1 0x20 #define GATE_B1 0x10 #define GATE_C1 0x8 /* from 8255.c */ #define CR_CW 0x80 #define _8255_CR 3 #define CR_B_IO 0x02 #define CR_B_MODE 0x04 #define CR_C_IO 0x09 #define CR_A_IO 0x10 #define CR_A_MODE(a) ((a)<<5) #define CR_CW 0x80 /* used to track configured dios */ struct priv_pcm3724 { int dio_1; int dio_2; }; static int subdev_8255_cb(int dir, int port, int data, unsigned long arg) { unsigned long iobase = arg; unsigned char inbres; /* printk("8255cb %d %d %d %lx\n", dir,port,data,arg); */ if (dir) { /* printk("8255 cb outb(%x, %lx)\n", data, iobase+port); */ outb(data, iobase + port); return 0; } else { inbres = inb(iobase + port); /* printk("8255 cb inb(%lx) = %x\n", iobase+port, inbres); */ return inbres; } } static int compute_buffer(int config, int devno, struct comedi_subdevice *s) { /* 1 in io_bits indicates output */ if (s->io_bits & 0x0000ff) { if (devno == 0) config |= BUF_A0; else config |= BUF_A1; } if (s->io_bits & 0x00ff00) { if (devno == 0) config |= BUF_B0; else config |= BUF_B1; } if (s->io_bits & 0xff0000) { if (devno == 0) config |= BUF_C0; 
else config |= BUF_C1; } return config; } static void do_3724_config(struct comedi_device *dev, struct comedi_subdevice *s, int chanspec) { struct comedi_subdevice *s_dio1 = &dev->subdevices[0]; struct comedi_subdevice *s_dio2 = &dev->subdevices[1]; int config; int buffer_config; unsigned long port_8255_cfg; config = CR_CW; buffer_config = 0; /* 1 in io_bits indicates output, 1 in config indicates input */ if (!(s->io_bits & 0x0000ff)) config |= CR_A_IO; if (!(s->io_bits & 0x00ff00)) config |= CR_B_IO; if (!(s->io_bits & 0xff0000)) config |= CR_C_IO; buffer_config = compute_buffer(0, 0, s_dio1); buffer_config = compute_buffer(buffer_config, 1, s_dio2); if (s == s_dio1) port_8255_cfg = dev->iobase + _8255_CR; else port_8255_cfg = dev->iobase + SIZE_8255 + _8255_CR; outb(buffer_config, dev->iobase + 8); /* update buffer register */ /* printk("pcm3724 buffer_config (%lx) %d, %x\n", dev->iobase + _8255_CR, chanspec, buffer_config); */ outb(config, port_8255_cfg); } static void enable_chan(struct comedi_device *dev, struct comedi_subdevice *s, int chanspec) { struct priv_pcm3724 *priv = dev->private; struct comedi_subdevice *s_dio1 = &dev->subdevices[0]; unsigned int mask; int gatecfg; gatecfg = 0; mask = 1 << CR_CHAN(chanspec); if (s == s_dio1) priv->dio_1 |= mask; else priv->dio_2 |= mask; if (priv->dio_1 & 0xff0000) gatecfg |= GATE_C0; if (priv->dio_1 & 0xff00) gatecfg |= GATE_B0; if (priv->dio_1 & 0xff) gatecfg |= GATE_A0; if (priv->dio_2 & 0xff0000) gatecfg |= GATE_C1; if (priv->dio_2 & 0xff00) gatecfg |= GATE_B1; if (priv->dio_2 & 0xff) gatecfg |= GATE_A1; /* printk("gate control %x\n", gatecfg); */ outb(gatecfg, dev->iobase + 9); } /* overriding the 8255 insn config */ static int subdev_3724_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int mask; unsigned int bits; mask = 1 << CR_CHAN(insn->chanspec); if (mask & 0x0000ff) bits = 0x0000ff; else if (mask & 0x00ff00) bits = 0x00ff00; else 
if (mask & 0x0f0000) bits = 0x0f0000; else bits = 0xf00000; switch (data[0]) { case INSN_CONFIG_DIO_INPUT: s->io_bits &= ~bits; break; case INSN_CONFIG_DIO_OUTPUT: s->io_bits |= bits; break; case INSN_CONFIG_DIO_QUERY: data[1] = (s->io_bits & bits) ? COMEDI_OUTPUT : COMEDI_INPUT; return insn->n; break; default: return -EINVAL; } do_3724_config(dev, s, insn->chanspec); enable_chan(dev, s, insn->chanspec); return 1; } static int pcm3724_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct priv_pcm3724 *priv; struct comedi_subdevice *s; int ret, i; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; dev->private = priv; ret = comedi_request_region(dev, it->options[0], PCM3724_SIZE); if (ret) return ret; ret = comedi_alloc_subdevices(dev, 2); if (ret) return ret; for (i = 0; i < dev->n_subdevices; i++) { s = &dev->subdevices[i]; subdev_8255_init(dev, s, subdev_8255_cb, (unsigned long)(dev->iobase + SIZE_8255 * i)); s->insn_config = subdev_3724_insn_config; } return 0; } static void pcm3724_detach(struct comedi_device *dev) { int i; for (i = 0; i < dev->n_subdevices; i++) comedi_spriv_free(dev, i); comedi_legacy_detach(dev); } static struct comedi_driver pcm3724_driver = { .driver_name = "pcm3724", .module = THIS_MODULE, .attach = pcm3724_attach, .detach = pcm3724_detach, }; module_comedi_driver(pcm3724_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
alexax66/kernel_samsung_a3xelte
drivers/staging/comedi/drivers/mpc624.c
2090
11179
/* comedi/drivers/mpc624.c Hardware driver for a Micro/sys inc. MPC-624 PC/104 board COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: mpc624 Description: Micro/sys MPC-624 PC/104 board Devices: [Micro/sys] MPC-624 (mpc624) Author: Stanislaw Raczynski <sraczynski@op.pl> Updated: Thu, 15 Sep 2005 12:01:18 +0200 Status: working The Micro/sys MPC-624 board is based on the LTC2440 24-bit sigma-delta ADC chip. Subdevices supported by the driver: - Analog In: supported - Digital I/O: not supported - LEDs: not supported - EEPROM: not supported Configuration Options: [0] - I/O base address [1] - conversion rate Conversion rate RMS noise Effective Number Of Bits 0 3.52kHz 23uV 17 1 1.76kHz 3.5uV 20 2 880Hz 2uV 21.3 3 440Hz 1.4uV 21.8 4 220Hz 1uV 22.4 5 110Hz 750uV 22.9 6 55Hz 510nV 23.4 7 27.5Hz 375nV 24 8 13.75Hz 250nV 24.4 9 6.875Hz 200nV 24.6 [2] - voltage range 0 -1.01V .. +1.01V 1 -10.1V .. 
+10.1V */ #include "../comedidev.h" #include <linux/ioport.h> #include <linux/delay.h> /* Consecutive I/O port addresses */ #define MPC624_SIZE 16 /* Offsets of different ports */ #define MPC624_MASTER_CONTROL 0 /* not used */ #define MPC624_GNMUXCH 1 /* Gain, Mux, Channel of ADC */ #define MPC624_ADC 2 /* read/write to/from ADC */ #define MPC624_EE 3 /* read/write to/from serial EEPROM via I2C */ #define MPC624_LEDS 4 /* write to LEDs */ #define MPC624_DIO 5 /* read/write to/from digital I/O ports */ #define MPC624_IRQ_MASK 6 /* IRQ masking enable/disable */ /* Register bits' names */ #define MPC624_ADBUSY (1<<5) #define MPC624_ADSDO (1<<4) #define MPC624_ADFO (1<<3) #define MPC624_ADCS (1<<2) #define MPC624_ADSCK (1<<1) #define MPC624_ADSDI (1<<0) /* SDI Speed/Resolution Programming bits */ #define MPC624_OSR4 (1<<31) #define MPC624_OSR3 (1<<30) #define MPC624_OSR2 (1<<29) #define MPC624_OSR1 (1<<28) #define MPC624_OSR0 (1<<27) /* 32-bit output value bits' names */ #define MPC624_EOC_BIT (1<<31) #define MPC624_DMY_BIT (1<<30) #define MPC624_SGN_BIT (1<<29) /* Conversion speeds */ /* OSR4 OSR3 OSR2 OSR1 OSR0 Conversion rate RMS noise ENOB^ * X 0 0 0 1 3.52kHz 23uV 17 * X 0 0 1 0 1.76kHz 3.5uV 20 * X 0 0 1 1 880Hz 2uV 21.3 * X 0 1 0 0 440Hz 1.4uV 21.8 * X 0 1 0 1 220Hz 1uV 22.4 * X 0 1 1 0 110Hz 750uV 22.9 * X 0 1 1 1 55Hz 510nV 23.4 * X 1 0 0 0 27.5Hz 375nV 24 * X 1 0 0 1 13.75Hz 250nV 24.4 * X 1 1 1 1 6.875Hz 200nV 24.6 * * ^ - Effective Number Of Bits */ #define MPC624_SPEED_3_52_kHz (MPC624_OSR4 | MPC624_OSR0) #define MPC624_SPEED_1_76_kHz (MPC624_OSR4 | MPC624_OSR1) #define MPC624_SPEED_880_Hz (MPC624_OSR4 | MPC624_OSR1 | MPC624_OSR0) #define MPC624_SPEED_440_Hz (MPC624_OSR4 | MPC624_OSR2) #define MPC624_SPEED_220_Hz (MPC624_OSR4 | MPC624_OSR2 | MPC624_OSR0) #define MPC624_SPEED_110_Hz (MPC624_OSR4 | MPC624_OSR2 | MPC624_OSR1) #define MPC624_SPEED_55_Hz \ (MPC624_OSR4 | MPC624_OSR2 | MPC624_OSR1 | MPC624_OSR0) #define MPC624_SPEED_27_5_Hz (MPC624_OSR4 | 
MPC624_OSR3) #define MPC624_SPEED_13_75_Hz (MPC624_OSR4 | MPC624_OSR3 | MPC624_OSR0) #define MPC624_SPEED_6_875_Hz \ (MPC624_OSR4 | MPC624_OSR3 | MPC624_OSR2 | MPC624_OSR1 | MPC624_OSR0) /* -------------------------------------------------------------------------- */ struct mpc624_private { /* set by mpc624_attach() from driver's parameters */ unsigned long int ulConvertionRate; }; /* -------------------------------------------------------------------------- */ static const struct comedi_lrange range_mpc624_bipolar1 = { 1, { /* BIP_RANGE(1.01) this is correct, */ /* but my MPC-624 actually seems to have a range of 2.02 */ BIP_RANGE(2.02) } }; static const struct comedi_lrange range_mpc624_bipolar10 = { 1, { /* BIP_RANGE(10.1) this is correct, */ /* but my MPC-624 actually seems to have a range of 20.2 */ BIP_RANGE(20.2) } }; /* Timeout 200ms */ #define TIMEOUT 200 static int mpc624_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct mpc624_private *devpriv = dev->private; int n, i; unsigned long int data_in, data_out; unsigned char ucPort; /* * WARNING: * We always write 0 to GNSWA bit, so the channel range is +-/10.1Vdc */ outb(insn->chanspec, dev->iobase + MPC624_GNMUXCH); /* printk("Channel %d:\n", insn->chanspec); */ if (!insn->n) { printk(KERN_INFO "MPC624: Warning, no data to acquire\n"); return 0; } for (n = 0; n < insn->n; n++) { /* Trigger the conversion */ outb(MPC624_ADSCK, dev->iobase + MPC624_ADC); udelay(1); outb(MPC624_ADCS | MPC624_ADSCK, dev->iobase + MPC624_ADC); udelay(1); outb(0, dev->iobase + MPC624_ADC); udelay(1); /* Wait for the conversion to end */ for (i = 0; i < TIMEOUT; i++) { ucPort = inb(dev->iobase + MPC624_ADC); if (ucPort & MPC624_ADBUSY) udelay(1000); else break; } if (i == TIMEOUT) { printk(KERN_ERR "MPC624: timeout (%dms)\n", TIMEOUT); data[n] = 0; return -ETIMEDOUT; } /* Start reading data */ data_in = 0; data_out = devpriv->ulConvertionRate; udelay(1); for (i = 
0; i < 32; i++) { /* Set the clock low */ outb(0, dev->iobase + MPC624_ADC); udelay(1); if (data_out & (1 << 31)) { /* the next bit is a 1 */ /* Set the ADSDI line (send to MPC624) */ outb(MPC624_ADSDI, dev->iobase + MPC624_ADC); udelay(1); /* Set the clock high */ outb(MPC624_ADSCK | MPC624_ADSDI, dev->iobase + MPC624_ADC); } else { /* the next bit is a 0 */ /* Set the ADSDI line (send to MPC624) */ outb(0, dev->iobase + MPC624_ADC); udelay(1); /* Set the clock high */ outb(MPC624_ADSCK, dev->iobase + MPC624_ADC); } /* Read ADSDO on high clock (receive from MPC624) */ udelay(1); data_in <<= 1; data_in |= (inb(dev->iobase + MPC624_ADC) & MPC624_ADSDO) >> 4; udelay(1); data_out <<= 1; } /* * Received 32-bit long value consist of: * 31: EOC - * (End Of Transmission) bit - should be 0 * 30: DMY * (Dummy) bit - should be 0 * 29: SIG * (Sign) bit- 1 if the voltage is positive, * 0 if negative * 28: MSB * (Most Significant Bit) - the first bit of * the conversion result * .... * 05: LSB * (Least Significant Bit)- the last bit of the * conversion result * 04-00: sub-LSB * - sub-LSBs are basically noise, but when * averaged properly, they can increase conversion * precision up to 29 bits; they can be discarded * without loss of resolution. 
*/ if (data_in & MPC624_EOC_BIT) printk(KERN_INFO "MPC624:EOC bit is set (data_in=%lu)!", data_in); if (data_in & MPC624_DMY_BIT) printk(KERN_INFO "MPC624:DMY bit is set (data_in=%lu)!", data_in); if (data_in & MPC624_SGN_BIT) { /* Volatge is positive */ /* * comedi operates on unsigned numbers, so mask off EOC * and DMY and don't clear the SGN bit */ data_in &= 0x3FFFFFFF; data[n] = data_in; } else { /* The voltage is negative */ /* * data_in contains a number in 30-bit two's complement * code and we must deal with it */ data_in |= MPC624_SGN_BIT; data_in = ~data_in; data_in += 1; data_in &= ~(MPC624_EOC_BIT | MPC624_DMY_BIT); /* clear EOC and DMY bits */ data_in = 0x20000000 - data_in; data[n] = data_in; } } /* Return the number of samples read/written */ return n; } static int mpc624_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct mpc624_private *devpriv; struct comedi_subdevice *s; int ret; ret = comedi_request_region(dev, it->options[0], MPC624_SIZE); if (ret) return ret; devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL); if (!devpriv) return -ENOMEM; dev->private = devpriv; switch (it->options[1]) { case 0: devpriv->ulConvertionRate = MPC624_SPEED_3_52_kHz; break; case 1: devpriv->ulConvertionRate = MPC624_SPEED_1_76_kHz; break; case 2: devpriv->ulConvertionRate = MPC624_SPEED_880_Hz; break; case 3: devpriv->ulConvertionRate = MPC624_SPEED_440_Hz; break; case 4: devpriv->ulConvertionRate = MPC624_SPEED_220_Hz; break; case 5: devpriv->ulConvertionRate = MPC624_SPEED_110_Hz; break; case 6: devpriv->ulConvertionRate = MPC624_SPEED_55_Hz; break; case 7: devpriv->ulConvertionRate = MPC624_SPEED_27_5_Hz; break; case 8: devpriv->ulConvertionRate = MPC624_SPEED_13_75_Hz; break; case 9: devpriv->ulConvertionRate = MPC624_SPEED_6_875_Hz; break; default: devpriv->ulConvertionRate = MPC624_SPEED_3_52_kHz; } ret = comedi_alloc_subdevices(dev, 1); if (ret) return ret; s = &dev->subdevices[0]; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | 
SDF_DIFF; s->n_chan = 8; switch (it->options[1]) { default: s->maxdata = 0x3FFFFFFF; } switch (it->options[1]) { case 0: s->range_table = &range_mpc624_bipolar1; break; default: s->range_table = &range_mpc624_bipolar10; } s->len_chanlist = 1; s->insn_read = mpc624_ai_rinsn; return 1; } static struct comedi_driver mpc624_driver = { .driver_name = "mpc624", .module = THIS_MODULE, .attach = mpc624_attach, .detach = comedi_legacy_detach, }; module_comedi_driver(mpc624_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
leeymcj/linuxrk-tk1
drivers/video/omap2/displays/panel-tpo-td043mtea1.c
2090
14064
/* * LCD panel driver for TPO TD043MTEA1 * * Author: Gražvydas Ignotas <notasas@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/regulator/consumer.h> #include <linux/gpio.h> #include <linux/err.h> #include <linux/slab.h> #include <video/omapdss.h> #include <video/omap-panel-data.h> #define TPO_R02_MODE(x) ((x) & 7) #define TPO_R02_MODE_800x480 7 #define TPO_R02_NCLK_RISING BIT(3) #define TPO_R02_HSYNC_HIGH BIT(4) #define TPO_R02_VSYNC_HIGH BIT(5) #define TPO_R03_NSTANDBY BIT(0) #define TPO_R03_EN_CP_CLK BIT(1) #define TPO_R03_EN_VGL_PUMP BIT(2) #define TPO_R03_EN_PWM BIT(3) #define TPO_R03_DRIVING_CAP_100 BIT(4) #define TPO_R03_EN_PRE_CHARGE BIT(6) #define TPO_R03_SOFTWARE_CTL BIT(7) #define TPO_R04_NFLIP_H BIT(0) #define TPO_R04_NFLIP_V BIT(1) #define TPO_R04_CP_CLK_FREQ_1H BIT(2) #define TPO_R04_VGL_FREQ_1H BIT(4) #define TPO_R03_VAL_NORMAL (TPO_R03_NSTANDBY | TPO_R03_EN_CP_CLK | \ TPO_R03_EN_VGL_PUMP | TPO_R03_EN_PWM | \ TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \ TPO_R03_SOFTWARE_CTL) #define TPO_R03_VAL_STANDBY (TPO_R03_DRIVING_CAP_100 | \ TPO_R03_EN_PRE_CHARGE | TPO_R03_SOFTWARE_CTL) static const u16 tpo_td043_def_gamma[12] = { 105, 315, 381, 431, 490, 537, 579, 686, 780, 837, 880, 1023 }; struct tpo_td043_device { struct spi_device *spi; struct regulator *vcc_reg; int nreset_gpio; u16 gamma[12]; u32 mode; u32 hmirror:1; u32 vmirror:1; u32 powered_on:1; u32 spi_suspended:1; u32 power_on_resume:1; }; /* used to pass spi_device from SPI to DSS portion of the driver */ static struct tpo_td043_device *g_tpo_td043; static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data) { struct spi_message m; struct spi_transfer xfer; u16 w; int r; 
spi_message_init(&m); memset(&xfer, 0, sizeof(xfer)); w = ((u16)addr << 10) | (1 << 8) | data; xfer.tx_buf = &w; xfer.bits_per_word = 16; xfer.len = 2; spi_message_add_tail(&xfer, &m); r = spi_sync(spi, &m); if (r < 0) dev_warn(&spi->dev, "failed to write to LCD reg (%d)\n", r); return r; } static void tpo_td043_write_gamma(struct spi_device *spi, u16 gamma[12]) { u8 i, val; /* gamma bits [9:8] */ for (val = i = 0; i < 4; i++) val |= (gamma[i] & 0x300) >> ((i + 1) * 2); tpo_td043_write(spi, 0x11, val); for (val = i = 0; i < 4; i++) val |= (gamma[i+4] & 0x300) >> ((i + 1) * 2); tpo_td043_write(spi, 0x12, val); for (val = i = 0; i < 4; i++) val |= (gamma[i+8] & 0x300) >> ((i + 1) * 2); tpo_td043_write(spi, 0x13, val); /* gamma bits [7:0] */ for (val = i = 0; i < 12; i++) tpo_td043_write(spi, 0x14 + i, gamma[i] & 0xff); } static int tpo_td043_write_mirror(struct spi_device *spi, bool h, bool v) { u8 reg4 = TPO_R04_NFLIP_H | TPO_R04_NFLIP_V | \ TPO_R04_CP_CLK_FREQ_1H | TPO_R04_VGL_FREQ_1H; if (h) reg4 &= ~TPO_R04_NFLIP_H; if (v) reg4 &= ~TPO_R04_NFLIP_V; return tpo_td043_write(spi, 4, reg4); } static int tpo_td043_set_hmirror(struct omap_dss_device *dssdev, bool enable) { struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev); tpo_td043->hmirror = enable; return tpo_td043_write_mirror(tpo_td043->spi, tpo_td043->hmirror, tpo_td043->vmirror); } static bool tpo_td043_get_hmirror(struct omap_dss_device *dssdev) { struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev); return tpo_td043->hmirror; } static ssize_t tpo_td043_vmirror_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%d\n", tpo_td043->vmirror); } static ssize_t tpo_td043_vmirror_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev); int val; int ret; ret = kstrtoint(buf, 0, &val); if 
(ret < 0) return ret; val = !!val; ret = tpo_td043_write_mirror(tpo_td043->spi, tpo_td043->hmirror, val); if (ret < 0) return ret; tpo_td043->vmirror = val; return count; } static ssize_t tpo_td043_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%d\n", tpo_td043->mode); } static ssize_t tpo_td043_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev); long val; int ret; ret = kstrtol(buf, 0, &val); if (ret != 0 || val & ~7) return -EINVAL; tpo_td043->mode = val; val |= TPO_R02_NCLK_RISING; tpo_td043_write(tpo_td043->spi, 2, val); return count; } static ssize_t tpo_td043_gamma_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev); ssize_t len = 0; int ret; int i; for (i = 0; i < ARRAY_SIZE(tpo_td043->gamma); i++) { ret = snprintf(buf + len, PAGE_SIZE - len, "%u ", tpo_td043->gamma[i]); if (ret < 0) return ret; len += ret; } buf[len - 1] = '\n'; return len; } static ssize_t tpo_td043_gamma_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev); unsigned int g[12]; int ret; int i; ret = sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u", &g[0], &g[1], &g[2], &g[3], &g[4], &g[5], &g[6], &g[7], &g[8], &g[9], &g[10], &g[11]); if (ret != 12) return -EINVAL; for (i = 0; i < 12; i++) tpo_td043->gamma[i] = g[i]; tpo_td043_write_gamma(tpo_td043->spi, tpo_td043->gamma); return count; } static DEVICE_ATTR(vmirror, S_IRUGO | S_IWUSR, tpo_td043_vmirror_show, tpo_td043_vmirror_store); static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, tpo_td043_mode_show, tpo_td043_mode_store); static DEVICE_ATTR(gamma, S_IRUGO | S_IWUSR, tpo_td043_gamma_show, tpo_td043_gamma_store); static struct attribute *tpo_td043_attrs[] = { 
&dev_attr_vmirror.attr, &dev_attr_mode.attr, &dev_attr_gamma.attr, NULL, }; static struct attribute_group tpo_td043_attr_group = { .attrs = tpo_td043_attrs, }; static const struct omap_video_timings tpo_td043_timings = { .x_res = 800, .y_res = 480, .pixel_clock = 36000, .hsw = 1, .hfp = 68, .hbp = 214, .vsw = 1, .vfp = 39, .vbp = 34, .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, .de_level = OMAPDSS_SIG_ACTIVE_HIGH, .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }; static inline struct panel_tpo_td043_data *get_panel_data(const struct omap_dss_device *dssdev) { return (struct panel_tpo_td043_data *) dssdev->data; } static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043) { int r; if (tpo_td043->powered_on) return 0; r = regulator_enable(tpo_td043->vcc_reg); if (r != 0) return r; /* wait for panel to stabilize */ msleep(160); if (gpio_is_valid(tpo_td043->nreset_gpio)) gpio_set_value(tpo_td043->nreset_gpio, 1); tpo_td043_write(tpo_td043->spi, 2, TPO_R02_MODE(tpo_td043->mode) | TPO_R02_NCLK_RISING); tpo_td043_write(tpo_td043->spi, 3, TPO_R03_VAL_NORMAL); tpo_td043_write(tpo_td043->spi, 0x20, 0xf0); tpo_td043_write(tpo_td043->spi, 0x21, 0xf0); tpo_td043_write_mirror(tpo_td043->spi, tpo_td043->hmirror, tpo_td043->vmirror); tpo_td043_write_gamma(tpo_td043->spi, tpo_td043->gamma); tpo_td043->powered_on = 1; return 0; } static void tpo_td043_power_off(struct tpo_td043_device *tpo_td043) { if (!tpo_td043->powered_on) return; tpo_td043_write(tpo_td043->spi, 3, TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM); if (gpio_is_valid(tpo_td043->nreset_gpio)) gpio_set_value(tpo_td043->nreset_gpio, 0); /* wait for at least 2 vsyncs before cutting off power */ msleep(50); tpo_td043_write(tpo_td043->spi, 3, TPO_R03_VAL_STANDBY); regulator_disable(tpo_td043->vcc_reg); tpo_td043->powered_on = 0; } static int tpo_td043_enable_dss(struct omap_dss_device *dssdev) { struct tpo_td043_device *tpo_td043 = 
dev_get_drvdata(&dssdev->dev); int r; if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) return 0; omapdss_dpi_set_timings(dssdev, &dssdev->panel.timings); omapdss_dpi_set_data_lines(dssdev, dssdev->phy.dpi.data_lines); r = omapdss_dpi_display_enable(dssdev); if (r) goto err0; /* * If we are resuming from system suspend, SPI clocks might not be * enabled yet, so we'll program the LCD from SPI PM resume callback. */ if (!tpo_td043->spi_suspended) { r = tpo_td043_power_on(tpo_td043); if (r) goto err1; } dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; return 0; err1: omapdss_dpi_display_disable(dssdev); err0: return r; } static void tpo_td043_disable_dss(struct omap_dss_device *dssdev) { struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev); if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) return; omapdss_dpi_display_disable(dssdev); if (!tpo_td043->spi_suspended) tpo_td043_power_off(tpo_td043); } static int tpo_td043_enable(struct omap_dss_device *dssdev) { dev_dbg(&dssdev->dev, "enable\n"); return tpo_td043_enable_dss(dssdev); } static void tpo_td043_disable(struct omap_dss_device *dssdev) { dev_dbg(&dssdev->dev, "disable\n"); tpo_td043_disable_dss(dssdev); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } static int tpo_td043_probe(struct omap_dss_device *dssdev) { struct tpo_td043_device *tpo_td043 = g_tpo_td043; struct panel_tpo_td043_data *pdata = get_panel_data(dssdev); int ret = 0; dev_dbg(&dssdev->dev, "probe\n"); if (tpo_td043 == NULL) { dev_err(&dssdev->dev, "missing tpo_td043_device\n"); return -ENODEV; } if (!pdata) return -EINVAL; tpo_td043->nreset_gpio = pdata->nreset_gpio; dssdev->panel.timings = tpo_td043_timings; dssdev->ctrl.pixel_size = 24; tpo_td043->mode = TPO_R02_MODE_800x480; memcpy(tpo_td043->gamma, tpo_td043_def_gamma, sizeof(tpo_td043->gamma)); tpo_td043->vcc_reg = regulator_get(&dssdev->dev, "vcc"); if (IS_ERR(tpo_td043->vcc_reg)) { dev_err(&dssdev->dev, "failed to get LCD VCC regulator\n"); ret = PTR_ERR(tpo_td043->vcc_reg); goto fail_regulator; 
} if (gpio_is_valid(tpo_td043->nreset_gpio)) { ret = devm_gpio_request_one(&dssdev->dev, tpo_td043->nreset_gpio, GPIOF_OUT_INIT_LOW, "lcd reset"); if (ret < 0) { dev_err(&dssdev->dev, "couldn't request reset GPIO\n"); goto fail_gpio_req; } } ret = sysfs_create_group(&dssdev->dev.kobj, &tpo_td043_attr_group); if (ret) dev_warn(&dssdev->dev, "failed to create sysfs files\n"); dev_set_drvdata(&dssdev->dev, tpo_td043); return 0; fail_gpio_req: regulator_put(tpo_td043->vcc_reg); fail_regulator: kfree(tpo_td043); return ret; } static void tpo_td043_remove(struct omap_dss_device *dssdev) { struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev); dev_dbg(&dssdev->dev, "remove\n"); sysfs_remove_group(&dssdev->dev.kobj, &tpo_td043_attr_group); regulator_put(tpo_td043->vcc_reg); } static void tpo_td043_set_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { omapdss_dpi_set_timings(dssdev, timings); dssdev->panel.timings = *timings; } static int tpo_td043_check_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { return dpi_check_timings(dssdev, timings); } static struct omap_dss_driver tpo_td043_driver = { .probe = tpo_td043_probe, .remove = tpo_td043_remove, .enable = tpo_td043_enable, .disable = tpo_td043_disable, .set_mirror = tpo_td043_set_hmirror, .get_mirror = tpo_td043_get_hmirror, .set_timings = tpo_td043_set_timings, .check_timings = tpo_td043_check_timings, .driver = { .name = "tpo_td043mtea1_panel", .owner = THIS_MODULE, }, }; static int tpo_td043_spi_probe(struct spi_device *spi) { struct omap_dss_device *dssdev = spi->dev.platform_data; struct tpo_td043_device *tpo_td043; int ret; if (dssdev == NULL) { dev_err(&spi->dev, "missing dssdev\n"); return -ENODEV; } if (g_tpo_td043 != NULL) return -EBUSY; spi->bits_per_word = 16; spi->mode = SPI_MODE_0; ret = spi_setup(spi); if (ret < 0) { dev_err(&spi->dev, "spi_setup failed: %d\n", ret); return ret; } tpo_td043 = kzalloc(sizeof(*tpo_td043), GFP_KERNEL); if 
(tpo_td043 == NULL) return -ENOMEM; tpo_td043->spi = spi; dev_set_drvdata(&spi->dev, tpo_td043); g_tpo_td043 = tpo_td043; omap_dss_register_driver(&tpo_td043_driver); return 0; } static int tpo_td043_spi_remove(struct spi_device *spi) { struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&spi->dev); omap_dss_unregister_driver(&tpo_td043_driver); kfree(tpo_td043); g_tpo_td043 = NULL; return 0; } #ifdef CONFIG_PM_SLEEP static int tpo_td043_spi_suspend(struct device *dev) { struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev); dev_dbg(dev, "tpo_td043_spi_suspend, tpo %p\n", tpo_td043); tpo_td043->power_on_resume = tpo_td043->powered_on; tpo_td043_power_off(tpo_td043); tpo_td043->spi_suspended = 1; return 0; } static int tpo_td043_spi_resume(struct device *dev) { struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev); int ret; dev_dbg(dev, "tpo_td043_spi_resume\n"); if (tpo_td043->power_on_resume) { ret = tpo_td043_power_on(tpo_td043); if (ret) return ret; } tpo_td043->spi_suspended = 0; return 0; } #endif static SIMPLE_DEV_PM_OPS(tpo_td043_spi_pm, tpo_td043_spi_suspend, tpo_td043_spi_resume); static struct spi_driver tpo_td043_spi_driver = { .driver = { .name = "tpo_td043mtea1_panel_spi", .owner = THIS_MODULE, .pm = &tpo_td043_spi_pm, }, .probe = tpo_td043_spi_probe, .remove = tpo_td043_spi_remove, }; module_spi_driver(tpo_td043_spi_driver); MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>"); MODULE_DESCRIPTION("TPO TD043MTEA1 LCD Driver"); MODULE_LICENSE("GPL");
gpl-2.0
esialb/yocto-3.10-edison
drivers/staging/comedi/drivers/comedi_parport.c
2090
8742
/*
    comedi/drivers/comedi_parport.c
    hardware driver for standard parallel port

    COMEDI - Linux Control and Measurement Device Interface
    Copyright (C) 1998,2001 David A. Schleef <ds@schleef.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: comedi_parport
Description: Standard PC parallel port
Author: ds
Status: works in immediate mode
Devices: [standard] parallel port (comedi_parport)
Updated: Tue, 30 Apr 2002 21:11:45 -0700

A cheap and easy way to get a few more digital I/O lines.  Steal
additional parallel ports from old computers or your neighbors'
computers.

Option list:
 0: I/O port base for the parallel port.
 1: IRQ

Parallel Port Lines:

pin     subdev  chan    aka
---     ------  ----    ---
1       2       0       strobe
2       0       0       data 0
3       0       1       data 1
4       0       2       data 2
5       0       3       data 3
6       0       4       data 4
7       0       5       data 5
8       0       6       data 6
9       0       7       data 7
10      1       3       acknowledge
11      1       4       busy
12      1       2       output
13      1       1       printer selected
14      2       1       auto LF
15      1       0       error
16      2       2       init
17      2       3       select printer
18-25                   ground

Notes:

Subdevices 0 is digital I/O, subdevice 1 is digital input, and
subdevice 2 is digital output.  Unlike other Comedi devices,
subdevice 0 defaults to output.

Pins 13 and 14 are inverted once by Comedi and once by the
hardware, thus cancelling the effect.

Pin 1 is a strobe, thus acts like one.  There's no way in software
to change this, at least on a standard parallel port.

Subdevice 3 pretends to be a digital input subdevice, but it always
returns 0 when read.  However, if you run a command with
scan_begin_src=TRIG_EXT, it uses pin 10 as a external triggering
pin, which can be used to wake up tasks.
*/
/*
   see http://www.beyondlogic.org/ for information.
   or http://www.linux-magazin.de/ausgabe/1999/10/IO/io.html
 */

#include "../comedidev.h"
#include <linux/interrupt.h>
#include <linux/ioport.h>

#include "comedi_fc.h"

/* number of I/O ports used (data, status, control) */
#define PARPORT_SIZE 3

/* register offsets from the base I/O address */
#define PARPORT_A 0
#define PARPORT_B 1
#define PARPORT_C 2

struct parport_private {
	unsigned int a_data;	/* shadow of the data register (port A) */
	unsigned int c_data;	/* shadow of the control register (port C) */
	int enable_irq;		/* nonzero while an async command is armed */
};

/*
 * insn_bits handler for subdevice 0 (data port, 8-bit DIO).
 * data[0] is the write mask, data[1] the values; the current port
 * state is read back into data[1].
 */
static int parport_insn_a(struct comedi_device *dev, struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	struct parport_private *devpriv = dev->private;

	if (data[0]) {
		/* update only the masked bits via the shadow register */
		devpriv->a_data &= ~data[0];
		devpriv->a_data |= (data[0] & data[1]);

		outb(devpriv->a_data, dev->iobase + PARPORT_A);
	}

	data[1] = inb(dev->iobase + PARPORT_A);

	return insn->n;
}

/*
 * insn_config handler for subdevice 0: switch all 8 data lines between
 * output (data[0] != 0) and input by toggling control bit 5.
 */
static int parport_insn_config_a(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn, unsigned int *data)
{
	struct parport_private *devpriv = dev->private;

	if (data[0]) {
		s->io_bits = 0xff;
		devpriv->c_data &= ~(1 << 5);
	} else {
		s->io_bits = 0;
		devpriv->c_data |= (1 << 5);
	}
	outb(devpriv->c_data, dev->iobase + PARPORT_C);

	return 1;
}

/*
 * insn_bits handler for subdevice 1 (status port, input only).
 * Bits 3..7 of the status register map to channels 0..4.
 */
static int parport_insn_b(struct comedi_device *dev, struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	if (data[0]) {
		/* should writes be ignored? */
		/* anyone??? */
	}

	data[1] = (inb(dev->iobase + PARPORT_B) >> 3);

	return insn->n;
}

/*
 * insn_bits handler for subdevice 2 (control port, 4 output lines).
 * The port is write-only; reads return the shadow value.
 */
static int parport_insn_c(struct comedi_device *dev, struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	struct parport_private *devpriv = dev->private;

	data[0] &= 0x0f;
	if (data[0]) {
		devpriv->c_data &= ~data[0];
		devpriv->c_data |= (data[0] & data[1]);

		outb(devpriv->c_data, dev->iobase + PARPORT_C);
	}

	data[1] = devpriv->c_data & 0xf;

	return insn->n;
}

/* insn handler for subdevice 3: immediate reads always return 0. */
static int parport_intr_insn(struct comedi_device *dev,
			     struct comedi_subdevice *s,
			     struct comedi_insn *insn, unsigned int *data)
{
	data[1] = 0;
	return insn->n;
}

/*
 * Command test for subdevice 3: the only supported command is a
 * free-running, externally-triggered (pin 10) one-sample scan.
 */
static int parport_intr_cmdtest(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_cmd *cmd)
{
	int err = 0;

	/* Step 1 : check if triggers are trivially valid */

	err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
	err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
	err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
	err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
	err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_NONE);

	if (err)
		return 1;

	/* Step 2a : make sure trigger sources are unique */
	/* Step 2b : and mutually compatible */

	if (err)
		return 2;

	/* Step 3: check if arguments are trivially valid */

	err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
	err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
	err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
	err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, 1);
	err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);

	if (err)
		return 3;

	/* step 4: ignored */

	if (err)
		return 4;

	return 0;
}

/* Arm the async command: enable the parallel-port interrupt (bit 4). */
static int parport_intr_cmd(struct comedi_device *dev,
			    struct comedi_subdevice *s)
{
	struct parport_private *devpriv = dev->private;

	devpriv->c_data |= 0x10;
	outb(devpriv->c_data, dev->iobase + PARPORT_C);

	devpriv->enable_irq = 1;

	return 0;
}

/* Cancel the async command: disable the interrupt enable bit. */
static int parport_intr_cancel(struct comedi_device *dev,
			       struct comedi_subdevice *s)
{
	struct parport_private *devpriv = dev->private;

	devpriv->c_data &= ~0x10;
	outb(devpriv->c_data, dev->iobase + PARPORT_C);

	devpriv->enable_irq = 0;

	return 0;
}

/*
 * Interrupt handler: pushes a dummy sample to subdevice 3's buffer to
 * wake any task waiting on the external trigger (pin 10).
 */
static irqreturn_t parport_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct parport_private *devpriv = dev->private;
	struct comedi_subdevice *s = &dev->subdevices[3];

	if (!devpriv->enable_irq)
		return IRQ_NONE;

	comedi_buf_put(s->async, 0);
	s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;

	comedi_event(dev, s);
	return IRQ_HANDLED;
}

/*
 * Attach: claim the I/O region (option 0), optionally request the IRQ
 * (option 1), and set up the four subdevices described in the driver
 * comment above.
 */
static int parport_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it)
{
	struct parport_private *devpriv;
	struct comedi_subdevice *s;
	unsigned int irq;
	int ret;

	ret = comedi_request_region(dev, it->options[0], PARPORT_SIZE);
	if (ret)
		return ret;

	irq = it->options[1];
	if (irq) {
		ret = request_irq(irq, parport_interrupt, 0, dev->board_name,
				  dev);
		if (ret < 0) {
			dev_err(dev->class_dev, "irq not available\n");
			return -EINVAL;
		}
		dev->irq = irq;
	}

	ret = comedi_alloc_subdevices(dev, 4);
	if (ret)
		return ret;

	devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
	if (!devpriv)
		return -ENOMEM;
	dev->private = devpriv;

	/* subdevice 0: data port, 8-channel DIO */
	s = &dev->subdevices[0];
	s->type = COMEDI_SUBD_DIO;
	s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
	s->n_chan = 8;
	s->maxdata = 1;
	s->range_table = &range_digital;
	s->insn_bits = parport_insn_a;
	s->insn_config = parport_insn_config_a;

	/* subdevice 1: status port, 5 input channels */
	s = &dev->subdevices[1];
	s->type = COMEDI_SUBD_DI;
	s->subdev_flags = SDF_READABLE;
	s->n_chan = 5;
	s->maxdata = 1;
	s->range_table = &range_digital;
	s->insn_bits = parport_insn_b;

	/* subdevice 2: control port, 4 output channels */
	s = &dev->subdevices[2];
	s->type = COMEDI_SUBD_DO;
	s->subdev_flags = SDF_WRITABLE;
	s->n_chan = 4;
	s->maxdata = 1;
	s->range_table = &range_digital;
	s->insn_bits = parport_insn_c;

	/* subdevice 3: external trigger, only usable with an IRQ */
	s = &dev->subdevices[3];
	if (irq) {
		dev->read_subdev = s;
		s->type = COMEDI_SUBD_DI;
		s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
		s->n_chan = 1;
		s->maxdata = 1;
		s->range_table = &range_digital;
		s->insn_bits = parport_intr_insn;
		s->do_cmdtest = parport_intr_cmdtest;
		s->do_cmd = parport_intr_cmd;
		s->cancel = parport_intr_cancel;
	} else {
		s->type = COMEDI_SUBD_UNUSED;
	}

	/* start with all data/control outputs low, interrupts disabled */
	devpriv->a_data = 0;
	outb(devpriv->a_data, dev->iobase + PARPORT_A);
	devpriv->c_data = 0;
	outb(devpriv->c_data, dev->iobase + PARPORT_C);

	return 0;
}

static struct comedi_driver parport_driver = {
	.driver_name	= "comedi_parport",
	.module		= THIS_MODULE,
	.attach		= parport_attach,
	.detach		= comedi_legacy_detach,
};
module_comedi_driver(parport_driver);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
gpl-2.0
AOKP/kernel_samsung_exynos5410
arch/tile/kernel/compat_signal.c
4394
12202
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/suspend.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/ucontext.h>
#include <asm/sigframe.h>
#include <asm/syscalls.h>
#include <arch/interrupts.h>

/* 32-bit userspace layout of struct sigaction (pointers are 32-bit). */
struct compat_sigaction {
	compat_uptr_t sa_handler;
	compat_ulong_t sa_flags;
	compat_uptr_t sa_restorer;
	sigset_t sa_mask __packed;
};

/* 32-bit userspace layout of stack_t. */
struct compat_sigaltstack {
	compat_uptr_t ss_sp;
	int ss_flags;
	compat_size_t ss_size;
};

/* 32-bit userspace layout of struct ucontext. */
struct compat_ucontext {
	compat_ulong_t	  uc_flags;
	compat_uptr_t     uc_link;
	struct compat_sigaltstack	  uc_stack;
	struct sigcontext uc_mcontext;
	sigset_t	  uc_sigmask;	/* mask last for extensibility */
};

#define COMPAT_SI_PAD_SIZE	((SI_MAX_SIZE - 3 * sizeof(int)) / sizeof(int))

/* 32-bit userspace layout of siginfo_t. */
struct compat_siginfo {
	int si_signo;
	int si_errno;
	int si_code;

	union {
		int _pad[COMPAT_SI_PAD_SIZE];

		/* kill() */
		struct {
			unsigned int _pid;	/* sender's pid */
			unsigned int _uid;	/* sender's uid */
		} _kill;

		/* POSIX.1b timers */
		struct {
			compat_timer_t _tid;	/* timer id */
			int _overrun;		/* overrun count */
			compat_sigval_t _sigval;	/* same as below */
			int _sys_private;	/* not to be passed to user */
			int _overrun_incr;	/* amount to add to overrun */
		} _timer;

		/* POSIX.1b signals */
		struct {
			unsigned int _pid;	/* sender's pid */
			unsigned int _uid;	/* sender's uid */
			compat_sigval_t _sigval;
		} _rt;

		/* SIGCHLD */
		struct {
			unsigned int _pid;	/* which child */
			unsigned int _uid;	/* sender's uid */
			int _status;		/* exit code */
			compat_clock_t _utime;
			compat_clock_t _stime;
		} _sigchld;

		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
		struct {
			unsigned int _addr;	/* faulting insn/memory ref. */
#ifdef __ARCH_SI_TRAPNO
			int _trapno;	/* TRAP # which caused the signal */
#endif
		} _sigfault;

		/* SIGPOLL */
		struct {
			int _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
			int _fd;
		} _sigpoll;
	} _sifields;
};

/* Signal frame pushed onto the 32-bit user stack before the handler runs. */
struct compat_rt_sigframe {
	unsigned char save_area[C_ABI_SAVE_AREA_SIZE]; /* caller save area */
	struct compat_siginfo info;
	struct compat_ucontext uc;
};

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

/*
 * Compat rt_sigaction: translate the 32-bit sigaction to/from the
 * native k_sigaction, converting the 32-bit handler/restorer pointers.
 */
long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
			     struct compat_sigaction __user *oact,
			     size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		compat_uptr_t handler, restorer;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(new_sa.sa.sa_flags, &act->sa_flags) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __copy_from_user(&new_sa.sa.sa_mask, &act->sa_mask,
				     sizeof(sigset_t)))
			return -EFAULT;
		new_sa.sa.sa_handler = compat_ptr(handler);
		new_sa.sa.sa_restorer = compat_ptr(restorer);
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_sa.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_sa.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_sa.sa.sa_flags, &oact->sa_flags) ||
		    __copy_to_user(&oact->sa_mask, &old_sa.sa.sa_mask,
				   sizeof(sigset_t)))
			return -EFAULT;
	}
out:
	return ret;
}

/*
 * Compat rt_sigqueueinfo: convert the 32-bit siginfo to a native one in
 * kernel memory, then call the native syscall under KERNEL_DS so the
 * kernel-space pointer passes its user-access checks.
 */
long compat_sys_rt_sigqueueinfo(int pid, int sig,
				struct compat_siginfo __user *uinfo)
{
	siginfo_t info;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	set_fs(KERNEL_DS);
	ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *)&info);
	set_fs(old_fs);
	return ret;
}

/*
 * Copy a native siginfo_t out to a 32-bit userspace compat_siginfo,
 * choosing the union member from si_code.
 */
int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(struct compat_siginfo)))
		return -EFAULT;

	/* If you change siginfo_t structure, please make sure that
	   this code is fixed accordingly.
	   It should never copy any pad contained in the structure
	   to avoid security leaks, but must copy the generic
	   3 ints plus the relevant union member.  */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);

	if (from->si_code < 0) {
		/* negative si_code: sent from userspace, copy the rt triple */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
	} else {
		/*
		 * First 32bits of unions are always present:
		 * si_pid === si_band === si_tid === si_addr(LS half)
		 */
		err |= __put_user(from->_sifields._pad[0],
				  &to->_sifields._pad[0]);
		switch (from->si_code >> 16) {
		case __SI_FAULT >> 16:
			break;
		case __SI_CHLD >> 16:
			err |= __put_user(from->si_utime, &to->si_utime);
			err |= __put_user(from->si_stime, &to->si_stime);
			err |= __put_user(from->si_status, &to->si_status);
			/* FALL THROUGH */
		default:
		case __SI_KILL >> 16:
			err |= __put_user(from->si_uid, &to->si_uid);
			break;
		case __SI_POLL >> 16:
			err |= __put_user(from->si_fd, &to->si_fd);
			break;
		case __SI_TIMER >> 16:
			err |= __put_user(from->si_overrun, &to->si_overrun);
			err |= __put_user(ptr_to_compat(from->si_ptr),
					  &to->si_ptr);
			break;
			 /* This is not generated by the kernel as of now.  */
		case __SI_RT >> 16:
		case __SI_MESGQ >> 16:
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_int, &to->si_int);
			break;
		}
	}
	return err;
}

/*
 * Copy a 32-bit userspace compat_siginfo into a native siginfo_t.
 * Only the generic fields plus pid/uid/ptr are converted; that is
 * sufficient for the sigqueueinfo path above.
 */
int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
	int err;
	u32 ptr32;

	if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
		return -EFAULT;

	err = __get_user(to->si_signo, &from->si_signo);
	err |= __get_user(to->si_errno, &from->si_errno);
	err |= __get_user(to->si_code, &from->si_code);

	err |= __get_user(to->si_pid, &from->si_pid);
	err |= __get_user(to->si_uid, &from->si_uid);
	err |= __get_user(ptr32, &from->si_ptr);
	to->si_ptr = compat_ptr(ptr32);

	return err;
}

/*
 * Compat sigaltstack: convert the 32-bit stack_t in and out around the
 * native do_sigaltstack(), temporarily switching to KERNEL_DS since the
 * converted structures live on the kernel stack.
 */
long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
			    struct compat_sigaltstack __user *uoss_ptr,
			    struct pt_regs *regs)
{
	stack_t uss, uoss;
	int ret;
	mm_segment_t seg;

	if (uss_ptr) {
		u32 ptr;

		memset(&uss, 0, sizeof(stack_t));
		if (!access_ok(VERIFY_READ, uss_ptr, sizeof(*uss_ptr)) ||
			    __get_user(ptr, &uss_ptr->ss_sp) ||
			    __get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
			    __get_user(uss.ss_size, &uss_ptr->ss_size))
			return -EFAULT;
		uss.ss_sp = compat_ptr(ptr);
	}
	seg = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack(uss_ptr ? (stack_t __user __force *)&uss : NULL,
			     (stack_t __user __force *)&uoss,
			     (unsigned long)compat_ptr(regs->sp));
	set_fs(seg);
	if (ret >= 0 && uoss_ptr)  {
		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(*uoss_ptr)) ||
		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
			ret = -EFAULT;
	}
	return ret;
}

/* The assembly shim for this function arranges to ignore the return value. */
long compat_sys_rt_sigreturn(struct pt_regs *regs)
{
	struct compat_rt_sigframe __user *frame =
		(struct compat_rt_sigframe __user *) compat_ptr(regs->sp);
	sigset_t set;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	/* restore the signal mask saved in the frame */
	sigdelsetmask(&set, ~_BLOCKABLE);
	set_current_blocked(&set);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
		goto badframe;

	if (compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
		goto badframe;

	return 0;

badframe:
	signal_fault("bad sigreturn frame", regs, frame, 0);
	return 0;
}

/*
 * Determine which stack to use..
 */
static inline void __user *compat_get_sigframe(struct k_sigaction *ka,
					       struct pt_regs *regs,
					       size_t frame_size)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = (unsigned long)compat_ptr(regs->sp);

	/*
	 * If we are on the alternate signal stack and would overflow
	 * it, don't.  Return an always-bogus address instead so we
	 * will die with SIGSEGV.
	 */
	if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
		return (void __user __force *)-1UL;

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		if (sas_ss_flags(sp) == 0)
			sp = current->sas_ss_sp + current->sas_ss_size;
	}

	sp -= frame_size;
	/*
	 * Align the stack pointer according to the TILE ABI,
	 * i.e. so that on function entry (sp & 15) == 0.
	 */
	sp &= -16UL;
	return (void __user *) sp;
}

/*
 * Build the compat rt signal frame on the user stack and redirect the
 * registers so that returning to userspace enters the signal handler.
 * Returns 0 on success; on failure, forces SIGSEGV and returns -EFAULT.
 */
int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			  sigset_t *set, struct pt_regs *regs)
{
	unsigned long restorer;
	struct compat_rt_sigframe __user *frame;
	int err = 0;
	int usig;

	frame = compat_get_sigframe(ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	/* translate the signal number through the exec domain, if any */
	usig = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	/* Always write at least the signal number for the stack backtracer. */
	if (ka->sa.sa_flags & SA_SIGINFO) {
		/* At sigreturn time, restore the callee-save registers too. */
		err |= copy_siginfo_to_user32(&frame->info, info);
		regs->flags |= PT_FLAGS_RESTORE_REGS;
	} else {
		err |= __put_user(info->si_signo, &frame->info.si_signo);
	}

	/* Create the ucontext.  */
	err |= __clear_user(&frame->save_area, sizeof(frame->save_area));
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user(ptr_to_compat((void *)(current->sas_ss_sp)),
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->sp),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	restorer = VDSO_BASE;
	if (ka->sa.sa_flags & SA_RESTORER)
		restorer = ptr_to_compat_reg(ka->sa.sa_restorer);

	/*
	 * Set up registers for signal handler.
	 * Registers that we don't modify keep the value they had from
	 * user-space at the time we took the signal.
	 * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
	 * since some things rely on this (e.g. glibc's debug/segfault.c).
	 */
	regs->pc = ptr_to_compat_reg(ka->sa.sa_handler);
	regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
	regs->sp = ptr_to_compat_reg(frame);
	regs->lr = restorer;
	regs->regs[0] = (unsigned long) usig;
	regs->regs[1] = ptr_to_compat_reg(&frame->info);
	regs->regs[2] = ptr_to_compat_reg(&frame->uc);
	regs->flags |= PT_FLAGS_CALLER_SAVES;

	/*
	 * Notify any tracer that was single-stepping it.
	 * The tracer may want to single-step inside the
	 * handler too.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		ptrace_notify(SIGTRAP);

	return 0;

give_sigsegv:
	signal_fault("bad setup frame", regs, frame, sig);
	return -EFAULT;
}
gpl-2.0
heechul/linux
sound/soc/samsung/idma.c
4906
10178
/*
 * sound/soc/samsung/idma.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * I2S0's Internal DMA driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include "i2s.h"
#include "idma.h"
#include "dma.h"
#include "i2s-regs.h"

#define ST_RUNNING		(1<<0)
#define ST_OPENED		(1<<1)

static const struct snd_pcm_hardware idma_hardware = {
	.info = SNDRV_PCM_INFO_INTERLEAVED |
		    SNDRV_PCM_INFO_BLOCK_TRANSFER |
		    SNDRV_PCM_INFO_MMAP |
		    SNDRV_PCM_INFO_MMAP_VALID |
		    SNDRV_PCM_INFO_PAUSE |
		    SNDRV_PCM_INFO_RESUME,
	.formats = SNDRV_PCM_FMTBIT_S16_LE |
		    SNDRV_PCM_FMTBIT_U16_LE |
		    SNDRV_PCM_FMTBIT_S24_LE |
		    SNDRV_PCM_FMTBIT_U24_LE |
		    SNDRV_PCM_FMTBIT_U8 |
		    SNDRV_PCM_FMTBIT_S8,
	.channels_min = 2,
	.channels_max = 2,
	.buffer_bytes_max = MAX_IDMA_BUFFER,
	.period_bytes_min = 128,
	.period_bytes_max = MAX_IDMA_PERIOD,
	.periods_min = 1,
	.periods_max = 2,
};

/* Per-substream state of the internal DMA engine. */
struct idma_ctrl {
	spinlock_t	lock;
	int		state;
	dma_addr_t	start;		/* buffer start bus address */
	dma_addr_t	pos;		/* current playback position */
	dma_addr_t	end;		/* buffer end bus address */
	dma_addr_t	period;		/* periods per buffer */
	dma_addr_t	periodsz;	/* bytes per period */
	void		*token;		/* opaque arg handed to cb */
	void		(*cb)(void *dt, int bytes_xfer);
};

/* Single global controller instance; regs/lp_tx_addr are set once
 * by idma_reg_addr_init() before any PCM activity. */
static struct idma_info {
	spinlock_t	lock;
	void		 __iomem  *regs;
	dma_addr_t	lp_tx_addr;
} idma;

/* Read the hardware transfer counter and convert it to a bus address. */
static void idma_getpos(dma_addr_t *src)
{
	*src = idma.lp_tx_addr +
		(readl(idma.regs + I2STRNCNT) & 0xffffff) * 4;
}

/* Program buffer start/size into the IDMA block and enable the
 * level-0 interrupt that fires at each period boundary. */
static int idma_enqueue(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = substream->runtime->private_data;
	u32 val;

	spin_lock(&prtd->lock);
	prtd->token = (void *) substream;
	spin_unlock(&prtd->lock);

	/* Internal DMA Level0 Interrupt Address */
	val = idma.lp_tx_addr + prtd->periodsz;
	writel(val, idma.regs + I2SLVL0ADDR);

	/* Start address0 of I2S internal DMA operation. */
	val = idma.lp_tx_addr;
	writel(val, idma.regs + I2SSTR0);

	/*
	 * Transfer block size for I2S internal DMA.
	 * Should decide transfer size before start dma operation
	 */
	val = readl(idma.regs + I2SSIZE);
	val &= ~(I2SSIZE_TRNMSK << I2SSIZE_SHIFT);
	val |= (((runtime->dma_bytes >> 2) &
			I2SSIZE_TRNMSK) << I2SSIZE_SHIFT);
	writel(val, idma.regs + I2SSIZE);

	val = readl(idma.regs + I2SAHB);
	val |= AHB_INTENLVL0;
	writel(val, idma.regs + I2SAHB);

	return 0;
}

/* Install the period-elapsed callback under the ctrl lock. */
static void idma_setcallbk(struct snd_pcm_substream *substream,
				void (*cb)(void *, int))
{
	struct idma_ctrl *prtd = substream->runtime->private_data;

	spin_lock(&prtd->lock);
	prtd->cb = cb;
	spin_unlock(&prtd->lock);
}

/* Start or stop the AHB DMA engine (and its level-0 interrupt). */
static void idma_control(int op)
{
	u32 val = readl(idma.regs + I2SAHB);

	spin_lock(&idma.lock);

	switch (op) {
	case LPAM_DMA_START:
		val |= (AHB_INTENLVL0 | AHB_DMAEN);
		break;
	case LPAM_DMA_STOP:
		val &= ~(AHB_INTENLVL0 | AHB_DMAEN);
		break;
	default:
		spin_unlock(&idma.lock);
		return;
	}

	writel(val, idma.regs + I2SAHB);

	spin_unlock(&idma.lock);
}

/* Period-elapsed callback: notify ALSA only while the stream runs. */
static void idma_done(void *id, int bytes_xfer)
{
	struct snd_pcm_substream *substream = id;
	struct idma_ctrl *prtd = substream->runtime->private_data;

	if (prtd && (prtd->state & ST_RUNNING))
		snd_pcm_period_elapsed(substream);
}

/* Route the TX path through the internal DMA and capture the
 * buffer geometry ALSA negotiated for this stream. */
static int idma_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = substream->runtime->private_data;
	u32 mod = readl(idma.regs + I2SMOD);
	u32 ahb = readl(idma.regs + I2SAHB);

	ahb |= (AHB_DMARLD | AHB_INTMASK);
	mod |= MOD_TXS_IDMA;
	writel(ahb, idma.regs + I2SAHB);
	writel(mod, idma.regs + I2SMOD);

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
	runtime->dma_bytes = params_buffer_bytes(params);

	prtd->start = prtd->pos = runtime->dma_addr;
	prtd->period = params_periods(params);
	prtd->periodsz = params_period_bytes(params);
	prtd->end = runtime->dma_addr + runtime->dma_bytes;

	idma_setcallbk(substream, idma_done);

	return 0;
}

static int idma_hw_free(struct snd_pcm_substream *substream)
{
	snd_pcm_set_runtime_buffer(substream, NULL);

	return 0;
}

/* Reset the position and (re)program the engine before a trigger. */
static int idma_prepare(struct snd_pcm_substream *substream)
{
	struct idma_ctrl *prtd = substream->runtime->private_data;

	prtd->pos = prtd->start;

	/* flush the DMA channel */
	idma_control(LPAM_DMA_STOP);
	idma_enqueue(substream);

	return 0;
}

static int idma_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct idma_ctrl *prtd = substream->runtime->private_data;
	int ret = 0;

	spin_lock(&prtd->lock);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		prtd->state |= ST_RUNNING;
		idma_control(LPAM_DMA_START);
		break;

	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		prtd->state &= ~ST_RUNNING;
		idma_control(LPAM_DMA_STOP);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	spin_unlock(&prtd->lock);

	return ret;
}

/* Current playback pointer, derived from the hardware counter. */
static snd_pcm_uframes_t
	idma_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = runtime->private_data;
	dma_addr_t src;
	unsigned long res;

	spin_lock(&prtd->lock);

	idma_getpos(&src);
	res = src - prtd->start;

	spin_unlock(&prtd->lock);

	return bytes_to_frames(substream->runtime, res);
}

/* Map the (uncached, iomem) audio buffer into user space. */
static int idma_mmap(struct snd_pcm_substream *substream,
	struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long size, offset;
	int ret;

	/* From snd_pcm_lib_mmap_iomem */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_IO;
	size = vma->vm_end - vma->vm_start;
	offset = vma->vm_pgoff << PAGE_SHIFT;
	ret = io_remap_pfn_range(vma, vma->vm_start,
			(runtime->dma_addr + offset) >> PAGE_SHIFT,
			size, vma->vm_page_prot);

	return ret;
}

/* Level-0 interrupt: ack it, advance the next period boundary
 * address (wrapping at buffer end) and report the elapsed period. */
static irqreturn_t iis_irq(int irqno, void *dev_id)
{
	struct idma_ctrl *prtd = (struct idma_ctrl *)dev_id;
	u32 iiscon, iisahb, val, addr;

	iisahb  = readl(idma.regs + I2SAHB);
	iiscon  = readl(idma.regs + I2SCON);

	val = (iisahb & AHB_LVL0INT) ? AHB_CLRLVL0INT : 0;

	if (val) {
		iisahb |= val;
		writel(iisahb, idma.regs + I2SAHB);

		addr = readl(idma.regs + I2SLVL0ADDR) - idma.lp_tx_addr;
		addr += prtd->periodsz;
		addr %= (prtd->end - prtd->start);
		addr += idma.lp_tx_addr;

		writel(addr, idma.regs + I2SLVL0ADDR);

		if (prtd->cb)
			prtd->cb(prtd->token, prtd->period);
	}

	return IRQ_HANDLED;
}

static int idma_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd;
	int ret;

	snd_soc_set_runtime_hwparams(substream, &idma_hardware);

	prtd = kzalloc(sizeof(struct idma_ctrl), GFP_KERNEL);
	if (prtd == NULL)
		return -ENOMEM;

	/*
	 * Initialize the lock before the IRQ handler that may take it
	 * is registered (the original code did this afterwards).
	 */
	spin_lock_init(&prtd->lock);

	ret = request_irq(IRQ_I2S0, iis_irq, 0, "i2s", prtd);
	if (ret < 0) {
		pr_err("fail to claim i2s irq , ret = %d\n", ret);
		kfree(prtd);
		return ret;
	}

	runtime->private_data = prtd;

	return 0;
}

static int idma_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = runtime->private_data;

	/*
	 * Check prtd BEFORE using it.  The original code called
	 * free_irq(IRQ_I2S0, prtd) first and tested for NULL only
	 * afterwards, so the check could never prevent misuse.
	 */
	if (!prtd) {
		pr_err("idma_close called with prtd == NULL\n");
		return 0;
	}

	free_irq(IRQ_I2S0, prtd);
	kfree(prtd);

	return 0;
}

static struct snd_pcm_ops idma_ops = {
	.open		= idma_open,
	.close		= idma_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.trigger	= idma_trigger,
	.pointer	= idma_pointer,
	.mmap		= idma_mmap,
	.hw_params	= idma_hw_params,
	.hw_free	= idma_hw_free,
	.prepare	= idma_prepare,
};

/* Undo preallocate_idma_buffer(): unmap the iomem playback buffer. */
static void idma_free(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;

	substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	if (!substream)
		return;

	buf = &substream->dma_buffer;
	if (!buf->area)
		return;

	iounmap(buf->area);

	buf->area = NULL;
	buf->addr = 0;
}

static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;

	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;

	/* Assign PCM buffer pointers */
	buf->dev.type = SNDRV_DMA_TYPE_CONTINUOUS;
	buf->addr = idma.lp_tx_addr;
	buf->bytes = idma_hardware.buffer_bytes_max;
	buf->area = (unsigned char *)ioremap(buf->addr, buf->bytes);
	/* The original code never checked the mapping result. */
	if (!buf->area)
		return -ENOMEM;

	return 0;
}

static u64 idma_mask = DMA_BIT_MASK(32);

static int idma_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	int ret = 0;

	if (!card->dev->dma_mask)
		card->dev->dma_mask = &idma_mask;
	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);

	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
		ret = preallocate_idma_buffer(pcm,
				SNDRV_PCM_STREAM_PLAYBACK);
	}

	return ret;
}

/* Record register base and low-power TX buffer address; called by
 * the I2S driver before the platform is used. */
void idma_reg_addr_init(void __iomem *regs, dma_addr_t addr)
{
	spin_lock_init(&idma.lock);
	idma.regs = regs;
	idma.lp_tx_addr = addr;
}

static struct snd_soc_platform_driver asoc_idma_platform = {
	.ops = &idma_ops,
	.pcm_new = idma_new,
	.pcm_free = idma_free,
};

static int __devinit asoc_idma_platform_probe(struct platform_device *pdev)
{
	return snd_soc_register_platform(&pdev->dev, &asoc_idma_platform);
}

static int __devexit asoc_idma_platform_remove(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
	return 0;
}

static struct platform_driver asoc_idma_driver = {
	.driver = {
		.name = "samsung-idma",
		.owner = THIS_MODULE,
	},

	.probe = asoc_idma_platform_probe,
	.remove = __devexit_p(asoc_idma_platform_remove),
};

module_platform_driver(asoc_idma_driver);

MODULE_AUTHOR("Jaswinder Singh, <jassisinghbrar@gmail.com>");
MODULE_DESCRIPTION("Samsung ASoC IDMA Driver");
MODULE_LICENSE("GPL");
gpl-2.0
mirjak/linux-accecn
arch/mips/txx9/generic/irq_tx4927.c
5162
1919
/*
 * Common tx4927 irq handler
 *
 * Author: MontaVista Software, Inc.
 *	   source@mvista.com
 *
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/irq_cpu.h>
#include <asm/txx9/tx4927.h>

/*
 * Set up the TX4927 interrupt hierarchy: MIPS CPU interrupts first,
 * then the on-chip IRC cascaded off the CPU's TX4927_IRC_INT line,
 * and finally per-source IRC priorities (errors highest, then
 * timers, then serial).
 */
void __init tx4927_irq_init(void)
{
	int i;

	mips_cpu_irq_init();
	/* register the IRC controller at its (masked) register base */
	txx9_irq_init(TX4927_IRC_REG & 0xfffffffffULL);
	irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4927_IRC_INT,
				handle_simple_irq);
	/* raise priority for errors, timers, SIO */
	txx9_irq_set_pri(TX4927_IR_ECCERR, 7);
	txx9_irq_set_pri(TX4927_IR_WTOERR, 7);
	txx9_irq_set_pri(TX4927_IR_PCIERR, 7);
	txx9_irq_set_pri(TX4927_IR_PCIPME, 7);
	for (i = 0; i < TX4927_NUM_IR_TMR; i++)
		txx9_irq_set_pri(TX4927_IR_TMR(i), 6);
	for (i = 0; i < TX4927_NUM_IR_SIO; i++)
		txx9_irq_set_pri(TX4927_IR_SIO(i), 5);
}
gpl-2.0
safarend/android_kernel_samsung_exynos5420
drivers/net/ethernet/mellanox/mlx4/en_port.c
7466
7831
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/if_vlan.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>

#include "en_port.h"
#include "mlx4_en.h"

/*
 * Push the port's VLAN filter bitmap to the firmware.  The mailbox
 * holds VLAN_FLTR_SIZE 32-bit big-endian words; entries are filled
 * from the highest index downwards while walking active_vlans from
 * bit 0 upwards.
 */
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_vlan_fltr_mbox *filter;
	int i;
	int j;
	int index = 0;
	u32 entry;
	int err = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	filter = mailbox->buf;
	memset(filter, 0, sizeof(*filter));
	for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
		entry = 0;
		for (j = 0; j < 32; j++)
			if (test_bit(index++, priv->active_vlans))
				entry |= 1 << j;
		filter->entry[i] = cpu_to_be32(entry);
	}
	err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

/*
 * Query link state/speed/transceiver for @port from firmware and
 * cache the result in priv->port_state.  Returns 0 or a negative
 * error from the mailbox command.
 */
int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
{
	struct mlx4_en_query_port_context *qport_context;
	struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
	struct mlx4_en_port_state *state = &priv->port_state;
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	memset(mailbox->buf, 0, sizeof(*qport_context));
	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;
	qport_context = mailbox->buf;

	/* This command is always accessed from Ethtool context
	 * already synchronized, no need in locking */
	state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
	switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) {
	case MLX4_EN_1G_SPEED:
		state->link_speed = 1000;
		break;
	case MLX4_EN_10G_SPEED_XAUI:
	case MLX4_EN_10G_SPEED_XFI:
		state->link_speed = 10000;
		break;
	case MLX4_EN_40G_SPEED:
		state->link_speed = 40000;
		break;
	default:
		/* unknown speed encoding from firmware */
		state->link_speed = -1;
		break;
	}
	state->transciver = qport_context->transceiver;

out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}

/*
 * Fetch the hardware ethernet counters for @port (optionally
 * resetting them when @reset is nonzero) and fold them, together
 * with the driver's per-ring software counters, into the netdev and
 * ethtool statistics under priv->stats_lock.
 */
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
	struct mlx4_en_stat_out_mbox *mlx4_en_stats;
	struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
	struct net_device_stats *stats = &priv->stats;
	struct mlx4_cmd_mailbox *mailbox;
	u64 in_mod = reset << 8 | port;	/* bit 8: clear-after-read */
	int err;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	mlx4_en_stats = mailbox->buf;

	spin_lock_bh(&priv->stats_lock);

	/* RX totals come from the driver's per-ring soft counters */
	stats->rx_packets = 0;
	stats->rx_bytes = 0;
	priv->port_stats.rx_chksum_good = 0;
	priv->port_stats.rx_chksum_none = 0;
	for (i = 0; i < priv->rx_ring_num; i++) {
		stats->rx_packets += priv->rx_ring[i].packets;
		stats->rx_bytes += priv->rx_ring[i].bytes;
		priv->port_stats.rx_chksum_good += priv->rx_ring[i].csum_ok;
		priv->port_stats.rx_chksum_none += priv->rx_ring[i].csum_none;
	}
	stats->tx_packets = 0;
	stats->tx_bytes = 0;
	priv->port_stats.tx_chksum_offload = 0;
	for (i = 0; i < priv->tx_ring_num; i++) {
		stats->tx_packets += priv->tx_ring[i].packets;
		stats->tx_bytes += priv->tx_ring[i].bytes;
		priv->port_stats.tx_chksum_offload += priv->tx_ring[i].tx_csum;
	}

	/* Error totals come from the big-endian firmware mailbox */
	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
			   be32_to_cpu(mlx4_en_stats->RdropLength) +
			   be32_to_cpu(mlx4_en_stats->RJBBR) +
			   be32_to_cpu(mlx4_en_stats->RCRC) +
			   be32_to_cpu(mlx4_en_stats->RRUNT);
	stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
	stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
			   be64_to_cpu(mlx4_en_stats->MCAST_novlan);
	stats->collisions = 0;
	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
	stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
	stats->rx_frame_errors = 0;
	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->tx_aborted_errors = 0;
	stats->tx_carrier_errors = 0;
	stats->tx_fifo_errors = 0;
	stats->tx_heartbeat_errors = 0;
	stats->tx_window_errors = 0;

	/* Per-priority packet counters for ethtool */
	priv->pkstats.broadcast =
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
				be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
	priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
	priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
	priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
	priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
	priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
	priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
	priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
	priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
	priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
	priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
	priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
	priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
	priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
	priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
	priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
	priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
	spin_unlock_bh(&priv->stats_lock);

out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}
gpl-2.0
AOSPA-L/android_kernel_htc_msm8994
drivers/video/omap/lcd_inn1610.c
8234
3223
/*
 * LCD panel support for the TI OMAP1610 Innovator board
 *
 * Copyright (C) 2004 Nokia Corporation
 * Author: Imre Deak <imre.deak@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>

#include "omapfb.h"

#define MODULE_NAME	"omapfb-lcd_h3"

/* Claim GPIOs 14/15 (panel enables) as low outputs; on failure of
 * GPIO 15 the already-claimed GPIO 14 is released again. */
static int innovator1610_panel_init(struct lcd_panel *panel,
				    struct omapfb_device *fbdev)
{
	int r = 0;

	/* configure GPIO(14, 15) as outputs */
	if (gpio_request_one(14, GPIOF_OUT_INIT_LOW, "lcd_en0")) {
		pr_err(MODULE_NAME ": can't request GPIO 14\n");
		r = -1;
		goto exit;
	}
	if (gpio_request_one(15, GPIOF_OUT_INIT_LOW, "lcd_en1")) {
		pr_err(MODULE_NAME ": can't request GPIO 15\n");
		gpio_free(14);
		r = -1;
		goto exit;
	}
exit:
	return r;
}

static void innovator1610_panel_cleanup(struct lcd_panel *panel)
{
	gpio_free(15);
	gpio_free(14);
}

static int innovator1610_panel_enable(struct lcd_panel *panel)
{
	/* set GPIO14 and GPIO15 high */
	gpio_set_value(14, 1);
	gpio_set_value(15, 1);
	return 0;
}

static void innovator1610_panel_disable(struct lcd_panel *panel)
{
	/* set GPIO13, GPIO14 and GPIO15 low */
	gpio_set_value(14, 0);
	gpio_set_value(15, 0);
}

/* This panel advertises no optional capabilities. */
static unsigned long innovator1610_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}

/* 320x240 16bpp TFT timings for the Innovator 1610 panel */
struct lcd_panel innovator1610_panel = {
	.name		= "inn1610",
	.config		= OMAP_LCDC_PANEL_TFT,

	.bpp		= 16,
	.data_lines	= 16,
	.x_res		= 320,
	.y_res		= 240,
	.pixel_clock	= 12500,
	.hsw		= 40,
	.hfp		= 40,
	.hbp		= 72,
	.vsw		= 1,
	.vfp		= 1,
	.vbp		= 0,
	.pcd		= 12,

	.init		= innovator1610_panel_init,
	.cleanup	= innovator1610_panel_cleanup,
	.enable		= innovator1610_panel_enable,
	.disable	= innovator1610_panel_disable,
	.get_caps	= innovator1610_panel_get_caps,
};

static int innovator1610_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&innovator1610_panel);
	return 0;
}

static int innovator1610_panel_remove(struct platform_device *pdev)
{
	return 0;
}

/* Suspend/resume are no-ops: the framebuffer core drives the panel
 * through enable/disable instead. */
static int innovator1610_panel_suspend(struct platform_device *pdev,
				       pm_message_t mesg)
{
	return 0;
}

static int innovator1610_panel_resume(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver innovator1610_panel_driver = {
	.probe		= innovator1610_panel_probe,
	.remove		= innovator1610_panel_remove,
	.suspend	= innovator1610_panel_suspend,
	.resume		= innovator1610_panel_resume,
	.driver		= {
		.name	= "lcd_inn1610",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(innovator1610_panel_driver);
gpl-2.0
kozmikkick/tripndroid-endeavoru-3.5.7
fs/ext4/symlink.c
9258
1390
/* * linux/fs/ext4/symlink.c * * Only fast symlinks left here - the rest is done by generic code. AV, 1999 * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/symlink.c * * Copyright (C) 1991, 1992 Linus Torvalds * * ext4 symlink handling code */ #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/namei.h> #include "ext4.h" #include "xattr.h" static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd) { struct ext4_inode_info *ei = EXT4_I(dentry->d_inode); nd_set_link(nd, (char *) ei->i_data); return NULL; } const struct inode_operations ext4_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, .setattr = ext4_setattr, #ifdef CONFIG_EXT4_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ext4_listxattr, .removexattr = generic_removexattr, #endif }; const struct inode_operations ext4_fast_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = ext4_follow_link, .setattr = ext4_setattr, #ifdef CONFIG_EXT4_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ext4_listxattr, .removexattr = generic_removexattr, #endif };
gpl-2.0
MaxiCM-Private/android_kernel_lge_jagnm
arch/sh/mm/nommu.c
12074
1689
/* * arch/sh/mm/nommu.c * * Various helper routines and stubs for MMUless SH. * * Copyright (C) 2002 - 2009 Paul Mundt * * Released under the terms of the GNU GPL v2.0. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/string.h> #include <linux/mm.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include <asm/page.h> #include <asm/uaccess.h> /* * Nothing too terribly exciting here .. */ void copy_page(void *to, void *from) { memcpy(to, from, PAGE_SIZE); } __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n) { memcpy(to, from, n); return 0; } __kernel_size_t __clear_user(void *to, __kernel_size_t n) { memset(to, 0, n); return 0; } void local_flush_tlb_all(void) { BUG(); } void local_flush_tlb_mm(struct mm_struct *mm) { BUG(); } void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { BUG(); } void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { BUG(); } void local_flush_tlb_one(unsigned long asid, unsigned long page) { BUG(); } void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { BUG(); } void __flush_tlb_global(void) { } void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) { } void __init kmap_coherent_init(void) { } void *kmap_coherent(struct page *page, unsigned long addr) { BUG(); return NULL; } void kunmap_coherent(void *kvaddr) { BUG(); } void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) { } void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) { } void pgtable_cache_init(void) { }
gpl-2.0
cphelps76/DEMENTED_kernel_jf
arch/sh/mm/nommu.c
12074
1689
/* * arch/sh/mm/nommu.c * * Various helper routines and stubs for MMUless SH. * * Copyright (C) 2002 - 2009 Paul Mundt * * Released under the terms of the GNU GPL v2.0. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/string.h> #include <linux/mm.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include <asm/page.h> #include <asm/uaccess.h> /* * Nothing too terribly exciting here .. */ void copy_page(void *to, void *from) { memcpy(to, from, PAGE_SIZE); } __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n) { memcpy(to, from, n); return 0; } __kernel_size_t __clear_user(void *to, __kernel_size_t n) { memset(to, 0, n); return 0; } void local_flush_tlb_all(void) { BUG(); } void local_flush_tlb_mm(struct mm_struct *mm) { BUG(); } void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { BUG(); } void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { BUG(); } void local_flush_tlb_one(unsigned long asid, unsigned long page) { BUG(); } void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { BUG(); } void __flush_tlb_global(void) { } void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) { } void __init kmap_coherent_init(void) { } void *kmap_coherent(struct page *page, unsigned long addr) { BUG(); return NULL; } void kunmap_coherent(void *kvaddr) { BUG(); } void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) { } void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) { } void pgtable_cache_init(void) { }
gpl-2.0
PaoloW8/kernel_ZOPO
arch/sh/mm/nommu.c
12074
1689
/* * arch/sh/mm/nommu.c * * Various helper routines and stubs for MMUless SH. * * Copyright (C) 2002 - 2009 Paul Mundt * * Released under the terms of the GNU GPL v2.0. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/string.h> #include <linux/mm.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include <asm/page.h> #include <asm/uaccess.h> /* * Nothing too terribly exciting here .. */ void copy_page(void *to, void *from) { memcpy(to, from, PAGE_SIZE); } __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n) { memcpy(to, from, n); return 0; } __kernel_size_t __clear_user(void *to, __kernel_size_t n) { memset(to, 0, n); return 0; } void local_flush_tlb_all(void) { BUG(); } void local_flush_tlb_mm(struct mm_struct *mm) { BUG(); } void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { BUG(); } void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { BUG(); } void local_flush_tlb_one(unsigned long asid, unsigned long page) { BUG(); } void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { BUG(); } void __flush_tlb_global(void) { } void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) { } void __init kmap_coherent_init(void) { } void *kmap_coherent(struct page *page, unsigned long addr) { BUG(); return NULL; } void kunmap_coherent(void *kvaddr) { BUG(); } void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) { } void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) { } void pgtable_cache_init(void) { }
gpl-2.0
bcnice20/Shooter-2.6.35_mr
drivers/usb/host/fhci-q.c
13866
7107
/*
 * Freescale QUICC Engine USB Host Controller Driver
 *
 * Copyright (c) Freescale Semicondutor, Inc. 2006.
 *               Shlomi Gridish <gridish@freescale.com>
 *               Jerry Huang <Chang-Ming.Huang@freescale.com>
 * Copyright (c) Logic Product Development, Inc. 2007
 *               Peter Barada <peterb@logicpd.com>
 * Copyright (c) MontaVista Software, Inc. 2008.
 *               Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "fhci.h"

/* maps the hardware error code to the USB error code */
static int status_to_error(u32 status)
{
	if (status == USB_TD_OK)
		return 0;
	else if (status & USB_TD_RX_ER_CRC)
		return -EILSEQ;
	else if (status & USB_TD_RX_ER_NONOCT)
		return -EPROTO;
	else if (status & USB_TD_RX_ER_OVERUN)
		return -ECOMM;
	else if (status & USB_TD_RX_ER_BITSTUFF)
		return -EPROTO;
	else if (status & USB_TD_RX_ER_PID)
		return -EILSEQ;
	else if (status & (USB_TD_TX_ER_NAK | USB_TD_TX_ER_TIMEOUT))
		return -ETIMEDOUT;
	else if (status & USB_TD_TX_ER_STALL)
		return -EPIPE;
	else if (status & USB_TD_TX_ER_UNDERUN)
		return -ENOSR;
	else if (status & USB_TD_RX_DATA_UNDERUN)
		return -EREMOTEIO;
	else if (status & USB_TD_RX_DATA_OVERUN)
		return -EOVERFLOW;
	else
		return -EINVAL;
}

/* Queue a TD at the tail of a frame's TD list. */
void fhci_add_td_to_frame(struct fhci_time_frame *frame, struct td *td)
{
	list_add_tail(&td->frame_lh, &frame->tds_list);
}

/* Append @number TDs to an endpoint's list; the first one becomes
 * the ED's head if the ED was previously empty. */
void fhci_add_tds_to_ed(struct ed *ed, struct td **td_list, int number)
{
	int i;

	for (i = 0; i < number; i++) {
		struct td *td = td_list[i];
		list_add_tail(&td->node, &ed->td_list);
	}
	if (ed->td_head == NULL)
		ed->td_head = td_list[0];
}

/* Return (without removing) the first TD on an ED, or NULL. */
static struct td *peek_td_from_ed(struct ed *ed)
{
	struct td *td;

	if (!list_empty(&ed->td_list))
		td = list_entry(ed->td_list.next, struct td, node);
	else
		td = NULL;

	return td;
}

/* Pop the first TD off a frame's list, or NULL if empty. */
struct td *fhci_remove_td_from_frame(struct fhci_time_frame *frame)
{
	struct td *td;

	if (!list_empty(&frame->tds_list)) {
		td = list_entry(frame->tds_list.next, struct td, frame_lh);
		list_del_init(frame->tds_list.next);
	} else
		td = NULL;

	return td;
}

/* Return (without removing) the first TD on a frame, or NULL. */
struct td *fhci_peek_td_from_frame(struct fhci_time_frame *frame)
{
	struct td *td;

	if (!list_empty(&frame->tds_list))
		td = list_entry(frame->tds_list.next, struct td, frame_lh);
	else
		td = NULL;

	return td;
}

/* Pop the first TD off an ED and keep td_head pointing at the new
 * first entry (or NULL when the list drains). */
struct td *fhci_remove_td_from_ed(struct ed *ed)
{
	struct td *td;

	if (!list_empty(&ed->td_list)) {
		td = list_entry(ed->td_list.next, struct td, node);
		list_del_init(ed->td_list.next);

		/* if this TD was the ED's head, find next TD */
		if (!list_empty(&ed->td_list))
			ed->td_head = list_entry(ed->td_list.next,
						 struct td, node);
		else
			ed->td_head = NULL;
	} else
		td = NULL;

	return td;
}

/* Pop the first TD off the controller's done list, or NULL. */
struct td *fhci_remove_td_from_done_list(struct fhci_controller_list *p_list)
{
	struct td *td;

	if (!list_empty(&p_list->done_list)) {
		td = list_entry(p_list->done_list.next, struct td, node);
		list_del_init(p_list->done_list.next);
	} else
		td = NULL;

	return td;
}

/* Move the ED's head TD onto the done list, carrying its data
 * toggle back to the ED; notify the upper layer on IOC TDs. */
void fhci_move_td_from_ed_to_done_list(struct fhci_usb *usb, struct ed *ed)
{
	struct td *td;

	td = ed->td_head;
	list_del_init(&td->node);

	/* If this TD was the ED's head, find next TD */
	if (!list_empty(&ed->td_list))
		ed->td_head = list_entry(ed->td_list.next, struct td, node);
	else {
		ed->td_head = NULL;
		ed->state = FHCI_ED_SKIP;
	}
	ed->toggle_carry = td->toggle;
	list_add_tail(&td->node, &usb->hc_list->done_list);
	if (td->ioc)
		usb->transfer_confirm(usb->fhci);
}

/* free done FHCI URB resource such as ED and TD */
static void free_urb_priv(struct fhci_hcd *fhci, struct urb *urb)
{
	int i;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct ed *ed = urb_priv->ed;

	for (i = 0; i < urb_priv->num_of_tds; i++) {
		list_del_init(&urb_priv->tds[i]->node);
		fhci_recycle_empty_td(fhci, urb_priv->tds[i]);
	}

	/* if this TD was the ED's head, find the next TD */
	if (!list_empty(&ed->td_list))
		ed->td_head = list_entry(ed->td_list.next, struct td, node);
	else
		ed->td_head = NULL;

	kfree(urb_priv->tds);
	kfree(urb_priv);
	urb->hcpriv = NULL;

	/* if this ED has no more TDs, unlink it from its list */
	if (ed->td_head == NULL)
		list_del_init(&ed->node);

	fhci->active_urbs--;
}

/* this routine called to complete and free done URB */
void fhci_urb_complete_free(struct fhci_hcd *fhci, struct urb *urb)
{
	free_urb_priv(fhci, urb);

	if (urb->status == -EINPROGRESS) {
		if (urb->actual_length != urb->transfer_buffer_length &&
				urb->transfer_flags & URB_SHORT_NOT_OK)
			urb->status = -EREMOTEIO;
		else
			urb->status = 0;
	}

	usb_hcd_unlink_urb_from_ep(fhci_to_hcd(fhci), urb);

	/* giveback may resubmit; drop the HCD lock around the callback */
	spin_unlock(&fhci->lock);

	usb_hcd_giveback_urb(fhci_to_hcd(fhci), urb, urb->status);

	spin_lock(&fhci->lock);
}

/*
 * calculate transfer length/stats and update the urb
 * Precondition: irqsafe(only for urb->status locking)
 */
void fhci_done_td(struct urb *urb, struct td *td)
{
	struct ed *ed = td->ed;
	u32 cc = td->status;

	/* ISO...drivers see per-TD length/status */
	if (ed->mode == FHCI_TF_ISO) {
		u32 len;
		if (!(urb->transfer_flags & URB_SHORT_NOT_OK &&
				cc == USB_TD_RX_DATA_UNDERUN))
			cc = USB_TD_OK;

		if (usb_pipeout(urb->pipe))
			len = urb->iso_frame_desc[td->iso_index].length;
		else
			len = td->actual_len;

		urb->actual_length += len;
		urb->iso_frame_desc[td->iso_index].actual_length = len;
		urb->iso_frame_desc[td->iso_index].status =
			status_to_error(cc);
	}

	/* BULK,INT,CONTROL... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	else {
		if (td->error_cnt >= 3)
			urb->error_count = 3;

		/* control endpoint only have soft stalls */

		/* update packet status if needed(short may be ok) */
		if (!(urb->transfer_flags & URB_SHORT_NOT_OK) &&
				cc == USB_TD_RX_DATA_UNDERUN) {
			ed->state = FHCI_ED_OPER;
			cc = USB_TD_OK;
		}
		if (cc != USB_TD_OK) {
			if (urb->status == -EINPROGRESS)
				urb->status = status_to_error(cc);
		}

		/* count all non-empty packets except control SETUP packet */
		if (td->type != FHCI_TA_SETUP || td->iso_index != 0)
			urb->actual_length += td->actual_len;
	}
}

/* there are some pending requests to unlink */
void fhci_del_ed_list(struct fhci_hcd *fhci, struct ed *ed)
{
	struct td *td = peek_td_from_ed(ed);
	struct urb *urb = td->urb;
	struct urb_priv *urb_priv = urb->hcpriv;

	if (urb_priv->state == URB_DEL) {
		td = fhci_remove_td_from_ed(ed);
		/* HC may have partly processed this TD */
		if (td->status != USB_TD_INPROGRESS)
			fhci_done_td(urb, td);

		/* URB is done;clean up */
		if (++(urb_priv->tds_cnt) == urb_priv->num_of_tds)
			fhci_urb_complete_free(fhci, urb);
	}
}
gpl-2.0
spica234/HP-CM9-Kernel-32
arch/arm/mach-tegra/odm_kit/adaptations/pmu/tps6586x/nvodm_pmu_tps6586x_i2c.c
43
6396
/* * Copyright (c) 2009 NVIDIA Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "nvodm_pmu_tps6586x_i2c.h" #include "nvodm_pmu_tps6586x.h" #include "pmu_hal.h" NvBool Tps6586xI2cWrite8( NvOdmPmuDeviceHandle hPmu, NvU32 Addr, NvU32 Data) { NvU8 WriteBuffer[2]; NvOdmI2cStatus status = NvOdmI2cStatus_Success; NvOdmI2cTransactionInfo TransactionInfo = {0}; WriteBuffer[0] = Addr & 0xFF; // PMU offset WriteBuffer[1] = Data & 0xFF; // written data TransactionInfo.Address = ((NvOdmPmuDeviceTPS *)(hPmu->pPrivate))->DeviceAddr; TransactionInfo.Buf = &WriteBuffer[0]; TransactionInfo.Flags = NVODM_I2C_IS_WRITE; TransactionInfo.NumBytes = 2; status = NvOdmI2cTransaction(((NvOdmPmuDeviceTPS *)(hPmu->pPrivate))->hOdmI2C, &TransactionInfo, 1, TPS6586x_I2C_SPEED_KHZ, NV_WAIT_INFINITE); if (status == NvOdmI2cStatus_Success) { return NV_TRUE; } else { switch (status) { case NvOdmI2cStatus_Timeout: NVODMPMU_PRINTF(("NvOdmPmuI2cWrite8 Failed: Timeout\n")); break; case NvOdmI2cStatus_SlaveNotFound: default: NVODMPMU_PRINTF(("NvOdmPmuI2cWrite8 Failed: SlaveNotFound\n")); break; } return NV_FALSE; } } NvBool Tps6586xI2cRead8( NvOdmPmuDeviceHandle hPmu, NvU32 Addr, NvU32 *Data) { NvU8 ReadBuffer=0; NvOdmI2cStatus status = NvOdmI2cStatus_Success; NvOdmI2cTransactionInfo TransactionInfo[2]; // Write the PMU offset 
ReadBuffer = Addr & 0xFF; TransactionInfo[0].Address = ((NvOdmPmuDeviceTPS *)(hPmu->pPrivate))->DeviceAddr; TransactionInfo[0].Buf = &ReadBuffer; TransactionInfo[0].Flags = NVODM_I2C_IS_WRITE; TransactionInfo[0].NumBytes = 1; TransactionInfo[1].Address = (((NvOdmPmuDeviceTPS *)(hPmu->pPrivate))->DeviceAddr | 0x1); TransactionInfo[1].Buf = &ReadBuffer; TransactionInfo[1].Flags = 0; TransactionInfo[1].NumBytes = 1; // Read data from PMU at the specified offset status = NvOdmI2cTransaction(((NvOdmPmuDeviceTPS *)(hPmu->pPrivate))->hOdmI2C, &TransactionInfo[0], 2, TPS6586x_I2C_SPEED_KHZ, NV_WAIT_INFINITE); if (status != NvOdmI2cStatus_Success) { switch (status) { case NvOdmI2cStatus_Timeout: NVODMPMU_PRINTF(("NvOdmPmuI2cRead8 Failed: Timeout\n")); break; case NvOdmI2cStatus_SlaveNotFound: default: NVODMPMU_PRINTF(("NvOdmPmuI2cRead8 Failed: SlaveNotFound\n")); break; } return NV_FALSE; } *Data = ReadBuffer; return NV_TRUE; } NvBool Tps6586xI2cWrite32( NvOdmPmuDeviceHandle hPmu, NvU32 Addr, NvU32 Data) { NvU8 WriteBuffer[5]; NvOdmI2cStatus status = NvOdmI2cStatus_Success; NvOdmI2cTransactionInfo TransactionInfo = {0}; WriteBuffer[0] = (NvU8)(Addr & 0xFF); WriteBuffer[1] = (NvU8)((Data >> 24) & 0xFF); WriteBuffer[2] = (NvU8)((Data >> 16) & 0xFF); WriteBuffer[3] = (NvU8)((Data >> 8) & 0xFF); WriteBuffer[4] = (NvU8)(Data & 0xFF); TransactionInfo.Address = ((NvOdmPmuDeviceTPS *)(hPmu->pPrivate))->DeviceAddr; TransactionInfo.Buf = &WriteBuffer[0]; TransactionInfo.Flags = NVODM_I2C_IS_WRITE; TransactionInfo.NumBytes = 5; status = NvOdmI2cTransaction(((NvOdmPmuDeviceTPS *)(hPmu->pPrivate))->hOdmI2C, &TransactionInfo, 1, TPS6586x_I2C_SPEED_KHZ, NV_WAIT_INFINITE); if (status == NvOdmI2cStatus_Success) { return NV_TRUE; } else { switch (status) { case NvOdmI2cStatus_Timeout: NVODMPMU_PRINTF(("NvOdmPmuI2cWrite8 Failed: Timeout\n")); break; case NvOdmI2cStatus_SlaveNotFound: default: NVODMPMU_PRINTF(("NvOdmPmuI2cWrite8 Failed: SlaveNotFound\n")); break; } return NV_FALSE; } } NvBool 
Tps6586xI2cRead32( NvOdmPmuDeviceHandle hPmu, NvU32 Addr, NvU32 *Data) { NvU8 ReadBuffer[5]; NvOdmI2cStatus status = NvOdmI2cStatus_Success; NvOdmI2cTransactionInfo TransactionInfo[2]; // Write the PMU offset ReadBuffer[0] = Addr & 0xFF; TransactionInfo[0].Address = ((NvOdmPmuDeviceTPS *)(hPmu->pPrivate))->DeviceAddr; TransactionInfo[0].Buf = &ReadBuffer[0]; TransactionInfo[0].Flags = NVODM_I2C_IS_WRITE; TransactionInfo[0].NumBytes = 1; TransactionInfo[1].Address = (((NvOdmPmuDeviceTPS *)(hPmu->pPrivate))->DeviceAddr | 0x1); TransactionInfo[1].Buf = &ReadBuffer[0]; TransactionInfo[1].Flags = 0; TransactionInfo[1].NumBytes = 4; // Read data from PMU at the specified offset status = NvOdmI2cTransaction(((NvOdmPmuDeviceTPS *)(hPmu->pPrivate))->hOdmI2C, &TransactionInfo[0], 2, TPS6586x_I2C_SPEED_KHZ, NV_WAIT_INFINITE); if (status != NvOdmI2cStatus_Success) { switch (status) { case NvOdmI2cStatus_Timeout: NVODMPMU_PRINTF(("NvOdmPmuI2cRead8 Failed: Timeout\n")); break; case NvOdmI2cStatus_SlaveNotFound: default: NVODMPMU_PRINTF(("NvOdmPmuI2cRead8 Failed: SlaveNotFound\n")); break; } return NV_FALSE; } *Data = (ReadBuffer[0] << 24) | (ReadBuffer[1] << 16) | (ReadBuffer[2] << 8) | ReadBuffer[3]; return NV_TRUE; }
gpl-2.0
hexianren/linux-3.7-Panda
drivers/gpu/drm/i915/i915_suspend.c
43
29216
/* * * Copyright 2008 (c) Intel Corporation * Jesse Barnes <jbarnes@virtuousgeek.org> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <drm/drmP.h> #include <drm/i915_drm.h> #include "intel_drv.h" #include "i915_reg.h" static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; u32 dpll_reg; /* On IVB, 3rd pipe shares PLL with another one */ if (pipe > 1) return false; if (HAS_PCH_SPLIT(dev)) dpll_reg = _PCH_DPLL(pipe); else dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE); } static void i915_save_palette(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); u32 *array; int i; if (!i915_pipe_enabled(dev, pipe)) return; if (HAS_PCH_SPLIT(dev)) reg = (pipe == PIPE_A) ? 
_LGC_PALETTE_A : _LGC_PALETTE_B; if (pipe == PIPE_A) array = dev_priv->save_palette_a; else array = dev_priv->save_palette_b; for (i = 0; i < 256; i++) array[i] = I915_READ(reg + (i << 2)); } static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); u32 *array; int i; if (!i915_pipe_enabled(dev, pipe)) return; if (HAS_PCH_SPLIT(dev)) reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; if (pipe == PIPE_A) array = dev_priv->save_palette_a; else array = dev_priv->save_palette_b; for (i = 0; i < 256; i++) I915_WRITE(reg + (i << 2), array[i]); } static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg) { struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE8(index_port, reg); return I915_READ8(data_port); } static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable) { struct drm_i915_private *dev_priv = dev->dev_private; I915_READ8(st01); I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); return I915_READ8(VGA_AR_DATA_READ); } static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable) { struct drm_i915_private *dev_priv = dev->dev_private; I915_READ8(st01); I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); I915_WRITE8(VGA_AR_DATA_WRITE, val); } static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val) { struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE8(index_port, reg); I915_WRITE8(data_port, val); } static void i915_save_vga(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; u16 cr_index, cr_data, st01; /* VGA color palette registers */ dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK); /* MSR bits */ dev_priv->saveMSR = I915_READ8(VGA_MSR_READ); if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { cr_index = VGA_CR_INDEX_CGA; cr_data = 
VGA_CR_DATA_CGA; st01 = VGA_ST01_CGA; } else { cr_index = VGA_CR_INDEX_MDA; cr_data = VGA_CR_DATA_MDA; st01 = VGA_ST01_MDA; } /* CRT controller regs */ i915_write_indexed(dev, cr_index, cr_data, 0x11, i915_read_indexed(dev, cr_index, cr_data, 0x11) & (~0x80)); for (i = 0; i <= 0x24; i++) dev_priv->saveCR[i] = i915_read_indexed(dev, cr_index, cr_data, i); /* Make sure we don't turn off CR group 0 writes */ dev_priv->saveCR[0x11] &= ~0x80; /* Attribute controller registers */ I915_READ8(st01); dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX); for (i = 0; i <= 0x14; i++) dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0); I915_READ8(st01); I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX); I915_READ8(st01); /* Graphics controller registers */ for (i = 0; i < 9; i++) dev_priv->saveGR[i] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i); dev_priv->saveGR[0x10] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10); dev_priv->saveGR[0x11] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11); dev_priv->saveGR[0x18] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18); /* Sequencer registers */ for (i = 0; i < 8; i++) dev_priv->saveSR[i] = i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i); } static void i915_restore_vga(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; u16 cr_index, cr_data, st01; /* MSR bits */ I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR); if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { cr_index = VGA_CR_INDEX_CGA; cr_data = VGA_CR_DATA_CGA; st01 = VGA_ST01_CGA; } else { cr_index = VGA_CR_INDEX_MDA; cr_data = VGA_CR_DATA_MDA; st01 = VGA_ST01_MDA; } /* Sequencer registers, don't write SR07 */ for (i = 0; i < 7; i++) i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i, dev_priv->saveSR[i]); /* CRT controller regs */ /* Enable CR group 0 writes */ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); for (i = 0; i <= 0x24; i++) i915_write_indexed(dev, cr_index, cr_data, i, 
dev_priv->saveCR[i]); /* Graphics controller regs */ for (i = 0; i < 9; i++) i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i, dev_priv->saveGR[i]); i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10, dev_priv->saveGR[0x10]); i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11, dev_priv->saveGR[0x11]); i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18, dev_priv->saveGR[0x18]); /* Attribute controller registers */ I915_READ8(st01); /* switch back to index mode */ for (i = 0; i <= 0x14; i++) i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0); I915_READ8(st01); /* switch back to index mode */ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20); I915_READ8(st01); /* VGA color palette registers */ I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); } static void i915_save_modeset_reg(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; if (drm_core_check_feature(dev, DRIVER_MODESET)) return; /* Cursor state */ dev_priv->saveCURACNTR = I915_READ(_CURACNTR); dev_priv->saveCURAPOS = I915_READ(_CURAPOS); dev_priv->saveCURABASE = I915_READ(_CURABASE); dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR); dev_priv->saveCURBPOS = I915_READ(_CURBPOS); dev_priv->saveCURBBASE = I915_READ(_CURBBASE); if (IS_GEN2(dev)) dev_priv->saveCURSIZE = I915_READ(CURSIZE); if (HAS_PCH_SPLIT(dev)) { dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); } /* Pipe & plane A info */ dev_priv->savePIPEACONF = I915_READ(_PIPEACONF); dev_priv->savePIPEASRC = I915_READ(_PIPEASRC); if (HAS_PCH_SPLIT(dev)) { dev_priv->saveFPA0 = I915_READ(_PCH_FPA0); dev_priv->saveFPA1 = I915_READ(_PCH_FPA1); dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A); } else { dev_priv->saveFPA0 = I915_READ(_FPA0); dev_priv->saveFPA1 = I915_READ(_FPA1); dev_priv->saveDPLL_A = I915_READ(_DPLL_A); } if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD); dev_priv->saveHTOTAL_A = 
I915_READ(_HTOTAL_A); dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A); dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A); dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A); dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A); dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A); if (!HAS_PCH_SPLIT(dev)) dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A); if (HAS_PCH_SPLIT(dev)) { dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1); dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF); dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); } dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR); dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE); dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE); dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS); dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR); if (INTEL_INFO(dev)->gen >= 4) { dev_priv->saveDSPASURF = I915_READ(_DSPASURF); dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF); } i915_save_palette(dev, PIPE_A); dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT); /* Pipe & plane B info */ dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF); dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC); if (HAS_PCH_SPLIT(dev)) { dev_priv->saveFPB0 = I915_READ(_PCH_FPB0); dev_priv->saveFPB1 = I915_READ(_PCH_FPB1); dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B); } else { 
dev_priv->saveFPB0 = I915_READ(_FPB0); dev_priv->saveFPB1 = I915_READ(_FPB1); dev_priv->saveDPLL_B = I915_READ(_DPLL_B); } if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD); dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B); dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B); dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B); dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B); dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B); dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B); if (!HAS_PCH_SPLIT(dev)) dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B); if (HAS_PCH_SPLIT(dev)) { dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1); dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF); dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); } dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR); dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE); dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS); dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR); if (INTEL_INFO(dev)->gen >= 4) { dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF); dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); } i915_save_palette(dev, PIPE_B); dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT); /* Fences */ switch (INTEL_INFO(dev)->gen) { case 7: 
case 6: for (i = 0; i < 16; i++) dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); break; case 5: case 4: for (i = 0; i < 16; i++) dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); break; case 3: if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) for (i = 0; i < 8; i++) dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); case 2: for (i = 0; i < 8; i++) dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); break; } return; } static void i915_restore_modeset_reg(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int dpll_a_reg, fpa0_reg, fpa1_reg; int dpll_b_reg, fpb0_reg, fpb1_reg; int i; if (drm_core_check_feature(dev, DRIVER_MODESET)) return; /* Fences */ switch (INTEL_INFO(dev)->gen) { case 7: case 6: for (i = 0; i < 16; i++) I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); break; case 5: case 4: for (i = 0; i < 16; i++) I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); break; case 3: case 2: if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) for (i = 0; i < 8; i++) I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); for (i = 0; i < 8; i++) I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); break; } if (HAS_PCH_SPLIT(dev)) { dpll_a_reg = _PCH_DPLL_A; dpll_b_reg = _PCH_DPLL_B; fpa0_reg = _PCH_FPA0; fpb0_reg = _PCH_FPB0; fpa1_reg = _PCH_FPA1; fpb1_reg = _PCH_FPB1; } else { dpll_a_reg = _DPLL_A; dpll_b_reg = _DPLL_B; fpa0_reg = _FPA0; fpb0_reg = _FPB0; fpa1_reg = _FPA1; fpb1_reg = _FPB1; } if (HAS_PCH_SPLIT(dev)) { I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); } /* Pipe & plane A info */ /* Prime the clock */ if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A & ~DPLL_VCO_ENABLE); POSTING_READ(dpll_a_reg); udelay(150); } I915_WRITE(fpa0_reg, dev_priv->saveFPA0); I915_WRITE(fpa1_reg, dev_priv->saveFPA1); /* 
Actually enable it */ I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); POSTING_READ(dpll_a_reg); udelay(150); if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD); POSTING_READ(_DPLL_A_MD); } udelay(150); /* Restore mode */ I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A); I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A); I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A); I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A); I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A); I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A); if (!HAS_PCH_SPLIT(dev)) I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A); if (HAS_PCH_SPLIT(dev)) { I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1); I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1); I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS); I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF); I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A); I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A); I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A); } /* Restore plane info */ I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE); I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS); I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC); I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR); I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE); if (INTEL_INFO(dev)->gen >= 4) { I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF); I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF); } I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF); 
i915_restore_palette(dev, PIPE_A); /* Enable the plane */ I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR); I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); /* Pipe & plane B info */ if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B & ~DPLL_VCO_ENABLE); POSTING_READ(dpll_b_reg); udelay(150); } I915_WRITE(fpb0_reg, dev_priv->saveFPB0); I915_WRITE(fpb1_reg, dev_priv->saveFPB1); /* Actually enable it */ I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); POSTING_READ(dpll_b_reg); udelay(150); if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD); POSTING_READ(_DPLL_B_MD); } udelay(150); /* Restore mode */ I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B); I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B); I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B); I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B); I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B); I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B); if (!HAS_PCH_SPLIT(dev)) I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B); if (HAS_PCH_SPLIT(dev)) { I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1); I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1); I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS); I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF); I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B); I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B); I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B); } /* Restore plane info */ 
I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE); I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS); I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC); I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR); I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); if (INTEL_INFO(dev)->gen >= 4) { I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF); I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); } I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF); i915_restore_palette(dev, PIPE_B); /* Enable the plane */ I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR); I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); /* Cursor state */ I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS); I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR); I915_WRITE(_CURABASE, dev_priv->saveCURABASE); I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS); I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR); I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE); if (IS_GEN2(dev)) I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); return; } static void i915_save_display(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; /* Display arbitration control */ dev_priv->saveDSPARB = I915_READ(DSPARB); /* This is only meaningful in non-KMS mode */ /* Don't save them in KMS mode */ i915_save_modeset_reg(dev); /* CRT state */ if (HAS_PCH_SPLIT(dev)) { dev_priv->saveADPA = I915_READ(PCH_ADPA); } else { dev_priv->saveADPA = I915_READ(ADPA); } /* LVDS state */ if (HAS_PCH_SPLIT(dev)) { dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); dev_priv->saveLVDS = I915_READ(PCH_LVDS); } else { dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); if (INTEL_INFO(dev)->gen >= 4) 
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); if (IS_MOBILE(dev) && !IS_I830(dev)) dev_priv->saveLVDS = I915_READ(LVDS); } if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); if (HAS_PCH_SPLIT(dev)) { dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); } else { dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); } /* Display Port state */ if (SUPPORTS_INTEGRATED_DP(dev)) { dev_priv->saveDP_B = I915_READ(DP_B); dev_priv->saveDP_C = I915_READ(DP_C); dev_priv->saveDP_D = I915_READ(DP_D); dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); } /* FIXME: save TV & SDVO state */ /* Only save FBC state on the platform that supports FBC */ if (I915_HAS_FBC(dev)) { if (HAS_PCH_SPLIT(dev)) { dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); } else if (IS_GM45(dev)) { dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); } else { dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); } } /* VGA state */ dev_priv->saveVGA0 = I915_READ(VGA0); dev_priv->saveVGA1 = I915_READ(VGA1); dev_priv->saveVGA_PD = I915_READ(VGA_PD); if (HAS_PCH_SPLIT(dev)) dev_priv->saveVGACNTRL = 
I915_READ(CPU_VGACNTRL); else dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); i915_save_vga(dev); } static void i915_restore_display(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; /* Display arbitration */ I915_WRITE(DSPARB, dev_priv->saveDSPARB); /* Display port ratios (must be done before clock is set) */ if (SUPPORTS_INTEGRATED_DP(dev)) { I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); } /* This is only meaningful in non-KMS mode */ /* Don't restore them in KMS mode */ i915_restore_modeset_reg(dev); /* CRT state */ if (HAS_PCH_SPLIT(dev)) I915_WRITE(PCH_ADPA, dev_priv->saveADPA); else I915_WRITE(ADPA, dev_priv->saveADPA); /* LVDS state */ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); if (HAS_PCH_SPLIT(dev)) { I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); } else if (IS_MOBILE(dev) && !IS_I830(dev)) I915_WRITE(LVDS, dev_priv->saveLVDS); if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); if (HAS_PCH_SPLIT(dev)) { I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2; * otherwise we get blank eDP screen after S3 on some machines */ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2); I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); I915_WRITE(PCH_PP_OFF_DELAYS, 
dev_priv->savePP_OFF_DELAYS); I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); I915_WRITE(RSTDBYCTL, dev_priv->saveMCHBAR_RENDER_STANDBY); } else { I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL); I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); } /* Display Port state */ if (SUPPORTS_INTEGRATED_DP(dev)) { I915_WRITE(DP_B, dev_priv->saveDP_B); I915_WRITE(DP_C, dev_priv->saveDP_C); I915_WRITE(DP_D, dev_priv->saveDP_D); } /* FIXME: restore TV & SDVO state */ /* only restore FBC info on the platform that supports FBC*/ intel_disable_fbc(dev); if (I915_HAS_FBC(dev)) { if (HAS_PCH_SPLIT(dev)) { I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); } else if (IS_GM45(dev)) { I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); } else { I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); } } /* VGA state */ if (HAS_PCH_SPLIT(dev)) I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); else I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); I915_WRITE(VGA0, dev_priv->saveVGA0); I915_WRITE(VGA1, dev_priv->saveVGA1); I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); POSTING_READ(VGA_PD); udelay(150); i915_restore_vga(dev); } int i915_save_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); mutex_lock(&dev->struct_mutex); /* Hardware status page */ dev_priv->saveHWS = I915_READ(HWS_PGA); i915_save_display(dev); /* Interrupt state */ if (HAS_PCH_SPLIT(dev)) { dev_priv->saveDEIER = I915_READ(DEIER); 
dev_priv->saveDEIMR = I915_READ(DEIMR); dev_priv->saveGTIER = I915_READ(GTIER); dev_priv->saveGTIMR = I915_READ(GTIMR); dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); dev_priv->saveMCHBAR_RENDER_STANDBY = I915_READ(RSTDBYCTL); dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); } else { dev_priv->saveIER = I915_READ(IER); dev_priv->saveIMR = I915_READ(IMR); } intel_disable_gt_powersave(dev); /* Cache mode state */ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); /* Memory Arbitration state */ dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); /* Scratch space */ for (i = 0; i < 16; i++) { dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); } for (i = 0; i < 3; i++) dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); mutex_unlock(&dev->struct_mutex); return 0; } int i915_restore_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); mutex_lock(&dev->struct_mutex); /* Hardware status page */ I915_WRITE(HWS_PGA, dev_priv->saveHWS); i915_restore_display(dev); /* Interrupt state */ if (HAS_PCH_SPLIT(dev)) { I915_WRITE(DEIER, dev_priv->saveDEIER); I915_WRITE(DEIMR, dev_priv->saveDEIMR); I915_WRITE(GTIER, dev_priv->saveGTIER); I915_WRITE(GTIMR, dev_priv->saveGTIMR); I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG); } else { I915_WRITE(IER, dev_priv->saveIER); I915_WRITE(IMR, dev_priv->saveIMR); } /* Cache mode state */ I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); /* Memory arbitration state */ I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); for (i = 0; i < 16; i++) { I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]); } for (i = 0; i < 3; 
i++) I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); mutex_unlock(&dev->struct_mutex); intel_i2c_reset(dev); return 0; }
gpl-2.0
codebam/linux
net/netfilter/nf_conntrack_ftp.c
43
16949
/* FTP extension for connection tracking. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/ipv6.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <net/checksum.h>
#include <net/tcp.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <linux/netfilter/nf_conntrack_ftp.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
MODULE_DESCRIPTION("ftp connection tracking helper");
MODULE_ALIAS("ip_conntrack_ftp");
MODULE_ALIAS_NFCT_HELPER("ftp");

/* This is slow, but it's simple. --RR */
/* Single shared 64KB linearization buffer; serialized by nf_ftp_lock below,
 * so only one FTP packet is parsed at a time system-wide. */
static char *ftp_buffer;

static DEFINE_SPINLOCK(nf_ftp_lock);

#define MAX_PORTS 8
/* TCP ports to attach this helper to (module parameter, default FTP_PORT). */
static u_int16_t ports[MAX_PORTS];
static unsigned int ports_c;
module_param_array(ports, ushort, &ports_c, 0400);

/* If set, honour PASV replies that name a third-party address (see the
 * "Enrico Scholz" comment in help() for why this is off by default). */
static bool loose;
module_param(loose, bool, 0600);

/* NAT hook: when FTP NAT is loaded it mangles the payload and registers the
 * (possibly rewritten) expectation itself. Read via rcu_dereference(). */
unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
				enum ip_conntrack_info ctinfo,
				enum nf_ct_ftp_type type,
				unsigned int protoff,
				unsigned int matchoff,
				unsigned int matchlen,
				struct nf_conntrack_expect *exp);
EXPORT_SYMBOL_GPL(nf_nat_ftp_hook);

static int try_rfc959(const char *, size_t, struct nf_conntrack_man *,
		      char, unsigned int *);
static int try_rfc1123(const char *, size_t, struct nf_conntrack_man *,
		       char, unsigned int *);
static int try_eprt(const char *, size_t, struct nf_conntrack_man *,
		    char, unsigned int *);
static int try_epsv_response(const char *, size_t,
			     struct nf_conntrack_man *, char,
			     unsigned int *);

/* One table per direction: commands (PORT/EPRT) from the client,
 * responses (227/229) from the server. Each entry pairs a literal
 * pattern with the parser that extracts the address/port that follows. */
static struct ftp_search {
	const char *pattern;
	size_t plen;
	char skip;
	char term;
	enum nf_ct_ftp_type ftptype;
	int (*getnum)(const char *, size_t, struct nf_conntrack_man *,
		      char, unsigned int *);
} search[IP_CT_DIR_MAX][2] = {
	[IP_CT_DIR_ORIGINAL] = {
		{
			.pattern	= "PORT",
			.plen		= sizeof("PORT") - 1,
			.skip		= ' ',
			.term		= '\r',
			.ftptype	= NF_CT_FTP_PORT,
			.getnum		= try_rfc959,
		},
		{
			.pattern	= "EPRT",
			.plen		= sizeof("EPRT") - 1,
			.skip		= ' ',
			.term		= '\r',
			.ftptype	= NF_CT_FTP_EPRT,
			.getnum		= try_eprt,
		},
	},
	[IP_CT_DIR_REPLY] = {
		{
			.pattern	= "227 ",
			.plen		= sizeof("227 ") - 1,
			.ftptype	= NF_CT_FTP_PASV,
			.getnum		= try_rfc1123,
		},
		{
			.pattern	= "229 ",
			.plen		= sizeof("229 ") - 1,
			.skip		= '(',
			.term		= ')',
			.ftptype	= NF_CT_FTP_EPSV,
			.getnum		= try_epsv_response,
		},
	},
};

/* Parse a textual IPv6 address terminated by 'term'.
 * Returns the number of characters consumed, or 0 on parse failure. */
static int
get_ipv6_addr(const char *src, size_t dlen, struct in6_addr *dst, u_int8_t term)
{
	const char *end;
	int ret = in6_pton(src, min_t(size_t, dlen, 0xffff),
			   (u8 *)dst, term, &end);
	if (ret > 0)
		return (int)(end - src);
	return 0;
}

/* Parse array_size decimal numbers separated by 'sep' and (optionally)
 * terminated by 'term'. Fills array[]; returns chars consumed or 0. */
static int try_number(const char *data, size_t dlen, u_int32_t array[],
		      int array_size, char sep, char term)
{
	u_int32_t i, len;

	memset(array, 0, sizeof(array[0])*array_size);

	/* Keep data pointing at next char. */
	for (i = 0, len = 0; len < dlen && i < array_size; len++, data++) {
		if (*data >= '0' && *data <= '9') {
			array[i] = array[i]*10 + *data - '0';
		}
		else if (*data == sep)
			i++;
		else {
			/* Unexpected character; true if it's the
			   terminator (or we don't care about one)
			   and we're finished. */
			if ((*data == term || !term) && i == array_size - 1)
				return len;

			pr_debug("Char %u (got %u nums) `%u' unexpected\n",
				 len, i, *data);
			return 0;
		}
	}
	pr_debug("Failed to fill %u numbers separated by %c\n",
		 array_size, sep);
	return 0;
}

/* Returns 0, or length of numbers: 192,168,1,1,5,6 */
static int try_rfc959(const char *data, size_t dlen,
		      struct nf_conntrack_man *cmd, char term,
		      unsigned int *offset)
{
	int length;
	u_int32_t array[6];

	length = try_number(data, dlen, array, 6, ',', term);
	if (length == 0)
		return 0;

	cmd->u3.ip = htonl((array[0] << 24) | (array[1] << 16) |
				    (array[2] << 8) | array[3]);
	cmd->u.tcp.port = htons((array[4] << 8) | array[5]);
	return length;
}

/*
 * From RFC 1123:
 * The format of the 227 reply to a PASV command is not
 * well standardized.  In particular, an FTP client cannot
 * assume that the parentheses shown on page 40 of RFC-959
 * will be present (and in fact, Figure 3 on page 43 omits
 * them).  Therefore, a User-FTP program that interprets
 * the PASV reply must scan the reply for the first digit
 * of the host and port numbers.
 */
static int try_rfc1123(const char *data, size_t dlen,
		       struct nf_conntrack_man *cmd, char term,
		       unsigned int *offset)
{
	int i;
	/* Skip forward to the first digit, then hand off to the
	 * strict RFC 959 parser with no terminator requirement. */
	for (i = 0; i < dlen; i++)
		if (isdigit(data[i]))
			break;

	if (i == dlen)
		return 0;

	*offset += i;

	return try_rfc959(data + i, dlen - i, cmd, 0, offset);
}

/* Grab port: number up to delimiter */
static int get_port(const char *data, int start, size_t dlen, char delim,
		    __be16 *port)
{
	u_int16_t tmp_port = 0;
	int i;

	for (i = start; i < dlen; i++) {
		/* Finished? */
		if (data[i] == delim) {
			if (tmp_port == 0)
				break;
			*port = htons(tmp_port);
			pr_debug("get_port: return %d\n", tmp_port);
			return i + 1;
		}
		else if (data[i] >= '0' && data[i] <= '9')
			tmp_port = tmp_port*10 + data[i] - '0';
		else { /* Some other crap */
			pr_debug("get_port: invalid char.\n");
			break;
		}
	}
	return 0;
}

/* Returns 0, or length of numbers: |1|132.235.1.2|6275| or |2|3ffe::1|6275| */
static int try_eprt(const char *data, size_t dlen, struct nf_conntrack_man *cmd,
		    char term, unsigned int *offset)
{
	char delim;
	int length;

	/* First character is delimiter, then "1" for IPv4 or "2" for IPv6,
	   then delimiter again. */
	if (dlen <= 3) {
		pr_debug("EPRT: too short\n");
		return 0;
	}
	delim = data[0];
	/* RFC 2428: delimiter is any printable non-digit character. */
	if (isdigit(delim) || delim < 33 || delim > 126 || data[2] != delim) {
		pr_debug("try_eprt: invalid delimitter.\n");
		return 0;
	}

	if ((cmd->l3num == PF_INET && data[1] != '1') ||
	    (cmd->l3num == PF_INET6 && data[1] != '2')) {
		pr_debug("EPRT: invalid protocol number.\n");
		return 0;
	}

	pr_debug("EPRT: Got %c%c%c\n", delim, data[1], delim);

	if (data[1] == '1') {
		u_int32_t array[4];

		/* Now we have IP address. */
		length = try_number(data + 3, dlen - 3, array, 4, '.', delim);
		if (length != 0)
			cmd->u3.ip = htonl((array[0] << 24) | (array[1] << 16)
					   | (array[2] << 8) | array[3]);
	} else {
		/* Now we have IPv6 address. */
		length = get_ipv6_addr(data + 3, dlen - 3,
				       (struct in6_addr *)cmd->u3.ip6, delim);
	}

	if (length == 0)
		return 0;
	pr_debug("EPRT: Got IP address!\n");
	/* Start offset includes initial "|1|", and trailing delimiter */
	return get_port(data, 3 + length + 1, dlen, delim, &cmd->u.tcp.port);
}

/* Returns 0, or length of numbers: |||6446| */
static int try_epsv_response(const char *data, size_t dlen,
			     struct nf_conntrack_man *cmd, char term,
			     unsigned int *offset)
{
	char delim;

	/* Three delimiters. */
	if (dlen <= 3)
		return 0;
	delim = data[0];
	if (isdigit(delim) || delim < 33 || delim > 126 ||
	    data[1] != delim || data[2] != delim)
		return 0;

	return get_port(data, 3, dlen, delim, &cmd->u.tcp.port);
}

/* Return 1 for match, 0 for accept, -1 for partial. */
static int find_pattern(const char *data, size_t dlen,
			const char *pattern, size_t plen,
			char skip, char term,
			unsigned int *numoff,
			unsigned int *numlen,
			struct nf_conntrack_man *cmd,
			int (*getnum)(const char *, size_t,
				      struct nf_conntrack_man *, char,
				      unsigned int *))
{
	size_t i = plen;

	pr_debug("find_pattern `%s': dlen = %Zu\n", pattern, dlen);
	if (dlen == 0)
		return 0;

	if (dlen <= plen) {
		/* Short packet: try for partial? */
		if (strncasecmp(data, pattern, dlen) == 0)
			return -1;
		else
			return 0;
	}

	if (strncasecmp(data, pattern, plen) != 0) {
#if 0
		size_t i;

		pr_debug("ftp: string mismatch\n");
		for (i = 0; i < plen; i++) {
			pr_debug("ftp:char %u `%c'(%u) vs `%c'(%u)\n",
				 i, data[i], data[i],
				 pattern[i], pattern[i]);
		}
#endif
		return 0;
	}

	pr_debug("Pattern matches!\n");
	/* Now we've found the constant string, try to skip
	   to the 'skip' character */
	if (skip) {
		for (i = plen; data[i] != skip; i++)
			if (i == dlen - 1)
				return -1;

		/* Skip over the last character */
		i++;
	}

	pr_debug("Skipped up to `%c'!\n", skip);

	*numoff = i;
	*numlen = getnum(data + i, dlen - i, cmd, term, numoff);
	if (!*numlen)
		return -1;

	pr_debug("Match succeeded!\n");
	return 1;
}

/* Look up to see if we're just after a \n. */
static int find_nl_seq(u32 seq, const struct nf_ct_ftp_master *info, int dir)
{
	unsigned int i;

	for (i = 0; i < info->seq_aft_nl_num[dir]; i++)
		if (info->seq_aft_nl[dir][i] == seq)
			return 1;
	return 0;
}

/* We don't update if it's older than what we have. */
static void update_nl_seq(struct nf_conn *ct, u32 nl_seq,
			  struct nf_ct_ftp_master *info, int dir,
			  struct sk_buff *skb)
{
	unsigned int i, oldest;

	/* Look for oldest: if we find exact match, we're done. */
	for (i = 0; i < info->seq_aft_nl_num[dir]; i++) {
		if (info->seq_aft_nl[dir][i] == nl_seq)
			return;
	}

	if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) {
		info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq;
	} else {
		/* Slots full: evict the older of the two remembered
		 * sequence numbers, but only if the new one is newer. */
		if (before(info->seq_aft_nl[dir][0], info->seq_aft_nl[dir][1]))
			oldest = 0;
		else
			oldest = 1;

		if (after(nl_seq, info->seq_aft_nl[dir][oldest]))
			info->seq_aft_nl[dir][oldest] = nl_seq;
	}
}

/* Main helper callback: scan an FTP payload for PORT/EPRT/227/229 and set up
 * an expectation for the announced data connection. Returns an NF_* verdict.
 * All parsing runs under nf_ftp_lock since ftp_buffer is shared. */
static int help(struct sk_buff *skb,
		unsigned int protoff,
		struct nf_conn *ct,
		enum ip_conntrack_info ctinfo)
{
	unsigned int dataoff, datalen;
	const struct tcphdr *th;
	struct tcphdr _tcph;
	const char *fb_ptr;
	int ret;
	u32 seq;
	int dir = CTINFO2DIR(ctinfo);
	unsigned int uninitialized_var(matchlen), uninitialized_var(matchoff);
	struct nf_ct_ftp_master *ct_ftp_info = nfct_help_data(ct);
	struct nf_conntrack_expect *exp;
	union nf_inet_addr *daddr;
	struct nf_conntrack_man cmd = {};
	unsigned int i;
	int found = 0, ends_in_nl;
	typeof(nf_nat_ftp_hook) nf_nat_ftp;

	/* Until there's been traffic both ways, don't look in packets. */
	if (ctinfo != IP_CT_ESTABLISHED &&
	    ctinfo != IP_CT_ESTABLISHED_REPLY) {
		pr_debug("ftp: Conntrackinfo = %u\n", ctinfo);
		return NF_ACCEPT;
	}

	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
	if (th == NULL)
		return NF_ACCEPT;

	dataoff = protoff + th->doff * 4;
	/* No data? */
	if (dataoff >= skb->len) {
		pr_debug("ftp: dataoff(%u) >= skblen(%u)\n", dataoff,
			 skb->len);
		return NF_ACCEPT;
	}
	datalen = skb->len - dataoff;

	spin_lock_bh(&nf_ftp_lock);
	fb_ptr = skb_header_pointer(skb, dataoff, datalen, ftp_buffer);
	BUG_ON(fb_ptr == NULL);

	ends_in_nl = (fb_ptr[datalen - 1] == '\n');
	seq = ntohl(th->seq) + datalen;

	/* Look up to see if we're just after a \n. */
	if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) {
		/* We're picking up this, clear flags and let it continue */
		if (unlikely(ct_ftp_info->flags[dir] & NF_CT_FTP_SEQ_PICKUP)) {
			ct_ftp_info->flags[dir] ^= NF_CT_FTP_SEQ_PICKUP;
			goto skip_nl_seq;
		}

		/* Now if this ends in \n, update ftp info. */
		pr_debug("nf_conntrack_ftp: wrong seq pos %s(%u) or %s(%u)\n",
			 ct_ftp_info->seq_aft_nl_num[dir] > 0 ? "" : "(UNSET)",
			 ct_ftp_info->seq_aft_nl[dir][0],
			 ct_ftp_info->seq_aft_nl_num[dir] > 1 ? "" : "(UNSET)",
			 ct_ftp_info->seq_aft_nl[dir][1]);
		ret = NF_ACCEPT;
		goto out_update_nl;
	}

skip_nl_seq:
	/* Initialize IP/IPv6 addr to expected address (it's not mentioned
	   in EPSV responses) */
	cmd.l3num = nf_ct_l3num(ct);
	memcpy(cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
	       sizeof(cmd.u3.all));

	for (i = 0; i < ARRAY_SIZE(search[dir]); i++) {
		found = find_pattern(fb_ptr, datalen,
				     search[dir][i].pattern,
				     search[dir][i].plen,
				     search[dir][i].skip,
				     search[dir][i].term,
				     &matchoff, &matchlen,
				     &cmd,
				     search[dir][i].getnum);
		if (found)
			break;
	}

	if (found == -1) {
		/* We don't usually drop packets.  After all, this is
		   connection tracking, not packet filtering.
		   However, it is necessary for accurate tracking in
		   this case. */
		nf_ct_helper_log(skb, ct, "partial matching of `%s'",
				 search[dir][i].pattern);
		ret = NF_DROP;
		goto out;
	} else if (found == 0) { /* No match */
		ret = NF_ACCEPT;
		goto out_update_nl;
	}

	pr_debug("conntrack_ftp: match `%.*s' (%u bytes at %u)\n",
		 matchlen, fb_ptr + matchoff,
		 matchlen, ntohl(th->seq) + matchoff);

	exp = nf_ct_expect_alloc(ct);
	if (exp == NULL) {
		nf_ct_helper_log(skb, ct, "cannot alloc expectation");
		ret = NF_DROP;
		goto out;
	}

	/* We refer to the reverse direction ("!dir") tuples here,
	 * because we're expecting something in the other direction.
	 * Doesn't matter unless NAT is happening. */
	daddr = &ct->tuplehash[!dir].tuple.dst.u3;

	/* Update the ftp info */
	if ((cmd.l3num == nf_ct_l3num(ct)) &&
	    memcmp(&cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
		   sizeof(cmd.u3.all))) {
		/* Enrico Scholz's passive FTP to partially RNAT'd ftp
		   server: it really wants us to connect to a
		   different IP address.  Simply don't record it for
		   NAT. */
		if (cmd.l3num == PF_INET) {
			pr_debug("NOT RECORDING: %pI4 != %pI4\n",
				 &cmd.u3.ip,
				 &ct->tuplehash[dir].tuple.src.u3.ip);
		} else {
			pr_debug("NOT RECORDING: %pI6 != %pI6\n",
				 cmd.u3.ip6,
				 ct->tuplehash[dir].tuple.src.u3.ip6);
		}

		/* Thanks to Cristiano Lincoln Mattos
		   <lincoln@cesar.org.br> for reporting this potential
		   problem (DMZ machines opening holes to internal
		   networks, or the packet filter itself). */
		if (!loose) {
			ret = NF_ACCEPT;
			goto out_put_expect;
		}
		daddr = &cmd.u3;
	}

	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, cmd.l3num,
			  &ct->tuplehash[!dir].tuple.src.u3, daddr,
			  IPPROTO_TCP, NULL, &cmd.u.tcp.port);

	/* Now, NAT might want to mangle the packet, and register the
	 * (possibly changed) expectation itself. */
	nf_nat_ftp = rcu_dereference(nf_nat_ftp_hook);
	if (nf_nat_ftp && ct->status & IPS_NAT_MASK)
		ret = nf_nat_ftp(skb, ctinfo, search[dir][i].ftptype,
				 protoff, matchoff, matchlen, exp);
	else {
		/* Can't expect this?  Best to drop packet now. */
		if (nf_ct_expect_related(exp) != 0) {
			nf_ct_helper_log(skb, ct, "cannot add expectation");
			ret = NF_DROP;
		} else
			ret = NF_ACCEPT;
	}

out_put_expect:
	nf_ct_expect_put(exp);

out_update_nl:
	/* Now if this ends in \n, update ftp info.  Seq may have been
	 * adjusted by NAT code. */
	if (ends_in_nl)
		update_nl_seq(ct, seq, ct_ftp_info, dir, skb);
out:
	spin_unlock_bh(&nf_ftp_lock);
	return ret;
}

/* ctnetlink hook: a conntrack injected from user space (e.g. after failover)
 * must resynchronize its \n-sequence tracking on the next packet. */
static int nf_ct_ftp_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
{
	struct nf_ct_ftp_master *ftp = nfct_help_data(ct);

	/* This conntrack has been injected from user-space, always pick up
	 * sequence tracking. Otherwise, the first FTP command after the
	 * failover breaks.
	 */
	ftp->flags[IP_CT_DIR_ORIGINAL] |= NF_CT_FTP_SEQ_PICKUP;
	ftp->flags[IP_CT_DIR_REPLY] |= NF_CT_FTP_SEQ_PICKUP;
	return 0;
}

/* Two helper slots per configured port: one IPv4, one IPv6. */
static struct nf_conntrack_helper ftp[MAX_PORTS * 2] __read_mostly;

static const struct nf_conntrack_expect_policy ftp_exp_policy = {
	.max_expected	= 1,
	.timeout	= 5 * 60,
};

/* don't make this __exit, since it's called from __init ! */
static void nf_conntrack_ftp_fini(void)
{
	nf_conntrack_helpers_unregister(ftp, ports_c * 2);
	kfree(ftp_buffer);
}

static int __init nf_conntrack_ftp_init(void)
{
	int i, ret = 0;

	ftp_buffer = kmalloc(65536, GFP_KERNEL);
	if (!ftp_buffer)
		return -ENOMEM;

	if (ports_c == 0)
		ports[ports_c++] = FTP_PORT;

	/* FIXME should be configurable whether IPv4 and IPv6 FTP connections
		 are tracked or not - YK */
	for (i = 0; i < ports_c; i++) {
		nf_ct_helper_init(&ftp[2 * i], AF_INET, IPPROTO_TCP, "ftp",
				  FTP_PORT, ports[i], ports[i],
				  &ftp_exp_policy, 0,
				  sizeof(struct nf_ct_ftp_master), help,
				  nf_ct_ftp_from_nlattr, THIS_MODULE);
		nf_ct_helper_init(&ftp[2 * i + 1], AF_INET6, IPPROTO_TCP, "ftp",
				  FTP_PORT, ports[i], ports[i],
				  &ftp_exp_policy, 0,
				  sizeof(struct nf_ct_ftp_master), help,
				  nf_ct_ftp_from_nlattr, THIS_MODULE);
	}

	ret = nf_conntrack_helpers_register(ftp, ports_c * 2);
	if (ret < 0) {
		pr_err("failed to register helpers\n");
		kfree(ftp_buffer);
		return ret;
	}

	return 0;
}

module_init(nf_conntrack_ftp_init);
module_exit(nf_conntrack_ftp_fini);
gpl-2.0
rhcp011235/sgh-t989_kernel
net/bluetooth_mgmt/hci_event.c
299
87715
/* BlueZ - Bluetooth protocol stack for Linux Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth HCI event handling. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <net/sock.h> #include <asm/system.h> #include <linux/uaccess.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> /* Handle HCI Event packets */ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); BT_DBG("%s status 0x%x", hdev->name, status); if (status) { hci_dev_lock(hdev); mgmt_stop_discovery_failed(hdev, status); hci_dev_unlock(hdev); return; } clear_bit(HCI_INQUIRY, &hdev->flags); hci_dev_lock(hdev); hci_discovery_set_state(hdev, DISCOVERY_STOPPED); hci_dev_unlock(hdev); hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); hci_conn_check_pending(hdev); } static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); BT_DBG("%s status 0x%x", hdev->name, status); if (status) return; hci_conn_check_pending(hdev); } static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb) { BT_DBG("%s", hdev->name); } static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_role_discovery *rp = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s status 0x%x", hdev->name, rp->status); if (rp->status) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); if (conn) { if (rp->role) conn->link_mode &= ~HCI_LM_MASTER; else conn->link_mode |= HCI_LM_MASTER; } hci_dev_unlock(hdev); } static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_link_policy *rp = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s status 0x%x", hdev->name, rp->status); if (rp->status) return; 
hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); if (conn) conn->link_policy = __le16_to_cpu(rp->policy); hci_dev_unlock(hdev); } static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_write_link_policy *rp = (void *) skb->data; struct hci_conn *conn; void *sent; BT_DBG("%s status 0x%x", hdev->name, rp->status); if (rp->status) return; sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY); if (!sent) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); if (conn) conn->link_policy = get_unaligned_le16(sent + 2); hci_dev_unlock(hdev); } static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_def_link_policy *rp = (void *) skb->data; BT_DBG("%s status 0x%x", hdev->name, rp->status); if (rp->status) return; hdev->link_policy = __le16_to_cpu(rp->policy); } static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); void *sent; BT_DBG("%s status 0x%x", hdev->name, status); sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); if (!sent) return; if (!status) hdev->link_policy = get_unaligned_le16(sent); hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status); } static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); BT_DBG("%s status 0x%x", hdev->name, status); clear_bit(HCI_RESET, &hdev->flags); hci_req_complete(hdev, HCI_OP_RESET, status); /* Reset all flags, except persistent ones */ hdev->dev_flags &= BIT(HCI_MGMT) | BIT(HCI_SETUP) | BIT(HCI_AUTO_OFF) | BIT(HCI_LINK_KEYS) | BIT(HCI_DEBUG_KEYS); hci_dev_lock(hdev); hci_discovery_set_state(hdev, DISCOVERY_STOPPED); hci_dev_unlock(hdev); } static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); void *sent; BT_DBG("%s status 0x%x", hdev->name, status); sent = 
hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); if (!sent) return; hci_dev_lock(hdev); /* local name eir issue. * before update_eir, set hdev->dev_name */ if (status == 0) memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); if (test_bit(HCI_MGMT, &hdev->dev_flags)) mgmt_set_local_name_complete(hdev, sent, status); /* local name eir issue. before update_eir, * set hdev->dev_name * if (status == 0) * memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); */ hci_dev_unlock(hdev); } static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_local_name *rp = (void *) skb->data; BT_DBG("%s status 0x%x", hdev->name, rp->status); if (rp->status) return; if (test_bit(HCI_SETUP, &hdev->dev_flags)) memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); } static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); void *sent; BT_DBG("%s status 0x%x", hdev->name, status); sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE); if (!sent) return; if (!status) { __u8 param = *((__u8 *) sent); if (param == AUTH_ENABLED) set_bit(HCI_AUTH, &hdev->flags); else clear_bit(HCI_AUTH, &hdev->flags); } hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status); } static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); void *sent; BT_DBG("%s status 0x%x", hdev->name, status); sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); if (!sent) return; if (!status) { __u8 param = *((__u8 *) sent); if (param) set_bit(HCI_ENCRYPT, &hdev->flags); else clear_bit(HCI_ENCRYPT, &hdev->flags); } hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status); } static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) { __u8 param, status = *((__u8 *) skb->data); int old_pscan, old_iscan; void *sent; BT_DBG("%s status 0x%x", hdev->name, status); sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); if (!sent) return; 
param = *((__u8 *) sent); hci_dev_lock(hdev); if (status != 0) { mgmt_write_scan_failed(hdev, param, status); hdev->discov_timeout = 0; goto done; } /* discoverable set issue. * 1. set timeout value. turn off -> on * 2. set timeout never. turn off -> on */ /* old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags); */ old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags); if (param & SCAN_INQUIRY) { set_bit(HCI_ISCAN, &hdev->flags); if (!old_iscan) mgmt_discoverable(hdev, 1); if (hdev->discov_timeout > 0) { int to = msecs_to_jiffies(hdev->discov_timeout * 1000); queue_delayed_work(hdev->workqueue, &hdev->discov_off, to); } } else if (old_iscan) mgmt_discoverable(hdev, 0); /* discoverable set issue. * 1. set timeout value. turn off -> on * 2. set timeout never. turn off -> on */ old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags); if (param & SCAN_PAGE) { set_bit(HCI_PSCAN, &hdev->flags); if (!old_pscan) mgmt_connectable(hdev, 1); } else if (old_pscan) mgmt_connectable(hdev, 0); done: hci_dev_unlock(hdev); hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status); } static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_class_of_dev *rp = (void *) skb->data; BT_DBG("%s status 0x%x", hdev->name, rp->status); if (rp->status) return; memcpy(hdev->dev_class, rp->dev_class, 3); BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); } static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); void *sent; BT_DBG("%s status 0x%x", hdev->name, status); if (status) return; sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); if (!sent) return; memcpy(hdev->dev_class, sent, 3); } static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_voice_setting *rp = (void *) skb->data; __u16 setting; BT_DBG("%s status 0x%x", hdev->name, rp->status); if (rp->status) return; 
setting = __le16_to_cpu(rp->voice_setting); if (hdev->voice_setting == setting) return; hdev->voice_setting = setting; BT_DBG("%s voice setting 0x%04x", hdev->name, setting); if (hdev->notify) { tasklet_disable(&hdev->tx_task); hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); tasklet_enable(&hdev->tx_task); } } static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); __u16 setting; void *sent; BT_DBG("%s status 0x%x", hdev->name, status); if (status) return; sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); if (!sent) return; setting = get_unaligned_le16(sent); if (hdev->voice_setting == setting) return; hdev->voice_setting = setting; BT_DBG("%s voice setting 0x%04x", hdev->name, setting); if (hdev->notify) { tasklet_disable(&hdev->tx_task); hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); tasklet_enable(&hdev->tx_task); } } static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); BT_DBG("%s status 0x%x", hdev->name, status); hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status); } static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_ssp_mode *rp = (void *) skb->data; BT_DBG("%s status 0x%x", hdev->name, rp->status); if (rp->status) return; hdev->ssp_mode = rp->mode; } static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); void *sent; BT_DBG("%s status 0x%x", hdev->name, status); if (status) return; sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); if (!sent) return; hdev->ssp_mode = *((__u8 *) sent); } static u8 hci_get_inquiry_mode(struct hci_dev *hdev) { if (hdev->features[6] & LMP_EXT_INQ) return 2; if (hdev->features[3] & LMP_RSSI_INQ) return 1; if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x0757) return 1; if (hdev->manufacturer == 15) { if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 
0x6963) return 1; if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963) return 1; if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965) return 1; } if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 && hdev->lmp_subver == 0x1805) return 1; return 0; } static void hci_setup_inquiry_mode(struct hci_dev *hdev) { u8 mode; mode = hci_get_inquiry_mode(hdev); hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode); } static void hci_setup_event_mask(struct hci_dev *hdev) { /* The second byte is 0xff instead of 0x9f (two reserved bits * disabled) since a Broadcom 1.2 dongle doesn't respond to the * command otherwise */ u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; /* CSR 1.1 dongles does not accept any bitfield so don't try to set * any event mask for pre 1.2 devices */ if (hdev->lmp_ver <= 1) return; events[4] |= 0x01; /* Flow Specification Complete */ events[4] |= 0x02; /* Inquiry Result with RSSI */ events[4] |= 0x04; /* Read Remote Extended Features Complete */ events[5] |= 0x08; /* Synchronous Connection Complete */ events[5] |= 0x10; /* Synchronous Connection Changed */ if (hdev->features[3] & LMP_RSSI_INQ) events[4] |= 0x04; /* Inquiry Result with RSSI */ if (hdev->features[5] & LMP_SNIFF_SUBR) events[5] |= 0x20; /* Sniff Subrating */ if (hdev->features[5] & LMP_PAUSE_ENC) events[5] |= 0x80; /* Encryption Key Refresh Complete */ if (hdev->features[6] & LMP_EXT_INQ) events[5] |= 0x40; /* Extended Inquiry Result */ if (hdev->features[6] & LMP_NO_FLUSH) events[7] |= 0x01; /* Enhanced Flush Complete */ if (hdev->features[7] & LMP_LSTO) events[6] |= 0x80; /* Link Supervision Timeout Changed */ if (hdev->features[6] & LMP_SIMPLE_PAIR) { events[6] |= 0x01; /* IO Capability Request */ events[6] |= 0x02; /* IO Capability Response */ events[6] |= 0x04; /* User Confirmation Request */ events[6] |= 0x08; /* User Passkey Request */ events[6] |= 0x10; /* Remote OOB Data Request */ events[6] |= 0x20; /* Simple Pairing Complete */ events[7] |= 0x04; 
/* User Passkey Notification */ events[7] |= 0x08; /* Keypress Notification */ events[7] |= 0x10; /* Remote Host Supported * Features Notification */ } if (hdev->features[4] & LMP_LE) events[7] |= 0x20; /* LE Meta-Event */ hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events); } static void hci_set_le_support(struct hci_dev *hdev) { struct hci_cp_write_le_host_supported cp; memset(&cp, 0, sizeof(cp)); if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { cp.le = 1; cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR); } hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp); } static void hci_setup(struct hci_dev *hdev) { hci_setup_event_mask(hdev); if (hdev->lmp_ver > 1) hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); if (hdev->features[6] & LMP_SIMPLE_PAIR) { u8 mode = 0x01; hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode); } if (hdev->features[3] & LMP_RSSI_INQ) hci_setup_inquiry_mode(hdev); if (hdev->features[7] & LMP_INQ_TX_PWR) hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL); if (hdev->features[7] & LMP_EXTFEATURES) { struct hci_cp_read_local_ext_features cp; cp.page = 0x01; hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp); } if (hdev->features[4] & LMP_LE) hci_set_le_support(hdev); } static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_local_version *rp = (void *) skb->data; BT_DBG("%s status 0x%x", hdev->name, rp->status); if (rp->status) return; hdev->hci_ver = rp->hci_ver; hdev->hci_rev = __le16_to_cpu(rp->hci_rev); hdev->lmp_ver = rp->lmp_ver; hdev->manufacturer = __le16_to_cpu(rp->manufacturer); hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, hdev->manufacturer, hdev->hci_ver, hdev->hci_rev); if (test_bit(HCI_INIT, &hdev->flags)) hci_setup(hdev); } static void hci_setup_link_policy(struct hci_dev *hdev) { u16 link_policy = 0; if (hdev->features[0] & LMP_RSWITCH) 
link_policy |= HCI_LP_RSWITCH; if (hdev->features[0] & LMP_HOLD) link_policy |= HCI_LP_HOLD; if (hdev->features[0] & LMP_SNIFF) link_policy |= HCI_LP_SNIFF; if (hdev->features[1] & LMP_PARK) link_policy |= HCI_LP_PARK; link_policy = cpu_to_le16(link_policy); hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy), &link_policy); } static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_local_commands *rp = (void *) skb->data; BT_DBG("%s status 0x%x", hdev->name, rp->status); if (rp->status) goto done; memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10)) hci_setup_link_policy(hdev); done: hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status); } static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_local_features *rp = (void *) skb->data; BT_DBG("%s status 0x%x", hdev->name, rp->status); if (rp->status) return; memcpy(hdev->features, rp->features, 8); /* Adjust default settings according to features * supported by device. 
*/ if (hdev->features[0] & LMP_3SLOT) hdev->pkt_type |= (HCI_DM3 | HCI_DH3); if (hdev->features[0] & LMP_5SLOT) hdev->pkt_type |= (HCI_DM5 | HCI_DH5); if (hdev->features[1] & LMP_HV2) { hdev->pkt_type |= (HCI_HV2); hdev->esco_type |= (ESCO_HV2); } if (hdev->features[1] & LMP_HV3) { hdev->pkt_type |= (HCI_HV3); hdev->esco_type |= (ESCO_HV3); } if (hdev->features[3] & LMP_ESCO) hdev->esco_type |= (ESCO_EV3); if (hdev->features[4] & LMP_EV4) hdev->esco_type |= (ESCO_EV4); if (hdev->features[4] & LMP_EV5) hdev->esco_type |= (ESCO_EV5); if (hdev->features[5] & LMP_EDR_ESCO_2M) hdev->esco_type |= (ESCO_2EV3); if (hdev->features[5] & LMP_EDR_ESCO_3M) hdev->esco_type |= (ESCO_3EV3); if (hdev->features[5] & LMP_EDR_3S_ESCO) hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name, hdev->features[0], hdev->features[1], hdev->features[2], hdev->features[3], hdev->features[4], hdev->features[5], hdev->features[6], hdev->features[7]); } static void hci_cc_read_local_ext_features(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_local_ext_features *rp = (void *) skb->data; BT_DBG("%s status 0x%x", hdev->name, rp->status); if (rp->status) return; memcpy(hdev->extfeatures, rp->features, 8); hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status); } static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_buffer_size *rp = (void *) skb->data; BT_DBG("%s status 0x%x", hdev->name, rp->status); if (rp->status) return; hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu); hdev->sco_mtu = rp->sco_mtu; hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt); hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt); if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { hdev->sco_mtu = 64; hdev->sco_pkts = 8; } hdev->acl_cnt = hdev->acl_pkts; hdev->sco_cnt = hdev->sco_pkts; BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu, hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts); } 
/* Command-complete for Read BD_ADDR: latch the controller's public address. */
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (!rp->status)
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
}

/* The following handlers only log the returned status byte and wake up any
 * waiter blocked on the corresponding HCI request.
 */
static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
}

static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
}

static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
}

static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
}

/* Command-complete for PIN Code Request Reply: report the result to the
 * mgmt layer and, on success, remember the PIN length on the matching ACL
 * connection (used later for link-key type decisions).
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr,
rp->status);
	/* continuation of hci_cc_pin_code_reply */
	if (rp->status != 0)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

/* Command-complete for PIN Code Negative Reply: just forward the outcome
 * to the mgmt layer.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
							rp->status);

	hci_dev_unlock(hdev);
}

/* Command-complete for LE Read Buffer Size: record the LE ACL MTU and
 * packet count used for LE flow control.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
}

/* User Confirmation Request reply/negative-reply completions: forward the
 * result to mgmt (link type hard-coded to ACL_LINK, addr_type 0 here).
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
							0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
							ACL_LINK, 0,
							rp->status);

	hci_dev_unlock(hdev);
}

/* User Passkey Request reply completion: forward the result to mgmt.
 * NOTE(review): reuses hci_rp_user_confirm_reply for the response layout —
 * presumably both responses share the same wire format; verify in hci.h.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if
(test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
							0, rp->status);

	hci_dev_unlock(hdev);
}

/* User Passkey Request negative-reply completion: forward to mgmt. */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
							ACL_LINK, 0,
							rp->status);

	hci_dev_unlock(hdev);
}

/* Command-complete for Read Local OOB Data: hand the simple-pairing hash
 * and randomizer back to the mgmt layer.
 */
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
						rp->randomizer, rp->status);
	hci_dev_unlock(hdev);
}

/* Command-complete for LE Set Scan Parameters: a failure here aborts the
 * in-progress discovery via mgmt.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}
}

/* Command-complete for LE Set Scan Enable: track HCI_LE_SCAN state and
 * drive the discovery state machine; the sent command parameters tell us
 * whether scanning was being enabled or disabled.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);

		if (status) {
			hci_dev_lock(hdev);
			mgmt_start_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		del_timer_sync(&hdev->adv_timer);
		hci_dev_lock(hdev);
		/* workaround for non security req device. (ex, HRM v1.7)
		 * if remote device is not discoverable when le scan is
		 * started, adv entry is cleared.
		 * then addr type(random or public) can't be found in
		 * hci_connect.
* so we remove hci_adv_entries_clear(hdev)
		 */
		hci_discovery_set_state(hdev, DISCOVERY_LE_SCAN);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		if (status)
			return;

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);

		/* delay clearing cached advertising entries instead of
		 * dropping them immediately (see workaround above) */
		mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}

/* Command-complete for LE LTK Request Reply: wake the request waiter. */
static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
}

/* Command-complete for LE LTK Request Negative Reply. */
static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
}

/* Command-complete for Write LE Host Supported: report the new LE-enable
 * state to mgmt, then re-read extended feature page 1 so the cached host
 * features reflect the change.
 */
static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_cp_read_local_ext_features cp;
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (sent && test_bit(HCI_MGMT, &hdev->dev_flags)) {
		mgmt_le_enable_complete(hdev, sent->le, status);
	}

	if (status)
		return;

	cp.page = 0x01;
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
}

/* Command-complete for LE Test End: report the received-packet count to
 * mgmt (vendor extension to the mgmt interface).
 */
static void hci_cc_le_test_end(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_test_end *rp = (void *) skb->data;

	BT_DBG("hci_cc_le_test_end : %s status 0x%x, num_pkts 0x%x(%d)",
		hdev->name, rp->status, rp->num_pkts, rp->num_pkts);

	mgmt_le_test_end_complete(hdev, rp->status, rp->num_pkts);
}

/* Command-status for Inquiry: on failure, abort discovery and resume any
 * pending connection attempts; on success, mark inquiry as in progress.
 */
static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status) {
		hci_req_complete(hdev,
HCI_OP_INQUIRY, status);
		/* continuation of hci_cs_inquiry: Inquiry command rejected */
		hci_conn_check_pending(hdev);

		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
			/* [GGSM/sc47.yun] P120828-6815. Discovery fail issue */
			BT_ERR("Discovery can't be done with other commands");
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			mgmt_start_discovery_failed(hdev, status);
		}
		hci_dev_unlock(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_INQUIRY);
	hci_dev_unlock(hdev);
}

/* Command-status for Create Connection: on error, tear down the pending
 * connection (0x0c = Command Disallowed is retried up to twice via
 * BT_CONNECT2); on success, make sure a connection object exists for the
 * outgoing ACL link.
 */
static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr);
			if (conn) {
				conn->out = 1;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

/* Command-status for Add SCO Connection: only failures matter here — close
 * and delete the SCO child of the referenced ACL link.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

/* Command-status for Authentication Requested: only failures matter —
 * notify the protocol layer if the connection was still being configured.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;
BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

/* Command-status for Set Connection Encryption: mirror of the handler
 * above — only failures are acted on.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

/* Decide whether an outgoing connection still in BT_CONFIG needs an
 * explicit Authentication Requested before being reported connected.
 * Returns 1 when authentication must be initiated, 0 otherwise.
 */
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
							struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* for 2.0 pair issue.
	 *without this, connect_cfm is called abnormally.
*/

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level HIGH or if MITM protection is requested */
	if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
				conn->pending_sec_level != BT_SECURITY_HIGH &&
				!(conn->auth_type & 0x01))
		return 0;

	return 1;
}

/* Send a Remote Name Request for the given inquiry-cache entry, reusing
 * the page-scan parameters learned during inquiry. Returns the result of
 * hci_send_cmd().
 */
static inline int hci_resolve_name(struct hci_dev *hdev,
						struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

/* Kick off name resolution for the next unresolved inquiry-cache entry.
 * Returns true when a request was successfully queued.
 */
static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e != NULL && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}

/* Handle completion of a remote-name lookup: update mgmt, mark the cache
 * entry resolved, and continue resolving further names or finish
 * discovery. Must be called with hci_dev_lock held (all callers in this
 * file hold it).
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
					bdaddr_t *bdaddr, u8 *name,
					u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* this event is for remote name & class update.
	 * actual connected event is sent from hci_conn_complete_evt */
	if (conn /*&& !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)*/)
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, name,
					name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	if (e) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
		if (name)
			mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
					e->data.rssi, name, name_len);
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static void
hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		/* note: this inner cp intentionally shadows the outer cp */
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Command-status for Read Remote Features: only failures are acted on —
 * notify the protocol layer if the connection was mid-configuration.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

/* Command-status for Read Remote Extended Features: same failure-only
 * handling as above.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

/* Command-status for Setup Synchronous Connection: on failure, close and
 * delete the pending eSCO child of the referenced ACL link.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl,
*sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

/* Command-status for Sniff Mode: on failure, clear the pending mode-change
 * flag and finish any deferred SCO setup.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

/* Command-status for Exit Sniff Mode: same failure handling as above. */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

/* Command-status for Disconnect: a failure means no Disconnection Complete
 * event will follow, so report the failure to mgmt here.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn)
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
						conn->dst_type, status);

	hci_dev_unlock(hdev);
}

/* Command-status for LE Create Connection: on failure, tear down the
 * pending LE connection; on success, make sure a connection object exists
 * for the outgoing LE link.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct
hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
		conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			conn->state = BT_CLOSED;
			hci_proto_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, LE_LINK, 0, &cp->peer_addr);
			if (conn) {
				conn->dst_type = cp->peer_addr_type;
				conn->out = 1;
			} else {
				BT_ERR("No memory for new connection");
			}
		}
	}

	hci_dev_unlock(hdev);
}

/* Command-status for LE Start Encryption: log only; the result arrives via
 * the Encryption Change event.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);
}

/* Inquiry Complete event: clear the inquiry state, resume pending
 * connections and either move discovery into name resolution or stop it.
 */
static inline void hci_inquiry_complete_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status %d", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_INQUIRY)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e != NULL && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Inquiry Result event: one or more basic (no-RSSI) inquiry responses —
 * update the inquiry cache and report each device to mgmt.
 */
static inline void hci_inquiry_result_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	for
(; num_rsp; num_rsp--, info++) {
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		/* basic inquiry results carry no RSSI or SSP data */
		data.rssi		= 0x00;
		data.ssp_mode		= 0x00;

		name_known = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					info->dev_class, 0, !name_known,
					NULL, 0);
	}

	hci_dev_unlock(hdev);
}

/* Connection Complete event: finalize an ACL or SCO/eSCO connection —
 * record the handle, drive the config state machine, and notify mgmt and
 * the protocol layer.
 */
static inline void hci_conn_complete_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* a SCO completion may belong to a connection we created as
		 * eSCO; retry the lookup and downgrade the type */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);
			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
			/* update mgmt state */
			if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED,
						&conn->flags))
				mgmt_device_connected(hdev, &conn->dst,
						conn->type, conn->dst_type,
						NULL, 0, conn->dev_class);
			/* Encryption implies authentication
			 * - 0x00 Link level encryption disabled.
			 * - 0x01 Link level encryption enabled.
*/
			if (ev->encr_mode == 0x01) {
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
			}
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote version */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_version cp;
			/* ev->handle is already little-endian on the wire */
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_VERSION,
				sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < 3) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
				sizeof(cp), &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
					conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

/* Connection Request event: accept or reject an incoming ACL/SCO/eSCO
 * connection based on the link policy and blacklist.
 */
static inline void hci_conn_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;

	BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr),
					ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);

	if ((mask & HCI_LM_ACCEPT) &&
			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
							&ev->bdaddr);
		if (!conn) {
			/* pkt_type not yet used for incoming connections */
			conn = hci_conn_add(hdev, ev->link_type, 0,
							&ev->bdaddr);
			if (!conn) {
				BT_ERR("No
memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);
		conn->state = BT_CONNECT;

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
			struct hci_cp_accept_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
							sizeof(cp), &cp);
		} else {
			struct hci_cp_accept_sync_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			/* 8 kB/s in each direction (64 kbit/s CVSD voice) */
			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.max_latency    = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
							sizeof(cp), &cp);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = 0x0f; /* Connection Rejected: unacceptable BD_ADDR */
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}

/* Disconnection Complete event: mark the connection closed, notify mgmt
 * (or report a failed disconnect), and drop the connection object.
 */
static inline void hci_disconn_complete_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
			(conn->type == ACL_LINK || conn->type == LE_LINK)) {
		if (ev->status != 0)
			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
						conn->dst_type, ev->status);
		else
			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
							conn->dst_type);
	}

	if (ev->status == 0) {
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Authentication Complete event: handle key-missing retries (vendor
 * workarounds for specific chipsets), update link mode/security level and
 * continue connection setup or encryption as needed.
 */
static inline void hci_auth_complete_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct
hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* for pin or key missing case (noBonding) */
	BT_DBG("conn->remote_auth %x, conn->remote_cap %x, conn->auth_type %x, conn->io_capability %x",
		conn->remote_auth, conn->remote_cap, conn->auth_type,
		conn->io_capability);

	/* status 0x06 = PIN or Key Missing: for SSP links drop the stale
	 * link key and retry authentication immediately */
	if (ev->status == 0x06 && hdev->ssp_mode > 0 &&
					conn->ssp_mode > 0) {
		struct hci_cp_auth_requested cp;
		hci_remove_link_key(hdev, &conn->dst);
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
				sizeof(cp), &cp);
		hci_dev_unlock(hdev);
		BT_DBG("Pin or key missing !!!");
		return;
	}

/* SS_BLUETOOTH(is80.hwang) 2012.05.18 */
/* for pin code request issue */
#if defined(CONFIG_BT_CSR8811)
	if (ev->status == 0x06 ) {
		BT_ERR("Pin or key missing !!!");
		hci_remove_link_key(hdev, &conn->dst);
		hci_dev_unlock(hdev);
		return ;
	}
#endif
/* SS_BLUEZ_BT(is80.hwang) End */

	if (!ev->status) {
		if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
				test_bit(HCI_CONN_REAUTH_PEND,
					&conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type,
					conn->dst_type, ev->status);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT / 200; /* 0.01 sec */
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hdev->ssp_mode > 0 &&
						conn->ssp_mode > 0) {
			/* This is work-around for BCM2070 PC firmware problem
			 * after auth complete, slave side will send
			 * the HCI_OP_SET_CONN_ENCRYPT cmd 10ms later */
			if (conn->link_mode & HCI_LM_MASTER) {
				struct hci_cp_set_conn_encrypt cp;
				cp.handle  = ev->handle;
				cp.encrypt = 0x01;
				hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
			} else
				mod_timer(&conn->encrypt_timer,
					jiffies + msecs_to_jiffies(10));
		} else {
conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			/* This is work-around for BCM2070 PC firmware problem
			 * after auth complete, slave side will send
			 * the HCI_OP_SET_CONN_ENCRYPT cmd 10ms later */
			if ((conn->link_mode & HCI_LM_MASTER)) {
				struct hci_cp_set_conn_encrypt cp;
				cp.handle  = ev->handle;
				cp.encrypt = 0x01;
				hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
			} else
				mod_timer(&conn->encrypt_timer,
					jiffies + msecs_to_jiffies(10));
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Remote Name Request Complete event: feed the resolved name into the
 * discovery machinery, apply a vendor role-switch workaround for HM1800/
 * HM5000 headsets, and trigger pending outgoing authentication.
 */
static inline void hci_remote_name_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0) {
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				strnlen(ev->name, HCI_MAX_NAME_LENGTH));
		/* workaround for HM1800
		 * If HM1800 & incoming connection, change the role as master */
		if (conn != NULL && !conn->out &&
				(!strncmp(ev->name, "HM1800", 6) ||
				 !strncmp(ev->name, "HM5000", 6))) {
			BT_ERR("VPS's device should be change role");
			hci_conn_switch_role(conn, 0x00);
		}
	} else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void
hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		/* Pin or key missing */
		if (ev->status == 0x06 && conn->type == LE_LINK)
			hci_remove_ltk(hdev, &conn->dst);

		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);

		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			mgmt_encrypt_change(hdev, &conn->dst, ev->status);
	}

	hci_dev_unlock(hdev);
}

/* Change Connection Link Key Complete event: mark the link secure and
 * confirm the key change to the upper layers.
 */
static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			conn->link_mode |= HCI_LM_SECURE;

		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

/* Remote Features event: cache the remote LMP features, then either fetch
 * extended features (SSP-capable peers) or request the remote name, before
 * finishing connection setup.
 */
static inline void hci_remote_features_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		memcpy(conn->features, ev->features, 8);
		mgmt_remote_features(hdev, &conn->dst, ev->features);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status &&
lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		/* both sides support SSP: fetch extended feature page 1 */
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
							sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
					conn->dst_type, NULL, 0,
					conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Remote Version event: kick off a Read Remote Features for the same
 * handle and forward the version info to mgmt.
 * NOTE(review): cp.handle is copied from ev->handle without cpu_to_le16 —
 * presumably already little-endian from the wire; verify against hci.h.
 */
static inline void hci_remote_version_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_remote_version *ev = (void *) skb->data;
	struct hci_cp_read_remote_features cp;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	cp.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, sizeof(cp), &cp);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		mgmt_remote_version(hdev, &conn->dst, ev->lmp_ver,
				ev->manufacturer, ev->lmp_subver);

unlock:
	hci_dev_unlock(hdev);
}

/* QoS Setup Complete event: intentionally ignored (log only). */
static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

/* monitoring of the RSSI of the link between two Bluetooth devices */
/* Command-complete for Read RSSI: forward the RSSI (or a failure) for the
 * referenced connection to the mgmt layer.
 */
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn) {
		mgmt_read_rssi_failed(hdev);
		return;
	}

	mgmt_read_rssi_complete(hdev, &conn->dst, rp->rssi, rp->status);
}

static inline void
hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_cmd_complete *ev = (void *) skb->data; __u16 opcode; skb_pull(skb, sizeof(*ev)); opcode = __le16_to_cpu(ev->opcode); switch (opcode) { case HCI_OP_INQUIRY_CANCEL: hci_cc_inquiry_cancel(hdev, skb); break; case HCI_OP_EXIT_PERIODIC_INQ: hci_cc_exit_periodic_inq(hdev, skb); break; case HCI_OP_REMOTE_NAME_REQ_CANCEL: hci_cc_remote_name_req_cancel(hdev, skb); break; case HCI_OP_ROLE_DISCOVERY: hci_cc_role_discovery(hdev, skb); break; case HCI_OP_READ_LINK_POLICY: hci_cc_read_link_policy(hdev, skb); break; case HCI_OP_WRITE_LINK_POLICY: hci_cc_write_link_policy(hdev, skb); break; case HCI_OP_READ_DEF_LINK_POLICY: hci_cc_read_def_link_policy(hdev, skb); break; case HCI_OP_WRITE_DEF_LINK_POLICY: hci_cc_write_def_link_policy(hdev, skb); break; case HCI_OP_RESET: hci_cc_reset(hdev, skb); break; case HCI_OP_WRITE_LOCAL_NAME: hci_cc_write_local_name(hdev, skb); break; case HCI_OP_READ_LOCAL_NAME: hci_cc_read_local_name(hdev, skb); break; case HCI_OP_WRITE_AUTH_ENABLE: hci_cc_write_auth_enable(hdev, skb); break; case HCI_OP_WRITE_ENCRYPT_MODE: hci_cc_write_encrypt_mode(hdev, skb); break; case HCI_OP_WRITE_SCAN_ENABLE: hci_cc_write_scan_enable(hdev, skb); break; case HCI_OP_READ_CLASS_OF_DEV: hci_cc_read_class_of_dev(hdev, skb); break; case HCI_OP_WRITE_CLASS_OF_DEV: hci_cc_write_class_of_dev(hdev, skb); break; case HCI_OP_READ_VOICE_SETTING: hci_cc_read_voice_setting(hdev, skb); break; case HCI_OP_WRITE_VOICE_SETTING: hci_cc_write_voice_setting(hdev, skb); break; case HCI_OP_HOST_BUFFER_SIZE: hci_cc_host_buffer_size(hdev, skb); break; case HCI_OP_READ_SSP_MODE: hci_cc_read_ssp_mode(hdev, skb); break; case HCI_OP_WRITE_SSP_MODE: hci_cc_write_ssp_mode(hdev, skb); break; case HCI_OP_READ_LOCAL_VERSION: hci_cc_read_local_version(hdev, skb); break; case HCI_OP_READ_LOCAL_COMMANDS: hci_cc_read_local_commands(hdev, skb); break; case HCI_OP_READ_LOCAL_FEATURES: hci_cc_read_local_features(hdev, skb); break; 
case HCI_OP_READ_LOCAL_EXT_FEATURES: hci_cc_read_local_ext_features(hdev, skb); break; case HCI_OP_READ_BUFFER_SIZE: hci_cc_read_buffer_size(hdev, skb); break; case HCI_OP_READ_BD_ADDR: hci_cc_read_bd_addr(hdev, skb); break; case HCI_OP_WRITE_CA_TIMEOUT: hci_cc_write_ca_timeout(hdev, skb); break; case HCI_OP_DELETE_STORED_LINK_KEY: hci_cc_delete_stored_link_key(hdev, skb); break; case HCI_OP_SET_EVENT_MASK: hci_cc_set_event_mask(hdev, skb); break; case HCI_OP_WRITE_INQUIRY_MODE: hci_cc_write_inquiry_mode(hdev, skb); break; case HCI_OP_READ_INQ_RSP_TX_POWER: hci_cc_read_inq_rsp_tx_power(hdev, skb); break; case HCI_OP_SET_EVENT_FLT: hci_cc_set_event_flt(hdev, skb); break; case HCI_OP_PIN_CODE_REPLY: hci_cc_pin_code_reply(hdev, skb); break; case HCI_OP_PIN_CODE_NEG_REPLY: hci_cc_pin_code_neg_reply(hdev, skb); break; case HCI_OP_READ_LOCAL_OOB_DATA: hci_cc_read_local_oob_data_reply(hdev, skb); break; case HCI_OP_LE_READ_BUFFER_SIZE: hci_cc_le_read_buffer_size(hdev, skb); break; case HCI_OP_USER_CONFIRM_REPLY: hci_cc_user_confirm_reply(hdev, skb); break; case HCI_OP_USER_CONFIRM_NEG_REPLY: hci_cc_user_confirm_neg_reply(hdev, skb); break; case HCI_OP_USER_PASSKEY_REPLY: hci_cc_user_passkey_reply(hdev, skb); break; case HCI_OP_USER_PASSKEY_NEG_REPLY: hci_cc_user_passkey_neg_reply(hdev, skb); break; case HCI_OP_LE_SET_SCAN_PARAM: hci_cc_le_set_scan_param(hdev, skb); break; case HCI_OP_LE_SET_SCAN_ENABLE: hci_cc_le_set_scan_enable(hdev, skb); break; case HCI_OP_LE_LTK_REPLY: hci_cc_le_ltk_reply(hdev, skb); break; case HCI_OP_LE_LTK_NEG_REPLY: hci_cc_le_ltk_neg_reply(hdev, skb); break; case HCI_OP_WRITE_LE_HOST_SUPPORTED: hci_cc_write_le_host_supported(hdev, skb); break; /* monitoring of the RSSI of the link between two Bluetooth devices */ case HCI_OP_READ_RSSI: hci_cc_read_rssi(hdev, skb); break; case HCI_OP_LE_TEST_END: hci_cc_le_test_end(hdev, skb); break; default: BT_DBG("%s opcode 0x%x", hdev->name, opcode); break; } if (ev->opcode != HCI_OP_NOP) 
del_timer(&hdev->cmd_timer); if (ev->ncmd) { atomic_set(&hdev->cmd_cnt, 1); if (!skb_queue_empty(&hdev->cmd_q)) tasklet_schedule(&hdev->cmd_task); } } static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_cmd_status *ev = (void *) skb->data; __u16 opcode; skb_pull(skb, sizeof(*ev)); opcode = __le16_to_cpu(ev->opcode); switch (opcode) { case HCI_OP_INQUIRY: hci_cs_inquiry(hdev, ev->status); break; case HCI_OP_CREATE_CONN: hci_cs_create_conn(hdev, ev->status); break; case HCI_OP_ADD_SCO: hci_cs_add_sco(hdev, ev->status); break; case HCI_OP_AUTH_REQUESTED: hci_cs_auth_requested(hdev, ev->status); break; case HCI_OP_SET_CONN_ENCRYPT: hci_cs_set_conn_encrypt(hdev, ev->status); break; case HCI_OP_REMOTE_NAME_REQ: hci_cs_remote_name_req(hdev, ev->status); break; case HCI_OP_READ_REMOTE_FEATURES: hci_cs_read_remote_features(hdev, ev->status); break; case HCI_OP_READ_REMOTE_EXT_FEATURES: hci_cs_read_remote_ext_features(hdev, ev->status); break; case HCI_OP_SETUP_SYNC_CONN: hci_cs_setup_sync_conn(hdev, ev->status); break; case HCI_OP_SNIFF_MODE: hci_cs_sniff_mode(hdev, ev->status); break; case HCI_OP_EXIT_SNIFF_MODE: hci_cs_exit_sniff_mode(hdev, ev->status); break; case HCI_OP_DISCONNECT: hci_cs_disconnect(hdev, ev->status); break; case HCI_OP_LE_CREATE_CONN: hci_cs_le_create_conn(hdev, ev->status); break; case HCI_OP_LE_START_ENC: hci_cs_le_start_enc(hdev, ev->status); break; default: BT_DBG("%s opcode 0x%x", hdev->name, opcode); break; } if (ev->opcode != HCI_OP_NOP) del_timer(&hdev->cmd_timer); if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { atomic_set(&hdev->cmd_cnt, 1); if (!skb_queue_empty(&hdev->cmd_q)) tasklet_schedule(&hdev->cmd_task); } } static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_role_change *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s status %d", hdev->name, ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, 
&ev->bdaddr); if (conn) { if (!ev->status) { if (ev->role) conn->link_mode &= ~HCI_LM_MASTER; else conn->link_mode |= HCI_LM_MASTER; } clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); hci_role_switch_cfm(conn, ev->status, ev->role); } hci_dev_unlock(hdev); } static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_num_comp_pkts *ev = (void *) skb->data; __le16 *ptr; int i; skb_pull(skb, sizeof(*ev)); BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl); if (skb->len < ev->num_hndl * 4) { BT_DBG("%s bad parameters", hdev->name); return; } tasklet_disable(&hdev->tx_task); for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) { struct hci_conn *conn; __u16 handle, count; handle = get_unaligned_le16(ptr++); count = get_unaligned_le16(ptr++); conn = hci_conn_hash_lookup_handle(hdev, handle); if (conn) { conn->sent -= count; if (conn->type == ACL_LINK) { hdev->acl_cnt += count; if (hdev->acl_cnt > hdev->acl_pkts) hdev->acl_cnt = hdev->acl_pkts; } else if (conn->type == LE_LINK) { if (hdev->le_pkts) { hdev->le_cnt += count; if (hdev->le_cnt > hdev->le_pkts) hdev->le_cnt = hdev->le_pkts; } else { hdev->acl_cnt += count; if (hdev->acl_cnt > hdev->acl_pkts) hdev->acl_cnt = hdev->acl_pkts; } } else { hdev->sco_cnt += count; if (hdev->sco_cnt > hdev->sco_pkts) hdev->sco_cnt = hdev->sco_pkts; } } } tasklet_schedule(&hdev->tx_task); tasklet_enable(&hdev->tx_task); } static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_mode_change *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s status %d", hdev->name, ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn) { conn->mode = ev->mode; conn->interval = __le16_to_cpu(ev->interval); if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { if (conn->mode == HCI_CM_ACTIVE) conn->power_save = 1; else conn->power_save = 0; } if 
(test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) hci_sco_setup(conn, ev->status); } hci_dev_unlock(hdev); } static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_pin_code_req *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) goto unlock; if (conn->state == BT_CONNECTED) { hci_conn_hold(conn); conn->disc_timeout = HCI_PAIRING_TIMEOUT; hci_conn_put(conn); } if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags)) hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(ev->bdaddr), &ev->bdaddr); else if (test_bit(HCI_MGMT, &hdev->dev_flags)) { u8 secure; if (conn->pending_sec_level == BT_SECURITY_HIGH) secure = 1; else secure = 0; mgmt_pin_code_request(hdev, &ev->bdaddr, secure); } unlock: hci_dev_unlock(hdev); } static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_link_key_req *ev = (void *) skb->data; struct hci_cp_link_key_reply cp; struct hci_conn *conn; struct link_key *key; BT_DBG("%s", hdev->name); if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags)) return; hci_dev_lock(hdev); key = hci_find_link_key(hdev, &ev->bdaddr); if (!key) { BT_DBG("%s link key not found for %s", hdev->name, batostr(&ev->bdaddr)); goto not_found; } BT_DBG("%s found key type %u for %s", hdev->name, key->type, batostr(&ev->bdaddr)); if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) && key->type == HCI_LK_DEBUG_COMBINATION) { BT_DBG("%s ignoring debug key", hdev->name); goto not_found; } conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (conn) { if (key->type == HCI_LK_UNAUTH_COMBINATION && conn->auth_type != 0xff && (conn->auth_type & 0x01)) { BT_DBG("%s ignoring unauthenticated key", hdev->name); goto not_found; } /* - This is mgmt only. hciops doesn't checking like this. - * If device is pre 2.1 & security level is high, combination key type is required. 
* (core spec 4.0 GAP 1671p) * And 16 digit PIN is recommended. (but not mandatory) * Now, Google API only support high & low level for outgoing. * So if application use high level security, 16 digit PIN is needed. (mgmt based) * But Google is still using hciops, There is no problem in their platform. * This can make confusion to 3rd party developer. * Disable this part for same action with hciops. * and this should be checked after google's update. */ /* *if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && * conn->pending_sec_level == BT_SECURITY_HIGH) { * BT_DBG("%s ignoring key unauthenticated for high \ * security", hdev->name); * goto not_found; *} */ conn->key_type = key->type; conn->pin_length = key->pin_len; } bacpy(&cp.bdaddr, &ev->bdaddr); memcpy(cp.link_key, key->val, 16); hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); hci_dev_unlock(hdev); return; not_found: hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); hci_dev_unlock(hdev); } static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_link_key_notify *ev = (void *) skb->data; struct hci_conn *conn; u8 pin_len = 0; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (conn) { hci_conn_hold(conn); conn->disc_timeout = HCI_DISCONN_TIMEOUT; pin_len = conn->pin_length; if (ev->key_type != HCI_LK_CHANGED_COMBINATION) conn->key_type = ev->key_type; hci_conn_put(conn); } if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags)) hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key, ev->key_type, pin_len); hci_dev_unlock(hdev); } static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_clock_offset *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s status %d", hdev->name, ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn && !ev->status) { struct inquiry_entry *ie; ie = 
hci_inquiry_cache_lookup(hdev, &conn->dst); if (ie) { ie->data.clock_offset = ev->clock_offset; ie->timestamp = jiffies; } } hci_dev_unlock(hdev); } static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_pkt_type_change *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s status %d", hdev->name, ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn && !ev->status) conn->pkt_type = __le16_to_cpu(ev->pkt_type); hci_dev_unlock(hdev); } static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; struct inquiry_entry *ie; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); if (ie) { ie->data.pscan_rep_mode = ev->pscan_rep_mode; ie->timestamp = jiffies; } hci_dev_unlock(hdev); } static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct inquiry_data data; int num_rsp = *((__u8 *) skb->data); bool name_known; BT_DBG("%s num_rsp %d", hdev->name, num_rsp); if (!num_rsp) return; hci_dev_lock(hdev); if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { struct inquiry_info_with_rssi_and_pscan_mode *info; info = (void *) (skb->data + 1); for (; num_rsp; num_rsp--, info++) { bacpy(&data.bdaddr, &info->bdaddr); data.pscan_rep_mode = info->pscan_rep_mode; data.pscan_period_mode = info->pscan_period_mode; data.pscan_mode = info->pscan_mode; memcpy(data.dev_class, info->dev_class, 3); data.clock_offset = info->clock_offset; data.rssi = info->rssi; data.ssp_mode = 0x00; name_known = hci_inquiry_cache_update(hdev, &data, false); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, info->rssi, !name_known, NULL, 0); } } else { struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); for (; num_rsp; num_rsp--, info++) { bacpy(&data.bdaddr, &info->bdaddr); 
data.pscan_rep_mode = info->pscan_rep_mode; data.pscan_period_mode = info->pscan_period_mode; data.pscan_mode = 0x00; memcpy(data.dev_class, info->dev_class, 3); data.clock_offset = info->clock_offset; data.rssi = info->rssi; data.ssp_mode = 0x00; name_known = hci_inquiry_cache_update(hdev, &data, false); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, info->rssi, !name_known, NULL, 0); } } hci_dev_unlock(hdev); } static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_remote_ext_features *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (!conn) goto unlock; if (!ev->status && ev->page == 0x01) { struct inquiry_entry *ie; ie = hci_inquiry_cache_lookup(hdev, &conn->dst); if (ie) ie->data.ssp_mode = (ev->features[0] & 0x01); conn->ssp_mode = (ev->features[0] & 0x01); } if (conn->state != BT_CONFIG) goto unlock; if (!ev->status) { struct hci_cp_remote_name_req cp; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, &conn->dst); cp.pscan_rep_mode = 0x02; hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) mgmt_device_connected(hdev, &conn->dst, conn->type, conn->dst_type, NULL, 0, conn->dev_class); if (!hci_outgoing_auth_needed(hdev, conn)) { conn->state = BT_CONNECTED; hci_proto_connect_cfm(conn, ev->status); hci_conn_put(conn); } unlock: hci_dev_unlock(hdev); } static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_sync_conn_complete *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s status %d", hdev->name, ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); if (!conn) { if (ev->link_type == ESCO_LINK) goto unlock; conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); if (!conn) goto unlock; 
conn->type = SCO_LINK; } switch (ev->status) { case 0x00: conn->handle = __le16_to_cpu(ev->handle); conn->state = BT_CONNECTED; hci_conn_hold_device(conn); hci_conn_add_sysfs(conn); break; /*case 0x10:*//* Connection Accept Timeout */ case 0x11: /* Unsupported Feature or Parameter Value */ case 0x1c: /* SCO interval rejected */ case 0x1a: /* Unsupported Remote Feature */ case 0x1f: /* Unspecified error */ if (conn->out && conn->attempt < 2 && !conn->hdev->is_wbs) { conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | (hdev->esco_type & EDR_ESCO_MASK); hci_setup_sync(conn, conn->link->handle); goto unlock; } /* fall through */ default: conn->state = BT_CLOSED; break; } hci_proto_connect_cfm(conn, ev->status); if (ev->status) hci_conn_del(conn); unlock: hci_dev_unlock(hdev); } static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb) { BT_DBG("%s", hdev->name); } static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_sniff_subrate *ev = (void *) skb->data; BT_DBG("%s status %d", hdev->name, ev->status); } static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct inquiry_data data; struct extended_inquiry_info *info = (void *) (skb->data + 1); int num_rsp = *((__u8 *) skb->data); BT_DBG("%s num_rsp %d", hdev->name, num_rsp); if (!num_rsp) return; hci_dev_lock(hdev); for (; num_rsp; num_rsp--, info++) { bool name_known; __u8 eir[sizeof(info->data) + 1]; size_t eir_len; bacpy(&data.bdaddr, &info->bdaddr); data.pscan_rep_mode = info->pscan_rep_mode; data.pscan_period_mode = info->pscan_period_mode; data.pscan_mode = 0x00; memcpy(data.dev_class, info->dev_class, 3); data.clock_offset = info->clock_offset; data.rssi = info->rssi; data.ssp_mode = 0x01; /*__u8 eir[sizeof(info->data) + 1]; *size_t eir_len; */ eir_len = eir_length(info->data, sizeof(info->data)); memset(eir, 0, sizeof(eir)); memcpy(eir, info->data, eir_len); if (test_bit(HCI_MGMT, 
&hdev->dev_flags)) name_known = eir_has_data_type(info->data, sizeof(info->data), EIR_NAME_COMPLETE); else name_known = true; name_known = hci_inquiry_cache_update(hdev, &data, name_known); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, info->rssi, !name_known, eir, eir_len); } hci_dev_unlock(hdev); } static inline u8 hci_get_auth_req(struct hci_conn *conn) { /* If remote requests dedicated bonding follow that lead */ if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) { /* If both remote and local IO capabilities allow MITM * protection then require it, otherwise don't */ if (conn->remote_cap == 0x03 || conn->io_capability == 0x03) return 0x02; else return 0x03; } /* If remote requests no-bonding follow that lead */ if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01) return conn->remote_auth | (conn->auth_type & 0x01); return conn->auth_type; } static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_io_capa_request *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) goto unlock; hci_conn_hold(conn); if (!test_bit(HCI_MGMT, &hdev->dev_flags)) goto unlock; if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) || (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { struct hci_cp_io_capability_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); /* Change the IO capability from KeyboardDisplay * to DisplayYesNo as it is not supported by BT spec. */ cp.capability = (conn->io_capability == 0x04) ? 
0x01 : conn->io_capability; conn->auth_type = hci_get_auth_req(conn); cp.authentication = conn->auth_type; if ((conn->out == 0x01 || conn->remote_oob == 0x01) && hci_find_remote_oob_data(hdev, &conn->dst)) cp.oob_data = 0x01; else cp.oob_data = 0x00; hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, sizeof(cp), &cp); } else { struct hci_cp_io_capability_neg_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); cp.reason = 0x18; /* Pairing not allowed */ hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, sizeof(cp), &cp); } unlock: hci_dev_unlock(hdev); } static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_io_capa_reply *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) goto unlock; conn->remote_cap = ev->capability; conn->remote_oob = ev->oob_data; conn->remote_auth = ev->authentication; unlock: hci_dev_unlock(hdev); } static inline void hci_user_passkey_notification_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_user_passkey_notification *ev = (void *) skb->data; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); if (test_bit(HCI_MGMT, &hdev->dev_flags)) mgmt_user_passkey_notification(hdev, &ev->bdaddr, ev->passkey); hci_dev_unlock(hdev); } static inline void hci_user_confirm_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_user_confirm_req *ev = (void *) skb->data; int loc_mitm, rem_mitm, confirm_hint = 0; struct hci_conn *conn; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); if (!test_bit(HCI_MGMT, &hdev->dev_flags)) goto unlock; conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) goto unlock; loc_mitm = (conn->auth_type & 0x01); rem_mitm = (conn->remote_auth & 0x01); /* If we require MITM but the remote device can't provide that * (it has NoInputNoOutput) then reject the confirmation * request. 
The only exception is when we're dedicated bonding * initiators (connect_cfm_cb set) since then we always have the MITM * bit set. */ if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) { BT_DBG("Rejecting request: remote device can't provide MITM"); hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, sizeof(ev->bdaddr), &ev->bdaddr); goto unlock; } /* If no side requires MITM protection; auto-accept */ if ((!loc_mitm || conn->remote_cap == 0x03) && (!rem_mitm || conn->io_capability == 0x03)) { /* If we're not the initiators request authorization to * proceed from user space (mgmt_user_confirm with * confirm_hint set to 1). */ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { BT_DBG("Confirming auto-accept as acceptor"); confirm_hint = 1; goto confirm; } BT_DBG("Auto-accept of user confirmation with %ums delay", hdev->auto_accept_delay); if (hdev->auto_accept_delay > 0) { int delay = msecs_to_jiffies(hdev->auto_accept_delay); mod_timer(&conn->auto_accept_timer, jiffies + delay); goto unlock; } hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(ev->bdaddr), &ev->bdaddr); goto unlock; } confirm: mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey, confirm_hint); unlock: hci_dev_unlock(hdev); } static inline void hci_user_passkey_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_user_passkey_req *ev = (void *) skb->data; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); if (test_bit(HCI_MGMT, &hdev->dev_flags)) mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); hci_dev_unlock(hdev); } static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_simple_pair_complete *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) goto unlock; /* To avoid duplicate auth_failed events to user space we check * the HCI_CONN_AUTH_PEND flag which will be set if we * 
initiated the authentication. A traditional auth_complete * event gets always produced as initiator and is also mapped to * the mgmt_auth_failed event */ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0) { mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type, ev->status); conn->out = 1; conn->disc_timeout = HCI_DISCONN_TIMEOUT / 200; /* 0.01 sec */ } hci_conn_put(conn); unlock: hci_dev_unlock(hdev); } static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_remote_host_features *ev = (void *) skb->data; struct inquiry_entry *ie; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); if (ie) ie->data.ssp_mode = (ev->features[0] & 0x01); hci_dev_unlock(hdev); } static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_remote_oob_data_request *ev = (void *) skb->data; struct oob_data *data; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); if (!test_bit(HCI_MGMT, &hdev->dev_flags)) goto unlock; data = hci_find_remote_oob_data(hdev, &ev->bdaddr); if (data) { struct hci_cp_remote_oob_data_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); memcpy(cp.hash, data->hash, sizeof(cp.hash)); memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer)); hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp), &cp); } else { struct hci_cp_remote_oob_data_neg_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp), &cp); } unlock: hci_dev_unlock(hdev); } static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_le_conn_complete *ev = (void *) skb->data; struct hci_conn *conn; BT_DBG("%s status %d", hdev->name, ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr); /* after connection canceled, * use the addr as conn->dst instead of ev->bdaddr (00:00:00:00:00:00) */ if (ev->status == 0x02 && 
!conn) { conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); bacpy(&ev->bdaddr, &conn->dst); } if (!conn) { conn = hci_conn_add(hdev, LE_LINK, 0, &ev->bdaddr); if (!conn) { BT_ERR("No memory for new connection"); hci_dev_unlock(hdev); return; } conn->dst_type = ev->bdaddr_type; } if (ev->status) { mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, conn->dst_type, ev->status); hci_proto_connect_cfm(conn, ev->status); conn->state = BT_CLOSED; hci_conn_del(conn); goto unlock; } if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) mgmt_device_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type, NULL, 0, 0); conn->sec_level = BT_SECURITY_LOW; conn->handle = __le16_to_cpu(ev->handle); conn->state = BT_CONNECTED; hci_conn_hold_device(conn); hci_conn_add_sysfs(conn); hci_proto_connect_cfm(conn, ev->status); unlock: hci_dev_unlock(hdev); } static inline void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) { u8 num_reports = skb->data[0]; void *ptr = &skb->data[1]; s8 rssi; hci_dev_lock(hdev); while (num_reports--) { struct hci_ev_le_advertising_info *ev = ptr; hci_add_adv_entry(hdev, ev); rssi = ev->data[ev->length]; mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type, NULL, rssi, 0, ev->data, ev->length); ptr += sizeof(*ev) + ev->length + 1; } hci_dev_unlock(hdev); } static inline void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_le_ltk_req *ev = (void *) skb->data; struct hci_cp_le_ltk_reply cp; struct hci_cp_le_ltk_neg_reply neg; struct hci_conn *conn; struct smp_ltk *ltk; BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle)); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn == NULL) goto not_found; ltk = hci_find_ltk(hdev, ev->ediv, ev->random); if (ltk == NULL) goto not_found; memcpy(cp.ltk, ltk->val, sizeof(ltk->val)); cp.handle = cpu_to_le16(conn->handle); if (ltk->authenticated) conn->sec_level = BT_SECURITY_HIGH; 
hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); if (ltk->type & HCI_SMP_STK) { list_del(&ltk->list); kfree(ltk); } hci_dev_unlock(hdev); return; not_found: neg.handle = ev->handle; hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg); hci_dev_unlock(hdev); } static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_le_meta *le_ev = (void *) skb->data; skb_pull(skb, sizeof(*le_ev)); switch (le_ev->subevent) { case HCI_EV_LE_CONN_COMPLETE: hci_le_conn_complete_evt(hdev, skb); break; case HCI_EV_LE_ADVERTISING_REPORT: hci_le_adv_report_evt(hdev, skb); break; case HCI_EV_LE_LTK_REQ: hci_le_ltk_request_evt(hdev, skb); break; default: break; } } void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_event_hdr *hdr = (void *) skb->data; __u8 event = hdr->evt; skb_pull(skb, HCI_EVENT_HDR_SIZE); switch (event) { case HCI_EV_INQUIRY_COMPLETE: hci_inquiry_complete_evt(hdev, skb); break; case HCI_EV_INQUIRY_RESULT: hci_inquiry_result_evt(hdev, skb); break; case HCI_EV_CONN_COMPLETE: hci_conn_complete_evt(hdev, skb); break; case HCI_EV_CONN_REQUEST: hci_conn_request_evt(hdev, skb); break; case HCI_EV_DISCONN_COMPLETE: hci_disconn_complete_evt(hdev, skb); break; case HCI_EV_AUTH_COMPLETE: hci_auth_complete_evt(hdev, skb); break; case HCI_EV_REMOTE_NAME: hci_remote_name_evt(hdev, skb); break; case HCI_EV_ENCRYPT_CHANGE: hci_encrypt_change_evt(hdev, skb); break; case HCI_EV_CHANGE_LINK_KEY_COMPLETE: hci_change_link_key_complete_evt(hdev, skb); break; case HCI_EV_REMOTE_FEATURES: hci_remote_features_evt(hdev, skb); break; case HCI_EV_REMOTE_VERSION: hci_remote_version_evt(hdev, skb); break; case HCI_EV_QOS_SETUP_COMPLETE: hci_qos_setup_complete_evt(hdev, skb); break; case HCI_EV_CMD_COMPLETE: hci_cmd_complete_evt(hdev, skb); break; case HCI_EV_CMD_STATUS: hci_cmd_status_evt(hdev, skb); break; case HCI_EV_ROLE_CHANGE: hci_role_change_evt(hdev, skb); break; case HCI_EV_NUM_COMP_PKTS: 
hci_num_comp_pkts_evt(hdev, skb); break; case HCI_EV_MODE_CHANGE: hci_mode_change_evt(hdev, skb); break; case HCI_EV_PIN_CODE_REQ: hci_pin_code_request_evt(hdev, skb); break; case HCI_EV_LINK_KEY_REQ: hci_link_key_request_evt(hdev, skb); break; case HCI_EV_LINK_KEY_NOTIFY: hci_link_key_notify_evt(hdev, skb); break; case HCI_EV_CLOCK_OFFSET: hci_clock_offset_evt(hdev, skb); break; case HCI_EV_PKT_TYPE_CHANGE: hci_pkt_type_change_evt(hdev, skb); break; case HCI_EV_PSCAN_REP_MODE: hci_pscan_rep_mode_evt(hdev, skb); break; case HCI_EV_INQUIRY_RESULT_WITH_RSSI: hci_inquiry_result_with_rssi_evt(hdev, skb); break; case HCI_EV_REMOTE_EXT_FEATURES: hci_remote_ext_features_evt(hdev, skb); break; case HCI_EV_SYNC_CONN_COMPLETE: hci_sync_conn_complete_evt(hdev, skb); break; case HCI_EV_SYNC_CONN_CHANGED: hci_sync_conn_changed_evt(hdev, skb); break; case HCI_EV_SNIFF_SUBRATE: hci_sniff_subrate_evt(hdev, skb); break; case HCI_EV_EXTENDED_INQUIRY_RESULT: hci_extended_inquiry_result_evt(hdev, skb); break; case HCI_EV_IO_CAPA_REQUEST: hci_io_capa_request_evt(hdev, skb); break; case HCI_EV_IO_CAPA_REPLY: hci_io_capa_reply_evt(hdev, skb); break; case HCI_EV_USER_PASSKEY_NOTIFICATION: hci_user_passkey_notification_evt(hdev, skb); break; case HCI_EV_USER_CONFIRM_REQUEST: hci_user_confirm_request_evt(hdev, skb); break; case HCI_EV_USER_PASSKEY_REQUEST: hci_user_passkey_request_evt(hdev, skb); break; case HCI_EV_SIMPLE_PAIR_COMPLETE: hci_simple_pair_complete_evt(hdev, skb); break; case HCI_EV_REMOTE_HOST_FEATURES: hci_remote_host_features_evt(hdev, skb); break; case HCI_EV_LE_META: hci_le_meta_evt(hdev, skb); break; case HCI_EV_REMOTE_OOB_DATA_REQUEST: hci_remote_oob_data_request_evt(hdev, skb); break; default: BT_DBG("%s event 0x%x", hdev->name, event); break; } kfree_skb(skb); hdev->stat.evt_rx++; } /* Generate internal stack event */ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data) { struct hci_event_hdr *hdr; struct hci_ev_stack_internal *ev; struct sk_buff 
*skb; skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC); if (!skb) return; hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE); hdr->evt = HCI_EV_STACK_INTERNAL; hdr->plen = sizeof(*ev) + dlen; ev = (void *) skb_put(skb, sizeof(*ev) + dlen); ev->type = type; memcpy(ev->data, data, dlen); bt_cb(skb)->incoming = 1; __net_timestamp(skb); bt_cb(skb)->pkt_type = HCI_EVENT_PKT; skb->dev = (void *) hdev; hci_send_to_sock(hdev, skb, NULL); kfree_skb(skb); }
gpl-2.0
shinkumara/sprout_shinkumara_kernel
fs/btrfs/file-item.c
555
23290
/* * Copyright (C) 2007 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/bio.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include "ctree.h" #include "disk-io.h" #include "transaction.h" #include "print-tree.h" #define __MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \ sizeof(struct btrfs_item) * 2) / \ size) - 1)) #define MAX_CSUM_ITEMS(r, size) (min(__MAX_CSUM_ITEMS(r, size), PAGE_CACHE_SIZE)) #define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \ sizeof(struct btrfs_ordered_sum)) / \ sizeof(struct btrfs_sector_sum) * \ (r)->sectorsize - (r)->sectorsize) int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 objectid, u64 pos, u64 disk_offset, u64 disk_num_bytes, u64 num_bytes, u64 offset, u64 ram_bytes, u8 compression, u8 encryption, u16 other_encoding) { int ret = 0; struct btrfs_file_extent_item *item; struct btrfs_key file_key; struct btrfs_path *path; struct extent_buffer *leaf; path = btrfs_alloc_path(); if (!path) return -ENOMEM; file_key.objectid = objectid; file_key.offset = pos; btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY); path->leave_spinning = 1; ret = btrfs_insert_empty_item(trans, root, path, &file_key, sizeof(*item)); if (ret < 0) goto out; BUG_ON(ret); /* Can't happen */ leaf = path->nodes[0]; item = btrfs_item_ptr(leaf, path->slots[0], struct 
btrfs_file_extent_item); btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset); btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes); btrfs_set_file_extent_offset(leaf, item, offset); btrfs_set_file_extent_num_bytes(leaf, item, num_bytes); btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes); btrfs_set_file_extent_generation(leaf, item, trans->transid); btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG); btrfs_set_file_extent_compression(leaf, item, compression); btrfs_set_file_extent_encryption(leaf, item, encryption); btrfs_set_file_extent_other_encoding(leaf, item, other_encoding); btrfs_mark_buffer_dirty(leaf); out: btrfs_free_path(path); return ret; } struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 bytenr, int cow) { int ret; struct btrfs_key file_key; struct btrfs_key found_key; struct btrfs_csum_item *item; struct extent_buffer *leaf; u64 csum_offset = 0; u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); int csums_in_item; file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; file_key.offset = bytenr; btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY); ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow); if (ret < 0) goto fail; leaf = path->nodes[0]; if (ret > 0) { ret = 1; if (path->slots[0] == 0) goto fail; path->slots[0]--; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY) goto fail; csum_offset = (bytenr - found_key.offset) >> root->fs_info->sb->s_blocksize_bits; csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]); csums_in_item /= csum_size; if (csum_offset >= csums_in_item) { ret = -EFBIG; goto fail; } } item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); item = (struct btrfs_csum_item *)((unsigned char *)item + csum_offset * csum_size); return item; fail: if (ret > 0) ret = -ENOENT; return ERR_PTR(ret); } int 
btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid, u64 offset, int mod) { int ret; struct btrfs_key file_key; int ins_len = mod < 0 ? -1 : 0; int cow = mod != 0; file_key.objectid = objectid; file_key.offset = offset; btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY); ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow); return ret; } static int __btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, struct bio *bio, u64 logical_offset, u32 *dst, int dio) { u32 sum; struct bio_vec *bvec = bio->bi_io_vec; int bio_index = 0; u64 offset = 0; u64 item_start_offset = 0; u64 item_last_offset = 0; u64 disk_bytenr; u32 diff; u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); int ret; struct btrfs_path *path; struct btrfs_csum_item *item = NULL; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; path = btrfs_alloc_path(); if (!path) return -ENOMEM; if (bio->bi_size > PAGE_CACHE_SIZE * 8) path->reada = 2; WARN_ON(bio->bi_vcnt <= 0); /* * the free space stuff is only read when it hasn't been * updated in the current transaction. So, we can safely * read from the commit root and sidestep a nasty deadlock * between reading the free space cache and updating the csum tree. 
*/ if (btrfs_is_free_space_inode(root, inode)) { path->search_commit_root = 1; path->skip_locking = 1; } disk_bytenr = (u64)bio->bi_sector << 9; if (dio) offset = logical_offset; while (bio_index < bio->bi_vcnt) { if (!dio) offset = page_offset(bvec->bv_page) + bvec->bv_offset; ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum); if (ret == 0) goto found; if (!item || disk_bytenr < item_start_offset || disk_bytenr >= item_last_offset) { struct btrfs_key found_key; u32 item_size; if (item) btrfs_release_path(path); item = btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, disk_bytenr, 0); if (IS_ERR(item)) { ret = PTR_ERR(item); if (ret == -ENOENT || ret == -EFBIG) ret = 0; sum = 0; if (BTRFS_I(inode)->root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) { set_extent_bits(io_tree, offset, offset + bvec->bv_len - 1, EXTENT_NODATASUM, GFP_NOFS); } else { printk(KERN_INFO "btrfs no csum found " "for inode %llu start %llu\n", (unsigned long long) btrfs_ino(inode), (unsigned long long)offset); } item = NULL; btrfs_release_path(path); goto found; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); item_start_offset = found_key.offset; item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); item_last_offset = item_start_offset + (item_size / csum_size) * root->sectorsize; item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_csum_item); } /* * this byte range must be able to fit inside * a single leaf so it will also fit inside a u32 */ diff = disk_bytenr - item_start_offset; diff = diff / root->sectorsize; diff = diff * csum_size; read_extent_buffer(path->nodes[0], &sum, ((unsigned long)item) + diff, csum_size); found: if (dst) *dst++ = sum; else set_state_private(io_tree, offset, sum); disk_bytenr += bvec->bv_len; offset += bvec->bv_len; bio_index++; bvec++; } btrfs_free_path(path); return 0; } int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, struct bio *bio, u32 *dst) { return 
__btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0); } int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, struct bio *bio, u64 offset, u32 *dst) { return __btrfs_lookup_bio_sums(root, inode, bio, offset, dst, 1); } int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, struct list_head *list, int search_commit) { struct btrfs_key key; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_ordered_sum *sums; struct btrfs_sector_sum *sector_sum; struct btrfs_csum_item *item; LIST_HEAD(tmplist); unsigned long offset; int ret; size_t size; u64 csum_end; u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); path = btrfs_alloc_path(); if (!path) return -ENOMEM; if (search_commit) { path->skip_locking = 1; path->reada = 2; path->search_commit_root = 1; } key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key.offset = start; key.type = BTRFS_EXTENT_CSUM_KEY; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto fail; if (ret > 0 && path->slots[0] > 0) { leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID && key.type == BTRFS_EXTENT_CSUM_KEY) { offset = (start - key.offset) >> root->fs_info->sb->s_blocksize_bits; if (offset * csum_size < btrfs_item_size_nr(leaf, path->slots[0] - 1)) path->slots[0]--; } } while (start <= end) { leaf = path->nodes[0]; if (path->slots[0] >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret < 0) goto fail; if (ret > 0) break; leaf = path->nodes[0]; } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || key.type != BTRFS_EXTENT_CSUM_KEY) break; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.offset > end) break; if (key.offset > start) start = key.offset; size = btrfs_item_size_nr(leaf, path->slots[0]); csum_end = key.offset + (size / csum_size) * root->sectorsize; if (csum_end <= start) { path->slots[0]++; 
continue; } csum_end = min(csum_end, end + 1); item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_csum_item); while (start < csum_end) { size = min_t(size_t, csum_end - start, MAX_ORDERED_SUM_BYTES(root)); sums = kzalloc(btrfs_ordered_sum_size(root, size), GFP_NOFS); if (!sums) { ret = -ENOMEM; goto fail; } sector_sum = sums->sums; sums->bytenr = start; sums->len = size; offset = (start - key.offset) >> root->fs_info->sb->s_blocksize_bits; offset *= csum_size; while (size > 0) { read_extent_buffer(path->nodes[0], &sector_sum->sum, ((unsigned long)item) + offset, csum_size); sector_sum->bytenr = start; size -= root->sectorsize; start += root->sectorsize; offset += csum_size; sector_sum++; } list_add_tail(&sums->list, &tmplist); } path->slots[0]++; } ret = 0; fail: while (ret < 0 && !list_empty(&tmplist)) { sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list); list_del(&sums->list); kfree(sums); } list_splice_tail(&tmplist, list); btrfs_free_path(path); return ret; } int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, struct bio *bio, u64 file_start, int contig) { struct btrfs_ordered_sum *sums; struct btrfs_sector_sum *sector_sum; struct btrfs_ordered_extent *ordered; char *data; struct bio_vec *bvec = bio->bi_io_vec; int bio_index = 0; unsigned long total_bytes = 0; unsigned long this_sum_bytes = 0; u64 offset; u64 disk_bytenr; WARN_ON(bio->bi_vcnt <= 0); sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS); if (!sums) return -ENOMEM; sector_sum = sums->sums; disk_bytenr = (u64)bio->bi_sector << 9; sums->len = bio->bi_size; INIT_LIST_HEAD(&sums->list); if (contig) offset = file_start; else offset = page_offset(bvec->bv_page) + bvec->bv_offset; ordered = btrfs_lookup_ordered_extent(inode, offset); BUG_ON(!ordered); /* Logic error */ sums->bytenr = ordered->start; while (bio_index < bio->bi_vcnt) { if (!contig) offset = page_offset(bvec->bv_page) + bvec->bv_offset; if (!contig && (offset >= 
ordered->file_offset + ordered->len || offset < ordered->file_offset)) { unsigned long bytes_left; sums->len = this_sum_bytes; this_sum_bytes = 0; btrfs_add_ordered_sum(inode, ordered, sums); btrfs_put_ordered_extent(ordered); bytes_left = bio->bi_size - total_bytes; sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left), GFP_NOFS); BUG_ON(!sums); /* -ENOMEM */ sector_sum = sums->sums; sums->len = bytes_left; ordered = btrfs_lookup_ordered_extent(inode, offset); BUG_ON(!ordered); /* Logic error */ sums->bytenr = ordered->start; } data = kmap_atomic(bvec->bv_page); sector_sum->sum = ~(u32)0; sector_sum->sum = btrfs_csum_data(root, data + bvec->bv_offset, sector_sum->sum, bvec->bv_len); kunmap_atomic(data); btrfs_csum_final(sector_sum->sum, (char *)&sector_sum->sum); sector_sum->bytenr = disk_bytenr; sector_sum++; bio_index++; total_bytes += bvec->bv_len; this_sum_bytes += bvec->bv_len; disk_bytenr += bvec->bv_len; offset += bvec->bv_len; bvec++; } this_sum_bytes = 0; btrfs_add_ordered_sum(inode, ordered, sums); btrfs_put_ordered_extent(ordered); return 0; } /* * helper function for csum removal, this expects the * key to describe the csum pointed to by the path, and it expects * the csum to overlap the range [bytenr, len] * * The csum should not be entirely contained in the range and the * range should not be entirely contained in the csum. * * This calls btrfs_truncate_item with the correct args based on the * overlap, and fixes up the key as required. 
*/ static noinline void truncate_one_csum(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *key, u64 bytenr, u64 len) { struct extent_buffer *leaf; u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); u64 csum_end; u64 end_byte = bytenr + len; u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits; leaf = path->nodes[0]; csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size; csum_end <<= root->fs_info->sb->s_blocksize_bits; csum_end += key->offset; if (key->offset < bytenr && csum_end <= end_byte) { /* * [ bytenr - len ] * [ ] * [csum ] * A simple truncate off the end of the item */ u32 new_size = (bytenr - key->offset) >> blocksize_bits; new_size *= csum_size; btrfs_truncate_item(trans, root, path, new_size, 1); } else if (key->offset >= bytenr && csum_end > end_byte && end_byte > key->offset) { /* * [ bytenr - len ] * [ ] * [csum ] * we need to truncate from the beginning of the csum */ u32 new_size = (csum_end - end_byte) >> blocksize_bits; new_size *= csum_size; btrfs_truncate_item(trans, root, path, new_size, 0); key->offset = end_byte; btrfs_set_item_key_safe(trans, root, path, key); } else { BUG(); } } /* * deletes the csum items from the csum tree for a given * range of bytes. 
*/ int btrfs_del_csums(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 len) { struct btrfs_path *path; struct btrfs_key key; u64 end_byte = bytenr + len; u64 csum_end; struct extent_buffer *leaf; int ret; u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); int blocksize_bits = root->fs_info->sb->s_blocksize_bits; root = root->fs_info->csum_root; path = btrfs_alloc_path(); if (!path) return -ENOMEM; while (1) { key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key.offset = end_byte - 1; key.type = BTRFS_EXTENT_CSUM_KEY; path->leave_spinning = 1; ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) { if (path->slots[0] == 0) break; path->slots[0]--; } else if (ret < 0) { break; } leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || key.type != BTRFS_EXTENT_CSUM_KEY) { break; } if (key.offset >= end_byte) break; csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size; csum_end <<= blocksize_bits; csum_end += key.offset; /* this csum ends before we start, we're done */ if (csum_end <= bytenr) break; /* delete the entire item, it is inside our range */ if (key.offset >= bytenr && csum_end <= end_byte) { ret = btrfs_del_item(trans, root, path); if (ret) goto out; if (key.offset == bytenr) break; } else if (key.offset < bytenr && csum_end > end_byte) { unsigned long offset; unsigned long shift_len; unsigned long item_offset; /* * [ bytenr - len ] * [csum ] * * Our bytes are in the middle of the csum, * we need to split this item and insert a new one. * * But we can't drop the path because the * csum could change, get removed, extended etc. * * The trick here is the max size of a csum item leaves * enough room in the tree block for a single * item header. So, we split the item in place, * adding a new header pointing to the existing * bytes. Then we loop around again and we have * a nicely formed csum item that we can neatly * truncate. 
*/ offset = (bytenr - key.offset) >> blocksize_bits; offset *= csum_size; shift_len = (len >> blocksize_bits) * csum_size; item_offset = btrfs_item_ptr_offset(leaf, path->slots[0]); memset_extent_buffer(leaf, 0, item_offset + offset, shift_len); key.offset = bytenr; /* * btrfs_split_item returns -EAGAIN when the * item changed size or key */ ret = btrfs_split_item(trans, root, path, &key, offset); if (ret && ret != -EAGAIN) { btrfs_abort_transaction(trans, root, ret); goto out; } key.offset = end_byte - 1; } else { truncate_one_csum(trans, root, path, &key, bytenr, len); if (key.offset < bytenr) break; } btrfs_release_path(path); } ret = 0; out: btrfs_free_path(path); return ret; } int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_ordered_sum *sums) { u64 bytenr; int ret; struct btrfs_key file_key; struct btrfs_key found_key; u64 next_offset; u64 total_bytes = 0; int found_next; struct btrfs_path *path; struct btrfs_csum_item *item; struct btrfs_csum_item *item_end; struct extent_buffer *leaf = NULL; u64 csum_offset; struct btrfs_sector_sum *sector_sum; u32 nritems; u32 ins_size; u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); path = btrfs_alloc_path(); if (!path) return -ENOMEM; sector_sum = sums->sums; again: next_offset = (u64)-1; found_next = 0; file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; file_key.offset = sector_sum->bytenr; bytenr = sector_sum->bytenr; btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY); item = btrfs_lookup_csum(trans, root, path, sector_sum->bytenr, 1); if (!IS_ERR(item)) { leaf = path->nodes[0]; ret = 0; goto found; } ret = PTR_ERR(item); if (ret != -EFBIG && ret != -ENOENT) goto fail_unlock; if (ret == -EFBIG) { u32 item_size; /* we found one, but it isn't big enough yet */ leaf = path->nodes[0]; item_size = btrfs_item_size_nr(leaf, path->slots[0]); if ((item_size / csum_size) >= MAX_CSUM_ITEMS(root, csum_size)) { /* already at max size, make a new one */ goto insert; 
} } else { int slot = path->slots[0] + 1; /* we didn't find a csum item, insert one */ nritems = btrfs_header_nritems(path->nodes[0]); if (path->slots[0] >= nritems - 1) { ret = btrfs_next_leaf(root, path); if (ret == 1) found_next = 1; if (ret != 0) goto insert; slot = 0; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || found_key.type != BTRFS_EXTENT_CSUM_KEY) { found_next = 1; goto insert; } next_offset = found_key.offset; found_next = 1; goto insert; } /* * at this point, we know the tree has an item, but it isn't big * enough yet to put our csum in. Grow it */ btrfs_release_path(path); ret = btrfs_search_slot(trans, root, &file_key, path, csum_size, 1); if (ret < 0) goto fail_unlock; if (ret > 0) { if (path->slots[0] == 0) goto insert; path->slots[0]--; } leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); csum_offset = (bytenr - found_key.offset) >> root->fs_info->sb->s_blocksize_bits; if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY || found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) { goto insert; } if (csum_offset >= btrfs_item_size_nr(leaf, path->slots[0]) / csum_size) { u32 diff = (csum_offset + 1) * csum_size; /* * is the item big enough already? 
we dropped our lock * before and need to recheck */ if (diff < btrfs_item_size_nr(leaf, path->slots[0])) goto csum; diff = diff - btrfs_item_size_nr(leaf, path->slots[0]); if (diff != csum_size) goto insert; btrfs_extend_item(trans, root, path, diff); goto csum; } insert: btrfs_release_path(path); csum_offset = 0; if (found_next) { u64 tmp = total_bytes + root->sectorsize; u64 next_sector = sector_sum->bytenr; struct btrfs_sector_sum *next = sector_sum + 1; while (tmp < sums->len) { if (next_sector + root->sectorsize != next->bytenr) break; tmp += root->sectorsize; next_sector = next->bytenr; next++; } tmp = min(tmp, next_offset - file_key.offset); tmp >>= root->fs_info->sb->s_blocksize_bits; tmp = max((u64)1, tmp); tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size)); ins_size = csum_size * tmp; } else { ins_size = csum_size; } path->leave_spinning = 1; ret = btrfs_insert_empty_item(trans, root, path, &file_key, ins_size); path->leave_spinning = 0; if (ret < 0) goto fail_unlock; if (ret != 0) { WARN_ON(1); goto fail_unlock; } csum: leaf = path->nodes[0]; item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); ret = 0; item = (struct btrfs_csum_item *)((unsigned char *)item + csum_offset * csum_size); found: item_end = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); item_end = (struct btrfs_csum_item *)((unsigned char *)item_end + btrfs_item_size_nr(leaf, path->slots[0])); next_sector: write_extent_buffer(leaf, &sector_sum->sum, (unsigned long)item, csum_size); total_bytes += root->sectorsize; sector_sum++; if (total_bytes < sums->len) { item = (struct btrfs_csum_item *)((char *)item + csum_size); if (item < item_end && bytenr + PAGE_CACHE_SIZE == sector_sum->bytenr) { bytenr = sector_sum->bytenr; goto next_sector; } } btrfs_mark_buffer_dirty(path->nodes[0]); if (total_bytes < sums->len) { btrfs_release_path(path); cond_resched(); goto again; } out: btrfs_free_path(path); return ret; fail_unlock: goto out; }
gpl-2.0
hazard209/Charge_Kernel
fs/xfs/linux-2.6/xfs_fs_subr.c
811
2382
/*
 * Copyright (c) 2000-2002,2005-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_vnodeops.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_trace.h"

/* Trivial stubs plugged into xfs operation tables. */
int
fs_noerr(void)
{
	/* "no-op that succeeds" */
	return 0;
}

int
fs_nosys(void)
{
	/* "operation not supported" -- positive errno, xfs convention */
	return ENOSYS;
}

void
fs_noval(void)
{
	return;
}

/*
 * note: all filemap functions return negative error codes. These
 * need to be inverted before returning to the xfs core functions.
 */

/*
 * Throw away all pages of the inode's mapping starting at offset 'first'.
 * NOTE(review): 'last' and 'fiopt' are accepted but unused here --
 * truncation always runs from 'first' to EOF; confirm callers expect that.
 */
void
xfs_tosspages(
	xfs_inode_t	*ip,
	xfs_off_t	first,
	xfs_off_t	last,
	int		fiopt)
{
	struct address_space *mapping = VFS_I(ip)->i_mapping;

	if (mapping->nrpages)
		truncate_inode_pages(mapping, first);
}

/*
 * Write back and then toss the inode's cached pages.  Pages are only
 * truncated if the preceding writeback succeeded.  Returns a positive
 * errno (the filemap result is negated on return).
 * NOTE(review): as above, 'last' and 'fiopt' are unused.
 */
int
xfs_flushinval_pages(
	xfs_inode_t	*ip,
	xfs_off_t	first,
	xfs_off_t	last,
	int		fiopt)
{
	struct address_space *mapping = VFS_I(ip)->i_mapping;
	int		ret = 0;

	trace_xfs_pagecache_inval(ip, first, last);

	if (mapping->nrpages) {
		/* clear the truncated flag before flushing */
		xfs_iflags_clear(ip, XFS_ITRUNCATED);
		ret = filemap_write_and_wait(mapping);
		if (!ret)
			truncate_inode_pages(mapping, first);
	}
	return -ret;
}

/*
 * Start writeback of any dirty pages; if XBF_ASYNC is not set, also wait
 * for writeback in [first, last] to finish.  The first error (write or
 * wait) wins.  Returns a positive errno.
 * NOTE(review): 'fiopt' is unused.
 */
int
xfs_flush_pages(
	xfs_inode_t	*ip,
	xfs_off_t	first,
	xfs_off_t	last,
	uint64_t	flags,
	int		fiopt)
{
	struct address_space *mapping = VFS_I(ip)->i_mapping;
	int		ret = 0;
	int		ret2;

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		xfs_iflags_clear(ip, XFS_ITRUNCATED);
		/* filemap returns negative errno; invert for xfs core */
		ret = -filemap_fdatawrite(mapping);
	}
	if (flags & XBF_ASYNC)
		return ret;
	ret2 = xfs_wait_on_pages(ip, first, last);
	if (!ret)
		ret = ret2;
	return ret;
}

/*
 * Wait for in-flight writeback on the inode's mapping.  Cheap no-op when
 * nothing is tagged under writeback.  Returns a positive errno.
 * NOTE(review): 'first'/'last' are unused -- the whole mapping is waited on.
 */
int
xfs_wait_on_pages(
	xfs_inode_t	*ip,
	xfs_off_t	first,
	xfs_off_t	last)
{
	struct address_space *mapping = VFS_I(ip)->i_mapping;

	if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
		return -filemap_fdatawait(mapping);
	return 0;
}
gpl-2.0
senlan008/linux-1
drivers/input/misc/88pm860x_onkey.c
1579
4143
/* * 88pm860x_onkey.c - Marvell 88PM860x ONKEY driver * * Copyright (C) 2009-2010 Marvell International Ltd. * Haojian Zhuang <haojian.zhuang@marvell.com> * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of this * archive for more details. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/mfd/88pm860x.h> #include <linux/slab.h> #include <linux/device.h> #define PM8607_WAKEUP 0x0b #define LONG_ONKEY_EN (1 << 1) #define ONKEY_STATUS (1 << 0) struct pm860x_onkey_info { struct input_dev *idev; struct pm860x_chip *chip; struct i2c_client *i2c; struct device *dev; int irq; }; /* 88PM860x gives us an interrupt when ONKEY is held */ static irqreturn_t pm860x_onkey_handler(int irq, void *data) { struct pm860x_onkey_info *info = data; int ret; ret = pm860x_reg_read(info->i2c, PM8607_STATUS_2); ret &= ONKEY_STATUS; input_report_key(info->idev, KEY_POWER, ret); input_sync(info->idev); /* Enable 8-second long onkey detection */ pm860x_set_bits(info->i2c, PM8607_WAKEUP, 3, LONG_ONKEY_EN); return IRQ_HANDLED; } static int pm860x_onkey_probe(struct platform_device *pdev) { struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent); struct pm860x_onkey_info *info; int irq, ret; irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "No IRQ resource!\n"); return -EINVAL; } info = devm_kzalloc(&pdev->dev, 
sizeof(struct pm860x_onkey_info), GFP_KERNEL); if (!info) return -ENOMEM; info->chip = chip; info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion; info->dev = &pdev->dev; info->irq = irq; info->idev = devm_input_allocate_device(&pdev->dev); if (!info->idev) { dev_err(chip->dev, "Failed to allocate input dev\n"); return -ENOMEM; } info->idev->name = "88pm860x_on"; info->idev->phys = "88pm860x_on/input0"; info->idev->id.bustype = BUS_I2C; info->idev->dev.parent = &pdev->dev; info->idev->evbit[0] = BIT_MASK(EV_KEY); info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER); ret = input_register_device(info->idev); if (ret) { dev_err(chip->dev, "Can't register input device: %d\n", ret); return ret; } ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL, pm860x_onkey_handler, IRQF_ONESHOT, "onkey", info); if (ret < 0) { dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n", info->irq, ret); return ret; } platform_set_drvdata(pdev, info); device_init_wakeup(&pdev->dev, 1); return 0; } static int __maybe_unused pm860x_onkey_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent); if (device_may_wakeup(dev)) chip->wakeup_flag |= 1 << PM8607_IRQ_ONKEY; return 0; } static int __maybe_unused pm860x_onkey_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent); if (device_may_wakeup(dev)) chip->wakeup_flag &= ~(1 << PM8607_IRQ_ONKEY); return 0; } static SIMPLE_DEV_PM_OPS(pm860x_onkey_pm_ops, pm860x_onkey_suspend, pm860x_onkey_resume); static struct platform_driver pm860x_onkey_driver = { .driver = { .name = "88pm860x-onkey", .pm = &pm860x_onkey_pm_ops, }, .probe = pm860x_onkey_probe, }; module_platform_driver(pm860x_onkey_driver); MODULE_DESCRIPTION("Marvell 88PM860x ONKEY driver"); MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>"); MODULE_LICENSE("GPL");
gpl-2.0
bgat/linux-multi-v7
arch/tile/kernel/vdso.c
1835
4902
/* * Copyright 2012 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/binfmts.h> #include <linux/compat.h> #include <linux/elf.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <asm/vdso.h> #include <asm/mman.h> #include <asm/sections.h> #include <arch/sim.h> /* The alignment of the vDSO. */ #define VDSO_ALIGNMENT PAGE_SIZE static unsigned int vdso_pages; static struct page **vdso_pagelist; #ifdef CONFIG_COMPAT static unsigned int vdso32_pages; static struct page **vdso32_pagelist; #endif static int vdso_ready; /* * The vdso data page. */ static union { struct vdso_data data; u8 page[PAGE_SIZE]; } vdso_data_store __page_aligned_data; struct vdso_data *vdso_data = &vdso_data_store.data; static unsigned int __read_mostly vdso_enabled = 1; static struct page **vdso_setup(void *vdso_kbase, unsigned int pages) { int i; struct page **pagelist; pagelist = kzalloc(sizeof(struct page *) * (pages + 1), GFP_KERNEL); BUG_ON(pagelist == NULL); for (i = 0; i < pages - 1; i++) { struct page *pg = virt_to_page(vdso_kbase + i*PAGE_SIZE); ClearPageReserved(pg); pagelist[i] = pg; } pagelist[pages - 1] = virt_to_page(vdso_data); pagelist[pages] = NULL; return pagelist; } static int __init vdso_init(void) { int data_pages = sizeof(vdso_data_store) >> PAGE_SHIFT; /* * We can disable vDSO support generally, but we need to retain * one page to support the two-bundle (16-byte) rt_sigreturn path. 
*/ if (!vdso_enabled) { size_t offset = (unsigned long)&__vdso_rt_sigreturn; static struct page *sigret_page; sigret_page = alloc_page(GFP_KERNEL | __GFP_ZERO); BUG_ON(sigret_page == NULL); vdso_pagelist = &sigret_page; vdso_pages = 1; BUG_ON(offset >= PAGE_SIZE); memcpy(page_address(sigret_page) + offset, vdso_start + offset, 16); #ifdef CONFIG_COMPAT vdso32_pages = vdso_pages; vdso32_pagelist = vdso_pagelist; #endif vdso_ready = 1; return 0; } vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; vdso_pages += data_pages; vdso_pagelist = vdso_setup(vdso_start, vdso_pages); #ifdef CONFIG_COMPAT vdso32_pages = (vdso32_end - vdso32_start) >> PAGE_SHIFT; vdso32_pages += data_pages; vdso32_pagelist = vdso_setup(vdso32_start, vdso32_pages); #endif smp_wmb(); vdso_ready = 1; return 0; } arch_initcall(vdso_init); const char *arch_vma_name(struct vm_area_struct *vma) { if (vma->vm_mm && vma->vm_start == VDSO_BASE) return "[vdso]"; #ifndef __tilegx__ if (vma->vm_start == MEM_USER_INTRPT) return "[intrpt]"; #endif return NULL; } int setup_vdso_pages(void) { struct page **pagelist; unsigned long pages; struct mm_struct *mm = current->mm; unsigned long vdso_base = 0; int retval = 0; if (!vdso_ready) return 0; mm->context.vdso_base = 0; pagelist = vdso_pagelist; pages = vdso_pages; #ifdef CONFIG_COMPAT if (is_compat_task()) { pagelist = vdso32_pagelist; pages = vdso32_pages; } #endif /* * vDSO has a problem and was disabled, just don't "enable" it for the * process. */ if (pages == 0) return 0; vdso_base = get_unmapped_area(NULL, vdso_base, (pages << PAGE_SHIFT) + ((VDSO_ALIGNMENT - 1) & PAGE_MASK), 0, 0); if (IS_ERR_VALUE(vdso_base)) { retval = vdso_base; return retval; } /* Add required alignment. */ vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT); /* * Put vDSO base into mm struct. We need to do this before calling * install_special_mapping or the perf counter mmap tracking code * will fail to recognise it as a vDSO (since arch_vma_name fails). 
*/ mm->context.vdso_base = vdso_base; /* * our vma flags don't have VM_WRITE so by default, the process isn't * allowed to write those pages. * gdb can break that with ptrace interface, and thus trigger COW on * those pages but it's then your responsibility to never do that on * the "data" page of the vDSO or you'll stop getting kernel updates * and your nice userland gettimeofday will be totally dead. * It's fine to use that for setting breakpoints in the vDSO code * pages though */ retval = install_special_mapping(mm, vdso_base, pages << PAGE_SHIFT, VM_READ|VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, pagelist); if (retval) mm->context.vdso_base = 0; return retval; } static __init int vdso_func(char *s) { return kstrtouint(s, 0, &vdso_enabled); } __setup("vdso=", vdso_func);
gpl-2.0
FEDEVEL/imx6rex-linux-3.10.17
net/mac802154/rx.c
2859
3050
/* * Copyright (C) 2007-2012 Siemens AG * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * Written by: * Pavel Smolenskiy <pavel.smolenskiy@gmail.com> * Maxim Gorbachyov <maxim.gorbachev@siemens.com> * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/workqueue.h> #include <linux/netdevice.h> #include <linux/crc-ccitt.h> #include <net/mac802154.h> #include <net/ieee802154_netdev.h> #include "mac802154.h" /* The IEEE 802.15.4 standard defines 4 MAC packet types: * - beacon frame * - MAC command frame * - acknowledgement frame * - data frame * * and only the data frame should be pushed to the upper layers, other types * are just internal MAC layer management information. So only data packets * are going to be sent to the networking queue, all other will be processed * right here by using the device workqueue. 
*/ struct rx_work { struct sk_buff *skb; struct work_struct work; struct ieee802154_dev *dev; u8 lqi; }; static void mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi) { struct mac802154_priv *priv = mac802154_to_priv(hw); mac_cb(skb)->lqi = lqi; skb->protocol = htons(ETH_P_IEEE802154); skb_reset_mac_header(skb); BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb)); if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) { u16 crc; if (skb->len < 2) { pr_debug("got invalid frame\n"); goto out; } crc = crc_ccitt(0, skb->data, skb->len); if (crc) { pr_debug("CRC mismatch\n"); goto out; } skb_trim(skb, skb->len - 2); /* CRC */ } mac802154_monitors_rx(priv, skb); mac802154_wpans_rx(priv, skb); out: dev_kfree_skb(skb); return; } static void mac802154_rx_worker(struct work_struct *work) { struct rx_work *rw = container_of(work, struct rx_work, work); struct sk_buff *skb = rw->skb; mac802154_subif_rx(rw->dev, skb, rw->lqi); kfree(rw); } void ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, u8 lqi) { struct mac802154_priv *priv = mac802154_to_priv(dev); struct rx_work *work; if (!skb) return; work = kzalloc(sizeof(struct rx_work), GFP_ATOMIC); if (!work) return; INIT_WORK(&work->work, mac802154_rx_worker); work->skb = skb; work->dev = dev; work->lqi = lqi; queue_work(priv->dev_workqueue, &work->work); } EXPORT_SYMBOL(ieee802154_rx_irqsafe);
gpl-2.0
netmodule/kernel-zx3
drivers/media/dvb-frontends/stv6110.c
3115
11036
/* * stv6110.c * * Driver for ST STV6110 satellite tuner IC. * * Copyright (C) 2009 NetUP Inc. * Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/slab.h> #include <linux/module.h> #include <linux/dvb/frontend.h> #include <linux/types.h> #include "stv6110.h" /* Max transfer size done by I2C transfer functions */ #define MAX_XFER_SIZE 64 static int debug; struct stv6110_priv { int i2c_address; struct i2c_adapter *i2c; u32 mclk; u8 clk_div; u8 gain; u8 regs[8]; }; #define dprintk(args...) 
\ do { \ if (debug) \ printk(KERN_DEBUG args); \ } while (0) static s32 abssub(s32 a, s32 b) { if (a > b) return a - b; else return b - a; }; static int stv6110_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } static int stv6110_write_regs(struct dvb_frontend *fe, u8 buf[], int start, int len) { struct stv6110_priv *priv = fe->tuner_priv; int rc; u8 cmdbuf[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = priv->i2c_address, .flags = 0, .buf = cmdbuf, .len = len + 1 }; dprintk("%s\n", __func__); if (1 + len > sizeof(cmdbuf)) { printk(KERN_WARNING "%s: i2c wr: len=%d is too big!\n", KBUILD_MODNAME, len); return -EINVAL; } if (start + len > 8) return -EINVAL; memcpy(&cmdbuf[1], buf, len); cmdbuf[0] = start; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); rc = i2c_transfer(priv->i2c, &msg, 1); if (rc != 1) dprintk("%s: i2c error\n", __func__); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); return 0; } static int stv6110_read_regs(struct dvb_frontend *fe, u8 regs[], int start, int len) { struct stv6110_priv *priv = fe->tuner_priv; int rc; u8 reg[] = { start }; struct i2c_msg msg[] = { { .addr = priv->i2c_address, .flags = 0, .buf = reg, .len = 1, }, { .addr = priv->i2c_address, .flags = I2C_M_RD, .buf = regs, .len = len, }, }; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); rc = i2c_transfer(priv->i2c, msg, 2); if (rc != 2) dprintk("%s: i2c error\n", __func__); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); memcpy(&priv->regs[start], regs, len); return 0; } static int stv6110_read_reg(struct dvb_frontend *fe, int start) { u8 buf[] = { 0 }; stv6110_read_regs(fe, buf, start, 1); return buf[0]; } static int stv6110_sleep(struct dvb_frontend *fe) { u8 reg[] = { 0 }; stv6110_write_regs(fe, reg, 0, 1); return 0; } static u32 carrier_width(u32 symbol_rate, fe_rolloff_t rolloff) { u32 rlf; switch (rolloff) { case ROLLOFF_20: rlf = 20; break; case ROLLOFF_25: rlf = 25; break; default: rlf = 35; break; 
} return symbol_rate + ((symbol_rate * rlf) / 100); } static int stv6110_set_bandwidth(struct dvb_frontend *fe, u32 bandwidth) { struct stv6110_priv *priv = fe->tuner_priv; u8 r8, ret = 0x04; int i; if ((bandwidth / 2) > 36000000) /*BW/2 max=31+5=36 mhz for r8=31*/ r8 = 31; else if ((bandwidth / 2) < 5000000) /* BW/2 min=5Mhz for F=0 */ r8 = 0; else /*if 5 < BW/2 < 36*/ r8 = (bandwidth / 2) / 1000000 - 5; /* ctrl3, RCCLKOFF = 0 Activate the calibration Clock */ /* ctrl3, CF = r8 Set the LPF value */ priv->regs[RSTV6110_CTRL3] &= ~((1 << 6) | 0x1f); priv->regs[RSTV6110_CTRL3] |= (r8 & 0x1f); stv6110_write_regs(fe, &priv->regs[RSTV6110_CTRL3], RSTV6110_CTRL3, 1); /* stat1, CALRCSTRT = 1 Start LPF auto calibration*/ priv->regs[RSTV6110_STAT1] |= 0x02; stv6110_write_regs(fe, &priv->regs[RSTV6110_STAT1], RSTV6110_STAT1, 1); i = 0; /* Wait for CALRCSTRT == 0 */ while ((i < 10) && (ret != 0)) { ret = ((stv6110_read_reg(fe, RSTV6110_STAT1)) & 0x02); mdelay(1); /* wait for LPF auto calibration */ i++; } /* RCCLKOFF = 1 calibration done, desactivate the calibration Clock */ priv->regs[RSTV6110_CTRL3] |= (1 << 6); stv6110_write_regs(fe, &priv->regs[RSTV6110_CTRL3], RSTV6110_CTRL3, 1); return 0; } static int stv6110_init(struct dvb_frontend *fe) { struct stv6110_priv *priv = fe->tuner_priv; u8 buf0[] = { 0x07, 0x11, 0xdc, 0x85, 0x17, 0x01, 0xe6, 0x1e }; memcpy(priv->regs, buf0, 8); /* K = (Reference / 1000000) - 16 */ priv->regs[RSTV6110_CTRL1] &= ~(0x1f << 3); priv->regs[RSTV6110_CTRL1] |= ((((priv->mclk / 1000000) - 16) & 0x1f) << 3); /* divisor value for the output clock */ priv->regs[RSTV6110_CTRL2] &= ~0xc0; priv->regs[RSTV6110_CTRL2] |= (priv->clk_div << 6); stv6110_write_regs(fe, &priv->regs[RSTV6110_CTRL1], RSTV6110_CTRL1, 8); msleep(1); stv6110_set_bandwidth(fe, 72000000); return 0; } static int stv6110_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct stv6110_priv *priv = fe->tuner_priv; u32 nbsteps, divider, psd2, freq; u8 regs[] = { 0, 0, 0, 0, 0, 0, 
0, 0 }; stv6110_read_regs(fe, regs, 0, 8); /*N*/ divider = (priv->regs[RSTV6110_TUNING2] & 0x0f) << 8; divider += priv->regs[RSTV6110_TUNING1]; /*R*/ nbsteps = (priv->regs[RSTV6110_TUNING2] >> 6) & 3; /*p*/ psd2 = (priv->regs[RSTV6110_TUNING2] >> 4) & 1; freq = divider * (priv->mclk / 1000); freq /= (1 << (nbsteps + psd2)); freq /= 4; *frequency = freq; return 0; } static int stv6110_set_frequency(struct dvb_frontend *fe, u32 frequency) { struct stv6110_priv *priv = fe->tuner_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; u8 ret = 0x04; u32 divider, ref, p, presc, i, result_freq, vco_freq; s32 p_calc, p_calc_opt = 1000, r_div, r_div_opt = 0, p_val; s32 srate; dprintk("%s, freq=%d kHz, mclk=%d Hz\n", __func__, frequency, priv->mclk); /* K = (Reference / 1000000) - 16 */ priv->regs[RSTV6110_CTRL1] &= ~(0x1f << 3); priv->regs[RSTV6110_CTRL1] |= ((((priv->mclk / 1000000) - 16) & 0x1f) << 3); /* BB_GAIN = db/2 */ if (fe->ops.set_property && fe->ops.get_property) { srate = c->symbol_rate; dprintk("%s: Get Frontend parameters: srate=%d\n", __func__, srate); } else srate = 15000000; priv->regs[RSTV6110_CTRL2] &= ~0x0f; priv->regs[RSTV6110_CTRL2] |= (priv->gain & 0x0f); if (frequency <= 1023000) { p = 1; presc = 0; } else if (frequency <= 1300000) { p = 1; presc = 1; } else if (frequency <= 2046000) { p = 0; presc = 0; } else { p = 0; presc = 1; } /* DIV4SEL = p*/ priv->regs[RSTV6110_TUNING2] &= ~(1 << 4); priv->regs[RSTV6110_TUNING2] |= (p << 4); /* PRESC32ON = presc */ priv->regs[RSTV6110_TUNING2] &= ~(1 << 5); priv->regs[RSTV6110_TUNING2] |= (presc << 5); p_val = (int)(1 << (p + 1)) * 10;/* P = 2 or P = 4 */ for (r_div = 0; r_div <= 3; r_div++) { p_calc = (priv->mclk / 100000); p_calc /= (1 << (r_div + 1)); if ((abssub(p_calc, p_val)) < (abssub(p_calc_opt, p_val))) r_div_opt = r_div; p_calc_opt = (priv->mclk / 100000); p_calc_opt /= (1 << (r_div_opt + 1)); } ref = priv->mclk / ((1 << (r_div_opt + 1)) * (1 << (p + 1))); divider = (((frequency * 1000) + 
(ref >> 1)) / ref); /* RDIV = r_div_opt */ priv->regs[RSTV6110_TUNING2] &= ~(3 << 6); priv->regs[RSTV6110_TUNING2] |= (((r_div_opt) & 3) << 6); /* NDIV_MSB = MSB(divider) */ priv->regs[RSTV6110_TUNING2] &= ~0x0f; priv->regs[RSTV6110_TUNING2] |= (((divider) >> 8) & 0x0f); /* NDIV_LSB, LSB(divider) */ priv->regs[RSTV6110_TUNING1] = (divider & 0xff); /* CALVCOSTRT = 1 VCO Auto Calibration */ priv->regs[RSTV6110_STAT1] |= 0x04; stv6110_write_regs(fe, &priv->regs[RSTV6110_CTRL1], RSTV6110_CTRL1, 8); i = 0; /* Wait for CALVCOSTRT == 0 */ while ((i < 10) && (ret != 0)) { ret = ((stv6110_read_reg(fe, RSTV6110_STAT1)) & 0x04); msleep(1); /* wait for VCO auto calibration */ i++; } ret = stv6110_read_reg(fe, RSTV6110_STAT1); stv6110_get_frequency(fe, &result_freq); vco_freq = divider * ((priv->mclk / 1000) / ((1 << (r_div_opt + 1)))); dprintk("%s, stat1=%x, lo_freq=%d kHz, vco_frec=%d kHz\n", __func__, ret, result_freq, vco_freq); return 0; } static int stv6110_set_params(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; u32 bandwidth = carrier_width(c->symbol_rate, c->rolloff); stv6110_set_frequency(fe, c->frequency); stv6110_set_bandwidth(fe, bandwidth); return 0; } static int stv6110_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) { struct stv6110_priv *priv = fe->tuner_priv; u8 r8 = 0; u8 regs[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; stv6110_read_regs(fe, regs, 0, 8); /* CF */ r8 = priv->regs[RSTV6110_CTRL3] & 0x1f; *bandwidth = (r8 + 5) * 2000000;/* x2 for ZIF tuner BW/2 = F+5 Mhz */ return 0; } static struct dvb_tuner_ops stv6110_tuner_ops = { .info = { .name = "ST STV6110", .frequency_min = 950000, .frequency_max = 2150000, .frequency_step = 1000, }, .init = stv6110_init, .release = stv6110_release, .sleep = stv6110_sleep, .set_params = stv6110_set_params, .get_frequency = stv6110_get_frequency, .set_frequency = stv6110_set_frequency, .get_bandwidth = stv6110_get_bandwidth, .set_bandwidth = stv6110_set_bandwidth, }; struct 
dvb_frontend *stv6110_attach(struct dvb_frontend *fe,
					const struct stv6110_config *config,
					struct i2c_adapter *i2c)
{
	/* Probe the STV6110 tuner at config->i2c_address by writing its full
	 * default register set (reg0[0] = start register 0, then 8 values),
	 * then allocate and hook up the tuner private state on success.
	 * Returns fe on success, NULL on I2C failure or OOM.
	 */
	struct stv6110_priv *priv = NULL;
	u8 reg0[] = { 0x00, 0x07, 0x11, 0xdc, 0x85, 0x17, 0x01, 0xe6, 0x1e };
	struct i2c_msg msg[] = {
		{
			.addr = config->i2c_address,
			.flags = 0,
			.buf = reg0,
			.len = 9
		}
	};
	int ret;

	/* divisor value for the output clock */
	reg0[2] &= ~0xc0;
	reg0[2] |= (config->clk_div << 6);

	/* Open the demod's I2C gate for the duration of the probe write */
	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);

	ret = i2c_transfer(i2c, msg, 1);
	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0);

	/* One message requested; anything else means the chip didn't ack */
	if (ret != 1)
		return NULL;

	priv = kzalloc(sizeof(struct stv6110_priv), GFP_KERNEL);
	if (priv == NULL)
		return NULL;

	priv->i2c_address = config->i2c_address;
	priv->i2c = i2c;
	priv->mclk = config->mclk;
	priv->clk_div = config->clk_div;
	priv->gain = config->gain;

	/* Cache the 8 register values just written (skip the address byte) */
	memcpy(&priv->regs, &reg0[1], 8);

	memcpy(&fe->ops.tuner_ops, &stv6110_tuner_ops,
				sizeof(struct dvb_tuner_ops));
	fe->tuner_priv = priv;
	printk(KERN_INFO "STV6110 attached on addr=%x!\n", priv->i2c_address);

	return fe;
}
EXPORT_SYMBOL(stv6110_attach);

module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");

MODULE_DESCRIPTION("ST STV6110 driver");
MODULE_AUTHOR("Igor M. Liplianin");
MODULE_LICENSE("GPL");
gpl-2.0
ChristopherKing42/Spoon-Knife
net/atm/proc.c
4395
11747
/* net/atm/proc.c - ATM /proc interface * * Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA * * seq_file api usage by romieu@fr.zoreil.com * * Evaluating the efficiency of the whole thing if left as an exercise to * the reader. */ #include <linux/module.h> /* for EXPORT_SYMBOL */ #include <linux/string.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/stat.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/errno.h> #include <linux/atm.h> #include <linux/atmdev.h> #include <linux/netdevice.h> #include <linux/atmclip.h> #include <linux/init.h> /* for __init */ #include <linux/slab.h> #include <net/net_namespace.h> #include <net/atmclip.h> #include <linux/uaccess.h> #include <linux/param.h> /* for HZ */ #include <linux/atomic.h> #include "resources.h" #include "common.h" /* atm_proc_init prototype */ #include "signaling.h" /* to get sigd - ugly too */ static ssize_t proc_dev_atm_read(struct file *file, char __user *buf, size_t count, loff_t *pos); static const struct file_operations proc_atm_dev_ops = { .owner = THIS_MODULE, .read = proc_dev_atm_read, .llseek = noop_llseek, }; static void add_stats(struct seq_file *seq, const char *aal, const struct k_atm_aal_stats *stats) { seq_printf(seq, "%s ( %d %d %d %d %d )", aal, atomic_read(&stats->tx), atomic_read(&stats->tx_err), atomic_read(&stats->rx), atomic_read(&stats->rx_err), atomic_read(&stats->rx_drop)); } static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev) { int i; seq_printf(seq, "%3d %-8s", dev->number, dev->type); for (i = 0; i < ESI_LEN; i++) seq_printf(seq, "%02x", dev->esi[i]); seq_puts(seq, " "); add_stats(seq, "0", &dev->stats.aal0); seq_puts(seq, " "); add_stats(seq, "5", &dev->stats.aal5); seq_printf(seq, "\t[%d]", atomic_read(&dev->refcnt)); seq_putc(seq, '\n'); } struct vcc_state { int bucket; struct sock *sk; int family; }; static inline int compare_family(struct sock *sk, int family) { return !family || 
(sk->sk_family == family); } static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l) { struct sock *sk = *sock; if (sk == SEQ_START_TOKEN) { for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) { struct hlist_head *head = &vcc_hash[*bucket]; sk = hlist_empty(head) ? NULL : __sk_head(head); if (sk) break; } l--; } try_again: for (; sk; sk = sk_next(sk)) { l -= compare_family(sk, family); if (l < 0) goto out; } if (!sk && ++*bucket < VCC_HTABLE_SIZE) { sk = sk_head(&vcc_hash[*bucket]); goto try_again; } sk = SEQ_START_TOKEN; out: *sock = sk; return (l < 0); } static inline void *vcc_walk(struct vcc_state *state, loff_t l) { return __vcc_walk(&state->sk, state->family, &state->bucket, l) ? state : NULL; } static int __vcc_seq_open(struct inode *inode, struct file *file, int family, const struct seq_operations *ops) { struct vcc_state *state; state = __seq_open_private(file, ops, sizeof(*state)); if (state == NULL) return -ENOMEM; state->family = family; return 0; } static void *vcc_seq_start(struct seq_file *seq, loff_t *pos) __acquires(vcc_sklist_lock) { struct vcc_state *state = seq->private; loff_t left = *pos; read_lock(&vcc_sklist_lock); state->sk = SEQ_START_TOKEN; return left ? 
vcc_walk(state, left) : SEQ_START_TOKEN; } static void vcc_seq_stop(struct seq_file *seq, void *v) __releases(vcc_sklist_lock) { read_unlock(&vcc_sklist_lock); } static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct vcc_state *state = seq->private; v = vcc_walk(state, 1); *pos += !!PTR_ERR(v); return v; } static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc) { static const char *const class_name[] = { "off", "UBR", "CBR", "VBR", "ABR"}; static const char *const aal_name[] = { "---", "1", "2", "3/4", /* 0- 3 */ "???", "5", "???", "???", /* 4- 7 */ "???", "???", "???", "???", /* 8-11 */ "???", "0", "???", "???"}; /* 12-15 */ seq_printf(seq, "%3d %3d %5d %-3s %7d %-5s %7d %-6s", vcc->dev->number, vcc->vpi, vcc->vci, vcc->qos.aal >= ARRAY_SIZE(aal_name) ? "err" : aal_name[vcc->qos.aal], vcc->qos.rxtp.min_pcr, class_name[vcc->qos.rxtp.traffic_class], vcc->qos.txtp.min_pcr, class_name[vcc->qos.txtp.traffic_class]); if (test_bit(ATM_VF_IS_CLIP, &vcc->flags)) { struct clip_vcc *clip_vcc = CLIP_VCC(vcc); struct net_device *dev; dev = clip_vcc->entry ? clip_vcc->entry->neigh->dev : NULL; seq_printf(seq, "CLIP, Itf:%s, Encap:", dev ? dev->name : "none?"); seq_printf(seq, "%s", clip_vcc->encap ? 
"LLC/SNAP" : "None"); } seq_putc(seq, '\n'); } static const char *vcc_state(struct atm_vcc *vcc) { static const char *const map[] = { ATM_VS2TXT_MAP }; return map[ATM_VF2VS(vcc->flags)]; } static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc) { struct sock *sk = sk_atm(vcc); seq_printf(seq, "%pK ", vcc); if (!vcc->dev) seq_printf(seq, "Unassigned "); else seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi, vcc->vci); switch (sk->sk_family) { case AF_ATMPVC: seq_printf(seq, "PVC"); break; case AF_ATMSVC: seq_printf(seq, "SVC"); break; default: seq_printf(seq, "%3d", sk->sk_family); } seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n", vcc->flags, sk->sk_err, sk_wmem_alloc_get(sk), sk->sk_sndbuf, sk_rmem_alloc_get(sk), sk->sk_rcvbuf, atomic_read(&sk->sk_refcnt)); } static void svc_info(struct seq_file *seq, struct atm_vcc *vcc) { if (!vcc->dev) seq_printf(seq, sizeof(void *) == 4 ? "N/A@%pK%10s" : "N/A@%pK%2s", vcc, ""); else seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi, vcc->vci); seq_printf(seq, "%-10s ", vcc_state(vcc)); seq_printf(seq, "%s%s", vcc->remote.sas_addr.pub, *vcc->remote.sas_addr.pub && *vcc->remote.sas_addr.prv ? "+" : ""); if (*vcc->remote.sas_addr.prv) { int i; for (i = 0; i < ATM_ESA_LEN; i++) seq_printf(seq, "%02x", vcc->remote.sas_addr.prv[i]); } seq_putc(seq, '\n'); } static int atm_dev_seq_show(struct seq_file *seq, void *v) { static char atm_dev_banner[] = "Itf Type ESI/\"MAC\"addr " "AAL(TX,err,RX,err,drop) ... 
[refcnt]\n"; if (v == &atm_devs) seq_puts(seq, atm_dev_banner); else { struct atm_dev *dev = list_entry(v, struct atm_dev, dev_list); atm_dev_info(seq, dev); } return 0; } static const struct seq_operations atm_dev_seq_ops = { .start = atm_dev_seq_start, .next = atm_dev_seq_next, .stop = atm_dev_seq_stop, .show = atm_dev_seq_show, }; static int atm_dev_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &atm_dev_seq_ops); } static const struct file_operations devices_seq_fops = { .open = atm_dev_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int pvc_seq_show(struct seq_file *seq, void *v) { static char atm_pvc_banner[] = "Itf VPI VCI AAL RX(PCR,Class) TX(PCR,Class)\n"; if (v == SEQ_START_TOKEN) seq_puts(seq, atm_pvc_banner); else { struct vcc_state *state = seq->private; struct atm_vcc *vcc = atm_sk(state->sk); pvc_info(seq, vcc); } return 0; } static const struct seq_operations pvc_seq_ops = { .start = vcc_seq_start, .next = vcc_seq_next, .stop = vcc_seq_stop, .show = pvc_seq_show, }; static int pvc_seq_open(struct inode *inode, struct file *file) { return __vcc_seq_open(inode, file, PF_ATMPVC, &pvc_seq_ops); } static const struct file_operations pvc_seq_fops = { .open = pvc_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static int vcc_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_printf(seq, sizeof(void *) == 4 ? 
"%-8s%s" : "%-16s%s", "Address ", "Itf VPI VCI Fam Flags Reply " "Send buffer Recv buffer [refcnt]\n"); } else { struct vcc_state *state = seq->private; struct atm_vcc *vcc = atm_sk(state->sk); vcc_info(seq, vcc); } return 0; } static const struct seq_operations vcc_seq_ops = { .start = vcc_seq_start, .next = vcc_seq_next, .stop = vcc_seq_stop, .show = vcc_seq_show, }; static int vcc_seq_open(struct inode *inode, struct file *file) { return __vcc_seq_open(inode, file, 0, &vcc_seq_ops); } static const struct file_operations vcc_seq_fops = { .open = vcc_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static int svc_seq_show(struct seq_file *seq, void *v) { static const char atm_svc_banner[] = "Itf VPI VCI State Remote\n"; if (v == SEQ_START_TOKEN) seq_puts(seq, atm_svc_banner); else { struct vcc_state *state = seq->private; struct atm_vcc *vcc = atm_sk(state->sk); svc_info(seq, vcc); } return 0; } static const struct seq_operations svc_seq_ops = { .start = vcc_seq_start, .next = vcc_seq_next, .stop = vcc_seq_stop, .show = svc_seq_show, }; static int svc_seq_open(struct inode *inode, struct file *file) { return __vcc_seq_open(inode, file, PF_ATMSVC, &svc_seq_ops); } static const struct file_operations svc_seq_fops = { .open = svc_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static ssize_t proc_dev_atm_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct atm_dev *dev; unsigned long page; int length; if (count == 0) return 0; page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; dev = PDE_DATA(file_inode(file)); if (!dev->ops->proc_read) length = -EINVAL; else { length = dev->ops->proc_read(dev, pos, (char *)page); if (length > count) length = -EINVAL; } if (length >= 0) { if (copy_to_user(buf, (char *)page, length)) length = -EFAULT; (*pos)++; } free_page(page); return length; } struct proc_dir_entry *atm_proc_root; EXPORT_SYMBOL(atm_proc_root); int 
atm_proc_dev_register(struct atm_dev *dev)
{
	/* Create /proc/net/atm/<type>:<number> for a device that provides a
	 * proc_read op.  Returns 0 on success (or when the device has no
	 * proc info), -ENOMEM on allocation/creation failure.
	 */
	int error;

	/* No proc info */
	if (!dev->ops->proc_read)
		return 0;

	error = -ENOMEM;
	dev->proc_name = kasprintf(GFP_KERNEL, "%s:%d", dev->type, dev->number);
	if (!dev->proc_name)
		goto err_out;

	dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
					   &proc_atm_dev_ops, dev);
	if (!dev->proc_entry)
		goto err_free_name;
	return 0;

err_free_name:
	kfree(dev->proc_name);
err_out:
	return error;
}

/* Remove the per-device proc entry created by atm_proc_dev_register(). */
void atm_proc_dev_deregister(struct atm_dev *dev)
{
	/* Mirrors the register path: nothing to undo without proc_read */
	if (!dev->ops->proc_read)
		return;

	remove_proc_entry(dev->proc_name, atm_proc_root);
	kfree(dev->proc_name);
}

/* Table of the fixed entries under /proc/net/atm; dirent records what was
 * actually created so teardown only removes existing entries.
 */
static struct atm_proc_entry {
	char *name;
	const struct file_operations *proc_fops;
	struct proc_dir_entry *dirent;
} atm_proc_ents[] = {
	{ .name = "devices",	.proc_fops = &devices_seq_fops },
	{ .name = "pvc",	.proc_fops = &pvc_seq_fops },
	{ .name = "svc",	.proc_fops = &svc_seq_fops },
	{ .name = "vc",		.proc_fops = &vcc_seq_fops },
	{ .name = NULL,		.proc_fops = NULL }
};

/* Remove every entry that was created, then the atm directory itself.
 * Safe to call from the partially-initialized error path of
 * atm_proc_init() because it checks e->dirent before removing.
 */
static void atm_proc_dirs_remove(void)
{
	static struct atm_proc_entry *e;

	for (e = atm_proc_ents; e->name; e++) {
		if (e->dirent)
			remove_proc_entry(e->name, atm_proc_root);
	}
	remove_proc_entry("atm", init_net.proc_net);
}

/* Create /proc/net/atm and all fixed entries.  Returns 0 or -ENOMEM;
 * on failure everything created so far is torn down again.
 */
int __init atm_proc_init(void)
{
	static struct atm_proc_entry *e;
	int ret;

	atm_proc_root = proc_net_mkdir(&init_net, "atm", init_net.proc_net);
	if (!atm_proc_root)
		goto err_out;
	for (e = atm_proc_ents; e->name; e++) {
		struct proc_dir_entry *dirent;

		dirent = proc_create(e->name, S_IRUGO,
				     atm_proc_root, e->proc_fops);
		if (!dirent)
			goto err_out_remove;
		e->dirent = dirent;
	}
	ret = 0;
out:
	return ret;

err_out_remove:
	atm_proc_dirs_remove();
err_out:
	ret = -ENOMEM;
	goto out;
}

void atm_proc_exit(void)
{
	atm_proc_dirs_remove();
}
gpl-2.0
zaclimon/android_kernel_samsung_kylepro
drivers/staging/tidspbridge/pmgr/cmm.c
7467
25072
/* * cmm.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * The Communication(Shared) Memory Management(CMM) module provides * shared memory management services for DSP/BIOS Bridge data streaming * and messaging. * * Multiple shared memory segments can be registered with CMM. * Each registered SM segment is represented by a SM "allocator" that * describes a block of physically contiguous shared memory used for * future allocations by CMM. * * Memory is coalesced back to the appropriate heap when a buffer is * freed. * * Notes: * Va: Virtual address. * Pa: Physical or kernel system address. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/types.h> #include <linux/list.h> /* ----------------------------------- DSP/BIOS Bridge */ #include <dspbridge/dbdefs.h> /* ----------------------------------- OS Adaptation Layer */ #include <dspbridge/sync.h> /* ----------------------------------- Platform Manager */ #include <dspbridge/dev.h> #include <dspbridge/proc.h> /* ----------------------------------- This */ #include <dspbridge/cmm.h> /* ----------------------------------- Defines, Data Structures, Typedefs */ #define NEXT_PA(pnode) (pnode->pa + pnode->size) /* Other bus/platform translations */ #define DSPPA2GPPPA(base, x, y) ((x)+(y)) #define GPPPA2DSPPA(base, x, y) ((x)-(y)) /* * Allocators define a block of contiguous memory used for future allocations. * * sma - shared memory allocator. * vma - virtual memory allocator.(not used). 
*/ struct cmm_allocator { /* sma */ unsigned int shm_base; /* Start of physical SM block */ u32 sm_size; /* Size of SM block in bytes */ unsigned int vm_base; /* Start of VM block. (Dev driver * context for 'sma') */ u32 dsp_phys_addr_offset; /* DSP PA to GPP PA offset for this * SM space */ s8 c_factor; /* DSPPa to GPPPa Conversion Factor */ unsigned int dsp_base; /* DSP virt base byte address */ u32 dsp_size; /* DSP seg size in bytes */ struct cmm_object *cmm_mgr; /* back ref to parent mgr */ /* node list of available memory */ struct list_head free_list; /* node list of memory in use */ struct list_head in_use_list; }; struct cmm_xlator { /* Pa<->Va translator object */ /* CMM object this translator associated */ struct cmm_object *cmm_mgr; /* * Client process virtual base address that corresponds to phys SM * base address for translator's seg_id. * Only 1 segment ID currently supported. */ unsigned int virt_base; /* virtual base address */ u32 virt_size; /* size of virt space in bytes */ u32 seg_id; /* Segment Id */ }; /* CMM Mgr */ struct cmm_object { /* * Cmm Lock is used to serialize access mem manager for multi-threads. 
*/ struct mutex cmm_lock; /* Lock to access cmm mgr */ struct list_head node_free_list; /* Free list of memory nodes */ u32 min_block_size; /* Min SM block; default 16 bytes */ u32 page_size; /* Memory Page size (1k/4k) */ /* GPP SM segment ptrs */ struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS]; }; /* Default CMM Mgr attributes */ static struct cmm_mgrattrs cmm_dfltmgrattrs = { /* min_block_size, min block size(bytes) allocated by cmm mgr */ 16 }; /* Default allocation attributes */ static struct cmm_attrs cmm_dfltalctattrs = { 1 /* seg_id, default segment Id for allocator */ }; /* Address translator default attrs */ static struct cmm_xlatorattrs cmm_dfltxlatorattrs = { /* seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */ 1, 0, /* dsp_bufs */ 0, /* dsp_buf_size */ NULL, /* vm_base */ 0, /* vm_size */ }; /* SM node representing a block of memory. */ struct cmm_mnode { struct list_head link; /* must be 1st element */ u32 pa; /* Phys addr */ u32 va; /* Virtual address in device process context */ u32 size; /* SM block size in bytes */ u32 client_proc; /* Process that allocated this mem block */ }; /* ----------------------------------- Function Prototypes */ static void add_to_free_list(struct cmm_allocator *allocator, struct cmm_mnode *pnode); static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj, u32 ul_seg_id); static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator, u32 usize); static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa, u32 dw_va, u32 ul_size); /* get available slot for new allocator */ static s32 get_slot(struct cmm_object *cmm_mgr_obj); static void un_register_gppsm_seg(struct cmm_allocator *psma); /* * ======== cmm_calloc_buf ======== * Purpose: * Allocate a SM buffer, zero contents, and return the physical address * and optional driver context virtual address(pp_buf_va). * * The freelist is sorted in increasing size order. 
Get the first * block that satifies the request and sort the remaining back on * the freelist; if large enough. The kept block is placed on the * inUseList. */ void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize, struct cmm_attrs *pattrs, void **pp_buf_va) { struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr; void *buf_pa = NULL; struct cmm_mnode *pnode = NULL; struct cmm_mnode *new_node = NULL; struct cmm_allocator *allocator = NULL; u32 delta_size; u8 *pbyte = NULL; s32 cnt; if (pattrs == NULL) pattrs = &cmm_dfltalctattrs; if (pp_buf_va != NULL) *pp_buf_va = NULL; if (cmm_mgr_obj && (usize != 0)) { if (pattrs->seg_id > 0) { /* SegId > 0 is SM */ /* get the allocator object for this segment id */ allocator = get_allocator(cmm_mgr_obj, pattrs->seg_id); /* keep block size a multiple of min_block_size */ usize = ((usize - 1) & ~(cmm_mgr_obj->min_block_size - 1)) + cmm_mgr_obj->min_block_size; mutex_lock(&cmm_mgr_obj->cmm_lock); pnode = get_free_block(allocator, usize); } if (pnode) { delta_size = (pnode->size - usize); if (delta_size >= cmm_mgr_obj->min_block_size) { /* create a new block with the leftovers and * add to freelist */ new_node = get_node(cmm_mgr_obj, pnode->pa + usize, pnode->va + usize, (u32) delta_size); /* leftovers go free */ add_to_free_list(allocator, new_node); /* adjust our node's size */ pnode->size = usize; } /* Tag node with client process requesting allocation * We'll need to free up a process's alloc'd SM if the * client process goes away. 
*/ /* Return TGID instead of process handle */ pnode->client_proc = current->tgid; /* put our node on InUse list */ list_add_tail(&pnode->link, &allocator->in_use_list); buf_pa = (void *)pnode->pa; /* physical address */ /* clear mem */ pbyte = (u8 *) pnode->va; for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++) *pbyte = 0; if (pp_buf_va != NULL) { /* Virtual address */ *pp_buf_va = (void *)pnode->va; } } mutex_unlock(&cmm_mgr_obj->cmm_lock); } return buf_pa; } /* * ======== cmm_create ======== * Purpose: * Create a communication memory manager object. */ int cmm_create(struct cmm_object **ph_cmm_mgr, struct dev_object *hdev_obj, const struct cmm_mgrattrs *mgr_attrts) { struct cmm_object *cmm_obj = NULL; int status = 0; *ph_cmm_mgr = NULL; /* create, zero, and tag a cmm mgr object */ cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL); if (!cmm_obj) return -ENOMEM; if (mgr_attrts == NULL) mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */ /* save away smallest block allocation for this cmm mgr */ cmm_obj->min_block_size = mgr_attrts->min_block_size; cmm_obj->page_size = PAGE_SIZE; /* create node free list */ INIT_LIST_HEAD(&cmm_obj->node_free_list); mutex_init(&cmm_obj->cmm_lock); *ph_cmm_mgr = cmm_obj; return status; } /* * ======== cmm_destroy ======== * Purpose: * Release the communication memory manager resources. 
*/ int cmm_destroy(struct cmm_object *hcmm_mgr, bool force) { struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr; struct cmm_info temp_info; int status = 0; s32 slot_seg; struct cmm_mnode *node, *tmp; if (!hcmm_mgr) { status = -EFAULT; return status; } mutex_lock(&cmm_mgr_obj->cmm_lock); /* If not force then fail if outstanding allocations exist */ if (!force) { /* Check for outstanding memory allocations */ status = cmm_get_info(hcmm_mgr, &temp_info); if (!status) { if (temp_info.total_in_use_cnt > 0) { /* outstanding allocations */ status = -EPERM; } } } if (!status) { /* UnRegister SM allocator */ for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) { if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) { un_register_gppsm_seg (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]); /* Set slot to NULL for future reuse */ cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL; } } } list_for_each_entry_safe(node, tmp, &cmm_mgr_obj->node_free_list, link) { list_del(&node->link); kfree(node); } mutex_unlock(&cmm_mgr_obj->cmm_lock); if (!status) { /* delete CS & cmm mgr object */ mutex_destroy(&cmm_mgr_obj->cmm_lock); kfree(cmm_mgr_obj); } return status; } /* * ======== cmm_free_buf ======== * Purpose: * Free the given buffer. 
*/ int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id) { struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr; int status = -EFAULT; struct cmm_mnode *curr, *tmp; struct cmm_allocator *allocator; struct cmm_attrs *pattrs; if (ul_seg_id == 0) { pattrs = &cmm_dfltalctattrs; ul_seg_id = pattrs->seg_id; } if (!hcmm_mgr || !(ul_seg_id > 0)) { status = -EFAULT; return status; } allocator = get_allocator(cmm_mgr_obj, ul_seg_id); if (!allocator) return status; mutex_lock(&cmm_mgr_obj->cmm_lock); list_for_each_entry_safe(curr, tmp, &allocator->in_use_list, link) { if (curr->pa == (u32) buf_pa) { list_del(&curr->link); add_to_free_list(allocator, curr); status = 0; break; } } mutex_unlock(&cmm_mgr_obj->cmm_lock); return status; } /* * ======== cmm_get_handle ======== * Purpose: * Return the communication memory manager object for this device. * This is typically called from the client process. */ int cmm_get_handle(void *hprocessor, struct cmm_object ** ph_cmm_mgr) { int status = 0; struct dev_object *hdev_obj; if (hprocessor != NULL) status = proc_get_dev_object(hprocessor, &hdev_obj); else hdev_obj = dev_get_first(); /* default */ if (!status) status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr); return status; } /* * ======== cmm_get_info ======== * Purpose: * Return the current memory utilization information. 
*/ int cmm_get_info(struct cmm_object *hcmm_mgr, struct cmm_info *cmm_info_obj) { struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr; u32 ul_seg; int status = 0; struct cmm_allocator *altr; struct cmm_mnode *curr; if (!hcmm_mgr) { status = -EFAULT; return status; } mutex_lock(&cmm_mgr_obj->cmm_lock); cmm_info_obj->num_gppsm_segs = 0; /* # of SM segments */ /* Total # of outstanding alloc */ cmm_info_obj->total_in_use_cnt = 0; /* min block size */ cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size; /* check SM memory segments */ for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) { /* get the allocator object for this segment id */ altr = get_allocator(cmm_mgr_obj, ul_seg); if (!altr) continue; cmm_info_obj->num_gppsm_segs++; cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa = altr->shm_base - altr->dsp_size; cmm_info_obj->seg_info[ul_seg - 1].total_seg_size = altr->dsp_size + altr->sm_size; cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa = altr->shm_base; cmm_info_obj->seg_info[ul_seg - 1].gpp_size = altr->sm_size; cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va = altr->dsp_base; cmm_info_obj->seg_info[ul_seg - 1].dsp_size = altr->dsp_size; cmm_info_obj->seg_info[ul_seg - 1].seg_base_va = altr->vm_base - altr->dsp_size; cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0; list_for_each_entry(curr, &altr->in_use_list, link) { cmm_info_obj->total_in_use_cnt++; cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++; } } mutex_unlock(&cmm_mgr_obj->cmm_lock); return status; } /* * ======== cmm_register_gppsm_seg ======== * Purpose: * Register a block of SM with the CMM to be used for later GPP SM * allocations. 
*/ int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr, u32 dw_gpp_base_pa, u32 ul_size, u32 dsp_addr_offset, s8 c_factor, u32 dw_dsp_base, u32 ul_dsp_size, u32 *sgmt_id, u32 gpp_base_va) { struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr; struct cmm_allocator *psma = NULL; int status = 0; struct cmm_mnode *new_node; s32 slot_seg; dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x " "dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n", __func__, dw_gpp_base_pa, ul_size, dsp_addr_offset, dw_dsp_base, ul_dsp_size, gpp_base_va); if (!hcmm_mgr) return -EFAULT; /* make sure we have room for another allocator */ mutex_lock(&cmm_mgr_obj->cmm_lock); slot_seg = get_slot(cmm_mgr_obj); if (slot_seg < 0) { status = -EPERM; goto func_end; } /* Check if input ul_size is big enough to alloc at least one block */ if (ul_size < cmm_mgr_obj->min_block_size) { status = -EINVAL; goto func_end; } /* create, zero, and tag an SM allocator object */ psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL); if (!psma) { status = -ENOMEM; goto func_end; } psma->cmm_mgr = hcmm_mgr; /* ref to parent */ psma->shm_base = dw_gpp_base_pa; /* SM Base phys */ psma->sm_size = ul_size; /* SM segment size in bytes */ psma->vm_base = gpp_base_va; psma->dsp_phys_addr_offset = dsp_addr_offset; psma->c_factor = c_factor; psma->dsp_base = dw_dsp_base; psma->dsp_size = ul_dsp_size; if (psma->vm_base == 0) { status = -EPERM; goto func_end; } /* return the actual segment identifier */ *sgmt_id = (u32) slot_seg + 1; INIT_LIST_HEAD(&psma->free_list); INIT_LIST_HEAD(&psma->in_use_list); /* Get a mem node for this hunk-o-memory */ new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa, psma->vm_base, ul_size); /* Place node on the SM allocator's free list */ if (new_node) { list_add_tail(&new_node->link, &psma->free_list); } else { status = -ENOMEM; goto func_end; } /* make entry */ cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma; func_end: /* Cleanup allocator */ if (status && psma) 
un_register_gppsm_seg(psma); mutex_unlock(&cmm_mgr_obj->cmm_lock); return status; } /* * ======== cmm_un_register_gppsm_seg ======== * Purpose: * UnRegister GPP SM segments with the CMM. */ int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr, u32 ul_seg_id) { struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr; int status = 0; struct cmm_allocator *psma; u32 ul_id = ul_seg_id; if (!hcmm_mgr) return -EFAULT; if (ul_seg_id == CMM_ALLSEGMENTS) ul_id = 1; if ((ul_id <= 0) || (ul_id > CMM_MAXGPPSEGS)) return -EINVAL; /* * FIXME: CMM_MAXGPPSEGS == 1. why use a while cycle? Seems to me like * the ul_seg_id is not needed here. It must be always 1. */ while (ul_id <= CMM_MAXGPPSEGS) { mutex_lock(&cmm_mgr_obj->cmm_lock); /* slot = seg_id-1 */ psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1]; if (psma != NULL) { un_register_gppsm_seg(psma); /* Set alctr ptr to NULL for future reuse */ cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1] = NULL; } else if (ul_seg_id != CMM_ALLSEGMENTS) { status = -EPERM; } mutex_unlock(&cmm_mgr_obj->cmm_lock); if (ul_seg_id != CMM_ALLSEGMENTS) break; ul_id++; } /* end while */ return status; } /* * ======== un_register_gppsm_seg ======== * Purpose: * UnRegister the SM allocator by freeing all its resources and * nulling cmm mgr table entry. * Note: * This routine is always called within cmm lock crit sect. */ static void un_register_gppsm_seg(struct cmm_allocator *psma) { struct cmm_mnode *curr, *tmp; /* free nodes on free list */ list_for_each_entry_safe(curr, tmp, &psma->free_list, link) { list_del(&curr->link); kfree(curr); } /* free nodes on InUse list */ list_for_each_entry_safe(curr, tmp, &psma->in_use_list, link) { list_del(&curr->link); kfree(curr); } if ((void *)psma->vm_base != NULL) MEM_UNMAP_LINEAR_ADDRESS((void *)psma->vm_base); /* Free allocator itself */ kfree(psma); } /* * ======== get_slot ======== * Purpose: * An available slot # is returned. Returns negative on failure. 
*/ static s32 get_slot(struct cmm_object *cmm_mgr_obj) { s32 slot_seg = -1; /* neg on failure */ /* get first available slot in cmm mgr SMSegTab[] */ for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) { if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL) break; } if (slot_seg == CMM_MAXGPPSEGS) slot_seg = -1; /* failed */ return slot_seg; } /* * ======== get_node ======== * Purpose: * Get a memory node from freelist or create a new one. */ static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa, u32 dw_va, u32 ul_size) { struct cmm_mnode *pnode; /* Check cmm mgr's node freelist */ if (list_empty(&cmm_mgr_obj->node_free_list)) { pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL); if (!pnode) return NULL; } else { /* surely a valid element */ pnode = list_first_entry(&cmm_mgr_obj->node_free_list, struct cmm_mnode, link); list_del_init(&pnode->link); } pnode->pa = dw_pa; pnode->va = dw_va; pnode->size = ul_size; return pnode; } /* * ======== delete_node ======== * Purpose: * Put a memory node on the cmm nodelist for later use. * Doesn't actually delete the node. Heap thrashing friendly. */ static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode) { list_add_tail(&pnode->link, &cmm_mgr_obj->node_free_list); } /* * ====== get_free_block ======== * Purpose: * Scan the free block list and return the first block that satisfies * the size. */ static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator, u32 usize) { struct cmm_mnode *node, *tmp; if (!allocator) return NULL; list_for_each_entry_safe(node, tmp, &allocator->free_list, link) { if (usize <= node->size) { list_del(&node->link); return node; } } return NULL; } /* * ======== add_to_free_list ======== * Purpose: * Coalesce node into the freelist in ascending size order. 
*/ static void add_to_free_list(struct cmm_allocator *allocator, struct cmm_mnode *node) { struct cmm_mnode *curr; if (!node) { pr_err("%s: failed - node is NULL\n", __func__); return; } list_for_each_entry(curr, &allocator->free_list, link) { if (NEXT_PA(curr) == node->pa) { curr->size += node->size; delete_node(allocator->cmm_mgr, node); return; } if (curr->pa == NEXT_PA(node)) { curr->pa = node->pa; curr->va = node->va; curr->size += node->size; delete_node(allocator->cmm_mgr, node); return; } } list_for_each_entry(curr, &allocator->free_list, link) { if (curr->size >= node->size) { list_add_tail(&node->link, &curr->link); return; } } list_add_tail(&node->link, &allocator->free_list); } /* * ======== get_allocator ======== * Purpose: * Return the allocator for the given SM Segid. * SegIds: 1,2,3..max. */ static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj, u32 ul_seg_id) { return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1]; } /* * The CMM_Xlator[xxx] routines below are used by Node and Stream * to perform SM address translation to the client process address space. * A "translator" object is created by a node/stream for each SM seg used. */ /* * ======== cmm_xlator_create ======== * Purpose: * Create an address translator object. 
*/ int cmm_xlator_create(struct cmm_xlatorobject **xlator, struct cmm_object *hcmm_mgr, struct cmm_xlatorattrs *xlator_attrs) { struct cmm_xlator *xlator_object = NULL; int status = 0; *xlator = NULL; if (xlator_attrs == NULL) xlator_attrs = &cmm_dfltxlatorattrs; /* set defaults */ xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL); if (xlator_object != NULL) { xlator_object->cmm_mgr = hcmm_mgr; /* ref back to CMM */ /* SM seg_id */ xlator_object->seg_id = xlator_attrs->seg_id; } else { status = -ENOMEM; } if (!status) *xlator = (struct cmm_xlatorobject *)xlator_object; return status; } /* * ======== cmm_xlator_alloc_buf ======== */ void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf, u32 pa_size) { struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator; void *pbuf = NULL; void *tmp_va_buff; struct cmm_attrs attrs; if (xlator_obj) { attrs.seg_id = xlator_obj->seg_id; __raw_writel(0, va_buf); /* Alloc SM */ pbuf = cmm_calloc_buf(xlator_obj->cmm_mgr, pa_size, &attrs, NULL); if (pbuf) { /* convert to translator(node/strm) process Virtual * address */ tmp_va_buff = cmm_xlator_translate(xlator, pbuf, CMM_PA2VA); __raw_writel((u32)tmp_va_buff, va_buf); } } return pbuf; } /* * ======== cmm_xlator_free_buf ======== * Purpose: * Free the given SM buffer and descriptor. * Does not free virtual memory. */ int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va) { struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator; int status = -EPERM; void *buf_pa = NULL; if (xlator_obj) { /* convert Va to Pa so we can free it. */ buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA); if (buf_pa) { status = cmm_free_buf(xlator_obj->cmm_mgr, buf_pa, xlator_obj->seg_id); if (status) { /* Uh oh, this shouldn't happen. Descriptor * gone! */ pr_err("%s, line %d: Assertion failed\n", __FILE__, __LINE__); } } } return status; } /* * ======== cmm_xlator_info ======== * Purpose: * Set/Get translator info. 
*/ int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 ** paddr, u32 ul_size, u32 segm_id, bool set_info) { struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator; int status = 0; if (xlator_obj) { if (set_info) { /* set translators virtual address range */ xlator_obj->virt_base = (u32) *paddr; xlator_obj->virt_size = ul_size; } else { /* return virt base address */ *paddr = (u8 *) xlator_obj->virt_base; } } else { status = -EFAULT; } return status; } /* * ======== cmm_xlator_translate ======== */ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr, enum cmm_xlatetype xtype) { u32 dw_addr_xlate = 0; struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator; struct cmm_object *cmm_mgr_obj = NULL; struct cmm_allocator *allocator = NULL; u32 dw_offset = 0; if (!xlator_obj) goto loop_cont; cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr; /* get this translator's default SM allocator */ allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1]; if (!allocator) goto loop_cont; if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) || (xtype == CMM_PA2VA)) { if (xtype == CMM_PA2VA) { /* Gpp Va = Va Base + offset */ dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base - allocator-> dsp_size); dw_addr_xlate = xlator_obj->virt_base + dw_offset; /* Check if translated Va base is in range */ if ((dw_addr_xlate < xlator_obj->virt_base) || (dw_addr_xlate >= (xlator_obj->virt_base + xlator_obj->virt_size))) { dw_addr_xlate = 0; /* bad address */ } } else { /* Gpp PA = Gpp Base + offset */ dw_offset = (u8 *) paddr - (u8 *) xlator_obj->virt_base; dw_addr_xlate = allocator->shm_base - allocator->dsp_size + dw_offset; } } else { dw_addr_xlate = (u32) paddr; } /*Now convert address to proper target physical address if needed */ if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) { /* Got Gpp Pa now, convert to DSP Pa */ dw_addr_xlate = GPPPA2DSPPA((allocator->shm_base - allocator->dsp_size), dw_addr_xlate, 
allocator->dsp_phys_addr_offset * allocator->c_factor); } else if (xtype == CMM_DSPPA2PA) { /* Got DSP Pa, convert to GPP Pa */ dw_addr_xlate = DSPPA2GPPPA(allocator->shm_base - allocator->dsp_size, dw_addr_xlate, allocator->dsp_phys_addr_offset * allocator->c_factor); } loop_cont: return (void *)dw_addr_xlate; }
gpl-2.0
binkybear/nexus10-5
net/unix/garbage.c
7723
10621
/* * NET3: Garbage Collector For AF_UNIX sockets * * Garbage Collector: * Copyright (C) Barak A. Pearlmutter. * Released under the GPL version 2 or later. * * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem. * If it doesn't work blame me, it worked when Barak sent it. * * Assumptions: * * - object w/ a bit * - free list * * Current optimizations: * * - explicit stack instead of recursion * - tail recurse on first born instead of immediate push/pop * - we gather the stuff that should not be killed into tree * and stack is just a path from root to the current pointer. * * Future optimizations: * * - don't just push entire root set; process in place * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Fixes: * Alan Cox 07 Sept 1997 Vmalloc internal stack as needed. * Cope with changing max_files. * Al Viro 11 Oct 1998 * Graph may have cycles. That is, we can send the descriptor * of foo to bar and vice versa. Current code chokes on that. * Fix: move SCM_RIGHTS ones into the separate list and then * skb_free() them all instead of doing explicit fput's. * Another problem: since fput() may block somebody may * create a new unix_socket when we are in the middle of sweep * phase. Fix: revert the logic wrt MARKED. Mark everything * upon the beginning and unmark non-junk ones. * * [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS * sent to connect()'ed but still not accept()'ed sockets. * Fixed. Old code had slightly different problem here: * extra fput() in situation when we passed the descriptor via * such socket and closed it (descriptor). That would happen on * each unix_gc() until the accept(). Since the struct file in * question would go to the free list and might be reused... 
* That might be the reason of random oopses on filp_close() * in unrelated processes. * * AV 28 Feb 1999 * Kill the explicit allocation of stack. Now we keep the tree * with root in dummy + pointer (gc_current) to one of the nodes. * Stack is represented as path from gc_current to dummy. Unmark * now means "add to tree". Push == "make it a son of gc_current". * Pop == "move gc_current to parent". We keep only pointers to * parents (->gc_tree). * AV 1 Mar 1999 * Damn. Added missing check for ->dead in listen queues scanning. * * Miklos Szeredi 25 Jun 2007 * Reimplement with a cycle collecting algorithm. This should * solve several problems with the previous code, like being racy * wrt receive and holding up unrelated socket operations. */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/un.h> #include <linux/net.h> #include <linux/fs.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/file.h> #include <linux/proc_fs.h> #include <linux/mutex.h> #include <linux/wait.h> #include <net/sock.h> #include <net/af_unix.h> #include <net/scm.h> #include <net/tcp_states.h> /* Internal data structures and random procedures: */ static LIST_HEAD(gc_inflight_list); static LIST_HEAD(gc_candidates); static DEFINE_SPINLOCK(unix_gc_lock); static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait); unsigned int unix_tot_inflight; struct sock *unix_get_socket(struct file *filp) { struct sock *u_sock = NULL; struct inode *inode = filp->f_path.dentry->d_inode; /* * Socket ? */ if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { struct socket *sock = SOCKET_I(inode); struct sock *s = sock->sk; /* * PF_UNIX ? */ if (s && sock->ops && sock->ops->family == PF_UNIX) u_sock = s; } return u_sock; } /* * Keep the number of times in flight count for the file * descriptor if it is for an AF_UNIX socket. 
*/ void unix_inflight(struct file *fp) { struct sock *s = unix_get_socket(fp); if (s) { struct unix_sock *u = unix_sk(s); spin_lock(&unix_gc_lock); if (atomic_long_inc_return(&u->inflight) == 1) { BUG_ON(!list_empty(&u->link)); list_add_tail(&u->link, &gc_inflight_list); } else { BUG_ON(list_empty(&u->link)); } unix_tot_inflight++; spin_unlock(&unix_gc_lock); } } void unix_notinflight(struct file *fp) { struct sock *s = unix_get_socket(fp); if (s) { struct unix_sock *u = unix_sk(s); spin_lock(&unix_gc_lock); BUG_ON(list_empty(&u->link)); if (atomic_long_dec_and_test(&u->inflight)) list_del_init(&u->link); unix_tot_inflight--; spin_unlock(&unix_gc_lock); } } static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), struct sk_buff_head *hitlist) { struct sk_buff *skb; struct sk_buff *next; spin_lock(&x->sk_receive_queue.lock); skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { /* * Do we have file descriptors ? */ if (UNIXCB(skb).fp) { bool hit = false; /* * Process the descriptors of this socket */ int nfd = UNIXCB(skb).fp->count; struct file **fp = UNIXCB(skb).fp->fp; while (nfd--) { /* * Get the socket the fd matches * if it indeed does so */ struct sock *sk = unix_get_socket(*fp++); if (sk) { struct unix_sock *u = unix_sk(sk); /* * Ignore non-candidates, they could * have been added to the queues after * starting the garbage collection */ if (u->gc_candidate) { hit = true; func(u); } } } if (hit && hitlist != NULL) { __skb_unlink(skb, &x->sk_receive_queue); __skb_queue_tail(hitlist, skb); } } } spin_unlock(&x->sk_receive_queue.lock); } static void scan_children(struct sock *x, void (*func)(struct unix_sock *), struct sk_buff_head *hitlist) { if (x->sk_state != TCP_LISTEN) scan_inflight(x, func, hitlist); else { struct sk_buff *skb; struct sk_buff *next; struct unix_sock *u; LIST_HEAD(embryos); /* * For a listening socket collect the queued embryos * and perform a scan on them as well. 
*/ spin_lock(&x->sk_receive_queue.lock); skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { u = unix_sk(skb->sk); /* * An embryo cannot be in-flight, so it's safe * to use the list link. */ BUG_ON(!list_empty(&u->link)); list_add_tail(&u->link, &embryos); } spin_unlock(&x->sk_receive_queue.lock); while (!list_empty(&embryos)) { u = list_entry(embryos.next, struct unix_sock, link); scan_inflight(&u->sk, func, hitlist); list_del_init(&u->link); } } } static void dec_inflight(struct unix_sock *usk) { atomic_long_dec(&usk->inflight); } static void inc_inflight(struct unix_sock *usk) { atomic_long_inc(&usk->inflight); } static void inc_inflight_move_tail(struct unix_sock *u) { atomic_long_inc(&u->inflight); /* * If this still might be part of a cycle, move it to the end * of the list, so that it's checked even if it was already * passed over */ if (u->gc_maybe_cycle) list_move_tail(&u->link, &gc_candidates); } static bool gc_in_progress = false; #define UNIX_INFLIGHT_TRIGGER_GC 16000 void wait_for_unix_gc(void) { /* * If number of inflight sockets is insane, * force a garbage collect right now. */ if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress) unix_gc(); wait_event(unix_gc_wait, gc_in_progress == false); } /* The external entry point: unix_gc() */ void unix_gc(void) { struct unix_sock *u; struct unix_sock *next; struct sk_buff_head hitlist; struct list_head cursor; LIST_HEAD(not_cycle_list); spin_lock(&unix_gc_lock); /* Avoid a recursive GC. */ if (gc_in_progress) goto out; gc_in_progress = true; /* * First, select candidates for garbage collection. Only * in-flight sockets are considered, and from those only ones * which don't have any external reference. * * Holding unix_gc_lock will protect these candidates from * being detached, and hence from gaining an external * reference. Since there are no possible receivers, all * buffers currently on the candidates' queues stay there * during the garbage collection. 
* * We also know that no new candidate can be added onto the * receive queues. Other, non candidate sockets _can_ be * added to queue, so we must make sure only to touch * candidates. */ list_for_each_entry_safe(u, next, &gc_inflight_list, link) { long total_refs; long inflight_refs; total_refs = file_count(u->sk.sk_socket->file); inflight_refs = atomic_long_read(&u->inflight); BUG_ON(inflight_refs < 1); BUG_ON(total_refs < inflight_refs); if (total_refs == inflight_refs) { list_move_tail(&u->link, &gc_candidates); u->gc_candidate = 1; u->gc_maybe_cycle = 1; } } /* * Now remove all internal in-flight reference to children of * the candidates. */ list_for_each_entry(u, &gc_candidates, link) scan_children(&u->sk, dec_inflight, NULL); /* * Restore the references for children of all candidates, * which have remaining references. Do this recursively, so * only those remain, which form cyclic references. * * Use a "cursor" link, to make the list traversal safe, even * though elements might be moved about. */ list_add(&cursor, &gc_candidates); while (cursor.next != &gc_candidates) { u = list_entry(cursor.next, struct unix_sock, link); /* Move cursor to after the current position. */ list_move(&cursor, &u->link); if (atomic_long_read(&u->inflight) > 0) { list_move_tail(&u->link, &not_cycle_list); u->gc_maybe_cycle = 0; scan_children(&u->sk, inc_inflight_move_tail, NULL); } } list_del(&cursor); /* * not_cycle_list contains those sockets which do not make up a * cycle. Restore these to the inflight list. */ while (!list_empty(&not_cycle_list)) { u = list_entry(not_cycle_list.next, struct unix_sock, link); u->gc_candidate = 0; list_move_tail(&u->link, &gc_inflight_list); } /* * Now gc_candidates contains only garbage. Restore original * inflight counters for these as well, and remove the skbuffs * which are creating the cycle(s). 
*/ skb_queue_head_init(&hitlist); list_for_each_entry(u, &gc_candidates, link) scan_children(&u->sk, inc_inflight, &hitlist); spin_unlock(&unix_gc_lock); /* Here we are. Hitlist is filled. Die. */ __skb_queue_purge(&hitlist); spin_lock(&unix_gc_lock); /* All candidates should have been detached by now. */ BUG_ON(!list_empty(&gc_candidates)); gc_in_progress = false; wake_up(&unix_gc_wait); out: spin_unlock(&unix_gc_lock); }
gpl-2.0
dhiru1602/android_kernel_samsung_latona
drivers/staging/rtl8187se/r8185b_init.c
7979
50201
/*++ Copyright (c) Realtek Semiconductor Corp. All rights reserved. Module Name: r8185b_init.c Abstract: Hardware Initialization and Hardware IO for RTL8185B Major Change History: When Who What ---------- --------------- ------------------------------- 2006-11-15 Xiong Created Notes: This file is ported from RTL8185B Windows driver. --*/ /*--------------------------Include File------------------------------------*/ #include <linux/spinlock.h> #include "r8180_hw.h" #include "r8180.h" #include "r8180_rtl8225.h" /* RTL8225 Radio frontend */ #include "r8180_93cx6.h" /* Card EEPROM */ #include "r8180_wx.h" #include "ieee80211/dot11d.h" /* #define CONFIG_RTL8180_IO_MAP */ #define TC_3W_POLL_MAX_TRY_CNT 5 static u8 MAC_REG_TABLE[][2] = { /*PAGA 0: */ /* 0x34(BRSR), 0xBE(RATE_FALLBACK_CTL), 0x1E0(ARFR) would set in HwConfigureRTL8185() */ /* 0x272(RFSW_CTRL), 0x1CE(AESMSK_QC) set in InitializeAdapter8185(). */ /* 0x1F0~0x1F8 set in MacConfig_85BASIC() */ {0x08, 0xae}, {0x0a, 0x72}, {0x5b, 0x42}, {0x84, 0x88}, {0x85, 0x24}, {0x88, 0x54}, {0x8b, 0xb8}, {0x8c, 0x03}, {0x8d, 0x40}, {0x8e, 0x00}, {0x8f, 0x00}, {0x5b, 0x18}, {0x91, 0x03}, {0x94, 0x0F}, {0x95, 0x32}, {0x96, 0x00}, {0x97, 0x07}, {0xb4, 0x22}, {0xdb, 0x00}, {0xf0, 0x32}, {0xf1, 0x32}, {0xf2, 0x00}, {0xf3, 0x00}, {0xf4, 0x32}, {0xf5, 0x43}, {0xf6, 0x00}, {0xf7, 0x00}, {0xf8, 0x46}, {0xf9, 0xa4}, {0xfa, 0x00}, {0xfb, 0x00}, {0xfc, 0x96}, {0xfd, 0xa4}, {0xfe, 0x00}, {0xff, 0x00}, /*PAGE 1: */ /* For Flextronics system Logo PCIHCT failure: */ /* 0x1C4~0x1CD set no-zero value to avoid PCI configuration space 0x45[7]=1 */ {0x5e, 0x01}, {0x58, 0x00}, {0x59, 0x00}, {0x5a, 0x04}, {0x5b, 0x00}, {0x60, 0x24}, {0x61, 0x97}, {0x62, 0xF0}, {0x63, 0x09}, {0x80, 0x0F}, {0x81, 0xFF}, {0x82, 0xFF}, {0x83, 0x03}, {0xC4, 0x22}, {0xC5, 0x22}, {0xC6, 0x22}, {0xC7, 0x22}, {0xC8, 0x22}, /* lzm add 080826 */ {0xC9, 0x22}, {0xCA, 0x22}, {0xCB, 0x22}, {0xCC, 0x22}, {0xCD, 0x22},/* lzm add 080826 */ {0xe2, 0x00}, /* PAGE 2: */ {0x5e, 0x02}, 
{0x0c, 0x04}, {0x4c, 0x30}, {0x4d, 0x08}, {0x50, 0x05}, {0x51, 0xf5}, {0x52, 0x04}, {0x53, 0xa0}, {0x54, 0xff}, {0x55, 0xff}, {0x56, 0xff}, {0x57, 0xff}, {0x58, 0x08}, {0x59, 0x08}, {0x5a, 0x08}, {0x5b, 0x08}, {0x60, 0x08}, {0x61, 0x08}, {0x62, 0x08}, {0x63, 0x08}, {0x64, 0x2f}, {0x8c, 0x3f}, {0x8d, 0x3f}, {0x8e, 0x3f}, {0x8f, 0x3f}, {0xc4, 0xff}, {0xc5, 0xff}, {0xc6, 0xff}, {0xc7, 0xff}, {0xc8, 0x00}, {0xc9, 0x00}, {0xca, 0x80}, {0xcb, 0x00}, /* PAGA 0: */ {0x5e, 0x00}, {0x9f, 0x03} }; static u8 ZEBRA_AGC[] = { 0, 0x7E, 0x7E, 0x7E, 0x7E, 0x7D, 0x7C, 0x7B, 0x7A, 0x79, 0x78, 0x77, 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x70, 0x6F, 0x6E, 0x6D, 0x6C, 0x6B, 0x6A, 0x69, 0x68, 0x67, 0x66, 0x65, 0x64, 0x63, 0x62, 0x48, 0x47, 0x46, 0x45, 0x44, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x15, 0x16, 0x17, 0x17, 0x18, 0x18, 0x19, 0x1a, 0x1a, 0x1b, 0x1b, 0x1c, 0x1c, 0x1d, 0x1d, 0x1d, 0x1e, 0x1e, 0x1f, 0x1f, 0x1f, 0x20, 0x20, 0x20, 0x20, 0x21, 0x21, 0x21, 0x22, 0x22, 0x22, 0x23, 0x23, 0x24, 0x24, 0x25, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F }; static u32 ZEBRA_RF_RX_GAIN_TABLE[] = { 0x0096, 0x0076, 0x0056, 0x0036, 0x0016, 0x01f6, 0x01d6, 0x01b6, 0x0196, 0x0176, 0x00F7, 0x00D7, 0x00B7, 0x0097, 0x0077, 0x0057, 0x0037, 0x00FB, 0x00DB, 0x00BB, 0x00FF, 0x00E3, 0x00C3, 0x00A3, 0x0083, 0x0063, 0x0043, 0x0023, 0x0003, 0x01E3, 0x01C3, 0x01A3, 0x0183, 0x0163, 0x0143, 0x0123, 0x0103 }; static u8 OFDM_CONFIG[] = { /* OFDM reg0x06[7:0]=0xFF: Enable power saving mode in RX */ /* OFDM reg0x3C[4]=1'b1: Enable RX power saving mode */ /* ofdm 0x3a = 0x7b ,(original : 0xfb) For ECS shielding room TP test */ /* 0x00 */ 0x10, 0x0F, 0x0A, 0x0C, 0x14, 0xFA, 0xFF, 0x50, 0x00, 0x50, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, /* 0x10 */ 0x40, 0x00, 0x40, 0x00, 0x00, 
0x00, 0xA8, 0x26, 0x32, 0x33, 0x06, 0xA5, 0x6F, 0x55, 0xC8, 0xBB, /* 0x20 */ 0x0A, 0xE1, 0x2C, 0x4A, 0x86, 0x83, 0x34, 0x00, 0x4F, 0x24, 0x6F, 0xC2, 0x03, 0x40, 0x80, 0x00, /* 0x30 */ 0xC0, 0xC1, 0x58, 0xF1, 0x00, 0xC4, 0x90, 0x3e, 0xD8, 0x3C, 0x7B, 0x10, 0x10 }; /* --------------------------------------------------------------- * Hardware IO * the code is ported from Windows source code ----------------------------------------------------------------*/ void PlatformIOWrite1Byte( struct net_device *dev, u32 offset, u8 data ) { write_nic_byte(dev, offset, data); read_nic_byte(dev, offset); /* To make sure write operation is completed, 2005.11.09, by rcnjko. */ } void PlatformIOWrite2Byte( struct net_device *dev, u32 offset, u16 data ) { write_nic_word(dev, offset, data); read_nic_word(dev, offset); /* To make sure write operation is completed, 2005.11.09, by rcnjko. */ } u8 PlatformIORead1Byte(struct net_device *dev, u32 offset); void PlatformIOWrite4Byte( struct net_device *dev, u32 offset, u32 data ) { /* {by amy 080312 */ if (offset == PhyAddr) { /* For Base Band configuration. */ unsigned char cmdByte; unsigned long dataBytes; unsigned char idx; u8 u1bTmp; cmdByte = (u8)(data & 0x000000ff); dataBytes = data>>8; /* 071010, rcnjko: The critical section is only BB read/write race condition. Assumption: 1. We assume NO one will access BB at DIRQL, otherwise, system will crash for acquiring the spinlock in such context. 2. PlatformIOWrite4Byte() MUST NOT be recursive. */ /* NdisAcquireSpinLock( &(pDevice->IoSpinLock) ); */ for (idx = 0; idx < 30; idx++) { /* Make sure command bit is clear before access it. 
*/ u1bTmp = PlatformIORead1Byte(dev, PhyAddr); if ((u1bTmp & BIT7) == 0) break; else mdelay(10); } for (idx = 0; idx < 3; idx++) PlatformIOWrite1Byte(dev, offset+1+idx, ((u8 *)&dataBytes)[idx]); write_nic_byte(dev, offset, cmdByte); /* NdisReleaseSpinLock( &(pDevice->IoSpinLock) ); */ } /* by amy 080312} */ else { write_nic_dword(dev, offset, data); read_nic_dword(dev, offset); /* To make sure write operation is completed, 2005.11.09, by rcnjko. */ } } u8 PlatformIORead1Byte( struct net_device *dev, u32 offset ) { u8 data = 0; data = read_nic_byte(dev, offset); return data; } u16 PlatformIORead2Byte( struct net_device *dev, u32 offset ) { u16 data = 0; data = read_nic_word(dev, offset); return data; } u32 PlatformIORead4Byte( struct net_device *dev, u32 offset ) { u32 data = 0; data = read_nic_dword(dev, offset); return data; } void SetOutputEnableOfRfPins(struct net_device *dev) { write_nic_word(dev, RFPinsEnable, 0x1bff); } static int HwHSSIThreeWire( struct net_device *dev, u8 *pDataBuf, u8 nDataBufBitCnt, int bSI, int bWrite ) { int bResult = 1; u8 TryCnt; u8 u1bTmp; do { /* Check if WE and RE are cleared. */ for (TryCnt = 0; TryCnt < TC_3W_POLL_MAX_TRY_CNT; TryCnt++) { u1bTmp = read_nic_byte(dev, SW_3W_CMD1); if ((u1bTmp & (SW_3W_CMD1_RE|SW_3W_CMD1_WE)) == 0) break; udelay(10); } if (TryCnt == TC_3W_POLL_MAX_TRY_CNT) { printk(KERN_ERR "rtl8187se: HwThreeWire(): CmdReg:" " %#X RE|WE bits are not clear!!\n", u1bTmp); dump_stack(); return 0; } /* RTL8187S HSSI Read/Write Function */ u1bTmp = read_nic_byte(dev, RF_SW_CONFIG); if (bSI) u1bTmp |= RF_SW_CFG_SI; /* reg08[1]=1 Serial Interface(SI) */ else u1bTmp &= ~RF_SW_CFG_SI; /* reg08[1]=0 Parallel Interface(PI) */ write_nic_byte(dev, RF_SW_CONFIG, u1bTmp); if (bSI) { /* jong: HW SI read must set reg84[3]=0. */ u1bTmp = read_nic_byte(dev, RFPinsSelect); u1bTmp &= ~BIT3; write_nic_byte(dev, RFPinsSelect, u1bTmp); } /* Fill up data buffer for write operation. 
*/ if (bWrite) { if (nDataBufBitCnt == 16) { write_nic_word(dev, SW_3W_DB0, *((u16 *)pDataBuf)); } else if (nDataBufBitCnt == 64) { /* RTL8187S shouldn't enter this case */ write_nic_dword(dev, SW_3W_DB0, *((u32 *)pDataBuf)); write_nic_dword(dev, SW_3W_DB1, *((u32 *)(pDataBuf + 4))); } else { int idx; int ByteCnt = nDataBufBitCnt / 8; /* printk("%d\n",nDataBufBitCnt); */ if ((nDataBufBitCnt % 8) != 0) { printk(KERN_ERR "rtl8187se: " "HwThreeWire(): nDataBufBitCnt(%d)" " should be multiple of 8!!!\n", nDataBufBitCnt); dump_stack(); nDataBufBitCnt += 8; nDataBufBitCnt &= ~7; } if (nDataBufBitCnt > 64) { printk(KERN_ERR "rtl8187se: HwThreeWire():" " nDataBufBitCnt(%d) should <= 64!!!\n", nDataBufBitCnt); dump_stack(); nDataBufBitCnt = 64; } for (idx = 0; idx < ByteCnt; idx++) write_nic_byte(dev, (SW_3W_DB0+idx), *(pDataBuf+idx)); } } else { /* read */ if (bSI) { /* SI - reg274[3:0] : RF register's Address */ write_nic_word(dev, SW_3W_DB0, *((u16 *)pDataBuf)); } else { /* PI - reg274[15:12] : RF register's Address */ write_nic_word(dev, SW_3W_DB0, (*((u16 *)pDataBuf)) << 12); } } /* Set up command: WE or RE. */ if (bWrite) write_nic_byte(dev, SW_3W_CMD1, SW_3W_CMD1_WE); else write_nic_byte(dev, SW_3W_CMD1, SW_3W_CMD1_RE); /* Check if DONE is set. */ for (TryCnt = 0; TryCnt < TC_3W_POLL_MAX_TRY_CNT; TryCnt++) { u1bTmp = read_nic_byte(dev, SW_3W_CMD1); if ((u1bTmp & SW_3W_CMD1_DONE) != 0) break; udelay(10); } write_nic_byte(dev, SW_3W_CMD1, 0); /* Read back data for read operation. */ if (bWrite == 0) { if (bSI) { /* Serial Interface : reg363_362[11:0] */ *((u16 *)pDataBuf) = read_nic_word(dev, SI_DATA_READ) ; } else { /* Parallel Interface : reg361_360[11:0] */ *((u16 *)pDataBuf) = read_nic_word(dev, PI_DATA_READ); } *((u16 *)pDataBuf) &= 0x0FFF; } } while (0); return bResult; } void RF_WriteReg(struct net_device *dev, u8 offset, u32 data) { u32 data2Write; u8 len; /* Pure HW 3-wire. 
	 */
	/* Pack the 12-bit data above the 4-bit register address. */
	data2Write = (data << 4) | (u32)(offset & 0x0f);
	len = 16;
	HwHSSIThreeWire(dev, (u8 *)(&data2Write), len, 1, 1);
}

/*
 * Read RF register 'offset' through the hardware 3-wire (HSSI SI)
 * interface and return its value.
 */
u32
RF_ReadReg(struct net_device *dev, u8 offset)
{
	u32 data2Write;
	u8 wlen;
	u32 dataRead;

	data2Write = ((u32)(offset & 0x0f));
	wlen = 16;
	/* HwHSSIThreeWire() overwrites the buffer with the value read. */
	HwHSSIThreeWire(dev, (u8 *)(&data2Write), wlen, 1, 0);
	dataRead = data2Write;
	return dataRead;
}

/* by Owen on 04/07/14 for writing BB register successfully */
/* Write one baseband register: the address/data pair is encoded in Data. */
void
WriteBBPortUchar(
	struct net_device *dev,
	u32 Data
	)
{
	/* u8 TimeoutCounter; */
	u8 RegisterContent;
	u8 UCharData;

	UCharData = (u8)((Data & 0x0000ff00) >> 8);
	PlatformIOWrite4Byte(dev, PhyAddr, Data);
	/* for(TimeoutCounter = 10; TimeoutCounter > 0; TimeoutCounter--) */
	{
		/* Re-issue with BIT7 cleared, then read back (verification
		 * loop was disabled; see commented-out code). */
		PlatformIOWrite4Byte(dev, PhyAddr, Data & 0xffffff7f);
		RegisterContent = PlatformIORead1Byte(dev, PhyDataR);
		/*if(UCharData == RegisterContent) */
		/*	break; */
	}
}

/* Read one baseband register at 'addr' and return its content. */
u8
ReadBBPortUchar(
	struct net_device *dev,
	u32 addr
	)
{
	/*u8 TimeoutCounter; */
	u8 RegisterContent;

	PlatformIOWrite4Byte(dev, PhyAddr, addr & 0xffffff7f);
	RegisterContent = PlatformIORead1Byte(dev, PhyDataR);

	return RegisterContent;
}
/* {by amy 080312 */
/*
 * Description:
 *	Perform Antenna settings with antenna diversity on 87SE.
 *	Created by Roger, 2008.01.25.
 */
bool
SetAntennaConfig87SE(
	struct net_device *dev,
	u8 DefaultAnt,		/* 0: Main, 1: Aux. */
	bool bAntDiversity	/* 1:Enable, 0: Disable. */
)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
	bool bAntennaSwitched = true;

	/* printk("SetAntennaConfig87SE(): DefaultAnt(%d), bAntDiversity(%d)\n", DefaultAnt, bAntDiversity); */

	/* Threshold for antenna diversity. */
	write_phy_cck(dev, 0x0c, 0x09); /* Reg0c : 09 */

	if (bAntDiversity) {	/* Enable Antenna Diversity. */
		if (DefaultAnt == 1) {	/* aux antenna */
			/* Mac register, aux antenna */
			write_nic_byte(dev, ANTSEL, 0x00);
			/* Config CCK RX antenna. */
			write_phy_cck(dev, 0x11, 0xbb); /* Reg11 : bb */
			write_phy_cck(dev, 0x01, 0xc7); /* Reg01 : c7 */
			/* Config OFDM RX antenna.
*/ write_phy_ofdm(dev, 0x0D, 0x54); /* Reg0d : 54 */ write_phy_ofdm(dev, 0x18, 0xb2); /* Reg18 : b2 */ } else { /* use main antenna */ /* Mac register, main antenna */ write_nic_byte(dev, ANTSEL, 0x03); /* base band */ /* Config CCK RX antenna. */ write_phy_cck(dev, 0x11, 0x9b); /* Reg11 : 9b */ write_phy_cck(dev, 0x01, 0xc7); /* Reg01 : c7 */ /* Config OFDM RX antenna. */ write_phy_ofdm(dev, 0x0d, 0x5c); /* Reg0d : 5c */ write_phy_ofdm(dev, 0x18, 0xb2); /* Reg18 : b2 */ } } else { /* Disable Antenna Diversity. */ if (DefaultAnt == 1) { /* aux Antenna */ /* Mac register, aux antenna */ write_nic_byte(dev, ANTSEL, 0x00); /* Config CCK RX antenna. */ write_phy_cck(dev, 0x11, 0xbb); /* Reg11 : bb */ write_phy_cck(dev, 0x01, 0x47); /* Reg01 : 47 */ /* Config OFDM RX antenna. */ write_phy_ofdm(dev, 0x0D, 0x54); /* Reg0d : 54 */ write_phy_ofdm(dev, 0x18, 0x32); /* Reg18 : 32 */ } else { /* main Antenna */ /* Mac register, main antenna */ write_nic_byte(dev, ANTSEL, 0x03); /* Config CCK RX antenna. */ write_phy_cck(dev, 0x11, 0x9b); /* Reg11 : 9b */ write_phy_cck(dev, 0x01, 0x47); /* Reg01 : 47 */ /* Config OFDM RX antenna. */ write_phy_ofdm(dev, 0x0D, 0x5c); /* Reg0d : 5c */ write_phy_ofdm(dev, 0x18, 0x32); /*Reg18 : 32 */ } } priv->CurrAntennaIndex = DefaultAnt; /* Update default settings. */ return bAntennaSwitched; } /* by amy 080312 */ /* --------------------------------------------------------------- * Hardware Initialization. 
* the code is ported from Windows source code ----------------------------------------------------------------*/ void ZEBRA_Config_85BASIC_HardCode( struct net_device *dev ) { struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev); u32 i; u32 addr, data; u32 u4bRegOffset, u4bRegValue, u4bRF23, u4bRF24; u8 u1b24E; int d_cut = 0; /* ============================================================================= 87S_PCIE :: RADIOCFG.TXT ============================================================================= */ /* Page1 : reg16-reg30 */ RF_WriteReg(dev, 0x00, 0x013f); mdelay(1); /* switch to page1 */ u4bRF23 = RF_ReadReg(dev, 0x08); mdelay(1); u4bRF24 = RF_ReadReg(dev, 0x09); mdelay(1); if (u4bRF23 == 0x818 && u4bRF24 == 0x70C) { d_cut = 1; printk(KERN_INFO "rtl8187se: card type changed from C- to D-cut\n"); } /* Page0 : reg0-reg15 */ RF_WriteReg(dev, 0x00, 0x009f); mdelay(1);/* 1 */ RF_WriteReg(dev, 0x01, 0x06e0); mdelay(1); RF_WriteReg(dev, 0x02, 0x004d); mdelay(1);/* 2 */ RF_WriteReg(dev, 0x03, 0x07f1); mdelay(1);/* 3 */ RF_WriteReg(dev, 0x04, 0x0975); mdelay(1); RF_WriteReg(dev, 0x05, 0x0c72); mdelay(1); RF_WriteReg(dev, 0x06, 0x0ae6); mdelay(1); RF_WriteReg(dev, 0x07, 0x00ca); mdelay(1); RF_WriteReg(dev, 0x08, 0x0e1c); mdelay(1); RF_WriteReg(dev, 0x09, 0x02f0); mdelay(1); RF_WriteReg(dev, 0x0a, 0x09d0); mdelay(1); RF_WriteReg(dev, 0x0b, 0x01ba); mdelay(1); RF_WriteReg(dev, 0x0c, 0x0640); mdelay(1); RF_WriteReg(dev, 0x0d, 0x08df); mdelay(1); RF_WriteReg(dev, 0x0e, 0x0020); mdelay(1); RF_WriteReg(dev, 0x0f, 0x0990); mdelay(1); /* Page1 : reg16-reg30 */ RF_WriteReg(dev, 0x00, 0x013f); mdelay(1); RF_WriteReg(dev, 0x03, 0x0806); mdelay(1); RF_WriteReg(dev, 0x04, 0x03a7); mdelay(1); RF_WriteReg(dev, 0x05, 0x059b); mdelay(1); RF_WriteReg(dev, 0x06, 0x0081); mdelay(1); RF_WriteReg(dev, 0x07, 0x01A0); mdelay(1); /* Don't write RF23/RF24 to make a difference between 87S C cut and D cut. asked by SD3 stevenl. 
*/ RF_WriteReg(dev, 0x0a, 0x0001); mdelay(1); RF_WriteReg(dev, 0x0b, 0x0418); mdelay(1); if (d_cut) { RF_WriteReg(dev, 0x0c, 0x0fbe); mdelay(1); RF_WriteReg(dev, 0x0d, 0x0008); mdelay(1); RF_WriteReg(dev, 0x0e, 0x0807); mdelay(1); /* RX LO buffer */ } else { RF_WriteReg(dev, 0x0c, 0x0fbe); mdelay(1); RF_WriteReg(dev, 0x0d, 0x0008); mdelay(1); RF_WriteReg(dev, 0x0e, 0x0806); mdelay(1); /* RX LO buffer */ } RF_WriteReg(dev, 0x0f, 0x0acc); mdelay(1); RF_WriteReg(dev, 0x00, 0x01d7); mdelay(1); /* 6 */ RF_WriteReg(dev, 0x03, 0x0e00); mdelay(1); RF_WriteReg(dev, 0x04, 0x0e50); mdelay(1); for (i = 0; i <= 36; i++) { RF_WriteReg(dev, 0x01, i); mdelay(1); RF_WriteReg(dev, 0x02, ZEBRA_RF_RX_GAIN_TABLE[i]); mdelay(1); } RF_WriteReg(dev, 0x05, 0x0203); mdelay(1); /* 203, 343 */ RF_WriteReg(dev, 0x06, 0x0200); mdelay(1); /* 400 */ RF_WriteReg(dev, 0x00, 0x0137); mdelay(1); /* switch to reg16-reg30, and HSSI disable 137 */ mdelay(10); /* Deay 10 ms. */ /* 0xfd */ RF_WriteReg(dev, 0x0d, 0x0008); mdelay(1); /* Z4 synthesizer loop filter setting, 392 */ mdelay(10); /* Deay 10 ms. */ /* 0xfd */ RF_WriteReg(dev, 0x00, 0x0037); mdelay(1); /* switch to reg0-reg15, and HSSI disable */ mdelay(10); /* Deay 10 ms. */ /* 0xfd */ RF_WriteReg(dev, 0x04, 0x0160); mdelay(1); /* CBC on, Tx Rx disable, High gain */ mdelay(10); /* Deay 10 ms. */ /* 0xfd */ RF_WriteReg(dev, 0x07, 0x0080); mdelay(1); /* Z4 setted channel 1 */ mdelay(10); /* Deay 10 ms. */ /* 0xfd */ RF_WriteReg(dev, 0x02, 0x088D); mdelay(1); /* LC calibration */ mdelay(200); /* Deay 200 ms. */ /* 0xfd */ mdelay(10); /* Deay 10 ms. */ /* 0xfd */ mdelay(10); /* Deay 10 ms. */ /* 0xfd */ RF_WriteReg(dev, 0x00, 0x0137); mdelay(1); /* switch to reg16-reg30 137, and HSSI disable 137 */ mdelay(10); /* Deay 10 ms. 
*/ /* 0xfd */ RF_WriteReg(dev, 0x07, 0x0000); mdelay(1); RF_WriteReg(dev, 0x07, 0x0180); mdelay(1); RF_WriteReg(dev, 0x07, 0x0220); mdelay(1); RF_WriteReg(dev, 0x07, 0x03E0); mdelay(1); /* DAC calibration off 20070702 */ RF_WriteReg(dev, 0x06, 0x00c1); mdelay(1); RF_WriteReg(dev, 0x0a, 0x0001); mdelay(1); /* {by amy 080312 */ /* For crystal calibration, added by Roger, 2007.12.11. */ if (priv->bXtalCalibration) { /* reg 30. */ /* enable crystal calibration. RF Reg[30], (1)Xin:[12:9], Xout:[8:5], addr[4:0]. (2)PA Pwr delay timer[15:14], default: 2.4us, set BIT15=0 (3)RF signal on/off when calibration[13], default: on, set BIT13=0. So we should minus 4 BITs offset. */ RF_WriteReg(dev, 0x0f, (priv->XtalCal_Xin<<5) | (priv->XtalCal_Xout<<1) | BIT11 | BIT9); mdelay(1); printk("ZEBRA_Config_85BASIC_HardCode(): (%02x)\n", (priv->XtalCal_Xin<<5) | (priv->XtalCal_Xout<<1) | BIT11 | BIT9); } else { /* using default value. Xin=6, Xout=6. */ RF_WriteReg(dev, 0x0f, 0x0acc); mdelay(1); } /* by amy 080312 */ RF_WriteReg(dev, 0x00, 0x00bf); mdelay(1); /* switch to reg0-reg15, and HSSI enable */ RF_WriteReg(dev, 0x0d, 0x08df); mdelay(1); /* Rx BB start calibration, 00c//+edward */ RF_WriteReg(dev, 0x02, 0x004d); mdelay(1); /* temperature meter off */ RF_WriteReg(dev, 0x04, 0x0975); mdelay(1); /* Rx mode */ mdelay(10); /* Deay 10 ms.*/ /* 0xfe */ mdelay(10); /* Deay 10 ms.*/ /* 0xfe */ mdelay(10); /* Deay 10 ms.*/ /* 0xfe */ RF_WriteReg(dev, 0x00, 0x0197); mdelay(1); /* Rx mode*/ /*+edward */ RF_WriteReg(dev, 0x05, 0x05ab); mdelay(1); /* Rx mode*/ /*+edward */ RF_WriteReg(dev, 0x00, 0x009f); mdelay(1); /* Rx mode*/ /*+edward */ RF_WriteReg(dev, 0x01, 0x0000); mdelay(1); /* Rx mode*/ /*+edward */ RF_WriteReg(dev, 0x02, 0x0000); mdelay(1); /* Rx mode*/ /*+edward */ /* power save parameters. 
*/ u1b24E = read_nic_byte(dev, 0x24E); write_nic_byte(dev, 0x24E, (u1b24E & (~(BIT5|BIT6)))); /*============================================================================= ============================================================================= CCKCONF.TXT ============================================================================= */ /* [POWER SAVE] Power Saving Parameters by jong. 2007-11-27 CCK reg0x00[7]=1'b1 :power saving for TX (default) CCK reg0x00[6]=1'b1: power saving for RX (default) CCK reg0x06[4]=1'b1: turn off channel estimation related circuits if not doing channel estimation. CCK reg0x06[3]=1'b1: turn off unused circuits before cca = 1 CCK reg0x06[2]=1'b1: turn off cck's circuit if macrst =0 */ write_phy_cck(dev, 0x00, 0xc8); write_phy_cck(dev, 0x06, 0x1c); write_phy_cck(dev, 0x10, 0x78); write_phy_cck(dev, 0x2e, 0xd0); write_phy_cck(dev, 0x2f, 0x06); write_phy_cck(dev, 0x01, 0x46); /* power control */ write_nic_byte(dev, CCK_TXAGC, 0x10); write_nic_byte(dev, OFDM_TXAGC, 0x1B); write_nic_byte(dev, ANTSEL, 0x03); /* ============================================================================= AGC.txt ============================================================================= */ write_phy_ofdm(dev, 0x00, 0x12); for (i = 0; i < 128; i++) { data = ZEBRA_AGC[i+1]; data = data << 8; data = data | 0x0000008F; addr = i + 0x80; /* enable writing AGC table */ addr = addr << 8; addr = addr | 0x0000008E; WriteBBPortUchar(dev, data); WriteBBPortUchar(dev, addr); WriteBBPortUchar(dev, 0x0000008E); } PlatformIOWrite4Byte(dev, PhyAddr, 0x00001080); /* Annie, 2006-05-05 */ /* ============================================================================= ============================================================================= OFDMCONF.TXT ============================================================================= */ for (i = 0; i < 60; i++) { u4bRegOffset = i; u4bRegValue = OFDM_CONFIG[i]; WriteBBPortUchar(dev, (0x00000080 | (u4bRegOffset & 0x7f) | 
((u4bRegValue & 0xff) << 8))); } /* ============================================================================= by amy for antenna ============================================================================= */ /* {by amy 080312 */ /* Config Sw/Hw Combinational Antenna Diversity. Added by Roger, 2008.02.26. */ SetAntennaConfig87SE(dev, priv->bDefaultAntenna1, priv->bSwAntennaDiverity); /* by amy 080312} */ /* by amy for antenna */ } void UpdateInitialGain( struct net_device *dev ) { struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev); /* lzm add 080826 */ if (priv->eRFPowerState != eRfOn) { /* Don't access BB/RF under disable PLL situation. RT_TRACE(COMP_DIG, DBG_LOUD, ("UpdateInitialGain - pHalData->eRFPowerState!=eRfOn\n")); Back to the original state */ priv->InitialGain = priv->InitialGainBackUp; return; } switch (priv->InitialGain) { case 1: /* m861dBm */ write_phy_ofdm(dev, 0x17, 0x26); mdelay(1); write_phy_ofdm(dev, 0x24, 0x86); mdelay(1); write_phy_ofdm(dev, 0x05, 0xfa); mdelay(1); break; case 2: /* m862dBm */ write_phy_ofdm(dev, 0x17, 0x36); mdelay(1); write_phy_ofdm(dev, 0x24, 0x86); mdelay(1); write_phy_ofdm(dev, 0x05, 0xfa); mdelay(1); break; case 3: /* m863dBm */ write_phy_ofdm(dev, 0x17, 0x36); mdelay(1); write_phy_ofdm(dev, 0x24, 0x86); mdelay(1); write_phy_ofdm(dev, 0x05, 0xfb); mdelay(1); break; case 4: /* m864dBm */ write_phy_ofdm(dev, 0x17, 0x46); mdelay(1); write_phy_ofdm(dev, 0x24, 0x86); mdelay(1); write_phy_ofdm(dev, 0x05, 0xfb); mdelay(1); break; case 5: /* m82dBm */ write_phy_ofdm(dev, 0x17, 0x46); mdelay(1); write_phy_ofdm(dev, 0x24, 0x96); mdelay(1); write_phy_ofdm(dev, 0x05, 0xfb); mdelay(1); break; case 6: /* m78dBm */ write_phy_ofdm(dev, 0x17, 0x56); mdelay(1); write_phy_ofdm(dev, 0x24, 0x96); mdelay(1); write_phy_ofdm(dev, 0x05, 0xfc); mdelay(1); break; case 7: /* m74dBm */ write_phy_ofdm(dev, 0x17, 0x56); mdelay(1); write_phy_ofdm(dev, 0x24, 0xa6); mdelay(1); write_phy_ofdm(dev, 0x05, 0xfc); mdelay(1); break; case 
	8:
		write_phy_ofdm(dev, 0x17, 0x66); mdelay(1);
		write_phy_ofdm(dev, 0x24, 0xb6); mdelay(1);
		write_phy_ofdm(dev, 0x05, 0xfc); mdelay(1);
		break;
	default:	/* MP */
		write_phy_ofdm(dev, 0x17, 0x26); mdelay(1);
		write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
		write_phy_ofdm(dev, 0x05, 0xfa); mdelay(1);
		break;
	}
}

/*
 * Description:
 *	Tx Power tracking mechanism routine on 87SE.
 *	Created by Roger, 2007.12.11.
 */
void
InitTxPwrTracking87SE(
	struct net_device *dev
)
{
	u32 u4bRfReg;

	u4bRfReg = RF_ReadReg(dev, 0x02);

	/* Enable Thermal meter indication. */
	RF_WriteReg(dev, 0x02, u4bRfReg|PWR_METER_EN); mdelay(1);
}

/*
 * One-time PHY bring-up: program RCR, run the hard-coded Zebra RF/BB
 * configuration, seed the initial-gain state and (optionally) enable
 * Tx power tracking, then apply the initial gain.
 */
void
PhyConfig8185(
	struct net_device *dev
	)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);

	write_nic_dword(dev, RCR, priv->ReceiveConfig);
	priv->RFProgType = read_nic_byte(dev, CONFIG4) & 0x03;
	/* RF config */
	ZEBRA_Config_85BASIC_HardCode(dev);
	/* {by amy 080312 */
	/* Set default initial gain state to 4, approved by SD3 DZ, by Bruce, 2007-06-06. */
	if (priv->bDigMechanism) {
		if (priv->InitialGain == 0)
			priv->InitialGain = 4;
	}

	/*
	 * Enable thermal meter indication to implement TxPower tracking on 87SE.
	 * We initialize thermal meter here to avoid unsuccessful configuration.
	 * Added by Roger, 2007.12.11.
	 */
	if (priv->bTxPowerTrack)
		InitTxPwrTracking87SE(dev);

	/* by amy 080312} */
	priv->InitialGainBackUp = priv->InitialGain;
	UpdateInitialGain(dev);

	return;
}

void
HwConfigureRTL8185(
	struct net_device *dev
	)
{
	/* RTL8185_TODO: Determine Retrylimit, TxAGC, AutoRateFallback control.
*/ u8 bUNIVERSAL_CONTROL_RL = 0; u8 bUNIVERSAL_CONTROL_AGC = 1; u8 bUNIVERSAL_CONTROL_ANT = 1; u8 bAUTO_RATE_FALLBACK_CTL = 1; u8 val8; write_nic_word(dev, BRSR, 0x0fff); /* Retry limit */ val8 = read_nic_byte(dev, CW_CONF); if (bUNIVERSAL_CONTROL_RL) val8 = val8 & 0xfd; else val8 = val8 | 0x02; write_nic_byte(dev, CW_CONF, val8); /* Tx AGC */ val8 = read_nic_byte(dev, TXAGC_CTL); if (bUNIVERSAL_CONTROL_AGC) { write_nic_byte(dev, CCK_TXAGC, 128); write_nic_byte(dev, OFDM_TXAGC, 128); val8 = val8 & 0xfe; } else { val8 = val8 | 0x01 ; } write_nic_byte(dev, TXAGC_CTL, val8); /* Tx Antenna including Feedback control */ val8 = read_nic_byte(dev, TXAGC_CTL); if (bUNIVERSAL_CONTROL_ANT) { write_nic_byte(dev, ANTSEL, 0x00); val8 = val8 & 0xfd; } else { val8 = val8 & (val8|0x02); /* xiong-2006-11-15 */ } write_nic_byte(dev, TXAGC_CTL, val8); /* Auto Rate fallback control */ val8 = read_nic_byte(dev, RATE_FALLBACK); val8 &= 0x7c; if (bAUTO_RATE_FALLBACK_CTL) { val8 |= RATE_FALLBACK_CTL_ENABLE | RATE_FALLBACK_CTL_AUTO_STEP1; /* <RJ_TODO_8185B> We shall set up the ARFR according to user's setting. */ PlatformIOWrite2Byte(dev, ARFR, 0x0fff); /* set 1M ~ 54Mbps. 
*/ } write_nic_byte(dev, RATE_FALLBACK, val8); } static void MacConfig_85BASIC_HardCode( struct net_device *dev) { /* ============================================================================ MACREG.TXT ============================================================================ */ int nLinesRead = 0; u32 u4bRegOffset, u4bRegValue, u4bPageIndex = 0; int i; nLinesRead = sizeof(MAC_REG_TABLE)/2; for (i = 0; i < nLinesRead; i++) { /* nLinesRead=101 */ u4bRegOffset = MAC_REG_TABLE[i][0]; u4bRegValue = MAC_REG_TABLE[i][1]; if (u4bRegOffset == 0x5e) u4bPageIndex = u4bRegValue; else u4bRegOffset |= (u4bPageIndex << 8); write_nic_byte(dev, u4bRegOffset, (u8)u4bRegValue); } /* ============================================================================ */ } static void MacConfig_85BASIC( struct net_device *dev) { u8 u1DA; MacConfig_85BASIC_HardCode(dev); /* ============================================================================ */ /* Follow TID_AC_MAP of WMac. */ write_nic_word(dev, TID_AC_MAP, 0xfa50); /* Interrupt Migration, Jong suggested we use set 0x0000 first, 2005.12.14, by rcnjko. */ write_nic_word(dev, IntMig, 0x0000); /* Prevent TPC to cause CRC error. Added by Annie, 2006-06-10. */ PlatformIOWrite4Byte(dev, 0x1F0, 0x00000000); PlatformIOWrite4Byte(dev, 0x1F4, 0x00000000); PlatformIOWrite1Byte(dev, 0x1F8, 0x00); /* Asked for by SD3 CM Lin, 2006.06.27, by rcnjko. */ /* power save parameter based on "87SE power save parameters 20071127.doc", as follow. */ /* Enable DA10 TX power saving */ u1DA = read_nic_byte(dev, PHYPR); write_nic_byte(dev, PHYPR, (u1DA | BIT2)); /* POWER: */ write_nic_word(dev, 0x360, 0x1000); write_nic_word(dev, 0x362, 0x1000); /* AFE. 
*/ write_nic_word(dev, 0x370, 0x0560); write_nic_word(dev, 0x372, 0x0560); write_nic_word(dev, 0x374, 0x0DA4); write_nic_word(dev, 0x376, 0x0DA4); write_nic_word(dev, 0x378, 0x0560); write_nic_word(dev, 0x37A, 0x0560); write_nic_word(dev, 0x37C, 0x00EC); write_nic_word(dev, 0x37E, 0x00EC); /*+edward */ write_nic_byte(dev, 0x24E, 0x01); } u8 GetSupportedWirelessMode8185( struct net_device *dev ) { u8 btSupportedWirelessMode = 0; btSupportedWirelessMode = (WIRELESS_MODE_B | WIRELESS_MODE_G); return btSupportedWirelessMode; } void ActUpdateChannelAccessSetting( struct net_device *dev, WIRELESS_MODE WirelessMode, PCHANNEL_ACCESS_SETTING ChnlAccessSetting ) { struct r8180_priv *priv = ieee80211_priv(dev); struct ieee80211_device *ieee = priv->ieee80211; AC_CODING eACI; AC_PARAM AcParam; u8 bFollowLegacySetting = 0; u8 u1bAIFS; /* <RJ_TODO_8185B> TODO: We still don't know how to set up these registers, just follow WMAC to verify 8185B FPAG. <RJ_TODO_8185B> Jong said CWmin/CWmax register are not functional in 8185B, so we shall fill channel access realted register into AC parameter registers, even in nQBss. */ ChnlAccessSetting->SIFS_Timer = 0x22; /* Suggested by Jong, 2005.12.08. */ ChnlAccessSetting->DIFS_Timer = 0x1C; /* 2006.06.02, by rcnjko. */ ChnlAccessSetting->SlotTimeTimer = 9; /* 2006.06.02, by rcnjko. */ ChnlAccessSetting->EIFS_Timer = 0x5B; /* Suggested by wcchu, it is the default value of EIFS register, 2005.12.08. */ ChnlAccessSetting->CWminIndex = 3; /* 2006.06.02, by rcnjko. */ ChnlAccessSetting->CWmaxIndex = 7; /* 2006.06.02, by rcnjko. */ write_nic_byte(dev, SIFS, ChnlAccessSetting->SIFS_Timer); write_nic_byte(dev, SLOT, ChnlAccessSetting->SlotTimeTimer); /* Rewrited from directly use PlatformEFIOWrite1Byte(), by Annie, 2006-03-29. 
*/ u1bAIFS = aSifsTime + (2 * ChnlAccessSetting->SlotTimeTimer); write_nic_byte(dev, EIFS, ChnlAccessSetting->EIFS_Timer); write_nic_byte(dev, AckTimeOutReg, 0x5B); /* <RJ_EXPR_QOS> Suggested by wcchu, it is the default value of EIFS register, 2005.12.08. */ { /* Legacy 802.11. */ bFollowLegacySetting = 1; } /* this setting is copied from rtl8187B. xiong-2006-11-13 */ if (bFollowLegacySetting) { /* Follow 802.11 seeting to AC parameter, all AC shall use the same parameter. 2005.12.01, by rcnjko. */ AcParam.longData = 0; AcParam.f.AciAifsn.f.AIFSN = 2; /* Follow 802.11 DIFS. */ AcParam.f.AciAifsn.f.ACM = 0; AcParam.f.Ecw.f.ECWmin = ChnlAccessSetting->CWminIndex; /* Follow 802.11 CWmin. */ AcParam.f.Ecw.f.ECWmax = ChnlAccessSetting->CWmaxIndex; /* Follow 802.11 CWmax. */ AcParam.f.TXOPLimit = 0; /* lzm reserved 080826 */ /* For turbo mode setting. port from 87B by Isaiah 2008-08-01 */ if (ieee->current_network.Turbo_Enable == 1) AcParam.f.TXOPLimit = 0x01FF; /* For 87SE with Intel 4965 Ad-Hoc mode have poor throughput (19MB) */ if (ieee->iw_mode == IW_MODE_ADHOC) AcParam.f.TXOPLimit = 0x0020; for (eACI = 0; eACI < AC_MAX; eACI++) { AcParam.f.AciAifsn.f.ACI = (u8)eACI; { PAC_PARAM pAcParam = (PAC_PARAM)(&AcParam); AC_CODING eACI; u8 u1bAIFS; u32 u4bAcParam; /* Retrive paramters to udpate. 
*/ eACI = pAcParam->f.AciAifsn.f.ACI; u1bAIFS = pAcParam->f.AciAifsn.f.AIFSN * ChnlAccessSetting->SlotTimeTimer + aSifsTime; u4bAcParam = ((((u32)(pAcParam->f.TXOPLimit)) << AC_PARAM_TXOP_LIMIT_OFFSET) | (((u32)(pAcParam->f.Ecw.f.ECWmax)) << AC_PARAM_ECW_MAX_OFFSET) | (((u32)(pAcParam->f.Ecw.f.ECWmin)) << AC_PARAM_ECW_MIN_OFFSET) | (((u32)u1bAIFS) << AC_PARAM_AIFS_OFFSET)); switch (eACI) { case AC1_BK: /* write_nic_dword(dev, AC_BK_PARAM, u4bAcParam); */ break; case AC0_BE: /* write_nic_dword(dev, AC_BK_PARAM, u4bAcParam); */ break; case AC2_VI: /* write_nic_dword(dev, AC_BK_PARAM, u4bAcParam); */ break; case AC3_VO: /* write_nic_dword(dev, AC_BK_PARAM, u4bAcParam); */ break; default: DMESGW("SetHwReg8185(): invalid ACI: %d !\n", eACI); break; } /* Cehck ACM bit. */ /* If it is set, immediately set ACM control bit to downgrading AC for passing WMM testplan. Annie, 2005-12-13. */ { PACI_AIFSN pAciAifsn = (PACI_AIFSN)(&pAcParam->f.AciAifsn); AC_CODING eACI = pAciAifsn->f.ACI; /*modified Joseph */ /*for 8187B AsynIORead issue */ u8 AcmCtrl = 0; if (pAciAifsn->f.ACM) { /* ACM bit is 1. */ switch (eACI) { case AC0_BE: AcmCtrl |= (BEQ_ACM_EN|BEQ_ACM_CTL|ACM_HW_EN); /* or 0x21 */ break; case AC2_VI: AcmCtrl |= (VIQ_ACM_EN|VIQ_ACM_CTL|ACM_HW_EN); /* or 0x42 */ break; case AC3_VO: AcmCtrl |= (VOQ_ACM_EN|VOQ_ACM_CTL|ACM_HW_EN); /* or 0x84 */ break; default: DMESGW("SetHwReg8185(): [HW_VAR_ACM_CTRL] ACM set failed: eACI is %d\n", eACI); break; } } else { /* ACM bit is 0. 
*/ switch (eACI) { case AC0_BE: AcmCtrl &= ((~BEQ_ACM_EN) & (~BEQ_ACM_CTL) & (~ACM_HW_EN)); /* and 0xDE */ break; case AC2_VI: AcmCtrl &= ((~VIQ_ACM_EN) & (~VIQ_ACM_CTL) & (~ACM_HW_EN)); /* and 0xBD */ break; case AC3_VO: AcmCtrl &= ((~VOQ_ACM_EN) & (~VOQ_ACM_CTL) & (~ACM_HW_EN)); /* and 0x7B */ break; default: break; } } write_nic_byte(dev, ACM_CONTROL, 0); } } } } } void ActSetWirelessMode8185( struct net_device *dev, u8 btWirelessMode ) { struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev); struct ieee80211_device *ieee = priv->ieee80211; u8 btSupportedWirelessMode = GetSupportedWirelessMode8185(dev); if ((btWirelessMode & btSupportedWirelessMode) == 0) { /* Don't switch to unsupported wireless mode, 2006.02.15, by rcnjko. */ DMESGW("ActSetWirelessMode8185(): WirelessMode(%d) is not supported (%d)!\n", btWirelessMode, btSupportedWirelessMode); return; } /* 1. Assign wireless mode to swtich if necessary. */ if (btWirelessMode == WIRELESS_MODE_AUTO) { if ((btSupportedWirelessMode & WIRELESS_MODE_A)) { btWirelessMode = WIRELESS_MODE_A; } else if (btSupportedWirelessMode & WIRELESS_MODE_G) { btWirelessMode = WIRELESS_MODE_G; } else if ((btSupportedWirelessMode & WIRELESS_MODE_B)) { btWirelessMode = WIRELESS_MODE_B; } else { DMESGW("ActSetWirelessMode8185(): No valid wireless mode supported, btSupportedWirelessMode(%x)!!!\n", btSupportedWirelessMode); btWirelessMode = WIRELESS_MODE_B; } } /* 2. Swtich band: RF or BB specific actions, * for example, refresh tables in omc8255, or change initial gain if necessary. * Nothing to do for Zebra to switch band. * Update current wireless mode if we swtich to specified band successfully. */ ieee->mode = (WIRELESS_MODE)btWirelessMode; /* 3. Change related setting. 
	 */
	if( ieee->mode == WIRELESS_MODE_A ) {
		DMESG("WIRELESS_MODE_A\n");
	} else if( ieee->mode == WIRELESS_MODE_B ) {
		DMESG("WIRELESS_MODE_B\n");
	} else if( ieee->mode == WIRELESS_MODE_G ) {
		DMESG("WIRELESS_MODE_G\n");
	}
	ActUpdateChannelAccessSetting( dev, ieee->mode, &priv->ChannelAccessSetting);
}

/* Re-enable device interrupts by restoring the saved interrupt mask. */
void rtl8185b_irq_enable(struct net_device *dev)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);

	priv->irq_enabled = 1;
	write_nic_dword(dev, IMR, priv->IntrMask);
}
/* by amy for power save */
void
DrvIFIndicateDisassociation(
	struct net_device *dev,
	u16 reason
	)
{
	/* nothing is needed after disassociation request. */
}

/*
 * Leave the current IBSS: invalidate the BSSID (0x55 filler), drop the
 * link state, stop beaconing and notify the stack.
 */
void
MgntDisconnectIBSS(
	struct net_device *dev
)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
	u8 i;

	DrvIFIndicateDisassociation(dev, unspec_reason);

	for (i = 0; i < 6 ; i++)
		priv->ieee80211->current_network.bssid[i] = 0x55;

	priv->ieee80211->state = IEEE80211_NOLINK;
	/*
	 * Stop Beacon.
	 *
	 * Vista add a Adhoc profile, HW radio off until OID_DOT11_RESET_REQUEST
	 * Driver would set MSR=NO_LINK, then HW Radio ON, MgntQueue Stuck.
	 * Because Bcn DMA isn't complete, mgnt queue would stuck until Bcn packet send.
	 *
	 * Disable Beacon Queue Own bit, suggested by jong
	 */
	ieee80211_stop_send_beacons(priv->ieee80211);
	priv->ieee80211->link_change(dev);
	notify_wx_assoc_event(priv->ieee80211);
}

/*
 * Send a Disassociation frame to asSta; if asSta is our current BSSID,
 * also tear down the local association state.
 */
void
MlmeDisassociateRequest(
	struct net_device *dev,
	u8 *asSta,
	u8 asRsn
	)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
	u8 i;

	SendDisassociation(priv->ieee80211, asSta, asRsn);

	if (memcmp(priv->ieee80211->current_network.bssid, asSta, 6) == 0) {
		/*ShuChen TODO: change media status. */
		/*ShuChen TODO: What to do when disassociate.
		 */
		DrvIFIndicateDisassociation(dev, unspec_reason);

		/* Invalidate the BSSID with 0x22 filler bytes. */
		for (i = 0; i < 6; i++)
			priv->ieee80211->current_network.bssid[i] = 0x22;

		ieee80211_disassociate(priv->ieee80211);
	}
}

/* Disassociate from the current AP and drop the link state. */
void
MgntDisconnectAP(
	struct net_device *dev,
	u8 asRsn
)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);

	/*
	 * Commented out by rcnjko, 2005.01.27:
	 * I move SecClearAllKeys() to MgntActSet_802_11_DISASSOCIATE().
	 *
	 * 2004/09/15, kcwu, the key should be cleared, or the new
	 * handshaking will not success.
	 *
	 * In WPA WPA2 need to Clear all key ... because new key will set
	 * after new handshaking. 2004.10.11, by rcnjko.
	 */
	MlmeDisassociateRequest(dev, priv->ieee80211->current_network.bssid, asRsn);

	priv->ieee80211->state = IEEE80211_NOLINK;
}

/*
 * Disconnect from the current network (IBSS or infrastructure),
 * resetting 802.11d state first when enabled. Always returns true.
 */
bool
MgntDisconnect(
	struct net_device *dev,
	u8 asRsn
)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);

	/* Schedule an workitem to wake up for ps mode, 070109, by rcnjko. */

	if (IS_DOT11D_ENABLE(priv->ieee80211))
		Dot11d_Reset(priv->ieee80211);

	/* In adhoc mode, update beacon frame. */
	if (priv->ieee80211->state == IEEE80211_LINKED) {
		if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
			MgntDisconnectIBSS(dev);

		if (priv->ieee80211->iw_mode == IW_MODE_INFRA) {
			/*
			 * We clear key here instead of MgntDisconnectAP() because that
			 * MgntActSet_802_11_DISASSOCIATE() is an interface called by OS,
			 * e.g. OID_802_11_DISASSOCIATE in Windows while as MgntDisconnectAP()
			 * is used to handle disassociation related things to AP, e.g. send
			 * Disassoc frame to AP. 2005.01.27, by rcnjko.
			 */
			MgntDisconnectAP(dev, asRsn);
		}
		/* Indicate Disconnect, 2005.02.23, by rcnjko. */
	}

	return true;
}
/*
 * Description:
 *	Change RF Power State.
 *	Note that, only MgntActSet_RF_State() is allowed to set HW_VAR_RF_STATE.
 *
 * Assumption:
 *	PASSIVE LEVEL.
 */
bool
SetRFPowerState(
	struct net_device *dev,
	RT_RF_POWER_STATE eRFPowerState
	)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
	bool bResult = false;

	/* No-op (returns false) when already in the requested state. */
	if (eRFPowerState == priv->eRFPowerState)
		return bResult;

	bResult = SetZebraRFPowerState8185(dev, eRFPowerState);

	return bResult;
}

/* Placeholder: no HW action needed to enable RX on 8185. */
void
HalEnableRx8185Dummy(
	struct net_device *dev
	)
{
}

/* Placeholder: no HW action needed to disable RX on 8185. */
void
HalDisableRx8185Dummy(
	struct net_device *dev
	)
{
}

/*
 * Arbitrated RF power-state switch. Serializes concurrent requests via
 * rf_ps_lock/RFChangeInProgress, tracks per-source off reasons in
 * priv->RfOffReason, and applies the state via SetRFPowerState().
 * Returns whether the requested action was allowed.
 */
bool
MgntActSet_RF_State(
	struct net_device *dev,
	RT_RF_POWER_STATE StateToSet,
	u32 ChangeSource
	)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
	bool bActionAllowed = false;
	bool bConnectBySSID = false;
	RT_RF_POWER_STATE rtState;
	u16 RFWaitCounter = 0;
	unsigned long flag;
	/*
	 * Prevent the race condition of RF state change. By Bruce, 2007-11-28.
	 * Only one thread can change the RF state at one time, and others
	 * should wait to be executed.
	 */
	while (true) {
		spin_lock_irqsave(&priv->rf_ps_lock, flag);
		if (priv->RFChangeInProgress) {
			spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
			/* Set RF after the previous action is done. */
			while (priv->RFChangeInProgress) {
				RFWaitCounter++;
				udelay(1000); /* 1 ms */

				/* Wait too long, return FALSE to avoid to be stuck here. */
				if (RFWaitCounter > 1000) { /* 1sec */
					printk("MgntActSet_RF_State(): Wait too long to set RF\n");
					/* TODO: Reset RF state? */
					return false;
				}
			}
		} else {
			priv->RFChangeInProgress = true;
			spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
			break;
		}
	}
	rtState = priv->eRFPowerState;

	switch (StateToSet) {
	case eRfOn:
		/*
		 * Turn On RF no matter the IPS setting because we need to update
		 * the RF state to Ndis under Vista, or the Windows does not allow
		 * the driver to perform site survey any more. By Bruce, 2007-10-02.
		 */
		/* Clear this source's off-reason; RF may turn on only when
		 * no other source still holds it off. */
		priv->RfOffReason &= (~ChangeSource);

		if (!priv->RfOffReason) {
			priv->RfOffReason = 0;
			bActionAllowed = true;

			if (rtState == eRfOff && ChangeSource >= RF_CHANGE_BY_HW && !priv->bInHctTest)
				bConnectBySSID = true;
		} else
			;
		break;

	case eRfOff:
		/* 070125, rcnjko: we always keep connected in AP mode. */
		if (priv->RfOffReason > RF_CHANGE_BY_IPS) {
			/*
			 * 060808, Annie:
			 * Disconnect to current BSS when radio off. Asked by QuanTa.
			 *
			 * Calling MgntDisconnect() instead of MgntActSet_802_11_DISASSOCIATE(),
			 * because we do NOT need to set ssid to dummy ones.
			 */
			MgntDisconnect(dev, disas_lv_ss);

			/* Clear content of bssDesc[] and bssDesc4Query[] to avoid reporting old bss to UI. */
		}

		priv->RfOffReason |= ChangeSource;
		bActionAllowed = true;
		break;

	case eRfSleep:
		priv->RfOffReason |= ChangeSource;
		bActionAllowed = true;
		break;

	default:
		break;
	}

	if (bActionAllowed) {
		/* Config HW to the specified mode. */
		SetRFPowerState(dev, StateToSet);

		/* Turn on RF. */
		if (StateToSet == eRfOn) {
			HalEnableRx8185Dummy(dev);
			if (bConnectBySSID) {
				/* by amy not supported */
			}
		}
		/* Turn off RF. */
		else if (StateToSet == eRfOff)
			HalDisableRx8185Dummy(dev);
	}

	/* Release RF spinlock */
	spin_lock_irqsave(&priv->rf_ps_lock, flag);
	priv->RFChangeInProgress = false;
	spin_unlock_irqrestore(&priv->rf_ps_lock, flag);

	return bActionAllowed;
}

/*
 * Carry out one inactive-power-save (IPS) transition to
 * priv->eInactivePowerState, guarded by bSwRfProcessing.
 */
void
InactivePowerSave(
	struct net_device *dev
	)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);

	/*
	 * This flag "bSwRfProcessing", indicates the status of IPS procedure,
	 * should be set if the IPS workitem is really scheduled.
	 * The old code, sets this flag before scheduling the IPS workitem and
	 * however, at the same time the previous IPS workitem did not end yet,
	 * fails to schedule the current workitem. Thus, bSwRfProcessing blocks
	 * the IPS procedure of switching RF.
	 */
	priv->bSwRfProcessing = true;

	MgntActSet_RF_State(dev, priv->eInactivePowerState, RF_CHANGE_BY_IPS);

	/*
	 * To solve CAM values miss in RF OFF, rewrite CAM values after RF ON.
	 * By Bruce, 2007-09-20.
	 */
	priv->bSwRfProcessing = false;
}

/*
 * Description:
 *	Enter the inactive power save mode. RF will be off.
 */
void
IPSEnter(
	struct net_device *dev
	)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
	RT_RF_POWER_STATE rtState;

	if (priv->bInactivePs) {
		rtState = priv->eRFPowerState;

		/*
		 * Do not enter IPS in the following conditions:
		 * (1) RF is already OFF or Sleep
		 * (2) bSwRfProcessing (indicates the IPS is still under going)
		 * (3) Connected (only disconnected can trigger IPS)
		 * (4) IBSS (send Beacon)
		 * (5) AP mode (send Beacon)
		 */
		if (rtState == eRfOn && !priv->bSwRfProcessing
			&& (priv->ieee80211->state != IEEE80211_LINKED)) {
			priv->eInactivePowerState = eRfOff;
			InactivePowerSave(dev);
		}
	}
}

/* Leave inactive power save: turn RF back on if IPS turned it off. */
void
IPSLeave(
	struct net_device *dev
	)
{
	struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
	RT_RF_POWER_STATE rtState;

	if (priv->bInactivePs) {
		rtState = priv->eRFPowerState;
		/* Only act when RF is off/asleep and no stronger off-reason
		 * than IPS itself is pending. */
		if ((rtState == eRfOff || rtState == eRfSleep) && (!priv->bSwRfProcessing)
			&& priv->RfOffReason <= RF_CHANGE_BY_IPS) {
			priv->eInactivePowerState = eRfOn;
			InactivePowerSave(dev);
		}
	}
}

/*
 * Full adapter bring-up for the 8185B: reset, MAC/PHY configuration,
 * analog power, RF pin setup, and wireless-mode validation.
 */
void rtl8185b_adapter_start(struct net_device *dev)
{
	struct r8180_priv *priv = ieee80211_priv(dev);
	struct ieee80211_device *ieee = priv->ieee80211;

	u8 SupportedWirelessMode;
	u8 InitWirelessMode;
	u8 bInvalidWirelessMode = 0;
	u8 tmpu8;
	u8 btCR9346;
	u8 TmpU1b;
	u8 btPSR;

	write_nic_byte(dev, 0x24e, (BIT5|BIT6|BIT0));
	rtl8180_reset(dev);

	priv->dma_poll_mask = 0;
	priv->dma_poll_stop_mask = 0;

	HwConfigureRTL8185(dev);

	write_nic_dword(dev, MAC0, ((u32 *)dev->dev_addr)[0]);
	write_nic_word(dev, MAC4, ((u32 *)dev->dev_addr)[1] & 0xffff);
	/* default network type to 'No Link' */
	write_nic_byte(dev, MSR, read_nic_byte(dev, MSR) & 0xf3);

	write_nic_word(dev, BcnItv, 100);
	write_nic_word(dev, AtimWnd, 2);

	PlatformIOWrite2Byte(dev, FEMR, 0xFFFF);

	write_nic_byte(dev, WPA_CONFIG, 0);

	MacConfig_85BASIC(dev);

	/* Override the RFSW_CTRL (MAC offset 0x272-0x273), 2006.06.07, by rcnjko.
*/ /* BT_DEMO_BOARD type */ PlatformIOWrite2Byte(dev, RFSW_CTRL, 0x569a); /* ----------------------------------------------------------------------------- Set up PHY related. ----------------------------------------------------------------------------- */ /* Enable Config3.PARAM_En to revise AnaaParm. */ write_nic_byte(dev, CR9346, 0xc0); /* enable config register write */ tmpu8 = read_nic_byte(dev, CONFIG3); write_nic_byte(dev, CONFIG3, (tmpu8 | CONFIG3_PARM_En)); /* Turn on Analog power. */ /* Asked for by William, otherwise, MAC 3-wire can't work, 2006.06.27, by rcnjko. */ write_nic_dword(dev, ANAPARAM2, ANAPARM2_ASIC_ON); write_nic_dword(dev, ANAPARAM, ANAPARM_ASIC_ON); write_nic_word(dev, ANAPARAM3, 0x0010); write_nic_byte(dev, CONFIG3, tmpu8); write_nic_byte(dev, CR9346, 0x00); /* enable EEM0 and EEM1 in 9346CR */ btCR9346 = read_nic_byte(dev, CR9346); write_nic_byte(dev, CR9346, (btCR9346 | 0xC0)); /* B cut use LED1 to control HW RF on/off */ TmpU1b = read_nic_byte(dev, CONFIG5); TmpU1b = TmpU1b & ~BIT3; write_nic_byte(dev, CONFIG5, TmpU1b); /* disable EEM0 and EEM1 in 9346CR */ btCR9346 &= ~(0xC0); write_nic_byte(dev, CR9346, btCR9346); /* Enable Led (suggested by Jong) */ /* B-cut RF Radio on/off 5e[3]=0 */ btPSR = read_nic_byte(dev, PSR); write_nic_byte(dev, PSR, (btPSR | BIT3)); /* setup initial timing for RFE. */ write_nic_word(dev, RFPinsOutput, 0x0480); SetOutputEnableOfRfPins(dev); write_nic_word(dev, RFPinsSelect, 0x2488); /* PHY config. */ PhyConfig8185(dev); /* We assume RegWirelessMode has already been initialized before, however, we has to validate the wireless mode here and provide a reasonable initialized value if necessary. 2005.01.13, by rcnjko. */ SupportedWirelessMode = GetSupportedWirelessMode8185(dev); if ((ieee->mode != WIRELESS_MODE_B) && (ieee->mode != WIRELESS_MODE_G) && (ieee->mode != WIRELESS_MODE_A) && (ieee->mode != WIRELESS_MODE_AUTO)) { /* It should be one of B, G, A, or AUTO. 
*/ bInvalidWirelessMode = 1; } else { /* One of B, G, A, or AUTO. */ /* Check if the wireless mode is supported by RF. */ if ((ieee->mode != WIRELESS_MODE_AUTO) && (ieee->mode & SupportedWirelessMode) == 0) { bInvalidWirelessMode = 1; } } if (bInvalidWirelessMode || ieee->mode == WIRELESS_MODE_AUTO) { /* Auto or other invalid value. */ /* Assigne a wireless mode to initialize. */ if ((SupportedWirelessMode & WIRELESS_MODE_A)) { InitWirelessMode = WIRELESS_MODE_A; } else if ((SupportedWirelessMode & WIRELESS_MODE_G)) { InitWirelessMode = WIRELESS_MODE_G; } else if ((SupportedWirelessMode & WIRELESS_MODE_B)) { InitWirelessMode = WIRELESS_MODE_B; } else { DMESGW("InitializeAdapter8185(): No valid wireless mode supported, SupportedWirelessMode(%x)!!!\n", SupportedWirelessMode); InitWirelessMode = WIRELESS_MODE_B; } /* Initialize RegWirelessMode if it is not a valid one. */ if (bInvalidWirelessMode) ieee->mode = (WIRELESS_MODE)InitWirelessMode; } else { /* One of B, G, A. */ InitWirelessMode = ieee->mode; } /* by amy for power save */ priv->eRFPowerState = eRfOff; priv->RfOffReason = 0; { MgntActSet_RF_State(dev, eRfOn, 0); } /* If inactive power mode is enabled, disable rf while in disconnected state. 
*/ if (priv->bInactivePs) MgntActSet_RF_State(dev , eRfOff, RF_CHANGE_BY_IPS); /* by amy for power save */ ActSetWirelessMode8185(dev, (u8)(InitWirelessMode)); /* ----------------------------------------------------------------------------- */ rtl8185b_irq_enable(dev); netif_start_queue(dev); } void rtl8185b_rx_enable(struct net_device *dev) { u8 cmd; /* for now we accept data, management & ctl frame*/ struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev); if (dev->flags & IFF_PROMISC) DMESG("NIC in promisc mode"); if (priv->ieee80211->iw_mode == IW_MODE_MONITOR || \ dev->flags & IFF_PROMISC) { priv->ReceiveConfig = priv->ReceiveConfig & (~RCR_APM); priv->ReceiveConfig = priv->ReceiveConfig | RCR_AAP; } if (priv->ieee80211->iw_mode == IW_MODE_MONITOR) priv->ReceiveConfig = priv->ReceiveConfig | RCR_ACF | RCR_APWRMGT | RCR_AICV; if (priv->crcmon == 1 && priv->ieee80211->iw_mode == IW_MODE_MONITOR) priv->ReceiveConfig = priv->ReceiveConfig | RCR_ACRC32; write_nic_dword(dev, RCR, priv->ReceiveConfig); fix_rx_fifo(dev); cmd = read_nic_byte(dev, CMD); write_nic_byte(dev, CMD, cmd | (1<<CMD_RX_ENABLE_SHIFT)); } void rtl8185b_tx_enable(struct net_device *dev) { u8 cmd; u8 byte; struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev); write_nic_dword(dev, TCR, priv->TransmitConfig); byte = read_nic_byte(dev, MSR); byte |= MSR_LINK_ENEDCA; write_nic_byte(dev, MSR, byte); fix_tx_fifo(dev); cmd = read_nic_byte(dev, CMD); write_nic_byte(dev, CMD, cmd | (1<<CMD_TX_ENABLE_SHIFT)); }
gpl-2.0
d3trax/asuswrt-merlin
release/src-rt/linux/linux-2.6/drivers/scsi/aic7xxx/aic79xx_core.c
44
278804
/* * Core routines and tables shareable across OS platforms. * * Copyright (c) 1994-2002 Justin T. Gibbs. * Copyright (c) 2000-2003 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $ */ #ifdef __linux__ #include "aic79xx_osm.h" #include "aic79xx_inline.h" #include "aicasm/aicasm_insformat.h" #else #include <dev/aic7xxx/aic79xx_osm.h> #include <dev/aic7xxx/aic79xx_inline.h> #include <dev/aic7xxx/aicasm/aicasm_insformat.h> #endif /***************************** Lookup Tables **********************************/ static char *ahd_chip_names[] = { "NONE", "aic7901", "aic7902", "aic7901A" }; static const u_int num_chip_names = ARRAY_SIZE(ahd_chip_names); /* * Hardware error codes. */ struct ahd_hard_error_entry { uint8_t errno; char *errmesg; }; static struct ahd_hard_error_entry ahd_hard_errors[] = { { DSCTMOUT, "Discard Timer has timed out" }, { ILLOPCODE, "Illegal Opcode in sequencer program" }, { SQPARERR, "Sequencer Parity Error" }, { DPARERR, "Data-path Parity Error" }, { MPARERR, "Scratch or SCB Memory Parity Error" }, { CIOPARERR, "CIOBUS Parity Error" }, }; static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors); static struct ahd_phase_table_entry ahd_phase_table[] = { { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, { P_COMMAND, MSG_NOOP, "in Command phase" }, { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, { P_BUSFREE, MSG_NOOP, "while idle" }, { 0, MSG_NOOP, "in unknown phase" } }; /* * In most cases we only wish to itterate over real phases, so * exclude the last element from the count. 
*/ static const u_int num_phases = ARRAY_SIZE(ahd_phase_table) - 1; /* Our Sequencer Program */ #include "aic79xx_seq.h" /**************************** Function Declarations ***************************/ static void ahd_handle_transmission_error(struct ahd_softc *ahd); static void ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1); static int ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime); static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd); static void ahd_handle_proto_violation(struct ahd_softc *ahd); static void ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static struct ahd_tmode_tstate* ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel); #ifdef AHD_TARGET_MODE static void ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force); #endif static void ahd_devlimited_syncrate(struct ahd_softc *ahd, struct ahd_initiator_tinfo *, u_int *period, u_int *ppr_options, role_t role); static void ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct ahd_transinfo *tinfo); static void ahd_update_pending_scbs(struct ahd_softc *ahd); static void ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); static void ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); static void ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset); static void ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int bus_width); static void ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options); static void ahd_clear_msg_state(struct ahd_softc *ahd); static void 
ahd_handle_message_phase(struct ahd_softc *ahd); typedef enum { AHDMSG_1B, AHDMSG_2B, AHDMSG_EXT } ahd_msgtype; static int ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full); static int ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static int ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_reinitialize_dataptrs(struct ahd_softc *ahd); static void ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int lun, cam_status status, char *message, int verbose_level); #ifdef AHD_TARGET_MODE static void ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); #endif static u_int ahd_sglist_size(struct ahd_softc *ahd); static u_int ahd_sglist_allocsize(struct ahd_softc *ahd); static bus_dmamap_callback_t ahd_dmamap_cb; static void ahd_initialize_hscbs(struct ahd_softc *ahd); static int ahd_init_scbdata(struct ahd_softc *ahd); static void ahd_fini_scbdata(struct ahd_softc *ahd); static void ahd_setup_iocell_workaround(struct ahd_softc *ahd); static void ahd_iocell_first_selection(struct ahd_softc *ahd); static void ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx); static void ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb); static void ahd_chip_init(struct ahd_softc *ahd); static void ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb, struct scb *scb); static int ahd_qinfifo_count(struct ahd_softc *ahd); static int ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahd_search_action action, u_int *list_head, u_int *list_tail, u_int tid); static void ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev, u_int tid_cur, u_int tid_next); static void ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid); static u_int 
ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, u_int prev, u_int next, u_int tid); static void ahd_reset_current_bus(struct ahd_softc *ahd); static ahd_callback_t ahd_stat_timer; #ifdef AHD_DUMP_SEQ static void ahd_dumpseq(struct ahd_softc *ahd); #endif static void ahd_loadseq(struct ahd_softc *ahd); static int ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch, u_int start_instr, u_int *skip_addr); static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address); static void ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts); static int ahd_probe_stack_size(struct ahd_softc *ahd); static int ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb); static void ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb); #ifdef AHD_TARGET_MODE static void ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg); static void ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask); static int ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd); #endif static int ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status); static void ahd_alloc_scbs(struct ahd_softc *ahd); static void ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid); static void ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb); static void ahd_clear_critical_section(struct ahd_softc *ahd); static void ahd_clear_intstat(struct ahd_softc *ahd); static void ahd_enable_coalescing(struct ahd_softc *ahd, int enable); static u_int ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl); static void ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb); static void ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb); static struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase); static void ahd_shutdown(void *arg); static void ahd_update_coalescing_values(struct ahd_softc 
*ahd, u_int timer, u_int maxcmds, u_int mincmds); static int ahd_verify_vpd_cksum(struct vpd_config *vpd); static int ahd_wait_seeprom(struct ahd_softc *ahd); static int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target, char channel, int lun, u_int tag, role_t role); /******************************** Private Inlines *****************************/ static __inline void ahd_assert_atn(struct ahd_softc *ahd) { ahd_outb(ahd, SCSISIGO, ATNO); } /* * Determine if the current connection has a packetized * agreement. This does not necessarily mean that we * are currently in a packetized transfer. We could * just as easily be sending or receiving a message. */ static __inline int ahd_currently_packetized(struct ahd_softc *ahd) { ahd_mode_state saved_modes; int packetized; saved_modes = ahd_save_modes(ahd); if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) { /* * The packetized bit refers to the last * connection, not the current one. Check * for non-zero LQISTATE instead. */ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); packetized = ahd_inb(ahd, LQISTATE) != 0; } else { ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED; } ahd_restore_modes(ahd, saved_modes); return (packetized); } static __inline int ahd_set_active_fifo(struct ahd_softc *ahd) { u_int active_fifo; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO; switch (active_fifo) { case 0: case 1: ahd_set_modes(ahd, active_fifo, active_fifo); return (1); default: return (0); } } static __inline void ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl) { ahd_busy_tcl(ahd, tcl, SCB_LIST_NULL); } /* * Determine whether the sequencer reported a residual * for this SCB/transaction. 
*/ static __inline void ahd_update_residual(struct ahd_softc *ahd, struct scb *scb) { uint32_t sgptr; sgptr = ahd_le32toh(scb->hscb->sgptr); if ((sgptr & SG_STATUS_VALID) != 0) ahd_calc_residual(ahd, scb); } static __inline void ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb) { uint32_t sgptr; sgptr = ahd_le32toh(scb->hscb->sgptr); if ((sgptr & SG_STATUS_VALID) != 0) ahd_handle_scb_status(ahd, scb); else ahd_done(ahd, scb); } /************************* Sequencer Execution Control ************************/ /* * Restart the sequencer program from address zero */ static void ahd_restart(struct ahd_softc *ahd) { ahd_pause(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* No more pending messages */ ahd_clear_msg_state(ahd); ahd_outb(ahd, SCSISIGO, 0); /* De-assert BSY */ ahd_outb(ahd, MSG_OUT, MSG_NOOP); /* No message to send */ ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET); ahd_outb(ahd, SEQINTCTL, 0); ahd_outb(ahd, LASTPHASE, P_BUSFREE); ahd_outb(ahd, SEQ_FLAGS, 0); ahd_outb(ahd, SAVED_SCSIID, 0xFF); ahd_outb(ahd, SAVED_LUN, 0xFF); /* * Ensure that the sequencer's idea of TQINPOS * matches our own. The sequencer increments TQINPOS * only after it sees a DMA complete and a reset could * occur before the increment leaving the kernel to believe * the command arrived but the sequencer to not. */ ahd_outb(ahd, TQINPOS, ahd->tqinfifonext); /* Always allow reselection */ ahd_outb(ahd, SCSISEQ1, ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); /* * Clear any pending sequencer interrupt. It is no * longer relevant since we're resetting the Program * Counter. 
*/ ahd_outb(ahd, CLRINT, CLRSEQINT); ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET); ahd_unpause(ahd); } static void ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo) { ahd_mode_state saved_modes; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_FIFOS) != 0) printf("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo); #endif saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, fifo, fifo); ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT); if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) ahd_outb(ahd, CCSGCTL, CCSGRESET); ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); ahd_outb(ahd, SG_STATE, 0); ahd_restore_modes(ahd, saved_modes); } /************************* Input/Output Queues ********************************/ /* * Flush and completed commands that are sitting in the command * complete queues down on the chip but have yet to be dma'ed back up. */ static void ahd_flush_qoutfifo(struct ahd_softc *ahd) { struct scb *scb; ahd_mode_state saved_modes; u_int saved_scbptr; u_int ccscbctl; u_int scbid; u_int next_scbid; saved_modes = ahd_save_modes(ahd); /* * Flush the good status FIFO for completed packetized commands. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); saved_scbptr = ahd_get_scbptr(ahd); while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) { u_int fifo_mode; u_int i; scbid = ahd_inw(ahd, GSFIFO); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: Warning - GSFIFO SCB %d invalid\n", ahd_name(ahd), scbid); continue; } /* * Determine if this transaction is still active in * any FIFO. If it is, we must flush that FIFO to * the host before completing the command. */ fifo_mode = 0; rescan_fifos: for (i = 0; i < 2; i++) { /* Toggle to the other mode. */ fifo_mode ^= 1; ahd_set_modes(ahd, fifo_mode, fifo_mode); if (ahd_scb_active_in_fifo(ahd, scb) == 0) continue; ahd_run_data_fifo(ahd, scb); /* * Running this FIFO may cause a CFG4DATA for * this same transaction to assert in the other * FIFO or a new snapshot SAVEPTRS interrupt * in this FIFO. 
Even running a FIFO may not * clear the transaction if we are still waiting * for data to drain to the host. We must loop * until the transaction is not active in either * FIFO just to be sure. Reset our loop counter * so we will visit both FIFOs again before * declaring this transaction finished. We * also delay a bit so that status has a chance * to change before we look at this FIFO again. */ ahd_delay(200); goto rescan_fifos; } ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_set_scbptr(ahd, scbid); if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0 && ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0 || (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR) & SG_LIST_NULL) != 0)) { u_int comp_head; /* * The transfer completed with a residual. * Place this SCB on the complete DMA list * so that we update our in-core copy of the * SCB before completing the command. */ ahd_outb(ahd, SCB_SCSI_STATUS, 0); ahd_outb(ahd, SCB_SGPTR, ahd_inb_scbram(ahd, SCB_SGPTR) | SG_STATUS_VALID); ahd_outw(ahd, SCB_TAG, scbid); ahd_outw(ahd, SCB_NEXT_COMPLETE, SCB_LIST_NULL); comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); if (SCBID_IS_NULL(comp_head)) { ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, scbid); ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid); } else { u_int tail; tail = ahd_inw(ahd, COMPLETE_DMA_SCB_TAIL); ahd_set_scbptr(ahd, tail); ahd_outw(ahd, SCB_NEXT_COMPLETE, scbid); ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid); ahd_set_scbptr(ahd, scbid); } } else ahd_complete_scb(ahd, scb); } ahd_set_scbptr(ahd, saved_scbptr); /* * Setup for command channel portion of flush. */ ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); /* * Wait for any inprogress DMA to complete and clear DMA state * if this if for an SCB in the qinfifo. 
*/ while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) { if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) { if ((ccscbctl & ARRDONE) != 0) break; } else if ((ccscbctl & CCSCBDONE) != 0) break; ahd_delay(200); } /* * We leave the sequencer to cleanup in the case of DMA's to * update the qoutfifo. In all other cases (DMA's to the * chip or a push of an SCB from the COMPLETE_DMA_SCB list), * we disable the DMA engine so that the sequencer will not * attempt to handle the DMA completion. */ if ((ccscbctl & CCSCBDIR) != 0 || (ccscbctl & ARRDONE) != 0) ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN)); /* * Complete any SCBs that just finished * being DMA'ed into the qoutfifo. */ ahd_run_qoutfifo(ahd); saved_scbptr = ahd_get_scbptr(ahd); /* * Manually update/complete any completed SCBs that are waiting to be * DMA'ed back up to the host. */ scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); while (!SCBID_IS_NULL(scbid)) { uint8_t *hscb_ptr; u_int i; ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: Warning - DMA-up and complete " "SCB %d invalid\n", ahd_name(ahd), scbid); continue; } hscb_ptr = (uint8_t *)scb->hscb; for (i = 0; i < sizeof(struct hardware_scb); i++) *hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i); ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL); ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL); scbid = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD); while (!SCBID_IS_NULL(scbid)) { ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: Warning - Complete Qfrz SCB %d invalid\n", ahd_name(ahd), scbid); continue; } ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL); scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD); while (!SCBID_IS_NULL(scbid)) { 
ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: Warning - Complete SCB %d invalid\n", ahd_name(ahd), scbid); continue; } ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL); /* * Restore state. */ ahd_set_scbptr(ahd, saved_scbptr); ahd_restore_modes(ahd, saved_modes); ahd->flags |= AHD_UPDATE_PEND_CMDS; } /* * Determine if an SCB for a packetized transaction * is active in a FIFO. */ static int ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb) { /* * The FIFO is only active for our transaction if * the SCBPTR matches the SCB's ID and the firmware * has installed a handler for the FIFO or we have * a pending SAVEPTRS or CFG4DATA interrupt. */ if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb) || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0 && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0)) return (0); return (1); } /* * Run a data fifo to completion for a transaction we know * has completed across the SCSI bus (good status has been * received). We are already set to the correct FIFO mode * on entry to this routine. * * This function attempts to operate exactly as the firmware * would when running this FIFO. Care must be taken to update * this routine any time the firmware's FIFO algorithm is * changed. */ static void ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb) { u_int seqintsrc; seqintsrc = ahd_inb(ahd, SEQINTSRC); if ((seqintsrc & CFG4DATA) != 0) { uint32_t datacnt; uint32_t sgptr; /* * Clear full residual flag. */ sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID; ahd_outb(ahd, SCB_SGPTR, sgptr); /* * Load datacnt and address. 
*/ datacnt = ahd_inl_scbram(ahd, SCB_DATACNT); if ((datacnt & AHD_DMA_LAST_SEG) != 0) { sgptr |= LAST_SEG; ahd_outb(ahd, SG_STATE, 0); } else ahd_outb(ahd, SG_STATE, LOADING_NEEDED); ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR)); ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK); ahd_outb(ahd, SG_CACHE_PRE, sgptr); ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN); /* * Initialize Residual Fields. */ ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24); ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK); /* * Mark the SCB as having a FIFO in use. */ ahd_outb(ahd, SCB_FIFO_USE_COUNT, ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1); /* * Install a "fake" handler for this FIFO. */ ahd_outw(ahd, LONGJMP_ADDR, 0); /* * Notify the hardware that we have satisfied * this sequencer interrupt. */ ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA); } else if ((seqintsrc & SAVEPTRS) != 0) { uint32_t sgptr; uint32_t resid; if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) { /* * Snapshot Save Pointers. All that * is necessary to clear the snapshot * is a CLRCHN. */ goto clrchn; } /* * Disable S/G fetch so the DMA engine * is available to future users. */ if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) ahd_outb(ahd, CCSGCTL, 0); ahd_outb(ahd, SG_STATE, 0); /* * Flush the data FIFO. Strickly only * necessary for Rev A parts. */ ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) | FIFOFLUSH); /* * Calculate residual. */ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); resid = ahd_inl(ahd, SHCNT); resid |= ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24; ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid); if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) { /* * Must back up to the correct S/G element. * Typically this just means resetting our * low byte to the offset in the SG_CACHE, * but if we wrapped, we have to correct * the other bytes of the sgptr too. 
*/ if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0 && (sgptr & 0x80) == 0) sgptr -= 0x100; sgptr &= ~0xFF; sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW) & SG_ADDR_MASK; ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0); } else if ((resid & AHD_SG_LEN_MASK) == 0) { ahd_outb(ahd, SCB_RESIDUAL_SGPTR, sgptr | SG_LIST_NULL); } /* * Save Pointers. */ ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR)); ahd_outl(ahd, SCB_DATACNT, resid); ahd_outl(ahd, SCB_SGPTR, sgptr); ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS); ahd_outb(ahd, SEQIMODE, ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS); /* * If the data is to the SCSI bus, we are * done, otherwise wait for FIFOEMP. */ if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0) goto clrchn; } else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) { uint32_t sgptr; uint64_t data_addr; uint32_t data_len; u_int dfcntrl; /* * Disable S/G fetch so the DMA engine * is available to future users. We won't * be using the DMA engine to load segments. */ if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) { ahd_outb(ahd, CCSGCTL, 0); ahd_outb(ahd, SG_STATE, LOADING_NEEDED); } /* * Wait for the DMA engine to notice that the * host transfer is enabled and that there is * space in the S/G FIFO for new segments before * loading more segments. */ if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) != 0 && (ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) { /* * Determine the offset of the next S/G * element to load. */ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); sgptr &= SG_PTR_MASK; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); data_addr = sg->addr; data_len = sg->len; sgptr += sizeof(*sg); } else { struct ahd_dma_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); data_addr = sg->len & AHD_SG_HIGH_ADDR_MASK; data_addr <<= 8; data_addr |= sg->addr; data_len = sg->len; sgptr += sizeof(*sg); } /* * Update residual information. 
*/ ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, data_len >> 24); ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); /* * Load the S/G. */ if (data_len & AHD_DMA_LAST_SEG) { sgptr |= LAST_SEG; ahd_outb(ahd, SG_STATE, 0); } ahd_outq(ahd, HADDR, data_addr); ahd_outl(ahd, HCNT, data_len & AHD_SG_LEN_MASK); ahd_outb(ahd, SG_CACHE_PRE, sgptr & 0xFF); /* * Advertise the segment to the hardware. */ dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN; if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) { /* * Use SCSIENWRDIS so that SCSIEN * is never modified by this * operation. */ dfcntrl |= SCSIENWRDIS; } ahd_outb(ahd, DFCNTRL, dfcntrl); } } else if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG_DONE) != 0) { /* * Transfer completed to the end of SG list * and has flushed to the host. */ ahd_outb(ahd, SCB_SGPTR, ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL); goto clrchn; } else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) { clrchn: /* * Clear any handler for this FIFO, decrement * the FIFO use count for the SCB, and release * the FIFO. */ ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); ahd_outb(ahd, SCB_FIFO_USE_COUNT, ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1); ahd_outb(ahd, DFFSXFRCTL, CLRCHN); } } /* * Look for entries in the QoutFIFO that have completed. * The valid_tag completion field indicates the validity * of the entry - the valid value toggles each time through * the queue. We use the sg_status field in the completion * entry to avoid referencing the hscb if the completion * occurred with no errors and no residual. sg_status is * a copy of the first byte (little endian) of the sgptr * hscb field. 
*/ void ahd_run_qoutfifo(struct ahd_softc *ahd) { struct ahd_completion *completion; struct scb *scb; u_int scb_index; if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0) panic("ahd_run_qoutfifo recursion"); ahd->flags |= AHD_RUNNING_QOUTFIFO; ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD); for (;;) { completion = &ahd->qoutfifo[ahd->qoutfifonext]; if (completion->valid_tag != ahd->qoutfifonext_valid_tag) break; scb_index = ahd_le16toh(completion->tag); scb = ahd_lookup_scb(ahd, scb_index); if (scb == NULL) { printf("%s: WARNING no command for scb %d " "(cmdcmplt)\nQOUTPOS = %d\n", ahd_name(ahd), scb_index, ahd->qoutfifonext); ahd_dump_card_state(ahd); } else if ((completion->sg_status & SG_STATUS_VALID) != 0) { ahd_handle_scb_status(ahd, scb); } else { ahd_done(ahd, scb); } ahd->qoutfifonext = (ahd->qoutfifonext+1) & (AHD_QOUT_SIZE-1); if (ahd->qoutfifonext == 0) ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID; } ahd->flags &= ~AHD_RUNNING_QOUTFIFO; } /************************* Interrupt Handling *********************************/ void ahd_handle_hwerrint(struct ahd_softc *ahd) { /* * Some catastrophic hardware error has occurred. * Print it for the user and disable the controller. */ int i; int error; error = ahd_inb(ahd, ERROR); for (i = 0; i < num_errors; i++) { if ((error & ahd_hard_errors[i].errno) != 0) printf("%s: hwerrint, %s\n", ahd_name(ahd), ahd_hard_errors[i].errmesg); } ahd_dump_card_state(ahd); panic("BRKADRINT"); /* Tell everyone that this HBA is no longer available */ ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, CAM_NO_HBA); /* Tell the system that this controller has gone away. 
 */
	ahd_free(ahd);
}

#ifdef AHD_DEBUG
/*
 * Debug helper: print every scatter/gather element of an SCB, handling
 * both the 64-bit and 32-bit S/G descriptor layouts.
 */
static void
ahd_dump_sglist(struct scb *scb)
{
	int i;

	if (scb->sg_count > 0) {
		if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) {
			struct ahd_dma64_seg *sg_list;

			sg_list = (struct ahd_dma64_seg*)scb->sg_list;
			for (i = 0; i < scb->sg_count; i++) {
				uint64_t addr;
				uint32_t len;

				addr = ahd_le64toh(sg_list[i].addr);
				len = ahd_le32toh(sg_list[i].len);
				/* NOTE(review): "%x%x" does not zero-pad the
				 * low word, so the printed address is
				 * ambiguous for high>0 — consider %x%08x. */
				printf("sg[%d] - Addr 0x%x%x : Length %d%s\n",
				       i,
				       (uint32_t)((addr >> 32) & 0xFFFFFFFF),
				       (uint32_t)(addr & 0xFFFFFFFF),
				       sg_list[i].len & AHD_SG_LEN_MASK,
				       (sg_list[i].len & AHD_DMA_LAST_SEG)
				     ? " Last" : "");
			}
		} else {
			struct ahd_dma_seg *sg_list;

			sg_list = (struct ahd_dma_seg*)scb->sg_list;
			for (i = 0; i < scb->sg_count; i++) {
				uint32_t len;

				len = ahd_le32toh(sg_list[i].len);
				/* High address bits live in the top byte of
				 * the length word in this layout. */
				printf("sg[%d] - Addr 0x%x%x : Length %d%s\n",
				       i,
				       (len & AHD_SG_HIGH_ADDR_MASK) >> 24,
				       ahd_le32toh(sg_list[i].addr),
				       len & AHD_SG_LEN_MASK,
				       len & AHD_DMA_LAST_SEG ? " Last" : "");
			}
		}
	}
}
#endif  /* AHD_DEBUG */

/*
 * Service a sequencer interrupt: decode SEQINTCODE and perform the
 * host-side recovery/assist action the sequencer firmware requested.
 */
void
ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
{
	u_int seqintcode;

	/*
	 * Save the sequencer interrupt code and clear the SEQINT
	 * bit. We will unpause the sequencer, if appropriate,
	 * after servicing the request.
	 */
	seqintcode = ahd_inb(ahd, SEQINTCODE);
	ahd_outb(ahd, CLRINT, CLRSEQINT);
	if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
		/*
		 * Unpause the sequencer and let it clear
		 * SEQINT by writing NO_SEQINT to it. This
		 * will cause the sequencer to be paused again,
		 * which is the expected state of this routine.
 */
		ahd_unpause(ahd);
		while (!ahd_is_paused(ahd))
			;
		ahd_outb(ahd, CLRINT, CLRSEQINT);
	}
	ahd_update_modes(ahd);
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MISC) != 0)
		printf("%s: Handle Seqint Called for code %d\n",
		       ahd_name(ahd), seqintcode);
#endif
	switch (seqintcode) {
	case ENTERING_NONPACK:
	{
		struct scb *scb;
		u_int scbid;

		AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
				 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			/*
			 * Somehow need to know if this
			 * is from a selection or reselection.
			 * From that, we can determine target
			 * ID so we at least have an I_T nexus.
			 */
		} else {
			/* Seed the saved identity registers from the SCB. */
			ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
			ahd_outb(ahd, SAVED_LUN, scb->hscb->lun);
			ahd_outb(ahd, SEQ_FLAGS, 0x0);
		}
		if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0
		 && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
			/*
			 * Phase change after read stream with
			 * CRC error with P0 asserted on last
			 * packet.
			 */
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
				printf("%s: Assuming LQIPHASE_NLQ with "
				       "P0 assertion\n", ahd_name(ahd));
#endif
		}
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
			printf("%s: Entering NONPACK\n", ahd_name(ahd));
#endif
		break;
	}
	case INVALID_SEQINT:
		printf("%s: Invalid Sequencer interrupt occurred, "
		       "resetting channel.\n",
		       ahd_name(ahd));
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
			ahd_dump_card_state(ahd);
#endif
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		break;
	case STATUS_OVERRUN:
	{
		struct scb *scb;
		u_int scbid;

		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL)
			ahd_print_path(ahd, scb);
		else
			printf("%s: ", ahd_name(ahd));
		printf("SCB %d Packetized Status Overrun", scbid);
		ahd_dump_card_state(ahd);
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		break;
	}
	case CFG4ISTAT_INTR:
	{
		struct scb *scb;
		u_int scbid;

		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			ahd_dump_card_state(ahd);
			printf("CFG4ISTAT: Free SCB %d referenced", scbid);
			panic("For safety");
		}
		/* Point the DMA engine at the SCB's sense buffer and start
		 * the transfer as the final (and only) S/G segment. */
		ahd_outq(ahd, HADDR, scb->sense_busaddr);
		ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE);
		ahd_outb(ahd, HCNT + 2, 0);
		ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG);
		ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);
		break;
	}
	case ILLEGAL_PHASE:
	{
		u_int bus_phase;

		bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
		printf("%s: ILLEGAL_PHASE 0x%x\n",
		       ahd_name(ahd), bus_phase);

		switch (bus_phase) {
		case P_DATAOUT:
		case P_DATAIN:
		case P_DATAOUT_DT:
		case P_DATAIN_DT:
		case P_MESGOUT:
		case P_STATUS:
		case P_MESGIN:
			ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
			printf("%s: Issued Bus Reset.\n", ahd_name(ahd));
			break;
		case P_COMMAND:
		{
			struct ahd_devinfo devinfo;
			struct scb *scb;
			struct ahd_initiator_tinfo *targ_info;
			struct ahd_tmode_tstate *tstate;
			struct ahd_transinfo *tinfo;
			u_int scbid;

			/*
			 * If a target takes us into the command phase
			 * assume that it has been externally reset and
			 * has thus lost our previous packetized negotiation
			 * agreement.  Since we have not sent an identify
			 * message and may not have fully qualified the
			 * connection, we change our command to TUR, assert
			 * ATN and ABORT the task when we go to message in
			 * phase.  The OSM will see the REQUEUE_REQUEST
			 * status and retry the command.
			 */
			scbid = ahd_get_scbptr(ahd);
			scb = ahd_lookup_scb(ahd, scbid);
			if (scb == NULL) {
				printf("Invalid phase with no valid SCB. "
				       "Resetting bus.\n");
				ahd_reset_channel(ahd, 'A',
						  /*Initiate Reset*/TRUE);
				break;
			}
			ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
					    SCB_GET_TARGET(ahd, scb),
					    SCB_GET_LUN(scb),
					    SCB_GET_CHANNEL(ahd, scb),
					    ROLE_INITIATOR);
			targ_info = ahd_fetch_transinfo(ahd,
							devinfo.channel,
							devinfo.our_scsiid,
							devinfo.target,
							&tstate);
			tinfo = &targ_info->curr;
			/* Drop back to narrow/async for the renegotiation. */
			ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHD_TRANS_ACTIVE, /*paused*/TRUE);
			ahd_set_syncrate(ahd, &devinfo, /*period*/0,
					 /*offset*/0, /*ppr_options*/0,
					 AHD_TRANS_ACTIVE, /*paused*/TRUE);
			/* Hand-craft TUR command */
			ahd_outb(ahd, SCB_CDB_STORE, 0);
			ahd_outb(ahd, SCB_CDB_STORE+1, 0);
			ahd_outb(ahd, SCB_CDB_STORE+2, 0);
			ahd_outb(ahd, SCB_CDB_STORE+3, 0);
			ahd_outb(ahd, SCB_CDB_STORE+4, 0);
			ahd_outb(ahd, SCB_CDB_STORE+5, 0);
			ahd_outb(ahd, SCB_CDB_LEN, 6);
			scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE);
			scb->hscb->control |= MK_MESSAGE;
			ahd_outb(ahd, SCB_CONTROL, scb->hscb->control);
			ahd_outb(ahd, MSG_OUT, HOST_MSG);
			ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
			/*
			 * The lun is 0, regardless of the SCB's lun
			 * as we have not sent an identify message.
			 */
			ahd_outb(ahd, SAVED_LUN, 0);
			ahd_outb(ahd, SEQ_FLAGS, 0);
			ahd_assert_atn(ahd);
			scb->flags &= ~SCB_PACKETIZED;
			scb->flags |= SCB_ABORT|SCB_EXTERNAL_RESET;
			ahd_freeze_devq(ahd, scb);
			ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
			ahd_freeze_scb(scb);

			/* Notify XPT */
			ahd_send_async(ahd, devinfo.channel, devinfo.target,
				       CAM_LUN_WILDCARD, AC_SENT_BDR);

			/*
			 * Allow the sequencer to continue with
			 * non-pack processing.
			 */
			ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
			ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT);
			if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
				ahd_outb(ahd, CLRLQOINT1, 0);
			}
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
				ahd_print_path(ahd, scb);
				printf("Unexpected command phase from "
				       "packetized target\n");
			}
#endif
			break;
		}
		}
		break;
	}
	case CFG4OVERRUN:
	{
		struct scb *scb;
		u_int scb_index;

#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
			printf("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd),
			       ahd_inb(ahd, MODE_PTR));
		}
#endif
		scb_index = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scb_index);
		if (scb == NULL) {
			/*
			 * Attempt to transfer to an SCB that is
			 * not outstanding.
			 */
			ahd_assert_atn(ahd);
			ahd_outb(ahd, MSG_OUT, HOST_MSG);
			ahd->msgout_buf[0] = MSG_ABORT_TASK;
			ahd->msgout_len = 1;
			ahd->msgout_index = 0;
			ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
			/*
			 * Clear status received flag to prevent any
			 * attempt to complete this bogus SCB.
			 */
			ahd_outb(ahd, SCB_CONTROL,
				 ahd_inb_scbram(ahd, SCB_CONTROL)
				 & ~STATUS_RCVD);
		}
		break;
	}
	case DUMP_CARD_STATE:
	{
		ahd_dump_card_state(ahd);
		break;
	}
	case PDATA_REINIT:
	{
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
			printf("%s: PDATA_REINIT - DFCNTRL = 0x%x "
			       "SG_CACHE_SHADOW = 0x%x\n",
			       ahd_name(ahd), ahd_inb(ahd, DFCNTRL),
			       ahd_inb(ahd, SG_CACHE_SHADOW));
		}
#endif
		ahd_reinitialize_dataptrs(ahd);
		break;
	}
	case HOST_MSG_LOOP:
	{
		struct ahd_devinfo devinfo;

		/*
		 * The sequencer has encountered a message phase
		 * that requires host assistance for completion.
		 * While handling the message phase(s), we will be
		 * notified by the sequencer after each byte is
		 * transferred so we can track bus phase changes.
		 *
		 * If this is the first time we've seen a HOST_MSG_LOOP
		 * interrupt, initialize the state of the host message
		 * loop.
		 */
		ahd_fetch_devinfo(ahd, &devinfo);
		if (ahd->msg_type == MSG_TYPE_NONE) {
			struct scb *scb;
			u_int scb_index;
			u_int bus_phase;

			bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
			if (bus_phase != P_MESGIN
			 && bus_phase != P_MESGOUT) {
				printf("ahd_intr: HOST_MSG_LOOP bad "
				       "phase 0x%x\n", bus_phase);
				/*
				 * Probably transitioned to bus free before
				 * we got here.  Just punt the message.
				 */
				ahd_dump_card_state(ahd);
				ahd_clear_intstat(ahd);
				ahd_restart(ahd);
				return;
			}

			scb_index = ahd_get_scbptr(ahd);
			scb = ahd_lookup_scb(ahd, scb_index);
			if (devinfo.role == ROLE_INITIATOR) {
				if (bus_phase == P_MESGOUT)
					ahd_setup_initiator_msgout(ahd,
								   &devinfo,
								   scb);
				else {
					ahd->msg_type =
					    MSG_TYPE_INITIATOR_MSGIN;
					ahd->msgin_index = 0;
				}
			}
#ifdef AHD_TARGET_MODE
			else {
				if (bus_phase == P_MESGOUT) {
					ahd->msg_type =
					    MSG_TYPE_TARGET_MSGOUT;
					ahd->msgin_index = 0;
				}
				else
					ahd_setup_target_msgin(ahd,
							       &devinfo,
							       scb);
			}
#endif
		}

		ahd_handle_message_phase(ahd);
		break;
	}
	case NO_MATCH:
	{
		/* Ensure we don't leave the selection hardware on */
		AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
		ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);

		printf("%s:%c:%d: no active SCB for reconnecting "
		       "target - issuing BUS DEVICE RESET\n",
		       ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4);
		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
		       "REG0 == 0x%x ACCUM = 0x%x\n",
		       ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN),
		       ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM));
		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
		       "SINDEX == 0x%x\n",
		       ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd),
		       ahd_find_busy_tcl(ahd,
					 BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID),
						   ahd_inb(ahd, SAVED_LUN))),
		       ahd_inw(ahd, SINDEX));
		printf("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
		       "SCB_CONTROL == 0x%x\n",
		       ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID),
		       ahd_inb_scbram(ahd, SCB_LUN),
		       ahd_inb_scbram(ahd, SCB_CONTROL));
		printf("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n",
		       ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI));
		printf("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0));
		printf("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0));
		ahd_dump_card_state(ahd);
		/* Queue a BUS DEVICE RESET for the unknown reconnector. */
		ahd->msgout_buf[0] = MSG_BUS_DEV_RESET;
		ahd->msgout_len = 1;
		ahd->msgout_index = 0;
		ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		ahd_outb(ahd, MSG_OUT, HOST_MSG);
		ahd_assert_atn(ahd);
		break;
	}
	case PROTO_VIOLATION:
	{
		ahd_handle_proto_violation(ahd);
		break;
	}
	case IGN_WIDE_RES:
	{
		struct ahd_devinfo devinfo;

		ahd_fetch_devinfo(ahd, &devinfo);
		ahd_handle_ign_wide_residue(ahd, &devinfo);
		break;
	}
	case BAD_PHASE:
	{
		u_int lastphase;

		lastphase = ahd_inb(ahd, LASTPHASE);
		printf("%s:%c:%d: unknown scsi bus phase %x, "
		       "lastphase = 0x%x. Attempting to continue\n",
		       ahd_name(ahd), 'A',
		       SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
		       lastphase, ahd_inb(ahd, SCSISIGI));
		break;
	}
	case MISSED_BUSFREE:
	{
		u_int lastphase;

		lastphase = ahd_inb(ahd, LASTPHASE);
		printf("%s:%c:%d: Missed busfree. "
		       "Lastphase = 0x%x, Curphase = 0x%x\n",
		       ahd_name(ahd), 'A',
		       SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
		       lastphase, ahd_inb(ahd, SCSISIGI));
		ahd_restart(ahd);
		return;
	}
	case DATA_OVERRUN:
	{
		/*
		 * When the sequencer detects an overrun, it
		 * places the controller in "BITBUCKET" mode
		 * and allows the target to complete its transfer.
		 * Unfortunately, none of the counters get updated
		 * when the controller is in this mode, so we have
		 * no way of knowing how large the overrun was.
		 */
		struct scb *scb;
		u_int scbindex;
#ifdef AHD_DEBUG
		u_int lastphase;
#endif

		scbindex = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbindex);
#ifdef AHD_DEBUG
		lastphase = ahd_inb(ahd, LASTPHASE);
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
			ahd_print_path(ahd, scb);
			printf("data overrun detected %s. Tag == 0x%x.\n",
			       ahd_lookup_phase_entry(lastphase)->phasemsg,
			       SCB_GET_TAG(scb));
			ahd_print_path(ahd, scb);
			printf("%s seen Data Phase. Length = %ld. "
			       "NumSGs = %d.\n",
			       ahd_inb(ahd, SEQ_FLAGS) & DPHASE
			       ? "Have" : "Haven't",
			       ahd_get_transfer_length(scb), scb->sg_count);
			ahd_dump_sglist(scb);
		}
#endif

		/*
		 * Set this and it will take effect when the
		 * target does a command complete.
		 */
		ahd_freeze_devq(ahd, scb);
		ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
		ahd_freeze_scb(scb);
		break;
	}
	case MKMSG_FAILED:
	{
		struct ahd_devinfo devinfo;
		struct scb *scb;
		u_int scbid;

		ahd_fetch_devinfo(ahd, &devinfo);
		printf("%s:%c:%d:%d: Attempt to issue message failed\n",
		       ahd_name(ahd), devinfo.channel, devinfo.target,
		       devinfo.lun);
		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL
		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
			/*
			 * Ensure that we didn't put a second instance of this
			 * SCB into the QINFIFO.
			 */
			ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
					   SCB_GET_CHANNEL(ahd, scb),
					   SCB_GET_LUN(scb), SCB_GET_TAG(scb),
					   ROLE_INITIATOR, /*status*/0,
					   SEARCH_REMOVE);
		ahd_outb(ahd, SCB_CONTROL,
			 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
		break;
	}
	case TASKMGMT_FUNC_COMPLETE:
	{
		u_int scbid;
		struct scb *scb;

		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL) {
			u_int lun;
			u_int tag;
			cam_status error;

			ahd_print_path(ahd, scb);
			printf("Task Management Func 0x%x Complete\n",
			       scb->hscb->task_management);
			lun = CAM_LUN_WILDCARD;
			tag = SCB_LIST_NULL;

			switch (scb->hscb->task_management) {
			case SIU_TASKMGMT_ABORT_TASK:
				tag = SCB_GET_TAG(scb);
				/* FALLTHROUGH */
			case SIU_TASKMGMT_ABORT_TASK_SET:
			case SIU_TASKMGMT_CLEAR_TASK_SET:
				lun = scb->hscb->lun;
				error = CAM_REQ_ABORTED;
				ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
					       'A', lun, tag, ROLE_INITIATOR,
					       error);
				break;
			case SIU_TASKMGMT_LUN_RESET:
				lun = scb->hscb->lun;
				/* FALLTHROUGH */
			case SIU_TASKMGMT_TARGET_RESET:
			{
				struct ahd_devinfo devinfo;

				ahd_scb_devinfo(ahd, &devinfo, scb);
				error = CAM_BDR_SENT;
				ahd_handle_devreset(ahd, &devinfo, lun,
						    CAM_BDR_SENT,
						    lun != CAM_LUN_WILDCARD
						    ? "Lun Reset"
						    : "Target Reset",
						    /*verbose_level*/0);
				break;
			}
			default:
				panic("Unexpected TaskMgmt Func\n");
				break;
			}
		}
		break;
	}
	case TASKMGMT_CMD_CMPLT_OKAY:
	{
		u_int scbid;
		struct scb *scb;

		/*
		 * An ABORT TASK TMF failed to be delivered before
		 * the targeted command completed normally.
		 */
		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL) {
			/*
			 * Remove the second instance of this SCB from
			 * the QINFIFO if it is still there.
			 */
			ahd_print_path(ahd, scb);
			printf("SCB completes before TMF\n");
			/*
			 * Handle losing the race. Wait until any
			 * current selection completes. We will then
			 * set the TMF back to zero in this SCB so that
			 * the sequencer doesn't bother to issue another
			 * sequencer interrupt for its completion.
			 */
			while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
			    && (ahd_inb(ahd, SSTAT0) & SELDO) == 0
			    && (ahd_inb(ahd, SSTAT1) & SELTO) == 0)
				;
			ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0);
			ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
					   SCB_GET_CHANNEL(ahd, scb),
					   SCB_GET_LUN(scb), SCB_GET_TAG(scb),
					   ROLE_INITIATOR, /*status*/0,
					   SEARCH_REMOVE);
		}
		break;
	}
	case TRACEPOINT0:
	case TRACEPOINT1:
	case TRACEPOINT2:
	case TRACEPOINT3:
		printf("%s: Tracepoint %d\n", ahd_name(ahd),
		       seqintcode - TRACEPOINT0);
		break;
	case NO_SEQINT:
		break;
	case SAW_HWERR:
		ahd_handle_hwerrint(ahd);
		break;
	default:
		printf("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd),
		       seqintcode);
		break;
	}
	/*
	 * The sequencer is paused immediately on
	 * a SEQINT, so we should restart it when
	 * we're done.
 */
	ahd_unpause(ahd);
}

/*
 * Service a SCSI interrupt: snapshot the relevant status registers and
 * dispatch, in priority order, to the matching error/selection/busfree
 * handling branch.
 */
void
ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
{
	struct scb *scb;
	u_int status0;
	u_int status3;
	u_int status;
	u_int lqistat1;
	u_int lqostat0;
	u_int scbid;
	u_int busfreetime;

	ahd_update_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* Latch all interrupt sources before deciding what to handle. */
	status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR);
	status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO);
	status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
	lqistat1 = ahd_inb(ahd, LQISTAT1);
	lqostat0 = ahd_inb(ahd, LQOSTAT0);
	busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;

	/*
	 * Ignore external resets after a bus reset.
	 */
	if (((status & SCSIRSTI) != 0) && (ahd->flags & AHD_BUS_RESET_ACTIVE)) {
		ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
		return;
	}

	/*
	 * Clear bus reset flag
	 */
	ahd->flags &= ~AHD_BUS_RESET_ACTIVE;

	if ((status0 & (SELDI|SELDO)) != 0) {
		u_int simode0;

		/* Only honor selection status bits that are unmasked. */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		simode0 = ahd_inb(ahd, SIMODE0);
		status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO);
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	}
	scbid = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scbid);
	if (scb != NULL
	 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
		scb = NULL;

	if ((status0 & IOERR) != 0) {
		u_int now_lvd;

		now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40;
		printf("%s: Transceiver State Has Changed to %s mode\n",
		       ahd_name(ahd), now_lvd ? "LVD" : "SE");
		ahd_outb(ahd, CLRSINT0, CLRIOERR);
		/*
		 * A change in I/O mode is equivalent to a bus reset.
		 */
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		ahd_pause(ahd);
		ahd_setup_iocell_workaround(ahd);
		ahd_unpause(ahd);
	} else if ((status0 & OVERRUN) != 0) {

		printf("%s: SCSI offset overrun detected. "
		       "Resetting bus.\n", ahd_name(ahd));
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
	} else if ((status & SCSIRSTI) != 0) {

		printf("%s: Someone reset channel A\n", ahd_name(ahd));
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE);
	} else if ((status & SCSIPERR) != 0) {

		/* Make sure the sequencer is in a safe location. */
		ahd_clear_critical_section(ahd);

		ahd_handle_transmission_error(ahd);
	} else if (lqostat0 != 0) {

		printf("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0);
		ahd_outb(ahd, CLRLQOINT0, lqostat0);
		if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
			ahd_outb(ahd, CLRLQOINT1, 0);
	} else if ((status & SELTO) != 0) {
		u_int  scbid;

		/* Stop the selection */
		ahd_outb(ahd, SCSISEQ0, 0);

		/* Make sure the sequencer is in a safe location. */
		ahd_clear_critical_section(ahd);

		/* No more pending messages */
		ahd_clear_msg_state(ahd);

		/* Clear interrupt state */
		ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);

		/*
		 * Although the driver does not care about the
		 * 'Selection in Progress' status bit, the busy
		 * LED does. SELINGO is only cleared by a successful
		 * selection, so we must manually clear it to ensure
		 * the LED turns off just in case no future successful
		 * selections occur (e.g. no devices on the bus).
		 */
		ahd_outb(ahd, CLRSINT0, CLRSELINGO);

		scbid = ahd_inw(ahd, WAITING_TID_HEAD);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printf("%s: ahd_intr - referenced scb not "
			       "valid during SELTO scb(0x%x)\n",
			       ahd_name(ahd), scbid);
			ahd_dump_card_state(ahd);
		} else {
			struct ahd_devinfo devinfo;
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_SELTO) != 0) {
				ahd_print_path(ahd, scb);
				printf("Saw Selection Timeout for SCB 0x%x\n",
				       scbid);
			}
#endif
			ahd_scb_devinfo(ahd, &devinfo, scb);
			ahd_set_transaction_status(scb, CAM_SEL_TIMEOUT);
			ahd_freeze_devq(ahd, scb);

			/*
			 * Cancel any pending transactions on the device
			 * now that it seems to be missing. This will
			 * also revert us to async/narrow transfers until
			 * we can renegotiate with the device.
			 */
			ahd_handle_devreset(ahd, &devinfo,
					    CAM_LUN_WILDCARD,
					    CAM_SEL_TIMEOUT,
					    "Selection Timeout",
					    /*verbose_level*/1);
		}
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
		ahd_iocell_first_selection(ahd);
		ahd_unpause(ahd);
	} else if ((status0 & (SELDI|SELDO)) != 0) {

		ahd_iocell_first_selection(ahd);
		ahd_unpause(ahd);
	} else if (status3 != 0) {
		printf("%s: SCSI Cell parity error SSTAT3 == 0x%x\n",
		       ahd_name(ahd), status3);
		ahd_outb(ahd, CLRSINT3, status3);
	} else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) {

		/* Make sure the sequencer is in a safe location. */
		ahd_clear_critical_section(ahd);

		ahd_handle_lqiphase_error(ahd, lqistat1);
	} else if ((lqistat1 & LQICRCI_NLQ) != 0) {
		/*
		 * This status can be delayed during some
		 * streaming operations. The SCSIPHASE
		 * handler has already dealt with this case
		 * so just clear the error.
		 */
		ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ);
	} else if ((status & BUSFREE) != 0
		|| (lqistat1 & LQOBUSFREE) != 0) {
		u_int lqostat1;
		int   restart;
		int   clear_fifo;
		int   packetized;
		u_int mode;

		/*
		 * Clear our selection hardware as soon as possible.
		 * We may have an entry in the waiting Q for this target,
		 * that is affected by this busfree and we don't want to
		 * go about selecting the target while we handle the event.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);

		/* Make sure the sequencer is in a safe location. */
		ahd_clear_critical_section(ahd);

		/*
		 * Determine what we were up to at the time of
		 * the busfree.
		 */
		mode = AHD_MODE_SCSI;
		busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
		lqostat1 = ahd_inb(ahd, LQOSTAT1);
		switch (busfreetime) {
		case BUSFREE_DFF0:
		case BUSFREE_DFF1:
		{
			u_int	scbid;
			struct	scb *scb;

			/* A data FIFO was active; inspect its SCB. */
			mode = busfreetime == BUSFREE_DFF0
			     ? AHD_MODE_DFF0 : AHD_MODE_DFF1;
			ahd_set_modes(ahd, mode, mode);
			scbid = ahd_get_scbptr(ahd);
			scb = ahd_lookup_scb(ahd, scbid);
			if (scb == NULL) {
				printf("%s: Invalid SCB %d in DFF%d "
				       "during unexpected busfree\n",
				       ahd_name(ahd), scbid, mode);
				packetized = 0;
			} else
				packetized =
				    (scb->flags & SCB_PACKETIZED) != 0;
			clear_fifo = 1;
			break;
		}
		case BUSFREE_LQO:
			clear_fifo = 0;
			packetized = 1;
			break;
		default:
			clear_fifo = 0;
			packetized = (lqostat1 & LQOBUSFREE) != 0;
			if (!packetized
			 && ahd_inb(ahd, LASTPHASE) == P_BUSFREE
			 && (ahd_inb(ahd, SSTAT0) & SELDI) == 0
			 && ((ahd_inb(ahd, SSTAT0) & SELDO) == 0
			  || (ahd_inb(ahd, SCSISEQ0) & ENSELO) == 0))
				/*
				 * Assume packetized if we are not
				 * on the bus in a non-packetized
				 * capacity and any pending selection
				 * was a packetized selection.
				 */
				packetized = 1;
			break;
		}

#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printf("Saw Busfree. Busfreetime = 0x%x.\n",
			       busfreetime);
#endif
		/*
		 * Busfrees that occur in non-packetized phases are
		 * handled by the nonpkt_busfree handler.
		 */
		if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) {
			restart = ahd_handle_pkt_busfree(ahd, busfreetime);
		} else {
			packetized = 0;
			restart = ahd_handle_nonpkt_busfree(ahd);
		}
		/*
		 * Clear the busfree interrupt status. The setting of
		 * the interrupt is a pulse, so in a perfect world, we
		 * would not need to muck with the ENBUSFREE logic. This
		 * would ensure that if the bus moves on to another
		 * connection, busfree protection is still in force. If
		 * BUSFREEREV is broken, however, we must manually clear
		 * the ENBUSFREE if the busfree occurred during a non-pack
		 * connection so that we don't get false positives during
		 * future, packetized, connections.
 */
		ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
		if (packetized == 0
		 && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0)
			ahd_outb(ahd, SIMODE1,
				 ahd_inb(ahd, SIMODE1) & ~ENBUSFREE);

		if (clear_fifo)
			ahd_clear_fifo(ahd, mode);

		ahd_clear_msg_state(ahd);
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
		if (restart) {
			ahd_restart(ahd);
		} else {
			ahd_unpause(ahd);
		}
	} else {
		printf("%s: Missing case in ahd_handle_scsiint. status = %x\n",
		       ahd_name(ahd), status);
		ahd_dump_card_state(ahd);
		ahd_clear_intstat(ahd);
		ahd_unpause(ahd);
	}
}

/*
 * Handle a SCSI parity/CRC transmission error: classify the failure from
 * the LQI/PERRDIAG status, report it, and either reset the bus, retry the
 * LQ engine, or queue an error message to the target.
 */
static void
ahd_handle_transmission_error(struct ahd_softc *ahd)
{
	struct	scb *scb;
	u_int	scbid;
	u_int	lqistat1;
	u_int	lqistat2;
	u_int	msg_out;
	u_int	curphase;
	u_int	lastphase;
	u_int	perrdiag;
	u_int	cur_col;
	int	silent;

	scb = NULL;
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ);
	lqistat2 = ahd_inb(ahd, LQISTAT2);
	if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0
	 && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) {
		u_int lqistate;

		/* Workaround: infer a delayed NLQ CRC error from the LQI
		 * state machine's current state. */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		lqistate = ahd_inb(ahd, LQISTATE);
		if ((lqistate >= 0x1E && lqistate <= 0x24)
		 || (lqistate == 0x29)) {
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
				printf("%s: NLQCRC found via LQISTATE\n",
				       ahd_name(ahd));
			}
#endif
			lqistat1 |= LQICRCI_NLQ;
		}
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	}

	ahd_outb(ahd, CLRLQIINT1, lqistat1);
	lastphase = ahd_inb(ahd, LASTPHASE);
	curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
	perrdiag = ahd_inb(ahd, PERRDIAG);
	msg_out = MSG_INITIATOR_DET_ERR;
	ahd_outb(ahd, CLRSINT1, CLRSCSIPERR);

	/*
	 * Try to find the SCB associated with this error.
	 */
	silent = FALSE;
	if (lqistat1 == 0
	 || (lqistat1 & LQICRCI_NLQ) != 0) {
		if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0)
			ahd_set_active_fifo(ahd);
		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL && SCB_IS_SILENT(scb))
			silent = TRUE;
	}

	cur_col = 0;
	if (silent == FALSE) {
		printf("%s: Transmission error detected\n", ahd_name(ahd));
		ahd_lqistat1_print(lqistat1, &cur_col, 50);
		ahd_lastphase_print(lastphase, &cur_col, 50);
		ahd_scsisigi_print(curphase, &cur_col, 50);
		ahd_perrdiag_print(perrdiag, &cur_col, 50);
		printf("\n");
		ahd_dump_card_state(ahd);
	}

	if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) {
		if (silent == FALSE) {
			printf("%s: Gross protocol error during incoming "
			       "packet. lqistat1 == 0x%x. Resetting bus.\n",
			       ahd_name(ahd), lqistat1);
		}
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		return;
	} else if ((lqistat1 & LQICRCI_LQ) != 0) {
		/*
		 * A CRC error has been detected on an incoming LQ.
		 * The bus is currently hung on the last ACK.
		 * Hit LQIRETRY to release the last ack, and
		 * wait for the sequencer to determine that ATNO
		 * is asserted while in message out to take us
		 * to our host message loop. No NONPACKREQ or
		 * LQIPHASE type errors will occur in this
		 * scenario. After this first LQIRETRY, the LQI
		 * manager will be in ISELO where it will
		 * happily sit until another packet phase begins.
		 * Unexpected bus free detection is enabled
		 * through any phases that occur after we release
		 * this last ack until the LQI manager sees a
		 * packet phase. This implies we may have to
		 * ignore a perfectly valid "unexpected busfree"
		 * after our "initiator detected error" message is
		 * sent. A busfree is the expected response after
		 * we tell the target that its L_Q was corrupted.
		 * (SPI4R09 10.7.3.3.3)
		 */
		ahd_outb(ahd, LQCTL2, LQIRETRY);
		printf("LQIRetry for LQICRCI_LQ to release ACK\n");
	} else if ((lqistat1 & LQICRCI_NLQ) != 0) {
		/*
		 * We detected a CRC error in a NON-LQ packet.
		 * The hardware has varying behavior in this situation
		 * depending on whether this packet was part of a
		 * stream or not.
		 *
		 * PKT by PKT mode:
		 * The hardware has already acked the complete packet.
		 * If the target honors our outstanding ATN condition,
		 * we should be (or soon will be) in MSGOUT phase.
		 * This will trigger the LQIPHASE_LQ status bit as the
		 * hardware was expecting another LQ. Unexpected
		 * busfree detection is enabled. Once LQIPHASE_LQ is
		 * true (first entry into host message loop is much
		 * the same), we must clear LQIPHASE_LQ and hit
		 * LQIRETRY so the hardware is ready to handle
		 * a future LQ. NONPACKREQ will not be asserted again
		 * once we hit LQIRETRY until another packet is
		 * processed. The target may either go busfree
		 * or start another packet in response to our message.
		 *
		 * Read Streaming P0 asserted:
		 * If we raise ATN and the target completes the entire
		 * stream (P0 asserted during the last packet), the
		 * hardware will ack all data and return to the ISTART
		 * state. When the target responds to our ATN condition,
		 * LQIPHASE_LQ will be asserted. We should respond to
		 * this with an LQIRETRY to prepare for any future
		 * packets. NONPACKREQ will not be asserted again
		 * once we hit LQIRETRY until another packet is
		 * processed. The target may either go busfree or
		 * start another packet in response to our message.
		 * Busfree detection is enabled.
		 *
		 * Read Streaming P0 not asserted:
		 * If we raise ATN and the target transitions to
		 * MSGOUT in or after a packet where P0 is not
		 * asserted, the hardware will assert LQIPHASE_NLQ.
		 * We should respond to the LQIPHASE_NLQ with an
		 * LQIRETRY. Should the target stay in a non-pkt
		 * phase after we send our message, the hardware
		 * will assert LQIPHASE_LQ. Recovery is then just as
		 * listed above for the read streaming with P0 asserted.
		 * Busfree detection is enabled.
		 */
		if (silent == FALSE)
			printf("LQICRC_NLQ\n");
		if (scb == NULL) {
			printf("%s: No SCB valid for LQICRC_NLQ. 
" "Resetting bus\n", ahd_name(ahd));
			ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
			return;
		}
	} else if ((lqistat1 & LQIBADLQI) != 0) {
		printf("Need to handle BADLQI!\n");
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		return;
	} else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) {
		if ((curphase & ~P_DATAIN_DT) != 0) {
			/* Ack the byte. So we can continue. */
			if (silent == FALSE)
				printf("Acking %s to clear perror\n",
				       ahd_lookup_phase_entry(curphase)->phasemsg);
			ahd_inb(ahd, SCSIDAT);
		}

		if (curphase == P_MESGIN)
			msg_out = MSG_PARITY_ERROR;
	}

	/*
	 * We've set the hardware to assert ATN if we
	 * get a parity error on "in" phases, so all we
	 * need to do is stuff the message buffer with
	 * the appropriate message. "In" phases have set
	 * mesg_out to something other than MSG_NOP.
	 */
	ahd->send_msg_perror = msg_out;
	if (scb != NULL && msg_out == MSG_INITIATOR_DET_ERR)
		scb->flags |= SCB_TRANSMISSION_ERROR;
	ahd_outb(ahd, MSG_OUT, HOST_MSG);
	ahd_outb(ahd, CLRINT, CLRSCSIINT);
	ahd_unpause(ahd);
}

/*
 * Recover from an LQI phase error (LQIPHASE_LQ/LQIPHASE_NLQ): retry the
 * LQI manager when we ended on a packet boundary with ATN raised,
 * otherwise reset the bus.
 */
static void
ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1)
{
	/*
	 * Clear the sources of the interrupts.
	 */
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, CLRLQIINT1, lqistat1);

	/*
	 * If the "illegal" phase changes were in response
	 * to our ATN to flag a CRC error, AND we ended up
	 * on packet boundaries, clear the error, restart the
	 * LQI manager as appropriate, and go on our merry
	 * way toward sending the message. Otherwise, reset
	 * the bus to clear the error.
	 */
	ahd_set_active_fifo(ahd);
	if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0
	 && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) {
		if ((lqistat1 & LQIPHASE_LQ) != 0) {
			printf("LQIRETRY for LQIPHASE_LQ\n");
			ahd_outb(ahd, LQCTL2, LQIRETRY);
		} else if ((lqistat1 & LQIPHASE_NLQ) != 0) {
			printf("LQIRETRY for LQIPHASE_NLQ\n");
			ahd_outb(ahd, LQCTL2, LQIRETRY);
		} else
			panic("ahd_handle_lqiphase_error: No phase errors\n");
		ahd_dump_card_state(ahd);
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
		ahd_unpause(ahd);
	} else {
		printf("Reseting Channel for LQI Phase error\n");
		ahd_dump_card_state(ahd);
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
	}
}

/*
 * Packetized unexpected or expected busfree.
 * Entered in mode based on busfreetime.
 * Returns non-zero if the caller should restart (rather than merely
 * unpause) the sequencer.
 */
static int
ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime)
{
	u_int lqostat1;

	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	lqostat1 = ahd_inb(ahd, LQOSTAT1);
	if ((lqostat1 & LQOBUSFREE) != 0) {
		struct scb *scb;
		u_int scbid;
		u_int saved_scbptr;
		u_int waiting_h;
		u_int waiting_t;
		u_int next;

		/*
		 * The LQO manager detected an unexpected busfree
		 * either:
		 *
		 * 1) During an outgoing LQ.
		 * 2) After an outgoing LQ but before the first
		 *    REQ of the command packet.
		 * 3) During an outgoing command packet.
		 *
		 * In all cases, CURRSCB is pointing to the
		 * SCB that encountered the failure. Clean
		 * up the queue, clear SELDO and LQOBUSFREE,
		 * and allow the sequencer to restart the select
		 * out at its leisure.
		 */
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		scbid = ahd_inw(ahd, CURRSCB);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL)
			panic("SCB not valid during LQOBUSFREE");
		/*
		 * Clear the status.
		 */
		ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE);
		if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
			ahd_outb(ahd, CLRLQOINT1, 0);
		ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
		ahd_flush_device_writes(ahd);
		ahd_outb(ahd, CLRSINT0, CLRSELDO);

		/*
		 * Return the LQO manager to its idle loop. It will
		 * not do this automatically if the busfree occurs
		 * after the first REQ of either the LQ or command
		 * packet or between the LQ and command packet.
		 */
		ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE);

		/*
		 * Update the waiting for selection queue so
		 * we restart on the correct SCB.
		 */
		waiting_h = ahd_inw(ahd, WAITING_TID_HEAD);
		saved_scbptr = ahd_get_scbptr(ahd);
		if (waiting_h != scbid) {

			/* Re-link the failed SCB at the head of the
			 * waiting-TID list so it is retried first. */
			ahd_outw(ahd, WAITING_TID_HEAD, scbid);
			waiting_t = ahd_inw(ahd, WAITING_TID_TAIL);
			if (waiting_t == waiting_h) {
				ahd_outw(ahd, WAITING_TID_TAIL, scbid);
				next = SCB_LIST_NULL;
			} else {
				ahd_set_scbptr(ahd, waiting_h);
				next = ahd_inw_scbram(ahd, SCB_NEXT2);
			}
			ahd_set_scbptr(ahd, scbid);
			ahd_outw(ahd, SCB_NEXT2, next);
		}
		ahd_set_scbptr(ahd, saved_scbptr);
		if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) {
			if (SCB_IS_SILENT(scb) == FALSE) {
				ahd_print_path(ahd, scb);
				printf("Probable outgoing LQ CRC error. "
				       "Retrying command\n");
			}
			scb->crc_retry_count++;
		} else {
			/* Retry budget exhausted — fail the command. */
			ahd_set_transaction_status(scb, CAM_UNCOR_PARITY);
			ahd_freeze_scb(scb);
			ahd_freeze_devq(ahd, scb);
		}
		/* Return unpausing the sequencer. */
		return (0);
	} else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) {
		/*
		 * Ignore what are really parity errors that
		 * occur on the last REQ of a free running
		 * clock prior to going busfree. Some drives
		 * do not properly active negate just before
		 * going busfree resulting in a parity glitch.
		 */
		ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0)
			printf("%s: Parity on last REQ detected "
			       "during busfree phase.\n",
			       ahd_name(ahd));
#endif
		/* Return unpausing the sequencer.
*/ return (0); } if (ahd->src_mode != AHD_MODE_SCSI) { u_int scbid; struct scb *scb; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); ahd_print_path(ahd, scb); printf("Unexpected PKT busfree condition\n"); ahd_dump_card_state(ahd); ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A', SCB_GET_LUN(scb), SCB_GET_TAG(scb), ROLE_INITIATOR, CAM_UNEXP_BUSFREE); /* Return restarting the sequencer. */ return (1); } printf("%s: Unexpected PKT busfree condition\n", ahd_name(ahd)); ahd_dump_card_state(ahd); /* Restart the sequencer. */ return (1); } /* * Non-packetized unexpected or expected busfree. */ static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) { struct ahd_devinfo devinfo; struct scb *scb; u_int lastphase; u_int saved_scsiid; u_int saved_lun; u_int target; u_int initiator_role_id; u_int scbid; u_int ppr_busfree; int printerror; /* * Look at what phase we were last in. If its message out, * chances are pretty good that the busfree was in response * to one of our abort requests. */ lastphase = ahd_inb(ahd, LASTPHASE); saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); saved_lun = ahd_inb(ahd, SAVED_LUN); target = SCSIID_TARGET(ahd, saved_scsiid); initiator_role_id = SCSIID_OUR_ID(saved_scsiid); ahd_compile_devinfo(&devinfo, initiator_role_id, target, saved_lun, 'A', ROLE_INITIATOR); printerror = 1; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) scb = NULL; ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0; if (lastphase == P_MESGOUT) { u_int tag; tag = SCB_LIST_NULL; if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE) || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) { int found; int sent_msg; if (scb == NULL) { ahd_print_devinfo(ahd, &devinfo); printf("Abort for unidentified " "connection completed.\n"); /* restart the sequencer. 
				return (1);
			}

			sent_msg = ahd->msgout_buf[ahd->msgout_index - 1];
			ahd_print_path(ahd, scb);
			printf("SCB %d - Abort%s Completed.\n",
			       SCB_GET_TAG(scb),
			       sent_msg == MSG_ABORT_TAG ? "" : " Tag");

			if (sent_msg == MSG_ABORT_TAG)
				tag = SCB_GET_TAG(scb);

			if ((scb->flags & SCB_EXTERNAL_RESET) != 0) {
				/*
				 * This abort is in response to an
				 * unexpected switch to command phase
				 * for a packetized connection.  Since
				 * the identify message was never sent,
				 * "saved lun" is 0.  We really want to
				 * abort only the SCB that encountered
				 * this error, which could have a different
				 * lun.  The SCB will be retried so the OS
				 * will see the UA after renegotiating to
				 * packetized.
				 */
				tag = SCB_GET_TAG(scb);
				saved_lun = scb->hscb->lun;
			}
			found = ahd_abort_scbs(ahd, target, 'A', saved_lun, tag,
					       ROLE_INITIATOR,
					       CAM_REQ_ABORTED);
			printf("found == 0x%x\n", found);
			printerror = 0;
		} else if (ahd_sent_msg(ahd, AHDMSG_1B,
					MSG_BUS_DEV_RESET, TRUE)) {
#ifdef __FreeBSD__
			/*
			 * Don't mark the user's request for this BDR
			 * as completing with CAM_BDR_SENT.  CAM3
			 * specifies CAM_REQ_CMP.
			 */
			if (scb != NULL
			 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
			 && ahd_match_scb(ahd, scb, target, 'A',
					  CAM_LUN_WILDCARD, SCB_LIST_NULL,
					  ROLE_INITIATOR))
				ahd_set_transaction_status(scb, CAM_REQ_CMP);
#endif
			ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD,
					    CAM_BDR_SENT, "Bus Device Reset",
					    /*verbose_level*/0);
			printerror = 0;
		} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE)
			&& ppr_busfree == 0) {
			struct ahd_initiator_tinfo *tinfo;
			struct ahd_tmode_tstate *tstate;

			/*
			 * PPR Rejected.
			 *
			 * If the previous negotiation was packetized,
			 * this could be because the device has been
			 * reset without our knowledge.  Force our
			 * current negotiation to async and retry the
			 * negotiation.  Otherwise retry the command
			 * with non-ppr negotiation.
			 */
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
				printf("PPR negotiation rejected busfree.\n");
#endif
			tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
						    devinfo.our_scsiid,
						    devinfo.target, &tstate);
			if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ)!=0) {
				ahd_set_width(ahd, &devinfo,
					      MSG_EXT_WDTR_BUS_8_BIT,
					      AHD_TRANS_CUR,
					      /*paused*/TRUE);
				ahd_set_syncrate(ahd, &devinfo,
						 /*period*/0, /*offset*/0,
						 /*ppr_options*/0,
						 AHD_TRANS_CUR,
						 /*paused*/TRUE);
				/*
				 * The expect PPR busfree handler below
				 * will effect the retry and necessary
				 * abort.
				 */
			} else {
				tinfo->curr.transport_version = 2;
				tinfo->goal.transport_version = 2;
				tinfo->goal.ppr_options = 0;
				/*
				 * Remove any SCBs in the waiting for selection
				 * queue that may also be for this target so
				 * that command ordering is preserved.
				 */
				ahd_freeze_devq(ahd, scb);
				ahd_qinfifo_requeue_tail(ahd, scb);
				printerror = 0;
			}
		} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
			&& ppr_busfree == 0) {
			/*
			 * Negotiation Rejected.  Go-narrow and
			 * retry command.
			 */
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
				printf("WDTR negotiation rejected busfree.\n");
#endif
			ahd_set_width(ahd, &devinfo,
				      MSG_EXT_WDTR_BUS_8_BIT,
				      AHD_TRANS_CUR|AHD_TRANS_GOAL,
				      /*paused*/TRUE);
			/*
			 * Remove any SCBs in the waiting for selection
			 * queue that may also be for this target so that
			 * command ordering is preserved.
			 */
			ahd_freeze_devq(ahd, scb);
			ahd_qinfifo_requeue_tail(ahd, scb);
			printerror = 0;
		} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
			&& ppr_busfree == 0) {
			/*
			 * Negotiation Rejected.  Go-async and
			 * retry command.
			 */
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
				printf("SDTR negotiation rejected busfree.\n");
#endif
			ahd_set_syncrate(ahd, &devinfo,
					 /*period*/0, /*offset*/0,
					 /*ppr_options*/0,
					 AHD_TRANS_CUR|AHD_TRANS_GOAL,
					 /*paused*/TRUE);
			/*
			 * Remove any SCBs in the waiting for selection
			 * queue that may also be for this target so that
			 * command ordering is preserved.
			 */
			ahd_freeze_devq(ahd, scb);
			ahd_qinfifo_requeue_tail(ahd, scb);
			printerror = 0;
		} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
			&& ahd_sent_msg(ahd, AHDMSG_1B,
					MSG_INITIATOR_DET_ERR, TRUE)) {

#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
				printf("Expected IDE Busfree\n");
#endif
			printerror = 0;
		} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE)
			&& ahd_sent_msg(ahd, AHDMSG_1B,
					MSG_MESSAGE_REJECT, TRUE)) {

#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
				printf("Expected QAS Reject Busfree\n");
#endif
			printerror = 0;
		}
	}

	/*
	 * The busfree required flag is honored at the end of
	 * the message phases.  We check it last in case we
	 * had to send some other message that caused a busfree.
	 */
	if (printerror != 0
	 && (lastphase == P_MESGIN || lastphase == P_MESGOUT)
	 && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) {

		ahd_freeze_devq(ahd, scb);
		ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
		ahd_freeze_scb(scb);
		if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) {
			ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
				       SCB_GET_CHANNEL(ahd, scb),
				       SCB_GET_LUN(scb), SCB_LIST_NULL,
				       ROLE_INITIATOR, CAM_REQ_ABORTED);
		} else {
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
				printf("PPR Negotiation Busfree.\n");
#endif
			ahd_done(ahd, scb);
		}
		printerror = 0;
	}
	if (printerror != 0) {
		int aborted;

		aborted = 0;
		if (scb != NULL) {
			u_int tag;

			if ((scb->hscb->control & TAG_ENB) != 0)
				tag = SCB_GET_TAG(scb);
			else
				tag = SCB_LIST_NULL;
			ahd_print_path(ahd, scb);
			aborted = ahd_abort_scbs(ahd, target, 'A',
						 SCB_GET_LUN(scb), tag,
						 ROLE_INITIATOR,
						 CAM_UNEXP_BUSFREE);
		} else {
			/*
			 * We had not fully identified this connection,
			 * so we cannot abort anything.
			 */
			printf("%s: ", ahd_name(ahd));
		}
		printf("Unexpected busfree %s, %d SCBs aborted, "
		       "PRGMCNT == 0x%x\n",
		       ahd_lookup_phase_entry(lastphase)->phasemsg,
		       aborted,
		       ahd_inw(ahd, PRGMCNT));
		ahd_dump_card_state(ahd);
		if (lastphase != P_BUSFREE)
			ahd_force_renegotiation(ahd, &devinfo);
	}
	/* Always restart the sequencer. */
	return (1);
}

/*
 * Recover from a SCSI protocol violation reported by the sequencer.
 * Depending on how far the connection progressed, either reset the
 * channel or attempt a targeted abort of the offending transaction.
 */
static void
ahd_handle_proto_violation(struct ahd_softc *ahd)
{
	struct	ahd_devinfo devinfo;
	struct	scb *scb;
	u_int	scbid;
	u_int	seq_flags;
	u_int	curphase;
	u_int	lastphase;
	int	found;

	ahd_fetch_devinfo(ahd, &devinfo);
	scbid = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scbid);
	seq_flags = ahd_inb(ahd, SEQ_FLAGS);
	curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
	lastphase = ahd_inb(ahd, LASTPHASE);
	if ((seq_flags & NOT_IDENTIFIED) != 0) {

		/*
		 * The reconnecting target either did not send an
		 * identify message, or did, but we didn't find an SCB
		 * to match.
		 */
		ahd_print_devinfo(ahd, &devinfo);
		printf("Target did not send an IDENTIFY message. "
		       "LASTPHASE = 0x%x.\n", lastphase);
		scb = NULL;
	} else if (scb == NULL) {
		/*
		 * We don't seem to have an SCB active for this
		 * transaction.  Print an error and reset the bus.
		 */
		ahd_print_devinfo(ahd, &devinfo);
		printf("No SCB found during protocol violation\n");
		goto proto_violation_reset;
	} else {
		ahd_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
		if ((seq_flags & NO_CDB_SENT) != 0) {
			ahd_print_path(ahd, scb);
			printf("No or incomplete CDB sent to device.\n");
		} else if ((ahd_inb_scbram(ahd, SCB_CONTROL)
			  & STATUS_RCVD) == 0) {
			/*
			 * The target never bothered to provide status to
			 * us prior to completing the command.  Since we don't
			 * know the disposition of this command, we must attempt
			 * to abort it.  Assert ATN and prepare to send an abort
			 * message.
			 */
			ahd_print_path(ahd, scb);
			printf("Completed command without status.\n");
		} else {
			ahd_print_path(ahd, scb);
			printf("Unknown protocol violation.\n");
			ahd_dump_card_state(ahd);
		}
	}
	if ((lastphase & ~P_DATAIN_DT) == 0
	 || lastphase == P_COMMAND) {
proto_violation_reset:
		/*
		 * Target either went directly to data
		 * phase or didn't respond to our ATN.
		 * The only safe thing to do is to blow
		 * it away with a bus reset.
		 */
		found = ahd_reset_channel(ahd, 'A', TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahd_name(ahd), 'A', found);
	} else {
		/*
		 * Leave the selection hardware off in case
		 * this abort attempt will affect yet to
		 * be sent commands.
		 */
		ahd_outb(ahd, SCSISEQ0,
			 ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
		ahd_assert_atn(ahd);
		ahd_outb(ahd, MSG_OUT, HOST_MSG);
		if (scb == NULL) {
			ahd_print_devinfo(ahd, &devinfo);
			ahd->msgout_buf[0] = MSG_ABORT_TASK;
			ahd->msgout_len = 1;
			ahd->msgout_index = 0;
			ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		} else {
			ahd_print_path(ahd, scb);
			scb->flags |= SCB_ABORT;
		}
		printf("Protocol violation %s. Attempting to abort.\n",
		       ahd_lookup_phase_entry(curphase)->phasemsg);
	}
}

/*
 * Force renegotiation to occur the next time we initiate
 * a command to the current device.
 */
static void
ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	struct	ahd_initiator_tinfo *targ_info;
	struct	ahd_tmode_tstate *tstate;

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
		ahd_print_devinfo(ahd, devinfo);
		printf("Forcing renegotiation\n");
	}
#endif
	targ_info = ahd_fetch_transinfo(ahd,
					devinfo->channel,
					devinfo->our_scsiid,
					devinfo->target,
					&tstate);
	ahd_update_neg_request(ahd, devinfo, tstate,
			       targ_info, AHD_NEG_IF_NON_ASYNC);
}

#define AHD_MAX_STEPS 2000
/*
 * Single-step the sequencer out of any critical section it is
 * currently executing so that it is safe for the host to pause it.
 * Interrupt sources are masked while stepping and restored afterwards.
 */
static void
ahd_clear_critical_section(struct ahd_softc *ahd)
{
	ahd_mode_state	saved_modes;
	int		stepping;
	int		steps;
	int		first_instr;
	u_int		simode0;
	u_int		simode1;
	u_int		simode3;
	u_int		lqimode0;
	u_int		lqimode1;
	u_int		lqomode0;
	u_int		lqomode1;

	if (ahd->num_critical_sections == 0)
		return;

	stepping = FALSE;
	steps = 0;
	first_instr = 0;
	simode0 = 0;
	simode1 = 0;
	simode3 = 0;
	lqimode0 = 0;
	lqimode1 = 0;
	lqomode0 = 0;
	lqomode1 = 0;
	saved_modes = ahd_save_modes(ahd);
	for (;;) {
		struct	cs *cs;
		u_int	seqaddr;
		u_int	i;

		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		seqaddr = ahd_inw(ahd, CURADDR);

		cs = ahd->critical_sections;
		for (i = 0; i < ahd->num_critical_sections; i++, cs++) {
			if (cs->begin < seqaddr && cs->end >= seqaddr)
				break;
		}

		if (i == ahd->num_critical_sections)
			break;

		if (steps > AHD_MAX_STEPS) {
			printf("%s: Infinite loop in critical section\n"
			       "%s: First Instruction 0x%x now 0x%x\n",
			       ahd_name(ahd), ahd_name(ahd), first_instr,
			       seqaddr);
			ahd_dump_card_state(ahd);
			panic("critical section loop");
		}

		steps++;
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printf("%s: Single stepping at 0x%x\n", ahd_name(ahd),
			       seqaddr);
#endif
		if (stepping == FALSE) {

			/* First pass: save and mask interrupt sources. */
			first_instr = seqaddr;
			ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
			simode0 = ahd_inb(ahd, SIMODE0);
			simode3 = ahd_inb(ahd, SIMODE3);
			lqimode0 = ahd_inb(ahd, LQIMODE0);
			lqimode1 = ahd_inb(ahd, LQIMODE1);
			lqomode0 = ahd_inb(ahd, LQOMODE0);
			lqomode1 = ahd_inb(ahd, LQOMODE1);
			ahd_outb(ahd, SIMODE0, 0);
			ahd_outb(ahd, SIMODE3, 0);
			ahd_outb(ahd, LQIMODE0, 0);
			ahd_outb(ahd, LQIMODE1, 0);
			ahd_outb(ahd, LQOMODE0, 0);
			ahd_outb(ahd, LQOMODE1, 0);
			ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
			simode1 = ahd_inb(ahd, SIMODE1);
			/*
			 * We don't clear ENBUSFREE.  Unfortunately
			 * we cannot re-enable busfree detection within
			 * the current connection, so we must leave it
			 * on while single stepping.
			 */
			ahd_outb(ahd, SIMODE1, simode1 & ENBUSFREE);
			ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP);
			stepping = TRUE;
		}
		ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
		ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
		ahd_outb(ahd, HCNTRL, ahd->unpause);
		while (!ahd_is_paused(ahd))
			ahd_delay(200);
		ahd_update_modes(ahd);
	}
	if (stepping) {
		/* Restore the interrupt masks saved above. */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		ahd_outb(ahd, SIMODE0, simode0);
		ahd_outb(ahd, SIMODE3, simode3);
		ahd_outb(ahd, LQIMODE0, lqimode0);
		ahd_outb(ahd, LQIMODE1, lqimode1);
		ahd_outb(ahd, LQOMODE0, lqomode0);
		ahd_outb(ahd, LQOMODE1, lqomode1);
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP);
		ahd_outb(ahd, SIMODE1, simode1);
		/*
		 * SCSIINT seems to glitch occasionally when
		 * the interrupt masks are restored.  Clear SCSIINT
		 * one more time so that only persistent errors
		 * are seen as a real interrupt.
		 */
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
	}
	ahd_restore_modes(ahd, saved_modes);
}

/*
 * Clear any pending interrupt status.
 */
static void
ahd_clear_intstat(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	/* Clear any interrupt conditions this may have caused */
	ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2
				 |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD);
	ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT
				 |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI
				 |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ);
	ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ
				 |CLRLQOATNPKT|CLRLQOTCRC);
	ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS
				 |CLRLQOBUSFREE|CLRLQOPHACHGINPKT);
	/* Some revisions do not auto-clear these registers. */
	if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
		ahd_outb(ahd, CLRLQOINT0, 0);
		ahd_outb(ahd, CLRLQOINT1, 0);
	}
	ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR);
	ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
				|CLRBUSFREE|CLRSCSIPERR|CLRREQINIT);
	ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO
			       |CLRIOERR|CLROVERRUN);
	ahd_outb(ahd, CLRINT, CLRSCSIINT);
}

/**************************** Debugging Routines ******************************/
#ifdef AHD_DEBUG
uint32_t ahd_debug = AHD_DEBUG_OPTS;
#endif

#if 0
void
ahd_print_scb(struct scb *scb)
{
	struct hardware_scb *hscb;
	int i;

	hscb = scb->hscb;
	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
	       (void *)scb,
	       hscb->control,
	       hscb->scsiid,
	       hscb->lun,
	       hscb->cdb_len);
	printf("Shared Data: ");
	for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++)
		printf("%#02x", hscb->shared_data.idata.cdb[i]);
	printf(" dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n",
	       (uint32_t)((ahd_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF),
	       (uint32_t)(ahd_le64toh(hscb->dataptr) & 0xFFFFFFFF),
	       ahd_le32toh(hscb->datacnt),
	       ahd_le32toh(hscb->sgptr),
	       SCB_GET_TAG(scb));
	ahd_dump_sglist(scb);
}
#endif  /* 0 */

/************************* Transfer Negotiation *******************************/
/*
 * Allocate per target mode instance (ID we respond to as a target)
 * transfer negotiation data structures.
 *
 * Returns NULL on allocation failure.  NOTE(review): the channel
 * argument is unused here — presumably retained for interface
 * symmetry with ahd_free_tstate.
 */
static struct ahd_tmode_tstate *
ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel)
{
	struct ahd_tmode_tstate *master_tstate;
	struct ahd_tmode_tstate *tstate;
	int i;

	master_tstate = ahd->enabled_targets[ahd->our_id];
	if (ahd->enabled_targets[scsi_id] != NULL
	 && ahd->enabled_targets[scsi_id] != master_tstate)
		panic("%s: ahd_alloc_tstate - Target already allocated",
		      ahd_name(ahd));
	tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT);
	if (tstate == NULL)
		return (NULL);

	/*
	 * If we have allocated a master tstate, copy user settings from
	 * the master tstate (taken from SRAM or the EEPROM) for this
	 * channel, but reset our current and goal settings to async/narrow
	 * until an initiator talks to us.
	 */
	if (master_tstate != NULL) {
		memcpy(tstate, master_tstate, sizeof(*tstate));
		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
		for (i = 0; i < 16; i++) {
			memset(&tstate->transinfo[i].curr, 0,
			       sizeof(tstate->transinfo[i].curr));
			memset(&tstate->transinfo[i].goal, 0,
			       sizeof(tstate->transinfo[i].goal));
		}
	} else
		memset(tstate, 0, sizeof(*tstate));
	ahd->enabled_targets[scsi_id] = tstate;
	return (tstate);
}

#ifdef AHD_TARGET_MODE
/*
 * Free per target mode instance (ID we respond to as a target)
 * transfer negotiation data structures.
 */
static void
ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
{
	struct ahd_tmode_tstate *tstate;

	/*
	 * Don't clean up our "master" tstate.
	 * It has our default user settings.
	 */
	if (scsi_id == ahd->our_id && force == FALSE)
		return;

	tstate = ahd->enabled_targets[scsi_id];
	if (tstate != NULL)
		free(tstate, M_DEVBUF);
	ahd->enabled_targets[scsi_id] = NULL;
}
#endif

/*
 * Called when we have an active connection to a target on the bus,
 * this function finds the nearest period to the input period limited
 * by the capabilities of the bus connectivity of and sync settings for
 * the target.
 */
void
ahd_devlimited_syncrate(struct ahd_softc *ahd,
			struct ahd_initiator_tinfo *tinfo,
			u_int *period, u_int *ppr_options, role_t role)
{
	struct	ahd_transinfo *transinfo;
	u_int	maxsync;

	if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0
	 && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) {
		maxsync = AHD_SYNCRATE_PACED;
	} else {
		maxsync = AHD_SYNCRATE_ULTRA;
		/* Can't do DT related options on an SE bus */
		*ppr_options &= MSG_EXT_PPR_QAS_REQ;
	}
	/*
	 * Never allow a value higher than our current goal
	 * period otherwise we may allow a target initiated
	 * negotiation to go above the limit as set by the
	 * user.  In the case of an initiator initiated
	 * sync negotiation, we limit based on the user
	 * setting.  This allows the system to still accept
	 * incoming negotiations even if target initiated
	 * negotiation is not performed.
	 */
	if (role == ROLE_TARGET)
		transinfo = &tinfo->user;
	else
		transinfo = &tinfo->goal;
	*ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN);
	if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
		maxsync = max(maxsync, (u_int)AHD_SYNCRATE_ULTRA2);
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
	}
	if (transinfo->period == 0) {
		*period = 0;
		*ppr_options = 0;
	} else {
		*period = max(*period, (u_int)transinfo->period);
		ahd_find_syncrate(ahd, period, ppr_options, maxsync);
	}
}

/*
 * Look up the valid period to SCSIRATE conversion in our table.
 * Return the period and offset that should be sent to the target
 * if this was the beginning of an SDTR.
 */
void
ahd_find_syncrate(struct ahd_softc *ahd, u_int *period,
		  u_int *ppr_options, u_int maxsync)
{
	if (*period < maxsync)
		*period = maxsync;

	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0
	 && *period > AHD_SYNCRATE_MIN_DT)
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;

	if (*period > AHD_SYNCRATE_MIN)
		*period = 0;

	/* Honor PPR option conformance rules. */
	if (*period > AHD_SYNCRATE_PACED)
		*ppr_options &= ~MSG_EXT_PPR_RTI;

	if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
		*ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ);

	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0)
		*ppr_options &= MSG_EXT_PPR_QAS_REQ;

	/* Skip all PACED only entries if IU is not available */
	if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0
	 && *period < AHD_SYNCRATE_DT)
		*period = AHD_SYNCRATE_DT;

	/* Skip all DT only entries if DT is not available */
	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
	 && *period < AHD_SYNCRATE_ULTRA2)
		*period = AHD_SYNCRATE_ULTRA2;
}

/*
 * Truncate the given synchronous offset to a value the
 * current adapter type and syncrate are capable of.
 */
static void
ahd_validate_offset(struct ahd_softc *ahd,
		    struct ahd_initiator_tinfo *tinfo,
		    u_int period, u_int *offset, int wide,
		    role_t role)
{
	u_int maxoffset;

	/* Limit offset to what we can do */
	if (period == 0)
		maxoffset = 0;
	else if (period <= AHD_SYNCRATE_PACED) {
		if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0)
			maxoffset = MAX_OFFSET_PACED_BUG;
		else
			maxoffset = MAX_OFFSET_PACED;
	} else
		maxoffset = MAX_OFFSET_NON_PACED;
	*offset = min(*offset, maxoffset);
	if (tinfo != NULL) {
		if (role == ROLE_TARGET)
			*offset = min(*offset, (u_int)tinfo->user.offset);
		else
			*offset = min(*offset, (u_int)tinfo->goal.offset);
	}
}

/*
 * Truncate the given transfer width parameter to a value the
 * current adapter type is capable of.
 */
static void
ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo,
		   u_int *bus_width, role_t role)
{
	switch (*bus_width) {
	default:
		if (ahd->features & AHD_WIDE) {
			/* Respond Wide */
			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			break;
		}
		/* FALLTHROUGH */
	case MSG_EXT_WDTR_BUS_8_BIT:
		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		break;
	}
	if (tinfo != NULL) {
		if (role == ROLE_TARGET)
			*bus_width = min((u_int)tinfo->user.width, *bus_width);
		else
			*bus_width = min((u_int)tinfo->goal.width, *bus_width);
	}
}

/*
 * Update the bitmask of targets for which the controller should
 * negotiate with at the next convenient opportunity.  This currently
 * means the next time we send the initial identify messages for
 * a new transaction.
 *
 * Returns non-zero if the negotiation mask actually changed.
 */
int
ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		       struct ahd_tmode_tstate *tstate,
		       struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type)
{
	u_int auto_negotiate_orig;

	auto_negotiate_orig = tstate->auto_negotiate;
	if (neg_type == AHD_NEG_ALWAYS) {
		/*
		 * Force our "current" settings to be
		 * unknown so that unless a bus reset
		 * occurs the need to renegotiate is
		 * recorded persistently.
		 */
		if ((ahd->features & AHD_WIDE) != 0)
			tinfo->curr.width = AHD_WIDTH_UNKNOWN;
		tinfo->curr.period = AHD_PERIOD_UNKNOWN;
		tinfo->curr.offset = AHD_OFFSET_UNKNOWN;
	}
	if (tinfo->curr.period != tinfo->goal.period
	 || tinfo->curr.width != tinfo->goal.width
	 || tinfo->curr.offset != tinfo->goal.offset
	 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
	 || (neg_type == AHD_NEG_IF_NON_ASYNC
	  && (tinfo->goal.offset != 0
	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
	   || tinfo->goal.ppr_options != 0)))
		tstate->auto_negotiate |= devinfo->target_mask;
	else
		tstate->auto_negotiate &= ~devinfo->target_mask;

	return (auto_negotiate_orig != tstate->auto_negotiate);
}

/*
 * Update the user/goal/curr tables of synchronous negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.
 In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		 u_int period, u_int offset, u_int ppr_options,
		 u_int type, int paused)
{
	struct	ahd_initiator_tinfo *tinfo;
	struct	ahd_tmode_tstate *tstate;
	u_int	old_period;
	u_int	old_offset;
	u_int	old_ppr;
	int	active;
	int	update_needed;

	active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
	update_needed = 0;

	/* A zero period or offset means async — normalize both to zero. */
	if (period == 0 || offset == 0) {
		period = 0;
		offset = 0;
	}

	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	if ((type & AHD_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
		tinfo->user.ppr_options = ppr_options;
	}

	if ((type & AHD_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
		tinfo->goal.ppr_options = ppr_options;
	}

	old_period = tinfo->curr.period;
	old_offset = tinfo->curr.offset;
	old_ppr	   = tinfo->curr.ppr_options;

	if ((type & AHD_TRANS_CUR) != 0
	 && (old_period != period
	  || old_offset != offset
	  || old_ppr != ppr_options)) {

		update_needed++;

		tinfo->curr.period = period;
		tinfo->curr.offset = offset;
		tinfo->curr.ppr_options = ppr_options;

		ahd_send_async(ahd, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
		if (bootverbose) {
			if (offset != 0) {
				int options;

				printf("%s: target %d synchronous with "
				       "period = 0x%x, offset = 0x%x",
				       ahd_name(ahd), devinfo->target,
				       period, offset);
				options = 0;
				if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
					printf("(RDSTRM");
					options++;
				}
				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
					printf("%s", options ? "|DT" : "(DT");
					options++;
				}
				if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
					printf("%s", options ? "|IU" : "(IU");
					options++;
				}
				if ((ppr_options & MSG_EXT_PPR_RTI) != 0) {
					printf("%s", options ?
					       "|RTI" : "(RTI");
					options++;
				}
				if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
					printf("%s", options ? "|QAS" : "(QAS");
					options++;
				}
				if (options != 0)
					printf(")\n");
				else
					printf("\n");
			} else {
				printf("%s: target %d using "
				       "asynchronous transfers%s\n",
				       ahd_name(ahd), devinfo->target,
				       (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0
				     ?  "(QAS)" : "");
			}
		}
	}
	/*
	 * Always refresh the neg-table to handle the case of the
	 * sequencer setting the ENATNO bit for a MK_MESSAGE request.
	 * We will always renegotiate in that case if this is a
	 * packetized request.  Also manage the busfree expected flag
	 * from this common routine so that we catch changes due to
	 * WDTR or SDTR messages.
	 */
	if ((type & AHD_TRANS_CUR) != 0) {
		if (!paused)
			ahd_pause(ahd);
		ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
		if (!paused)
			ahd_unpause(ahd);
		if (ahd->msg_type != MSG_TYPE_NONE) {
			if ((old_ppr & MSG_EXT_PPR_IU_REQ)
			 != (ppr_options & MSG_EXT_PPR_IU_REQ)) {
#ifdef AHD_DEBUG
				if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
					ahd_print_devinfo(ahd, devinfo);
					printf("Expecting IU Change busfree\n");
				}
#endif
				ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
					       |  MSG_FLAG_IU_REQ_CHANGED;
			}
			if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) {
#ifdef AHD_DEBUG
				if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
					printf("PPR with IU_REQ outstanding\n");
#endif
				ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE;
			}
		}
	}

	update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
						tinfo, AHD_NEG_TO_GOAL);

	if (update_needed && active)
		ahd_update_pending_scbs(ahd);
}

/*
 * Update the user/goal/curr tables of wide negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
	      u_int width, u_int type, int paused)
{
	struct	ahd_initiator_tinfo *tinfo;
	struct	ahd_tmode_tstate *tstate;
	u_int	oldwidth;
	int	active;
	int	update_needed;

	active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
	update_needed = 0;
	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	if ((type & AHD_TRANS_USER) != 0)
		tinfo->user.width = width;

	if ((type & AHD_TRANS_GOAL) != 0)
		tinfo->goal.width = width;

	oldwidth = tinfo->curr.width;
	if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) {

		update_needed++;

		tinfo->curr.width = width;
		ahd_send_async(ahd, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
		if (bootverbose) {
			printf("%s: target %d using %dbit transfers\n",
			       ahd_name(ahd), devinfo->target,
			       8 * (0x01 << width));
		}
	}

	if ((type & AHD_TRANS_CUR) != 0) {
		if (!paused)
			ahd_pause(ahd);
		ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
		if (!paused)
			ahd_unpause(ahd);
	}

	update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
						tinfo, AHD_NEG_TO_GOAL);
	if (update_needed && active)
		ahd_update_pending_scbs(ahd);

}

/*
 * Update the current state of tagged queuing for a given target.
 */
static void
ahd_set_tags(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
	     struct ahd_devinfo *devinfo, ahd_queue_alg alg)
{
	struct scsi_device *sdev = cmd->device;

	ahd_platform_set_tags(ahd, sdev, devinfo, alg);
	ahd_send_async(ahd, devinfo->channel, devinfo->target,
		       devinfo->lun, AC_TRANSFER_NEG);
}

/*
 * Program the hardware negotiation table entry for the given target
 * from the supplied transfer parameters.
 */
static void
ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		     struct ahd_transinfo *tinfo)
{
	ahd_mode_state	saved_modes;
	u_int		period;
	u_int		ppr_opts;
	u_int		con_opts;
	u_int		offset;
	u_int		saved_negoaddr;
	uint8_t		iocell_opts[sizeof(ahd->iocell_opts)];

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	saved_negoaddr = ahd_inb(ahd, NEGOADDR);
	ahd_outb(ahd, NEGOADDR, devinfo->target);
	period = tinfo->period;
	offset = tinfo->offset;
	memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts));
	ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ
					|MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI);
	con_opts = 0;
	if (period == 0)
		period = AHD_SYNCRATE_ASYNC;
	if (period == AHD_SYNCRATE_160) {

		if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
			/*
			 * When the SPI4 spec was finalized, PACE transfers
			 * was not made a configurable option in the PPR
			 * message.  Instead it is assumed to be enabled for
			 * any syncrate faster than 80MHz.  Nevertheless,
			 * Harpoon2A4 allows this to be configurable.
			 *
			 * Harpoon2A4 also assumes at most 2 data bytes per
			 * negotiated REQ/ACK offset.  Paced transfers take
			 * 4, so we must adjust our offset.
			 */
			ppr_opts |= PPROPT_PACE;
			offset *= 2;

			/*
			 * Harpoon2A assumed that there would be a
			 * fallback rate between 160MHz and 80MHz,
			 * so 7 is used as the period factor rather
			 * than 8 for 160MHz.
			 */
			period = AHD_SYNCRATE_REVA_160;
		}
		if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0)
			iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
			    ~AHD_PRECOMP_MASK;
	} else {
		/*
		 * Precomp should be disabled for non-paced transfers.
		 */
		iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK;

		if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0
		 && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0
		 && (ppr_opts & MSG_EXT_PPR_IU_REQ) == 0) {
			/*
			 * Slow down our CRC interval to be
			 * compatible with non-packetized
			 * U160 devices that can't handle a
			 * CRC at full speed.
			 */
			con_opts |= ENSLOWCRC;
		}

		if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
			/*
			 * On H2A4, revert to a slower slewrate
			 * on non-paced transfers.
			 */
			iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
			    ~AHD_SLEWRATE_MASK;
		}
	}

	ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW);
	ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]);
	ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE);
	ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]);

	ahd_outb(ahd, NEGPERIOD, period);
	ahd_outb(ahd, NEGPPROPTS, ppr_opts);
	ahd_outb(ahd, NEGOFFSET, offset);

	if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT)
		con_opts |= WIDEXFER;

	/*
	 * Slow down our CRC interval to be
	 * compatible with packetized U320 devices
	 * that can't handle a CRC at full speed
	 */
	if (ahd->features & AHD_AIC79XXB_SLOWCRC) {
		con_opts |= ENSLOWCRC;
	}

	/*
	 * During packetized transfers, the target will
	 * give us the opportunity to send command packets
	 * without us asserting attention.
	 */
	if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
		con_opts |= ENAUTOATNO;
	ahd_outb(ahd, NEGCONOPTS, con_opts);
	ahd_outb(ahd, NEGOADDR, saved_negoaddr);
	ahd_restore_modes(ahd, saved_modes);
}

/*
 * When the transfer settings for a connection change, setup for
 * negotiation in pending SCBs to effect the change as quickly as
 * possible.  We also cancel any negotiations that are scheduled
 * for inflight SCBs that have not been started yet.
 */
static void
ahd_update_pending_scbs(struct ahd_softc *ahd)
{
	struct	scb *pending_scb;
	int	pending_scb_count;
	int	paused;
	u_int	saved_scbptr;
	ahd_mode_state saved_modes;

	/*
	 * Traverse the pending SCB list and ensure that all of the
	 * SCBs there have the proper settings.
We can only safely * clear the negotiation required flag (setting requires the * execution queue to be modified) and this is only possible * if we are not already attempting to select out for this * SCB. For this reason, all callers only call this routine * if we are changing the negotiation settings for the currently * active transaction on the bus. */ pending_scb_count = 0; LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { struct ahd_devinfo devinfo; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; ahd_scb_devinfo(ahd, &devinfo, pending_scb); tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); if ((tstate->auto_negotiate & devinfo.target_mask) == 0 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; pending_scb->hscb->control &= ~MK_MESSAGE; } ahd_sync_scb(ahd, pending_scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); pending_scb_count++; } if (pending_scb_count == 0) return; if (ahd_is_paused(ahd)) { paused = 1; } else { paused = 0; ahd_pause(ahd); } /* * Force the sequencer to reinitialize the selection for * the command at the head of the execution queue if it * has already been setup. The negotiation changes may * effect whether we select-out with ATN. It is only * safe to clear ENSELO when the bus is not free and no * selection is in progres or completed. 
 */
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	if ((ahd_inb(ahd, SCSISIGI) & BSYI) != 0
	 && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0)
		ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);

	saved_scbptr = ahd_get_scbptr(ahd);
	/* Ensure that the hscbs down on the card match the new information */
	LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
		u_int	scb_tag;
		u_int	control;

		scb_tag = SCB_GET_TAG(pending_scb);
		ahd_set_scbptr(ahd, scb_tag);
		/* Copy only the MK_MESSAGE bit from the host copy. */
		control = ahd_inb_scbram(ahd, SCB_CONTROL);
		control &= ~MK_MESSAGE;
		control |= pending_scb->hscb->control & MK_MESSAGE;
		ahd_outb(ahd, SCB_CONTROL, control);
	}
	ahd_set_scbptr(ahd, saved_scbptr);
	ahd_restore_modes(ahd, saved_modes);

	if (paused == 0)
		ahd_unpause(ahd);
}

/**************************** Pathing Information *****************************/
/*
 * Reconstruct the device information for the current connection from
 * the controller's SCSI-mode register state: the role we are playing,
 * our own id, and the target/lun the sequencer saved for this nexus.
 */
static void
ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	ahd_mode_state	saved_modes;
	u_int		saved_scsiid;
	role_t		role;
	int		our_id;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	if (ahd_inb(ahd, SSTAT0) & TARGET)
		role = ROLE_TARGET;
	else
		role = ROLE_INITIATOR;

	if (role == ROLE_TARGET
	 && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
		/* We were selected, so pull our id from TARGIDIN */
		our_id = ahd_inb(ahd, TARGIDIN) & OID;
	} else if (role == ROLE_TARGET)
		our_id = ahd_inb(ahd, TOWNID);
	else
		our_id = ahd_inb(ahd, IOWNID);

	saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
	ahd_compile_devinfo(devinfo,
			    our_id,
			    SCSIID_TARGET(ahd, saved_scsiid),
			    ahd_inb(ahd, SAVED_LUN),
			    SCSIID_CHANNEL(ahd, saved_scsiid),
			    role);
	ahd_restore_modes(ahd, saved_modes);
}

/*
 * Print the path prefix for a device.  This controller family is single
 * channel, so the channel is always reported as 'A'.
 */
void
ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	printf("%s:%c:%d:%d: ", ahd_name(ahd), 'A',
	       devinfo->target, devinfo->lun);
}

/*
 * Map a bus phase value to its table entry; unmatched phases fall
 * through to the table's trailing default entry.
 */
static struct ahd_phase_table_entry*
ahd_lookup_phase_entry(int phase)
{
	struct ahd_phase_table_entry *entry;
	struct ahd_phase_table_entry *last_entry;

	/*
	 * num_phases doesn't include the default entry which
	 * will be returned if the phase doesn't match.
	 */
	last_entry = &ahd_phase_table[num_phases];
	for (entry = ahd_phase_table; entry < last_entry; entry++) {
		if (phase == entry->phase)
			break;
	}
	return (entry);
}

/*
 * Fill out a devinfo structure from its component parts.  The derived
 * target_offset/target_mask fields are used for indexed lookups and
 * bitmask tests; channel 'B' targets occupy the second bank of eight ids.
 */
void
ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target,
		    u_int lun, char channel, role_t role)
{
	devinfo->our_scsiid = our_id;
	devinfo->target = target;
	devinfo->lun = lun;
	devinfo->target_offset = target;
	devinfo->channel = channel;
	devinfo->role = role;
	if (channel == 'B')
		devinfo->target_offset += 8;
	devinfo->target_mask = (0x01 << devinfo->target_offset);
}

/*
 * Derive the device information for an SCB from the fields stored in
 * its hardware SCB.
 */
static void
ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		struct scb *scb)
{
	role_t	role;
	int	our_id;

	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
	role = ROLE_INITIATOR;
	if ((scb->hscb->control & TARGET_SCB) != 0)
		role = ROLE_TARGET;
	ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb),
			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role);
}

/************************ Message Phase Processing ****************************/
/*
 * When an initiator transaction with the MK_MESSAGE flag either reconnects
 * or enters the initial message out phase, we are interrupted.  Fill our
 * outgoing message buffer with the appropriate message and begin handling
 * the message phase(s) manually.
 */
static void
ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
			   struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
*/ ahd->msgout_index = 0; ahd->msgout_len = 0; if (ahd_currently_packetized(ahd)) ahd->msg_flags |= MSG_FLAG_PACKETIZED; if (ahd->send_msg_perror && ahd_inb(ahd, MSG_OUT) == HOST_MSG) { ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror; ahd->msgout_len++; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("Setting up for Parity Error delivery\n"); #endif return; } else if (scb == NULL) { printf("%s: WARNING. No pending message for " "I_T msgin. Issuing NO-OP\n", ahd_name(ahd)); ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP; ahd->msgout_len++; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; return; } if ((scb->flags & SCB_DEVICE_RESET) == 0 && (scb->flags & SCB_PACKETIZED) == 0 && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) { u_int identify_msg; identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb); if ((scb->hscb->control & DISCENB) != 0) identify_msg |= MSG_IDENTIFY_DISCFLAG; ahd->msgout_buf[ahd->msgout_index++] = identify_msg; ahd->msgout_len++; if ((scb->hscb->control & TAG_ENB) != 0) { ahd->msgout_buf[ahd->msgout_index++] = scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE); ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb); ahd->msgout_len += 2; } } if (scb->flags & SCB_DEVICE_RESET) { ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET; ahd->msgout_len++; ahd_print_path(ahd, scb); printf("Bus Device Reset Message Sent\n"); /* * Clear our selection hardware in advance of * the busfree. We may have an entry in the waiting * Q for this target, and we don't want to go about * selecting while we handle the busfree and blow it * away. */ ahd_outb(ahd, SCSISEQ0, 0); } else if ((scb->flags & SCB_ABORT) != 0) { if ((scb->hscb->control & TAG_ENB) != 0) { ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG; } else { ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT; } ahd->msgout_len++; ahd_print_path(ahd, scb); printf("Abort%s Message Sent\n", (scb->hscb->control & TAG_ENB) != 0 ? 
" Tag" : ""); /* * Clear our selection hardware in advance of * the busfree. We may have an entry in the waiting * Q for this target, and we don't want to go about * selecting while we handle the busfree and blow it * away. */ ahd_outb(ahd, SCSISEQ0, 0); } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { ahd_build_transfer_msg(ahd, devinfo); /* * Clear our selection hardware in advance of potential * PPR IU status change busfree. We may have an entry in * the waiting Q for this target, and we don't want to go * about selecting while we handle the busfree and blow * it away. */ ahd_outb(ahd, SCSISEQ0, 0); } else { printf("ahd_intr: AWAITING_MSG for an SCB that " "does not have a waiting message\n"); printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, devinfo->target_mask); panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x " "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control, ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb(ahd, MSG_OUT), scb->flags); } /* * Clear the MK_MESSAGE flag from the SCB so we aren't * asked to send this message again. */ ahd_outb(ahd, SCB_CONTROL, ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE); scb->hscb->control &= ~MK_MESSAGE; ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } /* * Build an appropriate transfer negotiation message for the * currently active target. */ static void ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { /* * We need to initiate transfer negotiations. * If our current and goal settings are identical, * we want to renegotiate due to a check condition. */ struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; int dowide; int dosync; int doppr; u_int period; u_int ppr_options; u_int offset; tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* * Filter our period based on the current connection. 
* If we can't perform DT transfers on this segment (not in LVD * mode for instance), then our decision to issue a PPR message * may change. */ period = tinfo->goal.period; offset = tinfo->goal.offset; ppr_options = tinfo->goal.ppr_options; /* Target initiated PPR is not allowed in the SCSI spec */ if (devinfo->role == ROLE_TARGET) ppr_options = 0; ahd_devlimited_syncrate(ahd, tinfo, &period, &ppr_options, devinfo->role); dowide = tinfo->curr.width != tinfo->goal.width; dosync = tinfo->curr.offset != offset || tinfo->curr.period != period; /* * Only use PPR if we have options that need it, even if the device * claims to support it. There might be an expander in the way * that doesn't. */ doppr = ppr_options != 0; if (!dowide && !dosync && !doppr) { dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; dosync = tinfo->goal.offset != 0; } if (!dowide && !dosync && !doppr) { /* * Force async with a WDTR message if we have a wide bus, * or just issue an SDTR with a 0 offset. */ if ((ahd->features & AHD_WIDE) != 0) dowide = 1; else dosync = 1; if (bootverbose) { ahd_print_devinfo(ahd, devinfo); printf("Ensuring async\n"); } } /* Target initiated PPR is not allowed in the SCSI spec */ if (devinfo->role == ROLE_TARGET) doppr = 0; /* * Both the PPR message and SDTR message require the * goal syncrate to be limited to what the target device * is capable of handling (based on whether an LVD->SE * expander is on the bus), so combine these two cases. * Regardless, guarantee that if we are using WDTR and SDTR * messages that WDTR comes first. */ if (doppr || (dosync && !dowide)) { offset = tinfo->goal.offset; ahd_validate_offset(ahd, tinfo, period, &offset, doppr ? 
				    tinfo->goal.width : tinfo->curr.width,
				    devinfo->role);
		if (doppr) {
			ahd_construct_ppr(ahd, devinfo, period, offset,
					  tinfo->goal.width, ppr_options);
		} else {
			ahd_construct_sdtr(ahd, devinfo, period, offset);
		}
	} else {
		ahd_construct_wdtr(ahd, devinfo, tinfo->goal.width);
	}
}

/*
 * Build a synchronous negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		   u_int period, u_int offset)
{
	/* An offset of 0 means async; advertise the async period. */
	if (offset == 0)
		period = AHD_ASYNC_XFER_PERIOD;
	ahd->msgout_index += spi_populate_sync_msg(
			ahd->msgout_buf + ahd->msgout_index, period, offset);
	/* SDTR is a 5-byte extended message. */
	ahd->msgout_len += 5;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
		       ahd_name(ahd), devinfo->channel, devinfo->target,
		       devinfo->lun, period, offset);
	}
}

/*
 * Build a wide negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		   u_int bus_width)
{
	ahd->msgout_index += spi_populate_width_msg(
			ahd->msgout_buf + ahd->msgout_index, bus_width);
	/* WDTR is a 4-byte extended message. */
	ahd->msgout_len += 4;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
		       ahd_name(ahd), devinfo->channel, devinfo->target,
		       devinfo->lun, bus_width);
	}
}

/*
 * Build a parallel protocol request message in our message
 * buffer based on the input parameters.
 */
static void
ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		  u_int period, u_int offset, u_int bus_width,
		  u_int ppr_options)
{
	/*
	 * Always request precompensation from
	 * the other target if we are running
	 * at paced syncrates.
	 */
	if (period <= AHD_SYNCRATE_PACED)
		ppr_options |= MSG_EXT_PPR_PCOMP_EN;
	/* An offset of 0 means async; advertise the async period. */
	if (offset == 0)
		period = AHD_ASYNC_XFER_PERIOD;
	ahd->msgout_index += spi_populate_ppr_msg(
			ahd->msgout_buf + ahd->msgout_index, period, offset,
			bus_width, ppr_options);
	/* PPR is an 8-byte extended message. */
	ahd->msgout_len += 8;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
		       "offset %x, ppr_options %x\n", ahd_name(ahd),
		       devinfo->channel, devinfo->target, devinfo->lun,
		       bus_width, period, offset, ppr_options);
	}
}

/*
 * Clear any active message state.
 */
static void
ahd_clear_msg_state(struct ahd_softc *ahd)
{
	ahd_mode_state saved_modes;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd->send_msg_perror = 0;
	ahd->msg_flags = MSG_FLAG_NONE;
	ahd->msgout_len = 0;
	ahd->msgin_index = 0;
	ahd->msg_type = MSG_TYPE_NONE;
	if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
		/*
		 * The target didn't care to respond to our
		 * message request, so clear ATN.
		 */
		ahd_outb(ahd, CLRSINT1, CLRATNO);
	}
	ahd_outb(ahd, MSG_OUT, MSG_NOOP);
	ahd_outb(ahd, SEQ_FLAGS2,
		 ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
	ahd_restore_modes(ahd, saved_modes);
}

/*
 * Manual message loop handler.
*/ static void ahd_handle_message_phase(struct ahd_softc *ahd) { struct ahd_devinfo devinfo; u_int bus_phase; int end_session; ahd_fetch_devinfo(ahd, &devinfo); end_session = FALSE; bus_phase = ahd_inb(ahd, LASTPHASE); if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) { printf("LQIRETRY for LQIPHASE_OUTPKT\n"); ahd_outb(ahd, LQCTL2, LQIRETRY); } reswitch: switch (ahd->msg_type) { case MSG_TYPE_INITIATOR_MSGOUT: { int lastbyte; int phasemis; int msgdone; if (ahd->msgout_len == 0 && ahd->send_msg_perror == 0) panic("HOST_MSG_LOOP interrupt with no active message"); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, &devinfo); printf("INITIATOR_MSG_OUT"); } #endif phasemis = bus_phase != P_MESGOUT; if (phasemis) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { printf(" PHASEMIS %s\n", ahd_lookup_phase_entry(bus_phase) ->phasemsg); } #endif if (bus_phase == P_MESGIN) { /* * Change gears and see if * this messages is of interest to * us or should be passed back to * the sequencer. */ ahd_outb(ahd, CLRSINT1, CLRATNO); ahd->send_msg_perror = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN; ahd->msgin_index = 0; goto reswitch; } end_session = TRUE; break; } if (ahd->send_msg_perror) { ahd_outb(ahd, CLRSINT1, CLRATNO); ahd_outb(ahd, CLRSINT1, CLRREQINIT); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahd->send_msg_perror); #endif /* * If we are notifying the target of a CRC error * during packetized operations, the target is * within its rights to acknowledge our message * with a busfree. */ if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0 && ahd->send_msg_perror == MSG_INITIATOR_DET_ERR) ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE; ahd_outb(ahd, RETURN_2, ahd->send_msg_perror); ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE); break; } msgdone = ahd->msgout_index == ahd->msgout_len; if (msgdone) { /* * The target has requested a retry. 
* Re-assert ATN, reset our message index to * 0, and try again. */ ahd->msgout_index = 0; ahd_assert_atn(ahd); } lastbyte = ahd->msgout_index == (ahd->msgout_len - 1); if (lastbyte) { /* Last byte is signified by dropping ATN */ ahd_outb(ahd, CLRSINT1, CLRATNO); } /* * Clear our interrupt status and present * the next byte on the bus. */ ahd_outb(ahd, CLRSINT1, CLRREQINIT); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahd->msgout_buf[ahd->msgout_index]); #endif ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]); ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE); break; } case MSG_TYPE_INITIATOR_MSGIN: { int phasemis; int message_done; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, &devinfo); printf("INITIATOR_MSG_IN"); } #endif phasemis = bus_phase != P_MESGIN; if (phasemis) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { printf(" PHASEMIS %s\n", ahd_lookup_phase_entry(bus_phase) ->phasemsg); } #endif ahd->msgin_index = 0; if (bus_phase == P_MESGOUT && (ahd->send_msg_perror != 0 || (ahd->msgout_len != 0 && ahd->msgout_index == 0))) { ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; goto reswitch; } end_session = TRUE; break; } /* Pull the byte in without acking it */ ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahd->msgin_buf[ahd->msgin_index]); #endif message_done = ahd_parse_msg(ahd, &devinfo); if (message_done) { /* * Clear our incoming message buffer in case there * is another message following this one. */ ahd->msgin_index = 0; /* * If this message illicited a response, * assert ATN so the target takes us to the * message out phase. 
*/ if (ahd->msgout_len != 0) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, &devinfo); printf("Asserting ATN for response\n"); } #endif ahd_assert_atn(ahd); } } else ahd->msgin_index++; if (message_done == MSGLOOP_TERMINATED) { end_session = TRUE; } else { /* Ack the byte */ ahd_outb(ahd, CLRSINT1, CLRREQINIT); ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_READ); } break; } case MSG_TYPE_TARGET_MSGIN: { int msgdone; int msgout_request; /* * By default, the message loop will continue. */ ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); if (ahd->msgout_len == 0) panic("Target MSGIN with no active message"); /* * If we interrupted a mesgout session, the initiator * will not know this until our first REQ. So, we * only honor mesgout requests after we've sent our * first byte. */ if ((ahd_inb(ahd, SCSISIGI) & ATNI) != 0 && ahd->msgout_index > 0) msgout_request = TRUE; else msgout_request = FALSE; if (msgout_request) { /* * Change gears and see if * this messages is of interest to * us or should be passed back to * the sequencer. */ ahd->msg_type = MSG_TYPE_TARGET_MSGOUT; ahd_outb(ahd, SCSISIGO, P_MESGOUT | BSYO); ahd->msgin_index = 0; /* Dummy read to REQ for first byte */ ahd_inb(ahd, SCSIDAT); ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); break; } msgdone = ahd->msgout_index == ahd->msgout_len; if (msgdone) { ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN); end_session = TRUE; break; } /* * Present the next byte on the bus. */ ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); ahd_outb(ahd, SCSIDAT, ahd->msgout_buf[ahd->msgout_index++]); break; } case MSG_TYPE_TARGET_MSGOUT: { int lastbyte; int msgdone; /* * By default, the message loop will continue. */ ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); /* * The initiator signals that this is * the last byte by dropping ATN. 
*/ lastbyte = (ahd_inb(ahd, SCSISIGI) & ATNI) == 0; /* * Read the latched byte, but turn off SPIOEN first * so that we don't inadvertently cause a REQ for the * next byte. */ ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN); ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIDAT); msgdone = ahd_parse_msg(ahd, &devinfo); if (msgdone == MSGLOOP_TERMINATED) { /* * The message is *really* done in that it caused * us to go to bus free. The sequencer has already * been reset at this point, so pull the ejection * handle. */ return; } ahd->msgin_index++; /* * XXX Read spec about initiator dropping ATN too soon * and use msgdone to detect it. */ if (msgdone == MSGLOOP_MSGCOMPLETE) { ahd->msgin_index = 0; /* * If this message illicited a response, transition * to the Message in phase and send it. */ if (ahd->msgout_len != 0) { ahd_outb(ahd, SCSISIGO, P_MESGIN | BSYO); ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); ahd->msg_type = MSG_TYPE_TARGET_MSGIN; ahd->msgin_index = 0; break; } } if (lastbyte) end_session = TRUE; else { /* Ask for the next byte. */ ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); } break; } default: panic("Unknown REQINIT message type"); } if (end_session) { if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) { printf("%s: Returning to Idle Loop\n", ahd_name(ahd)); ahd_clear_msg_state(ahd); /* * Perform the equivalent of a clear_target_state. */ ahd_outb(ahd, LASTPHASE, P_BUSFREE); ahd_outb(ahd, SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT); ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET); } else { ahd_clear_msg_state(ahd); ahd_outb(ahd, RETURN_1, EXIT_MSG_LOOP); } } } /* * See if we sent a particular extended message to the target. * If "full" is true, return true only if the target saw the full * message. If "full" is false, return true if the target saw at * least the first byte of the message. 
 */
static int
ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full)
{
	int found;
	u_int index;

	found = FALSE;
	index = 0;

	/*
	 * Walk the outgoing message buffer.  ahd->msgout_index is the
	 * number of bytes actually transmitted so far, so comparing it
	 * against a message's position tells us whether that message
	 * (or its first byte) made it onto the bus.
	 */
	while (index < ahd->msgout_len) {
		if (ahd->msgout_buf[index] == MSG_EXTENDED) {
			u_int end_index;

			/*
			 * Extended messages carry their length in the
			 * second byte; end_index is one past the last
			 * byte of this message.
			 */
			end_index = index + 1 + ahd->msgout_buf[index + 1];
			if (ahd->msgout_buf[index+2] == msgval
			 && type == AHDMSG_EXT) {

				if (full) {
					/* Entire message transmitted? */
					if (ahd->msgout_index > end_index)
						found = TRUE;
				} else if (ahd->msgout_index > index)
					/* At least the first byte went out. */
					found = TRUE;
			}
			index = end_index;
		} else if (ahd->msgout_buf[index] >= MSG_SIMPLE_TASK
			&& ahd->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {

			/* Skip tag type and tag id or residue param*/
			index += 2;
		} else {
			/* Single byte message */
			if (type == AHDMSG_1B
			 && ahd->msgout_index > index
			 && (ahd->msgout_buf[index] == msgval
			  || ((ahd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
			   && msgval == MSG_IDENTIFYFLAG)))
				found = TRUE;
			index++;
		}

		if (found)
			break;
	}
	return (found);
}

/*
 * Wait for a complete incoming message, parse it, and respond accordingly.
 */
static int
ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	struct	ahd_initiator_tinfo *tinfo;
	struct	ahd_tmode_tstate *tstate;
	int	reject;
	int	done;
	int	response;

	done = MSGLOOP_IN_PROG;
	response = FALSE;
	reject = FALSE;
	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
	 * that we have parsed an entire message.
	 *
	 * In the case of extended messages, we accept the length
	 * byte outright and perform more checking once we know the
	 * extended message type.
	 */
	switch (ahd->msgin_buf[0]) {
	case MSG_DISCONNECT:
	case MSG_SAVEDATAPOINTER:
	case MSG_CMDCOMPLETE:
	case MSG_RESTOREPOINTERS:
	case MSG_IGN_WIDE_RESIDUE:
		/*
		 * End our message loop as these are messages
		 * the sequencer handles on its own.
*/ done = MSGLOOP_TERMINATED; break; case MSG_MESSAGE_REJECT: response = ahd_handle_msg_reject(ahd, devinfo); /* FALLTHROUGH */ case MSG_NOOP: done = MSGLOOP_MSGCOMPLETE; break; case MSG_EXTENDED: { /* Wait for enough of the message to begin validation */ if (ahd->msgin_index < 2) break; switch (ahd->msgin_buf[2]) { case MSG_EXT_SDTR: { u_int period; u_int ppr_options; u_int offset; u_int saved_offset; if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) { reject = TRUE; break; } /* * Wait until we have both args before validating * and acting on this message. * * Add one to MSG_EXT_SDTR_LEN to account for * the extended message preamble. */ if (ahd->msgin_index < (MSG_EXT_SDTR_LEN + 1)) break; period = ahd->msgin_buf[3]; ppr_options = 0; saved_offset = offset = ahd->msgin_buf[4]; ahd_devlimited_syncrate(ahd, tinfo, &period, &ppr_options, devinfo->role); ahd_validate_offset(ahd, tinfo, period, &offset, tinfo->curr.width, devinfo->role); if (bootverbose) { printf("(%s:%c:%d:%d): Received " "SDTR period %x, offset %x\n\t" "Filtered to period %x, offset %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, ahd->msgin_buf[3], saved_offset, period, offset); } ahd_set_syncrate(ahd, devinfo, period, offset, ppr_options, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); /* * See if we initiated Sync Negotiation * and didn't have to fall down to async * transfers. 
*/ if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, TRUE)) { /* We started it */ if (saved_offset != offset) { /* Went too low - force async */ reject = TRUE; } } else { /* * Send our own SDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printf("(%s:%c:%d:%d): Target " "Initiated SDTR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_construct_sdtr(ahd, devinfo, period, offset); ahd->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case MSG_EXT_WDTR: { u_int bus_width; u_int saved_width; u_int sending_reply; sending_reply = FALSE; if (ahd->msgin_buf[1] != MSG_EXT_WDTR_LEN) { reject = TRUE; break; } /* * Wait until we have our arg before validating * and acting on this message. * * Add one to MSG_EXT_WDTR_LEN to account for * the extended message preamble. */ if (ahd->msgin_index < (MSG_EXT_WDTR_LEN + 1)) break; bus_width = ahd->msgin_buf[3]; saved_width = bus_width; ahd_validate_width(ahd, tinfo, &bus_width, devinfo->role); if (bootverbose) { printf("(%s:%c:%d:%d): Received WDTR " "%x filtered to %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, saved_width, bus_width); } if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, TRUE)) { /* * Don't send a WDTR back to the * target, since we asked first. * If the width went higher than our * request, reject it. */ if (saved_width > bus_width) { reject = TRUE; printf("(%s:%c:%d:%d): requested %dBit " "transfers. 
Rejecting...\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, 8 * (0x01 << bus_width)); bus_width = 0; } } else { /* * Send our own WDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printf("(%s:%c:%d:%d): Target " "Initiated WDTR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_construct_wdtr(ahd, devinfo, bus_width); ahd->msgout_index = 0; response = TRUE; sending_reply = TRUE; } /* * After a wide message, we are async, but * some devices don't seem to honor this portion * of the spec. Force a renegotiation of the * sync component of our transfer agreement even * if our goal is async. By updating our width * after forcing the negotiation, we avoid * renegotiating for width. */ ahd_update_neg_request(ahd, devinfo, tstate, tinfo, AHD_NEG_ALWAYS); ahd_set_width(ahd, devinfo, bus_width, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); if (sending_reply == FALSE && reject == FALSE) { /* * We will always have an SDTR to send. */ ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case MSG_EXT_PPR: { u_int period; u_int offset; u_int bus_width; u_int ppr_options; u_int saved_width; u_int saved_offset; u_int saved_ppr_options; if (ahd->msgin_buf[1] != MSG_EXT_PPR_LEN) { reject = TRUE; break; } /* * Wait until we have all args before validating * and acting on this message. * * Add one to MSG_EXT_PPR_LEN to account for * the extended message preamble. */ if (ahd->msgin_index < (MSG_EXT_PPR_LEN + 1)) break; period = ahd->msgin_buf[3]; offset = ahd->msgin_buf[5]; bus_width = ahd->msgin_buf[6]; saved_width = bus_width; ppr_options = ahd->msgin_buf[7]; /* * According to the spec, a DT only * period factor with no DT option * set implies async. 
*/ if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && period <= 9) offset = 0; saved_ppr_options = ppr_options; saved_offset = offset; /* * Transfer options are only available if we * are negotiating wide. */ if (bus_width == 0) ppr_options &= MSG_EXT_PPR_QAS_REQ; ahd_validate_width(ahd, tinfo, &bus_width, devinfo->role); ahd_devlimited_syncrate(ahd, tinfo, &period, &ppr_options, devinfo->role); ahd_validate_offset(ahd, tinfo, period, &offset, bus_width, devinfo->role); if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, TRUE)) { /* * If we are unable to do any of the * requested options (we went too low), * then we'll have to reject the message. */ if (saved_width > bus_width || saved_offset != offset || saved_ppr_options != ppr_options) { reject = TRUE; period = 0; offset = 0; bus_width = 0; ppr_options = 0; } } else { if (devinfo->role != ROLE_TARGET) printf("(%s:%c:%d:%d): Target " "Initiated PPR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); else printf("(%s:%c:%d:%d): Initiator " "Initiated PPR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_construct_ppr(ahd, devinfo, period, offset, bus_width, ppr_options); ahd->msgout_index = 0; response = TRUE; } if (bootverbose) { printf("(%s:%c:%d:%d): Received PPR width %x, " "period %x, offset %x,options %x\n" "\tFiltered to width %x, period %x, " "offset %x, options %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, saved_width, ahd->msgin_buf[3], saved_offset, saved_ppr_options, bus_width, period, offset, ppr_options); } ahd_set_width(ahd, devinfo, bus_width, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); ahd_set_syncrate(ahd, devinfo, period, offset, ppr_options, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); done = MSGLOOP_MSGCOMPLETE; break; } default: /* Unknown extended message. Reject it. 
*/ reject = TRUE; break; } break; } #ifdef AHD_TARGET_MODE case MSG_BUS_DEV_RESET: ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD, CAM_BDR_SENT, "Bus Device Reset Received", /*verbose_level*/0); ahd_restart(ahd); done = MSGLOOP_TERMINATED; break; case MSG_ABORT_TAG: case MSG_ABORT: case MSG_CLEAR_QUEUE: { int tag; /* Target mode messages */ if (devinfo->role != ROLE_TARGET) { reject = TRUE; break; } tag = SCB_LIST_NULL; if (ahd->msgin_buf[0] == MSG_ABORT_TAG) tag = ahd_inb(ahd, INITIATOR_TAG); ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, devinfo->lun, tag, ROLE_TARGET, CAM_REQ_ABORTED); tstate = ahd->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { struct ahd_tmode_lstate* lstate; lstate = tstate->enabled_luns[devinfo->lun]; if (lstate != NULL) { ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid, ahd->msgin_buf[0], /*arg*/tag); ahd_send_lstate_events(ahd, lstate); } } ahd_restart(ahd); done = MSGLOOP_TERMINATED; break; } #endif case MSG_QAS_REQUEST: #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("%s: QAS request. SCSISIGI == 0x%x\n", ahd_name(ahd), ahd_inb(ahd, SCSISIGI)); #endif ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE; /* FALLTHROUGH */ case MSG_TERM_IO_PROC: default: reject = TRUE; break; } if (reject) { /* * Setup to reject the message. */ ahd->msgout_index = 0; ahd->msgout_len = 1; ahd->msgout_buf[0] = MSG_MESSAGE_REJECT; done = MSGLOOP_MSGCOMPLETE; response = TRUE; } if (done != MSGLOOP_IN_PROG && !response) /* Clear the outgoing message buffer */ ahd->msgout_len = 0; return (done); } /* * Process a message reject message. */ static int ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { /* * What we care about here is if we had an * outstanding SDTR or WDTR message for this * target. If we did, this is a signal that * the target is refusing negotiation. 
*/ struct scb *scb; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; u_int scb_index; u_int last_msg; int response = 0; scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* Might be necessary */ last_msg = ahd_inb(ahd, LAST_MSG); if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/TRUE) && tinfo->goal.period <= AHD_SYNCRATE_PACED) { /* * Target may not like our SPI-4 PPR Options. * Attempt to negotiate 80MHz which will turn * off these options. */ if (bootverbose) { printf("(%s:%c:%d:%d): PPR Rejected. " "Trying simple U160 PPR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } tinfo->goal.period = AHD_SYNCRATE_DT; tinfo->goal.ppr_options &= MSG_EXT_PPR_IU_REQ | MSG_EXT_PPR_QAS_REQ | MSG_EXT_PPR_DT_REQ; } else { /* * Target does not support the PPR message. * Attempt to negotiate SPI-2 style. */ if (bootverbose) { printf("(%s:%c:%d:%d): PPR Rejected. " "Trying WDTR/SDTR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } tinfo->goal.ppr_options = 0; tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; } ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = 1; } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { /* note 8bit xfers */ printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " "8bit transfers\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); /* * No need to clear the sync rate. If the target * did not accept the command, our syncrate is * unaffected. If the target started the negotiation, * but rejected our response, we already cleared the * sync rate before sending our WDTR. 
*/ if (tinfo->goal.offset != tinfo->curr.offset) { /* Start the sync negotiation */ ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = 1; } } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) { /* note asynch xfers and clear flag */ ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); printf("(%s:%c:%d:%d): refuses synchronous negotiation. " "Using asynchronous transfers\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) { int tag_type; int mask; tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); if (tag_type == MSG_SIMPLE_TASK) { printf("(%s:%c:%d:%d): refuses tagged commands. " "Performing non-tagged I/O\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_NONE); mask = ~0x23; } else { printf("(%s:%c:%d:%d): refuses %s tagged commands. " "Performing simple queue tagged I/O only\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, tag_type == MSG_ORDERED_TASK ? "ordered" : "head of queue"); ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_BASIC); mask = ~0x03; } /* * Resend the identify for this CCB as the target * may believe that the selection is invalid otherwise. */ ahd_outb(ahd, SCB_CONTROL, ahd_inb_scbram(ahd, SCB_CONTROL) & mask); scb->hscb->control &= mask; ahd_set_transaction_tag(scb, /*enabled*/FALSE, /*type*/MSG_SIMPLE_TASK); ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG); ahd_assert_atn(ahd); ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), SCB_GET_TAG(scb)); /* * Requeue all tagged commands for this target * currently in our posession so they can be * converted to untagged commands. 
*/ ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), SCB_GET_CHANNEL(ahd, scb), SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQUEUE_REQ, SEARCH_COMPLETE); } else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_IDENTIFYFLAG, TRUE)) { /* * Most likely the device believes that we had * previously negotiated packetized. */ ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE | MSG_FLAG_IU_REQ_CHANGED; ahd_force_renegotiation(ahd, devinfo); ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = 1; } else { /* * Otherwise, we ignore it. */ printf("%s:%c:%d: Message reject for %x -- ignored\n", ahd_name(ahd), devinfo->channel, devinfo->target, last_msg); } return (response); } /* * Process an ingnore wide residue message. */ static void ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { u_int scb_index; struct scb *scb; scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); /* * XXX Actually check data direction in the sequencer? * Perhaps add datadir to some spare bits in the hscb? */ if ((ahd_inb(ahd, SEQ_FLAGS) & DPHASE) == 0 || ahd_get_transfer_dir(scb) != CAM_DIR_IN) { /* * Ignore the message if we haven't * seen an appropriate data phase yet. */ } else { /* * If the residual occurred on the last * transfer and the transfer request was * expected to end on an odd count, do * nothing. Otherwise, subtract a byte * and update the residual count accordingly. */ uint32_t sgptr; sgptr = ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR); if ((sgptr & SG_LIST_NULL) != 0 && (ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE) & SCB_XFERLEN_ODD) != 0) { /* * If the residual occurred on the last * transfer and the transfer request was * expected to end on an odd count, do * nothing. 
*/ } else { uint32_t data_cnt; uint64_t data_addr; uint32_t sglen; /* Pull in the rest of the sgptr */ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); data_cnt = ahd_inl_scbram(ahd, SCB_RESIDUAL_DATACNT); if ((sgptr & SG_LIST_NULL) != 0) { /* * The residual data count is not updated * for the command run to completion case. * Explicitly zero the count. */ data_cnt &= ~AHD_SG_LEN_MASK; } data_addr = ahd_inq(ahd, SHADDR); data_cnt += 1; data_addr -= 1; sgptr &= SG_PTR_MASK; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* * The residual sg ptr points to the next S/G * to load so we must go back one. */ sg--; sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; if (sg != scb->sg_list && sglen < (data_cnt & AHD_SG_LEN_MASK)) { sg--; sglen = ahd_le32toh(sg->len); /* * Preserve High Address and SG_LIST * bits while setting the count to 1. */ data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); data_addr = ahd_le64toh(sg->addr) + (sglen & AHD_SG_LEN_MASK) - 1; /* * Increment sg so it points to the * "next" sg. */ sg++; sgptr = ahd_sg_virt_to_bus(ahd, scb, sg); } } else { struct ahd_dma_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* * The residual sg ptr points to the next S/G * to load so we must go back one. */ sg--; sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; if (sg != scb->sg_list && sglen < (data_cnt & AHD_SG_LEN_MASK)) { sg--; sglen = ahd_le32toh(sg->len); /* * Preserve High Address and SG_LIST * bits while setting the count to 1. */ data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); data_addr = ahd_le32toh(sg->addr) + (sglen & AHD_SG_LEN_MASK) - 1; /* * Increment sg so it points to the * "next" sg. */ sg++; sgptr = ahd_sg_virt_to_bus(ahd, scb, sg); } } /* * Toggle the "oddness" of the transfer length * to handle this mid-transfer ignore wide * residue. This ensures that the oddness is * correct for subsequent data transfers. 
*/ ahd_outb(ahd, SCB_TASK_ATTRIBUTE, ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE) ^ SCB_XFERLEN_ODD); ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); ahd_outl(ahd, SCB_RESIDUAL_DATACNT, data_cnt); /* * The FIFO's pointers will be updated if/when the * sequencer re-enters a data phase. */ } } } /* * Reinitialize the data pointers for the active transfer * based on its current residual. */ static void ahd_reinitialize_dataptrs(struct ahd_softc *ahd) { struct scb *scb; ahd_mode_state saved_modes; u_int scb_index; u_int wait; uint32_t sgptr; uint32_t resid; uint64_t dataptr; AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK); scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); /* * Release and reacquire the FIFO so we * have a clean slate. */ ahd_outb(ahd, DFFSXFRCTL, CLRCHN); wait = 1000; while (--wait && !(ahd_inb(ahd, MDFFSTAT) & FIFOFREE)) ahd_delay(100); if (wait == 0) { ahd_print_path(ahd, scb); printf("ahd_reinitialize_dataptrs: Forcing FIFO free.\n"); ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT); } saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, DFFSTAT, ahd_inb(ahd, DFFSTAT) | (saved_modes == 0x11 ? CURRFIFO_1 : CURRFIFO_0)); /* * Determine initial values for data_addr and data_cnt * for resuming the data phase. 
*/ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); sgptr &= SG_PTR_MASK; resid = (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 2) << 16) | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 1) << 8) | ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT); if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* The residual sg_ptr always points to the next sg */ sg--; dataptr = ahd_le64toh(sg->addr) + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK) - resid; ahd_outl(ahd, HADDR + 4, dataptr >> 32); } else { struct ahd_dma_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* The residual sg_ptr always points to the next sg */ sg--; dataptr = ahd_le32toh(sg->addr) + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK) - resid; ahd_outb(ahd, HADDR + 4, (ahd_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24); } ahd_outl(ahd, HADDR, dataptr); ahd_outb(ahd, HCNT + 2, resid >> 16); ahd_outb(ahd, HCNT + 1, resid >> 8); ahd_outb(ahd, HCNT, resid); } /* * Handle the effects of issuing a bus device reset message. */ static void ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int lun, cam_status status, char *message, int verbose_level) { #ifdef AHD_TARGET_MODE struct ahd_tmode_tstate* tstate; #endif int found; found = ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, lun, SCB_LIST_NULL, devinfo->role, status); #ifdef AHD_TARGET_MODE /* * Send an immediate notify ccb to all target mord peripheral * drivers affected by this action. 
*/ tstate = ahd->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { u_int cur_lun; u_int max_lun; if (lun != CAM_LUN_WILDCARD) { cur_lun = 0; max_lun = AHD_NUM_LUNS - 1; } else { cur_lun = lun; max_lun = lun; } for (;cur_lun <= max_lun; cur_lun++) { struct ahd_tmode_lstate* lstate; lstate = tstate->enabled_luns[cur_lun]; if (lstate == NULL) continue; ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid, MSG_BUS_DEV_RESET, /*arg*/0); ahd_send_lstate_events(ahd, lstate); } } #endif /* * Go back to async/narrow transfers and renegotiate. */ ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR, /*paused*/TRUE); ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE); if (status != CAM_SEL_TIMEOUT) ahd_send_async(ahd, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_SENT_BDR); if (message != NULL && bootverbose) printf("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd), message, devinfo->channel, devinfo->target, found); } #ifdef AHD_TARGET_MODE static void ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly. */ ahd->msgout_index = 0; ahd->msgout_len = 0; if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) ahd_build_transfer_msg(ahd, devinfo); else panic("ahd_intr: AWAITING target message with no message"); ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_TARGET_MSGIN; } #endif /**************************** Initialization **********************************/ static u_int ahd_sglist_size(struct ahd_softc *ahd) { bus_size_t list_size; list_size = sizeof(struct ahd_dma_seg) * AHD_NSEG; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) list_size = sizeof(struct ahd_dma64_seg) * AHD_NSEG; return (list_size); } /* * Calculate the optimum S/G List allocation size. 
S/G elements used * for a given transaction must be physically contiguous. Assume the * OS will allocate full pages to us, so it doesn't make sense to request * less than a page. */ static u_int ahd_sglist_allocsize(struct ahd_softc *ahd) { bus_size_t sg_list_increment; bus_size_t sg_list_size; bus_size_t max_list_size; bus_size_t best_list_size; /* Start out with the minimum required for AHD_NSEG. */ sg_list_increment = ahd_sglist_size(ahd); sg_list_size = sg_list_increment; /* Get us as close as possible to a page in size. */ while ((sg_list_size + sg_list_increment) <= PAGE_SIZE) sg_list_size += sg_list_increment; /* * Try to reduce the amount of wastage by allocating * multiple pages. */ best_list_size = sg_list_size; max_list_size = roundup(sg_list_increment, PAGE_SIZE); if (max_list_size < 4 * PAGE_SIZE) max_list_size = 4 * PAGE_SIZE; if (max_list_size > (AHD_SCB_MAX_ALLOC * sg_list_increment)) max_list_size = (AHD_SCB_MAX_ALLOC * sg_list_increment); while ((sg_list_size + sg_list_increment) <= max_list_size && (sg_list_size % PAGE_SIZE) != 0) { bus_size_t new_mod; bus_size_t best_mod; sg_list_size += sg_list_increment; new_mod = sg_list_size % PAGE_SIZE; best_mod = best_list_size % PAGE_SIZE; if (new_mod > best_mod || new_mod == 0) { best_list_size = sg_list_size; } } return (best_list_size); } /* * Allocate a controller structure for a new device * and perform initial initializion. 
*/ struct ahd_softc * ahd_alloc(void *platform_arg, char *name) { struct ahd_softc *ahd; #ifndef __FreeBSD__ ahd = malloc(sizeof(*ahd), M_DEVBUF, M_NOWAIT); if (!ahd) { printf("aic7xxx: cannot malloc softc!\n"); free(name, M_DEVBUF); return NULL; } #else ahd = device_get_softc((device_t)platform_arg); #endif memset(ahd, 0, sizeof(*ahd)); ahd->seep_config = malloc(sizeof(*ahd->seep_config), M_DEVBUF, M_NOWAIT); if (ahd->seep_config == NULL) { #ifndef __FreeBSD__ free(ahd, M_DEVBUF); #endif free(name, M_DEVBUF); return (NULL); } LIST_INIT(&ahd->pending_scbs); /* We don't know our unit number until the OSM sets it */ ahd->name = name; ahd->unit = -1; ahd->description = NULL; ahd->bus_description = NULL; ahd->channel = 'A'; ahd->chip = AHD_NONE; ahd->features = AHD_FENONE; ahd->bugs = AHD_BUGNONE; ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A; ahd_timer_init(&ahd->reset_timer); ahd_timer_init(&ahd->stat_timer); ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT; ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT; ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT; ahd->int_coalescing_threshold = AHD_INT_COALESCING_THRESHOLD_DEFAULT; ahd->int_coalescing_stop_threshold = AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT; if (ahd_platform_alloc(ahd, platform_arg) != 0) { ahd_free(ahd); ahd = NULL; } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MEMORY) != 0) { printf("%s: scb size = 0x%x, hscb size = 0x%x\n", ahd_name(ahd), (u_int)sizeof(struct scb), (u_int)sizeof(struct hardware_scb)); } #endif return (ahd); } int ahd_softc_init(struct ahd_softc *ahd) { ahd->unpause = 0; ahd->pause = PAUSE; return (0); } void ahd_set_unit(struct ahd_softc *ahd, int unit) { ahd->unit = unit; } void ahd_set_name(struct ahd_softc *ahd, char *name) { if (ahd->name != NULL) free(ahd->name, M_DEVBUF); ahd->name = name; } void ahd_free(struct ahd_softc *ahd) { int i; switch (ahd->init_level) { default: case 5: 
ahd_shutdown(ahd); /* FALLTHROUGH */ case 4: ahd_dmamap_unload(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap); /* FALLTHROUGH */ case 3: ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo, ahd->shared_data_map.dmamap); ahd_dmamap_destroy(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap); /* FALLTHROUGH */ case 2: ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat); case 1: #ifndef __linux__ ahd_dma_tag_destroy(ahd, ahd->buffer_dmat); #endif break; case 0: break; } #ifndef __linux__ ahd_dma_tag_destroy(ahd, ahd->parent_dmat); #endif ahd_platform_free(ahd); ahd_fini_scbdata(ahd); for (i = 0; i < AHD_NUM_TARGETS; i++) { struct ahd_tmode_tstate *tstate; tstate = ahd->enabled_targets[i]; if (tstate != NULL) { #ifdef AHD_TARGET_MODE int j; for (j = 0; j < AHD_NUM_LUNS; j++) { struct ahd_tmode_lstate *lstate; lstate = tstate->enabled_luns[j]; if (lstate != NULL) { xpt_free_path(lstate->path); free(lstate, M_DEVBUF); } } #endif free(tstate, M_DEVBUF); } } #ifdef AHD_TARGET_MODE if (ahd->black_hole != NULL) { xpt_free_path(ahd->black_hole->path); free(ahd->black_hole, M_DEVBUF); } #endif if (ahd->name != NULL) free(ahd->name, M_DEVBUF); if (ahd->seep_config != NULL) free(ahd->seep_config, M_DEVBUF); if (ahd->saved_stack != NULL) free(ahd->saved_stack, M_DEVBUF); #ifndef __FreeBSD__ free(ahd, M_DEVBUF); #endif return; } static void ahd_shutdown(void *arg) { struct ahd_softc *ahd; ahd = (struct ahd_softc *)arg; /* * Stop periodic timer callbacks. */ ahd_timer_stop(&ahd->reset_timer); ahd_timer_stop(&ahd->stat_timer); /* This will reset most registers to 0, but not all */ ahd_reset(ahd, /*reinit*/FALSE); } /* * Reset the controller and record some information about it * that is only available just after a reset. If "reinit" is * non-zero, this reset occured after initial configuration * and the caller requests that the chip be fully reinitialized * to a runable state. Chip interrupts are *not* enabled after * a reinitialization. 
 * The caller must enable interrupts via
 * ahd_intr_enable().
 */
int
ahd_reset(struct ahd_softc *ahd, int reinit)
{
	u_int	 sxfrctl1;
	int	 wait;
	uint32_t cmd;

	/*
	 * Preserve the value of the SXFRCTL1 register for all channels.
	 * It contains settings that affect termination and we don't want
	 * to disturb the integrity of the bus.
	 */
	ahd_pause(ahd);
	ahd_update_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	sxfrctl1 = ahd_inb(ahd, SXFRCTL1);

	/* Save PCI command register state so it can be restored below. */
	cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2);
	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
		uint32_t mod_cmd;

		/*
		 * A4 Razor #632
		 * During the assertion of CHIPRST, the chip
		 * does not disable its parity logic prior to
		 * the start of the reset.  This may cause a
		 * parity error to be detected and thus a
		 * spurious SERR or PERR assertion.  Disable
		 * PERR and SERR responses during the CHIPRST.
		 */
		mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
		ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
				     mod_cmd, /*bytes*/2);
	}
	ahd_outb(ahd, HCNTRL, CHIPRST | ahd->pause);

	/*
	 * Ensure that the reset has finished.  We delay 1000us
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 1000;
	do {
		ahd_delay(1000);
	} while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK));

	if (wait == 0) {
		printf("%s: WARNING - Failed chip reset! "
		       "Trying to initialize anyway.\n", ahd_name(ahd));
	}
	/* Drop CHIPRST while keeping the chip paused. */
	ahd_outb(ahd, HCNTRL, ahd->pause);

	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
		/*
		 * Clear any latched PCI error status and restore
		 * previous SERR and PERR response enables.
		 */
		ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1,
				     0xFF, /*bytes*/1);
		ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
				     cmd, /*bytes*/2);
	}

	/*
	 * Mode should be SCSI after a chip reset, but lets
	 * set it just to be safe.  We touch the MODE_PTR
	 * register directly so as to bypass the lazy update
	 * code in ahd_set_modes().
	 */
	ahd_known_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, MODE_PTR,
		 ahd_build_mode_state(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI));

	/*
	 * Restore SXFRCTL1.
	 *
	 * We must always initialize STPWEN to 1 before we
	 * restore the saved values.  STPWEN is initialized
	 * to a tri-state condition which can only be cleared
	 * by turning it on.
	 */
	ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN);
	ahd_outb(ahd, SXFRCTL1, sxfrctl1);

	/* Determine chip configuration */
	ahd->features &= ~AHD_WIDE;
	if ((ahd_inb(ahd, SBLKCTL) & SELWIDE) != 0)
		ahd->features |= AHD_WIDE;

	/*
	 * If a recovery action has forced a chip reset,
	 * re-initialize the chip to our liking.
	 */
	if (reinit != 0)
		ahd_chip_init(ahd);

	return (0);
}

/*
 * Determine the number of SCBs available on the controller.
 * Probes SCB RAM by writing each index into its own SCB and reading
 * it back; stops at the first slot that fails to hold its value (or
 * that aliases slot 0).
 */
static int
ahd_probe_scbs(struct ahd_softc *ahd)
{
	int i;

	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	for (i = 0; i < AHD_SCB_MAX; i++) {
		int j;

		ahd_set_scbptr(ahd, i);
		ahd_outw(ahd, SCB_BASE, i);
		for (j = 2; j < 64; j++)
			ahd_outb(ahd, SCB_BASE+j, 0);
		/* Start out life as unallocated (needing an abort) */
		ahd_outb(ahd, SCB_CONTROL, MK_MESSAGE);
		if (ahd_inw_scbram(ahd, SCB_BASE) != i)
			break;
		/* Verify slot i did not alias onto slot 0. */
		ahd_set_scbptr(ahd, 0);
		if (ahd_inw_scbram(ahd, SCB_BASE) != 0)
			break;
	}
	return (i);
}

/*
 * busdma load callback: record the bus address of the first mapped
 * segment into the caller-supplied dma_addr_t.
 */
static void
ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	dma_addr_t *baddr;

	baddr = (dma_addr_t *)arg;
	*baddr = segs->ds_addr;
}

/*
 * Put every hardware SCB into a known free state: control byte
 * cleared and next pointer terminated.
 */
static void
ahd_initialize_hscbs(struct ahd_softc *ahd)
{
	int i;

	for (i = 0; i < ahd->scb_data.maxhscbs; i++) {
		ahd_set_scbptr(ahd, i);

		/* Clear the control byte. */
		ahd_outb(ahd, SCB_CONTROL, 0);

		/* Set the next pointer */
		ahd_outw(ahd, SCB_NEXT, SCB_LIST_NULL);
	}
}

/*
 * Set up SCB bookkeeping: free lists, hardware SCB probing, and the
 * DMA tags used for hardware SCBs, S/G lists, and sense buffers.
 * init_level is bumped after each successful step so ahd_fini_scbdata()
 * can unwind exactly what was created.  Returns 0 on success or an
 * errno-style code on failure.
 */
static int
ahd_init_scbdata(struct ahd_softc *ahd)
{
	struct scb_data *scb_data;
	int i;

	scb_data = &ahd->scb_data;
	TAILQ_INIT(&scb_data->free_scbs);
	for (i = 0; i < AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT; i++)
		LIST_INIT(&scb_data->free_scb_lists[i]);
	LIST_INIT(&scb_data->any_dev_free_scb_list);
	SLIST_INIT(&scb_data->hscb_maps);
	SLIST_INIT(&scb_data->sg_maps);
	SLIST_INIT(&scb_data->sense_maps);

	/* Determine the number of hardware SCBs and initialize them */
	scb_data->maxhscbs = ahd_probe_scbs(ahd);
	if (scb_data->maxhscbs == 0) {
		printf("%s: No SCB space found\n", ahd_name(ahd));
		return (ENXIO);
	}

	ahd_initialize_hscbs(ahd);

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* DMA tag for our hardware scb structures */
	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* DMA tag for our S/G structures. */
	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/8,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       ahd_sglist_allocsize(ahd), /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sg_dmat) != 0) {
		goto error_exit;
	}
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MEMORY) != 0)
		printf("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd),
		       ahd_sglist_allocsize(ahd));
#endif

	scb_data->init_level++;

	/* DMA tag for our sense buffers.  We allocate in page sized chunks */
	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sense_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Perform initial CCB allocation */
	ahd_alloc_scbs(ahd);

	if (scb_data->numscbs == 0) {
		printf("%s: ahd_init_scbdata - "
		       "Unable to allocate initial scbs\n",
		       ahd_name(ahd));
		goto error_exit;
	}

	/*
	 * Note that we were successful
	 */
	return (0);

error_exit:

	return (ENOMEM);
}

/*
 * Locate the SCB with the given tag, searching the pending list, the
 * per-device collision free lists, and the generic free list in turn.
 * Returns NULL when no SCB carries the tag.
 */
static struct scb *
ahd_find_scb_by_tag(struct ahd_softc *ahd, u_int tag)
{
	struct scb *scb;

	/*
	 * Look on the pending list.
	 */
	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
		if (SCB_GET_TAG(scb) == tag)
			return (scb);
	}

	/*
	 * Then on all of the collision free lists.
	 */
	TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
		struct scb *list_scb;

		list_scb = scb;
		do {
			if (SCB_GET_TAG(list_scb) == tag)
				return (list_scb);
			list_scb = LIST_NEXT(list_scb, collision_links);
		} while (list_scb);
	}

	/*
	 * And finally on the generic free list.
	 */
	LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
		if (SCB_GET_TAG(scb) == tag)
			return (scb);
	}

	return (NULL);
}

/*
 * Tear down SCB bookkeeping.  The switch cases mirror the init_level
 * increments performed in ahd_init_scbdata(), falling through from the
 * highest level reached so only created resources are released.
 */
static void
ahd_fini_scbdata(struct ahd_softc *ahd)
{
	struct scb_data *scb_data;

	scb_data = &ahd->scb_data;
	if (scb_data == NULL)
		return;

	switch (scb_data->init_level) {
	default:
	case 7:
	{
		struct map_node *sns_map;

		while ((sns_map = SLIST_FIRST(&scb_data->sense_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sense_maps, links);
			ahd_dmamap_unload(ahd, scb_data->sense_dmat,
					  sns_map->dmamap);
			ahd_dmamem_free(ahd, scb_data->sense_dmat,
					sns_map->vaddr, sns_map->dmamap);
			free(sns_map, M_DEVBUF);
		}
		ahd_dma_tag_destroy(ahd, scb_data->sense_dmat);
		/* FALLTHROUGH */
	}
	case 6:
	{
		struct map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
			ahd_dmamap_unload(ahd, scb_data->sg_dmat,
					  sg_map->dmamap);
			ahd_dmamem_free(ahd, scb_data->sg_dmat,
					sg_map->vaddr, sg_map->dmamap);
			free(sg_map, M_DEVBUF);
		}
		ahd_dma_tag_destroy(ahd, scb_data->sg_dmat);
		/* FALLTHROUGH */
	}
	case 5:
	{
		struct map_node *hscb_map;

		while ((hscb_map = SLIST_FIRST(&scb_data->hscb_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&scb_data->hscb_maps, links);
			ahd_dmamap_unload(ahd, scb_data->hscb_dmat,
					  hscb_map->dmamap);
			ahd_dmamem_free(ahd, scb_data->hscb_dmat,
					hscb_map->vaddr, hscb_map->dmamap);
			free(hscb_map, M_DEVBUF);
		}
		ahd_dma_tag_destroy(ahd, scb_data->hscb_dmat);
		/* FALLTHROUGH */
	}
	case 4:
	case 3:
	case 2:
	case 1:
	case 0:
		break;
	}
}

/*
 * DSP filter Bypass must be enabled until the first selection
 * after a change in bus mode (Razor #491 and #493).
 */
static void
ahd_setup_iocell_workaround(struct ahd_softc *ahd)
{
	ahd_mode_state saved_modes;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
	/*
	 * Enable the DSP filter bypass and disable receiver/transmitter
	 * offset calibration until the first selection is observed
	 * (see the Razor #491/#493 note above).
	 */
	ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL)
	       | BYPASSENAB | RCVROFFSTDIS | XMITOFFSTDIS);
	/*
	 * Enable the selection-in/selection-out interrupt sources so
	 * the first selection can be detected and the bypass removed
	 * in ahd_iocell_first_selection().
	 */
	ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI));
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MISC) != 0)
		printf("%s: Setting up iocell workaround\n", ahd_name(ahd));
#endif
	ahd_restore_modes(ahd, saved_modes);
	/* Mark that the first post-reset selection has not yet occurred. */
	ahd->flags &= ~AHD_HAD_FIRST_SEL;
}

/*
 * Undo the iocell workaround on the first selection after a bus mode
 * change: drop the DSP bypass (when running in the 40MHz/Ultra320
 * iocell mode, per ENAB40) and stop listening for selection events.
 * Runs at most once per workaround setup, gated by AHD_HAD_FIRST_SEL.
 */
static void
ahd_iocell_first_selection(struct ahd_softc *ahd)
{
	ahd_mode_state saved_modes;
	u_int sblkctl;

	if ((ahd->flags & AHD_HAD_FIRST_SEL) != 0)
		return;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	sblkctl = ahd_inb(ahd, SBLKCTL);
	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MISC) != 0)
		printf("%s: iocell first selection\n", ahd_name(ahd));
#endif
	if ((sblkctl & ENAB40) != 0) {
		ahd_outb(ahd, DSPDATACTL,
			 ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printf("%s: BYPASS now disabled\n", ahd_name(ahd));
#endif
	}
	/* Stop watching for selections and clear any latched SCSI int. */
	ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI));
	ahd_outb(ahd, CLRINT, CLRSCSIINT);
	ahd_restore_modes(ahd, saved_modes);
	ahd->flags |= AHD_HAD_FIRST_SEL;
}

/*************************** SCB Management ***********************************/
/*
 * Place an SCB on the collision free list for col_idx.  The first SCB
 * of a collision list is also linked onto the generic free_scbs tailq;
 * later arrivals are chained behind it via collision_links.
 */
static void
ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx)
{
	struct scb_list *free_list;
	struct scb_tailq *free_tailq;
	struct scb *first_scb;

	scb->flags |= SCB_ON_COL_LIST;
	AHD_SET_SCB_COL_IDX(scb, col_idx);
	free_list = &ahd->scb_data.free_scb_lists[col_idx];
	free_tailq = &ahd->scb_data.free_scbs;
	first_scb = LIST_FIRST(free_list);
	if (first_scb != NULL) {
		LIST_INSERT_AFTER(first_scb, scb, collision_links);
	} else {
		LIST_INSERT_HEAD(free_list, scb, collision_links);
		TAILQ_INSERT_TAIL(free_tailq, scb, links.tqe);
	}
}
/*
 * Remove an SCB from its collision column's free list, keeping the
 * generic free_scbs tail queue consistent when the column head leaves.
 */
static void
ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb)
{
	struct	scb_list *free_list;
	struct	scb_tailq *free_tailq;
	struct	scb *first_scb;
	u_int	col_idx;

	scb->flags &= ~SCB_ON_COL_LIST;
	col_idx = AHD_GET_SCB_COL_IDX(ahd, scb);
	free_list = &ahd->scb_data.free_scb_lists[col_idx];
	free_tailq = &ahd->scb_data.free_scbs;
	first_scb = LIST_FIRST(free_list);
	if (first_scb == scb) {
		struct scb *next_scb;

		/*
		 * Maintain order in the collision free
		 * lists for fairness if this device has
		 * other colliding tags active.
		 */
		next_scb = LIST_NEXT(scb, collision_links);
		if (next_scb != NULL) {
			/* Promote the next column member onto the tailq. */
			TAILQ_INSERT_AFTER(free_tailq,
					   scb, next_scb, links.tqe);
		}
		TAILQ_REMOVE(free_tailq, scb, links.tqe);
	}
	LIST_REMOVE(scb, collision_links);
}

/*
 * Get a free scb. If there are none, see if we can allocate a new SCB.
 */
struct scb *
ahd_get_scb(struct ahd_softc *ahd, u_int col_idx)
{
	struct scb *scb;
	int tries;

	tries = 0;
look_again:
	/* Prefer an SCB whose collision column differs from col_idx. */
	TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
		if (AHD_GET_SCB_COL_IDX(ahd, scb) != col_idx) {
			ahd_rem_col_list(ahd, scb);
			goto found;
		}
	}
	if ((scb = LIST_FIRST(&ahd->scb_data.any_dev_free_scb_list)) == NULL) {
		/* Grow the pool at most once per call. */
		if (tries++ != 0)
			return (NULL);
		ahd_alloc_scbs(ahd);
		goto look_again;
	}
	LIST_REMOVE(scb, links.le);
	if (col_idx != AHD_NEVER_COL_IDX
	 && (scb->col_scb != NULL)
	 && (scb->col_scb->flags & SCB_ACTIVE) == 0) {
		/* Park our idle collision partner on the column list. */
		LIST_REMOVE(scb->col_scb, links.le);
		ahd_add_col_list(ahd, scb->col_scb, col_idx);
	}
found:
	scb->flags |= SCB_ACTIVE;
	return (scb);
}

/*
 * Return an SCB resource to the free list.
 */
void
ahd_free_scb(struct ahd_softc *ahd, struct scb *scb)
{
	/* Clean up for the next user */
	scb->flags = SCB_FLAG_NONE;
	scb->hscb->control = 0;
	ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL;

	if (scb->col_scb == NULL) {
		/*
		 * No collision possible.  Just free normally.
		 */
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb, links.le);
	} else if ((scb->col_scb->flags & SCB_ON_COL_LIST) != 0) {
		/*
		 * The SCB we might have collided with is on
		 * a free collision list. Put both SCBs on
		 * the generic list.
		 */
		ahd_rem_col_list(ahd, scb->col_scb);
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb, links.le);
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb->col_scb, links.le);
	} else if ((scb->col_scb->flags
		  & (SCB_PACKETIZED|SCB_ACTIVE)) == SCB_ACTIVE
		&& (scb->col_scb->hscb->control & TAG_ENB) != 0) {
		/*
		 * The SCB we might collide with on the next allocation
		 * is still active in a non-packetized, tagged, context.
		 * Put us on the SCB collision list.
		 */
		ahd_add_col_list(ahd, scb,
				 AHD_GET_SCB_COL_IDX(ahd, scb->col_scb));
	} else {
		/*
		 * The SCB we might collide with on the next allocation
		 * is either active in a packetized context, or free.
		 * Since we can't collide, put this SCB on the generic
		 * free list.
		 */
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb, links.le);
	}

	ahd_platform_scb_free(ahd, scb);
}

/*
 * Grow the pools of hardware SCBs, S/G lists, sense buffers, and
 * software SCB shells, stopping at AHD_SCB_MAX_ALLOC SCBs total.
 * Each pool is carved out of page-sized DMA allocations; partially
 * consumed pages are tracked via scbs_left/sgs_left/sense_left.
 */
static void
ahd_alloc_scbs(struct ahd_softc *ahd)
{
	struct scb_data *scb_data;
	struct scb	*next_scb;
	struct hardware_scb *hscb;
	struct map_node *hscb_map;
	struct map_node *sg_map;
	struct map_node *sense_map;
	uint8_t		*segs;
	uint8_t		*sense_data;
	dma_addr_t	 hscb_busaddr;
	dma_addr_t	 sg_busaddr;
	dma_addr_t	 sense_busaddr;
	int		 newcount;
	int		 i;

	scb_data = &ahd->scb_data;
	if (scb_data->numscbs >= AHD_SCB_MAX_ALLOC)
		/* Can't allocate any more */
		return;

	if (scb_data->scbs_left != 0) {
		int offset;

		/* Carve the next hardware SCB out of the current page. */
		offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left;
		hscb_map = SLIST_FIRST(&scb_data->hscb_maps);
		hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset];
		hscb_busaddr = hscb_map->physaddr + (offset * sizeof(*hscb));
	} else {
		hscb_map = malloc(sizeof(*hscb_map), M_DEVBUF, M_NOWAIT);

		if (hscb_map == NULL)
			return;

		/* Allocate the next batch of hardware SCBs */
		if (ahd_dmamem_alloc(ahd, scb_data->hscb_dmat,
				     (void **)&hscb_map->vaddr,
				     BUS_DMA_NOWAIT, &hscb_map->dmamap) != 0) {
			free(hscb_map, M_DEVBUF);
			return;
		}

		SLIST_INSERT_HEAD(&scb_data->hscb_maps, hscb_map, links);

		ahd_dmamap_load(ahd, scb_data->hscb_dmat, hscb_map->dmamap,
				hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb,
				&hscb_map->physaddr, /*flags*/0);

		hscb = (struct hardware_scb *)hscb_map->vaddr;
		hscb_busaddr = hscb_map->physaddr;
		scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb);
	}

	if (scb_data->sgs_left != 0) {
		int offset;

		/* Carve the next S/G list out of the current page. */
		offset = ((ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd))
			  - scb_data->sgs_left) * ahd_sglist_size(ahd);
		sg_map = SLIST_FIRST(&scb_data->sg_maps);
		segs = sg_map->vaddr + offset;
		sg_busaddr = sg_map->physaddr + offset;
	} else {
		sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

		if (sg_map == NULL)
			return;

		/* Allocate the next batch of S/G lists */
		if (ahd_dmamem_alloc(ahd, scb_data->sg_dmat,
				     (void **)&sg_map->vaddr,
				     BUS_DMA_NOWAIT, &sg_map->dmamap) != 0) {
			free(sg_map, M_DEVBUF);
			return;
		}

		SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);

		ahd_dmamap_load(ahd, scb_data->sg_dmat, sg_map->dmamap,
				sg_map->vaddr, ahd_sglist_allocsize(ahd),
				ahd_dmamap_cb, &sg_map->physaddr, /*flags*/0);

		segs = sg_map->vaddr;
		sg_busaddr = sg_map->physaddr;
		scb_data->sgs_left =
		    ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd);
#ifdef AHD_DEBUG
		if (ahd_debug & AHD_SHOW_MEMORY)
			printf("Mapped SG data\n");
#endif
	}

	if (scb_data->sense_left != 0) {
		int offset;

		/* Carve the next sense buffer out of the current page. */
		offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left);
		sense_map = SLIST_FIRST(&scb_data->sense_maps);
		sense_data = sense_map->vaddr + offset;
		sense_busaddr = sense_map->physaddr + offset;
	} else {
		sense_map = malloc(sizeof(*sense_map), M_DEVBUF, M_NOWAIT);

		if (sense_map == NULL)
			return;

		/* Allocate the next batch of sense buffers */
		if (ahd_dmamem_alloc(ahd, scb_data->sense_dmat,
				     (void **)&sense_map->vaddr,
				     BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) {
			free(sense_map, M_DEVBUF);
			return;
		}

		SLIST_INSERT_HEAD(&scb_data->sense_maps, sense_map, links);
		ahd_dmamap_load(ahd, scb_data->sense_dmat, sense_map->dmamap,
				sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb,
				&sense_map->physaddr, /*flags*/0);

		sense_data = sense_map->vaddr;
		sense_busaddr = sense_map->physaddr;
		scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE;
#ifdef AHD_DEBUG
		if (ahd_debug & AHD_SHOW_MEMORY)
			printf("Mapped sense data\n");
#endif
	}

	/* Build only as many SCBs as the scarcest resource allows. */
	newcount = min(scb_data->sense_left, scb_data->scbs_left);
	newcount = min(newcount, scb_data->sgs_left);
	newcount = min(newcount, (AHD_SCB_MAX_ALLOC - scb_data->numscbs));
	for (i = 0; i < newcount; i++) {
		struct scb_platform_data *pdata;
		u_int col_tag;
#ifndef	__linux__
		int error;
#endif

		next_scb = (struct scb *)malloc(sizeof(*next_scb),
						M_DEVBUF, M_NOWAIT);
		if (next_scb == NULL)
			break;

		pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
							   M_DEVBUF, M_NOWAIT);
		if (pdata == NULL) {
			/* Unwind the partial SCB before giving up. */
			free(next_scb, M_DEVBUF);
			break;
		}
		next_scb->platform_data = pdata;
		next_scb->hscb_map = hscb_map;
		next_scb->sg_map = sg_map;
		next_scb->sense_map = sense_map;
		next_scb->sg_list = segs;
		next_scb->sense_data = sense_data;
		next_scb->sense_busaddr = sense_busaddr;
		memset(hscb, 0, sizeof(*hscb));
		next_scb->hscb = hscb;
		hscb->hscb_busaddr = ahd_htole32(hscb_busaddr);

		/*
		 * The sequencer always starts with the second entry.
		 * The first entry is embedded in the scb.
		 */
		next_scb->sg_list_busaddr = sg_busaddr;
		/* Skip the embedded first segment; size depends on addressing. */
		if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
			next_scb->sg_list_busaddr += sizeof(struct ahd_dma64_seg);
		else
			next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg);
		next_scb->ahd_softc = ahd;
		next_scb->flags = SCB_FLAG_NONE;
#ifndef	__linux__
		error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0,
					  &next_scb->dmamap);
		if (error != 0) {
			free(next_scb, M_DEVBUF);
			free(pdata, M_DEVBUF);
			break;
		}
#endif
		next_scb->hscb->tag = ahd_htole16(scb_data->numscbs);
		/* Pair each SCB with the one whose tag differs in bit 8. */
		col_tag = scb_data->numscbs ^ 0x100;
		next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag);
		if (next_scb->col_scb != NULL)
			next_scb->col_scb->col_scb = next_scb;
		/* Freeing places the new SCB on the appropriate free list. */
		ahd_free_scb(ahd, next_scb);
		hscb++;
		hscb_busaddr += sizeof(*hscb);
		segs += ahd_sglist_size(ahd);
		sg_busaddr += ahd_sglist_size(ahd);
		sense_data += AHD_SENSE_BUFSIZE;
		sense_busaddr += AHD_SENSE_BUFSIZE;
		scb_data->numscbs++;
		scb_data->sense_left--;
		scb_data->scbs_left--;
		scb_data->sgs_left--;
	}
}

/*
 * Format a human readable description of the controller into buf.
 * buf must be large enough for the longest possible description.
 */
void
ahd_controller_info(struct ahd_softc *ahd, char *buf)
{
	const char *speed;
	const char *type;
	int len;

	len = sprintf(buf, "%s: ",
		      ahd_chip_names[ahd->chip & AHD_CHIPID_MASK]);
	buf += len;

	speed = "Ultra320 ";
	if ((ahd->features & AHD_WIDE) != 0) {
		type = "Wide ";
	} else {
		type = "Single ";
	}
	len = sprintf(buf, "%s%sChannel %c, SCSI Id=%d, ",
		      speed, type, ahd->channel, ahd->our_id);
	buf += len;

	sprintf(buf, "%s, %d SCBs", ahd->bus_description,
		ahd->scb_data.maxhscbs);
}

/* Indexed by flexport channel number in ahd_init()'s termination report. */
static const char *channel_strings[] = {
	"Primary Low",
	"Primary High",
	"Secondary Low",
	"Secondary High"
};

/* Indexed by FLX_CSTAT_* termination status values. */
static const char *termstat_strings[] = {
	"Terminated Correctly",
	"Over Terminated",
	"Under Terminated",
	"Not Configured"
};

/*
 * Start the board, ready for normal operation
 */
int
ahd_init(struct ahd_softc *ahd)
{
	uint8_t	*next_vaddr;
	dma_addr_t next_baddr;
	size_t	 driver_data_size;
	int	 i;
	int	 error;
	u_int	 warn_user;
	uint8_t	 current_sensing;
	uint8_t	 fstat;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);

	/* Scratch space for saving the sequencer stack across pauses. */
	ahd->stack_size = ahd_probe_stack_size(ahd);
	ahd->saved_stack = malloc(ahd->stack_size * sizeof(uint16_t),
				  M_DEVBUF, M_NOWAIT);
	if (ahd->saved_stack == NULL)
		return (ENOMEM);

	/*
	 * Verify that the compiler hasn't over-aggressively
	 * padded important structures.
	 */
	if (sizeof(struct hardware_scb) != 64)
		panic("Hardware SCB size is incorrect");

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_DEBUG_SEQUENCER) != 0)
		ahd->flags |= AHD_SEQUENCER_DEBUG;
#endif

	/*
	 * Default to allowing initiator operations.
	 */
	ahd->flags |= AHD_INITIATORROLE;

	/*
	 * Only allow target mode features if this unit has them enabled.
	 */
	if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0)
		ahd->features &= ~AHD_TARGETMODE;

#ifndef __linux__
	/* DMA tag for mapping buffers into device visible space. */
	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING
					? (dma_addr_t)0x7FFFFFFFFFULL
					: BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE,
			       /*nsegments*/AHD_NSEG,
			       /*maxsegsz*/AHD_MAXTRANSFER_SIZE,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &ahd->buffer_dmat) != 0) {
		return (ENOMEM);
	}
#endif

	ahd->init_level++;

	/*
	 * DMA tag for our command fifos and other data in system memory
	 * the card's sequencer must be able to access.  For initiator
	 * roles, we need to allocate space for the qoutfifo.  When providing
	 * for the target mode role, we must additionally provide space for
	 * the incoming target command fifo.
	 */
	driver_data_size = AHD_SCB_MAX * sizeof(*ahd->qoutfifo)
			 + sizeof(struct hardware_scb);
	if ((ahd->features & AHD_TARGETMODE) != 0)
		driver_data_size += AHD_TMODE_CMDS * sizeof(struct target_cmd);
	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0)
		driver_data_size += PKT_OVERRUN_BUFSIZE;
	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       driver_data_size,
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &ahd->shared_data_dmat) != 0) {
		return (ENOMEM);
	}

	ahd->init_level++;

	/* Allocation of driver data */
	if (ahd_dmamem_alloc(ahd, ahd->shared_data_dmat,
			     (void **)&ahd->shared_data_map.vaddr,
			     BUS_DMA_NOWAIT,
			     &ahd->shared_data_map.dmamap) != 0) {
		return (ENOMEM);
	}

	ahd->init_level++;

	/* And permanently map it in */
	ahd_dmamap_load(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
			ahd->shared_data_map.vaddr, driver_data_size,
			ahd_dmamap_cb, &ahd->shared_data_map.physaddr,
			/*flags*/0);
	/* Sub-allocate the shared chunk: qoutfifo first, then extras. */
	ahd->qoutfifo = (struct ahd_completion *)ahd->shared_data_map.vaddr;
	next_vaddr = (uint8_t *)&ahd->qoutfifo[AHD_QOUT_SIZE];
	next_baddr = ahd->shared_data_map.physaddr
		   + AHD_QOUT_SIZE*sizeof(struct ahd_completion);
	if ((ahd->features & AHD_TARGETMODE) != 0) {
		ahd->targetcmds = (struct target_cmd *)next_vaddr;
		next_vaddr += AHD_TMODE_CMDS * sizeof(struct target_cmd);
		next_baddr += AHD_TMODE_CMDS * sizeof(struct target_cmd);
	}

	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) {
		ahd->overrun_buf = next_vaddr;
		next_vaddr += PKT_OVERRUN_BUFSIZE;
		next_baddr += PKT_OVERRUN_BUFSIZE;
	}

	/*
	 * We need one SCB to serve as the "next SCB".  Since the
	 * tag identifier in this SCB will never be used, there is
	 * no point in using a valid HSCB tag from an SCB pulled from
	 * the standard free pool.  So, we allocate this "sentinel"
	 * specially from the DMA safe memory chunk used for the QOUTFIFO.
	 */
	ahd->next_queued_hscb = (struct hardware_scb *)next_vaddr;
	ahd->next_queued_hscb_map = &ahd->shared_data_map;
	ahd->next_queued_hscb->hscb_busaddr = ahd_htole32(next_baddr);

	ahd->init_level++;

	/* Allocate SCB data now that buffer_dmat is initialized */
	if (ahd_init_scbdata(ahd) != 0)
		return (ENOMEM);

	if ((ahd->flags & AHD_INITIATORROLE) == 0)
		ahd->flags &= ~AHD_RESET_BUS_A;

	/*
	 * Before committing these settings to the chip, give
	 * the OSM one last chance to modify our configuration.
	 */
	ahd_platform_init(ahd);

	/* Bring up the chip. */
	ahd_chip_init(ahd);

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);

	if ((ahd->flags & AHD_CURRENT_SENSING) == 0)
		goto init_done;

	/*
	 * Verify termination based on current draw and
	 * warn user if the bus is over/under terminated.
	 */
	error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL,
				   CURSENSE_ENB);
	if (error != 0) {
		printf("%s: current sensing timeout 1\n", ahd_name(ahd));
		goto init_done;
	}
	/* Poll the flexport up to 20 times for the busy bit to clear. */
	for (i = 20, fstat = FLX_FSTAT_BUSY;
	     (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) {
		error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat);
		if (error != 0) {
			printf("%s: current sensing timeout 2\n",
			       ahd_name(ahd));
			goto init_done;
		}
	}
	if (i == 0) {
		printf("%s: Timedout during current-sensing test\n",
		       ahd_name(ahd));
		goto init_done;
	}

	/* Latch Current Sensing status. */
	error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing);
	if (error != 0) {
		printf("%s: current sensing timeout 3\n", ahd_name(ahd));
		goto init_done;
	}

	/* Disable current sensing.
	 */
	ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0);

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) {
		printf("%s: current_sensing == 0x%x\n",
		       ahd_name(ahd), current_sensing);
	}
#endif
	warn_user = 0;
	/* Each channel's 2-bit status occupies successive FLX_CSTAT fields. */
	for (i = 0; i < 4; i++, current_sensing >>= FLX_CSTAT_SHIFT) {
		u_int term_stat;

		term_stat = (current_sensing & FLX_CSTAT_MASK);
		switch (term_stat) {
		case FLX_CSTAT_OVER:
		case FLX_CSTAT_UNDER:
			warn_user++;
			/* FALLTHROUGH - still report the channel below. */
		case FLX_CSTAT_INVALID:
		case FLX_CSTAT_OKAY:
			if (warn_user == 0 && bootverbose == 0)
				break;
			printf("%s: %s Channel %s\n", ahd_name(ahd),
			       channel_strings[i], termstat_strings[term_stat]);
			break;
		}
	}
	if (warn_user) {
		printf("%s: WARNING. Termination is not configured correctly.\n"
		       "%s: WARNING. SCSI bus operations may FAIL.\n",
		       ahd_name(ahd), ahd_name(ahd));
	}
init_done:
	ahd_restart(ahd);
	ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
			ahd_stat_timer, ahd);
	return (0);
}

/*
 * (Re)initialize chip state after a chip reset.
 */
static void
ahd_chip_init(struct ahd_softc *ahd)
{
	uint32_t busaddr;
	u_int	 sxfrctl1;
	u_int	 scsiseq_template;
	u_int	 wait;
	u_int	 i;
	u_int	 target;

	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/*
	 * Take the LED out of diagnostic mode
	 */
	ahd_outb(ahd, SBLKCTL, ahd_inb(ahd, SBLKCTL) & ~(DIAGLEDEN|DIAGLEDON));

	/*
	 * Return HS_MAILBOX to its default value.
	 */
	ahd->hs_mailbox = 0;
	ahd_outb(ahd, HS_MAILBOX, 0);

	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1. */
	ahd_outb(ahd, IOWNID, ahd->our_id);
	ahd_outb(ahd, TOWNID, ahd->our_id);
	sxfrctl1 = (ahd->flags & AHD_TERM_ENB_A) != 0 ? STPWEN : 0;
	sxfrctl1 |= (ahd->flags & AHD_SPCHK_ENB_A) != 0 ? ENSPCHK : 0;
	if ((ahd->bugs & AHD_LONG_SETIMO_BUG)
	 && (ahd->seltime != STIMESEL_MIN)) {
		/*
		 * The selection timer duration is twice as long
		 * as it should be.  Halve it by adding "1" to
		 * the user specified setting.
		 */
		sxfrctl1 |= ahd->seltime + STIMESEL_BUG_ADJ;
	} else {
		sxfrctl1 |= ahd->seltime;
	}

	ahd_outb(ahd, SXFRCTL0, DFON);
	ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN);
	ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);

	/*
	 * Now that termination is set, wait for up
	 * to 500ms for our transceivers to settle.  If
	 * the adapter does not have a cable attached,
	 * the transceivers may never settle, so don't
	 * complain if we fail here.
	 */
	for (wait = 10000;
	     (ahd_inb(ahd, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
	     wait--)
		ahd_delay(100);

	/* Clear any false bus resets due to the transceivers settling */
	ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
	ahd_outb(ahd, CLRINT, CLRSCSIINT);

	/* Initialize mode specific S/G state for both data FIFOs. */
	for (i = 0; i < 2; i++) {
		ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
		ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
		ahd_outb(ahd, SG_STATE, 0);
		ahd_outb(ahd, CLRSEQINTSRC, 0xFF);
		ahd_outb(ahd, SEQIMODE,
			 ENSAVEPTRS|ENCFG4DATA|ENCFG4ISTAT
			|ENCFG4TSTAT|ENCFG4ICMD|ENCFG4TCMD);
	}

	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
	ahd_outb(ahd, DSCOMMAND0, ahd_inb(ahd, DSCOMMAND0)|MPARCKEN|CACHETHEN);
	ahd_outb(ahd, DFF_THRSH, RD_DFTHRSH_75|WR_DFTHRSH_75);
	ahd_outb(ahd, SIMODE0, ENIOERR|ENOVERRUN);
	ahd_outb(ahd, SIMODE3, ENNTRAMPERR|ENOSRAMPERR);
	if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
		/* BUSFREEREV is unusable on chips with this bug. */
		ahd_outb(ahd, OPTIONMODE, AUTOACKEN|AUTO_MSGOUT_DE);
	} else {
		ahd_outb(ahd, OPTIONMODE, AUTOACKEN|BUSFREEREV|AUTO_MSGOUT_DE);
	}
	ahd_outb(ahd, SCSCHKN, CURRFIFODEF|WIDERESEN|SHVALIDSTDIS);
	if ((ahd->chip & AHD_BUS_MASK) == AHD_PCIX)
		/*
		 * Do not issue a target abort when a split completion
		 * error occurs.  Let our PCIX interrupt handler deal
		 * with it instead. H2A4 Razor #625
		 */
		ahd_outb(ahd, PCIXCTL, ahd_inb(ahd, PCIXCTL) | SPLTSTADIS);

	if ((ahd->bugs & AHD_LQOOVERRUN_BUG) != 0)
		ahd_outb(ahd, LQOSCSCTL, LQONOCHKOVER);

	/*
	 * Tweak IOCELL settings.
	 */
	if ((ahd->flags & AHD_HP_BOARD) != 0) {
		/* HP boards need a non-default write bias on every DSP. */
		for (i = 0; i < NUMDSPS; i++) {
			ahd_outb(ahd, DSPSELECT, i);
			ahd_outb(ahd, WRTBIASCTL, WRTBIASCTL_HP_DEFAULT);
		}
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printf("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd),
			       WRTBIASCTL_HP_DEFAULT);
#endif
	}
	ahd_setup_iocell_workaround(ahd);

	/*
	 * Enable LQI Manager interrupts.
	 */
	ahd_outb(ahd, LQIMODE1, ENLQIPHASE_LQ|ENLQIPHASE_NLQ|ENLIQABORT
			      | ENLQICRCI_LQ|ENLQICRCI_NLQ|ENLQIBADLQI
			      | ENLQIOVERI_LQ|ENLQIOVERI_NLQ);
	ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC);
	/*
	 * We choose to have the sequencer catch LQOPHCHGINPKT errors
	 * manually for the command phase at the start of a packetized
	 * selection case.  ENLQOBUSFREE should be made redundant by
	 * the BUSFREE interrupt, but it seems that some LQOBUSFREE
	 * events fail to assert the BUSFREE interrupt so we must
	 * also enable LQOBUSFREE interrupts.
	 */
	ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE);

	/*
	 * Setup sequencer interrupt handlers.
	 */
	ahd_outw(ahd, INTVEC1_ADDR, ahd_resolve_seqaddr(ahd, LABEL_seq_isr));
	ahd_outw(ahd, INTVEC2_ADDR, ahd_resolve_seqaddr(ahd, LABEL_timer_isr));

	/*
	 * Setup SCB Offset registers.
	 */
	/* Tell the chip where each field lives inside a hardware SCB. */
	if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
		ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb,
					       pkt_long_lun));
	} else {
		ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, lun));
	}
	ahd_outb(ahd, CMDLENPTR, offsetof(struct hardware_scb, cdb_len));
	ahd_outb(ahd, ATTRPTR, offsetof(struct hardware_scb, task_attribute));
	ahd_outb(ahd, FLAGPTR, offsetof(struct hardware_scb, task_management));
	ahd_outb(ahd, CMDPTR, offsetof(struct hardware_scb,
				       shared_data.idata.cdb));
	ahd_outb(ahd, QNEXTPTR,
		 offsetof(struct hardware_scb, next_hscb_busaddr));
	ahd_outb(ahd, ABRTBITPTR, MK_MESSAGE_BIT_OFFSET);
	ahd_outb(ahd, ABRTBYTEPTR, offsetof(struct hardware_scb, control));
	if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
		ahd_outb(ahd, LUNLEN,
			 sizeof(ahd->next_queued_hscb->pkt_long_lun) - 1);
	} else {
		ahd_outb(ahd, LUNLEN, LUNLEN_SINGLE_LEVEL_LUN);
	}
	ahd_outb(ahd, CDBLIMIT, SCB_CDB_LEN_PTR - 1);
	ahd_outb(ahd, MAXCMD, 0xFF);
	ahd_outb(ahd, SCBAUTOPTR,
		 AUSCBPTR_EN | offsetof(struct hardware_scb, tag));

	/* We haven't been enabled for target mode yet. */
	ahd_outb(ahd, MULTARGID, 0);
	ahd_outb(ahd, MULTARGID + 1, 0);

	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	/* Initialize the negotiation table. */
	if ((ahd->features & AHD_NEW_IOCELL_OPTS) == 0) {
		/*
		 * Clear the spare bytes in the neg table to avoid
		 * spurious parity errors.
		 */
		for (target = 0; target < AHD_NUM_TARGETS; target++) {
			ahd_outb(ahd, NEGOADDR, target);
			ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PER_DEV0);
			for (i = 0; i < AHD_NUM_PER_DEV_ANNEXCOLS; i++)
				ahd_outb(ahd, ANNEXDAT, 0);
		}
	}
	/* Load the current transfer negotiation for every target. */
	for (target = 0; target < AHD_NUM_TARGETS; target++) {
		struct ahd_devinfo devinfo;
		struct ahd_initiator_tinfo *tinfo;
		struct ahd_tmode_tstate *tstate;

		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
					    target, &tstate);
		ahd_compile_devinfo(&devinfo, ahd->our_id,
				    target, CAM_LUN_WILDCARD,
				    'A', ROLE_INITIATOR);
		ahd_update_neg_table(ahd, &devinfo, &tinfo->curr);
	}

	ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR);
	ahd_outb(ahd, CLRINT, CLRSCSIINT);

#ifdef NEEDS_MORE_TESTING
	/*
	 * Always enable abort on incoming L_Qs if this feature is
	 * supported.  We use this to catch invalid SCB references.
	 */
	if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0)
		ahd_outb(ahd, LQCTL1, ABORTPENDING);
	else
#endif
		ahd_outb(ahd, LQCTL1, 0);

	/* All of our queues are empty */
	ahd->qoutfifonext = 0;
	ahd->qoutfifonext_valid_tag = QOUTFIFO_ENTRY_VALID;
	ahd_outb(ahd, QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID);
	for (i = 0; i < AHD_QOUT_SIZE; i++)
		ahd->qoutfifo[i].valid_tag = 0;
	ahd_sync_qoutfifo(ahd, BUS_DMASYNC_PREREAD);

	ahd->qinfifonext = 0;
	for (i = 0; i < AHD_QIN_SIZE; i++)
		ahd->qinfifo[i] = SCB_LIST_NULL;

	if ((ahd->features & AHD_TARGETMODE) != 0) {
		/* All target command blocks start out invalid. */
		for (i = 0; i < AHD_TMODE_CMDS; i++)
			ahd->targetcmds[i].cmd_valid = 0;
		ahd_sync_tqinfifo(ahd, BUS_DMASYNC_PREREAD);
		ahd->tqinfifonext = 1;
		ahd_outb(ahd, KERNEL_TQINPOS, ahd->tqinfifonext - 1);
		ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);
	}

	/* Initialize Scratch Ram.
	 */
	ahd_outb(ahd, SEQ_FLAGS, 0);
	ahd_outb(ahd, SEQ_FLAGS2, 0);

	/* We don't have any waiting selections */
	ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL);
	ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL);
	ahd_outw(ahd, MK_MESSAGE_SCB, SCB_LIST_NULL);
	ahd_outw(ahd, MK_MESSAGE_SCSIID, 0xFF);
	for (i = 0; i < AHD_NUM_TARGETS; i++)
		ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL);

	/*
	 * Nobody is waiting to be DMAed into the QOUTFIFO.
	 */
	ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);
	ahd_outw(ahd, COMPLETE_SCB_DMAINPROG_HEAD, SCB_LIST_NULL);
	ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
	ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL);
	ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL);

	/*
	 * The Freeze Count is 0.
	 */
	ahd->qfreeze_cnt = 0;
	ahd_outw(ahd, QFREEZE_COUNT, 0);
	ahd_outw(ahd, KERNEL_QFREEZE_COUNT, 0);

	/*
	 * Tell the sequencer where it can find our arrays in memory.
	 */
	busaddr = ahd->shared_data_map.physaddr;
	ahd_outl(ahd, SHARED_DATA_ADDR, busaddr);
	ahd_outl(ahd, QOUTFIFO_NEXT_ADDR, busaddr);

	/*
	 * Setup the allowed SCSI Sequences based on operational mode.
	 * If we are a target, we'll enable select in operations once
	 * we've had a lun enabled.
	 */
	scsiseq_template = ENAUTOATNP;
	if ((ahd->flags & AHD_INITIATORROLE) != 0)
		scsiseq_template |= ENRSELI;
	ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq_template);

	/* There are no busy SCBs yet. */
	for (target = 0; target < AHD_NUM_TARGETS; target++) {
		int lun;

		for (lun = 0; lun < AHD_NUM_LUNS_NONPKT; lun++)
			ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun));
	}

	/*
	 * Initialize the group code to command length table.
	 * Vendor Unique codes are set to 0 so we only capture
	 * the first byte of the cdb.  These can be overridden
	 * when target mode is enabled.
	 */
	ahd_outb(ahd, CMDSIZE_TABLE, 5);
	ahd_outb(ahd, CMDSIZE_TABLE + 1, 9);
	ahd_outb(ahd, CMDSIZE_TABLE + 2, 9);
	ahd_outb(ahd, CMDSIZE_TABLE + 3, 0);
	ahd_outb(ahd, CMDSIZE_TABLE + 4, 15);
	ahd_outb(ahd, CMDSIZE_TABLE + 5, 11);
	ahd_outb(ahd, CMDSIZE_TABLE + 6, 0);
	ahd_outb(ahd, CMDSIZE_TABLE + 7, 0);

	/* Tell the sequencer of our initial queue positions */
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
	ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512);
	ahd->qinfifonext = 0;
	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
	ahd_set_hescb_qoff(ahd, 0);
	ahd_set_snscb_qoff(ahd, 0);
	ahd_set_sescb_qoff(ahd, 0);
	ahd_set_sdscb_qoff(ahd, 0);

	/*
	 * Tell the sequencer which SCB will be the next one it receives.
	 */
	busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr);
	ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);

	/*
	 * Default to coalescing disabled.
	 */
	ahd_outw(ahd, INT_COALESCING_CMDCOUNT, 0);
	ahd_outw(ahd, CMDS_PENDING, 0);
	ahd_update_coalescing_values(ahd, ahd->int_coalescing_timer,
				     ahd->int_coalescing_maxcmds,
				     ahd->int_coalescing_mincmds);
	ahd_enable_coalescing(ahd, FALSE);

	ahd_loadseq(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	if (ahd->features & AHD_AIC79XXB_SLOWCRC) {
		u_int negodat3 = ahd_inb(ahd, NEGCONOPTS);

		negodat3 |= ENSLOWCRC;
		ahd_outb(ahd, NEGCONOPTS, negodat3);
		/* Read back to verify the bit actually stuck. */
		negodat3 = ahd_inb(ahd, NEGCONOPTS);
		if (!(negodat3 & ENSLOWCRC))
			printf("aic79xx: failed to set the SLOWCRC bit\n");
		else
			printf("aic79xx: SLOWCRC bit set\n");
	}
}

/*
 * Setup default device and controller settings.
 * This should only be called if our probe has
 * determined that no configuration data is available.
 */
int
ahd_default_config(struct ahd_softc *ahd)
{
	int	targ;

	ahd->our_id = 7;

	/*
	 * Allocate a tstate to house information for our
	 * initiator presence on the bus as well as the user
	 * data for any target mode initiator.
	 */
	if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
		printf("%s: unable to allocate ahd_tmode_tstate. "
		       "Failing attach\n", ahd_name(ahd));
		return (ENOMEM);
	}

	for (targ = 0; targ < AHD_NUM_TARGETS; targ++) {
		struct ahd_devinfo devinfo;
		struct ahd_initiator_tinfo *tinfo;
		struct ahd_tmode_tstate *tstate;
		uint16_t target_mask;

		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
					    targ, &tstate);
		/*
		 * We support SPC2 and SPI4.
		 */
		tinfo->user.protocol_version = 4;
		tinfo->user.transport_version = 4;

		target_mask = 0x01 << targ;
		ahd->user_discenable |= target_mask;
		tstate->discenable |= target_mask;
		ahd->user_tagenable |= target_mask;
#ifdef AHD_FORCE_160
		tinfo->user.period = AHD_SYNCRATE_DT;
#else
		tinfo->user.period = AHD_SYNCRATE_160;
#endif
		tinfo->user.offset = MAX_OFFSET;
		tinfo->user.ppr_options = MSG_EXT_PPR_RD_STRM
					| MSG_EXT_PPR_WR_FLOW
					| MSG_EXT_PPR_HOLD_MCS
					| MSG_EXT_PPR_IU_REQ
					| MSG_EXT_PPR_QAS_REQ
					| MSG_EXT_PPR_DT_REQ;
		if ((ahd->features & AHD_RTI) != 0)
			tinfo->user.ppr_options |= MSG_EXT_PPR_RTI;

		tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;

		/*
		 * Start out Async/Narrow/Untagged and with
		 * conservative protocol support.
		 */
		tinfo->goal.protocol_version = 2;
		tinfo->goal.transport_version = 2;
		tinfo->curr.protocol_version = 2;
		tinfo->curr.transport_version = 2;
		ahd_compile_devinfo(&devinfo, ahd->our_id,
				    targ, CAM_LUN_WILDCARD,
				    'A', ROLE_INITIATOR);
		tstate->tagenable &= ~target_mask;
		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
				 /*ppr_options*/0,
				 AHD_TRANS_CUR|AHD_TRANS_GOAL,
				 /*paused*/TRUE);
	}
	return (0);
}

/*
 * Parse device configuration information.
 */
int
ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc)
{
	int targ;
	int max_targ;

	max_targ = sc->max_targets & CFMAXTARG;
	ahd->our_id = sc->brtime_id & CFSCSIID;

	/*
	 * Allocate a tstate to house information for our
	 * initiator presence on the bus as well as the user
	 * data for any target mode initiator.
	 */
	if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
		printf("%s: unable to allocate ahd_tmode_tstate. "
		       "Failing attach\n", ahd_name(ahd));
		return (ENOMEM);
	}

	for (targ = 0; targ < max_targ; targ++) {
		struct ahd_devinfo devinfo;
		struct ahd_initiator_tinfo *tinfo;
		struct ahd_transinfo *user_tinfo;
		struct ahd_tmode_tstate *tstate;
		uint16_t target_mask;

		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
					    targ, &tstate);
		user_tinfo = &tinfo->user;

		/*
		 * We support SPC2 and SPI4.
		 */
		tinfo->user.protocol_version = 4;
		tinfo->user.transport_version = 4;

		target_mask = 0x01 << targ;
		/* Start from a clean slate, then apply the SEEPROM flags. */
		ahd->user_discenable &= ~target_mask;
		tstate->discenable &= ~target_mask;
		ahd->user_tagenable &= ~target_mask;
		if (sc->device_flags[targ] & CFDISC) {
			tstate->discenable |= target_mask;
			ahd->user_discenable |= target_mask;
			ahd->user_tagenable |= target_mask;
		} else {
			/*
			 * Cannot be packetized without disconnection.
			 */
			sc->device_flags[targ] &= ~CFPACKETIZED;
		}

		user_tinfo->ppr_options = 0;
		user_tinfo->period = (sc->device_flags[targ] & CFXFER);
		if (user_tinfo->period < CFXFER_ASYNC) {
			if (user_tinfo->period <= AHD_PERIOD_10MHz)
				user_tinfo->ppr_options |= MSG_EXT_PPR_DT_REQ;
			user_tinfo->offset = MAX_OFFSET;
		} else {
			user_tinfo->offset = 0;
			user_tinfo->period = AHD_ASYNC_XFER_PERIOD;
		}
#ifdef AHD_FORCE_160
		if (user_tinfo->period <= AHD_SYNCRATE_160)
			user_tinfo->period = AHD_SYNCRATE_DT;
#endif

		if ((sc->device_flags[targ] & CFPACKETIZED) != 0) {
			user_tinfo->ppr_options |= MSG_EXT_PPR_RD_STRM
						|  MSG_EXT_PPR_WR_FLOW
						|  MSG_EXT_PPR_HOLD_MCS
						|  MSG_EXT_PPR_IU_REQ;
			if ((ahd->features & AHD_RTI) != 0)
				user_tinfo->ppr_options |= MSG_EXT_PPR_RTI;
		}

		if ((sc->device_flags[targ] & CFQAS) != 0)
			user_tinfo->ppr_options |= MSG_EXT_PPR_QAS_REQ;

		if ((sc->device_flags[targ] & CFWIDEB) != 0)
			user_tinfo->width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT;
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printf("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width,
			       user_tinfo->period, user_tinfo->offset,
			       user_tinfo->ppr_options);
#endif
		/*
		 * Start out Async/Narrow/Untagged and with
		 * conservative protocol support.
		 */
		tstate->tagenable &= ~target_mask;
		tinfo->goal.protocol_version = 2;
		tinfo->goal.transport_version = 2;
		tinfo->curr.protocol_version = 2;
		tinfo->curr.transport_version = 2;
		ahd_compile_devinfo(&devinfo, ahd->our_id,
				    targ, CAM_LUN_WILDCARD,
				    'A', ROLE_INITIATOR);
		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
				 /*ppr_options*/0,
				 AHD_TRANS_CUR|AHD_TRANS_GOAL,
				 /*paused*/TRUE);
	}

	/* Adapter-wide flags from the BIOS/adapter control bytes. */
	ahd->flags &= ~AHD_SPCHK_ENB_A;
	if (sc->bios_control & CFSPARITY)
		ahd->flags |= AHD_SPCHK_ENB_A;

	ahd->flags &= ~AHD_RESET_BUS_A;
	if (sc->bios_control & CFRESETB)
		ahd->flags |= AHD_RESET_BUS_A;

	ahd->flags &= ~AHD_EXTENDED_TRANS_A;
	if (sc->bios_control & CFEXTEND)
		ahd->flags |= AHD_EXTENDED_TRANS_A;

	ahd->flags &= ~AHD_BIOS_ENABLED;
	if ((sc->bios_control & CFBIOSSTATE) == CFBS_ENABLED)
		ahd->flags |= AHD_BIOS_ENABLED;

	ahd->flags &= ~AHD_STPWLEVEL_A;
	if ((sc->adapter_control & CFSTPWLEVEL) != 0)
		ahd->flags |= AHD_STPWLEVEL_A;

	return (0);
}

/*
 * Parse device configuration information.
 */
int
ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd)
{
	int error;

	/* A zero return from the checksum verify means invalid VPD data. */
	error = ahd_verify_vpd_cksum(vpd);
	if (error == 0)
		return (EINVAL);
	if ((vpd->bios_flags & VPDBOOTHOST) != 0)
		ahd->flags |= AHD_BOOT_CHANNEL;
	return (0);
}

/*
 * Enable or disable chip interrupts, keeping the cached
 * pause/unpause HCNTRL values consistent with the choice.
 */
void
ahd_intr_enable(struct ahd_softc *ahd, int enable)
{
	u_int hcntrl;

	hcntrl = ahd_inb(ahd, HCNTRL);
	hcntrl &= ~INTEN;
	ahd->pause &= ~INTEN;
	ahd->unpause &= ~INTEN;
	if (enable) {
		hcntrl |= INTEN;
		ahd->pause |= INTEN;
		ahd->unpause |= INTEN;
	}
	ahd_outb(ahd, HCNTRL, hcntrl);
}

/*
 * Clamp and program the interrupt coalescing parameters.
 * MAXCMDS/MINCMDS are written negated; presumably the sequencer
 * counts up toward zero - confirm against the firmware source.
 */
static void
ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds,
			     u_int mincmds)
{
	if (timer > AHD_TIMER_MAX_US)
		timer = AHD_TIMER_MAX_US;
	ahd->int_coalescing_timer = timer;

	if (maxcmds > AHD_INT_COALESCING_MAXCMDS_MAX)
		maxcmds = AHD_INT_COALESCING_MAXCMDS_MAX;
	if (mincmds > AHD_INT_COALESCING_MINCMDS_MAX)
		mincmds = AHD_INT_COALESCING_MINCMDS_MAX;
	ahd->int_coalescing_maxcmds = maxcmds;
	ahd_outw(ahd, INT_COALESCING_TIMER, timer / AHD_TIMER_US_PER_TICK);
	ahd_outb(ahd, INT_COALESCING_MAXCMDS, -maxcmds);
	ahd_outb(ahd, INT_COALESCING_MINCMDS, -mincmds);
}

/*
 * Toggle interrupt coalescing via the host/sequencer mailbox and
 * drain any completions that raced with the change.
 */
static void
ahd_enable_coalescing(struct ahd_softc *ahd, int enable)
{

	ahd->hs_mailbox &= ~ENINT_COALESCE;
	if (enable)
		ahd->hs_mailbox |= ENINT_COALESCE;
	ahd_outb(ahd, HS_MAILBOX, ahd->hs_mailbox);
	ahd_flush_device_writes(ahd);
	ahd_run_qoutfifo(ahd);
}

/*
 * Ensure that the card is paused in a location
 * outside of all critical sections and that all
 * pending work is completed prior to returning.
 * This routine should only be called from outside
 * an interrupt context.
 */
void
ahd_pause_and_flushwork(struct ahd_softc *ahd)
{
	u_int intstat;
	u_int maxloops;

	maxloops = 1000;
	ahd->flags |= AHD_ALL_INTERRUPTS;
	ahd_pause(ahd);
	/*
	 * Freeze the outgoing selections.  We do this only
	 * until we are safely paused without further selections
	 * pending.
	 */
	ahd->qfreeze_cnt--;
	ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt);
	ahd_outb(ahd, SEQ_FLAGS2,
		 ahd_inb(ahd, SEQ_FLAGS2) | SELECTOUT_QFROZEN);
	do {

		ahd_unpause(ahd);
		/*
		 * Give the sequencer some time to service
		 * any active selections.
		 */
		ahd_delay(500);

		ahd_intr(ahd);
		ahd_pause(ahd);
		intstat = ahd_inb(ahd, INTSTAT);
		if ((intstat & INT_PEND) == 0) {
			ahd_clear_critical_section(ahd);
			intstat = ahd_inb(ahd, INTSTAT);
		}
	} while (--maxloops
	      && (intstat != 0xFF || (ahd->features & AHD_REMOVABLE) == 0)
	      && ((intstat & INT_PEND) != 0
	       || (ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
	       || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0));

	if (maxloops == 0) {
		printf("Infinite interrupt loop, INTSTAT = %x",
		       ahd_inb(ahd, INTSTAT));
	}
	/* Release the selection freeze taken above. */
	ahd->qfreeze_cnt++;
	ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt);

	ahd_flush_qoutfifo(ahd);

	ahd->flags &= ~AHD_ALL_INTERRUPTS;
}

#if 0
/* Suspend entry point: fails with EBUSY while SCBs are still pending. */
int
ahd_suspend(struct ahd_softc *ahd)
{

	ahd_pause_and_flushwork(ahd);

	if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
		ahd_unpause(ahd);
		return (EBUSY);
	}
	ahd_shutdown(ahd);
	return (0);
}
#endif  /* 0 */

#if 0
/* Resume entry point: full chip reset followed by a restart. */
int
ahd_resume(struct ahd_softc *ahd)
{

	ahd_reset(ahd, /*reinit*/TRUE);
	ahd_intr_enable(ahd, TRUE);
	ahd_restart(ahd);
	return (0);
}
#endif  /* 0 */

/************************** Busy Target Table *********************************/
/*
 * Set SCBPTR to the SCB that contains the busy
 * table entry for TCL.  Return the offset into
 * the SCB that contains the entry for TCL.
 * saved_scbid is dereferenced and set to the
 * scbid that should be restored once manipulation
 * of the TCL entry is complete.
 */
static __inline u_int
ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl)
{
	/*
	 * Index to the SCB that contains the busy entry.
	 */
	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	*saved_scbid = ahd_get_scbptr(ahd);
	ahd_set_scbptr(ahd, TCL_LUN(tcl)
		     | ((TCL_TARGET_OFFSET(tcl) & 0xC) << 4));

	/*
	 * And now calculate the SCB offset to the entry.
	 * Each entry is 2 bytes wide, hence the
	 * multiplication by 2.
	 */
	return (((TCL_TARGET_OFFSET(tcl) & 0x3) << 1)
	      + SCB_DISCONNECTED_LISTS);
}

/*
 * Return the untagged transaction id for a given target/channel lun.
 */
static u_int
ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl)
{
	u_int scbid;
	u_int scb_offset;
	u_int saved_scbptr;

	scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
	scbid = ahd_inw_scbram(ahd, scb_offset);
	ahd_set_scbptr(ahd, saved_scbptr);
	return (scbid);
}

/*
 * Record scbid as the busy (untagged) transaction for the target/lun
 * encoded in tcl, restoring SCBPTR afterwards.
 */
static void
ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid)
{
	u_int scb_offset;
	u_int saved_scbptr;

	scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
	ahd_outw(ahd, scb_offset, scbid);
	ahd_set_scbptr(ahd, saved_scbptr);
}

/************************** SCB and SCB queue management **********************/
/*
 * Return non-zero when the SCB matches the given target/channel/lun/tag
 * description.  Wildcard values match anything; role selects which tag
 * namespace (initiator or target mode) is compared.
 */
static int
ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target,
	      char channel, int lun, u_int tag, role_t role)
{
	int targ = SCB_GET_TARGET(ahd, scb);
	char chan = SCB_GET_CHANNEL(ahd, scb);
	int slun = SCB_GET_LUN(scb);
	int match;

	match = ((chan == channel) || (channel == ALL_CHANNELS));
	if (match != 0)
		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
	if (match != 0)
		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
	if (match != 0) {
#ifdef AHD_TARGET_MODE
		int group;

		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
		if (role == ROLE_INITIATOR) {
			match = (group != XPT_FC_GROUP_TMODE)
			      && ((tag == SCB_GET_TAG(scb))
			       || (tag == SCB_LIST_NULL));
		} else if (role == ROLE_TARGET) {
			match = (group == XPT_FC_GROUP_TMODE)
			      && ((tag == scb->io_ctx->csio.tag_id)
			       || (tag == SCB_LIST_NULL));
		}
#else /* !AHD_TARGET_MODE */
		match = ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL));
#endif /* AHD_TARGET_MODE */
	}

	return match;
}

/*
 * Freeze the device queue associated with this SCB and requeue any
 * matching transactions still sitting in the qinfifo.
 */
static void
ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
{
	int	target;
	char	channel;
	int	lun;

	target = SCB_GET_TARGET(ahd, scb);
	lun = SCB_GET_LUN(scb);
	channel = SCB_GET_CHANNEL(ahd, scb);

	ahd_search_qinfifo(ahd, target,
			   channel, lun,
			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
	ahd_platform_freeze_devq(ahd, scb);
}

/*
 * Insert an SCB at the tail of the qinfifo, linking it behind the SCB
 * (if any) that currently occupies the previous queue slot.
 */
void
ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb)
{
	struct scb *prev_scb;
	ahd_mode_state saved_modes;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
	prev_scb = NULL;
	if (ahd_qinfifo_count(ahd) != 0) {
		u_int prev_tag;
		u_int prev_pos;

		prev_pos = AHD_QIN_WRAP(ahd->qinfifonext - 1);
		prev_tag = ahd->qinfifo[prev_pos];
		prev_scb = ahd_lookup_scb(ahd, prev_tag);
	}
	ahd_qinfifo_requeue(ahd, prev_scb, scb);
	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
	ahd_restore_modes(ahd, saved_modes);
}

/*
 * Link scb into the hardware-visible new-queue chain after prev_scb
 * (or publish it as the next queued SCB when prev_scb is NULL) and
 * append its tag to the host qinfifo array.
 */
static void
ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb,
		    struct scb *scb)
{
	if (prev_scb == NULL) {
		uint32_t busaddr;

		busaddr = ahd_le32toh(scb->hscb->hscb_busaddr);
		ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);
	} else {
		prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
		ahd_sync_scb(ahd, prev_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
	ahd->qinfifonext++;
	scb->hscb->next_hscb_busaddr = ahd->next_queued_hscb->hscb_busaddr;
	ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}

/*
 * Number of qinfifo entries the sequencer has not yet consumed,
 * accounting for index wrap.
 */
static int
ahd_qinfifo_count(struct ahd_softc *ahd)
{
	u_int qinpos;
	u_int wrap_qinpos;
	u_int wrap_qinfifonext;

	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	qinpos = ahd_get_snscb_qoff(ahd);
	wrap_qinpos = AHD_QIN_WRAP(qinpos);
	wrap_qinfifonext = AHD_QIN_WRAP(ahd->qinfifonext);

	if (wrap_qinfifonext >= wrap_qinpos)
		return (wrap_qinfifonext - wrap_qinpos);
	else
		return (wrap_qinfifonext
		      + ARRAY_SIZE(ahd->qinfifo) - wrap_qinpos);
}

/*
 * Recompute the sequencer's CMDS_PENDING count from the host's pending
 * SCB list after draining the completion FIFO.
 */
void
ahd_reset_cmds_pending(struct ahd_softc *ahd)
{
	struct	scb *scb;
	ahd_mode_state saved_modes;
	u_int	pending_cmds;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Don't count any commands as outstanding that the
	 * sequencer has already marked
	 * for completion.
	 */
	ahd_flush_qoutfifo(ahd);

	pending_cmds = 0;
	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
		pending_cmds++;
	}
	ahd_outw(ahd, CMDS_PENDING, pending_cmds - ahd_qinfifo_count(ahd));
	ahd_restore_modes(ahd, saved_modes);
	ahd->flags &= ~AHD_UPDATE_PEND_CMDS;
}

/*
 * Complete an SCB with the given status, freezing it first when the
 * final status is anything other than CAM_REQ_CMP.  An SCB whose
 * status was already modified keeps that earlier status.
 */
static void
ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status)
{
	cam_status ostat;
	cam_status cstat;

	ostat = ahd_get_transaction_status(scb);
	if (ostat == CAM_REQ_INPROG)
		ahd_set_transaction_status(scb, status);
	cstat = ahd_get_transaction_status(scb);
	if (cstat != CAM_REQ_CMP)
		ahd_freeze_scb(scb);
	ahd_done(ahd, scb);
}

/*
 * Walk the qinfifo (and the waiting-for-selection lists) looking for
 * SCBs that match the given description and perform the requested
 * action (complete/remove/print/count) on each.  Returns the number
 * of matching SCBs found.
 */
int
ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
		   int lun, u_int tag, role_t role, uint32_t status,
		   ahd_search_action action)
{
	struct scb	*scb;
	struct scb	*mk_msg_scb;
	struct scb	*prev_scb;
	ahd_mode_state	 saved_modes;
	u_int		 qinstart;
	u_int		 qinpos;
	u_int		 qintail;
	u_int		 tid_next;
	u_int		 tid_prev;
	u_int		 scbid;
	u_int		 seq_flags2;
	u_int		 savedscbptr;
	uint32_t	 busaddr;
	int		 found;
	int		 targets;

	/* Must be in CCHAN mode */
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Halt any pending SCB DMA.  The sequencer will reinitiate
	 * this dma if the qinfifo is not empty once we unpause.
	 */
	if ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN|CCSCBDIR))
	 == (CCARREN|CCSCBEN|CCSCBDIR)) {
		ahd_outb(ahd, CCSCBCTL,
			 ahd_inb(ahd, CCSCBCTL) & ~(CCARREN|CCSCBEN));
		while ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0)
			;
	}
	/* Determine sequencer's position in the qinfifo. */
	qintail = AHD_QIN_WRAP(ahd->qinfifonext);
	qinstart = ahd_get_snscb_qoff(ahd);
	qinpos = AHD_QIN_WRAP(qinstart);
	found = 0;
	prev_scb = NULL;

	if (action == SEARCH_PRINT) {
		printf("qinstart = %d qinfifonext = %d\nQINFIFO:",
		       qinstart, ahd->qinfifonext);
	}

	/*
	 * Start with an empty queue.  Entries that are not chosen
	 * for removal will be re-added to the queue as we go.
	 */
	ahd->qinfifonext = qinstart;
	busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr);
	ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);

	while (qinpos != qintail) {
		scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]);
		if (scb == NULL) {
			printf("qinpos = %d, SCB index = %d\n",
				qinpos, ahd->qinfifo[qinpos]);
			panic("Loop 1\n");
		}

		if (ahd_match_scb(ahd, scb, target, channel, lun, tag, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in qinfifo\n");
				ahd_done_with_status(ahd, scb, status);
				/* FALLTHROUGH */
			case SEARCH_REMOVE:
				break;
			case SEARCH_PRINT:
				printf(" 0x%x", ahd->qinfifo[qinpos]);
				/* FALLTHROUGH */
			case SEARCH_COUNT:
				/* Keep the entry: relink it in place. */
				ahd_qinfifo_requeue(ahd, prev_scb, scb);
				prev_scb = scb;
				break;
			}
		} else {
			ahd_qinfifo_requeue(ahd, prev_scb, scb);
			prev_scb = scb;
		}
		qinpos = AHD_QIN_WRAP(qinpos+1);
	}

	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);

	if (action == SEARCH_PRINT)
		printf("\nWAITING_TID_QUEUES:\n");

	/*
	 * Search waiting for selection lists.  We traverse the
	 * list of "their ids" waiting for selection and, if
	 * appropriate, traverse the SCBs of each "their id"
	 * looking for matches.
	 */
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	seq_flags2 = ahd_inb(ahd, SEQ_FLAGS2);
	if ((seq_flags2 & PENDING_MK_MESSAGE) != 0) {
		scbid = ahd_inw(ahd, MK_MESSAGE_SCB);
		mk_msg_scb = ahd_lookup_scb(ahd, scbid);
	} else
		mk_msg_scb = NULL;
	savedscbptr = ahd_get_scbptr(ahd);
	tid_next = ahd_inw(ahd, WAITING_TID_HEAD);
	tid_prev = SCB_LIST_NULL;
	targets = 0;
	for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) {
		u_int tid_head;
		u_int tid_tail;

		targets++;
		if (targets > AHD_NUM_TARGETS)
			panic("TID LIST LOOP");

		if (scbid >= ahd->scb_data.numscbs) {
			printf("%s: Waiting TID List inconsistency. "
			       "SCB index == 0x%x, yet numscbs == 0x%x.",
			       ahd_name(ahd), scbid, ahd->scb_data.numscbs);
			ahd_dump_card_state(ahd);
			panic("for safety");
		}
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printf("%s: SCB = 0x%x Not Active!\n",
			       ahd_name(ahd), scbid);
			panic("Waiting TID List traversal\n");
		}
		ahd_set_scbptr(ahd, scbid);
		tid_next = ahd_inw_scbram(ahd, SCB_NEXT2);
		if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
				  SCB_LIST_NULL, ROLE_UNKNOWN) == 0) {
			tid_prev = scbid;
			continue;
		}

		/*
		 * We found a list of scbs that needs to be searched.
		 */
		if (action == SEARCH_PRINT)
			printf(" %d ( ", SCB_GET_TARGET(ahd, scb));
		tid_head = scbid;
		found += ahd_search_scb_list(ahd, target, channel,
					     lun, tag, role, status,
					     action, &tid_head, &tid_tail,
					     SCB_GET_TARGET(ahd, scb));
		/*
		 * Check any MK_MESSAGE SCB that is still waiting to
		 * enter this target's waiting for selection queue.
		 */
		if (mk_msg_scb != NULL
		 && ahd_match_scb(ahd, mk_msg_scb, target, channel,
				  lun, tag, role)) {

			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
				if ((mk_msg_scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB pending MK_MSG\n");
				ahd_done_with_status(ahd, mk_msg_scb, status);
				/* FALLTHROUGH */
			case SEARCH_REMOVE:
			{
				u_int tail_offset;

				printf("Removing MK_MSG scb\n");

				/*
				 * Reset our tail to the tail of the
				 * main per-target list.
				 */
				tail_offset = WAITING_SCB_TAILS
				    + (2 * SCB_GET_TARGET(ahd, mk_msg_scb));
				ahd_outw(ahd, tail_offset, tid_tail);

				seq_flags2 &= ~PENDING_MK_MESSAGE;
				ahd_outb(ahd, SEQ_FLAGS2, seq_flags2);
				ahd_outw(ahd, CMDS_PENDING,
					 ahd_inw(ahd, CMDS_PENDING)-1);
				mk_msg_scb = NULL;
				break;
			}
			case SEARCH_PRINT:
				printf(" 0x%x", SCB_GET_TAG(scb));
				/* FALLTHROUGH */
			case SEARCH_COUNT:
				break;
			}
		}

		if (mk_msg_scb != NULL
		 && SCBID_IS_NULL(tid_head)
		 && ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
				  SCB_LIST_NULL, ROLE_UNKNOWN)) {

			/*
			 * When removing the last SCB for a target
			 * queue with a pending MK_MESSAGE scb, we
			 * must queue the MK_MESSAGE scb.
			 */
			printf("Queueing mk_msg_scb\n");
			tid_head = ahd_inw(ahd, MK_MESSAGE_SCB);
			seq_flags2 &= ~PENDING_MK_MESSAGE;
			ahd_outb(ahd, SEQ_FLAGS2, seq_flags2);
			mk_msg_scb = NULL;
		}
		if (tid_head != scbid)
			ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next);
		if (!SCBID_IS_NULL(tid_head))
			tid_prev = tid_head;
		if (action == SEARCH_PRINT)
			printf(")\n");
	}

	/* Restore saved state. */
	ahd_set_scbptr(ahd, savedscbptr);
	ahd_restore_modes(ahd, saved_modes);
	return (found);
}

/*
 * Traverse one target's waiting-for-selection SCB list, applying the
 * requested action to each SCB that matches.  list_head/list_tail are
 * updated to reflect any removals; returns the number of matches.
 */
static int
ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
		    int lun, u_int tag, role_t role, uint32_t status,
		    ahd_search_action action, u_int *list_head,
		    u_int *list_tail, u_int tid)
{
	struct	scb *scb;
	u_int	scbid;
	u_int	next;
	u_int	prev;
	int	found;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	found = 0;
	prev = SCB_LIST_NULL;
	next = *list_head;
	*list_tail = SCB_LIST_NULL;
	for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) {
		if (scbid >= ahd->scb_data.numscbs) {
			printf("%s:SCB List inconsistency. "
			       "SCB == 0x%x, yet numscbs == 0x%x.",
			       ahd_name(ahd), scbid, ahd->scb_data.numscbs);
			ahd_dump_card_state(ahd);
			panic("for safety");
		}
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printf("%s: SCB = %d Not Active!\n",
			       ahd_name(ahd), scbid);
			panic("Waiting List traversal\n");
		}
		ahd_set_scbptr(ahd, scbid);
		*list_tail = scbid;
		next = ahd_inw_scbram(ahd, SCB_NEXT);
		if (ahd_match_scb(ahd, scb, target, channel,
				  lun, SCB_LIST_NULL, role) == 0) {
			prev = scbid;
			continue;
		}
		found++;
		switch (action) {
		case SEARCH_COMPLETE:
			if ((scb->flags & SCB_ACTIVE) == 0)
				printf("Inactive SCB in Waiting List\n");
			ahd_done_with_status(ahd, scb, status);
			/* FALLTHROUGH */
		case SEARCH_REMOVE:
			ahd_rem_wscb(ahd, scbid, prev, next, tid);
			*list_tail = prev;
			if (SCBID_IS_NULL(prev))
				*list_head = next;
			break;
		case SEARCH_PRINT:
			printf("0x%x ", scbid);
			/* FALLTHROUGH */
		case SEARCH_COUNT:
			prev = scbid;
			break;
		}
		if (found > AHD_SCB_MAX)
			panic("SCB LIST LOOP");
	}
	if (action == SEARCH_COMPLETE
	 || action == SEARCH_REMOVE)
		ahd_outw(ahd, CMDS_PENDING, ahd_inw(ahd, CMDS_PENDING) - found);
	return (found);
}

/*
 * Splice a (possibly emptied) per-target SCB list back into the chain
 * of waiting TID lists between tid_prev and tid_next.
 */
static void
ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev,
		    u_int tid_cur, u_int tid_next)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);

	if (SCBID_IS_NULL(tid_cur)) {

		/* Bypass current TID list */
		if (SCBID_IS_NULL(tid_prev)) {
			ahd_outw(ahd, WAITING_TID_HEAD, tid_next);
		} else {
			ahd_set_scbptr(ahd, tid_prev);
			ahd_outw(ahd, SCB_NEXT2, tid_next);
		}
		if (SCBID_IS_NULL(tid_next))
			ahd_outw(ahd, WAITING_TID_TAIL, tid_prev);
	} else {

		/* Stitch through tid_cur */
		if (SCBID_IS_NULL(tid_prev)) {
			ahd_outw(ahd, WAITING_TID_HEAD, tid_cur);
		} else {
			ahd_set_scbptr(ahd, tid_prev);
			ahd_outw(ahd, SCB_NEXT2, tid_cur);
		}
		ahd_set_scbptr(ahd, tid_cur);
		ahd_outw(ahd, SCB_NEXT2, tid_next);

		if (SCBID_IS_NULL(tid_next))
			ahd_outw(ahd, WAITING_TID_TAIL, tid_cur);
	}
}

/*
 * Manipulate the waiting for selection list and return the
 * scb that follows the one that we remove.
 */
static u_int
ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
	     u_int prev, u_int next, u_int tid)
{
	u_int tail_offset;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	if (!SCBID_IS_NULL(prev)) {
		ahd_set_scbptr(ahd, prev);
		ahd_outw(ahd, SCB_NEXT, next);
	}

	/*
	 * SCBs that have MK_MESSAGE set in them may
	 * cause the tail pointer to be updated without
	 * setting the next pointer of the previous tail.
	 * Only clear the tail if the removed SCB was
	 * the tail.
	 */
	tail_offset = WAITING_SCB_TAILS + (2 * tid);
	if (SCBID_IS_NULL(next)
	 && ahd_inw(ahd, tail_offset) == scbid)
		ahd_outw(ahd, tail_offset, prev);

	ahd_add_scb_to_free_list(ahd, scbid);
	return (next);
}

/*
 * Add the SCB as selected by SCBPTR onto the on chip list of
 * free hardware SCBs.  This list is empty/unused if we are not
 * performing SCB paging.
 */
static void
ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid)
{
/* XXX Need some other mechanism to designate "free". */
	/*
	 * Invalidate the tag so that our abort
	 * routines don't think it's active.
	ahd_outb(ahd, SCB_TAG, SCB_LIST_NULL);
	 */
}

/******************************** Error Handling ******************************/
/*
 * Abort all SCBs that match the given description (target/channel/lun/tag),
 * setting their status to the passed in status if the status has not already
 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
 * is paused before it is called.
 */
static int
ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel,
	       int lun, u_int tag, role_t role, uint32_t status)
{
	struct	scb *scbp;
	struct	scb *scbp_next;
	u_int	i, j;
	u_int	maxtarget;
	u_int	minlun;
	u_int	maxlun;
	int	found;
	ahd_mode_state saved_modes;

	/* restore this when we're done */
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	found = ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL,
				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	/*
	 * Clean out the busy target table for any untagged commands.
	 */
	i = 0;
	maxtarget = 16;
	if (target != CAM_TARGET_WILDCARD) {
		i = target;
		if (channel == 'B')
			i += 8;
		maxtarget = i + 1;
	}

	if (lun == CAM_LUN_WILDCARD) {
		minlun = 0;
		maxlun = AHD_NUM_LUNS_NONPKT;
	} else if (lun >= AHD_NUM_LUNS_NONPKT) {
		/* Packetized luns have no untagged-table entries. */
		minlun = maxlun = 0;
	} else {
		minlun = lun;
		maxlun = lun + 1;
	}

	if (role != ROLE_TARGET) {
		for (;i < maxtarget; i++) {
			for (j = minlun;j < maxlun; j++) {
				u_int scbid;
				u_int tcl;

				tcl = BUILD_TCL_RAW(i, 'A', j);
				scbid = ahd_find_busy_tcl(ahd, tcl);
				scbp = ahd_lookup_scb(ahd, scbid);
				if (scbp == NULL
				 || ahd_match_scb(ahd, scbp, target, channel,
						  lun, tag, role) == 0)
					continue;
				ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(i, 'A', j));
			}
		}
	}

	/*
	 * Don't abort commands that have already completed,
	 * but haven't quite made it up to the host yet.
	 */
	ahd_flush_qoutfifo(ahd);

	/*
	 * Go through the pending CCB list and look for
	 * commands for this target that are still active.
	 * These are other tagged commands that were
	 * disconnected when the reset occurred.
	 */
	scbp_next = LIST_FIRST(&ahd->pending_scbs);
	while (scbp_next != NULL) {
		scbp = scbp_next;
		scbp_next = LIST_NEXT(scbp, pending_links);
		if (ahd_match_scb(ahd, scbp, target, channel, lun, tag, role)) {
			cam_status ostat;

			ostat = ahd_get_transaction_status(scbp);
			if (ostat == CAM_REQ_INPROG)
				ahd_set_transaction_status(scbp, status);
			if (ahd_get_transaction_status(scbp) != CAM_REQ_CMP)
				ahd_freeze_scb(scbp);
			if ((scbp->flags & SCB_ACTIVE) == 0)
				printf("Inactive SCB on pending list\n");
			ahd_done(ahd, scbp);
			found++;
		}
	}
	ahd_restore_modes(ahd, saved_modes);
	ahd_platform_abort_scbs(ahd, target, channel, lun, tag, role, status);
	ahd->flags |= AHD_UPDATE_PEND_CMDS;
	return found;
}

/*
 * Assert SCSIRSTO to drive a reset onto the current bus, with the
 * reset-change interrupt masked while we own the line.
 */
static void
ahd_reset_current_bus(struct ahd_softc *ahd)
{
	uint8_t scsiseq;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENSCSIRST);
	scsiseq = ahd_inb(ahd, SCSISEQ0) & ~(ENSELO|ENARBO|SCSIRSTO);
	ahd_outb(ahd, SCSISEQ0, scsiseq | SCSIRSTO);
	ahd_flush_device_writes(ahd);
	ahd_delay(AHD_BUSRESET_DELAY);
	/* Turn off the bus reset */
	ahd_outb(ahd, SCSISEQ0, scsiseq);
	ahd_flush_device_writes(ahd);
	ahd_delay(AHD_BUSRESET_DELAY);
	if ((ahd->bugs & AHD_SCSIRST_BUG) != 0) {
		/*
		 * 2A Razor #474
		 * Certain chip state is not cleared for
		 * SCSI bus resets that we initiate, so
		 * we must reset the chip.
		 */
		ahd_reset(ahd, /*reinit*/TRUE);
		ahd_intr_enable(ahd, /*enable*/TRUE);
		AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	}

	ahd_clear_intstat(ahd);
}

/*
 * Reset the given SCSI channel: quiesce the sequencer and DMA FIFOs,
 * optionally drive a bus reset, abort all pending transactions, and
 * revert transfer agreements pending renegotiation.  Returns the
 * number of SCBs aborted by the reset.
 */
int
ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
{
	struct	ahd_devinfo devinfo;
	u_int	initiator;
	u_int	target;
	u_int	max_scsiid;
	int	found;
	u_int	fifo;
	u_int	next_fifo;
	uint8_t scsiseq;

	/*
	 * Check if the last bus reset is cleared
	 */
	if (ahd->flags & AHD_BUS_RESET_ACTIVE) {
		printf("%s: bus reset still active\n",
		       ahd_name(ahd));
		return 0;
	}
	ahd->flags |= AHD_BUS_RESET_ACTIVE;

	ahd->pending_device = NULL;

	ahd_compile_devinfo(&devinfo,
			    CAM_TARGET_WILDCARD,
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD,
			    channel, ROLE_UNKNOWN);
	ahd_pause(ahd);

	/* Make sure the sequencer is in a safe location. */
	ahd_clear_critical_section(ahd);

	/*
	 * Run our command complete fifos to ensure that we perform
	 * completion processing on any commands that 'completed'
	 * before the reset occurred.
	 */
	ahd_run_qoutfifo(ahd);
#ifdef AHD_TARGET_MODE
	if ((ahd->flags & AHD_TARGETROLE) != 0) {
		ahd_run_tqinfifo(ahd, /*paused*/TRUE);
	}
#endif
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/*
	 * Disable selections so no automatic hardware
	 * functions will modify chip state.
	 */
	ahd_outb(ahd, SCSISEQ0, 0);
	ahd_outb(ahd, SCSISEQ1, 0);

	/*
	 * Safely shut down our DMA engines.  Always start with
	 * the FIFO that is not currently active (if any are
	 * actively connected).
	 */
	next_fifo = fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
	if (next_fifo > CURRFIFO_1)
		/* If disconnected, arbitrarily start with FIFO1.
		 */
		next_fifo = fifo = 0;
	do {
		next_fifo ^= CURRFIFO_1;
		ahd_set_modes(ahd, next_fifo, next_fifo);
		ahd_outb(ahd, DFCNTRL,
			 ahd_inb(ahd, DFCNTRL) & ~(SCSIEN|HDMAEN));
		/* Wait for the DMA engine to acknowledge the shutdown. */
		while ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0)
			ahd_delay(10);
		/*
		 * Set CURRFIFO to the now inactive channel.
		 */
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		ahd_outb(ahd, DFFSTAT, next_fifo);
	} while (next_fifo != fifo);

	/*
	 * Reset the bus if we are initiating this reset
	 */
	ahd_clear_msg_state(ahd);
	ahd_outb(ahd, SIMODE1,
		 ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST));

	if (initiate_reset)
		ahd_reset_current_bus(ahd);

	ahd_clear_intstat(ahd);

	/*
	 * Clean up all the state information for the
	 * pending transactions on this bus.
	 */
	found = ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);

	/*
	 * Cleanup anything left in the FIFOs.
	 */
	ahd_clear_fifo(ahd, 0);
	ahd_clear_fifo(ahd, 1);

	/*
	 * Clear SCSI interrupt status
	 */
	ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);

	/*
	 * Reenable selections
	 */
	ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST);
	scsiseq = ahd_inb(ahd, SCSISEQ_TEMPLATE);
	ahd_outb(ahd, SCSISEQ1, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));

	max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;
#ifdef AHD_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target more peripheral
	 * drivers affected by this action.
	 */
	for (target = 0; target <= max_scsiid; target++) {
		struct ahd_tmode_tstate* tstate;
		u_int lun;

		tstate = ahd->enabled_targets[target];
		if (tstate == NULL)
			continue;
		for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
			struct ahd_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahd_queue_lstate_event(ahd, lstate, CAM_TARGET_WILDCARD,
					       EVENT_TYPE_BUS_RESET, /*arg*/0);
			ahd_send_lstate_events(ahd, lstate);
		}
	}
#endif
	/*
	 * Revert to async/narrow transfers until we renegotiate.
	 */
	for (target = 0; target <= max_scsiid; target++) {

		if (ahd->enabled_targets[target] == NULL)
			continue;
		for (initiator = 0; initiator <= max_scsiid; initiator++) {
			struct ahd_devinfo devinfo;

			ahd_compile_devinfo(&devinfo, target, initiator,
					    CAM_LUN_WILDCARD,
					    'A', ROLE_UNKNOWN);
			ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHD_TRANS_CUR, /*paused*/TRUE);
			ahd_set_syncrate(ahd, &devinfo, /*period*/0,
					 /*offset*/0, /*ppr_options*/0,
					 AHD_TRANS_CUR, /*paused*/TRUE);
		}
	}

	/* Notify the XPT that a bus reset occurred */
	ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD,
		       CAM_LUN_WILDCARD, AC_BUS_RESET);

	ahd_restart(ahd);

	return (found);
}

/**************************** Statistics Processing ***************************/
/*
 * Periodic timer callback: adapt interrupt coalescing to the recent
 * command-completion rate and advance the completion-rate history
 * buckets.
 */
static void
ahd_stat_timer(void *arg)
{
	struct	ahd_softc *ahd = arg;
	u_long	s;
	int	enint_coal;

	ahd_lock(ahd, &s);

	enint_coal = ahd->hs_mailbox & ENINT_COALESCE;
	if (ahd->cmdcmplt_total > ahd->int_coalescing_threshold)
		enint_coal |= ENINT_COALESCE;
	else if (ahd->cmdcmplt_total < ahd->int_coalescing_stop_threshold)
		enint_coal &= ~ENINT_COALESCE;

	if (enint_coal != (ahd->hs_mailbox & ENINT_COALESCE)) {
		ahd_enable_coalescing(ahd, enint_coal);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_INT_COALESCING) != 0)
			printf("%s: Interrupt coalescing "
			       "now %sabled. Cmds %d\n",
			       ahd_name(ahd),
			       (enint_coal & ENINT_COALESCE) ? "en" : "dis",
			       ahd->cmdcmplt_total);
#endif
	}

	ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1);
	ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket];
	ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0;
	ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
			ahd_stat_timer, ahd);
	ahd_unlock(ahd, &s);
}

/****************************** Status Processing *****************************/
/*
 * Handle a non-zero SCSI status returned for an SCB: freeze the device
 * queue, then either complete the command with an error or kick off
 * autosense (REQUEST SENSE) processing.
 */
static void
ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
{
	struct	hardware_scb *hscb;
	int	paused;

	/*
	 * The sequencer freezes its select-out queue
	 * anytime a SCSI status error occurs.
 We must
	 * handle the error and increment our qfreeze count
	 * to allow the sequencer to continue.  We don't
	 * bother clearing critical sections here since all
	 * operations are on data structures that the sequencer
	 * is not touching once the queue is frozen.
	 */
	hscb = scb->hscb;

	if (ahd_is_paused(ahd)) {
		paused = 1;
	} else {
		paused = 0;
		ahd_pause(ahd);
	}

	/* Freeze the queue until the client sees the error. */
	ahd_freeze_devq(ahd, scb);
	ahd_freeze_scb(scb);
	ahd->qfreeze_cnt++;
	ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt);

	if (paused == 0)
		ahd_unpause(ahd);

	/* Don't want to clobber the original sense code */
	if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * Clear the SCB_SENSE Flag and perform
		 * a normal command completion.
		 */
		scb->flags &= ~SCB_SENSE;
		ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
		ahd_done(ahd, scb);
		return;
	}
	ahd_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
	ahd_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status);
	switch (hscb->shared_data.istatus.scsi_status) {
	case STATUS_PKT_SENSE:
	{
		struct scsi_status_iu_header *siu;

		ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD);
		siu = (struct scsi_status_iu_header *)scb->sense_data;
		ahd_set_scsi_status(scb, siu->status);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_SENSE) != 0) {
			ahd_print_path(ahd, scb);
			printf("SCB 0x%x Received PKT Status of 0x%x\n",
			       SCB_GET_TAG(scb), siu->status);
			printf("\tflags = 0x%x, sense len = 0x%x, "
			       "pktfail = 0x%x\n",
			       siu->flags, scsi_4btoul(siu->sense_length),
			       scsi_4btoul(siu->pkt_failures_length));
		}
#endif
		if ((siu->flags & SIU_RSPVALID) != 0) {
			ahd_print_path(ahd, scb);
			if (scsi_4btoul(siu->pkt_failures_length) < 4) {
				printf("Unable to parse pkt_failures\n");
			} else {

				switch (SIU_PKTFAIL_CODE(siu)) {
				case SIU_PFC_NONE:
					printf("No packet failure found\n");
					break;
				case SIU_PFC_CIU_FIELDS_INVALID:
					printf("Invalid Command IU Field\n");
					break;
				case SIU_PFC_TMF_NOT_SUPPORTED:
					printf("TMF not supportd\n");
					break;
				case SIU_PFC_TMF_FAILED:
					printf("TMF failed\n");
					break;
				case SIU_PFC_INVALID_TYPE_CODE:
					printf("Invalid L_Q Type code\n");
					break;
				case SIU_PFC_ILLEGAL_REQUEST:
					printf("Illegal request\n");
					/* FALLTHROUGH */
				default:
					break;
				}
			}
			if (siu->status == SCSI_STATUS_OK)
				ahd_set_transaction_status(scb,
							   CAM_REQ_CMP_ERR);
		}
		if ((siu->flags & SIU_SNSVALID) != 0) {
			scb->flags |= SCB_PKT_SENSE;
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_SENSE) != 0)
				printf("Sense data available\n");
#endif
		}
		ahd_done(ahd, scb);
		break;
	}
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
	{
		struct ahd_devinfo devinfo;
		struct ahd_dma_seg *sg;
		struct scsi_sense *sc;
		struct ahd_initiator_tinfo *targ_info;
		struct ahd_tmode_tstate *tstate;
		struct ahd_transinfo *tinfo;
#ifdef AHD_DEBUG
		if (ahd_debug & AHD_SHOW_SENSE) {
			ahd_print_path(ahd, scb);
			printf("SCB %d: requests Check Status\n",
			       SCB_GET_TAG(scb));
		}
#endif

		if (ahd_perform_autosense(scb) == 0)
			break;

		ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
				    SCB_GET_TARGET(ahd, scb),
				    SCB_GET_LUN(scb),
				    SCB_GET_CHANNEL(ahd, scb),
				    ROLE_INITIATOR);
		targ_info = ahd_fetch_transinfo(ahd,
						devinfo.channel,
						devinfo.our_scsiid,
						devinfo.target,
						&tstate);
		tinfo = &targ_info->curr;
		sg = scb->sg_list;
		/* The REQUEST SENSE CDB is built in the SCB's own CDB area. */
		sc = (struct scsi_sense *)hscb->shared_data.idata.cdb;
		/*
		 * Save off the residual if there is one.
		 */
		ahd_update_residual(ahd, scb);
#ifdef AHD_DEBUG
		if (ahd_debug & AHD_SHOW_SENSE) {
			ahd_print_path(ahd, scb);
			printf("Sending Sense\n");
		}
#endif
		scb->sg_count = 0;
		sg = ahd_sg_setup(ahd, scb, sg, ahd_get_sense_bufaddr(ahd, scb),
				  ahd_get_sense_bufsize(ahd, scb),
				  /*last*/TRUE);
		sc->opcode = REQUEST_SENSE;
		sc->byte2 = 0;
		/* Pre-SCSI-3 targets encode the lun in byte2 of the CDB. */
		if (tinfo->protocol_version <= SCSI_REV_2
		 && SCB_GET_LUN(scb) < 8)
			sc->byte2 = SCB_GET_LUN(scb) << 5;
		sc->unused[0] = 0;
		sc->unused[1] = 0;
		sc->length = ahd_get_sense_bufsize(ahd, scb);
		sc->control = 0;

		/*
		 * We can't allow the target to disconnect.
		 * This will be an untagged transaction and
		 * having the target disconnect will make this
		 * transaction indistinguishable from outstanding
		 * tagged transactions.
		 */
		hscb->control = 0;

		/*
		 * This request sense could be because the
		 * device lost power or in some other
		 * way has lost our transfer negotiations.
		 * Renegotiate if appropriate.  Unit attention
		 * errors will be reported before any data
		 * phases occur.
		 */
		if (ahd_get_residual(scb) == ahd_get_transfer_length(scb)) {
			ahd_update_neg_request(ahd, &devinfo,
					       tstate, targ_info,
					       AHD_NEG_IF_NON_ASYNC);
		}
		if (tstate->auto_negotiate & devinfo.target_mask) {
			hscb->control |= MK_MESSAGE;
			scb->flags &=
			    ~(SCB_NEGOTIATE|SCB_ABORT|SCB_DEVICE_RESET);
			scb->flags |= SCB_AUTO_NEGOTIATE;
		}
		hscb->cdb_len = sizeof(*sc);
		ahd_setup_data_scb(ahd, scb);
		scb->flags |= SCB_SENSE;
		ahd_queue_scb(ahd, scb);
		break;
	}
	case SCSI_STATUS_OK:
		printf("%s: Interrupted for staus of 0???\n",
		       ahd_name(ahd));
		/* FALLTHROUGH */
	default:
		ahd_done(ahd, scb);
		break;
	}
}

/*
 * Dispatch a completed SCB: handle SCSI status errors, otherwise
 * compute the residual and finish the command.
 */
static void
ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb)
{
	if (scb->hscb->shared_data.istatus.scsi_status != 0) {
		ahd_handle_scsi_status(ahd, scb);
	} else {
		ahd_calc_residual(ahd, scb);
		ahd_done(ahd, scb);
	}
}

/*
 * Calculate the residual for a just completed SCB.
 */
static void
ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct initiator_status *spkt;
	uint32_t sgptr;
	uint32_t resid_sgptr;
	uint32_t resid;

	/*
	 * 5 cases.
	 * 1) No residual.
	 *    SG_STATUS_VALID clear in sgptr.
	 * 2) Transferless command
	 * 3) Never performed any transfers.
	 *    sgptr has SG_FULL_RESID set.
	 * 4) No residual but target did not
	 *    save data pointers after the
	 *    last transfer, so sgptr was
	 *    never updated.
	 * 5) We have a partial residual.
	 *    Use residual_sgptr to determine
	 *    where we are.
	 */
	hscb = scb->hscb;
	sgptr = ahd_le32toh(hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) == 0)
		/* Case 1 */
		return;
	sgptr &= ~SG_STATUS_VALID;

	if ((sgptr & SG_LIST_NULL) != 0)
		/* Case 2 */
		return;

	/*
	 * Residual fields are the same in both
	 * target and initiator status packets,
	 * so we can always use the initiator fields
	 * regardless of the role for this SCB.
	 */
	spkt = &hscb->shared_data.istatus;
	resid_sgptr = ahd_le32toh(spkt->residual_sgptr);
	if ((sgptr & SG_FULL_RESID) != 0) {
		/* Case 3 */
		resid = ahd_get_transfer_length(scb);
	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
		/* Case 4 */
		return;
	} else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) {
		ahd_print_path(ahd, scb);
		printf("data overrun detected Tag == 0x%x.\n",
		       SCB_GET_TAG(scb));
		ahd_freeze_devq(ahd, scb);
		ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
		ahd_freeze_scb(scb);
		return;
	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
		/* NOTREACHED */
	} else {
		struct ahd_dma_seg *sg;

		/*
		 * Remainder of the SG where the transfer
		 * stopped.
		 */
		resid = ahd_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK;
		sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK);

		/* The residual sg_ptr always points to the next sg */
		sg--;

		/*
		 * Add up the contents of all residual
		 * SG segments that are after the SG where
		 * the transfer stopped.
		 */
		while ((ahd_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) {
			sg++;
			resid += ahd_le32toh(sg->len) & AHD_SG_LEN_MASK;
		}
	}
	/* Sense transfers track their residual separately. */
	if ((scb->flags & SCB_SENSE) == 0)
		ahd_set_residual(scb, resid);
	else
		ahd_set_sense_residual(scb, resid);

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MISC) != 0) {
		ahd_print_path(ahd, scb);
		printf("Handled %sResidual of %d bytes\n",
		       (scb->flags & SCB_SENSE) ?
"Sense " : "", resid); } #endif } /******************************* Target Mode **********************************/ #ifdef AHD_TARGET_MODE /* * Add a target mode event to this lun's queue */ static void ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg) { struct ahd_tmode_event *event; int pending; xpt_freeze_devq(lstate->path, /*count*/1); if (lstate->event_w_idx >= lstate->event_r_idx) pending = lstate->event_w_idx - lstate->event_r_idx; else pending = AHD_TMODE_EVENT_BUFFER_SIZE + 1 - (lstate->event_r_idx - lstate->event_w_idx); if (event_type == EVENT_TYPE_BUS_RESET || event_type == MSG_BUS_DEV_RESET) { /* * Any earlier events are irrelevant, so reset our buffer. * This has the effect of allowing us to deal with reset * floods (an external device holding down the reset line) * without losing the event that is really interesting. */ lstate->event_r_idx = 0; lstate->event_w_idx = 0; xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); } if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) { xpt_print_path(lstate->path); printf("immediate event %x:%x lost\n", lstate->event_buffer[lstate->event_r_idx].event_type, lstate->event_buffer[lstate->event_r_idx].event_arg); lstate->event_r_idx++; if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) lstate->event_r_idx = 0; xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); } event = &lstate->event_buffer[lstate->event_w_idx]; event->initiator_id = initiator_id; event->event_type = event_type; event->event_arg = event_arg; lstate->event_w_idx++; if (lstate->event_w_idx == AHD_TMODE_EVENT_BUFFER_SIZE) lstate->event_w_idx = 0; } /* * Send any target mode events queued up waiting * for immediate notify resources. 
 */
void
ahd_send_lstate_events(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate)
{
	struct ccb_hdr *ccbh;
	struct ccb_immed_notify *inot;

	/*
	 * Drain queued events for as long as both an event and an
	 * immediate-notify CCB are available to pair it with.
	 */
	while (lstate->event_r_idx != lstate->event_w_idx
	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
		struct ahd_tmode_event *event;

		event = &lstate->event_buffer[lstate->event_r_idx];
		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
		inot = (struct ccb_immed_notify *)ccbh;
		switch (event->event_type) {
		case EVENT_TYPE_BUS_RESET:
			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
			break;
		default:
			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
			inot->message_args[0] = event->event_type;
			inot->message_args[1] = event->event_arg;
			break;
		}
		inot->initiator_id = event->initiator_id;
		inot->sense_len = 0;
		xpt_done((union ccb *)inot);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
	}
}
#endif

/******************** Sequencer Program Patching/Download *********************/

#ifdef AHD_DUMP_SEQ
/*
 * Debug helper: read back and print the full sequencer program RAM
 * (2048 instruction words) as 32-bit values.
 */
void
ahd_dumpseq(struct ahd_softc* ahd)
{
	int i;
	int max_prog;

	max_prog = 2048;

	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahd_outw(ahd, PRGMCNT, 0);
	for (i = 0; i < max_prog; i++) {
		uint8_t ins_bytes[4];

		ahd_insb(ahd, SEQRAM, ins_bytes, 4);
		printf("0x%08x\n", ins_bytes[0] << 24
				 | ins_bytes[1] << 16
				 | ins_bytes[2] << 8
				 | ins_bytes[3]);
	}
}
#endif

/*
 * Download the sequencer firmware into program RAM, applying any
 * configuration-dependent patches and recording the critical-section
 * address ranges that survive patching.
 */
static void
ahd_loadseq(struct ahd_softc *ahd)
{
	struct	cs cs_table[num_critical_sections];
	u_int	begin_set[num_critical_sections];
	u_int	end_set[num_critical_sections];
	struct	patch *cur_patch;
	u_int	cs_count;
	u_int	cur_cs;
	u_int	i;
	int	downloaded;
	u_int	skip_addr;
	u_int	sg_prefetch_cnt;
	u_int	sg_prefetch_cnt_limit;
	u_int	sg_prefetch_align;
	u_int	sg_size;
	u_int	cacheline_mask;
	uint8_t	download_consts[DOWNLOAD_CONST_COUNT];

	if (bootverbose)
		printf("%s: Downloading Sequencer Program...",
		       ahd_name(ahd));

#if DOWNLOAD_CONST_COUNT != 8
#error "Download Const Mismatch"
#endif
	/*
	 * Start out with 0 critical sections
	 * that
 apply to this firmware load.
	 */
	cs_count = 0;
	cur_cs = 0;
	memset(begin_set, 0, sizeof(begin_set));
	memset(end_set, 0, sizeof(end_set));

	/*
	 * Setup downloadable constant table.
	 *
	 * The computation for the S/G prefetch variables is
	 * a bit complicated.  We would like to always fetch
	 * in terms of cachelined sized increments.  However,
	 * if the cacheline is not an even multiple of the
	 * SG element size or is larger than our SG RAM, using
	 * just the cache size might leave us with only a portion
	 * of an SG element at the tail of a prefetch.  If the
	 * cacheline is larger than our S/G prefetch buffer less
	 * the size of an SG element, we may round down to a cacheline
	 * that doesn't contain any or all of the S/G of interest
	 * within the bounds of our S/G ram.  Provide variables to
	 * the sequencer that will allow it to handle these edge
	 * cases.
	 */
	/* Start by aligning to the nearest cacheline. */
	sg_prefetch_align = ahd->pci_cachesize;
	if (sg_prefetch_align == 0)
		sg_prefetch_align = 8;
	/* Round down to the nearest power of 2. */
	while (powerof2(sg_prefetch_align) == 0)
		sg_prefetch_align--;

	cacheline_mask = sg_prefetch_align - 1;

	/*
	 * If the cacheline boundary is greater than half our prefetch RAM
	 * we risk not being able to fetch even a single complete S/G
	 * segment if we align to that boundary.
	 */
	if (sg_prefetch_align > CCSGADDR_MAX/2)
		sg_prefetch_align = CCSGADDR_MAX/2;
	/* Start by fetching a single cacheline. */
	sg_prefetch_cnt = sg_prefetch_align;
	/*
	 * Increment the prefetch count by cachelines until
	 * at least one S/G element will fit.
	 */
	sg_size = sizeof(struct ahd_dma_seg);
	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
		sg_size = sizeof(struct ahd_dma64_seg);
	while (sg_prefetch_cnt < sg_size)
		sg_prefetch_cnt += sg_prefetch_align;
	/*
	 * If the cacheline is not an even multiple of
	 * the S/G size, we may only get a partial S/G when
	 * we align. Add a cacheline if this is the case.
	 */
	if ((sg_prefetch_align % sg_size) != 0
	 && (sg_prefetch_cnt < CCSGADDR_MAX))
		sg_prefetch_cnt += sg_prefetch_align;
	/*
	 * Lastly, compute a value that the sequencer can use
	 * to determine if the remainder of the CCSGRAM buffer
	 * has a full S/G element in it.
	 */
	sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1);
	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
	download_consts[SG_PREFETCH_CNT_LIMIT] = sg_prefetch_cnt_limit;
	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_align - 1);
	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_align - 1);
	download_consts[SG_SIZEOF] = sg_size;
	download_consts[PKT_OVERRUN_BUFOFFSET] =
		(ahd->overrun_buf - (uint8_t *)ahd->qoutfifo) / 256;
	download_consts[SCB_TRANSFER_SIZE] = SCB_TRANSFER_SIZE_1BYTE_LUN;
	download_consts[CACHELINE_MASK] = cacheline_mask;

	/* Put the sequencer into LOADRAM mode and download from address 0. */
	cur_patch = patches;
	downloaded = 0;
	skip_addr = 0;
	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahd_outw(ahd, PRGMCNT, 0);

	for (i = 0; i < sizeof(seqprog)/4; i++) {
		if (ahd_check_patch(ahd, &cur_patch, i, &skip_addr) == 0) {
			/*
			 * Don't download this instruction as it
			 * is in a patch that was removed.
			 */
			continue;
		}
		/*
		 * Move through the CS table until we find a CS
		 * that might apply to this instruction.
		 */
		for (; cur_cs < num_critical_sections; cur_cs++) {
			if (critical_sections[cur_cs].end <= i) {
				/* Close out a section we were tracking. */
				if (begin_set[cs_count] == TRUE
				 && end_set[cs_count] == FALSE) {
					cs_table[cs_count].end = downloaded;
					end_set[cs_count] = TRUE;
					cs_count++;
				}
				continue;
			}
			if (critical_sections[cur_cs].begin <= i
			 && begin_set[cs_count] == FALSE) {
				cs_table[cs_count].begin = downloaded;
				begin_set[cs_count] = TRUE;
			}
			break;
		}
		ahd_download_instr(ahd, i, download_consts);
		downloaded++;
	}

	/* Preserve the post-patch critical-section table for the ISR. */
	ahd->num_critical_sections = cs_count;
	if (cs_count != 0) {
		cs_count *= sizeof(struct cs);
		ahd->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
		if (ahd->critical_sections == NULL)
			panic("ahd_loadseq: Could not malloc");
		memcpy(ahd->critical_sections, cs_table, cs_count);
	}
	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE);

	if (bootverbose) {
		printf(" %d instructions downloaded\n", downloaded);
		printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
		       ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags);
	}
}

/*
 * Decide whether firmware instruction start_instr should be downloaded,
 * consulting the patch table.  Advances *start_patch and *skip_addr as
 * patches are accepted or rejected.  Returns 1 to download, 0 to skip.
 */
static int
ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch,
		u_int start_instr, u_int *skip_addr)
{
	struct	patch *cur_patch;
	struct	patch *last_patch;
	u_int	num_patches;

	num_patches = ARRAY_SIZE(patches);
	last_patch = &patches[num_patches];
	cur_patch = *start_patch;

	while (cur_patch < last_patch && start_instr == cur_patch->begin) {

		if (cur_patch->patch_func(ahd) == 0) {

			/* Start rejecting code */
			*skip_addr = start_instr + cur_patch->skip_instr;
			cur_patch += cur_patch->skip_patch;
		} else {
			/* Accepted this patch.  Advance to the next
			 * one and wait for our instruction pointer to
			 * hit this point.
			 */
			cur_patch++;
		}
	}

	*start_patch = cur_patch;
	if (start_instr < *skip_addr)
		/* Still skipping */
		return (0);

	return (1);
}

/*
 * Translate a pre-patch sequencer address into its post-patch address
 * by subtracting the number of skipped instructions that precede it.
 */
static u_int
ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address)
{
	struct patch *cur_patch;
	int address_offset;
	u_int skip_addr;
	u_int i;

	address_offset = 0;
	cur_patch = patches;
	skip_addr = 0;

	for (i = 0; i < address;) {

		ahd_check_patch(ahd, &cur_patch, i, &skip_addr);

		if (skip_addr > i) {
			int end_addr;

			end_addr = min(address, skip_addr);
			address_offset += end_addr - i;
			i = skip_addr;
		} else {
			i++;
		}
	}
	return (address - address_offset);
}

/*
 * Fix up one firmware instruction (relocate jump targets, substitute
 * download constants, compute odd parity) and write it to SEQRAM.
 */
static void
ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts)
{
	union	ins_formats instr;
	struct	ins_format1 *fmt1_ins;
	struct	ins_format3 *fmt3_ins;
	u_int	opcode;

	/*
	 * The firmware is always compiled into a little endian format.
	 */
	instr.integer = ahd_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);

	fmt1_ins = &instr.format1;
	fmt3_ins = NULL;

	/* Pull the opcode */
	opcode = instr.format1.opcode;
	switch (opcode) {
	case AIC_OP_JMP:
	case AIC_OP_JC:
	case AIC_OP_JNC:
	case AIC_OP_CALL:
	case AIC_OP_JNE:
	case AIC_OP_JNZ:
	case AIC_OP_JE:
	case AIC_OP_JZ:
	{
		fmt3_ins = &instr.format3;
		fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address);
		/* FALLTHROUGH */
	}
	case AIC_OP_OR:
	case AIC_OP_AND:
	case AIC_OP_XOR:
	case AIC_OP_ADD:
	case AIC_OP_ADC:
	case AIC_OP_BMOV:
		/*
		 * The parity bit doubles as the "use download constant"
		 * flag in the source image; substitute the constant and
		 * clear it before recomputing real parity below.
		 */
		if (fmt1_ins->parity != 0) {
			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
		}
		fmt1_ins->parity = 0;
		/* FALLTHROUGH */
	case AIC_OP_ROL:
	{
		int i, count;

		/* Calculate odd parity for the instruction */
		for (i = 0, count = 0; i < 31; i++) {
			uint32_t mask;

			mask = 0x01 << i;
			if ((instr.integer & mask) != 0)
				count++;
		}
		if ((count & 0x01) == 0)
			instr.format1.parity = 1;

		/* The sequencer is a little endian cpu */
		instr.integer = ahd_htole32(instr.integer);
		ahd_outsb(ahd, SEQRAM, instr.bytes, 4);
		break;
	}
	default:
		panic("Unknown opcode encountered in seq program");
		break;
	}
}

/*
 * Empirically determine the depth of the sequencer stack by writing
 * increasing patterns until readback no longer matches.
 */
static int
ahd_probe_stack_size(struct ahd_softc *ahd)
{
	int
last_probe; last_probe = 0; while (1) { int i; /* * We avoid using 0 as a pattern to avoid * confusion if the stack implementation * "back-fills" with zeros when "poping' * entries. */ for (i = 1; i <= last_probe+1; i++) { ahd_outb(ahd, STACK, i & 0xFF); ahd_outb(ahd, STACK, (i >> 8) & 0xFF); } /* Verify */ for (i = last_probe+1; i > 0; i--) { u_int stack_entry; stack_entry = ahd_inb(ahd, STACK) |(ahd_inb(ahd, STACK) << 8); if (stack_entry != i) goto sized; } last_probe++; } sized: return (last_probe); } int ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries, const char *name, u_int address, u_int value, u_int *cur_column, u_int wrap_point) { int printed; u_int printed_mask; if (cur_column != NULL && *cur_column >= wrap_point) { printf("\n"); *cur_column = 0; } printed = printf("%s[0x%x]", name, value); if (table == NULL) { printed += printf(" "); *cur_column += printed; return (printed); } printed_mask = 0; while (printed_mask != 0xFF) { int entry; for (entry = 0; entry < num_entries; entry++) { if (((value & table[entry].mask) != table[entry].value) || ((printed_mask & table[entry].mask) == table[entry].mask)) continue; printed += printf("%s%s", printed_mask == 0 ? 
":(" : "|", table[entry].name); printed_mask |= table[entry].mask; break; } if (entry >= num_entries) break; } if (printed_mask != 0) printed += printf(") "); else printed += printf(" "); if (cur_column != NULL) *cur_column += printed; return (printed); } void ahd_dump_card_state(struct ahd_softc *ahd) { struct scb *scb; ahd_mode_state saved_modes; u_int dffstat; int paused; u_int scb_index; u_int saved_scb_index; u_int cur_col; int i; if (ahd_is_paused(ahd)) { paused = 1; } else { paused = 0; ahd_pause(ahd); } saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" "%s: Dumping Card State at program address 0x%x Mode 0x%x\n", ahd_name(ahd), ahd_inw(ahd, CURADDR), ahd_build_mode_state(ahd, ahd->saved_src_mode, ahd->saved_dst_mode)); if (paused) printf("Card was paused\n"); if (ahd_check_cmdcmpltqueues(ahd)) printf("Completions are pending\n"); /* * Mode independent registers. */ cur_col = 0; ahd_intstat_print(ahd_inb(ahd, INTSTAT), &cur_col, 50); ahd_seloid_print(ahd_inb(ahd, SELOID), &cur_col, 50); ahd_selid_print(ahd_inb(ahd, SELID), &cur_col, 50); ahd_hs_mailbox_print(ahd_inb(ahd, LOCAL_HS_MAILBOX), &cur_col, 50); ahd_intctl_print(ahd_inb(ahd, INTCTL), &cur_col, 50); ahd_seqintstat_print(ahd_inb(ahd, SEQINTSTAT), &cur_col, 50); ahd_saved_mode_print(ahd_inb(ahd, SAVED_MODE), &cur_col, 50); ahd_dffstat_print(ahd_inb(ahd, DFFSTAT), &cur_col, 50); ahd_scsisigi_print(ahd_inb(ahd, SCSISIGI), &cur_col, 50); ahd_scsiphase_print(ahd_inb(ahd, SCSIPHASE), &cur_col, 50); ahd_scsibus_print(ahd_inb(ahd, SCSIBUS), &cur_col, 50); ahd_lastphase_print(ahd_inb(ahd, LASTPHASE), &cur_col, 50); ahd_scsiseq0_print(ahd_inb(ahd, SCSISEQ0), &cur_col, 50); ahd_scsiseq1_print(ahd_inb(ahd, SCSISEQ1), &cur_col, 50); ahd_seqctl0_print(ahd_inb(ahd, SEQCTL0), &cur_col, 50); ahd_seqintctl_print(ahd_inb(ahd, SEQINTCTL), &cur_col, 50); ahd_seq_flags_print(ahd_inb(ahd, SEQ_FLAGS), &cur_col, 50); 
ahd_seq_flags2_print(ahd_inb(ahd, SEQ_FLAGS2), &cur_col, 50);
	ahd_qfreeze_count_print(ahd_inw(ahd, QFREEZE_COUNT), &cur_col, 50);
	ahd_kernel_qfreeze_count_print(ahd_inw(ahd, KERNEL_QFREEZE_COUNT),
				       &cur_col, 50);
	ahd_mk_message_scb_print(ahd_inw(ahd, MK_MESSAGE_SCB), &cur_col, 50);
	ahd_mk_message_scsiid_print(ahd_inb(ahd, MK_MESSAGE_SCSIID),
				    &cur_col, 50);
	ahd_sstat0_print(ahd_inb(ahd, SSTAT0), &cur_col, 50);
	ahd_sstat1_print(ahd_inb(ahd, SSTAT1), &cur_col, 50);
	ahd_sstat2_print(ahd_inb(ahd, SSTAT2), &cur_col, 50);
	ahd_sstat3_print(ahd_inb(ahd, SSTAT3), &cur_col, 50);
	ahd_perrdiag_print(ahd_inb(ahd, PERRDIAG), &cur_col, 50);
	ahd_simode1_print(ahd_inb(ahd, SIMODE1), &cur_col, 50);
	ahd_lqistat0_print(ahd_inb(ahd, LQISTAT0), &cur_col, 50);
	ahd_lqistat1_print(ahd_inb(ahd, LQISTAT1), &cur_col, 50);
	ahd_lqistat2_print(ahd_inb(ahd, LQISTAT2), &cur_col, 50);
	ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50);
	ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50);
	ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50);
	printf("\n");
	printf("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 0x%x "
	       "CURRSCB 0x%x NEXTSCB 0x%x\n",
	       ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING),
	       ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB),
	       ahd_inw(ahd, NEXTSCB));
	cur_col = 0;
	/* QINFIFO */
	ahd_search_qinfifo(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
			   CAM_LUN_WILDCARD, SCB_LIST_NULL,
			   ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT);
	saved_scb_index = ahd_get_scbptr(ahd);
	printf("Pending list:");
	i = 0;
	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
		if (i++ > AHD_SCB_MAX)
			break;
		cur_col = printf("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb),
				 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT));
		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
		ahd_scb_control_print(ahd_inb_scbram(ahd, SCB_CONTROL),
				      &cur_col, 60);
		ahd_scb_scsiid_print(ahd_inb_scbram(ahd, SCB_SCSIID),
				     &cur_col, 60);
	}
	printf("\nTotal %d\n", i);

	printf("Kernel Free SCB list: ");
	i = 0;
	TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
		struct scb *list_scb;

		/* Walk each collision chain hanging off this free SCB. */
		list_scb = scb;
		do {
			printf("%d ", SCB_GET_TAG(list_scb));
			list_scb = LIST_NEXT(list_scb, collision_links);
		} while (list_scb && i++ < AHD_SCB_MAX);
	}

	LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
		if (i++ > AHD_SCB_MAX)
			break;
		printf("%d ", SCB_GET_TAG(scb));
	}
	printf("\n");

	/* Follow each sequencer-maintained completion list via SCB RAM. */
	printf("Sequencer Complete DMA-inprog list: ");
	scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD);
	i = 0;
	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
		ahd_set_scbptr(ahd, scb_index);
		printf("%d ", scb_index);
		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
	}
	printf("\n");

	printf("Sequencer Complete list: ");
	scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD);
	i = 0;
	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
		ahd_set_scbptr(ahd, scb_index);
		printf("%d ", scb_index);
		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
	}
	printf("\n");

	printf("Sequencer DMA-Up and Complete list: ");
	scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
	i = 0;
	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
		ahd_set_scbptr(ahd, scb_index);
		printf("%d ", scb_index);
		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
	}
	printf("\n");
	printf("Sequencer On QFreeze and Complete list: ");
	scb_index = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD);
	i = 0;
	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
		ahd_set_scbptr(ahd, scb_index);
		printf("%d ", scb_index);
		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
	}
	printf("\n");
	ahd_set_scbptr(ahd, saved_scb_index);
	dffstat = ahd_inb(ahd, DFFSTAT);
	/* Dump per-FIFO (data channel) state for both FIFOs. */
	for (i = 0; i < 2; i++) {
#ifdef AHD_DEBUG
		struct scb *fifo_scb;
#endif
		u_int fifo_scbptr;

		ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
		fifo_scbptr = ahd_get_scbptr(ahd);
		printf("\n\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n",
		       ahd_name(ahd), i,
		       (dffstat & (FIFO0FREE << i)) ? "Free" : "Active",
		       ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr);
		cur_col = 0;
		ahd_seqimode_print(ahd_inb(ahd, SEQIMODE), &cur_col, 50);
		ahd_seqintsrc_print(ahd_inb(ahd, SEQINTSRC), &cur_col, 50);
		ahd_dfcntrl_print(ahd_inb(ahd, DFCNTRL), &cur_col, 50);
		ahd_dfstatus_print(ahd_inb(ahd, DFSTATUS), &cur_col, 50);
		ahd_sg_cache_shadow_print(ahd_inb(ahd, SG_CACHE_SHADOW),
					  &cur_col, 50);
		ahd_sg_state_print(ahd_inb(ahd, SG_STATE), &cur_col, 50);
		ahd_dffsxfrctl_print(ahd_inb(ahd, DFFSXFRCTL), &cur_col, 50);
		ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50);
		ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50);
		if (cur_col > 50) {
			printf("\n");
			cur_col = 0;
		}
		cur_col += printf("SHADDR = 0x%x%x, SHCNT = 0x%x ",
				  ahd_inl(ahd, SHADDR+4),
				  ahd_inl(ahd, SHADDR),
				  (ahd_inb(ahd, SHCNT)
				| (ahd_inb(ahd, SHCNT + 1) << 8)
				| (ahd_inb(ahd, SHCNT + 2) << 16)));
		if (cur_col > 50) {
			printf("\n");
			cur_col = 0;
		}
		cur_col += printf("HADDR = 0x%x%x, HCNT = 0x%x ",
				  ahd_inl(ahd, HADDR+4),
				  ahd_inl(ahd, HADDR),
				  (ahd_inb(ahd, HCNT)
				| (ahd_inb(ahd, HCNT + 1) << 8)
				| (ahd_inb(ahd, HCNT + 2) << 16)));
		ahd_ccsgctl_print(ahd_inb(ahd, CCSGCTL), &cur_col, 50);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_SG) != 0) {
			fifo_scb = ahd_lookup_scb(ahd, fifo_scbptr);
			if (fifo_scb != NULL)
				ahd_dump_sglist(fifo_scb);
		}
#endif
	}
	printf("\nLQIN: ");
	for (i = 0; i < 20; i++)
		printf("0x%x ", ahd_inb(ahd, LQIN + i));
	printf("\n");
	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
	printf("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n",
	       ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE),
	       ahd_inb(ahd, OPTIONMODE));
	printf("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n",
	       ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT),
	       ahd_inb(ahd, MAXCMDCNT));
	printf("%s: SAVED_SCSIID = 0x%x SAVED_LUN = 0x%x\n",
	       ahd_name(ahd), ahd_inb(ahd, SAVED_SCSIID),
	       ahd_inb(ahd, SAVED_LUN));
	ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50);
	printf("\n");
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
	cur_col = 0;
ahd_ccscbctl_print(ahd_inb(ahd, CCSCBCTL), &cur_col, 50);
	printf("\n");
	ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
	printf("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n",
	       ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX),
	       ahd_inw(ahd, DINDEX));
	printf("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n",
	       ahd_name(ahd), ahd_get_scbptr(ahd),
	       ahd_inw_scbram(ahd, SCB_NEXT),
	       ahd_inw_scbram(ahd, SCB_NEXT2));
	printf("CDB %x %x %x %x %x %x\n",
	       ahd_inb_scbram(ahd, SCB_CDB_STORE),
	       ahd_inb_scbram(ahd, SCB_CDB_STORE+1),
	       ahd_inb_scbram(ahd, SCB_CDB_STORE+2),
	       ahd_inb_scbram(ahd, SCB_CDB_STORE+3),
	       ahd_inb_scbram(ahd, SCB_CDB_STORE+4),
	       ahd_inb_scbram(ahd, SCB_CDB_STORE+5));
	printf("STACK:");
	/*
	 * Reading STACK pops entries, so save them and push them
	 * back afterwards to leave the sequencer stack unchanged.
	 */
	for (i = 0; i < ahd->stack_size; i++) {
		ahd->saved_stack[i] =
		    ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8);
		printf(" 0x%x", ahd->saved_stack[i]);
	}
	for (i = ahd->stack_size-1; i >= 0; i--) {
		ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF);
		ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF);
	}
	printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
	ahd_restore_modes(ahd, saved_modes);
	if (paused == 0)
		ahd_unpause(ahd);
}

#if 0
/* Debug helper: print key fields of every SCB in SCB RAM. */
void
ahd_dump_scbs(struct ahd_softc *ahd)
{
	ahd_mode_state saved_modes;
	u_int	       saved_scb_index;
	int	       i;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	saved_scb_index = ahd_get_scbptr(ahd);
	for (i = 0; i < AHD_SCB_MAX; i++) {
		ahd_set_scbptr(ahd, i);
		printf("%3d", i);
		printf("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n",
		       ahd_inb_scbram(ahd, SCB_CONTROL),
		       ahd_inb_scbram(ahd, SCB_SCSIID),
		       ahd_inw_scbram(ahd, SCB_NEXT),
		       ahd_inw_scbram(ahd, SCB_NEXT2),
		       ahd_inl_scbram(ahd, SCB_SGPTR),
		       ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR));
	}
	printf("\n");
	ahd_set_scbptr(ahd, saved_scb_index);
	ahd_restore_modes(ahd, saved_modes);
}
#endif  /* 0 */

/**************************** Flexport Logic **********************************/
/*
 * Read count 16bit words from 16bit word address start_addr
 from the
 * SEEPROM attached to the controller, into buf, using the controller's
 * SEEPROM reading state machine.  Optionally treat the data as a byte
 * stream in terms of byte order.
 */
int
ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf,
		 u_int start_addr, u_int count, int bytestream)
{
	u_int cur_addr;
	u_int end_addr;
	int   error;

	/*
	 * If we never make it through the loop even once,
	 * we were passed invalid arguments.
	 */
	error = EINVAL;
	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	end_addr = start_addr + count;
	for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {

		ahd_outb(ahd, SEEADR, cur_addr);
		ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART);
		
		error = ahd_wait_seeprom(ahd);
		if (error)
			break;
		if (bytestream != 0) {
			uint8_t *bytestream_ptr;

			/* Preserve on-device byte order in the output. */
			bytestream_ptr = (uint8_t *)buf;
			*bytestream_ptr++ = ahd_inb(ahd, SEEDAT);
			*bytestream_ptr = ahd_inb(ahd, SEEDAT+1);
		} else {
			/*
			 * ahd_inw() already handles machine byte order.
			 */
			*buf = ahd_inw(ahd, SEEDAT);
		}
		buf++;
	}
	return (error);
}

/*
 * Write count 16bit words from buf, into SEEPROM attached to the
 * controller starting at 16bit word address start_addr, using the
 * controller's SEEPROM writing state machine.
 */
int
ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf,
		  u_int start_addr, u_int count)
{
	u_int cur_addr;
	u_int end_addr;
	int   error;
	int   retval;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	error = ENOENT;

	/* Place the chip into write-enable mode */
	ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR);
	ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART);
	error = ahd_wait_seeprom(ahd);
	if (error)
		return (error);

	/*
	 * Write the data.  If we don't get through the loop at
	 * least once, the arguments were invalid.
	 */
	retval = EINVAL;
	end_addr = start_addr + count;
	for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {
		ahd_outw(ahd, SEEDAT, *buf++);
		ahd_outb(ahd, SEEADR, cur_addr);
		ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART);

		retval = ahd_wait_seeprom(ahd);
		if (retval)
			break;
	}

	/*
	 * Disable writes.
	 */
	ahd_outb(ahd, SEEADR, SEEOP_EWDS_ADDR);
	ahd_outb(ahd, SEECTL, SEEOP_EWDS | SEESTART);
	error = ahd_wait_seeprom(ahd);
	if (error)
		return (error);
	return (retval);
}

/*
 * Wait ~100us for the serial eeprom to satisfy our request.
 */
static int
ahd_wait_seeprom(struct ahd_softc *ahd)
{
	int cnt;

	cnt = 5000;
	while ((ahd_inb(ahd, SEESTAT) & (SEEARBACK|SEEBUSY)) != 0 && --cnt)
		ahd_delay(5);

	if (cnt == 0)
		return (ETIMEDOUT);
	return (0);
}

/*
 * Validate the two checksums in the per_channel
 * vital product data struct.
 */
static int
ahd_verify_vpd_cksum(struct vpd_config *vpd)
{
	int i;
	int maxaddr;
	uint32_t checksum;
	uint8_t *vpdarray;

	vpdarray = (uint8_t *)vpd;
	/* First checksum covers resource_type..vpd_checksum-1. */
	maxaddr = offsetof(struct vpd_config, vpd_checksum);
	checksum = 0;
	for (i = offsetof(struct vpd_config, resource_type); i < maxaddr; i++)
		checksum = checksum + vpdarray[i];
	if (checksum == 0
	 || (-checksum & 0xFF) != vpd->vpd_checksum)
		return (0);

	/* Second checksum covers default_target_flags..checksum-1. */
	checksum = 0;
	maxaddr = offsetof(struct vpd_config, checksum);
	for (i = offsetof(struct vpd_config, default_target_flags);
	     i < maxaddr; i++)
		checksum = checksum + vpdarray[i];
	if (checksum == 0
	 || (-checksum & 0xFF) != vpd->checksum)
		return (0);
	return (1);
}

/*
 * Verify the additive checksum over the serial EEPROM configuration.
 * Returns 1 on a valid checksum, 0 otherwise.
 */
int
ahd_verify_cksum(struct seeprom_config *sc)
{
	int i;
	int maxaddr;
	uint32_t checksum;
	uint16_t *scarray;

	maxaddr = (sizeof(*sc)/2) - 1;
	checksum = 0;
	scarray = (uint16_t *)sc;

	for (i = 0; i < maxaddr; i++)
		checksum = checksum + scarray[i];
	if (checksum == 0
	 || (checksum & 0xFFFF) != sc->checksum) {
		return (0);
	} else {
		return (1);
	}
}

int
ahd_acquire_seeprom(struct ahd_softc *ahd)
{
	/*
	 * We should be able to determine the SEEPROM type
	 * from the flexport logic, but unfortunately not
	 * all implementations have this logic
 and there is
	 * no programmatic method for determining if the logic
	 * is present.
	 */
	return (1);
#if 0
	uint8_t	seetype;
	int	error;

	error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype);
	if (error != 0
	 || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE))
		return (0);
	return (1);
#endif
}

void
ahd_release_seeprom(struct ahd_softc *ahd)
{
	/* Currently a no-op */
}

/*
 * Wait at most 2 seconds for flexport arbitration to succeed.
 */
static int
ahd_wait_flexport(struct ahd_softc *ahd)
{
	int cnt;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	cnt = 1000000 * 2 / 5;
	while ((ahd_inb(ahd, BRDCTL) & FLXARBACK) == 0 && --cnt)
		ahd_delay(5);

	if (cnt == 0)
		return (ETIMEDOUT);
	return (0);
}

/*
 * Write one byte to flexport address addr (0-7).  Strobes BRDSTB to
 * latch the data.  Returns 0 on success or ETIMEDOUT from arbitration.
 */
int
ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value)
{
	int error;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	if (addr > 7)
		panic("ahd_write_flexport: address out of range");
	ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3));
	error = ahd_wait_flexport(ahd);
	if (error != 0)
		return (error);
	ahd_outb(ahd, BRDDAT, value);
	ahd_flush_device_writes(ahd);
	ahd_outb(ahd, BRDCTL, BRDSTB|BRDEN|(addr << 3));
	ahd_flush_device_writes(ahd);
	ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3));
	ahd_flush_device_writes(ahd);
	ahd_outb(ahd, BRDCTL, 0);
	ahd_flush_device_writes(ahd);
	return (0);
}

/*
 * Read one byte from flexport address addr (0-7) into *value.
 * Returns 0 on success or ETIMEDOUT from arbitration.
 */
int
ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value)
{
	int	error;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	if (addr > 7)
		panic("ahd_read_flexport: address out of range");
	ahd_outb(ahd, BRDCTL, BRDRW|BRDEN|(addr << 3));
	error = ahd_wait_flexport(ahd);
	if (error != 0)
		return (error);
	*value = ahd_inb(ahd, BRDDAT);
	ahd_outb(ahd, BRDCTL, 0);
	ahd_flush_device_writes(ahd);
	return (0);
}

/************************* Target Mode ****************************************/
#ifdef AHD_TARGET_MODE
/*
 * Resolve a CCB's target/lun to the matching tstate/lstate pair,
 * honoring the wildcard "black hole" device.
 */
cam_status
ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb,
		    struct ahd_tmode_tstate **tstate,
		    struct ahd_tmode_lstate
 **lstate,
		    int notfound_failure)
{

	if ((ahd->features & AHD_TARGETMODE) == 0)
		return (CAM_REQ_INVALID);

	/*
	 * Handle the 'black hole' device that sucks up
	 * requests to unattached luns on enabled targets.
	 */
	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		*tstate = NULL;
		*lstate = ahd->black_hole;
	} else {
		u_int max_id;

		max_id = (ahd->features & AHD_WIDE) ? 16 : 8;
		if (ccb->ccb_h.target_id >= max_id)
			return (CAM_TID_INVALID);

		if (ccb->ccb_h.target_lun >= AHD_NUM_LUNS)
			return (CAM_LUN_INVALID);

		*tstate = ahd->enabled_targets[ccb->ccb_h.target_id];
		*lstate = NULL;
		if (*tstate != NULL)
			*lstate =
			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
	}

	if (notfound_failure != 0 && *lstate == NULL)
		return (CAM_PATH_INVALID);

	return (CAM_REQ_CMP);
}

/*
 * Enable or disable a lun for target mode operation based on the
 * XPT_EN_LUN CCB.  Entire body is compiled out unless NOT_YET is set.
 */
void
ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
{
#if NOT_YET
	struct	   ahd_tmode_tstate *tstate;
	struct	   ahd_tmode_lstate *lstate;
	struct	   ccb_en_lun *cel;
	cam_status status;
	u_int	   target;
	u_int	   lun;
	u_int	   target_mask;
	u_long	   s;
	char	   channel;

	status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate,
				     /*notfound_failure*/FALSE);

	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	if ((ahd->features & AHD_MULTIROLE) != 0) {
		u_int	   our_id;

		our_id = ahd->our_id;
		if (ccb->ccb_h.target_id != our_id) {
			if ((ahd->features & AHD_MULTI_TID) != 0
			 && (ahd->flags & AHD_INITIATORROLE) != 0) {
				/*
				 * Only allow additional targets if
				 * the initiator role is disabled.
				 * The hardware cannot handle a re-select-in
				 * on the initiator id during a re-select-out
				 * on a different target id.
				 */
				status = CAM_TID_INVALID;
			} else if ((ahd->flags & AHD_INITIATORROLE) != 0
				|| ahd->enabled_luns > 0) {
				/*
				 * Only allow our target id to change
				 * if the initiator role is not configured
				 * and there are no enabled luns which
				 * are attached to the currently registered
				 * scsi id.
				 */
				status = CAM_TID_INVALID;
			}
		}
	}

	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	/*
	 * We now have an id that is valid.
	 * If we aren't in target mode, switch modes.
	 */
	if ((ahd->flags & AHD_TARGETROLE) == 0
	 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
		u_long	s;

		printf("Configuring Target Mode\n");
		ahd_lock(ahd, &s);
		if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
			ccb->ccb_h.status = CAM_BUSY;
			ahd_unlock(ahd, &s);
			return;
		}
		ahd->flags |= AHD_TARGETROLE;
		if ((ahd->features & AHD_MULTIROLE) == 0)
			ahd->flags &= ~AHD_INITIATORROLE;
		/* Role change requires a full firmware reload. */
		ahd_pause(ahd);
		ahd_loadseq(ahd);
		ahd_restart(ahd);
		ahd_unlock(ahd, &s);
	}
	cel = &ccb->cel;
	target = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;
	channel = SIM_CHANNEL(ahd, sim);
	target_mask = 0x01 << target;
	if (channel == 'B')
		target_mask <<= 8;

	if (cel->enable != 0) {
		u_int scsiseq1;

		/* Are we already enabled?? */
		if (lstate != NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Lun already enabled\n");
			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
			return;
		}

		if (cel->grp6_len != 0
		 || cel->grp7_len != 0) {
			/*
			 * Don't (yet?) support vendor
			 * specific commands.
			 */
			ccb->ccb_h.status = CAM_REQ_INVALID;
			printf("Non-zero Group Codes\n");
			return;
		}

		/*
		 * Seems to be okay.
		 * Setup our data structures.
		 */
		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
			tstate = ahd_alloc_tstate(ahd, target, channel);
			if (tstate == NULL) {
				xpt_print_path(ccb->ccb_h.path);
				printf("Couldn't allocate tstate\n");
				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
				return;
			}
		}
		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
		if (lstate == NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate lstate\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		memset(lstate, 0, sizeof(*lstate));
		status = xpt_create_path(&lstate->path, /*periph*/NULL,
					 xpt_path_path_id(ccb->ccb_h.path),
					 xpt_path_target_id(ccb->ccb_h.path),
					 xpt_path_lun_id(ccb->ccb_h.path));
		if (status != CAM_REQ_CMP) {
			free(lstate, M_DEVBUF);
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate path\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		SLIST_INIT(&lstate->accept_tios);
		SLIST_INIT(&lstate->immed_notifies);
		ahd_lock(ahd, &s);
		ahd_pause(ahd);
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = lstate;
			ahd->enabled_luns++;

			if ((ahd->features & AHD_MULTI_TID) != 0) {
				u_int targid_mask;

				targid_mask = ahd_inw(ahd, TARGID);
				targid_mask |= target_mask;
				ahd_outw(ahd, TARGID, targid_mask);
				ahd_update_scsiid(ahd, targid_mask);
			} else {
				u_int our_id;
				char  channel;

				channel = SIM_CHANNEL(ahd, sim);
				our_id = SIM_SCSI_ID(ahd, sim);

				/*
				 * This can only happen if selections
				 * are not enabled
				 */
				if (target != our_id) {
					u_int sblkctl;
					char  cur_channel;
					int   swap;

					sblkctl = ahd_inb(ahd, SBLKCTL);
					cur_channel = (sblkctl & SELBUSB)
						    ? 'B' : 'A';
					if ((ahd->features & AHD_TWIN) == 0)
						cur_channel = 'A';
					swap = cur_channel != channel;
					ahd->our_id = target;

					if (swap)
						ahd_outb(ahd, SBLKCTL,
							 sblkctl ^ SELBUSB);

					ahd_outb(ahd, SCSIID, target);

					if (swap)
						ahd_outb(ahd, SBLKCTL, sblkctl);
				}
			}
		} else
			ahd->black_hole = lstate;
		/* Allow select-in operations */
		if (ahd->black_hole != NULL && ahd->enabled_luns > 0) {
			scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
			scsiseq1 |= ENSELI;
			ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
			scsiseq1 = ahd_inb(ahd, SCSISEQ1);
			scsiseq1 |= ENSELI;
			ahd_outb(ahd, SCSISEQ1, scsiseq1);
		}
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_print_path(ccb->ccb_h.path);
		printf("Lun now enabled for target mode\n");
	} else {
		struct scb *scb;
		int i, empty;

		if (lstate == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}

		ahd_lock(ahd, &s);
		
		ccb->ccb_h.status = CAM_REQ_CMP;
		LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
			struct ccb_hdr *ccbh;

			ccbh = &scb->io_ctx->ccb_h;
			if (ccbh->func_code == XPT_CONT_TARGET_IO
			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
				printf("CTIO pending\n");
				ccb->ccb_h.status = CAM_REQ_INVALID;
				ahd_unlock(ahd, &s);
				return;
			}
		}

		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
			printf("ATIOs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
			printf("INOTs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			ahd_unlock(ahd, &s);
			return;
		}

		xpt_print_path(ccb->ccb_h.path);
		printf("Target mode disabled\n");
		xpt_free_path(lstate->path);
		free(lstate, M_DEVBUF);

		ahd_pause(ahd);
		/* Can we clean up the target too? */
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = NULL;
			ahd->enabled_luns--;
			for (empty = 1, i = 0; i < 8; i++)
				if (tstate->enabled_luns[i] != NULL) {
					empty = 0;
					break;
				}

			if (empty) {
				ahd_free_tstate(ahd, target, channel,
						/*force*/FALSE);
				if (ahd->features & AHD_MULTI_TID) {
					u_int targid_mask;

					targid_mask = ahd_inw(ahd, TARGID);
					targid_mask &= ~target_mask;
					ahd_outw(ahd, TARGID, targid_mask);
					ahd_update_scsiid(ahd, targid_mask);
				}
			}
		} else {

			ahd->black_hole = NULL;

			/*
			 * We can't allow selections without
			 * our black hole device.
			 */
			empty = TRUE;
		}
		if (ahd->enabled_luns == 0) {
			/* Disallow select-in */
			u_int scsiseq1;

			scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
			scsiseq1 &= ~ENSELI;
			ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
			scsiseq1 = ahd_inb(ahd, SCSISEQ1);
			scsiseq1 &= ~ENSELI;
			ahd_outb(ahd, SCSISEQ1, scsiseq1);

			if ((ahd->features & AHD_MULTIROLE) == 0) {
				printf("Configuring Initiator Mode\n");
				ahd->flags &= ~AHD_TARGETROLE;
				ahd->flags |= AHD_INITIATORROLE;
				ahd_pause(ahd);
				ahd_loadseq(ahd);
				ahd_restart(ahd);
				/*
				 * Unpaused.  The extra unpause
				 * that follows is harmless.
				 */
			}
		}
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
	}
#endif
}

/*
 * Keep the SCSIID "our id" consistent with the TARGID selection-enable
 * mask after a target id is added or removed.  Compiled out (NOT_YET).
 */
static void
ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask)
{
#if NOT_YET
	u_int scsiid_mask;
	u_int scsiid;

	if ((ahd->features & AHD_MULTI_TID) == 0)
		panic("ahd_update_scsiid called on non-multitid unit\n");

	/*
	 * Since we will rely on the TARGID mask
	 * for selection enables, ensure that OID
	 * in SCSIID is not set to some other ID
	 * that we don't want to allow selections on.
*/ if ((ahd->features & AHD_ULTRA2) != 0) scsiid = ahd_inb(ahd, SCSIID_ULTRA2); else scsiid = ahd_inb(ahd, SCSIID); scsiid_mask = 0x1 << (scsiid & OID); if ((targid_mask & scsiid_mask) == 0) { u_int our_id; /* ffs counts from 1 */ our_id = ffs(targid_mask); if (our_id == 0) our_id = ahd->our_id; else our_id--; scsiid &= TID; scsiid |= our_id; } if ((ahd->features & AHD_ULTRA2) != 0) ahd_outb(ahd, SCSIID_ULTRA2, scsiid); else ahd_outb(ahd, SCSIID, scsiid); #endif } void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused) { struct target_cmd *cmd; ahd_sync_tqinfifo(ahd, BUS_DMASYNC_POSTREAD); while ((cmd = &ahd->targetcmds[ahd->tqinfifonext])->cmd_valid != 0) { /* * Only advance through the queue if we * have the resources to process the command. */ if (ahd_handle_target_cmd(ahd, cmd) != 0) break; cmd->cmd_valid = 0; ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, ahd_targetcmd_offset(ahd, ahd->tqinfifonext), sizeof(struct target_cmd), BUS_DMASYNC_PREREAD); ahd->tqinfifonext++; /* * Lazily update our position in the target mode incoming * command queue as seen by the sequencer. */ if ((ahd->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { u_int hs_mailbox; hs_mailbox = ahd_inb(ahd, HS_MAILBOX); hs_mailbox &= ~HOST_TQINPOS; hs_mailbox |= ahd->tqinfifonext & HOST_TQINPOS; ahd_outb(ahd, HS_MAILBOX, hs_mailbox); } } } static int ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd) { struct ahd_tmode_tstate *tstate; struct ahd_tmode_lstate *lstate; struct ccb_accept_tio *atio; uint8_t *byte; int initiator; int target; int lun; initiator = SCSIID_TARGET(ahd, cmd->scsiid); target = SCSIID_OUR_ID(cmd->scsiid); lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); byte = cmd->bytes; tstate = ahd->enabled_targets[target]; lstate = NULL; if (tstate != NULL) lstate = tstate->enabled_luns[lun]; /* * Commands for disabled luns go to the black hole driver. 
*/ if (lstate == NULL) lstate = ahd->black_hole; atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); if (atio == NULL) { ahd->flags |= AHD_TQINFIFO_BLOCKED; /* * Wait for more ATIOs from the peripheral driver for this lun. */ return (1); } else ahd->flags &= ~AHD_TQINFIFO_BLOCKED; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_TQIN) != 0) printf("Incoming command from %d for %d:%d%s\n", initiator, target, lun, lstate == ahd->black_hole ? "(Black Holed)" : ""); #endif SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); if (lstate == ahd->black_hole) { /* Fill in the wildcards */ atio->ccb_h.target_id = target; atio->ccb_h.target_lun = lun; } /* * Package it up and send it off to * whomever has this lun enabled. */ atio->sense_len = 0; atio->init_id = initiator; if (byte[0] != 0xFF) { /* Tag was included */ atio->tag_action = *byte++; atio->tag_id = *byte++; atio->ccb_h.flags = CAM_TAG_ACTION_VALID; } else { atio->ccb_h.flags = 0; } byte++; /* Okay. Now determine the cdb size based on the command code */ switch (*byte >> CMD_GROUP_CODE_SHIFT) { case 0: atio->cdb_len = 6; break; case 1: case 2: atio->cdb_len = 10; break; case 4: atio->cdb_len = 16; break; case 5: atio->cdb_len = 12; break; case 3: default: /* Only copy the opcode. */ atio->cdb_len = 1; printf("Reserved or VU command code type encountered\n"); break; } memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); atio->ccb_h.status |= CAM_CDB_RECVD; if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { /* * We weren't allowed to disconnect. * We're hanging on the bus until a * continue target I/O comes in response * to this accept tio. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_TQIN) != 0) printf("Received Immediate Command %d:%d:%d - %p\n", initiator, target, lun, ahd->pending_device); #endif ahd->pending_device = lstate; ahd_freeze_ccb((union ccb *)atio); atio->ccb_h.flags |= CAM_DIS_DISCONNECT; } xpt_done((union ccb*)atio); return (0); } #endif
gpl-2.0
x86-8/linux-3.7
net/ipv6/ip6_fib.c
44
36797
/* * Linux INET6 implementation * Forwarding Information Database * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* * Changes: * Yuji SEKIYA @USAGI: Support default route on router node; * remove ip6_null_entry from the top of * routing table. * Ville Nuorvala: Fixed routing subtrees. */ #define pr_fmt(fmt) "IPv6: " fmt #include <linux/errno.h> #include <linux/types.h> #include <linux/net.h> #include <linux/route.h> #include <linux/netdevice.h> #include <linux/in6.h> #include <linux/init.h> #include <linux/list.h> #include <linux/slab.h> #include <net/ipv6.h> #include <net/ndisc.h> #include <net/addrconf.h> #include <net/ip6_fib.h> #include <net/ip6_route.h> #define RT6_DEBUG 2 #if RT6_DEBUG >= 3 #define RT6_TRACE(x...) pr_debug(x) #else #define RT6_TRACE(x...) do { ; } while (0) #endif static struct kmem_cache * fib6_node_kmem __read_mostly; enum fib_walk_state_t { #ifdef CONFIG_IPV6_SUBTREES FWS_S, #endif FWS_L, FWS_R, FWS_C, FWS_U }; struct fib6_cleaner_t { struct fib6_walker_t w; struct net *net; int (*func)(struct rt6_info *, void *arg); void *arg; }; static DEFINE_RWLOCK(fib6_walker_lock); #ifdef CONFIG_IPV6_SUBTREES #define FWS_INIT FWS_S #else #define FWS_INIT FWS_L #endif static void fib6_prune_clones(struct net *net, struct fib6_node *fn, struct rt6_info *rt); static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn); static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn); static int fib6_walk(struct fib6_walker_t *w); static int fib6_walk_continue(struct fib6_walker_t *w); /* * A routing update causes an increase of the serial number on the * affected subtree. 
This allows for cached routes to be asynchronously * tested when modifications are made to the destination cache as a * result of redirects, path MTU changes, etc. */ static __u32 rt_sernum; static void fib6_gc_timer_cb(unsigned long arg); static LIST_HEAD(fib6_walkers); #define FOR_WALKERS(w) list_for_each_entry(w, &fib6_walkers, lh) static inline void fib6_walker_link(struct fib6_walker_t *w) { write_lock_bh(&fib6_walker_lock); list_add(&w->lh, &fib6_walkers); write_unlock_bh(&fib6_walker_lock); } static inline void fib6_walker_unlink(struct fib6_walker_t *w) { write_lock_bh(&fib6_walker_lock); list_del(&w->lh); write_unlock_bh(&fib6_walker_lock); } static __inline__ u32 fib6_new_sernum(void) { u32 n = ++rt_sernum; if ((__s32)n <= 0) rt_sernum = n = 1; return n; } /* * Auxiliary address test functions for the radix tree. * * These assume a 32bit processor (although it will work on * 64bit processors) */ /* * test bit */ #if defined(__LITTLE_ENDIAN) # define BITOP_BE32_SWIZZLE (0x1F & ~7) #else # define BITOP_BE32_SWIZZLE 0 #endif static __inline__ __be32 addr_bit_set(const void *token, int fn_bit) { const __be32 *addr = token; /* * Here, * 1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f) * is optimized version of * htonl(1 << ((~fn_bit)&0x1F)) * See include/asm-generic/bitops/le.h. */ return (__force __be32)(1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) & addr[fn_bit >> 5]; } static __inline__ struct fib6_node * node_alloc(void) { struct fib6_node *fn; fn = kmem_cache_zalloc(fib6_node_kmem, GFP_ATOMIC); return fn; } static __inline__ void node_free(struct fib6_node * fn) { kmem_cache_free(fib6_node_kmem, fn); } static __inline__ void rt6_release(struct rt6_info *rt) { if (atomic_dec_and_test(&rt->rt6i_ref)) dst_free(&rt->dst); } static void fib6_link_table(struct net *net, struct fib6_table *tb) { unsigned int h; /* * Initialize table lock at a single place to give lockdep a key, * tables aren't visible prior to being linked to the list. 
*/ rwlock_init(&tb->tb6_lock); h = tb->tb6_id & (FIB6_TABLE_HASHSZ - 1); /* * No protection necessary, this is the only list mutatation * operation, tables never disappear once they exist. */ hlist_add_head_rcu(&tb->tb6_hlist, &net->ipv6.fib_table_hash[h]); } #ifdef CONFIG_IPV6_MULTIPLE_TABLES static struct fib6_table *fib6_alloc_table(struct net *net, u32 id) { struct fib6_table *table; table = kzalloc(sizeof(*table), GFP_ATOMIC); if (table) { table->tb6_id = id; table->tb6_root.leaf = net->ipv6.ip6_null_entry; table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; inet_peer_base_init(&table->tb6_peers); } return table; } struct fib6_table *fib6_new_table(struct net *net, u32 id) { struct fib6_table *tb; if (id == 0) id = RT6_TABLE_MAIN; tb = fib6_get_table(net, id); if (tb) return tb; tb = fib6_alloc_table(net, id); if (tb) fib6_link_table(net, tb); return tb; } struct fib6_table *fib6_get_table(struct net *net, u32 id) { struct fib6_table *tb; struct hlist_head *head; struct hlist_node *node; unsigned int h; if (id == 0) id = RT6_TABLE_MAIN; h = id & (FIB6_TABLE_HASHSZ - 1); rcu_read_lock(); head = &net->ipv6.fib_table_hash[h]; hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) { if (tb->tb6_id == id) { rcu_read_unlock(); return tb; } } rcu_read_unlock(); return NULL; } static void __net_init fib6_tables_init(struct net *net) { fib6_link_table(net, net->ipv6.fib6_main_tbl); fib6_link_table(net, net->ipv6.fib6_local_tbl); } #else struct fib6_table *fib6_new_table(struct net *net, u32 id) { return fib6_get_table(net, id); } struct fib6_table *fib6_get_table(struct net *net, u32 id) { return net->ipv6.fib6_main_tbl; } struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, int flags, pol_lookup_t lookup) { return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl6, flags); } static void __net_init fib6_tables_init(struct net *net) { fib6_link_table(net, net->ipv6.fib6_main_tbl); } #endif static int fib6_dump_node(struct 
fib6_walker_t *w) { int res; struct rt6_info *rt; for (rt = w->leaf; rt; rt = rt->dst.rt6_next) { res = rt6_dump_route(rt, w->args); if (res < 0) { /* Frame is full, suspend walking */ w->leaf = rt; return 1; } WARN_ON(res == 0); } w->leaf = NULL; return 0; } static void fib6_dump_end(struct netlink_callback *cb) { struct fib6_walker_t *w = (void*)cb->args[2]; if (w) { if (cb->args[4]) { cb->args[4] = 0; fib6_walker_unlink(w); } cb->args[2] = 0; kfree(w); } cb->done = (void*)cb->args[3]; cb->args[1] = 3; } static int fib6_dump_done(struct netlink_callback *cb) { fib6_dump_end(cb); return cb->done ? cb->done(cb) : 0; } static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb, struct netlink_callback *cb) { struct fib6_walker_t *w; int res; w = (void *)cb->args[2]; w->root = &table->tb6_root; if (cb->args[4] == 0) { w->count = 0; w->skip = 0; read_lock_bh(&table->tb6_lock); res = fib6_walk(w); read_unlock_bh(&table->tb6_lock); if (res > 0) { cb->args[4] = 1; cb->args[5] = w->root->fn_sernum; } } else { if (cb->args[5] != w->root->fn_sernum) { /* Begin at the root if the tree changed */ cb->args[5] = w->root->fn_sernum; w->state = FWS_INIT; w->node = w->root; w->skip = w->count; } else w->skip = 0; read_lock_bh(&table->tb6_lock); res = fib6_walk_continue(w); read_unlock_bh(&table->tb6_lock); if (res <= 0) { fib6_walker_unlink(w); cb->args[4] = 0; } } return res; } static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); unsigned int h, s_h; unsigned int e = 0, s_e; struct rt6_rtnl_dump_arg arg; struct fib6_walker_t *w; struct fib6_table *tb; struct hlist_node *node; struct hlist_head *head; int res = 0; s_h = cb->args[0]; s_e = cb->args[1]; w = (void *)cb->args[2]; if (!w) { /* New dump: * * 1. hook callback destructor. */ cb->args[3] = (long)cb->done; cb->done = fib6_dump_done; /* * 2. allocate and initialize walker. 
*/ w = kzalloc(sizeof(*w), GFP_ATOMIC); if (!w) return -ENOMEM; w->func = fib6_dump_node; cb->args[2] = (long)w; } arg.skb = skb; arg.cb = cb; arg.net = net; w->args = &arg; rcu_read_lock(); for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) { e = 0; head = &net->ipv6.fib_table_hash[h]; hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) { if (e < s_e) goto next; res = fib6_dump_table(tb, skb, cb); if (res != 0) goto out; next: e++; } } out: rcu_read_unlock(); cb->args[1] = e; cb->args[0] = h; res = res < 0 ? res : skb->len; if (res <= 0) fib6_dump_end(cb); return res; } /* * Routing Table * * return the appropriate node for a routing tree "add" operation * by either creating and inserting or by returning an existing * node. */ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, int addrlen, int plen, int offset, int allow_create, int replace_required) { struct fib6_node *fn, *in, *ln; struct fib6_node *pn = NULL; struct rt6key *key; int bit; __be32 dir = 0; __u32 sernum = fib6_new_sernum(); RT6_TRACE("fib6_add_1\n"); /* insert node in tree */ fn = root; do { key = (struct rt6key *)((u8 *)fn->leaf + offset); /* * Prefix match */ if (plen < fn->fn_bit || !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) { if (!allow_create) { if (replace_required) { pr_warn("Can't replace route, no match found\n"); return ERR_PTR(-ENOENT); } pr_warn("NLM_F_CREATE should be set when creating new route\n"); } goto insert_above; } /* * Exact match ? */ if (plen == fn->fn_bit) { /* clean up an intermediate node */ if (!(fn->fn_flags & RTN_RTINFO)) { rt6_release(fn->leaf); fn->leaf = NULL; } fn->fn_sernum = sernum; return fn; } /* * We have more bits to go */ /* Try to walk down on tree. */ fn->fn_sernum = sernum; dir = addr_bit_set(addr, fn->fn_bit); pn = fn; fn = dir ? 
fn->right: fn->left; } while (fn); if (!allow_create) { /* We should not create new node because * NLM_F_REPLACE was specified without NLM_F_CREATE * I assume it is safe to require NLM_F_CREATE when * REPLACE flag is used! Later we may want to remove the * check for replace_required, because according * to netlink specification, NLM_F_CREATE * MUST be specified if new route is created. * That would keep IPv6 consistent with IPv4 */ if (replace_required) { pr_warn("Can't replace route, no match found\n"); return ERR_PTR(-ENOENT); } pr_warn("NLM_F_CREATE should be set when creating new route\n"); } /* * We walked to the bottom of tree. * Create new leaf node without children. */ ln = node_alloc(); if (!ln) return ERR_PTR(-ENOMEM); ln->fn_bit = plen; ln->parent = pn; ln->fn_sernum = sernum; if (dir) pn->right = ln; else pn->left = ln; return ln; insert_above: /* * split since we don't have a common prefix anymore or * we have a less significant route. * we've to insert an intermediate node on the list * this new node will point to the one we need to create * and the current */ pn = fn->parent; /* find 1st bit in difference between the 2 addrs. See comment in __ipv6_addr_diff: bit may be an invalid value, but if it is >= plen, the value is ignored in any case. */ bit = __ipv6_addr_diff(addr, &key->addr, addrlen); /* * (intermediate)[in] * / \ * (new leaf node)[ln] (old node)[fn] */ if (plen > bit) { in = node_alloc(); ln = node_alloc(); if (!in || !ln) { if (in) node_free(in); if (ln) node_free(ln); return ERR_PTR(-ENOMEM); } /* * new intermediate node. 
* RTN_RTINFO will * be off since that an address that chooses one of * the branches would not match less specific routes * in the other branch */ in->fn_bit = bit; in->parent = pn; in->leaf = fn->leaf; atomic_inc(&in->leaf->rt6i_ref); in->fn_sernum = sernum; /* update parent pointer */ if (dir) pn->right = in; else pn->left = in; ln->fn_bit = plen; ln->parent = in; fn->parent = in; ln->fn_sernum = sernum; if (addr_bit_set(addr, bit)) { in->right = ln; in->left = fn; } else { in->left = ln; in->right = fn; } } else { /* plen <= bit */ /* * (new leaf node)[ln] * / \ * (old node)[fn] NULL */ ln = node_alloc(); if (!ln) return ERR_PTR(-ENOMEM); ln->fn_bit = plen; ln->parent = pn; ln->fn_sernum = sernum; if (dir) pn->right = ln; else pn->left = ln; if (addr_bit_set(&key->addr, plen)) ln->right = fn; else ln->left = fn; fn->parent = ln; } return ln; } /* * Insert routing information in a node. */ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, struct nl_info *info) { struct rt6_info *iter = NULL; struct rt6_info **ins; int replace = (info->nlh && (info->nlh->nlmsg_flags & NLM_F_REPLACE)); int add = (!info->nlh || (info->nlh->nlmsg_flags & NLM_F_CREATE)); int found = 0; ins = &fn->leaf; for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) { /* * Search for duplicates */ if (iter->rt6i_metric == rt->rt6i_metric) { /* * Same priority level */ if (info->nlh && (info->nlh->nlmsg_flags & NLM_F_EXCL)) return -EEXIST; if (replace) { found++; break; } if (iter->dst.dev == rt->dst.dev && iter->rt6i_idev == rt->rt6i_idev && ipv6_addr_equal(&iter->rt6i_gateway, &rt->rt6i_gateway)) { if (!(iter->rt6i_flags & RTF_EXPIRES)) return -EEXIST; if (!(rt->rt6i_flags & RTF_EXPIRES)) rt6_clean_expires(iter); else rt6_set_expires(iter, rt->dst.expires); return -EEXIST; } } if (iter->rt6i_metric > rt->rt6i_metric) break; ins = &iter->dst.rt6_next; } /* Reset round-robin state, if necessary */ if (ins == &fn->leaf) fn->rr_ptr = NULL; /* * insert node */ if (!replace) { if 
(!add) pr_warn("NLM_F_CREATE should be set when creating new route\n"); add: rt->dst.rt6_next = iter; *ins = rt; rt->rt6i_node = fn; atomic_inc(&rt->rt6i_ref); inet6_rt_notify(RTM_NEWROUTE, rt, info); info->nl_net->ipv6.rt6_stats->fib_rt_entries++; if (!(fn->fn_flags & RTN_RTINFO)) { info->nl_net->ipv6.rt6_stats->fib_route_nodes++; fn->fn_flags |= RTN_RTINFO; } } else { if (!found) { if (add) goto add; pr_warn("NLM_F_REPLACE set, but no existing node found!\n"); return -ENOENT; } *ins = rt; rt->rt6i_node = fn; rt->dst.rt6_next = iter->dst.rt6_next; atomic_inc(&rt->rt6i_ref); inet6_rt_notify(RTM_NEWROUTE, rt, info); rt6_release(iter); if (!(fn->fn_flags & RTN_RTINFO)) { info->nl_net->ipv6.rt6_stats->fib_route_nodes++; fn->fn_flags |= RTN_RTINFO; } } return 0; } static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt) { if (!timer_pending(&net->ipv6.ip6_fib_timer) && (rt->rt6i_flags & (RTF_EXPIRES | RTF_CACHE))) mod_timer(&net->ipv6.ip6_fib_timer, jiffies + net->ipv6.sysctl.ip6_rt_gc_interval); } void fib6_force_start_gc(struct net *net) { if (!timer_pending(&net->ipv6.ip6_fib_timer)) mod_timer(&net->ipv6.ip6_fib_timer, jiffies + net->ipv6.sysctl.ip6_rt_gc_interval); } /* * Add routing information to the routing tree. 
* <destination addr>/<source addr> * with source addr info in sub-trees */ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) { struct fib6_node *fn, *pn = NULL; int err = -ENOMEM; int allow_create = 1; int replace_required = 0; if (info->nlh) { if (!(info->nlh->nlmsg_flags & NLM_F_CREATE)) allow_create = 0; if (info->nlh->nlmsg_flags & NLM_F_REPLACE) replace_required = 1; } if (!allow_create && !replace_required) pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n"); fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr), rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst), allow_create, replace_required); if (IS_ERR(fn)) { err = PTR_ERR(fn); goto out; } pn = fn; #ifdef CONFIG_IPV6_SUBTREES if (rt->rt6i_src.plen) { struct fib6_node *sn; if (!fn->subtree) { struct fib6_node *sfn; /* * Create subtree. * * fn[main tree] * | * sfn[subtree root] * \ * sn[new leaf node] */ /* Create subtree root node */ sfn = node_alloc(); if (!sfn) goto st_failure; sfn->leaf = info->nl_net->ipv6.ip6_null_entry; atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref); sfn->fn_flags = RTN_ROOT; sfn->fn_sernum = fib6_new_sernum(); /* Now add the first leaf node to new subtree */ sn = fib6_add_1(sfn, &rt->rt6i_src.addr, sizeof(struct in6_addr), rt->rt6i_src.plen, offsetof(struct rt6_info, rt6i_src), allow_create, replace_required); if (IS_ERR(sn)) { /* If it is failed, discard just allocated root, and then (in st_failure) stale node in main tree. 
*/ node_free(sfn); err = PTR_ERR(sn); goto st_failure; } /* Now link new subtree to main tree */ sfn->parent = fn; fn->subtree = sfn; } else { sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr, sizeof(struct in6_addr), rt->rt6i_src.plen, offsetof(struct rt6_info, rt6i_src), allow_create, replace_required); if (IS_ERR(sn)) { err = PTR_ERR(sn); goto st_failure; } } if (!fn->leaf) { fn->leaf = rt; atomic_inc(&rt->rt6i_ref); } fn = sn; } #endif err = fib6_add_rt2node(fn, rt, info); if (!err) { fib6_start_gc(info->nl_net, rt); if (!(rt->rt6i_flags & RTF_CACHE)) fib6_prune_clones(info->nl_net, pn, rt); } out: if (err) { #ifdef CONFIG_IPV6_SUBTREES /* * If fib6_add_1 has cleared the old leaf pointer in the * super-tree leaf node we have to find a new one for it. */ if (pn != fn && pn->leaf == rt) { pn->leaf = NULL; atomic_dec(&rt->rt6i_ref); } if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) { pn->leaf = fib6_find_prefix(info->nl_net, pn); #if RT6_DEBUG >= 2 if (!pn->leaf) { WARN_ON(pn->leaf == NULL); pn->leaf = info->nl_net->ipv6.ip6_null_entry; } #endif atomic_inc(&pn->leaf->rt6i_ref); } #endif dst_free(&rt->dst); } return err; #ifdef CONFIG_IPV6_SUBTREES /* Subtree creation failed, probably main tree node is orphan. If it is, shoot it. */ st_failure: if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) fib6_repair_tree(info->nl_net, fn); dst_free(&rt->dst); return err; #endif } /* * Routing tree lookup * */ struct lookup_args { int offset; /* key offset on rt6_info */ const struct in6_addr *addr; /* search key */ }; static struct fib6_node * fib6_lookup_1(struct fib6_node *root, struct lookup_args *args) { struct fib6_node *fn; __be32 dir; if (unlikely(args->offset == 0)) return NULL; /* * Descend on a tree */ fn = root; for (;;) { struct fib6_node *next; dir = addr_bit_set(args->addr, fn->fn_bit); next = dir ? 
fn->right : fn->left; if (next) { fn = next; continue; } break; } while (fn) { if (FIB6_SUBTREE(fn) || fn->fn_flags & RTN_RTINFO) { struct rt6key *key; key = (struct rt6key *) ((u8 *) fn->leaf + args->offset); if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) { #ifdef CONFIG_IPV6_SUBTREES if (fn->subtree) fn = fib6_lookup_1(fn->subtree, args + 1); #endif if (!fn || fn->fn_flags & RTN_RTINFO) return fn; } } if (fn->fn_flags & RTN_ROOT) break; fn = fn->parent; } return NULL; } struct fib6_node * fib6_lookup(struct fib6_node *root, const struct in6_addr *daddr, const struct in6_addr *saddr) { struct fib6_node *fn; struct lookup_args args[] = { { .offset = offsetof(struct rt6_info, rt6i_dst), .addr = daddr, }, #ifdef CONFIG_IPV6_SUBTREES { .offset = offsetof(struct rt6_info, rt6i_src), .addr = saddr, }, #endif { .offset = 0, /* sentinel */ } }; fn = fib6_lookup_1(root, daddr ? args : args + 1); if (!fn || fn->fn_flags & RTN_TL_ROOT) fn = root; return fn; } /* * Get node with specified destination prefix (and source prefix, * if subtrees are used) */ static struct fib6_node * fib6_locate_1(struct fib6_node *root, const struct in6_addr *addr, int plen, int offset) { struct fib6_node *fn; for (fn = root; fn ; ) { struct rt6key *key = (struct rt6key *)((u8 *)fn->leaf + offset); /* * Prefix match */ if (plen < fn->fn_bit || !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) return NULL; if (plen == fn->fn_bit) return fn; /* * We have more bits to go */ if (addr_bit_set(addr, fn->fn_bit)) fn = fn->right; else fn = fn->left; } return NULL; } struct fib6_node * fib6_locate(struct fib6_node *root, const struct in6_addr *daddr, int dst_len, const struct in6_addr *saddr, int src_len) { struct fib6_node *fn; fn = fib6_locate_1(root, daddr, dst_len, offsetof(struct rt6_info, rt6i_dst)); #ifdef CONFIG_IPV6_SUBTREES if (src_len) { WARN_ON(saddr == NULL); if (fn && fn->subtree) fn = fib6_locate_1(fn->subtree, saddr, src_len, offsetof(struct rt6_info, rt6i_src)); } #endif if (fn 
&& fn->fn_flags & RTN_RTINFO) return fn; return NULL; } /* * Deletion * */ static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn) { if (fn->fn_flags & RTN_ROOT) return net->ipv6.ip6_null_entry; while (fn) { if (fn->left) return fn->left->leaf; if (fn->right) return fn->right->leaf; fn = FIB6_SUBTREE(fn); } return NULL; } /* * Called to trim the tree of intermediate nodes when possible. "fn" * is the node we want to try and remove. */ static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn) { int children; int nstate; struct fib6_node *child, *pn; struct fib6_walker_t *w; int iter = 0; for (;;) { RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter); iter++; WARN_ON(fn->fn_flags & RTN_RTINFO); WARN_ON(fn->fn_flags & RTN_TL_ROOT); WARN_ON(fn->leaf != NULL); children = 0; child = NULL; if (fn->right) child = fn->right, children |= 1; if (fn->left) child = fn->left, children |= 2; if (children == 3 || FIB6_SUBTREE(fn) #ifdef CONFIG_IPV6_SUBTREES /* Subtree root (i.e. 
fn) may have one child */ || (children && fn->fn_flags & RTN_ROOT) #endif ) { fn->leaf = fib6_find_prefix(net, fn); #if RT6_DEBUG >= 2 if (!fn->leaf) { WARN_ON(!fn->leaf); fn->leaf = net->ipv6.ip6_null_entry; } #endif atomic_inc(&fn->leaf->rt6i_ref); return fn->parent; } pn = fn->parent; #ifdef CONFIG_IPV6_SUBTREES if (FIB6_SUBTREE(pn) == fn) { WARN_ON(!(fn->fn_flags & RTN_ROOT)); FIB6_SUBTREE(pn) = NULL; nstate = FWS_L; } else { WARN_ON(fn->fn_flags & RTN_ROOT); #endif if (pn->right == fn) pn->right = child; else if (pn->left == fn) pn->left = child; #if RT6_DEBUG >= 2 else WARN_ON(1); #endif if (child) child->parent = pn; nstate = FWS_R; #ifdef CONFIG_IPV6_SUBTREES } #endif read_lock(&fib6_walker_lock); FOR_WALKERS(w) { if (!child) { if (w->root == fn) { w->root = w->node = NULL; RT6_TRACE("W %p adjusted by delroot 1\n", w); } else if (w->node == fn) { RT6_TRACE("W %p adjusted by delnode 1, s=%d/%d\n", w, w->state, nstate); w->node = pn; w->state = nstate; } } else { if (w->root == fn) { w->root = child; RT6_TRACE("W %p adjusted by delroot 2\n", w); } if (w->node == fn) { w->node = child; if (children&2) { RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state); w->state = w->state>=FWS_R ? FWS_U : FWS_INIT; } else { RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state); w->state = w->state>=FWS_C ? 
FWS_U : FWS_INIT; } } } } read_unlock(&fib6_walker_lock); node_free(fn); if (pn->fn_flags & RTN_RTINFO || FIB6_SUBTREE(pn)) return pn; rt6_release(pn->leaf); pn->leaf = NULL; fn = pn; } } static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, struct nl_info *info) { struct fib6_walker_t *w; struct rt6_info *rt = *rtp; struct net *net = info->nl_net; RT6_TRACE("fib6_del_route\n"); /* Unlink it */ *rtp = rt->dst.rt6_next; rt->rt6i_node = NULL; net->ipv6.rt6_stats->fib_rt_entries--; net->ipv6.rt6_stats->fib_discarded_routes++; /* Reset round-robin state, if necessary */ if (fn->rr_ptr == rt) fn->rr_ptr = NULL; /* Adjust walkers */ read_lock(&fib6_walker_lock); FOR_WALKERS(w) { if (w->state == FWS_C && w->leaf == rt) { RT6_TRACE("walker %p adjusted by delroute\n", w); w->leaf = rt->dst.rt6_next; if (!w->leaf) w->state = FWS_U; } } read_unlock(&fib6_walker_lock); rt->dst.rt6_next = NULL; /* If it was last route, expunge its radix tree node */ if (!fn->leaf) { fn->fn_flags &= ~RTN_RTINFO; net->ipv6.rt6_stats->fib_route_nodes--; fn = fib6_repair_tree(net, fn); } if (atomic_read(&rt->rt6i_ref) != 1) { /* This route is used as dummy address holder in some split * nodes. It is not leaked, but it still holds other resources, * which must be released in time. So, scan ascendant nodes * and replace dummy references to this route with references * to still alive ones. */ while (fn) { if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) { fn->leaf = fib6_find_prefix(net, fn); atomic_inc(&fn->leaf->rt6i_ref); rt6_release(rt); } fn = fn->parent; } /* No more references are possible at this point. 
*/ BUG_ON(atomic_read(&rt->rt6i_ref) != 1); } inet6_rt_notify(RTM_DELROUTE, rt, info); rt6_release(rt); } int fib6_del(struct rt6_info *rt, struct nl_info *info) { struct net *net = info->nl_net; struct fib6_node *fn = rt->rt6i_node; struct rt6_info **rtp; #if RT6_DEBUG >= 2 if (rt->dst.obsolete>0) { WARN_ON(fn != NULL); return -ENOENT; } #endif if (!fn || rt == net->ipv6.ip6_null_entry) return -ENOENT; WARN_ON(!(fn->fn_flags & RTN_RTINFO)); if (!(rt->rt6i_flags & RTF_CACHE)) { struct fib6_node *pn = fn; #ifdef CONFIG_IPV6_SUBTREES /* clones of this route might be in another subtree */ if (rt->rt6i_src.plen) { while (!(pn->fn_flags & RTN_ROOT)) pn = pn->parent; pn = pn->parent; } #endif fib6_prune_clones(info->nl_net, pn, rt); } /* * Walk the leaf entries looking for ourself */ for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->dst.rt6_next) { if (*rtp == rt) { fib6_del_route(fn, rtp, info); return 0; } } return -ENOENT; } /* * Tree traversal function. * * Certainly, it is not interrupt safe. * However, it is internally reenterable wrt itself and fib6_add/fib6_del. * It means, that we can modify tree during walking * and use this function for garbage collection, clone pruning, * cleaning tree when a device goes down etc. etc. * * It guarantees that every node will be traversed, * and that it will be traversed only once. * * Callback function w->func may return: * 0 -> continue walking. * positive value -> walking is suspended (used by tree dumps, * and probably by gc, if it will be split to several slices) * negative value -> terminate walking. * * The function itself returns: * 0 -> walk is complete. * >0 -> walk is incomplete (i.e. suspended) * <0 -> walk is terminated by an error. 
*/ static int fib6_walk_continue(struct fib6_walker_t *w) { struct fib6_node *fn, *pn; for (;;) { fn = w->node; if (!fn) return 0; if (w->prune && fn != w->root && fn->fn_flags & RTN_RTINFO && w->state < FWS_C) { w->state = FWS_C; w->leaf = fn->leaf; } switch (w->state) { #ifdef CONFIG_IPV6_SUBTREES case FWS_S: if (FIB6_SUBTREE(fn)) { w->node = FIB6_SUBTREE(fn); continue; } w->state = FWS_L; #endif case FWS_L: if (fn->left) { w->node = fn->left; w->state = FWS_INIT; continue; } w->state = FWS_R; case FWS_R: if (fn->right) { w->node = fn->right; w->state = FWS_INIT; continue; } w->state = FWS_C; w->leaf = fn->leaf; case FWS_C: if (w->leaf && fn->fn_flags & RTN_RTINFO) { int err; if (w->skip) { w->skip--; continue; } err = w->func(w); if (err) return err; w->count++; continue; } w->state = FWS_U; case FWS_U: if (fn == w->root) return 0; pn = fn->parent; w->node = pn; #ifdef CONFIG_IPV6_SUBTREES if (FIB6_SUBTREE(pn) == fn) { WARN_ON(!(fn->fn_flags & RTN_ROOT)); w->state = FWS_L; continue; } #endif if (pn->left == fn) { w->state = FWS_R; continue; } if (pn->right == fn) { w->state = FWS_C; w->leaf = w->node->leaf; continue; } #if RT6_DEBUG >= 2 WARN_ON(1); #endif } } } static int fib6_walk(struct fib6_walker_t *w) { int res; w->state = FWS_INIT; w->node = w->root; fib6_walker_link(w); res = fib6_walk_continue(w); if (res <= 0) fib6_walker_unlink(w); return res; } static int fib6_clean_node(struct fib6_walker_t *w) { int res; struct rt6_info *rt; struct fib6_cleaner_t *c = container_of(w, struct fib6_cleaner_t, w); struct nl_info info = { .nl_net = c->net, }; for (rt = w->leaf; rt; rt = rt->dst.rt6_next) { res = c->func(rt, c->arg); if (res < 0) { w->leaf = rt; res = fib6_del(rt, &info); if (res) { #if RT6_DEBUG >= 2 pr_debug("%s: del failed: rt=%p@%p err=%d\n", __func__, rt, rt->rt6i_node, res); #endif continue; } return 0; } WARN_ON(res != 0); } w->leaf = rt; return 0; } /* * Convenient frontend to tree walker. * * func is called on each route. 
* It may return -1 -> delete this route. * 0 -> continue walking * * prune==1 -> only immediate children of node (certainly, * ignoring pure split nodes) will be scanned. */ static void fib6_clean_tree(struct net *net, struct fib6_node *root, int (*func)(struct rt6_info *, void *arg), int prune, void *arg) { struct fib6_cleaner_t c; c.w.root = root; c.w.func = fib6_clean_node; c.w.prune = prune; c.w.count = 0; c.w.skip = 0; c.func = func; c.arg = arg; c.net = net; fib6_walk(&c.w); } void fib6_clean_all_ro(struct net *net, int (*func)(struct rt6_info *, void *arg), int prune, void *arg) { struct fib6_table *table; struct hlist_node *node; struct hlist_head *head; unsigned int h; rcu_read_lock(); for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { head = &net->ipv6.fib_table_hash[h]; hlist_for_each_entry_rcu(table, node, head, tb6_hlist) { read_lock_bh(&table->tb6_lock); fib6_clean_tree(net, &table->tb6_root, func, prune, arg); read_unlock_bh(&table->tb6_lock); } } rcu_read_unlock(); } void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg), int prune, void *arg) { struct fib6_table *table; struct hlist_node *node; struct hlist_head *head; unsigned int h; rcu_read_lock(); for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { head = &net->ipv6.fib_table_hash[h]; hlist_for_each_entry_rcu(table, node, head, tb6_hlist) { write_lock_bh(&table->tb6_lock); fib6_clean_tree(net, &table->tb6_root, func, prune, arg); write_unlock_bh(&table->tb6_lock); } } rcu_read_unlock(); } static int fib6_prune_clone(struct rt6_info *rt, void *arg) { if (rt->rt6i_flags & RTF_CACHE) { RT6_TRACE("pruning clone %p\n", rt); return -1; } return 0; } static void fib6_prune_clones(struct net *net, struct fib6_node *fn, struct rt6_info *rt) { fib6_clean_tree(net, fn, fib6_prune_clone, 1, rt); } /* * Garbage collection */ static struct fib6_gc_args { int timeout; int more; } gc_args; static int fib6_age(struct rt6_info *rt, void *arg) { unsigned long now = jiffies; /* * check addrconf expiration 
here.
	 *	Routes are expired even if they are in use.
	 *
	 *	Also age clones. Note, that clones are aged out
	 *	only if they are not in use now.
	 */

	if (rt->rt6i_flags & RTF_EXPIRES && rt->dst.expires) {
		if (time_after(now, rt->dst.expires)) {
			RT6_TRACE("expiring %p\n", rt);
			return -1;
		}
		gc_args.more++;
	} else if (rt->rt6i_flags & RTF_CACHE) {
		/* unreferenced clone past its timeout: drop it */
		if (atomic_read(&rt->dst.__refcnt) == 0 &&
		    time_after_eq(now, rt->dst.lastuse + gc_args.timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			return -1;
		} else if (rt->rt6i_flags & RTF_GATEWAY) {
			struct neighbour *neigh;
			__u8 neigh_flags = 0;

			neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway);
			if (neigh) {
				neigh_flags = neigh->flags;
				neigh_release(neigh);
			}
			/*
			 * Purge cached gateway routes whose neighbour is
			 * no longer flagged as a router.
			 */
			if (!(neigh_flags & NTF_ROUTER)) {
				RT6_TRACE("purging route %p via non-router but gateway\n",
					  rt);
				return -1;
			}
		}
		gc_args.more++;
	}

	return 0;
}

/* Serializes gc_args and the GC pass itself. */
static DEFINE_SPINLOCK(fib6_gc_lock);

/*
 * Run one garbage-collection pass over all routing tables of @net.
 * @expires: ~0UL = best-effort run (skip and re-arm the timer if a GC
 * is already in progress); otherwise block on the lock and use
 * @expires as the clone timeout (0 = the sysctl default interval).
 */
void fib6_run_gc(unsigned long expires, struct net *net)
{
	if (expires != ~0UL) {
		spin_lock_bh(&fib6_gc_lock);
		gc_args.timeout = expires ?
(int)expires : net->ipv6.sysctl.ip6_rt_gc_interval;
	} else {
		/* opportunistic run: don't block, just retry in 1s */
		if (!spin_trylock_bh(&fib6_gc_lock)) {
			mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
			return;
		}
		gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval;
	}

	gc_args.more = icmp6_dst_gc();

	fib6_clean_all(net, fib6_age, 0, NULL);

	/* re-arm only while entries remain that may expire later */
	if (gc_args.more)
		mod_timer(&net->ipv6.ip6_fib_timer,
			  round_jiffies(jiffies
					+ net->ipv6.sysctl.ip6_rt_gc_interval));
	else
		del_timer(&net->ipv6.ip6_fib_timer);
	spin_unlock_bh(&fib6_gc_lock);
}

/* Timer callback: periodic GC tick for the given netns. */
static void fib6_gc_timer_cb(unsigned long arg)
{
	fib6_run_gc(0, (struct net *)arg);
}

/*
 * Per-netns setup: allocate stats, the table hash and the main (and,
 * with CONFIG_IPV6_MULTIPLE_TABLES, local) routing table, then register
 * the tables.  On failure, unwinds via the goto chain and returns
 * -ENOMEM.
 */
static int __net_init fib6_net_init(struct net *net)
{
	size_t size = sizeof(struct hlist_head) * FIB6_TABLE_HASHSZ;

	setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);

	net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
	if (!net->ipv6.rt6_stats)
		goto out_timer;

	/* Avoid false sharing : Use at least a full cache line */
	size = max_t(size_t, size, L1_CACHE_BYTES);

	net->ipv6.fib_table_hash = kzalloc(size, GFP_KERNEL);
	if (!net->ipv6.fib_table_hash)
		goto out_rt6_stats;

	net->ipv6.fib6_main_tbl = kzalloc(sizeof(*net->ipv6.fib6_main_tbl),
					  GFP_KERNEL);
	if (!net->ipv6.fib6_main_tbl)
		goto out_fib_table_hash;

	net->ipv6.fib6_main_tbl->tb6_id = RT6_TABLE_MAIN;
	/* empty root points at the netns null entry, not at NULL */
	net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
	net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
		RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
	inet_peer_base_init(&net->ipv6.fib6_main_tbl->tb6_peers);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
					   GFP_KERNEL);
	if (!net->ipv6.fib6_local_tbl)
		goto out_fib6_main_tbl;
	net->ipv6.fib6_local_tbl->tb6_id = RT6_TABLE_LOCAL;
	net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
	net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
		RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
	inet_peer_base_init(&net->ipv6.fib6_local_tbl->tb6_peers);
#endif
	fib6_tables_init(net);

	return 0;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_fib6_main_tbl:
	kfree(net->ipv6.fib6_main_tbl);
#endif
out_fib_table_hash:
	kfree(net->ipv6.fib_table_hash);
out_rt6_stats:
	kfree(net->ipv6.rt6_stats);
out_timer:
	return -ENOMEM;
}

/*
 * Per-netns teardown: flush routes, stop the GC timer, then release
 * the tables, hash and stats allocated by fib6_net_init().
 */
static void fib6_net_exit(struct net *net)
{
	rt6_ifdown(net, NULL);
	del_timer_sync(&net->ipv6.ip6_fib_timer);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
	kfree(net->ipv6.fib6_local_tbl);
#endif
	inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
	kfree(net->ipv6.fib6_main_tbl);
	kfree(net->ipv6.fib_table_hash);
	kfree(net->ipv6.rt6_stats);
}

static struct pernet_operations fib6_net_ops = {
	.init = fib6_net_init,
	.exit = fib6_net_exit,
};

/*
 * Module init: create the fib6 node slab cache, register the pernet
 * ops and the RTM_GETROUTE dump handler.  Unwinds in reverse order on
 * failure.
 */
int __init fib6_init(void)
{
	int ret = -ENOMEM;

	fib6_node_kmem = kmem_cache_create("fib6_nodes",
					   sizeof(struct fib6_node),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!fib6_node_kmem)
		goto out;

	ret = register_pernet_subsys(&fib6_net_ops);
	if (ret)
		goto out_kmem_cache_create;

	ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib,
			      NULL);
	if (ret)
		goto out_unregister_subsys;
out:
	return ret;

out_unregister_subsys:
	unregister_pernet_subsys(&fib6_net_ops);
out_kmem_cache_create:
	kmem_cache_destroy(fib6_node_kmem);
	goto out;
}

/* Module exit: undo fib6_init() in reverse order. */
void fib6_gc_cleanup(void)
{
	unregister_pernet_subsys(&fib6_net_ops);
	kmem_cache_destroy(fib6_node_kmem);
}
gpl-2.0
deadman96385/android_kernel_leeco_msm8996
sound/usb/quirks.c
44
38372
/*
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/usb/midi.h>

#include <sound/control.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/pcm.h>

#include "usbaudio.h"
#include "card.h"
#include "mixer.h"
#include "mixer_quirks.h"
#include "midi.h"
#include "quirks.h"
#include "helper.h"
#include "endpoint.h"
#include "pcm.h"
#include "clock.h"
#include "stream.h"

/*
 * handle the quirks for the contained interfaces
 */
/*
 * QUIRK_COMPOSITE handler: quirk->data points to a table of sub-quirks
 * terminated by an entry with ifnum < 0.  Each entry is applied to its
 * interface; interfaces other than the one being probed are claimed for
 * this driver (unless another driver already holds them).
 */
static int create_composite_quirk(struct snd_usb_audio *chip,
				  struct usb_interface *iface,
				  struct usb_driver *driver,
				  const struct snd_usb_audio_quirk *quirk)
{
	int probed_ifnum = get_iface_desc(iface->altsetting)->bInterfaceNumber;
	int err;

	for (quirk = quirk->data; quirk->ifnum >= 0; ++quirk) {
		iface = usb_ifnum_to_if(chip->dev, quirk->ifnum);
		if (!iface)
			continue;
		if (quirk->ifnum != probed_ifnum &&
		    usb_interface_claimed(iface))
			continue;
		err = snd_usb_create_quirk(chip, iface, driver, quirk);
		if (err < 0)
			return err;
		if (quirk->ifnum != probed_ifnum)
			usb_driver_claim_interface(driver, iface, (void *)-1L);
	}
	return 0;
}

/* QUIRK_IGNORE_INTERFACE: succeed without creating any stream or MIDI. */
static int ignore_interface_quirk(struct snd_usb_audio *chip,
				  struct usb_interface *iface,
				  struct usb_driver *driver,
				  const struct snd_usb_audio_quirk *quirk)
{
return 0;
}


/*
 * Allow alignment on audio sub-slot (channel samples) rather than
 * on audio slots (audio frames)
 */
/* QUIRK_AUDIO_ALIGN_TRANSFER: set the chip-wide transfer-align flag. */
static int create_align_transfer_quirk(struct snd_usb_audio *chip,
				       struct usb_interface *iface,
				       struct usb_driver *driver,
				       const struct snd_usb_audio_quirk *quirk)
{
	chip->txfr_quirk = 1;
	return 1;	/* Continue with creating streams and mixer */
}

/*
 * Shared handler for all MIDI quirk types; snd_usbmidi_create()
 * dispatches on quirk->type internally.
 */
static int create_any_midi_quirk(struct snd_usb_audio *chip,
				 struct usb_interface *intf,
				 struct usb_driver *driver,
				 const struct snd_usb_audio_quirk *quirk)
{
	return snd_usbmidi_create(chip->card, intf, &chip->midi_list, quirk);
}

/*
 * create a stream for an interface with proper descriptors
 */
static int create_standard_audio_quirk(struct snd_usb_audio *chip,
				       struct usb_interface *iface,
				       struct usb_driver *driver,
				       const struct snd_usb_audio_quirk *quirk)
{
	struct usb_host_interface *alts;
	struct usb_interface_descriptor *altsd;
	int err;

	alts = &iface->altsetting[0];
	altsd = get_iface_desc(alts);
	err = snd_usb_parse_audio_interface(chip, altsd->bInterfaceNumber);
	if (err < 0) {
		usb_audio_err(chip, "cannot setup if %d: error %d\n",
			   altsd->bInterfaceNumber, err);
		return err;
	}
	/* reset the current interface */
	usb_set_interface(chip->dev, altsd->bInterfaceNumber, 0);
	return 0;
}

/*
 * create a stream for an endpoint/altsetting without proper descriptors
 */
static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
				     struct usb_interface *iface,
				     struct usb_driver *driver,
				     const struct snd_usb_audio_quirk *quirk)
{
	struct audioformat *fp;
	struct usb_host_interface *alts;
	struct usb_interface_descriptor *altsd;
	int stream, err;
	unsigned *rate_table = NULL;

	/* quirk->data is a template audioformat; take a private copy */
	fp = kmemdup(quirk->data, sizeof(*fp), GFP_KERNEL);
	if (!fp) {
		usb_audio_err(chip, "cannot memdup\n");
		return -ENOMEM;
	}
	if (fp->nr_rates > MAX_NR_RATES) {
		kfree(fp);
		return -EINVAL;
	}
	if (fp->nr_rates > 0) {
		/* copy the rate table too; the quirk data is shared/const */
		rate_table = kmemdup(fp->rate_table,
				     sizeof(int) * fp->nr_rates, GFP_KERNEL);
		if (!rate_table) {
			kfree(fp);
			return -ENOMEM;
		}
		fp->rate_table =
rate_table; } stream = (fp->endpoint & USB_DIR_IN) ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK; err = snd_usb_add_audio_stream(chip, stream, fp); if (err < 0) { kfree(fp); kfree(rate_table); return err; } if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber || fp->altset_idx >= iface->num_altsetting) { kfree(fp); kfree(rate_table); return -EINVAL; } alts = &iface->altsetting[fp->altset_idx]; altsd = get_iface_desc(alts); fp->protocol = altsd->bInterfaceProtocol; if (fp->datainterval == 0) fp->datainterval = snd_usb_parse_datainterval(chip, alts); if (fp->maxpacksize == 0) fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize); usb_set_interface(chip->dev, fp->iface, 0); snd_usb_init_pitch(chip, fp->iface, alts, fp); snd_usb_init_sample_rate(chip, fp->iface, alts, fp, fp->rate_max); return 0; } static int create_auto_pcm_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, struct usb_driver *driver) { struct usb_host_interface *alts; struct usb_interface_descriptor *altsd; struct usb_endpoint_descriptor *epd; struct uac1_as_header_descriptor *ashd; struct uac_format_type_i_discrete_descriptor *fmtd; /* * Most Roland/Yamaha audio streaming interfaces have more or less * standard descriptors, but older devices might lack descriptors, and * future ones might change, so ensure that we fail silently if the * interface doesn't look exactly right. 
*/ /* must have a non-zero altsetting for streaming */ if (iface->num_altsetting < 2) return -ENODEV; alts = &iface->altsetting[1]; altsd = get_iface_desc(alts); /* must have an isochronous endpoint for streaming */ if (altsd->bNumEndpoints < 1) return -ENODEV; epd = get_endpoint(alts, 0); if (!usb_endpoint_xfer_isoc(epd)) return -ENODEV; /* must have format descriptors */ ashd = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL); fmtd = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_FORMAT_TYPE); if (!ashd || ashd->bLength < 7 || !fmtd || fmtd->bLength < 8) return -ENODEV; return create_standard_audio_quirk(chip, iface, driver, NULL); } static int create_yamaha_midi_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, struct usb_driver *driver, struct usb_host_interface *alts) { static const struct snd_usb_audio_quirk yamaha_midi_quirk = { .type = QUIRK_MIDI_YAMAHA }; struct usb_midi_in_jack_descriptor *injd; struct usb_midi_out_jack_descriptor *outjd; /* must have some valid jack descriptors */ injd = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, USB_MS_MIDI_IN_JACK); outjd = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, USB_MS_MIDI_OUT_JACK); if (!injd && !outjd) return -ENODEV; if (injd && (injd->bLength < 5 || (injd->bJackType != USB_MS_EMBEDDED && injd->bJackType != USB_MS_EXTERNAL))) return -ENODEV; if (outjd && (outjd->bLength < 6 || (outjd->bJackType != USB_MS_EMBEDDED && outjd->bJackType != USB_MS_EXTERNAL))) return -ENODEV; return create_any_midi_quirk(chip, iface, driver, &yamaha_midi_quirk); } static int create_roland_midi_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, struct usb_driver *driver, struct usb_host_interface *alts) { static const struct snd_usb_audio_quirk roland_midi_quirk = { .type = QUIRK_MIDI_ROLAND }; u8 *roland_desc = NULL; /* might have a vendor-specific descriptor <06 24 F1 02 ...> */ for (;;) { roland_desc = 
snd_usb_find_csint_desc(alts->extra, alts->extralen, roland_desc, 0xf1); if (!roland_desc) return -ENODEV; if (roland_desc[0] < 6 || roland_desc[3] != 2) continue; return create_any_midi_quirk(chip, iface, driver, &roland_midi_quirk); } } static int create_std_midi_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, struct usb_driver *driver, struct usb_host_interface *alts) { struct usb_ms_header_descriptor *mshd; struct usb_ms_endpoint_descriptor *msepd; /* must have the MIDIStreaming interface header descriptor*/ mshd = (struct usb_ms_header_descriptor *)alts->extra; if (alts->extralen < 7 || mshd->bLength < 7 || mshd->bDescriptorType != USB_DT_CS_INTERFACE || mshd->bDescriptorSubtype != USB_MS_HEADER) return -ENODEV; /* must have the MIDIStreaming endpoint descriptor*/ msepd = (struct usb_ms_endpoint_descriptor *)alts->endpoint[0].extra; if (alts->endpoint[0].extralen < 4 || msepd->bLength < 4 || msepd->bDescriptorType != USB_DT_CS_ENDPOINT || msepd->bDescriptorSubtype != UAC_MS_GENERAL || msepd->bNumEmbMIDIJack < 1 || msepd->bNumEmbMIDIJack > 16) return -ENODEV; return create_any_midi_quirk(chip, iface, driver, NULL); } static int create_auto_midi_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, struct usb_driver *driver) { struct usb_host_interface *alts; struct usb_interface_descriptor *altsd; struct usb_endpoint_descriptor *epd; int err; alts = &iface->altsetting[0]; altsd = get_iface_desc(alts); /* must have at least one bulk/interrupt endpoint for streaming */ if (altsd->bNumEndpoints < 1) return -ENODEV; epd = get_endpoint(alts, 0); if (!usb_endpoint_xfer_bulk(epd) && !usb_endpoint_xfer_int(epd)) return -ENODEV; switch (USB_ID_VENDOR(chip->usb_id)) { case 0x0499: /* Yamaha */ err = create_yamaha_midi_quirk(chip, iface, driver, alts); if (err != -ENODEV) return err; break; case 0x0582: /* Roland */ err = create_roland_midi_quirk(chip, iface, driver, alts); if (err != -ENODEV) return err; break; } return create_std_midi_quirk(chip, 
iface, driver, alts); } static int create_autodetect_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, struct usb_driver *driver) { int err; err = create_auto_pcm_quirk(chip, iface, driver); if (err == -ENODEV) err = create_auto_midi_quirk(chip, iface, driver); return err; } static int create_autodetect_quirks(struct snd_usb_audio *chip, struct usb_interface *iface, struct usb_driver *driver, const struct snd_usb_audio_quirk *quirk) { int probed_ifnum = get_iface_desc(iface->altsetting)->bInterfaceNumber; int ifcount, ifnum, err; err = create_autodetect_quirk(chip, iface, driver); if (err < 0) return err; /* * ALSA PCM playback/capture devices cannot be registered in two steps, * so we have to claim the other corresponding interface here. */ ifcount = chip->dev->actconfig->desc.bNumInterfaces; for (ifnum = 0; ifnum < ifcount; ifnum++) { if (ifnum == probed_ifnum || quirk->ifnum >= 0) continue; iface = usb_ifnum_to_if(chip->dev, ifnum); if (!iface || usb_interface_claimed(iface) || get_iface_desc(iface->altsetting)->bInterfaceClass != USB_CLASS_VENDOR_SPEC) continue; err = create_autodetect_quirk(chip, iface, driver); if (err >= 0) usb_driver_claim_interface(driver, iface, (void *)-1L); } return 0; } /* * Create a stream for an Edirol UA-700/UA-25/UA-4FX interface. * The only way to detect the sample rate is by looking at wMaxPacketSize. 
*/ static int create_uaxx_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, struct usb_driver *driver, const struct snd_usb_audio_quirk *quirk) { static const struct audioformat ua_format = { .formats = SNDRV_PCM_FMTBIT_S24_3LE, .channels = 2, .fmt_type = UAC_FORMAT_TYPE_I, .altsetting = 1, .altset_idx = 1, .rates = SNDRV_PCM_RATE_CONTINUOUS, }; struct usb_host_interface *alts; struct usb_interface_descriptor *altsd; struct audioformat *fp; int stream, err; /* both PCM and MIDI interfaces have 2 or more altsettings */ if (iface->num_altsetting < 2) return -ENXIO; alts = &iface->altsetting[1]; altsd = get_iface_desc(alts); if (altsd->bNumEndpoints == 2) { static const struct snd_usb_midi_endpoint_info ua700_ep = { .out_cables = 0x0003, .in_cables = 0x0003 }; static const struct snd_usb_audio_quirk ua700_quirk = { .type = QUIRK_MIDI_FIXED_ENDPOINT, .data = &ua700_ep }; static const struct snd_usb_midi_endpoint_info uaxx_ep = { .out_cables = 0x0001, .in_cables = 0x0001 }; static const struct snd_usb_audio_quirk uaxx_quirk = { .type = QUIRK_MIDI_FIXED_ENDPOINT, .data = &uaxx_ep }; const struct snd_usb_audio_quirk *quirk = chip->usb_id == USB_ID(0x0582, 0x002b) ? &ua700_quirk : &uaxx_quirk; return snd_usbmidi_create(chip->card, iface, &chip->midi_list, quirk); } if (altsd->bNumEndpoints != 1) return -ENXIO; fp = kmemdup(&ua_format, sizeof(*fp), GFP_KERNEL); if (!fp) return -ENOMEM; fp->iface = altsd->bInterfaceNumber; fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress; fp->ep_attr = get_endpoint(alts, 0)->bmAttributes; fp->datainterval = 0; fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize); switch (fp->maxpacksize) { case 0x120: fp->rate_max = fp->rate_min = 44100; break; case 0x138: case 0x140: fp->rate_max = fp->rate_min = 48000; break; case 0x258: case 0x260: fp->rate_max = fp->rate_min = 96000; break; default: usb_audio_err(chip, "unknown sample rate\n"); kfree(fp); return -ENXIO; } stream = (fp->endpoint & USB_DIR_IN) ? 
SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK; err = snd_usb_add_audio_stream(chip, stream, fp); if (err < 0) { kfree(fp); return err; } usb_set_interface(chip->dev, fp->iface, 0); return 0; } /* * Create a standard mixer for the specified interface. */ static int create_standard_mixer_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, struct usb_driver *driver, const struct snd_usb_audio_quirk *quirk) { if (quirk->ifnum < 0) return 0; return snd_usb_create_mixer(chip, quirk->ifnum, 0); } /* * audio-interface quirks * * returns zero if no standard audio/MIDI parsing is needed. * returns a positive value if standard audio/midi interfaces are parsed * after this. * returns a negative value at error. */ int snd_usb_create_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, struct usb_driver *driver, const struct snd_usb_audio_quirk *quirk) { typedef int (*quirk_func_t)(struct snd_usb_audio *, struct usb_interface *, struct usb_driver *, const struct snd_usb_audio_quirk *); static const quirk_func_t quirk_funcs[] = { [QUIRK_IGNORE_INTERFACE] = ignore_interface_quirk, [QUIRK_COMPOSITE] = create_composite_quirk, [QUIRK_AUTODETECT] = create_autodetect_quirks, [QUIRK_MIDI_STANDARD_INTERFACE] = create_any_midi_quirk, [QUIRK_MIDI_FIXED_ENDPOINT] = create_any_midi_quirk, [QUIRK_MIDI_YAMAHA] = create_any_midi_quirk, [QUIRK_MIDI_ROLAND] = create_any_midi_quirk, [QUIRK_MIDI_MIDIMAN] = create_any_midi_quirk, [QUIRK_MIDI_NOVATION] = create_any_midi_quirk, [QUIRK_MIDI_RAW_BYTES] = create_any_midi_quirk, [QUIRK_MIDI_EMAGIC] = create_any_midi_quirk, [QUIRK_MIDI_CME] = create_any_midi_quirk, [QUIRK_MIDI_AKAI] = create_any_midi_quirk, [QUIRK_MIDI_FTDI] = create_any_midi_quirk, [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk, [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk, [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk, [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk, [QUIRK_AUDIO_STANDARD_MIXER] = 
create_standard_mixer_quirk, }; if (quirk->type < QUIRK_TYPE_COUNT) { return quirk_funcs[quirk->type](chip, iface, driver, quirk); } else { usb_audio_err(chip, "invalid quirk type %d\n", quirk->type); return -ENXIO; } } /* * boot quirks */ #define EXTIGY_FIRMWARE_SIZE_OLD 794 #define EXTIGY_FIRMWARE_SIZE_NEW 483 static int snd_usb_extigy_boot_quirk(struct usb_device *dev, struct usb_interface *intf) { struct usb_host_config *config = dev->actconfig; int err; if (le16_to_cpu(get_cfg_desc(config)->wTotalLength) == EXTIGY_FIRMWARE_SIZE_OLD || le16_to_cpu(get_cfg_desc(config)->wTotalLength) == EXTIGY_FIRMWARE_SIZE_NEW) { dev_dbg(&dev->dev, "sending Extigy boot sequence...\n"); /* Send message to force it to reconnect with full interface. */ err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev,0), 0x10, 0x43, 0x0001, 0x000a, NULL, 0); if (err < 0) dev_dbg(&dev->dev, "error sending boot message: %d\n", err); err = usb_get_descriptor(dev, USB_DT_DEVICE, 0, &dev->descriptor, sizeof(dev->descriptor)); config = dev->actconfig; if (err < 0) dev_dbg(&dev->dev, "error usb_get_descriptor: %d\n", err); err = usb_reset_configuration(dev); if (err < 0) dev_dbg(&dev->dev, "error usb_reset_configuration: %d\n", err); dev_dbg(&dev->dev, "extigy_boot: new boot length = %d\n", le16_to_cpu(get_cfg_desc(config)->wTotalLength)); return -ENODEV; /* quit this anyway */ } return 0; } static int snd_usb_audigy2nx_boot_quirk(struct usb_device *dev) { u8 buf = 1; snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), 0x2a, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER, 0, 0, &buf, 1); if (buf == 0) { snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), 0x29, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, 1, 2000, NULL, 0); return -ENODEV; } return 0; } static int snd_usb_fasttrackpro_boot_quirk(struct usb_device *dev) { int err; if (dev->actconfig->desc.bConfigurationValue == 1) { dev_info(&dev->dev, "Fast Track Pro switching to config #2\n"); /* This function has to be available by the usb core module. 
* if it is not avialable the boot quirk has to be left out * and the configuration has to be set by udev or hotplug * rules */ err = usb_driver_set_configuration(dev, 2); if (err < 0) dev_dbg(&dev->dev, "error usb_driver_set_configuration: %d\n", err); /* Always return an error, so that we stop creating a device that will just be destroyed and recreated with a new configuration */ return -ENODEV; } else dev_info(&dev->dev, "Fast Track Pro config OK\n"); return 0; } /* * C-Media CM106/CM106+ have four 16-bit internal registers that are nicely * documented in the device's data sheet. */ static int snd_usb_cm106_write_int_reg(struct usb_device *dev, int reg, u16 value) { u8 buf[4]; buf[0] = 0x20; buf[1] = value & 0xff; buf[2] = (value >> 8) & 0xff; buf[3] = reg; return snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), USB_REQ_SET_CONFIGURATION, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT, 0, 0, &buf, 4); } static int snd_usb_cm106_boot_quirk(struct usb_device *dev) { /* * Enable line-out driver mode, set headphone source to front * channels, enable stereo mic. */ return snd_usb_cm106_write_int_reg(dev, 2, 0x8004); } /* * C-Media CM6206 is based on CM106 with two additional * registers that are not documented in the data sheet. * Values here are chosen based on sniffing USB traffic * under Windows. 
*/ static int snd_usb_cm6206_boot_quirk(struct usb_device *dev) { int err = 0, reg; int val[] = {0x2004, 0x3000, 0xf800, 0x143f, 0x0000, 0x3000}; for (reg = 0; reg < ARRAY_SIZE(val); reg++) { err = snd_usb_cm106_write_int_reg(dev, reg, val[reg]); if (err < 0) return err; } return err; } /* quirk for Plantronics GameCom 780 with CM6302 chip */ static int snd_usb_gamecon780_boot_quirk(struct usb_device *dev) { /* set the initial volume and don't change; other values are either * too loud or silent due to firmware bug (bko#65251) */ u8 buf[2] = { 0x74, 0xe3 }; return snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT, UAC_FU_VOLUME << 8, 9 << 8, buf, 2); } /* * Novation Twitch DJ controller * Focusrite Novation Saffire 6 USB audio card */ static int snd_usb_novation_boot_quirk(struct usb_device *dev) { /* preemptively set up the device because otherwise the * raw MIDI endpoints are not active */ usb_set_interface(dev, 0, 1); return 0; } /* * This call will put the synth in "USB send" mode, i.e it will send MIDI * messages through USB (this is disabled at startup). The synth will * acknowledge by sending a sysex on endpoint 0x85 and by displaying a USB * sign on its LCD. Values here are chosen based on sniffing USB traffic * under Windows. */ static int snd_usb_accessmusic_boot_quirk(struct usb_device *dev) { int err, actual_length; /* "midi send" enable */ static const u8 seq[] = { 0x4e, 0x73, 0x52, 0x01 }; void *buf = kmemdup(seq, ARRAY_SIZE(seq), GFP_KERNEL); if (!buf) return -ENOMEM; err = usb_interrupt_msg(dev, usb_sndintpipe(dev, 0x05), buf, ARRAY_SIZE(seq), &actual_length, 1000); kfree(buf); if (err < 0) return err; return 0; } /* * Some sound cards from Native Instruments are in fact compliant to the USB * audio standard of version 2 and other approved USB standards, even though * they come up as vendor-specific device when first connected. 
* * However, they can be told to come up with a new set of descriptors * upon their next enumeration, and the interfaces announced by the new * descriptors will then be handled by the kernel's class drivers. As the * product ID will also change, no further checks are required. */ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev) { int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE, 1, 0, NULL, 0, 1000); if (ret < 0) return ret; usb_reset_device(dev); /* return -EAGAIN, so the creation of an audio interface for this * temporary device is aborted. The device will reconnect with a * new product ID */ return -EAGAIN; } static void mbox2_setup_48_24_magic(struct usb_device *dev) { u8 srate[3]; u8 temp[12]; /* Choose 48000Hz permanently */ srate[0] = 0x80; srate[1] = 0xbb; srate[2] = 0x00; /* Send the magic! */ snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), 0x01, 0x22, 0x0100, 0x0085, &temp, 0x0003); snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), 0x81, 0xa2, 0x0100, 0x0085, &srate, 0x0003); snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), 0x81, 0xa2, 0x0100, 0x0086, &srate, 0x0003); snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), 0x81, 0xa2, 0x0100, 0x0003, &srate, 0x0003); return; } /* Digidesign Mbox 2 needs to load firmware onboard * and driver must wait a few seconds for initialisation. 
*/ #define MBOX2_FIRMWARE_SIZE 646 #define MBOX2_BOOT_LOADING 0x01 /* Hard coded into the device */ #define MBOX2_BOOT_READY 0x02 /* Hard coded into the device */ static int snd_usb_mbox2_boot_quirk(struct usb_device *dev) { struct usb_host_config *config = dev->actconfig; int err; u8 bootresponse[0x12]; int fwsize; int count; fwsize = le16_to_cpu(get_cfg_desc(config)->wTotalLength); if (fwsize != MBOX2_FIRMWARE_SIZE) { dev_err(&dev->dev, "Invalid firmware size=%d.\n", fwsize); return -ENODEV; } dev_dbg(&dev->dev, "Sending Digidesign Mbox 2 boot sequence...\n"); count = 0; bootresponse[0] = MBOX2_BOOT_LOADING; while ((bootresponse[0] == MBOX2_BOOT_LOADING) && (count < 10)) { msleep(500); /* 0.5 second delay */ snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), /* Control magic - load onboard firmware */ 0x85, 0xc0, 0x0001, 0x0000, &bootresponse, 0x0012); if (bootresponse[0] == MBOX2_BOOT_READY) break; dev_dbg(&dev->dev, "device not ready, resending boot sequence...\n"); count++; } if (bootresponse[0] != MBOX2_BOOT_READY) { dev_err(&dev->dev, "Unknown bootresponse=%d, or timed out, ignoring device.\n", bootresponse[0]); return -ENODEV; } dev_dbg(&dev->dev, "device initialised!\n"); err = usb_get_descriptor(dev, USB_DT_DEVICE, 0, &dev->descriptor, sizeof(dev->descriptor)); config = dev->actconfig; if (err < 0) dev_dbg(&dev->dev, "error usb_get_descriptor: %d\n", err); err = usb_reset_configuration(dev); if (err < 0) dev_dbg(&dev->dev, "error usb_reset_configuration: %d\n", err); dev_dbg(&dev->dev, "mbox2_boot: new boot length = %d\n", le16_to_cpu(get_cfg_desc(config)->wTotalLength)); mbox2_setup_48_24_magic(dev); dev_info(&dev->dev, "Digidesign Mbox 2: 24bit 48kHz"); return 0; /* Successful boot */ } /* * Setup quirks */ #define MAUDIO_SET 0x01 /* parse device_setup */ #define MAUDIO_SET_COMPATIBLE 0x80 /* use only "win-compatible" interfaces */ #define MAUDIO_SET_DTS 0x02 /* enable DTS Digital Output */ #define MAUDIO_SET_96K 0x04 /* 48-96KHz rate if set, 8-48KHz 
otherwise */ #define MAUDIO_SET_24B 0x08 /* 24bits sample if set, 16bits otherwise */ #define MAUDIO_SET_DI 0x10 /* enable Digital Input */ #define MAUDIO_SET_MASK 0x1f /* bit mask for setup value */ #define MAUDIO_SET_24B_48K_DI 0x19 /* 24bits+48KHz+Digital Input */ #define MAUDIO_SET_24B_48K_NOTDI 0x09 /* 24bits+48KHz+No Digital Input */ #define MAUDIO_SET_16B_48K_DI 0x11 /* 16bits+48KHz+Digital Input */ #define MAUDIO_SET_16B_48K_NOTDI 0x01 /* 16bits+48KHz+No Digital Input */ static int quattro_skip_setting_quirk(struct snd_usb_audio *chip, int iface, int altno) { /* Reset ALL ifaces to 0 altsetting. * Call it for every possible altsetting of every interface. */ usb_set_interface(chip->dev, iface, 0); if (chip->setup & MAUDIO_SET) { if (chip->setup & MAUDIO_SET_COMPATIBLE) { if (iface != 1 && iface != 2) return 1; /* skip all interfaces but 1 and 2 */ } else { unsigned int mask; if (iface == 1 || iface == 2) return 1; /* skip interfaces 1 and 2 */ if ((chip->setup & MAUDIO_SET_96K) && altno != 1) return 1; /* skip this altsetting */ mask = chip->setup & MAUDIO_SET_MASK; if (mask == MAUDIO_SET_24B_48K_DI && altno != 2) return 1; /* skip this altsetting */ if (mask == MAUDIO_SET_24B_48K_NOTDI && altno != 3) return 1; /* skip this altsetting */ if (mask == MAUDIO_SET_16B_48K_NOTDI && altno != 4) return 1; /* skip this altsetting */ } } usb_audio_dbg(chip, "using altsetting %d for interface %d config %d\n", altno, iface, chip->setup); return 0; /* keep this altsetting */ } static int audiophile_skip_setting_quirk(struct snd_usb_audio *chip, int iface, int altno) { /* Reset ALL ifaces to 0 altsetting. * Call it for every possible altsetting of every interface. 
*/ usb_set_interface(chip->dev, iface, 0); if (chip->setup & MAUDIO_SET) { unsigned int mask; if ((chip->setup & MAUDIO_SET_DTS) && altno != 6) return 1; /* skip this altsetting */ if ((chip->setup & MAUDIO_SET_96K) && altno != 1) return 1; /* skip this altsetting */ mask = chip->setup & MAUDIO_SET_MASK; if (mask == MAUDIO_SET_24B_48K_DI && altno != 2) return 1; /* skip this altsetting */ if (mask == MAUDIO_SET_24B_48K_NOTDI && altno != 3) return 1; /* skip this altsetting */ if (mask == MAUDIO_SET_16B_48K_DI && altno != 4) return 1; /* skip this altsetting */ if (mask == MAUDIO_SET_16B_48K_NOTDI && altno != 5) return 1; /* skip this altsetting */ } return 0; /* keep this altsetting */ } static int fasttrackpro_skip_setting_quirk(struct snd_usb_audio *chip, int iface, int altno) { /* Reset ALL ifaces to 0 altsetting. * Call it for every possible altsetting of every interface. */ usb_set_interface(chip->dev, iface, 0); /* possible configuration where both inputs and only one output is *used is not supported by the current setup */ if (chip->setup & (MAUDIO_SET | MAUDIO_SET_24B)) { if (chip->setup & MAUDIO_SET_96K) { if (altno != 3 && altno != 6) return 1; } else if (chip->setup & MAUDIO_SET_DI) { if (iface == 4) return 1; /* no analog input */ if (altno != 2 && altno != 5) return 1; /* enable only altsets 2 and 5 */ } else { if (iface == 5) return 1; /* disable digialt input */ if (altno != 2 && altno != 5) return 1; /* enalbe only altsets 2 and 5 */ } } else { /* keep only 16-Bit mode */ if (altno != 1) return 1; } usb_audio_dbg(chip, "using altsetting %d for interface %d config %d\n", altno, iface, chip->setup); return 0; /* keep this altsetting */ } int snd_usb_apply_interface_quirk(struct snd_usb_audio *chip, int iface, int altno) { /* audiophile usb: skip altsets incompatible with device_setup */ if (chip->usb_id == USB_ID(0x0763, 0x2003)) return audiophile_skip_setting_quirk(chip, iface, altno); /* quattro usb: skip altsets incompatible with device_setup */ if 
(chip->usb_id == USB_ID(0x0763, 0x2001)) return quattro_skip_setting_quirk(chip, iface, altno); /* fasttrackpro usb: skip altsets incompatible with device_setup */ if (chip->usb_id == USB_ID(0x0763, 0x2012)) return fasttrackpro_skip_setting_quirk(chip, iface, altno); return 0; } int snd_usb_apply_boot_quirk(struct usb_device *dev, struct usb_interface *intf, const struct snd_usb_audio_quirk *quirk) { u32 id = USB_ID(le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); switch (id) { case USB_ID(0x041e, 0x3000): /* SB Extigy needs special boot-up sequence */ /* if more models come, this will go to the quirk list. */ return snd_usb_extigy_boot_quirk(dev, intf); case USB_ID(0x041e, 0x3020): /* SB Audigy 2 NX needs its own boot-up magic, too */ return snd_usb_audigy2nx_boot_quirk(dev); case USB_ID(0x10f5, 0x0200): /* C-Media CM106 / Turtle Beach Audio Advantage Roadie */ return snd_usb_cm106_boot_quirk(dev); case USB_ID(0x0d8c, 0x0102): /* C-Media CM6206 / CM106-Like Sound Device */ case USB_ID(0x0ccd, 0x00b1): /* Terratec Aureon 7.1 USB */ return snd_usb_cm6206_boot_quirk(dev); case USB_ID(0x0dba, 0x3000): /* Digidesign Mbox 2 */ return snd_usb_mbox2_boot_quirk(dev); case USB_ID(0x1235, 0x0010): /* Focusrite Novation Saffire 6 USB */ case USB_ID(0x1235, 0x0018): /* Focusrite Novation Twitch */ return snd_usb_novation_boot_quirk(dev); case USB_ID(0x133e, 0x0815): /* Access Music VirusTI Desktop */ return snd_usb_accessmusic_boot_quirk(dev); case USB_ID(0x17cc, 0x1000): /* Komplete Audio 6 */ case USB_ID(0x17cc, 0x1010): /* Traktor Audio 6 */ case USB_ID(0x17cc, 0x1020): /* Traktor Audio 10 */ return snd_usb_nativeinstruments_boot_quirk(dev); case USB_ID(0x0763, 0x2012): /* M-Audio Fast Track Pro USB */ return snd_usb_fasttrackpro_boot_quirk(dev); case USB_ID(0x047f, 0xc010): /* Plantronics Gamecom 780 */ return snd_usb_gamecon780_boot_quirk(dev); } return 0; } /* * check if the device uses big-endian samples */ int 
snd_usb_is_big_endian_format(struct snd_usb_audio *chip, struct audioformat *fp) { /* it depends on altsetting whether the device is big-endian or not */ switch (chip->usb_id) { case USB_ID(0x0763, 0x2001): /* M-Audio Quattro: captured data only */ if (fp->altsetting == 2 || fp->altsetting == 3 || fp->altsetting == 5 || fp->altsetting == 6) return 1; break; case USB_ID(0x0763, 0x2003): /* M-Audio Audiophile USB */ if (chip->setup == 0x00 || fp->altsetting == 1 || fp->altsetting == 2 || fp->altsetting == 3) return 1; break; case USB_ID(0x0763, 0x2012): /* M-Audio Fast Track Pro */ if (fp->altsetting == 2 || fp->altsetting == 3 || fp->altsetting == 5 || fp->altsetting == 6) return 1; break; } return 0; } /* * For E-Mu 0404USB/0202USB/TrackerPre/0204 sample rate should be set for device, * not for interface. */ enum { EMU_QUIRK_SR_44100HZ = 0, EMU_QUIRK_SR_48000HZ, EMU_QUIRK_SR_88200HZ, EMU_QUIRK_SR_96000HZ, EMU_QUIRK_SR_176400HZ, EMU_QUIRK_SR_192000HZ }; static void set_format_emu_quirk(struct snd_usb_substream *subs, struct audioformat *fmt) { unsigned char emu_samplerate_id = 0; /* When capture is active * sample rate shouldn't be changed * by playback substream */ if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) { if (subs->stream->substream[SNDRV_PCM_STREAM_CAPTURE].interface != -1) return; } switch (fmt->rate_min) { case 48000: emu_samplerate_id = EMU_QUIRK_SR_48000HZ; break; case 88200: emu_samplerate_id = EMU_QUIRK_SR_88200HZ; break; case 96000: emu_samplerate_id = EMU_QUIRK_SR_96000HZ; break; case 176400: emu_samplerate_id = EMU_QUIRK_SR_176400HZ; break; case 192000: emu_samplerate_id = EMU_QUIRK_SR_192000HZ; break; default: emu_samplerate_id = EMU_QUIRK_SR_44100HZ; break; } snd_emuusb_set_samplerate(subs->stream->chip, emu_samplerate_id); subs->pkt_offset_adj = (emu_samplerate_id >= EMU_QUIRK_SR_176400HZ) ? 
4 : 0; } void snd_usb_set_format_quirk(struct snd_usb_substream *subs, struct audioformat *fmt) { switch (subs->stream->chip->usb_id) { case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */ case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */ case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */ case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */ set_format_emu_quirk(subs, fmt); break; } } bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) { /* devices which do not support reading the sample rate. */ switch (chip->usb_id) { case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */ case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */ case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */ case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */ case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ return true; } return false; } /* Marantz/Denon USB DACs need a vendor cmd to switch * between PCM and native DSD mode */ static bool is_marantz_denon_dac(unsigned int id) { switch (id) { case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */ case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */ case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */ return true; } return false; } int snd_usb_select_mode_quirk(struct snd_usb_substream *subs, struct audioformat *fmt) { struct usb_device *dev = subs->dev; int err; if (is_marantz_denon_dac(subs->stream->chip->usb_id)) { /* First switch to alt set 0, otherwise the mode switch cmd * will not be accepted by the DAC */ err = usb_set_interface(dev, fmt->iface, 0); if (err < 0) return err; mdelay(20); /* Delay needed after setting the interface */ switch (fmt->altsetting) { case 2: /* DSD mode requested */ case 1: /* PCM mode requested */ err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), 0, USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, fmt->altsetting - 1, 1, NULL, 0); if (err < 0) return err; break; } mdelay(20); } return 0; } void snd_usb_endpoint_start_quirk(struct snd_usb_endpoint *ep) { /* * "Playback Design" products send bogus feedback data at 
the start * of the stream. Ignore them. */ if ((le16_to_cpu(ep->chip->dev->descriptor.idVendor) == 0x23ba) && ep->type == SND_USB_ENDPOINT_TYPE_SYNC) ep->skip_packets = 4; /* * M-Audio Fast Track C400/C600 - when packets are not skipped, real * world latency varies by approx. +/- 50 frames (at 96KHz) each time * the stream is (re)started. When skipping packets 16 at endpoint * start up, the real world latency is stable within +/- 1 frame (also * across power cycles). */ if ((ep->chip->usb_id == USB_ID(0x0763, 0x2030) || ep->chip->usb_id == USB_ID(0x0763, 0x2031)) && ep->type == SND_USB_ENDPOINT_TYPE_DATA) ep->skip_packets = 16; } void snd_usb_set_interface_quirk(struct usb_device *dev) { /* * "Playback Design" products need a 50ms delay after setting the * USB interface. */ if (le16_to_cpu(dev->descriptor.idVendor) == 0x23ba) mdelay(50); } void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size) { /* * "Playback Design" products need a 20ms delay after each * class compliant request */ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x23ba) && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) mdelay(20); /* Marantz/Denon devices with USB DAC functionality need a delay * after each class compliant request */ if (is_marantz_denon_dac(USB_ID(le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct))) && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) mdelay(20); /* Zoom R16/24 needs a tiny delay here, otherwise requests like * get/set frequency return as failed despite actually succeeding. */ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1686) && (le16_to_cpu(dev->descriptor.idProduct) == 0x00dd) && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) mdelay(1); } /* * snd_usb_interface_dsd_format_quirks() is called from format.c to * augment the PCM format bit-field for DSD types. 
The UAC standards * don't have a designated bit field to denote DSD-capable interfaces, * hence all hardware that is known to support this format has to be * listed here. */ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, struct audioformat *fp, unsigned int sample_bytes) { /* Playback Designs */ if (le16_to_cpu(chip->dev->descriptor.idVendor) == 0x23ba) { switch (fp->altsetting) { case 1: fp->dsd_dop = true; return SNDRV_PCM_FMTBIT_DSD_U16_LE; case 2: fp->dsd_bitrev = true; return SNDRV_PCM_FMTBIT_DSD_U8; case 3: fp->dsd_bitrev = true; return SNDRV_PCM_FMTBIT_DSD_U16_LE; } } /* XMOS based USB DACs */ switch (chip->usb_id) { case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */ case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */ case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */ if (fp->altsetting == 2) return SNDRV_PCM_FMTBIT_DSD_U32_BE; break; case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */ case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */ if (fp->altsetting == 3) return SNDRV_PCM_FMTBIT_DSD_U32_BE; break; default: break; } /* Denon/Marantz devices with USB DAC functionality */ if (is_marantz_denon_dac(chip->usb_id)) { if (fp->altsetting == 2) return SNDRV_PCM_FMTBIT_DSD_U32_BE; } return 0; }
gpl-2.0
Selectah/trinityzero
src/server/scripts/Kalimdor/RuinsOfAhnQiraj/boss_rajaxx.cpp
44
3978
/* * Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "ObjectMgr.h" #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "ruins_of_ahnqiraj.h" enum Yells { // The time of our retribution is at hand! Let darkness reign in the hearts of our enemies! Sound: 8645 Emote: 35 SAY_ANDOROV_INTRO = 0, // Before for the first wave SAY_ANDOROV_ATTACK = 1, // Beginning the event SAY_WAVE3 = 0, SAY_WAVE4 = 1, SAY_WAVE5 = 2, SAY_WAVE6 = 3, SAY_WAVE7 = 4, SAY_INTRO = 5, SAY_UNK1 = 6, SAY_UNK2 = 7, SAY_UNK3 = 8, SAY_DEATH = 9, SAY_CHANGEAGGRO = 10, SAY_KILLS_ANDOROV = 11, SAY_COMPLETE_QUEST = 12 // Yell when realm complete quest 8743 for world event // Warriors, Captains, continue the fight! 
Sound: 8640 }; enum Spells { SPELL_DISARM = 6713, SPELL_FRENZY = 8269, SPELL_THUNDERCRASH = 25599 }; enum Events { EVENT_DISARM = 1, // 03:58:27, 03:58:49 EVENT_THUNDERCRASH = 2, // 03:58:29, 03:58:50 EVENT_CHANGE_AGGRO = 3, }; class boss_rajaxx : public CreatureScript { public: boss_rajaxx() : CreatureScript("boss_rajaxx") { } struct boss_rajaxxAI : public BossAI { boss_rajaxxAI(Creature* creature) : BossAI(creature, DATA_RAJAXX) { } void Reset() OVERRIDE { _Reset(); enraged = false; events.ScheduleEvent(EVENT_DISARM, 10000); events.ScheduleEvent(EVENT_THUNDERCRASH, 12000); } void JustDied(Unit* /*killer*/) OVERRIDE { //SAY_DEATH _JustDied(); } void EnterCombat(Unit* /*victim*/) OVERRIDE { _EnterCombat(); } void UpdateAI(uint32 diff) OVERRIDE { if (!UpdateVictim()) return; events.Update(diff); if (me->HasUnitState(UNIT_STATE_CASTING)) return; while (uint32 eventId = events.ExecuteEvent()) { switch (eventId) { case EVENT_DISARM: DoCastVictim(SPELL_DISARM); events.ScheduleEvent(EVENT_DISARM, 22000); break; case EVENT_THUNDERCRASH: DoCast(me, SPELL_THUNDERCRASH); events.ScheduleEvent(EVENT_THUNDERCRASH, 21000); break; default: break; } } DoMeleeAttackIfReady(); } private: bool enraged; }; CreatureAI* GetAI(Creature* creature) const OVERRIDE { return new boss_rajaxxAI(creature); } }; void AddSC_boss_rajaxx() { new boss_rajaxx(); }
gpl-2.0
systemd/linux
net/mac80211/debugfs.c
44
8236
/* * mac80211 debugfs for wireless PHYs * * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * * GPLv2 * */ #include <linux/debugfs.h> #include <linux/rtnetlink.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #include "debugfs.h" #define DEBUGFS_FORMAT_BUFFER_SIZE 100 int mac80211_format_buffer(char __user *userbuf, size_t count, loff_t *ppos, char *fmt, ...) { va_list args; char buf[DEBUGFS_FORMAT_BUFFER_SIZE]; int res; va_start(args, fmt); res = vscnprintf(buf, sizeof(buf), fmt, args); va_end(args); return simple_read_from_buffer(userbuf, count, ppos, buf, res); } #define DEBUGFS_READONLY_FILE_FN(name, fmt, value...) \ static ssize_t name## _read(struct file *file, char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ struct ieee80211_local *local = file->private_data; \ \ return mac80211_format_buffer(userbuf, count, ppos, \ fmt "\n", ##value); \ } #define DEBUGFS_READONLY_FILE_OPS(name) \ static const struct file_operations name## _ops = { \ .read = name## _read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; #define DEBUGFS_READONLY_FILE(name, fmt, value...) \ DEBUGFS_READONLY_FILE_FN(name, fmt, value) \ DEBUGFS_READONLY_FILE_OPS(name) #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0400, phyd, local, &name## _ops); #define DEBUGFS_ADD_MODE(name, mode) \ debugfs_create_file(#name, mode, phyd, local, &name## _ops); DEBUGFS_READONLY_FILE(user_power, "%d", local->user_power_level); DEBUGFS_READONLY_FILE(power, "%d", local->hw.conf.power_level); DEBUGFS_READONLY_FILE(total_ps_buffered, "%d", local->total_ps_buffered); DEBUGFS_READONLY_FILE(wep_iv, "%#08x", local->wep_iv & 0xffffff); DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s", local->rate_ctrl ? 
local->rate_ctrl->ops->name : "hw/driver"); #ifdef CONFIG_PM static ssize_t reset_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; rtnl_lock(); __ieee80211_suspend(&local->hw, NULL); __ieee80211_resume(&local->hw); rtnl_unlock(); return count; } static const struct file_operations reset_ops = { .write = reset_write, .open = simple_open, .llseek = noop_llseek, }; #endif static const char *hw_flag_names[] = { #define FLAG(F) [IEEE80211_HW_##F] = #F FLAG(HAS_RATE_CONTROL), FLAG(RX_INCLUDES_FCS), FLAG(HOST_BROADCAST_PS_BUFFERING), FLAG(SIGNAL_UNSPEC), FLAG(SIGNAL_DBM), FLAG(NEED_DTIM_BEFORE_ASSOC), FLAG(SPECTRUM_MGMT), FLAG(AMPDU_AGGREGATION), FLAG(SUPPORTS_PS), FLAG(PS_NULLFUNC_STACK), FLAG(SUPPORTS_DYNAMIC_PS), FLAG(MFP_CAPABLE), FLAG(WANT_MONITOR_VIF), FLAG(NO_AUTO_VIF), FLAG(SW_CRYPTO_CONTROL), FLAG(SUPPORT_FAST_XMIT), FLAG(REPORTS_TX_ACK_STATUS), FLAG(CONNECTION_MONITOR), FLAG(QUEUE_CONTROL), FLAG(SUPPORTS_PER_STA_GTK), FLAG(AP_LINK_PS), FLAG(TX_AMPDU_SETUP_IN_HW), FLAG(SUPPORTS_RC_TABLE), FLAG(P2P_DEV_ADDR_FOR_INTF), FLAG(TIMING_BEACON_ONLY), FLAG(SUPPORTS_HT_CCK_RATES), FLAG(CHANCTX_STA_CSA), FLAG(SUPPORTS_CLONED_SKBS), FLAG(SINGLE_SCAN_ON_ALL_BANDS), FLAG(TDLS_WIDER_BW), FLAG(SUPPORTS_AMSDU_IN_AMPDU), FLAG(BEACON_TX_STATUS), FLAG(NEEDS_UNIQUE_STA_ADDR), #undef FLAG }; static ssize_t hwflags_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; size_t bufsz = 30 * NUM_IEEE80211_HW_FLAGS; char *buf = kzalloc(bufsz, GFP_KERNEL); char *pos = buf, *end = buf + bufsz - 1; ssize_t rv; int i; if (!buf) return -ENOMEM; /* fail compilation if somebody adds or removes * a flag without updating the name array above */ BUILD_BUG_ON(ARRAY_SIZE(hw_flag_names) != NUM_IEEE80211_HW_FLAGS); for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) { if (test_bit(i, local->hw.flags)) pos += scnprintf(pos, end - pos, "%s\n", 
hw_flag_names[i]); } rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); kfree(buf); return rv; } static ssize_t queues_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; unsigned long flags; char buf[IEEE80211_MAX_QUEUES * 20]; int q, res = 0; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for (q = 0; q < local->hw.queues; q++) res += sprintf(buf + res, "%02d: %#.8lx/%d\n", q, local->queue_stop_reasons[q], skb_queue_len(&local->pending[q])); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); return simple_read_from_buffer(user_buf, count, ppos, buf, res); } DEBUGFS_READONLY_FILE_OPS(hwflags); DEBUGFS_READONLY_FILE_OPS(queues); /* statistics stuff */ static ssize_t format_devstat_counter(struct ieee80211_local *local, char __user *userbuf, size_t count, loff_t *ppos, int (*printvalue)(struct ieee80211_low_level_stats *stats, char *buf, int buflen)) { struct ieee80211_low_level_stats stats; char buf[20]; int res; rtnl_lock(); res = drv_get_stats(local, &stats); rtnl_unlock(); if (res) return res; res = printvalue(&stats, buf, sizeof(buf)); return simple_read_from_buffer(userbuf, count, ppos, buf, res); } #define DEBUGFS_DEVSTATS_FILE(name) \ static int print_devstats_##name(struct ieee80211_low_level_stats *stats,\ char *buf, int buflen) \ { \ return scnprintf(buf, buflen, "%u\n", stats->name); \ } \ static ssize_t stats_ ##name## _read(struct file *file, \ char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ return format_devstat_counter(file->private_data, \ userbuf, \ count, \ ppos, \ print_devstats_##name); \ } \ \ static const struct file_operations stats_ ##name## _ops = { \ .read = stats_ ##name## _read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; #define DEBUGFS_STATS_ADD(name) \ debugfs_create_u32(#name, 0400, statsd, &local->name); #define DEBUGFS_DEVSTATS_ADD(name) \ debugfs_create_file(#name, 0400, statsd, local, 
&stats_ ##name## _ops); DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount); DEBUGFS_DEVSTATS_FILE(dot11RTSFailureCount); DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount); DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount); void debugfs_hw_add(struct ieee80211_local *local) { struct dentry *phyd = local->hw.wiphy->debugfsdir; struct dentry *statsd; if (!phyd) return; local->debugfs.keys = debugfs_create_dir("keys", phyd); DEBUGFS_ADD(total_ps_buffered); DEBUGFS_ADD(wep_iv); DEBUGFS_ADD(queues); #ifdef CONFIG_PM DEBUGFS_ADD_MODE(reset, 0200); #endif DEBUGFS_ADD(hwflags); DEBUGFS_ADD(user_power); DEBUGFS_ADD(power); statsd = debugfs_create_dir("statistics", phyd); /* if the dir failed, don't put all the other things into the root! */ if (!statsd) return; #ifdef CONFIG_MAC80211_DEBUG_COUNTERS DEBUGFS_STATS_ADD(dot11TransmittedFragmentCount); DEBUGFS_STATS_ADD(dot11MulticastTransmittedFrameCount); DEBUGFS_STATS_ADD(dot11FailedCount); DEBUGFS_STATS_ADD(dot11RetryCount); DEBUGFS_STATS_ADD(dot11MultipleRetryCount); DEBUGFS_STATS_ADD(dot11FrameDuplicateCount); DEBUGFS_STATS_ADD(dot11ReceivedFragmentCount); DEBUGFS_STATS_ADD(dot11MulticastReceivedFrameCount); DEBUGFS_STATS_ADD(dot11TransmittedFrameCount); DEBUGFS_STATS_ADD(tx_handlers_drop); DEBUGFS_STATS_ADD(tx_handlers_queued); DEBUGFS_STATS_ADD(tx_handlers_drop_wep); DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc); DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port); DEBUGFS_STATS_ADD(rx_handlers_drop); DEBUGFS_STATS_ADD(rx_handlers_queued); DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc); DEBUGFS_STATS_ADD(rx_handlers_drop_defrag); DEBUGFS_STATS_ADD(tx_expand_skb_head); DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned); DEBUGFS_STATS_ADD(rx_expand_skb_head_defrag); DEBUGFS_STATS_ADD(rx_handlers_fragments); DEBUGFS_STATS_ADD(tx_status_drop); #endif DEBUGFS_DEVSTATS_ADD(dot11ACKFailureCount); DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount); DEBUGFS_DEVSTATS_ADD(dot11FCSErrorCount); DEBUGFS_DEVSTATS_ADD(dot11RTSSuccessCount); }
gpl-2.0
Aaron0927/xen-4.2.1
tools/qemu-xen/hw/s390-virtio-bus.c
44
11548
/* * QEMU S390 virtio target * * Copyright (c) 2009 Alexander Graf <agraf@suse.de> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "hw.h" #include "block.h" #include "sysemu.h" #include "net.h" #include "boards.h" #include "monitor.h" #include "loader.h" #include "elf.h" #include "hw/virtio.h" #include "hw/virtio-serial.h" #include "hw/virtio-net.h" #include "hw/sysbus.h" #include "kvm.h" #include "hw/s390-virtio-bus.h" /* #define DEBUG_S390 */ #ifdef DEBUG_S390 #define dprintf(fmt, ...) \ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0) #else #define dprintf(fmt, ...) 
\ do { } while (0) #endif #define VIRTIO_EXT_CODE 0x2603 struct BusInfo s390_virtio_bus_info = { .name = "s390-virtio", .size = sizeof(VirtIOS390Bus), }; typedef struct { DeviceInfo qdev; int (*init)(VirtIOS390Device *dev); } VirtIOS390DeviceInfo; static const VirtIOBindings virtio_s390_bindings; static ram_addr_t s390_virtio_device_num_vq(VirtIOS390Device *dev); /* length of VirtIO device pages */ const target_phys_addr_t virtio_size = S390_DEVICE_PAGES * TARGET_PAGE_SIZE; VirtIOS390Bus *s390_virtio_bus_init(ram_addr_t *ram_size) { VirtIOS390Bus *bus; BusState *_bus; DeviceState *dev; /* Create bridge device */ dev = qdev_create(NULL, "s390-virtio-bridge"); qdev_init_nofail(dev); /* Create bus on bridge device */ _bus = qbus_create(&s390_virtio_bus_info, dev, "s390-virtio"); bus = DO_UPCAST(VirtIOS390Bus, bus, _bus); bus->dev_page = *ram_size; bus->dev_offs = bus->dev_page; bus->next_ring = bus->dev_page + TARGET_PAGE_SIZE; /* Allocate RAM for VirtIO device pages (descriptors, queues, rings) */ *ram_size += S390_DEVICE_PAGES * TARGET_PAGE_SIZE; return bus; } static int s390_virtio_device_init(VirtIOS390Device *dev, VirtIODevice *vdev) { VirtIOS390Bus *bus; int dev_len; bus = DO_UPCAST(VirtIOS390Bus, bus, dev->qdev.parent_bus); dev->vdev = vdev; dev->dev_offs = bus->dev_offs; dev->feat_len = sizeof(uint32_t); /* always keep 32 bits features */ dev_len = VIRTIO_DEV_OFFS_CONFIG; dev_len += s390_virtio_device_num_vq(dev) * VIRTIO_VQCONFIG_LEN; dev_len += dev->feat_len * 2; dev_len += vdev->config_len; bus->dev_offs += dev_len; virtio_bind_device(vdev, &virtio_s390_bindings, dev); dev->host_features = vdev->get_features(vdev, dev->host_features); s390_virtio_device_sync(dev); return 0; } static int s390_virtio_net_init(VirtIOS390Device *dev) { VirtIODevice *vdev; vdev = virtio_net_init((DeviceState *)dev, &dev->nic, &dev->net); if (!vdev) { return -1; } return s390_virtio_device_init(dev, vdev); } static int s390_virtio_blk_init(VirtIOS390Device *dev) { VirtIODevice 
*vdev; vdev = virtio_blk_init((DeviceState *)dev, &dev->block, &dev->block_serial); if (!vdev) { return -1; } return s390_virtio_device_init(dev, vdev); } static int s390_virtio_serial_init(VirtIOS390Device *dev) { VirtIOS390Bus *bus; VirtIODevice *vdev; int r; bus = DO_UPCAST(VirtIOS390Bus, bus, dev->qdev.parent_bus); vdev = virtio_serial_init((DeviceState *)dev, &dev->serial); if (!vdev) { return -1; } r = s390_virtio_device_init(dev, vdev); if (!r) { bus->console = dev; } return r; } static uint64_t s390_virtio_device_vq_token(VirtIOS390Device *dev, int vq) { ram_addr_t token_off; token_off = (dev->dev_offs + VIRTIO_DEV_OFFS_CONFIG) + (vq * VIRTIO_VQCONFIG_LEN) + VIRTIO_VQCONFIG_OFFS_TOKEN; return ldq_be_phys(token_off); } static ram_addr_t s390_virtio_device_num_vq(VirtIOS390Device *dev) { VirtIODevice *vdev = dev->vdev; int num_vq; for (num_vq = 0; num_vq < VIRTIO_PCI_QUEUE_MAX; num_vq++) { if (!virtio_queue_get_num(vdev, num_vq)) { break; } } return num_vq; } static ram_addr_t s390_virtio_next_ring(VirtIOS390Bus *bus) { ram_addr_t r = bus->next_ring; bus->next_ring += VIRTIO_RING_LEN; return r; } void s390_virtio_device_sync(VirtIOS390Device *dev) { VirtIOS390Bus *bus = DO_UPCAST(VirtIOS390Bus, bus, dev->qdev.parent_bus); ram_addr_t cur_offs; uint8_t num_vq; int i; virtio_reset(dev->vdev); /* Sync dev space */ stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_TYPE, dev->vdev->device_id); stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ, s390_virtio_device_num_vq(dev)); stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_FEATURE_LEN, dev->feat_len); stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_CONFIG_LEN, dev->vdev->config_len); num_vq = s390_virtio_device_num_vq(dev); stb_phys(dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ, num_vq); /* Sync virtqueues */ for (i = 0; i < num_vq; i++) { ram_addr_t vq = (dev->dev_offs + VIRTIO_DEV_OFFS_CONFIG) + (i * VIRTIO_VQCONFIG_LEN); ram_addr_t vring; vring = s390_virtio_next_ring(bus); virtio_queue_set_addr(dev->vdev, i, vring); 
virtio_queue_set_vector(dev->vdev, i, i); stq_be_phys(vq + VIRTIO_VQCONFIG_OFFS_ADDRESS, vring); stw_be_phys(vq + VIRTIO_VQCONFIG_OFFS_NUM, virtio_queue_get_num(dev->vdev, i)); } cur_offs = dev->dev_offs; cur_offs += VIRTIO_DEV_OFFS_CONFIG; cur_offs += num_vq * VIRTIO_VQCONFIG_LEN; /* Sync feature bitmap */ stl_le_phys(cur_offs, dev->host_features); dev->feat_offs = cur_offs + dev->feat_len; cur_offs += dev->feat_len * 2; /* Sync config space */ if (dev->vdev->get_config) { dev->vdev->get_config(dev->vdev, dev->vdev->config); } cpu_physical_memory_write(cur_offs, dev->vdev->config, dev->vdev->config_len); cur_offs += dev->vdev->config_len; } void s390_virtio_device_update_status(VirtIOS390Device *dev) { VirtIODevice *vdev = dev->vdev; uint32_t features; virtio_set_status(vdev, ldub_phys(dev->dev_offs + VIRTIO_DEV_OFFS_STATUS)); /* Update guest supported feature bitmap */ features = bswap32(ldl_be_phys(dev->feat_offs)); virtio_set_features(vdev, features); } VirtIOS390Device *s390_virtio_bus_console(VirtIOS390Bus *bus) { return bus->console; } /* Find a device by vring address */ VirtIOS390Device *s390_virtio_bus_find_vring(VirtIOS390Bus *bus, ram_addr_t mem, int *vq_num) { VirtIOS390Device *_dev; DeviceState *dev; int i; QTAILQ_FOREACH(dev, &bus->bus.children, sibling) { _dev = (VirtIOS390Device *)dev; for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) { if (!virtio_queue_get_addr(_dev->vdev, i)) break; if (virtio_queue_get_addr(_dev->vdev, i) == mem) { if (vq_num) { *vq_num = i; } return _dev; } } } return NULL; } /* Find a device by device descriptor location */ VirtIOS390Device *s390_virtio_bus_find_mem(VirtIOS390Bus *bus, ram_addr_t mem) { VirtIOS390Device *_dev; DeviceState *dev; QTAILQ_FOREACH(dev, &bus->bus.children, sibling) { _dev = (VirtIOS390Device *)dev; if (_dev->dev_offs == mem) { return _dev; } } return NULL; } static void virtio_s390_notify(void *opaque, uint16_t vector) { VirtIOS390Device *dev = (VirtIOS390Device*)opaque; uint64_t token = 
s390_virtio_device_vq_token(dev, vector); CPUState *env = s390_cpu_addr2state(0); if (kvm_enabled()) { kvm_s390_virtio_irq(env, 0, token); } else { cpu_inject_ext(env, VIRTIO_EXT_CODE, 0, token); } } static unsigned virtio_s390_get_features(void *opaque) { VirtIOS390Device *dev = (VirtIOS390Device*)opaque; return dev->host_features; } /**************** S390 Virtio Bus Device Descriptions *******************/ static const VirtIOBindings virtio_s390_bindings = { .notify = virtio_s390_notify, .get_features = virtio_s390_get_features, }; static VirtIOS390DeviceInfo s390_virtio_net = { .init = s390_virtio_net_init, .qdev.name = "virtio-net-s390", .qdev.alias = "virtio-net", .qdev.size = sizeof(VirtIOS390Device), .qdev.props = (Property[]) { DEFINE_NIC_PROPERTIES(VirtIOS390Device, nic), DEFINE_PROP_UINT32("x-txtimer", VirtIOS390Device, net.txtimer, TX_TIMER_INTERVAL), DEFINE_PROP_INT32("x-txburst", VirtIOS390Device, net.txburst, TX_BURST), DEFINE_PROP_STRING("tx", VirtIOS390Device, net.tx), DEFINE_PROP_END_OF_LIST(), }, }; static VirtIOS390DeviceInfo s390_virtio_blk = { .init = s390_virtio_blk_init, .qdev.name = "virtio-blk-s390", .qdev.alias = "virtio-blk", .qdev.size = sizeof(VirtIOS390Device), .qdev.props = (Property[]) { DEFINE_BLOCK_PROPERTIES(VirtIOS390Device, block), DEFINE_PROP_STRING("serial", VirtIOS390Device, block_serial), DEFINE_PROP_END_OF_LIST(), }, }; static VirtIOS390DeviceInfo s390_virtio_serial = { .init = s390_virtio_serial_init, .qdev.name = "virtio-serial-s390", .qdev.alias = "virtio-serial", .qdev.size = sizeof(VirtIOS390Device), .qdev.props = (Property[]) { DEFINE_PROP_UINT32("max_ports", VirtIOS390Device, serial.max_virtserial_ports, 31), DEFINE_PROP_END_OF_LIST(), }, }; static int s390_virtio_busdev_init(DeviceState *dev, DeviceInfo *info) { VirtIOS390DeviceInfo *_info = (VirtIOS390DeviceInfo *)info; VirtIOS390Device *_dev = (VirtIOS390Device *)dev; return _info->init(_dev); } static void s390_virtio_bus_register_withprop(VirtIOS390DeviceInfo 
*info) { info->qdev.init = s390_virtio_busdev_init; info->qdev.bus_info = &s390_virtio_bus_info; assert(info->qdev.size >= sizeof(VirtIOS390Device)); qdev_register(&info->qdev); } static void s390_virtio_register(void) { s390_virtio_bus_register_withprop(&s390_virtio_serial); s390_virtio_bus_register_withprop(&s390_virtio_blk); s390_virtio_bus_register_withprop(&s390_virtio_net); } device_init(s390_virtio_register); /***************** S390 Virtio Bus Bridge Device *******************/ /* Only required to have the virtio bus as child in the system bus */ static int s390_virtio_bridge_init(SysBusDevice *dev) { /* nothing */ return 0; } static SysBusDeviceInfo s390_virtio_bridge_info = { .init = s390_virtio_bridge_init, .qdev.name = "s390-virtio-bridge", .qdev.size = sizeof(SysBusDevice), .qdev.no_user = 1, }; static void s390_virtio_register_devices(void) { sysbus_register_withprop(&s390_virtio_bridge_info); } device_init(s390_virtio_register_devices)
gpl-2.0
clarkli86/linux-3.7-mini2440-mtp
drivers/block/aoe/aoedev.c
44
8804
/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */ /* * aoedev.c * AoE device utility functions; maintains device list. */ #include <linux/hdreg.h> #include <linux/blkdev.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/bitmap.h> #include <linux/kdev_t.h> #include <linux/moduleparam.h> #include "aoe.h" static void dummy_timer(ulong); static void aoedev_freedev(struct aoedev *); static void freetgt(struct aoedev *d, struct aoetgt *t); static void skbpoolfree(struct aoedev *d); static int aoe_dyndevs = 1; module_param(aoe_dyndevs, int, 0644); MODULE_PARM_DESC(aoe_dyndevs, "Use dynamic minor numbers for devices."); static struct aoedev *devlist; static DEFINE_SPINLOCK(devlist_lock); /* Because some systems will have one, many, or no * - partitions, * - slots per shelf, * - or shelves, * we need some flexibility in the way the minor numbers * are allocated. So they are dynamic. */ #define N_DEVS ((1U<<MINORBITS)/AOE_PARTITIONS) static DEFINE_SPINLOCK(used_minors_lock); static DECLARE_BITMAP(used_minors, N_DEVS); static int minor_get_dyn(ulong *sysminor) { ulong flags; ulong n; int error = 0; spin_lock_irqsave(&used_minors_lock, flags); n = find_first_zero_bit(used_minors, N_DEVS); if (n < N_DEVS) set_bit(n, used_minors); else error = -1; spin_unlock_irqrestore(&used_minors_lock, flags); *sysminor = n * AOE_PARTITIONS; return error; } static int minor_get_static(ulong *sysminor, ulong aoemaj, int aoemin) { ulong flags; ulong n; int error = 0; enum { /* for backwards compatibility when !aoe_dyndevs, * a static number of supported slots per shelf */ NPERSHELF = 16, }; n = aoemaj * NPERSHELF + aoemin; if (aoemin >= NPERSHELF || n >= N_DEVS) { pr_err("aoe: %s with e%ld.%d\n", "cannot use static minor device numbers", aoemaj, aoemin); error = -1; } else { spin_lock_irqsave(&used_minors_lock, flags); if (test_bit(n, used_minors)) { pr_err("aoe: %s %lu\n", "existing device already has static minor number", n); error 
= -1; } else set_bit(n, used_minors); spin_unlock_irqrestore(&used_minors_lock, flags); } *sysminor = n; return error; } static int minor_get(ulong *sysminor, ulong aoemaj, int aoemin) { if (aoe_dyndevs) return minor_get_dyn(sysminor); else return minor_get_static(sysminor, aoemaj, aoemin); } static void minor_free(ulong minor) { ulong flags; minor /= AOE_PARTITIONS; BUG_ON(minor >= N_DEVS); spin_lock_irqsave(&used_minors_lock, flags); BUG_ON(!test_bit(minor, used_minors)); clear_bit(minor, used_minors); spin_unlock_irqrestore(&used_minors_lock, flags); } /* * Users who grab a pointer to the device with aoedev_by_aoeaddr * automatically get a reference count and must be responsible * for performing a aoedev_put. With the addition of async * kthread processing I'm no longer confident that we can * guarantee consistency in the face of device flushes. * * For the time being, we only bother to add extra references for * frames sitting on the iocq. When the kthreads finish processing * these frames, they will aoedev_put the device. 
*/
/*
 * Drop a reference taken by aoedev_by_aoeaddr().  Only the counter is
 * decremented here; actual teardown of an unreferenced device happens
 * in aoedev_flush()/aoedev_exit(), which check d->ref under the lock.
 */
void
aoedev_put(struct aoedev *d)
{
	ulong flags;

	spin_lock_irqsave(&devlist_lock, flags);
	d->ref--;
	spin_unlock_irqrestore(&devlist_lock, flags);
}

/*
 * Per-device heartbeat timer: re-arms itself once per second until the
 * device is flagged for teardown (DEVFL_TKILL), at which point it simply
 * stops rescheduling so del_timer_sync() in the teardown path can finish.
 */
static void
dummy_timer(ulong vp)
{
	struct aoedev *d;

	d = (struct aoedev *)vp;
	if (d->flags & DEVFL_TKILL)
		return;
	d->timer.expires = jiffies + HZ;
	add_timer(&d->timer);
}

/*
 * Fail the partially-processed ("in process") request, if any.
 *
 * rq->special is used as a plain counter of bios still outstanding for
 * the request; each queued-but-unsent bio is marked !BIO_UPTODATE and
 * the counter decremented.  When it reaches zero the whole request is
 * completed with an error via aoe_end_request().
 */
static void
aoe_failip(struct aoedev *d)
{
	struct request *rq;
	struct bio *bio;
	unsigned long n;

	aoe_failbuf(d, d->ip.buf);

	rq = d->ip.rq;
	if (rq == NULL)
		return;
	while ((bio = d->ip.nxbio)) {
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
		d->ip.nxbio = bio->bi_next;
		n = (unsigned long) rq->special;
		rq->special = (void *) --n;
	}
	if ((unsigned long) rq->special == 0)
		aoe_end_request(d, rq, 0);
}

/*
 * Take the device down: clear DEVFL_UP, fail every frame and buffer in
 * flight, reset per-target windows, and error out all queued requests.
 * Caller is expected to hold d->lock (all callers in this file do).
 */
void
aoedev_downdev(struct aoedev *d)
{
	struct aoetgt *t, **tt, **te;
	struct frame *f;
	struct list_head *head, *pos, *nx;
	struct request *rq;
	int i;

	d->flags &= ~DEVFL_UP;

	/* clean out active buffers */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head) {
			f = list_entry(pos, struct frame, head);
			list_del(pos);
			if (f->buf) {
				f->buf->nframesout--;
				aoe_failbuf(d, f->buf);
			}
			aoe_freetframe(f);
		}
	}
	/* reset window dressings */
	tt = d->targets;
	te = tt + NTARGETS;
	for (; tt < te && (t = *tt); tt++) {
		t->maxout = t->nframes;
		t->nout = 0;
	}

	/* clean out the in-process request (if any) */
	aoe_failip(d);
	d->htgt = NULL;

	/* fast fail all pending I/O */
	if (d->blkq) {
		while ((rq = blk_peek_request(d->blkq))) {
			blk_start_request(rq);
			aoe_end_request(d, rq, 1);
		}
	}

	if (d->gd)
		set_capacity(d->gd, 0);
}

/*
 * Final teardown and kfree of a device that is already off the devlist
 * and unreferenced.  May sleep (cancel_work_sync, blk_cleanup_queue),
 * so it must be called without spinlocks held.
 */
static void
aoedev_freedev(struct aoedev *d)
{
	struct aoetgt **t, **e;

	cancel_work_sync(&d->work);
	if (d->gd) {
		aoedisk_rm_sysfs(d);
		del_gendisk(d->gd);
		put_disk(d->gd);
		blk_cleanup_queue(d->blkq);
	}
	t = d->targets;
	e = t + NTARGETS;
	for (; t < e && *t; t++)
		freetgt(d, *t);
	if (d->bufpool)
		mempool_destroy(d->bufpool);
	skbpoolfree(d);
	minor_free(d->sysminor);
	kfree(d);
}

/*
 * Handle a userspace "flush" request: unlink and free devices that are
 * down (or, when the string "all" is written, all devices) provided
 * they are idle -- not open, not referenced, and not mid-allocation.
 *
 * Devices are first collected onto a private list under devlist_lock,
 * then freed after the lock is dropped because aoedev_freedev() sleeps.
 * Returns 0 on success or -EFAULT if the user buffer is unreadable.
 */
int
aoedev_flush(const char __user *str, size_t cnt)
{
	ulong flags;
	struct aoedev *d, **dd;
	struct aoedev *rmd = NULL;
	char buf[16];
	int all = 0;

	if (cnt >= 3) {
		if (cnt > sizeof buf)
			cnt = sizeof buf;
		if (copy_from_user(buf, str, cnt))
			return -EFAULT;
		/* only the first three bytes matter; buf need not be NUL terminated */
		all = !strncmp(buf, "all", 3);
	}

	spin_lock_irqsave(&devlist_lock, flags);
	dd = &devlist;
	while ((d = *dd)) {
		spin_lock(&d->lock);
		/* skip devices that are still up (unless "all"), busy, or in use */
		if ((!all && (d->flags & DEVFL_UP))
		|| (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		|| d->nopen
		|| d->ref) {
			spin_unlock(&d->lock);
			dd = &d->next;
			continue;
		}
		*dd = d->next;		/* unlink from devlist */
		aoedev_downdev(d);
		d->flags |= DEVFL_TKILL;
		spin_unlock(&d->lock);
		d->next = rmd;		/* push onto private removal list */
		rmd = d;
	}
	spin_unlock_irqrestore(&devlist_lock, flags);
	while ((d = rmd)) {
		rmd = d->next;
		del_timer_sync(&d->timer);
		aoedev_freedev(d);	/* must be able to sleep */
	}
	return 0;
}

/* This has been confirmed to occur once with Tms=3*1000 due to the
 * driver changing link and not processing its transmit ring.  The
 * problem is hard enough to solve by returning an error that I'm
 * still punting on "solving" this.
 */
/*
 * Free an skb once we are its sole owner.  Polls dataref every Sms
 * milliseconds for up to Tms; if another holder never lets go, the skb
 * is deliberately leaked (with a log message) rather than freed unsafely.
 */
static void
skbfree(struct sk_buff *skb)
{
	enum { Sms = 250, Tms = 30 * 1000};
	int i = Tms / Sms;

	if (skb == NULL)
		return;
	while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0)
		msleep(Sms);
	if (i < 0) {
		printk(KERN_ERR
			"aoe: %s holds ref: %s\n",
			skb->dev ?
			skb->dev->name : "netif",
			"cannot free skb -- memory leaked.");
		return;
	}
	/* strip any paged data before handing the skb back */
	skb->truesize -= skb->data_len;
	skb_shinfo(skb)->nr_frags = skb->data_len = 0;
	skb_trim(skb, 0);
	dev_kfree_skb(skb);
}

/* Release every skb in the device's pool and reinitialize the queue head. */
static void
skbpoolfree(struct aoedev *d)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&d->skbpool, skb, tmp)
		skbfree(skb);

	__skb_queue_head_init(&d->skbpool);
}

/* find it or allocate it */
/*
 * Look up the device with AoE address maj.min, taking a reference on it.
 * If absent and do_alloc is set, allocate and initialize a new device
 * (GFP_ATOMIC: we hold devlist_lock) and link it onto devlist.
 * Returns NULL if not found and allocation was not requested or failed.
 */
struct aoedev *
aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
{
	struct aoedev *d;
	int i;
	ulong flags;
	ulong sysminor;

	spin_lock_irqsave(&devlist_lock, flags);

	for (d=devlist; d; d=d->next)
		if (d->aoemajor == maj && d->aoeminor == min) {
			d->ref++;
			break;
		}
	if (d || !do_alloc || minor_get(&sysminor, maj, min) < 0)
		goto out;
	d = kcalloc(1, sizeof *d, GFP_ATOMIC);
	if (!d)
		goto out;
	INIT_WORK(&d->work, aoecmd_sleepwork);
	spin_lock_init(&d->lock);
	skb_queue_head_init(&d->skbpool);
	init_timer(&d->timer);
	d->timer.data = (ulong) d;
	d->timer.function = dummy_timer;
	d->timer.expires = jiffies + HZ;
	add_timer(&d->timer);
	d->bufpool = NULL;	/* defer to aoeblk_gdalloc */
	d->tgt = d->targets;
	d->ref = 1;		/* reference handed to the caller */
	for (i = 0; i < NFACTIVE; i++)
		INIT_LIST_HEAD(&d->factive[i]);
	d->sysminor = sysminor;
	d->aoemajor = maj;
	d->aoeminor = min;
	d->mintimer = MINTIMER;
	d->next = devlist;
	devlist = d;
 out:
	spin_unlock_irqrestore(&devlist_lock, flags);
	return d;
}

/*
 * Free one target: drop the netdev references its interfaces hold,
 * free every frame (and skb) on its free list, then the target itself.
 */
static void
freetgt(struct aoedev *d, struct aoetgt *t)
{
	struct frame *f;
	struct list_head *pos, *nx, *head;
	struct aoeif *ifp;

	for (ifp = t->ifs; ifp < &t->ifs[NAOEIFS]; ++ifp) {
		if (!ifp->nd)
			break;
		dev_put(ifp->nd);
	}

	head = &t->ffree;
	list_for_each_safe(pos, nx, head) {
		list_del(pos);
		f = list_entry(pos, struct frame, head);
		skbfree(f->skb);
		kfree(f);
	}
	kfree(t);
}

/*
 * Module-unload teardown: flush the I/O completion queue, then take
 * down and free every device still on devlist.
 */
void
aoedev_exit(void)
{
	struct aoedev *d;
	ulong flags;

	aoe_flush_iocq();
	while ((d = devlist)) {
		devlist = d->next;

		spin_lock_irqsave(&d->lock, flags);
		aoedev_downdev(d);
		d->flags |= DEVFL_TKILL;
		spin_unlock_irqrestore(&d->lock, flags);

		del_timer_sync(&d->timer);
		aoedev_freedev(d);
	}
}
int __init aoedev_init(void) { return 0; }
gpl-2.0
AriesVE-DevCon-TEAM/samsung-kernel-msm7x30
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
812
149588
/* * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <asm/unaligned.h> #include "hw.h" #include "ar9003_phy.h" #include "ar9003_eeprom.h" #define COMP_HDR_LEN 4 #define COMP_CKSUM_LEN 2 #define LE16(x) __constant_cpu_to_le16(x) #define LE32(x) __constant_cpu_to_le32(x) /* Local defines to distinguish between extension and control CTL's */ #define EXT_ADDITIVE (0x8000) #define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE) #define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE) #define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE) #define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */ #define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */ #define PWRINCR_3_TO_1_CHAIN 9 /* 10*log(3)*2 */ #define PWRINCR_3_TO_2_CHAIN 3 /* floor(10*log(3/2)*2) */ #define PWRINCR_2_TO_1_CHAIN 6 /* 10*log(2)*2 */ #define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */ #define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */ #define CTL(_tpower, _flag) ((_tpower) | ((_flag) << 6)) #define EEPROM_DATA_LEN_9485 1088 static int ar9003_hw_power_interpolate(int32_t x, int32_t *px, int32_t *py, u_int16_t np); static const struct ar9300_eeprom ar9300_default = { .eepromVersion = 2, .templateVersion = 2, .macAddr = {0, 2, 3, 4, 5, 6}, .custData = {0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .baseEepHeader = { .regDmn = { LE16(0), LE16(0x1f) }, .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A, .eepMisc = 0, }, .rfSilent = 0, .blueToothOptions = 0, .deviceCap = 0, .deviceType = 5, /* takes lower byte in eeprom location */ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET, .params_for_tuning_caps = {0, 0}, .featureEnable = 0x0c, /* * bit0 - enable tx temp comp - disabled * bit1 - enable tx volt comp - disabled * bit2 - enable fastClock - enabled * bit3 - enable doubling - enabled * bit4 - enable internal regulator - disabled * bit5 - enable pa predistortion - disabled */ .miscConfiguration = 0, /* bit0 - turn down drivestrength */ .eepromWriteEnableGpio = 3, .wlanDisableGpio = 0, .wlanLedGpio = 8, .rxBandSelectGpio = 0xff, .txrxgain = 0, .swreg = 0, }, .modalHeader2G = { /* ar9300_modal_eep_header 2g */ /* 4 idle,t1,t2,b(4 bits per setting) */ .antCtrlCommon = LE32(0x110), /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */ .antCtrlCommon2 = LE32(0x22222), /* * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r, * rx1, rx12, b (2 bits each) */ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) }, /* * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db * for ar9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0, 0, 0}, /* * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin * for ar9280 (0xa20c/b20c 16:12 */ .xatten1Margin = {0, 0, 0}, .tempSlope = 36, .voltSlope = 0, /* * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur * channels in usual fbin coding format */ .spurChans = {0, 0, 0, 0, 0}, /* * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2c, .adcDesiredSize = -30, .txEndToXpaOff = 0, 
.txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0cf0e0e0), .papdRateMaskHt40 = LE32(0x6cf0e0e0), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext1 = { .ant_div_control = 0, .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1), }, /* ar9300_cal_data_per_freq_op_loop 2g */ .calPierData2G = { { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, }, .calTarget_freqbin_Cck = { FREQ2FBIN(2412, 1), FREQ2FBIN(2484, 1), }, .calTarget_freqbin_2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT20 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT40 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTargetPowerCck = { /* 1L-5L,5S,11L,11S */ { {36, 36, 36, 36} }, { {36, 36, 36, 36} }, }, .calTargetPower2G = { /* 6-24,36,48,54 */ { {32, 32, 28, 24} }, { {32, 32, 28, 24} }, { {32, 32, 28, 24} }, }, .calTargetPower2GHT20 = { { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} }, { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} }, { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} }, }, .calTargetPower2GHT40 = { { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} }, { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} }, { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} }, }, .ctlIndex_2G = { 0x11, 0x12, 0x15, 0x17, 0x41, 0x42, 0x45, 0x47, 0x31, 0x32, 0x35, 0x37, }, .ctl_freqbin_2G = { { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2457, 1), FREQ2FBIN(2462, 1) }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2422, 1), FREQ2FBIN(2427, 1), FREQ2FBIN(2447, 1), FREQ2FBIN(2452, 1) 
}, { /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1), }, { /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0, }, { /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), FREQ2FBIN(2472, 1), 0, }, { /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1), }, { /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), }, { /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1), } }, .ctlPowerData_2G = { { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), 
CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, }, .modalHeader5G = { /* 4 idle,t1,t2,b (4 bits per setting) */ .antCtrlCommon = LE32(0x110), /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */ .antCtrlCommon2 = LE32(0x22222), /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */ .antCtrlChain = { LE16(0x000), LE16(0x000), LE16(0x000), }, /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0, 0, 0}, /* * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin * for merlin (0xa20c/b20c 16:12 */ .xatten1Margin = {0, 0, 0}, .tempSlope = 68, .voltSlope = 0, /* spurChans spur channels in usual fbin coding format */ .spurChans = {0, 0, 0, 0, 0}, /* noiseFloorThreshCh Check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2d, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0c80c080), .papdRateMaskHt40 = LE32(0x0080c080), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext2 = { .tempSlopeLow = 0, .tempSlopeHigh = 0, .xatten1DBLow = {0, 0, 0}, .xatten1MarginLow = {0, 0, 0}, .xatten1DBHigh = {0, 0, 0}, .xatten1MarginHigh = {0, 0, 0} }, .calFreqPier5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5725, 0), FREQ2FBIN(5825, 0) }, .calPierData5G = { { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, 
{0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, }, .calTarget_freqbin_5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5725, 0), FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT20 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5725, 0), FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT40 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5725, 0), FREQ2FBIN(5825, 0) }, .calTargetPower5G = { /* 6-24,36,48,54 */ { {20, 20, 20, 10} }, { {20, 20, 20, 10} }, { {20, 20, 20, 10} }, { {20, 20, 20, 10} }, { {20, 20, 20, 10} }, { {20, 20, 20, 10} }, { {20, 20, 20, 10} }, { {20, 20, 20, 10} }, }, .calTargetPower5GHT20 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, }, .calTargetPower5GHT40 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 
10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, }, .ctlIndex_5G = { 0x10, 0x16, 0x18, 0x40, 0x46, 0x48, 0x30, 0x36, 0x38 }, .ctl_freqbin_5G = { { /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0), /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0), /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0), /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0), /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0), /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0), /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0), /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0), /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0), /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0), /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0) }, { /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0), /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0), /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0), /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0), /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[3].ctlEdges[6].bChannel */ 0xFF, /* Data[3].ctlEdges[7].bChannel */ 0xFF, }, { /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), 
/* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0), /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0), /* Data[4].ctlEdges[4].bChannel */ 0xFF, /* Data[4].ctlEdges[5].bChannel */ 0xFF, /* Data[4].ctlEdges[6].bChannel */ 0xFF, /* Data[4].ctlEdges[7].bChannel */ 0xFF, }, { /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0), /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0), /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0), /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0), /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0), /* Data[5].ctlEdges[6].bChannel */ 0xFF, /* Data[5].ctlEdges[7].bChannel */ 0xFF }, { /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0), /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0), /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0), /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0), /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0), /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0), /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0) }, { /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0), /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0), /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0), /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0), /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0), /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0), /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0), /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0), /* Data[8].ctlEdges[7].bChannel 
*/ FREQ2FBIN(5795, 0) } }, .ctlPowerData_5G = { { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), } }, } }; static const struct ar9300_eeprom ar9300_x113 = { .eepromVersion = 2, .templateVersion = 6, .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0}, .custData = {"x113-023-f0000"}, .baseEepHeader = { .regDmn = { LE16(0), LE16(0x1f) }, .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11A, .eepMisc = 0, }, .rfSilent = 0, .blueToothOptions = 0, .deviceCap = 0, .deviceType = 5, /* takes lower byte in eeprom location */ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET, .params_for_tuning_caps = {0, 0}, .featureEnable = 0x0d, /* * bit0 - enable tx temp comp - disabled * bit1 - enable tx volt comp - disabled * bit2 - enable fastClock - enabled * bit3 - enable doubling - enabled * bit4 - enable internal regulator - disabled * bit5 - enable pa predistortion - disabled */ .miscConfiguration = 0, /* bit0 - turn down drivestrength */ .eepromWriteEnableGpio = 6, .wlanDisableGpio = 0, .wlanLedGpio = 8, .rxBandSelectGpio = 0xff, .txrxgain = 0x21, .swreg = 0, }, .modalHeader2G = 
{ /* ar9300_modal_eep_header 2g */ /* 4 idle,t1,t2,b(4 bits per setting) */ .antCtrlCommon = LE32(0x110), /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */ .antCtrlCommon2 = LE32(0x44444), /* * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r, * rx1, rx12, b (2 bits each) */ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) }, /* * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db * for ar9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0, 0, 0}, /* * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin * for ar9280 (0xa20c/b20c 16:12 */ .xatten1Margin = {0, 0, 0}, .tempSlope = 25, .voltSlope = 0, /* * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur * channels in usual fbin coding format */ .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0}, /* * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2c, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0c80c080), .papdRateMaskHt40 = LE32(0x0080c080), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext1 = { .ant_div_control = 0, .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1), }, /* ar9300_cal_data_per_freq_op_loop 2g */ .calPierData2G = { { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, }, .calTarget_freqbin_Cck = { FREQ2FBIN(2412, 1), FREQ2FBIN(2472, 1), }, .calTarget_freqbin_2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT20 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT40 = { 
FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTargetPowerCck = { /* 1L-5L,5S,11L,11S */ { {34, 34, 34, 34} }, { {34, 34, 34, 34} }, }, .calTargetPower2G = { /* 6-24,36,48,54 */ { {34, 34, 32, 32} }, { {34, 34, 32, 32} }, { {34, 34, 32, 32} }, }, .calTargetPower2GHT20 = { { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} }, { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} }, { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} }, }, .calTargetPower2GHT40 = { { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} }, { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} }, { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} }, }, .ctlIndex_2G = { 0x11, 0x12, 0x15, 0x17, 0x41, 0x42, 0x45, 0x47, 0x31, 0x32, 0x35, 0x37, }, .ctl_freqbin_2G = { { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2457, 1), FREQ2FBIN(2462, 1) }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2422, 1), FREQ2FBIN(2427, 1), FREQ2FBIN(2447, 1), FREQ2FBIN(2452, 1) }, { /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1), }, { /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0, }, { /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), FREQ2FBIN(2472, 1), 0, }, { /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1), }, { /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[8].ctlEdges[2].bChannel 
*/ FREQ2FBIN(2472, 1), }, { /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1), } }, .ctlPowerData_2G = { { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, }, .modalHeader5G = { /* 4 idle,t1,t2,b (4 bits per setting) */ .antCtrlCommon = LE32(0x220), /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */ .antCtrlCommon2 = LE32(0x11111), /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150), }, /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0, 0, 0}, /* * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin * for merlin (0xa20c/b20c 16:12 */ .xatten1Margin = {0, 0, 0}, .tempSlope = 68, .voltSlope = 0, /* spurChans spur channels in usual fbin coding format */ .spurChans = {FREQ2FBIN(5500, 0), 0, 0, 0, 0}, /* noiseFloorThreshCh Check if the register is per chain */ .noiseFloorThreshCh = 
{-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0xf, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2d, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0cf0e0e0), .papdRateMaskHt40 = LE32(0x6cf0e0e0), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext2 = { .tempSlopeLow = 72, .tempSlopeHigh = 105, .xatten1DBLow = {0, 0, 0}, .xatten1MarginLow = {0, 0, 0}, .xatten1DBHigh = {0, 0, 0}, .xatten1MarginHigh = {0, 0, 0} }, .calFreqPier5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5785, 0) }, .calPierData5G = { { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, }, .calTarget_freqbin_5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5785, 0) }, .calTarget_freqbin_5GHT20 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT40 = { FREQ2FBIN(5190, 0), FREQ2FBIN(5230, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5410, 0), FREQ2FBIN(5510, 0), FREQ2FBIN(5670, 0), FREQ2FBIN(5755, 0), FREQ2FBIN(5825, 0) }, .calTargetPower5G = { /* 6-24,36,48,54 */ { {42, 40, 40, 34} }, { {42, 40, 40, 34} }, { {42, 40, 40, 34} }, { {42, 40, 40, 34} }, { {42, 40, 40, 34} }, { 
{42, 40, 40, 34} }, { {42, 40, 40, 34} }, { {42, 40, 40, 34} }, }, .calTargetPower5GHT20 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} }, { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} }, { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} }, { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} }, { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} }, { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} }, { {38, 38, 38, 38, 32, 28, 38, 38, 32, 28, 38, 38, 32, 26} }, { {36, 36, 36, 36, 32, 28, 36, 36, 32, 28, 36, 36, 32, 26} }, }, .calTargetPower5GHT40 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} }, { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} }, { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} }, { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} }, { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} }, { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} }, { {36, 36, 36, 36, 30, 26, 36, 36, 30, 26, 36, 36, 30, 24} }, { {34, 34, 34, 34, 30, 26, 34, 34, 30, 26, 34, 34, 30, 24} }, }, .ctlIndex_5G = { 0x10, 0x16, 0x18, 0x40, 0x46, 0x48, 0x30, 0x36, 0x38 }, .ctl_freqbin_5G = { { /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0), /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0), /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0), /* Data[1].ctlEdges[3].bChannel */ 
FREQ2FBIN(5500, 0), /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0), /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0), /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0), /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0), /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0), /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0), /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0), /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0) }, { /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0), /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0), /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0), /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0), /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[3].ctlEdges[6].bChannel */ 0xFF, /* Data[3].ctlEdges[7].bChannel */ 0xFF, }, { /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0), /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0), /* Data[4].ctlEdges[4].bChannel */ 0xFF, /* Data[4].ctlEdges[5].bChannel */ 0xFF, /* Data[4].ctlEdges[6].bChannel */ 0xFF, /* Data[4].ctlEdges[7].bChannel */ 0xFF, }, { /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0), /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0), /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0), /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0), /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0), /* Data[5].ctlEdges[6].bChannel */ 0xFF, /* Data[5].ctlEdges[7].bChannel */ 0xFF }, { /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[6].ctlEdges[1].bChannel */ 
FREQ2FBIN(5200, 0), /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0), /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0), /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0), /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0), /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0), /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0) }, { /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0), /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0), /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0), /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0), /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0), /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0), /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0), /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0), /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0) } }, .ctlPowerData_5G = { { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { 
{ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), } }, } }; static const struct ar9300_eeprom ar9300_h112 = { .eepromVersion = 2, .templateVersion = 3, .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0}, .custData = {"h112-241-f0000"}, .baseEepHeader = { .regDmn = { LE16(0), LE16(0x1f) }, .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A, .eepMisc = 0, }, .rfSilent = 0, .blueToothOptions = 0, .deviceCap = 0, .deviceType = 5, /* takes lower byte in eeprom location */ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET, .params_for_tuning_caps = {0, 0}, .featureEnable = 0x0d, /* * bit0 - enable tx temp comp - disabled * bit1 - enable tx volt comp - disabled * bit2 - enable fastClock - enabled * bit3 - enable doubling - enabled * bit4 - enable internal regulator - disabled * bit5 - enable pa predistortion - disabled */ .miscConfiguration = 0, /* bit0 - turn down drivestrength */ .eepromWriteEnableGpio = 6, .wlanDisableGpio = 0, .wlanLedGpio = 8, .rxBandSelectGpio = 0xff, .txrxgain = 0x10, .swreg = 0, }, .modalHeader2G = { /* ar9300_modal_eep_header 2g */ /* 4 idle,t1,t2,b(4 bits per setting) */ .antCtrlCommon = LE32(0x110), /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */ .antCtrlCommon2 = LE32(0x44444), /* * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r, * rx1, rx12, b (2 bits each) */ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) }, /* * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db * for ar9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0, 0, 0}, /* * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin * for ar9280 (0xa20c/b20c 16:12 */ .xatten1Margin = {0, 0, 0}, .tempSlope = 25, .voltSlope = 0, /* * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur * channels in usual fbin coding format */ .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0}, /* * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 
Check * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2c, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0c80c080), .papdRateMaskHt40 = LE32(0x0080c080), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext1 = { .ant_div_control = 0, .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2462, 1), }, /* ar9300_cal_data_per_freq_op_loop 2g */ .calPierData2G = { { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, }, .calTarget_freqbin_Cck = { FREQ2FBIN(2412, 1), FREQ2FBIN(2472, 1), }, .calTarget_freqbin_2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT20 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT40 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTargetPowerCck = { /* 1L-5L,5S,11L,11S */ { {34, 34, 34, 34} }, { {34, 34, 34, 34} }, }, .calTargetPower2G = { /* 6-24,36,48,54 */ { {34, 34, 32, 32} }, { {34, 34, 32, 32} }, { {34, 34, 32, 32} }, }, .calTargetPower2GHT20 = { { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} }, { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} }, { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} }, }, .calTargetPower2GHT40 = { { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} }, { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} }, { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} }, }, .ctlIndex_2G = { 0x11, 0x12, 0x15, 0x17, 0x41, 0x42, 0x45, 0x47, 0x31, 
0x32, 0x35, 0x37, }, .ctl_freqbin_2G = { { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2457, 1), FREQ2FBIN(2462, 1) }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2422, 1), FREQ2FBIN(2427, 1), FREQ2FBIN(2447, 1), FREQ2FBIN(2452, 1) }, { /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1), }, { /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0, }, { /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), FREQ2FBIN(2472, 1), 0, }, { /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1), }, { /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), }, { /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1), } }, .ctlPowerData_2G = { { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { 
CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, }, .modalHeader5G = { /* 4 idle,t1,t2,b (4 bits per setting) */ .antCtrlCommon = LE32(0x220), /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */ .antCtrlCommon2 = LE32(0x44444), /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150), }, /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0, 0, 0}, /* * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin * for merlin (0xa20c/b20c 16:12 */ .xatten1Margin = {0, 0, 0}, .tempSlope = 45, .voltSlope = 0, /* spurChans spur channels in usual fbin coding format */ .spurChans = {0, 0, 0, 0, 0}, /* noiseFloorThreshCh Check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2d, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0cf0e0e0), .papdRateMaskHt40 = LE32(0x6cf0e0e0), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext2 = { .tempSlopeLow = 40, .tempSlopeHigh = 50, .xatten1DBLow = {0, 0, 0}, .xatten1MarginLow = {0, 0, 0}, .xatten1DBHigh = {0, 0, 0}, .xatten1MarginHigh = {0, 0, 0} }, .calFreqPier5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 
0), FREQ2FBIN(5600, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5785, 0) }, .calPierData5G = { { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, }, .calTarget_freqbin_5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT20 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT40 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5825, 0) }, .calTargetPower5G = { /* 6-24,36,48,54 */ { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, }, .calTargetPower5GHT20 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 20, 20, 20, 16} }, { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 20, 20, 20, 16} }, { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 18, 18, 18, 16} }, { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 18, 18, 18, 16} }, { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 16, 16, 16, 14} }, { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 16, 16, 16, 14} }, { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 14, 14, 14, 12} }, { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 14, 14, 14, 12} }, }, .calTargetPower5GHT40 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {28, 28, 28, 
26, 22, 18, 28, 26, 22, 18, 18, 18, 18, 14} }, { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 18, 18, 18, 14} }, { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 16, 16, 16, 12} }, { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 16, 16, 16, 12} }, { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 14, 14, 14, 10} }, { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 14, 14, 14, 10} }, { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 12, 12, 12, 8} }, { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 12, 12, 12, 8} }, }, .ctlIndex_5G = { 0x10, 0x16, 0x18, 0x40, 0x46, 0x48, 0x30, 0x36, 0x38 }, .ctl_freqbin_5G = { { /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0), /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0), /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0), /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0), /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0), /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0), /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0), /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0), /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0), /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0), /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0) }, { /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[3].ctlEdges[1].bChannel */ 
FREQ2FBIN(5200, 0), /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0), /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0), /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0), /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[3].ctlEdges[6].bChannel */ 0xFF, /* Data[3].ctlEdges[7].bChannel */ 0xFF, }, { /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0), /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0), /* Data[4].ctlEdges[4].bChannel */ 0xFF, /* Data[4].ctlEdges[5].bChannel */ 0xFF, /* Data[4].ctlEdges[6].bChannel */ 0xFF, /* Data[4].ctlEdges[7].bChannel */ 0xFF, }, { /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0), /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0), /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0), /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0), /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0), /* Data[5].ctlEdges[6].bChannel */ 0xFF, /* Data[5].ctlEdges[7].bChannel */ 0xFF }, { /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0), /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0), /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0), /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0), /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0), /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0), /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0) }, { /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0), /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0), /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[7].ctlEdges[7].bChannel */ 
FREQ2FBIN(5825, 0) }, { /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0), /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0), /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0), /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0), /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0), /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0), /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0) } }, .ctlPowerData_5G = { { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), } }, } }; static const struct ar9300_eeprom ar9300_x112 = { .eepromVersion = 2, .templateVersion = 5, .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0}, .custData = {"x112-041-f0000"}, .baseEepHeader = { .regDmn = { LE16(0), LE16(0x1f) }, .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A, .eepMisc = 0, }, .rfSilent = 0, .blueToothOptions = 0, .deviceCap = 0, .deviceType = 5, /* takes lower byte in eeprom location */ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET, .params_for_tuning_caps = {0, 0}, 
.featureEnable = 0x0d, /* * bit0 - enable tx temp comp - disabled * bit1 - enable tx volt comp - disabled * bit2 - enable fastclock - enabled * bit3 - enable doubling - enabled * bit4 - enable internal regulator - disabled * bit5 - enable pa predistortion - disabled */ .miscConfiguration = 0, /* bit0 - turn down drivestrength */ .eepromWriteEnableGpio = 6, .wlanDisableGpio = 0, .wlanLedGpio = 8, .rxBandSelectGpio = 0xff, .txrxgain = 0x0, .swreg = 0, }, .modalHeader2G = { /* ar9300_modal_eep_header 2g */ /* 4 idle,t1,t2,b(4 bits per setting) */ .antCtrlCommon = LE32(0x110), /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */ .antCtrlCommon2 = LE32(0x22222), /* * antCtrlChain[ar9300_max_chains]; 6 idle, t, r, * rx1, rx12, b (2 bits each) */ .antCtrlChain = { LE16(0x10), LE16(0x10), LE16(0x10) }, /* * xatten1DB[AR9300_max_chains]; 3 xatten1_db * for ar9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0x1b, 0x1b, 0x1b}, /* * xatten1Margin[ar9300_max_chains]; 3 xatten1_margin * for ar9280 (0xa20c/b20c 16:12 */ .xatten1Margin = {0x15, 0x15, 0x15}, .tempSlope = 50, .voltSlope = 0, /* * spurChans[OSPrey_eeprom_modal_sPURS]; spur * channels in usual fbin coding format */ .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0}, /* * noiseFloorThreshch[ar9300_max_cHAINS]; 3 Check * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2c, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0c80c080), .papdRateMaskHt40 = LE32(0x0080c080), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext1 = { .ant_div_control = 0, .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1), }, /* ar9300_cal_data_per_freq_op_loop 2g */ 
.calPierData2G = { { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, }, .calTarget_freqbin_Cck = { FREQ2FBIN(2412, 1), FREQ2FBIN(2472, 1), }, .calTarget_freqbin_2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT20 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT40 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTargetPowerCck = { /* 1L-5L,5S,11L,11s */ { {38, 38, 38, 38} }, { {38, 38, 38, 38} }, }, .calTargetPower2G = { /* 6-24,36,48,54 */ { {38, 38, 36, 34} }, { {38, 38, 36, 34} }, { {38, 38, 34, 32} }, }, .calTargetPower2GHT20 = { { {36, 36, 36, 36, 36, 34, 34, 32, 30, 28, 28, 28, 28, 26} }, { {36, 36, 36, 36, 36, 34, 36, 34, 32, 30, 30, 30, 28, 26} }, { {36, 36, 36, 36, 36, 34, 34, 32, 30, 28, 28, 28, 28, 26} }, }, .calTargetPower2GHT40 = { { {36, 36, 36, 36, 34, 32, 32, 30, 28, 26, 26, 26, 26, 24} }, { {36, 36, 36, 36, 34, 32, 34, 32, 30, 28, 28, 28, 28, 24} }, { {36, 36, 36, 36, 34, 32, 32, 30, 28, 26, 26, 26, 26, 24} }, }, .ctlIndex_2G = { 0x11, 0x12, 0x15, 0x17, 0x41, 0x42, 0x45, 0x47, 0x31, 0x32, 0x35, 0x37, }, .ctl_freqbin_2G = { { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2457, 1), FREQ2FBIN(2462, 1) }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2422, 1), FREQ2FBIN(2427, 1), FREQ2FBIN(2447, 1), FREQ2FBIN(2452, 1) }, { /* Data[4].ctledges[0].bchannel */ FREQ2FBIN(2412, 1), /* Data[4].ctledges[1].bchannel */ FREQ2FBIN(2417, 1), /* Data[4].ctledges[2].bchannel */ FREQ2FBIN(2472, 1), /* Data[4].ctledges[3].bchannel */ FREQ2FBIN(2484, 1), }, { /* Data[5].ctledges[0].bchannel */ FREQ2FBIN(2412, 1), /* Data[5].ctledges[1].bchannel */ FREQ2FBIN(2417, 1), /* Data[5].ctledges[2].bchannel */ FREQ2FBIN(2472, 
1), 0, }, { /* Data[6].ctledges[0].bchannel */ FREQ2FBIN(2412, 1), /* Data[6].ctledges[1].bchannel */ FREQ2FBIN(2417, 1), FREQ2FBIN(2472, 1), 0, }, { /* Data[7].ctledges[0].bchannel */ FREQ2FBIN(2422, 1), /* Data[7].ctledges[1].bchannel */ FREQ2FBIN(2427, 1), /* Data[7].ctledges[2].bchannel */ FREQ2FBIN(2447, 1), /* Data[7].ctledges[3].bchannel */ FREQ2FBIN(2462, 1), }, { /* Data[8].ctledges[0].bchannel */ FREQ2FBIN(2412, 1), /* Data[8].ctledges[1].bchannel */ FREQ2FBIN(2417, 1), /* Data[8].ctledges[2].bchannel */ FREQ2FBIN(2472, 1), }, { /* Data[9].ctledges[0].bchannel */ FREQ2FBIN(2412, 1), /* Data[9].ctledges[1].bchannel */ FREQ2FBIN(2417, 1), /* Data[9].ctledges[2].bchannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[10].ctledges[0].bchannel */ FREQ2FBIN(2412, 1), /* Data[10].ctledges[1].bchannel */ FREQ2FBIN(2417, 1), /* Data[10].ctledges[2].bchannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[11].ctledges[0].bchannel */ FREQ2FBIN(2422, 1), /* Data[11].ctledges[1].bchannel */ FREQ2FBIN(2427, 1), /* Data[11].ctledges[2].bchannel */ FREQ2FBIN(2447, 1), /* Data[11].ctledges[3].bchannel */ FREQ2FBIN(2462, 1), } }, .ctlPowerData_2G = { { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, }, .modalHeader5G = { /* 4 idle,t1,t2,b (4 bits per setting) */ .antCtrlCommon = LE32(0x110), /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */ .antCtrlCommon2 = LE32(0x22222), /* antCtrlChain 6 
idle, t,r,rx1,rx12,b (2 bits each) */ .antCtrlChain = { LE16(0x0), LE16(0x0), LE16(0x0), }, /* xatten1DB 3 xatten1_db for ar9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0x13, 0x19, 0x17}, /* * xatten1Margin[ar9300_max_chains]; 3 xatten1_margin * for merlin (0xa20c/b20c 16:12 */ .xatten1Margin = {0x19, 0x19, 0x19}, .tempSlope = 70, .voltSlope = 15, /* spurChans spur channels in usual fbin coding format */ .spurChans = {0, 0, 0, 0, 0}, /* noiseFloorThreshch check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2d, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0cf0e0e0), .papdRateMaskHt40 = LE32(0x6cf0e0e0), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext2 = { .tempSlopeLow = 72, .tempSlopeHigh = 105, .xatten1DBLow = {0x10, 0x14, 0x10}, .xatten1MarginLow = {0x19, 0x19 , 0x19}, .xatten1DBHigh = {0x1d, 0x20, 0x24}, .xatten1MarginHigh = {0x10, 0x10, 0x10} }, .calFreqPier5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5785, 0) }, .calPierData5G = { { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, }, .calTarget_freqbin_5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5725, 0), 
FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT20 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5725, 0), FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT40 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5725, 0), FREQ2FBIN(5825, 0) }, .calTargetPower5G = { /* 6-24,36,48,54 */ { {32, 32, 28, 26} }, { {32, 32, 28, 26} }, { {32, 32, 28, 26} }, { {32, 32, 26, 24} }, { {32, 32, 26, 24} }, { {32, 32, 24, 22} }, { {30, 30, 24, 22} }, { {30, 30, 24, 22} }, }, .calTargetPower5GHT20 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} }, { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} }, { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} }, { {32, 32, 32, 32, 28, 26, 32, 26, 24, 22, 22, 22, 20, 20} }, { {32, 32, 32, 32, 28, 26, 32, 26, 24, 22, 20, 18, 16, 16} }, { {32, 32, 32, 32, 28, 26, 32, 24, 20, 16, 18, 16, 14, 14} }, { {30, 30, 30, 30, 28, 26, 30, 24, 20, 16, 18, 16, 14, 14} }, { {30, 30, 30, 30, 28, 26, 30, 24, 20, 16, 18, 16, 14, 14} }, }, .calTargetPower5GHT40 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} }, { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} }, { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} }, { {32, 32, 32, 30, 28, 26, 30, 26, 24, 22, 22, 22, 20, 20} }, { {32, 32, 32, 30, 28, 26, 30, 26, 24, 22, 20, 18, 16, 16} }, { {32, 32, 32, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} }, { {30, 30, 30, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} }, { {30, 30, 30, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} }, }, .ctlIndex_5G = { 0x10, 0x16, 0x18, 0x40, 0x46, 0x48, 0x30, 0x36, 0x38 }, .ctl_freqbin_5G = { { /* Data[0].ctledges[0].bchannel */ FREQ2FBIN(5180, 0), /* Data[0].ctledges[1].bchannel */ 
FREQ2FBIN(5260, 0), /* Data[0].ctledges[2].bchannel */ FREQ2FBIN(5280, 0), /* Data[0].ctledges[3].bchannel */ FREQ2FBIN(5500, 0), /* Data[0].ctledges[4].bchannel */ FREQ2FBIN(5600, 0), /* Data[0].ctledges[5].bchannel */ FREQ2FBIN(5700, 0), /* Data[0].ctledges[6].bchannel */ FREQ2FBIN(5745, 0), /* Data[0].ctledges[7].bchannel */ FREQ2FBIN(5825, 0) }, { /* Data[1].ctledges[0].bchannel */ FREQ2FBIN(5180, 0), /* Data[1].ctledges[1].bchannel */ FREQ2FBIN(5260, 0), /* Data[1].ctledges[2].bchannel */ FREQ2FBIN(5280, 0), /* Data[1].ctledges[3].bchannel */ FREQ2FBIN(5500, 0), /* Data[1].ctledges[4].bchannel */ FREQ2FBIN(5520, 0), /* Data[1].ctledges[5].bchannel */ FREQ2FBIN(5700, 0), /* Data[1].ctledges[6].bchannel */ FREQ2FBIN(5745, 0), /* Data[1].ctledges[7].bchannel */ FREQ2FBIN(5825, 0) }, { /* Data[2].ctledges[0].bchannel */ FREQ2FBIN(5190, 0), /* Data[2].ctledges[1].bchannel */ FREQ2FBIN(5230, 0), /* Data[2].ctledges[2].bchannel */ FREQ2FBIN(5270, 0), /* Data[2].ctledges[3].bchannel */ FREQ2FBIN(5310, 0), /* Data[2].ctledges[4].bchannel */ FREQ2FBIN(5510, 0), /* Data[2].ctledges[5].bchannel */ FREQ2FBIN(5550, 0), /* Data[2].ctledges[6].bchannel */ FREQ2FBIN(5670, 0), /* Data[2].ctledges[7].bchannel */ FREQ2FBIN(5755, 0) }, { /* Data[3].ctledges[0].bchannel */ FREQ2FBIN(5180, 0), /* Data[3].ctledges[1].bchannel */ FREQ2FBIN(5200, 0), /* Data[3].ctledges[2].bchannel */ FREQ2FBIN(5260, 0), /* Data[3].ctledges[3].bchannel */ FREQ2FBIN(5320, 0), /* Data[3].ctledges[4].bchannel */ FREQ2FBIN(5500, 0), /* Data[3].ctledges[5].bchannel */ FREQ2FBIN(5700, 0), /* Data[3].ctledges[6].bchannel */ 0xFF, /* Data[3].ctledges[7].bchannel */ 0xFF, }, { /* Data[4].ctledges[0].bchannel */ FREQ2FBIN(5180, 0), /* Data[4].ctledges[1].bchannel */ FREQ2FBIN(5260, 0), /* Data[4].ctledges[2].bchannel */ FREQ2FBIN(5500, 0), /* Data[4].ctledges[3].bchannel */ FREQ2FBIN(5700, 0), /* Data[4].ctledges[4].bchannel */ 0xFF, /* Data[4].ctledges[5].bchannel */ 0xFF, /* Data[4].ctledges[6].bchannel */ 
0xFF, /* Data[4].ctledges[7].bchannel */ 0xFF, }, { /* Data[5].ctledges[0].bchannel */ FREQ2FBIN(5190, 0), /* Data[5].ctledges[1].bchannel */ FREQ2FBIN(5270, 0), /* Data[5].ctledges[2].bchannel */ FREQ2FBIN(5310, 0), /* Data[5].ctledges[3].bchannel */ FREQ2FBIN(5510, 0), /* Data[5].ctledges[4].bchannel */ FREQ2FBIN(5590, 0), /* Data[5].ctledges[5].bchannel */ FREQ2FBIN(5670, 0), /* Data[5].ctledges[6].bchannel */ 0xFF, /* Data[5].ctledges[7].bchannel */ 0xFF }, { /* Data[6].ctledges[0].bchannel */ FREQ2FBIN(5180, 0), /* Data[6].ctledges[1].bchannel */ FREQ2FBIN(5200, 0), /* Data[6].ctledges[2].bchannel */ FREQ2FBIN(5220, 0), /* Data[6].ctledges[3].bchannel */ FREQ2FBIN(5260, 0), /* Data[6].ctledges[4].bchannel */ FREQ2FBIN(5500, 0), /* Data[6].ctledges[5].bchannel */ FREQ2FBIN(5600, 0), /* Data[6].ctledges[6].bchannel */ FREQ2FBIN(5700, 0), /* Data[6].ctledges[7].bchannel */ FREQ2FBIN(5745, 0) }, { /* Data[7].ctledges[0].bchannel */ FREQ2FBIN(5180, 0), /* Data[7].ctledges[1].bchannel */ FREQ2FBIN(5260, 0), /* Data[7].ctledges[2].bchannel */ FREQ2FBIN(5320, 0), /* Data[7].ctledges[3].bchannel */ FREQ2FBIN(5500, 0), /* Data[7].ctledges[4].bchannel */ FREQ2FBIN(5560, 0), /* Data[7].ctledges[5].bchannel */ FREQ2FBIN(5700, 0), /* Data[7].ctledges[6].bchannel */ FREQ2FBIN(5745, 0), /* Data[7].ctledges[7].bchannel */ FREQ2FBIN(5825, 0) }, { /* Data[8].ctledges[0].bchannel */ FREQ2FBIN(5190, 0), /* Data[8].ctledges[1].bchannel */ FREQ2FBIN(5230, 0), /* Data[8].ctledges[2].bchannel */ FREQ2FBIN(5270, 0), /* Data[8].ctledges[3].bchannel */ FREQ2FBIN(5510, 0), /* Data[8].ctledges[4].bchannel */ FREQ2FBIN(5550, 0), /* Data[8].ctledges[5].bchannel */ FREQ2FBIN(5670, 0), /* Data[8].ctledges[6].bchannel */ FREQ2FBIN(5755, 0), /* Data[8].ctledges[7].bchannel */ FREQ2FBIN(5795, 0) } }, .ctlPowerData_5G = { { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 
1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), } }, } }; static const struct ar9300_eeprom ar9300_h116 = { .eepromVersion = 2, .templateVersion = 4, .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0}, .custData = {"h116-041-f0000"}, .baseEepHeader = { .regDmn = { LE16(0), LE16(0x1f) }, .txrxMask = 0x33, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A, .eepMisc = 0, }, .rfSilent = 0, .blueToothOptions = 0, .deviceCap = 0, .deviceType = 5, /* takes lower byte in eeprom location */ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET, .params_for_tuning_caps = {0, 0}, .featureEnable = 0x0d, /* * bit0 - enable tx temp comp - disabled * bit1 - enable tx volt comp - disabled * bit2 - enable fastClock - enabled * bit3 - enable doubling - enabled * bit4 - enable internal regulator - disabled * bit5 - enable pa predistortion - disabled */ .miscConfiguration = 0, /* bit0 - turn down drivestrength */ .eepromWriteEnableGpio = 6, .wlanDisableGpio = 0, .wlanLedGpio = 8, .rxBandSelectGpio = 0xff, .txrxgain = 0x10, .swreg = 0, }, .modalHeader2G = { /* ar9300_modal_eep_header 2g */ /* 4 idle,t1,t2,b(4 bits per setting) */ .antCtrlCommon = LE32(0x110), /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */ .antCtrlCommon2 = LE32(0x44444), /* * 
antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r, * rx1, rx12, b (2 bits each) */ .antCtrlChain = { LE16(0x10), LE16(0x10), LE16(0x10) }, /* * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db * for ar9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0x1f, 0x1f, 0x1f}, /* * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin * for ar9280 (0xa20c/b20c 16:12 */ .xatten1Margin = {0x12, 0x12, 0x12}, .tempSlope = 25, .voltSlope = 0, /* * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur * channels in usual fbin coding format */ .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0}, /* * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2c, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0c80C080), .papdRateMaskHt40 = LE32(0x0080C080), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext1 = { .ant_div_control = 0, .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2462, 1), }, /* ar9300_cal_data_per_freq_op_loop 2g */ .calPierData2G = { { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, }, .calTarget_freqbin_Cck = { FREQ2FBIN(2412, 1), FREQ2FBIN(2472, 1), }, .calTarget_freqbin_2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT20 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT40 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTargetPowerCck = { /* 1L-5L,5S,11L,11S */ { {34, 34, 34, 34} }, { {34, 34, 34, 34} }, }, .calTargetPower2G = 
{ /* 6-24,36,48,54 */ { {34, 34, 32, 32} }, { {34, 34, 32, 32} }, { {34, 34, 32, 32} }, }, .calTargetPower2GHT20 = { { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} }, { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} }, { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} }, }, .calTargetPower2GHT40 = { { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} }, { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} }, { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} }, }, .ctlIndex_2G = { 0x11, 0x12, 0x15, 0x17, 0x41, 0x42, 0x45, 0x47, 0x31, 0x32, 0x35, 0x37, }, .ctl_freqbin_2G = { { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2457, 1), FREQ2FBIN(2462, 1) }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2422, 1), FREQ2FBIN(2427, 1), FREQ2FBIN(2447, 1), FREQ2FBIN(2452, 1) }, { /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1), }, { /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0, }, { /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), FREQ2FBIN(2472, 1), 0, }, { /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1), }, { /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), }, { /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[9].ctlEdges[2].bChannel */ 
FREQ2FBIN(2472, 1), 0 }, { /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1), } }, .ctlPowerData_2G = { { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, }, .modalHeader5G = { /* 4 idle,t1,t2,b (4 bits per setting) */ .antCtrlCommon = LE32(0x220), /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */ .antCtrlCommon2 = LE32(0x44444), /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150), }, /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0x19, 0x19, 0x19}, /* * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin * for merlin (0xa20c/b20c 16:12 */ .xatten1Margin = {0x14, 0x14, 0x14}, .tempSlope = 70, .voltSlope = 0, /* spurChans spur channels in usual fbin coding format */ .spurChans = {0, 0, 0, 0, 0}, /* noiseFloorThreshCh Check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits 
tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2d, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0cf0e0e0), .papdRateMaskHt40 = LE32(0x6cf0e0e0), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext2 = { .tempSlopeLow = 35, .tempSlopeHigh = 50, .xatten1DBLow = {0, 0, 0}, .xatten1MarginLow = {0, 0, 0}, .xatten1DBHigh = {0, 0, 0}, .xatten1MarginHigh = {0, 0, 0} }, .calFreqPier5G = { FREQ2FBIN(5160, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5785, 0) }, .calPierData5G = { { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, }, .calTarget_freqbin_5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT20 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT40 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5825, 0) }, .calTargetPower5G = { /* 6-24,36,48,54 */ { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, }, .calTargetPower5GHT20 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {30, 30, 
30, 28, 24, 20, 30, 28, 24, 20, 0, 0, 0, 0} }, { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 0, 0, 0, 0} }, { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 0, 0, 0, 0} }, { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 0, 0, 0, 0} }, { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 0, 0, 0, 0} }, { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 0, 0, 0, 0} }, { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 0, 0, 0, 0} }, { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 0, 0, 0, 0} }, }, .calTargetPower5GHT40 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 0, 0, 0, 0} }, { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 0, 0, 0, 0} }, { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 0, 0, 0, 0} }, { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 0, 0, 0, 0} }, { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 0, 0, 0, 0} }, { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 0, 0, 0, 0} }, { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 0, 0, 0, 0} }, { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 0, 0, 0, 0} }, }, .ctlIndex_5G = { 0x10, 0x16, 0x18, 0x40, 0x46, 0x48, 0x30, 0x36, 0x38 }, .ctl_freqbin_5G = { { /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0), /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0), /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0), /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0), /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* 
Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0), /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0), /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0), /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0), /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0), /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0), /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0) }, { /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0), /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0), /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0), /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0), /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[3].ctlEdges[6].bChannel */ 0xFF, /* Data[3].ctlEdges[7].bChannel */ 0xFF, }, { /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0), /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0), /* Data[4].ctlEdges[4].bChannel */ 0xFF, /* Data[4].ctlEdges[5].bChannel */ 0xFF, /* Data[4].ctlEdges[6].bChannel */ 0xFF, /* Data[4].ctlEdges[7].bChannel */ 0xFF, }, { /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0), /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0), /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0), /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0), /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0), /* Data[5].ctlEdges[6].bChannel */ 0xFF, /* Data[5].ctlEdges[7].bChannel */ 0xFF }, { /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0), /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0), /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0), /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0), /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0), /* 
Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0), /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0) }, { /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0), /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0), /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0), /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0), /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0), /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0), /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0), /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0), /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0) } }, .ctlPowerData_5G = { { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), } }, } }; static const struct 
ar9300_eeprom *ar9300_eep_templates[] = {
	&ar9300_default,
	&ar9300_x112,
	&ar9300_h116,
	&ar9300_h112,
	&ar9300_x113,
};

/*
 * Find the built-in EEPROM template whose templateVersion matches @id.
 * Returns NULL when no template matches.
 */
static const struct ar9300_eeprom *ar9003_eeprom_struct_find_by_id(int id)
{
#define N_LOOP (sizeof(ar9300_eep_templates) / sizeof(ar9300_eep_templates[0]))
	int it;

	for (it = 0; it < N_LOOP; it++)
		if (ar9300_eep_templates[it]->templateVersion == id)
			return ar9300_eep_templates[it];
	return NULL;
#undef N_LOOP
}

/*
 * Convert a frequency bin back to MHz: 2 GHz bins are 1 MHz steps
 * from 2300 MHz, 5 GHz bins are 5 MHz steps from 4800 MHz.  An
 * unused bin marker is returned unchanged.
 */
static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
{
	if (fbin == AR5416_BCHAN_UNUSED)
		return fbin;
	return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
}

/* No separate validity check is done for the AR9300 eeprom layout. */
static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
{
	return 0;
}

/*
 * Integer linear interpolation of y at x between (xa, ya) and
 * (xb, yb).  The doubled numerator plus the bf % 2 term implements
 * rounding of the half-step away from zero.
 */
static int interpolate(int x, int xa, int xb, int ya, int yb)
{
	int bf, factor, plus;

	bf = 2 * (yb - ya) * (x - xa) / (xb - xa);
	factor = bf / 2;
	plus = bf % 2;
	return ya + factor + plus;
}

/*
 * Generic accessor for fields of the parsed AR9300 eeprom image.
 * Parameters not handled by this switch read as 0 (default case is
 * on the switch continuation below).
 */
static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
				      enum eeprom_param param)
{
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;

	switch (param) {
	case EEP_MAC_LSW:
		return get_unaligned_be16(eep->macAddr);
	case EEP_MAC_MID:
		return get_unaligned_be16(eep->macAddr + 2);
	case EEP_MAC_MSW:
		return get_unaligned_be16(eep->macAddr + 4);
	case EEP_REG_0:
		return le16_to_cpu(pBase->regDmn[0]);
	case EEP_OP_CAP:
		return pBase->deviceCap;
	case EEP_OP_MODE:
		return pBase->opCapFlags.opFlags;
	case EEP_RF_SILENT:
		return pBase->rfSilent;
	case EEP_TX_MASK:
		return (pBase->txrxMask >> 4) & 0xf;
	case EEP_RX_MASK:
		return pBase->txrxMask & 0xf;
	case EEP_DRIVE_STRENGTH:
#define AR9300_EEP_BASE_DRIV_STRENGTH	0x1
		return pBase->miscConfiguration & AR9300_EEP_BASE_DRIV_STRENGTH;
	case EEP_INTERNAL_REGULATOR:
		/* Bit 4 is internal regulator flag */
		return (pBase->featureEnable & 0x10) >> 4;
	case EEP_SWREG:
		return le32_to_cpu(pBase->swreg);
	case EEP_PAPRD:
		return !!(pBase->featureEnable & BIT(5));
	case EEP_CHAIN_MASK_REDUCE:
		return (pBase->miscConfiguration >> 0x3) & 0x1;
	case EEP_ANT_DIV_CTL1:
return eep->base_ext1.ant_div_control; case EEP_ANTENNA_GAIN_5G: return eep->modalHeader5G.antennaGain; case EEP_ANTENNA_GAIN_2G: return eep->modalHeader2G.antennaGain; case EEP_QUICK_DROP: return pBase->miscConfiguration & BIT(1); default: return 0; } } static bool ar9300_eeprom_read_byte(struct ath_common *common, int address, u8 *buffer) { u16 val; if (unlikely(!ath9k_hw_nvram_read(common, address / 2, &val))) return false; *buffer = (val >> (8 * (address % 2))) & 0xff; return true; } static bool ar9300_eeprom_read_word(struct ath_common *common, int address, u8 *buffer) { u16 val; if (unlikely(!ath9k_hw_nvram_read(common, address / 2, &val))) return false; buffer[0] = val >> 8; buffer[1] = val & 0xff; return true; } static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer, int count) { struct ath_common *common = ath9k_hw_common(ah); int i; if ((address < 0) || ((address + count) / 2 > AR9300_EEPROM_SIZE - 1)) { ath_dbg(common, EEPROM, "eeprom address not in range\n"); return false; } /* * Since we're reading the bytes in reverse order from a little-endian * word stream, an even address means we only use the lower half of * the 16-bit word at that address */ if (address % 2 == 0) { if (!ar9300_eeprom_read_byte(common, address--, buffer++)) goto error; count--; } for (i = 0; i < count / 2; i++) { if (!ar9300_eeprom_read_word(common, address, buffer)) goto error; address -= 2; buffer += 2; } if (count % 2) if (!ar9300_eeprom_read_byte(common, address, buffer)) goto error; return true; error: ath_dbg(common, EEPROM, "unable to read eeprom region at offset %d\n", address); return false; } static bool ar9300_otp_read_word(struct ath_hw *ah, int addr, u32 *data) { REG_READ(ah, AR9300_OTP_BASE + (4 * addr)); if (!ath9k_hw_wait(ah, AR9300_OTP_STATUS, AR9300_OTP_STATUS_TYPE, AR9300_OTP_STATUS_VALID, 1000)) return false; *data = REG_READ(ah, AR9300_OTP_READ_DATA); return true; } static bool ar9300_read_otp(struct ath_hw *ah, int address, u8 *buffer, int 
	   count)
{
	u32 data;
	int i;

	/* OTP bytes are also consumed in descending address order. */
	for (i = 0; i < count; i++) {
		int offset = 8 * ((address - i) % 4);

		if (!ar9300_otp_read_word(ah, (address - i) / 4, &data))
			return false;

		buffer[i] = (data >> offset) & 0xff;
	}

	return true;
}

/*
 * Unpack the 4-byte compression header of an eeprom block into its
 * fields: compression code, reference template id, payload length
 * and the major/minor version.
 */
static void ar9300_comp_hdr_unpack(u8 *best, int *code, int *reference,
				   int *length, int *major, int *minor)
{
	unsigned long value[4];

	value[0] = best[0];
	value[1] = best[1];
	value[2] = best[2];
	value[3] = best[3];
	*code = ((value[0] >> 5) & 0x0007);
	*reference = (value[0] & 0x001f) | ((value[1] >> 2) & 0x0020);
	*length = ((value[1] << 4) & 0x07f0) | ((value[2] >> 4) & 0x000f);
	*major = (value[2] & 0x000f);
	*minor = (value[3] & 0x00ff);
}

/* Simple 16-bit sum-of-bytes checksum over dsize bytes. */
static u16 ar9300_comp_cksum(u8 *data, int dsize)
{
	int it, checksum = 0;

	for (it = 0; it < dsize; it++) {
		checksum += data[it];
		checksum &= 0xffff;
	}

	return checksum;
}

/*
 * Apply a compressed block of (offset, length, bytes...) records on
 * top of the mdataSize-byte image at mptr: each record skips 'offset'
 * bytes forward, then overwrites 'length' bytes.  Returns false if
 * any record would write outside the image.
 */
static bool ar9300_uncompress_block(struct ath_hw *ah,
				    u8 *mptr,
				    int mdataSize,
				    u8 *block,
				    int size)
{
	int it;
	int spot;
	int offset;
	int length;
	struct ath_common *common = ath9k_hw_common(ah);

	spot = 0;

	for (it = 0; it < size; it += (length+2)) {
		offset = block[it];
		offset &= 0xff;
		spot += offset;
		length = block[it+1];
		length &= 0xff;

		if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
			ath_dbg(common, EEPROM,
				"Restore at %d: spot=%d offset=%d length=%d\n",
				it, spot, offset, length);
			memcpy(&mptr[spot], &block[it+2], length);
			spot += length;
		} else if (length > 0) {
			ath_dbg(common, EEPROM,
				"Bad restore at %d: spot=%d offset=%d length=%d\n",
				it, spot, offset, length);
			return false;
		}
	}
	return true;
}

/*
 * Restore one eeprom block into the in-memory image according to its
 * compression code.  For block compression, a non-zero reference id
 * first seeds the image from the matching built-in template.
 * Returns 0 on success, -1 on error.
 */
static int ar9300_compress_decision(struct ath_hw *ah,
				    int it,
				    int code,
				    int reference,
				    u8 *mptr,
				    u8 *word, int length, int mdata_size)
{
	struct ath_common *common = ath9k_hw_common(ah);
	const struct ar9300_eeprom *eep = NULL;

	switch (code) {
	case _CompressNone:
		if (length != mdata_size) {
			ath_dbg(common, EEPROM,
				"EEPROM structure size mismatch memory=%d eeprom=%d\n",
				mdata_size, length);
			return -1;
		}
		memcpy(mptr, (u8 *) (word + COMP_HDR_LEN),
		       length);
		ath_dbg(common, EEPROM,
			"restored eeprom %d: uncompressed, length %d\n",
			it, length);
		break;
	case _CompressBlock:
		if (reference == 0) {
			/* no reference template: patch the current image in place */
		} else {
			eep = ar9003_eeprom_struct_find_by_id(reference);
			if (eep == NULL) {
				ath_dbg(common, EEPROM,
					"can't find reference eeprom struct %d\n",
					reference);
				return -1;
			}
			memcpy(mptr, eep, mdata_size);
		}
		ath_dbg(common, EEPROM,
			"restore eeprom %d: block, reference %d, length %d\n",
			it, reference, length);
		ar9300_uncompress_block(ah, mptr, mdata_size,
					(u8 *) (word + COMP_HDR_LEN), length);
		break;
	default:
		ath_dbg(common, EEPROM, "unknown compression code %d\n", code);
		return -1;
	}
	return 0;
}

/* Reader callback type shared by the eeprom and OTP backends. */
typedef bool (*eeprom_read_op)(struct ath_hw *ah, int address,
			       u8 *buffer, int count);

/* An all-zero or all-one header word means no valid data here. */
static bool ar9300_check_header(void *data)
{
	u32 *word = data;
	return !(*word == 0 || *word == ~0);
}

/* Probe base_addr with the given reader and validate the header word. */
static bool ar9300_check_eeprom_header(struct ath_hw *ah, eeprom_read_op read,
				       int base_addr)
{
	u8 header[4];

	if (!read(ah, base_addr, header, 4))
		return false;

	return ar9300_check_header(header);
}

/*
 * Flash parts store the structure uncompressed; copy it word by
 * word straight into the in-memory image.
 */
static int ar9300_eeprom_restore_flash(struct ath_hw *ah, u8 *mptr,
				       int mdata_size)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u16 *data = (u16 *) mptr;
	int i;

	for (i = 0; i < mdata_size / 2; i++, data++)
		ath9k_hw_nvram_read(common, i, data);

	return 0;
}

/*
 * Read the configuration data from the eeprom.
 * The data can be put in any specified memory buffer.
 *
 * Returns -1 on error.
 * Returns address of next memory location on success.
 */
static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
					  u8 *mptr, int mdata_size)
{
#define MDEFAULT 15
#define MSTATE 100
	int cptr;
	u8 *word;
	int code;
	int reference, length, major, minor;
	int osize;
	int it;
	u16 checksum, mchecksum;
	struct ath_common *common = ath9k_hw_common(ah);
	eeprom_read_op read;

	/* Flash parts hold the image uncompressed - short-circuit. */
	if (ath9k_hw_use_flash(ah))
		return ar9300_eeprom_restore_flash(ah, mptr, mdata_size);

	word = kzalloc(2048, GFP_KERNEL);
	if (!word)
		return -ENOMEM;

	/* Start from the built-in defaults, then patch blocks on top. */
	memcpy(mptr, &ar9300_default, mdata_size);

	/*
	 * Probe in order: eeprom at the chip-specific base address,
	 * eeprom at the 512-byte base, then the same two bases via OTP.
	 */
	read = ar9300_read_eeprom;
	if (AR_SREV_9485(ah))
		cptr = AR9300_BASE_ADDR_4K;
	else if (AR_SREV_9330(ah))
		cptr = AR9300_BASE_ADDR_512;
	else
		cptr = AR9300_BASE_ADDR;
	ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
		cptr);
	if (ar9300_check_eeprom_header(ah, read, cptr))
		goto found;

	cptr = AR9300_BASE_ADDR_512;
	ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
		cptr);
	if (ar9300_check_eeprom_header(ah, read, cptr))
		goto found;

	read = ar9300_read_otp;
	cptr = AR9300_BASE_ADDR;
	ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n",
		cptr);
	if (ar9300_check_eeprom_header(ah, read, cptr))
		goto found;

	cptr = AR9300_BASE_ADDR_512;
	ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n",
		cptr);
	if (ar9300_check_eeprom_header(ah, read, cptr))
		goto found;

	goto fail;

found:
	ath_dbg(common, EEPROM, "Found valid EEPROM data\n");

	/* Blocks are stacked downward from cptr; walk at most MSTATE. */
	for (it = 0; it < MSTATE; it++) {
		if (!read(ah, cptr, word, COMP_HDR_LEN))
			goto fail;

		if (!ar9300_check_header(word))
			break;

		ar9300_comp_hdr_unpack(word, &code, &reference,
				       &length, &major, &minor);
		ath_dbg(common, EEPROM,
			"Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n",
			cptr, code, reference, length, major, minor);
		if ((!AR_SREV_9485(ah) && length >= 1024) ||
		    (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) {
			ath_dbg(common, EEPROM, "Skipping bad header\n");
			cptr -= COMP_HDR_LEN;
			continue;
		}

		osize = length;
		read(ah, cptr, word, COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
		checksum =
		    ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
		mchecksum = get_unaligned_le16(&word[COMP_HDR_LEN + osize]);
		ath_dbg(common, EEPROM, "checksum %x %x\n",
			checksum, mchecksum);
		/* Only apply a block whose stored checksum matches. */
		if (checksum == mchecksum) {
			ar9300_compress_decision(ah, it, code, reference, mptr,
						 word, length, mdata_size);
		} else {
			ath_dbg(common, EEPROM,
				"skipping block with bad checksum\n");
		}
		cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
	}

	kfree(word);
	return cptr;

fail:
	kfree(word);
	return -1;
}

/*
 * Restore the configuration structure by reading the eeprom.
 * This function destroys any existing in-memory structure
 * content.
 */
static bool ath9k_hw_ar9300_fill_eeprom(struct ath_hw *ah)
{
	u8 *mptr = (u8 *) &ah->eeprom.ar9300_eep;

	if (ar9300_eeprom_restore_internal(ah, mptr,
			sizeof(struct ar9300_eeprom)) < 0)
		return false;

	return true;
}

#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS)

/*
 * Append a textual dump of one modal (per-band) eeprom header to buf.
 * PR_EEP formats one "name : value" line per call and advances len;
 * the updated length is returned.
 */
static u32 ar9003_dump_modal_eeprom(char *buf, u32 len, u32 size,
				    struct ar9300_modal_eep_header *modal_hdr)
{
	PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0]));
	PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1]));
	PR_EEP("Chain2 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[2]));
	PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon));
	PR_EEP("Ant. Common Control2", le32_to_cpu(modal_hdr->antCtrlCommon2));
	PR_EEP("Ant. Gain", modal_hdr->antennaGain);
	PR_EEP("Switch Settle", modal_hdr->switchSettling);
	PR_EEP("Chain0 xatten1DB", modal_hdr->xatten1DB[0]);
	PR_EEP("Chain1 xatten1DB", modal_hdr->xatten1DB[1]);
	PR_EEP("Chain2 xatten1DB", modal_hdr->xatten1DB[2]);
	PR_EEP("Chain0 xatten1Margin", modal_hdr->xatten1Margin[0]);
	PR_EEP("Chain1 xatten1Margin", modal_hdr->xatten1Margin[1]);
	PR_EEP("Chain2 xatten1Margin", modal_hdr->xatten1Margin[2]);
	PR_EEP("Temp Slope", modal_hdr->tempSlope);
	PR_EEP("Volt Slope", modal_hdr->voltSlope);
	PR_EEP("spur Channels0", modal_hdr->spurChans[0]);
	PR_EEP("spur Channels1", modal_hdr->spurChans[1]);
	PR_EEP("spur Channels2", modal_hdr->spurChans[2]);
	PR_EEP("spur Channels3", modal_hdr->spurChans[3]);
	PR_EEP("spur Channels4", modal_hdr->spurChans[4]);
	PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]);
	PR_EEP("Chain1 NF Threshold", modal_hdr->noiseFloorThreshCh[1]);
	PR_EEP("Chain2 NF Threshold", modal_hdr->noiseFloorThreshCh[2]);
	PR_EEP("Quick Drop", modal_hdr->quick_drop);
	PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff);
	PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl);
	PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart);
	PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn);
	PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn);
	PR_EEP("txClip", modal_hdr->txClip);
	PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize);

	return len;
}

/*
 * Dump either the 2G+5G modal headers (dump_base_hdr == false) or the
 * base eeprom header into buf; returns the new buffer length, clamped
 * to size at the end.
 */
static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
				       u8 *buf, u32 len, u32 size)
{
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	struct ar9300_base_eep_hdr *pBase;

	if (!dump_base_hdr) {
		len += snprintf(buf + len, size - len,
				"%20s :\n", "2GHz modal Header");
		len += ar9003_dump_modal_eeprom(buf, len, size,
						&eep->modalHeader2G);
		len += snprintf(buf + len, size - len,
				"%20s :\n", "5GHz modal Header");
		len += ar9003_dump_modal_eeprom(buf, len, size,
						&eep->modalHeader5G);
		goto out;
	}

	pBase = &eep->baseEepHeader;

	PR_EEP("EEPROM Version", ah->eeprom.ar9300_eep.eepromVersion);
	/* Base header fields, one formatted line per field. */
	PR_EEP("RegDomain1", le16_to_cpu(pBase->regDmn[0]));
	PR_EEP("RegDomain2", le16_to_cpu(pBase->regDmn[1]));
	PR_EEP("TX Mask", (pBase->txrxMask >> 4));
	PR_EEP("RX Mask", (pBase->txrxMask & 0x0f));
	PR_EEP("Allow 5GHz",
	       !!(pBase->opCapFlags.opFlags & AR5416_OPFLAGS_11A));
	PR_EEP("Allow 2GHz",
	       !!(pBase->opCapFlags.opFlags & AR5416_OPFLAGS_11G));
	PR_EEP("Disable 2GHz HT20",
	       !!(pBase->opCapFlags.opFlags & AR5416_OPFLAGS_N_2G_HT20));
	PR_EEP("Disable 2GHz HT40",
	       !!(pBase->opCapFlags.opFlags & AR5416_OPFLAGS_N_2G_HT40));
	PR_EEP("Disable 5Ghz HT20",
	       !!(pBase->opCapFlags.opFlags & AR5416_OPFLAGS_N_5G_HT20));
	PR_EEP("Disable 5Ghz HT40",
	       !!(pBase->opCapFlags.opFlags & AR5416_OPFLAGS_N_5G_HT40));
	PR_EEP("Big Endian", !!(pBase->opCapFlags.eepMisc & 0x01));
	PR_EEP("RF Silent", pBase->rfSilent);
	PR_EEP("BT option", pBase->blueToothOptions);
	PR_EEP("Device Cap", pBase->deviceCap);
	PR_EEP("Device Type", pBase->deviceType);
	PR_EEP("Power Table Offset", pBase->pwrTableOffset);
	PR_EEP("Tuning Caps1", pBase->params_for_tuning_caps[0]);
	PR_EEP("Tuning Caps2", pBase->params_for_tuning_caps[1]);
	PR_EEP("Enable Tx Temp Comp", !!(pBase->featureEnable & BIT(0)));
	PR_EEP("Enable Tx Volt Comp", !!(pBase->featureEnable & BIT(1)));
	PR_EEP("Enable fast clock", !!(pBase->featureEnable & BIT(2)));
	PR_EEP("Enable doubling", !!(pBase->featureEnable & BIT(3)));
	PR_EEP("Internal regulator", !!(pBase->featureEnable & BIT(4)));
	PR_EEP("Enable Paprd", !!(pBase->featureEnable & BIT(5)));
	PR_EEP("Driver Strength", !!(pBase->miscConfiguration & BIT(0)));
	PR_EEP("Quick Drop", !!(pBase->miscConfiguration & BIT(1)));
	PR_EEP("Chain mask Reduce", (pBase->miscConfiguration >> 0x3) & 0x1);
	PR_EEP("Write enable Gpio", pBase->eepromWriteEnableGpio);
	PR_EEP("WLAN Disable Gpio", pBase->wlanDisableGpio);
	PR_EEP("WLAN LED Gpio", pBase->wlanLedGpio);
	PR_EEP("Rx Band Select Gpio", pBase->rxBandSelectGpio);
	PR_EEP("Tx Gain", pBase->txrxgain >> 4);
	PR_EEP("Rx Gain", pBase->txrxgain & 0xf);
	PR_EEP("SW Reg",
le32_to_cpu(pBase->swreg)); len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress", ah->eeprom.ar9300_eep.macAddr); out: if (len > size) len = size; return len; } #else static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, u8 *buf, u32 len, u32 size) { return 0; } #endif /* XXX: review hardware docs */ static int ath9k_hw_ar9300_get_eeprom_ver(struct ath_hw *ah) { return ah->eeprom.ar9300_eep.eepromVersion; } /* XXX: could be read from the eepromVersion, not sure yet */ static int ath9k_hw_ar9300_get_eeprom_rev(struct ath_hw *ah) { return 0; } static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; if (is2ghz) return eep->modalHeader2G.xpaBiasLvl; else return eep->modalHeader5G.xpaBiasLvl; } static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz) { int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz); if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah)) REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias); else if (AR_SREV_9462(ah)) REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias); else { REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias); REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_XPABIASLVL_MSB, bias >> 2); REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_XPASHORT2GND, 1); } } static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is_2ghz) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; __le16 val; if (is_2ghz) val = eep->modalHeader2G.switchcomspdt; else val = eep->modalHeader5G.switchcomspdt; return le16_to_cpu(val); } static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; __le32 val; if (is2ghz) val = eep->modalHeader2G.antCtrlCommon; else val = eep->modalHeader5G.antCtrlCommon; return le32_to_cpu(val); } static u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz) { struct ar9300_eeprom *eep = 
	&ah->eeprom.ar9300_eep;
	__le32 val;

	if (is2ghz)
		val = eep->modalHeader2G.antCtrlCommon2;
	else
		val = eep->modalHeader5G.antCtrlCommon2;
	return le32_to_cpu(val);
}

/*
 * Per-chain antenna control word for the requested band; chains
 * outside 0..AR9300_MAX_CHAINS-1 read as 0.
 */
static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah, int chain,
					bool is2ghz)
{
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	__le16 val = 0;

	if (chain >= 0 && chain < AR9300_MAX_CHAINS) {
		if (is2ghz)
			val = eep->modalHeader2G.antCtrlChain[chain];
		else
			val = eep->modalHeader5G.antCtrlChain[chain];
	}

	return le16_to_cpu(val);
}

/*
 * Program the antenna switch tables (common, common2, per-chain) and,
 * on AR9330/AR9485, the antenna diversity configuration, from eeprom
 * values for the current band.
 */
static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
{
	int chain;
	u32 regval, value;
	u32 ant_div_ctl1;
	static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
		AR_PHY_SWITCH_CHAIN_0,
		AR_PHY_SWITCH_CHAIN_1,
		AR_PHY_SWITCH_CHAIN_2,
	};

	if (AR_SREV_9485(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
		ath9k_hw_cfg_output(ah, AR9300_EXT_LNA_CTL_GPIO_AR9485,
				    AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);

	value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);

	if (AR_SREV_9462(ah)) {
		REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
				AR_SWITCH_TABLE_COM_AR9462_ALL, value);
	} else
		REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
			      AR_SWITCH_TABLE_COM_ALL, value);

	/*
	 * AR9462 defines new switch table for BT/WLAN,
	 * here's new field name in XXX.ref for both 2G and 5G.
* Register: [GLB_CONTROL] GLB_CONTROL (@0x20044) * 15:12 R/W SWITCH_TABLE_COM_SPDT_WLAN_RX * SWITCH_TABLE_COM_SPDT_WLAN_RX * * 11:8 R/W SWITCH_TABLE_COM_SPDT_WLAN_TX * SWITCH_TABLE_COM_SPDT_WLAN_TX * * 7:4 R/W SWITCH_TABLE_COM_SPDT_WLAN_IDLE * SWITCH_TABLE_COM_SPDT_WLAN_IDLE */ if (AR_SREV_9462_20_OR_LATER(ah)) { value = ar9003_switch_com_spdt_get(ah, is2ghz); REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, AR_SWITCH_TABLE_COM_SPDT_ALL, value); } value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz); REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value); for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { if ((ah->rxchainmask & BIT(chain)) || (ah->txchainmask & BIT(chain))) { value = ar9003_hw_ant_ctrl_chain_get(ah, chain, is2ghz); REG_RMW_FIELD(ah, switch_chain_reg[chain], AR_SWITCH_TABLE_ALL, value); } } if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) { value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1); /* * main_lnaconf, alt_lnaconf, main_tb, alt_tb * are the fields present */ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); regval &= (~AR_ANT_DIV_CTRL_ALL); regval |= (value & 0x3f) << AR_ANT_DIV_CTRL_ALL_S; /* enable_lnadiv */ regval &= (~AR_PHY_9485_ANT_DIV_LNADIV); regval |= ((value >> 6) & 0x1) << AR_PHY_9485_ANT_DIV_LNADIV_S; REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); /*enable fast_div */ regval = REG_READ(ah, AR_PHY_CCK_DETECT); regval &= (~AR_FAST_DIV_ENABLE); regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S; REG_WRITE(ah, AR_PHY_CCK_DETECT, regval); ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); /* check whether antenna diversity is enabled */ if ((ant_div_ctl1 >> 0x6) == 0x3) { regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); /* * clear bits 25-30 main_lnaconf, alt_lnaconf, * main_tb, alt_tb */ regval &= (~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF | AR_PHY_9485_ANT_DIV_ALT_LNACONF | AR_PHY_9485_ANT_DIV_ALT_GAINTB | AR_PHY_9485_ANT_DIV_MAIN_GAINTB)); /* by default use LNA1 for the main antenna */ regval |= (AR_PHY_9485_ANT_DIV_LNA1 << 
AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S); regval |= (AR_PHY_9485_ANT_DIV_LNA2 << AR_PHY_9485_ANT_DIV_ALT_LNACONF_S); REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); } } } static void ar9003_hw_drive_strength_apply(struct ath_hw *ah) { int drive_strength; unsigned long reg; drive_strength = ath9k_hw_ar9300_get_eeprom(ah, EEP_DRIVE_STRENGTH); if (!drive_strength) return; reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS1); reg &= ~0x00ffffc0; reg |= 0x5 << 21; reg |= 0x5 << 18; reg |= 0x5 << 15; reg |= 0x5 << 12; reg |= 0x5 << 9; reg |= 0x5 << 6; REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS1, reg); reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS2); reg &= ~0xffffffe0; reg |= 0x5 << 29; reg |= 0x5 << 26; reg |= 0x5 << 23; reg |= 0x5 << 20; reg |= 0x5 << 17; reg |= 0x5 << 14; reg |= 0x5 << 11; reg |= 0x5 << 8; reg |= 0x5 << 5; REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS2, reg); reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS4); reg &= ~0xff800000; reg |= 0x5 << 29; reg |= 0x5 << 26; reg |= 0x5 << 23; REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS4, reg); } static u16 ar9003_hw_atten_chain_get(struct ath_hw *ah, int chain, struct ath9k_channel *chan) { int f[3], t[3]; u16 value; struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; if (chain >= 0 && chain < 3) { if (IS_CHAN_2GHZ(chan)) return eep->modalHeader2G.xatten1DB[chain]; else if (eep->base_ext2.xatten1DBLow[chain] != 0) { t[0] = eep->base_ext2.xatten1DBLow[chain]; f[0] = 5180; t[1] = eep->modalHeader5G.xatten1DB[chain]; f[1] = 5500; t[2] = eep->base_ext2.xatten1DBHigh[chain]; f[2] = 5785; value = ar9003_hw_power_interpolate((s32) chan->channel, f, t, 3); return value; } else return eep->modalHeader5G.xatten1DB[chain]; } return 0; } static u16 ar9003_hw_atten_chain_get_margin(struct ath_hw *ah, int chain, struct ath9k_channel *chan) { int f[3], t[3]; u16 value; struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; if (chain >= 0 && chain < 3) { if (IS_CHAN_2GHZ(chan)) return eep->modalHeader2G.xatten1Margin[chain]; else if (eep->base_ext2.xatten1MarginLow[chain] != 0) { t[0] = 
eep->base_ext2.xatten1MarginLow[chain]; f[0] = 5180; t[1] = eep->modalHeader5G.xatten1Margin[chain]; f[1] = 5500; t[2] = eep->base_ext2.xatten1MarginHigh[chain]; f[2] = 5785; value = ar9003_hw_power_interpolate((s32) chan->channel, f, t, 3); return value; } else return eep->modalHeader5G.xatten1Margin[chain]; } return 0; } static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan) { int i; u16 value; unsigned long ext_atten_reg[3] = {AR_PHY_EXT_ATTEN_CTL_0, AR_PHY_EXT_ATTEN_CTL_1, AR_PHY_EXT_ATTEN_CTL_2, }; /* Test value. if 0 then attenuation is unused. Don't load anything. */ for (i = 0; i < 3; i++) { if (ah->txchainmask & BIT(i)) { value = ar9003_hw_atten_chain_get(ah, i, chan); REG_RMW_FIELD(ah, ext_atten_reg[i], AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value); value = ar9003_hw_atten_chain_get_margin(ah, i, chan); REG_RMW_FIELD(ah, ext_atten_reg[i], AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN, value); } } } static bool is_pmu_set(struct ath_hw *ah, u32 pmu_reg, int pmu_set) { int timeout = 100; while (pmu_set != REG_READ(ah, pmu_reg)) { if (timeout-- == 0) return false; REG_WRITE(ah, pmu_reg, pmu_set); udelay(10); } return true; } static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah) { int internal_regulator = ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR); u32 reg_val; if (internal_regulator) { if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) { int reg_pmu_set; reg_pmu_set = REG_READ(ah, AR_PHY_PMU2) & ~AR_PHY_PMU2_PGM; REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set); if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set)) return; if (AR_SREV_9330(ah)) { if (ah->is_clk_25mhz) { reg_pmu_set = (3 << 1) | (8 << 4) | (3 << 8) | (1 << 14) | (6 << 17) | (1 << 20) | (3 << 24); } else { reg_pmu_set = (4 << 1) | (7 << 4) | (3 << 8) | (1 << 14) | (6 << 17) | (1 << 20) | (3 << 24); } } else { reg_pmu_set = (5 << 1) | (7 << 4) | (2 << 8) | (2 << 14) | (6 << 17) | (1 << 20) | (3 << 24) | (1 << 28); } REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set); if (!is_pmu_set(ah, 
AR_PHY_PMU1, reg_pmu_set)) return; reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2) & ~0xFFC00000) | (4 << 26); REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set); if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set)) return; reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2) & ~0x00200000) | (1 << 21); REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set); if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set)) return; } else if (AR_SREV_9462(ah)) { reg_val = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG); REG_WRITE(ah, AR_PHY_PMU1, reg_val); } else { /* Internal regulator is ON. Write swreg register. */ reg_val = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG); REG_WRITE(ah, AR_RTC_REG_CONTROL1, REG_READ(ah, AR_RTC_REG_CONTROL1) & (~AR_RTC_REG_CONTROL1_SWREG_PROGRAM)); REG_WRITE(ah, AR_RTC_REG_CONTROL0, reg_val); /* Set REG_CONTROL1.SWREG_PROGRAM */ REG_WRITE(ah, AR_RTC_REG_CONTROL1, REG_READ(ah, AR_RTC_REG_CONTROL1) | AR_RTC_REG_CONTROL1_SWREG_PROGRAM); } } else { if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) { REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0); while (REG_READ_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM)) udelay(10); REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1); while (!REG_READ_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD)) udelay(10); REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0x1); while (!REG_READ_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM)) udelay(10); } else if (AR_SREV_9462(ah)) REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1); else { reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK) | AR_RTC_FORCE_SWREG_PRD; REG_WRITE(ah, AR_RTC_SLEEP_CLK, reg_val); } } } static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; u8 tuning_caps_param = eep->baseEepHeader.params_for_tuning_caps[0]; if (eep->baseEepHeader.featureEnable & 0x40) { tuning_caps_param &= 0x7f; REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPINDAC, tuning_caps_param); REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPOUTDAC, tuning_caps_param); } } static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 
freq) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; int quick_drop = ath9k_hw_ar9300_get_eeprom(ah, EEP_QUICK_DROP); s32 t[3], f[3] = {5180, 5500, 5785}; if (!quick_drop) return; if (freq < 4000) quick_drop = eep->modalHeader2G.quick_drop; else { t[0] = eep->base_ext1.quick_drop_low; t[1] = eep->modalHeader5G.quick_drop; t[2] = eep->base_ext1.quick_drop_high; quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3); } REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop); } static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, u16 freq) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; u32 value; value = (freq < 4000) ? eep->modalHeader2G.txEndToXpaOff : eep->modalHeader5G.txEndToXpaOff; REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL, AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF, value); REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL, AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF, value); } static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah, struct ath9k_channel *chan) { ar9003_hw_xpa_bias_level_apply(ah, IS_CHAN_2GHZ(chan)); ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan)); ar9003_hw_drive_strength_apply(ah); ar9003_hw_atten_apply(ah, chan); ar9003_hw_quick_drop_apply(ah, chan->channel); if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah)) ar9003_hw_internal_regulator_apply(ah); if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah)) ar9003_hw_apply_tuning_caps(ah); ar9003_hw_txend_to_xpa_off_apply(ah, chan->channel); } static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah, struct ath9k_channel *chan) { } /* * Returns the interpolated y value corresponding to the specified x value * from the np ordered pairs of data (px,py). * The pairs do not have to be in any order. * If the specified x value is less than any of the px, * the returned y value is equal to the py for the lowest px. * If the specified x value is greater than any of the px, * the returned y value is equal to the py for the highest px. 
*/
static int ar9003_hw_power_interpolate(int32_t x,
				       int32_t *px, int32_t *py,
				       u_int16_t np)
{
	int ip = 0;
	int lx = 0, ly = 0, lhave = 0;
	int hx = 0, hy = 0, hhave = 0;
	int dx = 0;
	int y = 0;

	lhave = 0;
	hhave = 0;

	/* identify best lower and higher x calibration measurement */
	for (ip = 0; ip < np; ip++) {
		dx = x - px[ip];

		/* this measurement is higher than our desired x */
		if (dx <= 0) {
			if (!hhave || dx > (x - hx)) {
				/* new best higher x measurement */
				hx = px[ip];
				hy = py[ip];
				hhave = 1;
			}
		}
		/* this measurement is lower than our desired x */
		if (dx >= 0) {
			if (!lhave || dx < (x - lx)) {
				/* new best lower x measurement */
				lx = px[ip];
				ly = py[ip];
				lhave = 1;
			}
		}
	}

	/* the low x is good */
	if (lhave) {
		/* so is the high x */
		if (hhave) {
			/* they're the same, so just pick one */
			if (hx == lx)
				y = ly;
			else	/* interpolate */
				y = interpolate(x, lx, hx, ly, hy);
		} else		/* only low is good, use it */
			y = ly;
	} else if (hhave)	/* only high is good, use it */
		y = hy;
	else	/* nothing is good, this should never happen unless np=0 ???? */
		y = -(1 << 30);
	return y;
}

/*
 * Interpolated legacy OFDM target power (in 0.5 dB steps) for @rateIndex
 * at @freq, from the calibration piers stored in the EEPROM.
 */
static u8 ar9003_hw_eeprom_get_tgt_pwr(struct ath_hw *ah,
				       u16 rateIndex, u16 freq, bool is2GHz)
{
	u16 numPiers, i;
	s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
	s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	struct cal_tgt_pow_legacy *pEepromTargetPwr;
	u8 *pFreqBin;

	if (is2GHz) {
		numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
		pEepromTargetPwr = eep->calTargetPower2G;
		pFreqBin = eep->calTarget_freqbin_2G;
	} else {
		numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
		pEepromTargetPwr = eep->calTargetPower5G;
		pFreqBin = eep->calTarget_freqbin_5G;
	}

	/*
	 * create array of channels and targetpower from
	 * targetpower piers stored on eeprom
	 */
	for (i = 0; i < numPiers; i++) {
		freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
		targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
	}

	/* interpolate to get target power for given frequency */
	return (u8) ar9003_hw_power_interpolate((s32) freq,
						freqArray,
						targetPowerArray, numPiers);
}

/* Interpolated HT20 target power for @rateIndex at @freq. */
static u8 ar9003_hw_eeprom_get_ht20_tgt_pwr(struct ath_hw *ah,
					    u16 rateIndex, u16 freq,
					    bool is2GHz)
{
	u16 numPiers, i;
	s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
	s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	struct cal_tgt_pow_ht *pEepromTargetPwr;
	u8 *pFreqBin;

	if (is2GHz) {
		numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
		pEepromTargetPwr = eep->calTargetPower2GHT20;
		pFreqBin = eep->calTarget_freqbin_2GHT20;
	} else {
		numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
		pEepromTargetPwr = eep->calTargetPower5GHT20;
		pFreqBin = eep->calTarget_freqbin_5GHT20;
	}

	/*
	 * create array of channels and targetpower
	 * from targetpower piers stored on eeprom
	 */
	for (i = 0; i < numPiers; i++) {
		freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
		targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
	}

	/* interpolate to get target power for given frequency */
	return (u8) ar9003_hw_power_interpolate((s32) freq,
						freqArray,
						targetPowerArray, numPiers);
}

/* Interpolated HT40 target power for @rateIndex at @freq. */
static u8 ar9003_hw_eeprom_get_ht40_tgt_pwr(struct ath_hw *ah,
					    u16 rateIndex, u16 freq,
					    bool is2GHz)
{
	u16 numPiers, i;
	s32 targetPowerArray[AR9300_NUM_5G_40_TARGET_POWERS];
	s32 freqArray[AR9300_NUM_5G_40_TARGET_POWERS];
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	struct cal_tgt_pow_ht *pEepromTargetPwr;
	u8 *pFreqBin;

	if (is2GHz) {
		numPiers = AR9300_NUM_2G_40_TARGET_POWERS;
		pEepromTargetPwr = eep->calTargetPower2GHT40;
		pFreqBin = eep->calTarget_freqbin_2GHT40;
	} else {
		numPiers = AR9300_NUM_5G_40_TARGET_POWERS;
		pEepromTargetPwr = eep->calTargetPower5GHT40;
		pFreqBin = eep->calTarget_freqbin_5GHT40;
	}

	/*
	 * create array of channels and targetpower from
	 * targetpower piers stored on eeprom
	 */
	for (i = 0; i < numPiers; i++) {
		freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
		targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
	}

	/* interpolate to get target power for given frequency */
	return (u8) ar9003_hw_power_interpolate((s32) freq,
						freqArray,
						targetPowerArray, numPiers);
}

/* Interpolated CCK target power (2 GHz only) for @rateIndex at @freq. */
static u8 ar9003_hw_eeprom_get_cck_tgt_pwr(struct ath_hw *ah,
					   u16 rateIndex, u16 freq)
{
	u16 numPiers = AR9300_NUM_2G_CCK_TARGET_POWERS, i;
	s32 targetPowerArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
	s32 freqArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	struct cal_tgt_pow_legacy *pEepromTargetPwr = eep->calTargetPowerCck;
	u8 *pFreqBin = eep->calTarget_freqbin_Cck;

	/*
	 * create array of channels and targetpower from
	 * targetpower piers stored on eeprom
	 */
	for (i = 0; i < numPiers; i++) {
		freqArray[i] = FBIN2FREQ(pFreqBin[i], 1);
		targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
	}

	/* interpolate to get target power for given frequency */
	return (u8) ar9003_hw_power_interpolate((s32) freq,
						freqArray,
						targetPowerArray, numPiers);
}

/* Set tx power registers to array of values passed in */
static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 *pPwrArray)
{
#define POW_SM(_r, _s)	(((_r) & 0x3f) << (_s))
	/* make sure forced gain is not set */
	REG_WRITE(ah, AR_PHY_TX_FORCED_GAIN, 0);

	/* Write the OFDM power per rate set */

	/* 6 (LSB), 9, 12, 18 (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(0),
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));

	/* 24 (LSB), 36, 48, 54 (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(1),
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));

	/* Write the CCK power per rate set */

	/* 1L (LSB), reserved, 2L, 2S (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(2),
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
		  /* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0));

	/* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(3),
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
	    );

	/* Write the power for duplicated frames - HT40 */

	/* dup40_cck (LSB), dup40_ofdm, ext20_cck, ext20_ofdm (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(8),
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
	    );

	/* Write the HT20 power per rate set */

	/* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(4),
		  POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_0_8_16], 0)
	    );

	/* 6 (LSB), 7, 12, 13 (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(5),
		  POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_6], 0)
	    );

	/* 14 (LSB), 15, 20, 21 */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(9),
		  POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_14], 0)
	    );

	/* Mixed HT20 and HT40 rates */

	/* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(10),
		  POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_22], 0)
	    );

	/*
	 * Write the HT40 power per rate set
	 * correct PAR difference between HT40 and HT20/LEGACY
	 * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB)
	 */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(6),
		  POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_0_8_16], 0)
	    );

	/* 6 (LSB), 7, 12, 13 (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(7),
		  POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_6], 0)
	    );

	/* 14 (LSB), 15, 20, 21 */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(11),
		  POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_14], 0)
	    );

	return 0;
#undef POW_SM
}

/*
 * Fill @targetPowerValT2 with the per-rate target powers for @freq,
 * looked up (and interpolated) from the EEPROM target-power piers.
 */
static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
					      u8 *targetPowerValT2)
{
	/* XXX: hard code for now, need to get from eeprom struct */
	u8 ht40PowerIncForPdadc = 0;
	bool is2GHz = false;
	unsigned int i = 0;
	struct ath_common *common = ath9k_hw_common(ah);

	if (freq < 4000)
		is2GHz = true;

	targetPowerValT2[ALL_TARGET_LEGACY_6_24] =
	    ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_6_24, freq,
					 is2GHz);
	targetPowerValT2[ALL_TARGET_LEGACY_36] =
	    ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_36, freq,
					 is2GHz);
	targetPowerValT2[ALL_TARGET_LEGACY_48] =
	    ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_48, freq,
					 is2GHz);
	targetPowerValT2[ALL_TARGET_LEGACY_54] =
	    ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_54, freq,
					 is2GHz);
	targetPowerValT2[ALL_TARGET_LEGACY_1L_5L] =
	    ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_1L_5L,
					     freq);
	targetPowerValT2[ALL_TARGET_LEGACY_5S] =
	    ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_5S, freq);
	targetPowerValT2[ALL_TARGET_LEGACY_11L] =
	    ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11L, freq);
	targetPowerValT2[ALL_TARGET_LEGACY_11S] =
	    ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11S, freq);
	targetPowerValT2[ALL_TARGET_HT20_0_8_16] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_1_3_9_11_17_19] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_1_3_9_11_17_19,
					      freq, is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_4] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_5] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_6] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_7] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_12] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_13] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_14] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_15] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_20] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_21] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_22] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_23] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT40_0_8_16] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_1_3_9_11_17_19] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_1_3_9_11_17_19,
					      freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_4] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_5] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_6] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_7] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_12] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_13] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_14] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_15] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_20] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_21] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_22] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_23] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
					      is2GHz) + ht40PowerIncForPdadc;

	for (i = 0; i < ar9300RateSize; i++) {
		ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
			i, targetPowerValT2[i]);
	}
}

/*
 * Read one calibration pier (frequency, power correction, temperature,
 * voltage) for the given mode/chain. Returns 0 on success, -1 when the
 * chain or pier index is out of range.
 */
static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
				  int mode,
				  int ipier,
				  int ichain,
				  int *pfrequency,
				  int *pcorrection,
				  int *ptemperature, int *pvoltage)
{
	u8 *pCalPier;
	struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct;
	int is2GHz;
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	struct ath_common *common = ath9k_hw_common(ah);

	if (ichain >= AR9300_MAX_CHAINS) {
		ath_dbg(common, EEPROM,
			"Invalid chain index, must be less than %d\n",
			AR9300_MAX_CHAINS);
		return -1;
	}

	if (mode) {		/* 5GHz */
		if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
			ath_dbg(common, EEPROM,
				"Invalid 5GHz cal pier index, must be less than %d\n",
				AR9300_NUM_5G_CAL_PIERS);
			return -1;
		}
		pCalPier = &(eep->calFreqPier5G[ipier]);
		pCalPierStruct = &(eep->calPierData5G[ichain][ipier]);
		is2GHz = 0;
	} else {
		if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
			ath_dbg(common, EEPROM,
				"Invalid 2GHz cal pier index, must be less than %d\n",
				AR9300_NUM_2G_CAL_PIERS);
			return -1;
		}

		pCalPier = &(eep->calFreqPier2G[ipier]);
		pCalPierStruct = &(eep->calPierData2G[ichain][ipier]);
		is2GHz = 1;
	}

	*pfrequency = FBIN2FREQ(*pCalPier, is2GHz);
	*pcorrection = pCalPierStruct->refPower;
	*ptemperature = pCalPierStruct->tempMeas;
	*pvoltage = pCalPierStruct->voltMeas;
	return 0;
}

/*
 * Program per-chain open-loop power-control gain deltas, enable OLPC
 * error-estimate mode, and set the temperature-compensation slope
 * (interpolated over 5180/5500/5785 MHz for 5 GHz when base_ext2
 * provides a low-band slope).
 */
static int ar9003_hw_power_control_override(struct ath_hw *ah,
					    int frequency,
					    int *correction,
					    int *voltage, int *temperature)
{
	int tempSlope = 0;
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	int f[3], t[3];

	REG_RMW(ah, AR_PHY_TPC_11_B0,
		(correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
		AR_PHY_TPC_OLPC_GAIN_DELTA);
	if (ah->caps.tx_chainmask & BIT(1))
		REG_RMW(ah, AR_PHY_TPC_11_B1,
			(correction[1] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
			AR_PHY_TPC_OLPC_GAIN_DELTA);
	if (ah->caps.tx_chainmask & BIT(2))
		REG_RMW(ah, AR_PHY_TPC_11_B2,
			(correction[2] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
			AR_PHY_TPC_OLPC_GAIN_DELTA);

	/* enable open loop power control on chip */
	REG_RMW(ah, AR_PHY_TPC_6_B0,
		(3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
		AR_PHY_TPC_6_ERROR_EST_MODE);
	if (ah->caps.tx_chainmask & BIT(1))
		REG_RMW(ah, AR_PHY_TPC_6_B1,
			(3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
			AR_PHY_TPC_6_ERROR_EST_MODE);
	if (ah->caps.tx_chainmask & BIT(2))
		REG_RMW(ah, AR_PHY_TPC_6_B2,
			(3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
			AR_PHY_TPC_6_ERROR_EST_MODE);

	/*
	 * enable temperature compensation
	 * Need to use register names
	 */
	if (frequency < 4000)
		tempSlope = eep->modalHeader2G.tempSlope;
	else if (eep->base_ext2.tempSlopeLow != 0) {
		t[0] = eep->base_ext2.tempSlopeLow;
		f[0] = 5180;
		t[1] = eep->modalHeader5G.tempSlope;
		f[1] = 5500;
		t[2] = eep->base_ext2.tempSlopeHigh;
		f[2] = 5785;
		tempSlope = ar9003_hw_power_interpolate((s32) frequency,
							f, t, 3);
	} else
		tempSlope = eep->modalHeader5G.tempSlope;

	REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope);

	if (AR_SREV_9462_20(ah))
		REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
			      AR_PHY_TPC_19_B1_ALPHA_THERM, tempSlope);

	REG_RMW_FIELD(ah, AR_PHY_TPC_18, AR_PHY_TPC_18_THERM_CAL_VALUE,
		      temperature[0]);

	return 0;
}

/* Apply the recorded correction values.
*/
static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
{
	int ichain, ipier, npier;
	int mode;
	int lfrequency[AR9300_MAX_CHAINS],
	    lcorrection[AR9300_MAX_CHAINS],
	    ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS];
	int hfrequency[AR9300_MAX_CHAINS],
	    hcorrection[AR9300_MAX_CHAINS],
	    htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS];
	int fdiff;
	int correction[AR9300_MAX_CHAINS],
	    voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS];
	int pfrequency, pcorrection, ptemperature, pvoltage;
	struct ath_common *common = ath9k_hw_common(ah);

	/* mode: 1 = 5 GHz pier table, 0 = 2 GHz pier table */
	mode = (frequency >= 4000);
	if (mode)
		npier = AR9300_NUM_5G_CAL_PIERS;
	else
		npier = AR9300_NUM_2G_CAL_PIERS;

	/* sentinel values: 0 = no lower pier yet, 100000 = no higher pier */
	for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
		lfrequency[ichain] = 0;
		hfrequency[ichain] = 100000;
	}
	/* identify best lower and higher frequency calibration measurement */
	for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
		for (ipier = 0; ipier < npier; ipier++) {
			if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain,
						    &pfrequency, &pcorrection,
						    &ptemperature, &pvoltage)) {
				fdiff = frequency - pfrequency;

				/*
				 * this measurement is higher than
				 * our desired frequency
				 */
				if (fdiff <= 0) {
					if (hfrequency[ichain] <= 0 ||
					    hfrequency[ichain] >= 100000 ||
					    fdiff >
					    (frequency - hfrequency[ichain])) {
						/*
						 * new best higher
						 * frequency measurement
						 */
						hfrequency[ichain] = pfrequency;
						hcorrection[ichain] =
						    pcorrection;
						htemperature[ichain] =
						    ptemperature;
						hvoltage[ichain] = pvoltage;
					}
				}
				if (fdiff >= 0) {
					if (lfrequency[ichain] <= 0
					    || fdiff <
					    (frequency - lfrequency[ichain])) {
						/*
						 * new best lower
						 * frequency measurement
						 */
						lfrequency[ichain] = pfrequency;
						lcorrection[ichain] =
						    pcorrection;
						ltemperature[ichain] =
						    ptemperature;
						lvoltage[ichain] = pvoltage;
					}
				}
			}
		}
	}

	/* interpolate */
	for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
		ath_dbg(common, EEPROM, "ch=%d f=%d low=%d %d h=%d %d\n",
			ichain, frequency, lfrequency[ichain],
			lcorrection[ichain], hfrequency[ichain],
			hcorrection[ichain]);
		/* they're the same, so just pick one */
		if (hfrequency[ichain] == lfrequency[ichain]) {
			correction[ichain] = lcorrection[ichain];
			voltage[ichain] = lvoltage[ichain];
			temperature[ichain] = ltemperature[ichain];
		}
		/* the low frequency is good */
		else if (frequency - lfrequency[ichain] < 1000) {
			/* so is the high frequency, interpolate */
			if (hfrequency[ichain] - frequency < 1000) {

				correction[ichain] = interpolate(frequency,
						lfrequency[ichain],
						hfrequency[ichain],
						lcorrection[ichain],
						hcorrection[ichain]);

				temperature[ichain] = interpolate(frequency,
						lfrequency[ichain],
						hfrequency[ichain],
						ltemperature[ichain],
						htemperature[ichain]);

				voltage[ichain] = interpolate(frequency,
						lfrequency[ichain],
						hfrequency[ichain],
						lvoltage[ichain],
						hvoltage[ichain]);
			}
			/* only low is good, use it */
			else {
				correction[ichain] = lcorrection[ichain];
				temperature[ichain] = ltemperature[ichain];
				voltage[ichain] = lvoltage[ichain];
			}
		}
		/* only high is good, use it */
		else if (hfrequency[ichain] - frequency < 1000) {
			correction[ichain] = hcorrection[ichain];
			temperature[ichain] = htemperature[ichain];
			voltage[ichain] = hvoltage[ichain];
		} else {	/* nothing is good, presume 0???? */
			correction[ichain] = 0;
			temperature[ichain] = 0;
			voltage[ichain] = 0;
		}
	}

	ar9003_hw_power_control_override(ah, frequency, correction, voltage,
					 temperature);

	ath_dbg(common, EEPROM,
		"for frequency=%d, calibration correction = %d %d %d\n",
		frequency, correction[0], correction[1], correction[2]);

	return 0;
}

/* CTL edge power for an exact frequency match at @edge. */
static u16 ar9003_hw_get_direct_edge_power(struct ar9300_eeprom *eep,
					   int idx,
					   int edge, bool is2GHz)
{
	struct cal_ctl_data_2g *ctl_2g = eep->ctlPowerData_2G;
	struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G;

	if (is2GHz)
		return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge]);
	else
		return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge]);
}

/*
 * CTL edge power from the previous (lower) edge when its in-band flag is
 * set and it lies below @freq; otherwise MAX_RATE_POWER (no limit).
 */
static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
					     int idx,
					     unsigned int edge,
					     u16 freq, bool is2GHz)
{
	struct cal_ctl_data_2g *ctl_2g = eep->ctlPowerData_2G;
	struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G;

	u8 *ctl_freqbin = is2GHz ?
		&eep->ctl_freqbin_2G[idx][0] :
		&eep->ctl_freqbin_5G[idx][0];

	if (is2GHz) {
		if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq &&
		    CTL_EDGE_FLAGS(ctl_2g[idx].ctlEdges[edge - 1]))
			return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge - 1]);
	} else {
		if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq &&
		    CTL_EDGE_FLAGS(ctl_5g[idx].ctlEdges[edge - 1]))
			return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge - 1]);
	}

	return MAX_RATE_POWER;
}

/*
 * Find the maximum conformance test limit for the given channel and CTL info
 */
static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep,
					u16 freq, int idx, bool is2GHz)
{
	u16 twiceMaxEdgePower = MAX_RATE_POWER;
	u8 *ctl_freqbin = is2GHz ?
		&eep->ctl_freqbin_2G[idx][0] :
		&eep->ctl_freqbin_5G[idx][0];
	u16 num_edges = is2GHz ?
AR9300_NUM_BAND_EDGES_2G : AR9300_NUM_BAND_EDGES_5G;
	unsigned int edge;

	/* Get the edge power */
	for (edge = 0;
	     (edge < num_edges) && (ctl_freqbin[edge] != AR5416_BCHAN_UNUSED);
	     edge++) {
		/*
		 * If there's an exact channel match or an inband flag set
		 * on the lower channel use the given rdEdgePower
		 */
		if (freq == ath9k_hw_fbin2freq(ctl_freqbin[edge], is2GHz)) {
			twiceMaxEdgePower =
				ar9003_hw_get_direct_edge_power(eep, idx,
								edge, is2GHz);
			break;
		} else if ((edge > 0) &&
			   (freq < ath9k_hw_fbin2freq(ctl_freqbin[edge],
						      is2GHz))) {
			twiceMaxEdgePower =
				ar9003_hw_get_indirect_edge_power(eep, idx,
								  edge, freq,
								  is2GHz);
			/*
			 * Leave loop - no more affecting edges possible in
			 * this monotonic increasing list
			 */
			break;
		}
	}
	return twiceMaxEdgePower;
}

/*
 * Cap the per-rate target powers in @pPwrArray with the regulatory
 * (CTL) limits for @chan, after subtracting antenna gain and splitting
 * the limit across active TX chains.
 */
static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
					       struct ath9k_channel *chan,
					       u8 *pPwrArray, u16 cfgCtl,
					       u8 antenna_reduction,
					       u16 powerLimit)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep;
	u16 twiceMaxEdgePower;
	int i;
	u16 scaledPower = 0, minCtlPower;
	static const u16 ctlModesFor11a[] = {
		CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40
	};
	static const u16 ctlModesFor11g[] = {
		CTL_11B, CTL_11G, CTL_2GHT20,
		CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40
	};
	u16 numCtlModes;
	const u16 *pCtlMode;
	u16 ctlMode, freq;
	struct chan_centers centers;
	u8 *ctlIndex;
	u8 ctlNum;
	u16 twiceMinEdgePower;
	bool is2ghz = IS_CHAN_2GHZ(chan);

	ath9k_hw_get_channel_centers(ah, chan, &centers);
	scaledPower = powerLimit - antenna_reduction;

	/*
	 * Reduce scaled Power by number of chains active to get
	 * to per chain tx power level
	 */
	switch (ar5416_get_ntxchains(ah->txchainmask)) {
	case 1:
		break;
	case 2:
		if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
			scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
		else
			scaledPower = 0;
		break;
	case 3:
		if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
			scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
		else
			scaledPower = 0;
		break;
	}

	scaledPower = max((u16)0, scaledPower);

	/*
	 * Get target powers from EEPROM - our baseline for TX Power
	 */
	if (is2ghz) {
		/* Setup for CTL modes */
		/* CTL_11B, CTL_11G, CTL_2GHT20 */
		numCtlModes =
			ARRAY_SIZE(ctlModesFor11g) -
				 SUB_NUM_CTL_MODES_AT_2G_40;
		pCtlMode = ctlModesFor11g;
		if (IS_CHAN_HT40(chan))
			/* All 2G CTL's */
			numCtlModes = ARRAY_SIZE(ctlModesFor11g);
	} else {
		/* Setup for CTL modes */
		/* CTL_11A, CTL_5GHT20 */
		numCtlModes =
			ARRAY_SIZE(ctlModesFor11a) -
				 SUB_NUM_CTL_MODES_AT_5G_40;
		pCtlMode = ctlModesFor11a;
		if (IS_CHAN_HT40(chan))
			/* All 5G CTL's */
			numCtlModes = ARRAY_SIZE(ctlModesFor11a);
	}

	/*
	 * For MIMO, need to apply regulatory caps individually across
	 * dynamically running modes: CCK, OFDM, HT20, HT40
	 *
	 * The outer loop walks through each possible applicable runtime mode.
	 * The inner loop walks through each ctlIndex entry in EEPROM.
	 * The ctl value is encoded as [7:4] == test group, [3:0] == test mode.
	 */
	for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
		bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) ||
			(pCtlMode[ctlMode] == CTL_2GHT40);

		/* pick the frequency the CTL applies to for this mode */
		if (isHt40CtlMode)
			freq = centers.synth_center;
		else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
			freq = centers.ext_center;
		else
			freq = centers.ctl_center;

		ath_dbg(common, REGULATORY,
			"LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, EXT_ADDITIVE %d\n",
			ctlMode, numCtlModes, isHt40CtlMode,
			(pCtlMode[ctlMode] & EXT_ADDITIVE));

		/* walk through each CTL index stored in EEPROM */
		if (is2ghz) {
			ctlIndex = pEepData->ctlIndex_2G;
			ctlNum = AR9300_NUM_CTLS_2G;
		} else {
			ctlIndex = pEepData->ctlIndex_5G;
			ctlNum = AR9300_NUM_CTLS_5G;
		}

		twiceMaxEdgePower = MAX_RATE_POWER;
		for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) {
			ath_dbg(common, REGULATORY,
				"LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n",
				i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
				chan->channel);

			/*
			 * compare test group from regulatory
			 * channel list with test mode from pCtlMode
			 * list
			 */
			if ((((cfgCtl & ~CTL_MODE_M) |
			      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
				ctlIndex[i])
|| (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == ((ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))) { twiceMinEdgePower = ar9003_hw_get_max_edge_power(pEepData, freq, i, is2ghz); if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) /* * Find the minimum of all CTL * edge powers that apply to * this channel */ twiceMaxEdgePower = min(twiceMaxEdgePower, twiceMinEdgePower); else { /* specific */ twiceMaxEdgePower = twiceMinEdgePower; break; } } } minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); ath_dbg(common, REGULATORY, "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n", ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, scaledPower, minCtlPower); /* Apply ctl mode to correct target power set */ switch (pCtlMode[ctlMode]) { case CTL_11B: for (i = ALL_TARGET_LEGACY_1L_5L; i <= ALL_TARGET_LEGACY_11S; i++) pPwrArray[i] = (u8)min((u16)pPwrArray[i], minCtlPower); break; case CTL_11A: case CTL_11G: for (i = ALL_TARGET_LEGACY_6_24; i <= ALL_TARGET_LEGACY_54; i++) pPwrArray[i] = (u8)min((u16)pPwrArray[i], minCtlPower); break; case CTL_5GHT20: case CTL_2GHT20: for (i = ALL_TARGET_HT20_0_8_16; i <= ALL_TARGET_HT20_21; i++) pPwrArray[i] = (u8)min((u16)pPwrArray[i], minCtlPower); pPwrArray[ALL_TARGET_HT20_22] = (u8)min((u16)pPwrArray[ALL_TARGET_HT20_22], minCtlPower); pPwrArray[ALL_TARGET_HT20_23] = (u8)min((u16)pPwrArray[ALL_TARGET_HT20_23], minCtlPower); break; case CTL_5GHT40: case CTL_2GHT40: for (i = ALL_TARGET_HT40_0_8_16; i <= ALL_TARGET_HT40_23; i++) pPwrArray[i] = (u8)min((u16)pPwrArray[i], minCtlPower); break; default: break; } } /* end ctl mode checking */ } static inline u8 mcsidx_to_tgtpwridx(unsigned int mcs_idx, u8 base_pwridx) { u8 mod_idx = mcs_idx % 8; if (mod_idx <= 3) return mod_idx ? 
(base_pwridx + 1) : base_pwridx; else return base_pwridx + 4 * (mcs_idx / 8) + mod_idx - 2; } static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, struct ath9k_channel *chan, u16 cfgCtl, u8 twiceAntennaReduction, u8 powerLimit, bool test) { struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); struct ath_common *common = ath9k_hw_common(ah); struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; struct ar9300_modal_eep_header *modal_hdr; u8 targetPowerValT2[ar9300RateSize]; u8 target_power_val_t2_eep[ar9300RateSize]; unsigned int i = 0, paprd_scale_factor = 0; u8 pwr_idx, min_pwridx = 0; ar9003_hw_set_target_power_eeprom(ah, chan->channel, targetPowerValT2); if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) { if (IS_CHAN_2GHZ(chan)) modal_hdr = &eep->modalHeader2G; else modal_hdr = &eep->modalHeader5G; ah->paprd_ratemask = le32_to_cpu(modal_hdr->papdRateMaskHt20) & AR9300_PAPRD_RATE_MASK; ah->paprd_ratemask_ht40 = le32_to_cpu(modal_hdr->papdRateMaskHt40) & AR9300_PAPRD_RATE_MASK; paprd_scale_factor = ar9003_get_paprd_scale_factor(ah, chan); min_pwridx = IS_CHAN_HT40(chan) ? 
ALL_TARGET_HT40_0_8_16 : ALL_TARGET_HT20_0_8_16; if (!ah->paprd_table_write_done) { memcpy(target_power_val_t2_eep, targetPowerValT2, sizeof(targetPowerValT2)); for (i = 0; i < 24; i++) { pwr_idx = mcsidx_to_tgtpwridx(i, min_pwridx); if (ah->paprd_ratemask & (1 << i)) { if (targetPowerValT2[pwr_idx] && targetPowerValT2[pwr_idx] == target_power_val_t2_eep[pwr_idx]) targetPowerValT2[pwr_idx] -= paprd_scale_factor; } } } memcpy(target_power_val_t2_eep, targetPowerValT2, sizeof(targetPowerValT2)); } ar9003_hw_set_power_per_rate_table(ah, chan, targetPowerValT2, cfgCtl, twiceAntennaReduction, powerLimit); if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) { for (i = 0; i < ar9300RateSize; i++) { if ((ah->paprd_ratemask & (1 << i)) && (abs(targetPowerValT2[i] - target_power_val_t2_eep[i]) > paprd_scale_factor)) { ah->paprd_ratemask &= ~(1 << i); ath_dbg(common, EEPROM, "paprd disabled for mcs %d\n", i); } } } regulatory->max_power_level = 0; for (i = 0; i < ar9300RateSize; i++) { if (targetPowerValT2[i] > regulatory->max_power_level) regulatory->max_power_level = targetPowerValT2[i]; } ath9k_hw_update_regulatory_maxpower(ah); if (test) return; for (i = 0; i < ar9300RateSize; i++) { ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]); } ah->txpower_limit = regulatory->max_power_level; /* Write target power array to registers */ ar9003_hw_tx_power_regwrite(ah, targetPowerValT2); ar9003_hw_calibration_apply(ah, chan->channel); if (IS_CHAN_2GHZ(chan)) { if (IS_CHAN_HT40(chan)) i = ALL_TARGET_HT40_0_8_16; else i = ALL_TARGET_HT20_0_8_16; } else { if (IS_CHAN_HT40(chan)) i = ALL_TARGET_HT40_7; else i = ALL_TARGET_HT20_7; } ah->paprd_target_power = targetPowerValT2[i]; } static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) { return AR_NO_SPUR; } s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; return (eep->baseEepHeader.txrxgain >> 4) & 0xf; /* bits 7:4 */ } s32 
ar9003_hw_get_rx_gain_idx(struct ath_hw *ah) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; return (eep->baseEepHeader.txrxgain) & 0xf; /* bits 3:0 */ } u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; if (is_2ghz) return eep->modalHeader2G.spurChans; else return eep->modalHeader5G.spurChans; } unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah, struct ath9k_channel *chan) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; if (IS_CHAN_2GHZ(chan)) return MS(le32_to_cpu(eep->modalHeader2G.papdRateMaskHt20), AR9300_PAPRD_SCALE_1); else { if (chan->channel >= 5700) return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20), AR9300_PAPRD_SCALE_1); else if (chan->channel >= 5400) return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40), AR9300_PAPRD_SCALE_2); else return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40), AR9300_PAPRD_SCALE_1); } } const struct eeprom_ops eep_ar9300_ops = { .check_eeprom = ath9k_hw_ar9300_check_eeprom, .get_eeprom = ath9k_hw_ar9300_get_eeprom, .fill_eeprom = ath9k_hw_ar9300_fill_eeprom, .dump_eeprom = ath9k_hw_ar9003_dump_eeprom, .get_eeprom_ver = ath9k_hw_ar9300_get_eeprom_ver, .get_eeprom_rev = ath9k_hw_ar9300_get_eeprom_rev, .set_board_values = ath9k_hw_ar9300_set_board_values, .set_addac = ath9k_hw_ar9300_set_addac, .set_txpower = ath9k_hw_ar9300_set_txpower, .get_spur_channel = ath9k_hw_ar9300_get_spur_channel };
gpl-2.0
xaxaxa/n7102_kernel
drivers/net/virtio_net.c
1836
27996
/* A network driver using virtio. * * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ //#define DEBUG #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/module.h> #include <linux/virtio.h> #include <linux/virtio_net.h> #include <linux/scatterlist.h> #include <linux/if_vlan.h> #include <linux/slab.h> static int napi_weight = 128; module_param(napi_weight, int, 0444); static int csum = 1, gso = 1; module_param(csum, bool, 0444); module_param(gso, bool, 0444); /* FIXME: MTU in config. */ #define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) #define GOOD_COPY_LEN 128 #define VIRTNET_SEND_COMMAND_SG_MAX 2 struct virtnet_info { struct virtio_device *vdev; struct virtqueue *rvq, *svq, *cvq; struct net_device *dev; struct napi_struct napi; unsigned int status; /* Number of input buffers, and max we've ever had. */ unsigned int num, max; /* I like... big packets and I cannot lie! */ bool big_packets; /* Host will merge rx buffers for big packets (shake it! shake it!) */ bool mergeable_rx_bufs; /* Work struct for refilling if we run low on memory. */ struct delayed_work refill; /* Chain pages by the private ptr. 
*/ struct page *pages; /* fragments + linear part + virtio header */ struct scatterlist rx_sg[MAX_SKB_FRAGS + 2]; struct scatterlist tx_sg[MAX_SKB_FRAGS + 2]; }; struct skb_vnet_hdr { union { struct virtio_net_hdr hdr; struct virtio_net_hdr_mrg_rxbuf mhdr; }; unsigned int num_sg; }; struct padded_vnet_hdr { struct virtio_net_hdr hdr; /* * virtio_net_hdr should be in a separated sg buffer because of a * QEMU bug, and data sg buffer shares same page with this header sg. * This padding makes next sg 16 byte aligned after virtio_net_hdr. */ char padding[6]; }; static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb) { return (struct skb_vnet_hdr *)skb->cb; } /* * private is used to chain pages for big packets, put the whole * most recent used list in the beginning for reuse */ static void give_pages(struct virtnet_info *vi, struct page *page) { struct page *end; /* Find end of list, sew whole thing into vi->pages. */ for (end = page; end->private; end = (struct page *)end->private); end->private = (unsigned long)vi->pages; vi->pages = page; } static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask) { struct page *p = vi->pages; if (p) { vi->pages = (struct page *)p->private; /* clear private here, it is used to chain pages */ p->private = 0; } else p = alloc_page(gfp_mask); return p; } static void skb_xmit_done(struct virtqueue *svq) { struct virtnet_info *vi = svq->vdev->priv; /* Suppress further interrupts. */ virtqueue_disable_cb(svq); /* We were probably waiting for more output buffers. 
*/ netif_wake_queue(vi->dev); } static void set_skb_frag(struct sk_buff *skb, struct page *page, unsigned int offset, unsigned int *len) { int i = skb_shinfo(skb)->nr_frags; skb_frag_t *f; f = &skb_shinfo(skb)->frags[i]; f->size = min((unsigned)PAGE_SIZE - offset, *len); f->page_offset = offset; f->page = page; skb->data_len += f->size; skb->len += f->size; skb_shinfo(skb)->nr_frags++; *len -= f->size; } static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct page *page, unsigned int len) { struct sk_buff *skb; struct skb_vnet_hdr *hdr; unsigned int copy, hdr_len, offset; char *p; p = page_address(page); /* copy small packet so we can reuse these pages for small data */ skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN); if (unlikely(!skb)) return NULL; hdr = skb_vnet_hdr(skb); if (vi->mergeable_rx_bufs) { hdr_len = sizeof hdr->mhdr; offset = hdr_len; } else { hdr_len = sizeof hdr->hdr; offset = sizeof(struct padded_vnet_hdr); } memcpy(hdr, p, hdr_len); len -= hdr_len; p += offset; copy = len; if (copy > skb_tailroom(skb)) copy = skb_tailroom(skb); memcpy(skb_put(skb, copy), p, copy); len -= copy; offset += copy; while (len) { set_skb_frag(skb, page, offset, &len); page = (struct page *)page->private; offset = 0; } if (page) give_pages(vi, page); return skb; } static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb) { struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); struct page *page; int num_buf, i, len; num_buf = hdr->mhdr.num_buffers; while (--num_buf) { i = skb_shinfo(skb)->nr_frags; if (i >= MAX_SKB_FRAGS) { pr_debug("%s: packet too long\n", skb->dev->name); skb->dev->stats.rx_length_errors++; return -EINVAL; } page = virtqueue_get_buf(vi->rvq, &len); if (!page) { pr_debug("%s: rx error: %d buffers missing\n", skb->dev->name, hdr->mhdr.num_buffers); skb->dev->stats.rx_length_errors++; return -EINVAL; } if (len > PAGE_SIZE) len = PAGE_SIZE; set_skb_frag(skb, page, 0, &len); --vi->num; } return 0; } static void receive_buf(struct 
net_device *dev, void *buf, unsigned int len) { struct virtnet_info *vi = netdev_priv(dev); struct sk_buff *skb; struct page *page; struct skb_vnet_hdr *hdr; if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); dev->stats.rx_length_errors++; if (vi->mergeable_rx_bufs || vi->big_packets) give_pages(vi, buf); else dev_kfree_skb(buf); return; } if (!vi->mergeable_rx_bufs && !vi->big_packets) { skb = buf; len -= sizeof(struct virtio_net_hdr); skb_trim(skb, len); } else { page = buf; skb = page_to_skb(vi, page, len); if (unlikely(!skb)) { dev->stats.rx_dropped++; give_pages(vi, page); return; } if (vi->mergeable_rx_bufs) if (receive_mergeable(vi, skb)) { dev_kfree_skb(skb); return; } } hdr = skb_vnet_hdr(skb); skb->truesize += skb->data_len; dev->stats.rx_bytes += skb->len; dev->stats.rx_packets++; if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { pr_debug("Needs csum!\n"); if (!skb_partial_csum_set(skb, hdr->hdr.csum_start, hdr->hdr.csum_offset)) goto frame_err; } skb->protocol = eth_type_trans(skb, dev); pr_debug("Receiving skb proto 0x%04x len %i type %i\n", ntohs(skb->protocol), skb->len, skb->pkt_type); if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { pr_debug("GSO!\n"); switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; break; case VIRTIO_NET_HDR_GSO_UDP: skb_shinfo(skb)->gso_type = SKB_GSO_UDP; break; case VIRTIO_NET_HDR_GSO_TCPV6: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; break; default: if (net_ratelimit()) printk(KERN_WARNING "%s: bad gso type %u.\n", dev->name, hdr->hdr.gso_type); goto frame_err; } if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; skb_shinfo(skb)->gso_size = hdr->hdr.gso_size; if (skb_shinfo(skb)->gso_size == 0) { if (net_ratelimit()) printk(KERN_WARNING "%s: zero gso size.\n", dev->name); goto frame_err; } /* Header must be checked, and gso_segs computed. 
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; } netif_receive_skb(skb); return; frame_err: dev->stats.rx_frame_errors++; dev_kfree_skb(skb); } static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp) { struct sk_buff *skb; struct skb_vnet_hdr *hdr; int err; skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); if (unlikely(!skb)) return -ENOMEM; skb_put(skb, MAX_PACKET_LEN); hdr = skb_vnet_hdr(skb); sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr); skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len); err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 2, skb, gfp); if (err < 0) dev_kfree_skb(skb); return err; } static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp) { struct page *first, *list = NULL; char *p; int i, err, offset; /* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */ for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { first = get_a_page(vi, gfp); if (!first) { if (list) give_pages(vi, list); return -ENOMEM; } sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE); /* chain new page in list head to match sg */ first->private = (unsigned long)list; list = first; } first = get_a_page(vi, gfp); if (!first) { give_pages(vi, list); return -ENOMEM; } p = page_address(first); /* vi->rx_sg[0], vi->rx_sg[1] share the same page */ /* a separated vi->rx_sg[0] for virtio_net_hdr only due to QEMU bug */ sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr)); /* vi->rx_sg[1] for data packet, from offset */ offset = sizeof(struct padded_vnet_hdr); sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset); /* chain first in list head */ first->private = (unsigned long)list; err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2, first, gfp); if (err < 0) give_pages(vi, first); return err; } static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp) { struct page *page; int err; page = get_a_page(vi, gfp); if (!page) return -ENOMEM; sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE); err = 
virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 1, page, gfp); if (err < 0) give_pages(vi, page); return err; } /* Returns false if we couldn't fill entirely (OOM). */ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) { int err; bool oom; do { if (vi->mergeable_rx_bufs) err = add_recvbuf_mergeable(vi, gfp); else if (vi->big_packets) err = add_recvbuf_big(vi, gfp); else err = add_recvbuf_small(vi, gfp); oom = err == -ENOMEM; if (err < 0) break; ++vi->num; } while (err > 0); if (unlikely(vi->num > vi->max)) vi->max = vi->num; virtqueue_kick(vi->rvq); return !oom; } static void skb_recv_done(struct virtqueue *rvq) { struct virtnet_info *vi = rvq->vdev->priv; /* Schedule NAPI, Suppress further interrupts if successful. */ if (napi_schedule_prep(&vi->napi)) { virtqueue_disable_cb(rvq); __napi_schedule(&vi->napi); } } static void virtnet_napi_enable(struct virtnet_info *vi) { napi_enable(&vi->napi); /* If all buffers were filled by other side before we napi_enabled, we * won't get another interrupt, so process any outstanding packets * now. virtnet_poll wants re-enable the queue, so we disable here. * We synchronize against interrupts via NAPI_STATE_SCHED */ if (napi_schedule_prep(&vi->napi)) { virtqueue_disable_cb(vi->rvq); __napi_schedule(&vi->napi); } } static void refill_work(struct work_struct *work) { struct virtnet_info *vi; bool still_empty; vi = container_of(work, struct virtnet_info, refill.work); napi_disable(&vi->napi); still_empty = !try_fill_recv(vi, GFP_KERNEL); virtnet_napi_enable(vi); /* In theory, this can happen: if we don't get any buffers in * we will *never* try to fill again. 
*/ if (still_empty) schedule_delayed_work(&vi->refill, HZ/2); } static int virtnet_poll(struct napi_struct *napi, int budget) { struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi); void *buf; unsigned int len, received = 0; again: while (received < budget && (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) { receive_buf(vi->dev, buf, len); --vi->num; received++; } if (vi->num < vi->max / 2) { if (!try_fill_recv(vi, GFP_ATOMIC)) schedule_delayed_work(&vi->refill, 0); } /* Out of packets? */ if (received < budget) { napi_complete(napi); if (unlikely(!virtqueue_enable_cb(vi->rvq)) && napi_schedule_prep(napi)) { virtqueue_disable_cb(vi->rvq); __napi_schedule(napi); goto again; } } return received; } static unsigned int free_old_xmit_skbs(struct virtnet_info *vi) { struct sk_buff *skb; unsigned int len, tot_sgs = 0; while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { pr_debug("Sent skb %p\n", skb); vi->dev->stats.tx_bytes += skb->len; vi->dev->stats.tx_packets++; tot_sgs += skb_vnet_hdr(skb)->num_sg; dev_kfree_skb_any(skb); } return tot_sgs; } static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) { struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); if (skb->ip_summed == CHECKSUM_PARTIAL) { hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; hdr->hdr.csum_start = skb_checksum_start_offset(skb); hdr->hdr.csum_offset = skb->csum_offset; } else { hdr->hdr.flags = 0; hdr->hdr.csum_offset = hdr->hdr.csum_start = 0; } if (skb_is_gso(skb)) { hdr->hdr.hdr_len = skb_headlen(skb); hdr->hdr.gso_size = skb_shinfo(skb)->gso_size; if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; else BUG(); if 
(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; } else { hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; hdr->hdr.gso_size = hdr->hdr.hdr_len = 0; } hdr->mhdr.num_buffers = 0; /* Encode metadata header at front. */ if (vi->mergeable_rx_bufs) sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr); else sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr); hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1; return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg, 0, skb); } static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int capacity; /* Free up any pending old buffers before queueing new ones. */ free_old_xmit_skbs(vi); /* Try to transmit */ capacity = xmit_skb(vi, skb); /* This can happen with OOM and indirect buffers. */ if (unlikely(capacity < 0)) { if (net_ratelimit()) { if (likely(capacity == -ENOMEM)) { dev_warn(&dev->dev, "TX queue failure: out of memory\n"); } else { dev->stats.tx_fifo_errors++; dev_warn(&dev->dev, "Unexpected TX queue failure: %d\n", capacity); } } dev->stats.tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; } virtqueue_kick(vi->svq); /* Don't wait up for transmitted skbs to be freed. */ skb_orphan(skb); nf_reset(skb); /* Apparently nice girls don't return TX_BUSY; stop the queue * before it gets out of hand. Naturally, this wastes entries. */ if (capacity < 2+MAX_SKB_FRAGS) { netif_stop_queue(dev); if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) { /* More just got used, free them then recheck. 
*/ capacity += free_old_xmit_skbs(vi); if (capacity >= 2+MAX_SKB_FRAGS) { netif_start_queue(dev); virtqueue_disable_cb(vi->svq); } } } return NETDEV_TX_OK; } static int virtnet_set_mac_address(struct net_device *dev, void *p) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; int ret; ret = eth_mac_addr(dev, p); if (ret) return ret; if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) vdev->config->set(vdev, offsetof(struct virtio_net_config, mac), dev->dev_addr, dev->addr_len); return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void virtnet_netpoll(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); napi_schedule(&vi->napi); } #endif static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); virtnet_napi_enable(vi); return 0; } /* * Send command via the control virtqueue and check status. Commands * supported by the hypervisor, as indicated by feature bits, should * never fail unless improperly formated. */ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, struct scatterlist *data, int out, int in) { struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2]; struct virtio_net_ctrl_hdr ctrl; virtio_net_ctrl_ack status = ~0; unsigned int tmp; int i; /* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) || (out + in > VIRTNET_SEND_COMMAND_SG_MAX)); out++; /* Add header */ in++; /* Add return status */ ctrl.class = class; ctrl.cmd = cmd; sg_init_table(sg, out + in); sg_set_buf(&sg[0], &ctrl, sizeof(ctrl)); for_each_sg(data, s, out + in - 2, i) sg_set_buf(&sg[i + 1], sg_virt(s), s->length); sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0); virtqueue_kick(vi->cvq); /* * Spin for a response, the kick causes an ioport write, trapping * into the hypervisor, so the request should be handled immediately. 
*/ while (!virtqueue_get_buf(vi->cvq, &tmp)) cpu_relax(); return status == VIRTIO_NET_OK; } static int virtnet_close(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); napi_disable(&vi->napi); return 0; } static void virtnet_set_rx_mode(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg[2]; u8 promisc, allmulti; struct virtio_net_ctrl_mac *mac_data; struct netdev_hw_addr *ha; int uc_count; int mc_count; void *buf; int i; /* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) return; promisc = ((dev->flags & IFF_PROMISC) != 0); allmulti = ((dev->flags & IFF_ALLMULTI) != 0); sg_init_one(sg, &promisc, sizeof(promisc)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC, sg, 1, 0)) dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", promisc ? "en" : "dis"); sg_init_one(sg, &allmulti, sizeof(allmulti)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_ALLMULTI, sg, 1, 0)) dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", allmulti ? 
"en" : "dis"); uc_count = netdev_uc_count(dev); mc_count = netdev_mc_count(dev); /* MAC filter - use one buffer for both lists */ buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + (2 * sizeof(mac_data->entries)), GFP_ATOMIC); mac_data = buf; if (!buf) { dev_warn(&dev->dev, "No memory for MAC address buffer\n"); return; } sg_init_table(sg, 2); /* Store the unicast list and count in the front of the buffer */ mac_data->entries = uc_count; i = 0; netdev_for_each_uc_addr(ha, dev) memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); sg_set_buf(&sg[0], mac_data, sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); /* multicast list and count fill the end */ mac_data = (void *)&mac_data->macs[uc_count][0]; mac_data->entries = mc_count; i = 0; netdev_for_each_mc_addr(ha, dev) memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); sg_set_buf(&sg[1], mac_data, sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET, sg, 2, 0)) dev_warn(&dev->dev, "Failed to set MAC fitler table.\n"); kfree(buf); } static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; sg_init_one(&sg, &vid, sizeof(vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0)) dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); } static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; sg_init_one(&sg, &vid, sizeof(vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0)) dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); } static const struct ethtool_ops virtnet_ethtool_ops = { .get_link = ethtool_op_get_link, }; #define MIN_MTU 68 #define MAX_MTU 65535 static int virtnet_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) return -EINVAL; dev->mtu 
= new_mtu; return 0; } static const struct net_device_ops virtnet_netdev = { .ndo_open = virtnet_open, .ndo_stop = virtnet_close, .ndo_start_xmit = start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = virtnet_set_mac_address, .ndo_set_rx_mode = virtnet_set_rx_mode, .ndo_change_mtu = virtnet_change_mtu, .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = virtnet_netpoll, #endif }; static void virtnet_update_status(struct virtnet_info *vi) { u16 v; if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) return; vi->vdev->config->get(vi->vdev, offsetof(struct virtio_net_config, status), &v, sizeof(v)); /* Ignore unknown (future) status bits */ v &= VIRTIO_NET_S_LINK_UP; if (vi->status == v) return; vi->status = v; if (vi->status & VIRTIO_NET_S_LINK_UP) { netif_carrier_on(vi->dev); netif_wake_queue(vi->dev); } else { netif_carrier_off(vi->dev); netif_stop_queue(vi->dev); } } static void virtnet_config_changed(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; virtnet_update_status(vi); } static int virtnet_probe(struct virtio_device *vdev) { int err; struct net_device *dev; struct virtnet_info *vi; struct virtqueue *vqs[3]; vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL}; const char *names[] = { "input", "output", "control" }; int nvqs; /* Allocate ourselves a network device with room for our info */ dev = alloc_etherdev(sizeof(struct virtnet_info)); if (!dev) return -ENOMEM; /* Set up network device as normal. */ dev->netdev_ops = &virtnet_netdev; dev->features = NETIF_F_HIGHDMA; SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); SET_NETDEV_DEV(dev, &vdev->dev); /* Do we support "hardware" checksums? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { /* This opens up the world of extra features. 
*/ dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (csum) dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) dev->hw_features |= NETIF_F_TSO; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) dev->hw_features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->hw_features |= NETIF_F_TSO_ECN; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) dev->hw_features |= NETIF_F_UFO; if (gso) dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); /* (!csum && gso) case will be fixed by register_netdev() */ } /* Configuration may specify what MAC to use. Otherwise random. */ if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { vdev->config->get(vdev, offsetof(struct virtio_net_config, mac), dev->dev_addr, dev->addr_len); } else random_ether_addr(dev->dev_addr); /* Set up our device-specific information */ vi = netdev_priv(dev); netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight); vi->dev = dev; vi->vdev = vdev; vdev->priv = vi; vi->pages = NULL; INIT_DELAYED_WORK(&vi->refill, refill_work); sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg)); sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg)); /* If we can receive ANY GSO packets, we must allocate large ones. */ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) vi->big_packets = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) vi->mergeable_rx_bufs = true; /* We expect two virtqueues, receive then send, * and optionally control. */ nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 
3 : 2; err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names); if (err) goto free; vi->rvq = vqs[0]; vi->svq = vqs[1]; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { vi->cvq = vqs[2]; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) dev->features |= NETIF_F_HW_VLAN_FILTER; } err = register_netdev(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); goto free_vqs; } /* Last of all, set up some receive buffers. */ try_fill_recv(vi, GFP_KERNEL); /* If we didn't even get one input buffer, we're useless. */ if (vi->num == 0) { err = -ENOMEM; goto unregister; } /* Assume link up if device can't report link status, otherwise get link status from config. */ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { netif_carrier_off(dev); virtnet_update_status(vi); } else { vi->status = VIRTIO_NET_S_LINK_UP; netif_carrier_on(dev); } pr_debug("virtnet: registered device %s\n", dev->name); return 0; unregister: unregister_netdev(dev); cancel_delayed_work_sync(&vi->refill); free_vqs: vdev->config->del_vqs(vdev); free: free_netdev(dev); return err; } static void free_unused_bufs(struct virtnet_info *vi) { void *buf; while (1) { buf = virtqueue_detach_unused_buf(vi->svq); if (!buf) break; dev_kfree_skb(buf); } while (1) { buf = virtqueue_detach_unused_buf(vi->rvq); if (!buf) break; if (vi->mergeable_rx_bufs || vi->big_packets) give_pages(vi, buf); else dev_kfree_skb(buf); --vi->num; } BUG_ON(vi->num != 0); } static void __devexit virtnet_remove(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; /* Stop all the virtqueues. */ vdev->config->reset(vdev); unregister_netdev(vi->dev); cancel_delayed_work_sync(&vi->refill); /* Free unused buffers in both send and recv, if any. 
*/ free_unused_bufs(vi); vdev->config->del_vqs(vi->vdev); while (vi->pages) __free_pages(get_a_page(vi, GFP_KERNEL), 0); free_netdev(vi->dev); } static struct virtio_device_id id_table[] = { { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, { 0 }, }; static unsigned int features[] = { VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, }; static struct virtio_driver virtio_net_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtnet_probe, .remove = __devexit_p(virtnet_remove), .config_changed = virtnet_config_changed, }; static int __init init(void) { return register_virtio_driver(&virtio_net_driver); } static void __exit fini(void) { unregister_virtio_driver(&virtio_net_driver); } module_init(init); module_exit(fini); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio network driver"); MODULE_LICENSE("GPL");
gpl-2.0
caibo2014/mptcp
arch/mn10300/kernel/setup.c
2092
6815
/* MN10300 Arch-specific initialisation * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/tty.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/seq_file.h> #include <linux/cpu.h> #include <asm/processor.h> #include <linux/console.h> #include <asm/uaccess.h> #include <asm/setup.h> #include <asm/io.h> #include <asm/smp.h> #include <proc/proc.h> #include <asm/fpu.h> #include <asm/sections.h> struct mn10300_cpuinfo boot_cpu_data; static char __initdata cmd_line[COMMAND_LINE_SIZE]; char redboot_command_line[COMMAND_LINE_SIZE] = "console=ttyS0,115200 root=/dev/mtdblock3 rw"; char __initdata redboot_platform_name[COMMAND_LINE_SIZE]; static struct resource code_resource = { .start = 0x100000, .end = 0, .name = "Kernel code", }; static struct resource data_resource = { .start = 0, .end = 0, .name = "Kernel data", }; static unsigned long __initdata phys_memory_base; static unsigned long __initdata phys_memory_end; static unsigned long __initdata memory_end; unsigned long memory_size; struct thread_info *__current_ti = &init_thread_union.thread_info; struct task_struct *__current = &init_task; #define mn10300_known_cpus 5 static const char *const mn10300_cputypes[] = { "am33-1", "am33-2", "am34-1", "am33-3", "am34-2", "unknown" }; /* * Pick out the memory size. 
We look for mem=size, * where size is "size[KkMm]" */ static int __init early_mem(char *p) { memory_size = memparse(p, &p); if (memory_size == 0) panic("Memory size not known\n"); return 0; } early_param("mem", early_mem); /* * architecture specific setup */ void __init setup_arch(char **cmdline_p) { unsigned long bootmap_size; unsigned long kstart_pfn, start_pfn, free_pfn, end_pfn; cpu_init(); unit_setup(); smp_init_cpus(); /* save unparsed command line copy for /proc/cmdline */ strlcpy(boot_command_line, redboot_command_line, COMMAND_LINE_SIZE); /* populate cmd_line too for later use, preserving boot_command_line */ strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); *cmdline_p = cmd_line; parse_early_param(); memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS + memory_size; if (memory_end > phys_memory_end) memory_end = phys_memory_end; init_mm.start_code = (unsigned long)&_text; init_mm.end_code = (unsigned long) &_etext; init_mm.end_data = (unsigned long) &_edata; init_mm.brk = (unsigned long) &_end; code_resource.start = virt_to_bus(&_text); code_resource.end = virt_to_bus(&_etext)-1; data_resource.start = virt_to_bus(&_etext); data_resource.end = virt_to_bus(&_edata)-1; start_pfn = (CONFIG_KERNEL_RAM_BASE_ADDRESS >> PAGE_SHIFT); kstart_pfn = PFN_UP(__pa(&_text)); free_pfn = PFN_UP(__pa(&_end)); end_pfn = PFN_DOWN(__pa(memory_end)); bootmap_size = init_bootmem_node(&contig_page_data, free_pfn, start_pfn, end_pfn); if (kstart_pfn > start_pfn) free_bootmem(PFN_PHYS(start_pfn), PFN_PHYS(kstart_pfn - start_pfn)); free_bootmem(PFN_PHYS(free_pfn), PFN_PHYS(end_pfn - free_pfn)); /* If interrupt vector table is in main ram, then we need to reserve the page it is occupying. 
*/ if (CONFIG_INTERRUPT_VECTOR_BASE >= CONFIG_KERNEL_RAM_BASE_ADDRESS && CONFIG_INTERRUPT_VECTOR_BASE < memory_end) reserve_bootmem(CONFIG_INTERRUPT_VECTOR_BASE, PAGE_SIZE, BOOTMEM_DEFAULT); reserve_bootmem(PAGE_ALIGN(PFN_PHYS(free_pfn)), bootmap_size, BOOTMEM_DEFAULT); #ifdef CONFIG_VT #if defined(CONFIG_VGA_CONSOLE) conswitchp = &vga_con; #elif defined(CONFIG_DUMMY_CONSOLE) conswitchp = &dummy_con; #endif #endif paging_init(); } /* * perform CPU initialisation */ void __init cpu_init(void) { unsigned long cpurev = CPUREV, type; type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S; if (type > mn10300_known_cpus) type = mn10300_known_cpus; printk(KERN_INFO "Panasonic %s, rev %ld\n", mn10300_cputypes[type], (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S); get_mem_info(&phys_memory_base, &memory_size); phys_memory_end = phys_memory_base + memory_size; fpu_init_state(); } static struct cpu cpu_devices[NR_CPUS]; static int __init topology_init(void) { int i; for_each_present_cpu(i) register_cpu(&cpu_devices[i], i); return 0; } subsys_initcall(topology_init); /* * Get CPU information for use by the procfs. 
*/ static int show_cpuinfo(struct seq_file *m, void *v) { #ifdef CONFIG_SMP struct mn10300_cpuinfo *c = v; unsigned long cpu_id = c - cpu_data; unsigned long cpurev = c->type, type, icachesz, dcachesz; #else /* CONFIG_SMP */ unsigned long cpu_id = 0; unsigned long cpurev = CPUREV, type, icachesz, dcachesz; #endif /* CONFIG_SMP */ #ifdef CONFIG_SMP if (!cpu_online(cpu_id)) return 0; #endif type = (cpurev & CPUREV_TYPE) >> CPUREV_TYPE_S; if (type > mn10300_known_cpus) type = mn10300_known_cpus; icachesz = ((cpurev & CPUREV_ICWAY ) >> CPUREV_ICWAY_S) * ((cpurev & CPUREV_ICSIZE) >> CPUREV_ICSIZE_S) * 1024; dcachesz = ((cpurev & CPUREV_DCWAY ) >> CPUREV_DCWAY_S) * ((cpurev & CPUREV_DCSIZE) >> CPUREV_DCSIZE_S) * 1024; seq_printf(m, "processor : %ld\n" "vendor_id : " PROCESSOR_VENDOR_NAME "\n" "cpu core : %s\n" "cpu rev : %lu\n" "model name : " PROCESSOR_MODEL_NAME "\n" "icache size: %lu\n" "dcache size: %lu\n", cpu_id, mn10300_cputypes[type], (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S, icachesz, dcachesz ); seq_printf(m, "ioclk speed: %lu.%02luMHz\n" "bogomips : %lu.%02lu\n\n", MN10300_IOCLK / 1000000, (MN10300_IOCLK / 10000) % 100, #ifdef CONFIG_SMP c->loops_per_jiffy / (500000 / HZ), (c->loops_per_jiffy / (5000 / HZ)) % 100 #else /* CONFIG_SMP */ loops_per_jiffy / (500000 / HZ), (loops_per_jiffy / (5000 / HZ)) % 100 #endif /* CONFIG_SMP */ ); return 0; } static void *c_start(struct seq_file *m, loff_t *pos) { return *pos < NR_CPUS ? cpu_data + *pos : NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { ++*pos; return c_start(m, pos); } static void c_stop(struct seq_file *m, void *v) { } const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = show_cpuinfo, };
gpl-2.0
KernelWorld/KW-Kenzo
net/nfc/core.c
2092
20639
/*
 * Copyright (C) 2011 Instituto Nokia de Tecnologia
 *
 * Authors:
 *    Lauro Ramos Venancio <lauro.venancio@openbossa.org>
 *    Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rfkill.h>
#include <linux/nfc.h>

#include <net/genetlink.h>

#include "nfc.h"

#define VERSION "0.1"

/* Period of the target presence-check timer, in milliseconds. */
#define NFC_CHECK_PRES_FREQ_MS	2000

/* Bumped on every device add/remove so netlink dumps can detect
 * a changed device list. */
int nfc_devlist_generation;
DEFINE_MUTEX(nfc_devlist_mutex);

/* NFC device ID bitmap */
static DEFINE_IDA(nfc_index_ida);

/**
 * nfc_dev_up - turn on the NFC device
 *
 * @dev: The nfc device to be turned on
 *
 * The device remains up until the nfc_dev_down function is called.
 */
int nfc_dev_up(struct nfc_dev *dev)
{
	int rc = 0;

	pr_debug("dev_name=%s\n", dev_name(&dev->dev));

	device_lock(&dev->dev);

	/* refuse to power up while rfkill-blocked */
	if (dev->rfkill && rfkill_blocked(dev->rfkill)) {
		rc = -ERFKILL;
		goto error;
	}

	if (!device_is_registered(&dev->dev)) {
		rc = -ENODEV;
		goto error;
	}

	if (dev->dev_up) {
		rc = -EALREADY;
		goto error;
	}

	/* dev_up is an optional driver op */
	if (dev->ops->dev_up)
		rc = dev->ops->dev_up(dev);

	if (!rc)
		dev->dev_up = true;

error:
	device_unlock(&dev->dev);
	return rc;
}

/**
 * nfc_dev_down - turn off the NFC device
 *
 * @dev: The nfc device to be turned off
 */
int nfc_dev_down(struct nfc_dev *dev)
{
	int rc = 0;

	pr_debug("dev_name=%s\n", dev_name(&dev->dev));

	device_lock(&dev->dev);

	if (!device_is_registered(&dev->dev)) {
		rc = -ENODEV;
		goto error;
	}

	if (!dev->dev_up) {
		rc = -EALREADY;
		goto error;
	}

	/* can't go down in the middle of a poll or an active session */
	if (dev->polling || dev->active_target) {
		rc = -EBUSY;
		goto error;
	}

	if (dev->ops->dev_down)
		dev->ops->dev_down(dev);

	dev->dev_up = false;

error:
	device_unlock(&dev->dev);
	return rc;
}

/* rfkill hook: a block request powers the device down; unblocking is
 * a no-op (userspace must bring the device up again explicitly). */
static int nfc_rfkill_set_block(void *data, bool blocked)
{
	struct nfc_dev *dev = data;

	pr_debug("%s blocked %d", dev_name(&dev->dev), blocked);

	if (!blocked)
		return 0;

	nfc_dev_down(dev);

	/* NOTE(review): the return value of nfc_dev_down() is deliberately
	 * ignored; rfkill blocking is best-effort here. */
	return 0;
}

static const struct rfkill_ops nfc_rfkill_ops = {
	.set_block = nfc_rfkill_set_block,
};

/**
 * nfc_start_poll - start polling for nfc targets
 *
 * @dev: The nfc device that must start polling
 * @im_protocols: bitset of nfc initiator-mode protocols to poll for
 * @tm_protocols: bitset of nfc target-mode protocols to listen for
 *
 * The device remains polling for targets until a target is found or
 * the nfc_stop_poll function is called.
 */
int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols)
{
	int rc;

	pr_debug("dev_name %s initiator protocols 0x%x target protocols 0x%x\n",
		 dev_name(&dev->dev), im_protocols, tm_protocols);

	/* at least one of the two protocol sets must be requested */
	if (!im_protocols && !tm_protocols)
		return -EINVAL;

	device_lock(&dev->dev);

	if (!device_is_registered(&dev->dev)) {
		rc = -ENODEV;
		goto error;
	}

	if (!dev->dev_up) {
		rc = -ENODEV;
		goto error;
	}

	if (dev->polling) {
		rc = -EBUSY;
		goto error;
	}

	rc = dev->ops->start_poll(dev, im_protocols, tm_protocols);
	if (!rc) {
		dev->polling = true;
		dev->rf_mode = NFC_RF_NONE;
	}

error:
	device_unlock(&dev->dev);
	return rc;
}

/**
 * nfc_stop_poll - stop polling for nfc targets
 *
 * @dev: The nfc device that must stop polling
 */
int nfc_stop_poll(struct nfc_dev *dev)
{
	int rc = 0;

	pr_debug("dev_name=%s\n", dev_name(&dev->dev));

	device_lock(&dev->dev);

	if (!device_is_registered(&dev->dev)) {
		rc = -ENODEV;
		goto error;
	}

	if (!dev->polling) {
		rc = -EINVAL;
		goto error;
	}

	dev->ops->stop_poll(dev);
	dev->polling = false;
	dev->rf_mode = NFC_RF_NONE;

error:
	device_unlock(&dev->dev);
	return rc;
}

/* Look up a discovered target by its index in dev->targets[]; returns
 * NULL when not found.  Callers invoke this with the device lock held. */
static struct nfc_target *nfc_find_target(struct nfc_dev *dev, u32 target_idx)
{
	int i;

	if (dev->n_targets == 0)
		return NULL;

	for (i = 0; i < dev->n_targets; i++) {
		if (dev->targets[i].idx == target_idx)
			return &dev->targets[i];
	}

	return NULL;
}

/* Establish an NFC-DEP (peer-to-peer) link with the given target,
 * exchanging the LLCP general bytes. */
int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
{
	int rc = 0;
	u8 *gb;
	size_t gb_len;
	struct nfc_target *target;

	pr_debug("dev_name=%s comm %d\n", dev_name(&dev->dev), comm_mode);

	if (!dev->ops->dep_link_up)
		return -EOPNOTSUPP;

	device_lock(&dev->dev);

	if (!device_is_registered(&dev->dev)) {
		rc = -ENODEV;
		goto error;
	}

	if (dev->dep_link_up == true) {
		rc = -EALREADY;
		goto error;
	}

	gb = nfc_llcp_general_bytes(dev, &gb_len);
	if (gb_len > NFC_MAX_GT_LEN) {
		rc = -EINVAL;
		goto error;
	}

	target = nfc_find_target(dev, target_index);
	if (target == NULL) {
		rc = -ENOTCONN;
		goto error;
	}

	rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len);
	if (!rc) {
		dev->active_target = target;
		dev->rf_mode = NFC_RF_INITIATOR;
	}

error:
	device_unlock(&dev->dev);
	return rc;
}

/* Tear down an established NFC-DEP link and notify LLCP/userspace. */
int nfc_dep_link_down(struct nfc_dev *dev)
{
	int rc = 0;

	pr_debug("dev_name=%s\n", dev_name(&dev->dev));

	if (!dev->ops->dep_link_down)
		return -EOPNOTSUPP;

	device_lock(&dev->dev);

	if (!device_is_registered(&dev->dev)) {
		rc = -ENODEV;
		goto error;
	}

	if (dev->dep_link_up == false) {
		rc = -EALREADY;
		goto error;
	}

	rc = dev->ops->dep_link_down(dev);
	if (!rc) {
		dev->dep_link_up = false;
		dev->active_target = NULL;
		dev->rf_mode = NFC_RF_NONE;
		nfc_llcp_mac_is_down(dev);
		nfc_genl_dep_link_down_event(dev);
	}

error:
	device_unlock(&dev->dev);
	return rc;
}

/* Driver callback: the DEP link came up; propagate to LLCP and netlink. */
int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
		       u8 comm_mode, u8 rf_mode)
{
	dev->dep_link_up = true;

	nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode);

	return nfc_genl_dep_link_up_event(dev, target_idx, comm_mode, rf_mode);
}
EXPORT_SYMBOL(nfc_dep_link_is_up);

/**
 * nfc_activate_target - prepare the target for data exchange
 *
 * @dev: The nfc device that found the target
 * @target_idx: index of the target that must be activated
 * @protocol: nfc protocol that will be used for data exchange
 */
int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
{
	int rc;
	struct nfc_target *target;

	pr_debug("dev_name=%s target_idx=%u protocol=%u\n",
		 dev_name(&dev->dev), target_idx, protocol);

	device_lock(&dev->dev);

	if (!device_is_registered(&dev->dev)) {
		rc = -ENODEV;
		goto error;
	}

	/* only one target may be active at a time */
	if (dev->active_target) {
		rc = -EBUSY;
		goto error;
	}

	target = nfc_find_target(dev, target_idx);
	if (target == NULL) {
		rc = -ENOTCONN;
		goto error;
	}

	rc = dev->ops->activate_target(dev, target, protocol);
	if (!rc) {
		dev->active_target = target;
		dev->rf_mode = NFC_RF_INITIATOR;

		/* start periodic presence checking, unless tearing down */
		if (dev->ops->check_presence && !dev->shutting_down)
			mod_timer(&dev->check_pres_timer, jiffies +
				  msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
	}

error:
	device_unlock(&dev->dev);
	return rc;
}

/**
 * nfc_deactivate_target - deactivate a nfc target
 *
 * @dev: The nfc device that found the target
 * @target_idx: index of the target that must be deactivated
 */
int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
{
	int rc = 0;

	pr_debug("dev_name=%s target_idx=%u\n",
		 dev_name(&dev->dev), target_idx);

	device_lock(&dev->dev);

	if (!device_is_registered(&dev->dev)) {
		rc = -ENODEV;
		goto error;
	}

	if (dev->active_target == NULL) {
		rc = -ENOTCONN;
		goto error;
	}

	if (dev->active_target->idx != target_idx) {
		rc = -ENOTCONN;
		goto error;
	}

	/* stop presence checking before dropping the target */
	if (dev->ops->check_presence)
		del_timer_sync(&dev->check_pres_timer);

	dev->ops->deactivate_target(dev, dev->active_target);
	dev->active_target = NULL;

error:
	device_unlock(&dev->dev);
	return rc;
}

/**
 * nfc_data_exchange - transceive data
 *
 * @dev: The nfc device that found the target
 * @target_idx: index of the target
 * @skb: data to be sent
 * @cb: callback called when the response is received
 * @cb_context: parameter for the callback function
 *
 * The user must wait for the callback before calling this function again.
 */
int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
		      data_exchange_cb_t cb, void *cb_context)
{
	int rc;

	pr_debug("dev_name=%s target_idx=%u skb->len=%u\n",
		 dev_name(&dev->dev), target_idx, skb->len);

	device_lock(&dev->dev);

	/* the skb is consumed on every exit path: either handed to the
	 * driver or freed here on error */
	if (!device_is_registered(&dev->dev)) {
		rc = -ENODEV;
		kfree_skb(skb);
		goto error;
	}

	if (dev->rf_mode == NFC_RF_INITIATOR && dev->active_target != NULL) {
		if (dev->active_target->idx != target_idx) {
			rc = -EADDRNOTAVAIL;
			kfree_skb(skb);
			goto error;
		}

		/* pause presence checking for the duration of the exchange */
		if (dev->ops->check_presence)
			del_timer_sync(&dev->check_pres_timer);

		rc = dev->ops->im_transceive(dev, dev->active_target, skb, cb,
					     cb_context);

		if (!rc && dev->ops->check_presence && !dev->shutting_down)
			mod_timer(&dev->check_pres_timer, jiffies +
				  msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
	} else if (dev->rf_mode == NFC_RF_TARGET && dev->ops->tm_send != NULL) {
		rc = dev->ops->tm_send(dev, skb);
	} else {
		rc = -ENOTCONN;
		kfree_skb(skb);
		goto error;
	}


error:
	device_unlock(&dev->dev);
	return rc;
}

/* Record the peer's LLCP general bytes received over the RF link. */
int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len)
{
	pr_debug("dev_name=%s gb_len=%d\n", dev_name(&dev->dev), gb_len);

	if (gb_len > NFC_MAX_GT_LEN)
		return -EINVAL;

	return nfc_llcp_set_remote_gb(dev, gb, gb_len);
}
EXPORT_SYMBOL(nfc_set_remote_general_bytes);

/* Return our own LLCP general bytes (and their length via *gb_len). */
u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len)
{
	pr_debug("dev_name=%s\n", dev_name(&dev->dev));

	return nfc_llcp_general_bytes(dev, gb_len);
}
EXPORT_SYMBOL(nfc_get_local_general_bytes);

int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb)
{
	/* Only LLCP target mode for now */
	if (dev->dep_link_up == false) {
		kfree_skb(skb);
		return -ENOLINK;
	}

	return nfc_llcp_data_received(dev, skb);
}
EXPORT_SYMBOL(nfc_tm_data_received);

/* Driver callback: this device was activated by a remote initiator
 * (target mode); store the peer's general bytes and notify userspace. */
int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode,
		     u8 *gb, size_t gb_len)
{
	int rc;

	device_lock(&dev->dev);

	dev->polling = false;

	if (gb != NULL) {
		rc = nfc_set_remote_general_bytes(dev, gb, gb_len);
		if (rc < 0)
			goto out;
	}

	dev->rf_mode = NFC_RF_TARGET;

	if (protocol == NFC_PROTO_NFC_DEP_MASK)
		nfc_dep_link_is_up(dev, 0, comm_mode, NFC_RF_TARGET);

	rc = nfc_genl_tm_activated(dev, protocol);

out:
	device_unlock(&dev->dev);

	return rc;
}
EXPORT_SYMBOL(nfc_tm_activated);

/* Driver callback: the remote initiator deactivated us (target mode). */
int nfc_tm_deactivated(struct nfc_dev *dev)
{
	dev->dep_link_up = false;
	dev->rf_mode = NFC_RF_NONE;

	return nfc_genl_tm_deactivated(dev);
}
EXPORT_SYMBOL(nfc_tm_deactivated);

/**
 * nfc_alloc_send_skb - allocate a skb for data exchange responses
 *
 * @dev: device whose headroom/tailroom requirements are honoured
 * @sk: socket the allocation is charged to
 * @flags: msg flags (MSG_DONTWAIT honoured)
 * @size: payload size to allocate
 * @err: out parameter for the allocation error code
 */
struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
				   unsigned int flags, unsigned int size,
				   unsigned int *err)
{
	struct sk_buff *skb;
	unsigned int total_size;

	/* reserve room for the device's tx head/tailroom plus NFC header */
	total_size = size +
		dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;

	skb = sock_alloc_send_skb(sk, total_size, flags & MSG_DONTWAIT, err);
	if (skb)
		skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);

	return skb;
}

/**
 * nfc_alloc_recv_skb - allocate a skb for data exchange responses
 *
 * @size: size to allocate
 * @gfp: gfp flags
 */
struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp)
{
	struct sk_buff *skb;
	unsigned int total_size;

	total_size = size + 1;
	skb = alloc_skb(total_size, gfp);

	if (skb)
		skb_reserve(skb, 1);

	return skb;
}
EXPORT_SYMBOL(nfc_alloc_recv_skb);

/**
 * nfc_targets_found - inform that targets were found
 *
 * @dev: The nfc device that found the targets
 * @targets: array of nfc targets found
 * @n_targets: targets array size
 *
 * The device driver must call this function when one or many nfc targets
 * are found. After calling this function, the device driver must stop
 * polling for targets.
 * NOTE: This function can be called with targets=NULL and n_targets=0 to
 * notify a driver error, meaning that the polling operation cannot complete.
 * IMPORTANT: this function must not be called from an atomic context.
 * In addition, it must also not be called from a context that would prevent
 * the NFC Core to call other nfc ops entry point concurrently.
 */
int nfc_targets_found(struct nfc_dev *dev,
		      struct nfc_target *targets, int n_targets)
{
	int i;

	pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets);

	/* assign device-unique indices before taking the lock */
	for (i = 0; i < n_targets; i++)
		targets[i].idx = dev->target_next_idx++;

	device_lock(&dev->dev);

	if (dev->polling == false) {
		device_unlock(&dev->dev);
		return 0;
	}

	dev->polling = false;

	dev->targets_generation++;

	kfree(dev->targets);
	dev->targets = NULL;

	if (targets) {
		dev->targets = kmemdup(targets,
				       n_targets * sizeof(struct nfc_target),
				       GFP_ATOMIC);

		if (!dev->targets) {
			dev->n_targets = 0;
			device_unlock(&dev->dev);
			return -ENOMEM;
		}
	}

	dev->n_targets = n_targets;
	device_unlock(&dev->dev);

	nfc_genl_targets_found(dev);

	return 0;
}
EXPORT_SYMBOL(nfc_targets_found);

/**
 * nfc_target_lost - inform that an activated target went out of field
 *
 * @dev: The nfc device that had the activated target in field
 * @target_idx: the nfc index of the target
 *
 * The device driver must call this function when the activated target
 * goes out of the field.
 * IMPORTANT: this function must not be called from an atomic context.
 * In addition, it must also not be called from a context that would prevent
 * the NFC Core to call other nfc ops entry point concurrently.
 */
int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
{
	struct nfc_target *tg;
	int i;

	pr_debug("dev_name %s n_target %d\n", dev_name(&dev->dev), target_idx);

	device_lock(&dev->dev);

	for (i = 0; i < dev->n_targets; i++) {
		tg = &dev->targets[i];
		if (tg->idx == target_idx)
			break;
	}

	if (i == dev->n_targets) {
		device_unlock(&dev->dev);
		return -EINVAL;
	}

	dev->targets_generation++;
	dev->n_targets--;
	dev->active_target = NULL;

	/* compact the array over the removed entry */
	if (dev->n_targets) {
		memcpy(&dev->targets[i], &dev->targets[i + 1],
		       (dev->n_targets - i) * sizeof(struct nfc_target));
	} else {
		kfree(dev->targets);
		dev->targets = NULL;
	}

	device_unlock(&dev->dev);

	nfc_genl_target_lost(dev, target_idx);

	return 0;
}
EXPORT_SYMBOL(nfc_target_lost);

/* Driver-facing error notification; reported as an empty target list. */
inline void nfc_driver_failure(struct nfc_dev *dev, int err)
{
	nfc_targets_found(dev, NULL, 0);
}
EXPORT_SYMBOL(nfc_driver_failure);

/* struct device release callback: frees the nfc_dev on last put. */
static void nfc_release(struct device *d)
{
	struct nfc_dev *dev = to_nfc_dev(d);

	pr_debug("dev_name=%s\n", dev_name(&dev->dev));

	nfc_genl_data_exit(&dev->genl_data);
	kfree(dev->targets);
	kfree(dev);
}

/* Periodic worker: poll the driver's check_presence op and declare the
 * active target lost when it stops responding. */
static void nfc_check_pres_work(struct work_struct *work)
{
	struct nfc_dev *dev = container_of(work, struct nfc_dev,
					   check_pres_work);
	int rc;

	device_lock(&dev->dev);

	if (dev->active_target && timer_pending(&dev->check_pres_timer) == 0) {
		rc = dev->ops->check_presence(dev, dev->active_target);
		if (rc == -EOPNOTSUPP)
			goto exit;
		if (rc) {
			u32 active_target_idx = dev->active_target->idx;
			/* drop the lock: nfc_target_lost() re-takes it */
			device_unlock(&dev->dev);
			nfc_target_lost(dev, active_target_idx);
			return;
		}

		if (!dev->shutting_down)
			mod_timer(&dev->check_pres_timer, jiffies +
				  msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
	}

exit:
	device_unlock(&dev->dev);
}

/* Timer callback (atomic context): defer the presence check to the
 * workqueue, which may sleep. */
static void nfc_check_pres_timeout(unsigned long data)
{
	struct nfc_dev *dev = (struct nfc_dev *)data;

	schedule_work(&dev->check_pres_work);
}

struct class nfc_class = {
	.name = "nfc",
	.dev_release = nfc_release,
};
EXPORT_SYMBOL(nfc_class);

/* class_find_device() match callback: compare device index. */
static int match_idx(struct device *d, const void *data)
{
	struct nfc_dev *dev = to_nfc_dev(d);
	const unsigned int *idx = data;

	return dev->idx == *idx;
}

/* Look up an nfc device by index. */
struct nfc_dev *nfc_get_device(unsigned int idx)
{
	struct device *d;

	d = class_find_device(&nfc_class, NULL, &idx, match_idx);
	if (!d)
		return NULL;

	return to_nfc_dev(d);
}

/**
 * nfc_allocate_device - allocate a new nfc device
 *
 * @ops: device operations
 * @supported_protocols: NFC protocols supported by the device
 * @supported_se: secure elements supported by the device
 * @tx_headroom: tx headroom the driver needs in allocated skbs
 * @tx_tailroom: tx tailroom the driver needs in allocated skbs
 */
struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
				    u32 supported_protocols,
				    u32 supported_se,
				    int tx_headroom, int tx_tailroom)
{
	struct nfc_dev *dev;

	/* these ops are mandatory for every driver */
	if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
	    !ops->deactivate_target || !ops->im_transceive)
		return NULL;

	if (!supported_protocols)
		return NULL;

	dev = kzalloc(sizeof(struct nfc_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	dev->ops = ops;
	dev->supported_protocols = supported_protocols;
	dev->supported_se = supported_se;
	dev->active_se = NFC_SE_NONE;
	dev->tx_headroom = tx_headroom;
	dev->tx_tailroom = tx_tailroom;

	nfc_genl_data_init(&dev->genl_data);

	dev->rf_mode = NFC_RF_NONE;

	/* first generation must not be 0 */
	dev->targets_generation = 1;

	if (ops->check_presence) {
		init_timer(&dev->check_pres_timer);
		dev->check_pres_timer.data = (unsigned long)dev;
		dev->check_pres_timer.function = nfc_check_pres_timeout;

		INIT_WORK(&dev->check_pres_work, nfc_check_pres_work);
	}

	return dev;
}
EXPORT_SYMBOL(nfc_allocate_device);

/**
 * nfc_register_device - register a nfc device in the nfc subsystem
 *
 * @dev: The nfc device to register
 */
int nfc_register_device(struct nfc_dev *dev)
{
	int rc;

	pr_debug("dev_name=%s\n", dev_name(&dev->dev));

	dev->idx = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
	if (dev->idx < 0)
		return dev->idx;

	dev->dev.class = &nfc_class;
	dev_set_name(&dev->dev, "nfc%d", dev->idx);
	device_initialize(&dev->dev);

	mutex_lock(&nfc_devlist_mutex);
	nfc_devlist_generation++;
	rc = device_add(&dev->dev);
	mutex_unlock(&nfc_devlist_mutex);

	/* NOTE(review): on device_add() failure the ida index is not
	 * released here -- verify against later upstream fixes */
	if (rc < 0)
		return rc;

	rc = nfc_llcp_register_device(dev);
	if (rc)
		pr_err("Could not register llcp device\n");

	rc = nfc_genl_device_added(dev);
	if (rc)
		pr_debug("The userspace won't be notified that the device %s was added\n",
			 dev_name(&dev->dev));

	/* rfkill support is optional: failure leaves dev->rfkill NULL */
	dev->rfkill = rfkill_alloc(dev_name(&dev->dev), &dev->dev,
				   RFKILL_TYPE_NFC, &nfc_rfkill_ops, dev);
	if (dev->rfkill) {
		if (rfkill_register(dev->rfkill) < 0) {
			rfkill_destroy(dev->rfkill);
			dev->rfkill = NULL;
		}
	}

	return 0;
}
EXPORT_SYMBOL(nfc_register_device);

/**
 * nfc_unregister_device - unregister a nfc device in the nfc subsystem
 *
 * @dev: The nfc device to unregister
 */
void nfc_unregister_device(struct nfc_dev *dev)
{
	int rc, id;

	pr_debug("dev_name=%s\n", dev_name(&dev->dev));

	id = dev->idx;

	if (dev->rfkill) {
		rfkill_unregister(dev->rfkill);
		rfkill_destroy(dev->rfkill);
	}

	/* flag shutdown under the lock, then quiesce timer and worker */
	if (dev->ops->check_presence) {
		device_lock(&dev->dev);
		dev->shutting_down = true;
		device_unlock(&dev->dev);
		del_timer_sync(&dev->check_pres_timer);
		cancel_work_sync(&dev->check_pres_work);
	}

	rc = nfc_genl_device_removed(dev);
	if (rc)
		pr_debug("The userspace won't be notified that the device %s "
			 "was removed\n", dev_name(&dev->dev));

	nfc_llcp_unregister_device(dev);

	mutex_lock(&nfc_devlist_mutex);
	nfc_devlist_generation++;
	device_del(&dev->dev);
	mutex_unlock(&nfc_devlist_mutex);

	ida_simple_remove(&nfc_index_ida, id);
}
EXPORT_SYMBOL(nfc_unregister_device);

static int __init nfc_init(void)
{
	int rc;

	pr_info("NFC Core ver %s\n", VERSION);

	rc = class_register(&nfc_class);
	if (rc)
		return rc;

	rc = nfc_genl_init();
	if (rc)
		goto err_genl;

	/* the first generation must not be 0 */
	nfc_devlist_generation = 1;

	rc = rawsock_init();
	if (rc)
		goto err_rawsock;

	rc = nfc_llcp_init();
	if (rc)
		goto err_llcp_sock;

	rc = af_nfc_init();
	if (rc)
		goto err_af_nfc;

	return 0;

err_af_nfc:
	nfc_llcp_exit();
err_llcp_sock:
	rawsock_exit();
err_rawsock:
	nfc_genl_exit();
err_genl:
	class_unregister(&nfc_class);
	return rc;
}

static void __exit nfc_exit(void)
{
	af_nfc_exit();
	nfc_llcp_exit();
	rawsock_exit();
	nfc_genl_exit();
	class_unregister(&nfc_class);
}

subsys_initcall(nfc_init);
module_exit(nfc_exit);

MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>");
MODULE_DESCRIPTION("NFC Core ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_NFC);
MODULE_ALIAS_GENL_FAMILY(NFC_GENL_NAME);
gpl-2.0
Thunderoar/android_kernel_samsung_goyave3g
fs/ext4/hash.c
3372
4457
/* * linux/fs/ext4/hash.c * * Copyright (C) 2002 by Theodore Ts'o * * This file is released under the GPL v2. * * This file may be redistributed under the terms of the GNU Public * License. */ #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/cryptohash.h> #include "ext4.h" #define DELTA 0x9E3779B9 static void TEA_transform(__u32 buf[4], __u32 const in[]) { __u32 sum = 0; __u32 b0 = buf[0], b1 = buf[1]; __u32 a = in[0], b = in[1], c = in[2], d = in[3]; int n = 16; do { sum += DELTA; b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); } while (--n); buf[0] += b0; buf[1] += b1; } /* The old legacy hash */ static __u32 dx_hack_hash_unsigned(const char *name, int len) { __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; const unsigned char *ucp = (const unsigned char *) name; while (len--) { hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373)); if (hash & 0x80000000) hash -= 0x7fffffff; hash1 = hash0; hash0 = hash; } return hash0 << 1; } static __u32 dx_hack_hash_signed(const char *name, int len) { __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; const signed char *scp = (const signed char *) name; while (len--) { hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373)); if (hash & 0x80000000) hash -= 0x7fffffff; hash1 = hash0; hash0 = hash; } return hash0 << 1; } static void str2hashbuf_signed(const char *msg, int len, __u32 *buf, int num) { __u32 pad, val; int i; const signed char *scp = (const signed char *) msg; pad = (__u32)len | ((__u32)len << 8); pad |= pad << 16; val = pad; if (len > num*4) len = num * 4; for (i = 0; i < len; i++) { if ((i % 4) == 0) val = pad; val = ((int) scp[i]) + (val << 8); if ((i % 4) == 3) { *buf++ = val; val = pad; num--; } } if (--num >= 0) *buf++ = val; while (--num >= 0) *buf++ = pad; } static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num) { __u32 pad, val; int i; const unsigned char *ucp = (const unsigned char *) msg; pad = (__u32)len | ((__u32)len << 
8); pad |= pad << 16; val = pad; if (len > num*4) len = num * 4; for (i = 0; i < len; i++) { if ((i % 4) == 0) val = pad; val = ((int) ucp[i]) + (val << 8); if ((i % 4) == 3) { *buf++ = val; val = pad; num--; } } if (--num >= 0) *buf++ = val; while (--num >= 0) *buf++ = pad; } /* * Returns the hash of a filename. If len is 0 and name is NULL, then * this function can be used to test whether or not a hash version is * supported. * * The seed is an 4 longword (32 bits) "secret" which can be used to * uniquify a hash. If the seed is all zero's, then some default seed * may be used. * * A particular hash version specifies whether or not the seed is * represented, and whether or not the returned hash is 32 bits or 64 * bits. 32 bit hashes will return 0 for the minor hash. */ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo) { __u32 hash; __u32 minor_hash = 0; const char *p; int i; __u32 in[8], buf[4]; void (*str2hashbuf)(const char *, int, __u32 *, int) = str2hashbuf_signed; /* Initialize the default seed for the hash checksum functions */ buf[0] = 0x67452301; buf[1] = 0xefcdab89; buf[2] = 0x98badcfe; buf[3] = 0x10325476; /* Check to see if the seed is all zero's */ if (hinfo->seed) { for (i = 0; i < 4; i++) { if (hinfo->seed[i]) { memcpy(buf, hinfo->seed, sizeof(buf)); break; } } } switch (hinfo->hash_version) { case DX_HASH_LEGACY_UNSIGNED: hash = dx_hack_hash_unsigned(name, len); break; case DX_HASH_LEGACY: hash = dx_hack_hash_signed(name, len); break; case DX_HASH_HALF_MD4_UNSIGNED: str2hashbuf = str2hashbuf_unsigned; case DX_HASH_HALF_MD4: p = name; while (len > 0) { (*str2hashbuf)(p, len, in, 8); half_md4_transform(buf, in); len -= 32; p += 32; } minor_hash = buf[2]; hash = buf[1]; break; case DX_HASH_TEA_UNSIGNED: str2hashbuf = str2hashbuf_unsigned; case DX_HASH_TEA: p = name; while (len > 0) { (*str2hashbuf)(p, len, in, 4); TEA_transform(buf, in); len -= 16; p += 16; } hash = buf[0]; minor_hash = buf[1]; break; default: hinfo->hash = 0; 
return -1; } hash = hash & ~1; if (hash == (EXT4_HTREE_EOF_32BIT << 1)) hash = (EXT4_HTREE_EOF_32BIT - 1) << 1; hinfo->hash = hash; hinfo->minor_hash = minor_hash; return 0; }
gpl-2.0
scruiser/kernel
sound/isa/wss/wss_lib.c
3372
66010
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Routines for control of CS4231(A)/CS4232/InterWave & compatible chips * * Bugs: * - sometimes record brokes playback with WSS portion of * Yamaha OPL3-SA3 chip * - CS4231 (GUS MAX) - still trouble with occasional noises * - broken initialization? * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/pm.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/module.h> #include <sound/core.h> #include <sound/wss.h> #include <sound/pcm_params.h> #include <sound/tlv.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/irq.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Routines for control of CS4231(A)/CS4232/InterWave & compatible chips"); MODULE_LICENSE("GPL"); #if 0 #define SNDRV_DEBUG_MCE #endif /* * Some variables */ static unsigned char freq_bits[14] = { /* 5510 */ 0x00 | CS4231_XTAL2, /* 6620 */ 0x0E | CS4231_XTAL2, /* 8000 */ 0x00 | CS4231_XTAL1, /* 9600 */ 0x0E | CS4231_XTAL1, /* 11025 */ 0x02 | CS4231_XTAL2, /* 16000 */ 0x02 | CS4231_XTAL1, /* 18900 */ 0x04 | CS4231_XTAL2, /* 22050 */ 0x06 | CS4231_XTAL2, /* 27042 */ 0x04 | CS4231_XTAL1, /* 32000 */ 0x06 | CS4231_XTAL1, /* 33075 */ 0x0C | CS4231_XTAL2, /* 37800 */ 0x08 | 
CS4231_XTAL2, /* 44100 */ 0x0A | CS4231_XTAL2, /* 48000 */ 0x0C | CS4231_XTAL1 }; static unsigned int rates[14] = { 5510, 6620, 8000, 9600, 11025, 16000, 18900, 22050, 27042, 32000, 33075, 37800, 44100, 48000 }; static struct snd_pcm_hw_constraint_list hw_constraints_rates = { .count = ARRAY_SIZE(rates), .list = rates, .mask = 0, }; static int snd_wss_xrate(struct snd_pcm_runtime *runtime) { return snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates); } static unsigned char snd_wss_original_image[32] = { 0x00, /* 00/00 - lic */ 0x00, /* 01/01 - ric */ 0x9f, /* 02/02 - la1ic */ 0x9f, /* 03/03 - ra1ic */ 0x9f, /* 04/04 - la2ic */ 0x9f, /* 05/05 - ra2ic */ 0xbf, /* 06/06 - loc */ 0xbf, /* 07/07 - roc */ 0x20, /* 08/08 - pdfr */ CS4231_AUTOCALIB, /* 09/09 - ic */ 0x00, /* 0a/10 - pc */ 0x00, /* 0b/11 - ti */ CS4231_MODE2, /* 0c/12 - mi */ 0xfc, /* 0d/13 - lbc */ 0x00, /* 0e/14 - pbru */ 0x00, /* 0f/15 - pbrl */ 0x80, /* 10/16 - afei */ 0x01, /* 11/17 - afeii */ 0x9f, /* 12/18 - llic */ 0x9f, /* 13/19 - rlic */ 0x00, /* 14/20 - tlb */ 0x00, /* 15/21 - thb */ 0x00, /* 16/22 - la3mic/reserved */ 0x00, /* 17/23 - ra3mic/reserved */ 0x00, /* 18/24 - afs */ 0x00, /* 19/25 - lamoc/version */ 0xcf, /* 1a/26 - mioc */ 0x00, /* 1b/27 - ramoc/reserved */ 0x20, /* 1c/28 - cdfr */ 0x00, /* 1d/29 - res4 */ 0x00, /* 1e/30 - cbru */ 0x00, /* 1f/31 - cbrl */ }; static unsigned char snd_opti93x_original_image[32] = { 0x00, /* 00/00 - l_mixout_outctrl */ 0x00, /* 01/01 - r_mixout_outctrl */ 0x88, /* 02/02 - l_cd_inctrl */ 0x88, /* 03/03 - r_cd_inctrl */ 0x88, /* 04/04 - l_a1/fm_inctrl */ 0x88, /* 05/05 - r_a1/fm_inctrl */ 0x80, /* 06/06 - l_dac_inctrl */ 0x80, /* 07/07 - r_dac_inctrl */ 0x00, /* 08/08 - ply_dataform_reg */ 0x00, /* 09/09 - if_conf */ 0x00, /* 0a/10 - pin_ctrl */ 0x00, /* 0b/11 - err_init_reg */ 0x0a, /* 0c/12 - id_reg */ 0x00, /* 0d/13 - reserved */ 0x00, /* 0e/14 - ply_upcount_reg */ 0x00, /* 0f/15 - ply_lowcount_reg */ 0x88, /* 10/16 - 
reserved/l_a1_inctrl */ 0x88, /* 11/17 - reserved/r_a1_inctrl */ 0x88, /* 12/18 - l_line_inctrl */ 0x88, /* 13/19 - r_line_inctrl */ 0x88, /* 14/20 - l_mic_inctrl */ 0x88, /* 15/21 - r_mic_inctrl */ 0x80, /* 16/22 - l_out_outctrl */ 0x80, /* 17/23 - r_out_outctrl */ 0x00, /* 18/24 - reserved */ 0x00, /* 19/25 - reserved */ 0x00, /* 1a/26 - reserved */ 0x00, /* 1b/27 - reserved */ 0x00, /* 1c/28 - cap_dataform_reg */ 0x00, /* 1d/29 - reserved */ 0x00, /* 1e/30 - cap_upcount_reg */ 0x00 /* 1f/31 - cap_lowcount_reg */ }; /* * Basic I/O functions */ static inline void wss_outb(struct snd_wss *chip, u8 offset, u8 val) { outb(val, chip->port + offset); } static inline u8 wss_inb(struct snd_wss *chip, u8 offset) { return inb(chip->port + offset); } static void snd_wss_wait(struct snd_wss *chip) { int timeout; for (timeout = 250; timeout > 0 && (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT); timeout--) udelay(100); } static void snd_wss_dout(struct snd_wss *chip, unsigned char reg, unsigned char value) { int timeout; for (timeout = 250; timeout > 0 && (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT); timeout--) udelay(10); wss_outb(chip, CS4231P(REGSEL), chip->mce_bit | reg); wss_outb(chip, CS4231P(REG), value); mb(); } void snd_wss_out(struct snd_wss *chip, unsigned char reg, unsigned char value) { snd_wss_wait(chip); #ifdef CONFIG_SND_DEBUG if (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT) snd_printk(KERN_DEBUG "out: auto calibration time out " "- reg = 0x%x, value = 0x%x\n", reg, value); #endif wss_outb(chip, CS4231P(REGSEL), chip->mce_bit | reg); wss_outb(chip, CS4231P(REG), value); chip->image[reg] = value; mb(); snd_printdd("codec out - reg 0x%x = 0x%x\n", chip->mce_bit | reg, value); } EXPORT_SYMBOL(snd_wss_out); unsigned char snd_wss_in(struct snd_wss *chip, unsigned char reg) { snd_wss_wait(chip); #ifdef CONFIG_SND_DEBUG if (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT) snd_printk(KERN_DEBUG "in: auto calibration time out " "- reg = 0x%x\n", reg); #endif 
wss_outb(chip, CS4231P(REGSEL), chip->mce_bit | reg); mb(); return wss_inb(chip, CS4231P(REG)); } EXPORT_SYMBOL(snd_wss_in); void snd_cs4236_ext_out(struct snd_wss *chip, unsigned char reg, unsigned char val) { wss_outb(chip, CS4231P(REGSEL), chip->mce_bit | 0x17); wss_outb(chip, CS4231P(REG), reg | (chip->image[CS4236_EXT_REG] & 0x01)); wss_outb(chip, CS4231P(REG), val); chip->eimage[CS4236_REG(reg)] = val; #if 0 printk(KERN_DEBUG "ext out : reg = 0x%x, val = 0x%x\n", reg, val); #endif } EXPORT_SYMBOL(snd_cs4236_ext_out); unsigned char snd_cs4236_ext_in(struct snd_wss *chip, unsigned char reg) { wss_outb(chip, CS4231P(REGSEL), chip->mce_bit | 0x17); wss_outb(chip, CS4231P(REG), reg | (chip->image[CS4236_EXT_REG] & 0x01)); #if 1 return wss_inb(chip, CS4231P(REG)); #else { unsigned char res; res = wss_inb(chip, CS4231P(REG)); printk(KERN_DEBUG "ext in : reg = 0x%x, val = 0x%x\n", reg, res); return res; } #endif } EXPORT_SYMBOL(snd_cs4236_ext_in); #if 0 static void snd_wss_debug(struct snd_wss *chip) { printk(KERN_DEBUG "CS4231 REGS: INDEX = 0x%02x " " STATUS = 0x%02x\n", wss_inb(chip, CS4231P(REGSEL)), wss_inb(chip, CS4231P(STATUS))); printk(KERN_DEBUG " 0x00: left input = 0x%02x " " 0x10: alt 1 (CFIG 2) = 0x%02x\n", snd_wss_in(chip, 0x00), snd_wss_in(chip, 0x10)); printk(KERN_DEBUG " 0x01: right input = 0x%02x " " 0x11: alt 2 (CFIG 3) = 0x%02x\n", snd_wss_in(chip, 0x01), snd_wss_in(chip, 0x11)); printk(KERN_DEBUG " 0x02: GF1 left input = 0x%02x " " 0x12: left line in = 0x%02x\n", snd_wss_in(chip, 0x02), snd_wss_in(chip, 0x12)); printk(KERN_DEBUG " 0x03: GF1 right input = 0x%02x " " 0x13: right line in = 0x%02x\n", snd_wss_in(chip, 0x03), snd_wss_in(chip, 0x13)); printk(KERN_DEBUG " 0x04: CD left input = 0x%02x " " 0x14: timer low = 0x%02x\n", snd_wss_in(chip, 0x04), snd_wss_in(chip, 0x14)); printk(KERN_DEBUG " 0x05: CD right input = 0x%02x " " 0x15: timer high = 0x%02x\n", snd_wss_in(chip, 0x05), snd_wss_in(chip, 0x15)); printk(KERN_DEBUG " 0x06: left output = 
0x%02x " " 0x16: left MIC (PnP) = 0x%02x\n", snd_wss_in(chip, 0x06), snd_wss_in(chip, 0x16)); printk(KERN_DEBUG " 0x07: right output = 0x%02x " " 0x17: right MIC (PnP) = 0x%02x\n", snd_wss_in(chip, 0x07), snd_wss_in(chip, 0x17)); printk(KERN_DEBUG " 0x08: playback format = 0x%02x " " 0x18: IRQ status = 0x%02x\n", snd_wss_in(chip, 0x08), snd_wss_in(chip, 0x18)); printk(KERN_DEBUG " 0x09: iface (CFIG 1) = 0x%02x " " 0x19: left line out = 0x%02x\n", snd_wss_in(chip, 0x09), snd_wss_in(chip, 0x19)); printk(KERN_DEBUG " 0x0a: pin control = 0x%02x " " 0x1a: mono control = 0x%02x\n", snd_wss_in(chip, 0x0a), snd_wss_in(chip, 0x1a)); printk(KERN_DEBUG " 0x0b: init & status = 0x%02x " " 0x1b: right line out = 0x%02x\n", snd_wss_in(chip, 0x0b), snd_wss_in(chip, 0x1b)); printk(KERN_DEBUG " 0x0c: revision & mode = 0x%02x " " 0x1c: record format = 0x%02x\n", snd_wss_in(chip, 0x0c), snd_wss_in(chip, 0x1c)); printk(KERN_DEBUG " 0x0d: loopback = 0x%02x " " 0x1d: var freq (PnP) = 0x%02x\n", snd_wss_in(chip, 0x0d), snd_wss_in(chip, 0x1d)); printk(KERN_DEBUG " 0x0e: ply upr count = 0x%02x " " 0x1e: ply lwr count = 0x%02x\n", snd_wss_in(chip, 0x0e), snd_wss_in(chip, 0x1e)); printk(KERN_DEBUG " 0x0f: rec upr count = 0x%02x " " 0x1f: rec lwr count = 0x%02x\n", snd_wss_in(chip, 0x0f), snd_wss_in(chip, 0x1f)); } #endif /* * CS4231 detection / MCE routines */ static void snd_wss_busy_wait(struct snd_wss *chip) { int timeout; /* huh.. 
looks like this sequence is proper for CS4231A chip (GUS MAX) */
	/* Drain the register-select port a few times first (CS4231A /
	 * GUS MAX cleanup sequence). */
	for (timeout = 5; timeout > 0; timeout--)
		wss_inb(chip, CS4231P(REGSEL));
	/* end of cleanup sequence */
	/* Then poll until the codec's INIT (busy) flag clears,
	 * up to 25000 * 10 us = ~250 ms. */
	for (timeout = 25000;
	     timeout > 0 && (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT);
	     timeout--)
		udelay(10);
}

/*
 * Set the Mode Change Enable (MCE) bit in the index register so the
 * codec's format/clock registers may be reprogrammed.  The bit is only
 * written out if the hardware does not already report it set.
 */
void snd_wss_mce_up(struct snd_wss *chip)
{
	unsigned long flags;
	int timeout;

	snd_wss_wait(chip);
#ifdef CONFIG_SND_DEBUG
	if (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT)
		snd_printk(KERN_DEBUG "mce_up - auto calibration time out (0)\n");
#endif
	spin_lock_irqsave(&chip->reg_lock, flags);
	chip->mce_bit |= CS4231_MCE;
	/* read back the index register; 0x80 here means the codec never
	 * left its busy state - a serious initialization problem */
	timeout = wss_inb(chip, CS4231P(REGSEL));
	if (timeout == 0x80)
		snd_printk(KERN_DEBUG "mce_up [0x%lx]: "
			   "serious init problem - codec still busy\n",
			   chip->port);
	if (!(timeout & CS4231_MCE))
		wss_outb(chip, CS4231P(REGSEL),
			 chip->mce_bit | (timeout & 0x1f));
	spin_unlock_irqrestore(&chip->reg_lock, flags);
}
EXPORT_SYMBOL(snd_wss_mce_up);

/*
 * Clear the MCE bit and, on hardware that auto-calibrates
 * (CS4231/CS4232/AD1848 families), wait for the resulting calibration
 * cycle to complete before returning.
 */
void snd_wss_mce_down(struct snd_wss *chip)
{
	unsigned long flags;
	unsigned long end_time;
	int timeout;
	int hw_mask = WSS_HW_CS4231_MASK | WSS_HW_CS4232_MASK | WSS_HW_AD1848;

	snd_wss_busy_wait(chip);
#ifdef CONFIG_SND_DEBUG
	if (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT)
		snd_printk(KERN_DEBUG "mce_down [0x%lx] - "
			   "auto calibration time out (0)\n",
			   (long)CS4231P(REGSEL));
#endif
	spin_lock_irqsave(&chip->reg_lock, flags);
	chip->mce_bit &= ~CS4231_MCE;
	timeout = wss_inb(chip, CS4231P(REGSEL));
	wss_outb(chip, CS4231P(REGSEL), chip->mce_bit | (timeout & 0x1f));
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	if (timeout == 0x80)
		snd_printk(KERN_DEBUG "mce_down [0x%lx]: "
			   "serious init problem - codec still busy\n",
			   chip->port);
	/* nothing more to do if MCE was already clear or this hardware
	 * does not auto-calibrate on MCE release */
	if ((timeout & CS4231_MCE) == 0 || !(chip->hardware & hw_mask))
		return;

	/*
	 * Wait for (possible -- during init auto-calibration may not be set)
	 * calibration process to start. Needs up to 5 sample periods on AD1848
	 * which at the slowest possible rate of 5.5125 kHz means 907 us.
*/
	msleep(1);

	snd_printdd("(1) jiffies = %lu\n", jiffies);

	/* check condition up to 250 ms */
	end_time = jiffies + msecs_to_jiffies(250);
	while (snd_wss_in(chip, CS4231_TEST_INIT) & CS4231_CALIB_IN_PROGRESS) {
		if (time_after(jiffies, end_time)) {
			snd_printk(KERN_ERR "mce_down - "
				   "auto calibration time out (2)\n");
			return;
		}
		msleep(1);
	}

	snd_printdd("(2) jiffies = %lu\n", jiffies);

	/* check condition up to 100 ms */
	end_time = jiffies + msecs_to_jiffies(100);
	while (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT) {
		if (time_after(jiffies, end_time)) {
			snd_printk(KERN_ERR "mce_down - auto calibration time out (3)\n");
			return;
		}
		msleep(1);
	}

	snd_printdd("(3) jiffies = %lu\n", jiffies);
	snd_printd("mce_down - exit = 0x%x\n", wss_inb(chip, CS4231P(REGSEL)));
}
EXPORT_SYMBOL(snd_wss_mce_down);

/*
 * Convert a transfer size in bytes into the sample count programmed
 * into the codec's count registers, according to the format bits in
 * the data-format register.
 */
static unsigned int snd_wss_get_count(unsigned char format, unsigned int size)
{
	switch (format & 0xe0) {
	case CS4231_LINEAR_16:
	case CS4231_LINEAR_16_BIG:
		size >>= 1;	/* 16-bit: two bytes per sample */
		break;
	case CS4231_ADPCM_16:
		return size >> 2;	/* ADPCM: four bytes per count unit */
	}
	if (format & CS4231_STEREO)
		size >>= 1;	/* stereo: count is per frame, not per channel */
	return size;
}

/*
 * PCM trigger callback: start or stop the playback and/or capture
 * engines for every substream linked with this one, in one register
 * write.
 */
static int snd_wss_trigger(struct snd_pcm_substream *substream,
			   int cmd)
{
	struct snd_wss *chip = snd_pcm_substream_chip(substream);
	int result = 0;
	unsigned int what;
	struct snd_pcm_substream *s;
	int do_start;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		do_start = 1;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		do_start = 0;
		break;
	default:
		return -EINVAL;
	}

	/* collect the enable bits for all linked substreams we own */
	what = 0;
	snd_pcm_group_for_each_entry(s, substream) {
		if (s == chip->playback_substream) {
			what |= CS4231_PLAYBACK_ENABLE;
			snd_pcm_trigger_done(s, substream);
		} else if (s == chip->capture_substream) {
			what |= CS4231_RECORD_ENABLE;
			snd_pcm_trigger_done(s, substream);
		}
	}
	spin_lock(&chip->reg_lock);
	if (do_start) {
		chip->image[CS4231_IFACE_CTRL] |= what;
		if (chip->trigger)
			chip->trigger(chip, what, 1);
	} else {
		chip->image[CS4231_IFACE_CTRL] &= ~what;
		if (chip->trigger)
			chip->trigger(chip, what, 0);
	}
snd_wss_out(chip, CS4231_IFACE_CTRL, chip->image[CS4231_IFACE_CTRL]);
	spin_unlock(&chip->reg_lock);
#if 0
	snd_wss_debug(chip);
#endif
	return result;
}

/*
 *  CODEC I/O
 */

/*
 * Map a sample rate in Hz to the codec's clock-divider bits.
 * Unknown rates fall back to the last (highest, 48000 Hz) table entry.
 */
static unsigned char snd_wss_get_rate(unsigned int rate)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rates); i++)
		if (rate == rates[i])
			return freq_bits[i];
	// snd_BUG();
	return freq_bits[ARRAY_SIZE(rates) - 1];
}

/*
 * Build the codec data-format register value for the given ALSA PCM
 * format and channel count.  Unlisted formats default to 8-bit linear.
 */
static unsigned char snd_wss_get_format(struct snd_wss *chip,
					int format,
					int channels)
{
	unsigned char rformat;

	rformat = CS4231_LINEAR_8;
	switch (format) {
	case SNDRV_PCM_FORMAT_MU_LAW:
		rformat = CS4231_ULAW_8;
		break;
	case SNDRV_PCM_FORMAT_A_LAW:
		rformat = CS4231_ALAW_8;
		break;
	case SNDRV_PCM_FORMAT_S16_LE:
		rformat = CS4231_LINEAR_16;
		break;
	case SNDRV_PCM_FORMAT_S16_BE:
		rformat = CS4231_LINEAR_16_BIG;
		break;
	case SNDRV_PCM_FORMAT_IMA_ADPCM:
		rformat = CS4231_ADPCM_16;
		break;
	}
	if (channels > 1)
		rformat |= CS4231_STEREO;
#if 0
	snd_printk(KERN_DEBUG "get_format: 0x%x (mode=0x%x)\n", format, mode);
#endif
	return rformat;
}

/*
 * Mute (mute != 0) or restore all analog paths around a calibration
 * cycle so the user does not hear clicks while the codec recalibrates.
 */
static void snd_wss_calibrate_mute(struct snd_wss *chip, int mute)
{
	unsigned long flags;

	/* normalize to the register mute bit (0x80) or 0 */
	mute = mute ?
0x80 : 0;
	spin_lock_irqsave(&chip->reg_lock, flags);
	/* nothing to do if the requested state is already in effect */
	if (chip->calibrate_mute == mute) {
		spin_unlock_irqrestore(&chip->reg_lock, flags);
		return;
	}
	if (!mute) {
		/* restore the cached input and loopback settings */
		snd_wss_dout(chip, CS4231_LEFT_INPUT,
			     chip->image[CS4231_LEFT_INPUT]);
		snd_wss_dout(chip, CS4231_RIGHT_INPUT,
			     chip->image[CS4231_RIGHT_INPUT]);
		snd_wss_dout(chip, CS4231_LOOPBACK,
			     chip->image[CS4231_LOOPBACK]);
	} else {
		snd_wss_dout(chip, CS4231_LEFT_INPUT, 0);
		snd_wss_dout(chip, CS4231_RIGHT_INPUT, 0);
		snd_wss_dout(chip, CS4231_LOOPBACK, 0xfd);
	}
	/* OR the mute bit (or 0) into every cached mixer register;
	 * writes go through snd_wss_dout so chip->image is untouched */
	snd_wss_dout(chip, CS4231_AUX1_LEFT_INPUT,
		     mute | chip->image[CS4231_AUX1_LEFT_INPUT]);
	snd_wss_dout(chip, CS4231_AUX1_RIGHT_INPUT,
		     mute | chip->image[CS4231_AUX1_RIGHT_INPUT]);
	snd_wss_dout(chip, CS4231_AUX2_LEFT_INPUT,
		     mute | chip->image[CS4231_AUX2_LEFT_INPUT]);
	snd_wss_dout(chip, CS4231_AUX2_RIGHT_INPUT,
		     mute | chip->image[CS4231_AUX2_RIGHT_INPUT]);
	snd_wss_dout(chip, CS4231_LEFT_OUTPUT,
		     mute | chip->image[CS4231_LEFT_OUTPUT]);
	snd_wss_dout(chip, CS4231_RIGHT_OUTPUT,
		     mute | chip->image[CS4231_RIGHT_OUTPUT]);
	/* these registers do not exist on AD1848-class parts */
	if (!(chip->hardware & WSS_HW_AD1848_MASK)) {
		snd_wss_dout(chip, CS4231_LEFT_LINE_IN,
			     mute | chip->image[CS4231_LEFT_LINE_IN]);
		snd_wss_dout(chip, CS4231_RIGHT_LINE_IN,
			     mute | chip->image[CS4231_RIGHT_LINE_IN]);
		snd_wss_dout(chip, CS4231_MONO_CTRL,
			     mute ?
0xc0 : chip->image[CS4231_MONO_CTRL]); } if (chip->hardware == WSS_HW_INTERWAVE) { snd_wss_dout(chip, CS4231_LEFT_MIC_INPUT, mute | chip->image[CS4231_LEFT_MIC_INPUT]); snd_wss_dout(chip, CS4231_RIGHT_MIC_INPUT, mute | chip->image[CS4231_RIGHT_MIC_INPUT]); snd_wss_dout(chip, CS4231_LINE_LEFT_OUTPUT, mute | chip->image[CS4231_LINE_LEFT_OUTPUT]); snd_wss_dout(chip, CS4231_LINE_RIGHT_OUTPUT, mute | chip->image[CS4231_LINE_RIGHT_OUTPUT]); } chip->calibrate_mute = mute; spin_unlock_irqrestore(&chip->reg_lock, flags); } static void snd_wss_playback_format(struct snd_wss *chip, struct snd_pcm_hw_params *params, unsigned char pdfr) { unsigned long flags; int full_calib = 1; mutex_lock(&chip->mce_mutex); if (chip->hardware == WSS_HW_CS4231A || (chip->hardware & WSS_HW_CS4232_MASK)) { spin_lock_irqsave(&chip->reg_lock, flags); if ((chip->image[CS4231_PLAYBK_FORMAT] & 0x0f) == (pdfr & 0x0f)) { /* rate is same? */ snd_wss_out(chip, CS4231_ALT_FEATURE_1, chip->image[CS4231_ALT_FEATURE_1] | 0x10); chip->image[CS4231_PLAYBK_FORMAT] = pdfr; snd_wss_out(chip, CS4231_PLAYBK_FORMAT, chip->image[CS4231_PLAYBK_FORMAT]); snd_wss_out(chip, CS4231_ALT_FEATURE_1, chip->image[CS4231_ALT_FEATURE_1] &= ~0x10); udelay(100); /* Fixes audible clicks at least on GUS MAX */ full_calib = 0; } spin_unlock_irqrestore(&chip->reg_lock, flags); } else if (chip->hardware == WSS_HW_AD1845) { unsigned rate = params_rate(params); /* * Program the AD1845 correctly for the playback stream. * Note that we do NOT need to toggle the MCE bit because * the PLAYBACK_ENABLE bit of the Interface Configuration * register is set. * * NOTE: We seem to need to write to the MSB before the LSB * to get the correct sample frequency. 
*/ spin_lock_irqsave(&chip->reg_lock, flags); snd_wss_out(chip, CS4231_PLAYBK_FORMAT, (pdfr & 0xf0)); snd_wss_out(chip, AD1845_UPR_FREQ_SEL, (rate >> 8) & 0xff); snd_wss_out(chip, AD1845_LWR_FREQ_SEL, rate & 0xff); full_calib = 0; spin_unlock_irqrestore(&chip->reg_lock, flags); } if (full_calib) { snd_wss_mce_up(chip); spin_lock_irqsave(&chip->reg_lock, flags); if (chip->hardware != WSS_HW_INTERWAVE && !chip->single_dma) { if (chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE) pdfr = (pdfr & 0xf0) | (chip->image[CS4231_REC_FORMAT] & 0x0f); } else { chip->image[CS4231_PLAYBK_FORMAT] = pdfr; } snd_wss_out(chip, CS4231_PLAYBK_FORMAT, pdfr); spin_unlock_irqrestore(&chip->reg_lock, flags); if (chip->hardware == WSS_HW_OPL3SA2) udelay(100); /* this seems to help */ snd_wss_mce_down(chip); } mutex_unlock(&chip->mce_mutex); } static void snd_wss_capture_format(struct snd_wss *chip, struct snd_pcm_hw_params *params, unsigned char cdfr) { unsigned long flags; int full_calib = 1; mutex_lock(&chip->mce_mutex); if (chip->hardware == WSS_HW_CS4231A || (chip->hardware & WSS_HW_CS4232_MASK)) { spin_lock_irqsave(&chip->reg_lock, flags); if ((chip->image[CS4231_PLAYBK_FORMAT] & 0x0f) == (cdfr & 0x0f) || /* rate is same? */ (chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE)) { snd_wss_out(chip, CS4231_ALT_FEATURE_1, chip->image[CS4231_ALT_FEATURE_1] | 0x20); snd_wss_out(chip, CS4231_REC_FORMAT, chip->image[CS4231_REC_FORMAT] = cdfr); snd_wss_out(chip, CS4231_ALT_FEATURE_1, chip->image[CS4231_ALT_FEATURE_1] &= ~0x20); full_calib = 0; } spin_unlock_irqrestore(&chip->reg_lock, flags); } else if (chip->hardware == WSS_HW_AD1845) { unsigned rate = params_rate(params); /* * Program the AD1845 correctly for the capture stream. * Note that we do NOT need to toggle the MCE bit because * the PLAYBACK_ENABLE bit of the Interface Configuration * register is set. * * NOTE: We seem to need to write to the MSB before the LSB * to get the correct sample frequency. 
*/ spin_lock_irqsave(&chip->reg_lock, flags); snd_wss_out(chip, CS4231_REC_FORMAT, (cdfr & 0xf0)); snd_wss_out(chip, AD1845_UPR_FREQ_SEL, (rate >> 8) & 0xff); snd_wss_out(chip, AD1845_LWR_FREQ_SEL, rate & 0xff); full_calib = 0; spin_unlock_irqrestore(&chip->reg_lock, flags); } if (full_calib) { snd_wss_mce_up(chip); spin_lock_irqsave(&chip->reg_lock, flags); if (chip->hardware != WSS_HW_INTERWAVE && !(chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE)) { if (chip->single_dma) snd_wss_out(chip, CS4231_PLAYBK_FORMAT, cdfr); else snd_wss_out(chip, CS4231_PLAYBK_FORMAT, (chip->image[CS4231_PLAYBK_FORMAT] & 0xf0) | (cdfr & 0x0f)); spin_unlock_irqrestore(&chip->reg_lock, flags); snd_wss_mce_down(chip); snd_wss_mce_up(chip); spin_lock_irqsave(&chip->reg_lock, flags); } if (chip->hardware & WSS_HW_AD1848_MASK) snd_wss_out(chip, CS4231_PLAYBK_FORMAT, cdfr); else snd_wss_out(chip, CS4231_REC_FORMAT, cdfr); spin_unlock_irqrestore(&chip->reg_lock, flags); snd_wss_mce_down(chip); } mutex_unlock(&chip->mce_mutex); } /* * Timer interface */ static unsigned long snd_wss_timer_resolution(struct snd_timer *timer) { struct snd_wss *chip = snd_timer_chip(timer); if (chip->hardware & WSS_HW_CS4236B_MASK) return 14467; else return chip->image[CS4231_PLAYBK_FORMAT] & 1 ? 
9969 : 9920; } static int snd_wss_timer_start(struct snd_timer *timer) { unsigned long flags; unsigned int ticks; struct snd_wss *chip = snd_timer_chip(timer); spin_lock_irqsave(&chip->reg_lock, flags); ticks = timer->sticks; if ((chip->image[CS4231_ALT_FEATURE_1] & CS4231_TIMER_ENABLE) == 0 || (unsigned char)(ticks >> 8) != chip->image[CS4231_TIMER_HIGH] || (unsigned char)ticks != chip->image[CS4231_TIMER_LOW]) { chip->image[CS4231_TIMER_HIGH] = (unsigned char) (ticks >> 8); snd_wss_out(chip, CS4231_TIMER_HIGH, chip->image[CS4231_TIMER_HIGH]); chip->image[CS4231_TIMER_LOW] = (unsigned char) ticks; snd_wss_out(chip, CS4231_TIMER_LOW, chip->image[CS4231_TIMER_LOW]); snd_wss_out(chip, CS4231_ALT_FEATURE_1, chip->image[CS4231_ALT_FEATURE_1] | CS4231_TIMER_ENABLE); } spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } static int snd_wss_timer_stop(struct snd_timer *timer) { unsigned long flags; struct snd_wss *chip = snd_timer_chip(timer); spin_lock_irqsave(&chip->reg_lock, flags); chip->image[CS4231_ALT_FEATURE_1] &= ~CS4231_TIMER_ENABLE; snd_wss_out(chip, CS4231_ALT_FEATURE_1, chip->image[CS4231_ALT_FEATURE_1]); spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } static void snd_wss_init(struct snd_wss *chip) { unsigned long flags; snd_wss_calibrate_mute(chip, 1); snd_wss_mce_down(chip); #ifdef SNDRV_DEBUG_MCE snd_printk(KERN_DEBUG "init: (1)\n"); #endif snd_wss_mce_up(chip); spin_lock_irqsave(&chip->reg_lock, flags); chip->image[CS4231_IFACE_CTRL] &= ~(CS4231_PLAYBACK_ENABLE | CS4231_PLAYBACK_PIO | CS4231_RECORD_ENABLE | CS4231_RECORD_PIO | CS4231_CALIB_MODE); chip->image[CS4231_IFACE_CTRL] |= CS4231_AUTOCALIB; snd_wss_out(chip, CS4231_IFACE_CTRL, chip->image[CS4231_IFACE_CTRL]); spin_unlock_irqrestore(&chip->reg_lock, flags); snd_wss_mce_down(chip); #ifdef SNDRV_DEBUG_MCE snd_printk(KERN_DEBUG "init: (2)\n"); #endif snd_wss_mce_up(chip); spin_lock_irqsave(&chip->reg_lock, flags); chip->image[CS4231_IFACE_CTRL] &= ~CS4231_AUTOCALIB; snd_wss_out(chip, 
CS4231_IFACE_CTRL, chip->image[CS4231_IFACE_CTRL]); snd_wss_out(chip, CS4231_ALT_FEATURE_1, chip->image[CS4231_ALT_FEATURE_1]); spin_unlock_irqrestore(&chip->reg_lock, flags); snd_wss_mce_down(chip); #ifdef SNDRV_DEBUG_MCE snd_printk(KERN_DEBUG "init: (3) - afei = 0x%x\n", chip->image[CS4231_ALT_FEATURE_1]); #endif spin_lock_irqsave(&chip->reg_lock, flags); snd_wss_out(chip, CS4231_ALT_FEATURE_2, chip->image[CS4231_ALT_FEATURE_2]); spin_unlock_irqrestore(&chip->reg_lock, flags); snd_wss_mce_up(chip); spin_lock_irqsave(&chip->reg_lock, flags); snd_wss_out(chip, CS4231_PLAYBK_FORMAT, chip->image[CS4231_PLAYBK_FORMAT]); spin_unlock_irqrestore(&chip->reg_lock, flags); snd_wss_mce_down(chip); #ifdef SNDRV_DEBUG_MCE snd_printk(KERN_DEBUG "init: (4)\n"); #endif snd_wss_mce_up(chip); spin_lock_irqsave(&chip->reg_lock, flags); if (!(chip->hardware & WSS_HW_AD1848_MASK)) snd_wss_out(chip, CS4231_REC_FORMAT, chip->image[CS4231_REC_FORMAT]); spin_unlock_irqrestore(&chip->reg_lock, flags); snd_wss_mce_down(chip); snd_wss_calibrate_mute(chip, 0); #ifdef SNDRV_DEBUG_MCE snd_printk(KERN_DEBUG "init: (5)\n"); #endif } static int snd_wss_open(struct snd_wss *chip, unsigned int mode) { unsigned long flags; mutex_lock(&chip->open_mutex); if ((chip->mode & mode) || ((chip->mode & WSS_MODE_OPEN) && chip->single_dma)) { mutex_unlock(&chip->open_mutex); return -EAGAIN; } if (chip->mode & WSS_MODE_OPEN) { chip->mode |= mode; mutex_unlock(&chip->open_mutex); return 0; } /* ok. 
now enable and ack CODEC IRQ */ spin_lock_irqsave(&chip->reg_lock, flags); if (!(chip->hardware & WSS_HW_AD1848_MASK)) { snd_wss_out(chip, CS4231_IRQ_STATUS, CS4231_PLAYBACK_IRQ | CS4231_RECORD_IRQ | CS4231_TIMER_IRQ); snd_wss_out(chip, CS4231_IRQ_STATUS, 0); } wss_outb(chip, CS4231P(STATUS), 0); /* clear IRQ */ wss_outb(chip, CS4231P(STATUS), 0); /* clear IRQ */ chip->image[CS4231_PIN_CTRL] |= CS4231_IRQ_ENABLE; snd_wss_out(chip, CS4231_PIN_CTRL, chip->image[CS4231_PIN_CTRL]); if (!(chip->hardware & WSS_HW_AD1848_MASK)) { snd_wss_out(chip, CS4231_IRQ_STATUS, CS4231_PLAYBACK_IRQ | CS4231_RECORD_IRQ | CS4231_TIMER_IRQ); snd_wss_out(chip, CS4231_IRQ_STATUS, 0); } spin_unlock_irqrestore(&chip->reg_lock, flags); chip->mode = mode; mutex_unlock(&chip->open_mutex); return 0; } static void snd_wss_close(struct snd_wss *chip, unsigned int mode) { unsigned long flags; mutex_lock(&chip->open_mutex); chip->mode &= ~mode; if (chip->mode & WSS_MODE_OPEN) { mutex_unlock(&chip->open_mutex); return; } /* disable IRQ */ spin_lock_irqsave(&chip->reg_lock, flags); if (!(chip->hardware & WSS_HW_AD1848_MASK)) snd_wss_out(chip, CS4231_IRQ_STATUS, 0); wss_outb(chip, CS4231P(STATUS), 0); /* clear IRQ */ wss_outb(chip, CS4231P(STATUS), 0); /* clear IRQ */ chip->image[CS4231_PIN_CTRL] &= ~CS4231_IRQ_ENABLE; snd_wss_out(chip, CS4231_PIN_CTRL, chip->image[CS4231_PIN_CTRL]); /* now disable record & playback */ if (chip->image[CS4231_IFACE_CTRL] & (CS4231_PLAYBACK_ENABLE | CS4231_PLAYBACK_PIO | CS4231_RECORD_ENABLE | CS4231_RECORD_PIO)) { spin_unlock_irqrestore(&chip->reg_lock, flags); snd_wss_mce_up(chip); spin_lock_irqsave(&chip->reg_lock, flags); chip->image[CS4231_IFACE_CTRL] &= ~(CS4231_PLAYBACK_ENABLE | CS4231_PLAYBACK_PIO | CS4231_RECORD_ENABLE | CS4231_RECORD_PIO); snd_wss_out(chip, CS4231_IFACE_CTRL, chip->image[CS4231_IFACE_CTRL]); spin_unlock_irqrestore(&chip->reg_lock, flags); snd_wss_mce_down(chip); spin_lock_irqsave(&chip->reg_lock, flags); } /* clear IRQ again */ if 
(!(chip->hardware & WSS_HW_AD1848_MASK)) snd_wss_out(chip, CS4231_IRQ_STATUS, 0); wss_outb(chip, CS4231P(STATUS), 0); /* clear IRQ */ wss_outb(chip, CS4231P(STATUS), 0); /* clear IRQ */ spin_unlock_irqrestore(&chip->reg_lock, flags); chip->mode = 0; mutex_unlock(&chip->open_mutex); } /* * timer open/close */ static int snd_wss_timer_open(struct snd_timer *timer) { struct snd_wss *chip = snd_timer_chip(timer); snd_wss_open(chip, WSS_MODE_TIMER); return 0; } static int snd_wss_timer_close(struct snd_timer *timer) { struct snd_wss *chip = snd_timer_chip(timer); snd_wss_close(chip, WSS_MODE_TIMER); return 0; } static struct snd_timer_hardware snd_wss_timer_table = { .flags = SNDRV_TIMER_HW_AUTO, .resolution = 9945, .ticks = 65535, .open = snd_wss_timer_open, .close = snd_wss_timer_close, .c_resolution = snd_wss_timer_resolution, .start = snd_wss_timer_start, .stop = snd_wss_timer_stop, }; /* * ok.. exported functions.. */ static int snd_wss_playback_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_wss *chip = snd_pcm_substream_chip(substream); unsigned char new_pdfr; int err; if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0) return err; new_pdfr = snd_wss_get_format(chip, params_format(hw_params), params_channels(hw_params)) | snd_wss_get_rate(params_rate(hw_params)); chip->set_playback_format(chip, hw_params, new_pdfr); return 0; } static int snd_wss_playback_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } static int snd_wss_playback_prepare(struct snd_pcm_substream *substream) { struct snd_wss *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; unsigned long flags; unsigned int size = snd_pcm_lib_buffer_bytes(substream); unsigned int count = snd_pcm_lib_period_bytes(substream); spin_lock_irqsave(&chip->reg_lock, flags); chip->p_dma_size = size; chip->image[CS4231_IFACE_CTRL] &= 
~(CS4231_PLAYBACK_ENABLE | CS4231_PLAYBACK_PIO); snd_dma_program(chip->dma1, runtime->dma_addr, size, DMA_MODE_WRITE | DMA_AUTOINIT); count = snd_wss_get_count(chip->image[CS4231_PLAYBK_FORMAT], count) - 1; snd_wss_out(chip, CS4231_PLY_LWR_CNT, (unsigned char) count); snd_wss_out(chip, CS4231_PLY_UPR_CNT, (unsigned char) (count >> 8)); spin_unlock_irqrestore(&chip->reg_lock, flags); #if 0 snd_wss_debug(chip); #endif return 0; } static int snd_wss_capture_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_wss *chip = snd_pcm_substream_chip(substream); unsigned char new_cdfr; int err; if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0) return err; new_cdfr = snd_wss_get_format(chip, params_format(hw_params), params_channels(hw_params)) | snd_wss_get_rate(params_rate(hw_params)); chip->set_capture_format(chip, hw_params, new_cdfr); return 0; } static int snd_wss_capture_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } static int snd_wss_capture_prepare(struct snd_pcm_substream *substream) { struct snd_wss *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; unsigned long flags; unsigned int size = snd_pcm_lib_buffer_bytes(substream); unsigned int count = snd_pcm_lib_period_bytes(substream); spin_lock_irqsave(&chip->reg_lock, flags); chip->c_dma_size = size; chip->image[CS4231_IFACE_CTRL] &= ~(CS4231_RECORD_ENABLE | CS4231_RECORD_PIO); snd_dma_program(chip->dma2, runtime->dma_addr, size, DMA_MODE_READ | DMA_AUTOINIT); if (chip->hardware & WSS_HW_AD1848_MASK) count = snd_wss_get_count(chip->image[CS4231_PLAYBK_FORMAT], count); else count = snd_wss_get_count(chip->image[CS4231_REC_FORMAT], count); count--; if (chip->single_dma && chip->hardware != WSS_HW_INTERWAVE) { snd_wss_out(chip, CS4231_PLY_LWR_CNT, (unsigned char) count); snd_wss_out(chip, CS4231_PLY_UPR_CNT, (unsigned char) (count >> 8)); } else { 
snd_wss_out(chip, CS4231_REC_LWR_CNT, (unsigned char) count); snd_wss_out(chip, CS4231_REC_UPR_CNT, (unsigned char) (count >> 8)); } spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } void snd_wss_overrange(struct snd_wss *chip) { unsigned long flags; unsigned char res; spin_lock_irqsave(&chip->reg_lock, flags); res = snd_wss_in(chip, CS4231_TEST_INIT); spin_unlock_irqrestore(&chip->reg_lock, flags); if (res & (0x08 | 0x02)) /* detect overrange only above 0dB; may be user selectable? */ chip->capture_substream->runtime->overrange++; } EXPORT_SYMBOL(snd_wss_overrange); irqreturn_t snd_wss_interrupt(int irq, void *dev_id) { struct snd_wss *chip = dev_id; unsigned char status; if (chip->hardware & WSS_HW_AD1848_MASK) /* pretend it was the only possible irq for AD1848 */ status = CS4231_PLAYBACK_IRQ; else status = snd_wss_in(chip, CS4231_IRQ_STATUS); if (status & CS4231_TIMER_IRQ) { if (chip->timer) snd_timer_interrupt(chip->timer, chip->timer->sticks); } if (chip->single_dma && chip->hardware != WSS_HW_INTERWAVE) { if (status & CS4231_PLAYBACK_IRQ) { if (chip->mode & WSS_MODE_PLAY) { if (chip->playback_substream) snd_pcm_period_elapsed(chip->playback_substream); } if (chip->mode & WSS_MODE_RECORD) { if (chip->capture_substream) { snd_wss_overrange(chip); snd_pcm_period_elapsed(chip->capture_substream); } } } } else { if (status & CS4231_PLAYBACK_IRQ) { if (chip->playback_substream) snd_pcm_period_elapsed(chip->playback_substream); } if (status & CS4231_RECORD_IRQ) { if (chip->capture_substream) { snd_wss_overrange(chip); snd_pcm_period_elapsed(chip->capture_substream); } } } spin_lock(&chip->reg_lock); status = ~CS4231_ALL_IRQS | ~status; if (chip->hardware & WSS_HW_AD1848_MASK) wss_outb(chip, CS4231P(STATUS), 0); else snd_wss_out(chip, CS4231_IRQ_STATUS, status); spin_unlock(&chip->reg_lock); return IRQ_HANDLED; } EXPORT_SYMBOL(snd_wss_interrupt); static snd_pcm_uframes_t snd_wss_playback_pointer(struct snd_pcm_substream *substream) { struct snd_wss *chip = 
snd_pcm_substream_chip(substream); size_t ptr; if (!(chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE)) return 0; ptr = snd_dma_pointer(chip->dma1, chip->p_dma_size); return bytes_to_frames(substream->runtime, ptr); } static snd_pcm_uframes_t snd_wss_capture_pointer(struct snd_pcm_substream *substream) { struct snd_wss *chip = snd_pcm_substream_chip(substream); size_t ptr; if (!(chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE)) return 0; ptr = snd_dma_pointer(chip->dma2, chip->c_dma_size); return bytes_to_frames(substream->runtime, ptr); } /* */ static int snd_ad1848_probe(struct snd_wss *chip) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); unsigned long flags; unsigned char r; unsigned short hardware = 0; int err = 0; int i; while (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT) { if (time_after(jiffies, timeout)) return -ENODEV; cond_resched(); } spin_lock_irqsave(&chip->reg_lock, flags); /* set CS423x MODE 1 */ snd_wss_dout(chip, CS4231_MISC_INFO, 0); snd_wss_dout(chip, CS4231_RIGHT_INPUT, 0x45); /* 0x55 & ~0x10 */ r = snd_wss_in(chip, CS4231_RIGHT_INPUT); if (r != 0x45) { /* RMGE always high on AD1847 */ if ((r & ~CS4231_ENABLE_MIC_GAIN) != 0x45) { err = -ENODEV; goto out; } hardware = WSS_HW_AD1847; } else { snd_wss_dout(chip, CS4231_LEFT_INPUT, 0xaa); r = snd_wss_in(chip, CS4231_LEFT_INPUT); /* L/RMGE always low on AT2320 */ if ((r | CS4231_ENABLE_MIC_GAIN) != 0xaa) { err = -ENODEV; goto out; } } /* clear pending IRQ */ wss_inb(chip, CS4231P(STATUS)); wss_outb(chip, CS4231P(STATUS), 0); mb(); if ((chip->hardware & WSS_HW_TYPE_MASK) != WSS_HW_DETECT) goto out; if (hardware) { chip->hardware = hardware; goto out; } r = snd_wss_in(chip, CS4231_MISC_INFO); /* set CS423x MODE 2 */ snd_wss_dout(chip, CS4231_MISC_INFO, CS4231_MODE2); for (i = 0; i < 16; i++) { if (snd_wss_in(chip, i) != snd_wss_in(chip, 16 + i)) { /* we have more than 16 registers: check ID */ if ((r & 0xf) != 0xa) goto out_mode; /* * on CMI8330, CS4231_VERSION is volume 
control and * can be set to 0 */ snd_wss_dout(chip, CS4231_VERSION, 0); r = snd_wss_in(chip, CS4231_VERSION) & 0xe7; if (!r) chip->hardware = WSS_HW_CMI8330; goto out_mode; } } if (r & 0x80) chip->hardware = WSS_HW_CS4248; else chip->hardware = WSS_HW_AD1848; out_mode: snd_wss_dout(chip, CS4231_MISC_INFO, 0); out: spin_unlock_irqrestore(&chip->reg_lock, flags); return err; } static int snd_wss_probe(struct snd_wss *chip) { unsigned long flags; int i, id, rev, regnum; unsigned char *ptr; unsigned int hw; id = snd_ad1848_probe(chip); if (id < 0) return id; hw = chip->hardware; if ((hw & WSS_HW_TYPE_MASK) == WSS_HW_DETECT) { for (i = 0; i < 50; i++) { mb(); if (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT) msleep(2); else { spin_lock_irqsave(&chip->reg_lock, flags); snd_wss_out(chip, CS4231_MISC_INFO, CS4231_MODE2); id = snd_wss_in(chip, CS4231_MISC_INFO) & 0x0f; spin_unlock_irqrestore(&chip->reg_lock, flags); if (id == 0x0a) break; /* this is valid value */ } } snd_printdd("wss: port = 0x%lx, id = 0x%x\n", chip->port, id); if (id != 0x0a) return -ENODEV; /* no valid device found */ rev = snd_wss_in(chip, CS4231_VERSION) & 0xe7; snd_printdd("CS4231: VERSION (I25) = 0x%x\n", rev); if (rev == 0x80) { unsigned char tmp = snd_wss_in(chip, 23); snd_wss_out(chip, 23, ~tmp); if (snd_wss_in(chip, 23) != tmp) chip->hardware = WSS_HW_AD1845; else chip->hardware = WSS_HW_CS4231; } else if (rev == 0xa0) { chip->hardware = WSS_HW_CS4231A; } else if (rev == 0xa2) { chip->hardware = WSS_HW_CS4232; } else if (rev == 0xb2) { chip->hardware = WSS_HW_CS4232A; } else if (rev == 0x83) { chip->hardware = WSS_HW_CS4236; } else if (rev == 0x03) { chip->hardware = WSS_HW_CS4236B; } else { snd_printk(KERN_ERR "unknown CS chip with version 0x%x\n", rev); return -ENODEV; /* unknown CS4231 chip? 
*/ } } spin_lock_irqsave(&chip->reg_lock, flags); wss_inb(chip, CS4231P(STATUS)); /* clear any pendings IRQ */ wss_outb(chip, CS4231P(STATUS), 0); mb(); spin_unlock_irqrestore(&chip->reg_lock, flags); if (!(chip->hardware & WSS_HW_AD1848_MASK)) chip->image[CS4231_MISC_INFO] = CS4231_MODE2; switch (chip->hardware) { case WSS_HW_INTERWAVE: chip->image[CS4231_MISC_INFO] = CS4231_IW_MODE3; break; case WSS_HW_CS4235: case WSS_HW_CS4236B: case WSS_HW_CS4237B: case WSS_HW_CS4238B: case WSS_HW_CS4239: if (hw == WSS_HW_DETECT3) chip->image[CS4231_MISC_INFO] = CS4231_4236_MODE3; else chip->hardware = WSS_HW_CS4236; break; } chip->image[CS4231_IFACE_CTRL] = (chip->image[CS4231_IFACE_CTRL] & ~CS4231_SINGLE_DMA) | (chip->single_dma ? CS4231_SINGLE_DMA : 0); if (chip->hardware != WSS_HW_OPTI93X) { chip->image[CS4231_ALT_FEATURE_1] = 0x80; chip->image[CS4231_ALT_FEATURE_2] = chip->hardware == WSS_HW_INTERWAVE ? 0xc2 : 0x01; } /* enable fine grained frequency selection */ if (chip->hardware == WSS_HW_AD1845) chip->image[AD1845_PWR_DOWN] = 8; ptr = (unsigned char *) &chip->image; regnum = (chip->hardware & WSS_HW_AD1848_MASK) ? 16 : 32; snd_wss_mce_down(chip); spin_lock_irqsave(&chip->reg_lock, flags); for (i = 0; i < regnum; i++) /* ok.. fill all registers */ snd_wss_out(chip, i, *ptr++); spin_unlock_irqrestore(&chip->reg_lock, flags); snd_wss_mce_up(chip); snd_wss_mce_down(chip); mdelay(2); /* ok.. 
try check hardware version for CS4236+ chips */ if ((hw & WSS_HW_TYPE_MASK) == WSS_HW_DETECT) { if (chip->hardware == WSS_HW_CS4236B) { rev = snd_cs4236_ext_in(chip, CS4236_VERSION); snd_cs4236_ext_out(chip, CS4236_VERSION, 0xff); id = snd_cs4236_ext_in(chip, CS4236_VERSION); snd_cs4236_ext_out(chip, CS4236_VERSION, rev); snd_printdd("CS4231: ext version; rev = 0x%x, id = 0x%x\n", rev, id); if ((id & 0x1f) == 0x1d) { /* CS4235 */ chip->hardware = WSS_HW_CS4235; switch (id >> 5) { case 4: case 5: case 6: break; default: snd_printk(KERN_WARNING "unknown CS4235 chip " "(enhanced version = 0x%x)\n", id); } } else if ((id & 0x1f) == 0x0b) { /* CS4236/B */ switch (id >> 5) { case 4: case 5: case 6: case 7: chip->hardware = WSS_HW_CS4236B; break; default: snd_printk(KERN_WARNING "unknown CS4236 chip " "(enhanced version = 0x%x)\n", id); } } else if ((id & 0x1f) == 0x08) { /* CS4237B */ chip->hardware = WSS_HW_CS4237B; switch (id >> 5) { case 4: case 5: case 6: case 7: break; default: snd_printk(KERN_WARNING "unknown CS4237B chip " "(enhanced version = 0x%x)\n", id); } } else if ((id & 0x1f) == 0x09) { /* CS4238B */ chip->hardware = WSS_HW_CS4238B; switch (id >> 5) { case 5: case 6: case 7: break; default: snd_printk(KERN_WARNING "unknown CS4238B chip " "(enhanced version = 0x%x)\n", id); } } else if ((id & 0x1f) == 0x1e) { /* CS4239 */ chip->hardware = WSS_HW_CS4239; switch (id >> 5) { case 4: case 5: case 6: break; default: snd_printk(KERN_WARNING "unknown CS4239 chip " "(enhanced version = 0x%x)\n", id); } } else { snd_printk(KERN_WARNING "unknown CS4236/CS423xB chip " "(enhanced version = 0x%x)\n", id); } } } return 0; /* all things are ok.. 
*/ } /* */ static struct snd_pcm_hardware snd_wss_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START), .formats = (SNDRV_PCM_FMTBIT_MU_LAW | SNDRV_PCM_FMTBIT_A_LAW | SNDRV_PCM_FMTBIT_IMA_ADPCM | SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE), .rates = SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_48000, .rate_min = 5510, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 64, .period_bytes_max = (128*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; static struct snd_pcm_hardware snd_wss_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_SYNC_START), .formats = (SNDRV_PCM_FMTBIT_MU_LAW | SNDRV_PCM_FMTBIT_A_LAW | SNDRV_PCM_FMTBIT_IMA_ADPCM | SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE), .rates = SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_48000, .rate_min = 5510, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 64, .period_bytes_max = (128*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; /* */ static int snd_wss_playback_open(struct snd_pcm_substream *substream) { struct snd_wss *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int err; runtime->hw = snd_wss_playback; /* hardware limitation of older chipsets */ if (chip->hardware & WSS_HW_AD1848_MASK) runtime->hw.formats &= ~(SNDRV_PCM_FMTBIT_IMA_ADPCM | SNDRV_PCM_FMTBIT_S16_BE); /* hardware bug in InterWave chipset */ if (chip->hardware == WSS_HW_INTERWAVE && chip->dma1 > 3) runtime->hw.formats &= ~SNDRV_PCM_FMTBIT_MU_LAW; /* hardware limitation of cheap chips */ if (chip->hardware == WSS_HW_CS4235 || chip->hardware == WSS_HW_CS4239) runtime->hw.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE; 
snd_pcm_limit_isa_dma_size(chip->dma1, &runtime->hw.buffer_bytes_max); snd_pcm_limit_isa_dma_size(chip->dma1, &runtime->hw.period_bytes_max); if (chip->claim_dma) { if ((err = chip->claim_dma(chip, chip->dma_private_data, chip->dma1)) < 0) return err; } err = snd_wss_open(chip, WSS_MODE_PLAY); if (err < 0) { if (chip->release_dma) chip->release_dma(chip, chip->dma_private_data, chip->dma1); snd_free_pages(runtime->dma_area, runtime->dma_bytes); return err; } chip->playback_substream = substream; snd_pcm_set_sync(substream); chip->rate_constraint(runtime); return 0; } static int snd_wss_capture_open(struct snd_pcm_substream *substream) { struct snd_wss *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int err; runtime->hw = snd_wss_capture; /* hardware limitation of older chipsets */ if (chip->hardware & WSS_HW_AD1848_MASK) runtime->hw.formats &= ~(SNDRV_PCM_FMTBIT_IMA_ADPCM | SNDRV_PCM_FMTBIT_S16_BE); /* hardware limitation of cheap chips */ if (chip->hardware == WSS_HW_CS4235 || chip->hardware == WSS_HW_CS4239 || chip->hardware == WSS_HW_OPTI93X) runtime->hw.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE; snd_pcm_limit_isa_dma_size(chip->dma2, &runtime->hw.buffer_bytes_max); snd_pcm_limit_isa_dma_size(chip->dma2, &runtime->hw.period_bytes_max); if (chip->claim_dma) { if ((err = chip->claim_dma(chip, chip->dma_private_data, chip->dma2)) < 0) return err; } err = snd_wss_open(chip, WSS_MODE_RECORD); if (err < 0) { if (chip->release_dma) chip->release_dma(chip, chip->dma_private_data, chip->dma2); snd_free_pages(runtime->dma_area, runtime->dma_bytes); return err; } chip->capture_substream = substream; snd_pcm_set_sync(substream); chip->rate_constraint(runtime); return 0; } static int snd_wss_playback_close(struct snd_pcm_substream *substream) { struct snd_wss *chip = snd_pcm_substream_chip(substream); chip->playback_substream = NULL; snd_wss_close(chip, WSS_MODE_PLAY); return 0; } static int 
snd_wss_capture_close(struct snd_pcm_substream *substream) { struct snd_wss *chip = snd_pcm_substream_chip(substream); chip->capture_substream = NULL; snd_wss_close(chip, WSS_MODE_RECORD); return 0; } static void snd_wss_thinkpad_twiddle(struct snd_wss *chip, int on) { int tmp; if (!chip->thinkpad_flag) return; outb(0x1c, AD1848_THINKPAD_CTL_PORT1); tmp = inb(AD1848_THINKPAD_CTL_PORT2); if (on) /* turn it on */ tmp |= AD1848_THINKPAD_CS4248_ENABLE_BIT; else /* turn it off */ tmp &= ~AD1848_THINKPAD_CS4248_ENABLE_BIT; outb(tmp, AD1848_THINKPAD_CTL_PORT2); } #ifdef CONFIG_PM /* lowlevel suspend callback for CS4231 */ static void snd_wss_suspend(struct snd_wss *chip) { int reg; unsigned long flags; snd_pcm_suspend_all(chip->pcm); spin_lock_irqsave(&chip->reg_lock, flags); for (reg = 0; reg < 32; reg++) chip->image[reg] = snd_wss_in(chip, reg); spin_unlock_irqrestore(&chip->reg_lock, flags); if (chip->thinkpad_flag) snd_wss_thinkpad_twiddle(chip, 0); } /* lowlevel resume callback for CS4231 */ static void snd_wss_resume(struct snd_wss *chip) { int reg; unsigned long flags; /* int timeout; */ if (chip->thinkpad_flag) snd_wss_thinkpad_twiddle(chip, 1); snd_wss_mce_up(chip); spin_lock_irqsave(&chip->reg_lock, flags); for (reg = 0; reg < 32; reg++) { switch (reg) { case CS4231_VERSION: break; default: snd_wss_out(chip, reg, chip->image[reg]); break; } } /* Yamaha needs this to resume properly */ if (chip->hardware == WSS_HW_OPL3SA2) snd_wss_out(chip, CS4231_PLAYBK_FORMAT, chip->image[CS4231_PLAYBK_FORMAT]); spin_unlock_irqrestore(&chip->reg_lock, flags); #if 1 snd_wss_mce_down(chip); #else /* The following is a workaround to avoid freeze after resume on TP600E. This is the first half of copy of snd_wss_mce_down(), but doesn't include rescheduling. 
-- iwai */ snd_wss_busy_wait(chip); spin_lock_irqsave(&chip->reg_lock, flags); chip->mce_bit &= ~CS4231_MCE; timeout = wss_inb(chip, CS4231P(REGSEL)); wss_outb(chip, CS4231P(REGSEL), chip->mce_bit | (timeout & 0x1f)); spin_unlock_irqrestore(&chip->reg_lock, flags); if (timeout == 0x80) snd_printk(KERN_ERR "down [0x%lx]: serious init problem " "- codec still busy\n", chip->port); if ((timeout & CS4231_MCE) == 0 || !(chip->hardware & (WSS_HW_CS4231_MASK | WSS_HW_CS4232_MASK))) { return; } snd_wss_busy_wait(chip); #endif } #endif /* CONFIG_PM */ static int snd_wss_free(struct snd_wss *chip) { release_and_free_resource(chip->res_port); release_and_free_resource(chip->res_cport); if (chip->irq >= 0) { disable_irq(chip->irq); if (!(chip->hwshare & WSS_HWSHARE_IRQ)) free_irq(chip->irq, (void *) chip); } if (!(chip->hwshare & WSS_HWSHARE_DMA1) && chip->dma1 >= 0) { snd_dma_disable(chip->dma1); free_dma(chip->dma1); } if (!(chip->hwshare & WSS_HWSHARE_DMA2) && chip->dma2 >= 0 && chip->dma2 != chip->dma1) { snd_dma_disable(chip->dma2); free_dma(chip->dma2); } if (chip->timer) snd_device_free(chip->card, chip->timer); kfree(chip); return 0; } static int snd_wss_dev_free(struct snd_device *device) { struct snd_wss *chip = device->device_data; return snd_wss_free(chip); } const char *snd_wss_chip_id(struct snd_wss *chip) { switch (chip->hardware) { case WSS_HW_CS4231: return "CS4231"; case WSS_HW_CS4231A: return "CS4231A"; case WSS_HW_CS4232: return "CS4232"; case WSS_HW_CS4232A: return "CS4232A"; case WSS_HW_CS4235: return "CS4235"; case WSS_HW_CS4236: return "CS4236"; case WSS_HW_CS4236B: return "CS4236B"; case WSS_HW_CS4237B: return "CS4237B"; case WSS_HW_CS4238B: return "CS4238B"; case WSS_HW_CS4239: return "CS4239"; case WSS_HW_INTERWAVE: return "AMD InterWave"; case WSS_HW_OPL3SA2: return chip->card->shortname; case WSS_HW_AD1845: return "AD1845"; case WSS_HW_OPTI93X: return "OPTi 93x"; case WSS_HW_AD1847: return "AD1847"; case WSS_HW_AD1848: return "AD1848"; case 
WSS_HW_CS4248: return "CS4248"; case WSS_HW_CMI8330: return "CMI8330/C3D"; default: return "???"; } } EXPORT_SYMBOL(snd_wss_chip_id); static int snd_wss_new(struct snd_card *card, unsigned short hardware, unsigned short hwshare, struct snd_wss **rchip) { struct snd_wss *chip; *rchip = NULL; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) return -ENOMEM; chip->hardware = hardware; chip->hwshare = hwshare; spin_lock_init(&chip->reg_lock); mutex_init(&chip->mce_mutex); mutex_init(&chip->open_mutex); chip->card = card; chip->rate_constraint = snd_wss_xrate; chip->set_playback_format = snd_wss_playback_format; chip->set_capture_format = snd_wss_capture_format; if (chip->hardware == WSS_HW_OPTI93X) memcpy(&chip->image, &snd_opti93x_original_image, sizeof(snd_opti93x_original_image)); else memcpy(&chip->image, &snd_wss_original_image, sizeof(snd_wss_original_image)); if (chip->hardware & WSS_HW_AD1848_MASK) { chip->image[CS4231_PIN_CTRL] = 0; chip->image[CS4231_TEST_INIT] = 0; } *rchip = chip; return 0; } int snd_wss_create(struct snd_card *card, unsigned long port, unsigned long cport, int irq, int dma1, int dma2, unsigned short hardware, unsigned short hwshare, struct snd_wss **rchip) { static struct snd_device_ops ops = { .dev_free = snd_wss_dev_free, }; struct snd_wss *chip; int err; err = snd_wss_new(card, hardware, hwshare, &chip); if (err < 0) return err; chip->irq = -1; chip->dma1 = -1; chip->dma2 = -1; chip->res_port = request_region(port, 4, "WSS"); if (!chip->res_port) { snd_printk(KERN_ERR "wss: can't grab port 0x%lx\n", port); snd_wss_free(chip); return -EBUSY; } chip->port = port; if ((long)cport >= 0) { chip->res_cport = request_region(cport, 8, "CS4232 Control"); if (!chip->res_cport) { snd_printk(KERN_ERR "wss: can't grab control port 0x%lx\n", cport); snd_wss_free(chip); return -ENODEV; } } chip->cport = cport; if (!(hwshare & WSS_HWSHARE_IRQ)) if (request_irq(irq, snd_wss_interrupt, 0, "WSS", (void *) chip)) { snd_printk(KERN_ERR "wss: 
can't grab IRQ %d\n", irq); snd_wss_free(chip); return -EBUSY; } chip->irq = irq; if (!(hwshare & WSS_HWSHARE_DMA1) && request_dma(dma1, "WSS - 1")) { snd_printk(KERN_ERR "wss: can't grab DMA1 %d\n", dma1); snd_wss_free(chip); return -EBUSY; } chip->dma1 = dma1; if (!(hwshare & WSS_HWSHARE_DMA2) && dma1 != dma2 && dma2 >= 0 && request_dma(dma2, "WSS - 2")) { snd_printk(KERN_ERR "wss: can't grab DMA2 %d\n", dma2); snd_wss_free(chip); return -EBUSY; } if (dma1 == dma2 || dma2 < 0) { chip->single_dma = 1; chip->dma2 = chip->dma1; } else chip->dma2 = dma2; if (hardware == WSS_HW_THINKPAD) { chip->thinkpad_flag = 1; chip->hardware = WSS_HW_DETECT; /* reset */ snd_wss_thinkpad_twiddle(chip, 1); } /* global setup */ if (snd_wss_probe(chip) < 0) { snd_wss_free(chip); return -ENODEV; } snd_wss_init(chip); #if 0 if (chip->hardware & WSS_HW_CS4232_MASK) { if (chip->res_cport == NULL) snd_printk(KERN_ERR "CS4232 control port features are " "not accessible\n"); } #endif /* Register device */ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) { snd_wss_free(chip); return err; } #ifdef CONFIG_PM /* Power Management */ chip->suspend = snd_wss_suspend; chip->resume = snd_wss_resume; #endif *rchip = chip; return 0; } EXPORT_SYMBOL(snd_wss_create); static struct snd_pcm_ops snd_wss_playback_ops = { .open = snd_wss_playback_open, .close = snd_wss_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_wss_playback_hw_params, .hw_free = snd_wss_playback_hw_free, .prepare = snd_wss_playback_prepare, .trigger = snd_wss_trigger, .pointer = snd_wss_playback_pointer, }; static struct snd_pcm_ops snd_wss_capture_ops = { .open = snd_wss_capture_open, .close = snd_wss_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_wss_capture_hw_params, .hw_free = snd_wss_capture_hw_free, .prepare = snd_wss_capture_prepare, .trigger = snd_wss_trigger, .pointer = snd_wss_capture_pointer, }; int snd_wss_pcm(struct snd_wss *chip, int device, struct snd_pcm **rpcm) { struct 
snd_pcm *pcm; int err; err = snd_pcm_new(chip->card, "WSS", device, 1, 1, &pcm); if (err < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_wss_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_wss_capture_ops); /* global setup */ pcm->private_data = chip; pcm->info_flags = 0; if (chip->single_dma) pcm->info_flags |= SNDRV_PCM_INFO_HALF_DUPLEX; if (chip->hardware != WSS_HW_INTERWAVE) pcm->info_flags |= SNDRV_PCM_INFO_JOINT_DUPLEX; strcpy(pcm->name, snd_wss_chip_id(chip)); snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_isa_data(), 64*1024, chip->dma1 > 3 || chip->dma2 > 3 ? 128*1024 : 64*1024); chip->pcm = pcm; if (rpcm) *rpcm = pcm; return 0; } EXPORT_SYMBOL(snd_wss_pcm); static void snd_wss_timer_free(struct snd_timer *timer) { struct snd_wss *chip = timer->private_data; chip->timer = NULL; } int snd_wss_timer(struct snd_wss *chip, int device, struct snd_timer **rtimer) { struct snd_timer *timer; struct snd_timer_id tid; int err; /* Timer initialization */ tid.dev_class = SNDRV_TIMER_CLASS_CARD; tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE; tid.card = chip->card->number; tid.device = device; tid.subdevice = 0; if ((err = snd_timer_new(chip->card, "CS4231", &tid, &timer)) < 0) return err; strcpy(timer->name, snd_wss_chip_id(chip)); timer->private_data = chip; timer->private_free = snd_wss_timer_free; timer->hw = snd_wss_timer_table; chip->timer = timer; if (rtimer) *rtimer = timer; return 0; } EXPORT_SYMBOL(snd_wss_timer); /* * MIXER part */ static int snd_wss_info_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char *texts[4] = { "Line", "Aux", "Mic", "Mix" }; static char *opl3sa_texts[4] = { "Line", "CD", "Mic", "Mix" }; static char *gusmax_texts[4] = { "Line", "Synth", "Mic", "Mix" }; char **ptexts = texts; struct snd_wss *chip = snd_kcontrol_chip(kcontrol); if (snd_BUG_ON(!chip->card)) return -EINVAL; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 2; 
uinfo->value.enumerated.items = 4; if (uinfo->value.enumerated.item > 3) uinfo->value.enumerated.item = 3; if (!strcmp(chip->card->driver, "GUS MAX")) ptexts = gusmax_texts; switch (chip->hardware) { case WSS_HW_INTERWAVE: ptexts = gusmax_texts; break; case WSS_HW_OPTI93X: case WSS_HW_OPL3SA2: ptexts = opl3sa_texts; break; } strcpy(uinfo->value.enumerated.name, ptexts[uinfo->value.enumerated.item]); return 0; } static int snd_wss_get_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_wss *chip = snd_kcontrol_chip(kcontrol); unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); ucontrol->value.enumerated.item[0] = (chip->image[CS4231_LEFT_INPUT] & CS4231_MIXS_ALL) >> 6; ucontrol->value.enumerated.item[1] = (chip->image[CS4231_RIGHT_INPUT] & CS4231_MIXS_ALL) >> 6; spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } static int snd_wss_put_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_wss *chip = snd_kcontrol_chip(kcontrol); unsigned long flags; unsigned short left, right; int change; if (ucontrol->value.enumerated.item[0] > 3 || ucontrol->value.enumerated.item[1] > 3) return -EINVAL; left = ucontrol->value.enumerated.item[0] << 6; right = ucontrol->value.enumerated.item[1] << 6; spin_lock_irqsave(&chip->reg_lock, flags); left = (chip->image[CS4231_LEFT_INPUT] & ~CS4231_MIXS_ALL) | left; right = (chip->image[CS4231_RIGHT_INPUT] & ~CS4231_MIXS_ALL) | right; change = left != chip->image[CS4231_LEFT_INPUT] || right != chip->image[CS4231_RIGHT_INPUT]; snd_wss_out(chip, CS4231_LEFT_INPUT, left); snd_wss_out(chip, CS4231_RIGHT_INPUT, right); spin_unlock_irqrestore(&chip->reg_lock, flags); return change; } int snd_wss_info_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int mask = (kcontrol->private_value >> 16) & 0xff; uinfo->type = mask == 1 ? 
SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } EXPORT_SYMBOL(snd_wss_info_single); int snd_wss_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_wss *chip = snd_kcontrol_chip(kcontrol); unsigned long flags; int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; spin_lock_irqsave(&chip->reg_lock, flags); ucontrol->value.integer.value[0] = (chip->image[reg] >> shift) & mask; spin_unlock_irqrestore(&chip->reg_lock, flags); if (invert) ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; return 0; } EXPORT_SYMBOL(snd_wss_get_single); int snd_wss_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_wss *chip = snd_kcontrol_chip(kcontrol); unsigned long flags; int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; int change; unsigned short val; val = (ucontrol->value.integer.value[0] & mask); if (invert) val = mask - val; val <<= shift; spin_lock_irqsave(&chip->reg_lock, flags); val = (chip->image[reg] & ~(mask << shift)) | val; change = val != chip->image[reg]; snd_wss_out(chip, reg, val); spin_unlock_irqrestore(&chip->reg_lock, flags); return change; } EXPORT_SYMBOL(snd_wss_put_single); int snd_wss_info_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int mask = (kcontrol->private_value >> 24) & 0xff; uinfo->type = mask == 1 ? 
SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } EXPORT_SYMBOL(snd_wss_info_double); int snd_wss_get_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_wss *chip = snd_kcontrol_chip(kcontrol); unsigned long flags; int left_reg = kcontrol->private_value & 0xff; int right_reg = (kcontrol->private_value >> 8) & 0xff; int shift_left = (kcontrol->private_value >> 16) & 0x07; int shift_right = (kcontrol->private_value >> 19) & 0x07; int mask = (kcontrol->private_value >> 24) & 0xff; int invert = (kcontrol->private_value >> 22) & 1; spin_lock_irqsave(&chip->reg_lock, flags); ucontrol->value.integer.value[0] = (chip->image[left_reg] >> shift_left) & mask; ucontrol->value.integer.value[1] = (chip->image[right_reg] >> shift_right) & mask; spin_unlock_irqrestore(&chip->reg_lock, flags); if (invert) { ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; ucontrol->value.integer.value[1] = mask - ucontrol->value.integer.value[1]; } return 0; } EXPORT_SYMBOL(snd_wss_get_double); int snd_wss_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_wss *chip = snd_kcontrol_chip(kcontrol); unsigned long flags; int left_reg = kcontrol->private_value & 0xff; int right_reg = (kcontrol->private_value >> 8) & 0xff; int shift_left = (kcontrol->private_value >> 16) & 0x07; int shift_right = (kcontrol->private_value >> 19) & 0x07; int mask = (kcontrol->private_value >> 24) & 0xff; int invert = (kcontrol->private_value >> 22) & 1; int change; unsigned short val1, val2; val1 = ucontrol->value.integer.value[0] & mask; val2 = ucontrol->value.integer.value[1] & mask; if (invert) { val1 = mask - val1; val2 = mask - val2; } val1 <<= shift_left; val2 <<= shift_right; spin_lock_irqsave(&chip->reg_lock, flags); if (left_reg != right_reg) { val1 = (chip->image[left_reg] & ~(mask << shift_left)) | val1; val2 
= (chip->image[right_reg] & ~(mask << shift_right)) | val2; change = val1 != chip->image[left_reg] || val2 != chip->image[right_reg]; snd_wss_out(chip, left_reg, val1); snd_wss_out(chip, right_reg, val2); } else { mask = (mask << shift_left) | (mask << shift_right); val1 = (chip->image[left_reg] & ~mask) | val1 | val2; change = val1 != chip->image[left_reg]; snd_wss_out(chip, left_reg, val1); } spin_unlock_irqrestore(&chip->reg_lock, flags); return change; } EXPORT_SYMBOL(snd_wss_put_double); static const DECLARE_TLV_DB_SCALE(db_scale_6bit, -9450, 150, 0); static const DECLARE_TLV_DB_SCALE(db_scale_5bit_12db_max, -3450, 150, 0); static const DECLARE_TLV_DB_SCALE(db_scale_rec_gain, 0, 150, 0); static const DECLARE_TLV_DB_SCALE(db_scale_4bit, -4500, 300, 0); static struct snd_kcontrol_new snd_wss_controls[] = { WSS_DOUBLE("PCM Playback Switch", 0, CS4231_LEFT_OUTPUT, CS4231_RIGHT_OUTPUT, 7, 7, 1, 1), WSS_DOUBLE_TLV("PCM Playback Volume", 0, CS4231_LEFT_OUTPUT, CS4231_RIGHT_OUTPUT, 0, 0, 63, 1, db_scale_6bit), WSS_DOUBLE("Aux Playback Switch", 0, CS4231_AUX1_LEFT_INPUT, CS4231_AUX1_RIGHT_INPUT, 7, 7, 1, 1), WSS_DOUBLE_TLV("Aux Playback Volume", 0, CS4231_AUX1_LEFT_INPUT, CS4231_AUX1_RIGHT_INPUT, 0, 0, 31, 1, db_scale_5bit_12db_max), WSS_DOUBLE("Aux Playback Switch", 1, CS4231_AUX2_LEFT_INPUT, CS4231_AUX2_RIGHT_INPUT, 7, 7, 1, 1), WSS_DOUBLE_TLV("Aux Playback Volume", 1, CS4231_AUX2_LEFT_INPUT, CS4231_AUX2_RIGHT_INPUT, 0, 0, 31, 1, db_scale_5bit_12db_max), WSS_DOUBLE_TLV("Capture Volume", 0, CS4231_LEFT_INPUT, CS4231_RIGHT_INPUT, 0, 0, 15, 0, db_scale_rec_gain), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Source", .info = snd_wss_info_mux, .get = snd_wss_get_mux, .put = snd_wss_put_mux, }, WSS_DOUBLE("Mic Boost (+20dB)", 0, CS4231_LEFT_INPUT, CS4231_RIGHT_INPUT, 5, 5, 1, 0), WSS_SINGLE("Loopback Capture Switch", 0, CS4231_LOOPBACK, 0, 1, 0), WSS_SINGLE_TLV("Loopback Capture Volume", 0, CS4231_LOOPBACK, 2, 63, 1, db_scale_6bit), WSS_DOUBLE("Line Playback 
Switch", 0, CS4231_LEFT_LINE_IN, CS4231_RIGHT_LINE_IN, 7, 7, 1, 1), WSS_DOUBLE_TLV("Line Playback Volume", 0, CS4231_LEFT_LINE_IN, CS4231_RIGHT_LINE_IN, 0, 0, 31, 1, db_scale_5bit_12db_max), WSS_SINGLE("Beep Playback Switch", 0, CS4231_MONO_CTRL, 7, 1, 1), WSS_SINGLE_TLV("Beep Playback Volume", 0, CS4231_MONO_CTRL, 0, 15, 1, db_scale_4bit), WSS_SINGLE("Mono Output Playback Switch", 0, CS4231_MONO_CTRL, 6, 1, 1), WSS_SINGLE("Beep Bypass Playback Switch", 0, CS4231_MONO_CTRL, 5, 1, 0), }; int snd_wss_mixer(struct snd_wss *chip) { struct snd_card *card; unsigned int idx; int err; int count = ARRAY_SIZE(snd_wss_controls); if (snd_BUG_ON(!chip || !chip->pcm)) return -EINVAL; card = chip->card; strcpy(card->mixername, chip->pcm->name); /* Use only the first 11 entries on AD1848 */ if (chip->hardware & WSS_HW_AD1848_MASK) count = 11; /* There is no loopback on OPTI93X */ else if (chip->hardware == WSS_HW_OPTI93X) count = 9; for (idx = 0; idx < count; idx++) { err = snd_ctl_add(card, snd_ctl_new1(&snd_wss_controls[idx], chip)); if (err < 0) return err; } return 0; } EXPORT_SYMBOL(snd_wss_mixer); const struct snd_pcm_ops *snd_wss_get_pcm_ops(int direction) { return direction == SNDRV_PCM_STREAM_PLAYBACK ? &snd_wss_playback_ops : &snd_wss_capture_ops; } EXPORT_SYMBOL(snd_wss_get_pcm_ops); /* * INIT part */ static int __init alsa_wss_init(void) { return 0; } static void __exit alsa_wss_exit(void) { } module_init(alsa_wss_init); module_exit(alsa_wss_exit);
gpl-2.0
Tim1928/DBK-3.0-4.2
drivers/base/devres.c
3628
16330
/* * drivers/base/devres.c - device resource management * * Copyright (c) 2006 SUSE Linux Products GmbH * Copyright (c) 2006 Tejun Heo <teheo@suse.de> * * This file is released under the GPLv2. */ #include <linux/device.h> #include <linux/module.h> #include <linux/slab.h> #include "base.h" struct devres_node { struct list_head entry; dr_release_t release; #ifdef CONFIG_DEBUG_DEVRES const char *name; size_t size; #endif }; struct devres { struct devres_node node; /* -- 3 pointers */ unsigned long long data[]; /* guarantee ull alignment */ }; struct devres_group { struct devres_node node[2]; void *id; int color; /* -- 8 pointers */ }; #ifdef CONFIG_DEBUG_DEVRES static int log_devres = 0; module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR); static void set_node_dbginfo(struct devres_node *node, const char *name, size_t size) { node->name = name; node->size = size; } static void devres_log(struct device *dev, struct devres_node *node, const char *op) { if (unlikely(log_devres)) dev_printk(KERN_ERR, dev, "DEVRES %3s %p %s (%lu bytes)\n", op, node, node->name, (unsigned long)node->size); } #else /* CONFIG_DEBUG_DEVRES */ #define set_node_dbginfo(node, n, s) do {} while (0) #define devres_log(dev, node, op) do {} while (0) #endif /* CONFIG_DEBUG_DEVRES */ /* * Release functions for devres group. These callbacks are used only * for identification. 
*/ static void group_open_release(struct device *dev, void *res) { /* noop */ } static void group_close_release(struct device *dev, void *res) { /* noop */ } static struct devres_group * node_to_group(struct devres_node *node) { if (node->release == &group_open_release) return container_of(node, struct devres_group, node[0]); if (node->release == &group_close_release) return container_of(node, struct devres_group, node[1]); return NULL; } static __always_inline struct devres * alloc_dr(dr_release_t release, size_t size, gfp_t gfp) { size_t tot_size = sizeof(struct devres) + size; struct devres *dr; dr = kmalloc_track_caller(tot_size, gfp); if (unlikely(!dr)) return NULL; memset(dr, 0, tot_size); INIT_LIST_HEAD(&dr->node.entry); dr->node.release = release; return dr; } static void add_dr(struct device *dev, struct devres_node *node) { devres_log(dev, node, "ADD"); BUG_ON(!list_empty(&node->entry)); list_add_tail(&node->entry, &dev->devres_head); } #ifdef CONFIG_DEBUG_DEVRES void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp, const char *name) { struct devres *dr; dr = alloc_dr(release, size, gfp); if (unlikely(!dr)) return NULL; set_node_dbginfo(&dr->node, name, size); return dr->data; } EXPORT_SYMBOL_GPL(__devres_alloc); #else /** * devres_alloc - Allocate device resource data * @release: Release function devres will be associated with * @size: Allocation size * @gfp: Allocation flags * * Allocate devres of @size bytes. The allocated area is zeroed, then * associated with @release. The returned pointer can be passed to * other devres_*() functions. * * RETURNS: * Pointer to allocated devres on success, NULL on failure. 
*/ void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp) { struct devres *dr; dr = alloc_dr(release, size, gfp); if (unlikely(!dr)) return NULL; return dr->data; } EXPORT_SYMBOL_GPL(devres_alloc); #endif /** * devres_free - Free device resource data * @res: Pointer to devres data to free * * Free devres created with devres_alloc(). */ void devres_free(void *res) { if (res) { struct devres *dr = container_of(res, struct devres, data); BUG_ON(!list_empty(&dr->node.entry)); kfree(dr); } } EXPORT_SYMBOL_GPL(devres_free); /** * devres_add - Register device resource * @dev: Device to add resource to * @res: Resource to register * * Register devres @res to @dev. @res should have been allocated * using devres_alloc(). On driver detach, the associated release * function will be invoked and devres will be freed automatically. */ void devres_add(struct device *dev, void *res) { struct devres *dr = container_of(res, struct devres, data); unsigned long flags; spin_lock_irqsave(&dev->devres_lock, flags); add_dr(dev, &dr->node); spin_unlock_irqrestore(&dev->devres_lock, flags); } EXPORT_SYMBOL_GPL(devres_add); static struct devres *find_dr(struct device *dev, dr_release_t release, dr_match_t match, void *match_data) { struct devres_node *node; list_for_each_entry_reverse(node, &dev->devres_head, entry) { struct devres *dr = container_of(node, struct devres, node); if (node->release != release) continue; if (match && !match(dev, dr->data, match_data)) continue; return dr; } return NULL; } /** * devres_find - Find device resource * @dev: Device to lookup resource from * @release: Look for resources associated with this release function * @match: Match function (optional) * @match_data: Data for the match function * * Find the latest devres of @dev which is associated with @release * and for which @match returns 1. If @match is NULL, it's considered * to match all. * * RETURNS: * Pointer to found devres, NULL if not found. 
*/ void * devres_find(struct device *dev, dr_release_t release, dr_match_t match, void *match_data) { struct devres *dr; unsigned long flags; spin_lock_irqsave(&dev->devres_lock, flags); dr = find_dr(dev, release, match, match_data); spin_unlock_irqrestore(&dev->devres_lock, flags); if (dr) return dr->data; return NULL; } EXPORT_SYMBOL_GPL(devres_find); /** * devres_get - Find devres, if non-existent, add one atomically * @dev: Device to lookup or add devres for * @new_res: Pointer to new initialized devres to add if not found * @match: Match function (optional) * @match_data: Data for the match function * * Find the latest devres of @dev which has the same release function * as @new_res and for which @match return 1. If found, @new_res is * freed; otherwise, @new_res is added atomically. * * RETURNS: * Pointer to found or added devres. */ void * devres_get(struct device *dev, void *new_res, dr_match_t match, void *match_data) { struct devres *new_dr = container_of(new_res, struct devres, data); struct devres *dr; unsigned long flags; spin_lock_irqsave(&dev->devres_lock, flags); dr = find_dr(dev, new_dr->node.release, match, match_data); if (!dr) { add_dr(dev, &new_dr->node); dr = new_dr; new_dr = NULL; } spin_unlock_irqrestore(&dev->devres_lock, flags); devres_free(new_dr); return dr->data; } EXPORT_SYMBOL_GPL(devres_get); /** * devres_remove - Find a device resource and remove it * @dev: Device to find resource from * @release: Look for resources associated with this release function * @match: Match function (optional) * @match_data: Data for the match function * * Find the latest devres of @dev associated with @release and for * which @match returns 1. If @match is NULL, it's considered to * match all. If found, the resource is removed atomically and * returned. * * RETURNS: * Pointer to removed devres on success, NULL if not found. 
*/ void * devres_remove(struct device *dev, dr_release_t release, dr_match_t match, void *match_data) { struct devres *dr; unsigned long flags; spin_lock_irqsave(&dev->devres_lock, flags); dr = find_dr(dev, release, match, match_data); if (dr) { list_del_init(&dr->node.entry); devres_log(dev, &dr->node, "REM"); } spin_unlock_irqrestore(&dev->devres_lock, flags); if (dr) return dr->data; return NULL; } EXPORT_SYMBOL_GPL(devres_remove); /** * devres_destroy - Find a device resource and destroy it * @dev: Device to find resource from * @release: Look for resources associated with this release function * @match: Match function (optional) * @match_data: Data for the match function * * Find the latest devres of @dev associated with @release and for * which @match returns 1. If @match is NULL, it's considered to * match all. If found, the resource is removed atomically and freed. * * RETURNS: * 0 if devres is found and freed, -ENOENT if not found. */ int devres_destroy(struct device *dev, dr_release_t release, dr_match_t match, void *match_data) { void *res; res = devres_remove(dev, release, match, match_data); if (unlikely(!res)) return -ENOENT; devres_free(res); return 0; } EXPORT_SYMBOL_GPL(devres_destroy); static int remove_nodes(struct device *dev, struct list_head *first, struct list_head *end, struct list_head *todo) { int cnt = 0, nr_groups = 0; struct list_head *cur; /* First pass - move normal devres entries to @todo and clear * devres_group colors. */ cur = first; while (cur != end) { struct devres_node *node; struct devres_group *grp; node = list_entry(cur, struct devres_node, entry); cur = cur->next; grp = node_to_group(node); if (grp) { /* clear color of group markers in the first pass */ grp->color = 0; nr_groups++; } else { /* regular devres entry */ if (&node->entry == first) first = first->next; list_move_tail(&node->entry, todo); cnt++; } } if (!nr_groups) return cnt; /* Second pass - Scan groups and color them. 
A group gets * color value of two iff the group is wholly contained in * [cur, end). That is, for a closed group, both opening and * closing markers should be in the range, while just the * opening marker is enough for an open group. */ cur = first; while (cur != end) { struct devres_node *node; struct devres_group *grp; node = list_entry(cur, struct devres_node, entry); cur = cur->next; grp = node_to_group(node); BUG_ON(!grp || list_empty(&grp->node[0].entry)); grp->color++; if (list_empty(&grp->node[1].entry)) grp->color++; BUG_ON(grp->color <= 0 || grp->color > 2); if (grp->color == 2) { /* No need to update cur or end. The removed * nodes are always before both. */ list_move_tail(&grp->node[0].entry, todo); list_del_init(&grp->node[1].entry); } } return cnt; } static int release_nodes(struct device *dev, struct list_head *first, struct list_head *end, unsigned long flags) { LIST_HEAD(todo); int cnt; struct devres *dr, *tmp; cnt = remove_nodes(dev, first, end, &todo); spin_unlock_irqrestore(&dev->devres_lock, flags); /* Release. Note that both devres and devres_group are * handled as devres in the following loop. This is safe. */ list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) { devres_log(dev, &dr->node, "REL"); dr->node.release(dev, dr->data); kfree(dr); } return cnt; } /** * devres_release_all - Release all managed resources * @dev: Device to release resources for * * Release all resources associated with @dev. This function is * called on driver detach. */ int devres_release_all(struct device *dev) { unsigned long flags; /* Looks like an uninitialized device structure */ if (WARN_ON(dev->devres_head.next == NULL)) return -ENODEV; spin_lock_irqsave(&dev->devres_lock, flags); return release_nodes(dev, dev->devres_head.next, &dev->devres_head, flags); } /** * devres_open_group - Open a new devres group * @dev: Device to open devres group for * @id: Separator ID * @gfp: Allocation flags * * Open a new devres group for @dev with @id. 
For @id, using a * pointer to an object which won't be used for another group is * recommended. If @id is NULL, address-wise unique ID is created. * * RETURNS: * ID of the new group, NULL on failure. */ void * devres_open_group(struct device *dev, void *id, gfp_t gfp) { struct devres_group *grp; unsigned long flags; grp = kmalloc(sizeof(*grp), gfp); if (unlikely(!grp)) return NULL; grp->node[0].release = &group_open_release; grp->node[1].release = &group_close_release; INIT_LIST_HEAD(&grp->node[0].entry); INIT_LIST_HEAD(&grp->node[1].entry); set_node_dbginfo(&grp->node[0], "grp<", 0); set_node_dbginfo(&grp->node[1], "grp>", 0); grp->id = grp; if (id) grp->id = id; spin_lock_irqsave(&dev->devres_lock, flags); add_dr(dev, &grp->node[0]); spin_unlock_irqrestore(&dev->devres_lock, flags); return grp->id; } EXPORT_SYMBOL_GPL(devres_open_group); /* Find devres group with ID @id. If @id is NULL, look for the latest. */ static struct devres_group * find_group(struct device *dev, void *id) { struct devres_node *node; list_for_each_entry_reverse(node, &dev->devres_head, entry) { struct devres_group *grp; if (node->release != &group_open_release) continue; grp = container_of(node, struct devres_group, node[0]); if (id) { if (grp->id == id) return grp; } else if (list_empty(&grp->node[1].entry)) return grp; } return NULL; } /** * devres_close_group - Close a devres group * @dev: Device to close devres group for * @id: ID of target group, can be NULL * * Close the group identified by @id. If @id is NULL, the latest open * group is selected. 
*/ void devres_close_group(struct device *dev, void *id) { struct devres_group *grp; unsigned long flags; spin_lock_irqsave(&dev->devres_lock, flags); grp = find_group(dev, id); if (grp) add_dr(dev, &grp->node[1]); else WARN_ON(1); spin_unlock_irqrestore(&dev->devres_lock, flags); } EXPORT_SYMBOL_GPL(devres_close_group); /** * devres_remove_group - Remove a devres group * @dev: Device to remove group for * @id: ID of target group, can be NULL * * Remove the group identified by @id. If @id is NULL, the latest * open group is selected. Note that removing a group doesn't affect * any other resources. */ void devres_remove_group(struct device *dev, void *id) { struct devres_group *grp; unsigned long flags; spin_lock_irqsave(&dev->devres_lock, flags); grp = find_group(dev, id); if (grp) { list_del_init(&grp->node[0].entry); list_del_init(&grp->node[1].entry); devres_log(dev, &grp->node[0], "REM"); } else WARN_ON(1); spin_unlock_irqrestore(&dev->devres_lock, flags); kfree(grp); } EXPORT_SYMBOL_GPL(devres_remove_group); /** * devres_release_group - Release resources in a devres group * @dev: Device to release group for * @id: ID of target group, can be NULL * * Release all resources in the group identified by @id. If @id is * NULL, the latest open group is selected. The selected group and * groups properly nested inside the selected group are removed. * * RETURNS: * The number of released non-group resources. 
*/ int devres_release_group(struct device *dev, void *id) { struct devres_group *grp; unsigned long flags; int cnt = 0; spin_lock_irqsave(&dev->devres_lock, flags); grp = find_group(dev, id); if (grp) { struct list_head *first = &grp->node[0].entry; struct list_head *end = &dev->devres_head; if (!list_empty(&grp->node[1].entry)) end = grp->node[1].entry.next; cnt = release_nodes(dev, first, end, flags); } else { WARN_ON(1); spin_unlock_irqrestore(&dev->devres_lock, flags); } return cnt; } EXPORT_SYMBOL_GPL(devres_release_group); /* * Managed kzalloc/kfree */ static void devm_kzalloc_release(struct device *dev, void *res) { /* noop */ } static int devm_kzalloc_match(struct device *dev, void *res, void *data) { return res == data; } /** * devm_kzalloc - Resource-managed kzalloc * @dev: Device to allocate memory for * @size: Allocation size * @gfp: Allocation gfp flags * * Managed kzalloc. Memory allocated with this function is * automatically freed on driver detach. Like all other devres * resources, guaranteed alignment is unsigned long long. * * RETURNS: * Pointer to allocated memory on success, NULL on failure. */ void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) { struct devres *dr; /* use raw alloc_dr for kmalloc caller tracing */ dr = alloc_dr(devm_kzalloc_release, size, gfp); if (unlikely(!dr)) return NULL; set_node_dbginfo(&dr->node, "devm_kzalloc_release", size); devres_add(dev, dr->data); return dr->data; } EXPORT_SYMBOL_GPL(devm_kzalloc); /** * devm_kfree - Resource-managed kfree * @dev: Device this memory belongs to * @p: Memory to free * * Free memory allocated with dev_kzalloc(). */ void devm_kfree(struct device *dev, void *p) { int rc; rc = devres_destroy(dev, devm_kzalloc_release, devm_kzalloc_match, p); WARN_ON(rc); } EXPORT_SYMBOL_GPL(devm_kfree);
gpl-2.0
emceethemouth/kernel_jflte
arch/arm/oprofile/common.c
3884
2967
/** * @file common.c * * @remark Copyright 2004 Oprofile Authors * @remark Copyright 2010 ARM Ltd. * @remark Read the file COPYING * * @author Zwane Mwaikambo * @author Will Deacon [move to perf] */ #include <linux/cpumask.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/oprofile.h> #include <linux/perf_event.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <asm/stacktrace.h> #include <linux/uaccess.h> #include <asm/perf_event.h> #include <asm/ptrace.h> #ifdef CONFIG_HW_PERF_EVENTS char *op_name_from_perf_id(void) { enum arm_perf_pmu_ids id = armpmu_get_pmu_id(); switch (id) { case ARM_PERF_PMU_ID_XSCALE1: return "arm/xscale1"; case ARM_PERF_PMU_ID_XSCALE2: return "arm/xscale2"; case ARM_PERF_PMU_ID_V6: return "arm/armv6"; case ARM_PERF_PMU_ID_V6MP: return "arm/mpcore"; case ARM_PERF_PMU_ID_CA5: return "arm/armv7"; case ARM_PERF_PMU_ID_CA8: return "arm/armv7"; case ARM_PERF_PMU_ID_CA9: return "arm/armv7-ca9"; case ARM_PERF_PMU_ID_SCORPION: return "arm/armv7-scorpion"; case ARM_PERF_PMU_ID_SCORPIONMP: return "arm/armv7-scorpionmp"; case ARM_PERF_PMU_ID_KRAIT: return "arm/armv7-krait"; default: return NULL; } } #endif static int report_trace(struct stackframe *frame, void *d) { unsigned int *depth = d; if (*depth) { oprofile_add_trace(frame->pc); (*depth)--; } return *depth == 0; } /* * The registers we're interested in are at the end of the variable * length saved register structure. 
The fp points at the end of this * structure so the address of this struct is: * (struct frame_tail *)(xxx->fp)-1 */ struct frame_tail { struct frame_tail *fp; unsigned long sp; unsigned long lr; } __attribute__((packed)); static struct frame_tail* user_backtrace(struct frame_tail *tail) { struct frame_tail buftail[2]; /* Also check accessibility of one struct frame_tail beyond */ if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) return NULL; if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) return NULL; oprofile_add_trace(buftail[0].lr); /* frame pointers should strictly progress back up the stack * (towards higher addresses) */ if (tail + 1 >= buftail[0].fp) return NULL; return buftail[0].fp-1; } static void arm_backtrace(struct pt_regs * const regs, unsigned int depth) { struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1; if (!user_mode(regs)) { struct stackframe frame; frame.fp = regs->ARM_fp; frame.sp = regs->ARM_sp; frame.lr = regs->ARM_lr; frame.pc = regs->ARM_pc; walk_stackframe(&frame, report_trace, &depth); return; } while (depth-- && tail && !((unsigned long) tail & 3)) tail = user_backtrace(tail); } int __init oprofile_arch_init(struct oprofile_operations *ops) { /* provide backtrace support also in timer mode: */ ops->backtrace = arm_backtrace; return oprofile_perf_init(ops); } void oprofile_arch_exit(void) { oprofile_perf_exit(); }
gpl-2.0
PyYoshi/android_kernel_kyocera_l03
drivers/scsi/libsas/sas_phy.c
4908
4700
/* * Serial Attached SCSI (SAS) Phy class * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * This file is licensed under GPLv2. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include "sas_internal.h" #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> #include "../scsi_sas_internal.h" /* ---------- Phy events ---------- */ static void sas_phye_loss_of_signal(struct work_struct *work) { struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending); phy->error = 0; sas_deform_port(phy, 1); } static void sas_phye_oob_done(struct work_struct *work) { struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending); phy->error = 0; } static void sas_phye_oob_error(struct work_struct *work) { struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; struct sas_ha_struct *sas_ha = phy->ha; struct asd_sas_port *port = phy->port; struct sas_internal *i = to_sas_internal(sas_ha->core.shost->transportt); clear_bit(PHYE_OOB_ERROR, &phy->phy_events_pending); sas_deform_port(phy, 1); if (!port && phy->enabled && i->dft->lldd_control_phy) { 
phy->error++; switch (phy->error) { case 1: case 2: i->dft->lldd_control_phy(phy, PHY_FUNC_HARD_RESET, NULL); break; case 3: default: phy->error = 0; phy->enabled = 0; i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL); break; } } } static void sas_phye_spinup_hold(struct work_struct *work) { struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; struct sas_ha_struct *sas_ha = phy->ha; struct sas_internal *i = to_sas_internal(sas_ha->core.shost->transportt); clear_bit(PHYE_SPINUP_HOLD, &phy->phy_events_pending); phy->error = 0; i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL); } /* ---------- Phy class registration ---------- */ int sas_register_phys(struct sas_ha_struct *sas_ha) { int i; static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = { [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal, [PHYE_OOB_DONE] = sas_phye_oob_done, [PHYE_OOB_ERROR] = sas_phye_oob_error, [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold, }; static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = { [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed, [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd, [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err, [PORTE_TIMER_EVENT] = sas_porte_timer_event, [PORTE_HARD_RESET] = sas_porte_hard_reset, }; /* Now register the phys. 
*/ for (i = 0; i < sas_ha->num_phys; i++) { int k; struct asd_sas_phy *phy = sas_ha->sas_phy[i]; phy->error = 0; INIT_LIST_HEAD(&phy->port_phy_el); for (k = 0; k < PORT_NUM_EVENTS; k++) { INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]); phy->port_events[k].phy = phy; } for (k = 0; k < PHY_NUM_EVENTS; k++) { INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]); phy->phy_events[k].phy = phy; } phy->port = NULL; phy->ha = sas_ha; spin_lock_init(&phy->frame_rcvd_lock); spin_lock_init(&phy->sas_prim_lock); phy->frame_rcvd_size = 0; phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, i); if (!phy->phy) return -ENOMEM; phy->phy->identify.initiator_port_protocols = phy->iproto; phy->phy->identify.target_port_protocols = phy->tproto; phy->phy->identify.sas_address = SAS_ADDR(sas_ha->sas_addr); phy->phy->identify.phy_identifier = i; phy->phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; phy->phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; phy->phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN; phy->phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN; phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; sas_phy_add(phy->phy); } return 0; }
gpl-2.0
ench0/kernel_samsung_hlte
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
4908
10370
/******************************************************************************* Intel 10 Gigabit PCI Express Linux driver Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include "ixgbe.h" #include "ixgbe_type.h" #include "ixgbe_dcb.h" #include "ixgbe_dcb_82598.h" #include "ixgbe_dcb_82599.h" /** * ixgbe_ieee_credits - This calculates the ieee traffic class * credits from the configured bandwidth percentages. Credits * are the smallest unit programmable into the underlying * hardware. The IEEE 802.1Qaz specification do not use bandwidth * groups so this is much simplified from the CEE case. 
*/ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame) { int min_percent = 100; int min_credit, multiplier; int i; min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / DCB_CREDIT_QUANTUM; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { if (bw[i] < min_percent && bw[i]) min_percent = bw[i]; } multiplier = (min_credit / min_percent) + 1; /* Find out the hw credits for each TC */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL); if (val < min_credit) val = min_credit; refill[i] = val; max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit; } return 0; } /** * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits * @ixgbe_dcb_config: Struct containing DCB settings. * @direction: Configuring either Tx or Rx. * * This function calculates the credits allocated to each traffic class. * It should be called only after the rules are checked by * ixgbe_dcb_check_config(). */ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config, int max_frame, u8 direction) { struct tc_bw_alloc *p; int min_credit; int min_multiplier; int min_percent = 100; s32 ret_val = 0; /* Initialization values default for Tx settings */ u32 credit_refill = 0; u32 credit_max = 0; u16 link_percentage = 0; u8 bw_percent = 0; u8 i; if (dcb_config == NULL) { ret_val = DCB_ERR_CONFIG; goto out; } min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / DCB_CREDIT_QUANTUM; /* Find smallest link percentage */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { p = &dcb_config->tc_config[i].path[direction]; bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; link_percentage = p->bwg_percent; link_percentage = (link_percentage * bw_percent) / 100; if (link_percentage && link_percentage < min_percent) min_percent = link_percentage; } /* * The ratio between traffic classes will control the bandwidth * percentages seen on the wire. To calculate this ratio we use * a multiplier. 
It is required that the refill credits must be * larger than the max frame size so here we find the smallest * multiplier that will allow all bandwidth percentages to be * greater than the max frame size. */ min_multiplier = (min_credit / min_percent) + 1; /* Find out the link percentage for each TC first */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { p = &dcb_config->tc_config[i].path[direction]; bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; link_percentage = p->bwg_percent; /* Must be careful of integer division for very small nums */ link_percentage = (link_percentage * bw_percent) / 100; if (p->bwg_percent > 0 && link_percentage == 0) link_percentage = 1; /* Save link_percentage for reference */ p->link_percent = (u8)link_percentage; /* Calculate credit refill ratio using multiplier */ credit_refill = min(link_percentage * min_multiplier, MAX_CREDIT_REFILL); p->data_credits_refill = (u16)credit_refill; /* Calculate maximum credit for the TC */ credit_max = (link_percentage * MAX_CREDIT) / 100; /* * Adjustment based on rule checking, if the percentage * of a TC is too small, the maximum credit may not be * enough to send out a jumbo frame in data plane arbitration. */ if (credit_max && (credit_max < min_credit)) credit_max = min_credit; if (direction == DCB_TX_CONFIG) { /* * Adjustment based on rule checking, if the * percentage of a TC is too small, the maximum * credit may not be enough to send out a TSO * packet in descriptor plane arbitration. 
*/ if ((hw->mac.type == ixgbe_mac_82598EB) && credit_max && (credit_max < MINIMUM_CREDIT_FOR_TSO)) credit_max = MINIMUM_CREDIT_FOR_TSO; dcb_config->tc_config[i].desc_credits_max = (u16)credit_max; } p->data_credits_max = (u16)credit_max; } out: return ret_val; } void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) { int i; *pfc_en = 0; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) *pfc_en |= !!(cfg->tc_config[i].dcb_pfc & 0xF) << i; } void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction, u16 *refill) { struct tc_bw_alloc *p; int i; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { p = &cfg->tc_config[i].path[direction]; refill[i] = p->data_credits_refill; } } void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max) { int i; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) max[i] = cfg->tc_config[i].desc_credits_max; } void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction, u8 *bwgid) { struct tc_bw_alloc *p; int i; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { p = &cfg->tc_config[i].path[direction]; bwgid[i] = p->bwg_id; } } void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, u8 *ptype) { struct tc_bw_alloc *p; int i; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { p = &cfg->tc_config[i].path[direction]; ptype[i] = p->prio_type; } } void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map) { int i, up; unsigned long bitmap; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { bitmap = cfg->tc_config[i].path[direction].up_to_tc_bitmap; for_each_set_bit(up, &bitmap, MAX_USER_PRIORITY) map[up] = i; } } /** * ixgbe_dcb_hw_config - Config and enable DCB * @hw: pointer to hardware structure * @dcb_config: pointer to ixgbe_dcb_config structure * * Configure dcb settings and enable dcb mode. 
*/ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { s32 ret = 0; u8 pfc_en; u8 ptype[MAX_TRAFFIC_CLASS]; u8 bwgid[MAX_TRAFFIC_CLASS]; u8 prio_tc[MAX_TRAFFIC_CLASS]; u16 refill[MAX_TRAFFIC_CLASS]; u16 max[MAX_TRAFFIC_CLASS]; /* Unpack CEE standard containers */ ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en); ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill); ixgbe_dcb_unpack_max(dcb_config, max); ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid); ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype); ixgbe_dcb_unpack_map(dcb_config, DCB_TX_CONFIG, prio_tc); switch (hw->mac.type) { case ixgbe_mac_82598EB: ret = ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max, bwgid, ptype); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: ret = ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, bwgid, ptype, prio_tc); break; default: break; } return ret; } /* Helper routines to abstract HW specifics from DCB netlink ops */ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) { int ret = -EINVAL; switch (hw->mac.type) { case ixgbe_mac_82598EB: ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); break; default: break; } return ret; } s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame) { __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; int i; /* naively give each TC a bwg to map onto CEE hardware */ __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; /* Map TSA onto CEE prio type */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: prio_type[i] = 2; break; case IEEE_8021QAZ_TSA_ETS: prio_type[i] = 0; break; default: /* Hardware only supports priority strict or * ETS transmission selection algorithms if * we receive some other value from dcbnl * throw an error */ return -EINVAL; 
} } ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame); return ixgbe_dcb_hw_ets_config(hw, refill, max, bwg_id, prio_type, ets->prio_tc); } s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) { switch (hw->mac.type) { case ixgbe_mac_82598EB: ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type); ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, prio_type); ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, prio_type); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type, prio_tc); ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, prio_type); ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, prio_type, prio_tc); break; default: break; } return 0; }
gpl-2.0
zarboz/Beastmode_LTE_2.0
arch/m32r/lib/delay.c
13612
2985
/* * linux/arch/m32r/lib/delay.c * * Copyright (c) 2002 Hitoshi Yamamoto, Hirokazu Takata * Copyright (c) 2004 Hirokazu Takata */ #include <linux/param.h> #include <linux/module.h> #ifdef CONFIG_SMP #include <linux/sched.h> #include <asm/current.h> #include <asm/smp.h> #endif /* CONFIG_SMP */ #include <asm/processor.h> void __delay(unsigned long loops) { #ifdef CONFIG_ISA_DUAL_ISSUE __asm__ __volatile__ ( "beqz %0, 2f \n\t" "addi %0, #-1 \n\t" " .fillinsn \n\t" "1: \n\t" "cmpz %0 || addi %0, #-1 \n\t" "bc 2f || cmpz %0 \n\t" "bc 2f || addi %0, #-1 \n\t" "cmpz %0 || addi %0, #-1 \n\t" "bc 2f || cmpz %0 \n\t" "bnc 1b || addi %0, #-1 \n\t" " .fillinsn \n\t" "2: \n\t" : "+r" (loops) : "r" (0) : "cbit" ); #else __asm__ __volatile__ ( "beqz %0, 2f \n\t" " .fillinsn \n\t" "1: \n\t" "addi %0, #-1 \n\t" "blez %0, 2f \n\t" "addi %0, #-1 \n\t" "blez %0, 2f \n\t" "addi %0, #-1 \n\t" "blez %0, 2f \n\t" "addi %0, #-1 \n\t" "bgtz %0, 1b \n\t" " .fillinsn \n\t" "2: \n\t" : "+r" (loops) : "r" (0) ); #endif } void __const_udelay(unsigned long xloops) { #if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2) /* * loops [1] = (xloops >> 32) [sec] * loops_per_jiffy [1/jiffy] * * HZ [jiffy/sec] * = (xloops >> 32) [sec] * (loops_per_jiffy * HZ) [1/sec] * = (((xloops * loops_per_jiffy) >> 32) * HZ) [1] * * NOTE: * - '[]' depicts variable's dimension in the above equation. * - "rac" instruction rounds the accumulator in word size. 
*/ __asm__ __volatile__ ( "srli %0, #1 \n\t" "mulwhi %0, %1 ; a0 \n\t" "mulwu1 %0, %1 ; a1 \n\t" "sadd ; a0 += (a1 >> 16) \n\t" "rac a0, a0, #1 \n\t" "mvfacmi %0, a0 \n\t" : "+r" (xloops) : "r" (current_cpu_data.loops_per_jiffy) : "a0", "a1" ); #elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R) /* * u64 ull; * ull = (u64)xloops * (u64)current_cpu_data.loops_per_jiffy; * xloops = (ull >> 32); */ __asm__ __volatile__ ( "and3 r4, %0, #0xffff \n\t" "and3 r5, %1, #0xffff \n\t" "mul r4, r5 \n\t" "srl3 r6, %0, #16 \n\t" "srli r4, #16 \n\t" "mul r5, r6 \n\t" "add r4, r5 \n\t" "and3 r5, %0, #0xffff \n\t" "srl3 r6, %1, #16 \n\t" "mul r5, r6 \n\t" "add r4, r5 \n\t" "srl3 r5, %0, #16 \n\t" "srli r4, #16 \n\t" "mul r5, r6 \n\t" "add r4, r5 \n\t" "mv %0, r4 \n\t" : "+r" (xloops) : "r" (current_cpu_data.loops_per_jiffy) : "r4", "r5", "r6" ); #else #error unknown isa configuration #endif __delay(xloops * HZ); } void __udelay(unsigned long usecs) { __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */ } void __ndelay(unsigned long nsecs) { __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ } EXPORT_SYMBOL(__delay); EXPORT_SYMBOL(__const_udelay); EXPORT_SYMBOL(__udelay); EXPORT_SYMBOL(__ndelay);
gpl-2.0