repo_name (string)
path (string)
copies (string)
size (string)
content (string)
license (string)
Thunderoar/android_kernel_samsung_goyave3g
drivers/acpi/acpi_pad.c
2032
13689
/* * acpi_pad.c ACPI Processor Aggregator Driver * * Copyright (c) 2009, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <linux/kernel.h> #include <linux/cpumask.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/cpu.h> #include <linux/clockchips.h> #include <linux/slab.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <asm/mwait.h> #define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad" #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80 static DEFINE_MUTEX(isolated_cpus_lock); static DEFINE_MUTEX(round_robin_lock); static unsigned long power_saving_mwait_eax; static unsigned char tsc_detected_unstable; static unsigned char tsc_marked_unstable; static unsigned char lapic_detected_unstable; static unsigned char lapic_marked_unstable; static void power_saving_mwait_init(void) { unsigned int eax, ebx, ecx, edx; unsigned int highest_cstate = 0; unsigned int highest_subcstate = 0; int i; if (!boot_cpu_has(X86_FEATURE_MWAIT)) return; if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) return; cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) return; edx >>= MWAIT_SUBSTATE_SIZE; for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { if (edx & MWAIT_SUBSTATE_MASK) { highest_cstate = i; highest_subcstate = edx & MWAIT_SUBSTATE_MASK; } } power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | (highest_subcstate - 1); #if defined(CONFIG_X86) switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_AMD: case X86_VENDOR_INTEL: /* * AMD Fam10h TSC will tick in all * C/P/S0/S1 states when this bit is set. */ if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) tsc_detected_unstable = 1; if (!boot_cpu_has(X86_FEATURE_ARAT)) lapic_detected_unstable = 1; break; default: /* TSC & LAPIC could halt in idle */ tsc_detected_unstable = 1; lapic_detected_unstable = 1; } #endif } static unsigned long cpu_weight[NR_CPUS]; static int tsk_in_cpu[NR_CPUS] = {[0 ... 
NR_CPUS-1] = -1}; static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS); static void round_robin_cpu(unsigned int tsk_index) { struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits); cpumask_var_t tmp; int cpu; unsigned long min_weight = -1; unsigned long uninitialized_var(preferred_cpu); if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) return; mutex_lock(&round_robin_lock); cpumask_clear(tmp); for_each_cpu(cpu, pad_busy_cpus) cpumask_or(tmp, tmp, topology_thread_cpumask(cpu)); cpumask_andnot(tmp, cpu_online_mask, tmp); /* avoid HT sibilings if possible */ if (cpumask_empty(tmp)) cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus); if (cpumask_empty(tmp)) { mutex_unlock(&round_robin_lock); return; } for_each_cpu(cpu, tmp) { if (cpu_weight[cpu] < min_weight) { min_weight = cpu_weight[cpu]; preferred_cpu = cpu; } } if (tsk_in_cpu[tsk_index] != -1) cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus); tsk_in_cpu[tsk_index] = preferred_cpu; cpumask_set_cpu(preferred_cpu, pad_busy_cpus); cpu_weight[preferred_cpu]++; mutex_unlock(&round_robin_lock); set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu)); } static void exit_round_robin(unsigned int tsk_index) { struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits); cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus); tsk_in_cpu[tsk_index] = -1; } static unsigned int idle_pct = 5; /* percentage */ static unsigned int round_robin_time = 1; /* second */ static int power_saving_thread(void *data) { struct sched_param param = {.sched_priority = 1}; int do_sleep; unsigned int tsk_index = (unsigned long)data; u64 last_jiffies = 0; sched_setscheduler(current, SCHED_RR, &param); while (!kthread_should_stop()) { int cpu; u64 expire_time; try_to_freeze(); /* round robin to cpus */ if (last_jiffies + round_robin_time * HZ < jiffies) { last_jiffies = jiffies; round_robin_cpu(tsk_index); } do_sleep = 0; expire_time = jiffies + HZ * (100 - idle_pct) / 100; while (!need_resched()) { if (tsc_detected_unstable && !tsc_marked_unstable) { /* TSC could halt in idle, so notify users */ mark_tsc_unstable("TSC halts in idle"); tsc_marked_unstable = 1; } if (lapic_detected_unstable && !lapic_marked_unstable) { int i; /* LAPIC could halt in idle, so notify users */ for_each_online_cpu(i) clockevents_notify( CLOCK_EVT_NOTIFY_BROADCAST_ON, &i); lapic_marked_unstable = 1; } local_irq_disable(); cpu = smp_processor_id(); if (lapic_marked_unstable) clockevents_notify( CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); stop_critical_timings(); __monitor((void *)&current_thread_info()->flags, 0, 0); smp_mb(); if (!need_resched()) __mwait(power_saving_mwait_eax, 1); start_critical_timings(); if (lapic_marked_unstable) clockevents_notify( CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); local_irq_enable(); if (jiffies > expire_time) { do_sleep = 1; break; } } /* * current sched_rt has threshold for rt task running time. * When a rt task uses 95% CPU time, the rt thread will be * scheduled out for 5% CPU time to not starve other tasks. But * the mechanism only works when all CPUs have RT task running, * as if one CPU hasn't RT task, RT task from other CPUs will * borrow CPU time from this CPU and cause RT task use > 95% * CPU time. To make 'avoid starvation' work, takes a nap here. 
*/ if (do_sleep) schedule_timeout_killable(HZ * idle_pct / 100); } exit_round_robin(tsk_index); return 0; } static struct task_struct *ps_tsks[NR_CPUS]; static unsigned int ps_tsk_num; static int create_power_saving_task(void) { int rc = -ENOMEM; ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread, (void *)(unsigned long)ps_tsk_num, "acpi_pad/%d", ps_tsk_num); rc = PTR_RET(ps_tsks[ps_tsk_num]); if (!rc) ps_tsk_num++; else ps_tsks[ps_tsk_num] = NULL; return rc; } static void destroy_power_saving_task(void) { if (ps_tsk_num > 0) { ps_tsk_num--; kthread_stop(ps_tsks[ps_tsk_num]); ps_tsks[ps_tsk_num] = NULL; } } static void set_power_saving_task_num(unsigned int num) { if (num > ps_tsk_num) { while (ps_tsk_num < num) { if (create_power_saving_task()) return; } } else if (num < ps_tsk_num) { while (ps_tsk_num > num) destroy_power_saving_task(); } } static void acpi_pad_idle_cpus(unsigned int num_cpus) { get_online_cpus(); num_cpus = min_t(unsigned int, num_cpus, num_online_cpus()); set_power_saving_task_num(num_cpus); put_online_cpus(); } static uint32_t acpi_pad_idle_cpus_num(void) { return ps_tsk_num; } static ssize_t acpi_pad_rrtime_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long num; if (kstrtoul(buf, 0, &num)) return -EINVAL; if (num < 1 || num >= 100) return -EINVAL; mutex_lock(&isolated_cpus_lock); round_robin_time = num; mutex_unlock(&isolated_cpus_lock); return count; } static ssize_t acpi_pad_rrtime_show(struct device *dev, struct device_attribute *attr, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", round_robin_time); } static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR, acpi_pad_rrtime_show, acpi_pad_rrtime_store); static ssize_t acpi_pad_idlepct_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long num; if (kstrtoul(buf, 0, &num)) return -EINVAL; if (num < 1 || num >= 100) return -EINVAL; mutex_lock(&isolated_cpus_lock); idle_pct = num; mutex_unlock(&isolated_cpus_lock); return count; } static ssize_t acpi_pad_idlepct_show(struct device *dev, struct device_attribute *attr, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", idle_pct); } static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR, acpi_pad_idlepct_show, acpi_pad_idlepct_store); static ssize_t acpi_pad_idlecpus_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long num; if (kstrtoul(buf, 0, &num)) return -EINVAL; mutex_lock(&isolated_cpus_lock); acpi_pad_idle_cpus(num); mutex_unlock(&isolated_cpus_lock); return count; } static ssize_t acpi_pad_idlecpus_show(struct device *dev, struct device_attribute *attr, char *buf) { int n = 0; n = cpumask_scnprintf(buf, PAGE_SIZE-2, to_cpumask(pad_busy_cpus_bits)); buf[n++] = '\n'; buf[n] = '\0'; return n; } static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR, acpi_pad_idlecpus_show, acpi_pad_idlecpus_store); static int acpi_pad_add_sysfs(struct acpi_device *device) { int result; result = device_create_file(&device->dev, &dev_attr_idlecpus); if (result) return -ENODEV; result = device_create_file(&device->dev, &dev_attr_idlepct); if (result) { device_remove_file(&device->dev, &dev_attr_idlecpus); return -ENODEV; } result = device_create_file(&device->dev, &dev_attr_rrtime); if (result) { device_remove_file(&device->dev, &dev_attr_idlecpus); device_remove_file(&device->dev, &dev_attr_idlepct); return -ENODEV; } return 0; } static void acpi_pad_remove_sysfs(struct acpi_device *device) { device_remove_file(&device->dev, &dev_attr_idlecpus); 
device_remove_file(&device->dev, &dev_attr_idlepct); device_remove_file(&device->dev, &dev_attr_rrtime); } /* * Query firmware how many CPUs should be idle * return -1 on failure */ static int acpi_pad_pur(acpi_handle handle) { struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *package; int num = -1; if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer))) return num; if (!buffer.length || !buffer.pointer) return num; package = buffer.pointer; if (package->type == ACPI_TYPE_PACKAGE && package->package.count == 2 && package->package.elements[0].integer.value == 1) /* rev 1 */ num = package->package.elements[1].integer.value; kfree(buffer.pointer); return num; } /* Notify firmware how many CPUs are idle */ static void acpi_pad_ost(acpi_handle handle, int stat, uint32_t idle_cpus) { union acpi_object params[3] = { {.type = ACPI_TYPE_INTEGER,}, {.type = ACPI_TYPE_INTEGER,}, {.type = ACPI_TYPE_BUFFER,}, }; struct acpi_object_list arg_list = {3, params}; params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY; params[1].integer.value = stat; params[2].buffer.length = 4; params[2].buffer.pointer = (void *)&idle_cpus; acpi_evaluate_object(handle, "_OST", &arg_list, NULL); } static void acpi_pad_handle_notify(acpi_handle handle) { int num_cpus; uint32_t idle_cpus; mutex_lock(&isolated_cpus_lock); num_cpus = acpi_pad_pur(handle); if (num_cpus < 0) { mutex_unlock(&isolated_cpus_lock); return; } acpi_pad_idle_cpus(num_cpus); idle_cpus = acpi_pad_idle_cpus_num(); acpi_pad_ost(handle, 0, idle_cpus); mutex_unlock(&isolated_cpus_lock); } static void acpi_pad_notify(acpi_handle handle, u32 event, void *data) { struct acpi_device *device = data; switch (event) { case ACPI_PROCESSOR_AGGREGATOR_NOTIFY: acpi_pad_handle_notify(handle); acpi_bus_generate_proc_event(device, event, 0); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, 0); break; default: pr_warn("Unsupported event [0x%x]\n", event); break; } } static int acpi_pad_add(struct acpi_device *device) { acpi_status status; strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS); if (acpi_pad_add_sysfs(device)) return -ENODEV; status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, acpi_pad_notify, device); if (ACPI_FAILURE(status)) { acpi_pad_remove_sysfs(device); return -ENODEV; } return 0; } static int acpi_pad_remove(struct acpi_device *device) { mutex_lock(&isolated_cpus_lock); acpi_pad_idle_cpus(0); mutex_unlock(&isolated_cpus_lock); acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, acpi_pad_notify); acpi_pad_remove_sysfs(device); return 0; } static const struct acpi_device_id pad_device_ids[] = { {"ACPI000C", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, pad_device_ids); static struct acpi_driver acpi_pad_driver = { .name = "processor_aggregator", .class = ACPI_PROCESSOR_AGGREGATOR_CLASS, .ids = pad_device_ids, .ops = { .add = acpi_pad_add, .remove = acpi_pad_remove, }, }; static int __init acpi_pad_init(void) { power_saving_mwait_init(); if (power_saving_mwait_eax == 0) return -EINVAL; return acpi_bus_register_driver(&acpi_pad_driver); } static void __exit acpi_pad_exit(void) { acpi_bus_unregister_driver(&acpi_pad_driver); } module_init(acpi_pad_init); module_exit(acpi_pad_exit); MODULE_AUTHOR("Shaohua Li<shaohua.li@intel.com>"); MODULE_DESCRIPTION("ACPI Processor Aggregator Driver"); MODULE_LICENSE("GPL");
gpl-2.0
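A minimal user-space sketch of driving the sysfs interface that acpi_pad.c registers above (idlecpus, idlepct, rrtime). This is not part of the file; the device directory in PAD_DIR is an assumption, since the ACPI000C enumeration suffix varies by firmware and must be adjusted on a real system.

/*
 * Hedged sketch: poke the acpi_pad sysfs attributes from user space.
 * PAD_DIR is a hypothetical path -- check /sys/bus/acpi/devices first.
 */
#include <stdio.h>

#define PAD_DIR "/sys/bus/acpi/devices/ACPI000C:00"	/* assumed path */

static int pad_write(const char *attr, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", PAD_DIR, attr);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* Ask the driver for two forced-idle SCHED_RR threads that idle
	 * 50% of the time and rotate to a new CPU every 2 seconds. */
	pad_write("idlecpus", "2");
	pad_write("idlepct", "50");
	pad_write("rrtime", "2");
	return 0;
}

Writing idlecpus by hand mirrors what the firmware does through a _PUR evaluation followed by the 0x80 notify handled in acpi_pad_handle_notify().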
zeroblade1984/Galbi
arch/arm/kernel/irq.c
2544
5125
/* * linux/arch/arm/kernel/irq.c * * Copyright (C) 1992 Linus Torvalds * Modifications for ARM processor Copyright (C) 1995-2000 Russell King. * * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation. * Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This file contains the code used by various IRQ handling routines: * asking for different IRQ's should be done through these routines * instead of just grabbing them. Thus setups with different IRQ numbers * shouldn't result in any weird surprises, and installing new handlers * should be easier. * * IRQ's are in fact implemented a bit like signal handlers for the kernel. * Naturally it's not a 1:1 relation, but there are similarities. */ #include <linux/kernel_stat.h> #include <linux/signal.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/random.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/seq_file.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/kallsyms.h> #include <linux/proc_fs.h> #include <asm/exception.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> #include <asm/perftypes.h> /* * No architecture-specific irq_finish function defined in arm/arch/irqs.h. */ #ifndef irq_finish #define irq_finish(irq) do { } while (0) #endif unsigned long irq_err_count; int arch_show_interrupts(struct seq_file *p, int prec) { #ifdef CONFIG_FIQ show_fiq_list(p, prec); #endif #ifdef CONFIG_SMP show_ipi_list(p, prec); #endif seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count); return 0; } /* * handle_IRQ handles all hardware IRQ's. Decoded IRQs should * not come via this function. Instead, they should provide their * own 'handler'. Used by platform code implementing C-based 1st * level decoding. */ void handle_IRQ(unsigned int irq, struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); perf_mon_interrupt_in(); irq_enter(); /* * Some hardware gives randomly wrong interrupts. Rather * than crashing, do something sensible. */ if (unlikely(irq >= nr_irqs)) { if (printk_ratelimit()) printk(KERN_WARNING "Bad IRQ%u\n", irq); ack_bad_irq(irq); } else { generic_handle_irq(irq); } /* AT91 specific workaround */ irq_finish(irq); irq_exit(); set_irq_regs(old_regs); perf_mon_interrupt_out(); } /* * asm_do_IRQ is the interface to be used from assembly code. */ asmlinkage void __exception_irq_entry asm_do_IRQ(unsigned int irq, struct pt_regs *regs) { handle_IRQ(irq, regs); } void set_irq_flags(unsigned int irq, unsigned int iflags) { unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; if (irq >= nr_irqs) { printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq); return; } if (iflags & IRQF_VALID) clr |= IRQ_NOREQUEST; if (iflags & IRQF_PROBE) clr |= IRQ_NOPROBE; if (!(iflags & IRQF_NOAUTOEN)) clr |= IRQ_NOAUTOEN; /* Order is clear bits in "clr" then set bits in "set" */ irq_modify_status(irq, clr, set & ~clr); } void __init init_IRQ(void) { machine_desc->init_irq(); } #ifdef CONFIG_SPARSE_IRQ int __init arch_probe_nr_irqs(void) { nr_irqs = machine_desc->nr_irqs ? 
machine_desc->nr_irqs : NR_IRQS; return nr_irqs; } #endif #ifdef CONFIG_HOTPLUG_CPU static bool migrate_one_irq(struct irq_desc *desc) { struct irq_data *d = irq_desc_get_irq_data(desc); const struct cpumask *affinity = d->affinity; struct irq_chip *c; bool ret = false; /* * If this is a per-CPU interrupt, or the affinity does not * include this CPU, then we have nothing to do. */ if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) return false; if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { affinity = cpu_online_mask; ret = true; } c = irq_data_get_irq_chip(d); if (!c->irq_set_affinity) pr_debug("IRQ%u: unable to set affinity\n", d->irq); else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret) cpumask_copy(d->affinity, affinity); return ret; } /* * The current CPU has been marked offline. Migrate IRQs off this CPU. * If the affinity settings do not allow other CPUs, force them onto any * available CPU. * * Note: we must iterate over all IRQs, whether they have an attached * action structure or not, as we need to get chained interrupts too. */ void migrate_irqs(void) { unsigned int i; struct irq_desc *desc; unsigned long flags; local_irq_save(flags); for_each_irq_desc(i, desc) { bool affinity_broken; raw_spin_lock(&desc->lock); affinity_broken = migrate_one_irq(desc); raw_spin_unlock(&desc->lock); if (affinity_broken && printk_ratelimit()) pr_warning("IRQ%u no longer affine to CPU%u\n", i, smp_processor_id()); } local_irq_restore(flags); } #endif /* CONFIG_HOTPLUG_CPU */
gpl-2.0
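The hotplug path above (migrate_one_irq()/migrate_irqs()) decides, per IRQ, whether the outgoing CPU was the last online CPU in the IRQ's affinity mask; if so it falls back to all online CPUs and reports the affinity as broken. Below is a stand-alone, deliberately simplified model of that decision, not kernel code: toy cpumasks are 32-bit words and the non-broken branch simply clears the dying CPU, which the real code leaves to the irq_chip.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* model a cpumask as a 32-bit word, bit N = CPU N */
static bool migrate_one_irq_model(uint32_t *affinity, uint32_t online,
				  unsigned dying_cpu)
{
	/* nothing to do if the dying CPU isn't in the affinity mask */
	if (!(*affinity & (1u << dying_cpu)))
		return false;

	if ((*affinity & online & ~(1u << dying_cpu)) == 0) {
		/* no other online CPU allowed: widen to all online CPUs */
		*affinity = online & ~(1u << dying_cpu);
		return true;	/* affinity was broken */
	}
	/* simplification: drop the dying CPU from the mask ourselves */
	*affinity &= ~(1u << dying_cpu);
	return false;
}

int main(void)
{
	uint32_t aff = 1u << 2;		/* IRQ bound to CPU2 only */
	uint32_t online = 0xF;		/* CPUs 0-3 online */

	if (migrate_one_irq_model(&aff, online, 2))
		printf("IRQ no longer affine to CPU2, new mask %#x\n", aff);
	return 0;
}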
Oleh-Kravchenko/asusp535
arch/alpha/kernel/pci.c
2544
11076
/* * linux/arch/alpha/kernel/pci.c * * Extruded from code written by * Dave Rusling (david.rusling@reo.mts.dec.com) * David Mosberger (davidm@cs.arizona.edu) */ /* 2.3.x PCI/resources, 1999 Andrea Arcangeli <andrea@suse.de> */ /* * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru> * PCI-PCI bridges cleanup */ #include <linux/string.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/bootmem.h> #include <linux/module.h> #include <linux/cache.h> #include <linux/slab.h> #include <asm/machvec.h> #include "proto.h" #include "pci_impl.h" /* * Some string constants used by the various core logics. */ const char *const pci_io_names[] = { "PCI IO bus 0", "PCI IO bus 1", "PCI IO bus 2", "PCI IO bus 3", "PCI IO bus 4", "PCI IO bus 5", "PCI IO bus 6", "PCI IO bus 7" }; const char *const pci_mem_names[] = { "PCI mem bus 0", "PCI mem bus 1", "PCI mem bus 2", "PCI mem bus 3", "PCI mem bus 4", "PCI mem bus 5", "PCI mem bus 6", "PCI mem bus 7" }; const char pci_hae0_name[] = "HAE0"; /* * If PCI_PROBE_ONLY in pci_flags is set, we don't change any PCI resource * assignments. */ /* * The PCI controller list. */ struct pci_controller *hose_head, **hose_tail = &hose_head; struct pci_controller *pci_isa_hose; /* * Quirks. */ static void quirk_isa_bridge(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_ISA << 8; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge); static void quirk_cypress(struct pci_dev *dev) { /* The Notorious Cy82C693 chip. */ /* The generic legacy mode IDE fixup in drivers/pci/probe.c doesn't work correctly with the Cypress IDE controller as it has non-standard register layout. Fix that. */ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE) { dev->resource[2].start = dev->resource[3].start = 0; dev->resource[2].end = dev->resource[3].end = 0; dev->resource[2].flags = dev->resource[3].flags = 0; if (PCI_FUNC(dev->devfn) == 2) { dev->resource[0].start = 0x170; dev->resource[0].end = 0x177; dev->resource[1].start = 0x376; dev->resource[1].end = 0x376; } } /* The Cypress bridge responds on the PCI bus in the address range 0xffff0000-0xffffffff (conventional x86 BIOS ROM). There is no way to turn this off. The bridge also supports several extended BIOS ranges (disabled after power-up), and some consoles do turn them on. So if we use a large direct-map window, or a large SG window, we must avoid the entire 0xfff00000-0xffffffff region. */ if (dev->class >> 8 == PCI_CLASS_BRIDGE_ISA) { if (__direct_map_base + __direct_map_size >= 0xfff00000UL) __direct_map_size = 0xfff00000UL - __direct_map_base; else { struct pci_controller *hose = dev->sysdata; struct pci_iommu_arena *pci = hose->sg_pci; if (pci && pci->dma_base + pci->size >= 0xfff00000UL) pci->size = 0xfff00000UL - pci->dma_base; } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, quirk_cypress); /* Called for each device after PCI setup is done. 
*/ static void pcibios_fixup_final(struct pci_dev *dev) { unsigned int class = dev->class >> 8; if (class == PCI_CLASS_BRIDGE_ISA || class == PCI_CLASS_BRIDGE_EISA) { dev->dma_mask = MAX_ISA_DMA_ADDRESS - 1; isa_bridge = dev; } } DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final); /* Just declaring that the power-of-ten prefixes are actually the power-of-two ones doesn't make it true :) */ #define KB 1024 #define MB (1024*KB) #define GB (1024*MB) resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { struct pci_dev *dev = data; struct pci_controller *hose = dev->sysdata; unsigned long alignto; resource_size_t start = res->start; if (res->flags & IORESOURCE_IO) { /* Make sure we start at our min on all hoses */ if (start - hose->io_space->start < PCIBIOS_MIN_IO) start = PCIBIOS_MIN_IO + hose->io_space->start; /* * Put everything into 0x00-0xff region modulo 0x400 */ if (start & 0x300) start = (start + 0x3ff) & ~0x3ff; } else if (res->flags & IORESOURCE_MEM) { /* Make sure we start at our min on all hoses */ if (start - hose->mem_space->start < PCIBIOS_MIN_MEM) start = PCIBIOS_MIN_MEM + hose->mem_space->start; /* * The following holds at least for the Low Cost * Alpha implementation of the PCI interface: * * In sparse memory address space, the first * octant (16MB) of every 128MB segment is * aliased to the very first 16 MB of the * address space (i.e., it aliases the ISA * memory address space). Thus, we try to * avoid allocating PCI devices in that range. * Can be allocated in 2nd-7th octant only. * Devices that need more than 112MB of * address space must be accessed through * dense memory space only! */ /* Align to multiple of size of minimum base. */ alignto = max_t(resource_size_t, 0x1000, align); start = ALIGN(start, alignto); if (hose->sparse_mem_base && size <= 7 * 16*MB) { if (((start / (16*MB)) & 0x7) == 0) { start &= ~(128*MB - 1); start += 16*MB; start = ALIGN(start, alignto); } if (start/(128*MB) != (start + size - 1)/(128*MB)) { start &= ~(128*MB - 1); start += (128 + 16)*MB; start = ALIGN(start, alignto); } } } return start; } #undef KB #undef MB #undef GB static int __init pcibios_init(void) { if (alpha_mv.init_pci) alpha_mv.init_pci(); return 0; } subsys_initcall(pcibios_init); #ifdef ALPHA_RESTORE_SRM_SETUP static struct pdev_srm_saved_conf *srm_saved_configs; void pdev_save_srm_config(struct pci_dev *dev) { struct pdev_srm_saved_conf *tmp; static int printed = 0; if (!alpha_using_srm || pci_has_flag(PCI_PROBE_ONLY)) return; if (!printed) { printk(KERN_INFO "pci: enabling save/restore of SRM state\n"); printed = 1; } tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) { printk(KERN_ERR "%s: kmalloc() failed!\n", __func__); return; } tmp->next = srm_saved_configs; tmp->dev = dev; pci_save_state(dev); srm_saved_configs = tmp; } void pci_restore_srm_config(void) { struct pdev_srm_saved_conf *tmp; /* No need to restore if probed only. */ if (pci_has_flag(PCI_PROBE_ONLY)) return; /* Restore SRM config. 
*/ for (tmp = srm_saved_configs; tmp; tmp = tmp->next) { pci_restore_state(tmp->dev); } } #endif void pcibios_fixup_bus(struct pci_bus *bus) { struct pci_dev *dev = bus->self; if (pci_has_flag(PCI_PROBE_ONLY) && dev && (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { pci_read_bridge_bases(bus); } list_for_each_entry(dev, &bus->devices, bus_list) { pdev_save_srm_config(dev); } } int pcibios_enable_device(struct pci_dev *dev, int mask) { return pci_enable_resources(dev, mask); } /* * If we set up a device for bus mastering, we need to check the latency * timer as certain firmware forgets to set it properly, as seen * on SX164 and LX164 with SRM. */ void pcibios_set_master(struct pci_dev *dev) { u8 lat; pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); if (lat >= 16) return; printk("PCI: Setting latency timer of device %s to 64\n", pci_name(dev)); pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64); } void __init pcibios_claim_one_bus(struct pci_bus *b) { struct pci_dev *dev; struct pci_bus *child_bus; list_for_each_entry(dev, &b->devices, bus_list) { int i; for (i = 0; i < PCI_NUM_RESOURCES; i++) { struct resource *r = &dev->resource[i]; if (r->parent || !r->start || !r->flags) continue; if (pci_has_flag(PCI_PROBE_ONLY) || (r->flags & IORESOURCE_PCI_FIXED)) pci_claim_resource(dev, i); } } list_for_each_entry(child_bus, &b->children, node) pcibios_claim_one_bus(child_bus); } static void __init pcibios_claim_console_setup(void) { struct pci_bus *b; list_for_each_entry(b, &pci_root_buses, node) pcibios_claim_one_bus(b); } void __init common_init_pci(void) { struct pci_controller *hose; struct list_head resources; struct pci_bus *bus; int next_busno; int need_domain_info = 0; u32 pci_mem_end; u32 sg_base; unsigned long end; /* Scan all of the recorded PCI controllers. */ for (next_busno = 0, hose = hose_head; hose; hose = hose->next) { sg_base = hose->sg_pci ? hose->sg_pci->dma_base : ~0; /* Adjust hose mem_space limit to prevent PCI allocations in the iommu windows. */ pci_mem_end = min((u32)__direct_map_base, sg_base) - 1; end = hose->mem_space->start + pci_mem_end; if (hose->mem_space->end > end) hose->mem_space->end = end; INIT_LIST_HEAD(&resources); pci_add_resource_offset(&resources, hose->io_space, hose->io_space->start); pci_add_resource_offset(&resources, hose->mem_space, hose->mem_space->start); bus = pci_scan_root_bus(NULL, next_busno, alpha_mv.pci_ops, hose, &resources); hose->bus = bus; hose->need_domain_info = need_domain_info; next_busno = bus->busn_res.end + 1; /* Don't allow 8-bit bus number overflow inside the hose - reserve some space for bridges. */ if (next_busno > 224) { next_busno = 0; need_domain_info = 1; } } pcibios_claim_console_setup(); pci_assign_unassigned_resources(); pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq); } struct pci_controller * __init alloc_pci_controller(void) { struct pci_controller *hose; hose = alloc_bootmem(sizeof(*hose)); *hose_tail = hose; hose_tail = &hose->next; return hose; } struct resource * __init alloc_resource(void) { struct resource *res; res = alloc_bootmem(sizeof(*res)); return res; } /* Provide information on locations of various I/O regions in physical memory. Do this on a per-card basis so that we choose the right hose. 
*/ asmlinkage long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn) { struct pci_controller *hose; struct pci_dev *dev; /* from hose or from bus.devfn */ if (which & IOBASE_FROM_HOSE) { for(hose = hose_head; hose; hose = hose->next) if (hose->index == bus) break; if (!hose) return -ENODEV; } else { /* Special hook for ISA access. */ if (bus == 0 && dfn == 0) { hose = pci_isa_hose; } else { dev = pci_get_bus_and_slot(bus, dfn); if (!dev) return -ENODEV; hose = dev->sysdata; pci_dev_put(dev); } } switch (which & ~IOBASE_FROM_HOSE) { case IOBASE_HOSE: return hose->index; case IOBASE_SPARSE_MEM: return hose->sparse_mem_base; case IOBASE_DENSE_MEM: return hose->dense_mem_base; case IOBASE_SPARSE_IO: return hose->sparse_io_base; case IOBASE_DENSE_IO: return hose->dense_io_base; case IOBASE_ROOT_BUS: return hose->bus->number; } return -EOPNOTSUPP; } /* Destroy an __iomem token. Not copied from lib/iomap.c. */ void pci_iounmap(struct pci_dev *dev, void __iomem * addr) { if (__is_mmio(addr)) iounmap(addr); } EXPORT_SYMBOL(pci_iounmap); /* FIXME: Some boxes have multiple ISA bridges! */ struct pci_dev *isa_bridge; EXPORT_SYMBOL(isa_bridge);
gpl-2.0
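The sparse-memory branch of pcibios_align_resource() above enforces two placement rules: never allocate in the first 16MB octant of a 128MB segment (it aliases the ISA memory space) and never let a region straddle a 128MB segment boundary. A self-contained sketch of just that arithmetic, with a made-up BAR size and start address:

#include <stdio.h>
#include <stdint.h>

#define MB (1024ULL * 1024)

static uint64_t align_up(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

static uint64_t sparse_fixup(uint64_t start, uint64_t size, uint64_t alignto)
{
	start = align_up(start, alignto);
	/* first octant (16MB) of each 128MB segment is aliased: skip it */
	if (((start / (16 * MB)) & 0x7) == 0) {
		start &= ~(128 * MB - 1);
		start += 16 * MB;
		start = align_up(start, alignto);
	}
	/* don't cross a 128MB segment boundary */
	if (start / (128 * MB) != (start + size - 1) / (128 * MB)) {
		start &= ~(128 * MB - 1);
		start += (128 + 16) * MB;
		start = align_up(start, alignto);
	}
	return start;
}

int main(void)
{
	/* an 8MB region that would land at the start of a segment gets
	 * pushed past the aliased octant: 128MB -> 144MB (0x9000000) */
	printf("%#llx\n",
	       (unsigned long long)sparse_fixup(128 * MB, 8 * MB, 0x1000));
	return 0;
}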
Michael-Pizzileo/lichee-3.0.8-leaked
drivers/spi/dw_spi_mid.c
2544
6082
/* * dw_spi_mid.c - special handling for DW core on Intel MID platform * * Copyright (c) 2009, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include "dw_spi.h" #ifdef CONFIG_SPI_DW_MID_DMA #include <linux/intel_mid_dma.h> #include <linux/pci.h> struct mid_dma { struct intel_mid_dma_slave dmas_tx; struct intel_mid_dma_slave dmas_rx; }; static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param) { struct dw_spi *dws = param; return dws->dmac && (&dws->dmac->dev == chan->device->dev); } static int mid_spi_dma_init(struct dw_spi *dws) { struct mid_dma *dw_dma = dws->dma_priv; struct intel_mid_dma_slave *rxs, *txs; dma_cap_mask_t mask; /* * Get pci device for DMA controller, currently it could only * be the DMA controller of either Moorestown or Medfield */ dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL); if (!dws->dmac) dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL); dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); /* 1. Init rx channel */ dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws); if (!dws->rxchan) goto err_exit; rxs = &dw_dma->dmas_rx; rxs->hs_mode = LNW_DMA_HW_HS; rxs->cfg_mode = LNW_DMA_PER_TO_MEM; dws->rxchan->private = rxs; /* 2. Init tx channel */ dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws); if (!dws->txchan) goto free_rxchan; txs = &dw_dma->dmas_tx; txs->hs_mode = LNW_DMA_HW_HS; txs->cfg_mode = LNW_DMA_MEM_TO_PER; dws->txchan->private = txs; dws->dma_inited = 1; return 0; free_rxchan: dma_release_channel(dws->rxchan); err_exit: return -1; } static void mid_spi_dma_exit(struct dw_spi *dws) { dma_release_channel(dws->txchan); dma_release_channel(dws->rxchan); } /* * dws->dma_chan_done is cleared before the dma transfer starts, * callback for rx/tx channel will each increment it by 1. * Reaching 2 means the whole spi transaction is done. */ static void dw_spi_dma_done(void *arg) { struct dw_spi *dws = arg; if (++dws->dma_chan_done != 2) return; dw_spi_xfer_done(dws); } static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) { struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL; struct dma_chan *txchan, *rxchan; struct dma_slave_config txconf, rxconf; u16 dma_ctrl = 0; /* 1. setup DMA related registers */ if (cs_change) { spi_enable_chip(dws, 0); dw_writew(dws, dmardlr, 0xf); dw_writew(dws, dmatdlr, 0x10); if (dws->tx_dma) dma_ctrl |= 0x2; if (dws->rx_dma) dma_ctrl |= 0x1; dw_writew(dws, dmacr, dma_ctrl); spi_enable_chip(dws, 1); } dws->dma_chan_done = 0; txchan = dws->txchan; rxchan = dws->rxchan; /* 2. 
Prepare the TX dma transfer */ txconf.direction = DMA_TO_DEVICE; txconf.dst_addr = dws->dma_addr; txconf.dst_maxburst = LNW_DMA_MSIZE_16; txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, (unsigned long) &txconf); memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl)); dws->tx_sgl.dma_address = dws->tx_dma; dws->tx_sgl.length = dws->len; txdesc = txchan->device->device_prep_slave_sg(txchan, &dws->tx_sgl, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); txdesc->callback = dw_spi_dma_done; txdesc->callback_param = dws; /* 3. Prepare the RX dma transfer */ rxconf.direction = DMA_FROM_DEVICE; rxconf.src_addr = dws->dma_addr; rxconf.src_maxburst = LNW_DMA_MSIZE_16; rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, (unsigned long) &rxconf); memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl)); dws->rx_sgl.dma_address = dws->rx_dma; dws->rx_sgl.length = dws->len; rxdesc = rxchan->device->device_prep_slave_sg(rxchan, &dws->rx_sgl, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); rxdesc->callback = dw_spi_dma_done; rxdesc->callback_param = dws; /* rx must be started before tx due to spi instinct */ rxdesc->tx_submit(rxdesc); txdesc->tx_submit(txdesc); return 0; } static struct dw_spi_dma_ops mid_dma_ops = { .dma_init = mid_spi_dma_init, .dma_exit = mid_spi_dma_exit, .dma_transfer = mid_spi_dma_transfer, }; #endif /* Some specific info for SPI0 controller on Moorestown */ /* HW info for MRST CLk Control Unit, one 32b reg */ #define MRST_SPI_CLK_BASE 100000000 /* 100m */ #define MRST_CLK_SPI0_REG 0xff11d86c #define CLK_SPI_BDIV_OFFSET 0 #define CLK_SPI_BDIV_MASK 0x00000007 #define CLK_SPI_CDIV_OFFSET 9 #define CLK_SPI_CDIV_MASK 0x00000e00 #define CLK_SPI_DISABLE_OFFSET 8 int dw_spi_mid_init(struct dw_spi *dws) { u32 *clk_reg, clk_cdiv; clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16); if (!clk_reg) return -ENOMEM; /* get SPI controller operating freq info */ clk_cdiv = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET; dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1); iounmap(clk_reg); dws->num_cs = 16; dws->fifo_len = 40; /* FIFO has 40 words buffer */ #ifdef CONFIG_SPI_DW_MID_DMA dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); if (!dws->dma_priv) return -ENOMEM; dws->dma_ops = &mid_dma_ops; #endif return 0; }
gpl-2.0
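dw_spi_mid_init() above derives the controller's operating frequency from the CDIV field of the Moorestown clock-control register: max_freq = 100 MHz / (cdiv + 1). A small illustrative program reproducing that computation with a hypothetical register value (the constants are copied from the driver; the input value is made up):

#include <stdio.h>
#include <stdint.h>

#define MRST_SPI_CLK_BASE   100000000u	/* 100 MHz, as in the driver */
#define CLK_SPI_CDIV_MASK   0x00000e00
#define CLK_SPI_CDIV_OFFSET 9

static unsigned spi_max_freq(uint32_t clk_reg_val)
{
	uint32_t cdiv = (clk_reg_val & CLK_SPI_CDIV_MASK)
			>> CLK_SPI_CDIV_OFFSET;

	return MRST_SPI_CLK_BASE / (cdiv + 1);
}

int main(void)
{
	/* hypothetical register value with CDIV = 3 -> 25 MHz */
	printf("max_freq = %u Hz\n",
	       spi_max_freq(3u << CLK_SPI_CDIV_OFFSET));
	return 0;
}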
garwynn/caf_kernel_msm
drivers/staging/ramster/zcache-main.c
4336
93861
/* * zcache.c * * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp. * Copyright (c) 2010,2011, Nitin Gupta * * Zcache provides an in-kernel "host implementation" for transcendent memory * and, thus indirectly, for cleancache and frontswap. Zcache includes two * page-accessible memory [1] interfaces, both utilizing lzo1x compression: * 1) "compression buddies" ("zbud") is used for ephemeral pages * 2) xvmalloc is used for persistent pages. * Xvmalloc (based on the TLSF allocator) has very low fragmentation * so maximizes space efficiency, while zbud allows pairs (and potentially, * in the future, more than a pair of) compressed pages to be closely linked * so that reclaiming can be done via the kernel's physical-page-oriented * "shrinker" interface. * * [1] For a definition of page-accessible memory (aka PAM), see: * http://marc.info/?l=linux-mm&m=127811271605009 * RAMSTER TODO: * - handle remotifying of buddied pages (see zbud_remotify_zbpg) * - kernel boot params: nocleancache/nofrontswap don't always work?!? */ #include <linux/module.h> #include <linux/cpu.h> #include <linux/highmem.h> #include <linux/list.h> #include <linux/lzo.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/types.h> #include <linux/atomic.h> #include <linux/math64.h> #include "tmem.h" #include "zcache.h" #include "ramster.h" #include "cluster/tcp.h" #include "xvmalloc.h" /* temporary until change to zsmalloc */ #define RAMSTER_TESTING #if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP)) #error "ramster is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP" #endif #ifdef CONFIG_CLEANCACHE #include <linux/cleancache.h> #endif #ifdef CONFIG_FRONTSWAP #include <linux/frontswap.h> #endif enum ramster_remotify_op { RAMSTER_REMOTIFY_EPH_PUT, RAMSTER_REMOTIFY_PERS_PUT, RAMSTER_REMOTIFY_FLUSH_PAGE, RAMSTER_REMOTIFY_FLUSH_OBJ, RAMSTER_INTRANSIT_PERS }; struct ramster_remotify_hdr { enum ramster_remotify_op op; struct list_head list; }; #define ZBH_SENTINEL 0x43214321 #define ZBPG_SENTINEL 0xdeadbeef #define ZBUD_MAX_BUDS 2 struct zbud_hdr { struct ramster_remotify_hdr rem_op; uint16_t client_id; uint16_t pool_id; struct tmem_oid oid; uint32_t index; uint16_t size; /* compressed size in bytes, zero means unused */ DECL_SENTINEL }; #define ZVH_SENTINEL 0x43214321 static const int zv_max_page_size = (PAGE_SIZE / 8) * 7; struct zv_hdr { struct ramster_remotify_hdr rem_op; uint16_t client_id; uint16_t pool_id; struct tmem_oid oid; uint32_t index; DECL_SENTINEL }; struct flushlist_node { struct ramster_remotify_hdr rem_op; struct tmem_xhandle xh; }; union { struct ramster_remotify_hdr rem_op; struct zv_hdr zv; struct zbud_hdr zbud; struct flushlist_node flist; } remotify_list_node; static LIST_HEAD(zcache_rem_op_list); static DEFINE_SPINLOCK(zcache_rem_op_list_lock); #if 0 /* this is more aggressive but may cause other problems? 
*/ #define ZCACHE_GFP_MASK (GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN) #else #define ZCACHE_GFP_MASK \ (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC) #endif #define MAX_POOLS_PER_CLIENT 16 #define MAX_CLIENTS 16 #define LOCAL_CLIENT ((uint16_t)-1) MODULE_LICENSE("GPL"); struct zcache_client { struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT]; struct xv_pool *xvpool; bool allocated; atomic_t refcount; }; static struct zcache_client zcache_host; static struct zcache_client zcache_clients[MAX_CLIENTS]; static inline uint16_t get_client_id_from_client(struct zcache_client *cli) { BUG_ON(cli == NULL); if (cli == &zcache_host) return LOCAL_CLIENT; return cli - &zcache_clients[0]; } static inline bool is_local_client(struct zcache_client *cli) { return cli == &zcache_host; } /********** * Compression buddies ("zbud") provides for packing two (or, possibly * in the future, more) compressed ephemeral pages into a single "raw" * (physical) page and tracking them with data structures so that * the raw pages can be easily reclaimed. * * A zbud page ("zbpg") is an aligned page containing a list_head, * a lock, and two "zbud headers". The remainder of the physical * page is divided up into aligned 64-byte "chunks" which contain * the compressed data for zero, one, or two zbuds. Each zbpg * resides on: (1) an "unused list" if it has no zbuds; (2) a * "buddied" list if it is fully populated with two zbuds; or * (3) one of PAGE_SIZE/64 "unbuddied" lists indexed by how many chunks * the one unbuddied zbud uses. The data inside a zbpg cannot be * read or written unless the zbpg's lock is held. */ struct zbud_page { struct list_head bud_list; spinlock_t lock; struct zbud_hdr buddy[ZBUD_MAX_BUDS]; DECL_SENTINEL /* followed by NUM_CHUNK aligned CHUNK_SIZE-byte chunks */ }; #define CHUNK_SHIFT 6 #define CHUNK_SIZE (1 << CHUNK_SHIFT) #define CHUNK_MASK (~(CHUNK_SIZE-1)) #define NCHUNKS (((PAGE_SIZE - sizeof(struct zbud_page)) & \ CHUNK_MASK) >> CHUNK_SHIFT) #define MAX_CHUNK (NCHUNKS-1) static struct { struct list_head list; unsigned count; } zbud_unbuddied[NCHUNKS]; /* list N contains pages with N chunks USED and NCHUNKS-N unused */ /* element 0 is never used but optimizing that isn't worth it */ static unsigned long zbud_cumul_chunk_counts[NCHUNKS]; struct list_head zbud_buddied_list; static unsigned long zcache_zbud_buddied_count; /* protects the buddied list and all unbuddied lists */ static DEFINE_SPINLOCK(zbud_budlists_spinlock); static atomic_t zcache_zbud_curr_raw_pages; static atomic_t zcache_zbud_curr_zpages; static unsigned long zcache_zbud_curr_zbytes; static unsigned long zcache_zbud_cumul_zpages; static unsigned long zcache_zbud_cumul_zbytes; static unsigned long zcache_compress_poor; static unsigned long zcache_policy_percent_exceeded; static unsigned long zcache_mean_compress_poor; /* * RAMster counters * - Remote pages are pages with a local pampd but the data is remote * - Foreign pages are pages stored locally but belonging to another node */ static atomic_t ramster_remote_pers_pages = ATOMIC_INIT(0); static unsigned long ramster_pers_remotify_enable; static unsigned long ramster_eph_remotify_enable; static unsigned long ramster_eph_pages_remoted; static unsigned long ramster_eph_pages_remote_failed; static unsigned long ramster_pers_pages_remoted; static unsigned long ramster_pers_pages_remote_failed; static unsigned long ramster_pers_pages_remote_nomem; static unsigned long ramster_remote_objects_flushed; static unsigned long ramster_remote_object_flushes_failed; static 
unsigned long ramster_remote_pages_flushed; static unsigned long ramster_remote_page_flushes_failed; static unsigned long ramster_remote_eph_pages_succ_get; static unsigned long ramster_remote_pers_pages_succ_get; static unsigned long ramster_remote_eph_pages_unsucc_get; static unsigned long ramster_remote_pers_pages_unsucc_get; static atomic_t ramster_curr_flnode_count = ATOMIC_INIT(0); static unsigned long ramster_curr_flnode_count_max; static atomic_t ramster_foreign_eph_pampd_count = ATOMIC_INIT(0); static unsigned long ramster_foreign_eph_pampd_count_max; static atomic_t ramster_foreign_pers_pampd_count = ATOMIC_INIT(0); static unsigned long ramster_foreign_pers_pampd_count_max; /* forward references */ static void *zcache_get_free_page(void); static void zcache_free_page(void *p); /* * zbud helper functions */ static inline unsigned zbud_max_buddy_size(void) { return MAX_CHUNK << CHUNK_SHIFT; } static inline unsigned zbud_size_to_chunks(unsigned size) { BUG_ON(size == 0 || size > zbud_max_buddy_size()); return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT; } static inline int zbud_budnum(struct zbud_hdr *zh) { unsigned offset = (unsigned long)zh & (PAGE_SIZE - 1); struct zbud_page *zbpg = NULL; unsigned budnum = -1U; int i; for (i = 0; i < ZBUD_MAX_BUDS; i++) if (offset == offsetof(typeof(*zbpg), buddy[i])) { budnum = i; break; } BUG_ON(budnum == -1U); return budnum; } static char *zbud_data(struct zbud_hdr *zh, unsigned size) { struct zbud_page *zbpg; char *p; unsigned budnum; ASSERT_SENTINEL(zh, ZBH); budnum = zbud_budnum(zh); BUG_ON(size == 0 || size > zbud_max_buddy_size()); zbpg = container_of(zh, struct zbud_page, buddy[budnum]); ASSERT_SPINLOCK(&zbpg->lock); p = (char *)zbpg; if (budnum == 0) p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) & CHUNK_MASK); else if (budnum == 1) p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK); return p; } static void zbud_copy_from_pampd(char *data, size_t *size, struct zbud_hdr *zh) { struct zbud_page *zbpg; char *p; unsigned budnum; ASSERT_SENTINEL(zh, ZBH); budnum = zbud_budnum(zh); zbpg = container_of(zh, struct zbud_page, buddy[budnum]); spin_lock(&zbpg->lock); BUG_ON(zh->size > *size); p = (char *)zbpg; if (budnum == 0) p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) & CHUNK_MASK); else if (budnum == 1) p += PAGE_SIZE - ((zh->size + CHUNK_SIZE - 1) & CHUNK_MASK); /* client should be filled in by caller */ memcpy(data, p, zh->size); *size = zh->size; spin_unlock(&zbpg->lock); } /* * zbud raw page management */ static struct zbud_page *zbud_alloc_raw_page(void) { struct zbud_page *zbpg = NULL; struct zbud_hdr *zh0, *zh1; zbpg = zcache_get_free_page(); if (likely(zbpg != NULL)) { INIT_LIST_HEAD(&zbpg->bud_list); zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1]; spin_lock_init(&zbpg->lock); atomic_inc(&zcache_zbud_curr_raw_pages); INIT_LIST_HEAD(&zbpg->bud_list); SET_SENTINEL(zbpg, ZBPG); zh0->size = 0; zh1->size = 0; tmem_oid_set_invalid(&zh0->oid); tmem_oid_set_invalid(&zh1->oid); } return zbpg; } static void zbud_free_raw_page(struct zbud_page *zbpg) { struct zbud_hdr *zh0 = &zbpg->buddy[0], *zh1 = &zbpg->buddy[1]; ASSERT_SENTINEL(zbpg, ZBPG); BUG_ON(!list_empty(&zbpg->bud_list)); ASSERT_SPINLOCK(&zbpg->lock); BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid)); BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid)); INVERT_SENTINEL(zbpg, ZBPG); spin_unlock(&zbpg->lock); atomic_dec(&zcache_zbud_curr_raw_pages); zcache_free_page(zbpg); } /* * core zbud handling routines */ static unsigned zbud_free(struct zbud_hdr *zh) { unsigned size; 
ASSERT_SENTINEL(zh, ZBH); BUG_ON(!tmem_oid_valid(&zh->oid)); size = zh->size; BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size()); zh->size = 0; tmem_oid_set_invalid(&zh->oid); INVERT_SENTINEL(zh, ZBH); zcache_zbud_curr_zbytes -= size; atomic_dec(&zcache_zbud_curr_zpages); return size; } static void zbud_free_and_delist(struct zbud_hdr *zh) { unsigned chunks; struct zbud_hdr *zh_other; unsigned budnum = zbud_budnum(zh), size; struct zbud_page *zbpg = container_of(zh, struct zbud_page, buddy[budnum]); /* FIXME, should be BUG_ON, pool destruction path doesn't disable * interrupts tmem_destroy_pool()->tmem_pampd_destroy_all_in_obj()-> * tmem_objnode_node_destroy()-> zcache_pampd_free() */ WARN_ON(!irqs_disabled()); spin_lock(&zbpg->lock); if (list_empty(&zbpg->bud_list)) { /* ignore zombie page... see zbud_evict_pages() */ spin_unlock(&zbpg->lock); return; } size = zbud_free(zh); ASSERT_SPINLOCK(&zbpg->lock); zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0]; if (zh_other->size == 0) { /* was unbuddied: unlist and free */ chunks = zbud_size_to_chunks(size) ; spin_lock(&zbud_budlists_spinlock); BUG_ON(list_empty(&zbud_unbuddied[chunks].list)); list_del_init(&zbpg->bud_list); zbud_unbuddied[chunks].count--; spin_unlock(&zbud_budlists_spinlock); zbud_free_raw_page(zbpg); } else { /* was buddied: move remaining buddy to unbuddied list */ chunks = zbud_size_to_chunks(zh_other->size) ; spin_lock(&zbud_budlists_spinlock); list_del_init(&zbpg->bud_list); zcache_zbud_buddied_count--; list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list); zbud_unbuddied[chunks].count++; spin_unlock(&zbud_budlists_spinlock); spin_unlock(&zbpg->lock); } } static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id, struct tmem_oid *oid, uint32_t index, struct page *page, void *cdata, unsigned size) { struct zbud_hdr *zh0, *zh1, *zh = NULL; struct zbud_page *zbpg = NULL, *ztmp; unsigned nchunks; char *to; int i, found_good_buddy = 0; nchunks = zbud_size_to_chunks(size) ; for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) { spin_lock(&zbud_budlists_spinlock); if (!list_empty(&zbud_unbuddied[i].list)) { list_for_each_entry_safe(zbpg, ztmp, &zbud_unbuddied[i].list, bud_list) { if (spin_trylock(&zbpg->lock)) { found_good_buddy = i; goto found_unbuddied; } } } spin_unlock(&zbud_budlists_spinlock); } /* didn't find a good buddy, try allocating a new page */ zbpg = zbud_alloc_raw_page(); if (unlikely(zbpg == NULL)) goto out; /* ok, have a page, now compress the data before taking locks */ spin_lock(&zbud_budlists_spinlock); spin_lock(&zbpg->lock); list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list); zbud_unbuddied[nchunks].count++; zh = &zbpg->buddy[0]; goto init_zh; found_unbuddied: ASSERT_SPINLOCK(&zbpg->lock); zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1]; BUG_ON(!((zh0->size == 0) ^ (zh1->size == 0))); if (zh0->size != 0) { /* buddy0 in use, buddy1 is vacant */ ASSERT_SENTINEL(zh0, ZBH); zh = zh1; } else if (zh1->size != 0) { /* buddy1 in use, buddy0 is vacant */ ASSERT_SENTINEL(zh1, ZBH); zh = zh0; } else BUG(); list_del_init(&zbpg->bud_list); zbud_unbuddied[found_good_buddy].count--; list_add_tail(&zbpg->bud_list, &zbud_buddied_list); zcache_zbud_buddied_count++; init_zh: SET_SENTINEL(zh, ZBH); zh->size = size; zh->index = index; zh->oid = *oid; zh->pool_id = pool_id; zh->client_id = client_id; to = zbud_data(zh, size); memcpy(to, cdata, size); spin_unlock(&zbpg->lock); spin_unlock(&zbud_budlists_spinlock); zbud_cumul_chunk_counts[nchunks]++; atomic_inc(&zcache_zbud_curr_zpages); 
zcache_zbud_cumul_zpages++; zcache_zbud_curr_zbytes += size; zcache_zbud_cumul_zbytes += size; out: return zh; } static int zbud_decompress(struct page *page, struct zbud_hdr *zh) { struct zbud_page *zbpg; unsigned budnum = zbud_budnum(zh); size_t out_len = PAGE_SIZE; char *to_va, *from_va; unsigned size; int ret = 0; zbpg = container_of(zh, struct zbud_page, buddy[budnum]); spin_lock(&zbpg->lock); if (list_empty(&zbpg->bud_list)) { /* ignore zombie page... see zbud_evict_pages() */ ret = -EINVAL; goto out; } ASSERT_SENTINEL(zh, ZBH); BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size()); to_va = kmap_atomic(page); size = zh->size; from_va = zbud_data(zh, size); ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len); BUG_ON(ret != LZO_E_OK); BUG_ON(out_len != PAGE_SIZE); kunmap_atomic(to_va); out: spin_unlock(&zbpg->lock); return ret; } /* * The following routines handle shrinking of ephemeral pages by evicting * pages "least valuable" first. */ static unsigned long zcache_evicted_raw_pages; static unsigned long zcache_evicted_buddied_pages; static unsigned long zcache_evicted_unbuddied_pages; static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid); static void zcache_put_pool(struct tmem_pool *pool); /* * Flush and free all zbuds in a zbpg, then free the pageframe */ static void zbud_evict_zbpg(struct zbud_page *zbpg) { struct zbud_hdr *zh; int i, j; uint32_t pool_id[ZBUD_MAX_BUDS], client_id[ZBUD_MAX_BUDS]; uint32_t index[ZBUD_MAX_BUDS]; struct tmem_oid oid[ZBUD_MAX_BUDS]; struct tmem_pool *pool; unsigned long flags; ASSERT_SPINLOCK(&zbpg->lock); for (i = 0, j = 0; i < ZBUD_MAX_BUDS; i++) { zh = &zbpg->buddy[i]; if (zh->size) { client_id[j] = zh->client_id; pool_id[j] = zh->pool_id; oid[j] = zh->oid; index[j] = zh->index; j++; } } spin_unlock(&zbpg->lock); for (i = 0; i < j; i++) { pool = zcache_get_pool_by_id(client_id[i], pool_id[i]); BUG_ON(pool == NULL); local_irq_save(flags); /* these flushes should dispose of any local storage */ tmem_flush_page(pool, &oid[i], index[i]); local_irq_restore(flags); zcache_put_pool(pool); } } /* * Free nr pages. This code is funky because we want to hold the locks * protecting various lists for as short a time as possible, and in some * circumstances the list may change asynchronously when the list lock is * not held. In some cases we also trylock not only to avoid waiting on a * page in use by another cpu, but also to avoid potential deadlock due to * lock inversion. 
*/ static void zbud_evict_pages(int nr) { struct zbud_page *zbpg; int i, newly_unused_pages = 0; /* now try freeing unbuddied pages, starting with least space avail */ for (i = 0; i < MAX_CHUNK; i++) { retry_unbud_list_i: spin_lock_bh(&zbud_budlists_spinlock); if (list_empty(&zbud_unbuddied[i].list)) { spin_unlock_bh(&zbud_budlists_spinlock); continue; } list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) { if (unlikely(!spin_trylock(&zbpg->lock))) continue; zbud_unbuddied[i].count--; spin_unlock(&zbud_budlists_spinlock); zcache_evicted_unbuddied_pages++; /* want budlists unlocked when doing zbpg eviction */ zbud_evict_zbpg(zbpg); newly_unused_pages++; local_bh_enable(); if (--nr <= 0) goto evict_unused; goto retry_unbud_list_i; } spin_unlock_bh(&zbud_budlists_spinlock); } /* as a last resort, free buddied pages */ retry_bud_list: spin_lock_bh(&zbud_budlists_spinlock); if (list_empty(&zbud_buddied_list)) { spin_unlock_bh(&zbud_budlists_spinlock); goto evict_unused; } list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) { if (unlikely(!spin_trylock(&zbpg->lock))) continue; zcache_zbud_buddied_count--; spin_unlock(&zbud_budlists_spinlock); zcache_evicted_buddied_pages++; /* want budlists unlocked when doing zbpg eviction */ zbud_evict_zbpg(zbpg); newly_unused_pages++; local_bh_enable(); if (--nr <= 0) goto evict_unused; goto retry_bud_list; } spin_unlock_bh(&zbud_budlists_spinlock); evict_unused: return; } static DEFINE_PER_CPU(unsigned char *, zcache_remoteputmem); static int zbud_remotify_zbud(struct tmem_xhandle *xh, char *data, size_t size) { struct tmem_pool *pool; int i, remotenode, ret = -1; unsigned char cksum, *p; unsigned long flags; for (p = data, cksum = 0, i = 0; i < size; i++) cksum += *p; ret = ramster_remote_put(xh, data, size, true, &remotenode); if (ret == 0) { /* data was successfully remoted so change the local version * to point to the remote node where it landed */ pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh->pool_id); BUG_ON(pool == NULL); local_irq_save(flags); /* tmem_replace will also free up any local space */ (void)tmem_replace(pool, &xh->oid, xh->index, pampd_make_remote(remotenode, size, cksum)); local_irq_restore(flags); zcache_put_pool(pool); ramster_eph_pages_remoted++; ret = 0; } else ramster_eph_pages_remote_failed++; return ret; } static int zbud_remotify_zbpg(struct zbud_page *zbpg) { struct zbud_hdr *zh1, *zh2 = NULL; struct tmem_xhandle xh1, xh2 = { 0 }; char *data1 = NULL, *data2 = NULL; size_t size1 = 0, size2 = 0; int ret = 0; unsigned char *tmpmem = __get_cpu_var(zcache_remoteputmem); ASSERT_SPINLOCK(&zbpg->lock); if (zbpg->buddy[0].size == 0) zh1 = &zbpg->buddy[1]; else if (zbpg->buddy[1].size == 0) zh1 = &zbpg->buddy[0]; else { zh1 = &zbpg->buddy[0]; zh2 = &zbpg->buddy[1]; } /* don't remotify pages that are already remotified */ if (zh1->client_id != LOCAL_CLIENT) zh1 = NULL; if ((zh2 != NULL) && (zh2->client_id != LOCAL_CLIENT)) zh2 = NULL; /* copy the data and metadata so can release lock */ if (zh1 != NULL) { xh1.client_id = zh1->client_id; xh1.pool_id = zh1->pool_id; xh1.oid = zh1->oid; xh1.index = zh1->index; size1 = zh1->size; data1 = zbud_data(zh1, size1); memcpy(tmpmem, zbud_data(zh1, size1), size1); data1 = tmpmem; tmpmem += size1; } if (zh2 != NULL) { xh2.client_id = zh2->client_id; xh2.pool_id = zh2->pool_id; xh2.oid = zh2->oid; xh2.index = zh2->index; size2 = zh2->size; memcpy(tmpmem, zbud_data(zh2, size2), size2); data2 = tmpmem; } spin_unlock(&zbpg->lock); preempt_enable(); /* OK, no locks held anymore, remotify one or 
both zbuds */ if (zh1 != NULL) ret = zbud_remotify_zbud(&xh1, data1, size1); if (zh2 != NULL) ret |= zbud_remotify_zbud(&xh2, data2, size2); return ret; } void zbud_remotify_pages(int nr) { struct zbud_page *zbpg; int i, ret; /* * for now just try remotifying unbuddied pages, starting with * least space avail */ for (i = 0; i < MAX_CHUNK; i++) { retry_unbud_list_i: preempt_disable(); /* enable in zbud_remotify_zbpg */ spin_lock_bh(&zbud_budlists_spinlock); if (list_empty(&zbud_unbuddied[i].list)) { spin_unlock_bh(&zbud_budlists_spinlock); preempt_enable(); continue; /* next i in for loop */ } list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) { if (unlikely(!spin_trylock(&zbpg->lock))) continue; /* next list_for_each_entry */ zbud_unbuddied[i].count--; /* want budlists unlocked when doing zbpg remotify */ spin_unlock_bh(&zbud_budlists_spinlock); ret = zbud_remotify_zbpg(zbpg); /* preemption is re-enabled in zbud_remotify_zbpg */ if (ret == 0) { if (--nr <= 0) goto out; goto retry_unbud_list_i; } /* if fail to remotify any page, quit */ pr_err("TESTING zbud_remotify_pages failed on page," " trying to re-add\n"); spin_lock_bh(&zbud_budlists_spinlock); spin_lock(&zbpg->lock); list_add_tail(&zbpg->bud_list, &zbud_unbuddied[i].list); zbud_unbuddied[i].count++; spin_unlock(&zbpg->lock); spin_unlock_bh(&zbud_budlists_spinlock); pr_err("TESTING zbud_remotify_pages failed on page," " finished re-add\n"); goto out; } spin_unlock_bh(&zbud_budlists_spinlock); preempt_enable(); } next_buddied_zbpg: preempt_disable(); /* enable in zbud_remotify_zbpg */ spin_lock_bh(&zbud_budlists_spinlock); if (list_empty(&zbud_buddied_list)) goto unlock_out; list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) { if (unlikely(!spin_trylock(&zbpg->lock))) continue; /* next list_for_each_entry */ zcache_zbud_buddied_count--; /* want budlists unlocked when doing zbpg remotify */ spin_unlock_bh(&zbud_budlists_spinlock); ret = zbud_remotify_zbpg(zbpg); /* preemption is re-enabled in zbud_remotify_zbpg */ if (ret == 0) { if (--nr <= 0) goto out; goto next_buddied_zbpg; } /* if fail to remotify any page, quit */ pr_err("TESTING zbud_remotify_pages failed on BUDDIED page," " trying to re-add\n"); spin_lock_bh(&zbud_budlists_spinlock); spin_lock(&zbpg->lock); list_add_tail(&zbpg->bud_list, &zbud_buddied_list); zcache_zbud_buddied_count++; spin_unlock(&zbpg->lock); spin_unlock_bh(&zbud_budlists_spinlock); pr_err("TESTING zbud_remotify_pages failed on BUDDIED page," " finished re-add\n"); goto out; } unlock_out: spin_unlock_bh(&zbud_budlists_spinlock); preempt_enable(); out: return; } /* the "flush list" asynchronously collects pages to remotely flush */ #define FLUSH_ENTIRE_OBJECT ((uint32_t)-1) static void ramster_flnode_free(struct flushlist_node *, struct tmem_pool *); static void zcache_remote_flush_page(struct flushlist_node *flnode) { struct tmem_xhandle *xh; int remotenode, ret; preempt_disable(); xh = &flnode->xh; remotenode = flnode->xh.client_id; ret = ramster_remote_flush(xh, remotenode); if (ret >= 0) ramster_remote_pages_flushed++; else ramster_remote_page_flushes_failed++; preempt_enable_no_resched(); ramster_flnode_free(flnode, NULL); } static void zcache_remote_flush_object(struct flushlist_node *flnode) { struct tmem_xhandle *xh; int remotenode, ret; preempt_disable(); xh = &flnode->xh; remotenode = flnode->xh.client_id; ret = ramster_remote_flush_object(xh, remotenode); if (ret >= 0) ramster_remote_objects_flushed++; else ramster_remote_object_flushes_failed++; preempt_enable_no_resched(); 
	ramster_flnode_free(flnode, NULL);
}

static void zcache_remote_eph_put(struct zbud_hdr *zbud)
{
	/* FIXME */
}

static void zcache_remote_pers_put(struct zv_hdr *zv)
{
	struct tmem_xhandle xh;
	uint16_t size;
	bool ephemeral;
	int remotenode, ret = -1;
	char *data;
	struct tmem_pool *pool;
	unsigned long flags;
	unsigned char cksum;
	char *p;
	int i;
	unsigned char *tmpmem = __get_cpu_var(zcache_remoteputmem);

	ASSERT_SENTINEL(zv, ZVH);
	BUG_ON(zv->client_id != LOCAL_CLIENT);
	local_bh_disable();
	xh.client_id = zv->client_id;
	xh.pool_id = zv->pool_id;
	xh.oid = zv->oid;
	xh.index = zv->index;
	size = xv_get_object_size(zv) - sizeof(*zv);
	BUG_ON(size == 0 || size > zv_max_page_size);
	data = (char *)zv + sizeof(*zv);
	/* checksum byte-by-byte, as in zbud_remotify_zbud() */
	for (p = data, cksum = 0, i = 0; i < size; i++)
		cksum += *p++;
	memcpy(tmpmem, data, size);
	data = tmpmem;
	pool = zcache_get_pool_by_id(zv->client_id, zv->pool_id);
	ephemeral = is_ephemeral(pool);
	zcache_put_pool(pool);
	/* now OK to release lock set in caller */
	spin_unlock(&zcache_rem_op_list_lock);
	local_bh_enable();
	preempt_disable();
	ret = ramster_remote_put(&xh, data, size, ephemeral, &remotenode);
	preempt_enable_no_resched();
	if (ret != 0) {
		/*
		 * This is some form of a memory leak... if the remote put
		 * fails, there will never be another attempt to remotify
		 * this page.  But since we've dropped the zv pointer,
		 * the page may have been freed or the data replaced
		 * so we can't just "put it back" in the remote op list.
		 * Even if we could, not sure where to put it in the list
		 * because there may be flushes that must be strictly
		 * ordered vs the put.  So leave this as a FIXME for now.
		 * But count them so we know if it becomes a problem.
		 */
		ramster_pers_pages_remote_failed++;
		goto out;
	} else
		atomic_inc(&ramster_remote_pers_pages);
	ramster_pers_pages_remoted++;
	/*
	 * data was successfully remoted so change the local version to
	 * point to the remote node where it landed
	 */
	local_bh_disable();
	pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh.pool_id);
	local_irq_save(flags);
	(void)tmem_replace(pool, &xh.oid, xh.index,
			pampd_make_remote(remotenode, size, cksum));
	local_irq_restore(flags);
	zcache_put_pool(pool);
	local_bh_enable();
out:
	return;
}

static void zcache_do_remotify_ops(int nr)
{
	struct ramster_remotify_hdr *rem_op;
	union remotify_list_node *u;

	while (1) {
		if (!nr)
			goto out;
		spin_lock(&zcache_rem_op_list_lock);
		if (list_empty(&zcache_rem_op_list)) {
			spin_unlock(&zcache_rem_op_list_lock);
			goto out;
		}
		rem_op = list_first_entry(&zcache_rem_op_list,
				struct ramster_remotify_hdr, list);
		list_del_init(&rem_op->list);
		/* for PERS_PUT, the list lock is released inside
		 * zcache_remote_pers_put() instead of here */
		if (rem_op->op != RAMSTER_REMOTIFY_PERS_PUT)
			spin_unlock(&zcache_rem_op_list_lock);
		u = (union remotify_list_node *)rem_op;
		switch (rem_op->op) {
		case RAMSTER_REMOTIFY_EPH_PUT:
			BUG();
			zcache_remote_eph_put((struct zbud_hdr *)rem_op);
			break;
		case RAMSTER_REMOTIFY_PERS_PUT:
			zcache_remote_pers_put((struct zv_hdr *)rem_op);
			break;
		case RAMSTER_REMOTIFY_FLUSH_PAGE:
			zcache_remote_flush_page((struct flushlist_node *)u);
			break;
		case RAMSTER_REMOTIFY_FLUSH_OBJ:
			zcache_remote_flush_object((struct flushlist_node *)u);
			break;
		default:
			BUG();
		}
		nr--;	/* honor the caller's cap on ops per invocation */
	}
out:
	return;
}

/*
 * Communicate interface revision with userspace
 */
#include "cluster/ramster_nodemanager.h"
static unsigned long ramster_interface_revision = R2NM_API_VERSION;

/*
 * For now, just push over a few pages every few seconds to
 * ensure that it basically works
 */
static struct workqueue_struct *ramster_remotify_workqueue;
static void ramster_remotify_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(ramster_remotify_worker,
ramster_remotify_process); static void ramster_remotify_queue_delayed_work(unsigned long delay) { if (!queue_delayed_work(ramster_remotify_workqueue, &ramster_remotify_worker, delay)) pr_err("ramster_remotify: bad workqueue\n"); } static int use_frontswap; static int use_cleancache; static int ramster_remote_target_nodenum = -1; static void ramster_remotify_process(struct work_struct *work) { static bool remotify_in_progress; BUG_ON(irqs_disabled()); if (remotify_in_progress) ramster_remotify_queue_delayed_work(HZ); else if (ramster_remote_target_nodenum != -1) { remotify_in_progress = true; #ifdef CONFIG_CLEANCACHE if (use_cleancache && ramster_eph_remotify_enable) zbud_remotify_pages(5000); /* FIXME is this a good number? */ #endif #ifdef CONFIG_FRONTSWAP if (use_frontswap && ramster_pers_remotify_enable) zcache_do_remotify_ops(500); /* FIXME is this a good number? */ #endif remotify_in_progress = false; ramster_remotify_queue_delayed_work(HZ); } } static void ramster_remotify_init(void) { unsigned long n = 60UL; ramster_remotify_workqueue = create_singlethread_workqueue("ramster_remotify"); ramster_remotify_queue_delayed_work(n * HZ); } static void zbud_init(void) { int i; INIT_LIST_HEAD(&zbud_buddied_list); zcache_zbud_buddied_count = 0; for (i = 0; i < NCHUNKS; i++) { INIT_LIST_HEAD(&zbud_unbuddied[i].list); zbud_unbuddied[i].count = 0; } } #ifdef CONFIG_SYSFS /* * These sysfs routines show a nice distribution of how many zbpg's are * currently (and have ever been placed) in each unbuddied list. It's fun * to watch but can probably go away before final merge. */ static int zbud_show_unbuddied_list_counts(char *buf) { int i; char *p = buf; for (i = 0; i < NCHUNKS; i++) p += sprintf(p, "%u ", zbud_unbuddied[i].count); return p - buf; } static int zbud_show_cumul_chunk_counts(char *buf) { unsigned long i, chunks = 0, total_chunks = 0, sum_total_chunks = 0; unsigned long total_chunks_lte_21 = 0, total_chunks_lte_32 = 0; unsigned long total_chunks_lte_42 = 0; char *p = buf; for (i = 0; i < NCHUNKS; i++) { p += sprintf(p, "%lu ", zbud_cumul_chunk_counts[i]); chunks += zbud_cumul_chunk_counts[i]; total_chunks += zbud_cumul_chunk_counts[i]; sum_total_chunks += i * zbud_cumul_chunk_counts[i]; if (i == 21) total_chunks_lte_21 = total_chunks; if (i == 32) total_chunks_lte_32 = total_chunks; if (i == 42) total_chunks_lte_42 = total_chunks; } p += sprintf(p, "<=21:%lu <=32:%lu <=42:%lu, mean:%lu\n", total_chunks_lte_21, total_chunks_lte_32, total_chunks_lte_42, chunks == 0 ? 0 : sum_total_chunks / chunks); return p - buf; } #endif /********** * This "zv" PAM implementation combines the TLSF-based xvMalloc * with lzo1x compression to maximize the amount of data that can * be packed into a physical page. * * Zv represents a PAM page with the index and object (plus a "size" value * necessary for decompression) immediately preceding the compressed data. 
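 *
 * A rough sketch of the resulting layout (field names as used by
 * zv_create() below; proportions illustrative only):
 *
 *    +-------------------------------+---------------------------+
 *    | struct zv_hdr (client_id,     | compressed data           |
 *    | pool_id, oid, index, rem_op)  | (clen bytes, lzo1x)       |
 *    +-------------------------------+---------------------------+
 *    |<-- sizeof(struct zv_hdr) ---->|<--------- clen ---------->|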
*/ /* rudimentary policy limits */ /* total number of persistent pages may not exceed this percentage */ static unsigned int zv_page_count_policy_percent = 75; /* * byte count defining poor compression; pages with greater zsize will be * rejected */ static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7; /* * byte count defining poor *mean* compression; pages with greater zsize * will be rejected until sufficient better-compressed pages are accepted * driving the mean below this threshold */ static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5; static atomic_t zv_curr_dist_counts[NCHUNKS]; static atomic_t zv_cumul_dist_counts[NCHUNKS]; static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id, struct tmem_oid *oid, uint32_t index, void *cdata, unsigned clen) { struct page *page; struct zv_hdr *zv = NULL; uint32_t offset; int alloc_size = clen + sizeof(struct zv_hdr); int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT; int ret; BUG_ON(!irqs_disabled()); BUG_ON(chunks >= NCHUNKS); ret = xv_malloc(cli->xvpool, clen + sizeof(struct zv_hdr), &page, &offset, ZCACHE_GFP_MASK); if (unlikely(ret)) goto out; atomic_inc(&zv_curr_dist_counts[chunks]); atomic_inc(&zv_cumul_dist_counts[chunks]); zv = kmap_atomic(page) + offset; zv->index = index; zv->oid = *oid; zv->pool_id = pool_id; SET_SENTINEL(zv, ZVH); INIT_LIST_HEAD(&zv->rem_op.list); zv->client_id = get_client_id_from_client(cli); zv->rem_op.op = RAMSTER_REMOTIFY_PERS_PUT; if (zv->client_id == LOCAL_CLIENT) { spin_lock(&zcache_rem_op_list_lock); list_add_tail(&zv->rem_op.list, &zcache_rem_op_list); spin_unlock(&zcache_rem_op_list_lock); } memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen); kunmap_atomic(zv); out: return zv; } /* similar to zv_create, but just reserve space, no data yet */ static struct zv_hdr *zv_alloc(struct tmem_pool *pool, struct tmem_oid *oid, uint32_t index, unsigned clen) { struct zcache_client *cli = pool->client; struct page *page; struct zv_hdr *zv = NULL; uint32_t offset; int ret; BUG_ON(!irqs_disabled()); BUG_ON(!is_local_client(pool->client)); ret = xv_malloc(cli->xvpool, clen + sizeof(struct zv_hdr), &page, &offset, ZCACHE_GFP_MASK); if (unlikely(ret)) goto out; zv = kmap_atomic(page) + offset; SET_SENTINEL(zv, ZVH); INIT_LIST_HEAD(&zv->rem_op.list); zv->client_id = LOCAL_CLIENT; zv->rem_op.op = RAMSTER_INTRANSIT_PERS; zv->index = index; zv->oid = *oid; zv->pool_id = pool->pool_id; kunmap_atomic(zv); out: return zv; } static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv) { unsigned long flags; struct page *page; uint32_t offset; uint16_t size = xv_get_object_size(zv); int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT; ASSERT_SENTINEL(zv, ZVH); BUG_ON(chunks >= NCHUNKS); atomic_dec(&zv_curr_dist_counts[chunks]); size -= sizeof(*zv); spin_lock(&zcache_rem_op_list_lock); size = xv_get_object_size(zv) - sizeof(*zv); BUG_ON(size == 0); INVERT_SENTINEL(zv, ZVH); if (!list_empty(&zv->rem_op.list)) list_del_init(&zv->rem_op.list); spin_unlock(&zcache_rem_op_list_lock); page = virt_to_page(zv); offset = (unsigned long)zv & ~PAGE_MASK; local_irq_save(flags); xv_free(xvpool, page, offset); local_irq_restore(flags); } static void zv_decompress(struct page *page, struct zv_hdr *zv) { size_t clen = PAGE_SIZE; char *to_va; unsigned size; int ret; ASSERT_SENTINEL(zv, ZVH); size = xv_get_object_size(zv) - sizeof(*zv); BUG_ON(size == 0); to_va = kmap_atomic(page); ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv), size, to_va, &clen); kunmap_atomic(to_va); BUG_ON(ret != LZO_E_OK); 
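	/* a persistent zv always decompresses to exactly one full page */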
	BUG_ON(clen != PAGE_SIZE);
}

static void zv_copy_from_pampd(char *data, size_t *bufsize, struct zv_hdr *zv)
{
	unsigned size;

	ASSERT_SENTINEL(zv, ZVH);
	size = xv_get_object_size(zv) - sizeof(*zv);
	BUG_ON(size == 0 || size > zv_max_page_size);
	BUG_ON(size > *bufsize);
	memcpy(data, (char *)zv + sizeof(*zv), size);
	*bufsize = size;
}

static void zv_copy_to_pampd(struct zv_hdr *zv, char *data, size_t size)
{
	unsigned zv_size;

	ASSERT_SENTINEL(zv, ZVH);
	zv_size = xv_get_object_size(zv) - sizeof(*zv);
	BUG_ON(zv_size != size);
	BUG_ON(zv_size == 0 || zv_size > zv_max_page_size);
	memcpy((char *)zv + sizeof(*zv), data, size);
}

#ifdef CONFIG_SYSFS
/*
 * show a distribution of compression stats for zv pages.
 */

static int zv_curr_dist_counts_show(char *buf)
{
	unsigned long i, n, chunks = 0, sum_total_chunks = 0;
	char *p = buf;

	for (i = 0; i < NCHUNKS; i++) {
		n = atomic_read(&zv_curr_dist_counts[i]);
		p += sprintf(p, "%lu ", n);
		chunks += n;
		sum_total_chunks += i * n;
	}
	p += sprintf(p, "mean:%lu\n",
		chunks == 0 ? 0 : sum_total_chunks / chunks);
	return p - buf;
}

static int zv_cumul_dist_counts_show(char *buf)
{
	unsigned long i, n, chunks = 0, sum_total_chunks = 0;
	char *p = buf;

	for (i = 0; i < NCHUNKS; i++) {
		n = atomic_read(&zv_cumul_dist_counts[i]);
		p += sprintf(p, "%lu ", n);
		chunks += n;
		sum_total_chunks += i * n;
	}
	p += sprintf(p, "mean:%lu\n",
		chunks == 0 ? 0 : sum_total_chunks / chunks);
	return p - buf;
}

/*
 * setting zv_max_zsize via sysfs causes all persistent (e.g. swap)
 * pages that don't compress to less than this value (including metadata
 * overhead) to be rejected.  We don't allow the value to get too close
 * to PAGE_SIZE.
 */
static ssize_t zv_max_zsize_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", zv_max_zsize);
}

static ssize_t zv_max_zsize_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned long val;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 10, &val);
	if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
		return -EINVAL;
	zv_max_zsize = val;
	return count;
}

/*
 * setting zv_max_mean_zsize via sysfs causes all persistent (e.g. swap)
 * pages that don't compress to less than this value (including metadata
 * overhead) to be rejected UNLESS the mean compression is also smaller
 * than this value.  In other words, we are load-balancing-by-zsize the
 * accepted pages.  Again, we don't allow the value to get too close
 * to PAGE_SIZE.
 */
static ssize_t zv_max_mean_zsize_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", zv_max_mean_zsize);
}

static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned long val;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 10, &val);
	if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
		return -EINVAL;
	zv_max_mean_zsize = val;
	return count;
}

/*
 * setting zv_page_count_policy_percent via sysfs sets an upper bound on
 * the number of persistent (e.g. swap) pages that will be retained,
 * according to:
 *	(zv_page_count_policy_percent * totalram_pages) / 100
 * when that limit is reached, further puts will be rejected (until
 * some pages have been flushed).  Note that, due to compression,
 * this number may exceed 100; it defaults to 75 and we set an
 * arbitrary limit of 150.  A poor choice will almost certainly result
 * in OOM's, so this value should only be changed prudently.
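 *
 * As a worked example (numbers purely illustrative): with 4K pages on
 * a 1 GiB machine, totalram_pages == 262144, so the default of 75
 * allows up to (75 * 262144) / 100 == 196608 compressed persistent
 * pages to be retained before further puts are rejected.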
*/ static ssize_t zv_page_count_policy_percent_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", zv_page_count_policy_percent); } static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned long val; int err; if (!capable(CAP_SYS_ADMIN)) return -EPERM; err = kstrtoul(buf, 10, &val); if (err || (val == 0) || (val > 150)) return -EINVAL; zv_page_count_policy_percent = val; return count; } static struct kobj_attribute zcache_zv_max_zsize_attr = { .attr = { .name = "zv_max_zsize", .mode = 0644 }, .show = zv_max_zsize_show, .store = zv_max_zsize_store, }; static struct kobj_attribute zcache_zv_max_mean_zsize_attr = { .attr = { .name = "zv_max_mean_zsize", .mode = 0644 }, .show = zv_max_mean_zsize_show, .store = zv_max_mean_zsize_store, }; static struct kobj_attribute zcache_zv_page_count_policy_percent_attr = { .attr = { .name = "zv_page_count_policy_percent", .mode = 0644 }, .show = zv_page_count_policy_percent_show, .store = zv_page_count_policy_percent_store, }; #endif /* * zcache core code starts here */ /* useful stats not collected by cleancache or frontswap */ static unsigned long zcache_flush_total; static unsigned long zcache_flush_found; static unsigned long zcache_flobj_total; static unsigned long zcache_flobj_found; static unsigned long zcache_failed_eph_puts; static unsigned long zcache_nonactive_puts; static unsigned long zcache_failed_pers_puts; /* * Tmem operations assume the poolid implies the invoking client. * Zcache only has one client (the kernel itself): LOCAL_CLIENT. * RAMster has each client numbered by cluster node, and a KVM version * of zcache would have one client per guest and each client might * have a poolid==N. 
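 *
 * So, for example, an operation arriving from cluster node 3 against
 * that node's pool 2 resolves here via zcache_get_pool_by_id(3, 2),
 * while the local kernel's own pools always resolve via
 * zcache_get_pool_by_id(LOCAL_CLIENT, poolid).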
*/ static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid) { struct tmem_pool *pool = NULL; struct zcache_client *cli = NULL; if (cli_id == LOCAL_CLIENT) cli = &zcache_host; else { if (cli_id >= MAX_CLIENTS) goto out; cli = &zcache_clients[cli_id]; if (cli == NULL) goto out; atomic_inc(&cli->refcount); } if (poolid < MAX_POOLS_PER_CLIENT) { pool = cli->tmem_pools[poolid]; if (pool != NULL) atomic_inc(&pool->refcount); } out: return pool; } static void zcache_put_pool(struct tmem_pool *pool) { struct zcache_client *cli = NULL; if (pool == NULL) BUG(); cli = pool->client; atomic_dec(&pool->refcount); atomic_dec(&cli->refcount); } int zcache_new_client(uint16_t cli_id) { struct zcache_client *cli = NULL; int ret = -1; if (cli_id == LOCAL_CLIENT) cli = &zcache_host; else if ((unsigned int)cli_id < MAX_CLIENTS) cli = &zcache_clients[cli_id]; if (cli == NULL) goto out; if (cli->allocated) goto out; cli->allocated = 1; #ifdef CONFIG_FRONTSWAP cli->xvpool = xv_create_pool(); if (cli->xvpool == NULL) goto out; #endif ret = 0; out: return ret; } /* counters for debugging */ static unsigned long zcache_failed_get_free_pages; static unsigned long zcache_failed_alloc; static unsigned long zcache_put_to_flush; /* * for now, used named slabs so can easily track usage; later can * either just use kmalloc, or perhaps add a slab-like allocator * to more carefully manage total memory utilization */ static struct kmem_cache *zcache_objnode_cache; static struct kmem_cache *zcache_obj_cache; static struct kmem_cache *ramster_flnode_cache; static atomic_t zcache_curr_obj_count = ATOMIC_INIT(0); static unsigned long zcache_curr_obj_count_max; static atomic_t zcache_curr_objnode_count = ATOMIC_INIT(0); static unsigned long zcache_curr_objnode_count_max; /* * to avoid memory allocation recursion (e.g. 
due to direct reclaim), we * preload all necessary data structures so the hostops callbacks never * actually do a malloc */ struct zcache_preload { void *page; struct tmem_obj *obj; int nr; struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH]; struct flushlist_node *flnode; }; static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, }; static int zcache_do_preload(struct tmem_pool *pool) { struct zcache_preload *kp; struct tmem_objnode *objnode; struct tmem_obj *obj; struct flushlist_node *flnode; void *page; int ret = -ENOMEM; if (unlikely(zcache_objnode_cache == NULL)) goto out; if (unlikely(zcache_obj_cache == NULL)) goto out; preempt_disable(); kp = &__get_cpu_var(zcache_preloads); while (kp->nr < ARRAY_SIZE(kp->objnodes)) { preempt_enable_no_resched(); objnode = kmem_cache_alloc(zcache_objnode_cache, ZCACHE_GFP_MASK); if (unlikely(objnode == NULL)) { zcache_failed_alloc++; goto out; } preempt_disable(); kp = &__get_cpu_var(zcache_preloads); if (kp->nr < ARRAY_SIZE(kp->objnodes)) kp->objnodes[kp->nr++] = objnode; else kmem_cache_free(zcache_objnode_cache, objnode); } preempt_enable_no_resched(); obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK); if (unlikely(obj == NULL)) { zcache_failed_alloc++; goto out; } flnode = kmem_cache_alloc(ramster_flnode_cache, ZCACHE_GFP_MASK); if (unlikely(flnode == NULL)) { zcache_failed_alloc++; goto out; } if (is_ephemeral(pool)) { page = (void *)__get_free_page(ZCACHE_GFP_MASK); if (unlikely(page == NULL)) { zcache_failed_get_free_pages++; kmem_cache_free(zcache_obj_cache, obj); kmem_cache_free(ramster_flnode_cache, flnode); goto out; } } preempt_disable(); kp = &__get_cpu_var(zcache_preloads); if (kp->obj == NULL) kp->obj = obj; else kmem_cache_free(zcache_obj_cache, obj); if (kp->flnode == NULL) kp->flnode = flnode; else kmem_cache_free(ramster_flnode_cache, flnode); if (is_ephemeral(pool)) { if (kp->page == NULL) kp->page = page; else free_page((unsigned long)page); } ret = 0; out: return ret; } static int ramster_do_preload_flnode_only(struct tmem_pool *pool) { struct zcache_preload *kp; struct flushlist_node *flnode; int ret = -ENOMEM; BUG_ON(!irqs_disabled()); if (unlikely(ramster_flnode_cache == NULL)) BUG(); kp = &__get_cpu_var(zcache_preloads); flnode = kmem_cache_alloc(ramster_flnode_cache, GFP_ATOMIC); if (unlikely(flnode == NULL) && kp->flnode == NULL) BUG(); /* FIXME handle more gracefully, but how??? 
*/ else if (kp->flnode == NULL) kp->flnode = flnode; else kmem_cache_free(ramster_flnode_cache, flnode); return ret; } static void *zcache_get_free_page(void) { struct zcache_preload *kp; void *page; kp = &__get_cpu_var(zcache_preloads); page = kp->page; BUG_ON(page == NULL); kp->page = NULL; return page; } static void zcache_free_page(void *p) { free_page((unsigned long)p); } /* * zcache implementation for tmem host ops */ static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool) { struct tmem_objnode *objnode = NULL; unsigned long count; struct zcache_preload *kp; kp = &__get_cpu_var(zcache_preloads); if (kp->nr <= 0) goto out; objnode = kp->objnodes[kp->nr - 1]; BUG_ON(objnode == NULL); kp->objnodes[kp->nr - 1] = NULL; kp->nr--; count = atomic_inc_return(&zcache_curr_objnode_count); if (count > zcache_curr_objnode_count_max) zcache_curr_objnode_count_max = count; out: return objnode; } static void zcache_objnode_free(struct tmem_objnode *objnode, struct tmem_pool *pool) { atomic_dec(&zcache_curr_objnode_count); BUG_ON(atomic_read(&zcache_curr_objnode_count) < 0); kmem_cache_free(zcache_objnode_cache, objnode); } static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool) { struct tmem_obj *obj = NULL; unsigned long count; struct zcache_preload *kp; kp = &__get_cpu_var(zcache_preloads); obj = kp->obj; BUG_ON(obj == NULL); kp->obj = NULL; count = atomic_inc_return(&zcache_curr_obj_count); if (count > zcache_curr_obj_count_max) zcache_curr_obj_count_max = count; return obj; } static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool) { atomic_dec(&zcache_curr_obj_count); BUG_ON(atomic_read(&zcache_curr_obj_count) < 0); kmem_cache_free(zcache_obj_cache, obj); } static struct flushlist_node *ramster_flnode_alloc(struct tmem_pool *pool) { struct flushlist_node *flnode = NULL; struct zcache_preload *kp; int count; kp = &__get_cpu_var(zcache_preloads); flnode = kp->flnode; BUG_ON(flnode == NULL); kp->flnode = NULL; count = atomic_inc_return(&ramster_curr_flnode_count); if (count > ramster_curr_flnode_count_max) ramster_curr_flnode_count_max = count; return flnode; } static void ramster_flnode_free(struct flushlist_node *flnode, struct tmem_pool *pool) { atomic_dec(&ramster_curr_flnode_count); BUG_ON(atomic_read(&ramster_curr_flnode_count) < 0); kmem_cache_free(ramster_flnode_cache, flnode); } static struct tmem_hostops zcache_hostops = { .obj_alloc = zcache_obj_alloc, .obj_free = zcache_obj_free, .objnode_alloc = zcache_objnode_alloc, .objnode_free = zcache_objnode_free, }; /* * zcache implementations for PAM page descriptor ops */ static inline void dec_and_check(atomic_t *pvar) { atomic_dec(pvar); /* later when all accounting is fixed, make this a BUG */ WARN_ON_ONCE(atomic_read(pvar) < 0); } static atomic_t zcache_curr_eph_pampd_count = ATOMIC_INIT(0); static unsigned long zcache_curr_eph_pampd_count_max; static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0); static unsigned long zcache_curr_pers_pampd_count_max; /* forward reference */ static int zcache_compress(struct page *from, void **out_va, size_t *out_len); static int zcache_pampd_eph_create(char *data, size_t size, bool raw, struct tmem_pool *pool, struct tmem_oid *oid, uint32_t index, void **pampd) { int ret = -1; void *cdata = data; size_t clen = size; struct zcache_client *cli = pool->client; uint16_t client_id = get_client_id_from_client(cli); struct page *page = NULL; unsigned long count; if (!raw) { page = virt_to_page(data); ret = zcache_compress(page, &cdata, &clen); if (ret == 0) 
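			/*
			 * zcache_compress() returns 0 only when the per-cpu
			 * buffers are missing, i.e. nothing was compressed
			 */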
goto out; if (clen == 0 || clen > zbud_max_buddy_size()) { zcache_compress_poor++; goto out; } } *pampd = (void *)zbud_create(client_id, pool->pool_id, oid, index, page, cdata, clen); if (*pampd == NULL) { ret = -ENOMEM; goto out; } ret = 0; count = atomic_inc_return(&zcache_curr_eph_pampd_count); if (count > zcache_curr_eph_pampd_count_max) zcache_curr_eph_pampd_count_max = count; if (client_id != LOCAL_CLIENT) { count = atomic_inc_return(&ramster_foreign_eph_pampd_count); if (count > ramster_foreign_eph_pampd_count_max) ramster_foreign_eph_pampd_count_max = count; } out: return ret; } static int zcache_pampd_pers_create(char *data, size_t size, bool raw, struct tmem_pool *pool, struct tmem_oid *oid, uint32_t index, void **pampd) { int ret = -1; void *cdata = data; size_t clen = size; struct zcache_client *cli = pool->client; struct page *page; unsigned long count; unsigned long zv_mean_zsize; struct zv_hdr *zv; long curr_pers_pampd_count; u64 total_zsize; #ifdef RAMSTER_TESTING static bool pampd_neg_warned; #endif curr_pers_pampd_count = atomic_read(&zcache_curr_pers_pampd_count) - atomic_read(&ramster_remote_pers_pages); #ifdef RAMSTER_TESTING /* should always be positive, but warn if accounting is off */ if (!pampd_neg_warned) { pr_warn("ramster: bad accounting for curr_pers_pampd_count\n"); pampd_neg_warned = true; } #endif if (curr_pers_pampd_count > (zv_page_count_policy_percent * totalram_pages) / 100) { zcache_policy_percent_exceeded++; goto out; } if (raw) goto ok_to_create; page = virt_to_page(data); if (zcache_compress(page, &cdata, &clen) == 0) goto out; /* reject if compression is too poor */ if (clen > zv_max_zsize) { zcache_compress_poor++; goto out; } /* reject if mean compression is too poor */ if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) { total_zsize = xv_get_total_size_bytes(cli->xvpool); zv_mean_zsize = div_u64(total_zsize, curr_pers_pampd_count); if (zv_mean_zsize > zv_max_mean_zsize) { zcache_mean_compress_poor++; goto out; } } ok_to_create: *pampd = (void *)zv_create(cli, pool->pool_id, oid, index, cdata, clen); if (*pampd == NULL) { ret = -ENOMEM; goto out; } ret = 0; count = atomic_inc_return(&zcache_curr_pers_pampd_count); if (count > zcache_curr_pers_pampd_count_max) zcache_curr_pers_pampd_count_max = count; if (is_local_client(cli)) goto out; zv = *(struct zv_hdr **)pampd; count = atomic_inc_return(&ramster_foreign_pers_pampd_count); if (count > ramster_foreign_pers_pampd_count_max) ramster_foreign_pers_pampd_count_max = count; out: return ret; } static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph, struct tmem_pool *pool, struct tmem_oid *oid, uint32_t index) { void *pampd = NULL; int ret; bool ephemeral; BUG_ON(preemptible()); ephemeral = (eph == 1) || ((eph == 0) && is_ephemeral(pool)); if (ephemeral) ret = zcache_pampd_eph_create(data, size, raw, pool, oid, index, &pampd); else ret = zcache_pampd_pers_create(data, size, raw, pool, oid, index, &pampd); /* FIXME add some counters here for failed creates? */ return pampd; } /* * fill the pageframe corresponding to the struct page with the data * from the passed pampd */ static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw, void *pampd, struct tmem_pool *pool, struct tmem_oid *oid, uint32_t index) { int ret = 0; BUG_ON(preemptible()); BUG_ON(is_ephemeral(pool)); /* Fix later for shared pools? 
*/ BUG_ON(pampd_is_remote(pampd)); if (raw) zv_copy_from_pampd(data, bufsize, pampd); else zv_decompress(virt_to_page(data), pampd); return ret; } static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw, void *pampd, struct tmem_pool *pool, struct tmem_oid *oid, uint32_t index) { int ret = 0; unsigned long flags; struct zcache_client *cli = pool->client; BUG_ON(preemptible()); BUG_ON(pampd_is_remote(pampd)); if (is_ephemeral(pool)) { local_irq_save(flags); if (raw) zbud_copy_from_pampd(data, bufsize, pampd); else ret = zbud_decompress(virt_to_page(data), pampd); zbud_free_and_delist((struct zbud_hdr *)pampd); local_irq_restore(flags); if (!is_local_client(cli)) dec_and_check(&ramster_foreign_eph_pampd_count); dec_and_check(&zcache_curr_eph_pampd_count); } else { if (is_local_client(cli)) BUG(); if (raw) zv_copy_from_pampd(data, bufsize, pampd); else zv_decompress(virt_to_page(data), pampd); zv_free(cli->xvpool, pampd); if (!is_local_client(cli)) dec_and_check(&ramster_foreign_pers_pampd_count); dec_and_check(&zcache_curr_pers_pampd_count); ret = 0; } return ret; } static bool zcache_pampd_is_remote(void *pampd) { return pampd_is_remote(pampd); } /* * free the pampd and remove it from any zcache lists * pampd must no longer be pointed to from any tmem data structures! */ static void zcache_pampd_free(void *pampd, struct tmem_pool *pool, struct tmem_oid *oid, uint32_t index, bool acct) { struct zcache_client *cli = pool->client; bool eph = is_ephemeral(pool); struct zv_hdr *zv; BUG_ON(preemptible()); if (pampd_is_remote(pampd)) { WARN_ON(acct == false); if (oid == NULL) { /* * a NULL oid means to ignore this pampd free * as the remote freeing will be handled elsewhere */ } else if (eph) { /* FIXME remote flush optional but probably good idea */ /* FIXME get these working properly again */ dec_and_check(&zcache_curr_eph_pampd_count); } else if (pampd_is_intransit(pampd)) { /* did a pers remote get_and_free, so just free local */ pampd = pampd_mask_intransit_and_remote(pampd); goto local_pers; } else { struct flushlist_node *flnode = ramster_flnode_alloc(pool); flnode->xh.client_id = pampd_remote_node(pampd); flnode->xh.pool_id = pool->pool_id; flnode->xh.oid = *oid; flnode->xh.index = index; flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_PAGE; spin_lock(&zcache_rem_op_list_lock); list_add(&flnode->rem_op.list, &zcache_rem_op_list); spin_unlock(&zcache_rem_op_list_lock); dec_and_check(&zcache_curr_pers_pampd_count); dec_and_check(&ramster_remote_pers_pages); } } else if (eph) { zbud_free_and_delist((struct zbud_hdr *)pampd); if (!is_local_client(pool->client)) dec_and_check(&ramster_foreign_eph_pampd_count); if (acct) /* FIXME get these working properly again */ dec_and_check(&zcache_curr_eph_pampd_count); } else { local_pers: zv = (struct zv_hdr *)pampd; if (!is_local_client(pool->client)) dec_and_check(&ramster_foreign_pers_pampd_count); zv_free(cli->xvpool, zv); if (acct) /* FIXME get these working properly again */ dec_and_check(&zcache_curr_pers_pampd_count); } } static void zcache_pampd_free_obj(struct tmem_pool *pool, struct tmem_obj *obj) { struct flushlist_node *flnode; BUG_ON(preemptible()); if (obj->extra == NULL) return; BUG_ON(!pampd_is_remote(obj->extra)); flnode = ramster_flnode_alloc(pool); flnode->xh.client_id = pampd_remote_node(obj->extra); flnode->xh.pool_id = pool->pool_id; flnode->xh.oid = obj->oid; flnode->xh.index = FLUSH_ENTIRE_OBJECT; flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_OBJ; spin_lock(&zcache_rem_op_list_lock); list_add(&flnode->rem_op.list, 
&zcache_rem_op_list); spin_unlock(&zcache_rem_op_list_lock); } void zcache_pampd_new_obj(struct tmem_obj *obj) { obj->extra = NULL; } int zcache_pampd_replace_in_obj(void *new_pampd, struct tmem_obj *obj) { int ret = -1; if (new_pampd != NULL) { if (obj->extra == NULL) obj->extra = new_pampd; /* enforce that all remote pages in an object reside * in the same node! */ else if (pampd_remote_node(new_pampd) != pampd_remote_node((void *)(obj->extra))) BUG(); ret = 0; } return ret; } /* * Called by the message handler after a (still compressed) page has been * fetched from the remote machine in response to an "is_remote" tmem_get * or persistent tmem_localify. For a tmem_get, "extra" is the address of * the page that is to be filled to succesfully resolve the tmem_get; for * a (persistent) tmem_localify, "extra" is NULL (as the data is placed only * in the local zcache). "data" points to "size" bytes of (compressed) data * passed in the message. In the case of a persistent remote get, if * pre-allocation was successful (see zcache_repatriate_preload), the page * is placed into both local zcache and at "extra". */ int zcache_localify(int pool_id, struct tmem_oid *oidp, uint32_t index, char *data, size_t size, void *extra) { int ret = -ENOENT; unsigned long flags; struct tmem_pool *pool; bool ephemeral, delete = false; size_t clen = PAGE_SIZE; void *pampd, *saved_hb; struct tmem_obj *obj; pool = zcache_get_pool_by_id(LOCAL_CLIENT, pool_id); if (unlikely(pool == NULL)) /* pool doesn't exist anymore */ goto out; ephemeral = is_ephemeral(pool); local_irq_save(flags); /* FIXME: maybe only disable softirqs? */ pampd = tmem_localify_get_pampd(pool, oidp, index, &obj, &saved_hb); if (pampd == NULL) { /* hmmm... must have been a flush while waiting */ #ifdef RAMSTER_TESTING pr_err("UNTESTED pampd==NULL in zcache_localify\n"); #endif if (ephemeral) ramster_remote_eph_pages_unsucc_get++; else ramster_remote_pers_pages_unsucc_get++; obj = NULL; goto finish; } else if (unlikely(!pampd_is_remote(pampd))) { /* hmmm... must have been a dup put while waiting */ #ifdef RAMSTER_TESTING pr_err("UNTESTED dup while waiting in zcache_localify\n"); #endif if (ephemeral) ramster_remote_eph_pages_unsucc_get++; else ramster_remote_pers_pages_unsucc_get++; obj = NULL; pampd = NULL; ret = -EEXIST; goto finish; } else if (size == 0) { /* no remote data, delete the local is_remote pampd */ pampd = NULL; if (ephemeral) ramster_remote_eph_pages_unsucc_get++; else BUG(); delete = true; goto finish; } if (!ephemeral && pampd_is_intransit(pampd)) { /* localify to zcache */ pampd = pampd_mask_intransit_and_remote(pampd); zv_copy_to_pampd(pampd, data, size); } else { pampd = NULL; obj = NULL; } if (extra != NULL) { /* decompress direct-to-memory to complete remotify */ ret = lzo1x_decompress_safe((char *)data, size, (char *)extra, &clen); BUG_ON(ret != LZO_E_OK); BUG_ON(clen != PAGE_SIZE); } if (ephemeral) ramster_remote_eph_pages_succ_get++; else ramster_remote_pers_pages_succ_get++; ret = 0; finish: tmem_localify_finish(obj, index, pampd, saved_hb, delete); zcache_put_pool(pool); local_irq_restore(flags); out: return ret; } /* * Called on a remote persistent tmem_get to attempt to preallocate * local storage for the data contained in the remote persistent page. * If succesfully preallocated, returns the pampd, marked as remote and * in_transit. Else returns NULL. Note that the appropriate tmem data * structure must be locked. 
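 *
 * Rough life cycle of a remote persistent pampd, inferred from the
 * code below (a sketch, not a normative state machine):
 *
 *   remote --repatriate_preload--> remote|intransit (local zv reserved)
 *          --repatriate----------> async get sent to the remote node
 *          --zcache_localify-----> plain local zv (remote copy freed)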
*/ static void *zcache_pampd_repatriate_preload(void *pampd, struct tmem_pool *pool, struct tmem_oid *oid, uint32_t index, bool *intransit) { int clen = pampd_remote_size(pampd); void *ret_pampd = NULL; unsigned long flags; if (!pampd_is_remote(pampd)) BUG(); if (is_ephemeral(pool)) BUG(); if (pampd_is_intransit(pampd)) { /* * to avoid multiple allocations (and maybe a memory leak) * don't preallocate if already in the process of being * repatriated */ *intransit = true; goto out; } *intransit = false; local_irq_save(flags); ret_pampd = (void *)zv_alloc(pool, oid, index, clen); if (ret_pampd != NULL) { /* * a pampd is marked intransit if it is remote and space has * been allocated for it locally (note, only happens for * persistent pages, in which case the remote copy is freed) */ ret_pampd = pampd_mark_intransit(ret_pampd); dec_and_check(&ramster_remote_pers_pages); } else ramster_pers_pages_remote_nomem++; local_irq_restore(flags); out: return ret_pampd; } /* * Called on a remote tmem_get to invoke a message to fetch the page. * Might sleep so no tmem locks can be held. "extra" is passed * all the way through the round-trip messaging to zcache_localify. */ static int zcache_pampd_repatriate(void *fake_pampd, void *real_pampd, struct tmem_pool *pool, struct tmem_oid *oid, uint32_t index, bool free, void *extra) { struct tmem_xhandle xh; int ret; if (pampd_is_intransit(real_pampd)) /* have local space pre-reserved, so free remote copy */ free = true; xh = tmem_xhandle_fill(LOCAL_CLIENT, pool, oid, index); /* unreliable request/response for now */ ret = ramster_remote_async_get(&xh, free, pampd_remote_node(fake_pampd), pampd_remote_size(fake_pampd), pampd_remote_cksum(fake_pampd), extra); #ifdef RAMSTER_TESTING if (ret != 0 && ret != -ENOENT) pr_err("TESTING zcache_pampd_repatriate returns, ret=%d\n", ret); #endif return ret; } static struct tmem_pamops zcache_pamops = { .create = zcache_pampd_create, .get_data = zcache_pampd_get_data, .free = zcache_pampd_free, .get_data_and_free = zcache_pampd_get_data_and_free, .free_obj = zcache_pampd_free_obj, .is_remote = zcache_pampd_is_remote, .repatriate_preload = zcache_pampd_repatriate_preload, .repatriate = zcache_pampd_repatriate, .new_obj = zcache_pampd_new_obj, .replace_in_obj = zcache_pampd_replace_in_obj, }; /* * zcache compression/decompression and related per-cpu stuff */ #define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS #define LZO_DSTMEM_PAGE_ORDER 1 static DEFINE_PER_CPU(unsigned char *, zcache_workmem); static DEFINE_PER_CPU(unsigned char *, zcache_dstmem); static int zcache_compress(struct page *from, void **out_va, size_t *out_len) { int ret = 0; unsigned char *dmem = __get_cpu_var(zcache_dstmem); unsigned char *wmem = __get_cpu_var(zcache_workmem); char *from_va; BUG_ON(!irqs_disabled()); if (unlikely(dmem == NULL || wmem == NULL)) goto out; /* no buffer, so can't compress */ from_va = kmap_atomic(from); mb(); ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem); BUG_ON(ret != LZO_E_OK); *out_va = dmem; kunmap_atomic(from_va); ret = 1; out: return ret; } static int zcache_cpu_notifier(struct notifier_block *nb, unsigned long action, void *pcpu) { int cpu = (long)pcpu; struct zcache_preload *kp; switch (action) { case CPU_UP_PREPARE: per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages( GFP_KERNEL | __GFP_REPEAT, LZO_DSTMEM_PAGE_ORDER), per_cpu(zcache_workmem, cpu) = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_REPEAT); per_cpu(zcache_remoteputmem, cpu) = kzalloc(PAGE_SIZE, GFP_KERNEL | __GFP_REPEAT); break; case 
CPU_DEAD: case CPU_UP_CANCELED: kfree(per_cpu(zcache_remoteputmem, cpu)); per_cpu(zcache_remoteputmem, cpu) = NULL; free_pages((unsigned long)per_cpu(zcache_dstmem, cpu), LZO_DSTMEM_PAGE_ORDER); per_cpu(zcache_dstmem, cpu) = NULL; kfree(per_cpu(zcache_workmem, cpu)); per_cpu(zcache_workmem, cpu) = NULL; kp = &per_cpu(zcache_preloads, cpu); while (kp->nr) { kmem_cache_free(zcache_objnode_cache, kp->objnodes[kp->nr - 1]); kp->objnodes[kp->nr - 1] = NULL; kp->nr--; } if (kp->obj) { kmem_cache_free(zcache_obj_cache, kp->obj); kp->obj = NULL; } if (kp->flnode) { kmem_cache_free(ramster_flnode_cache, kp->flnode); kp->flnode = NULL; } if (kp->page) { free_page((unsigned long)kp->page); kp->page = NULL; } break; default: break; } return NOTIFY_OK; } static struct notifier_block zcache_cpu_notifier_block = { .notifier_call = zcache_cpu_notifier }; #ifdef CONFIG_SYSFS #define ZCACHE_SYSFS_RO(_name) \ static ssize_t zcache_##_name##_show(struct kobject *kobj, \ struct kobj_attribute *attr, char *buf) \ { \ return sprintf(buf, "%lu\n", zcache_##_name); \ } \ static struct kobj_attribute zcache_##_name##_attr = { \ .attr = { .name = __stringify(_name), .mode = 0444 }, \ .show = zcache_##_name##_show, \ } #define ZCACHE_SYSFS_RO_ATOMIC(_name) \ static ssize_t zcache_##_name##_show(struct kobject *kobj, \ struct kobj_attribute *attr, char *buf) \ { \ return sprintf(buf, "%d\n", atomic_read(&zcache_##_name)); \ } \ static struct kobj_attribute zcache_##_name##_attr = { \ .attr = { .name = __stringify(_name), .mode = 0444 }, \ .show = zcache_##_name##_show, \ } #define ZCACHE_SYSFS_RO_CUSTOM(_name, _func) \ static ssize_t zcache_##_name##_show(struct kobject *kobj, \ struct kobj_attribute *attr, char *buf) \ { \ return _func(buf); \ } \ static struct kobj_attribute zcache_##_name##_attr = { \ .attr = { .name = __stringify(_name), .mode = 0444 }, \ .show = zcache_##_name##_show, \ } ZCACHE_SYSFS_RO(curr_obj_count_max); ZCACHE_SYSFS_RO(curr_objnode_count_max); ZCACHE_SYSFS_RO(flush_total); ZCACHE_SYSFS_RO(flush_found); ZCACHE_SYSFS_RO(flobj_total); ZCACHE_SYSFS_RO(flobj_found); ZCACHE_SYSFS_RO(failed_eph_puts); ZCACHE_SYSFS_RO(nonactive_puts); ZCACHE_SYSFS_RO(failed_pers_puts); ZCACHE_SYSFS_RO(zbud_curr_zbytes); ZCACHE_SYSFS_RO(zbud_cumul_zpages); ZCACHE_SYSFS_RO(zbud_cumul_zbytes); ZCACHE_SYSFS_RO(zbud_buddied_count); ZCACHE_SYSFS_RO(evicted_raw_pages); ZCACHE_SYSFS_RO(evicted_unbuddied_pages); ZCACHE_SYSFS_RO(evicted_buddied_pages); ZCACHE_SYSFS_RO(failed_get_free_pages); ZCACHE_SYSFS_RO(failed_alloc); ZCACHE_SYSFS_RO(put_to_flush); ZCACHE_SYSFS_RO(compress_poor); ZCACHE_SYSFS_RO(mean_compress_poor); ZCACHE_SYSFS_RO(policy_percent_exceeded); ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages); ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages); ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count); ZCACHE_SYSFS_RO_ATOMIC(curr_objnode_count); ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts, zbud_show_unbuddied_list_counts); ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts, zbud_show_cumul_chunk_counts); ZCACHE_SYSFS_RO_CUSTOM(zv_curr_dist_counts, zv_curr_dist_counts_show); ZCACHE_SYSFS_RO_CUSTOM(zv_cumul_dist_counts, zv_cumul_dist_counts_show); static struct attribute *zcache_attrs[] = { &zcache_curr_obj_count_attr.attr, &zcache_curr_obj_count_max_attr.attr, &zcache_curr_objnode_count_attr.attr, &zcache_curr_objnode_count_max_attr.attr, &zcache_flush_total_attr.attr, &zcache_flobj_total_attr.attr, &zcache_flush_found_attr.attr, &zcache_flobj_found_attr.attr, &zcache_failed_eph_puts_attr.attr, &zcache_nonactive_puts_attr.attr, 
&zcache_failed_pers_puts_attr.attr, &zcache_policy_percent_exceeded_attr.attr, &zcache_compress_poor_attr.attr, &zcache_mean_compress_poor_attr.attr, &zcache_zbud_curr_raw_pages_attr.attr, &zcache_zbud_curr_zpages_attr.attr, &zcache_zbud_curr_zbytes_attr.attr, &zcache_zbud_cumul_zpages_attr.attr, &zcache_zbud_cumul_zbytes_attr.attr, &zcache_zbud_buddied_count_attr.attr, &zcache_evicted_raw_pages_attr.attr, &zcache_evicted_unbuddied_pages_attr.attr, &zcache_evicted_buddied_pages_attr.attr, &zcache_failed_get_free_pages_attr.attr, &zcache_failed_alloc_attr.attr, &zcache_put_to_flush_attr.attr, &zcache_zbud_unbuddied_list_counts_attr.attr, &zcache_zbud_cumul_chunk_counts_attr.attr, &zcache_zv_curr_dist_counts_attr.attr, &zcache_zv_cumul_dist_counts_attr.attr, &zcache_zv_max_zsize_attr.attr, &zcache_zv_max_mean_zsize_attr.attr, &zcache_zv_page_count_policy_percent_attr.attr, NULL, }; static struct attribute_group zcache_attr_group = { .attrs = zcache_attrs, .name = "zcache", }; #define RAMSTER_SYSFS_RO(_name) \ static ssize_t ramster_##_name##_show(struct kobject *kobj, \ struct kobj_attribute *attr, char *buf) \ { \ return sprintf(buf, "%lu\n", ramster_##_name); \ } \ static struct kobj_attribute ramster_##_name##_attr = { \ .attr = { .name = __stringify(_name), .mode = 0444 }, \ .show = ramster_##_name##_show, \ } #define RAMSTER_SYSFS_RW(_name) \ static ssize_t ramster_##_name##_show(struct kobject *kobj, \ struct kobj_attribute *attr, char *buf) \ { \ return sprintf(buf, "%lu\n", ramster_##_name); \ } \ static ssize_t ramster_##_name##_store(struct kobject *kobj, \ struct kobj_attribute *attr, const char *buf, size_t count) \ { \ int err; \ unsigned long enable; \ err = kstrtoul(buf, 10, &enable); \ if (err) \ return -EINVAL; \ ramster_##_name = enable; \ return count; \ } \ static struct kobj_attribute ramster_##_name##_attr = { \ .attr = { .name = __stringify(_name), .mode = 0644 }, \ .show = ramster_##_name##_show, \ .store = ramster_##_name##_store, \ } #define RAMSTER_SYSFS_RO_ATOMIC(_name) \ static ssize_t ramster_##_name##_show(struct kobject *kobj, \ struct kobj_attribute *attr, char *buf) \ { \ return sprintf(buf, "%d\n", atomic_read(&ramster_##_name)); \ } \ static struct kobj_attribute ramster_##_name##_attr = { \ .attr = { .name = __stringify(_name), .mode = 0444 }, \ .show = ramster_##_name##_show, \ } RAMSTER_SYSFS_RO(interface_revision); RAMSTER_SYSFS_RO_ATOMIC(remote_pers_pages); RAMSTER_SYSFS_RW(pers_remotify_enable); RAMSTER_SYSFS_RW(eph_remotify_enable); RAMSTER_SYSFS_RO(eph_pages_remoted); RAMSTER_SYSFS_RO(eph_pages_remote_failed); RAMSTER_SYSFS_RO(pers_pages_remoted); RAMSTER_SYSFS_RO(pers_pages_remote_failed); RAMSTER_SYSFS_RO(pers_pages_remote_nomem); RAMSTER_SYSFS_RO(remote_pages_flushed); RAMSTER_SYSFS_RO(remote_page_flushes_failed); RAMSTER_SYSFS_RO(remote_objects_flushed); RAMSTER_SYSFS_RO(remote_object_flushes_failed); RAMSTER_SYSFS_RO(remote_eph_pages_succ_get); RAMSTER_SYSFS_RO(remote_eph_pages_unsucc_get); RAMSTER_SYSFS_RO(remote_pers_pages_succ_get); RAMSTER_SYSFS_RO(remote_pers_pages_unsucc_get); RAMSTER_SYSFS_RO_ATOMIC(foreign_eph_pampd_count); RAMSTER_SYSFS_RO(foreign_eph_pampd_count_max); RAMSTER_SYSFS_RO_ATOMIC(foreign_pers_pampd_count); RAMSTER_SYSFS_RO(foreign_pers_pampd_count_max); RAMSTER_SYSFS_RO_ATOMIC(curr_flnode_count); RAMSTER_SYSFS_RO(curr_flnode_count_max); #define MANUAL_NODES 8 static bool ramster_nodes_manual_up[MANUAL_NODES]; static ssize_t ramster_manual_node_up_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { int i; 
char *p = buf; for (i = 0; i < MANUAL_NODES; i++) if (ramster_nodes_manual_up[i]) p += sprintf(p, "%d ", i); p += sprintf(p, "\n"); return p - buf; } static ssize_t ramster_manual_node_up_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long node_num; err = kstrtoul(buf, 10, &node_num); if (err) { pr_err("ramster: bad strtoul?\n"); return -EINVAL; } if (node_num >= MANUAL_NODES) { pr_err("ramster: bad node_num=%lu?\n", node_num); return -EINVAL; } if (ramster_nodes_manual_up[node_num]) { pr_err("ramster: node %d already up, ignoring\n", (int)node_num); } else { ramster_nodes_manual_up[node_num] = true; r2net_hb_node_up_manual((int)node_num); } return count; } static struct kobj_attribute ramster_manual_node_up_attr = { .attr = { .name = "manual_node_up", .mode = 0644 }, .show = ramster_manual_node_up_show, .store = ramster_manual_node_up_store, }; static ssize_t ramster_remote_target_nodenum_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { if (ramster_remote_target_nodenum == -1UL) return sprintf(buf, "unset\n"); else return sprintf(buf, "%d\n", ramster_remote_target_nodenum); } static ssize_t ramster_remote_target_nodenum_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long node_num; err = kstrtoul(buf, 10, &node_num); if (err) { pr_err("ramster: bad strtoul?\n"); return -EINVAL; } else if (node_num == -1UL) { pr_err("ramster: disabling all remotification, " "data may still reside on remote nodes however\n"); return -EINVAL; } else if (node_num >= MANUAL_NODES) { pr_err("ramster: bad node_num=%lu?\n", node_num); return -EINVAL; } else if (!ramster_nodes_manual_up[node_num]) { pr_err("ramster: node %d not up, ignoring setting " "of remotification target\n", (int)node_num); } else if (r2net_remote_target_node_set((int)node_num) >= 0) { pr_info("ramster: node %d set as remotification target\n", (int)node_num); ramster_remote_target_nodenum = (int)node_num; } else { pr_err("ramster: bad num to node node_num=%d?\n", (int)node_num); return -EINVAL; } return count; } static struct kobj_attribute ramster_remote_target_nodenum_attr = { .attr = { .name = "remote_target_nodenum", .mode = 0644 }, .show = ramster_remote_target_nodenum_show, .store = ramster_remote_target_nodenum_store, }; static struct attribute *ramster_attrs[] = { &ramster_interface_revision_attr.attr, &ramster_pers_remotify_enable_attr.attr, &ramster_eph_remotify_enable_attr.attr, &ramster_remote_pers_pages_attr.attr, &ramster_eph_pages_remoted_attr.attr, &ramster_eph_pages_remote_failed_attr.attr, &ramster_pers_pages_remoted_attr.attr, &ramster_pers_pages_remote_failed_attr.attr, &ramster_pers_pages_remote_nomem_attr.attr, &ramster_remote_pages_flushed_attr.attr, &ramster_remote_page_flushes_failed_attr.attr, &ramster_remote_objects_flushed_attr.attr, &ramster_remote_object_flushes_failed_attr.attr, &ramster_remote_eph_pages_succ_get_attr.attr, &ramster_remote_eph_pages_unsucc_get_attr.attr, &ramster_remote_pers_pages_succ_get_attr.attr, &ramster_remote_pers_pages_unsucc_get_attr.attr, &ramster_foreign_eph_pampd_count_attr.attr, &ramster_foreign_eph_pampd_count_max_attr.attr, &ramster_foreign_pers_pampd_count_attr.attr, &ramster_foreign_pers_pampd_count_max_attr.attr, &ramster_curr_flnode_count_attr.attr, &ramster_curr_flnode_count_max_attr.attr, &ramster_manual_node_up_attr.attr, &ramster_remote_target_nodenum_attr.attr, NULL, }; static struct attribute_group ramster_attr_group = { .attrs = 
ramster_attrs, .name = "ramster", }; #endif /* CONFIG_SYSFS */ /* * When zcache is disabled ("frozen"), pools can be created and destroyed, * but all puts (and thus all other operations that require memory allocation) * must fail. If zcache is unfrozen, accepts puts, then frozen again, * data consistency requires all puts while frozen to be converted into * flushes. */ static bool zcache_freeze; /* * zcache shrinker interface (only useful for ephemeral pages, so zbud only) */ static int shrink_zcache_memory(struct shrinker *shrink, struct shrink_control *sc) { int ret = -1; int nr = sc->nr_to_scan; gfp_t gfp_mask = sc->gfp_mask; if (nr >= 0) { if (!(gfp_mask & __GFP_FS)) /* does this case really need to be skipped? */ goto out; zbud_evict_pages(nr); } ret = (int)atomic_read(&zcache_zbud_curr_raw_pages); out: return ret; } static struct shrinker zcache_shrinker = { .shrink = shrink_zcache_memory, .seeks = DEFAULT_SEEKS, }; /* * zcache shims between cleancache/frontswap ops and tmem */ int zcache_put(int cli_id, int pool_id, struct tmem_oid *oidp, uint32_t index, char *data, size_t size, bool raw, int ephemeral) { struct tmem_pool *pool; int ret = -1; BUG_ON(!irqs_disabled()); pool = zcache_get_pool_by_id(cli_id, pool_id); if (unlikely(pool == NULL)) goto out; if (!zcache_freeze && zcache_do_preload(pool) == 0) { /* preload does preempt_disable on success */ ret = tmem_put(pool, oidp, index, data, size, raw, ephemeral); if (ret < 0) { if (is_ephemeral(pool)) zcache_failed_eph_puts++; else zcache_failed_pers_puts++; } zcache_put_pool(pool); preempt_enable_no_resched(); } else { zcache_put_to_flush++; if (atomic_read(&pool->obj_count) > 0) /* the put fails whether the flush succeeds or not */ (void)tmem_flush_page(pool, oidp, index); zcache_put_pool(pool); } out: return ret; } int zcache_get(int cli_id, int pool_id, struct tmem_oid *oidp, uint32_t index, char *data, size_t *sizep, bool raw, int get_and_free) { struct tmem_pool *pool; int ret = -1; bool eph; if (!raw) { BUG_ON(irqs_disabled()); BUG_ON(in_softirq()); } pool = zcache_get_pool_by_id(cli_id, pool_id); eph = is_ephemeral(pool); if (likely(pool != NULL)) { if (atomic_read(&pool->obj_count) > 0) ret = tmem_get(pool, oidp, index, data, sizep, raw, get_and_free); zcache_put_pool(pool); } WARN_ONCE((!eph && (ret != 0)), "zcache_get fails on persistent pool, " "bad things are very likely to happen soon\n"); #ifdef RAMSTER_TESTING if (ret != 0 && ret != -1 && !(ret == -EINVAL && is_ephemeral(pool))) pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret); #endif if (ret == -EAGAIN) BUG(); /* FIXME... don't need this anymore??? 
let's ensure */ return ret; } int zcache_flush(int cli_id, int pool_id, struct tmem_oid *oidp, uint32_t index) { struct tmem_pool *pool; int ret = -1; unsigned long flags; local_irq_save(flags); zcache_flush_total++; pool = zcache_get_pool_by_id(cli_id, pool_id); ramster_do_preload_flnode_only(pool); if (likely(pool != NULL)) { if (atomic_read(&pool->obj_count) > 0) ret = tmem_flush_page(pool, oidp, index); zcache_put_pool(pool); } if (ret >= 0) zcache_flush_found++; local_irq_restore(flags); return ret; } int zcache_flush_object(int cli_id, int pool_id, struct tmem_oid *oidp) { struct tmem_pool *pool; int ret = -1; unsigned long flags; local_irq_save(flags); zcache_flobj_total++; pool = zcache_get_pool_by_id(cli_id, pool_id); ramster_do_preload_flnode_only(pool); if (likely(pool != NULL)) { if (atomic_read(&pool->obj_count) > 0) ret = tmem_flush_object(pool, oidp); zcache_put_pool(pool); } if (ret >= 0) zcache_flobj_found++; local_irq_restore(flags); return ret; } int zcache_client_destroy_pool(int cli_id, int pool_id) { struct tmem_pool *pool = NULL; struct zcache_client *cli = NULL; int ret = -1; if (pool_id < 0) goto out; if (cli_id == LOCAL_CLIENT) cli = &zcache_host; else if ((unsigned int)cli_id < MAX_CLIENTS) cli = &zcache_clients[cli_id]; if (cli == NULL) goto out; atomic_inc(&cli->refcount); pool = cli->tmem_pools[pool_id]; if (pool == NULL) goto out; cli->tmem_pools[pool_id] = NULL; /* wait for pool activity on other cpus to quiesce */ while (atomic_read(&pool->refcount) != 0) ; atomic_dec(&cli->refcount); local_bh_disable(); ret = tmem_destroy_pool(pool); local_bh_enable(); kfree(pool); pr_info("ramster: destroyed pool id=%d cli_id=%d\n", pool_id, cli_id); out: return ret; } static int zcache_destroy_pool(int pool_id) { return zcache_client_destroy_pool(LOCAL_CLIENT, pool_id); } int zcache_new_pool(uint16_t cli_id, uint32_t flags) { int poolid = -1; struct tmem_pool *pool; struct zcache_client *cli = NULL; if (cli_id == LOCAL_CLIENT) cli = &zcache_host; else if ((unsigned int)cli_id < MAX_CLIENTS) cli = &zcache_clients[cli_id]; if (cli == NULL) goto out; atomic_inc(&cli->refcount); pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC); if (pool == NULL) { pr_info("ramster: pool creation failed: out of memory\n"); goto out; } for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++) if (cli->tmem_pools[poolid] == NULL) break; if (poolid >= MAX_POOLS_PER_CLIENT) { pr_info("ramster: pool creation failed: max exceeded\n"); kfree(pool); poolid = -1; goto out; } atomic_set(&pool->refcount, 0); pool->client = cli; pool->pool_id = poolid; tmem_new_pool(pool, flags); cli->tmem_pools[poolid] = pool; if (cli_id == LOCAL_CLIENT) pr_info("ramster: created %s tmem pool, id=%d, local client\n", flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral", poolid); else pr_info("ramster: created %s tmem pool, id=%d, client=%d\n", flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral", poolid, cli_id); out: if (cli != NULL) atomic_dec(&cli->refcount); return poolid; } static int zcache_local_new_pool(uint32_t flags) { return zcache_new_pool(LOCAL_CLIENT, flags); } int zcache_autocreate_pool(int cli_id, int pool_id, bool ephemeral) { struct tmem_pool *pool; struct zcache_client *cli = NULL; uint32_t flags = ephemeral ? 
0 : TMEM_POOL_PERSIST; int ret = -1; if (cli_id == LOCAL_CLIENT) goto out; if (pool_id >= MAX_POOLS_PER_CLIENT) goto out; else if ((unsigned int)cli_id < MAX_CLIENTS) cli = &zcache_clients[cli_id]; if ((ephemeral && !use_cleancache) || (!ephemeral && !use_frontswap)) BUG(); /* FIXME, handle more gracefully later */ if (!cli->allocated) { if (zcache_new_client(cli_id)) BUG(); /* FIXME, handle more gracefully later */ cli = &zcache_clients[cli_id]; } atomic_inc(&cli->refcount); pool = cli->tmem_pools[pool_id]; if (pool != NULL) { if (pool->persistent && ephemeral) { pr_err("zcache_autocreate_pool: type mismatch\n"); goto out; } ret = 0; goto out; } pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL); if (pool == NULL) { pr_info("ramster: pool creation failed: out of memory\n"); goto out; } atomic_set(&pool->refcount, 0); pool->client = cli; pool->pool_id = pool_id; tmem_new_pool(pool, flags); cli->tmem_pools[pool_id] = pool; pr_info("ramster: AUTOcreated %s tmem poolid=%d, for remote client=%d\n", flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral", pool_id, cli_id); ret = 0; out: if (cli == NULL) BUG(); /* FIXME, handle more gracefully later */ /* pr_err("zcache_autocreate_pool: failed\n"); */ if (cli != NULL) atomic_dec(&cli->refcount); return ret; } /********** * Two kernel functionalities currently can be layered on top of tmem. * These are "cleancache" which is used as a second-chance cache for clean * page cache pages; and "frontswap" which is used for swap pages * to avoid writes to disk. A generic "shim" is provided here for each * to translate in-kernel semantics to zcache semantics. */ #ifdef CONFIG_CLEANCACHE static void zcache_cleancache_put_page(int pool_id, struct cleancache_filekey key, pgoff_t index, struct page *page) { u32 ind = (u32) index; struct tmem_oid oid = *(struct tmem_oid *)&key; #ifdef __PG_WAS_ACTIVE if (!PageWasActive(page)) { zcache_nonactive_puts++; return; } #endif if (likely(ind == index)) { char *kva = page_address(page); (void)zcache_put(LOCAL_CLIENT, pool_id, &oid, index, kva, PAGE_SIZE, 0, 1); } } static int zcache_cleancache_get_page(int pool_id, struct cleancache_filekey key, pgoff_t index, struct page *page) { u32 ind = (u32) index; struct tmem_oid oid = *(struct tmem_oid *)&key; int ret = -1; preempt_disable(); if (likely(ind == index)) { char *kva = page_address(page); size_t size = PAGE_SIZE; ret = zcache_get(LOCAL_CLIENT, pool_id, &oid, index, kva, &size, 0, 0); #ifdef __PG_WAS_ACTIVE if (ret == 0) SetPageWasActive(page); #endif } preempt_enable(); return ret; } static void zcache_cleancache_flush_page(int pool_id, struct cleancache_filekey key, pgoff_t index) { u32 ind = (u32) index; struct tmem_oid oid = *(struct tmem_oid *)&key; if (likely(ind == index)) (void)zcache_flush(LOCAL_CLIENT, pool_id, &oid, ind); } static void zcache_cleancache_flush_inode(int pool_id, struct cleancache_filekey key) { struct tmem_oid oid = *(struct tmem_oid *)&key; (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid); } static void zcache_cleancache_flush_fs(int pool_id) { if (pool_id >= 0) (void)zcache_destroy_pool(pool_id); } static int zcache_cleancache_init_fs(size_t pagesize) { BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid)); BUG_ON(pagesize != PAGE_SIZE); return zcache_local_new_pool(0); } static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize) { /* shared pools are unsupported and map to private */ BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid)); BUG_ON(pagesize != PAGE_SIZE); return 
zcache_local_new_pool(0); } static struct cleancache_ops zcache_cleancache_ops = { .put_page = zcache_cleancache_put_page, .get_page = zcache_cleancache_get_page, .invalidate_page = zcache_cleancache_flush_page, .invalidate_inode = zcache_cleancache_flush_inode, .invalidate_fs = zcache_cleancache_flush_fs, .init_shared_fs = zcache_cleancache_init_shared_fs, .init_fs = zcache_cleancache_init_fs }; struct cleancache_ops zcache_cleancache_register_ops(void) { struct cleancache_ops old_ops = cleancache_register_ops(&zcache_cleancache_ops); return old_ops; } #endif #ifdef CONFIG_FRONTSWAP /* a single tmem poolid is used for all frontswap "types" (swapfiles) */ static int zcache_frontswap_poolid = -1; /* * Swizzling increases objects per swaptype, increasing tmem concurrency * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS */ #define SWIZ_BITS 8 #define SWIZ_MASK ((1 << SWIZ_BITS) - 1) #define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK)) #define iswiz(_ind) (_ind >> SWIZ_BITS) static inline struct tmem_oid oswiz(unsigned type, u32 ind) { struct tmem_oid oid = { .oid = { 0 } }; oid.oid[0] = _oswiz(type, ind); return oid; } static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, struct page *page) { u64 ind64 = (u64)offset; u32 ind = (u32)offset; struct tmem_oid oid = oswiz(type, ind); int ret = -1; unsigned long flags; char *kva; BUG_ON(!PageLocked(page)); if (likely(ind64 == ind)) { local_irq_save(flags); kva = page_address(page); ret = zcache_put(LOCAL_CLIENT, zcache_frontswap_poolid, &oid, iswiz(ind), kva, PAGE_SIZE, 0, 0); local_irq_restore(flags); } return ret; } /* returns 0 if the page was successfully gotten from frontswap, -1 if * was not present (should never happen!) */ static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, struct page *page) { u64 ind64 = (u64)offset; u32 ind = (u32)offset; struct tmem_oid oid = oswiz(type, ind); int ret = -1; preempt_disable(); /* FIXME, remove this? */ BUG_ON(!PageLocked(page)); if (likely(ind64 == ind)) { char *kva = page_address(page); size_t size = PAGE_SIZE; ret = zcache_get(LOCAL_CLIENT, zcache_frontswap_poolid, &oid, iswiz(ind), kva, &size, 0, -1); } preempt_enable(); /* FIXME, remove this? 
*/ return ret; } /* flush a single page from frontswap */ static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset) { u64 ind64 = (u64)offset; u32 ind = (u32)offset; struct tmem_oid oid = oswiz(type, ind); if (likely(ind64 == ind)) (void)zcache_flush(LOCAL_CLIENT, zcache_frontswap_poolid, &oid, iswiz(ind)); } /* flush all pages from the passed swaptype */ static void zcache_frontswap_flush_area(unsigned type) { struct tmem_oid oid; int ind; for (ind = SWIZ_MASK; ind >= 0; ind--) { oid = oswiz(type, ind); (void)zcache_flush_object(LOCAL_CLIENT, zcache_frontswap_poolid, &oid); } } static void zcache_frontswap_init(unsigned ignored) { /* a single tmem poolid is used for all frontswap "types" (swapfiles) */ if (zcache_frontswap_poolid < 0) zcache_frontswap_poolid = zcache_local_new_pool(TMEM_POOL_PERSIST); } static struct frontswap_ops zcache_frontswap_ops = { .put_page = zcache_frontswap_put_page, .get_page = zcache_frontswap_get_page, .invalidate_page = zcache_frontswap_flush_page, .invalidate_area = zcache_frontswap_flush_area, .init = zcache_frontswap_init }; struct frontswap_ops zcache_frontswap_register_ops(void) { struct frontswap_ops old_ops = frontswap_register_ops(&zcache_frontswap_ops); return old_ops; } #endif /* * frontswap selfshrinking */ #ifdef CONFIG_FRONTSWAP /* In HZ, controls frequency of worker invocation. */ static unsigned int selfshrink_interval __read_mostly = 5; static void selfshrink_process(struct work_struct *work); static DECLARE_DELAYED_WORK(selfshrink_worker, selfshrink_process); /* Enable/disable with sysfs. */ static bool frontswap_selfshrinking __read_mostly; /* Enable/disable with kernel boot option. */ static bool use_frontswap_selfshrink __initdata = true; /* * The default values for the following parameters were deemed reasonable * by experimentation, may be workload-dependent, and can all be * adjusted via sysfs. */ /* Control rate for frontswap shrinking. Higher hysteresis is slower. */ static unsigned int frontswap_hysteresis __read_mostly = 20; /* * Number of selfshrink worker invocations to wait before observing that * frontswap selfshrinking should commence. Note that selfshrinking does * not use a separate worker thread. */ static unsigned int frontswap_inertia __read_mostly = 3; /* Countdown to next invocation of frontswap_shrink() */ static unsigned long frontswap_inertia_counter; /* * Invoked by the selfshrink worker thread, uses current number of pages * in frontswap (frontswap_curr_pages()), previous status, and control * values (hysteresis and inertia) to determine if frontswap should be * shrunk and what the new frontswap size should be. Note that * frontswap_shrink is essentially a partial swapoff that immediately * transfers pages from the "swap device" (frontswap) back into kernel * RAM; despite the name, frontswap "shrinking" is very different from * the "shrinker" interface used by the kernel MM subsystem to reclaim * memory. 
*/ static void frontswap_selfshrink(void) { static unsigned long cur_frontswap_pages; static unsigned long last_frontswap_pages; static unsigned long tgt_frontswap_pages; last_frontswap_pages = cur_frontswap_pages; cur_frontswap_pages = frontswap_curr_pages(); if (!cur_frontswap_pages || (cur_frontswap_pages > last_frontswap_pages)) { frontswap_inertia_counter = frontswap_inertia; return; } if (frontswap_inertia_counter && --frontswap_inertia_counter) return; if (cur_frontswap_pages <= frontswap_hysteresis) tgt_frontswap_pages = 0; else tgt_frontswap_pages = cur_frontswap_pages - (cur_frontswap_pages / frontswap_hysteresis); frontswap_shrink(tgt_frontswap_pages); } static int __init ramster_nofrontswap_selfshrink_setup(char *s) { use_frontswap_selfshrink = false; return 1; } __setup("noselfshrink", ramster_nofrontswap_selfshrink_setup); static void selfshrink_process(struct work_struct *work) { if (frontswap_selfshrinking && frontswap_enabled) { frontswap_selfshrink(); schedule_delayed_work(&selfshrink_worker, selfshrink_interval * HZ); } } static int ramster_enabled; static int __init ramster_selfshrink_init(void) { frontswap_selfshrinking = ramster_enabled && use_frontswap_selfshrink; if (frontswap_selfshrinking) pr_info("ramster: Initializing frontswap " "selfshrinking driver.\n"); else return -ENODEV; schedule_delayed_work(&selfshrink_worker, selfshrink_interval * HZ); return 0; } subsys_initcall(ramster_selfshrink_init); #endif /* * zcache initialization * NOTE FOR NOW ramster MUST BE PROVIDED AS A KERNEL BOOT PARAMETER OR * NOTHING HAPPENS! */ static int ramster_enabled; static int __init enable_ramster(char *s) { ramster_enabled = 1; return 1; } __setup("ramster", enable_ramster); /* allow independent dynamic disabling of cleancache and frontswap */ static int use_cleancache = 1; static int __init no_cleancache(char *s) { pr_info("INIT no_cleancache called\n"); use_cleancache = 0; return 1; } /* * FIXME: need to guarantee this gets checked before zcache_init is called * What is the correct way to achieve this? 
*/ early_param("nocleancache", no_cleancache); static int use_frontswap = 1; static int __init no_frontswap(char *s) { pr_info("INIT no_frontswap called\n"); use_frontswap = 0; return 1; } __setup("nofrontswap", no_frontswap); static int __init zcache_init(void) { int ret = 0; #ifdef CONFIG_SYSFS ret = sysfs_create_group(mm_kobj, &zcache_attr_group); ret = sysfs_create_group(mm_kobj, &ramster_attr_group); if (ret) { pr_err("ramster: can't create sysfs\n"); goto out; } #endif /* CONFIG_SYSFS */ #if defined(CONFIG_CLEANCACHE) || defined(CONFIG_FRONTSWAP) if (ramster_enabled) { unsigned int cpu; (void)r2net_register_handlers(); tmem_register_hostops(&zcache_hostops); tmem_register_pamops(&zcache_pamops); ret = register_cpu_notifier(&zcache_cpu_notifier_block); if (ret) { pr_err("ramster: can't register cpu notifier\n"); goto out; } for_each_online_cpu(cpu) { void *pcpu = (void *)(long)cpu; zcache_cpu_notifier(&zcache_cpu_notifier_block, CPU_UP_PREPARE, pcpu); } } zcache_objnode_cache = kmem_cache_create("zcache_objnode", sizeof(struct tmem_objnode), 0, 0, NULL); zcache_obj_cache = kmem_cache_create("zcache_obj", sizeof(struct tmem_obj), 0, 0, NULL); ramster_flnode_cache = kmem_cache_create("ramster_flnode", sizeof(struct flushlist_node), 0, 0, NULL); #endif #ifdef CONFIG_CLEANCACHE pr_info("INIT ramster_enabled=%d use_cleancache=%d\n", ramster_enabled, use_cleancache); if (ramster_enabled && use_cleancache) { struct cleancache_ops old_ops; zbud_init(); register_shrinker(&zcache_shrinker); old_ops = zcache_cleancache_register_ops(); pr_info("ramster: cleancache enabled using kernel " "transcendent memory and compression buddies\n"); if (old_ops.init_fs != NULL) pr_warning("ramster: cleancache_ops overridden"); } #endif #ifdef CONFIG_FRONTSWAP pr_info("INIT ramster_enabled=%d use_frontswap=%d\n", ramster_enabled, use_frontswap); if (ramster_enabled && use_frontswap) { struct frontswap_ops old_ops; zcache_new_client(LOCAL_CLIENT); old_ops = zcache_frontswap_register_ops(); pr_info("ramster: frontswap enabled using kernel " "transcendent memory and xvmalloc\n"); if (old_ops.init != NULL) pr_warning("ramster: frontswap_ops overridden"); } if (ramster_enabled && (use_frontswap || use_cleancache)) ramster_remotify_init(); #endif out: return ret; } module_init(zcache_init)
gpl-2.0
vaginessa/twrp_kernel
drivers/misc/ti_dac7512.c
4336
2531
/* * dac7512.c - Linux kernel module for * Texas Instruments DAC7512 * * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/spi/spi.h> #define DAC7512_DRV_NAME "dac7512" #define DRIVER_VERSION "1.0" static ssize_t dac7512_store_val(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct spi_device *spi = to_spi_device(dev); unsigned char tmp[2]; unsigned long val; if (strict_strtoul(buf, 10, &val) < 0) return -EINVAL; tmp[0] = val >> 8; tmp[1] = val & 0xff; spi_write(spi, tmp, sizeof(tmp)); return count; } static DEVICE_ATTR(value, S_IWUSR, NULL, dac7512_store_val); static struct attribute *dac7512_attributes[] = { &dev_attr_value.attr, NULL }; static const struct attribute_group dac7512_attr_group = { .attrs = dac7512_attributes, }; static int __devinit dac7512_probe(struct spi_device *spi) { int ret; spi->bits_per_word = 8; spi->mode = SPI_MODE_0; ret = spi_setup(spi); if (ret < 0) return ret; return sysfs_create_group(&spi->dev.kobj, &dac7512_attr_group); } static int __devexit dac7512_remove(struct spi_device *spi) { sysfs_remove_group(&spi->dev.kobj, &dac7512_attr_group); return 0; } static struct spi_driver dac7512_driver = { .driver = { .name = DAC7512_DRV_NAME, .owner = THIS_MODULE, }, .probe = dac7512_probe, .remove = __devexit_p(dac7512_remove), }; static int __init dac7512_init(void) { return spi_register_driver(&dac7512_driver); } static void __exit dac7512_exit(void) { spi_unregister_driver(&dac7512_driver); } MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); MODULE_DESCRIPTION("DAC7512 16-bit DAC"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRIVER_VERSION); module_init(dac7512_init); module_exit(dac7512_exit);
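The only data handling in this driver is the two-byte, MSB-first framing in dac7512_store_val(); note that values above 0xffff are silently truncated rather than rejected. A self-contained sketch of that framing (hypothetical names, user-space, for illustration only — from user space the attribute itself would be driven as text, e.g. writing "2048" to the value file under /sys):

/* Mirrors the byte split in dac7512_store_val(); illustration only. */
#include <stdint.h>
#include <assert.h>

static void dac7512_frame(unsigned long val, uint8_t buf[2])
{
	buf[0] = (val >> 8) & 0xff;	/* high byte goes out first */
	buf[1] = val & 0xff;		/* then the low byte */
}

int main(void)
{
	uint8_t buf[2];

	dac7512_frame(0x0abc, buf);
	assert(buf[0] == 0x0a && buf[1] == 0xbc);
	dac7512_frame(0x1f0abc, buf);	/* oversized input: silently truncated */
	assert(buf[0] == 0x0a && buf[1] == 0xbc);
	return 0;
}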
gpl-2.0
frap129/Electron_kernel_mecha
drivers/net/wireless/b43legacy/ilt.c
4336
10742
/* Broadcom B43legacy wireless driver Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, Stefano Brivio <stefano.brivio@polimi.it> Michael Buesch <mbuesch@freenet.de> Danny van Dyk <kugelfang@gentoo.org> Andreas Jaggi <andreas.jaggi@waterwave.ch> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "b43legacy.h" #include "ilt.h" #include "phy.h" /**** Initial Internal Lookup Tables ****/ const u32 b43legacy_ilt_rotor[B43legacy_ILT_ROTOR_SIZE] = { 0xFEB93FFD, 0xFEC63FFD, /* 0 */ 0xFED23FFD, 0xFEDF3FFD, 0xFEEC3FFE, 0xFEF83FFE, 0xFF053FFE, 0xFF113FFE, 0xFF1E3FFE, 0xFF2A3FFF, /* 8 */ 0xFF373FFF, 0xFF443FFF, 0xFF503FFF, 0xFF5D3FFF, 0xFF693FFF, 0xFF763FFF, 0xFF824000, 0xFF8F4000, /* 16 */ 0xFF9B4000, 0xFFA84000, 0xFFB54000, 0xFFC14000, 0xFFCE4000, 0xFFDA4000, 0xFFE74000, 0xFFF34000, /* 24 */ 0x00004000, 0x000D4000, 0x00194000, 0x00264000, 0x00324000, 0x003F4000, 0x004B4000, 0x00584000, /* 32 */ 0x00654000, 0x00714000, 0x007E4000, 0x008A3FFF, 0x00973FFF, 0x00A33FFF, 0x00B03FFF, 0x00BC3FFF, /* 40 */ 0x00C93FFF, 0x00D63FFF, 0x00E23FFE, 0x00EF3FFE, 0x00FB3FFE, 0x01083FFE, 0x01143FFE, 0x01213FFD, /* 48 */ 0x012E3FFD, 0x013A3FFD, 0x01473FFD, }; const u32 b43legacy_ilt_retard[B43legacy_ILT_RETARD_SIZE] = { 0xDB93CB87, 0xD666CF64, /* 0 */ 0xD1FDD358, 0xCDA6D826, 0xCA38DD9F, 0xC729E2B4, 0xC469E88E, 0xC26AEE2B, 0xC0DEF46C, 0xC073FA62, /* 8 */ 0xC01D00D5, 0xC0760743, 0xC1560D1E, 0xC2E51369, 0xC4ED18FF, 0xC7AC1ED7, 0xCB2823B2, 0xCEFA28D9, /* 16 */ 0xD2F62D3F, 0xD7BB3197, 0xDCE53568, 0xE1FE3875, 0xE7D13B35, 0xED663D35, 0xF39B3EC4, 0xF98E3FA7, /* 24 */ 0x00004000, 0x06723FA7, 0x0C653EC4, 0x129A3D35, 0x182F3B35, 0x1E023875, 0x231B3568, 0x28453197, /* 32 */ 0x2D0A2D3F, 0x310628D9, 0x34D823B2, 0x38541ED7, 0x3B1318FF, 0x3D1B1369, 0x3EAA0D1E, 0x3F8A0743, /* 40 */ 0x3FE300D5, 0x3F8DFA62, 0x3F22F46C, 0x3D96EE2B, 0x3B97E88E, 0x38D7E2B4, 0x35C8DD9F, 0x325AD826, /* 48 */ 0x2E03D358, 0x299ACF64, 0x246DCB87, }; const u16 b43legacy_ilt_finefreqa[B43legacy_ILT_FINEFREQA_SIZE] = { 0x0082, 0x0082, 0x0102, 0x0182, /* 0 */ 0x0202, 0x0282, 0x0302, 0x0382, 0x0402, 0x0482, 0x0502, 0x0582, 0x05E2, 0x0662, 0x06E2, 0x0762, 0x07E2, 0x0842, 0x08C2, 0x0942, /* 16 */ 0x09C2, 0x0A22, 0x0AA2, 0x0B02, 0x0B82, 0x0BE2, 0x0C62, 0x0CC2, 0x0D42, 0x0DA2, 0x0E02, 0x0E62, 0x0EE2, 0x0F42, 0x0FA2, 0x1002, /* 32 */ 0x1062, 0x10C2, 0x1122, 0x1182, 0x11E2, 0x1242, 0x12A2, 0x12E2, 0x1342, 0x13A2, 0x1402, 0x1442, 0x14A2, 0x14E2, 0x1542, 0x1582, /* 48 */ 0x15E2, 0x1622, 0x1662, 0x16C1, 0x1701, 0x1741, 0x1781, 0x17E1, 0x1821, 0x1861, 0x18A1, 0x18E1, 0x1921, 0x1961, 0x19A1, 0x19E1, /* 64 */ 0x1A21, 0x1A61, 0x1AA1, 0x1AC1, 0x1B01, 0x1B41, 0x1B81, 0x1BA1, 0x1BE1, 0x1C21, 0x1C41, 0x1C81, 0x1CA1, 0x1CE1, 0x1D01, 0x1D41, /* 80 */ 0x1D61, 0x1DA1, 0x1DC1, 0x1E01, 0x1E21, 0x1E61, 0x1E81, 0x1EA1, 0x1EE1, 0x1F01, 0x1F21, 0x1F41, 0x1F81, 0x1FA1, 0x1FC1, 0x1FE1, /* 96 */ 0x2001, 0x2041, 0x2061, 0x2081, 0x20A1, 0x20C1, 
0x20E1, 0x2101, 0x2121, 0x2141, 0x2161, 0x2181, 0x21A1, 0x21C1, 0x21E1, 0x2201, /* 112 */ 0x2221, 0x2241, 0x2261, 0x2281, 0x22A1, 0x22C1, 0x22C1, 0x22E1, 0x2301, 0x2321, 0x2341, 0x2361, 0x2361, 0x2381, 0x23A1, 0x23C1, /* 128 */ 0x23E1, 0x23E1, 0x2401, 0x2421, 0x2441, 0x2441, 0x2461, 0x2481, 0x2481, 0x24A1, 0x24C1, 0x24C1, 0x24E1, 0x2501, 0x2501, 0x2521, /* 144 */ 0x2541, 0x2541, 0x2561, 0x2561, 0x2581, 0x25A1, 0x25A1, 0x25C1, 0x25C1, 0x25E1, 0x2601, 0x2601, 0x2621, 0x2621, 0x2641, 0x2641, /* 160 */ 0x2661, 0x2661, 0x2681, 0x2681, 0x26A1, 0x26A1, 0x26C1, 0x26C1, 0x26E1, 0x26E1, 0x2701, 0x2701, 0x2721, 0x2721, 0x2740, 0x2740, /* 176 */ 0x2760, 0x2760, 0x2780, 0x2780, 0x2780, 0x27A0, 0x27A0, 0x27C0, 0x27C0, 0x27E0, 0x27E0, 0x27E0, 0x2800, 0x2800, 0x2820, 0x2820, /* 192 */ 0x2820, 0x2840, 0x2840, 0x2840, 0x2860, 0x2860, 0x2880, 0x2880, 0x2880, 0x28A0, 0x28A0, 0x28A0, 0x28C0, 0x28C0, 0x28C0, 0x28E0, /* 208 */ 0x28E0, 0x28E0, 0x2900, 0x2900, 0x2900, 0x2920, 0x2920, 0x2920, 0x2940, 0x2940, 0x2940, 0x2960, 0x2960, 0x2960, 0x2960, 0x2980, /* 224 */ 0x2980, 0x2980, 0x29A0, 0x29A0, 0x29A0, 0x29A0, 0x29C0, 0x29C0, 0x29C0, 0x29E0, 0x29E0, 0x29E0, 0x29E0, 0x2A00, 0x2A00, 0x2A00, /* 240 */ 0x2A00, 0x2A20, 0x2A20, 0x2A20, 0x2A20, 0x2A40, 0x2A40, 0x2A40, 0x2A40, 0x2A60, 0x2A60, 0x2A60, }; const u16 b43legacy_ilt_finefreqg[B43legacy_ILT_FINEFREQG_SIZE] = { 0x0089, 0x02E9, 0x0409, 0x04E9, /* 0 */ 0x05A9, 0x0669, 0x0709, 0x0789, 0x0829, 0x08A9, 0x0929, 0x0989, 0x0A09, 0x0A69, 0x0AC9, 0x0B29, 0x0BA9, 0x0BE9, 0x0C49, 0x0CA9, /* 16 */ 0x0D09, 0x0D69, 0x0DA9, 0x0E09, 0x0E69, 0x0EA9, 0x0F09, 0x0F49, 0x0FA9, 0x0FE9, 0x1029, 0x1089, 0x10C9, 0x1109, 0x1169, 0x11A9, /* 32 */ 0x11E9, 0x1229, 0x1289, 0x12C9, 0x1309, 0x1349, 0x1389, 0x13C9, 0x1409, 0x1449, 0x14A9, 0x14E9, 0x1529, 0x1569, 0x15A9, 0x15E9, /* 48 */ 0x1629, 0x1669, 0x16A9, 0x16E8, 0x1728, 0x1768, 0x17A8, 0x17E8, 0x1828, 0x1868, 0x18A8, 0x18E8, 0x1928, 0x1968, 0x19A8, 0x19E8, /* 64 */ 0x1A28, 0x1A68, 0x1AA8, 0x1AE8, 0x1B28, 0x1B68, 0x1BA8, 0x1BE8, 0x1C28, 0x1C68, 0x1CA8, 0x1CE8, 0x1D28, 0x1D68, 0x1DC8, 0x1E08, /* 80 */ 0x1E48, 0x1E88, 0x1EC8, 0x1F08, 0x1F48, 0x1F88, 0x1FE8, 0x2028, 0x2068, 0x20A8, 0x2108, 0x2148, 0x2188, 0x21C8, 0x2228, 0x2268, /* 96 */ 0x22C8, 0x2308, 0x2348, 0x23A8, 0x23E8, 0x2448, 0x24A8, 0x24E8, 0x2548, 0x25A8, 0x2608, 0x2668, 0x26C8, 0x2728, 0x2787, 0x27E7, /* 112 */ 0x2847, 0x28C7, 0x2947, 0x29A7, 0x2A27, 0x2AC7, 0x2B47, 0x2BE7, 0x2CA7, 0x2D67, 0x2E47, 0x2F67, 0x3247, 0x3526, 0x3646, 0x3726, /* 128 */ 0x3806, 0x38A6, 0x3946, 0x39E6, 0x3A66, 0x3AE6, 0x3B66, 0x3BC6, 0x3C45, 0x3CA5, 0x3D05, 0x3D85, 0x3DE5, 0x3E45, 0x3EA5, 0x3EE5, /* 144 */ 0x3F45, 0x3FA5, 0x4005, 0x4045, 0x40A5, 0x40E5, 0x4145, 0x4185, 0x41E5, 0x4225, 0x4265, 0x42C5, 0x4305, 0x4345, 0x43A5, 0x43E5, /* 160 */ 0x4424, 0x4464, 0x44C4, 0x4504, 0x4544, 0x4584, 0x45C4, 0x4604, 0x4644, 0x46A4, 0x46E4, 0x4724, 0x4764, 0x47A4, 0x47E4, 0x4824, /* 176 */ 0x4864, 0x48A4, 0x48E4, 0x4924, 0x4964, 0x49A4, 0x49E4, 0x4A24, 0x4A64, 0x4AA4, 0x4AE4, 0x4B23, 0x4B63, 0x4BA3, 0x4BE3, 0x4C23, /* 192 */ 0x4C63, 0x4CA3, 0x4CE3, 0x4D23, 0x4D63, 0x4DA3, 0x4DE3, 0x4E23, 0x4E63, 0x4EA3, 0x4EE3, 0x4F23, 0x4F63, 0x4FC3, 0x5003, 0x5043, /* 208 */ 0x5083, 0x50C3, 0x5103, 0x5143, 0x5183, 0x51E2, 0x5222, 0x5262, 0x52A2, 0x52E2, 0x5342, 0x5382, 0x53C2, 0x5402, 0x5462, 0x54A2, /* 224 */ 0x5502, 0x5542, 0x55A2, 0x55E2, 0x5642, 0x5682, 0x56E2, 0x5722, 0x5782, 0x57E1, 0x5841, 0x58A1, 0x5901, 0x5961, 0x59C1, 0x5A21, /* 240 */ 0x5AA1, 0x5B01, 0x5B81, 0x5BE1, 0x5C61, 0x5D01, 0x5D80, 0x5E20, 0x5EE0, 0x5FA0, 0x6080, 
0x61C0, }; const u16 b43legacy_ilt_noisea2[B43legacy_ILT_NOISEA2_SIZE] = { 0x0001, 0x0001, 0x0001, 0xFFFE, 0xFFFE, 0x3FFF, 0x1000, 0x0393, }; const u16 b43legacy_ilt_noisea3[B43legacy_ILT_NOISEA3_SIZE] = { 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36, 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36, }; const u16 b43legacy_ilt_noiseg1[B43legacy_ILT_NOISEG1_SIZE] = { 0x013C, 0x01F5, 0x031A, 0x0631, 0x0001, 0x0001, 0x0001, 0x0001, }; const u16 b43legacy_ilt_noiseg2[B43legacy_ILT_NOISEG2_SIZE] = { 0x5484, 0x3C40, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; const u16 b43legacy_ilt_noisescaleg1[B43legacy_ILT_NOISESCALEG_SIZE] = { 0x6C77, 0x5162, 0x3B40, 0x3335, /* 0 */ 0x2F2D, 0x2A2A, 0x2527, 0x1F21, 0x1A1D, 0x1719, 0x1616, 0x1414, 0x1414, 0x1400, 0x1414, 0x1614, 0x1716, 0x1A19, 0x1F1D, 0x2521, /* 16 */ 0x2A27, 0x2F2A, 0x332D, 0x3B35, 0x5140, 0x6C62, 0x0077, }; const u16 b43legacy_ilt_noisescaleg2[B43legacy_ILT_NOISESCALEG_SIZE] = { 0xD8DD, 0xCBD4, 0xBCC0, 0XB6B7, /* 0 */ 0xB2B0, 0xADAD, 0xA7A9, 0x9FA1, 0x969B, 0x9195, 0x8F8F, 0x8A8A, 0x8A8A, 0x8A00, 0x8A8A, 0x8F8A, 0x918F, 0x9695, 0x9F9B, 0xA7A1, /* 16 */ 0xADA9, 0xB2AD, 0xB6B0, 0xBCB7, 0xCBC0, 0xD8D4, 0x00DD, }; const u16 b43legacy_ilt_noisescaleg3[B43legacy_ILT_NOISESCALEG_SIZE] = { 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 0 */ 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA400, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 16 */ 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0x00A4, }; const u16 b43legacy_ilt_sigmasqr1[B43legacy_ILT_SIGMASQR_SIZE] = { 0x007A, 0x0075, 0x0071, 0x006C, /* 0 */ 0x0067, 0x0063, 0x005E, 0x0059, 0x0054, 0x0050, 0x004B, 0x0046, 0x0042, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, /* 16 */ 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x0000, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, /* 32 */ 0x003D, 0x003D, 0x003D, 0x003D, 0x0042, 0x0046, 0x004B, 0x0050, 0x0054, 0x0059, 0x005E, 0x0063, 0x0067, 0x006C, 0x0071, 0x0075, /* 48 */ 0x007A, }; const u16 b43legacy_ilt_sigmasqr2[B43legacy_ILT_SIGMASQR_SIZE] = { 0x00DE, 0x00DC, 0x00DA, 0x00D8, /* 0 */ 0x00D6, 0x00D4, 0x00D2, 0x00CF, 0x00CD, 0x00CA, 0x00C7, 0x00C4, 0x00C1, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 16 */ 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x0000, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 32 */ 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00C1, 0x00C4, 0x00C7, 0x00CA, 0x00CD, 0x00CF, 0x00D2, 0x00D4, 0x00D6, 0x00D8, 0x00DA, 0x00DC, /* 48 */ 0x00DE, }; /**** Helper functions to access the device Internal Lookup Tables ****/ void b43legacy_ilt_write(struct b43legacy_wldev *dev, u16 offset, u16 val) { b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); mmiowb(); b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1, val); } void b43legacy_ilt_write32(struct b43legacy_wldev *dev, u16 offset, u32 val) { b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); mmiowb(); b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA2, (val & 0xFFFF0000) >> 16); b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1, val & 0x0000FFFF); } u16 b43legacy_ilt_read(struct b43legacy_wldev *dev, u16 offset) { b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); return b43legacy_phy_read(dev, B43legacy_PHY_ILT_G_DATA1); }
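The accessors above encode the ILT protocol: select the offset through the CTRL register, then move data through DATA1 (plus DATA2 for the high half of 32-bit entries). Callers load whole tables with a loop; a condensed sketch of that pattern follows — the base offset is a placeholder, since the real per-table offsets live in phy.c:

/* Sketch: bulk-load a u16 table through the accessor defined above.
 * The base offset is a placeholder; phy.c supplies the real per-table
 * offsets when it programs the PHY. */
static void b43legacy_ilt_write_table(struct b43legacy_wldev *dev,
				      u16 base, const u16 *tab, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		b43legacy_ilt_write(dev, base + i, tab[i]);
}

/* Example call from PHY init code, with a hypothetical base offset:
 *	b43legacy_ilt_write_table(dev, ILT_FINEFREQG_OFFSET,
 *				  b43legacy_ilt_finefreqg,
 *				  B43legacy_ILT_FINEFREQG_SIZE);
 */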
gpl-2.0
lozohcum/kernel
arch/powerpc/platforms/cell/smp.c
4592
4471
/* * SMP support for BPA machines. * * Dave Engebretsen, Peter Bergner, and * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com * * Plus various changes from other IBM teams... * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #undef DEBUG #include <linux/kernel.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/cache.h> #include <linux/err.h> #include <linux/device.h> #include <linux/cpu.h> #include <asm/ptrace.h> #include <linux/atomic.h> #include <asm/irq.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/smp.h> #include <asm/paca.h> #include <asm/machdep.h> #include <asm/cputable.h> #include <asm/firmware.h> #include <asm/rtas.h> #include <asm/cputhreads.h> #include "interrupt.h" #include <asm/udbg.h> #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) #endif /* * The Primary thread of each non-boot processor was started from the OF client * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop. */ static cpumask_t of_spin_map; /** * smp_startup_cpu() - start the given cpu * * At boot time, there is nothing to do for primary threads which were * started from Open Firmware. For anything else, call RTAS with the * appropriate start location. * * Returns: * 0 - failure * 1 - success */ static inline int __devinit smp_startup_cpu(unsigned int lcpu) { int status; unsigned long start_here = __pa((u32)*((unsigned long *) generic_secondary_smp_init)); unsigned int pcpu; int start_cpu; if (cpumask_test_cpu(lcpu, &of_spin_map)) /* Already started by OF and sitting in spin loop */ return 1; pcpu = get_hard_smp_processor_id(lcpu); /* Fixup atomic count: it exited inside IRQ handler. */ task_thread_info(paca[lcpu].__current)->preempt_count = 0; /* * If the RTAS start-cpu token does not exist then presume the * cpu is already spinning. */ start_cpu = rtas_token("start-cpu"); if (start_cpu == RTAS_UNKNOWN_SERVICE) return 1; status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu); if (status != 0) { printk(KERN_ERR "start-cpu failed: %i\n", status); return 0; } return 1; } static int __init smp_iic_probe(void) { iic_request_IPIs(); return cpumask_weight(cpu_possible_mask); } static void __devinit smp_cell_setup_cpu(int cpu) { if (cpu != boot_cpuid) iic_setup_cpu(); /* * change default DABRX to allow user watchpoints */ mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); } static int __devinit smp_cell_kick_cpu(int nr) { BUG_ON(nr < 0 || nr >= NR_CPUS); if (!smp_startup_cpu(nr)) return -ENOENT; /* * The processor is currently spinning, waiting for the * cpu_start field to become non-zero After we set cpu_start, * the processor will continue on to secondary_start */ paca[nr].cpu_start = 1; return 0; } static int smp_cell_cpu_bootable(unsigned int nr) { /* Special case - we inhibit secondary thread startup * during boot if the user requests it. Odd-numbered * cpus are assumed to be secondary threads. 
*/ if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT) && !smt_enabled_at_boot && cpu_thread_in_core(nr) != 0) return 0; return 1; } static struct smp_ops_t bpa_iic_smp_ops = { .message_pass = iic_message_pass, .probe = smp_iic_probe, .kick_cpu = smp_cell_kick_cpu, .setup_cpu = smp_cell_setup_cpu, .cpu_bootable = smp_cell_cpu_bootable, }; /* This is called very early */ void __init smp_init_cell(void) { int i; DBG(" -> smp_init_cell()\n"); smp_ops = &bpa_iic_smp_ops; /* Mark threads which are still spinning in hold loops. */ if (cpu_has_feature(CPU_FTR_SMT)) { for_each_present_cpu(i) { if (cpu_thread_in_core(i) == 0) cpumask_set_cpu(i, &of_spin_map); } } else cpumask_copy(&of_spin_map, cpu_present_mask); cpumask_clear_cpu(boot_cpuid, &of_spin_map); /* Non-lpar has additional take/give timebase */ if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { smp_ops->give_timebase = rtas_give_timebase; smp_ops->take_timebase = rtas_take_timebase; } DBG(" <- smp_init_cell()\n"); }
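The predicate at the end, smp_cell_cpu_bootable(), packs four conditions into one expression; restated for clarity (same kernel helpers, equivalent logic, sketch only):

/* Equivalent restatement of smp_cell_cpu_bootable(), for readability. */
static int smp_cell_cpu_bootable_expanded(unsigned int nr)
{
	int still_booting = system_state < SYSTEM_RUNNING;
	int secondary_thread = cpu_thread_in_core(nr) != 0;

	/* Inhibit only secondary SMT threads, only during boot, and only
	 * when the user asked for SMT to be off at boot. */
	if (still_booting && cpu_has_feature(CPU_FTR_SMT) &&
	    !smt_enabled_at_boot && secondary_thread)
		return 0;
	return 1;
}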
gpl-2.0
talexop/talexop_kernel_i9505_4_4_2
arch/arm/mach-msm/board-trout-rfkill.c
4592
2370
/* * Copyright (C) 2008 Google, Inc. * Author: Nick Pelly <npelly@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* Control bluetooth power for trout platform */ #include <linux/platform_device.h> #include <linux/module.h> #include <linux/device.h> #include <linux/rfkill.h> #include <linux/delay.h> #include <asm/gpio.h> #include "board-trout.h" static struct rfkill *bt_rfk; static const char bt_name[] = "brf6300"; static int bluetooth_set_power(void *data, bool blocked) { if (!blocked) { gpio_set_value(TROUT_GPIO_BT_32K_EN, 1); udelay(10); gpio_direction_output(101, 1); } else { gpio_direction_output(101, 0); gpio_set_value(TROUT_GPIO_BT_32K_EN, 0); } return 0; } static struct rfkill_ops trout_rfkill_ops = { .set_block = bluetooth_set_power, }; static int trout_rfkill_probe(struct platform_device *pdev) { int rc = 0; bool default_state = true; /* off */ bluetooth_set_power(NULL, default_state); bt_rfk = rfkill_alloc(bt_name, &pdev->dev, RFKILL_TYPE_BLUETOOTH, &trout_rfkill_ops, NULL); if (!bt_rfk) return -ENOMEM; rfkill_set_states(bt_rfk, default_state, false); /* userspace cannot take exclusive control */ rc = rfkill_register(bt_rfk); if (rc) rfkill_destroy(bt_rfk); return rc; } static int trout_rfkill_remove(struct platform_device *dev) { rfkill_unregister(bt_rfk); rfkill_destroy(bt_rfk); return 0; } static struct platform_driver trout_rfkill_driver = { .probe = trout_rfkill_probe, .remove = trout_rfkill_remove, .driver = { .name = "trout_rfkill", .owner = THIS_MODULE, }, }; static int __init trout_rfkill_init(void) { return platform_driver_register(&trout_rfkill_driver); } static void __exit trout_rfkill_exit(void) { platform_driver_unregister(&trout_rfkill_driver); } module_init(trout_rfkill_init); module_exit(trout_rfkill_exit); MODULE_DESCRIPTION("trout rfkill"); MODULE_AUTHOR("Nick Pelly <npelly@google.com>"); MODULE_LICENSE("GPL");
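Note that this file only registers a platform driver; the matching platform device has to come from board code, which is not shown here. A hypothetical counterpart for illustration — the name string is the only contract, and it must match trout_rfkill_driver.driver.name:

/* Hypothetical board-code half: the device this driver would bind to. */
static struct platform_device trout_rfkill_device = {
	.name	= "trout_rfkill",	/* must match the driver name above */
	.id	= -1,			/* only one instance */
};

static int __init trout_rfkill_device_init(void)
{
	return platform_device_register(&trout_rfkill_device);
}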
gpl-2.0
nimengyu2/jorjin-ap-module-dm37-android-kernel
arch/arm/mach-pnx4008/serial.c
4592
1661
/* * linux/arch/arm/mach-pnx4008/serial.c * * PNX4008 UART initialization * * Copyright: MontaVista Software Inc. (c) 2005 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/io.h> #include <mach/platform.h> #include <mach/hardware.h> #include <linux/serial_core.h> #include <linux/serial_reg.h> #include <mach/gpio.h> #include <mach/clock.h> #define UART_3 0 #define UART_4 1 #define UART_5 2 #define UART_6 3 #define UART_UNKNOWN (-1) #define UART3_BASE_VA IO_ADDRESS(PNX4008_UART3_BASE) #define UART4_BASE_VA IO_ADDRESS(PNX4008_UART4_BASE) #define UART5_BASE_VA IO_ADDRESS(PNX4008_UART5_BASE) #define UART6_BASE_VA IO_ADDRESS(PNX4008_UART6_BASE) #define UART_FCR_OFFSET 8 #define UART_FIFO_SIZE 64 void pnx4008_uart_init(void) { u32 tmp; int i = UART_FIFO_SIZE; __raw_writel(0xC1, UART5_BASE_VA + UART_FCR_OFFSET); __raw_writel(0xC1, UART3_BASE_VA + UART_FCR_OFFSET); /* Send a NULL to fix the UART HW bug */ __raw_writel(0x00, UART5_BASE_VA); __raw_writel(0x00, UART3_BASE_VA); while (i--) { tmp = __raw_readl(UART5_BASE_VA); tmp = __raw_readl(UART3_BASE_VA); } __raw_writel(0, UART5_BASE_VA + UART_FCR_OFFSET); __raw_writel(0, UART3_BASE_VA + UART_FCR_OFFSET); /* setup wakeup interrupt */ start_int_set_rising_edge(SE_U3_RX_INT); start_int_ack(SE_U3_RX_INT); start_int_umask(SE_U3_RX_INT); start_int_set_rising_edge(SE_U5_RX_INT); start_int_ack(SE_U5_RX_INT); start_int_umask(SE_U5_RX_INT); }
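The init routine above interleaves the errata workaround for UART3 and UART5; factored per port it reads more clearly. A sketch only — same registers and constants, behaviour equivalent up to the interleaving:

/* Per-port version of the FIFO errata dance in pnx4008_uart_init(). */
static void __init pnx4008_uart_flush(void __iomem *base)
{
	int i = UART_FIFO_SIZE;

	__raw_writel(0xC1, base + UART_FCR_OFFSET);	/* enable/reset FIFOs */
	__raw_writel(0x00, base);		/* send a NULL: the HW bug fix */
	while (i--)
		(void)__raw_readl(base);	/* drain whatever came back */
	__raw_writel(0, base + UART_FCR_OFFSET);	/* FIFO control back off */
}

pnx4008_uart_init() would then reduce to two pnx4008_uart_flush() calls followed by the wakeup-interrupt setup.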
gpl-2.0
jazzk/jgedlte-kk
arch/arm/kernel/tcm.c
5104
8280
/* * Copyright (C) 2008-2009 ST-Ericsson AB * License terms: GNU General Public License (GPL) version 2 * TCM memory handling for ARM systems * * Author: Linus Walleij <linus.walleij@stericsson.com> * Author: Rickard Andersson <rickard.andersson@stericsson.com> */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/genalloc.h> #include <linux/string.h> /* memcpy */ #include <asm/cputype.h> #include <asm/mach/map.h> #include <asm/memory.h> #include <asm/system_info.h> #include "tcm.h" static struct gen_pool *tcm_pool; static bool dtcm_present; static bool itcm_present; /* TCM section definitions from the linker */ extern char __itcm_start, __sitcm_text, __eitcm_text; extern char __dtcm_start, __sdtcm_data, __edtcm_data; /* These will be increased as we run */ u32 dtcm_end = DTCM_OFFSET; u32 itcm_end = ITCM_OFFSET; /* * TCM memory resources */ static struct resource dtcm_res = { .name = "DTCM RAM", .start = DTCM_OFFSET, .end = DTCM_OFFSET, .flags = IORESOURCE_MEM }; static struct resource itcm_res = { .name = "ITCM RAM", .start = ITCM_OFFSET, .end = ITCM_OFFSET, .flags = IORESOURCE_MEM }; static struct map_desc dtcm_iomap[] __initdata = { { .virtual = DTCM_OFFSET, .pfn = __phys_to_pfn(DTCM_OFFSET), .length = 0, .type = MT_MEMORY_DTCM } }; static struct map_desc itcm_iomap[] __initdata = { { .virtual = ITCM_OFFSET, .pfn = __phys_to_pfn(ITCM_OFFSET), .length = 0, .type = MT_MEMORY_ITCM } }; /* * Allocate a chunk of TCM memory */ void *tcm_alloc(size_t len) { unsigned long vaddr; if (!tcm_pool) return NULL; vaddr = gen_pool_alloc(tcm_pool, len); if (!vaddr) return NULL; return (void *) vaddr; } EXPORT_SYMBOL(tcm_alloc); /* * Free a chunk of TCM memory */ void tcm_free(void *addr, size_t len) { gen_pool_free(tcm_pool, (unsigned long) addr, len); } EXPORT_SYMBOL(tcm_free); bool tcm_dtcm_present(void) { return dtcm_present; } EXPORT_SYMBOL(tcm_dtcm_present); bool tcm_itcm_present(void) { return itcm_present; } EXPORT_SYMBOL(tcm_itcm_present); static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, u32 *offset) { const int tcm_sizes[16] = { 0, -1, -1, 4, 8, 16, 32, 64, 128, 256, 512, 1024, -1, -1, -1, -1 }; u32 tcm_region; int tcm_size; /* * If there are more than one TCM bank of this type, * select the TCM bank to operate on in the TCM selection * register. */ if (banks > 1) asm("mcr p15, 0, %0, c9, c2, 0" : /* No output operands */ : "r" (bank)); /* Read the special TCM region register c9, 0 */ if (!type) asm("mrc p15, 0, %0, c9, c1, 0" : "=r" (tcm_region)); else asm("mrc p15, 0, %0, c9, c1, 1" : "=r" (tcm_region)); tcm_size = tcm_sizes[(tcm_region >> 2) & 0x0f]; if (tcm_size < 0) { pr_err("CPU: %sTCM%d of unknown size\n", type ? "I" : "D", bank); return -EINVAL; } else if (tcm_size > 32) { pr_err("CPU: %sTCM%d larger than 32k found\n", type ? "I" : "D", bank); return -EINVAL; } else { pr_info("CPU: found %sTCM%d %dk @ %08x, %senabled\n", type ? "I" : "D", bank, tcm_size, (tcm_region & 0xfffff000U), (tcm_region & 1) ? 
"" : "not "); } /* Not much fun you can do with a size 0 bank */ if (tcm_size == 0) return 0; /* Force move the TCM bank to where we want it, enable */ tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1; if (!type) asm("mcr p15, 0, %0, c9, c1, 0" : /* No output operands */ : "r" (tcm_region)); else asm("mcr p15, 0, %0, c9, c1, 1" : /* No output operands */ : "r" (tcm_region)); /* Increase offset */ *offset += (tcm_size << 10); pr_info("CPU: moved %sTCM%d %dk to %08x, enabled\n", type ? "I" : "D", bank, tcm_size, (tcm_region & 0xfffff000U)); return 0; } /* * This initializes the TCM memory */ void __init tcm_init(void) { u32 tcm_status; u8 dtcm_banks; u8 itcm_banks; size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data; size_t itcm_code_sz = &__eitcm_text - &__sitcm_text; char *start; char *end; char *ram; int ret; int i; /* * Prior to ARMv5 there is no TCM, and trying to read the status * register will hang the processor. */ if (cpu_architecture() < CPU_ARCH_ARMv5) { if (dtcm_code_sz || itcm_code_sz) pr_info("CPU TCM: %u bytes of DTCM and %u bytes of " "ITCM code compiled in, but no TCM present " "in pre-v5 CPU\n", dtcm_code_sz, itcm_code_sz); return; } tcm_status = read_cpuid_tcmstatus(); dtcm_banks = (tcm_status >> 16) & 0x03; itcm_banks = (tcm_status & 0x03); /* Values greater than 2 for D/ITCM banks are "reserved" */ if (dtcm_banks > 2) dtcm_banks = 0; if (itcm_banks > 2) itcm_banks = 0; /* Setup DTCM if present */ if (dtcm_banks > 0) { for (i = 0; i < dtcm_banks; i++) { ret = setup_tcm_bank(0, i, dtcm_banks, &dtcm_end); if (ret) return; } /* This means you compiled more code than fits into DTCM */ if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET)) { pr_info("CPU DTCM: %u bytes of code compiled to " "DTCM but only %lu bytes of DTCM present\n", dtcm_code_sz, (dtcm_end - DTCM_OFFSET)); goto no_dtcm; } dtcm_res.end = dtcm_end - 1; request_resource(&iomem_resource, &dtcm_res); dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET; iotable_init(dtcm_iomap, 1); /* Copy data from RAM to DTCM */ start = &__sdtcm_data; end = &__edtcm_data; ram = &__dtcm_start; memcpy(start, ram, dtcm_code_sz); pr_debug("CPU DTCM: copied data from %p - %p\n", start, end); dtcm_present = true; } else if (dtcm_code_sz) { pr_info("CPU DTCM: %u bytes of code compiled to DTCM but no " "DTCM banks present in CPU\n", dtcm_code_sz); } no_dtcm: /* Setup ITCM if present */ if (itcm_banks > 0) { for (i = 0; i < itcm_banks; i++) { ret = setup_tcm_bank(1, i, itcm_banks, &itcm_end); if (ret) return; } /* This means you compiled more code than fits into ITCM */ if (itcm_code_sz > (itcm_end - ITCM_OFFSET)) { pr_info("CPU ITCM: %u bytes of code compiled to " "ITCM but only %lu bytes of ITCM present\n", itcm_code_sz, (itcm_end - ITCM_OFFSET)); return; } itcm_res.end = itcm_end - 1; request_resource(&iomem_resource, &itcm_res); itcm_iomap[0].length = itcm_end - ITCM_OFFSET; iotable_init(itcm_iomap, 1); /* Copy code from RAM to ITCM */ start = &__sitcm_text; end = &__eitcm_text; ram = &__itcm_start; memcpy(start, ram, itcm_code_sz); pr_debug("CPU ITCM: copied code from %p - %p\n", start, end); itcm_present = true; } else if (itcm_code_sz) { pr_info("CPU ITCM: %u bytes of code compiled to ITCM but no " "ITCM banks present in CPU\n", itcm_code_sz); } } /* * This creates the TCM memory pool and has to be done later, * during the core_initicalls, since the allocator is not yet * up and running when the first initialization runs. 
*/ static int __init setup_tcm_pool(void) { u32 dtcm_pool_start = (u32) &__edtcm_data; u32 itcm_pool_start = (u32) &__eitcm_text; int ret; /* * Set up malloc pool, 2^2 = 4 bytes granularity since * the TCM is sometimes just 4 KiB. NB: pages and cache * line alignments does not matter in TCM! */ tcm_pool = gen_pool_create(2, -1); pr_debug("Setting up TCM memory pool\n"); /* Add the rest of DTCM to the TCM pool */ if (dtcm_present) { if (dtcm_pool_start < dtcm_end) { ret = gen_pool_add(tcm_pool, dtcm_pool_start, dtcm_end - dtcm_pool_start, -1); if (ret) { pr_err("CPU DTCM: could not add DTCM " \ "remainder to pool!\n"); return ret; } pr_debug("CPU DTCM: Added %08x bytes @ %08x to " \ "the TCM memory pool\n", dtcm_end - dtcm_pool_start, dtcm_pool_start); } } /* Add the rest of ITCM to the TCM pool */ if (itcm_present) { if (itcm_pool_start < itcm_end) { ret = gen_pool_add(tcm_pool, itcm_pool_start, itcm_end - itcm_pool_start, -1); if (ret) { pr_err("CPU ITCM: could not add ITCM " \ "remainder to pool!\n"); return ret; } pr_debug("CPU ITCM: Added %08x bytes @ %08x to " \ "the TCM memory pool\n", itcm_end - itcm_pool_start, itcm_pool_start); } } return 0; } core_initcall(setup_tcm_pool);
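With the pool in place, tcm_alloc()/tcm_free() behave like a tiny device-local allocator. A sketch of a consumer that prefers TCM but degrades gracefully to normal memory (all names here are hypothetical):

/* Hypothetical consumer: scratch buffer in TCM when available. */
#include <linux/slab.h>

#define SCRATCH_SZ 128			/* size is arbitrary here */

static void *scratch;
static bool scratch_in_tcm;

static int __init scratch_init(void)
{
	scratch = tcm_alloc(SCRATCH_SZ);	/* NULL if no pool or no room */
	scratch_in_tcm = (scratch != NULL);
	if (!scratch)
		scratch = kmalloc(SCRATCH_SZ, GFP_KERNEL);
	return scratch ? 0 : -ENOMEM;
}

static void scratch_exit(void)
{
	if (scratch_in_tcm)
		tcm_free(scratch, SCRATCH_SZ);	/* len must match the alloc */
	else
		kfree(scratch);
}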
gpl-2.0
Alexiis337/Test
arch/um/os-Linux/registers.c
5360
1158
/* * Copyright (C) 2004 PathScale, Inc * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <errno.h> #include <string.h> #include <sys/ptrace.h> #include "sysdep/ptrace.h" #include "sysdep/ptrace_user.h" #include "registers.h" int save_registers(int pid, struct uml_pt_regs *regs) { int err; err = ptrace(PTRACE_GETREGS, pid, 0, regs->gp); if (err < 0) return -errno; return 0; } int restore_registers(int pid, struct uml_pt_regs *regs) { int err; err = ptrace(PTRACE_SETREGS, pid, 0, regs->gp); if (err < 0) return -errno; return 0; } /* This is set once at boot time and not changed thereafter */ static unsigned long exec_regs[MAX_REG_NR]; static unsigned long exec_fp_regs[FP_SIZE]; int init_registers(int pid) { int err; err = ptrace(PTRACE_GETREGS, pid, 0, exec_regs); if (err < 0) return -errno; arch_init_registers(pid); get_fp_registers(pid, exec_fp_regs); return 0; } void get_safe_registers(unsigned long *regs, unsigned long *fp_regs) { memcpy(regs, exec_regs, sizeof(exec_regs)); if (fp_regs) memcpy(fp_regs, exec_fp_regs, sizeof(exec_fp_regs)); }
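The two entry points pair naturally: snapshot, disturb, restore. A sketch of the intended calling pattern (the callback and wrapper names are made up):

/* Sketch: run fn() against a stopped child without losing its GP state. */
static int with_saved_registers(int pid, void (*fn)(int pid))
{
	struct uml_pt_regs regs;
	int err;

	err = save_registers(pid, &regs);	/* PTRACE_GETREGS snapshot */
	if (err)
		return err;			/* already -errno */
	fn(pid);		/* may freely clobber the child's registers */
	return restore_registers(pid, &regs);	/* PTRACE_SETREGS put-back */
}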
gpl-2.0
emansih/d855_CM_kernel
drivers/scsi/aic7xxx/aic7xxx_core.c
8432
216265
/* * Core routines and tables shareable across OS platforms. * * Copyright (c) 1994-2002 Justin T. Gibbs. * Copyright (c) 2000-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $ */ #ifdef __linux__ #include "aic7xxx_osm.h" #include "aic7xxx_inline.h" #include "aicasm/aicasm_insformat.h" #else #include <dev/aic7xxx/aic7xxx_osm.h> #include <dev/aic7xxx/aic7xxx_inline.h> #include <dev/aic7xxx/aicasm/aicasm_insformat.h> #endif /***************************** Lookup Tables **********************************/ static const char *const ahc_chip_names[] = { "NONE", "aic7770", "aic7850", "aic7855", "aic7859", "aic7860", "aic7870", "aic7880", "aic7895", "aic7895C", "aic7890/91", "aic7896/97", "aic7892", "aic7899" }; static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names); /* * Hardware error codes. 
*/ struct ahc_hard_error_entry { uint8_t errno; const char *errmesg; }; static const struct ahc_hard_error_entry ahc_hard_errors[] = { { ILLHADDR, "Illegal Host Access" }, { ILLSADDR, "Illegal Sequencer Address referrenced" }, { ILLOPCODE, "Illegal Opcode in sequencer program" }, { SQPARERR, "Sequencer Parity Error" }, { DPARERR, "Data-path Parity Error" }, { MPARERR, "Scratch or SCB Memory Parity Error" }, { PCIERRSTAT, "PCI Error detected" }, { CIOPARERR, "CIOBUS Parity Error" }, }; static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors); static const struct ahc_phase_table_entry ahc_phase_table[] = { { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, { P_COMMAND, MSG_NOOP, "in Command phase" }, { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, { P_BUSFREE, MSG_NOOP, "while idle" }, { 0, MSG_NOOP, "in unknown phase" } }; /* * In most cases we only wish to itterate over real phases, so * exclude the last element from the count. */ static const u_int num_phases = ARRAY_SIZE(ahc_phase_table) - 1; /* * Valid SCSIRATE values. (p. 3-17) * Provides a mapping of tranfer periods in ns to the proper value to * stick in the scsixfer reg. */ static const struct ahc_syncrate ahc_syncrates[] = { /* ultra2 fast/ultra period rate */ { 0x42, 0x000, 9, "80.0" }, { 0x03, 0x000, 10, "40.0" }, { 0x04, 0x000, 11, "33.0" }, { 0x05, 0x100, 12, "20.0" }, { 0x06, 0x110, 15, "16.0" }, { 0x07, 0x120, 18, "13.4" }, { 0x08, 0x000, 25, "10.0" }, { 0x19, 0x010, 31, "8.0" }, { 0x1a, 0x020, 37, "6.67" }, { 0x1b, 0x030, 43, "5.7" }, { 0x1c, 0x040, 50, "5.0" }, { 0x00, 0x050, 56, "4.4" }, { 0x00, 0x060, 62, "4.0" }, { 0x00, 0x070, 68, "3.6" }, { 0x00, 0x000, 0, NULL } }; /* Our Sequencer Program */ #include "aic7xxx_seq.h" /**************************** Function Declarations ***************************/ static void ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static struct ahc_tmode_tstate* ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel); #ifdef AHC_TARGET_MODE static void ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force); #endif static const struct ahc_syncrate* ahc_devlimited_syncrate(struct ahc_softc *ahc, struct ahc_initiator_tinfo *, u_int *period, u_int *ppr_options, role_t role); static void ahc_update_pending_scbs(struct ahc_softc *ahc); static void ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); static void ahc_assert_atn(struct ahc_softc *ahc); static void ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); static void ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset); static void ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int bus_width); static void ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options); static void ahc_clear_msg_state(struct ahc_softc *ahc); static void ahc_handle_proto_violation(struct ahc_softc *ahc); static void 
ahc_handle_message_phase(struct ahc_softc *ahc); typedef enum { AHCMSG_1B, AHCMSG_2B, AHCMSG_EXT } ahc_msgtype; static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full); static int ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static int ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc); static void ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, cam_status status, char *message, int verbose_level); #ifdef AHC_TARGET_MODE static void ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); #endif static bus_dmamap_callback_t ahc_dmamap_cb; static void ahc_build_free_scb_list(struct ahc_softc *ahc); static int ahc_init_scbdata(struct ahc_softc *ahc); static void ahc_fini_scbdata(struct ahc_softc *ahc); static void ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, struct scb *scb); static int ahc_qinfifo_count(struct ahc_softc *ahc); static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr); static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); static u_int ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev); static void ahc_reset_current_bus(struct ahc_softc *ahc); #ifdef AHC_DUMP_SEQ static void ahc_dumpseq(struct ahc_softc *ahc); #endif static int ahc_loadseq(struct ahc_softc *ahc); static int ahc_check_patch(struct ahc_softc *ahc, const struct patch **start_patch, u_int start_instr, u_int *skip_addr); static void ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts); #ifdef AHC_TARGET_MODE static void ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg); static void ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask); static int ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd); #endif static u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl); static void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl); static void ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int busyid); /************************** SCB and SCB queue management **********************/ static void ahc_run_untagged_queues(struct ahc_softc *ahc); static void ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue); /****************************** Initialization ********************************/ static void ahc_alloc_scbs(struct ahc_softc *ahc); static void ahc_shutdown(void *arg); /*************************** Interrupt Services *******************************/ static void ahc_clear_intstat(struct ahc_softc *ahc); static void ahc_run_qoutfifo(struct ahc_softc *ahc); #ifdef AHC_TARGET_MODE static void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused); #endif static void ahc_handle_brkadrint(struct ahc_softc *ahc); static void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat); static void ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat); static void ahc_clear_critical_section(struct ahc_softc *ahc); /***************************** Error Recovery *********************************/ static void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb); static int ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status); static void ahc_calc_residual(struct 
ahc_softc *ahc, struct scb *scb); /*********************** Untagged Transaction Routines ************************/ static inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc); static inline void ahc_release_untagged_queues(struct ahc_softc *ahc); /* * Block our completion routine from starting the next untagged * transaction for this target or target lun. */ static inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc) { if ((ahc->flags & AHC_SCB_BTT) == 0) ahc->untagged_queue_lock++; } /* * Allow the next untagged transaction for this target or target lun * to be executed. We use a counting semaphore to allow the lock * to be acquired recursively. Once the count drops to zero, the * transaction queues will be run. */ static inline void ahc_release_untagged_queues(struct ahc_softc *ahc) { if ((ahc->flags & AHC_SCB_BTT) == 0) { ahc->untagged_queue_lock--; if (ahc->untagged_queue_lock == 0) ahc_run_untagged_queues(ahc); } } /************************* Sequencer Execution Control ************************/ /* * Work around any chip bugs related to halting sequencer execution. * On Ultra2 controllers, we must clear the CIOBUS stretch signal by * reading a register that will set this signal and deassert it. * Without this workaround, if the chip is paused, by an interrupt or * manual pause while accessing scb ram, accesses to certain registers * will hang the system (infinite pci retries). */ static void ahc_pause_bug_fix(struct ahc_softc *ahc) { if ((ahc->features & AHC_ULTRA2) != 0) (void)ahc_inb(ahc, CCSCBCTL); } /* * Determine whether the sequencer has halted code execution. * Returns non-zero status if the sequencer is stopped. */ int ahc_is_paused(struct ahc_softc *ahc) { return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0); } /* * Request that the sequencer stop and wait, indefinitely, for it * to stop. The sequencer will only acknowledge that it is paused * once it has reached an instruction boundary and PAUSEDIS is * cleared in the SEQCTL register. The sequencer may use PAUSEDIS * for critical sections. */ void ahc_pause(struct ahc_softc *ahc) { ahc_outb(ahc, HCNTRL, ahc->pause); /* * Since the sequencer can disable pausing in a critical section, we * must loop until it actually stops. */ while (ahc_is_paused(ahc) == 0) ; ahc_pause_bug_fix(ahc); } /* * Allow the sequencer to continue program execution. * We check here to ensure that no additional interrupt * sources that would cause the sequencer to halt have been * asserted. If, for example, a SCSI bus reset is detected * while we are fielding a different, pausing, interrupt type, * we don't want to release the sequencer before going back * into our interrupt handler and dealing with this new * condition. 
*/ void ahc_unpause(struct ahc_softc *ahc) { if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0) ahc_outb(ahc, HCNTRL, ahc->unpause); } /************************** Memory mapping routines ***************************/ static struct ahc_dma_seg * ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr) { int sg_index; sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg); /* sg_list_phys points to entry 1, not 0 */ sg_index++; return (&scb->sg_list[sg_index]); } static uint32_t ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg) { int sg_index; /* sg_list_phys points to entry 1, not 0 */ sg_index = sg - &scb->sg_list[1]; return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list))); } static uint32_t ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index) { return (ahc->scb_data->hscb_busaddr + (sizeof(struct hardware_scb) * index)); } static void ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op) { ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat, ahc->scb_data->hscb_dmamap, /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb), /*len*/sizeof(*scb->hscb), op); } void ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op) { if (scb->sg_count == 0) return; ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap, /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr) * sizeof(struct ahc_dma_seg), /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op); } #ifdef AHC_TARGET_MODE static uint32_t ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index) { return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo); } #endif /*********************** Miscellaneous Support Functions ***********************/ /* * Determine whether the sequencer reported a residual * for this SCB/transaction. */ static void ahc_update_residual(struct ahc_softc *ahc, struct scb *scb) { uint32_t sgptr; sgptr = ahc_le32toh(scb->hscb->sgptr); if ((sgptr & SG_RESID_VALID) != 0) ahc_calc_residual(ahc, scb); } /* * Return pointers to the transfer negotiation information * for the specified our_id/remote_id pair. */ struct ahc_initiator_tinfo * ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id, u_int remote_id, struct ahc_tmode_tstate **tstate) { /* * Transfer data structures are stored from the perspective * of the target role. Since the parameters for a connection * in the initiator role to a given target are the same as * when the roles are reversed, we pretend we are the target. 
*/ if (channel == 'B') our_id += 8; *tstate = ahc->enabled_targets[our_id]; return (&(*tstate)->transinfo[remote_id]); } uint16_t ahc_inw(struct ahc_softc *ahc, u_int port) { uint16_t r = ahc_inb(ahc, port+1) << 8; return r | ahc_inb(ahc, port); } void ahc_outw(struct ahc_softc *ahc, u_int port, u_int value) { ahc_outb(ahc, port, value & 0xFF); ahc_outb(ahc, port+1, (value >> 8) & 0xFF); } uint32_t ahc_inl(struct ahc_softc *ahc, u_int port) { return ((ahc_inb(ahc, port)) | (ahc_inb(ahc, port+1) << 8) | (ahc_inb(ahc, port+2) << 16) | (ahc_inb(ahc, port+3) << 24)); } void ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value) { ahc_outb(ahc, port, (value) & 0xFF); ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF); ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF); ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF); } uint64_t ahc_inq(struct ahc_softc *ahc, u_int port) { return ((ahc_inb(ahc, port)) | (ahc_inb(ahc, port+1) << 8) | (ahc_inb(ahc, port+2) << 16) | (ahc_inb(ahc, port+3) << 24) | (((uint64_t)ahc_inb(ahc, port+4)) << 32) | (((uint64_t)ahc_inb(ahc, port+5)) << 40) | (((uint64_t)ahc_inb(ahc, port+6)) << 48) | (((uint64_t)ahc_inb(ahc, port+7)) << 56)); } void ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value) { ahc_outb(ahc, port, value & 0xFF); ahc_outb(ahc, port+1, (value >> 8) & 0xFF); ahc_outb(ahc, port+2, (value >> 16) & 0xFF); ahc_outb(ahc, port+3, (value >> 24) & 0xFF); ahc_outb(ahc, port+4, (value >> 32) & 0xFF); ahc_outb(ahc, port+5, (value >> 40) & 0xFF); ahc_outb(ahc, port+6, (value >> 48) & 0xFF); ahc_outb(ahc, port+7, (value >> 56) & 0xFF); } /* * Get a free scb. If there are none, see if we can allocate a new SCB. */ struct scb * ahc_get_scb(struct ahc_softc *ahc) { struct scb *scb; if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) { ahc_alloc_scbs(ahc); scb = SLIST_FIRST(&ahc->scb_data->free_scbs); if (scb == NULL) return (NULL); } SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle); return (scb); } /* * Return an SCB resource to the free list. */ void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb) { struct hardware_scb *hscb; hscb = scb->hscb; /* Clean up for the next user */ ahc->scb_data->scbindex[hscb->tag] = NULL; scb->flags = SCB_FREE; hscb->control = 0; SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle); /* Notify the OSM that a resource is now available. */ ahc_platform_scb_free(ahc, scb); } struct scb * ahc_lookup_scb(struct ahc_softc *ahc, u_int tag) { struct scb* scb; scb = ahc->scb_data->scbindex[tag]; if (scb != NULL) ahc_sync_scb(ahc, scb, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); return (scb); } static void ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb) { struct hardware_scb *q_hscb; u_int saved_tag; /* * Our queuing method is a bit tricky. The card * knows in advance which HSCB to download, and we * can't disappoint it. To achieve this, the next * SCB to download is saved off in ahc->next_queued_scb. * When we are called to queue "an arbitrary scb", * we copy the contents of the incoming HSCB to the one * the sequencer knows about, swap HSCB pointers and * finally assign the SCB to the tag indexed location * in the scb_array. This makes sure that we can still * locate the correct SCB by SCB_TAG. 
*/ q_hscb = ahc->next_queued_scb->hscb; saved_tag = q_hscb->tag; memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb)); if ((scb->flags & SCB_CDB32_PTR) != 0) { q_hscb->shared_data.cdb_ptr = ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag) + offsetof(struct hardware_scb, cdb32)); } q_hscb->tag = saved_tag; q_hscb->next = scb->hscb->tag; /* Now swap HSCB pointers. */ ahc->next_queued_scb->hscb = scb->hscb; scb->hscb = q_hscb; /* Now define the mapping from tag to SCB in the scbindex */ ahc->scb_data->scbindex[scb->hscb->tag] = scb; } /* * Tell the sequencer about a new transaction to execute. */ void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb) { ahc_swap_with_next_hscb(ahc, scb); if (scb->hscb->tag == SCB_LIST_NULL || scb->hscb->next == SCB_LIST_NULL) panic("Attempt to queue invalid SCB tag %x:%x\n", scb->hscb->tag, scb->hscb->next); /* * Setup data "oddness". */ scb->hscb->lun &= LID; if (ahc_get_transfer_length(scb) & 0x1) scb->hscb->lun |= SCB_XFERLEN_ODD; /* * Keep a history of SCBs we've downloaded in the qinfifo. */ ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; /* * Make sure our data is consistent from the * perspective of the adapter. */ ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); /* Tell the adapter about the newly queued SCB */ if ((ahc->features & AHC_QUEUE_REGS) != 0) { ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); } else { if ((ahc->features & AHC_AUTOPAUSE) == 0) ahc_pause(ahc); ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); if ((ahc->features & AHC_AUTOPAUSE) == 0) ahc_unpause(ahc); } } struct scsi_sense_data * ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb) { int offset; offset = scb - ahc->scb_data->scbarray; return (&ahc->scb_data->sense[offset]); } static uint32_t ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb) { int offset; offset = scb - ahc->scb_data->scbarray; return (ahc->scb_data->sense_busaddr + (offset * sizeof(struct scsi_sense_data))); } /************************** Interrupt Processing ******************************/ static void ahc_sync_qoutfifo(struct ahc_softc *ahc, int op) { ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, /*offset*/0, /*len*/256, op); } static void ahc_sync_tqinfifo(struct ahc_softc *ahc, int op) { #ifdef AHC_TARGET_MODE if ((ahc->flags & AHC_TARGETROLE) != 0) { ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc_targetcmd_offset(ahc, 0), sizeof(struct target_cmd) * AHC_TMODE_CMDS, op); } #endif } /* * See if the firmware has posted any completed commands * into our in-core command complete fifos. 
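 *
 * A compiled-out sketch of the intended consumer is included
 * below; the real consumer is ahc_intr() further down, and the
 * wrapper shown here is hypothetical.
 */

#if 0 /* illustrative sketch -- never compiled */
/*
 * Hypothetical polling wrapper showing how the mask returned by
 * ahc_check_cmdcmpltqueues() pairs with the fifo run routines.
 */
static void
ahc_example_poll_completions(struct ahc_softc *ahc)
{
	u_int run;

	run = ahc_check_cmdcmpltqueues(ahc);
	if ((run & AHC_RUN_QOUTFIFO) != 0)
		ahc_run_qoutfifo(ahc);
#ifdef AHC_TARGET_MODE
	if ((run & AHC_RUN_TQINFIFO) != 0)
		ahc_run_tqinfifo(ahc, /*paused*/FALSE);
#endif
}
#endif

/*
 * (ahc_check_cmdcmpltqueues() and its flag bits follow.)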
 */
#define AHC_RUN_QOUTFIFO 0x1
#define AHC_RUN_TQINFIFO 0x2
static u_int
ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
{
	u_int retval;

	retval = 0;
	ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
			/*offset*/ahc->qoutfifonext, /*len*/1,
			BUS_DMASYNC_POSTREAD);
	if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
		retval |= AHC_RUN_QOUTFIFO;
#ifdef AHC_TARGET_MODE
	if ((ahc->flags & AHC_TARGETROLE) != 0
	 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
		ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
				ahc->shared_data_dmamap,
				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
				/*len*/sizeof(struct target_cmd),
				BUS_DMASYNC_POSTREAD);
		if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
			retval |= AHC_RUN_TQINFIFO;
	}
#endif
	return (retval);
}

/*
 * Catch an interrupt from the adapter
 */
int
ahc_intr(struct ahc_softc *ahc)
{
	u_int	intstat;

	if ((ahc->pause & INTEN) == 0) {
		/*
		 * Our interrupt is not enabled on the chip
		 * and may be disabled for re-entrancy reasons,
		 * so just return.  This is likely just a shared
		 * interrupt.
		 */
		return (0);
	}
	/*
	 * Instead of directly reading the interrupt status register,
	 * infer the cause of the interrupt by checking our in-core
	 * completion queues.  This avoids a costly PCI bus read in
	 * most cases.
	 */
	if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
	 && (ahc_check_cmdcmpltqueues(ahc) != 0))
		intstat = CMDCMPLT;
	else {
		intstat = ahc_inb(ahc, INTSTAT);
	}

	if ((intstat & INT_PEND) == 0) {
#if AHC_PCI_CONFIG > 0
		if (ahc->unsolicited_ints > 500) {
			ahc->unsolicited_ints = 0;
			if ((ahc->chip & AHC_PCI) != 0
			 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
				ahc->bus_intr(ahc);
		}
#endif
		ahc->unsolicited_ints++;
		return (0);
	}
	ahc->unsolicited_ints = 0;

	if (intstat & CMDCMPLT) {
		ahc_outb(ahc, CLRINT, CLRCMDINT);

		/*
		 * Ensure that the chip sees that we've cleared
		 * this interrupt before we walk the output fifo.
		 * Otherwise, we may, due to posted bus writes,
		 * clear the interrupt after we finish the scan,
		 * and after the sequencer has added new entries
		 * and asserted the interrupt again.
		 */
		ahc_flush_device_writes(ahc);
		ahc_run_qoutfifo(ahc);
#ifdef AHC_TARGET_MODE
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			ahc_run_tqinfifo(ahc, /*paused*/FALSE);
#endif
	}

	/*
	 * Handle statuses that may invalidate our cached
	 * copy of INTSTAT separately.
	 */
	if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
		/* Hot eject.  Do nothing */
	} else if (intstat & BRKADRINT) {
		ahc_handle_brkadrint(ahc);
	} else if ((intstat & (SEQINT|SCSIINT)) != 0) {

		ahc_pause_bug_fix(ahc);

		if ((intstat & SEQINT) != 0)
			ahc_handle_seqint(ahc, intstat);

		if ((intstat & SCSIINT) != 0)
			ahc_handle_scsiint(ahc, intstat);
	}
	return (1);
}

/************************* Sequencer Execution Control ************************/
/*
 * Restart the sequencer program from address zero
 */
static void
ahc_restart(struct ahc_softc *ahc)
{
	uint8_t	sblkctl;

	ahc_pause(ahc);

	/* No more pending messages. */
	ahc_clear_msg_state(ahc);

	ahc_outb(ahc, SCSISIGO, 0);		/* De-assert BSY */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);	/* No message to send */
	ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
	ahc_outb(ahc, LASTPHASE, P_BUSFREE);
	ahc_outb(ahc, SAVED_SCSIID, 0xFF);
	ahc_outb(ahc, SAVED_LUN, 0xFF);

	/*
	 * Ensure that the sequencer's idea of TQINPOS
	 * matches our own.  The sequencer increments TQINPOS
	 * only after it sees a DMA complete, so a reset could
	 * occur before the increment, leaving the kernel believing
	 * a command arrived that the sequencer never noticed.
*/ ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); /* Always allow reselection */ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); if ((ahc->features & AHC_CMD_CHAN) != 0) { /* Ensure that no DMA operations are in progress */ ahc_outb(ahc, CCSCBCNT, 0); ahc_outb(ahc, CCSGCTL, 0); ahc_outb(ahc, CCSCBCTL, 0); } /* * If we were in the process of DMA'ing SCB data into * an SCB, replace that SCB on the free list. This prevents * an SCB leak. */ if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) { ahc_add_curscb_to_free_list(ahc); ahc_outb(ahc, SEQ_FLAGS2, ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA); } /* * Clear any pending sequencer interrupt. It is no * longer relevant since we're resetting the Program * Counter. */ ahc_outb(ahc, CLRINT, CLRSEQINT); ahc_outb(ahc, MWI_RESIDUAL, 0); ahc_outb(ahc, SEQCTL, ahc->seqctl); ahc_outb(ahc, SEQADDR0, 0); ahc_outb(ahc, SEQADDR1, 0); /* * Take the LED out of diagnostic mode on PM resume, too */ sblkctl = ahc_inb(ahc, SBLKCTL); ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON))); ahc_unpause(ahc); } /************************* Input/Output Queues ********************************/ static void ahc_run_qoutfifo(struct ahc_softc *ahc) { struct scb *scb; u_int scb_index; ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) { scb_index = ahc->qoutfifo[ahc->qoutfifonext]; if ((ahc->qoutfifonext & 0x03) == 0x03) { u_int modnext; /* * Clear 32bits of QOUTFIFO at a time * so that we don't clobber an incoming * byte DMA to the array on architectures * that only support 32bit load and store * operations. */ modnext = ahc->qoutfifonext & ~0x3; *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL; ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, /*offset*/modnext, /*len*/4, BUS_DMASYNC_PREREAD); } ahc->qoutfifonext++; scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { printk("%s: WARNING no command for scb %d " "(cmdcmplt)\nQOUTPOS = %d\n", ahc_name(ahc), scb_index, (ahc->qoutfifonext - 1) & 0xFF); continue; } /* * Save off the residual * if there is one. 
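		 * (ahc_update_residual() only computes one when the
		 *  sequencer has set SG_RESID_VALID in the hscb's
		 *  sgptr field.)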
*/ ahc_update_residual(ahc, scb); ahc_done(ahc, scb); } } static void ahc_run_untagged_queues(struct ahc_softc *ahc) { int i; for (i = 0; i < 16; i++) ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); } static void ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) { struct scb *scb; if (ahc->untagged_queue_lock != 0) return; if ((scb = TAILQ_FIRST(queue)) != NULL && (scb->flags & SCB_ACTIVE) == 0) { scb->flags |= SCB_ACTIVE; ahc_queue_scb(ahc, scb); } } /************************* Interrupt Handling *********************************/ static void ahc_handle_brkadrint(struct ahc_softc *ahc) { /* * We upset the sequencer :-( * Lookup the error message */ int i; int error; error = ahc_inb(ahc, ERROR); for (i = 0; error != 1 && i < num_errors; i++) error >>= 1; printk("%s: brkadrint, %s at seqaddr = 0x%x\n", ahc_name(ahc), ahc_hard_errors[i].errmesg, ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); ahc_dump_card_state(ahc); /* Tell everyone that this HBA is no longer available */ ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, CAM_NO_HBA); /* Disable all interrupt sources by resetting the controller */ ahc_shutdown(ahc); } static void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) { struct scb *scb; struct ahc_devinfo devinfo; ahc_fetch_devinfo(ahc, &devinfo); /* * Clear the upper byte that holds SEQINT status * codes and clear the SEQINT bit. We will unpause * the sequencer, if appropriate, after servicing * the request. */ ahc_outb(ahc, CLRINT, CLRSEQINT); switch (intstat & SEQINT_MASK) { case BAD_STATUS: { u_int scb_index; struct hardware_scb *hscb; /* * Set the default return value to 0 (don't * send sense). The sense code will change * this if needed. */ ahc_outb(ahc, RETURN_1, 0); /* * The sequencer will notify us when a command * has an error that would be of interest to * the kernel. This allows us to leave the sequencer * running in the common case of command completes * without error. The sequencer will already have * dma'd the SCB back up to us, so we can reference * the in kernel copy directly. */ scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { ahc_print_devinfo(ahc, &devinfo); printk("ahc_intr - referenced scb " "not valid during seqint 0x%x scb(%d)\n", intstat, scb_index); ahc_dump_card_state(ahc); panic("for safety"); goto unpause; } hscb = scb->hscb; /* Don't want to clobber the original sense code */ if ((scb->flags & SCB_SENSE) != 0) { /* * Clear the SCB_SENSE Flag and have * the sequencer do a normal command * complete. */ scb->flags &= ~SCB_SENSE; ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); break; } ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); /* Freeze the queue until the client sees the error. 
 */
		ahc_freeze_devq(ahc, scb);
		ahc_freeze_scb(scb);
		ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
		switch (hscb->shared_data.status.scsi_status) {
		case SCSI_STATUS_OK:
			printk("%s: Interrupted for status of 0???\n",
			       ahc_name(ahc));
			break;
		case SCSI_STATUS_CMD_TERMINATED:
		case SCSI_STATUS_CHECK_COND:
		{
			struct ahc_dma_seg *sg;
			struct scsi_sense *sc;
			struct ahc_initiator_tinfo *targ_info;
			struct ahc_tmode_tstate *tstate;
			struct ahc_transinfo *tinfo;
#ifdef AHC_DEBUG
			if (ahc_debug & AHC_SHOW_SENSE) {
				ahc_print_path(ahc, scb);
				printk("SCB %d: requests Check Status\n",
				       scb->hscb->tag);
			}
#endif

			if (ahc_perform_autosense(scb) == 0)
				break;

			targ_info = ahc_fetch_transinfo(ahc,
							devinfo.channel,
							devinfo.our_scsiid,
							devinfo.target,
							&tstate);
			tinfo = &targ_info->curr;
			sg = scb->sg_list;
			sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
			/*
			 * Save off the residual if there is one.
			 */
			ahc_update_residual(ahc, scb);
#ifdef AHC_DEBUG
			if (ahc_debug & AHC_SHOW_SENSE) {
				ahc_print_path(ahc, scb);
				printk("Sending Sense\n");
			}
#endif
			sg->addr = ahc_get_sense_bufaddr(ahc, scb);
			sg->len = ahc_get_sense_bufsize(ahc, scb);
			sg->len |= AHC_DMA_LAST_SEG;

			/* Fixup byte order */
			sg->addr = ahc_htole32(sg->addr);
			sg->len = ahc_htole32(sg->len);

			sc->opcode = REQUEST_SENSE;
			sc->byte2 = 0;
			if (tinfo->protocol_version <= SCSI_REV_2
			 && SCB_GET_LUN(scb) < 8)
				sc->byte2 = SCB_GET_LUN(scb) << 5;
			sc->unused[0] = 0;
			sc->unused[1] = 0;
			sc->length = sg->len;
			sc->control = 0;

			/*
			 * We can't allow the target to disconnect.
			 * This will be an untagged transaction and
			 * having the target disconnect will make this
			 * transaction indistinguishable from outstanding
			 * tagged transactions.
			 */
			hscb->control = 0;

			/*
			 * This request sense could be because the
			 * device lost power or in some other
			 * way has lost our transfer negotiations.
			 * Renegotiate if appropriate.  Unit attention
			 * errors will be reported before any data
			 * phases occur.
			 */
			if (ahc_get_residual(scb)
			 == ahc_get_transfer_length(scb)) {
				ahc_update_neg_request(ahc, &devinfo,
						       tstate, targ_info,
						       AHC_NEG_IF_NON_ASYNC);
			}
			if (tstate->auto_negotiate & devinfo.target_mask) {
				hscb->control |= MK_MESSAGE;
				scb->flags &= ~SCB_NEGOTIATE;
				scb->flags |= SCB_AUTO_NEGOTIATE;
			}
			hscb->cdb_len = sizeof(*sc);
			hscb->dataptr = sg->addr;
			hscb->datacnt = sg->len;
			hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
			hscb->sgptr = ahc_htole32(hscb->sgptr);
			scb->sg_count = 1;
			scb->flags |= SCB_SENSE;
			ahc_qinfifo_requeue_tail(ahc, scb);
			ahc_outb(ahc, RETURN_1, SEND_SENSE);
			/*
			 * Ensure we have enough time to actually
			 * retrieve the sense.
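			 * The rearm below grants five seconds
			 * (5 * 1000000 us) for the REQUEST SENSE
			 * to complete.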
*/ ahc_scb_timer_reset(scb, 5 * 1000000); break; } default: break; } break; } case NO_MATCH: { /* Ensure we don't leave the selection hardware on */ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); printk("%s:%c:%d: no active SCB for reconnecting " "target - issuing BUS DEVICE RESET\n", ahc_name(ahc), devinfo.channel, devinfo.target); printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " "ARG_1 == 0x%x ACCUM = 0x%x\n", ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " "SINDEX == 0x%x\n", ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), ahc_index_busy_tcl(ahc, BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN))), ahc_inb(ahc, SINDEX)); printk("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), ahc_inb(ahc, SCB_CONTROL)); printk("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); printk("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0)); printk("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL)); ahc_dump_card_state(ahc); ahc->msgout_buf[0] = MSG_BUS_DEV_RESET; ahc->msgout_len = 1; ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; ahc_outb(ahc, MSG_OUT, HOST_MSG); ahc_assert_atn(ahc); break; } case SEND_REJECT: { u_int rejbyte = ahc_inb(ahc, ACCUM); printk("%s:%c:%d: Warning - unknown message received from " "target (0x%x). Rejecting\n", ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); break; } case PROTO_VIOLATION: { ahc_handle_proto_violation(ahc); break; } case IGN_WIDE_RES: ahc_handle_ign_wide_residue(ahc, &devinfo); break; case PDATA_REINIT: ahc_reinitialize_dataptrs(ahc); break; case BAD_PHASE: { u_int lastphase; lastphase = ahc_inb(ahc, LASTPHASE); printk("%s:%c:%d: unknown scsi bus phase %x, " "lastphase = 0x%x. Attempting to continue\n", ahc_name(ahc), devinfo.channel, devinfo.target, lastphase, ahc_inb(ahc, SCSISIGI)); break; } case MISSED_BUSFREE: { u_int lastphase; lastphase = ahc_inb(ahc, LASTPHASE); printk("%s:%c:%d: Missed busfree. " "Lastphase = 0x%x, Curphase = 0x%x\n", ahc_name(ahc), devinfo.channel, devinfo.target, lastphase, ahc_inb(ahc, SCSISIGI)); ahc_restart(ahc); return; } case HOST_MSG_LOOP: { /* * The sequencer has encountered a message phase * that requires host assistance for completion. * While handling the message phase(s), we will be * notified by the sequencer after each byte is * transferred so we can track bus phase changes. * * If this is the first time we've seen a HOST_MSG_LOOP * interrupt, initialize the state of the host message * loop. */ if (ahc->msg_type == MSG_TYPE_NONE) { struct scb *scb; u_int scb_index; u_int bus_phase; bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; if (bus_phase != P_MESGIN && bus_phase != P_MESGOUT) { printk("ahc_intr: HOST_MSG_LOOP bad " "phase 0x%x\n", bus_phase); /* * Probably transitioned to bus free before * we got here. Just punt the message. 
*/ ahc_clear_intstat(ahc); ahc_restart(ahc); return; } scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (devinfo.role == ROLE_INITIATOR) { if (bus_phase == P_MESGOUT) { if (scb == NULL) panic("HOST_MSG_LOOP with " "invalid SCB %x\n", scb_index); ahc_setup_initiator_msgout(ahc, &devinfo, scb); } else { ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; ahc->msgin_index = 0; } } #ifdef AHC_TARGET_MODE else { if (bus_phase == P_MESGOUT) { ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; ahc->msgin_index = 0; } else ahc_setup_target_msgin(ahc, &devinfo, scb); } #endif } ahc_handle_message_phase(ahc); break; } case PERR_DETECTED: { /* * If we've cleared the parity error interrupt * but the sequencer still believes that SCSIPERR * is true, it must be that the parity error is * for the currently presented byte on the bus, * and we are not in a phase (data-in) where we will * eventually ack this byte. Ack the byte and * throw it away in the hope that the target will * take us to message out to deliver the appropriate * error message. */ if ((intstat & SCSIINT) == 0 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) { if ((ahc->features & AHC_DT) == 0) { u_int curphase; /* * The hardware will only let you ack bytes * if the expected phase in SCSISIGO matches * the current phase. Make sure this is * currently the case. */ curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; ahc_outb(ahc, LASTPHASE, curphase); ahc_outb(ahc, SCSISIGO, curphase); } if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) { int wait; /* * In a data phase. Faster to bitbucket * the data than to individually ack each * byte. This is also the only strategy * that will work with AUTOACK enabled. */ ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) | BITBUCKET); wait = 5000; while (--wait != 0) { if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) != 0) break; ahc_delay(100); } ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); if (wait == 0) { struct scb *scb; u_int scb_index; ahc_print_devinfo(ahc, &devinfo); printk("Unable to clear parity error. " "Resetting bus.\n"); scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (scb != NULL) ahc_set_transaction_status(scb, CAM_UNCOR_PARITY); ahc_reset_channel(ahc, devinfo.channel, /*init reset*/TRUE); } } else { ahc_inb(ahc, SCSIDATL); } } break; } case DATA_OVERRUN: { /* * When the sequencer detects an overrun, it * places the controller in "BITBUCKET" mode * and allows the target to complete its transfer. * Unfortunately, none of the counters get updated * when the controller is in this mode, so we have * no way of knowing how large the overrun was. */ u_int scbindex = ahc_inb(ahc, SCB_TAG); u_int lastphase = ahc_inb(ahc, LASTPHASE); u_int i; scb = ahc_lookup_scb(ahc, scbindex); for (i = 0; i < num_phases; i++) { if (lastphase == ahc_phase_table[i].phase) break; } ahc_print_path(ahc, scb); printk("data overrun detected %s." " Tag == 0x%x.\n", ahc_phase_table[i].phasemsg, scb->hscb->tag); ahc_print_path(ahc, scb); printk("%s seen Data Phase. Length = %ld. NumSGs = %d.\n", ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't", ahc_get_transfer_length(scb), scb->sg_count); if (scb->sg_count > 0) { for (i = 0; i < scb->sg_count; i++) { printk("sg[%d] - Addr 0x%x%x : Length %d\n", i, (ahc_le32toh(scb->sg_list[i].len) >> 24 & SG_HIGH_ADDR_BITS), ahc_le32toh(scb->sg_list[i].addr), ahc_le32toh(scb->sg_list[i].len) & AHC_SG_LEN_MASK); } } /* * Set this and it will take effect when the * target does a command complete. 
*/ ahc_freeze_devq(ahc, scb); if ((scb->flags & SCB_SENSE) == 0) { ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR); } else { scb->flags &= ~SCB_SENSE; ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); } ahc_freeze_scb(scb); if ((ahc->features & AHC_ULTRA2) != 0) { /* * Clear the channel in case we return * to data phase later. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); } if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { u_int dscommand1; /* Ensure HHADDR is 0 for future DMA operations. */ dscommand1 = ahc_inb(ahc, DSCOMMAND1); ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); ahc_outb(ahc, HADDR, 0); ahc_outb(ahc, DSCOMMAND1, dscommand1); } break; } case MKMSG_FAILED: { u_int scbindex; printk("%s:%c:%d:%d: Attempt to issue message failed\n", ahc_name(ahc), devinfo.channel, devinfo.target, devinfo.lun); scbindex = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scbindex); if (scb != NULL && (scb->flags & SCB_RECOVERY_SCB) != 0) /* * Ensure that we didn't put a second instance of this * SCB into the QINFIFO. */ ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), SCB_GET_CHANNEL(ahc, scb), SCB_GET_LUN(scb), scb->hscb->tag, ROLE_INITIATOR, /*status*/0, SEARCH_REMOVE); break; } case NO_FREE_SCB: { printk("%s: No free or disconnected SCBs\n", ahc_name(ahc)); ahc_dump_card_state(ahc); panic("for safety"); break; } case SCB_MISMATCH: { u_int scbptr; scbptr = ahc_inb(ahc, SCBPTR); printk("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n", scbptr, ahc_inb(ahc, ARG_1), ahc->scb_data->hscbs[scbptr].tag); ahc_dump_card_state(ahc); panic("for safety"); break; } case OUT_OF_RANGE: { printk("%s: BTT calculation out of range\n", ahc_name(ahc)); printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " "ARG_1 == 0x%x ACCUM = 0x%x\n", ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " "SINDEX == 0x%x\n, A == 0x%x\n", ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), ahc_index_busy_tcl(ahc, BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN))), ahc_inb(ahc, SINDEX), ahc_inb(ahc, ACCUM)); printk("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), ahc_inb(ahc, SCB_CONTROL)); printk("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); ahc_dump_card_state(ahc); panic("for safety"); break; } default: printk("ahc_intr: seqint, " "intstat == 0x%x, scsisigi = 0x%x\n", intstat, ahc_inb(ahc, SCSISIGI)); break; } unpause: /* * The sequencer is paused immediately on * a SEQINT, so we should restart it when * we're done. 
*/ ahc_unpause(ahc); } static void ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) { u_int scb_index; u_int status0; u_int status; struct scb *scb; char cur_channel; char intr_channel; if ((ahc->features & AHC_TWIN) != 0 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) cur_channel = 'B'; else cur_channel = 'A'; intr_channel = cur_channel; if ((ahc->features & AHC_ULTRA2) != 0) status0 = ahc_inb(ahc, SSTAT0) & IOERR; else status0 = 0; status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); if (status == 0 && status0 == 0) { if ((ahc->features & AHC_TWIN) != 0) { /* Try the other channel */ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); intr_channel = (cur_channel == 'A') ? 'B' : 'A'; } if (status == 0) { printk("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_unpause(ahc); return; } } /* Make sure the sequencer is in a safe location. */ ahc_clear_critical_section(ahc); scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (scb != NULL && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) scb = NULL; if ((ahc->features & AHC_ULTRA2) != 0 && (status0 & IOERR) != 0) { int now_lvd; now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40; printk("%s: Transceiver State Has Changed to %s mode\n", ahc_name(ahc), now_lvd ? "LVD" : "SE"); ahc_outb(ahc, CLRSINT0, CLRIOERR); /* * When transitioning to SE mode, the reset line * glitches, triggering an arbitration bug in some * Ultra2 controllers. This bug is cleared when we * assert the reset line. Since a reset glitch has * already occurred with this transition and a * transceiver state change is handled just like * a bus reset anyway, asserting the reset line * ourselves is safe. */ ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/now_lvd == 0); } else if ((status & SCSIRSTI) != 0) { printk("%s: Someone reset channel %c\n", ahc_name(ahc), intr_channel); if (intr_channel != cur_channel) ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE); } else if ((status & SCSIPERR) != 0) { /* * Determine the bus phase and queue an appropriate message. * SCSIPERR is latched true as soon as a parity error * occurs. If the sequencer acked the transfer that * caused the parity error and the currently presented * transfer on the bus has correct parity, SCSIPERR will * be cleared by CLRSCSIPERR. Use this to determine if * we should look at the last phase the sequencer recorded, * or the current phase presented on the bus. */ struct ahc_devinfo devinfo; u_int mesg_out; u_int curphase; u_int errorphase; u_int lastphase; u_int scsirate; u_int i; u_int sstat2; int silent; lastphase = ahc_inb(ahc, LASTPHASE); curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; sstat2 = ahc_inb(ahc, SSTAT2); ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); /* * For all phases save DATA, the sequencer won't * automatically ack a byte that has a parity error * in it. So the only way that the current phase * could be 'data-in' is if the parity error is for * an already acked byte in the data phase. During * synchronous data-in transfers, we may actually * ack bytes before latching the current phase in * LASTPHASE, leading to the discrepancy between * curphase and lastphase. 
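		 * The test below therefore trusts curphase whenever
		 * SCSIPERR is still asserted or the bus is presenting
		 * a data-in phase, and falls back to lastphase
		 * otherwise.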
 */
		if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
		 || curphase == P_DATAIN || curphase == P_DATAIN_DT)
			errorphase = curphase;
		else
			errorphase = lastphase;

		for (i = 0; i < num_phases; i++) {
			if (errorphase == ahc_phase_table[i].phase)
				break;
		}
		mesg_out = ahc_phase_table[i].mesg_out;
		silent = FALSE;
		if (scb != NULL) {
			if (SCB_IS_SILENT(scb))
				silent = TRUE;
			else
				ahc_print_path(ahc, scb);
			scb->flags |= SCB_TRANSMISSION_ERROR;
		} else
			printk("%s:%c:%d: ", ahc_name(ahc), intr_channel,
			       SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
		scsirate = ahc_inb(ahc, SCSIRATE);
		if (silent == FALSE) {
			printk("parity error detected %s. "
			       "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
			       ahc_phase_table[i].phasemsg,
			       ahc_inw(ahc, SEQADDR0),
			       scsirate);
			if ((ahc->features & AHC_DT) != 0) {
				if ((sstat2 & CRCVALERR) != 0)
					printk("\tCRC Value Mismatch\n");
				if ((sstat2 & CRCENDERR) != 0)
					printk("\tNo terminal CRC packet "
					       "received\n");
				if ((sstat2 & CRCREQERR) != 0)
					printk("\tIllegal CRC packet "
					       "request\n");
				if ((sstat2 & DUAL_EDGE_ERR) != 0)
					printk("\tUnexpected %sDT Data Phase\n",
					       (scsirate & SINGLE_EDGE)
					     ? "" : "non-");
			}
		}

		if ((ahc->features & AHC_DT) != 0
		 && (sstat2 & DUAL_EDGE_ERR) != 0) {
			/*
			 * This error applies regardless of
			 * data direction, so ignore the value
			 * in the phase table.
			 */
			mesg_out = MSG_INITIATOR_DET_ERR;
		}

		/*
		 * We've set the hardware to assert ATN if we
		 * get a parity error on "in" phases, so all we
		 * need to do is stuff the message buffer with
		 * the appropriate message.  "In" phases have set
		 * mesg_out to something other than MSG_NOOP.
		 */
		if (mesg_out != MSG_NOOP) {
			if (ahc->msg_type != MSG_TYPE_NONE)
				ahc->send_msg_perror = TRUE;
			else
				ahc_outb(ahc, MSG_OUT, mesg_out);
		}
		/*
		 * Force a renegotiation with this target just in
		 * case we are out of sync for some external reason
		 * unknown (or unreported) by the target.
		 */
		ahc_fetch_devinfo(ahc, &devinfo);
		ahc_force_renegotiation(ahc, &devinfo);

		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		ahc_unpause(ahc);
	} else if ((status & SELTO) != 0) {
		u_int	scbptr;

		/* Stop the selection */
		ahc_outb(ahc, SCSISEQ, 0);

		/* No more pending messages */
		ahc_clear_msg_state(ahc);

		/* Clear interrupt state */
		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
		ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);

		/*
		 * Although the driver does not care about the
		 * 'Selection in Progress' status bit, the busy
		 * LED does.  SELINGO is only cleared by a successful
		 * selection, so we must manually clear it to ensure
		 * the LED turns off just in case no future successful
		 * selections occur (e.g. no devices on the bus).
		 */
		ahc_outb(ahc, CLRSINT0, CLRSELINGO);

		scbptr = ahc_inb(ahc, WAITING_SCBH);
		ahc_outb(ahc, SCBPTR, scbptr);
		scb_index = ahc_inb(ahc, SCB_TAG);

		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printk("%s: ahc_intr - referenced scb not "
			       "valid during SELTO scb(%d, %d)\n",
			       ahc_name(ahc), scbptr, scb_index);
			ahc_dump_card_state(ahc);
		} else {
			struct ahc_devinfo devinfo;
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_SELTO) != 0) {
				ahc_print_path(ahc, scb);
				printk("Saw Selection Timeout for SCB 0x%x\n",
				       scb_index);
			}
#endif
			ahc_scb_devinfo(ahc, &devinfo, scb);
			ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
			ahc_freeze_devq(ahc, scb);

			/*
			 * Cancel any pending transactions on the device
			 * now that it seems to be missing.  This will
			 * also revert us to async/narrow transfers until
			 * we can renegotiate with the device.
 */
			ahc_handle_devreset(ahc, &devinfo,
					    CAM_SEL_TIMEOUT,
					    "Selection Timeout",
					    /*verbose_level*/1);
		}
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		ahc_restart(ahc);
	} else if ((status & BUSFREE) != 0
		&& (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
		struct	ahc_devinfo devinfo;
		u_int	lastphase;
		u_int	saved_scsiid;
		u_int	saved_lun;
		u_int	target;
		u_int	initiator_role_id;
		char	channel;
		int	printerror;

		/*
		 * Clear our selection hardware as soon as possible.
		 * We may have an entry in the waiting Q for this target
		 * that is affected by this busfree, and we don't want to
		 * go about selecting the target while we handle the event.
		 */
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));

		/*
		 * Disable busfree interrupts and clear the busfree
		 * interrupt status.  We do this here so that several
		 * bus transactions occur prior to clearing the SCSIINT
		 * latch.  It can take a bit for the clearing to take effect.
		 */
		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
		ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);

		/*
		 * Look at what phase we were last in.
		 * If it's message out, chances are pretty good
		 * that the busfree was in response to one of
		 * our abort requests.
		 */
		lastphase = ahc_inb(ahc, LASTPHASE);
		saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
		saved_lun = ahc_inb(ahc, SAVED_LUN);
		target = SCSIID_TARGET(ahc, saved_scsiid);
		initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
		channel = SCSIID_CHANNEL(ahc, saved_scsiid);
		ahc_compile_devinfo(&devinfo, initiator_role_id,
				    target, saved_lun, channel, ROLE_INITIATOR);
		printerror = 1;

		if (lastphase == P_MESGOUT) {
			u_int tag;

			tag = SCB_LIST_NULL;
			if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE)
			 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) {
				if (ahc->msgout_buf[ahc->msgout_index - 1]
				 == MSG_ABORT_TAG)
					tag = scb->hscb->tag;
				ahc_print_path(ahc, scb);
				printk("SCB %d - Abort%s Completed.\n",
				       scb->hscb->tag,
				       tag == SCB_LIST_NULL ? "" : " Tag");
				ahc_abort_scbs(ahc, target, channel,
					       saved_lun, tag,
					       ROLE_INITIATOR,
					       CAM_REQ_ABORTED);
				printerror = 0;
			} else if (ahc_sent_msg(ahc, AHCMSG_1B,
						MSG_BUS_DEV_RESET, TRUE)) {
#ifdef __FreeBSD__
				/*
				 * Don't mark the user's request for this BDR
				 * as completing with CAM_BDR_SENT.  CAM3
				 * specifies CAM_REQ_CMP.
				 */
				if (scb != NULL
				 && scb->io_ctx->ccb_h.func_code == XPT_RESET_DEV
				 && ahc_match_scb(ahc, scb, target, channel,
						  CAM_LUN_WILDCARD,
						  SCB_LIST_NULL,
						  ROLE_INITIATOR)) {
					ahc_set_transaction_status(scb, CAM_REQ_CMP);
				}
#endif
				ahc_compile_devinfo(&devinfo,
						    initiator_role_id,
						    target,
						    CAM_LUN_WILDCARD,
						    channel,
						    ROLE_INITIATOR);
				ahc_handle_devreset(ahc, &devinfo,
						    CAM_BDR_SENT,
						    "Bus Device Reset",
						    /*verbose_level*/0);
				printerror = 0;
			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
						MSG_EXT_PPR, FALSE)) {
				struct ahc_initiator_tinfo *tinfo;
				struct ahc_tmode_tstate *tstate;

				/*
				 * PPR Rejected.  Try non-ppr negotiation
				 * and retry command.
				 */
				tinfo = ahc_fetch_transinfo(ahc,
							    devinfo.channel,
							    devinfo.our_scsiid,
							    devinfo.target,
							    &tstate);
				tinfo->curr.transport_version = 2;
				tinfo->goal.transport_version = 2;
				tinfo->goal.ppr_options = 0;
				ahc_qinfifo_requeue_tail(ahc, scb);
				printerror = 0;
			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
						MSG_EXT_WDTR, FALSE)) {
				/*
				 * Negotiation Rejected.  Go-narrow and
				 * retry command.
				 */
				ahc_set_width(ahc, &devinfo,
					      MSG_EXT_WDTR_BUS_8_BIT,
					      AHC_TRANS_CUR|AHC_TRANS_GOAL,
					      /*paused*/TRUE);
				ahc_qinfifo_requeue_tail(ahc, scb);
				printerror = 0;
			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
						MSG_EXT_SDTR, FALSE)) {
				/*
				 * Negotiation Rejected.  Go-async and
				 * retry command.
*/ ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_CUR|AHC_TRANS_GOAL, /*paused*/TRUE); ahc_qinfifo_requeue_tail(ahc, scb); printerror = 0; } } if (printerror != 0) { u_int i; if (scb != NULL) { u_int tag; if ((scb->hscb->control & TAG_ENB) != 0) tag = scb->hscb->tag; else tag = SCB_LIST_NULL; ahc_print_path(ahc, scb); ahc_abort_scbs(ahc, target, channel, SCB_GET_LUN(scb), tag, ROLE_INITIATOR, CAM_UNEXP_BUSFREE); } else { /* * We had not fully identified this connection, * so we cannot abort anything. */ printk("%s: ", ahc_name(ahc)); } for (i = 0; i < num_phases; i++) { if (lastphase == ahc_phase_table[i].phase) break; } if (lastphase != P_BUSFREE) { /* * Renegotiate with this device at the * next opportunity just in case this busfree * is due to a negotiation mismatch with the * device. */ ahc_force_renegotiation(ahc, &devinfo); } printk("Unexpected busfree %s\n" "SEQADDR == 0x%x\n", ahc_phase_table[i].phasemsg, ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); } ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_restart(ahc); } else { printk("%s: Missing case in ahc_handle_scsiint. status = %x\n", ahc_name(ahc), status); ahc_outb(ahc, CLRINT, CLRSCSIINT); } } /* * Force renegotiation to occur the next time we initiate * a command to the current device. */ static void ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { struct ahc_initiator_tinfo *targ_info; struct ahc_tmode_tstate *tstate; targ_info = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); ahc_update_neg_request(ahc, devinfo, tstate, targ_info, AHC_NEG_IF_NON_ASYNC); } #define AHC_MAX_STEPS 2000 static void ahc_clear_critical_section(struct ahc_softc *ahc) { int stepping; int steps; u_int simode0; u_int simode1; if (ahc->num_critical_sections == 0) return; stepping = FALSE; steps = 0; simode0 = 0; simode1 = 0; for (;;) { struct cs *cs; u_int seqaddr; u_int i; seqaddr = ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8); /* * Seqaddr represents the next instruction to execute, * so we are really executing the instruction just * before it. */ if (seqaddr != 0) seqaddr -= 1; cs = ahc->critical_sections; for (i = 0; i < ahc->num_critical_sections; i++, cs++) { if (cs->begin < seqaddr && cs->end >= seqaddr) break; } if (i == ahc->num_critical_sections) break; if (steps > AHC_MAX_STEPS) { printk("%s: Infinite loop in critical section\n", ahc_name(ahc)); ahc_dump_card_state(ahc); panic("critical section loop"); } steps++; if (stepping == FALSE) { /* * Disable all interrupt sources so that the * sequencer will not be stuck by a pausing * interrupt condition while we attempt to * leave a critical section. */ simode0 = ahc_inb(ahc, SIMODE0); ahc_outb(ahc, SIMODE0, 0); simode1 = ahc_inb(ahc, SIMODE1); if ((ahc->features & AHC_DT) != 0) /* * On DT class controllers, we * use the enhanced busfree logic. * Unfortunately we cannot re-enable * busfree detection within the * current connection, so we must * leave it on while single stepping. 
*/ ahc_outb(ahc, SIMODE1, simode1 & ENBUSFREE); else ahc_outb(ahc, SIMODE1, 0); ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_outb(ahc, SEQCTL, ahc->seqctl | STEP); stepping = TRUE; } if ((ahc->features & AHC_DT) != 0) { ahc_outb(ahc, CLRSINT1, CLRBUSFREE); ahc_outb(ahc, CLRINT, CLRSCSIINT); } ahc_outb(ahc, HCNTRL, ahc->unpause); while (!ahc_is_paused(ahc)) ahc_delay(200); } if (stepping) { ahc_outb(ahc, SIMODE0, simode0); ahc_outb(ahc, SIMODE1, simode1); ahc_outb(ahc, SEQCTL, ahc->seqctl); } } /* * Clear any pending interrupt status. */ static void ahc_clear_intstat(struct ahc_softc *ahc) { /* Clear any interrupt conditions this may have caused */ ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG| CLRREQINIT); ahc_flush_device_writes(ahc); ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO); ahc_flush_device_writes(ahc); ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_flush_device_writes(ahc); } /**************************** Debugging Routines ******************************/ #ifdef AHC_DEBUG uint32_t ahc_debug = AHC_DEBUG_OPTS; #endif #if 0 /* unused */ static void ahc_print_scb(struct scb *scb) { int i; struct hardware_scb *hscb = scb->hscb; printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", (void *)scb, hscb->control, hscb->scsiid, hscb->lun, hscb->cdb_len); printk("Shared Data: "); for (i = 0; i < sizeof(hscb->shared_data.cdb); i++) printk("%#02x", hscb->shared_data.cdb[i]); printk(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n", ahc_le32toh(hscb->dataptr), ahc_le32toh(hscb->datacnt), ahc_le32toh(hscb->sgptr), hscb->tag); if (scb->sg_count > 0) { for (i = 0; i < scb->sg_count; i++) { printk("sg[%d] - Addr 0x%x%x : Length %d\n", i, (ahc_le32toh(scb->sg_list[i].len) >> 24 & SG_HIGH_ADDR_BITS), ahc_le32toh(scb->sg_list[i].addr), ahc_le32toh(scb->sg_list[i].len)); } } } #endif /************************* Transfer Negotiation *******************************/ /* * Allocate per target mode instance (ID we respond to as a target) * transfer negotiation data structures. */ static struct ahc_tmode_tstate * ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel) { struct ahc_tmode_tstate *master_tstate; struct ahc_tmode_tstate *tstate; int i; master_tstate = ahc->enabled_targets[ahc->our_id]; if (channel == 'B') { scsi_id += 8; master_tstate = ahc->enabled_targets[ahc->our_id_b + 8]; } if (ahc->enabled_targets[scsi_id] != NULL && ahc->enabled_targets[scsi_id] != master_tstate) panic("%s: ahc_alloc_tstate - Target already allocated", ahc_name(ahc)); tstate = kmalloc(sizeof(*tstate), GFP_ATOMIC); if (tstate == NULL) return (NULL); /* * If we have allocated a master tstate, copy user settings from * the master tstate (taken from SRAM or the EEPROM) for this * channel, but reset our current and goal settings to async/narrow * until an initiator talks to us. */ if (master_tstate != NULL) { memcpy(tstate, master_tstate, sizeof(*tstate)); memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); tstate->ultraenb = 0; for (i = 0; i < AHC_NUM_TARGETS; i++) { memset(&tstate->transinfo[i].curr, 0, sizeof(tstate->transinfo[i].curr)); memset(&tstate->transinfo[i].goal, 0, sizeof(tstate->transinfo[i].goal)); } } else memset(tstate, 0, sizeof(*tstate)); ahc->enabled_targets[scsi_id] = tstate; return (tstate); } #ifdef AHC_TARGET_MODE /* * Free per target mode instance (ID we respond to as a target) * transfer negotiation data structures. 
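 *
 * A compiled-out sketch of how this allocate/free pair is meant to
 * be driven is included below; it is illustrative only, and the
 * enable/disable wrapper shown is hypothetical.
 */

#if 0 /* illustrative sketch -- never compiled */
/*
 * Hypothetical wrapper pairing ahc_alloc_tstate() with
 * ahc_free_tstate().  Real callers are the target mode lun
 * enable/disable paths; error handling here is schematic.
 */
static int
ahc_example_toggle_target_id(struct ahc_softc *ahc, u_int scsi_id,
			     char channel, int enable)
{
	if (enable) {
		if (ahc_alloc_tstate(ahc, scsi_id, channel) == NULL)
			return (ENOMEM);
		return (0);
	}
	ahc_free_tstate(ahc, scsi_id, channel, /*force*/FALSE);
	return (0);
}
#endif

/*
 * ahc_free_tstate() itself: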
 */
static void
ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
{
	struct ahc_tmode_tstate *tstate;

	/*
	 * Don't clean up our "master" tstate.
	 * It has our default user settings.
	 */
	if (((channel == 'B' && scsi_id == ahc->our_id_b)
	  || (channel == 'A' && scsi_id == ahc->our_id))
	 && force == FALSE)
		return;

	if (channel == 'B')
		scsi_id += 8;
	tstate = ahc->enabled_targets[scsi_id];
	if (tstate != NULL)
		kfree(tstate);
	ahc->enabled_targets[scsi_id] = NULL;
}
#endif

/*
 * Called when we have an active connection to a target on the bus,
 * this function finds the nearest syncrate to the input period limited
 * by the capabilities of the bus connectivity and the sync settings
 * for the target.
 */
const struct ahc_syncrate *
ahc_devlimited_syncrate(struct ahc_softc *ahc,
			struct ahc_initiator_tinfo *tinfo,
			u_int *period, u_int *ppr_options, role_t role)
{
	struct	ahc_transinfo *transinfo;
	u_int	maxsync;

	if ((ahc->features & AHC_ULTRA2) != 0) {
		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
			maxsync = AHC_SYNCRATE_DT;
		} else {
			maxsync = AHC_SYNCRATE_ULTRA;
			/* Can't do DT on an SE bus */
			*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
		}
	} else if ((ahc->features & AHC_ULTRA) != 0) {
		maxsync = AHC_SYNCRATE_ULTRA;
	} else {
		maxsync = AHC_SYNCRATE_FAST;
	}
	/*
	 * Never allow a value higher than our current goal
	 * period; otherwise we may allow a target initiated
	 * negotiation to go above the limit as set by the
	 * user.  In the case of an initiator initiated
	 * sync negotiation, we limit based on the user
	 * setting.  This allows the system to still accept
	 * incoming negotiations even if target initiated
	 * negotiation is not performed.
	 */
	if (role == ROLE_TARGET)
		transinfo = &tinfo->user;
	else
		transinfo = &tinfo->goal;
	*ppr_options &= transinfo->ppr_options;
	if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
		maxsync = max(maxsync, (u_int)AHC_SYNCRATE_ULTRA2);
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
	}
	if (transinfo->period == 0) {
		*period = 0;
		*ppr_options = 0;
		return (NULL);
	}
	*period = max(*period, (u_int)transinfo->period);
	return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
}

/*
 * Look up the valid period to SCSIRATE conversion in our table.
 * Return the period and offset that should be sent to the target
 * if this was the beginning of an SDTR.
 */
const struct ahc_syncrate *
ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
		  u_int *ppr_options, u_int maxsync)
{
	const struct ahc_syncrate *syncrate;

	if ((ahc->features & AHC_DT) == 0)
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;

	/* Skip all DT only entries if DT is not available */
	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
	 && maxsync < AHC_SYNCRATE_ULTRA2)
		maxsync = AHC_SYNCRATE_ULTRA2;

	/*
	 * Now set maxsync based on the card's capabilities;
	 * DT was already handled above.
	 */
	if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0
	 && maxsync < AHC_SYNCRATE_ULTRA)
		maxsync = AHC_SYNCRATE_ULTRA;
	if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0
	 && maxsync < AHC_SYNCRATE_FAST)
		maxsync = AHC_SYNCRATE_FAST;

	for (syncrate = &ahc_syncrates[maxsync];
	     syncrate->rate != NULL;
	     syncrate++) {

		/*
		 * The Ultra2 table doesn't go as low
		 * as for the Fast/Ultra cards.
		 */
		if ((ahc->features & AHC_ULTRA2) != 0
		 && (syncrate->sxfr_u2 == 0))
			break;

		if (*period <= syncrate->period) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive.
Because of this, * we want to respond to the target with * the same rate that it sent to us even * if the period we use to send data to it * is lower. Only lower the response period * if we must. */ if (syncrate == &ahc_syncrates[maxsync]) *period = syncrate->period; /* * At some speeds, we only support * ST transfers. */ if ((syncrate->sxfr_u2 & ST_SXFR) != 0) *ppr_options &= ~MSG_EXT_PPR_DT_REQ; break; } } if ((*period == 0) || (syncrate->rate == NULL) || ((ahc->features & AHC_ULTRA2) != 0 && (syncrate->sxfr_u2 == 0))) { /* Use asynchronous transfers. */ *period = 0; syncrate = NULL; *ppr_options &= ~MSG_EXT_PPR_DT_REQ; } return (syncrate); } /* * Convert from an entry in our syncrate table to the SCSI equivalent * sync "period" factor. */ u_int ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) { const struct ahc_syncrate *syncrate; if ((ahc->features & AHC_ULTRA2) != 0) scsirate &= SXFR_ULTRA2; else scsirate &= SXFR; /* now set maxsync based on card capabilities */ if ((ahc->features & AHC_DT) == 0 && maxsync < AHC_SYNCRATE_ULTRA2) maxsync = AHC_SYNCRATE_ULTRA2; if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0 && maxsync < AHC_SYNCRATE_ULTRA) maxsync = AHC_SYNCRATE_ULTRA; if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0 && maxsync < AHC_SYNCRATE_FAST) maxsync = AHC_SYNCRATE_FAST; syncrate = &ahc_syncrates[maxsync]; while (syncrate->rate != NULL) { if ((ahc->features & AHC_ULTRA2) != 0) { if (syncrate->sxfr_u2 == 0) break; else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2)) return (syncrate->period); } else if (scsirate == (syncrate->sxfr & SXFR)) { return (syncrate->period); } syncrate++; } return (0); /* async */ } /* * Truncate the given synchronous offset to a value the * current adapter type and syncrate are capable of. */ static void ahc_validate_offset(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, const struct ahc_syncrate *syncrate, u_int *offset, int wide, role_t role) { u_int maxoffset; /* Limit offset to what we can do */ if (syncrate == NULL) { maxoffset = 0; } else if ((ahc->features & AHC_ULTRA2) != 0) { maxoffset = MAX_OFFSET_ULTRA2; } else { if (wide) maxoffset = MAX_OFFSET_16BIT; else maxoffset = MAX_OFFSET_8BIT; } *offset = min(*offset, maxoffset); if (tinfo != NULL) { if (role == ROLE_TARGET) *offset = min(*offset, (u_int)tinfo->user.offset); else *offset = min(*offset, (u_int)tinfo->goal.offset); } } /* * Truncate the given transfer width parameter to a value the * current adapter type is capable of. */ static void ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, u_int *bus_width, role_t role) { switch (*bus_width) { default: if (ahc->features & AHC_WIDE) { /* Respond Wide */ *bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; } /* FALLTHROUGH */ case MSG_EXT_WDTR_BUS_8_BIT: *bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; } if (tinfo != NULL) { if (role == ROLE_TARGET) *bus_width = min((u_int)tinfo->user.width, *bus_width); else *bus_width = min((u_int)tinfo->goal.width, *bus_width); } } /* * Update the bitmask of targets for which the controller should * negotiate with at the next convenient opportunity. This currently * means the next time we send the initial identify messages for * a new transaction. 
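 *
 * The return value is nonzero only when the auto_negotiate mask
 * actually changed; callers use it to decide whether pending SCBs
 * must be refreshed (see ahc_update_pending_scbs()).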
*/ int ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct ahc_tmode_tstate *tstate, struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type) { u_int auto_negotiate_orig; auto_negotiate_orig = tstate->auto_negotiate; if (neg_type == AHC_NEG_ALWAYS) { /* * Force our "current" settings to be * unknown so that unless a bus reset * occurs the need to renegotiate is * recorded persistently. */ if ((ahc->features & AHC_WIDE) != 0) tinfo->curr.width = AHC_WIDTH_UNKNOWN; tinfo->curr.period = AHC_PERIOD_UNKNOWN; tinfo->curr.offset = AHC_OFFSET_UNKNOWN; } if (tinfo->curr.period != tinfo->goal.period || tinfo->curr.width != tinfo->goal.width || tinfo->curr.offset != tinfo->goal.offset || tinfo->curr.ppr_options != tinfo->goal.ppr_options || (neg_type == AHC_NEG_IF_NON_ASYNC && (tinfo->goal.offset != 0 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT || tinfo->goal.ppr_options != 0))) tstate->auto_negotiate |= devinfo->target_mask; else tstate->auto_negotiate &= ~devinfo->target_mask; return (auto_negotiate_orig != tstate->auto_negotiate); } /* * Update the user/goal/curr tables of synchronous negotiation * parameters as well as, in the case of a current or active update, * any data structures on the host controller. In the case of an * active update, the specified target is currently talking to us on * the bus, so the transfer parameter update must take effect * immediately. */ void ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, const struct ahc_syncrate *syncrate, u_int period, u_int offset, u_int ppr_options, u_int type, int paused) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int old_period; u_int old_offset; u_int old_ppr; int active; int update_needed; active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; update_needed = 0; if (syncrate == NULL) { period = 0; offset = 0; } tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); if ((type & AHC_TRANS_USER) != 0) { tinfo->user.period = period; tinfo->user.offset = offset; tinfo->user.ppr_options = ppr_options; } if ((type & AHC_TRANS_GOAL) != 0) { tinfo->goal.period = period; tinfo->goal.offset = offset; tinfo->goal.ppr_options = ppr_options; } old_period = tinfo->curr.period; old_offset = tinfo->curr.offset; old_ppr = tinfo->curr.ppr_options; if ((type & AHC_TRANS_CUR) != 0 && (old_period != period || old_offset != offset || old_ppr != ppr_options)) { u_int scsirate; update_needed++; scsirate = tinfo->scsirate; if ((ahc->features & AHC_ULTRA2) != 0) { scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC); if (syncrate != NULL) { scsirate |= syncrate->sxfr_u2; if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) scsirate |= ENABLE_CRC; else scsirate |= SINGLE_EDGE; } } else { scsirate &= ~(SXFR|SOFS); /* * Ensure Ultra mode is set properly for * this target. 
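			 * (The per-target ultraenb bit is recomputed
			 *  below and, for an active connection, mirrored
			 *  into the FAST20 bit of SXFRCTL0.)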
*/ tstate->ultraenb &= ~devinfo->target_mask; if (syncrate != NULL) { if (syncrate->sxfr & ULTRA_SXFR) { tstate->ultraenb |= devinfo->target_mask; } scsirate |= syncrate->sxfr & SXFR; scsirate |= offset & SOFS; } if (active) { u_int sxfrctl0; sxfrctl0 = ahc_inb(ahc, SXFRCTL0); sxfrctl0 &= ~FAST20; if (tstate->ultraenb & devinfo->target_mask) sxfrctl0 |= FAST20; ahc_outb(ahc, SXFRCTL0, sxfrctl0); } } if (active) { ahc_outb(ahc, SCSIRATE, scsirate); if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SCSIOFFSET, offset); } tinfo->scsirate = scsirate; tinfo->curr.period = period; tinfo->curr.offset = offset; tinfo->curr.ppr_options = ppr_options; ahc_send_async(ahc, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_TRANSFER_NEG); if (bootverbose) { if (offset != 0) { printk("%s: target %d synchronous at %sMHz%s, " "offset = 0x%x\n", ahc_name(ahc), devinfo->target, syncrate->rate, (ppr_options & MSG_EXT_PPR_DT_REQ) ? " DT" : "", offset); } else { printk("%s: target %d using " "asynchronous transfers\n", ahc_name(ahc), devinfo->target); } } } update_needed += ahc_update_neg_request(ahc, devinfo, tstate, tinfo, AHC_NEG_TO_GOAL); if (update_needed) ahc_update_pending_scbs(ahc); } /* * Update the user/goal/curr tables of wide negotiation * parameters as well as, in the case of a current or active update, * any data structures on the host controller. In the case of an * active update, the specified target is currently talking to us on * the bus, so the transfer parameter update must take effect * immediately. */ void ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int width, u_int type, int paused) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int oldwidth; int active; int update_needed; active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; update_needed = 0; tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); if ((type & AHC_TRANS_USER) != 0) tinfo->user.width = width; if ((type & AHC_TRANS_GOAL) != 0) tinfo->goal.width = width; oldwidth = tinfo->curr.width; if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) { u_int scsirate; update_needed++; scsirate = tinfo->scsirate; scsirate &= ~WIDEXFER; if (width == MSG_EXT_WDTR_BUS_16_BIT) scsirate |= WIDEXFER; tinfo->scsirate = scsirate; if (active) ahc_outb(ahc, SCSIRATE, scsirate); tinfo->curr.width = width; ahc_send_async(ahc, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_TRANSFER_NEG); if (bootverbose) { printk("%s: target %d using %dbit transfers\n", ahc_name(ahc), devinfo->target, 8 * (0x01 << width)); } } update_needed += ahc_update_neg_request(ahc, devinfo, tstate, tinfo, AHC_NEG_TO_GOAL); if (update_needed) ahc_update_pending_scbs(ahc); } /* * Update the current state of tagged queuing for a given target. */ static void ahc_set_tags(struct ahc_softc *ahc, struct scsi_cmnd *cmd, struct ahc_devinfo *devinfo, ahc_queue_alg alg) { struct scsi_device *sdev = cmd->device; ahc_platform_set_tags(ahc, sdev, devinfo, alg); ahc_send_async(ahc, devinfo->channel, devinfo->target, devinfo->lun, AC_TRANSFER_NEG); } /* * When the transfer settings for a connection change, update any * in-transit SCBs to contain the new data so the hardware will * be set correctly during future (re)selections. */ static void ahc_update_pending_scbs(struct ahc_softc *ahc) { struct scb *pending_scb; int pending_scb_count; int i; int paused; u_int saved_scbptr; /* * Traverse the pending SCB list and ensure that all of the * SCBs there have the proper settings. 
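	 *
	 * This happens in two passes: the in-core copies are updated
	 * and synced first, and then, with the chip paused, the
	 * card-resident copies are rewritten to match.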
*/ pending_scb_count = 0; LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { struct ahc_devinfo devinfo; struct hardware_scb *pending_hscb; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; ahc_scb_devinfo(ahc, &devinfo, pending_scb); tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); pending_hscb = pending_scb->hscb; pending_hscb->control &= ~ULTRAENB; if ((tstate->ultraenb & devinfo.target_mask) != 0) pending_hscb->control |= ULTRAENB; pending_hscb->scsirate = tinfo->scsirate; pending_hscb->scsioffset = tinfo->curr.offset; if ((tstate->auto_negotiate & devinfo.target_mask) == 0 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; pending_hscb->control &= ~MK_MESSAGE; } ahc_sync_scb(ahc, pending_scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); pending_scb_count++; } if (pending_scb_count == 0) return; if (ahc_is_paused(ahc)) { paused = 1; } else { paused = 0; ahc_pause(ahc); } saved_scbptr = ahc_inb(ahc, SCBPTR); /* Ensure that the hscbs down on the card match the new information */ for (i = 0; i < ahc->scb_data->maxhscbs; i++) { struct hardware_scb *pending_hscb; u_int control; u_int scb_tag; ahc_outb(ahc, SCBPTR, i); scb_tag = ahc_inb(ahc, SCB_TAG); pending_scb = ahc_lookup_scb(ahc, scb_tag); if (pending_scb == NULL) continue; pending_hscb = pending_scb->hscb; control = ahc_inb(ahc, SCB_CONTROL); control &= ~(ULTRAENB|MK_MESSAGE); control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE); ahc_outb(ahc, SCB_CONTROL, control); ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate); ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset); } ahc_outb(ahc, SCBPTR, saved_scbptr); if (paused == 0) ahc_unpause(ahc); } /**************************** Pathing Information *****************************/ static void ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { u_int saved_scsiid; role_t role; int our_id; if (ahc_inb(ahc, SSTAT0) & TARGET) role = ROLE_TARGET; else role = ROLE_INITIATOR; if (role == ROLE_TARGET && (ahc->features & AHC_MULTI_TID) != 0 && (ahc_inb(ahc, SEQ_FLAGS) & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) { /* We were selected, so pull our id from TARGIDIN */ our_id = ahc_inb(ahc, TARGIDIN) & OID; } else if ((ahc->features & AHC_ULTRA2) != 0) our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; else our_id = ahc_inb(ahc, SCSIID) & OID; saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); ahc_compile_devinfo(devinfo, our_id, SCSIID_TARGET(ahc, saved_scsiid), ahc_inb(ahc, SAVED_LUN), SCSIID_CHANNEL(ahc, saved_scsiid), role); } static const struct ahc_phase_table_entry* ahc_lookup_phase_entry(int phase) { const struct ahc_phase_table_entry *entry; const struct ahc_phase_table_entry *last_entry; /* * num_phases doesn't include the default entry which * will be returned if the phase doesn't match. 
 */
	last_entry = &ahc_phase_table[num_phases];
	for (entry = ahc_phase_table; entry < last_entry; entry++) {
		if (phase == entry->phase)
			break;
	}
	return (entry);
}

void
ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
		    u_int lun, char channel, role_t role)
{
	devinfo->our_scsiid = our_id;
	devinfo->target = target;
	devinfo->lun = lun;
	devinfo->target_offset = target;
	devinfo->channel = channel;
	devinfo->role = role;
	if (channel == 'B')
		devinfo->target_offset += 8;
	devinfo->target_mask = (0x01 << devinfo->target_offset);
}

void
ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	printk("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel,
	       devinfo->target, devinfo->lun);
}

static void
ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		struct scb *scb)
{
	role_t	role;
	int	our_id;

	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
	role = ROLE_INITIATOR;
	if ((scb->flags & SCB_TARGET_SCB) != 0)
		role = ROLE_TARGET;
	ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
}

/************************ Message Phase Processing ****************************/
static void
ahc_assert_atn(struct ahc_softc *ahc)
{
	u_int scsisigo;

	scsisigo = ATNO;
	if ((ahc->features & AHC_DT) == 0)
		scsisigo |= ahc_inb(ahc, SCSISIGI);
	ahc_outb(ahc, SCSISIGO, scsisigo);
}

/*
 * When an initiator transaction with the MK_MESSAGE flag either reconnects
 * or enters the initial message out phase, we are interrupted.  Fill our
 * outgoing message buffer with the appropriate message and begin handling
 * the message phase(s) manually.
 */
static void
ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
			   struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	if ((scb->flags & SCB_DEVICE_RESET) == 0
	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
		u_int identify_msg;

		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
		if ((scb->hscb->control & DISCENB) != 0)
			identify_msg |= MSG_IDENTIFY_DISCFLAG;
		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
		ahc->msgout_len++;

		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahc->msgout_buf[ahc->msgout_index++] =
			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
			ahc->msgout_len += 2;
		}
	}

	if (scb->flags & SCB_DEVICE_RESET) {
		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printk("Bus Device Reset Message Sent\n");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & SCB_ABORT) != 0) {
		if ((scb->hscb->control & TAG_ENB) != 0)
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
		else
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printk("Abort%s Message Sent\n",
		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
*/ ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { ahc_build_transfer_msg(ahc, devinfo); } else { printk("ahc_intr: AWAITING_MSG for an SCB that " "does not have a waiting message\n"); printk("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, devinfo->target_mask); panic("SCB = %d, SCB Control = %x, MSG_OUT = %x " "SCB flags = %x", scb->hscb->tag, scb->hscb->control, ahc_inb(ahc, MSG_OUT), scb->flags); } /* * Clear the MK_MESSAGE flag from the SCB so we aren't * asked to send this message again. */ ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE); scb->hscb->control &= ~MK_MESSAGE; ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } /* * Build an appropriate transfer negotiation message for the * currently active target. */ static void ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { /* * We need to initiate transfer negotiations. * If our current and goal settings are identical, * we want to renegotiate due to a check condition. */ struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; const struct ahc_syncrate *rate; int dowide; int dosync; int doppr; u_int period; u_int ppr_options; u_int offset; tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* * Filter our period based on the current connection. * If we can't perform DT transfers on this segment (not in LVD * mode for instance), then our decision to issue a PPR message * may change. */ period = tinfo->goal.period; offset = tinfo->goal.offset; ppr_options = tinfo->goal.ppr_options; /* Target initiated PPR is not allowed in the SCSI spec */ if (devinfo->role == ROLE_TARGET) ppr_options = 0; rate = ahc_devlimited_syncrate(ahc, tinfo, &period, &ppr_options, devinfo->role); dowide = tinfo->curr.width != tinfo->goal.width; dosync = tinfo->curr.offset != offset || tinfo->curr.period != period; /* * Only use PPR if we have options that need it, even if the device * claims to support it. There might be an expander in the way * that doesn't. */ doppr = ppr_options != 0; if (!dowide && !dosync && !doppr) { dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; dosync = tinfo->goal.offset != 0; } if (!dowide && !dosync && !doppr) { /* * Force async with a WDTR message if we have a wide bus, * or just issue an SDTR with a 0 offset. */ if ((ahc->features & AHC_WIDE) != 0) dowide = 1; else dosync = 1; if (bootverbose) { ahc_print_devinfo(ahc, devinfo); printk("Ensuring async\n"); } } /* Target initiated PPR is not allowed in the SCSI spec */ if (devinfo->role == ROLE_TARGET) doppr = 0; /* * Both the PPR message and SDTR message require the * goal syncrate to be limited to what the target device * is capable of handling (based on whether an LVD->SE * expander is on the bus), so combine these two cases. * Regardless, guarantee that if we are using WDTR and SDTR * messages that WDTR comes first. */ if (doppr || (dosync && !dowide)) { offset = tinfo->goal.offset; ahc_validate_offset(ahc, tinfo, rate, &offset, doppr ? tinfo->goal.width : tinfo->curr.width, devinfo->role); if (doppr) { ahc_construct_ppr(ahc, devinfo, period, offset, tinfo->goal.width, ppr_options); } else { ahc_construct_sdtr(ahc, devinfo, period, offset); } } else { ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width); } } /* * Build a synchronous negotiation message in our message * buffer based on the input parameters. 
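*
* For reference, spi_populate_sync_msg() emits the standard five byte
* SDTR extended message:
*
*     { MSG_EXTENDED, 0x03, MSG_EXT_SDTR, period, offset }
*
* which is why msgout_len is advanced by 5 below.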
*/ static void ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset) { if (offset == 0) period = AHC_ASYNC_XFER_PERIOD; ahc->msgout_index += spi_populate_sync_msg( ahc->msgout_buf + ahc->msgout_index, period, offset); ahc->msgout_len += 5; if (bootverbose) { printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, period, offset); } } /* * Build a wide negotiation message in our message * buffer based on the input parameters. */ static void ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int bus_width) { ahc->msgout_index += spi_populate_width_msg( ahc->msgout_buf + ahc->msgout_index, bus_width); ahc->msgout_len += 4; if (bootverbose) { printk("(%s:%c:%d:%d): Sending WDTR %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, bus_width); } } /* * Build a parallel protocol request message in our message * buffer based on the input parameters. */ static void ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options) { if (offset == 0) period = AHC_ASYNC_XFER_PERIOD; ahc->msgout_index += spi_populate_ppr_msg( ahc->msgout_buf + ahc->msgout_index, period, offset, bus_width, ppr_options); ahc->msgout_len += 8; if (bootverbose) { printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " "offset %x, ppr_options %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, bus_width, period, offset, ppr_options); } } /* * Clear any active message state. */ static void ahc_clear_msg_state(struct ahc_softc *ahc) { ahc->msgout_len = 0; ahc->msgin_index = 0; ahc->msg_type = MSG_TYPE_NONE; if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) { /* * The target didn't care to respond to our * message request, so clear ATN. */ ahc_outb(ahc, CLRSINT1, CLRATNO); } ahc_outb(ahc, MSG_OUT, MSG_NOOP); ahc_outb(ahc, SEQ_FLAGS2, ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); } static void ahc_handle_proto_violation(struct ahc_softc *ahc) { struct ahc_devinfo devinfo; struct scb *scb; u_int scbid; u_int seq_flags; u_int curphase; u_int lastphase; int found; ahc_fetch_devinfo(ahc, &devinfo); scbid = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scbid); seq_flags = ahc_inb(ahc, SEQ_FLAGS); curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; lastphase = ahc_inb(ahc, LASTPHASE); if ((seq_flags & NOT_IDENTIFIED) != 0) { /* * The reconnecting target either did not send an * identify message, or did, but we didn't find an SCB * to match. */ ahc_print_devinfo(ahc, &devinfo); printk("Target did not send an IDENTIFY message. " "LASTPHASE = 0x%x.\n", lastphase); scb = NULL; } else if (scb == NULL) { /* * We don't seem to have an SCB active for this * transaction. Print an error and reset the bus. */ ahc_print_devinfo(ahc, &devinfo); printk("No SCB found during protocol violation\n"); goto proto_violation_reset; } else { ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL); if ((seq_flags & NO_CDB_SENT) != 0) { ahc_print_path(ahc, scb); printk("No or incomplete CDB sent to device.\n"); } else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) { /* * The target never bothered to provide status to * us prior to completing the command. Since we don't * know the disposition of this command, we must attempt * to abort it. Assert ATN and prepare to send an abort * message. 
*/ ahc_print_path(ahc, scb); printk("Completed command without status.\n"); } else { ahc_print_path(ahc, scb); printk("Unknown protocol violation.\n"); ahc_dump_card_state(ahc); } } if ((lastphase & ~P_DATAIN_DT) == 0 || lastphase == P_COMMAND) { proto_violation_reset: /* * Target either went directly to data/command * phase or didn't respond to our ATN. * The only safe thing to do is to blow * it away with a bus reset. */ found = ahc_reset_channel(ahc, 'A', TRUE); printk("%s: Issued Channel %c Bus Reset. " "%d SCBs aborted\n", ahc_name(ahc), 'A', found); } else { /* * Leave the selection hardware off in case * this abort attempt will affect yet to * be sent commands. */ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); ahc_assert_atn(ahc); ahc_outb(ahc, MSG_OUT, HOST_MSG); if (scb == NULL) { ahc_print_devinfo(ahc, &devinfo); ahc->msgout_buf[0] = MSG_ABORT_TASK; ahc->msgout_len = 1; ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } else { ahc_print_path(ahc, scb); scb->flags |= SCB_ABORT; } printk("Protocol violation %s. Attempting to abort.\n", ahc_lookup_phase_entry(curphase)->phasemsg); } } /* * Manual message loop handler. */ static void ahc_handle_message_phase(struct ahc_softc *ahc) { struct ahc_devinfo devinfo; u_int bus_phase; int end_session; ahc_fetch_devinfo(ahc, &devinfo); end_session = FALSE; bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; reswitch: switch (ahc->msg_type) { case MSG_TYPE_INITIATOR_MSGOUT: { int lastbyte; int phasemis; int msgdone; if (ahc->msgout_len == 0) panic("HOST_MSG_LOOP interrupt with no active message"); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printk("INITIATOR_MSG_OUT"); } #endif phasemis = bus_phase != P_MESGOUT; if (phasemis) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { printk(" PHASEMIS %s\n", ahc_lookup_phase_entry(bus_phase) ->phasemsg); } #endif if (bus_phase == P_MESGIN) { /* * Change gears and see if * this message is of interest to * us or should be passed back to * the sequencer. */ ahc_outb(ahc, CLRSINT1, CLRATNO); ahc->send_msg_perror = FALSE; ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; ahc->msgin_index = 0; goto reswitch; } end_session = TRUE; break; } if (ahc->send_msg_perror) { ahc_outb(ahc, CLRSINT1, CLRATNO); ahc_outb(ahc, CLRSINT1, CLRREQINIT); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printk(" byte 0x%x\n", ahc->send_msg_perror); #endif ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR); break; } msgdone = ahc->msgout_index == ahc->msgout_len; if (msgdone) { /* * The target has requested a retry. * Re-assert ATN, reset our message index to * 0, and try again. */ ahc->msgout_index = 0; ahc_assert_atn(ahc); } lastbyte = ahc->msgout_index == (ahc->msgout_len - 1); if (lastbyte) { /* Last byte is signified by dropping ATN */ ahc_outb(ahc, CLRSINT1, CLRATNO); } /* * Clear our interrupt status and present * the next byte on the bus.
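*
* (With SPIOEN active, the SCSIDATL write below also acks the REQ for
* this byte, just as the message-in path acks with a dummy SCSIDATL
* read.)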
*/ ahc_outb(ahc, CLRSINT1, CLRREQINIT); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printk(" byte 0x%x\n", ahc->msgout_buf[ahc->msgout_index]); #endif ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); break; } case MSG_TYPE_INITIATOR_MSGIN: { int phasemis; int message_done; #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printk("INITIATOR_MSG_IN"); } #endif phasemis = bus_phase != P_MESGIN; if (phasemis) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { printk(" PHASEMIS %s\n", ahc_lookup_phase_entry(bus_phase) ->phasemsg); } #endif ahc->msgin_index = 0; if (bus_phase == P_MESGOUT && (ahc->send_msg_perror == TRUE || (ahc->msgout_len != 0 && ahc->msgout_index == 0))) { ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; goto reswitch; } end_session = TRUE; break; } /* Pull the byte in without acking it */ ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printk(" byte 0x%x\n", ahc->msgin_buf[ahc->msgin_index]); #endif message_done = ahc_parse_msg(ahc, &devinfo); if (message_done) { /* * Clear our incoming message buffer in case there * is another message following this one. */ ahc->msgin_index = 0; /* * If this message elicited a response, * assert ATN so the target takes us to the * message out phase. */ if (ahc->msgout_len != 0) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printk("Asserting ATN for response\n"); } #endif ahc_assert_atn(ahc); } } else ahc->msgin_index++; if (message_done == MSGLOOP_TERMINATED) { end_session = TRUE; } else { /* Ack the byte */ ahc_outb(ahc, CLRSINT1, CLRREQINIT); ahc_inb(ahc, SCSIDATL); } break; } case MSG_TYPE_TARGET_MSGIN: { int msgdone; int msgout_request; if (ahc->msgout_len == 0) panic("Target MSGIN with no active message"); /* * If we interrupted a mesgout session, the initiator * will not know this until our first REQ. So, we * only honor mesgout requests after we've sent our * first byte. */ if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0 && ahc->msgout_index > 0) msgout_request = TRUE; else msgout_request = FALSE; if (msgout_request) { /* * Change gears and see if * this message is of interest to * us or should be passed back to * the sequencer. */ ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO); ahc->msgin_index = 0; /* Dummy read to REQ for first byte */ ahc_inb(ahc, SCSIDATL); ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); break; } msgdone = ahc->msgout_index == ahc->msgout_len; if (msgdone) { ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); end_session = TRUE; break; } /* * Present the next byte on the bus. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); break; } case MSG_TYPE_TARGET_MSGOUT: { int lastbyte; int msgdone; /* * The initiator signals that this is * the last byte by dropping ATN. */ lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0; /* * Read the latched byte, but turn off SPIOEN first * so that we don't inadvertently cause a REQ for the * next byte. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL); msgdone = ahc_parse_msg(ahc, &devinfo); if (msgdone == MSGLOOP_TERMINATED) { /* * The message is *really* done in that it caused * us to go to bus free. The sequencer has already * been reset at this point, so pull the ejection * handle.
*/ return; } ahc->msgin_index++; /* * XXX Read spec about initiator dropping ATN too soon * and use msgdone to detect it. */ if (msgdone == MSGLOOP_MSGCOMPLETE) { ahc->msgin_index = 0; /* * If this message elicited a response, transition * to the Message in phase and send it. */ if (ahc->msgout_len != 0) { ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO); ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); ahc->msg_type = MSG_TYPE_TARGET_MSGIN; ahc->msgin_index = 0; break; } } if (lastbyte) end_session = TRUE; else { /* Ask for the next byte. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); } break; } default: panic("Unknown REQINIT message type"); } if (end_session) { ahc_clear_msg_state(ahc); ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP); } else ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP); } /* * See if we sent a particular extended message to the target. * If "full" is true, return true only if the target saw the full * message. If "full" is false, return true if the target saw at * least the first byte of the message. */ static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full) { int found; u_int index; found = FALSE; index = 0; while (index < ahc->msgout_len) { if (ahc->msgout_buf[index] == MSG_EXTENDED) { u_int end_index; end_index = index + 1 + ahc->msgout_buf[index + 1]; if (ahc->msgout_buf[index+2] == msgval && type == AHCMSG_EXT) { if (full) { if (ahc->msgout_index > end_index) found = TRUE; } else if (ahc->msgout_index > index) found = TRUE; } index = end_index; } else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK && ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) { /* Skip tag type and tag id or residue param*/ index += 2; } else { /* Single byte message */ if (type == AHCMSG_1B && ahc->msgout_buf[index] == msgval && ahc->msgout_index > index) found = TRUE; index++; } if (found) break; } return (found); } /* * Wait for a complete incoming message, parse it, and respond accordingly. */ static int ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; int reject; int done; int response; u_int targ_scsirate; done = MSGLOOP_IN_PROG; response = FALSE; reject = FALSE; tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); targ_scsirate = tinfo->scsirate; /* * Parse as much of the message as is available, * rejecting it if we don't support it. When * the entire message is available and has been * handled, return MSGLOOP_MSGCOMPLETE, indicating * that we have parsed an entire message. * * In the case of extended messages, we accept the length * byte outright and perform more checking once we know the * extended message type. */ switch (ahc->msgin_buf[0]) { case MSG_DISCONNECT: case MSG_SAVEDATAPOINTER: case MSG_CMDCOMPLETE: case MSG_RESTOREPOINTERS: case MSG_IGN_WIDE_RESIDUE: /* * End our message loop as these are messages * the sequencer handles on its own.
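*
* Returning MSGLOOP_TERMINATED also means this loop deliberately does
* not ack the byte; the sequencer is left to complete the handshake
* and act on the message itself.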
*/ done = MSGLOOP_TERMINATED; break; case MSG_MESSAGE_REJECT: response = ahc_handle_msg_reject(ahc, devinfo); /* FALLTHROUGH */ case MSG_NOOP: done = MSGLOOP_MSGCOMPLETE; break; case MSG_EXTENDED: { /* Wait for enough of the message to begin validation */ if (ahc->msgin_index < 2) break; switch (ahc->msgin_buf[2]) { case MSG_EXT_SDTR: { const struct ahc_syncrate *syncrate; u_int period; u_int ppr_options; u_int offset; u_int saved_offset; if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) { reject = TRUE; break; } /* * Wait until we have both args before validating * and acting on this message. * * Add one to MSG_EXT_SDTR_LEN to account for * the extended message preamble. */ if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1)) break; period = ahc->msgin_buf[3]; ppr_options = 0; saved_offset = offset = ahc->msgin_buf[4]; syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, &ppr_options, devinfo->role); ahc_validate_offset(ahc, tinfo, syncrate, &offset, targ_scsirate & WIDEXFER, devinfo->role); if (bootverbose) { printk("(%s:%c:%d:%d): Received " "SDTR period %x, offset %x\n\t" "Filtered to period %x, offset %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, ahc->msgin_buf[3], saved_offset, period, offset); } ahc_set_syncrate(ahc, devinfo, syncrate, period, offset, ppr_options, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); /* * See if we initiated Sync Negotiation * and didn't have to fall down to async * transfers. */ if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) { /* We started it */ if (saved_offset != offset) { /* Went too low - force async */ reject = TRUE; } } else { /* * Send our own SDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printk("(%s:%c:%d:%d): Target " "Initiated SDTR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_construct_sdtr(ahc, devinfo, period, offset); ahc->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case MSG_EXT_WDTR: { u_int bus_width; u_int saved_width; u_int sending_reply; sending_reply = FALSE; if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) { reject = TRUE; break; } /* * Wait until we have our arg before validating * and acting on this message. * * Add one to MSG_EXT_WDTR_LEN to account for * the extended message preamble. */ if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1)) break; bus_width = ahc->msgin_buf[3]; saved_width = bus_width; ahc_validate_width(ahc, tinfo, &bus_width, devinfo->role); if (bootverbose) { printk("(%s:%c:%d:%d): Received WDTR " "%x filtered to %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, saved_width, bus_width); } if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) { /* * Don't send a WDTR back to the * target, since we asked first. * If the width went higher than our * request, reject it. */ if (saved_width > bus_width) { reject = TRUE; printk("(%s:%c:%d:%d): requested %dBit " "transfers. 
Rejecting...\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, 8 * (0x01 << bus_width)); bus_width = 0; } } else { /* * Send our own WDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printk("(%s:%c:%d:%d): Target " "Initiated WDTR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_construct_wdtr(ahc, devinfo, bus_width); ahc->msgout_index = 0; response = TRUE; sending_reply = TRUE; } /* * After a wide message, we are async, but * some devices don't seem to honor this portion * of the spec. Force a renegotiation of the * sync component of our transfer agreement even * if our goal is async. By updating our width * after forcing the negotiation, we avoid * renegotiating for width. */ ahc_update_neg_request(ahc, devinfo, tstate, tinfo, AHC_NEG_ALWAYS); ahc_set_width(ahc, devinfo, bus_width, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); if (sending_reply == FALSE && reject == FALSE) { /* * We will always have an SDTR to send. */ ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_build_transfer_msg(ahc, devinfo); ahc->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case MSG_EXT_PPR: { const struct ahc_syncrate *syncrate; u_int period; u_int offset; u_int bus_width; u_int ppr_options; u_int saved_width; u_int saved_offset; u_int saved_ppr_options; if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) { reject = TRUE; break; } /* * Wait until we have all args before validating * and acting on this message. * * Add one to MSG_EXT_PPR_LEN to account for * the extended message preamble. */ if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1)) break; period = ahc->msgin_buf[3]; offset = ahc->msgin_buf[5]; bus_width = ahc->msgin_buf[6]; saved_width = bus_width; ppr_options = ahc->msgin_buf[7]; /* * According to the spec, a DT only * period factor with no DT option * set implies async. */ if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && period == 9) offset = 0; saved_ppr_options = ppr_options; saved_offset = offset; /* * Mask out any options we don't support * on any controller. Transfer options are * only available if we are negotiating wide. */ ppr_options &= MSG_EXT_PPR_DT_REQ; if (bus_width == 0) ppr_options = 0; ahc_validate_width(ahc, tinfo, &bus_width, devinfo->role); syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, &ppr_options, devinfo->role); ahc_validate_offset(ahc, tinfo, syncrate, &offset, bus_width, devinfo->role); if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) { /* * If we are unable to do any of the * requested options (we went too low), * then we'll have to reject the message. 
*/ if (saved_width > bus_width || saved_offset != offset || saved_ppr_options != ppr_options) { reject = TRUE; period = 0; offset = 0; bus_width = 0; ppr_options = 0; syncrate = NULL; } } else { if (devinfo->role != ROLE_TARGET) printk("(%s:%c:%d:%d): Target " "Initiated PPR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); else printk("(%s:%c:%d:%d): Initiator " "Initiated PPR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_construct_ppr(ahc, devinfo, period, offset, bus_width, ppr_options); ahc->msgout_index = 0; response = TRUE; } if (bootverbose) { printk("(%s:%c:%d:%d): Received PPR width %x, " "period %x, offset %x,options %x\n" "\tFiltered to width %x, period %x, " "offset %x, options %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, saved_width, ahc->msgin_buf[3], saved_offset, saved_ppr_options, bus_width, period, offset, ppr_options); } ahc_set_width(ahc, devinfo, bus_width, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); ahc_set_syncrate(ahc, devinfo, syncrate, period, offset, ppr_options, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); done = MSGLOOP_MSGCOMPLETE; break; } default: /* Unknown extended message. Reject it. */ reject = TRUE; break; } break; } #ifdef AHC_TARGET_MODE case MSG_BUS_DEV_RESET: ahc_handle_devreset(ahc, devinfo, CAM_BDR_SENT, "Bus Device Reset Received", /*verbose_level*/0); ahc_restart(ahc); done = MSGLOOP_TERMINATED; break; case MSG_ABORT_TAG: case MSG_ABORT: case MSG_CLEAR_QUEUE: { int tag; /* Target mode messages */ if (devinfo->role != ROLE_TARGET) { reject = TRUE; break; } tag = SCB_LIST_NULL; if (ahc->msgin_buf[0] == MSG_ABORT_TAG) tag = ahc_inb(ahc, INITIATOR_TAG); ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, devinfo->lun, tag, ROLE_TARGET, CAM_REQ_ABORTED); tstate = ahc->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { struct ahc_tmode_lstate* lstate; lstate = tstate->enabled_luns[devinfo->lun]; if (lstate != NULL) { ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, ahc->msgin_buf[0], /*arg*/tag); ahc_send_lstate_events(ahc, lstate); } } ahc_restart(ahc); done = MSGLOOP_TERMINATED; break; } #endif case MSG_TERM_IO_PROC: default: reject = TRUE; break; } if (reject) { /* * Setup to reject the message. */ ahc->msgout_index = 0; ahc->msgout_len = 1; ahc->msgout_buf[0] = MSG_MESSAGE_REJECT; done = MSGLOOP_MSGCOMPLETE; response = TRUE; } if (done != MSGLOOP_IN_PROG && !response) /* Clear the outgoing message buffer */ ahc->msgout_len = 0; return (done); } /* * Process a message reject message. */ static int ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { /* * What we care about here is if we had an * outstanding SDTR or WDTR message for this * target. If we did, this is a signal that * the target is refusing negotiation. */ struct scb *scb; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int scb_index; u_int last_msg; int response = 0; scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* Might be necessary */ last_msg = ahc_inb(ahc, LAST_MSG); if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { /* * Target does not support the PPR message. * Attempt to negotiate SPI-2 style. */ if (bootverbose) { printk("(%s:%c:%d:%d): PPR Rejected. 
" "Trying WDTR/SDTR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } tinfo->goal.ppr_options = 0; tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_build_transfer_msg(ahc, devinfo); ahc->msgout_index = 0; response = 1; } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { /* note 8bit xfers */ printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using " "8bit transfers\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); /* * No need to clear the sync rate. If the target * did not accept the command, our syncrate is * unaffected. If the target started the negotiation, * but rejected our response, we already cleared the * sync rate before sending our WDTR. */ if (tinfo->goal.offset != tinfo->curr.offset) { /* Start the sync negotiation */ ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_build_transfer_msg(ahc, devinfo); ahc->msgout_index = 0; response = 1; } } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) { /* note asynch xfers and clear flag */ ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); printk("(%s:%c:%d:%d): refuses synchronous negotiation. " "Using asynchronous transfers\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) { int tag_type; int mask; tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); if (tag_type == MSG_SIMPLE_TASK) { printk("(%s:%c:%d:%d): refuses tagged commands. " "Performing non-tagged I/O\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_NONE); mask = ~0x23; } else { printk("(%s:%c:%d:%d): refuses %s tagged commands. " "Performing simple queue tagged I/O only\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, tag_type == MSG_ORDERED_TASK ? "ordered" : "head of queue"); ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_BASIC); mask = ~0x03; } /* * Resend the identify for this CCB as the target * may believe that the selection is invalid otherwise. */ ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & mask); scb->hscb->control &= mask; ahc_set_transaction_tag(scb, /*enabled*/FALSE, /*type*/MSG_SIMPLE_TASK); ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG); ahc_assert_atn(ahc); /* * This transaction is now at the head of * the untagged queue for this target. */ if ((ahc->flags & AHC_SCB_BTT) == 0) { struct scb_tailq *untagged_q; untagged_q = &(ahc->untagged_queues[devinfo->target_offset]); TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe); scb->flags |= SCB_UNTAGGEDQ; } ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), scb->hscb->tag); /* * Requeue all tagged commands for this target * currently in our possession so they can be * converted to untagged commands. */ ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), SCB_GET_CHANNEL(ahc, scb), SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQUEUE_REQ, SEARCH_COMPLETE); } else { /* * Otherwise, we ignore it. */ printk("%s:%c:%d: Message reject for %x -- ignored\n", ahc_name(ahc), devinfo->channel, devinfo->target, last_msg); } return (response); } /* * Process an ingnore wide residue message. 
*/ static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { u_int scb_index; struct scb *scb; scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); /* * XXX Actually check data direction in the sequencer? * Perhaps add datadir to some spare bits in the hscb? */ if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) { /* * Ignore the message if we haven't * seen an appropriate data phase yet. */ } else { /* * If the residual occurred on the last * transfer and the transfer request was * expected to end on an odd count, do * nothing. Otherwise, subtract a byte * and update the residual count accordingly. */ uint32_t sgptr; sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR); if ((sgptr & SG_LIST_NULL) != 0 && (ahc_inb(ahc, SCB_LUN) & SCB_XFERLEN_ODD) != 0) { /* * If the residual occurred on the last * transfer and the transfer request was * expected to end on an odd count, do * nothing. */ } else { struct ahc_dma_seg *sg; uint32_t data_cnt; uint32_t data_addr; uint32_t sglen; /* Pull in all of the sgptr */ sgptr = ahc_inl(ahc, SCB_RESIDUAL_SGPTR); data_cnt = ahc_inl(ahc, SCB_RESIDUAL_DATACNT); if ((sgptr & SG_LIST_NULL) != 0) { /* * The residual data count is not updated * for the command run to completion case. * Explicitly zero the count. */ data_cnt &= ~AHC_SG_LEN_MASK; } data_addr = ahc_inl(ahc, SHADDR); data_cnt += 1; data_addr -= 1; sgptr &= SG_PTR_MASK; sg = ahc_sg_bus_to_virt(scb, sgptr); /* * The residual sg ptr points to the next S/G * to load so we must go back one. */ sg--; sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; if (sg != scb->sg_list && sglen < (data_cnt & AHC_SG_LEN_MASK)) { sg--; sglen = ahc_le32toh(sg->len); /* * Preserve High Address and SG_LIST bits * while setting the count to 1. */ data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK)); data_addr = ahc_le32toh(sg->addr) + (sglen & AHC_SG_LEN_MASK) - 1; /* * Increment sg so it points to the * "next" sg. */ sg++; sgptr = ahc_sg_virt_to_bus(scb, sg); } ahc_outl(ahc, SCB_RESIDUAL_SGPTR, sgptr); ahc_outl(ahc, SCB_RESIDUAL_DATACNT, data_cnt); /* * Toggle the "oddness" of the transfer length * to handle this mid-transfer ignore wide * residue. This ensures that the oddness is * correct for subsequent data transfers. */ ahc_outb(ahc, SCB_LUN, ahc_inb(ahc, SCB_LUN) ^ SCB_XFERLEN_ODD); } } } /* * Reinitialize the data pointers for the active transfer * based on its current residual. 
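*
* The recovery arithmetic below is
*
*     dataptr = sg->addr + (sg->len & AHC_SG_LEN_MASK) - resid
*
* so, with made-up numbers, a segment at 0x1000 of length 0x2000 with
* 0x800 bytes still outstanding resumes the transfer at 0x2800.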
*/ static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc) { struct scb *scb; struct ahc_dma_seg *sg; u_int scb_index; uint32_t sgptr; uint32_t resid; uint32_t dataptr; scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8) | ahc_inb(ahc, SCB_RESIDUAL_SGPTR); sgptr &= SG_PTR_MASK; sg = ahc_sg_bus_to_virt(scb, sgptr); /* The residual sg_ptr always points to the next sg */ sg--; resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16) | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8) | ahc_inb(ahc, SCB_RESIDUAL_DATACNT); dataptr = ahc_le32toh(sg->addr) + (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK) - resid; if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { u_int dscommand1; dscommand1 = ahc_inb(ahc, DSCOMMAND1); ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); ahc_outb(ahc, HADDR, (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS); ahc_outb(ahc, DSCOMMAND1, dscommand1); } ahc_outb(ahc, HADDR + 3, dataptr >> 24); ahc_outb(ahc, HADDR + 2, dataptr >> 16); ahc_outb(ahc, HADDR + 1, dataptr >> 8); ahc_outb(ahc, HADDR, dataptr); ahc_outb(ahc, HCNT + 2, resid >> 16); ahc_outb(ahc, HCNT + 1, resid >> 8); ahc_outb(ahc, HCNT, resid); if ((ahc->features & AHC_ULTRA2) == 0) { ahc_outb(ahc, STCNT + 2, resid >> 16); ahc_outb(ahc, STCNT + 1, resid >> 8); ahc_outb(ahc, STCNT, resid); } } /* * Handle the effects of issuing a bus device reset message. */ static void ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, cam_status status, char *message, int verbose_level) { #ifdef AHC_TARGET_MODE struct ahc_tmode_tstate* tstate; u_int lun; #endif int found; found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role, status); #ifdef AHC_TARGET_MODE /* * Send an immediate notify ccb to all target mode peripheral * drivers affected by this action. */ tstate = ahc->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { for (lun = 0; lun < AHC_NUM_LUNS; lun++) { struct ahc_tmode_lstate* lstate; lstate = tstate->enabled_luns[lun]; if (lstate == NULL) continue; ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, MSG_BUS_DEV_RESET, /*arg*/0); ahc_send_lstate_events(ahc, lstate); } } #endif /* * Go back to async/narrow transfers and renegotiate. */ ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_CUR, /*paused*/TRUE); ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_CUR, /*paused*/TRUE); if (status != CAM_SEL_TIMEOUT) ahc_send_async(ahc, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_SENT_BDR); if (message != NULL && (verbose_level <= bootverbose)) printk("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc), message, devinfo->channel, devinfo->target, found); } #ifdef AHC_TARGET_MODE static void ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly.
*/ ahc->msgout_index = 0; ahc->msgout_len = 0; if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) ahc_build_transfer_msg(ahc, devinfo); else panic("ahc_intr: AWAITING target message with no message"); ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_TARGET_MSGIN; } #endif /**************************** Initialization **********************************/ /* * Allocate a controller structure for a new device * and perform initial initialization. */ struct ahc_softc * ahc_alloc(void *platform_arg, char *name) { struct ahc_softc *ahc; int i; #ifndef __FreeBSD__ ahc = kmalloc(sizeof(*ahc), GFP_ATOMIC); if (!ahc) { printk("aic7xxx: cannot malloc softc!\n"); kfree(name); return NULL; } #else ahc = device_get_softc((device_t)platform_arg); #endif memset(ahc, 0, sizeof(*ahc)); ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC); if (ahc->seep_config == NULL) { #ifndef __FreeBSD__ kfree(ahc); #endif kfree(name); return (NULL); } LIST_INIT(&ahc->pending_scbs); /* We don't know our unit number until the OSM sets it */ ahc->name = name; ahc->unit = -1; ahc->description = NULL; ahc->channel = 'A'; ahc->channel_b = 'B'; ahc->chip = AHC_NONE; ahc->features = AHC_FENONE; ahc->bugs = AHC_BUGNONE; ahc->flags = AHC_FNONE; /* * Default to all error reporting enabled with the * sequencer operating at its fastest speed. * The bus attach code may modify this. */ ahc->seqctl = FASTMODE; for (i = 0; i < AHC_NUM_TARGETS; i++) TAILQ_INIT(&ahc->untagged_queues[i]); if (ahc_platform_alloc(ahc, platform_arg) != 0) { ahc_free(ahc); ahc = NULL; } return (ahc); } int ahc_softc_init(struct ahc_softc *ahc) { /* The IRQMS bit is only valid on VL and EISA chips */ if ((ahc->chip & AHC_PCI) == 0) ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS; else ahc->unpause = 0; ahc->pause = ahc->unpause | PAUSE; /* XXX The shared scb data stuff should be deprecated */ if (ahc->scb_data == NULL) { ahc->scb_data = kmalloc(sizeof(*ahc->scb_data), GFP_ATOMIC); if (ahc->scb_data == NULL) return (ENOMEM); memset(ahc->scb_data, 0, sizeof(*ahc->scb_data)); } return (0); } void ahc_set_unit(struct ahc_softc *ahc, int unit) { ahc->unit = unit; } void ahc_set_name(struct ahc_softc *ahc, char *name) { if (ahc->name != NULL) kfree(ahc->name); ahc->name = name; } void ahc_free(struct ahc_softc *ahc) { int i; switch (ahc->init_level) { default: case 5: ahc_shutdown(ahc); /* FALLTHROUGH */ case 4: ahc_dmamap_unload(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap); /* FALLTHROUGH */ case 3: ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo, ahc->shared_data_dmamap); ahc_dmamap_destroy(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap); /* FALLTHROUGH */ case 2: ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat); case 1: #ifndef __linux__ ahc_dma_tag_destroy(ahc, ahc->buffer_dmat); #endif break; case 0: break; } #ifndef __linux__ ahc_dma_tag_destroy(ahc, ahc->parent_dmat); #endif ahc_platform_free(ahc); ahc_fini_scbdata(ahc); for (i = 0; i < AHC_NUM_TARGETS; i++) { struct ahc_tmode_tstate *tstate; tstate = ahc->enabled_targets[i]; if (tstate != NULL) { #ifdef AHC_TARGET_MODE int j; for (j = 0; j < AHC_NUM_LUNS; j++) { struct ahc_tmode_lstate *lstate; lstate = tstate->enabled_luns[j]; if (lstate != NULL) { xpt_free_path(lstate->path); kfree(lstate); } } #endif kfree(tstate); } } #ifdef AHC_TARGET_MODE if (ahc->black_hole != NULL) { xpt_free_path(ahc->black_hole->path); kfree(ahc->black_hole); } #endif if (ahc->name != NULL) kfree(ahc->name); if (ahc->seep_config != NULL) kfree(ahc->seep_config); #ifndef __FreeBSD__ kfree(ahc); #endif
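/* (The softc is only kfree'd when not built for FreeBSD: there it came from device_get_softc() in ahc_alloc() and is owned and released by the bus framework.) */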
return; } static void ahc_shutdown(void *arg) { struct ahc_softc *ahc; int i; ahc = (struct ahc_softc *)arg; /* This will reset most registers to 0, but not all */ ahc_reset(ahc, /*reinit*/FALSE); ahc_outb(ahc, SCSISEQ, 0); ahc_outb(ahc, SXFRCTL0, 0); ahc_outb(ahc, DSPCISTATUS, 0); for (i = TARG_SCSIRATE; i < SCSICONF; i++) ahc_outb(ahc, i, 0); } /* * Reset the controller and record some information about it * that is only available just after a reset. If "reinit" is * non-zero, this reset occurred after initial configuration * and the caller requests that the chip be fully reinitialized * to a runnable state. Chip interrupts are *not* enabled after * a reinitialization. The caller must enable interrupts via * ahc_intr_enable(). */ int ahc_reset(struct ahc_softc *ahc, int reinit) { u_int sblkctl; u_int sxfrctl1_a, sxfrctl1_b; int error; int wait; /* * Preserve the value of the SXFRCTL1 register for all channels. * It contains settings that affect termination and we don't want * to disturb the integrity of the bus. */ ahc_pause(ahc); sxfrctl1_b = 0; if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { u_int sblkctl; /* * Save channel B's settings in case this chip * is set up for TWIN channel operation. */ sblkctl = ahc_inb(ahc, SBLKCTL); ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); } sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); /* * Ensure that the reset has finished. We delay 1000us * prior to reading the register to make sure the chip * has sufficiently completed its reset to handle register * accesses. */ wait = 1000; do { ahc_delay(1000); } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); if (wait == 0) { printk("%s: WARNING - Failed chip reset! " "Trying to initialize anyway.\n", ahc_name(ahc)); } ahc_outb(ahc, HCNTRL, ahc->pause); /* Determine channel configuration */ sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE); /* No Twin Channel PCI cards */ if ((ahc->chip & AHC_PCI) != 0) sblkctl &= ~SELBUSB; switch (sblkctl) { case 0: /* Single Narrow Channel */ break; case 2: /* Wide Channel */ ahc->features |= AHC_WIDE; break; case 8: /* Twin Channel */ ahc->features |= AHC_TWIN; break; default: printk(" Unsupported adapter type. Ignoring\n"); return(-1); } /* * Reload sxfrctl1. * * We must always initialize STPWEN to 1 before we * restore the saved values. STPWEN is initialized * to a tri-state condition which can only be cleared * by turning it on. */ if ((ahc->features & AHC_TWIN) != 0) { u_int sblkctl; sblkctl = ahc_inb(ahc, SBLKCTL); ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); ahc_outb(ahc, SXFRCTL1, sxfrctl1_b); ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); } ahc_outb(ahc, SXFRCTL1, sxfrctl1_a); error = 0; if (reinit != 0) /* * If a recovery action has forced a chip reset, * re-initialize the chip to our liking.
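*
* (bus_chip_init is installed by the bus attach code; it is typically
* ahc_chip_init() below, possibly wrapped by bus specific setup.)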
*/ error = ahc->bus_chip_init(ahc); #ifdef AHC_DUMP_SEQ else ahc_dumpseq(ahc); #endif return (error); } /* * Determine the number of SCBs available on the controller */ int ahc_probe_scbs(struct ahc_softc *ahc) { int i; for (i = 0; i < AHC_SCB_MAX; i++) { ahc_outb(ahc, SCBPTR, i); ahc_outb(ahc, SCB_BASE, i); if (ahc_inb(ahc, SCB_BASE) != i) break; ahc_outb(ahc, SCBPTR, 0); if (ahc_inb(ahc, SCB_BASE) != 0) break; } return (i); } static void ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { dma_addr_t *baddr; baddr = (dma_addr_t *)arg; *baddr = segs->ds_addr; } static void ahc_build_free_scb_list(struct ahc_softc *ahc) { int scbsize; int i; scbsize = 32; if ((ahc->flags & AHC_LSCBS_ENABLED) != 0) scbsize = 64; for (i = 0; i < ahc->scb_data->maxhscbs; i++) { int j; ahc_outb(ahc, SCBPTR, i); /* * Touch all SCB bytes to avoid parity errors * should one of our debugging routines read * an otherwise uninitialized byte. */ for (j = 0; j < scbsize; j++) ahc_outb(ahc, SCB_BASE+j, 0xFF); /* Clear the control byte. */ ahc_outb(ahc, SCB_CONTROL, 0); /* Set the next pointer */ if ((ahc->flags & AHC_PAGESCBS) != 0) ahc_outb(ahc, SCB_NEXT, i+1); else ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); /* Make the tag number, SCSIID, and lun invalid */ ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); ahc_outb(ahc, SCB_SCSIID, 0xFF); ahc_outb(ahc, SCB_LUN, 0xFF); } if ((ahc->flags & AHC_PAGESCBS) != 0) { /* SCB 0 heads the free list. */ ahc_outb(ahc, FREE_SCBH, 0); } else { /* No free list. */ ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL); } /* Make sure that the last SCB terminates the free list */ ahc_outb(ahc, SCBPTR, i-1); ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); } static int ahc_init_scbdata(struct ahc_softc *ahc) { struct scb_data *scb_data; scb_data = ahc->scb_data; SLIST_INIT(&scb_data->free_scbs); SLIST_INIT(&scb_data->sg_maps); /* Allocate SCB resources */ scb_data->scbarray = kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, GFP_ATOMIC); if (scb_data->scbarray == NULL) return (ENOMEM); memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC); /* Determine the number of hardware SCBs and initialize them */ scb_data->maxhscbs = ahc_probe_scbs(ahc); if (ahc->scb_data->maxhscbs == 0) { printk("%s: No SCB space found\n", ahc_name(ahc)); return (ENXIO); } /* * Create our DMA tags. These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE.
*/ /* DMA tag for our hardware scb structures */ if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->hscb_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Allocation for our hscbs */ if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat, (void **)&scb_data->hscbs, BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) { goto error_exit; } scb_data->init_level++; /* And permanently map them */ ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap, scb_data->hscbs, AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb), ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0); scb_data->init_level++; /* DMA tag for our sense buffers */ if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sense_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Allocate them */ if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat, (void **)&scb_data->sense, BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) { goto error_exit; } scb_data->init_level++; /* And permanently map them */ ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap, scb_data->sense, AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data), ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0); scb_data->init_level++; /* DMA tag for our S/G structures. We allocate in page sized chunks */ if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, PAGE_SIZE, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sg_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Perform initial CCB allocation */ memset(scb_data->hscbs, 0, AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb)); ahc_alloc_scbs(ahc); if (scb_data->numscbs == 0) { printk("%s: ahc_init_scbdata - " "Unable to allocate initial scbs\n", ahc_name(ahc)); goto error_exit; } /* * Reserve the next queued SCB. 
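*
* (The reserved SCB's tag is later handed to the sequencer through the
* NEXT_QUEUED_SCB register in ahc_chip_init(), keeping one SCB staged
* ahead of the qinfifo at all times.)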
*/ ahc->next_queued_scb = ahc_get_scb(ahc); /* * Note that we were successful */ return (0); error_exit: return (ENOMEM); } static void ahc_fini_scbdata(struct ahc_softc *ahc) { struct scb_data *scb_data; scb_data = ahc->scb_data; if (scb_data == NULL) return; switch (scb_data->init_level) { default: case 7: { struct sg_map_node *sg_map; while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) { SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); ahc_dmamap_unload(ahc, scb_data->sg_dmat, sg_map->sg_dmamap); ahc_dmamem_free(ahc, scb_data->sg_dmat, sg_map->sg_vaddr, sg_map->sg_dmamap); kfree(sg_map); } ahc_dma_tag_destroy(ahc, scb_data->sg_dmat); } case 6: ahc_dmamap_unload(ahc, scb_data->sense_dmat, scb_data->sense_dmamap); case 5: ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense, scb_data->sense_dmamap); ahc_dmamap_destroy(ahc, scb_data->sense_dmat, scb_data->sense_dmamap); case 4: ahc_dma_tag_destroy(ahc, scb_data->sense_dmat); case 3: ahc_dmamap_unload(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap); case 2: ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs, scb_data->hscb_dmamap); ahc_dmamap_destroy(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap); case 1: ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat); break; case 0: break; } if (scb_data->scbarray != NULL) kfree(scb_data->scbarray); } static void ahc_alloc_scbs(struct ahc_softc *ahc) { struct scb_data *scb_data; struct scb *next_scb; struct sg_map_node *sg_map; dma_addr_t physaddr; struct ahc_dma_seg *segs; int newcount; int i; scb_data = ahc->scb_data; if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC) /* Can't allocate any more */ return; next_scb = &scb_data->scbarray[scb_data->numscbs]; sg_map = kmalloc(sizeof(*sg_map), GFP_ATOMIC); if (sg_map == NULL) return; /* Allocate S/G space for the next batch of SCBS */ if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat, (void **)&sg_map->sg_vaddr, BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { kfree(sg_map); return; } SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb, &sg_map->sg_physaddr, /*flags*/0); segs = sg_map->sg_vaddr; physaddr = sg_map->sg_physaddr; newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg))); newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs)); for (i = 0; i < newcount; i++) { struct scb_platform_data *pdata; #ifndef __linux__ int error; #endif pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC); if (pdata == NULL) break; next_scb->platform_data = pdata; next_scb->sg_map = sg_map; next_scb->sg_list = segs; /* * The sequencer always starts with the second entry. * The first entry is embedded in the scb. 
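*
* Concretely, for a transfer described by N segments, segment 0 lives
* in the hardware SCB itself, while sg_list_phys (assigned just below)
* points one struct ahc_dma_seg past this SCB's slice of the S/G page,
* where segment 1 begins.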
*/ next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg); next_scb->ahc_softc = ahc; next_scb->flags = SCB_FREE; #ifndef __linux__ error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0, &next_scb->dmamap); if (error != 0) break; #endif next_scb->hscb = &scb_data->hscbs[scb_data->numscbs]; next_scb->hscb->tag = ahc->scb_data->numscbs; SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, next_scb, links.sle); segs += AHC_NSEG; physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg)); next_scb++; ahc->scb_data->numscbs++; } } void ahc_controller_info(struct ahc_softc *ahc, char *buf) { int len; len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]); buf += len; if ((ahc->features & AHC_TWIN) != 0) len = sprintf(buf, "Twin Channel, A SCSI Id=%d, " "B SCSI Id=%d, primary %c, ", ahc->our_id, ahc->our_id_b, (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A'); else { const char *speed; const char *type; speed = ""; if ((ahc->features & AHC_ULTRA) != 0) { speed = "Ultra "; } else if ((ahc->features & AHC_DT) != 0) { speed = "Ultra160 "; } else if ((ahc->features & AHC_ULTRA2) != 0) { speed = "Ultra2 "; } if ((ahc->features & AHC_WIDE) != 0) { type = "Wide"; } else { type = "Single"; } len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ", speed, type, ahc->channel, ahc->our_id); } buf += len; if ((ahc->flags & AHC_PAGESCBS) != 0) sprintf(buf, "%d/%d SCBs", ahc->scb_data->maxhscbs, AHC_MAX_QUEUE); else sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs); } int ahc_chip_init(struct ahc_softc *ahc) { int term; int error; u_int i; u_int scsi_conf; u_int scsiseq_template; uint32_t physaddr; ahc_outb(ahc, SEQ_FLAGS, 0); ahc_outb(ahc, SEQ_FLAGS2, 0); /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/ if (ahc->features & AHC_TWIN) { /* * Setup Channel B first. */ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0; ahc_outb(ahc, SCSIID, ahc->our_id_b); scsi_conf = ahc_inb(ahc, SCSICONF + 1); ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) |term|ahc->seltime_b|ENSTIMER|ACTNEGEN); if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); /* Select Channel A */ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); } term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0; if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); else ahc_outb(ahc, SCSIID, ahc->our_id); scsi_conf = ahc_inb(ahc, SCSICONF); ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) |term|ahc->seltime |ENSTIMER|ACTNEGEN); if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); /* There are no untagged SCBs active yet. */ for (i = 0; i < 16; i++) { ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0)); if ((ahc->flags & AHC_SCB_BTT) != 0) { int lun; /* * The SCB based BTT allows an entry per * target and lun pair. */ for (lun = 1; lun < AHC_NUM_LUNS; lun++) ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun)); } } /* All of our queues are empty */ for (i = 0; i < 256; i++) ahc->qoutfifo[i] = SCB_LIST_NULL; ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD); for (i = 0; i < 256; i++) ahc->qinfifo[i] = SCB_LIST_NULL; if ((ahc->features & AHC_MULTI_TID) != 0) { ahc_outb(ahc, TARGID, 0); ahc_outb(ahc, TARGID + 1, 0); } /* * Tell the sequencer where it can find our arrays in memory. 
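*
* The 32-bit bus addresses are handed to the sequencer one byte at a
* time, least significant byte first; a hypothetical physaddr of
* 0x12345678 would land as HSCB_ADDR = 0x78, +1 = 0x56, +2 = 0x34,
* +3 = 0x12.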
*/ physaddr = ahc->scb_data->hscb_busaddr; ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); physaddr = ahc->shared_data_busaddr; ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF); ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF); ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF); ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF); /* * Initialize the group code to command length table. * This overrides the values in TARG_SCSIRATE, so only * setup the table after we have processed that information. */ ahc_outb(ahc, CMDSIZE_TABLE, 5); ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); if ((ahc->features & AHC_HS_MAILBOX) != 0) ahc_outb(ahc, HS_MAILBOX, 0); /* Tell the sequencer of our initial queue positions */ if ((ahc->features & AHC_TARGETMODE) != 0) { ahc->tqinfifonext = 1; ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); } ahc->qinfifonext = 0; ahc->qoutfifonext = 0; if ((ahc->features & AHC_QUEUE_REGS) != 0) { ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); ahc_outb(ahc, SNSCB_QOFF, ahc->qinfifonext); ahc_outb(ahc, SDSCB_QOFF, 0); } else { ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); ahc_outb(ahc, QINPOS, ahc->qinfifonext); ahc_outb(ahc, QOUTPOS, ahc->qoutfifonext); } /* We don't have any waiting selections */ ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); /* Our disconnection list is empty too */ ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); /* Message out buffer starts empty */ ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* * Setup the allowed SCSI Sequences based on operational mode. * If we are a target, we'll enable select in operations once * we've had a lun enabled. */ scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP; if ((ahc->flags & AHC_INITIATORROLE) != 0) scsiseq_template |= ENRSELI; ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template); /* Initialize our list of free SCBs. */ ahc_build_free_scb_list(ahc); /* * Tell the sequencer which SCB will be the next one it receives. */ ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); /* * Load the Sequencer program and Enable the adapter * in "fast" mode. */ if (bootverbose) printk("%s: Downloading Sequencer Program...", ahc_name(ahc)); error = ahc_loadseq(ahc); if (error != 0) return (error); if ((ahc->features & AHC_ULTRA2) != 0) { int wait; /* * Wait for up to 500ms for our transceivers * to settle. If the adapter does not have * a cable attached, the transceivers may * never settle, so don't complain if we * fail here. 
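*
* (The loop below makes up to 5000 passes with a 100us ahc_delay()
* between SBLKCTL reads, which is where the 500ms figure comes from.)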
*/ for (wait = 5000; (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; wait--) ahc_delay(100); } ahc_restart(ahc); return (0); } /* * Start the board, ready for normal operation */ int ahc_init(struct ahc_softc *ahc) { int max_targ; u_int i; u_int scsi_conf; u_int ultraenb; u_int discenable; u_int tagenable; size_t driver_data_size; #ifdef AHC_DEBUG if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0) ahc->flags |= AHC_SEQUENCER_DEBUG; #endif #ifdef AHC_PRINT_SRAM printk("Scratch Ram:"); for (i = 0x20; i < 0x5f; i++) { if (((i % 8) == 0) && (i != 0)) { printk ("\n "); } printk (" 0x%x", ahc_inb(ahc, i)); } if ((ahc->features & AHC_MORE_SRAM) != 0) { for (i = 0x70; i < 0x7f; i++) { if (((i % 8) == 0) && (i != 0)) { printk ("\n "); } printk (" 0x%x", ahc_inb(ahc, i)); } } printk ("\n"); /* * Reading uninitialized scratch ram may * generate parity errors. */ ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, CLRINT, CLRBRKADRINT); #endif max_targ = 15; /* * Assume we have a board at this stage and it has been reset. */ if ((ahc->flags & AHC_USEDEFAULTS) != 0) ahc->our_id = ahc->our_id_b = 7; /* * Default to allowing initiator operations. */ ahc->flags |= AHC_INITIATORROLE; /* * Only allow target mode features if this unit has them enabled. */ if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0) ahc->features &= ~AHC_TARGETMODE; #ifndef __linux__ /* DMA tag for mapping buffers into device visible space. */ if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING ? (dma_addr_t)0x7FFFFFFFFFULL : BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE, /*nsegments*/AHC_NSEG, /*maxsegsz*/AHC_MAXTRANSFER_SIZE, /*flags*/BUS_DMA_ALLOCNOW, &ahc->buffer_dmat) != 0) { return (ENOMEM); } #endif ahc->init_level++; /* * DMA tag for our command fifos and other data in system memory * the card's sequencer must be able to access. For initiator * roles, we need to allocate space for the qinfifo and qoutfifo. * The qinfifo and qoutfifo are composed of 256 1 byte elements. * When providing for the target mode role, we must additionally * provide space for the incoming target command fifo and an extra * byte to deal with a dma bug in some chip versions. */ driver_data_size = 2 * 256 * sizeof(uint8_t); if ((ahc->features & AHC_TARGETMODE) != 0) driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) + /*DMA WideOdd Bug Buffer*/1; if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, driver_data_size, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &ahc->shared_data_dmat) != 0) { return (ENOMEM); } ahc->init_level++; /* Allocation of driver data */ if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat, (void **)&ahc->qoutfifo, BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) { return (ENOMEM); } ahc->init_level++; /* And permanently map it in */ ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc->qoutfifo, driver_data_size, ahc_dmamap_cb, &ahc->shared_data_busaddr, /*flags*/0); if ((ahc->features & AHC_TARGETMODE) != 0) { ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo; ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS]; ahc->dma_bug_buf = ahc->shared_data_busaddr + driver_data_size - 1; /* All target command blocks start out invalid. 
*/ for (i = 0; i < AHC_TMODE_CMDS; i++) ahc->targetcmds[i].cmd_valid = 0; ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD); ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256]; } ahc->qinfifo = &ahc->qoutfifo[256]; ahc->init_level++; /* Allocate SCB data now that buffer_dmat is initialized */ if (ahc->scb_data->maxhscbs == 0) if (ahc_init_scbdata(ahc) != 0) return (ENOMEM); /* * Allocate a tstate to house information for our * initiator presence on the bus as well as the user * data for any target mode initiator. */ if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { printk("%s: unable to allocate ahc_tmode_tstate. " "Failing attach\n", ahc_name(ahc)); return (ENOMEM); } if ((ahc->features & AHC_TWIN) != 0) { if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { printk("%s: unable to allocate ahc_tmode_tstate. " "Failing attach\n", ahc_name(ahc)); return (ENOMEM); } } if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) { ahc->flags |= AHC_PAGESCBS; } else { ahc->flags &= ~AHC_PAGESCBS; } #ifdef AHC_DEBUG if (ahc_debug & AHC_SHOW_MISC) { printk("%s: hardware scb %u bytes; kernel scb %u bytes; " "ahc_dma %u bytes\n", ahc_name(ahc), (u_int)sizeof(struct hardware_scb), (u_int)sizeof(struct scb), (u_int)sizeof(struct ahc_dma_seg)); } #endif /* AHC_DEBUG */ /* * Look at the information that board initialization or * the board bios has left us. */ if (ahc->features & AHC_TWIN) { scsi_conf = ahc_inb(ahc, SCSICONF + 1); if ((scsi_conf & RESET_SCSI) != 0 && (ahc->flags & AHC_INITIATORROLE) != 0) ahc->flags |= AHC_RESET_BUS_B; } scsi_conf = ahc_inb(ahc, SCSICONF); if ((scsi_conf & RESET_SCSI) != 0 && (ahc->flags & AHC_INITIATORROLE) != 0) ahc->flags |= AHC_RESET_BUS_A; ultraenb = 0; tagenable = ALL_TARGETS_MASK; /* Grab the disconnection disable table and invert it for our needs */ if ((ahc->flags & AHC_USEDEFAULTS) != 0) { printk("%s: Host Adapter Bios disabled. Using default SCSI " "device parameters\n", ahc_name(ahc)); ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| AHC_TERM_ENB_A|AHC_TERM_ENB_B; discenable = ALL_TARGETS_MASK; if ((ahc->features & AHC_ULTRA) != 0) ultraenb = ALL_TARGETS_MASK; } else { discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) | ahc_inb(ahc, DISC_DSB)); if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) | ahc_inb(ahc, ULTRA_ENB); } if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) max_targ = 7; for (i = 0; i <= max_targ; i++) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int our_id; u_int target_id; char channel; channel = 'A'; our_id = ahc->our_id; target_id = i; if (i > 7 && (ahc->features & AHC_TWIN) != 0) { channel = 'B'; our_id = ahc->our_id_b; target_id = i % 8; } tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id, &tstate); /* Default to async narrow across the board */ memset(tinfo, 0, sizeof(*tinfo)); if (ahc->flags & AHC_USEDEFAULTS) { if ((ahc->features & AHC_WIDE) != 0) tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; /* * These will be truncated when we determine the * connection type we have with the target. */ tinfo->user.period = ahc_syncrates->period; tinfo->user.offset = MAX_OFFSET; } else { u_int scsirate; uint16_t mask; /* Take the settings leftover in scratch RAM. */ scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); mask = (0x01 << i); if ((ahc->features & AHC_ULTRA2) != 0) { u_int offset; u_int maxsync; if ((scsirate & SOFS) == 0x0F) { /* * Haven't negotiated yet, * so the format is different. */ scsirate = (scsirate & SXFR) >> 4 | ((ultraenb & mask) ?
0x08 : 0x0) | (scsirate & WIDEXFER); offset = MAX_OFFSET_ULTRA2; } else offset = ahc_inb(ahc, TARG_OFFSET + i); if ((scsirate & ~WIDEXFER) == 0 && offset != 0) /* Set to the lowest sync rate, 5MHz */ scsirate |= 0x1c; maxsync = AHC_SYNCRATE_ULTRA2; if ((ahc->features & AHC_DT) != 0) maxsync = AHC_SYNCRATE_DT; tinfo->user.period = ahc_find_period(ahc, scsirate, maxsync); if (offset == 0) tinfo->user.period = 0; else tinfo->user.offset = MAX_OFFSET; if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ && (ahc->features & AHC_DT) != 0) tinfo->user.ppr_options = MSG_EXT_PPR_DT_REQ; } else if ((scsirate & SOFS) != 0) { if ((scsirate & SXFR) == 0x40 && (ultraenb & mask) != 0) { /* Treat 10MHz as a non-ultra speed */ scsirate &= ~SXFR; ultraenb &= ~mask; } tinfo->user.period = ahc_find_period(ahc, scsirate, (ultraenb & mask) ? AHC_SYNCRATE_ULTRA : AHC_SYNCRATE_FAST); if (tinfo->user.period != 0) tinfo->user.offset = MAX_OFFSET; } if (tinfo->user.period == 0) tinfo->user.offset = 0; if ((scsirate & WIDEXFER) != 0 && (ahc->features & AHC_WIDE) != 0) tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; tinfo->user.protocol_version = 4; if ((ahc->features & AHC_DT) != 0) tinfo->user.transport_version = 3; else tinfo->user.transport_version = 2; tinfo->goal.protocol_version = 2; tinfo->goal.transport_version = 2; tinfo->curr.protocol_version = 2; tinfo->curr.transport_version = 2; } tstate->ultraenb = 0; } ahc->user_discenable = discenable; ahc->user_tagenable = tagenable; return (ahc->bus_chip_init(ahc)); } void ahc_intr_enable(struct ahc_softc *ahc, int enable) { u_int hcntrl; hcntrl = ahc_inb(ahc, HCNTRL); hcntrl &= ~INTEN; ahc->pause &= ~INTEN; ahc->unpause &= ~INTEN; if (enable) { hcntrl |= INTEN; ahc->pause |= INTEN; ahc->unpause |= INTEN; } ahc_outb(ahc, HCNTRL, hcntrl); } /* * Ensure that the card is paused in a location * outside of all critical sections and that all * pending work is completed prior to returning. * This routine should only be called from outside * an interrupt context. */ void ahc_pause_and_flushwork(struct ahc_softc *ahc) { int intstat; int maxloops; int paused; maxloops = 1000; ahc->flags |= AHC_ALL_INTERRUPTS; paused = FALSE; do { if (paused) { ahc_unpause(ahc); /* * Give the sequencer some time to service * any active selections. */ ahc_delay(500); } ahc_intr(ahc); ahc_pause(ahc); paused = TRUE; ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); intstat = ahc_inb(ahc, INTSTAT); if ((intstat & INT_PEND) == 0) { ahc_clear_critical_section(ahc); intstat = ahc_inb(ahc, INTSTAT); } } while (--maxloops && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0) && ((intstat & INT_PEND) != 0 || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0)); if (maxloops == 0) { printk("Infinite interrupt loop, INTSTAT = %x", ahc_inb(ahc, INTSTAT)); } ahc_platform_flushwork(ahc); ahc->flags &= ~AHC_ALL_INTERRUPTS; } #ifdef CONFIG_PM int ahc_suspend(struct ahc_softc *ahc) { ahc_pause_and_flushwork(ahc); if (LIST_FIRST(&ahc->pending_scbs) != NULL) { ahc_unpause(ahc); return (EBUSY); } #ifdef AHC_TARGET_MODE /* * XXX What about ATIOs that have not yet been serviced? * Perhaps we should just refuse to be suspended if we * are acting in a target role.
*/ if (ahc->pending_device != NULL) { ahc_unpause(ahc); return (EBUSY); } #endif ahc_shutdown(ahc); return (0); } int ahc_resume(struct ahc_softc *ahc) { ahc_reset(ahc, /*reinit*/TRUE); ahc_intr_enable(ahc, TRUE); ahc_restart(ahc); return (0); } #endif /************************** Busy Target Table *********************************/ /* * Return the untagged transaction id for a given target/channel lun. * Optionally, clear the entry. */ static u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl) { u_int scbid; u_int target_offset; if ((ahc->flags & AHC_SCB_BTT) != 0) { u_int saved_scbptr; saved_scbptr = ahc_inb(ahc, SCBPTR); ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl)); ahc_outb(ahc, SCBPTR, saved_scbptr); } else { target_offset = TCL_TARGET_OFFSET(tcl); scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset); } return (scbid); } static void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl) { u_int target_offset; if ((ahc->flags & AHC_SCB_BTT) != 0) { u_int saved_scbptr; saved_scbptr = ahc_inb(ahc, SCBPTR); ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL); ahc_outb(ahc, SCBPTR, saved_scbptr); } else { target_offset = TCL_TARGET_OFFSET(tcl); ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL); } } static void ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid) { u_int target_offset; if ((ahc->flags & AHC_SCB_BTT) != 0) { u_int saved_scbptr; saved_scbptr = ahc_inb(ahc, SCBPTR); ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid); ahc_outb(ahc, SCBPTR, saved_scbptr); } else { target_offset = TCL_TARGET_OFFSET(tcl); ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid); } } /************************** SCB and SCB queue management **********************/ int ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target, char channel, int lun, u_int tag, role_t role) { int targ = SCB_GET_TARGET(ahc, scb); char chan = SCB_GET_CHANNEL(ahc, scb); int slun = SCB_GET_LUN(scb); int match; match = ((chan == channel) || (channel == ALL_CHANNELS)); if (match != 0) match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); if (match != 0) match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); if (match != 0) { #ifdef AHC_TARGET_MODE int group; group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); if (role == ROLE_INITIATOR) { match = (group != XPT_FC_GROUP_TMODE) && ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); } else if (role == ROLE_TARGET) { match = (group == XPT_FC_GROUP_TMODE) && ((tag == scb->io_ctx->csio.tag_id) || (tag == SCB_LIST_NULL)); } #else /* !AHC_TARGET_MODE */ match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); #endif /* AHC_TARGET_MODE */ } return match; } static void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb) { int target; char channel; int lun; target = SCB_GET_TARGET(ahc, scb); lun = SCB_GET_LUN(scb); channel = SCB_GET_CHANNEL(ahc, scb); ahc_search_qinfifo(ahc, target, channel, lun, /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, CAM_REQUEUE_REQ, SEARCH_COMPLETE); ahc_platform_freeze_devq(ahc, scb); } void ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb) { struct scb *prev_scb; prev_scb = NULL; if (ahc_qinfifo_count(ahc) != 0) { u_int prev_tag; uint8_t prev_pos; prev_pos = ahc->qinfifonext - 1; prev_tag = ahc->qinfifo[prev_pos]; prev_scb = ahc_lookup_scb(ahc, prev_tag); } ahc_qinfifo_requeue(ahc, prev_scb, scb); if ((ahc->features & AHC_QUEUE_REGS) != 0) { ahc_outb(ahc, HNSCB_QOFF, 
ahc->qinfifonext); } else { ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); } } static void ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, struct scb *scb) { if (prev_scb == NULL) { ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); } else { prev_scb->hscb->next = scb->hscb->tag; ahc_sync_scb(ahc, prev_scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; scb->hscb->next = ahc->next_queued_scb->hscb->tag; ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } static int ahc_qinfifo_count(struct ahc_softc *ahc) { uint8_t qinpos; uint8_t diff; if ((ahc->features & AHC_QUEUE_REGS) != 0) { qinpos = ahc_inb(ahc, SNSCB_QOFF); ahc_outb(ahc, SNSCB_QOFF, qinpos); } else qinpos = ahc_inb(ahc, QINPOS); diff = ahc->qinfifonext - qinpos; return (diff); } int ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahc_search_action action) { struct scb *scb; struct scb *prev_scb; uint8_t qinstart; uint8_t qinpos; uint8_t qintail; uint8_t next; uint8_t prev; uint8_t curscbptr; int found; int have_qregs; qintail = ahc->qinfifonext; have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0; if (have_qregs) { qinstart = ahc_inb(ahc, SNSCB_QOFF); ahc_outb(ahc, SNSCB_QOFF, qinstart); } else qinstart = ahc_inb(ahc, QINPOS); qinpos = qinstart; found = 0; prev_scb = NULL; if (action == SEARCH_COMPLETE) { /* * Don't attempt to run any queued untagged transactions * until we are done with the abort process. */ ahc_freeze_untagged_queues(ahc); } /* * Start with an empty queue. Entries that are not chosen * for removal will be re-added to the queue as we go. */ ahc->qinfifonext = qinpos; ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); while (qinpos != qintail) { scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]); if (scb == NULL) { printk("qinpos = %d, SCB index = %d\n", qinpos, ahc->qinfifo[qinpos]); panic("Loop 1\n"); } if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) { /* * We found an scb that needs to be acted on. */ found++; switch (action) { case SEARCH_COMPLETE: { cam_status ostat; cam_status cstat; ostat = ahc_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) ahc_set_transaction_status(scb, status); cstat = ahc_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) ahc_freeze_scb(scb); if ((scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB in qinfifo\n"); ahc_done(ahc, scb); /* FALLTHROUGH */ } case SEARCH_REMOVE: break; case SEARCH_COUNT: ahc_qinfifo_requeue(ahc, prev_scb, scb); prev_scb = scb; break; } } else { ahc_qinfifo_requeue(ahc, prev_scb, scb); prev_scb = scb; } qinpos++; } if ((ahc->features & AHC_QUEUE_REGS) != 0) { ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); } else { ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); } if (action != SEARCH_COUNT && (found != 0) && (qinstart != ahc->qinfifonext)) { /* * The sequencer may be in the process of dmaing * down the SCB at the beginning of the queue. * This could be problematic if either the first, * or the second SCB is removed from the queue * (the first SCB includes a pointer to the "next" * SCB to dma). If we have removed any entries, swap * the first element in the queue with the next HSCB * so the sequencer will notice that NEXT_QUEUED_SCB * has changed during its dma attempt and will retry * the DMA. 
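 *
 * A concrete illustration (hypothetical tags): suppose the
 * qinfifo held tags {3, 7, 9} and tag 3 was just removed, leaving
 * tag 7 at the head. If the sequencer had already latched tag 3
 * as NEXT_QUEUED_SCB and begun its dma, it could fetch stale
 * data. Swapping the head SCB's hscb with the reserved
 * next_queued_scb hscb gives the head a brand-new tag; per the
 * description above, the sequencer then sees that NEXT_QUEUED_SCB
 * no longer matches the tag it latched and retries the dma
 * against consistent data.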
*/ scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]); if (scb == NULL) { printk("found = %d, qinstart = %d, qinfifonext = %d\n", found, qinstart, ahc->qinfifonext); panic("First/Second Qinfifo fixup\n"); } /* * ahc_swap_with_next_hscb forces our next pointer to * point to the reserved SCB for future commands. Save * and restore our original next pointer to maintain * queue integrity. */ next = scb->hscb->next; ahc->scb_data->scbindex[scb->hscb->tag] = NULL; ahc_swap_with_next_hscb(ahc, scb); scb->hscb->next = next; ahc->qinfifo[qinstart] = scb->hscb->tag; /* Tell the card about the new head of the qinfifo. */ ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); /* Fixup the tail "next" pointer. */ qintail = ahc->qinfifonext - 1; scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]); scb->hscb->next = ahc->next_queued_scb->hscb->tag; } /* * Search the waiting-for-selection list. */ curscbptr = ahc_inb(ahc, SCBPTR); next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */ prev = SCB_LIST_NULL; while (next != SCB_LIST_NULL) { uint8_t scb_index; ahc_outb(ahc, SCBPTR, next); scb_index = ahc_inb(ahc, SCB_TAG); if (scb_index >= ahc->scb_data->numscbs) { printk("Waiting List inconsistency. " "SCB index == %d, yet numscbs == %d.", scb_index, ahc->scb_data->numscbs); ahc_dump_card_state(ahc); panic("for safety"); } scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { printk("scb_index = %d, next = %d\n", scb_index, next); panic("Waiting List traversal\n"); } if (ahc_match_scb(ahc, scb, target, channel, lun, SCB_LIST_NULL, role)) { /* * We found an scb that needs to be acted on. */ found++; switch (action) { case SEARCH_COMPLETE: { cam_status ostat; cam_status cstat; ostat = ahc_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) ahc_set_transaction_status(scb, status); cstat = ahc_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) ahc_freeze_scb(scb); if ((scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB in Waiting List\n"); ahc_done(ahc, scb); /* FALLTHROUGH */ } case SEARCH_REMOVE: next = ahc_rem_wscb(ahc, next, prev); break; case SEARCH_COUNT: prev = next; next = ahc_inb(ahc, SCB_NEXT); break; } } else { prev = next; next = ahc_inb(ahc, SCB_NEXT); } } ahc_outb(ahc, SCBPTR, curscbptr); found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target, channel, lun, status, action); if (action == SEARCH_COMPLETE) ahc_release_untagged_queues(ahc); return (found); } int ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx, int target, char channel, int lun, uint32_t status, ahc_search_action action) { struct scb *scb; int maxtarget; int found; int i; if (action == SEARCH_COMPLETE) { /* * Don't attempt to run any queued untagged transactions * until we are done with the abort process. */ ahc_freeze_untagged_queues(ahc); } found = 0; i = 0; if ((ahc->flags & AHC_SCB_BTT) == 0) { maxtarget = 16; if (target != CAM_TARGET_WILDCARD) { i = target; if (channel == 'B') i += 8; maxtarget = i + 1; } } else { maxtarget = 0; } for (; i < maxtarget; i++) { struct scb_tailq *untagged_q; struct scb *next_scb; untagged_q = &(ahc->untagged_queues[i]); next_scb = TAILQ_FIRST(untagged_q); while (next_scb != NULL) { scb = next_scb; next_scb = TAILQ_NEXT(scb, links.tqe); /* * The head of the list may be the currently * active untagged command for a device. * We're only searching for commands that * have not been started. A transaction * marked active but still in the qinfifo * is removed by the qinfifo scanning code * above.
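 *
 * Note the traversal idiom in this loop: next_scb is cached with
 * TAILQ_NEXT() before scb is examined, so a SEARCH_REMOVE (the
 * TAILQ_REMOVE below) cannot invalidate the iterator. A minimal
 * sketch of the same pattern:
 *
 *	for (scb = TAILQ_FIRST(q); scb != NULL; scb = next_scb) {
 *		next_scb = TAILQ_NEXT(scb, links.tqe);
 *		... possibly unlink scb here ...
 *	}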
*/ if ((scb->flags & SCB_ACTIVE) != 0) continue; if (ahc_match_scb(ahc, scb, target, channel, lun, SCB_LIST_NULL, ROLE_INITIATOR) == 0 || (ctx != NULL && ctx != scb->io_ctx)) continue; /* * We found an scb that needs to be acted on. */ found++; switch (action) { case SEARCH_COMPLETE: { cam_status ostat; cam_status cstat; ostat = ahc_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) ahc_set_transaction_status(scb, status); cstat = ahc_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) ahc_freeze_scb(scb); if ((scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB in untaggedQ\n"); ahc_done(ahc, scb); break; } case SEARCH_REMOVE: scb->flags &= ~SCB_UNTAGGEDQ; TAILQ_REMOVE(untagged_q, scb, links.tqe); break; case SEARCH_COUNT: break; } } } if (action == SEARCH_COMPLETE) ahc_release_untagged_queues(ahc); return (found); } int ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, int stop_on_first, int remove, int save_state) { struct scb *scbp; u_int next; u_int prev; u_int count; u_int active_scb; count = 0; next = ahc_inb(ahc, DISCONNECTED_SCBH); prev = SCB_LIST_NULL; if (save_state) { /* restore this when we're done */ active_scb = ahc_inb(ahc, SCBPTR); } else /* Silence compiler */ active_scb = SCB_LIST_NULL; while (next != SCB_LIST_NULL) { u_int scb_index; ahc_outb(ahc, SCBPTR, next); scb_index = ahc_inb(ahc, SCB_TAG); if (scb_index >= ahc->scb_data->numscbs) { printk("Disconnected List inconsistency. " "SCB index == %d, yet numscbs == %d.", scb_index, ahc->scb_data->numscbs); ahc_dump_card_state(ahc); panic("for safety"); } if (next == prev) { panic("Disconnected List Loop. " "cur SCBPTR == %x, prev SCBPTR == %x.", next, prev); } scbp = ahc_lookup_scb(ahc, scb_index); if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, ROLE_INITIATOR)) { count++; if (remove) { next = ahc_rem_scb_from_disc_list(ahc, prev, next); } else { prev = next; next = ahc_inb(ahc, SCB_NEXT); } if (stop_on_first) break; } else { prev = next; next = ahc_inb(ahc, SCB_NEXT); } } if (save_state) ahc_outb(ahc, SCBPTR, active_scb); return (count); } /* * Remove an SCB from the on chip list of disconnected transactions. * This is empty/unused if we are not performing SCB paging. */ static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr) { u_int next; ahc_outb(ahc, SCBPTR, scbptr); next = ahc_inb(ahc, SCB_NEXT); ahc_outb(ahc, SCB_CONTROL, 0); ahc_add_curscb_to_free_list(ahc); if (prev != SCB_LIST_NULL) { ahc_outb(ahc, SCBPTR, prev); ahc_outb(ahc, SCB_NEXT, next); } else ahc_outb(ahc, DISCONNECTED_SCBH, next); return (next); } /* * Add the SCB as selected by SCBPTR onto the on chip list of * free hardware SCBs. This list is empty/unused if we are not * performing SCB paging. */ static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc) { /* * Invalidate the tag so that our abort * routines don't think it's active. */ ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); if ((ahc->flags & AHC_PAGESCBS) != 0) { ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH)); ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR)); } } /* * Manipulate the waiting for selection list and return the * scb that follows the one that we remove. */ static u_int ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) { u_int curscb, next; /* * Select the SCB we want to abort and * pull the next pointer out of it. 
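 *
 * The on-chip SCBs behave like an array indexed by SCBPTR, with
 * SCB_NEXT links forming a singly linked list. A rough sketch of
 * the unlink performed below (array notation is illustrative
 * only; the real accesses go through SCBPTR banking):
 *
 *	next = scbs[scbpos].next;
 *	if (prev == SCB_LIST_NULL)
 *		waiting_head = next;
 *	else
 *		scbs[prev].next = next;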
*/ curscb = ahc_inb(ahc, SCBPTR); ahc_outb(ahc, SCBPTR, scbpos); next = ahc_inb(ahc, SCB_NEXT); /* Clear the necessary fields */ ahc_outb(ahc, SCB_CONTROL, 0); ahc_add_curscb_to_free_list(ahc); /* update the waiting list */ if (prev == SCB_LIST_NULL) { /* First in the list */ ahc_outb(ahc, WAITING_SCBH, next); /* * Ensure we aren't attempting to perform * selection for this entry. */ ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); } else { /* * Select the scb that pointed to us * and update its next pointer. */ ahc_outb(ahc, SCBPTR, prev); ahc_outb(ahc, SCB_NEXT, next); } /* * Point us back at the original scb position. */ ahc_outb(ahc, SCBPTR, curscb); return next; } /******************************** Error Handling ******************************/ /* * Abort all SCBs that match the given description (target/channel/lun/tag), * setting their status to the passed in status if the status has not already * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer * is paused before it is called. */ static int ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status) { struct scb *scbp; struct scb *scbp_next; u_int active_scb; int i, j; int maxtarget; int minlun; int maxlun; int found; /* * Don't attempt to run any queued untagged transactions * until we are done with the abort process. */ ahc_freeze_untagged_queues(ahc); /* restore this when we're done */ active_scb = ahc_inb(ahc, SCBPTR); found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL, role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); /* * Clean out the busy target table for any untagged commands. */ i = 0; maxtarget = 16; if (target != CAM_TARGET_WILDCARD) { i = target; if (channel == 'B') i += 8; maxtarget = i + 1; } if (lun == CAM_LUN_WILDCARD) { /* * Unless we are using an SCB based * busy targets table, there is only * one table entry for all luns of * a target. */ minlun = 0; maxlun = 1; if ((ahc->flags & AHC_SCB_BTT) != 0) maxlun = AHC_NUM_LUNS; } else { minlun = lun; maxlun = lun + 1; } if (role != ROLE_TARGET) { for (;i < maxtarget; i++) { for (j = minlun;j < maxlun; j++) { u_int scbid; u_int tcl; tcl = BUILD_TCL(i << 4, j); scbid = ahc_index_busy_tcl(ahc, tcl); scbp = ahc_lookup_scb(ahc, scbid); if (scbp == NULL || ahc_match_scb(ahc, scbp, target, channel, lun, tag, role) == 0) continue; ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j)); } } /* * Go through the disconnected list and remove any entries we * have queued for completion, 0'ing their control byte too. * We save the active SCB and restore it ourselves, so there * is no reason for this search to restore it too. */ ahc_search_disc_list(ahc, target, channel, lun, tag, /*stop_on_first*/FALSE, /*remove*/TRUE, /*save_state*/FALSE); } /* * Go through the hardware SCB array looking for commands that * were active but not on any list. In some cases, these remnants * might not still have mappings in the scbindex array (e.g. unexpected * bus free with the same scb queued for an abort). Don't hold this * against them. */ for (i = 0; i < ahc->scb_data->maxhscbs; i++) { u_int scbid; ahc_outb(ahc, SCBPTR, i); scbid = ahc_inb(ahc, SCB_TAG); scbp = ahc_lookup_scb(ahc, scbid); if ((scbp == NULL && scbid != SCB_LIST_NULL) || (scbp != NULL && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))) ahc_add_curscb_to_free_list(ahc); } /* * Go through the pending CCB list and look for * commands for this target that are still active. 
* These are other tagged commands that were * disconnected when the reset occurred. */ scbp_next = LIST_FIRST(&ahc->pending_scbs); while (scbp_next != NULL) { scbp = scbp_next; scbp_next = LIST_NEXT(scbp, pending_links); if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) { cam_status ostat; ostat = ahc_get_transaction_status(scbp); if (ostat == CAM_REQ_INPROG) ahc_set_transaction_status(scbp, status); if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP) ahc_freeze_scb(scbp); if ((scbp->flags & SCB_ACTIVE) == 0) printk("Inactive SCB on pending list\n"); ahc_done(ahc, scbp); found++; } } ahc_outb(ahc, SCBPTR, active_scb); ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status); ahc_release_untagged_queues(ahc); return found; } static void ahc_reset_current_bus(struct ahc_softc *ahc) { uint8_t scsiseq; ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST); scsiseq = ahc_inb(ahc, SCSISEQ); ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO); ahc_flush_device_writes(ahc); ahc_delay(AHC_BUSRESET_DELAY); /* Turn off the bus reset */ ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO); ahc_clear_intstat(ahc); /* Re-enable reset interrupts */ ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST); } int ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset) { struct ahc_devinfo devinfo; u_int initiator, target, max_scsiid; u_int sblkctl; u_int scsiseq; u_int simode1; int found; int restart_needed; char cur_channel; ahc->pending_device = NULL; ahc_compile_devinfo(&devinfo, CAM_TARGET_WILDCARD, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, channel, ROLE_UNKNOWN); ahc_pause(ahc); /* Make sure the sequencer is in a safe location. */ ahc_clear_critical_section(ahc); /* * Run our command complete fifos to ensure that we perform * completion processing on any commands that 'completed' * before the reset occurred. */ ahc_run_qoutfifo(ahc); #ifdef AHC_TARGET_MODE /* * XXX - In Twin mode, the tqinfifo may have commands * for an unaffected channel in it. However, if * we have run out of ATIO resources to drain that * queue, we may not get them all out here. Further, * the blocked transactions for the reset channel * should just be killed off, irrespective of whether * we are blocked on ATIO resources. Write a routine * to compact the tqinfifo appropriately. */ if ((ahc->flags & AHC_TARGETROLE) != 0) { ahc_run_tqinfifo(ahc, /*paused*/TRUE); } #endif /* * Reset the bus if we are initiating this reset */ sblkctl = ahc_inb(ahc, SBLKCTL); cur_channel = 'A'; if ((ahc->features & AHC_TWIN) != 0 && ((sblkctl & SELBUSB) != 0)) cur_channel = 'B'; scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); if (cur_channel != channel) { /* Case 1: Command for another bus is active * Stealthily reset the other bus without * upsetting the current bus. */ ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST); #ifdef AHC_TARGET_MODE /* * Bus resets clear ENSELI, so we cannot * defer re-enabling bus reset interrupts * if we are in target mode.
*/ if ((ahc->flags & AHC_TARGETROLE) != 0) simode1 |= ENSCSIRST; #endif ahc_outb(ahc, SIMODE1, simode1); if (initiate_reset) ahc_reset_current_bus(ahc); ahc_clear_intstat(ahc); ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); ahc_outb(ahc, SBLKCTL, sblkctl); restart_needed = FALSE; } else { /* Case 2: A command from this bus is active or we're idle */ simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST); #ifdef AHC_TARGET_MODE /* * Bus resets clear ENSELI, so we cannot * defer re-enabling bus reset interrupts * if we are in target mode. */ if ((ahc->flags & AHC_TARGETROLE) != 0) simode1 |= ENSCSIRST; #endif ahc_outb(ahc, SIMODE1, simode1); if (initiate_reset) ahc_reset_current_bus(ahc); ahc_clear_intstat(ahc); ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); restart_needed = TRUE; } /* * Clean up all the state information for the * pending transactions on this bus. */ found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7; #ifdef AHC_TARGET_MODE /* * Send an immediate notify ccb to all target mode peripheral * drivers affected by this action. */ for (target = 0; target <= max_scsiid; target++) { struct ahc_tmode_tstate* tstate; u_int lun; tstate = ahc->enabled_targets[target]; if (tstate == NULL) continue; for (lun = 0; lun < AHC_NUM_LUNS; lun++) { struct ahc_tmode_lstate* lstate; lstate = tstate->enabled_luns[lun]; if (lstate == NULL) continue; ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD, EVENT_TYPE_BUS_RESET, /*arg*/0); ahc_send_lstate_events(ahc, lstate); } } #endif /* Notify the XPT that a bus reset occurred */ ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, AC_BUS_RESET); /* * Revert to async/narrow transfers until we renegotiate. */ for (target = 0; target <= max_scsiid; target++) { if (ahc->enabled_targets[target] == NULL) continue; for (initiator = 0; initiator <= max_scsiid; initiator++) { struct ahc_devinfo devinfo; ahc_compile_devinfo(&devinfo, target, initiator, CAM_LUN_WILDCARD, channel, ROLE_UNKNOWN); ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_CUR, /*paused*/TRUE); ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_CUR, /*paused*/TRUE); } } if (restart_needed) ahc_restart(ahc); else ahc_unpause(ahc); return found; } /***************************** Residual Processing ****************************/ /* * Calculate the residual for a just completed SCB. */ static void ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb) { struct hardware_scb *hscb; struct status_pkt *spkt; uint32_t sgptr; uint32_t resid_sgptr; uint32_t resid; /* * 5 cases. * 1) No residual. * SG_RESID_VALID clear in sgptr. * 2) Transferless command * 3) Never performed any transfers. * sgptr has SG_FULL_RESID set. * 4) No residual but target did not * save data pointers after the * last transfer, so sgptr was * never updated. * 5) We have a partial residual. * Use residual_sgptr to determine * where we are.
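 *
 * A worked example of case 5, with illustrative numbers: say a
 * command mapped three 4096-byte segments and the target stopped
 * partway through segment 2, leaving 1000 bytes of it unsent.
 * residual_datacnt then holds 1000 and residual_sg_ptr points at
 * segment 3 (the sg after the interrupted one), so the loop below
 * computes
 *
 *	resid = 1000 + 4096 = 5096 bytes
 *
 * i.e. what was left of the interrupted segment plus every
 * segment after it, up to the AHC_DMA_LAST_SEG marker.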
*/ hscb = scb->hscb; sgptr = ahc_le32toh(hscb->sgptr); if ((sgptr & SG_RESID_VALID) == 0) /* Case 1 */ return; sgptr &= ~SG_RESID_VALID; if ((sgptr & SG_LIST_NULL) != 0) /* Case 2 */ return; spkt = &hscb->shared_data.status; resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr); if ((sgptr & SG_FULL_RESID) != 0) { /* Case 3 */ resid = ahc_get_transfer_length(scb); } else if ((resid_sgptr & SG_LIST_NULL) != 0) { /* Case 4 */ return; } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); } else { struct ahc_dma_seg *sg; /* * Remainder of the SG where the transfer * stopped. */ resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK; sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK); /* The residual sg_ptr always points to the next sg */ sg--; /* * Add up the contents of all residual * SG segments that are after the SG where * the transfer stopped. */ while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) { sg++; resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; } } if ((scb->flags & SCB_SENSE) == 0) ahc_set_residual(scb, resid); else ahc_set_sense_residual(scb, resid); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MISC) != 0) { ahc_print_path(ahc, scb); printk("Handled %sResidual of %d bytes\n", (scb->flags & SCB_SENSE) ? "Sense " : "", resid); } #endif } /******************************* Target Mode **********************************/ #ifdef AHC_TARGET_MODE /* * Add a target mode event to this lun's queue */ static void ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg) { struct ahc_tmode_event *event; int pending; xpt_freeze_devq(lstate->path, /*count*/1); if (lstate->event_w_idx >= lstate->event_r_idx) pending = lstate->event_w_idx - lstate->event_r_idx; else pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1 - (lstate->event_r_idx - lstate->event_w_idx); if (event_type == EVENT_TYPE_BUS_RESET || event_type == MSG_BUS_DEV_RESET) { /* * Any earlier events are irrelevant, so reset our buffer. * This has the effect of allowing us to deal with reset * floods (an external device holding down the reset line) * without losing the event that is really interesting. */ lstate->event_r_idx = 0; lstate->event_w_idx = 0; xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); } if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) { xpt_print_path(lstate->path); printk("immediate event %x:%x lost\n", lstate->event_buffer[lstate->event_r_idx].event_type, lstate->event_buffer[lstate->event_r_idx].event_arg); lstate->event_r_idx++; if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) lstate->event_r_idx = 0; xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); } event = &lstate->event_buffer[lstate->event_w_idx]; event->initiator_id = initiator_id; event->event_type = event_type; event->event_arg = event_arg; lstate->event_w_idx++; if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE) lstate->event_w_idx = 0; } /* * Send any target mode events queued up waiting * for immediate notify resources. 
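 *
 * The lun's event buffer is a small ring indexed by event_r_idx
 * and event_w_idx, as managed by ahc_queue_lstate_event() above.
 * Illustrative occupancy arithmetic (hypothetical values): while
 * the writer has not wrapped, say w_idx == 5 and r_idx == 2,
 * pending is simply 5 - 2 = 3; once the writer wraps behind the
 * reader, the second branch above accounts for the wraparound
 * instead.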
*/ void ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate) { struct ccb_hdr *ccbh; struct ccb_immed_notify *inot; while (lstate->event_r_idx != lstate->event_w_idx && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { struct ahc_tmode_event *event; event = &lstate->event_buffer[lstate->event_r_idx]; SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); inot = (struct ccb_immed_notify *)ccbh; switch (event->event_type) { case EVENT_TYPE_BUS_RESET: ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; break; default: ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; inot->message_args[0] = event->event_type; inot->message_args[1] = event->event_arg; break; } inot->initiator_id = event->initiator_id; inot->sense_len = 0; xpt_done((union ccb *)inot); lstate->event_r_idx++; if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) lstate->event_r_idx = 0; } } #endif /******************** Sequencer Program Patching/Download *********************/ #ifdef AHC_DUMP_SEQ void ahc_dumpseq(struct ahc_softc* ahc) { int i; ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); ahc_outb(ahc, SEQADDR0, 0); ahc_outb(ahc, SEQADDR1, 0); for (i = 0; i < ahc->instruction_ram_size; i++) { uint8_t ins_bytes[4]; ahc_insb(ahc, SEQRAM, ins_bytes, 4); printk("0x%08x\n", ins_bytes[0] << 24 | ins_bytes[1] << 16 | ins_bytes[2] << 8 | ins_bytes[3]); } } #endif static int ahc_loadseq(struct ahc_softc *ahc) { struct cs cs_table[num_critical_sections]; u_int begin_set[num_critical_sections]; u_int end_set[num_critical_sections]; const struct patch *cur_patch; u_int cs_count; u_int cur_cs; u_int i; u_int skip_addr; u_int sg_prefetch_cnt; int downloaded; uint8_t download_consts[7]; /* * Start out with 0 critical sections * that apply to this firmware load. */ cs_count = 0; cur_cs = 0; memset(begin_set, 0, sizeof(begin_set)); memset(end_set, 0, sizeof(end_set)); /* Setup downloadable constant table */ download_consts[QOUTFIFO_OFFSET] = 0; if (ahc->targetcmds != NULL) download_consts[QOUTFIFO_OFFSET] += 32; download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1; download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1; download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1); sg_prefetch_cnt = ahc->pci_cachesize; if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg))) sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg); download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt; download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1); download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1); cur_patch = patches; downloaded = 0; skip_addr = 0; ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); ahc_outb(ahc, SEQADDR0, 0); ahc_outb(ahc, SEQADDR1, 0); for (i = 0; i < sizeof(seqprog)/4; i++) { if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) { /* * Don't download this instruction as it * is in a patch that was removed. */ continue; } if (downloaded == ahc->instruction_ram_size) { /* * We're about to exceed the instruction * storage capacity for this chip. Fail * the load. */ printk("\n%s: Program too large for instruction memory " "size of %d!\n", ahc_name(ahc), ahc->instruction_ram_size); return (ENOMEM); } /* * Move through the CS table until we find a CS * that might apply to this instruction. 
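 *
 * The begin/end values in critical_sections[] are source-program
 * addresses, while the sequencer needs addresses in the patched
 * image actually downloaded. A hypothetical example: if a
 * disabled patch removed instructions 5-9, a critical section
 * spanning source instructions 10-14 is recorded in cs_table as
 * downloaded addresses 5-9, since only five instructions precede
 * it in the image.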
*/ for (; cur_cs < num_critical_sections; cur_cs++) { if (critical_sections[cur_cs].end <= i) { if (begin_set[cs_count] == TRUE && end_set[cs_count] == FALSE) { cs_table[cs_count].end = downloaded; end_set[cs_count] = TRUE; cs_count++; } continue; } if (critical_sections[cur_cs].begin <= i && begin_set[cs_count] == FALSE) { cs_table[cs_count].begin = downloaded; begin_set[cs_count] = TRUE; } break; } ahc_download_instr(ahc, i, download_consts); downloaded++; } ahc->num_critical_sections = cs_count; if (cs_count != 0) { cs_count *= sizeof(struct cs); ahc->critical_sections = kmalloc(cs_count, GFP_ATOMIC); if (ahc->critical_sections == NULL) panic("ahc_loadseq: Could not malloc"); memcpy(ahc->critical_sections, cs_table, cs_count); } ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE); if (bootverbose) { printk(" %d instructions downloaded\n", downloaded); printk("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n", ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags); } return (0); } static int ahc_check_patch(struct ahc_softc *ahc, const struct patch **start_patch, u_int start_instr, u_int *skip_addr) { const struct patch *cur_patch; const struct patch *last_patch; u_int num_patches; num_patches = ARRAY_SIZE(patches); last_patch = &patches[num_patches]; cur_patch = *start_patch; while (cur_patch < last_patch && start_instr == cur_patch->begin) { if (cur_patch->patch_func(ahc) == 0) { /* Start rejecting code */ *skip_addr = start_instr + cur_patch->skip_instr; cur_patch += cur_patch->skip_patch; } else { /* Accepted this patch. Advance to the next * one and wait for our instruction pointer to * hit this point. */ cur_patch++; } } *start_patch = cur_patch; if (start_instr < *skip_addr) /* Still skipping */ return (0); return (1); } static void ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts) { union ins_formats instr; struct ins_format1 *fmt1_ins; struct ins_format3 *fmt3_ins; u_int opcode; /* * The firmware is always compiled into a little endian format. */ instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); fmt1_ins = &instr.format1; fmt3_ins = NULL; /* Pull the opcode */ opcode = instr.format1.opcode; switch (opcode) { case AIC_OP_JMP: case AIC_OP_JC: case AIC_OP_JNC: case AIC_OP_CALL: case AIC_OP_JNE: case AIC_OP_JNZ: case AIC_OP_JE: case AIC_OP_JZ: { const struct patch *cur_patch; int address_offset; u_int address; u_int skip_addr; u_int i; fmt3_ins = &instr.format3; address_offset = 0; address = fmt3_ins->address; cur_patch = patches; skip_addr = 0; for (i = 0; i < address;) { ahc_check_patch(ahc, &cur_patch, i, &skip_addr); if (skip_addr > i) { int end_addr; end_addr = min(address, skip_addr); address_offset += end_addr - i; i = skip_addr; } else { i++; } } address -= address_offset; fmt3_ins->address = address; /* FALLTHROUGH */ } case AIC_OP_OR: case AIC_OP_AND: case AIC_OP_XOR: case AIC_OP_ADD: case AIC_OP_ADC: case AIC_OP_BMOV: if (fmt1_ins->parity != 0) { fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; } fmt1_ins->parity = 0; if ((ahc->features & AHC_CMD_CHAN) == 0 && opcode == AIC_OP_BMOV) { /* * Block move was added at the same time * as the command channel. Verify that * this is only a move of a single element * and convert the BMOV to a MOV * (AND with an immediate of FF).
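 *
 * Why AND works as a move here: the operands are single bytes,
 * and (x & 0xff) == x for any 8-bit x, so "dst = src AND 0xff"
 * copies src to dst unchanged. Only a one-element BMOV can be
 * rewritten this way, hence the panic below for anything larger.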
*/ if (fmt1_ins->immediate != 1) panic("%s: BMOV not supported\n", ahc_name(ahc)); fmt1_ins->opcode = AIC_OP_AND; fmt1_ins->immediate = 0xff; } /* FALLTHROUGH */ case AIC_OP_ROL: if ((ahc->features & AHC_ULTRA2) != 0) { int i, count; /* Calculate odd parity for the instruction */ for (i = 0, count = 0; i < 31; i++) { uint32_t mask; mask = 0x01 << i; if ((instr.integer & mask) != 0) count++; } if ((count & 0x01) == 0) instr.format1.parity = 1; } else { /* Compress the instruction for older sequencers */ if (fmt3_ins != NULL) { instr.integer = fmt3_ins->immediate | (fmt3_ins->source << 8) | (fmt3_ins->address << 16) | (fmt3_ins->opcode << 25); } else { instr.integer = fmt1_ins->immediate | (fmt1_ins->source << 8) | (fmt1_ins->destination << 16) | (fmt1_ins->ret << 24) | (fmt1_ins->opcode << 25); } } /* The sequencer is a little endian cpu */ instr.integer = ahc_htole32(instr.integer); ahc_outsb(ahc, SEQRAM, instr.bytes, 4); break; default: panic("Unknown opcode encountered in seq program"); break; } } int ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries, const char *name, u_int address, u_int value, u_int *cur_column, u_int wrap_point) { int printed; u_int printed_mask; if (cur_column != NULL && *cur_column >= wrap_point) { printk("\n"); *cur_column = 0; } printed = printk("%s[0x%x]", name, value); if (table == NULL) { printed += printk(" "); *cur_column += printed; return (printed); } printed_mask = 0; while (printed_mask != 0xFF) { int entry; for (entry = 0; entry < num_entries; entry++) { if (((value & table[entry].mask) != table[entry].value) || ((printed_mask & table[entry].mask) == table[entry].mask)) continue; printed += printk("%s%s", printed_mask == 0 ? ":(" : "|", table[entry].name); printed_mask |= table[entry].mask; break; } if (entry >= num_entries) break; } if (printed_mask != 0) printed += printk(") "); else printed += printk(" "); if (cur_column != NULL) *cur_column += printed; return (printed); } void ahc_dump_card_state(struct ahc_softc *ahc) { struct scb *scb; struct scb_tailq *untagged_q; u_int cur_col; int paused; int target; int maxtarget; int i; uint8_t last_phase; uint8_t qinpos; uint8_t qintail; uint8_t qoutpos; uint8_t scb_index; uint8_t saved_scbptr; if (ahc_is_paused(ahc)) { paused = 1; } else { paused = 0; ahc_pause(ahc); } saved_scbptr = ahc_inb(ahc, SCBPTR); last_phase = ahc_inb(ahc, LASTPHASE); printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" "%s: Dumping Card State %s, at SEQADDR 0x%x\n", ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg, ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); if (paused) printk("Card was paused\n"); printk("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n", ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX), ahc_inb(ahc, ARG_2)); printk("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT), ahc_inb(ahc, SCBPTR)); cur_col = 0; if ((ahc->features & AHC_DT) != 0) ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50); ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50); ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50); ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50); ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50); ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50); ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50); ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50); ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50); ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50); ahc_sstat0_print(ahc_inb(ahc, 
SSTAT0), &cur_col, 50); ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50); ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50); ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50); ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50); ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50); ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50); ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50); ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50); if (cur_col != 0) printk("\n"); printk("STACK:"); for (i = 0; i < STACK_SIZE; i++) printk(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8)); printk("\nSCB count = %d\n", ahc->scb_data->numscbs); printk("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag); printk("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB)); /* QINFIFO */ printk("QINFIFO entries: "); if ((ahc->features & AHC_QUEUE_REGS) != 0) { qinpos = ahc_inb(ahc, SNSCB_QOFF); ahc_outb(ahc, SNSCB_QOFF, qinpos); } else qinpos = ahc_inb(ahc, QINPOS); qintail = ahc->qinfifonext; while (qinpos != qintail) { printk("%d ", ahc->qinfifo[qinpos]); qinpos++; } printk("\n"); printk("Waiting Queue entries: "); scb_index = ahc_inb(ahc, WAITING_SCBH); i = 0; while (scb_index != SCB_LIST_NULL && i++ < 256) { ahc_outb(ahc, SCBPTR, scb_index); printk("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); scb_index = ahc_inb(ahc, SCB_NEXT); } printk("\n"); printk("Disconnected Queue entries: "); scb_index = ahc_inb(ahc, DISCONNECTED_SCBH); i = 0; while (scb_index != SCB_LIST_NULL && i++ < 256) { ahc_outb(ahc, SCBPTR, scb_index); printk("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); scb_index = ahc_inb(ahc, SCB_NEXT); } printk("\n"); ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); printk("QOUTFIFO entries: "); qoutpos = ahc->qoutfifonext; i = 0; while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) { printk("%d ", ahc->qoutfifo[qoutpos]); qoutpos++; } printk("\n"); printk("Sequencer Free SCB List: "); scb_index = ahc_inb(ahc, FREE_SCBH); i = 0; while (scb_index != SCB_LIST_NULL && i++ < 256) { ahc_outb(ahc, SCBPTR, scb_index); printk("%d ", scb_index); scb_index = ahc_inb(ahc, SCB_NEXT); } printk("\n"); printk("Sequencer SCB Info: "); for (i = 0; i < ahc->scb_data->maxhscbs; i++) { ahc_outb(ahc, SCBPTR, i); cur_col = printk("\n%3d ", i); ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60); ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60); ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); } printk("\n"); printk("Pending list: "); i = 0; LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { if (i++ > 256) break; cur_col = printk("\n%3d ", scb->hscb->tag); ahc_scb_control_print(scb->hscb->control, &cur_col, 60); ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60); ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60); if ((ahc->flags & AHC_PAGESCBS) == 0) { ahc_outb(ahc, SCBPTR, scb->hscb->tag); printk("("); ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); printk(")"); } } printk("\n"); printk("Kernel Free SCB list: "); i = 0; SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) { if (i++ > 256) break; printk("%d ", scb->hscb->tag); } printk("\n"); maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 
15 : 7; for (target = 0; target <= maxtarget; target++) { untagged_q = &ahc->untagged_queues[target]; if (TAILQ_FIRST(untagged_q) == NULL) continue; printk("Untagged Q(%d): ", target); i = 0; TAILQ_FOREACH(scb, untagged_q, links.tqe) { if (i++ > 256) break; printk("%d ", scb->hscb->tag); } printk("\n"); } ahc_platform_dump_card_state(ahc); printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); ahc_outb(ahc, SCBPTR, saved_scbptr); if (paused == 0) ahc_unpause(ahc); } /************************* Target Mode ****************************************/ #ifdef AHC_TARGET_MODE cam_status ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, struct ahc_tmode_tstate **tstate, struct ahc_tmode_lstate **lstate, int notfound_failure) { if ((ahc->features & AHC_TARGETMODE) == 0) return (CAM_REQ_INVALID); /* * Handle the 'black hole' device that sucks up * requests to unattached luns on enabled targets. */ if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { *tstate = NULL; *lstate = ahc->black_hole; } else { u_int max_id; max_id = (ahc->features & AHC_WIDE) ? 16 : 8; if (ccb->ccb_h.target_id >= max_id) return (CAM_TID_INVALID); if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS) return (CAM_LUN_INVALID); *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; *lstate = NULL; if (*tstate != NULL) *lstate = (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; } if (notfound_failure != 0 && *lstate == NULL) return (CAM_PATH_INVALID); return (CAM_REQ_CMP); } void ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) { struct ahc_tmode_tstate *tstate; struct ahc_tmode_lstate *lstate; struct ccb_en_lun *cel; cam_status status; u_long s; u_int target; u_int lun; u_int target_mask; u_int our_id; int error; char channel; status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, /*notfound_failure*/FALSE); if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } if (cam_sim_bus(sim) == 0) our_id = ahc->our_id; else our_id = ahc->our_id_b; if (ccb->ccb_h.target_id != our_id) { /* * our_id represents our initiator ID, or * the ID of the first target to have an * enabled lun in target mode. There are * two cases that may preclude enabling a * target id other than our_id. * * o our_id is for an active initiator role. * Since the hardware does not support * reselections to the initiator role at * anything other than our_id, and our_id * is used by the hardware to indicate the * ID to use for both select-out and * reselect-out operations, the only target * ID we can support in this mode is our_id. * * o The MULTARGID feature is not available and * a previous target mode ID has been enabled. */ if ((ahc->features & AHC_MULTIROLE) != 0) { if ((ahc->features & AHC_MULTI_TID) != 0 && (ahc->flags & AHC_INITIATORROLE) != 0) { /* * Only allow additional targets if * the initiator role is disabled. * The hardware cannot handle a re-select-in * on the initiator id during a re-select-out * on a different target id. */ status = CAM_TID_INVALID; } else if ((ahc->flags & AHC_INITIATORROLE) != 0 || ahc->enabled_luns > 0) { /* * Only allow our target id to change * if the initiator role is not configured * and there are no enabled luns which * are attached to the currently registered * scsi id. 
*/ status = CAM_TID_INVALID; } } else if ((ahc->features & AHC_MULTI_TID) == 0 && ahc->enabled_luns > 0) { status = CAM_TID_INVALID; } } if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } /* * We now have an id that is valid. * If we aren't in target mode, switch modes. */ if ((ahc->flags & AHC_TARGETROLE) == 0 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { u_long s; ahc_flag saved_flags; printk("Configuring Target Mode\n"); ahc_lock(ahc, &s); if (LIST_FIRST(&ahc->pending_scbs) != NULL) { ccb->ccb_h.status = CAM_BUSY; ahc_unlock(ahc, &s); return; } saved_flags = ahc->flags; ahc->flags |= AHC_TARGETROLE; if ((ahc->features & AHC_MULTIROLE) == 0) ahc->flags &= ~AHC_INITIATORROLE; ahc_pause(ahc); error = ahc_loadseq(ahc); if (error != 0) { /* * Restore original configuration and notify * the caller that we cannot support target mode. * Since the adapter started out in this * configuration, the firmware load will succeed, * so there is no point in checking ahc_loadseq's * return value. */ ahc->flags = saved_flags; (void)ahc_loadseq(ahc); ahc_restart(ahc); ahc_unlock(ahc, &s); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; return; } ahc_restart(ahc); ahc_unlock(ahc, &s); } cel = &ccb->cel; target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; channel = SIM_CHANNEL(ahc, sim); target_mask = 0x01 << target; if (channel == 'B') target_mask <<= 8; if (cel->enable != 0) { u_int scsiseq; /* Are we already enabled?? */ if (lstate != NULL) { xpt_print_path(ccb->ccb_h.path); printk("Lun already enabled\n"); ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; return; } if (cel->grp6_len != 0 || cel->grp7_len != 0) { /* * Don't (yet?) support vendor * specific commands. */ ccb->ccb_h.status = CAM_REQ_INVALID; printk("Non-zero Group Codes\n"); return; } /* * Seems to be okay. * Setup our data structures. */ if (target != CAM_TARGET_WILDCARD && tstate == NULL) { tstate = ahc_alloc_tstate(ahc, target, channel); if (tstate == NULL) { xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate tstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } } lstate = kmalloc(sizeof(*lstate), GFP_ATOMIC); if (lstate == NULL) { xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate lstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } memset(lstate, 0, sizeof(*lstate)); status = xpt_create_path(&lstate->path, /*periph*/NULL, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), xpt_path_lun_id(ccb->ccb_h.path)); if (status != CAM_REQ_CMP) { kfree(lstate); xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate path\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } SLIST_INIT(&lstate->accept_tios); SLIST_INIT(&lstate->immed_notifies); ahc_lock(ahc, &s); ahc_pause(ahc); if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = lstate; ahc->enabled_luns++; if ((ahc->features & AHC_MULTI_TID) != 0) { u_int targid_mask; targid_mask = ahc_inb(ahc, TARGID) | (ahc_inb(ahc, TARGID + 1) << 8); targid_mask |= target_mask; ahc_outb(ahc, TARGID, targid_mask); ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); ahc_update_scsiid(ahc, targid_mask); } else { u_int our_id; char channel; channel = SIM_CHANNEL(ahc, sim); our_id = SIM_SCSI_ID(ahc, sim); /* * This can only happen if selections * are not enabled */ if (target != our_id) { u_int sblkctl; char cur_channel; int swap; sblkctl = ahc_inb(ahc, SBLKCTL); cur_channel = (sblkctl & SELBUSB) ? 
'B' : 'A'; if ((ahc->features & AHC_TWIN) == 0) cur_channel = 'A'; swap = cur_channel != channel; if (channel == 'A') ahc->our_id = target; else ahc->our_id_b = target; if (swap) ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); ahc_outb(ahc, SCSIID, target); if (swap) ahc_outb(ahc, SBLKCTL, sblkctl); } } } else ahc->black_hole = lstate; /* Allow select-in operations */ if (ahc->black_hole != NULL && ahc->enabled_luns > 0) { scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); scsiseq |= ENSELI; ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); scsiseq = ahc_inb(ahc, SCSISEQ); scsiseq |= ENSELI; ahc_outb(ahc, SCSISEQ, scsiseq); } ahc_unpause(ahc); ahc_unlock(ahc, &s); ccb->ccb_h.status = CAM_REQ_CMP; xpt_print_path(ccb->ccb_h.path); printk("Lun now enabled for target mode\n"); } else { struct scb *scb; int i, empty; if (lstate == NULL) { ccb->ccb_h.status = CAM_LUN_INVALID; return; } ahc_lock(ahc, &s); ccb->ccb_h.status = CAM_REQ_CMP; LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { struct ccb_hdr *ccbh; ccbh = &scb->io_ctx->ccb_h; if (ccbh->func_code == XPT_CONT_TARGET_IO && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ printk("CTIO pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; ahc_unlock(ahc, &s); return; } } if (SLIST_FIRST(&lstate->accept_tios) != NULL) { printk("ATIOs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { printk("INOTs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (ccb->ccb_h.status != CAM_REQ_CMP) { ahc_unlock(ahc, &s); return; } xpt_print_path(ccb->ccb_h.path); printk("Target mode disabled\n"); xpt_free_path(lstate->path); kfree(lstate); ahc_pause(ahc); /* Can we clean up the target too? */ if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = NULL; ahc->enabled_luns--; for (empty = 1, i = 0; i < 8; i++) if (tstate->enabled_luns[i] != NULL) { empty = 0; break; } if (empty) { ahc_free_tstate(ahc, target, channel, /*force*/FALSE); if (ahc->features & AHC_MULTI_TID) { u_int targid_mask; targid_mask = ahc_inb(ahc, TARGID) | (ahc_inb(ahc, TARGID + 1) << 8); targid_mask &= ~target_mask; ahc_outb(ahc, TARGID, targid_mask); ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); ahc_update_scsiid(ahc, targid_mask); } } } else { ahc->black_hole = NULL; /* * We can't allow selections without * our black hole device. */ empty = TRUE; } if (ahc->enabled_luns == 0) { /* Disallow select-in */ u_int scsiseq; scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); scsiseq &= ~ENSELI; ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); scsiseq = ahc_inb(ahc, SCSISEQ); scsiseq &= ~ENSELI; ahc_outb(ahc, SCSISEQ, scsiseq); if ((ahc->features & AHC_MULTIROLE) == 0) { printk("Configuring Initiator Mode\n"); ahc->flags &= ~AHC_TARGETROLE; ahc->flags |= AHC_INITIATORROLE; /* * Returning to a configuration that * fit previously will always succeed. */ (void)ahc_loadseq(ahc); ahc_restart(ahc); /* * Unpaused. The extra unpause * that follows is harmless. */ } } ahc_unpause(ahc); ahc_unlock(ahc, &s); } } static void ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask) { u_int scsiid_mask; u_int scsiid; if ((ahc->features & AHC_MULTI_TID) == 0) panic("ahc_update_scsiid called on non-multitid unit\n"); /* * Since we will rely on the TARGID mask * for selection enables, ensure that OID * in SCSIID is not set to some other ID * that we don't want to allow selections on. 
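 *
 * Illustrative arithmetic for the fixup below (values are
 * hypothetical): if targid_mask is 0x0040 (only ID 6 enabled)
 * while OID currently holds 3, scsiid_mask is 1 << 3 = 0x0008,
 * which misses targid_mask. ffs(0x0040) returns 7 because ffs
 * counts from 1, so OID is rewritten to 7 - 1 = 6, an ID that
 * selections are actually enabled for.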
*/ if ((ahc->features & AHC_ULTRA2) != 0) scsiid = ahc_inb(ahc, SCSIID_ULTRA2); else scsiid = ahc_inb(ahc, SCSIID); scsiid_mask = 0x1 << (scsiid & OID); if ((targid_mask & scsiid_mask) == 0) { u_int our_id; /* ffs counts from 1 */ our_id = ffs(targid_mask); if (our_id == 0) our_id = ahc->our_id; else our_id--; scsiid &= TID; scsiid |= our_id; } if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SCSIID_ULTRA2, scsiid); else ahc_outb(ahc, SCSIID, scsiid); } static void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) { struct target_cmd *cmd; /* * If the card supports auto-access pause, * we can access the card directly regardless * of whether it is paused or not. */ if ((ahc->features & AHC_AUTOPAUSE) != 0) paused = TRUE; ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD); while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) { /* * Only advance through the queue if we * have the resources to process the command. */ if (ahc_handle_target_cmd(ahc, cmd) != 0) break; cmd->cmd_valid = 0; ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc_targetcmd_offset(ahc, ahc->tqinfifonext), sizeof(struct target_cmd), BUS_DMASYNC_PREREAD); ahc->tqinfifonext++; /* * Lazily update our position in the target mode incoming * command queue as seen by the sequencer. */ if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { if ((ahc->features & AHC_HS_MAILBOX) != 0) { u_int hs_mailbox; hs_mailbox = ahc_inb(ahc, HS_MAILBOX); hs_mailbox &= ~HOST_TQINPOS; hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS; ahc_outb(ahc, HS_MAILBOX, hs_mailbox); } else { if (!paused) ahc_pause(ahc); ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext & HOST_TQINPOS); if (!paused) ahc_unpause(ahc); } } } } static int ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd) { struct ahc_tmode_tstate *tstate; struct ahc_tmode_lstate *lstate; struct ccb_accept_tio *atio; uint8_t *byte; int initiator; int target; int lun; initiator = SCSIID_TARGET(ahc, cmd->scsiid); target = SCSIID_OUR_ID(cmd->scsiid); lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); byte = cmd->bytes; tstate = ahc->enabled_targets[target]; lstate = NULL; if (tstate != NULL) lstate = tstate->enabled_luns[lun]; /* * Commands for disabled luns go to the black hole driver. */ if (lstate == NULL) lstate = ahc->black_hole; atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); if (atio == NULL) { ahc->flags |= AHC_TQINFIFO_BLOCKED; /* * Wait for more ATIOs from the peripheral driver for this lun. */ if (bootverbose) printk("%s: ATIOs exhausted\n", ahc_name(ahc)); return (1); } else ahc->flags &= ~AHC_TQINFIFO_BLOCKED; #if 0 printk("Incoming command from %d for %d:%d%s\n", initiator, target, lun, lstate == ahc->black_hole ? "(Black Holed)" : ""); #endif SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); if (lstate == ahc->black_hole) { /* Fill in the wildcards */ atio->ccb_h.target_id = target; atio->ccb_h.target_lun = lun; } /* * Package it up and send it off to * whomever has this lun enabled. */ atio->sense_len = 0; atio->init_id = initiator; if (byte[0] != 0xFF) { /* Tag was included */ atio->tag_action = *byte++; atio->tag_id = *byte++; atio->ccb_h.flags = CAM_TAG_ACTION_VALID; } else { atio->ccb_h.flags = 0; } byte++; /* Okay. Now determine the cdb size based on the command code */ switch (*byte >> CMD_GROUP_CODE_SHIFT) { case 0: atio->cdb_len = 6; break; case 1: case 2: atio->cdb_len = 10; break; case 4: atio->cdb_len = 16; break; case 5: atio->cdb_len = 12; break; case 3: default: /* Only copy the opcode. 
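 *
 * The switch above keys on the SCSI group code, the top three
 * bits of the opcode byte: group 0 commands use 6-byte cdbs,
 * groups 1 and 2 use 10 bytes, group 4 uses 16, and group 5 uses
 * 12. Group 3 is reserved and groups 6 and 7 are vendor specific,
 * so for those only the opcode byte itself is preserved: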
*/ atio->cdb_len = 1; printk("Reserved or VU command code type encountered\n"); break; } memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); atio->ccb_h.status |= CAM_CDB_RECVD; if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { /* * We weren't allowed to disconnect. * We're hanging on the bus until a * continue target I/O comes in response * to this accept tio. */ #if 0 printk("Received Immediate Command %d:%d:%d - %p\n", initiator, target, lun, ahc->pending_device); #endif ahc->pending_device = lstate; ahc_freeze_ccb((union ccb *)atio); atio->ccb_h.flags |= CAM_DIS_DISCONNECT; } xpt_done((union ccb*)atio); return (0); } #endif
gpl-2.0
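Editor's note: the ahc target-mode handler above (ahc_handle_target_cmd) derives the CDB length from the opcode's group code. A minimal standalone sketch of that decode follows; the CMD_GROUP_CODE_SHIFT value of 5 is taken from the switch in the driver, while cdb_len_for is an illustrative helper, not part of the driver.

#include <stdint.h>

/* Sketch of the CDB-length decode in ahc_handle_target_cmd(): the top
 * three bits of a SCSI opcode are its "group code", which fixes the
 * command descriptor block size for groups 0, 1, 2, 4 and 5. */
#define CMD_GROUP_CODE_SHIFT 5

static int cdb_len_for(uint8_t opcode)
{
	switch (opcode >> CMD_GROUP_CODE_SHIFT) {
	case 0:		return 6;	/* group 0: 6-byte CDBs */
	case 1:
	case 2:		return 10;	/* groups 1-2: 10-byte CDBs */
	case 4:		return 16;	/* group 4: 16-byte CDBs */
	case 5:		return 12;	/* group 5: 12-byte CDBs */
	default:	return 1;	/* reserved/vendor: copy opcode only */
	}
}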
q-li/linux-sunxi
drivers/ide/ide-lib.c
8688
4489
#include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/interrupt.h> #include <linux/ide.h> #include <linux/bitops.h> /** * ide_toggle_bounce - handle bounce buffering * @drive: drive to update * @on: on/off boolean * * Enable or disable bounce buffering for the device. Drives move * between PIO and DMA and that changes the rules we need. */ void ide_toggle_bounce(ide_drive_t *drive, int on) { u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */ if (!PCI_DMA_BUS_IS_PHYS) { addr = BLK_BOUNCE_ANY; } else if (on && drive->media == ide_disk) { struct device *dev = drive->hwif->dev; if (dev && dev->dma_mask) addr = *dev->dma_mask; } if (drive->queue) blk_queue_bounce_limit(drive->queue, addr); } u64 ide_get_lba_addr(struct ide_cmd *cmd, int lba48) { struct ide_taskfile *tf = &cmd->tf; u32 high, low; low = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal; if (lba48) { tf = &cmd->hob; high = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal; } else high = tf->device & 0xf; return ((u64)high << 24) | low; } EXPORT_SYMBOL_GPL(ide_get_lba_addr); static void ide_dump_sector(ide_drive_t *drive) { struct ide_cmd cmd; struct ide_taskfile *tf = &cmd.tf; u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48); memset(&cmd, 0, sizeof(cmd)); if (lba48) { cmd.valid.in.tf = IDE_VALID_LBA; cmd.valid.in.hob = IDE_VALID_LBA; cmd.tf_flags = IDE_TFLAG_LBA48; } else cmd.valid.in.tf = IDE_VALID_LBA | IDE_VALID_DEVICE; ide_tf_readback(drive, &cmd); if (lba48 || (tf->device & ATA_LBA)) printk(KERN_CONT ", LBAsect=%llu", (unsigned long long)ide_get_lba_addr(&cmd, lba48)); else printk(KERN_CONT ", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam, tf->device & 0xf, tf->lbal); } static void ide_dump_ata_error(ide_drive_t *drive, u8 err) { printk(KERN_CONT "{ "); if (err & ATA_ABORTED) printk(KERN_CONT "DriveStatusError "); if (err & ATA_ICRC) printk(KERN_CONT "%s", (err & ATA_ABORTED) ? "BadCRC " : "BadSector "); if (err & ATA_UNC) printk(KERN_CONT "UncorrectableError "); if (err & ATA_IDNF) printk(KERN_CONT "SectorIdNotFound "); if (err & ATA_TRK0NF) printk(KERN_CONT "TrackZeroNotFound "); if (err & ATA_AMNF) printk(KERN_CONT "AddrMarkNotFound "); printk(KERN_CONT "}"); if ((err & (ATA_BBK | ATA_ABORTED)) == ATA_BBK || (err & (ATA_UNC | ATA_IDNF | ATA_AMNF))) { struct request *rq = drive->hwif->rq; ide_dump_sector(drive); if (rq) printk(KERN_CONT ", sector=%llu", (unsigned long long)blk_rq_pos(rq)); } printk(KERN_CONT "\n"); } static void ide_dump_atapi_error(ide_drive_t *drive, u8 err) { printk(KERN_CONT "{ "); if (err & ATAPI_ILI) printk(KERN_CONT "IllegalLengthIndication "); if (err & ATAPI_EOM) printk(KERN_CONT "EndOfMedia "); if (err & ATA_ABORTED) printk(KERN_CONT "AbortedCommand "); if (err & ATA_MCR) printk(KERN_CONT "MediaChangeRequested "); if (err & ATAPI_LFS) printk(KERN_CONT "LastFailedSense=0x%02x ", (err & ATAPI_LFS) >> 4); printk(KERN_CONT "}\n"); } /** * ide_dump_status - translate ATA/ATAPI error * @drive: drive that status applies to * @msg: text message to print * @stat: status byte to decode * * Error reporting, in human readable form (luxurious, but a memory hog). * Combines the drive name, message and status byte to provide a * user understandable explanation of the device error. 
*/ u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat) { u8 err = 0; printk(KERN_ERR "%s: %s: status=0x%02x { ", drive->name, msg, stat); if (stat & ATA_BUSY) printk(KERN_CONT "Busy "); else { if (stat & ATA_DRDY) printk(KERN_CONT "DriveReady "); if (stat & ATA_DF) printk(KERN_CONT "DeviceFault "); if (stat & ATA_DSC) printk(KERN_CONT "SeekComplete "); if (stat & ATA_DRQ) printk(KERN_CONT "DataRequest "); if (stat & ATA_CORR) printk(KERN_CONT "CorrectedError "); if (stat & ATA_IDX) printk(KERN_CONT "Index "); if (stat & ATA_ERR) printk(KERN_CONT "Error "); } printk(KERN_CONT "}\n"); if ((stat & (ATA_BUSY | ATA_ERR)) == ATA_ERR) { err = ide_read_error(drive); printk(KERN_ERR "%s: %s: error=0x%02x ", drive->name, msg, err); if (drive->media == ide_disk) ide_dump_ata_error(drive, err); else ide_dump_atapi_error(drive, err); } printk(KERN_ERR "%s: possibly failed opcode: 0x%02x\n", drive->name, drive->hwif->cmd.tf.command); return err; } EXPORT_SYMBOL(ide_dump_status);
gpl-2.0
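Editor's note: ide_get_lba_addr() above reassembles a sector number from the taskfile registers — the low 24 bits come from lbal/lbam/lbah, and for LBA48 another 24 bits come from the HOB copies (for LBA28 the high part is just the low nibble of the device register). A minimal sketch of that packing arithmetic, with lba48_pack as a hypothetical standalone helper:

#include <stdint.h>

/* Pack the six LBA48 taskfile bytes into a 64-bit sector number,
 * mirroring the arithmetic in ide_get_lba_addr(). */
static uint64_t lba48_pack(uint8_t lbal, uint8_t lbam, uint8_t lbah,
			   uint8_t hob_lbal, uint8_t hob_lbam,
			   uint8_t hob_lbah)
{
	uint32_t low  = ((uint32_t)lbah << 16) |
			((uint32_t)lbam << 8) | lbal;
	uint32_t high = ((uint32_t)hob_lbah << 16) |
			((uint32_t)hob_lbam << 8) | hob_lbal;

	/* the high half sits above bit 23; for LBA28 it would instead
	 * be (device & 0xf), giving bits 24..27 */
	return ((uint64_t)high << 24) | low;
}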
ztemt/NX508J_kernel
arch/x86/kernel/test_nx.c
12016
4673
/* * test_nx.c: functional test for NX functionality * * (C) Copyright 2008 Intel Corporation * Author: Arjan van de Ven <arjan@linux.intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. */ #include <linux/module.h> #include <linux/sort.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/asm.h> extern int rodata_test_data; /* * This file checks 4 things: * 1) Check if the stack is not executable * 2) Check if kmalloc memory is not executable * 3) Check if the .rodata section is not executable * 4) Check if the .data section of a module is not executable * * To do this, the test code tries to execute memory in stack/kmalloc/etc, * and then checks if the expected trap happens. * * Sadly, this implies having a dynamic exception handling table entry. * ... which can be done (and will make Rusty cry)... but it can only * be done in a stand-alone module with only 1 entry total. * (otherwise we'd have to sort and that's just too messy) */ /* * We want to set up an exception handling point on our stack, * which means a variable value. This function is rather dirty * and walks the exception table of the module, looking for a magic * marker and replaces it with a specific function. */ static void fudze_exception_table(void *marker, void *new) { struct module *mod = THIS_MODULE; struct exception_table_entry *extable; /* * Note: This module has only 1 exception table entry, * so searching and sorting is not needed. If that changes, * this would be the place to search and re-sort the exception * table. */ if (mod->num_exentries > 1) { printk(KERN_ERR "test_nx: too many exception table entries!\n"); printk(KERN_ERR "test_nx: test results are not reliable.\n"); return; } extable = (struct exception_table_entry *)mod->extable; extable[0].insn = (unsigned long)new; } /* * exception tables get their symbols translated so we need * to use a fake function to put in there, which we can then * replace at runtime. */ void foo_label(void); /* * returns 0 for not-executable, negative for executable * * Note: we cannot allow this function to be inlined, because * that would give us more than 1 exception table entry. * This in turn would break the assumptions above. 
*/ static noinline int test_address(void *address) { unsigned long result; /* Set up an exception table entry for our address */ fudze_exception_table(&foo_label, address); result = 1; asm volatile( "foo_label:\n" "0: call *%[fake_code]\n" "1:\n" ".section .fixup,\"ax\"\n" "2: mov %[zero], %[rslt]\n" " ret\n" ".previous\n" _ASM_EXTABLE(0b,2b) : [rslt] "=r" (result) : [fake_code] "r" (address), [zero] "r" (0UL), "0" (result) ); /* change the exception table back for the next round */ fudze_exception_table(address, &foo_label); if (result) return -ENODEV; return 0; } static unsigned char test_data = 0xC3; /* 0xC3 is the opcode for "ret" */ static int test_NX(void) { int ret = 0; /* 0xC3 is the opcode for "ret" */ char stackcode[] = {0xC3, 0x90, 0 }; char *heap; test_data = 0xC3; printk(KERN_INFO "Testing NX protection\n"); /* Test 1: check if the stack is not executable */ if (test_address(&stackcode)) { printk(KERN_ERR "test_nx: stack was executable\n"); ret = -ENODEV; } /* Test 2: Check if the heap is executable */ heap = kmalloc(64, GFP_KERNEL); if (!heap) return -ENOMEM; heap[0] = 0xC3; /* opcode for "ret" */ if (test_address(heap)) { printk(KERN_ERR "test_nx: heap was executable\n"); ret = -ENODEV; } kfree(heap); /* * The following 2 tests currently fail, this needs to get fixed * Until then, don't run them to avoid too many people getting scared * by the error message */ #ifdef CONFIG_DEBUG_RODATA /* Test 3: Check if the .rodata section is executable */ if (rodata_test_data != 0xC3) { printk(KERN_ERR "test_nx: .rodata marker has invalid value\n"); ret = -ENODEV; } else if (test_address(&rodata_test_data)) { printk(KERN_ERR "test_nx: .rodata section is executable\n"); ret = -ENODEV; } #endif #if 0 /* Test 4: Check if the .data section of a module is executable */ if (test_address(&test_data)) { printk(KERN_ERR "test_nx: .data section is executable\n"); ret = -ENODEV; } #endif return ret; } static void test_exit(void) { } module_init(test_NX); module_exit(test_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Testcase for the NX infrastructure"); MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
gpl-2.0
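Editor's note: the module above tests NX by writing a 0xC3 "ret" opcode into memory and calling it, expecting the trap to fire. A rough userspace analogue of the same experiment — not the module's mechanism, which patches its own exception table — can be sketched on Linux/x86 with mmap/mprotect and a SIGSEGV handler:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>

static sigjmp_buf env;

static void on_segv(int sig)
{
	(void)sig;
	siglongjmp(env, 1);	/* unwind out of the faulting call */
}

/* Returns 1 if the byte at 'p' executed, 0 if the call faulted (NX). */
static int try_exec(void *p)
{
	void (*fn)(void) = (void (*)(void))p;

	if (sigsetjmp(env, 1))
		return 0;
	fn();
	return 1;
}

int main(void)
{
	unsigned char *buf;

	signal(SIGSEGV, on_segv);
	buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	buf[0] = 0xC3;	/* the same "ret" opcode the module uses */
	printf("rw  page ran: %d (expect 0 with NX)\n", try_exec(buf));
	mprotect(buf, 4096, PROT_READ | PROT_WRITE | PROT_EXEC);
	printf("rwx page ran: %d (expect 1)\n", try_exec(buf));
	return 0;
}

Longjmp'ing out of a SIGSEGV handler is only safe enough for a throwaway demo like this; the kernel test instead installs a fixup in its exception table so the fault is handled in place.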
OnePlusTech/android_kernel_oneplus_one
sound/pci/echoaudio/indigoiox_dsp.c
12528
2197
/************************************************************************

This file is part of Echo Digital Audio's generic driver library.
Copyright Echo Digital Audio Corporation (c) 1998 - 2005
All rights reserved
www.echoaudio.com

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

*************************************************************************

 Translation from C++ and adaptation for use in ALSA-Driver
 were made by Giuliano Pochini <pochini@shiny.it>

*************************************************************************/

static int update_vmixer_level(struct echoaudio *chip);
static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe,
			   int gain);

static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
	int err;

	DE_INIT(("init_hw() - Indigo IOx\n"));
	if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO_IOX))
		return -ENODEV;

	err = init_dsp_comm_page(chip);
	if (err < 0) {
		DE_INIT(("init_hw - could not initialize DSP comm page\n"));
		return err;
	}

	chip->device_id = device_id;
	chip->subdevice_id = subdevice_id;
	chip->bad_board = TRUE;
	chip->dsp_code_to_load = FW_INDIGO_IOX_DSP;
	/* Since this card has no ASIC, mark it as loaded so everything
	   works OK */
	chip->asic_loaded = TRUE;
	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;

	err = load_firmware(chip);
	if (err < 0)
		return err;
	chip->bad_board = FALSE;

	DE_INIT(("init_hw done\n"));
	return err;
}

static int set_mixer_defaults(struct echoaudio *chip)
{
	return init_line_levels(chip);
}
gpl-2.0
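Editor's note: init_hw() above accepts any board whose subdevice ID matches INDIGO_IOX once the low nibble — the board revision — is masked off. A tiny sketch of that family match; the 0x00a0 value is made up for illustration, the real constant lives in the echoaudio headers:

#include <stdint.h>

#define INDIGO_IOX 0x00a0	/* hypothetical family value, for illustration */

/* Match a whole device family: the low nibble of the subdevice ID is a
 * board revision, so mask it off before comparing, as init_hw() does. */
static int is_indigo_iox(uint16_t subdevice_id)
{
	return (subdevice_id & 0xfff0) == INDIGO_IOX;
}
/* e.g. revisions 0x00a0..0x00af all match; 0x00b0 does not */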
droidfivex/kernel_lge_msm8974
fs/efs/symlink.c
14064
1178
/*
 * symlink.c
 *
 * Copyright (c) 1999 Al Smith
 *
 * Portions derived from work (c) 1995,1996 Christian Vogelgsang.
 */

#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include "efs.h"

static int efs_symlink_readpage(struct file *file, struct page *page)
{
	char *link = kmap(page);
	struct buffer_head *bh;
	struct inode *inode = page->mapping->host;
	efs_block_t size = inode->i_size;
	int err;

	err = -ENAMETOOLONG;
	if (size > 2 * EFS_BLOCKSIZE)
		goto fail;

	/* read first 512 bytes of link target */
	err = -EIO;
	bh = sb_bread(inode->i_sb, efs_bmap(inode, 0));
	if (!bh)
		goto fail;
	memcpy(link, bh->b_data, (size > EFS_BLOCKSIZE) ? EFS_BLOCKSIZE : size);
	brelse(bh);
	if (size > EFS_BLOCKSIZE) {
		bh = sb_bread(inode->i_sb, efs_bmap(inode, 1));
		if (!bh)
			goto fail;
		memcpy(link + EFS_BLOCKSIZE, bh->b_data, size - EFS_BLOCKSIZE);
		brelse(bh);
	}
	link[size] = '\0';
	SetPageUptodate(page);
	kunmap(page);
	unlock_page(page);
	return 0;
fail:
	SetPageError(page);
	kunmap(page);
	unlock_page(page);
	return err;
}

const struct address_space_operations efs_symlink_aops = {
	.readpage = efs_symlink_readpage
};
gpl-2.0
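Editor's note: efs_symlink_readpage() above splits the copy of the link target across at most two filesystem blocks. Here is the split arithmetic in isolation; copy_link is an illustrative helper, and the EFS_BLOCKSIZE of 512 matches the "first 512 bytes" comment in the code:

#include <stddef.h>
#include <string.h>

#define EFS_BLOCKSIZE 512	/* assumed, per the comment in the readpage */

/* A link target of 'size' bytes spans at most two blocks: copy
 * min(size, EFS_BLOCKSIZE) from the first block and any remainder
 * from the second, then NUL-terminate, as the readpage does.
 * 'dst' must have room for size + 1 bytes. */
static void copy_link(char *dst, const char *blk0, const char *blk1,
		      size_t size)
{
	size_t first = size > EFS_BLOCKSIZE ? EFS_BLOCKSIZE : size;

	memcpy(dst, blk0, first);
	if (size > EFS_BLOCKSIZE)
		memcpy(dst + EFS_BLOCKSIZE, blk1, size - EFS_BLOCKSIZE);
	dst[size] = '\0';
}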
ShinySide/G531M
drivers/pinctrl/pinctrl-nomadik.c
241
56828
/* * Generic GPIO driver for logic cells found in the Nomadik SoC * * Copyright (C) 2008,2009 STMicroelectronics * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it> * Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com> * Copyright (C) 2011-2013 Linus Walleij <linus.walleij@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/irqchip/chained_irq.h> #include <linux/slab.h> #include <linux/of_device.h> #include <linux/of_address.h> #include <linux/pinctrl/machine.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinmux.h> #include <linux/pinctrl/pinconf.h> /* Since we request GPIOs from ourself */ #include <linux/pinctrl/consumer.h> #include "pinctrl-nomadik.h" #include "core.h" /* * The GPIO module in the Nomadik family of Systems-on-Chip is an * AMBA device, managing 32 pins and alternate functions. The logic block * is currently used in the Nomadik and ux500. * * Symbols in this file are called "nmk_gpio" for "nomadik gpio" */ /* * pin configurations are represented by 32-bit integers: * * bit 0.. 8 - Pin Number (512 Pins Maximum) * bit 9..10 - Alternate Function Selection * bit 11..12 - Pull up/down state * bit 13 - Sleep mode behaviour * bit 14 - Direction * bit 15 - Value (if output) * bit 16..18 - SLPM pull up/down state * bit 19..20 - SLPM direction * bit 21..22 - SLPM Value (if output) * bit 23..25 - PDIS value (if input) * bit 26 - Gpio mode * bit 27 - Sleep mode * * to facilitate the definition, the following macros are provided * * PIN_CFG_DEFAULT - default config (0): * pull up/down = disabled * sleep mode = input/wakeup * direction = input * value = low * SLPM direction = same as normal * SLPM pull = same as normal * SLPM value = same as normal * * PIN_CFG - default config with alternate function */ typedef unsigned long pin_cfg_t; #define PIN_NUM_MASK 0x1ff #define PIN_NUM(x) ((x) & PIN_NUM_MASK) #define PIN_ALT_SHIFT 9 #define PIN_ALT_MASK (0x3 << PIN_ALT_SHIFT) #define PIN_ALT(x) (((x) & PIN_ALT_MASK) >> PIN_ALT_SHIFT) #define PIN_GPIO (NMK_GPIO_ALT_GPIO << PIN_ALT_SHIFT) #define PIN_ALT_A (NMK_GPIO_ALT_A << PIN_ALT_SHIFT) #define PIN_ALT_B (NMK_GPIO_ALT_B << PIN_ALT_SHIFT) #define PIN_ALT_C (NMK_GPIO_ALT_C << PIN_ALT_SHIFT) #define PIN_PULL_SHIFT 11 #define PIN_PULL_MASK (0x3 << PIN_PULL_SHIFT) #define PIN_PULL(x) (((x) & PIN_PULL_MASK) >> PIN_PULL_SHIFT) #define PIN_PULL_NONE (NMK_GPIO_PULL_NONE << PIN_PULL_SHIFT) #define PIN_PULL_UP (NMK_GPIO_PULL_UP << PIN_PULL_SHIFT) #define PIN_PULL_DOWN (NMK_GPIO_PULL_DOWN << PIN_PULL_SHIFT) #define PIN_SLPM_SHIFT 13 #define PIN_SLPM_MASK (0x1 << PIN_SLPM_SHIFT) #define PIN_SLPM(x) (((x) & PIN_SLPM_MASK) >> PIN_SLPM_SHIFT) #define PIN_SLPM_MAKE_INPUT (NMK_GPIO_SLPM_INPUT << PIN_SLPM_SHIFT) #define PIN_SLPM_NOCHANGE (NMK_GPIO_SLPM_NOCHANGE << PIN_SLPM_SHIFT) /* These two replace the above in DB8500v2+ */ #define PIN_SLPM_WAKEUP_ENABLE (NMK_GPIO_SLPM_WAKEUP_ENABLE << PIN_SLPM_SHIFT) #define PIN_SLPM_WAKEUP_DISABLE (NMK_GPIO_SLPM_WAKEUP_DISABLE << PIN_SLPM_SHIFT) #define PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP 
PIN_SLPM_WAKEUP_DISABLE #define PIN_SLPM_GPIO PIN_SLPM_WAKEUP_ENABLE /* In SLPM, pin is a gpio */ #define PIN_SLPM_ALTFUNC PIN_SLPM_WAKEUP_DISABLE /* In SLPM, pin is altfunc */ #define PIN_DIR_SHIFT 14 #define PIN_DIR_MASK (0x1 << PIN_DIR_SHIFT) #define PIN_DIR(x) (((x) & PIN_DIR_MASK) >> PIN_DIR_SHIFT) #define PIN_DIR_INPUT (0 << PIN_DIR_SHIFT) #define PIN_DIR_OUTPUT (1 << PIN_DIR_SHIFT) #define PIN_VAL_SHIFT 15 #define PIN_VAL_MASK (0x1 << PIN_VAL_SHIFT) #define PIN_VAL(x) (((x) & PIN_VAL_MASK) >> PIN_VAL_SHIFT) #define PIN_VAL_LOW (0 << PIN_VAL_SHIFT) #define PIN_VAL_HIGH (1 << PIN_VAL_SHIFT) #define PIN_SLPM_PULL_SHIFT 16 #define PIN_SLPM_PULL_MASK (0x7 << PIN_SLPM_PULL_SHIFT) #define PIN_SLPM_PULL(x) \ (((x) & PIN_SLPM_PULL_MASK) >> PIN_SLPM_PULL_SHIFT) #define PIN_SLPM_PULL_NONE \ ((1 + NMK_GPIO_PULL_NONE) << PIN_SLPM_PULL_SHIFT) #define PIN_SLPM_PULL_UP \ ((1 + NMK_GPIO_PULL_UP) << PIN_SLPM_PULL_SHIFT) #define PIN_SLPM_PULL_DOWN \ ((1 + NMK_GPIO_PULL_DOWN) << PIN_SLPM_PULL_SHIFT) #define PIN_SLPM_DIR_SHIFT 19 #define PIN_SLPM_DIR_MASK (0x3 << PIN_SLPM_DIR_SHIFT) #define PIN_SLPM_DIR(x) \ (((x) & PIN_SLPM_DIR_MASK) >> PIN_SLPM_DIR_SHIFT) #define PIN_SLPM_DIR_INPUT ((1 + 0) << PIN_SLPM_DIR_SHIFT) #define PIN_SLPM_DIR_OUTPUT ((1 + 1) << PIN_SLPM_DIR_SHIFT) #define PIN_SLPM_VAL_SHIFT 21 #define PIN_SLPM_VAL_MASK (0x3 << PIN_SLPM_VAL_SHIFT) #define PIN_SLPM_VAL(x) \ (((x) & PIN_SLPM_VAL_MASK) >> PIN_SLPM_VAL_SHIFT) #define PIN_SLPM_VAL_LOW ((1 + 0) << PIN_SLPM_VAL_SHIFT) #define PIN_SLPM_VAL_HIGH ((1 + 1) << PIN_SLPM_VAL_SHIFT) #define PIN_SLPM_PDIS_SHIFT 23 #define PIN_SLPM_PDIS_MASK (0x3 << PIN_SLPM_PDIS_SHIFT) #define PIN_SLPM_PDIS(x) \ (((x) & PIN_SLPM_PDIS_MASK) >> PIN_SLPM_PDIS_SHIFT) #define PIN_SLPM_PDIS_NO_CHANGE (0 << PIN_SLPM_PDIS_SHIFT) #define PIN_SLPM_PDIS_DISABLED (1 << PIN_SLPM_PDIS_SHIFT) #define PIN_SLPM_PDIS_ENABLED (2 << PIN_SLPM_PDIS_SHIFT) #define PIN_LOWEMI_SHIFT 25 #define PIN_LOWEMI_MASK (0x1 << PIN_LOWEMI_SHIFT) #define PIN_LOWEMI(x) (((x) & PIN_LOWEMI_MASK) >> PIN_LOWEMI_SHIFT) #define PIN_LOWEMI_DISABLED (0 << PIN_LOWEMI_SHIFT) #define PIN_LOWEMI_ENABLED (1 << PIN_LOWEMI_SHIFT) #define PIN_GPIOMODE_SHIFT 26 #define PIN_GPIOMODE_MASK (0x1 << PIN_GPIOMODE_SHIFT) #define PIN_GPIOMODE(x) (((x) & PIN_GPIOMODE_MASK) >> PIN_GPIOMODE_SHIFT) #define PIN_GPIOMODE_DISABLED (0 << PIN_GPIOMODE_SHIFT) #define PIN_GPIOMODE_ENABLED (1 << PIN_GPIOMODE_SHIFT) #define PIN_SLEEPMODE_SHIFT 27 #define PIN_SLEEPMODE_MASK (0x1 << PIN_SLEEPMODE_SHIFT) #define PIN_SLEEPMODE(x) (((x) & PIN_SLEEPMODE_MASK) >> PIN_SLEEPMODE_SHIFT) #define PIN_SLEEPMODE_DISABLED (0 << PIN_SLEEPMODE_SHIFT) #define PIN_SLEEPMODE_ENABLED (1 << PIN_SLEEPMODE_SHIFT) /* Shortcuts. Use these instead of separate DIR, PULL, and VAL. 
*/ #define PIN_INPUT_PULLDOWN (PIN_DIR_INPUT | PIN_PULL_DOWN) #define PIN_INPUT_PULLUP (PIN_DIR_INPUT | PIN_PULL_UP) #define PIN_INPUT_NOPULL (PIN_DIR_INPUT | PIN_PULL_NONE) #define PIN_OUTPUT_LOW (PIN_DIR_OUTPUT | PIN_VAL_LOW) #define PIN_OUTPUT_HIGH (PIN_DIR_OUTPUT | PIN_VAL_HIGH) #define PIN_SLPM_INPUT_PULLDOWN (PIN_SLPM_DIR_INPUT | PIN_SLPM_PULL_DOWN) #define PIN_SLPM_INPUT_PULLUP (PIN_SLPM_DIR_INPUT | PIN_SLPM_PULL_UP) #define PIN_SLPM_INPUT_NOPULL (PIN_SLPM_DIR_INPUT | PIN_SLPM_PULL_NONE) #define PIN_SLPM_OUTPUT_LOW (PIN_SLPM_DIR_OUTPUT | PIN_SLPM_VAL_LOW) #define PIN_SLPM_OUTPUT_HIGH (PIN_SLPM_DIR_OUTPUT | PIN_SLPM_VAL_HIGH) #define PIN_CFG_DEFAULT (0) #define PIN_CFG(num, alt) \ (PIN_CFG_DEFAULT |\ (PIN_NUM(num) | PIN_##alt)) #define PIN_CFG_INPUT(num, alt, pull) \ (PIN_CFG_DEFAULT |\ (PIN_NUM(num) | PIN_##alt | PIN_INPUT_##pull)) #define PIN_CFG_OUTPUT(num, alt, val) \ (PIN_CFG_DEFAULT |\ (PIN_NUM(num) | PIN_##alt | PIN_OUTPUT_##val)) /* * "nmk_gpio" and "NMK_GPIO" stand for "Nomadik GPIO", leaving * the "gpio" namespace for generic and cross-machine functions */ #define GPIO_BLOCK_SHIFT 5 #define NMK_GPIO_PER_CHIP (1 << GPIO_BLOCK_SHIFT) /* Register in the logic block */ #define NMK_GPIO_DAT 0x00 #define NMK_GPIO_DATS 0x04 #define NMK_GPIO_DATC 0x08 #define NMK_GPIO_PDIS 0x0c #define NMK_GPIO_DIR 0x10 #define NMK_GPIO_DIRS 0x14 #define NMK_GPIO_DIRC 0x18 #define NMK_GPIO_SLPC 0x1c #define NMK_GPIO_AFSLA 0x20 #define NMK_GPIO_AFSLB 0x24 #define NMK_GPIO_LOWEMI 0x28 #define NMK_GPIO_RIMSC 0x40 #define NMK_GPIO_FIMSC 0x44 #define NMK_GPIO_IS 0x48 #define NMK_GPIO_IC 0x4c #define NMK_GPIO_RWIMSC 0x50 #define NMK_GPIO_FWIMSC 0x54 #define NMK_GPIO_WKS 0x58 /* These appear in DB8540 and later ASICs */ #define NMK_GPIO_EDGELEVEL 0x5C #define NMK_GPIO_LEVEL 0x60 /* Pull up/down values */ enum nmk_gpio_pull { NMK_GPIO_PULL_NONE, NMK_GPIO_PULL_UP, NMK_GPIO_PULL_DOWN, }; /* Sleep mode */ enum nmk_gpio_slpm { NMK_GPIO_SLPM_INPUT, NMK_GPIO_SLPM_WAKEUP_ENABLE = NMK_GPIO_SLPM_INPUT, NMK_GPIO_SLPM_NOCHANGE, NMK_GPIO_SLPM_WAKEUP_DISABLE = NMK_GPIO_SLPM_NOCHANGE, }; /* * Platform data to register a block: only the initial gpio/irq number. 
*/ struct nmk_gpio_platform_data { char *name; int first_gpio; int first_irq; int num_gpio; u32 (*get_secondary_status)(unsigned int bank); void (*set_ioforce)(bool enable); bool supports_sleepmode; }; struct nmk_gpio_chip { struct gpio_chip chip; struct irq_domain *domain; void __iomem *addr; struct clk *clk; unsigned int bank; unsigned int parent_irq; int secondary_parent_irq; u32 (*get_secondary_status)(unsigned int bank); void (*set_ioforce)(bool enable); spinlock_t lock; bool sleepmode; /* Keep track of configured edges */ u32 edge_rising; u32 edge_falling; u32 real_wake; u32 rwimsc; u32 fwimsc; u32 rimsc; u32 fimsc; u32 pull_up; u32 lowemi; }; /** * struct nmk_pinctrl - state container for the Nomadik pin controller * @dev: containing device pointer * @pctl: corresponding pin controller device * @soc: SoC data for this specific chip * @prcm_base: PRCM register range virtual base */ struct nmk_pinctrl { struct device *dev; struct pinctrl_dev *pctl; const struct nmk_pinctrl_soc_data *soc; void __iomem *prcm_base; }; static struct nmk_gpio_chip * nmk_gpio_chips[DIV_ROUND_UP(ARCH_NR_GPIOS, NMK_GPIO_PER_CHIP)]; static DEFINE_SPINLOCK(nmk_gpio_slpm_lock); #define NUM_BANKS ARRAY_SIZE(nmk_gpio_chips) static void __nmk_gpio_set_mode(struct nmk_gpio_chip *nmk_chip, unsigned offset, int gpio_mode) { u32 bit = 1 << offset; u32 afunc, bfunc; afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & ~bit; bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & ~bit; if (gpio_mode & NMK_GPIO_ALT_A) afunc |= bit; if (gpio_mode & NMK_GPIO_ALT_B) bfunc |= bit; writel(afunc, nmk_chip->addr + NMK_GPIO_AFSLA); writel(bfunc, nmk_chip->addr + NMK_GPIO_AFSLB); } static void __nmk_gpio_set_slpm(struct nmk_gpio_chip *nmk_chip, unsigned offset, enum nmk_gpio_slpm mode) { u32 bit = 1 << offset; u32 slpm; slpm = readl(nmk_chip->addr + NMK_GPIO_SLPC); if (mode == NMK_GPIO_SLPM_NOCHANGE) slpm |= bit; else slpm &= ~bit; writel(slpm, nmk_chip->addr + NMK_GPIO_SLPC); } static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip, unsigned offset, enum nmk_gpio_pull pull) { u32 bit = 1 << offset; u32 pdis; pdis = readl(nmk_chip->addr + NMK_GPIO_PDIS); if (pull == NMK_GPIO_PULL_NONE) { pdis |= bit; nmk_chip->pull_up &= ~bit; } else { pdis &= ~bit; } writel(pdis, nmk_chip->addr + NMK_GPIO_PDIS); if (pull == NMK_GPIO_PULL_UP) { nmk_chip->pull_up |= bit; writel(bit, nmk_chip->addr + NMK_GPIO_DATS); } else if (pull == NMK_GPIO_PULL_DOWN) { nmk_chip->pull_up &= ~bit; writel(bit, nmk_chip->addr + NMK_GPIO_DATC); } } static void __nmk_gpio_set_lowemi(struct nmk_gpio_chip *nmk_chip, unsigned offset, bool lowemi) { u32 bit = BIT(offset); bool enabled = nmk_chip->lowemi & bit; if (lowemi == enabled) return; if (lowemi) nmk_chip->lowemi |= bit; else nmk_chip->lowemi &= ~bit; writel_relaxed(nmk_chip->lowemi, nmk_chip->addr + NMK_GPIO_LOWEMI); } static void __nmk_gpio_make_input(struct nmk_gpio_chip *nmk_chip, unsigned offset) { writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC); } static void __nmk_gpio_set_output(struct nmk_gpio_chip *nmk_chip, unsigned offset, int val) { if (val) writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATS); else writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATC); } static void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip, unsigned offset, int val) { writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRS); __nmk_gpio_set_output(nmk_chip, offset, val); } static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip, unsigned offset, int gpio_mode, bool glitch) { u32 rwimsc = nmk_chip->rwimsc; u32 fwimsc = 
nmk_chip->fwimsc; if (glitch && nmk_chip->set_ioforce) { u32 bit = BIT(offset); /* Prevent spurious wakeups */ writel(rwimsc & ~bit, nmk_chip->addr + NMK_GPIO_RWIMSC); writel(fwimsc & ~bit, nmk_chip->addr + NMK_GPIO_FWIMSC); nmk_chip->set_ioforce(true); } __nmk_gpio_set_mode(nmk_chip, offset, gpio_mode); if (glitch && nmk_chip->set_ioforce) { nmk_chip->set_ioforce(false); writel(rwimsc, nmk_chip->addr + NMK_GPIO_RWIMSC); writel(fwimsc, nmk_chip->addr + NMK_GPIO_FWIMSC); } } static void nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned offset) { u32 falling = nmk_chip->fimsc & BIT(offset); u32 rising = nmk_chip->rimsc & BIT(offset); int gpio = nmk_chip->chip.base + offset; int irq = irq_find_mapping(nmk_chip->domain, offset); struct irq_data *d = irq_get_irq_data(irq); if (!rising && !falling) return; if (!d || !irqd_irq_disabled(d)) return; if (rising) { nmk_chip->rimsc &= ~BIT(offset); writel_relaxed(nmk_chip->rimsc, nmk_chip->addr + NMK_GPIO_RIMSC); } if (falling) { nmk_chip->fimsc &= ~BIT(offset); writel_relaxed(nmk_chip->fimsc, nmk_chip->addr + NMK_GPIO_FIMSC); } dev_dbg(nmk_chip->chip.dev, "%d: clearing interrupt mask\n", gpio); } static void nmk_write_masked(void __iomem *reg, u32 mask, u32 value) { u32 val; val = readl(reg); val = ((val & ~mask) | (value & mask)); writel(val, reg); } static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct, unsigned offset, unsigned alt_num) { int i; u16 reg; u8 bit; u8 alt_index; const struct prcm_gpiocr_altcx_pin_desc *pin_desc; const u16 *gpiocr_regs; if (!npct->prcm_base) return; if (alt_num > PRCM_IDX_GPIOCR_ALTC_MAX) { dev_err(npct->dev, "PRCM GPIOCR: alternate-C%i is invalid\n", alt_num); return; } for (i = 0 ; i < npct->soc->npins_altcx ; i++) { if (npct->soc->altcx_pins[i].pin == offset) break; } if (i == npct->soc->npins_altcx) { dev_dbg(npct->dev, "PRCM GPIOCR: pin %i is not found\n", offset); return; } pin_desc = npct->soc->altcx_pins + i; gpiocr_regs = npct->soc->prcm_gpiocr_registers; /* * If alt_num is NULL, just clear current ALTCx selection * to make sure we come back to a pure ALTC selection */ if (!alt_num) { for (i = 0 ; i < PRCM_IDX_GPIOCR_ALTC_MAX ; i++) { if (pin_desc->altcx[i].used == true) { reg = gpiocr_regs[pin_desc->altcx[i].reg_index]; bit = pin_desc->altcx[i].control_bit; if (readl(npct->prcm_base + reg) & BIT(bit)) { nmk_write_masked(npct->prcm_base + reg, BIT(bit), 0); dev_dbg(npct->dev, "PRCM GPIOCR: pin %i: alternate-C%i has been disabled\n", offset, i+1); } } } return; } alt_index = alt_num - 1; if (pin_desc->altcx[alt_index].used == false) { dev_warn(npct->dev, "PRCM GPIOCR: pin %i: alternate-C%i does not exist\n", offset, alt_num); return; } /* * Check if any other ALTCx functions are activated on this pin * and disable it first. 
*/ for (i = 0 ; i < PRCM_IDX_GPIOCR_ALTC_MAX ; i++) { if (i == alt_index) continue; if (pin_desc->altcx[i].used == true) { reg = gpiocr_regs[pin_desc->altcx[i].reg_index]; bit = pin_desc->altcx[i].control_bit; if (readl(npct->prcm_base + reg) & BIT(bit)) { nmk_write_masked(npct->prcm_base + reg, BIT(bit), 0); dev_dbg(npct->dev, "PRCM GPIOCR: pin %i: alternate-C%i has been disabled\n", offset, i+1); } } } reg = gpiocr_regs[pin_desc->altcx[alt_index].reg_index]; bit = pin_desc->altcx[alt_index].control_bit; dev_dbg(npct->dev, "PRCM GPIOCR: pin %i: alternate-C%i has been selected\n", offset, alt_index+1); nmk_write_masked(npct->prcm_base + reg, BIT(bit), BIT(bit)); } /* * Safe sequence used to switch IOs between GPIO and Alternate-C mode: * - Save SLPM registers * - Set SLPM=0 for the IOs you want to switch and others to 1 * - Configure the GPIO registers for the IOs that are being switched * - Set IOFORCE=1 * - Modify the AFLSA/B registers for the IOs that are being switched * - Set IOFORCE=0 * - Restore SLPM registers * - Any spurious wake up event during switch sequence to be ignored and * cleared */ static void nmk_gpio_glitch_slpm_init(unsigned int *slpm) { int i; for (i = 0; i < NUM_BANKS; i++) { struct nmk_gpio_chip *chip = nmk_gpio_chips[i]; unsigned int temp = slpm[i]; if (!chip) break; clk_enable(chip->clk); slpm[i] = readl(chip->addr + NMK_GPIO_SLPC); writel(temp, chip->addr + NMK_GPIO_SLPC); } } static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm) { int i; for (i = 0; i < NUM_BANKS; i++) { struct nmk_gpio_chip *chip = nmk_gpio_chips[i]; if (!chip) break; writel(slpm[i], chip->addr + NMK_GPIO_SLPC); clk_disable(chip->clk); } } static int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio) { int i; u16 reg; u8 bit; struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); const struct prcm_gpiocr_altcx_pin_desc *pin_desc; const u16 *gpiocr_regs; if (!npct->prcm_base) return NMK_GPIO_ALT_C; for (i = 0; i < npct->soc->npins_altcx; i++) { if (npct->soc->altcx_pins[i].pin == gpio) break; } if (i == npct->soc->npins_altcx) return NMK_GPIO_ALT_C; pin_desc = npct->soc->altcx_pins + i; gpiocr_regs = npct->soc->prcm_gpiocr_registers; for (i = 0; i < PRCM_IDX_GPIOCR_ALTC_MAX; i++) { if (pin_desc->altcx[i].used == true) { reg = gpiocr_regs[pin_desc->altcx[i].reg_index]; bit = pin_desc->altcx[i].control_bit; if (readl(npct->prcm_base + reg) & BIT(bit)) return NMK_GPIO_ALT_C+i+1; } } return NMK_GPIO_ALT_C; } int nmk_gpio_get_mode(int gpio) { struct nmk_gpio_chip *nmk_chip; u32 afunc, bfunc, bit; nmk_chip = nmk_gpio_chips[gpio / NMK_GPIO_PER_CHIP]; if (!nmk_chip) return -EINVAL; bit = 1 << (gpio % NMK_GPIO_PER_CHIP); clk_enable(nmk_chip->clk); afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & bit; bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & bit; clk_disable(nmk_chip->clk); return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? 
NMK_GPIO_ALT_B : 0); } EXPORT_SYMBOL(nmk_gpio_get_mode); /* IRQ functions */ static inline int nmk_gpio_get_bitmask(int gpio) { return 1 << (gpio % NMK_GPIO_PER_CHIP); } static void nmk_gpio_irq_ack(struct irq_data *d) { struct nmk_gpio_chip *nmk_chip; nmk_chip = irq_data_get_irq_chip_data(d); if (!nmk_chip) return; clk_enable(nmk_chip->clk); writel(nmk_gpio_get_bitmask(d->hwirq), nmk_chip->addr + NMK_GPIO_IC); clk_disable(nmk_chip->clk); } enum nmk_gpio_irq_type { NORMAL, WAKE, }; static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip, int gpio, enum nmk_gpio_irq_type which, bool enable) { u32 bitmask = nmk_gpio_get_bitmask(gpio); u32 *rimscval; u32 *fimscval; u32 rimscreg; u32 fimscreg; if (which == NORMAL) { rimscreg = NMK_GPIO_RIMSC; fimscreg = NMK_GPIO_FIMSC; rimscval = &nmk_chip->rimsc; fimscval = &nmk_chip->fimsc; } else { rimscreg = NMK_GPIO_RWIMSC; fimscreg = NMK_GPIO_FWIMSC; rimscval = &nmk_chip->rwimsc; fimscval = &nmk_chip->fwimsc; } /* we must individually set/clear the two edges */ if (nmk_chip->edge_rising & bitmask) { if (enable) *rimscval |= bitmask; else *rimscval &= ~bitmask; writel(*rimscval, nmk_chip->addr + rimscreg); } if (nmk_chip->edge_falling & bitmask) { if (enable) *fimscval |= bitmask; else *fimscval &= ~bitmask; writel(*fimscval, nmk_chip->addr + fimscreg); } } static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip, int gpio, bool on) { /* * Ensure WAKEUP_ENABLE is on. No need to disable it if wakeup is * disabled, since setting SLPM to 1 increases power consumption, and * wakeup is anyhow controlled by the RIMSC and FIMSC registers. */ if (nmk_chip->sleepmode && on) { __nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP, NMK_GPIO_SLPM_WAKEUP_ENABLE); } __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on); } static int nmk_gpio_irq_maskunmask(struct irq_data *d, bool enable) { struct nmk_gpio_chip *nmk_chip; unsigned long flags; u32 bitmask; nmk_chip = irq_data_get_irq_chip_data(d); bitmask = nmk_gpio_get_bitmask(d->hwirq); if (!nmk_chip) return -EINVAL; clk_enable(nmk_chip->clk); spin_lock_irqsave(&nmk_gpio_slpm_lock, flags); spin_lock(&nmk_chip->lock); __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, enable); if (!(nmk_chip->real_wake & bitmask)) __nmk_gpio_set_wake(nmk_chip, d->hwirq, enable); spin_unlock(&nmk_chip->lock); spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags); clk_disable(nmk_chip->clk); return 0; } static void nmk_gpio_irq_mask(struct irq_data *d) { nmk_gpio_irq_maskunmask(d, false); } static void nmk_gpio_irq_unmask(struct irq_data *d) { nmk_gpio_irq_maskunmask(d, true); } static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on) { struct nmk_gpio_chip *nmk_chip; unsigned long flags; u32 bitmask; nmk_chip = irq_data_get_irq_chip_data(d); if (!nmk_chip) return -EINVAL; bitmask = nmk_gpio_get_bitmask(d->hwirq); clk_enable(nmk_chip->clk); spin_lock_irqsave(&nmk_gpio_slpm_lock, flags); spin_lock(&nmk_chip->lock); if (irqd_irq_disabled(d)) __nmk_gpio_set_wake(nmk_chip, d->hwirq, on); if (on) nmk_chip->real_wake |= bitmask; else nmk_chip->real_wake &= ~bitmask; spin_unlock(&nmk_chip->lock); spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags); clk_disable(nmk_chip->clk); return 0; } static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type) { bool enabled = !irqd_irq_disabled(d); bool wake = irqd_is_wakeup_set(d); struct nmk_gpio_chip *nmk_chip; unsigned long flags; u32 bitmask; nmk_chip = irq_data_get_irq_chip_data(d); bitmask = nmk_gpio_get_bitmask(d->hwirq); if (!nmk_chip) return -EINVAL; if (type & 
IRQ_TYPE_LEVEL_HIGH) return -EINVAL; if (type & IRQ_TYPE_LEVEL_LOW) return -EINVAL; clk_enable(nmk_chip->clk); spin_lock_irqsave(&nmk_chip->lock, flags); if (enabled) __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, false); if (enabled || wake) __nmk_gpio_irq_modify(nmk_chip, d->hwirq, WAKE, false); nmk_chip->edge_rising &= ~bitmask; if (type & IRQ_TYPE_EDGE_RISING) nmk_chip->edge_rising |= bitmask; nmk_chip->edge_falling &= ~bitmask; if (type & IRQ_TYPE_EDGE_FALLING) nmk_chip->edge_falling |= bitmask; if (enabled) __nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, true); if (enabled || wake) __nmk_gpio_irq_modify(nmk_chip, d->hwirq, WAKE, true); spin_unlock_irqrestore(&nmk_chip->lock, flags); clk_disable(nmk_chip->clk); return 0; } static unsigned int nmk_gpio_irq_startup(struct irq_data *d) { struct nmk_gpio_chip *nmk_chip = irq_data_get_irq_chip_data(d); if (gpio_lock_as_irq(&nmk_chip->chip, d->hwirq)) dev_err(nmk_chip->chip.dev, "unable to lock HW IRQ %lu for IRQ\n", d->hwirq); clk_enable(nmk_chip->clk); nmk_gpio_irq_unmask(d); return 0; } static void nmk_gpio_irq_shutdown(struct irq_data *d) { struct nmk_gpio_chip *nmk_chip = irq_data_get_irq_chip_data(d); nmk_gpio_irq_mask(d); clk_disable(nmk_chip->clk); gpio_unlock_as_irq(&nmk_chip->chip, d->hwirq); } static struct irq_chip nmk_gpio_irq_chip = { .name = "Nomadik-GPIO", .irq_ack = nmk_gpio_irq_ack, .irq_mask = nmk_gpio_irq_mask, .irq_unmask = nmk_gpio_irq_unmask, .irq_set_type = nmk_gpio_irq_set_type, .irq_set_wake = nmk_gpio_irq_set_wake, .irq_startup = nmk_gpio_irq_startup, .irq_shutdown = nmk_gpio_irq_shutdown, .flags = IRQCHIP_MASK_ON_SUSPEND, }; static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc, u32 status) { struct nmk_gpio_chip *nmk_chip; struct irq_chip *host_chip = irq_get_chip(irq); chained_irq_enter(host_chip, desc); nmk_chip = irq_get_handler_data(irq); while (status) { int bit = __ffs(status); generic_handle_irq(irq_find_mapping(nmk_chip->domain, bit)); status &= ~BIT(bit); } chained_irq_exit(host_chip, desc); } static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) { struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq); u32 status; clk_enable(nmk_chip->clk); status = readl(nmk_chip->addr + NMK_GPIO_IS); clk_disable(nmk_chip->clk); __nmk_gpio_irq_handler(irq, desc, status); } static void nmk_gpio_secondary_irq_handler(unsigned int irq, struct irq_desc *desc) { struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq); u32 status = nmk_chip->get_secondary_status(nmk_chip->bank); __nmk_gpio_irq_handler(irq, desc, status); } static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip) { irq_set_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler); irq_set_handler_data(nmk_chip->parent_irq, nmk_chip); if (nmk_chip->secondary_parent_irq >= 0) { irq_set_chained_handler(nmk_chip->secondary_parent_irq, nmk_gpio_secondary_irq_handler); irq_set_handler_data(nmk_chip->secondary_parent_irq, nmk_chip); } return 0; } /* I/O Functions */ static int nmk_gpio_request(struct gpio_chip *chip, unsigned offset) { /* * Map back to global GPIO space and request muxing, the direction * parameter does not matter for this controller. 
*/ int gpio = chip->base + offset; return pinctrl_request_gpio(gpio); } static void nmk_gpio_free(struct gpio_chip *chip, unsigned offset) { int gpio = chip->base + offset; pinctrl_free_gpio(gpio); } static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset) { struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); clk_enable(nmk_chip->clk); writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC); clk_disable(nmk_chip->clk); return 0; } static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned offset) { struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); u32 bit = 1 << offset; int value; clk_enable(nmk_chip->clk); value = (readl(nmk_chip->addr + NMK_GPIO_DAT) & bit) != 0; clk_disable(nmk_chip->clk); return value; } static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned offset, int val) { struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); clk_enable(nmk_chip->clk); __nmk_gpio_set_output(nmk_chip, offset, val); clk_disable(nmk_chip->clk); } static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned offset, int val) { struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); clk_enable(nmk_chip->clk); __nmk_gpio_make_output(nmk_chip, offset, val); clk_disable(nmk_chip->clk); return 0; } static int nmk_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); return irq_create_mapping(nmk_chip->domain, offset); } #ifdef CONFIG_DEBUG_FS #include <linux/seq_file.h> static void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev, struct gpio_chip *chip, unsigned offset, unsigned gpio) { const char *label = gpiochip_is_requested(chip, offset); struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); int mode; bool is_out; bool pull; u32 bit = 1 << offset; const char *modes[] = { [NMK_GPIO_ALT_GPIO] = "gpio", [NMK_GPIO_ALT_A] = "altA", [NMK_GPIO_ALT_B] = "altB", [NMK_GPIO_ALT_C] = "altC", [NMK_GPIO_ALT_C+1] = "altC1", [NMK_GPIO_ALT_C+2] = "altC2", [NMK_GPIO_ALT_C+3] = "altC3", [NMK_GPIO_ALT_C+4] = "altC4", }; clk_enable(nmk_chip->clk); is_out = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & bit); pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & bit); mode = nmk_gpio_get_mode(gpio); if ((mode == NMK_GPIO_ALT_C) && pctldev) mode = nmk_prcm_gpiocr_get_mode(pctldev, gpio); seq_printf(s, " gpio-%-3d (%-20.20s) %s %s %s %s", gpio, label ?: "(none)", is_out ? "out" : "in ", chip->get ? (chip->get(chip, offset) ? "hi" : "lo") : "? ", (mode < 0) ? "unknown" : modes[mode], pull ? "pull" : "none"); if (!is_out) { int irq = gpio_to_irq(gpio); struct irq_desc *desc = irq_to_desc(irq); /* This races with request_irq(), set_irq_type(), * and set_irq_wake() ... but those are "rare". */ if (irq > 0 && desc && desc->action) { char *trigger; u32 bitmask = nmk_gpio_get_bitmask(gpio); if (nmk_chip->edge_rising & bitmask) trigger = "edge-rising"; else if (nmk_chip->edge_falling & bitmask) trigger = "edge-falling"; else trigger = "edge-undefined"; seq_printf(s, " irq-%d %s%s", irq, trigger, irqd_is_wakeup_set(&desc->irq_data) ? 
" wakeup" : ""); } } clk_disable(nmk_chip->clk); } static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) { unsigned i; unsigned gpio = chip->base; for (i = 0; i < chip->ngpio; i++, gpio++) { nmk_gpio_dbg_show_one(s, NULL, chip, i, gpio); seq_printf(s, "\n"); } } #else static inline void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev, struct gpio_chip *chip, unsigned offset, unsigned gpio) { } #define nmk_gpio_dbg_show NULL #endif /* This structure is replicated for each GPIO block allocated at probe time */ static struct gpio_chip nmk_gpio_template = { .request = nmk_gpio_request, .free = nmk_gpio_free, .direction_input = nmk_gpio_make_input, .get = nmk_gpio_get_input, .direction_output = nmk_gpio_make_output, .set = nmk_gpio_set_output, .to_irq = nmk_gpio_to_irq, .dbg_show = nmk_gpio_dbg_show, .can_sleep = false, }; void nmk_gpio_clocks_enable(void) { int i; for (i = 0; i < NUM_BANKS; i++) { struct nmk_gpio_chip *chip = nmk_gpio_chips[i]; if (!chip) continue; clk_enable(chip->clk); } } void nmk_gpio_clocks_disable(void) { int i; for (i = 0; i < NUM_BANKS; i++) { struct nmk_gpio_chip *chip = nmk_gpio_chips[i]; if (!chip) continue; clk_disable(chip->clk); } } /* * Called from the suspend/resume path to only keep the real wakeup interrupts * (those that have had set_irq_wake() called on them) as wakeup interrupts, * and not the rest of the interrupts which we needed to have as wakeups for * cpuidle. * * PM ops are not used since this needs to be done at the end, after all the * other drivers are done with their suspend callbacks. */ void nmk_gpio_wakeups_suspend(void) { int i; for (i = 0; i < NUM_BANKS; i++) { struct nmk_gpio_chip *chip = nmk_gpio_chips[i]; if (!chip) break; clk_enable(chip->clk); writel(chip->rwimsc & chip->real_wake, chip->addr + NMK_GPIO_RWIMSC); writel(chip->fwimsc & chip->real_wake, chip->addr + NMK_GPIO_FWIMSC); clk_disable(chip->clk); } } void nmk_gpio_wakeups_resume(void) { int i; for (i = 0; i < NUM_BANKS; i++) { struct nmk_gpio_chip *chip = nmk_gpio_chips[i]; if (!chip) break; clk_enable(chip->clk); writel(chip->rwimsc, chip->addr + NMK_GPIO_RWIMSC); writel(chip->fwimsc, chip->addr + NMK_GPIO_FWIMSC); clk_disable(chip->clk); } } /* * Read the pull up/pull down status. * A bit set in 'pull_up' means that pull up * is selected if pull is enabled in PDIS register. * Note: only pull up/down set via this driver can * be detected due to HW limitations. 
*/ void nmk_gpio_read_pull(int gpio_bank, u32 *pull_up) { if (gpio_bank < NUM_BANKS) { struct nmk_gpio_chip *chip = nmk_gpio_chips[gpio_bank]; if (!chip) return; *pull_up = chip->pull_up; } } static int nmk_gpio_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { struct nmk_gpio_chip *nmk_chip = d->host_data; if (!nmk_chip) return -EINVAL; irq_set_chip_and_handler(irq, &nmk_gpio_irq_chip, handle_edge_irq); set_irq_flags(irq, IRQF_VALID); irq_set_chip_data(irq, nmk_chip); irq_set_irq_type(irq, IRQ_TYPE_EDGE_FALLING); return 0; } static const struct irq_domain_ops nmk_gpio_irq_simple_ops = { .map = nmk_gpio_irq_map, .xlate = irq_domain_xlate_twocell, }; static int nmk_gpio_probe(struct platform_device *dev) { struct nmk_gpio_platform_data *pdata; struct device_node *np = dev->dev.of_node; struct nmk_gpio_chip *nmk_chip; struct gpio_chip *chip; struct resource *res; struct clk *clk; int secondary_irq; void __iomem *base; int irq; int ret; pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; if (of_get_property(np, "st,supports-sleepmode", NULL)) pdata->supports_sleepmode = true; if (of_property_read_u32(np, "gpio-bank", &dev->id)) { dev_err(&dev->dev, "gpio-bank property not found\n"); return -EINVAL; } pdata->first_gpio = dev->id * NMK_GPIO_PER_CHIP; pdata->num_gpio = NMK_GPIO_PER_CHIP; irq = platform_get_irq(dev, 0); if (irq < 0) return irq; secondary_irq = platform_get_irq(dev, 1); if (secondary_irq >= 0 && !pdata->get_secondary_status) return -EINVAL; res = platform_get_resource(dev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&dev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); clk = devm_clk_get(&dev->dev, NULL); if (IS_ERR(clk)) return PTR_ERR(clk); clk_prepare(clk); nmk_chip = devm_kzalloc(&dev->dev, sizeof(*nmk_chip), GFP_KERNEL); if (!nmk_chip) return -ENOMEM; /* * The virt address in nmk_chip->addr is in the nomadik register space, * so we can simply convert the resource address, without remapping */ nmk_chip->bank = dev->id; nmk_chip->clk = clk; nmk_chip->addr = base; nmk_chip->chip = nmk_gpio_template; nmk_chip->parent_irq = irq; nmk_chip->secondary_parent_irq = secondary_irq; nmk_chip->get_secondary_status = pdata->get_secondary_status; nmk_chip->set_ioforce = pdata->set_ioforce; nmk_chip->sleepmode = pdata->supports_sleepmode; spin_lock_init(&nmk_chip->lock); chip = &nmk_chip->chip; chip->base = pdata->first_gpio; chip->ngpio = pdata->num_gpio; chip->label = pdata->name ?: dev_name(&dev->dev); chip->dev = &dev->dev; chip->owner = THIS_MODULE; clk_enable(nmk_chip->clk); nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI); clk_disable(nmk_chip->clk); chip->of_node = np; ret = gpiochip_add(&nmk_chip->chip); if (ret) return ret; BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips)); nmk_gpio_chips[nmk_chip->bank] = nmk_chip; platform_set_drvdata(dev, nmk_chip); nmk_chip->domain = irq_domain_add_simple(np, NMK_GPIO_PER_CHIP, 0, &nmk_gpio_irq_simple_ops, nmk_chip); if (!nmk_chip->domain) { dev_err(&dev->dev, "failed to create irqdomain\n"); /* Just do this, no matter if it fails */ ret = gpiochip_remove(&nmk_chip->chip); return -ENOSYS; } nmk_gpio_init_irq(nmk_chip); dev_info(&dev->dev, "at address %p\n", nmk_chip->addr); return 0; } static int nmk_get_groups_cnt(struct pinctrl_dev *pctldev) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); return npct->soc->ngroups; } static const char *nmk_get_group_name(struct pinctrl_dev *pctldev, unsigned selector) { struct nmk_pinctrl *npct = 
pinctrl_dev_get_drvdata(pctldev); return npct->soc->groups[selector].name; } static int nmk_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector, const unsigned **pins, unsigned *num_pins) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); *pins = npct->soc->groups[selector].pins; *num_pins = npct->soc->groups[selector].npins; return 0; } static struct pinctrl_gpio_range * nmk_match_gpio_range(struct pinctrl_dev *pctldev, unsigned offset) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); int i; for (i = 0; i < npct->soc->gpio_num_ranges; i++) { struct pinctrl_gpio_range *range; range = &npct->soc->gpio_ranges[i]; if (offset >= range->pin_base && offset <= (range->pin_base + range->npins - 1)) return range; } return NULL; } static void nmk_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned offset) { struct pinctrl_gpio_range *range; struct gpio_chip *chip; range = nmk_match_gpio_range(pctldev, offset); if (!range || !range->gc) { seq_printf(s, "invalid pin offset"); return; } chip = range->gc; nmk_gpio_dbg_show_one(s, pctldev, chip, offset - chip->base, offset); } static void nmk_pinctrl_dt_free_map(struct pinctrl_dev *pctldev, struct pinctrl_map *map, unsigned num_maps) { int i; for (i = 0; i < num_maps; i++) if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN) kfree(map[i].data.configs.configs); kfree(map); } static int nmk_dt_reserve_map(struct pinctrl_map **map, unsigned *reserved_maps, unsigned *num_maps, unsigned reserve) { unsigned old_num = *reserved_maps; unsigned new_num = *num_maps + reserve; struct pinctrl_map *new_map; if (old_num >= new_num) return 0; new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL); if (!new_map) return -ENOMEM; memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map)); *map = new_map; *reserved_maps = new_num; return 0; } static int nmk_dt_add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps, unsigned *num_maps, const char *group, const char *function) { if (*num_maps == *reserved_maps) return -ENOSPC; (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP; (*map)[*num_maps].data.mux.group = group; (*map)[*num_maps].data.mux.function = function; (*num_maps)++; return 0; } static int nmk_dt_add_map_configs(struct pinctrl_map **map, unsigned *reserved_maps, unsigned *num_maps, const char *group, unsigned long *configs, unsigned num_configs) { unsigned long *dup_configs; if (*num_maps == *reserved_maps) return -ENOSPC; dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs), GFP_KERNEL); if (!dup_configs) return -ENOMEM; (*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_PIN; (*map)[*num_maps].data.configs.group_or_pin = group; (*map)[*num_maps].data.configs.configs = dup_configs; (*map)[*num_maps].data.configs.num_configs = num_configs; (*num_maps)++; return 0; } #define NMK_CONFIG_PIN(x, y) { .property = x, .config = y, } #define NMK_CONFIG_PIN_ARRAY(x, y) { .property = x, .choice = y, \ .size = ARRAY_SIZE(y), } static const unsigned long nmk_pin_input_modes[] = { PIN_INPUT_NOPULL, PIN_INPUT_PULLUP, PIN_INPUT_PULLDOWN, }; static const unsigned long nmk_pin_output_modes[] = { PIN_OUTPUT_LOW, PIN_OUTPUT_HIGH, PIN_DIR_OUTPUT, }; static const unsigned long nmk_pin_sleep_modes[] = { PIN_SLEEPMODE_DISABLED, PIN_SLEEPMODE_ENABLED, }; static const unsigned long nmk_pin_sleep_input_modes[] = { PIN_SLPM_INPUT_NOPULL, PIN_SLPM_INPUT_PULLUP, PIN_SLPM_INPUT_PULLDOWN, PIN_SLPM_DIR_INPUT, }; static const unsigned long nmk_pin_sleep_output_modes[] = { PIN_SLPM_OUTPUT_LOW, PIN_SLPM_OUTPUT_HIGH, 
PIN_SLPM_DIR_OUTPUT, }; static const unsigned long nmk_pin_sleep_wakeup_modes[] = { PIN_SLPM_WAKEUP_DISABLE, PIN_SLPM_WAKEUP_ENABLE, }; static const unsigned long nmk_pin_gpio_modes[] = { PIN_GPIOMODE_DISABLED, PIN_GPIOMODE_ENABLED, }; static const unsigned long nmk_pin_sleep_pdis_modes[] = { PIN_SLPM_PDIS_DISABLED, PIN_SLPM_PDIS_ENABLED, }; struct nmk_cfg_param { const char *property; unsigned long config; const unsigned long *choice; int size; }; static const struct nmk_cfg_param nmk_cfg_params[] = { NMK_CONFIG_PIN_ARRAY("ste,input", nmk_pin_input_modes), NMK_CONFIG_PIN_ARRAY("ste,output", nmk_pin_output_modes), NMK_CONFIG_PIN_ARRAY("ste,sleep", nmk_pin_sleep_modes), NMK_CONFIG_PIN_ARRAY("ste,sleep-input", nmk_pin_sleep_input_modes), NMK_CONFIG_PIN_ARRAY("ste,sleep-output", nmk_pin_sleep_output_modes), NMK_CONFIG_PIN_ARRAY("ste,sleep-wakeup", nmk_pin_sleep_wakeup_modes), NMK_CONFIG_PIN_ARRAY("ste,gpio", nmk_pin_gpio_modes), NMK_CONFIG_PIN_ARRAY("ste,sleep-pull-disable", nmk_pin_sleep_pdis_modes), }; static int nmk_dt_pin_config(int index, int val, unsigned long *config) { int ret = 0; if (nmk_cfg_params[index].choice == NULL) *config = nmk_cfg_params[index].config; else { /* test if out of range */ if (val < nmk_cfg_params[index].size) { *config = nmk_cfg_params[index].config | nmk_cfg_params[index].choice[val]; } } return ret; } static const char *nmk_find_pin_name(struct pinctrl_dev *pctldev, const char *pin_name) { int i, pin_number; struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); if (sscanf((char *)pin_name, "GPIO%d", &pin_number) == 1) for (i = 0; i < npct->soc->npins; i++) if (npct->soc->pins[i].number == pin_number) return npct->soc->pins[i].name; return NULL; } static bool nmk_pinctrl_dt_get_config(struct device_node *np, unsigned long *configs) { bool has_config = 0; unsigned long cfg = 0; int i, val, ret; for (i = 0; i < ARRAY_SIZE(nmk_cfg_params); i++) { ret = of_property_read_u32(np, nmk_cfg_params[i].property, &val); if (ret != -EINVAL) { if (nmk_dt_pin_config(i, val, &cfg) == 0) { *configs |= cfg; has_config = 1; } } } return has_config; } static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, struct device_node *np, struct pinctrl_map **map, unsigned *reserved_maps, unsigned *num_maps) { int ret; const char *function = NULL; unsigned long configs = 0; bool has_config = 0; unsigned reserve = 0; struct property *prop; const char *group, *gpio_name; struct device_node *np_config; ret = of_property_read_string(np, "ste,function", &function); if (ret >= 0) reserve = 1; has_config = nmk_pinctrl_dt_get_config(np, &configs); np_config = of_parse_phandle(np, "ste,config", 0); if (np_config) has_config |= nmk_pinctrl_dt_get_config(np_config, &configs); ret = of_property_count_strings(np, "ste,pins"); if (ret < 0) goto exit; if (has_config) reserve++; reserve *= ret; ret = nmk_dt_reserve_map(map, reserved_maps, num_maps, reserve); if (ret < 0) goto exit; of_property_for_each_string(np, "ste,pins", prop, group) { if (function) { ret = nmk_dt_add_map_mux(map, reserved_maps, num_maps, group, function); if (ret < 0) goto exit; } if (has_config) { gpio_name = nmk_find_pin_name(pctldev, group); ret = nmk_dt_add_map_configs(map, reserved_maps, num_maps, gpio_name, &configs, 1); if (ret < 0) goto exit; } } exit: return ret; } static int nmk_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np_config, struct pinctrl_map **map, unsigned *num_maps) { unsigned reserved_maps; struct device_node *np; int ret; reserved_maps = 0; *map = NULL; 
*num_maps = 0; for_each_child_of_node(np_config, np) { ret = nmk_pinctrl_dt_subnode_to_map(pctldev, np, map, &reserved_maps, num_maps); if (ret < 0) { nmk_pinctrl_dt_free_map(pctldev, *map, *num_maps); return ret; } } return 0; } static const struct pinctrl_ops nmk_pinctrl_ops = { .get_groups_count = nmk_get_groups_cnt, .get_group_name = nmk_get_group_name, .get_group_pins = nmk_get_group_pins, .pin_dbg_show = nmk_pin_dbg_show, .dt_node_to_map = nmk_pinctrl_dt_node_to_map, .dt_free_map = nmk_pinctrl_dt_free_map, }; static int nmk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); return npct->soc->nfunctions; } static const char *nmk_pmx_get_func_name(struct pinctrl_dev *pctldev, unsigned function) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); return npct->soc->functions[function].name; } static int nmk_pmx_get_func_groups(struct pinctrl_dev *pctldev, unsigned function, const char * const **groups, unsigned * const num_groups) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); *groups = npct->soc->functions[function].groups; *num_groups = npct->soc->functions[function].ngroups; return 0; } static int nmk_pmx_enable(struct pinctrl_dev *pctldev, unsigned function, unsigned group) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); const struct nmk_pingroup *g; static unsigned int slpm[NUM_BANKS]; unsigned long flags = 0; bool glitch; int ret = -EINVAL; int i; g = &npct->soc->groups[group]; if (g->altsetting < 0) return -EINVAL; dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins); /* * If we're setting altfunc C by setting both AFSLA and AFSLB to 1, * we may pass through an undesired state. In this case we take * some extra care. * * Safe sequence used to switch IOs between GPIO and Alternate-C mode: * - Save SLPM registers (since we have a shadow register in the * nmk_chip we're using that as backup) * - Set SLPM=0 for the IOs you want to switch and others to 1 * - Configure the GPIO registers for the IOs that are being switched * - Set IOFORCE=1 * - Modify the AFLSA/B registers for the IOs that are being switched * - Set IOFORCE=0 * - Restore SLPM registers * - Any spurious wake up event during switch sequence to be ignored * and cleared * * We REALLY need to save ALL slpm registers, because the external * IOFORCE will switch *all* ports to their sleepmode setting to as * to avoid glitches. (Not just one port!) */ glitch = ((g->altsetting & NMK_GPIO_ALT_C) == NMK_GPIO_ALT_C); if (glitch) { spin_lock_irqsave(&nmk_gpio_slpm_lock, flags); /* Initially don't put any pins to sleep when switching */ memset(slpm, 0xff, sizeof(slpm)); /* * Then mask the pins that need to be sleeping now when we're * switching to the ALT C function. 
*/ for (i = 0; i < g->npins; i++) slpm[g->pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(g->pins[i]); nmk_gpio_glitch_slpm_init(slpm); } for (i = 0; i < g->npins; i++) { struct pinctrl_gpio_range *range; struct nmk_gpio_chip *nmk_chip; struct gpio_chip *chip; unsigned bit; range = nmk_match_gpio_range(pctldev, g->pins[i]); if (!range) { dev_err(npct->dev, "invalid pin offset %d in group %s at index %d\n", g->pins[i], g->name, i); goto out_glitch; } if (!range->gc) { dev_err(npct->dev, "GPIO chip missing in range for pin offset %d in group %s at index %d\n", g->pins[i], g->name, i); goto out_glitch; } chip = range->gc; nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); dev_dbg(npct->dev, "setting pin %d to altsetting %d\n", g->pins[i], g->altsetting); clk_enable(nmk_chip->clk); bit = g->pins[i] % NMK_GPIO_PER_CHIP; /* * If the pin is switching to altfunc, and there was an * interrupt installed on it which has been lazy disabled, * actually mask the interrupt to prevent spurious interrupts * that would occur while the pin is under control of the * peripheral. Only SKE does this. */ nmk_gpio_disable_lazy_irq(nmk_chip, bit); __nmk_gpio_set_mode_safe(nmk_chip, bit, (g->altsetting & NMK_GPIO_ALT_C), glitch); clk_disable(nmk_chip->clk); /* * Call PRCM GPIOCR config function in case ALTC * has been selected: * - If selection is a ALTCx, some bits in PRCM GPIOCR registers * must be set. * - If selection is pure ALTC and previous selection was ALTCx, * then some bits in PRCM GPIOCR registers must be cleared. */ if ((g->altsetting & NMK_GPIO_ALT_C) == NMK_GPIO_ALT_C) nmk_prcm_altcx_set_mode(npct, g->pins[i], g->altsetting >> NMK_GPIO_ALT_CX_SHIFT); } /* When all pins are successfully reconfigured we get here */ ret = 0; out_glitch: if (glitch) { nmk_gpio_glitch_slpm_restore(slpm); spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags); } return ret; } static void nmk_pmx_disable(struct pinctrl_dev *pctldev, unsigned function, unsigned group) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); const struct nmk_pingroup *g; g = &npct->soc->groups[group]; if (g->altsetting < 0) return; /* Poke out the mux, set the pin to some default state? 
*/ dev_dbg(npct->dev, "disable group %s, %u pins\n", g->name, g->npins); } static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned offset) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); struct nmk_gpio_chip *nmk_chip; struct gpio_chip *chip; unsigned bit; if (!range) { dev_err(npct->dev, "invalid range\n"); return -EINVAL; } if (!range->gc) { dev_err(npct->dev, "missing GPIO chip in range\n"); return -EINVAL; } chip = range->gc; nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); dev_dbg(npct->dev, "enable pin %u as GPIO\n", offset); clk_enable(nmk_chip->clk); bit = offset % NMK_GPIO_PER_CHIP; /* There is no glitch when converting any pin to GPIO */ __nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO); clk_disable(nmk_chip->clk); return 0; } static void nmk_gpio_disable_free(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned offset) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); dev_dbg(npct->dev, "disable pin %u as GPIO\n", offset); /* Set the pin to some default state, GPIO is usually default */ } static const struct pinmux_ops nmk_pinmux_ops = { .get_functions_count = nmk_pmx_get_funcs_cnt, .get_function_name = nmk_pmx_get_func_name, .get_function_groups = nmk_pmx_get_func_groups, .enable = nmk_pmx_enable, .disable = nmk_pmx_disable, .gpio_request_enable = nmk_gpio_request_enable, .gpio_disable_free = nmk_gpio_disable_free, }; static int nmk_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin, unsigned long *config) { /* Not implemented */ return -EINVAL; } static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin, unsigned long *configs, unsigned num_configs) { static const char *pullnames[] = { [NMK_GPIO_PULL_NONE] = "none", [NMK_GPIO_PULL_UP] = "up", [NMK_GPIO_PULL_DOWN] = "down", [3] /* illegal */ = "??" }; static const char *slpmnames[] = { [NMK_GPIO_SLPM_INPUT] = "input/wakeup", [NMK_GPIO_SLPM_NOCHANGE] = "no-change/no-wakeup", }; struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); struct nmk_gpio_chip *nmk_chip; struct pinctrl_gpio_range *range; struct gpio_chip *chip; unsigned bit; pin_cfg_t cfg; int pull, slpm, output, val, i; bool lowemi, gpiomode, sleep; range = nmk_match_gpio_range(pctldev, pin); if (!range) { dev_err(npct->dev, "invalid pin offset %d\n", pin); return -EINVAL; } if (!range->gc) { dev_err(npct->dev, "GPIO chip missing in range for pin %d\n", pin); return -EINVAL; } chip = range->gc; nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); for (i = 0; i < num_configs; i++) { /* * The pin config contains pin number and altfunction fields, * here we just ignore that part. It's being handled by the * framework and pinmux callback respectively. */ cfg = (pin_cfg_t) configs[i]; pull = PIN_PULL(cfg); slpm = PIN_SLPM(cfg); output = PIN_DIR(cfg); val = PIN_VAL(cfg); lowemi = PIN_LOWEMI(cfg); gpiomode = PIN_GPIOMODE(cfg); sleep = PIN_SLEEPMODE(cfg); if (sleep) { int slpm_pull = PIN_SLPM_PULL(cfg); int slpm_output = PIN_SLPM_DIR(cfg); int slpm_val = PIN_SLPM_VAL(cfg); /* All pins go into GPIO mode at sleep */ gpiomode = true; /* * The SLPM_* values are normal values + 1 to allow zero * to mean "same as normal". */ if (slpm_pull) pull = slpm_pull - 1; if (slpm_output) output = slpm_output - 1; if (slpm_val) val = slpm_val - 1; dev_dbg(nmk_chip->chip.dev, "pin %d: sleep pull %s, dir %s, val %s\n", pin, slpm_pull ? pullnames[pull] : "same", slpm_output ? (output ? "output" : "input") : "same", slpm_val ? (val ? 
"high" : "low") : "same"); } dev_dbg(nmk_chip->chip.dev, "pin %d [%#lx]: pull %s, slpm %s (%s%s), lowemi %s\n", pin, cfg, pullnames[pull], slpmnames[slpm], output ? "output " : "input", output ? (val ? "high" : "low") : "", lowemi ? "on" : "off"); clk_enable(nmk_chip->clk); bit = pin % NMK_GPIO_PER_CHIP; if (gpiomode) /* No glitch when going to GPIO mode */ __nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO); if (output) __nmk_gpio_make_output(nmk_chip, bit, val); else { __nmk_gpio_make_input(nmk_chip, bit); __nmk_gpio_set_pull(nmk_chip, bit, pull); } /* TODO: isn't this only applicable on output pins? */ __nmk_gpio_set_lowemi(nmk_chip, bit, lowemi); __nmk_gpio_set_slpm(nmk_chip, bit, slpm); clk_disable(nmk_chip->clk); } /* for each config */ return 0; } static const struct pinconf_ops nmk_pinconf_ops = { .pin_config_get = nmk_pin_config_get, .pin_config_set = nmk_pin_config_set, }; static struct pinctrl_desc nmk_pinctrl_desc = { .name = "pinctrl-nomadik", .pctlops = &nmk_pinctrl_ops, .pmxops = &nmk_pinmux_ops, .confops = &nmk_pinconf_ops, .owner = THIS_MODULE, }; static const struct of_device_id nmk_pinctrl_match[] = { { .compatible = "stericsson,stn8815-pinctrl", .data = (void *)PINCTRL_NMK_STN8815, }, { .compatible = "stericsson,db8500-pinctrl", .data = (void *)PINCTRL_NMK_DB8500, }, { .compatible = "stericsson,db8540-pinctrl", .data = (void *)PINCTRL_NMK_DB8540, }, {}, }; static int nmk_pinctrl_suspend(struct platform_device *pdev, pm_message_t state) { struct nmk_pinctrl *npct; npct = platform_get_drvdata(pdev); if (!npct) return -EINVAL; return pinctrl_force_sleep(npct->pctl); } static int nmk_pinctrl_resume(struct platform_device *pdev) { struct nmk_pinctrl *npct; npct = platform_get_drvdata(pdev); if (!npct) return -EINVAL; return pinctrl_force_default(npct->pctl); } static int nmk_pinctrl_probe(struct platform_device *pdev) { const struct of_device_id *match; struct device_node *np = pdev->dev.of_node; struct device_node *prcm_np; struct nmk_pinctrl *npct; unsigned int version = 0; int i; npct = devm_kzalloc(&pdev->dev, sizeof(*npct), GFP_KERNEL); if (!npct) return -ENOMEM; match = of_match_device(nmk_pinctrl_match, &pdev->dev); if (!match) return -ENODEV; version = (unsigned int) match->data; /* Poke in other ASIC variants here */ if (version == PINCTRL_NMK_STN8815) nmk_pinctrl_stn8815_init(&npct->soc); if (version == PINCTRL_NMK_DB8500) nmk_pinctrl_db8500_init(&npct->soc); if (version == PINCTRL_NMK_DB8540) nmk_pinctrl_db8540_init(&npct->soc); prcm_np = of_parse_phandle(np, "prcm", 0); if (prcm_np) npct->prcm_base = of_iomap(prcm_np, 0); if (!npct->prcm_base) { if (version == PINCTRL_NMK_STN8815) { dev_info(&pdev->dev, "No PRCM base, " "assuming no ALT-Cx control is available\n"); } else { dev_err(&pdev->dev, "missing PRCM base address\n"); return -EINVAL; } } /* * We need all the GPIO drivers to probe FIRST, or we will not be able * to obtain references to the struct gpio_chip * for them, and we * need this to proceed. 
*/ for (i = 0; i < npct->soc->gpio_num_ranges; i++) { if (!nmk_gpio_chips[npct->soc->gpio_ranges[i].id]) { dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i); return -EPROBE_DEFER; } npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[npct->soc->gpio_ranges[i].id]->chip; } nmk_pinctrl_desc.pins = npct->soc->pins; nmk_pinctrl_desc.npins = npct->soc->npins; npct->dev = &pdev->dev; npct->pctl = pinctrl_register(&nmk_pinctrl_desc, &pdev->dev, npct); if (!npct->pctl) { dev_err(&pdev->dev, "could not register Nomadik pinctrl driver\n"); return -EINVAL; } /* We will handle a range of GPIO pins */ for (i = 0; i < npct->soc->gpio_num_ranges; i++) pinctrl_add_gpio_range(npct->pctl, &npct->soc->gpio_ranges[i]); platform_set_drvdata(pdev, npct); dev_info(&pdev->dev, "initialized Nomadik pin control driver\n"); return 0; } static const struct of_device_id nmk_gpio_match[] = { { .compatible = "st,nomadik-gpio", }, {} }; static struct platform_driver nmk_gpio_driver = { .driver = { .owner = THIS_MODULE, .name = "gpio", .of_match_table = nmk_gpio_match, }, .probe = nmk_gpio_probe, }; static struct platform_driver nmk_pinctrl_driver = { .driver = { .owner = THIS_MODULE, .name = "pinctrl-nomadik", .of_match_table = nmk_pinctrl_match, }, .probe = nmk_pinctrl_probe, #ifdef CONFIG_PM .suspend = nmk_pinctrl_suspend, .resume = nmk_pinctrl_resume, #endif }; static int __init nmk_gpio_init(void) { int ret; ret = platform_driver_register(&nmk_gpio_driver); if (ret) return ret; return platform_driver_register(&nmk_pinctrl_driver); } core_initcall(nmk_gpio_init); MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini"); MODULE_DESCRIPTION("Nomadik GPIO Driver"); MODULE_LICENSE("GPL");
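/*
 * A minimal sketch of the consumer side of this driver, for reference.
 * The calls below (devm_pinctrl_get, pinctrl_lookup_state,
 * pinctrl_select_state) are the generic pinctrl framework consumer API;
 * the client device itself is hypothetical and not part of this file.
 */
#if 0	/* illustration only */
#include <linux/pinctrl/consumer.h>

static int example_client_probe(struct platform_device *pdev)
{
	struct pinctrl *p;
	struct pinctrl_state *s;

	p = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/* "default" resolves via the device tree pinctrl-0 phandle */
	s = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
	if (IS_ERR(s))
		return PTR_ERR(s);

	/* this eventually lands in nmk_pmx_enable() for the mapped group */
	return pinctrl_select_state(p, s);
}
#endif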
gpl-2.0
Maroc-OS/Merruk-Technology
common/drivers/staging/msm/staging-devices.c
753
7704
#include <linux/kernel.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/platform_device.h> #include <linux/bootmem.h> #include <linux/delay.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/io.h> #include <asm/setup.h> #include <mach/board.h> #include <mach/irqs.h> #include <mach/sirc.h> #include <mach/gpio.h> #include "msm_mdp.h" #include "memory_ll.h" //#include "android_pmem.h" #include <mach/board.h> #ifdef CONFIG_MSM_SOC_REV_A #define MSM_SMI_BASE 0xE0000000 #else #define MSM_SMI_BASE 0x00000000 #endif #define TOUCHPAD_SUSPEND 34 #define TOUCHPAD_IRQ 38 #define MSM_PMEM_MDP_SIZE 0x1591000 #ifdef CONFIG_MSM_SOC_REV_A #define SMEM_SPINLOCK_I2C "D:I2C02000021" #else #define SMEM_SPINLOCK_I2C "S:6" #endif #define MSM_PMEM_ADSP_SIZE 0x1C00000 #define MSM_FB_SIZE 0x500000 #define MSM_FB_SIZE_ST15 0x800000 #define MSM_AUDIO_SIZE 0x80000 #define MSM_GPU_PHYS_SIZE SZ_2M #ifdef CONFIG_MSM_SOC_REV_A #define MSM_SMI_BASE 0xE0000000 #else #define MSM_SMI_BASE 0x00000000 #endif #define MSM_SHARED_RAM_PHYS (MSM_SMI_BASE + 0x00100000) #define MSM_PMEM_SMI_BASE (MSM_SMI_BASE + 0x02B00000) #define MSM_PMEM_SMI_SIZE 0x01500000 #define MSM_FB_BASE MSM_PMEM_SMI_BASE #define MSM_GPU_PHYS_BASE (MSM_FB_BASE + MSM_FB_SIZE) #define MSM_PMEM_SMIPOOL_BASE (MSM_GPU_PHYS_BASE + MSM_GPU_PHYS_SIZE) #define MSM_PMEM_SMIPOOL_SIZE (MSM_PMEM_SMI_SIZE - MSM_FB_SIZE \ - MSM_GPU_PHYS_SIZE) #if defined(CONFIG_FB_MSM_MDP40) #define MDP_BASE 0xA3F00000 #define PMDH_BASE 0xAD600000 #define EMDH_BASE 0xAD700000 #define TVENC_BASE 0xAD400000 #else #define MDP_BASE 0xAA200000 #define PMDH_BASE 0xAA600000 #define EMDH_BASE 0xAA700000 #define TVENC_BASE 0xAA400000 #endif #define PMEM_KERNEL_EBI1_SIZE (CONFIG_PMEM_KERNEL_SIZE * 1024 * 1024) static struct resource msm_fb_resources[] = { { .flags = IORESOURCE_DMA, } }; static struct resource msm_mdp_resources[] = { { .name = "mdp", .start = MDP_BASE, .end = MDP_BASE + 0x000F0000 - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device msm_mdp_device = { .name = "mdp", .id = 0, .num_resources = ARRAY_SIZE(msm_mdp_resources), .resource = msm_mdp_resources, }; static struct platform_device msm_lcdc_device = { .name = "lcdc", .id = 0, }; static int msm_fb_detect_panel(const char *name) { int ret = -EPERM; if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa()) { if (!strncmp(name, "mddi_toshiba_wvga_pt", 20)) ret = 0; else ret = -ENODEV; } else if ((machine_is_qsd8x50_surf() || machine_is_qsd8x50a_surf()) && !strcmp(name, "lcdc_external")) ret = 0; else if (0 /*machine_is_qsd8x50_grapefruit() */) { if (!strcmp(name, "lcdc_grapefruit_vga")) ret = 0; else ret = -ENODEV; } else if (machine_is_qsd8x50_st1()) { if (!strcmp(name, "lcdc_st1_wxga")) ret = 0; else ret = -ENODEV; } else if (machine_is_qsd8x50a_st1_5()) { if (!strcmp(name, "lcdc_st15") || !strcmp(name, "hdmi_sii9022")) ret = 0; else ret = -ENODEV; } return ret; } /* Only allow a small subset of machines to set the offset via FB PAN_DISPLAY */ static int msm_fb_allow_set_offset(void) { return (machine_is_qsd8x50_st1() || machine_is_qsd8x50a_st1_5()) ? 
1 : 0; } static struct msm_fb_platform_data msm_fb_pdata = { .detect_client = msm_fb_detect_panel, .allow_set_offset = msm_fb_allow_set_offset, }; static struct platform_device msm_fb_device = { .name = "msm_fb", .id = 0, .num_resources = ARRAY_SIZE(msm_fb_resources), .resource = msm_fb_resources, .dev = { .platform_data = &msm_fb_pdata, } }; static void __init qsd8x50_allocate_memory_regions(void) { void *addr; unsigned long size; if (machine_is_qsd8x50a_st1_5()) size = MSM_FB_SIZE_ST15; else size = MSM_FB_SIZE; addr = alloc_bootmem(size); // (void *)MSM_FB_BASE; if (!addr) printk(KERN_ERR "Failed to allocate bootmem for framebuffer\n"); msm_fb_resources[0].start = __pa(addr); msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1; pr_info("using %lu bytes of SMI at %lx physical for fb\n", size, (unsigned long)__pa(addr)); } static int msm_fb_lcdc_gpio_config(int on) { // return 0; if (machine_is_qsd8x50_st1()) { if (on) { gpio_set_value(32, 1); mdelay(100); gpio_set_value(20, 1); gpio_set_value(17, 1); gpio_set_value(19, 1); } else { gpio_set_value(17, 0); gpio_set_value(19, 0); gpio_set_value(20, 0); mdelay(100); gpio_set_value(32, 0); } } else if (machine_is_qsd8x50a_st1_5()) { if (on) { gpio_set_value(17, 1); gpio_set_value(19, 1); gpio_set_value(20, 1); gpio_set_value(22, 0); gpio_set_value(32, 1); gpio_set_value(155, 1); //st15_hdmi_power(1); gpio_set_value(22, 1); } else { gpio_set_value(17, 0); gpio_set_value(19, 0); gpio_set_value(22, 0); gpio_set_value(32, 0); gpio_set_value(155, 0); // st15_hdmi_power(0); } } return 0; } static struct lcdc_platform_data lcdc_pdata = { .lcdc_gpio_config = msm_fb_lcdc_gpio_config, }; static struct msm_gpio msm_fb_st15_gpio_config_data[] = { { GPIO_CFG(17, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "lcdc_en0" }, { GPIO_CFG(19, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "dat_pwr_sv" }, { GPIO_CFG(20, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "lvds_pwr_dn" }, { GPIO_CFG(22, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "lcdc_en1" }, { GPIO_CFG(32, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "lcdc_en2" }, { GPIO_CFG(103, 0, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), "hdmi_irq" }, { GPIO_CFG(155, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "hdmi_3v3" }, }; static struct msm_panel_common_pdata mdp_pdata = { .gpio = 98, }; static struct platform_device *devices[] __initdata = { &msm_fb_device, }; static void __init msm_register_device(struct platform_device *pdev, void *data) { int ret; pdev->dev.platform_data = data; ret = platform_device_register(pdev); if (ret) dev_err(&pdev->dev, "%s: platform_device_register() failed = %d\n", __func__, ret); } void __init msm_fb_register_device(char *name, void *data) { if (!strncmp(name, "mdp", 3)) msm_register_device(&msm_mdp_device, data); /* else if (!strncmp(name, "pmdh", 4)) msm_register_device(&msm_mddi_device, data); else if (!strncmp(name, "emdh", 4)) msm_register_device(&msm_mddi_ext_device, data); else if (!strncmp(name, "ebi2", 4)) msm_register_device(&msm_ebi2_lcd_device, data); else if (!strncmp(name, "tvenc", 5)) msm_register_device(&msm_tvenc_device, data); else */ if (!strncmp(name, "lcdc", 4)) msm_register_device(&msm_lcdc_device, data); /*else printk(KERN_ERR "%s: unknown device!
%s\n", __func__, name); */ } static void __init msm_fb_add_devices(void) { int rc; msm_fb_register_device("mdp", &mdp_pdata); // msm_fb_register_device("pmdh", &mddi_pdata); // msm_fb_register_device("emdh", &mddi_pdata); // msm_fb_register_device("tvenc", 0); if (machine_is_qsd8x50a_st1_5()) { /* rc = st15_hdmi_vreg_init(); if (rc) return; */ rc = msm_gpios_request_enable( msm_fb_st15_gpio_config_data, ARRAY_SIZE(msm_fb_st15_gpio_config_data)); if (rc) { printk(KERN_ERR "%s: unable to init lcdc gpios\n", __func__); return; } msm_fb_register_device("lcdc", &lcdc_pdata); } else msm_fb_register_device("lcdc", 0); } int __init staging_init_pmem(void) { qsd8x50_allocate_memory_regions(); return 0; } int __init staging_init_devices(void) { platform_add_devices(devices, ARRAY_SIZE(devices)); msm_fb_add_devices(); return 0; } arch_initcall(staging_init_pmem); arch_initcall(staging_init_devices);
gpl-2.0
TeamWin/android_kernel_samsung_j2lte
drivers/spi/spi-lm70llp.c
2545
9342
/* * Driver for LM70EVAL-LLP board for the LM70 sensor * * Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/parport.h> #include <linux/sysfs.h> #include <linux/workqueue.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> /* * The LM70 communicates with a host processor using a 3-wire variant of * the SPI/Microwire bus interface. This driver specifically supports an * NS LM70 LLP Evaluation Board, interfacing to a PC using its parallel * port to bitbang an SPI-parport bridge. Accordingly, this is an SPI * master controller driver. The hwmon/lm70 driver is a "SPI protocol * driver", layered on top of this one and usable without the lm70llp. * * Datasheet and Schematic: * The LM70 is a temperature sensor chip from National Semiconductor; its * datasheet is available at http://www.national.com/pf/LM/LM70.html * The schematic for this particular board (the LM70EVAL-LLP) is * available (on page 4) here: * http://www.national.com/appinfo/tempsensors/files/LM70LLPEVALmanual.pdf * * Also see Documentation/spi/spi-lm70llp. The SPI<->parport code here is * (heavily) based on spi-butterfly by David Brownell. * * The LM70 LLP connects to the PC parallel port in the following manner: * * Parallel LM70 LLP * Port Direction JP2 Header * ----------- --------- ------------ * D0 2 - - * D1 3 --> V+ 5 * D2 4 --> V+ 5 * D3 5 --> V+ 5 * D4 6 --> V+ 5 * D5 7 --> nCS 8 * D6 8 --> SCLK 3 * D7 9 --> SI/O 5 * GND 25 - GND 7 * Select 13 <-- SI/O 1 * * Note that parport pin 13 actually gets inverted by the transistor * arrangement which lets either the parport or the LM70 drive the * SI/SO signal (see the schematic for details). */ #define DRVNAME "spi-lm70llp" #define lm70_INIT 0xBE #define SIO 0x10 #define nCS 0x20 #define SCLK 0x40 /*-------------------------------------------------------------------------*/ struct spi_lm70llp { struct spi_bitbang bitbang; struct parport *port; struct pardevice *pd; struct spi_device *spidev_lm70; struct spi_board_info info; //struct device *dev; }; /* REVISIT : ugly global ; provides "exclusive open" facility */ static struct spi_lm70llp *lm70llp; /*-------------------------------------------------------------------*/ static inline struct spi_lm70llp *spidev_to_pp(struct spi_device *spi) { return spi->controller_data; } /*---------------------- LM70 LLP eval board-specific inlines follow */ /* NOTE: we don't actually need to reread the output values, since they'll * still be what we wrote before. Plus, going through parport builds in * a ~1ms/operation delay; these SPI transfers could easily be faster. 
*/ static inline void deassertCS(struct spi_lm70llp *pp) { u8 data = parport_read_data(pp->port); data &= ~0x80; /* pull D7/SI-out low while de-asserted */ parport_write_data(pp->port, data | nCS); } static inline void assertCS(struct spi_lm70llp *pp) { u8 data = parport_read_data(pp->port); data |= 0x80; /* pull D7/SI-out high so lm70 drives SO-in */ parport_write_data(pp->port, data & ~nCS); } static inline void clkHigh(struct spi_lm70llp *pp) { u8 data = parport_read_data(pp->port); parport_write_data(pp->port, data | SCLK); } static inline void clkLow(struct spi_lm70llp *pp) { u8 data = parport_read_data(pp->port); parport_write_data(pp->port, data & ~SCLK); } /*------------------------- SPI-LM70-specific inlines ----------------------*/ static inline void spidelay(unsigned d) { udelay(d); } static inline void setsck(struct spi_device *s, int is_on) { struct spi_lm70llp *pp = spidev_to_pp(s); if (is_on) clkHigh(pp); else clkLow(pp); } static inline void setmosi(struct spi_device *s, int is_on) { /* FIXME update D7 ... this way we can put the chip * into shutdown mode and read the manufacturer ID, * but we can't put it back into operational mode. */ } /* * getmiso: * Why do we return 0 when the SIO line is high and vice-versa? * The fact is, the lm70 eval board from NS (which this driver drives), * is wired in just such a way : when the lm70's SIO goes high, a transistor * switches it to low reflecting this on the parport (pin 13), and vice-versa. */ static inline int getmiso(struct spi_device *s) { struct spi_lm70llp *pp = spidev_to_pp(s); return ((SIO == (parport_read_status(pp->port) & SIO)) ? 0 : 1 ); } /*--------------------------------------------------------------------*/ #include "spi-bitbang-txrx.h" static void lm70_chipselect(struct spi_device *spi, int value) { struct spi_lm70llp *pp = spidev_to_pp(spi); if (value) assertCS(pp); else deassertCS(pp); } /* * Our actual bitbanger routine. */ static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); } static void spi_lm70llp_attach(struct parport *p) { struct pardevice *pd; struct spi_lm70llp *pp; struct spi_master *master; int status; if (lm70llp) { printk(KERN_WARNING "%s: spi_lm70llp instance already loaded. Aborting.\n", DRVNAME); return; } /* TODO: this just _assumes_ a lm70 is there ... no probe; * the lm70 driver could verify it, reading the manf ID. */ master = spi_alloc_master(p->physport->dev, sizeof *pp); if (!master) { status = -ENOMEM; goto out_fail; } pp = spi_master_get_devdata(master); /* * SPI and bitbang hookup. */ pp->bitbang.master = spi_master_get(master); pp->bitbang.chipselect = lm70_chipselect; pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx; pp->bitbang.flags = SPI_3WIRE; /* * Parport hookup */ pp->port = p; pd = parport_register_device(p, DRVNAME, NULL, NULL, NULL, PARPORT_FLAG_EXCL, pp); if (!pd) { status = -ENOMEM; goto out_free_master; } pp->pd = pd; status = parport_claim(pd); if (status < 0) goto out_parport_unreg; /* * Start SPI ... */ status = spi_bitbang_start(&pp->bitbang); if (status < 0) { printk(KERN_WARNING "%s: spi_bitbang_start failed with status %d\n", DRVNAME, status); goto out_off_and_release; } /* * The modalias name MUST match the device_driver name * for the bus glue code to match and subsequently bind them. * We are binding to the generic drivers/hwmon/lm70.c device * driver. 
*/ strcpy(pp->info.modalias, "lm70"); pp->info.max_speed_hz = 6 * 1000 * 1000; pp->info.chip_select = 0; pp->info.mode = SPI_3WIRE | SPI_MODE_0; /* power up the chip, and let the LM70 control SI/SO */ parport_write_data(pp->port, lm70_INIT); /* Enable access to our primary data structure via * the board info's (void *)controller_data. */ pp->info.controller_data = pp; pp->spidev_lm70 = spi_new_device(pp->bitbang.master, &pp->info); if (pp->spidev_lm70) dev_dbg(&pp->spidev_lm70->dev, "spidev_lm70 at %s\n", dev_name(&pp->spidev_lm70->dev)); else { printk(KERN_WARNING "%s: spi_new_device failed\n", DRVNAME); status = -ENODEV; goto out_bitbang_stop; } pp->spidev_lm70->bits_per_word = 8; lm70llp = pp; return; out_bitbang_stop: spi_bitbang_stop(&pp->bitbang); out_off_and_release: /* power down */ parport_write_data(pp->port, 0); mdelay(10); parport_release(pp->pd); out_parport_unreg: parport_unregister_device(pd); out_free_master: (void) spi_master_put(master); out_fail: pr_info("%s: spi_lm70llp probe fail, status %d\n", DRVNAME, status); } static void spi_lm70llp_detach(struct parport *p) { struct spi_lm70llp *pp; if (!lm70llp || lm70llp->port != p) return; pp = lm70llp; spi_bitbang_stop(&pp->bitbang); /* power down */ parport_write_data(pp->port, 0); parport_release(pp->pd); parport_unregister_device(pp->pd); (void) spi_master_put(pp->bitbang.master); lm70llp = NULL; } static struct parport_driver spi_lm70llp_drv = { .name = DRVNAME, .attach = spi_lm70llp_attach, .detach = spi_lm70llp_detach, }; static int __init init_spi_lm70llp(void) { return parport_register_driver(&spi_lm70llp_drv); } module_init(init_spi_lm70llp); static void __exit cleanup_spi_lm70llp(void) { parport_unregister_driver(&spi_lm70llp_drv); } module_exit(cleanup_spi_lm70llp); MODULE_AUTHOR("Kaiwan N Billimoria <kaiwan@designergraphix.com>"); MODULE_DESCRIPTION( "Parport adapter for the National Semiconductor LM70 LLP eval board"); MODULE_LICENSE("GPL");
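/*
 * For reference, a sketch of how the 16-bit word shifted in over this
 * bridge becomes a temperature. Per the LM70 datasheet the reading is an
 * 11-bit two's-complement value in bits D15..D5 with an LSB of 0.25 degC;
 * the hwmon "lm70" protocol driver layered on top of this one performs an
 * equivalent conversion. Illustration only, not code from that driver.
 */
#if 0	/* illustration only */
static int lm70_raw_to_millicelsius(u16 raw)
{
	/* sign-extend, drop the 5 unused LSBs, scale: 1 LSB = 250 mdegC */
	s16 val = (s16)raw;

	return (val / 32) * 250;
}
#endif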
gpl-2.0
ownhere/samsung-kernel-sgs2-ownhere
drivers/staging/msm/mdp_dma_s.c
3057
3974
/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/hrtimer.h> #include <mach/hardware.h> #include <linux/io.h> #include <asm/system.h> #include <asm/mach-types.h> #include <linux/semaphore.h> #include <linux/spinlock.h> #include <linux/fb.h> #include "mdp.h" #include "msm_fb.h" static void mdp_dma_s_update_lcd(struct msm_fb_data_type *mfd) { MDPIBUF *iBuf = &mfd->ibuf; int mddi_dest = FALSE; uint32 outBpp = iBuf->bpp; uint32 dma_s_cfg_reg; uint8 *src; struct msm_fb_panel_data *pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data; dma_s_cfg_reg = DMA_PACK_TIGHT | DMA_PACK_ALIGN_LSB | DMA_OUT_SEL_AHB | DMA_IBUF_NONCONTIGUOUS; if (mfd->fb_imgType == MDP_BGR_565) dma_s_cfg_reg |= DMA_PACK_PATTERN_BGR; else dma_s_cfg_reg |= DMA_PACK_PATTERN_RGB; if (outBpp == 4) dma_s_cfg_reg |= DMA_IBUF_C3ALPHA_EN; if (outBpp == 2) dma_s_cfg_reg |= DMA_IBUF_FORMAT_RGB565; if (mfd->panel_info.pdest != DISPLAY_2) { printk(KERN_ERR "error: non-secondary type through dma_s!\n"); return; } if (mfd->panel_info.type == MDDI_PANEL) { dma_s_cfg_reg |= DMA_OUT_SEL_MDDI; mddi_dest = TRUE; } else { dma_s_cfg_reg |= DMA_AHBM_LCD_SEL_SECONDARY; outp32(MDP_EBI2_LCD1, mfd->data_port_phys); } dma_s_cfg_reg |= DMA_DITHER_EN; src = (uint8 *) iBuf->buf; /* starting input address */ src += (iBuf->dma_x + iBuf->dma_y * iBuf->ibuf_width) * outBpp; /* MDP cmd block enable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE); /* PIXELSIZE */ MDP_OUTP(MDP_BASE + 0xa0004, (iBuf->dma_h << 16 | iBuf->dma_w)); MDP_OUTP(MDP_BASE + 0xa0008, src); /* ibuf address */ MDP_OUTP(MDP_BASE + 0xa000c, iBuf->ibuf_width * outBpp);/* ystride */ if (mfd->panel_info.bpp == 18) { dma_s_cfg_reg |= DMA_DSTC0G_6BITS | /* 666 18BPP */ DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS; } else { dma_s_cfg_reg |= DMA_DSTC0G_6BITS | /* 565 16BPP */ DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS; } if (mddi_dest) { MDP_OUTP(MDP_BASE + 0xa0010, (iBuf->dma_y << 16) | iBuf->dma_x); MDP_OUTP(MDP_BASE + 0x00090, 1); MDP_OUTP(MDP_BASE + 0x00094, (MDDI_VDO_PACKET_DESC << 16) | mfd->panel_info.mddi.vdopkt); } else { /* setting LCDC write window */ pdata->set_rect(iBuf->dma_x, iBuf->dma_y, iBuf->dma_w, iBuf->dma_h); } MDP_OUTP(MDP_BASE + 0xa0000, dma_s_cfg_reg); /* MDP cmd block disable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd); } void mdp_dma_s_update(struct msm_fb_data_type *mfd) { down(&mfd->dma->mutex); if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) { down(&mfd->sem); mdp_enable_irq(MDP_DMA_S_TERM); mfd->dma->busy = TRUE; INIT_COMPLETION(mfd->dma->comp); mfd->ibuf_flushed = TRUE; mdp_dma_s_update_lcd(mfd); up(&mfd->sem); /* wait until DMA finishes the current job */ 
wait_for_completion_killable(&mfd->dma->comp); mdp_disable_irq(MDP_DMA_S_TERM); /* signal if pan function is waiting for the update completion */ if (mfd->pan_waiting) { mfd->pan_waiting = FALSE; complete(&mfd->pan_comp); } } up(&mfd->dma->mutex); }
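/*
 * Worked example of the source-pointer arithmetic in
 * mdp_dma_s_update_lcd() above, with illustrative numbers: for an RGB565
 * frame (outBpp = 2) with ibuf_width = 240 and a dirty rectangle starting
 * at (dma_x, dma_y) = (10, 20), the byte offset added to iBuf->buf is
 *
 *   (10 + 20 * 240) * 2 = 9620 bytes
 *
 * so the DMA engine is pointed at the first pixel of the update region,
 * while the ystride register (ibuf_width * outBpp = 480) tells it how far
 * to advance between scanlines.
 */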
gpl-2.0
thicklizard/sense-4.2
arch/alpha/kernel/sys_nautilus.c
3313
6842
/* * linux/arch/alpha/kernel/sys_nautilus.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1998 Richard Henderson * Copyright (C) 1999 Alpha Processor, Inc., * (David Daniel, Stig Telfer, Soohoon Lee) * * Code supporting NAUTILUS systems. * * * NAUTILUS has the following I/O features: * * a) Driven by AMD 751 aka IRONGATE (northbridge): * 4 PCI slots * 1 AGP slot * * b) Driven by ALI M1543C (southbridge) * 2 ISA slots * 2 IDE connectors * 1 dual drive capable FDD controller * 2 serial ports * 1 ECP/EPP/SP parallel port * 2 USB ports */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/reboot.h> #include <linux/bootmem.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/pci.h> #include <asm/pgtable.h> #include <asm/core_irongate.h> #include <asm/hwrpb.h> #include <asm/tlbflush.h> #include "proto.h" #include "err_impl.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" static void __init nautilus_init_irq(void) { if (alpha_using_srm) { alpha_mv.device_interrupt = srm_device_interrupt; } init_i8259a_irqs(); common_init_isa_dma(); } static int __init nautilus_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { /* Preserve the IRQ set up by the console. */ u8 irq; /* UP1500: AGP INTA is actually routed to IRQ 5, not IRQ 10 as console reports. Check the device id of AGP bridge to distinguish UP1500 from UP1000/1100. Note: 'pin' is 2 due to bridge swizzle. */ if (slot == 1 && pin == 2 && dev->bus->self && dev->bus->self->device == 0x700f) return 5; pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); return irq; } void nautilus_kill_arch(int mode) { struct pci_bus *bus = pci_isa_hose->bus; u32 pmuport; int off; switch (mode) { case LINUX_REBOOT_CMD_RESTART: if (! alpha_using_srm) { u8 t8; pci_bus_read_config_byte(bus, 0x38, 0x43, &t8); pci_bus_write_config_byte(bus, 0x38, 0x43, t8 | 0x80); outb(1, 0x92); outb(0, 0x92); /* NOTREACHED */ } break; case LINUX_REBOOT_CMD_POWER_OFF: /* Assume M1543C */ off = 0x2000; /* SLP_TYPE = 0, SLP_EN = 1 */ pci_bus_read_config_dword(bus, 0x88, 0x10, &pmuport); if (!pmuport) { /* M1535D/D+ */ off = 0x3400; /* SLP_TYPE = 5, SLP_EN = 1 */ pci_bus_read_config_dword(bus, 0x88, 0xe0, &pmuport); } pmuport &= 0xfffe; outw(0xffff, pmuport); /* Clear pending events. */ outw(off, pmuport + 4); /* NOTREACHED */ break; } } /* Perform analysis of a machine check that arrived from the system (NMI) */ static void naut_sys_machine_check(unsigned long vector, unsigned long la_ptr, struct pt_regs *regs) { printk("PC %lx RA %lx\n", regs->pc, regs->r26); irongate_pci_clr_err(); } /* Machine checks can come from two sources - those on the CPU and those in the system. They are analysed separately but all starts here. */ void nautilus_machine_check(unsigned long vector, unsigned long la_ptr) { char *mchk_class; /* Now for some analysis. Machine checks fall into two classes -- those picked up by the system, and those picked up by the CPU. Add to that the two levels of severity - correctable or not. */ if (vector == SCB_Q_SYSMCHK && ((IRONGATE0->dramms & 0x300) == 0x300)) { unsigned long nmi_ctl; /* Clear ALI NMI */ nmi_ctl = inb(0x61); nmi_ctl |= 0x0c; outb(nmi_ctl, 0x61); nmi_ctl &= ~0x0c; outb(nmi_ctl, 0x61); /* Write again clears error bits. 
*/ IRONGATE0->stat_cmd = IRONGATE0->stat_cmd & ~0x100; mb(); IRONGATE0->stat_cmd; /* Write again clears error bits. */ IRONGATE0->dramms = IRONGATE0->dramms; mb(); IRONGATE0->dramms; draina(); wrmces(0x7); mb(); return; } if (vector == SCB_Q_SYSERR) mchk_class = "Correctable"; else if (vector == SCB_Q_SYSMCHK) mchk_class = "Fatal"; else { ev6_machine_check(vector, la_ptr); return; } printk(KERN_CRIT "NAUTILUS Machine check 0x%lx " "[%s System Machine Check (NMI)]\n", vector, mchk_class); naut_sys_machine_check(vector, la_ptr, get_irq_regs()); /* Tell the PALcode to clear the machine check */ draina(); wrmces(0x7); mb(); } extern void free_reserved_mem(void *, void *); extern void pcibios_claim_one_bus(struct pci_bus *); static struct resource irongate_mem = { .name = "Irongate PCI MEM", .flags = IORESOURCE_MEM, }; void __init nautilus_init_pci(void) { struct pci_controller *hose = hose_head; struct pci_bus *bus; struct pci_dev *irongate; unsigned long bus_align, bus_size, pci_mem; unsigned long memtop = max_low_pfn << PAGE_SHIFT; /* Scan our single hose. */ bus = pci_scan_bus(0, alpha_mv.pci_ops, hose); hose->bus = bus; pcibios_claim_one_bus(bus); irongate = pci_get_bus_and_slot(0, 0); bus->self = irongate; bus->resource[1] = &irongate_mem; pci_bus_size_bridges(bus); /* IO port range. */ bus->resource[0]->start = 0; bus->resource[0]->end = 0xffff; /* Set up PCI memory range - limit is hardwired to 0xffffffff, base must be at aligned to 16Mb. */ bus_align = bus->resource[1]->start; bus_size = bus->resource[1]->end + 1 - bus_align; if (bus_align < 0x1000000UL) bus_align = 0x1000000UL; pci_mem = (0x100000000UL - bus_size) & -bus_align; bus->resource[1]->start = pci_mem; bus->resource[1]->end = 0xffffffffUL; if (request_resource(&iomem_resource, bus->resource[1]) < 0) printk(KERN_ERR "Failed to request MEM on hose 0\n"); if (pci_mem < memtop) memtop = pci_mem; if (memtop > alpha_mv.min_mem_address) { free_reserved_mem(__va(alpha_mv.min_mem_address), __va(memtop)); printk("nautilus_init_pci: %ldk freed\n", (memtop - alpha_mv.min_mem_address) >> 10); } if ((IRONGATE0->dev_vendor >> 16) > 0x7006) /* Albacore? */ IRONGATE0->pci_mem = pci_mem; pci_bus_assign_resources(bus); /* pci_common_swizzle() relies on bus->self being NULL for the root bus, so just clear it. */ bus->self = NULL; pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq); } /* * The System Vectors */ struct alpha_machine_vector nautilus_mv __initmv = { .vector_name = "Nautilus", DO_EV6_MMU, DO_DEFAULT_RTC, DO_IRONGATE_IO, .machine_check = nautilus_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = IRONGATE_DEFAULT_MEM_BASE, .nr_irqs = 16, .device_interrupt = isa_device_interrupt, .init_arch = irongate_init_arch, .init_irq = nautilus_init_irq, .init_rtc = common_init_rtc, .init_pci = nautilus_init_pci, .kill_arch = nautilus_kill_arch, .pci_map_irq = nautilus_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(nautilus)
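/*
 * Worked example of the PCI memory window placement in nautilus_init_pci()
 * above, with illustrative numbers: if the bridge resources size to
 * bus_size = 0x04000000 (64 MB) with bus_align = 0x04000000, then
 *
 *   pci_mem = (0x100000000UL - 0x04000000) & -0x04000000 = 0xFC000000
 *
 * i.e. the window is pushed up against the 4 GB limit and rounded down to
 * a 64 MB boundary; RAM between alpha_mv.min_mem_address and the memtop
 * clamped to pci_mem is then returned via free_reserved_mem().
 */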
gpl-2.0
p12tic/tf700-kernel
net/ipv4/netfilter/iptable_filter.c
4593
3064
/* * This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x. * * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/slab.h> #include <net/ip.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); MODULE_DESCRIPTION("iptables filter table"); #define FILTER_VALID_HOOKS ((1 << NF_INET_LOCAL_IN) | \ (1 << NF_INET_FORWARD) | \ (1 << NF_INET_LOCAL_OUT)) static const struct xt_table packet_filter = { .name = "filter", .valid_hooks = FILTER_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV4, .priority = NF_IP_PRI_FILTER, }; static unsigned int iptable_filter_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { const struct net *net; if (hook == NF_INET_LOCAL_OUT && (skb->len < sizeof(struct iphdr) || ip_hdrlen(skb) < sizeof(struct iphdr))) /* root is playing with raw sockets. */ return NF_ACCEPT; net = dev_net((in != NULL) ? in : out); return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_filter); } static struct nf_hook_ops *filter_ops __read_mostly; /* Default to forward because I got too much mail already. */ static bool forward = true; module_param(forward, bool, 0000); static int __net_init iptable_filter_net_init(struct net *net) { struct ipt_replace *repl; repl = ipt_alloc_initial_table(&packet_filter); if (repl == NULL) return -ENOMEM; /* Entry 1 is the FORWARD hook */ ((struct ipt_standard *)repl->entries)[1].target.verdict = forward ? -NF_ACCEPT - 1 : -NF_DROP - 1; net->ipv4.iptable_filter = ipt_register_table(net, &packet_filter, repl); kfree(repl); if (IS_ERR(net->ipv4.iptable_filter)) return PTR_ERR(net->ipv4.iptable_filter); return 0; } static void __net_exit iptable_filter_net_exit(struct net *net) { ipt_unregister_table(net, net->ipv4.iptable_filter); } static struct pernet_operations iptable_filter_net_ops = { .init = iptable_filter_net_init, .exit = iptable_filter_net_exit, }; static int __init iptable_filter_init(void) { int ret; ret = register_pernet_subsys(&iptable_filter_net_ops); if (ret < 0) return ret; /* Register hooks */ filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook); if (IS_ERR(filter_ops)) { ret = PTR_ERR(filter_ops); goto cleanup_table; } return ret; cleanup_table: unregister_pernet_subsys(&iptable_filter_net_ops); return ret; } static void __exit iptable_filter_fini(void) { xt_hook_unlink(&packet_filter, filter_ops); unregister_pernet_subsys(&iptable_filter_net_ops); } module_init(iptable_filter_init); module_exit(iptable_filter_fini);
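/*
 * Note on the verdict arithmetic in iptable_filter_net_init() above:
 * standard-target verdicts are stored in the ruleset as -(verdict) - 1,
 * so with NF_DROP == 0 and NF_ACCEPT == 1 the FORWARD policy entry becomes
 *
 *   forward ? -NF_ACCEPT - 1 : -NF_DROP - 1   ==   forward ? -2 : -1
 *
 * which is the same encoding userspace uses when it sets a chain policy.
 */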
gpl-2.0
micchie/mptcp
drivers/media/common/tuners/tda18271-common.c
4849
18000
/* tda18271-common.c - driver for the Philips / NXP TDA18271 silicon tuner Copyright (C) 2007, 2008 Michael Krufky <mkrufky@linuxtv.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "tda18271-priv.h" static int tda18271_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) { struct tda18271_priv *priv = fe->tuner_priv; enum tda18271_i2c_gate gate; int ret = 0; switch (priv->gate) { case TDA18271_GATE_DIGITAL: case TDA18271_GATE_ANALOG: gate = priv->gate; break; case TDA18271_GATE_AUTO: default: switch (priv->mode) { case TDA18271_DIGITAL: gate = TDA18271_GATE_DIGITAL; break; case TDA18271_ANALOG: default: gate = TDA18271_GATE_ANALOG; break; } } switch (gate) { case TDA18271_GATE_ANALOG: if (fe->ops.analog_ops.i2c_gate_ctrl) ret = fe->ops.analog_ops.i2c_gate_ctrl(fe, enable); break; case TDA18271_GATE_DIGITAL: if (fe->ops.i2c_gate_ctrl) ret = fe->ops.i2c_gate_ctrl(fe, enable); break; default: ret = -EINVAL; break; } return ret; }; /*---------------------------------------------------------------------*/ static void tda18271_dump_regs(struct dvb_frontend *fe, int extended) { struct tda18271_priv *priv = fe->tuner_priv; unsigned char *regs = priv->tda18271_regs; tda_reg("=== TDA18271 REG DUMP ===\n"); tda_reg("ID_BYTE = 0x%02x\n", 0xff & regs[R_ID]); tda_reg("THERMO_BYTE = 0x%02x\n", 0xff & regs[R_TM]); tda_reg("POWER_LEVEL_BYTE = 0x%02x\n", 0xff & regs[R_PL]); tda_reg("EASY_PROG_BYTE_1 = 0x%02x\n", 0xff & regs[R_EP1]); tda_reg("EASY_PROG_BYTE_2 = 0x%02x\n", 0xff & regs[R_EP2]); tda_reg("EASY_PROG_BYTE_3 = 0x%02x\n", 0xff & regs[R_EP3]); tda_reg("EASY_PROG_BYTE_4 = 0x%02x\n", 0xff & regs[R_EP4]); tda_reg("EASY_PROG_BYTE_5 = 0x%02x\n", 0xff & regs[R_EP5]); tda_reg("CAL_POST_DIV_BYTE = 0x%02x\n", 0xff & regs[R_CPD]); tda_reg("CAL_DIV_BYTE_1 = 0x%02x\n", 0xff & regs[R_CD1]); tda_reg("CAL_DIV_BYTE_2 = 0x%02x\n", 0xff & regs[R_CD2]); tda_reg("CAL_DIV_BYTE_3 = 0x%02x\n", 0xff & regs[R_CD3]); tda_reg("MAIN_POST_DIV_BYTE = 0x%02x\n", 0xff & regs[R_MPD]); tda_reg("MAIN_DIV_BYTE_1 = 0x%02x\n", 0xff & regs[R_MD1]); tda_reg("MAIN_DIV_BYTE_2 = 0x%02x\n", 0xff & regs[R_MD2]); tda_reg("MAIN_DIV_BYTE_3 = 0x%02x\n", 0xff & regs[R_MD3]); /* only dump extended regs if DBG_ADV is set */ if (!(tda18271_debug & DBG_ADV)) return; /* W indicates write-only registers. * Register dump for write-only registers shows last value written. 
*/ tda_reg("EXTENDED_BYTE_1 = 0x%02x\n", 0xff & regs[R_EB1]); tda_reg("EXTENDED_BYTE_2 = 0x%02x\n", 0xff & regs[R_EB2]); tda_reg("EXTENDED_BYTE_3 = 0x%02x\n", 0xff & regs[R_EB3]); tda_reg("EXTENDED_BYTE_4 = 0x%02x\n", 0xff & regs[R_EB4]); tda_reg("EXTENDED_BYTE_5 = 0x%02x\n", 0xff & regs[R_EB5]); tda_reg("EXTENDED_BYTE_6 = 0x%02x\n", 0xff & regs[R_EB6]); tda_reg("EXTENDED_BYTE_7 = 0x%02x\n", 0xff & regs[R_EB7]); tda_reg("EXTENDED_BYTE_8 = 0x%02x\n", 0xff & regs[R_EB8]); tda_reg("EXTENDED_BYTE_9 W = 0x%02x\n", 0xff & regs[R_EB9]); tda_reg("EXTENDED_BYTE_10 = 0x%02x\n", 0xff & regs[R_EB10]); tda_reg("EXTENDED_BYTE_11 = 0x%02x\n", 0xff & regs[R_EB11]); tda_reg("EXTENDED_BYTE_12 = 0x%02x\n", 0xff & regs[R_EB12]); tda_reg("EXTENDED_BYTE_13 = 0x%02x\n", 0xff & regs[R_EB13]); tda_reg("EXTENDED_BYTE_14 = 0x%02x\n", 0xff & regs[R_EB14]); tda_reg("EXTENDED_BYTE_15 = 0x%02x\n", 0xff & regs[R_EB15]); tda_reg("EXTENDED_BYTE_16 W = 0x%02x\n", 0xff & regs[R_EB16]); tda_reg("EXTENDED_BYTE_17 W = 0x%02x\n", 0xff & regs[R_EB17]); tda_reg("EXTENDED_BYTE_18 = 0x%02x\n", 0xff & regs[R_EB18]); tda_reg("EXTENDED_BYTE_19 W = 0x%02x\n", 0xff & regs[R_EB19]); tda_reg("EXTENDED_BYTE_20 W = 0x%02x\n", 0xff & regs[R_EB20]); tda_reg("EXTENDED_BYTE_21 = 0x%02x\n", 0xff & regs[R_EB21]); tda_reg("EXTENDED_BYTE_22 = 0x%02x\n", 0xff & regs[R_EB22]); tda_reg("EXTENDED_BYTE_23 = 0x%02x\n", 0xff & regs[R_EB23]); } int tda18271_read_regs(struct dvb_frontend *fe) { struct tda18271_priv *priv = fe->tuner_priv; unsigned char *regs = priv->tda18271_regs; unsigned char buf = 0x00; int ret; struct i2c_msg msg[] = { { .addr = priv->i2c_props.addr, .flags = 0, .buf = &buf, .len = 1 }, { .addr = priv->i2c_props.addr, .flags = I2C_M_RD, .buf = regs, .len = 16 } }; tda18271_i2c_gate_ctrl(fe, 1); /* read all registers */ ret = i2c_transfer(priv->i2c_props.adap, msg, 2); tda18271_i2c_gate_ctrl(fe, 0); if (ret != 2) tda_err("ERROR: i2c_transfer returned: %d\n", ret); if (tda18271_debug & DBG_REG) tda18271_dump_regs(fe, 0); return (ret == 2 ? 0 : ret); } int tda18271_read_extended(struct dvb_frontend *fe) { struct tda18271_priv *priv = fe->tuner_priv; unsigned char *regs = priv->tda18271_regs; unsigned char regdump[TDA18271_NUM_REGS]; unsigned char buf = 0x00; int ret, i; struct i2c_msg msg[] = { { .addr = priv->i2c_props.addr, .flags = 0, .buf = &buf, .len = 1 }, { .addr = priv->i2c_props.addr, .flags = I2C_M_RD, .buf = regdump, .len = TDA18271_NUM_REGS } }; tda18271_i2c_gate_ctrl(fe, 1); /* read all registers */ ret = i2c_transfer(priv->i2c_props.adap, msg, 2); tda18271_i2c_gate_ctrl(fe, 0); if (ret != 2) tda_err("ERROR: i2c_transfer returned: %d\n", ret); for (i = 0; i < TDA18271_NUM_REGS; i++) { /* don't update write-only registers */ if ((i != R_EB9) && (i != R_EB16) && (i != R_EB17) && (i != R_EB19) && (i != R_EB20)) regs[i] = regdump[i]; } if (tda18271_debug & DBG_REG) tda18271_dump_regs(fe, 1); return (ret == 2 ? 
0 : ret); } int tda18271_write_regs(struct dvb_frontend *fe, int idx, int len) { struct tda18271_priv *priv = fe->tuner_priv; unsigned char *regs = priv->tda18271_regs; unsigned char buf[TDA18271_NUM_REGS + 1]; struct i2c_msg msg = { .addr = priv->i2c_props.addr, .flags = 0, .buf = buf }; int i, ret = 1, max; BUG_ON((len == 0) || (idx + len > sizeof(buf))); switch (priv->small_i2c) { case TDA18271_03_BYTE_CHUNK_INIT: max = 3; break; case TDA18271_08_BYTE_CHUNK_INIT: max = 8; break; case TDA18271_16_BYTE_CHUNK_INIT: max = 16; break; case TDA18271_39_BYTE_CHUNK_INIT: default: max = 39; } tda18271_i2c_gate_ctrl(fe, 1); while (len) { if (max > len) max = len; buf[0] = idx; for (i = 1; i <= max; i++) buf[i] = regs[idx - 1 + i]; msg.len = max + 1; /* write registers */ ret = i2c_transfer(priv->i2c_props.adap, &msg, 1); if (ret != 1) break; idx += max; len -= max; } tda18271_i2c_gate_ctrl(fe, 0); if (ret != 1) tda_err("ERROR: idx = 0x%x, len = %d, " "i2c_transfer returned: %d\n", idx, max, ret); return (ret == 1 ? 0 : ret); } /*---------------------------------------------------------------------*/ int tda18271_charge_pump_source(struct dvb_frontend *fe, enum tda18271_pll pll, int force) { struct tda18271_priv *priv = fe->tuner_priv; unsigned char *regs = priv->tda18271_regs; int r_cp = (pll == TDA18271_CAL_PLL) ? R_EB7 : R_EB4; regs[r_cp] &= ~0x20; regs[r_cp] |= ((force & 1) << 5); return tda18271_write_regs(fe, r_cp, 1); } int tda18271_init_regs(struct dvb_frontend *fe) { struct tda18271_priv *priv = fe->tuner_priv; unsigned char *regs = priv->tda18271_regs; tda_dbg("initializing registers for device @ %d-%04x\n", i2c_adapter_id(priv->i2c_props.adap), priv->i2c_props.addr); /* initialize registers */ switch (priv->id) { case TDA18271HDC1: regs[R_ID] = 0x83; break; case TDA18271HDC2: regs[R_ID] = 0x84; break; }; regs[R_TM] = 0x08; regs[R_PL] = 0x80; regs[R_EP1] = 0xc6; regs[R_EP2] = 0xdf; regs[R_EP3] = 0x16; regs[R_EP4] = 0x60; regs[R_EP5] = 0x80; regs[R_CPD] = 0x80; regs[R_CD1] = 0x00; regs[R_CD2] = 0x00; regs[R_CD3] = 0x00; regs[R_MPD] = 0x00; regs[R_MD1] = 0x00; regs[R_MD2] = 0x00; regs[R_MD3] = 0x00; switch (priv->id) { case TDA18271HDC1: regs[R_EB1] = 0xff; break; case TDA18271HDC2: regs[R_EB1] = 0xfc; break; }; regs[R_EB2] = 0x01; regs[R_EB3] = 0x84; regs[R_EB4] = 0x41; regs[R_EB5] = 0x01; regs[R_EB6] = 0x84; regs[R_EB7] = 0x40; regs[R_EB8] = 0x07; regs[R_EB9] = 0x00; regs[R_EB10] = 0x00; regs[R_EB11] = 0x96; switch (priv->id) { case TDA18271HDC1: regs[R_EB12] = 0x0f; break; case TDA18271HDC2: regs[R_EB12] = 0x33; break; }; regs[R_EB13] = 0xc1; regs[R_EB14] = 0x00; regs[R_EB15] = 0x8f; regs[R_EB16] = 0x00; regs[R_EB17] = 0x00; switch (priv->id) { case TDA18271HDC1: regs[R_EB18] = 0x00; break; case TDA18271HDC2: regs[R_EB18] = 0x8c; break; }; regs[R_EB19] = 0x00; regs[R_EB20] = 0x20; switch (priv->id) { case TDA18271HDC1: regs[R_EB21] = 0x33; break; case TDA18271HDC2: regs[R_EB21] = 0xb3; break; }; regs[R_EB22] = 0x48; regs[R_EB23] = 0xb0; tda18271_write_regs(fe, 0x00, TDA18271_NUM_REGS); /* setup agc1 gain */ regs[R_EB17] = 0x00; tda18271_write_regs(fe, R_EB17, 1); regs[R_EB17] = 0x03; tda18271_write_regs(fe, R_EB17, 1); regs[R_EB17] = 0x43; tda18271_write_regs(fe, R_EB17, 1); regs[R_EB17] = 0x4c; tda18271_write_regs(fe, R_EB17, 1); /* setup agc2 gain */ if ((priv->id) == TDA18271HDC1) { regs[R_EB20] = 0xa0; tda18271_write_regs(fe, R_EB20, 1); regs[R_EB20] = 0xa7; tda18271_write_regs(fe, R_EB20, 1); regs[R_EB20] = 0xe7; tda18271_write_regs(fe, R_EB20, 1); regs[R_EB20] = 0xec; 
tda18271_write_regs(fe, R_EB20, 1); } /* image rejection calibration */ /* low-band */ regs[R_EP3] = 0x1f; regs[R_EP4] = 0x66; regs[R_EP5] = 0x81; regs[R_CPD] = 0xcc; regs[R_CD1] = 0x6c; regs[R_CD2] = 0x00; regs[R_CD3] = 0x00; regs[R_MPD] = 0xcd; regs[R_MD1] = 0x77; regs[R_MD2] = 0x08; regs[R_MD3] = 0x00; tda18271_write_regs(fe, R_EP3, 11); if ((priv->id) == TDA18271HDC2) { /* main pll cp source on */ tda18271_charge_pump_source(fe, TDA18271_MAIN_PLL, 1); msleep(1); /* main pll cp source off */ tda18271_charge_pump_source(fe, TDA18271_MAIN_PLL, 0); } msleep(5); /* pll locking */ /* launch detector */ tda18271_write_regs(fe, R_EP1, 1); msleep(5); /* wanted low measurement */ regs[R_EP5] = 0x85; regs[R_CPD] = 0xcb; regs[R_CD1] = 0x66; regs[R_CD2] = 0x70; tda18271_write_regs(fe, R_EP3, 7); msleep(5); /* pll locking */ /* launch optimization algorithm */ tda18271_write_regs(fe, R_EP2, 1); msleep(30); /* image low optimization completion */ /* mid-band */ regs[R_EP5] = 0x82; regs[R_CPD] = 0xa8; regs[R_CD2] = 0x00; regs[R_MPD] = 0xa9; regs[R_MD1] = 0x73; regs[R_MD2] = 0x1a; tda18271_write_regs(fe, R_EP3, 11); msleep(5); /* pll locking */ /* launch detector */ tda18271_write_regs(fe, R_EP1, 1); msleep(5); /* wanted mid measurement */ regs[R_EP5] = 0x86; regs[R_CPD] = 0xa8; regs[R_CD1] = 0x66; regs[R_CD2] = 0xa0; tda18271_write_regs(fe, R_EP3, 7); msleep(5); /* pll locking */ /* launch optimization algorithm */ tda18271_write_regs(fe, R_EP2, 1); msleep(30); /* image mid optimization completion */ /* high-band */ regs[R_EP5] = 0x83; regs[R_CPD] = 0x98; regs[R_CD1] = 0x65; regs[R_CD2] = 0x00; regs[R_MPD] = 0x99; regs[R_MD1] = 0x71; regs[R_MD2] = 0xcd; tda18271_write_regs(fe, R_EP3, 11); msleep(5); /* pll locking */ /* launch detector */ tda18271_write_regs(fe, R_EP1, 1); msleep(5); /* wanted high measurement */ regs[R_EP5] = 0x87; regs[R_CD1] = 0x65; regs[R_CD2] = 0x50; tda18271_write_regs(fe, R_EP3, 7); msleep(5); /* pll locking */ /* launch optimization algorithm */ tda18271_write_regs(fe, R_EP2, 1); msleep(30); /* image high optimization completion */ /* return to normal mode */ regs[R_EP4] = 0x64; tda18271_write_regs(fe, R_EP4, 1); /* synchronize */ tda18271_write_regs(fe, R_EP1, 1); return 0; } /*---------------------------------------------------------------------*/ /* * Standby modes, EP3 [7:5] * * | SM || SM_LT || SM_XT || mode description * |=====\\=======\\=======\\=================================== * | 0 || 0 || 0 || normal mode * |-----||-------||-------||----------------------------------- * | || || || standby mode w/ slave tuner output * | 1 || 0 || 0 || & loop thru & xtal oscillator on * |-----||-------||-------||----------------------------------- * | 1 || 1 || 0 || standby mode w/ xtal oscillator on * |-----||-------||-------||----------------------------------- * | 1 || 1 || 1 || power off * */ int tda18271_set_standby_mode(struct dvb_frontend *fe, int sm, int sm_lt, int sm_xt) { struct tda18271_priv *priv = fe->tuner_priv; unsigned char *regs = priv->tda18271_regs; if (tda18271_debug & DBG_ADV) tda_dbg("sm = %d, sm_lt = %d, sm_xt = %d\n", sm, sm_lt, sm_xt); regs[R_EP3] &= ~0xe0; /* clear sm, sm_lt, sm_xt */ regs[R_EP3] |= (sm ? (1 << 7) : 0) | (sm_lt ? (1 << 6) : 0) | (sm_xt ? 
(1 << 5) : 0); return tda18271_write_regs(fe, R_EP3, 1); } /*---------------------------------------------------------------------*/ int tda18271_calc_main_pll(struct dvb_frontend *fe, u32 freq) { /* sets main post divider & divider bytes, but does not write them */ struct tda18271_priv *priv = fe->tuner_priv; unsigned char *regs = priv->tda18271_regs; u8 d, pd; u32 div; int ret = tda18271_lookup_pll_map(fe, MAIN_PLL, &freq, &pd, &d); if (tda_fail(ret)) goto fail; regs[R_MPD] = (0x7f & pd); div = ((d * (freq / 1000)) << 7) / 125; regs[R_MD1] = 0x7f & (div >> 16); regs[R_MD2] = 0xff & (div >> 8); regs[R_MD3] = 0xff & div; fail: return ret; } int tda18271_calc_cal_pll(struct dvb_frontend *fe, u32 freq) { /* sets cal post divider & divider bytes, but does not write them */ struct tda18271_priv *priv = fe->tuner_priv; unsigned char *regs = priv->tda18271_regs; u8 d, pd; u32 div; int ret = tda18271_lookup_pll_map(fe, CAL_PLL, &freq, &pd, &d); if (tda_fail(ret)) goto fail; regs[R_CPD] = pd; div = ((d * (freq / 1000)) << 7) / 125; regs[R_CD1] = 0x7f & (div >> 16); regs[R_CD2] = 0xff & (div >> 8); regs[R_CD3] = 0xff & div; fail: return ret; } /*---------------------------------------------------------------------*/ int tda18271_calc_bp_filter(struct dvb_frontend *fe, u32 *freq) { /* sets bp filter bits, but does not write them */ struct tda18271_priv *priv = fe->tuner_priv; unsigned char *regs = priv->tda18271_regs; u8 val; int ret = tda18271_lookup_map(fe, BP_FILTER, freq, &val); if (tda_fail(ret)) goto fail; regs[R_EP1] &= ~0x07; /* clear bp filter bits */ regs[R_EP1] |= (0x07 & val); fail: return ret; } int tda18271_calc_km(struct dvb_frontend *fe, u32 *freq) { /* sets K & M bits, but does not write them */ struct tda18271_priv *priv = fe->tuner_priv; unsigned char *regs = priv->tda18271_regs; u8 val; int ret = tda18271_lookup_map(fe, RF_CAL_KMCO, freq, &val); if (tda_fail(ret)) goto fail; regs[R_EB13] &= ~0x7c; /* clear k & m bits */ regs[R_EB13] |= (0x7c & val); fail: return ret; } int tda18271_calc_rf_band(struct dvb_frontend *fe, u32 *freq) { /* sets rf band bits, but does not write them */ struct tda18271_priv *priv = fe->tuner_priv; unsigned char *regs = priv->tda18271_regs; u8 val; int ret = tda18271_lookup_map(fe, RF_BAND, freq, &val); if (tda_fail(ret)) goto fail; regs[R_EP2] &= ~0xe0; /* clear rf band bits */ regs[R_EP2] |= (0xe0 & (val << 5)); fail: return ret; } int tda18271_calc_gain_taper(struct dvb_frontend *fe, u32 *freq) { /* sets gain taper bits, but does not write them */ struct tda18271_priv *priv = fe->tuner_priv; unsigned char *regs = priv->tda18271_regs; u8 val; int ret = tda18271_lookup_map(fe, GAIN_TAPER, freq, &val); if (tda_fail(ret)) goto fail; regs[R_EP2] &= ~0x1f; /* clear gain taper bits */ regs[R_EP2] |= (0x1f & val); fail: return ret; } int tda18271_calc_ir_measure(struct dvb_frontend *fe, u32 *freq) { /* sets IR Meas bits, but does not write them */ struct tda18271_priv *priv = fe->tuner_priv; unsigned char *regs = priv->tda18271_regs; u8 val; int ret = tda18271_lookup_map(fe, IR_MEASURE, freq, &val); if (tda_fail(ret)) goto fail; regs[R_EP5] &= ~0x07; regs[R_EP5] |= (0x07 & val); fail: return ret; } int tda18271_calc_rf_cal(struct dvb_frontend *fe, u32 *freq) { /* sets rf cal byte (RFC_Cprog), but does not write it */ struct tda18271_priv *priv = fe->tuner_priv; unsigned char *regs = priv->tda18271_regs; u8 val; int ret = tda18271_lookup_map(fe, RF_CAL, freq, &val); /* The TDA18271HD/C1 rf_cal map lookup is expected to go out of range * for frequencies above 
61.1 MHz. In these cases, the internal RF * tracking filters calibration mechanism is used. * * There is no need to warn the user about this. */ if (ret < 0) goto fail; regs[R_EB14] = val; fail: return ret; } int _tda_printk(struct tda18271_priv *state, const char *level, const char *func, const char *fmt, ...) { struct va_format vaf; va_list args; int rtn; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (state) rtn = printk("%s%s: [%d-%04x|%c] %pV", level, func, i2c_adapter_id(state->i2c_props.adap), state->i2c_props.addr, (state->role == TDA18271_MASTER) ? 'M' : 'S', &vaf); else rtn = printk("%s%s: %pV", level, func, &vaf); va_end(args); return rtn; }
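/*
 * Worked example of the divider arithmetic in tda18271_calc_main_pll()
 * above. The expression
 *
 *   div = ((d * (freq / 1000)) << 7) / 125
 *
 * is d * freq / 16 MHz scaled by 2^14, done in pure integer math
 * (128 / 125000 == 16384 / 16000000). With illustrative values d = 4 and
 * freq = 72 MHz:
 *
 *   div = (4 * 72000 * 128) / 125 = 294912 = 0x48000
 *   =>  R_MD1 = 0x04, R_MD2 = 0x80, R_MD3 = 0x00
 *
 * (whether the PLL lookup table actually yields d = 4 at 72 MHz is not
 * shown here; the numbers only demonstrate the scaling.)
 */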
gpl-2.0
noobnl/android_kernel_samsung_d2-jb_2.5.1
drivers/hwmon/max6639.c
4849
19308
/* * max6639.c - Support for Maxim MAX6639 * * 2-Channel Temperature Monitor with Dual PWM Fan-Speed Controller * * Copyright (C) 2010, 2011 Roland Stigge <stigge@antcom.de> * * based on the initial MAX6639 support from semptian.net * by He Changqing <hechangqing@semptian.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/i2c/max6639.h> /* Addresses to scan */ static unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END }; /* The MAX6639 registers, valid channel numbers: 0, 1 */ #define MAX6639_REG_TEMP(ch) (0x00 + (ch)) #define MAX6639_REG_STATUS 0x02 #define MAX6639_REG_OUTPUT_MASK 0x03 #define MAX6639_REG_GCONFIG 0x04 #define MAX6639_REG_TEMP_EXT(ch) (0x05 + (ch)) #define MAX6639_REG_ALERT_LIMIT(ch) (0x08 + (ch)) #define MAX6639_REG_OT_LIMIT(ch) (0x0A + (ch)) #define MAX6639_REG_THERM_LIMIT(ch) (0x0C + (ch)) #define MAX6639_REG_FAN_CONFIG1(ch) (0x10 + (ch) * 4) #define MAX6639_REG_FAN_CONFIG2a(ch) (0x11 + (ch) * 4) #define MAX6639_REG_FAN_CONFIG2b(ch) (0x12 + (ch) * 4) #define MAX6639_REG_FAN_CONFIG3(ch) (0x13 + (ch) * 4) #define MAX6639_REG_FAN_CNT(ch) (0x20 + (ch)) #define MAX6639_REG_TARGET_CNT(ch) (0x22 + (ch)) #define MAX6639_REG_FAN_PPR(ch) (0x24 + (ch)) #define MAX6639_REG_TARGTDUTY(ch) (0x26 + (ch)) #define MAX6639_REG_FAN_START_TEMP(ch) (0x28 + (ch)) #define MAX6639_REG_DEVID 0x3D #define MAX6639_REG_MANUID 0x3E #define MAX6639_REG_DEVREV 0x3F /* Register bits */ #define MAX6639_GCONFIG_STANDBY 0x80 #define MAX6639_GCONFIG_POR 0x40 #define MAX6639_GCONFIG_DISABLE_TIMEOUT 0x20 #define MAX6639_GCONFIG_CH2_LOCAL 0x10 #define MAX6639_GCONFIG_PWM_FREQ_HI 0x08 #define MAX6639_FAN_CONFIG1_PWM 0x80 #define MAX6639_FAN_CONFIG3_THERM_FULL_SPEED 0x40 static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 }; #define FAN_FROM_REG(val, rpm_range) ((val) == 0 || (val) == 255 ? 
\ 0 : (rpm_ranges[rpm_range] * 30) / (val)) #define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255) /* * Client data (each client gets its own) */ struct max6639_data { struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ /* Register values sampled regularly */ u16 temp[2]; /* Temperature, in 1/8 C, 0..255 C */ bool temp_fault[2]; /* Detected temperature diode failure */ u8 fan[2]; /* Register value: TACH count for fans >=30 */ u8 status; /* Detected channel alarms and fan failures */ /* Register values only written to */ u8 pwm[2]; /* Register value: Duty cycle 0..120 */ u8 temp_therm[2]; /* THERM Temperature, 0..255 C (->_max) */ u8 temp_alert[2]; /* ALERT Temperature, 0..255 C (->_crit) */ u8 temp_ot[2]; /* OT Temperature, 0..255 C (->_emergency) */ /* Register values initialized only once */ u8 ppr; /* Pulses per rotation 0..3 for 1..4 ppr */ u8 rpm_range; /* Index in above rpm_ranges table */ }; static struct max6639_data *max6639_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct max6639_data *data = i2c_get_clientdata(client); struct max6639_data *ret = data; int i; int status_reg; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) { int res; dev_dbg(&client->dev, "Starting max6639 update\n"); status_reg = i2c_smbus_read_byte_data(client, MAX6639_REG_STATUS); if (status_reg < 0) { ret = ERR_PTR(status_reg); goto abort; } data->status = status_reg; for (i = 0; i < 2; i++) { res = i2c_smbus_read_byte_data(client, MAX6639_REG_FAN_CNT(i)); if (res < 0) { ret = ERR_PTR(res); goto abort; } data->fan[i] = res; res = i2c_smbus_read_byte_data(client, MAX6639_REG_TEMP_EXT(i)); if (res < 0) { ret = ERR_PTR(res); goto abort; } data->temp[i] = res >> 5; data->temp_fault[i] = res & 0x01; res = i2c_smbus_read_byte_data(client, MAX6639_REG_TEMP(i)); if (res < 0) { ret = ERR_PTR(res); goto abort; } data->temp[i] |= res << 3; } data->last_updated = jiffies; data->valid = 1; } abort: mutex_unlock(&data->update_lock); return ret; } static ssize_t show_temp_input(struct device *dev, struct device_attribute *dev_attr, char *buf) { long temp; struct max6639_data *data = max6639_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); if (IS_ERR(data)) return PTR_ERR(data); temp = data->temp[attr->index] * 125; return sprintf(buf, "%ld\n", temp); } static ssize_t show_temp_fault(struct device *dev, struct device_attribute *dev_attr, char *buf) { struct max6639_data *data = max6639_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); if (IS_ERR(data)) return PTR_ERR(data); return sprintf(buf, "%d\n", data->temp_fault[attr->index]); } static ssize_t show_temp_max(struct device *dev, struct device_attribute *dev_attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct max6639_data *data = i2c_get_clientdata(client); struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); return sprintf(buf, "%d\n", (data->temp_therm[attr->index] * 1000)); } static ssize_t set_temp_max(struct device *dev, struct device_attribute *dev_attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct max6639_data *data = i2c_get_clientdata(client); struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); unsigned long val; int res; res = kstrtoul(buf, 10, &val); if (res) return res; 
mutex_lock(&data->update_lock); data->temp_therm[attr->index] = TEMP_LIMIT_TO_REG(val); i2c_smbus_write_byte_data(client, MAX6639_REG_THERM_LIMIT(attr->index), data->temp_therm[attr->index]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_crit(struct device *dev, struct device_attribute *dev_attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct max6639_data *data = i2c_get_clientdata(client); struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); return sprintf(buf, "%d\n", (data->temp_alert[attr->index] * 1000)); } static ssize_t set_temp_crit(struct device *dev, struct device_attribute *dev_attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct max6639_data *data = i2c_get_clientdata(client); struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); unsigned long val; int res; res = kstrtoul(buf, 10, &val); if (res) return res; mutex_lock(&data->update_lock); data->temp_alert[attr->index] = TEMP_LIMIT_TO_REG(val); i2c_smbus_write_byte_data(client, MAX6639_REG_ALERT_LIMIT(attr->index), data->temp_alert[attr->index]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_emergency(struct device *dev, struct device_attribute *dev_attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct max6639_data *data = i2c_get_clientdata(client); struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); return sprintf(buf, "%d\n", (data->temp_ot[attr->index] * 1000)); } static ssize_t set_temp_emergency(struct device *dev, struct device_attribute *dev_attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct max6639_data *data = i2c_get_clientdata(client); struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); unsigned long val; int res; res = kstrtoul(buf, 10, &val); if (res) return res; mutex_lock(&data->update_lock); data->temp_ot[attr->index] = TEMP_LIMIT_TO_REG(val); i2c_smbus_write_byte_data(client, MAX6639_REG_OT_LIMIT(attr->index), data->temp_ot[attr->index]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm(struct device *dev, struct device_attribute *dev_attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct max6639_data *data = i2c_get_clientdata(client); struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); return sprintf(buf, "%d\n", data->pwm[attr->index] * 255 / 120); } static ssize_t set_pwm(struct device *dev, struct device_attribute *dev_attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct max6639_data *data = i2c_get_clientdata(client); struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); unsigned long val; int res; res = kstrtoul(buf, 10, &val); if (res) return res; val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); data->pwm[attr->index] = (u8)(val * 120 / 255); i2c_smbus_write_byte_data(client, MAX6639_REG_TARGTDUTY(attr->index), data->pwm[attr->index]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_fan_input(struct device *dev, struct device_attribute *dev_attr, char *buf) { struct max6639_data *data = max6639_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); if (IS_ERR(data)) return PTR_ERR(data); return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index], data->rpm_range)); } static ssize_t show_alarm(struct device *dev, struct device_attribute *dev_attr, char *buf) { struct 
max6639_data *data = max6639_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); if (IS_ERR(data)) return PTR_ERR(data); return sprintf(buf, "%d\n", !!(data->status & (1 << attr->index))); } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1); static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1); static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max, set_temp_max, 0); static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max, set_temp_max, 1); static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_crit, set_temp_crit, 0); static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit, set_temp_crit, 1); static SENSOR_DEVICE_ATTR(temp1_emergency, S_IWUSR | S_IRUGO, show_temp_emergency, set_temp_emergency, 0); static SENSOR_DEVICE_ATTR(temp2_emergency, S_IWUSR | S_IRUGO, show_temp_emergency, set_temp_emergency, 1); static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0); static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1); static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0); static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1); static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 7); static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 6); static SENSOR_DEVICE_ATTR(temp1_emergency_alarm, S_IRUGO, show_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(temp2_emergency_alarm, S_IRUGO, show_alarm, NULL, 4); static struct attribute *max6639_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp1_fault.dev_attr.attr, &sensor_dev_attr_temp2_fault.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_temp2_crit.dev_attr.attr, &sensor_dev_attr_temp1_emergency.dev_attr.attr, &sensor_dev_attr_temp2_emergency.dev_attr.attr, &sensor_dev_attr_pwm1.dev_attr.attr, &sensor_dev_attr_pwm2.dev_attr.attr, &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan1_fault.dev_attr.attr, &sensor_dev_attr_fan2_fault.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp1_emergency_alarm.dev_attr.attr, &sensor_dev_attr_temp2_emergency_alarm.dev_attr.attr, NULL }; static const struct attribute_group max6639_group = { .attrs = max6639_attributes, }; /* * returns respective index in rpm_ranges table * 1 by default on invalid range */ static int rpm_range_to_reg(int range) { int i; for (i = 0; i < ARRAY_SIZE(rpm_ranges); i++) { if (rpm_ranges[i] == range) return i; } return 1; /* default: 4000 RPM */ } static int max6639_init_client(struct i2c_client *client) { struct max6639_data *data = i2c_get_clientdata(client); struct max6639_platform_data *max6639_info = 
client->dev.platform_data; int i; int rpm_range = 1; /* default: 4000 RPM */ int err; /* Reset chip to default values, see below for GCONFIG setup */ err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG, MAX6639_GCONFIG_POR); if (err) goto exit; /* Fans pulse per revolution is 2 by default */ if (max6639_info && max6639_info->ppr > 0 && max6639_info->ppr < 5) data->ppr = max6639_info->ppr; else data->ppr = 2; data->ppr -= 1; if (max6639_info) rpm_range = rpm_range_to_reg(max6639_info->rpm_range); data->rpm_range = rpm_range; for (i = 0; i < 2; i++) { /* Set Fan pulse per revolution */ err = i2c_smbus_write_byte_data(client, MAX6639_REG_FAN_PPR(i), data->ppr << 6); if (err) goto exit; /* Fans config PWM, RPM */ err = i2c_smbus_write_byte_data(client, MAX6639_REG_FAN_CONFIG1(i), MAX6639_FAN_CONFIG1_PWM | rpm_range); if (err) goto exit; /* Fans PWM polarity high by default */ if (max6639_info && max6639_info->pwm_polarity == 0) err = i2c_smbus_write_byte_data(client, MAX6639_REG_FAN_CONFIG2a(i), 0x00); else err = i2c_smbus_write_byte_data(client, MAX6639_REG_FAN_CONFIG2a(i), 0x02); if (err) goto exit; /* * /THERM full speed enable, * PWM frequency 25kHz, see also GCONFIG below */ err = i2c_smbus_write_byte_data(client, MAX6639_REG_FAN_CONFIG3(i), MAX6639_FAN_CONFIG3_THERM_FULL_SPEED | 0x03); if (err) goto exit; /* Max. temp. 80C/90C/100C */ data->temp_therm[i] = 80; data->temp_alert[i] = 90; data->temp_ot[i] = 100; err = i2c_smbus_write_byte_data(client, MAX6639_REG_THERM_LIMIT(i), data->temp_therm[i]); if (err) goto exit; err = i2c_smbus_write_byte_data(client, MAX6639_REG_ALERT_LIMIT(i), data->temp_alert[i]); if (err) goto exit; err = i2c_smbus_write_byte_data(client, MAX6639_REG_OT_LIMIT(i), data->temp_ot[i]); if (err) goto exit; /* PWM 120/120 (i.e. 
100%) */ data->pwm[i] = 120; err = i2c_smbus_write_byte_data(client, MAX6639_REG_TARGTDUTY(i), data->pwm[i]); if (err) goto exit; } /* Start monitoring */ err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG, MAX6639_GCONFIG_DISABLE_TIMEOUT | MAX6639_GCONFIG_CH2_LOCAL | MAX6639_GCONFIG_PWM_FREQ_HI); exit: return err; } /* Return 0 if detection is successful, -ENODEV otherwise */ static int max6639_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int dev_id, manu_id; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; /* Actual detection via device and manufacturer ID */ dev_id = i2c_smbus_read_byte_data(client, MAX6639_REG_DEVID); manu_id = i2c_smbus_read_byte_data(client, MAX6639_REG_MANUID); if (dev_id != 0x58 || manu_id != 0x4D) return -ENODEV; strlcpy(info->type, "max6639", I2C_NAME_SIZE); return 0; } static int max6639_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct max6639_data *data; int err; data = kzalloc(sizeof(struct max6639_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); mutex_init(&data->update_lock); /* Initialize the max6639 chip */ err = max6639_init_client(client); if (err < 0) goto error_free; /* Register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &max6639_group); if (err) goto error_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto error_remove; } dev_info(&client->dev, "temperature sensor and fan control found\n"); return 0; error_remove: sysfs_remove_group(&client->dev.kobj, &max6639_group); error_free: kfree(data); exit: return err; } static int max6639_remove(struct i2c_client *client) { struct max6639_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &max6639_group); kfree(data); return 0; } #ifdef CONFIG_PM_SLEEP static int max6639_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG); if (data < 0) return data; return i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG, data | MAX6639_GCONFIG_STANDBY); } static int max6639_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG); if (data < 0) return data; return i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG, data & ~MAX6639_GCONFIG_STANDBY); } #endif /* CONFIG_PM_SLEEP */ static const struct i2c_device_id max6639_id[] = { {"max6639", 0}, { } }; MODULE_DEVICE_TABLE(i2c, max6639_id); static const struct dev_pm_ops max6639_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(max6639_suspend, max6639_resume) }; static struct i2c_driver max6639_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "max6639", .pm = &max6639_pm_ops, }, .probe = max6639_probe, .remove = max6639_remove, .id_table = max6639_id, .detect = max6639_detect, .address_list = normal_i2c, }; module_i2c_driver(max6639_driver); MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>"); MODULE_DESCRIPTION("max6639 driver"); MODULE_LICENSE("GPL");
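/*
 * Illustrative sketch (not part of the driver above): the register/value
 * conversions max6639.c performs, reproduced as plain userspace C with
 * made-up register readings so the scaling is easy to follow.
 */
#include <stdio.h>

static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };

/* tach count -> RPM, as in FAN_FROM_REG; 0 and 255 mean "no valid reading" */
static int fan_from_reg(int val, int rpm_range)
{
	return (val == 0 || val == 255) ? 0 : (rpm_ranges[rpm_range] * 30) / val;
}

int main(void)
{
	/* fan: tach count 60 in the 4000 RPM range -> 4000 * 30 / 60 = 2000 */
	printf("fan: %d RPM\n", fan_from_reg(60, 1));

	/*
	 * temperature: 8 MSBs from MAX6639_REG_TEMP, 3 LSBs from the top of
	 * the extended register, giving units of 1/8 degC; sysfs reports
	 * millidegrees, hence the factor 125 in show_temp_input().
	 */
	int reg_temp = 50, reg_ext = 0x80;		/* hypothetical reads */
	int temp = (reg_temp << 3) | (reg_ext >> 5);	/* 404 * 1/8 degC */
	printf("temp: %d mC\n", temp * 125);		/* -> 50500 (50.5 C) */

	/* pwm: sysfs 0..255 is rescaled to the chip's 0..120 duty range */
	int sysfs_pwm = 255;
	printf("pwm reg: %d\n", sysfs_pwm * 120 / 255);	/* -> 120 = 100% duty */
	return 0;
}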
gpl-2.0
pinkflozd/L5_Kernel_3.4
sound/pci/trident/trident_main.c
5105
124744
/* * Maintained by Jaroslav Kysela <perex@perex.cz> * Originated by audio@tridentmicro.com * Fri Feb 19 15:55:28 MST 1999 * Routines for control of Trident 4DWave (DX and NX) chip * * BUGS: * * TODO: * --- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * SiS7018 S/PDIF support by Thomas Winischhofer <thomas@winischhofer.net> */ #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/gameport.h> #include <linux/dma-mapping.h> #include <linux/export.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/trident.h> #include <sound/asoundef.h> #include <asm/io.h> static int snd_trident_pcm_mixer_build(struct snd_trident *trident, struct snd_trident_voice * voice, struct snd_pcm_substream *substream); static int snd_trident_pcm_mixer_free(struct snd_trident *trident, struct snd_trident_voice * voice, struct snd_pcm_substream *substream); static irqreturn_t snd_trident_interrupt(int irq, void *dev_id); static int snd_trident_sis_reset(struct snd_trident *trident); static void snd_trident_clear_voices(struct snd_trident * trident, unsigned short v_min, unsigned short v_max); static int snd_trident_free(struct snd_trident *trident); /* * common I/O routines */ #if 0 static void snd_trident_print_voice_regs(struct snd_trident *trident, int voice) { unsigned int val, tmp; printk(KERN_DEBUG "Trident voice %i:\n", voice); outb(voice, TRID_REG(trident, T4D_LFO_GC_CIR)); val = inl(TRID_REG(trident, CH_LBA)); printk(KERN_DEBUG "LBA: 0x%x\n", val); val = inl(TRID_REG(trident, CH_GVSEL_PAN_VOL_CTRL_EC)); printk(KERN_DEBUG "GVSel: %i\n", val >> 31); printk(KERN_DEBUG "Pan: 0x%x\n", (val >> 24) & 0x7f); printk(KERN_DEBUG "Vol: 0x%x\n", (val >> 16) & 0xff); printk(KERN_DEBUG "CTRL: 0x%x\n", (val >> 12) & 0x0f); printk(KERN_DEBUG "EC: 0x%x\n", val & 0x0fff); if (trident->device != TRIDENT_DEVICE_ID_NX) { val = inl(TRID_REG(trident, CH_DX_CSO_ALPHA_FMS)); printk(KERN_DEBUG "CSO: 0x%x\n", val >> 16); printk("Alpha: 0x%x\n", (val >> 4) & 0x0fff); printk(KERN_DEBUG "FMS: 0x%x\n", val & 0x0f); val = inl(TRID_REG(trident, CH_DX_ESO_DELTA)); printk(KERN_DEBUG "ESO: 0x%x\n", val >> 16); printk(KERN_DEBUG "Delta: 0x%x\n", val & 0xffff); val = inl(TRID_REG(trident, CH_DX_FMC_RVOL_CVOL)); } else { // TRIDENT_DEVICE_ID_NX val = inl(TRID_REG(trident, CH_NX_DELTA_CSO)); tmp = (val >> 24) & 0xff; printk(KERN_DEBUG "CSO: 0x%x\n", val & 0x00ffffff); val = inl(TRID_REG(trident, CH_NX_DELTA_ESO)); tmp |= (val >> 16) & 0xff00; printk(KERN_DEBUG "Delta: 0x%x\n", tmp); printk(KERN_DEBUG "ESO: 0x%x\n", val & 0x00ffffff); val = inl(TRID_REG(trident, CH_NX_ALPHA_FMS_FMC_RVOL_CVOL)); printk(KERN_DEBUG "Alpha: 0x%x\n", val >> 20); printk(KERN_DEBUG "FMS: 0x%x\n", (val >> 16) & 0x0f); } printk(KERN_DEBUG "FMC: 0x%x\n", (val 
>> 14) & 3); printk(KERN_DEBUG "RVol: 0x%x\n", (val >> 7) & 0x7f); printk(KERN_DEBUG "CVol: 0x%x\n", val & 0x7f); } #endif /*--------------------------------------------------------------------------- unsigned short snd_trident_codec_read(struct snd_ac97 *ac97, unsigned short reg) Description: This routine will do all of the reading from the external CODEC (AC97). Parameters: ac97 - ac97 codec structure reg - CODEC register index, from AC97 Hal. returns: 16 bit value read from the AC97. ---------------------------------------------------------------------------*/ static unsigned short snd_trident_codec_read(struct snd_ac97 *ac97, unsigned short reg) { unsigned int data = 0, treg; unsigned short count = 0xffff; unsigned long flags; struct snd_trident *trident = ac97->private_data; spin_lock_irqsave(&trident->reg_lock, flags); if (trident->device == TRIDENT_DEVICE_ID_DX) { data = (DX_AC97_BUSY_READ | (reg & 0x000000ff)); outl(data, TRID_REG(trident, DX_ACR1_AC97_R)); do { data = inl(TRID_REG(trident, DX_ACR1_AC97_R)); if ((data & DX_AC97_BUSY_READ) == 0) break; } while (--count); } else if (trident->device == TRIDENT_DEVICE_ID_NX) { data = (NX_AC97_BUSY_READ | (reg & 0x000000ff)); treg = ac97->num == 0 ? NX_ACR2_AC97_R_PRIMARY : NX_ACR3_AC97_R_SECONDARY; outl(data, TRID_REG(trident, treg)); do { data = inl(TRID_REG(trident, treg)); if ((data & 0x00000C00) == 0) break; } while (--count); } else if (trident->device == TRIDENT_DEVICE_ID_SI7018) { data = SI_AC97_BUSY_READ | SI_AC97_AUDIO_BUSY | (reg & 0x000000ff); if (ac97->num == 1) data |= SI_AC97_SECONDARY; outl(data, TRID_REG(trident, SI_AC97_READ)); do { data = inl(TRID_REG(trident, SI_AC97_READ)); if ((data & (SI_AC97_BUSY_READ)) == 0) break; } while (--count); } if (count == 0 && !trident->ac97_detect) { snd_printk(KERN_ERR "ac97 codec read TIMEOUT [0x%x/0x%x]!!!\n", reg, data); data = 0; } spin_unlock_irqrestore(&trident->reg_lock, flags); return ((unsigned short) (data >> 16)); } /*--------------------------------------------------------------------------- void snd_trident_codec_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short wdata) Description: This routine will do all of the writing to the external CODEC (AC97). Parameters: ac97 - ac97 codec structure reg - CODEC register index, from AC97 Hal. data - Lower 16 bits are the data to write to CODEC. returns: TRUE if everything went ok, else FALSE. 
---------------------------------------------------------------------------*/ static void snd_trident_codec_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short wdata) { unsigned int address, data; unsigned short count = 0xffff; unsigned long flags; struct snd_trident *trident = ac97->private_data; data = ((unsigned long) wdata) << 16; spin_lock_irqsave(&trident->reg_lock, flags); if (trident->device == TRIDENT_DEVICE_ID_DX) { address = DX_ACR0_AC97_W; /* read AC-97 write register status */ do { if ((inw(TRID_REG(trident, address)) & DX_AC97_BUSY_WRITE) == 0) break; } while (--count); data |= (DX_AC97_BUSY_WRITE | (reg & 0x000000ff)); } else if (trident->device == TRIDENT_DEVICE_ID_NX) { address = NX_ACR1_AC97_W; /* read AC-97 write register status */ do { if ((inw(TRID_REG(trident, address)) & NX_AC97_BUSY_WRITE) == 0) break; } while (--count); data |= (NX_AC97_BUSY_WRITE | (ac97->num << 8) | (reg & 0x000000ff)); } else if (trident->device == TRIDENT_DEVICE_ID_SI7018) { address = SI_AC97_WRITE; /* read AC-97 write register status */ do { if ((inw(TRID_REG(trident, address)) & (SI_AC97_BUSY_WRITE)) == 0) break; } while (--count); data |= SI_AC97_BUSY_WRITE | SI_AC97_AUDIO_BUSY | (reg & 0x000000ff); if (ac97->num == 1) data |= SI_AC97_SECONDARY; } else { address = 0; /* keep GCC happy */ count = 0; /* return */ } if (count == 0) { spin_unlock_irqrestore(&trident->reg_lock, flags); return; } outl(data, TRID_REG(trident, address)); spin_unlock_irqrestore(&trident->reg_lock, flags); } /*--------------------------------------------------------------------------- void snd_trident_enable_eso(struct snd_trident *trident) Description: This routine will enable end of loop interrupts. End of loop interrupts will occur when a running channel reaches ESO. Also enables middle of loop interrupts. Parameters: trident - pointer to target device class for 4DWave. ---------------------------------------------------------------------------*/ static void snd_trident_enable_eso(struct snd_trident * trident) { unsigned int val; val = inl(TRID_REG(trident, T4D_LFO_GC_CIR)); val |= ENDLP_IE; val |= MIDLP_IE; if (trident->device == TRIDENT_DEVICE_ID_SI7018) val |= BANK_B_EN; outl(val, TRID_REG(trident, T4D_LFO_GC_CIR)); } /*--------------------------------------------------------------------------- void snd_trident_disable_eso(struct snd_trident *trident) Description: This routine will disable end of loop interrupts. End of loop interrupts will occur when a running channel reaches ESO. Also disables middle of loop interrupts. Parameters: trident - pointer to target device class for 4DWave. returns: TRUE if everything went ok, else FALSE. ---------------------------------------------------------------------------*/ static void snd_trident_disable_eso(struct snd_trident * trident) { unsigned int tmp; tmp = inl(TRID_REG(trident, T4D_LFO_GC_CIR)); tmp &= ~ENDLP_IE; tmp &= ~MIDLP_IE; outl(tmp, TRID_REG(trident, T4D_LFO_GC_CIR)); } /*--------------------------------------------------------------------------- void snd_trident_start_voice(struct snd_trident * trident, unsigned int voice) Description: Start a voice, any channel 0 thru 63. This routine automatically handles the fact that there are more than 32 channels available. Parameters : voice - Voice number 0 thru n. trident - pointer to target device class for 4DWave. Return Value: None. 
---------------------------------------------------------------------------*/ void snd_trident_start_voice(struct snd_trident * trident, unsigned int voice) { unsigned int mask = 1 << (voice & 0x1f); unsigned int reg = (voice & 0x20) ? T4D_START_B : T4D_START_A; outl(mask, TRID_REG(trident, reg)); } EXPORT_SYMBOL(snd_trident_start_voice); /*--------------------------------------------------------------------------- void snd_trident_stop_voice(struct snd_trident * trident, unsigned int voice) Description: Stop a voice, any channel 0 thru 63. This routine automatically handles the fact that there are more than 32 channels available. Parameters : voice - Voice number 0 thru n. trident - pointer to target device class for 4DWave. Return Value: None. ---------------------------------------------------------------------------*/ void snd_trident_stop_voice(struct snd_trident * trident, unsigned int voice) { unsigned int mask = 1 << (voice & 0x1f); unsigned int reg = (voice & 0x20) ? T4D_STOP_B : T4D_STOP_A; outl(mask, TRID_REG(trident, reg)); } EXPORT_SYMBOL(snd_trident_stop_voice); /*--------------------------------------------------------------------------- int snd_trident_allocate_pcm_channel(struct snd_trident *trident) Description: Allocate hardware channel in Bank B (32-63). Parameters : trident - pointer to target device class for 4DWave. Return Value: hardware channel - 32-63 or -1 when no channel is available ---------------------------------------------------------------------------*/ static int snd_trident_allocate_pcm_channel(struct snd_trident * trident) { int idx; if (trident->ChanPCMcnt >= trident->ChanPCM) return -1; for (idx = 31; idx >= 0; idx--) { if (!(trident->ChanMap[T4D_BANK_B] & (1 << idx))) { trident->ChanMap[T4D_BANK_B] |= 1 << idx; trident->ChanPCMcnt++; return idx + 32; } } return -1; } /*--------------------------------------------------------------------------- void snd_trident_free_pcm_channel(int channel) Description: Free hardware channel in Bank B (32-63) Parameters : trident - pointer to target device class for 4DWave. channel - hardware channel number 0-63 Return Value: none ---------------------------------------------------------------------------*/ static void snd_trident_free_pcm_channel(struct snd_trident *trident, int channel) { if (channel < 32 || channel > 63) return; channel &= 0x1f; if (trident->ChanMap[T4D_BANK_B] & (1 << channel)) { trident->ChanMap[T4D_BANK_B] &= ~(1 << channel); trident->ChanPCMcnt--; } } /*--------------------------------------------------------------------------- unsigned int snd_trident_allocate_synth_channel(void) Description: Allocate hardware channel in Bank A (0-31). Parameters : trident - pointer to target device class for 4DWave. Return Value: hardware channel - 0-31 or -1 when no channel is available ---------------------------------------------------------------------------*/ static int snd_trident_allocate_synth_channel(struct snd_trident * trident) { int idx; for (idx = 31; idx >= 0; idx--) { if (!(trident->ChanMap[T4D_BANK_A] & (1 << idx))) { trident->ChanMap[T4D_BANK_A] |= 1 << idx; trident->synth.ChanSynthCount++; return idx; } } return -1; } /*--------------------------------------------------------------------------- void snd_trident_free_synth_channel( int channel ) Description: Free hardware channel in Bank A (0-31). Parameters : trident - pointer to target device class for 4DWave. channel - hardware channel number 0-31 Return Value: none ---------------------------------------------------------------------------*/ static void snd_trident_free_synth_channel(struct snd_trident *trident, int channel) { if (channel < 0 || channel > 31) return; channel &= 0x1f; if (trident->ChanMap[T4D_BANK_A] & (1 << channel)) { trident->ChanMap[T4D_BANK_A] &= ~(1 << channel); trident->synth.ChanSynthCount--; } } /*--------------------------------------------------------------------------- snd_trident_write_voice_regs Description: This routine will complete and write the 5 hardware channel registers to hardware. Parameters: trident - pointer to target device class for 4DWave. voice - synthesizer voice structure Each register field. ---------------------------------------------------------------------------*/ void snd_trident_write_voice_regs(struct snd_trident * trident, struct snd_trident_voice * voice) { unsigned int FmcRvolCvol; unsigned int regs[5]; regs[1] = voice->LBA; regs[4] = (voice->GVSel << 31) | ((voice->Pan & 0x0000007f) << 24) | ((voice->CTRL & 0x0000000f) << 12); FmcRvolCvol = ((voice->FMC & 3) << 14) | ((voice->RVol & 0x7f) << 7) | (voice->CVol & 0x7f); switch (trident->device) { case TRIDENT_DEVICE_ID_SI7018: regs[4] |= voice->number > 31 ? (voice->Vol & 0x000003ff) : ((voice->Vol & 0x00003fc) << (16-2)) | (voice->EC & 0x00000fff); regs[0] = (voice->CSO << 16) | ((voice->Alpha & 0x00000fff) << 4) | (voice->FMS & 0x0000000f); regs[2] = (voice->ESO << 16) | (voice->Delta & 0x0ffff); regs[3] = (voice->Attribute << 16) | FmcRvolCvol; break; case TRIDENT_DEVICE_ID_DX: regs[4] |= ((voice->Vol & 0x000003fc) << (16-2)) | (voice->EC & 0x00000fff); regs[0] = (voice->CSO << 16) | ((voice->Alpha & 0x00000fff) << 4) | (voice->FMS & 0x0000000f); regs[2] = (voice->ESO << 16) | (voice->Delta & 0x0ffff); regs[3] = FmcRvolCvol; break; case TRIDENT_DEVICE_ID_NX: regs[4] |= ((voice->Vol & 0x000003fc) << (16-2)) | (voice->EC & 0x00000fff); regs[0] = (voice->Delta << 24) | (voice->CSO & 0x00ffffff); regs[2] = ((voice->Delta << 16) & 0xff000000) | (voice->ESO & 0x00ffffff); regs[3] = (voice->Alpha << 20) | ((voice->FMS & 0x0000000f) << 16) | FmcRvolCvol; break; default: snd_BUG(); return; } outb(voice->number, TRID_REG(trident, T4D_LFO_GC_CIR)); outl(regs[0], TRID_REG(trident, CH_START + 0)); outl(regs[1], TRID_REG(trident, CH_START + 4)); outl(regs[2], TRID_REG(trident, CH_START + 8)); outl(regs[3], TRID_REG(trident, CH_START + 12)); outl(regs[4], TRID_REG(trident, CH_START + 16)); #if 0 printk(KERN_DEBUG "written %i channel:\n", voice->number); printk(KERN_DEBUG " regs[0] = 0x%x/0x%x\n", regs[0], inl(TRID_REG(trident, CH_START + 0))); printk(KERN_DEBUG " regs[1] = 0x%x/0x%x\n", regs[1], inl(TRID_REG(trident, CH_START + 4))); printk(KERN_DEBUG " regs[2] = 0x%x/0x%x\n", regs[2], inl(TRID_REG(trident, CH_START + 8))); printk(KERN_DEBUG " regs[3] = 0x%x/0x%x\n", regs[3], inl(TRID_REG(trident, CH_START + 12))); printk(KERN_DEBUG " regs[4] = 0x%x/0x%x\n", regs[4], inl(TRID_REG(trident, CH_START + 16))); #endif } EXPORT_SYMBOL(snd_trident_write_voice_regs); /*--------------------------------------------------------------------------- snd_trident_write_cso_reg Description: This routine will write the new CSO offset register to hardware. Parameters: trident - pointer to target device class for 4DWave. 
voice - synthesizer voice structure CSO - new CSO value ---------------------------------------------------------------------------*/ static void snd_trident_write_cso_reg(struct snd_trident * trident, struct snd_trident_voice * voice, unsigned int CSO) { voice->CSO = CSO; outb(voice->number, TRID_REG(trident, T4D_LFO_GC_CIR)); if (trident->device != TRIDENT_DEVICE_ID_NX) { outw(voice->CSO, TRID_REG(trident, CH_DX_CSO_ALPHA_FMS) + 2); } else { outl((voice->Delta << 24) | (voice->CSO & 0x00ffffff), TRID_REG(trident, CH_NX_DELTA_CSO)); } } /*--------------------------------------------------------------------------- snd_trident_write_eso_reg Description: This routine will write the new ESO offset register to hardware. Parameters: trident - pointer to target device class for 4DWave. voice - synthesizer voice structure ESO - new ESO value ---------------------------------------------------------------------------*/ static void snd_trident_write_eso_reg(struct snd_trident * trident, struct snd_trident_voice * voice, unsigned int ESO) { voice->ESO = ESO; outb(voice->number, TRID_REG(trident, T4D_LFO_GC_CIR)); if (trident->device != TRIDENT_DEVICE_ID_NX) { outw(voice->ESO, TRID_REG(trident, CH_DX_ESO_DELTA) + 2); } else { outl(((voice->Delta << 16) & 0xff000000) | (voice->ESO & 0x00ffffff), TRID_REG(trident, CH_NX_DELTA_ESO)); } } /*--------------------------------------------------------------------------- snd_trident_write_vol_reg Description: This routine will write the new voice volume register to hardware. Parameters: trident - pointer to target device class for 4DWave. voice - synthesizer voice structure Vol - new voice volume ---------------------------------------------------------------------------*/ static void snd_trident_write_vol_reg(struct snd_trident * trident, struct snd_trident_voice * voice, unsigned int Vol) { voice->Vol = Vol; outb(voice->number, TRID_REG(trident, T4D_LFO_GC_CIR)); switch (trident->device) { case TRIDENT_DEVICE_ID_DX: case TRIDENT_DEVICE_ID_NX: outb(voice->Vol >> 2, TRID_REG(trident, CH_GVSEL_PAN_VOL_CTRL_EC + 2)); break; case TRIDENT_DEVICE_ID_SI7018: /* printk(KERN_DEBUG "voice->Vol = 0x%x\n", voice->Vol); */ outw((voice->CTRL << 12) | voice->Vol, TRID_REG(trident, CH_GVSEL_PAN_VOL_CTRL_EC)); break; } } /*--------------------------------------------------------------------------- snd_trident_write_pan_reg Description: This routine will write the new voice pan register to hardware. Parameters: trident - pointer to target device class for 4DWave. voice - synthesizer voice structure Pan - new pan value ---------------------------------------------------------------------------*/ static void snd_trident_write_pan_reg(struct snd_trident * trident, struct snd_trident_voice * voice, unsigned int Pan) { voice->Pan = Pan; outb(voice->number, TRID_REG(trident, T4D_LFO_GC_CIR)); outb(((voice->GVSel & 0x01) << 7) | (voice->Pan & 0x7f), TRID_REG(trident, CH_GVSEL_PAN_VOL_CTRL_EC + 3)); } /*--------------------------------------------------------------------------- snd_trident_write_rvol_reg Description: This routine will write the new reverb volume register to hardware. Parameters: trident - pointer to target device class for 4DWave. 
voice - synthesizer voice structure RVol - new reverb volume ---------------------------------------------------------------------------*/ static void snd_trident_write_rvol_reg(struct snd_trident * trident, struct snd_trident_voice * voice, unsigned int RVol) { voice->RVol = RVol; outb(voice->number, TRID_REG(trident, T4D_LFO_GC_CIR)); outw(((voice->FMC & 0x0003) << 14) | ((voice->RVol & 0x007f) << 7) | (voice->CVol & 0x007f), TRID_REG(trident, trident->device == TRIDENT_DEVICE_ID_NX ? CH_NX_ALPHA_FMS_FMC_RVOL_CVOL : CH_DX_FMC_RVOL_CVOL)); } /*--------------------------------------------------------------------------- snd_trident_write_cvol_reg Description: This routine will write the new chorus volume register to hardware. Parameters: trident - pointer to target device class for 4DWave. voice - synthesizer voice structure CVol - new chorus volume ---------------------------------------------------------------------------*/ static void snd_trident_write_cvol_reg(struct snd_trident * trident, struct snd_trident_voice * voice, unsigned int CVol) { voice->CVol = CVol; outb(voice->number, TRID_REG(trident, T4D_LFO_GC_CIR)); outw(((voice->FMC & 0x0003) << 14) | ((voice->RVol & 0x007f) << 7) | (voice->CVol & 0x007f), TRID_REG(trident, trident->device == TRIDENT_DEVICE_ID_NX ? CH_NX_ALPHA_FMS_FMC_RVOL_CVOL : CH_DX_FMC_RVOL_CVOL)); } /*--------------------------------------------------------------------------- snd_trident_convert_rate Description: This routine converts a rate in Hz to the hardware delta value. Parameters: rate - sample rate in Hz. Returns: Delta value. ---------------------------------------------------------------------------*/ static unsigned int snd_trident_convert_rate(unsigned int rate) { unsigned int delta; // We special case 44100 and 8000 since rounding with the equation // does not give us an accurate enough value. For 11025 and 22050 // the equation gives us the best answer. All other frequencies will // also use the equation. JDW if (rate == 44100) delta = 0xeb3; else if (rate == 8000) delta = 0x2ab; else if (rate == 48000) delta = 0x1000; else delta = (((rate << 12) + 24000) / 48000) & 0x0000ffff; return delta; } /*--------------------------------------------------------------------------- snd_trident_convert_adc_rate Description: This routine converts a rate in Hz to the hardware delta value. Parameters: rate - sample rate in Hz. Returns: Delta value. ---------------------------------------------------------------------------*/ static unsigned int snd_trident_convert_adc_rate(unsigned int rate) { unsigned int delta; // We special case 44100 and 8000 since rounding with the equation // does not give us an accurate enough value. For 11025 and 22050 // the equation gives us the best answer. All other frequencies will // also use the equation. JDW if (rate == 44100) delta = 0x116a; else if (rate == 8000) delta = 0x6000; else if (rate == 48000) delta = 0x1000; else delta = ((48000 << 12) / rate) & 0x0000ffff; return delta; } /*--------------------------------------------------------------------------- snd_trident_spurious_threshold Description: This routine converts a rate in Hz to the spurious threshold. Parameters: rate - sample rate in Hz. period_size - period size, in frames. Returns: Spurious threshold value. 
---------------------------------------------------------------------------*/ static unsigned int snd_trident_spurious_threshold(unsigned int rate, unsigned int period_size) { unsigned int res = (rate * period_size) / 48000; if (res < 64) res = res / 2; else res -= 32; return res; } /*--------------------------------------------------------------------------- snd_trident_control_mode Description: This routine returns a control mode for a PCM channel. Parameters: trident - pointer to target device class for 4DWave. substream - PCM substream Returns: Control value. ---------------------------------------------------------------------------*/ static unsigned int snd_trident_control_mode(struct snd_pcm_substream *substream) { unsigned int CTRL; struct snd_pcm_runtime *runtime = substream->runtime; /* set ctrl mode CTRL default: 8-bit (unsigned) mono, loop mode enabled */ CTRL = 0x00000001; if (snd_pcm_format_width(runtime->format) == 16) CTRL |= 0x00000008; // 16-bit data if (snd_pcm_format_signed(runtime->format)) CTRL |= 0x00000002; // signed data if (runtime->channels > 1) CTRL |= 0x00000004; // stereo data return CTRL; } /* * PCM part */ /*--------------------------------------------------------------------------- snd_trident_ioctl Description: Device I/O control handler for playback/capture parameters. Parameters: substream - PCM substream class cmd - what ioctl message to process arg - additional message infoarg Returns: Error status ---------------------------------------------------------------------------*/ static int snd_trident_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg) { /* FIXME: it seems that with small periods the behaviour of trident hardware is unpredictable and interrupt generator is broken */ return snd_pcm_lib_ioctl(substream, cmd, arg); } /*--------------------------------------------------------------------------- snd_trident_allocate_pcm_mem Description: Allocate PCM ring buffer for given substream Parameters: substream - PCM substream class hw_params - hardware parameters Returns: Error status ---------------------------------------------------------------------------*/ static int snd_trident_allocate_pcm_mem(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_trident *trident = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_trident_voice *voice = runtime->private_data; int err; if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0) return err; if (trident->tlb.entries) { if (err > 0) { /* change */ if (voice->memblk) snd_trident_free_pages(trident, voice->memblk); voice->memblk = snd_trident_alloc_pages(trident, substream); if (voice->memblk == NULL) return -ENOMEM; } } return 0; } /*--------------------------------------------------------------------------- snd_trident_allocate_evoice Description: Allocate extra voice as interrupt generator Parameters: substream - PCM substream class hw_params - hardware parameters Returns: Error status ---------------------------------------------------------------------------*/ static int snd_trident_allocate_evoice(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_trident *trident = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_trident_voice *voice = runtime->private_data; struct snd_trident_voice *evoice = voice->extra; /* voice management */ if (params_buffer_size(hw_params) / 2 != 
params_period_size(hw_params)) { if (evoice == NULL) { evoice = snd_trident_alloc_voice(trident, SNDRV_TRIDENT_VOICE_TYPE_PCM, 0, 0); if (evoice == NULL) return -ENOMEM; voice->extra = evoice; evoice->substream = substream; } } else { if (evoice != NULL) { snd_trident_free_voice(trident, evoice); voice->extra = evoice = NULL; } } return 0; } /*--------------------------------------------------------------------------- snd_trident_hw_params Description: Set the hardware parameters for the playback device. Parameters: substream - PCM substream class hw_params - hardware parameters Returns: Error status ---------------------------------------------------------------------------*/ static int snd_trident_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { int err; err = snd_trident_allocate_pcm_mem(substream, hw_params); if (err >= 0) err = snd_trident_allocate_evoice(substream, hw_params); return err; } /*--------------------------------------------------------------------------- snd_trident_playback_hw_free Description: Release the hardware resources for the playback device. Parameters: substream - PCM substream class Returns: Error status ---------------------------------------------------------------------------*/ static int snd_trident_hw_free(struct snd_pcm_substream *substream) { struct snd_trident *trident = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_trident_voice *voice = runtime->private_data; struct snd_trident_voice *evoice = voice ? voice->extra : NULL; if (trident->tlb.entries) { if (voice && voice->memblk) { snd_trident_free_pages(trident, voice->memblk); voice->memblk = NULL; } } snd_pcm_lib_free_pages(substream); if (evoice != NULL) { snd_trident_free_voice(trident, evoice); voice->extra = NULL; } return 0; } /*--------------------------------------------------------------------------- snd_trident_playback_prepare Description: Prepare playback device for playback. 
Parameters: substream - PCM substream class Returns: Error status ---------------------------------------------------------------------------*/ static int snd_trident_playback_prepare(struct snd_pcm_substream *substream) { struct snd_trident *trident = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_trident_voice *voice = runtime->private_data; struct snd_trident_voice *evoice = voice->extra; struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[substream->number]; spin_lock_irq(&trident->reg_lock); /* set delta (rate) value */ voice->Delta = snd_trident_convert_rate(runtime->rate); voice->spurious_threshold = snd_trident_spurious_threshold(runtime->rate, runtime->period_size); /* set Loop Begin Address */ if (voice->memblk) voice->LBA = voice->memblk->offset; else voice->LBA = runtime->dma_addr; voice->CSO = 0; voice->ESO = runtime->buffer_size - 1; /* in samples */ voice->CTRL = snd_trident_control_mode(substream); voice->FMC = 3; voice->GVSel = 1; voice->EC = 0; voice->Alpha = 0; voice->FMS = 0; voice->Vol = mix->vol; voice->RVol = mix->rvol; voice->CVol = mix->cvol; voice->Pan = mix->pan; voice->Attribute = 0; #if 0 voice->Attribute = (1<<(30-16))|(2<<(26-16))| (0<<(24-16))|(0x1f<<(19-16)); #else voice->Attribute = 0; #endif snd_trident_write_voice_regs(trident, voice); if (evoice != NULL) { evoice->Delta = voice->Delta; evoice->spurious_threshold = voice->spurious_threshold; evoice->LBA = voice->LBA; evoice->CSO = 0; evoice->ESO = (runtime->period_size * 2) + 4 - 1; /* in samples */ evoice->CTRL = voice->CTRL; evoice->FMC = 3; evoice->GVSel = trident->device == TRIDENT_DEVICE_ID_SI7018 ? 0 : 1; evoice->EC = 0; evoice->Alpha = 0; evoice->FMS = 0; evoice->Vol = 0x3ff; /* mute */ evoice->RVol = evoice->CVol = 0x7f; /* mute */ evoice->Pan = 0x7f; /* mute */ #if 0 evoice->Attribute = (1<<(30-16))|(2<<(26-16))| (0<<(24-16))|(0x1f<<(19-16)); #else evoice->Attribute = 0; #endif snd_trident_write_voice_regs(trident, evoice); evoice->isync2 = 1; evoice->isync_mark = runtime->period_size; evoice->ESO = (runtime->period_size * 2) - 1; } spin_unlock_irq(&trident->reg_lock); return 0; } /*--------------------------------------------------------------------------- snd_trident_capture_hw_params Description: Set the hardware parameters for the capture device. Parameters: substream - PCM substream class hw_params - hardware parameters Returns: Error status ---------------------------------------------------------------------------*/ static int snd_trident_capture_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_trident_allocate_pcm_mem(substream, hw_params); } /*--------------------------------------------------------------------------- snd_trident_capture_prepare Description: Prepare capture device for playback. 
Parameters: substream - PCM substream class Returns: Error status ---------------------------------------------------------------------------*/ static int snd_trident_capture_prepare(struct snd_pcm_substream *substream) { struct snd_trident *trident = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_trident_voice *voice = runtime->private_data; unsigned int val, ESO_bytes; spin_lock_irq(&trident->reg_lock); // Initialize the channel and set channel Mode outb(0, TRID_REG(trident, LEGACY_DMAR15)); // Set DMA channel operation mode register outb(0x54, TRID_REG(trident, LEGACY_DMAR11)); // Set channel buffer Address, DMAR0 expects contiguous PCI memory area voice->LBA = runtime->dma_addr; outl(voice->LBA, TRID_REG(trident, LEGACY_DMAR0)); if (voice->memblk) voice->LBA = voice->memblk->offset; // set ESO ESO_bytes = snd_pcm_lib_buffer_bytes(substream) - 1; outb((ESO_bytes & 0x00ff0000) >> 16, TRID_REG(trident, LEGACY_DMAR6)); outw((ESO_bytes & 0x0000ffff), TRID_REG(trident, LEGACY_DMAR4)); ESO_bytes++; // Set channel sample rate, 4.12 format val = (((unsigned int) 48000L << 12) + (runtime->rate/2)) / runtime->rate; outw(val, TRID_REG(trident, T4D_SBDELTA_DELTA_R)); // Set channel interrupt blk length if (snd_pcm_format_width(runtime->format) == 16) { val = (unsigned short) ((ESO_bytes >> 1) - 1); } else { val = (unsigned short) (ESO_bytes - 1); } outl((val << 16) | val, TRID_REG(trident, T4D_SBBL_SBCL)); // Right now, set format and start to run capturing, // continuous run loop enable. trident->bDMAStart = 0x19; // 0001 1001b if (snd_pcm_format_width(runtime->format) == 16) trident->bDMAStart |= 0x80; if (snd_pcm_format_signed(runtime->format)) trident->bDMAStart |= 0x20; if (runtime->channels > 1) trident->bDMAStart |= 0x40; // Prepare capture intr channel voice->Delta = snd_trident_convert_rate(runtime->rate); voice->spurious_threshold = snd_trident_spurious_threshold(runtime->rate, runtime->period_size); voice->isync = 1; voice->isync_mark = runtime->period_size; voice->isync_max = runtime->buffer_size; // Set voice parameters voice->CSO = 0; voice->ESO = voice->isync_ESO = (runtime->period_size * 2) + 6 - 1; voice->CTRL = snd_trident_control_mode(substream); voice->FMC = 3; voice->RVol = 0x7f; voice->CVol = 0x7f; voice->GVSel = 1; voice->Pan = 0x7f; /* mute */ voice->Vol = 0x3ff; /* mute */ voice->EC = 0; voice->Alpha = 0; voice->FMS = 0; voice->Attribute = 0; snd_trident_write_voice_regs(trident, voice); spin_unlock_irq(&trident->reg_lock); return 0; } /*--------------------------------------------------------------------------- snd_trident_si7018_capture_hw_params Description: Set the hardware parameters for the capture device. Parameters: substream - PCM substream class hw_params - hardware parameters Returns: Error status ---------------------------------------------------------------------------*/ static int snd_trident_si7018_capture_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { int err; if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0) return err; return snd_trident_allocate_evoice(substream, hw_params); } /*--------------------------------------------------------------------------- snd_trident_si7018_capture_hw_free Description: Release the hardware resources for the capture device. 
Parameters: substream - PCM substream class Returns: Error status ---------------------------------------------------------------------------*/ static int snd_trident_si7018_capture_hw_free(struct snd_pcm_substream *substream) { struct snd_trident *trident = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_trident_voice *voice = runtime->private_data; struct snd_trident_voice *evoice = voice ? voice->extra : NULL; snd_pcm_lib_free_pages(substream); if (evoice != NULL) { snd_trident_free_voice(trident, evoice); voice->extra = NULL; } return 0; } /*--------------------------------------------------------------------------- snd_trident_si7018_capture_prepare Description: Prepare capture device for playback. Parameters: substream - PCM substream class Returns: Error status ---------------------------------------------------------------------------*/ static int snd_trident_si7018_capture_prepare(struct snd_pcm_substream *substream) { struct snd_trident *trident = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_trident_voice *voice = runtime->private_data; struct snd_trident_voice *evoice = voice->extra; spin_lock_irq(&trident->reg_lock); voice->LBA = runtime->dma_addr; voice->Delta = snd_trident_convert_adc_rate(runtime->rate); voice->spurious_threshold = snd_trident_spurious_threshold(runtime->rate, runtime->period_size); // Set voice parameters voice->CSO = 0; voice->ESO = runtime->buffer_size - 1; /* in samples */ voice->CTRL = snd_trident_control_mode(substream); voice->FMC = 0; voice->RVol = 0; voice->CVol = 0; voice->GVSel = 1; voice->Pan = T4D_DEFAULT_PCM_PAN; voice->Vol = 0; voice->EC = 0; voice->Alpha = 0; voice->FMS = 0; voice->Attribute = (2 << (30-16)) | (2 << (26-16)) | (2 << (24-16)) | (1 << (23-16)); snd_trident_write_voice_regs(trident, voice); if (evoice != NULL) { evoice->Delta = snd_trident_convert_rate(runtime->rate); evoice->spurious_threshold = voice->spurious_threshold; evoice->LBA = voice->LBA; evoice->CSO = 0; evoice->ESO = (runtime->period_size * 2) + 20 - 1; /* in samples, 20 means correction */ evoice->CTRL = voice->CTRL; evoice->FMC = 3; evoice->GVSel = 0; evoice->EC = 0; evoice->Alpha = 0; evoice->FMS = 0; evoice->Vol = 0x3ff; /* mute */ evoice->RVol = evoice->CVol = 0x7f; /* mute */ evoice->Pan = 0x7f; /* mute */ evoice->Attribute = 0; snd_trident_write_voice_regs(trident, evoice); evoice->isync2 = 1; evoice->isync_mark = runtime->period_size; evoice->ESO = (runtime->period_size * 2) - 1; } spin_unlock_irq(&trident->reg_lock); return 0; } /*--------------------------------------------------------------------------- snd_trident_foldback_prepare Description: Prepare foldback capture device for playback. 
Parameters: substream - PCM substream class Returns: Error status ---------------------------------------------------------------------------*/ static int snd_trident_foldback_prepare(struct snd_pcm_substream *substream) { struct snd_trident *trident = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_trident_voice *voice = runtime->private_data; struct snd_trident_voice *evoice = voice->extra; spin_lock_irq(&trident->reg_lock); /* Set channel buffer Address */ if (voice->memblk) voice->LBA = voice->memblk->offset; else voice->LBA = runtime->dma_addr; /* set target ESO for channel */ voice->ESO = runtime->buffer_size - 1; /* in samples */ /* set sample rate */ voice->Delta = 0x1000; voice->spurious_threshold = snd_trident_spurious_threshold(48000, runtime->period_size); voice->CSO = 0; voice->CTRL = snd_trident_control_mode(substream); voice->FMC = 3; voice->RVol = 0x7f; voice->CVol = 0x7f; voice->GVSel = 1; voice->Pan = 0x7f; /* mute */ voice->Vol = 0x3ff; /* mute */ voice->EC = 0; voice->Alpha = 0; voice->FMS = 0; voice->Attribute = 0; /* set up capture channel */ outb(((voice->number & 0x3f) | 0x80), TRID_REG(trident, T4D_RCI + voice->foldback_chan)); snd_trident_write_voice_regs(trident, voice); if (evoice != NULL) { evoice->Delta = voice->Delta; evoice->spurious_threshold = voice->spurious_threshold; evoice->LBA = voice->LBA; evoice->CSO = 0; evoice->ESO = (runtime->period_size * 2) + 4 - 1; /* in samples */ evoice->CTRL = voice->CTRL; evoice->FMC = 3; evoice->GVSel = trident->device == TRIDENT_DEVICE_ID_SI7018 ? 0 : 1; evoice->EC = 0; evoice->Alpha = 0; evoice->FMS = 0; evoice->Vol = 0x3ff; /* mute */ evoice->RVol = evoice->CVol = 0x7f; /* mute */ evoice->Pan = 0x7f; /* mute */ evoice->Attribute = 0; snd_trident_write_voice_regs(trident, evoice); evoice->isync2 = 1; evoice->isync_mark = runtime->period_size; evoice->ESO = (runtime->period_size * 2) - 1; } spin_unlock_irq(&trident->reg_lock); return 0; } /*--------------------------------------------------------------------------- snd_trident_spdif_hw_params Description: Set the hardware parameters for the spdif device. Parameters: substream - PCM substream class hw_params - hardware parameters Returns: Error status ---------------------------------------------------------------------------*/ static int snd_trident_spdif_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_trident *trident = snd_pcm_substream_chip(substream); unsigned int old_bits = 0, change = 0; int err; err = snd_trident_allocate_pcm_mem(substream, hw_params); if (err < 0) return err; if (trident->device == TRIDENT_DEVICE_ID_SI7018) { err = snd_trident_allocate_evoice(substream, hw_params); if (err < 0) return err; } /* prepare SPDIF channel */ spin_lock_irq(&trident->reg_lock); old_bits = trident->spdif_pcm_bits; if (old_bits & IEC958_AES0_PROFESSIONAL) trident->spdif_pcm_bits &= ~IEC958_AES0_PRO_FS; else trident->spdif_pcm_bits &= ~(IEC958_AES3_CON_FS << 24); if (params_rate(hw_params) >= 48000) { trident->spdif_pcm_ctrl = 0x3c; // 48000 Hz trident->spdif_pcm_bits |= trident->spdif_bits & IEC958_AES0_PROFESSIONAL ? IEC958_AES0_PRO_FS_48000 : (IEC958_AES3_CON_FS_48000 << 24); } else if (params_rate(hw_params) >= 44100) { trident->spdif_pcm_ctrl = 0x3e; // 44100 Hz trident->spdif_pcm_bits |= trident->spdif_bits & IEC958_AES0_PROFESSIONAL ? 
IEC958_AES0_PRO_FS_44100 : (IEC958_AES3_CON_FS_44100 << 24); } else { trident->spdif_pcm_ctrl = 0x3d; // 32000 Hz trident->spdif_pcm_bits |= trident->spdif_bits & IEC958_AES0_PROFESSIONAL ? IEC958_AES0_PRO_FS_32000 : (IEC958_AES3_CON_FS_32000 << 24); } change = old_bits != trident->spdif_pcm_bits; spin_unlock_irq(&trident->reg_lock); if (change) snd_ctl_notify(trident->card, SNDRV_CTL_EVENT_MASK_VALUE, &trident->spdif_pcm_ctl->id); return 0; } /*--------------------------------------------------------------------------- snd_trident_spdif_prepare Description: Prepare SPDIF device for playback. Parameters: substream - PCM substream class Returns: Error status ---------------------------------------------------------------------------*/ static int snd_trident_spdif_prepare(struct snd_pcm_substream *substream) { struct snd_trident *trident = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_trident_voice *voice = runtime->private_data; struct snd_trident_voice *evoice = voice->extra; struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[substream->number]; unsigned int RESO, LBAO; unsigned int temp; spin_lock_irq(&trident->reg_lock); if (trident->device != TRIDENT_DEVICE_ID_SI7018) { /* set delta (rate) value */ voice->Delta = snd_trident_convert_rate(runtime->rate); voice->spurious_threshold = snd_trident_spurious_threshold(runtime->rate, runtime->period_size); /* set Loop Back Address */ LBAO = runtime->dma_addr; if (voice->memblk) voice->LBA = voice->memblk->offset; else voice->LBA = LBAO; voice->isync = 1; voice->isync3 = 1; voice->isync_mark = runtime->period_size; voice->isync_max = runtime->buffer_size; /* set target ESO for channel */ RESO = runtime->buffer_size - 1; voice->ESO = voice->isync_ESO = (runtime->period_size * 2) + 6 - 1; /* set ctrl mode */ voice->CTRL = snd_trident_control_mode(substream); voice->FMC = 3; voice->RVol = 0x7f; voice->CVol = 0x7f; voice->GVSel = 1; voice->Pan = 0x7f; voice->Vol = 0x3ff; voice->EC = 0; voice->CSO = 0; voice->Alpha = 0; voice->FMS = 0; voice->Attribute = 0; /* prepare surrogate IRQ channel */ snd_trident_write_voice_regs(trident, voice); outw((RESO & 0xffff), TRID_REG(trident, NX_SPESO)); outb((RESO >> 16), TRID_REG(trident, NX_SPESO + 2)); outl((LBAO & 0xfffffffc), TRID_REG(trident, NX_SPLBA)); outw((voice->CSO & 0xffff), TRID_REG(trident, NX_SPCTRL_SPCSO)); outb((voice->CSO >> 16), TRID_REG(trident, NX_SPCTRL_SPCSO + 2)); /* set SPDIF setting */ outb(trident->spdif_pcm_ctrl, TRID_REG(trident, NX_SPCTRL_SPCSO + 3)); outl(trident->spdif_pcm_bits, TRID_REG(trident, NX_SPCSTATUS)); } else { /* SiS */ /* set delta (rate) value */ voice->Delta = 0x800; voice->spurious_threshold = snd_trident_spurious_threshold(48000, runtime->period_size); /* set Loop Begin Address */ if (voice->memblk) voice->LBA = voice->memblk->offset; else voice->LBA = runtime->dma_addr; voice->CSO = 0; voice->ESO = runtime->buffer_size - 1; /* in samples */ voice->CTRL = snd_trident_control_mode(substream); voice->FMC = 3; voice->GVSel = 1; voice->EC = 0; voice->Alpha = 0; voice->FMS = 0; voice->Vol = mix->vol; voice->RVol = mix->rvol; voice->CVol = mix->cvol; voice->Pan = mix->pan; voice->Attribute = (1<<(30-16))|(7<<(26-16))| (0<<(24-16))|(0<<(19-16)); snd_trident_write_voice_regs(trident, voice); if (evoice != NULL) { evoice->Delta = voice->Delta; evoice->spurious_threshold = voice->spurious_threshold; evoice->LBA = voice->LBA; evoice->CSO = 0; evoice->ESO = (runtime->period_size * 2) + 4 - 1; /* in samples */ 
/*---------------------------------------------------------------------------
   snd_trident_spdif_prepare

   Description: Prepare SPDIF device for playback.

   Parameters:  substream  -  PCM substream class

   Returns:     Error status

  ---------------------------------------------------------------------------*/

static int snd_trident_spdif_prepare(struct snd_pcm_substream *substream)
{
	struct snd_trident *trident = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_trident_voice *voice = runtime->private_data;
	struct snd_trident_voice *evoice = voice->extra;
	struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[substream->number];
	unsigned int RESO, LBAO;
	unsigned int temp;

	spin_lock_irq(&trident->reg_lock);

	if (trident->device != TRIDENT_DEVICE_ID_SI7018) {

		/* set delta (rate) value */
		voice->Delta = snd_trident_convert_rate(runtime->rate);
		voice->spurious_threshold = snd_trident_spurious_threshold(runtime->rate, runtime->period_size);

		/* set Loop Back Address */
		LBAO = runtime->dma_addr;
		if (voice->memblk)
			voice->LBA = voice->memblk->offset;
		else
			voice->LBA = LBAO;

		voice->isync = 1;
		voice->isync3 = 1;
		voice->isync_mark = runtime->period_size;
		voice->isync_max = runtime->buffer_size;

		/* set target ESO for channel */
		RESO = runtime->buffer_size - 1;
		voice->ESO = voice->isync_ESO = (runtime->period_size * 2) + 6 - 1;

		/* set ctrl mode */
		voice->CTRL = snd_trident_control_mode(substream);

		voice->FMC = 3;
		voice->RVol = 0x7f;
		voice->CVol = 0x7f;
		voice->GVSel = 1;
		voice->Pan = 0x7f;
		voice->Vol = 0x3ff;
		voice->EC = 0;
		voice->CSO = 0;
		voice->Alpha = 0;
		voice->FMS = 0;
		voice->Attribute = 0;

		/* prepare surrogate IRQ channel */
		snd_trident_write_voice_regs(trident, voice);

		outw((RESO & 0xffff), TRID_REG(trident, NX_SPESO));
		outb((RESO >> 16), TRID_REG(trident, NX_SPESO + 2));
		outl((LBAO & 0xfffffffc), TRID_REG(trident, NX_SPLBA));
		outw((voice->CSO & 0xffff), TRID_REG(trident, NX_SPCTRL_SPCSO));
		outb((voice->CSO >> 16), TRID_REG(trident, NX_SPCTRL_SPCSO + 2));

		/* set SPDIF setting */
		outb(trident->spdif_pcm_ctrl, TRID_REG(trident, NX_SPCTRL_SPCSO + 3));
		outl(trident->spdif_pcm_bits, TRID_REG(trident, NX_SPCSTATUS));

	} else {	/* SiS */

		/* set delta (rate) value */
		voice->Delta = 0x800;
		voice->spurious_threshold = snd_trident_spurious_threshold(48000, runtime->period_size);

		/* set Loop Begin Address */
		if (voice->memblk)
			voice->LBA = voice->memblk->offset;
		else
			voice->LBA = runtime->dma_addr;

		voice->CSO = 0;
		voice->ESO = runtime->buffer_size - 1;	/* in samples */
		voice->CTRL = snd_trident_control_mode(substream);
		voice->FMC = 3;
		voice->GVSel = 1;
		voice->EC = 0;
		voice->Alpha = 0;
		voice->FMS = 0;
		voice->Vol = mix->vol;
		voice->RVol = mix->rvol;
		voice->CVol = mix->cvol;
		voice->Pan = mix->pan;
		voice->Attribute = (1<<(30-16))|(7<<(26-16))|
				   (0<<(24-16))|(0<<(19-16));

		snd_trident_write_voice_regs(trident, voice);

		if (evoice != NULL) {
			evoice->Delta = voice->Delta;
			evoice->spurious_threshold = voice->spurious_threshold;
			evoice->LBA = voice->LBA;
			evoice->CSO = 0;
			evoice->ESO = (runtime->period_size * 2) + 4 - 1;	/* in samples */
			evoice->CTRL = voice->CTRL;
			evoice->FMC = 3;
			evoice->GVSel = trident->device == TRIDENT_DEVICE_ID_SI7018 ? 0 : 1;
			evoice->EC = 0;
			evoice->Alpha = 0;
			evoice->FMS = 0;
			evoice->Vol = 0x3ff;	/* mute */
			evoice->RVol = evoice->CVol = 0x7f;	/* mute */
			evoice->Pan = 0x7f;	/* mute */
			evoice->Attribute = 0;
			snd_trident_write_voice_regs(trident, evoice);
			evoice->isync2 = 1;
			evoice->isync_mark = runtime->period_size;
			evoice->ESO = (runtime->period_size * 2) - 1;
		}

		outl(trident->spdif_pcm_bits, TRID_REG(trident, SI_SPDIF_CS));
		temp = inl(TRID_REG(trident, T4D_LFO_GC_CIR));
		temp &= ~(1<<19);
		outl(temp, TRID_REG(trident, T4D_LFO_GC_CIR));
		temp = inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL));
		temp |= SPDIF_EN;
		outl(temp, TRID_REG(trident, SI_SERIAL_INTF_CTRL));
	}

	spin_unlock_irq(&trident->reg_lock);
	return 0;
}
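/*
 * Note: on NX parts the SPDIF transmitter has its own DMA engine (the
 * NX_SP* registers programmed above); the regular PCM voice written just
 * before them appears to act only as a "surrogate IRQ channel" pacing the
 * period interrupts, as the in-code comment suggests.  On the SiS 7018 the
 * SPDIF path instead reuses an ordinary voice pair, like the analog PCMs.
 */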
/*---------------------------------------------------------------------------
   snd_trident_trigger

   Description: Start/stop devices

   Parameters:  substream  -  PCM substream class
		cmd        -  trigger command (STOP, GO)

   Returns:     Error status

  ---------------------------------------------------------------------------*/

static int snd_trident_trigger(struct snd_pcm_substream *substream,
			       int cmd)
{
	struct snd_trident *trident = snd_pcm_substream_chip(substream);
	struct snd_pcm_substream *s;
	unsigned int what, whati, capture_flag, spdif_flag;
	struct snd_trident_voice *voice, *evoice;
	unsigned int val, go;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		go = 1;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		go = 0;
		break;
	default:
		return -EINVAL;
	}
	what = whati = capture_flag = spdif_flag = 0;
	spin_lock(&trident->reg_lock);
	val = inl(TRID_REG(trident, T4D_STIMER)) & 0x00ffffff;
	snd_pcm_group_for_each_entry(s, substream) {
		if ((struct snd_trident *) snd_pcm_substream_chip(s) == trident) {
			voice = s->runtime->private_data;
			evoice = voice->extra;
			what |= 1 << (voice->number & 0x1f);
			if (evoice == NULL) {
				whati |= 1 << (voice->number & 0x1f);
			} else {
				what |= 1 << (evoice->number & 0x1f);
				whati |= 1 << (evoice->number & 0x1f);
				if (go)
					evoice->stimer = val;
			}
			if (go) {
				voice->running = 1;
				voice->stimer = val;
			} else {
				voice->running = 0;
			}
			snd_pcm_trigger_done(s, substream);
			if (voice->capture)
				capture_flag = 1;
			if (voice->spdif)
				spdif_flag = 1;
		}
	}
	if (spdif_flag) {
		if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
			outl(trident->spdif_pcm_bits, TRID_REG(trident, NX_SPCSTATUS));
			val = trident->spdif_pcm_ctrl;
			if (!go)
				val &= ~(0x28);
			outb(val, TRID_REG(trident, NX_SPCTRL_SPCSO + 3));
		} else {
			outl(trident->spdif_pcm_bits, TRID_REG(trident, SI_SPDIF_CS));
			val = inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL)) | SPDIF_EN;
			outl(val, TRID_REG(trident, SI_SERIAL_INTF_CTRL));
		}
	}
	if (!go)
		outl(what, TRID_REG(trident, T4D_STOP_B));
	val = inl(TRID_REG(trident, T4D_AINTEN_B));
	if (go) {
		val |= whati;
	} else {
		val &= ~whati;
	}
	outl(val, TRID_REG(trident, T4D_AINTEN_B));
	if (go) {
		outl(what, TRID_REG(trident, T4D_START_B));

		if (capture_flag && trident->device != TRIDENT_DEVICE_ID_SI7018)
			outb(trident->bDMAStart, TRID_REG(trident, T4D_SBCTRL_SBE2R_SBDD));
	} else {
		if (capture_flag && trident->device != TRIDENT_DEVICE_ID_SI7018)
			outb(0x00, TRID_REG(trident, T4D_SBCTRL_SBE2R_SBDD));
	}
	spin_unlock(&trident->reg_lock);
	return 0;
}
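/*
 * Note (inferred): `what` collects start/stop bits and `whati` the matching
 * interrupt-enable bits for every voice in the linked group; the
 * (number & 0x1f) indexing addresses the bank B registers
 * (T4D_START_B/T4D_STOP_B/T4D_AINTEN_B), so linked streams start and stop
 * with a single register write each.  When an extra voice exists, only the
 * extra voice's interrupt is enabled, since it is the interrupt pacer.
 */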
/*---------------------------------------------------------------------------
   snd_trident_playback_pointer

   Description: This routine returns the playback position

   Parameters:  substream  -  PCM substream class

   Returns:     position of buffer

  ---------------------------------------------------------------------------*/

static snd_pcm_uframes_t snd_trident_playback_pointer(struct snd_pcm_substream *substream)
{
	struct snd_trident *trident = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_trident_voice *voice = runtime->private_data;
	unsigned int cso;

	if (!voice->running)
		return 0;

	spin_lock(&trident->reg_lock);

	outb(voice->number, TRID_REG(trident, T4D_LFO_GC_CIR));

	if (trident->device != TRIDENT_DEVICE_ID_NX) {
		cso = inw(TRID_REG(trident, CH_DX_CSO_ALPHA_FMS + 2));
	} else {		// ID_4DWAVE_NX
		cso = (unsigned int) inl(TRID_REG(trident, CH_NX_DELTA_CSO)) & 0x00ffffff;
	}

	spin_unlock(&trident->reg_lock);

	if (cso >= runtime->buffer_size)
		cso = 0;

	return cso;
}

/*---------------------------------------------------------------------------
   snd_trident_capture_pointer

   Description: This routine returns the capture position

   Parameters:  substream  -  PCM substream class

   Returns:     position of buffer

  ---------------------------------------------------------------------------*/

static snd_pcm_uframes_t snd_trident_capture_pointer(struct snd_pcm_substream *substream)
{
	struct snd_trident *trident = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_trident_voice *voice = runtime->private_data;
	unsigned int result;

	if (!voice->running)
		return 0;

	result = inw(TRID_REG(trident, T4D_SBBL_SBCL));
	if (runtime->channels > 1)
		result >>= 1;
	if (result > 0)
		result = runtime->buffer_size - result;

	return result;
}
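/*
 * Note (an assumption, inferred from the arithmetic above): the legacy SB
 * counter at T4D_SBBL_SBCL appears to report how much of the current DMA
 * cycle is still outstanding; the shift halves it for stereo to get frames,
 * and subtracting from buffer_size converts "remaining" into a position.
 */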
/*---------------------------------------------------------------------------
   snd_trident_spdif_pointer

   Description: This routine returns the SPDIF playback position

   Parameters:  substream  -  PCM substream class

   Returns:     position of buffer

  ---------------------------------------------------------------------------*/

static snd_pcm_uframes_t snd_trident_spdif_pointer(struct snd_pcm_substream *substream)
{
	struct snd_trident *trident = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_trident_voice *voice = runtime->private_data;
	unsigned int result;

	if (!voice->running)
		return 0;

	result = inl(TRID_REG(trident, NX_SPCTRL_SPCSO)) & 0x00ffffff;

	return result;
}

/*
 *  Playback support device description
 */

static struct snd_pcm_hardware snd_trident_playback =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
				 SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
	.formats =		(SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE |
				 SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U16_LE),
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		4000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	(256*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(256*1024),
	.periods_min =		1,
	.periods_max =		1024,
	.fifo_size =		0,
};

/*
 *  Capture support device description
 */

static struct snd_pcm_hardware snd_trident_capture =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
				 SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
	.formats =		(SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE |
				 SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U16_LE),
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		4000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		1,
	.periods_max =		1024,
	.fifo_size =		0,
};

/*
 *  Foldback capture support device description
 */

static struct snd_pcm_hardware snd_trident_foldback =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
				 SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_48000,
	.rate_min =		48000,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		1,
	.periods_max =		1024,
	.fifo_size =		0,
};

/*
 *  SPDIF playback support device description
 */

static struct snd_pcm_hardware snd_trident_spdif =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
				 SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		(SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
				 SNDRV_PCM_RATE_48000),
	.rate_min =		32000,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		1,
	.periods_max =		1024,
	.fifo_size =		0,
};

static struct snd_pcm_hardware snd_trident_spdif_7018 =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
				 SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_48000,
	.rate_min =		48000,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		1,
	.periods_max =		1024,
	.fifo_size =		0,
};

static void snd_trident_pcm_free_substream(struct snd_pcm_runtime *runtime)
{
	struct snd_trident_voice *voice = runtime->private_data;
	struct snd_trident *trident;

	if (voice) {
		trident = voice->trident;
		snd_trident_free_voice(trident, voice);
	}
}

static int snd_trident_playback_open(struct snd_pcm_substream *substream)
{
	struct snd_trident *trident = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_trident_voice *voice;

	voice = snd_trident_alloc_voice(trident, SNDRV_TRIDENT_VOICE_TYPE_PCM, 0, 0);
	if (voice == NULL)
		return -EAGAIN;
	snd_trident_pcm_mixer_build(trident, voice, substream);
	voice->substream = substream;
	runtime->private_data = voice;
	runtime->private_free = snd_trident_pcm_free_substream;
	runtime->hw = snd_trident_playback;
	snd_pcm_set_sync(substream);
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				     0, 64*1024);
	return 0;
}
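/*
 * Note: the 64 KB buffer-size cap applied here (and repeated in the other
 * open callbacks below) is presumably a hardware limit -- the per-voice
 * position and end offsets (CSO/ESO) are sample counters of limited width,
 * so the buffer must remain addressable in samples.
 */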
/*---------------------------------------------------------------------------
   snd_trident_playback_close

   Description: This routine will close the 4DWave playback device.
		For now we will simply free the dma transfer buffer.

   Parameters:  substream  -  PCM substream class

  ---------------------------------------------------------------------------*/

static int snd_trident_playback_close(struct snd_pcm_substream *substream)
{
	struct snd_trident *trident = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_trident_voice *voice = runtime->private_data;

	snd_trident_pcm_mixer_free(trident, voice, substream);
	return 0;
}

/*---------------------------------------------------------------------------
   snd_trident_spdif_open

   Description: This routine will open the 4DWave SPDIF device.

   Parameters:  substream  -  PCM substream class

   Returns:     status  -  success or failure flag

  ---------------------------------------------------------------------------*/

static int snd_trident_spdif_open(struct snd_pcm_substream *substream)
{
	struct snd_trident *trident = snd_pcm_substream_chip(substream);
	struct snd_trident_voice *voice;
	struct snd_pcm_runtime *runtime = substream->runtime;

	voice = snd_trident_alloc_voice(trident, SNDRV_TRIDENT_VOICE_TYPE_PCM, 0, 0);
	if (voice == NULL)
		return -EAGAIN;
	voice->spdif = 1;
	voice->substream = substream;
	spin_lock_irq(&trident->reg_lock);
	trident->spdif_pcm_bits = trident->spdif_bits;
	spin_unlock_irq(&trident->reg_lock);

	runtime->private_data = voice;
	runtime->private_free = snd_trident_pcm_free_substream;
	/* the SI7018 SPDIF path is fixed at 48 kHz; the NX transmitter
	   supports the full 32/44.1/48 kHz descriptor */
	if (trident->device == TRIDENT_DEVICE_ID_SI7018) {
		runtime->hw = snd_trident_spdif_7018;
	} else {
		runtime->hw = snd_trident_spdif;
	}

	trident->spdif_pcm_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
	snd_ctl_notify(trident->card, SNDRV_CTL_EVENT_MASK_VALUE |
		       SNDRV_CTL_EVENT_MASK_INFO, &trident->spdif_pcm_ctl->id);

	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				     0, 64*1024);
	return 0;
}

/*---------------------------------------------------------------------------
   snd_trident_spdif_close

   Description: This routine will close the 4DWave SPDIF device.

   Parameters:  substream  -  PCM substream class

  ---------------------------------------------------------------------------*/

static int snd_trident_spdif_close(struct snd_pcm_substream *substream)
{
	struct snd_trident *trident = snd_pcm_substream_chip(substream);
	unsigned int temp;

	spin_lock_irq(&trident->reg_lock);
	// restore default SPDIF setting
	if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
		outb(trident->spdif_ctrl, TRID_REG(trident, NX_SPCTRL_SPCSO + 3));
		outl(trident->spdif_bits, TRID_REG(trident, NX_SPCSTATUS));
	} else {
		outl(trident->spdif_bits, TRID_REG(trident, SI_SPDIF_CS));
		temp = inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL));
		if (trident->spdif_ctrl) {
			temp |= SPDIF_EN;
		} else {
			temp &= ~SPDIF_EN;
		}
		outl(temp, TRID_REG(trident, SI_SERIAL_INTF_CTRL));
	}
	spin_unlock_irq(&trident->reg_lock);
	trident->spdif_pcm_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
	snd_ctl_notify(trident->card, SNDRV_CTL_EVENT_MASK_VALUE |
		       SNDRV_CTL_EVENT_MASK_INFO, &trident->spdif_pcm_ctl->id);
	return 0;
}
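/*
 * Note: the IEC958 "PCM Stream" control defined later in this file is
 * created with SNDRV_CTL_ELEM_ACCESS_INACTIVE; the open/close callbacks
 * above toggle that flag (with a ctl notification), so the control is only
 * operable while the SPDIF PCM device is actually open.
 */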
/*---------------------------------------------------------------------------
   snd_trident_capture_open

   Description: This routine will open the 4DWave capture device.

   Parameters:  substream  -  PCM substream class

   Returns:     status  -  success or failure flag

  ---------------------------------------------------------------------------*/

static int snd_trident_capture_open(struct snd_pcm_substream *substream)
{
	struct snd_trident *trident = snd_pcm_substream_chip(substream);
	struct snd_trident_voice *voice;
	struct snd_pcm_runtime *runtime = substream->runtime;

	voice = snd_trident_alloc_voice(trident, SNDRV_TRIDENT_VOICE_TYPE_PCM, 0, 0);
	if (voice == NULL)
		return -EAGAIN;
	voice->capture = 1;
	voice->substream = substream;
	runtime->private_data = voice;
	runtime->private_free = snd_trident_pcm_free_substream;
	runtime->hw = snd_trident_capture;
	snd_pcm_set_sync(substream);
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				     0, 64*1024);
	return 0;
}

/*---------------------------------------------------------------------------
   snd_trident_capture_close

   Description: This routine will close the 4DWave capture device.
		For now we will simply free the dma transfer buffer.

   Parameters:  substream  -  PCM substream class

  ---------------------------------------------------------------------------*/

static int snd_trident_capture_close(struct snd_pcm_substream *substream)
{
	return 0;
}

/*---------------------------------------------------------------------------
   snd_trident_foldback_open

   Description: This routine will open the 4DWave foldback capture device.

   Parameters:  substream  -  PCM substream class

   Returns:     status  -  success or failure flag

  ---------------------------------------------------------------------------*/

static int snd_trident_foldback_open(struct snd_pcm_substream *substream)
{
	struct snd_trident *trident = snd_pcm_substream_chip(substream);
	struct snd_trident_voice *voice;
	struct snd_pcm_runtime *runtime = substream->runtime;

	voice = snd_trident_alloc_voice(trident, SNDRV_TRIDENT_VOICE_TYPE_PCM, 0, 0);
	if (voice == NULL)
		return -EAGAIN;
	voice->foldback_chan = substream->number;
	voice->substream = substream;
	runtime->private_data = voice;
	runtime->private_free = snd_trident_pcm_free_substream;
	runtime->hw = snd_trident_foldback;
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				     0, 64*1024);
	return 0;
}

/*---------------------------------------------------------------------------
   snd_trident_foldback_close

   Description: This routine will close the 4DWave foldback capture device.
		For now we will simply free the dma transfer buffer.

   Parameters:  substream  -  PCM substream class

  ---------------------------------------------------------------------------*/

static int snd_trident_foldback_close(struct snd_pcm_substream *substream)
{
	struct snd_trident *trident = snd_pcm_substream_chip(substream);
	struct snd_trident_voice *voice;
	struct snd_pcm_runtime *runtime = substream->runtime;

	voice = runtime->private_data;

	/* stop capture channel */
	spin_lock_irq(&trident->reg_lock);
	outb(0x00, TRID_REG(trident, T4D_RCI + voice->foldback_chan));
	spin_unlock_irq(&trident->reg_lock);
	return 0;
}

/*---------------------------------------------------------------------------
   PCM operations
  ---------------------------------------------------------------------------*/

static struct snd_pcm_ops snd_trident_playback_ops = {
	.open =		snd_trident_playback_open,
	.close =	snd_trident_playback_close,
	.ioctl =	snd_trident_ioctl,
	.hw_params =	snd_trident_hw_params,
	.hw_free =	snd_trident_hw_free,
	.prepare =	snd_trident_playback_prepare,
	.trigger =	snd_trident_trigger,
	.pointer =	snd_trident_playback_pointer,
};

static struct snd_pcm_ops snd_trident_nx_playback_ops = {
	.open =		snd_trident_playback_open,
	.close =	snd_trident_playback_close,
	.ioctl =	snd_trident_ioctl,
	.hw_params =	snd_trident_hw_params,
	.hw_free =	snd_trident_hw_free,
	.prepare =	snd_trident_playback_prepare,
	.trigger =	snd_trident_trigger,
	.pointer =	snd_trident_playback_pointer,
	.page =		snd_pcm_sgbuf_ops_page,
};

static struct snd_pcm_ops snd_trident_capture_ops = {
	.open =		snd_trident_capture_open,
	.close =	snd_trident_capture_close,
	.ioctl =	snd_trident_ioctl,
	.hw_params =	snd_trident_capture_hw_params,
	.hw_free =	snd_trident_hw_free,
	.prepare =	snd_trident_capture_prepare,
	.trigger =	snd_trident_trigger,
	.pointer =	snd_trident_capture_pointer,
};

static struct snd_pcm_ops snd_trident_si7018_capture_ops = {
	.open =		snd_trident_capture_open,
	.close =	snd_trident_capture_close,
	.ioctl =	snd_trident_ioctl,
	.hw_params =	snd_trident_si7018_capture_hw_params,
	.hw_free =	snd_trident_si7018_capture_hw_free,
	.prepare =	snd_trident_si7018_capture_prepare,
	.trigger =	snd_trident_trigger,
	.pointer =	snd_trident_playback_pointer,
};

static struct snd_pcm_ops snd_trident_foldback_ops = {
	.open =		snd_trident_foldback_open,
	.close =	snd_trident_foldback_close,
	.ioctl =	snd_trident_ioctl,
	.hw_params =	snd_trident_hw_params,
	.hw_free =	snd_trident_hw_free,
	.prepare =	snd_trident_foldback_prepare,
	.trigger =	snd_trident_trigger,
	.pointer =	snd_trident_playback_pointer,
};

static struct snd_pcm_ops snd_trident_nx_foldback_ops = {
	.open =		snd_trident_foldback_open,
	.close =	snd_trident_foldback_close,
	.ioctl =	snd_trident_ioctl,
	.hw_params =	snd_trident_hw_params,
	.hw_free =	snd_trident_hw_free,
	.prepare =	snd_trident_foldback_prepare,
	.trigger =	snd_trident_trigger,
	.pointer =	snd_trident_playback_pointer,
	.page =		snd_pcm_sgbuf_ops_page,
};

static struct snd_pcm_ops snd_trident_spdif_ops = {
	.open =		snd_trident_spdif_open,
	.close =	snd_trident_spdif_close,
	.ioctl =	snd_trident_ioctl,
	.hw_params =	snd_trident_spdif_hw_params,
	.hw_free =	snd_trident_hw_free,
	.prepare =	snd_trident_spdif_prepare,
	.trigger =	snd_trident_trigger,
	.pointer =	snd_trident_spdif_pointer,
};

static struct snd_pcm_ops snd_trident_spdif_7018_ops = {
	.open =		snd_trident_spdif_open,
	.close =	snd_trident_spdif_close,
	.ioctl =	snd_trident_ioctl,
	.hw_params =	snd_trident_spdif_hw_params,
	.hw_free =	snd_trident_hw_free,
	.prepare =	snd_trident_spdif_prepare,
	.trigger =	snd_trident_trigger,
	.pointer =	snd_trident_playback_pointer,
};
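/*
 * Note: the *_nx_* variants differ from the base ops only in the extra
 * .page callback (snd_pcm_sgbuf_ops_page); they are installed below when
 * the NX TLB is in use, since buffers are then scatter-gather allocated
 * and mmap needs per-page lookups.
 */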
/*---------------------------------------------------------------------------
   snd_trident_pcm

   Description: This routine registers the 4DWave device for PCM support.

   Parameters:  trident - pointer to target device class for 4DWave.

   Returns:     Error status

  ---------------------------------------------------------------------------*/

int __devinit snd_trident_pcm(struct snd_trident * trident,
			      int device, struct snd_pcm ** rpcm)
{
	struct snd_pcm *pcm;
	int err;

	if (rpcm)
		*rpcm = NULL;
	if ((err = snd_pcm_new(trident->card, "trident_dx_nx", device, trident->ChanPCM, 1, &pcm)) < 0)
		return err;

	pcm->private_data = trident;

	if (trident->tlb.entries) {
		snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_trident_nx_playback_ops);
	} else {
		snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_trident_playback_ops);
	}
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
			trident->device != TRIDENT_DEVICE_ID_SI7018 ?
			&snd_trident_capture_ops :
			&snd_trident_si7018_capture_ops);

	pcm->info_flags = 0;
	pcm->dev_subclass = SNDRV_PCM_SUBCLASS_GENERIC_MIX;
	strcpy(pcm->name, "Trident 4DWave");
	trident->pcm = pcm;

	if (trident->tlb.entries) {
		struct snd_pcm_substream *substream;
		for (substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
		     substream; substream = substream->next)
			snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV_SG,
						      snd_dma_pci_data(trident->pci),
						      64*1024, 128*1024);
		snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
					      SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(trident->pci),
					      64*1024, 128*1024);
	} else {
		snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
						      snd_dma_pci_data(trident->pci),
						      64*1024, 128*1024);
	}

	if (rpcm)
		*rpcm = pcm;
	return 0;
}
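/*
 * Note: with the NX TLB active, playback substreams are preallocated as
 * scatter-gather buffers (SNDRV_DMA_TYPE_DEV_SG) mapped through the TLB,
 * while the capture substream keeps a contiguous DMA buffer -- capture
 * apparently bypasses the TLB and uses the legacy DMA engine instead.
 */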
/*---------------------------------------------------------------------------
   snd_trident_foldback_pcm

   Description: This routine registers the 4DWave device for
		foldback PCM support.

   Parameters:  trident - pointer to target device class for 4DWave.

   Returns:     Error status

  ---------------------------------------------------------------------------*/

int __devinit snd_trident_foldback_pcm(struct snd_trident * trident,
				       int device, struct snd_pcm ** rpcm)
{
	struct snd_pcm *foldback;
	int err;
	int num_chan = 3;
	struct snd_pcm_substream *substream;

	if (rpcm)
		*rpcm = NULL;
	if (trident->device == TRIDENT_DEVICE_ID_NX)
		num_chan = 4;
	if ((err = snd_pcm_new(trident->card, "trident_dx_nx", device, 0, num_chan, &foldback)) < 0)
		return err;

	foldback->private_data = trident;
	if (trident->tlb.entries)
		snd_pcm_set_ops(foldback, SNDRV_PCM_STREAM_CAPTURE, &snd_trident_nx_foldback_ops);
	else
		snd_pcm_set_ops(foldback, SNDRV_PCM_STREAM_CAPTURE, &snd_trident_foldback_ops);
	foldback->info_flags = 0;
	strcpy(foldback->name, "Trident 4DWave");
	substream = foldback->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
	strcpy(substream->name, "Front Mixer");
	substream = substream->next;
	strcpy(substream->name, "Reverb Mixer");
	substream = substream->next;
	strcpy(substream->name, "Chorus Mixer");
	if (num_chan == 4) {
		substream = substream->next;
		strcpy(substream->name, "Second AC'97 ADC");
	}
	trident->foldback = foldback;

	if (trident->tlb.entries)
		snd_pcm_lib_preallocate_pages_for_all(foldback, SNDRV_DMA_TYPE_DEV_SG,
						      snd_dma_pci_data(trident->pci),
						      0, 128*1024);
	else
		snd_pcm_lib_preallocate_pages_for_all(foldback, SNDRV_DMA_TYPE_DEV,
						      snd_dma_pci_data(trident->pci),
						      64*1024, 128*1024);

	if (rpcm)
		*rpcm = foldback;
	return 0;
}

/*---------------------------------------------------------------------------
   snd_trident_spdif_pcm

   Description: This routine registers the 4DWave-NX device for
		SPDIF support.

   Parameters:  trident - pointer to target device class for 4DWave-NX.

   Returns:     Error status

  ---------------------------------------------------------------------------*/

int __devinit snd_trident_spdif_pcm(struct snd_trident * trident,
				    int device, struct snd_pcm ** rpcm)
{
	struct snd_pcm *spdif;
	int err;

	if (rpcm)
		*rpcm = NULL;
	if ((err = snd_pcm_new(trident->card, "trident_dx_nx IEC958", device, 1, 0, &spdif)) < 0)
		return err;

	spdif->private_data = trident;
	if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
		snd_pcm_set_ops(spdif, SNDRV_PCM_STREAM_PLAYBACK, &snd_trident_spdif_ops);
	} else {
		snd_pcm_set_ops(spdif, SNDRV_PCM_STREAM_PLAYBACK, &snd_trident_spdif_7018_ops);
	}
	spdif->info_flags = 0;
	strcpy(spdif->name, "Trident 4DWave IEC958");
	trident->spdif = spdif;

	snd_pcm_lib_preallocate_pages_for_all(spdif, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(trident->pci),
					      64*1024, 128*1024);

	if (rpcm)
		*rpcm = spdif;
	return 0;
}

/*
 *  Mixer part
 */


/*---------------------------------------------------------------------------
    snd_trident_spdif_control

    Description: enable/disable S/PDIF out from ac97 mixer
  ---------------------------------------------------------------------------*/

#define snd_trident_spdif_control_info	snd_ctl_boolean_mono_info

static int snd_trident_spdif_control_get(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
	unsigned char val;

	spin_lock_irq(&trident->reg_lock);
	val = trident->spdif_ctrl;
	ucontrol->value.integer.value[0] = val == kcontrol->private_value;
	spin_unlock_irq(&trident->reg_lock);
	return 0;
}

static int snd_trident_spdif_control_put(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
	unsigned char val;
	int change;

	val = ucontrol->value.integer.value[0] ? (unsigned char) kcontrol->private_value : 0x00;
	spin_lock_irq(&trident->reg_lock);
	/* S/PDIF C Channel bits 0-31 : 48khz, SCMS disabled */
	change = trident->spdif_ctrl != val;
	trident->spdif_ctrl = val;
	if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
		if ((inb(TRID_REG(trident, NX_SPCTRL_SPCSO + 3)) & 0x10) == 0) {
			outl(trident->spdif_bits, TRID_REG(trident, NX_SPCSTATUS));
			outb(trident->spdif_ctrl, TRID_REG(trident, NX_SPCTRL_SPCSO + 3));
		}
	} else {
		if (trident->spdif == NULL) {
			unsigned int temp;
			outl(trident->spdif_bits, TRID_REG(trident, SI_SPDIF_CS));
			temp = inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL)) & ~SPDIF_EN;
			if (val)
				temp |= SPDIF_EN;
			outl(temp, TRID_REG(trident, SI_SERIAL_INTF_CTRL));
		}
	}
	spin_unlock_irq(&trident->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_trident_spdif_control __devinitdata =
{
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH),
	.info =		snd_trident_spdif_control_info,
	.get =		snd_trident_spdif_control_get,
	.put =		snd_trident_spdif_control_put,
	.private_value = 0x28,
};
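/*
 * Note: private_value 0x28 doubles as the register value that enables the
 * NX SPDIF output -- snd_trident_trigger() clears these bits on stop, and
 * the /proc code below reports "on" when spdif_ctrl == 0x28 -- so the
 * switch reads as "on" exactly when spdif_ctrl holds that value.
 */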
/*---------------------------------------------------------------------------
    snd_trident_spdif_default

    Description: put/get the S/PDIF default settings
  ---------------------------------------------------------------------------*/

static int snd_trident_spdif_default_info(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

static int snd_trident_spdif_default_get(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_trident *trident = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&trident->reg_lock);
	ucontrol->value.iec958.status[0] = (trident->spdif_bits >> 0) & 0xff;
	ucontrol->value.iec958.status[1] = (trident->spdif_bits >> 8) & 0xff;
	ucontrol->value.iec958.status[2] = (trident->spdif_bits >> 16) & 0xff;
	ucontrol->value.iec958.status[3] = (trident->spdif_bits >> 24) & 0xff;
	spin_unlock_irq(&trident->reg_lock);
	return 0;
}

static int snd_trident_spdif_default_put(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
	unsigned int val;
	int change;

	val = (ucontrol->value.iec958.status[0] << 0) |
	      (ucontrol->value.iec958.status[1] << 8) |
	      (ucontrol->value.iec958.status[2] << 16) |
	      (ucontrol->value.iec958.status[3] << 24);
	spin_lock_irq(&trident->reg_lock);
	change = trident->spdif_bits != val;
	trident->spdif_bits = val;
	if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
		if ((inb(TRID_REG(trident, NX_SPCTRL_SPCSO + 3)) & 0x10) == 0)
			outl(trident->spdif_bits, TRID_REG(trident, NX_SPCSTATUS));
	} else {
		if (trident->spdif == NULL)
			outl(trident->spdif_bits, TRID_REG(trident, SI_SPDIF_CS));
	}
	spin_unlock_irq(&trident->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_trident_spdif_default __devinitdata =
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
	.info =		snd_trident_spdif_default_info,
	.get =		snd_trident_spdif_default_get,
	.put =		snd_trident_spdif_default_put
};

/*---------------------------------------------------------------------------
    snd_trident_spdif_mask

    Description: put/get the S/PDIF mask
  ---------------------------------------------------------------------------*/

static int snd_trident_spdif_mask_info(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}
static int snd_trident_spdif_mask_get(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.iec958.status[0] = 0xff;
	ucontrol->value.iec958.status[1] = 0xff;
	ucontrol->value.iec958.status[2] = 0xff;
	ucontrol->value.iec958.status[3] = 0xff;
	return 0;
}

static struct snd_kcontrol_new snd_trident_spdif_mask __devinitdata =
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK),
	.info =		snd_trident_spdif_mask_info,
	.get =		snd_trident_spdif_mask_get,
};

/*---------------------------------------------------------------------------
    snd_trident_spdif_stream

    Description: put/get the S/PDIF stream settings
  ---------------------------------------------------------------------------*/

static int snd_trident_spdif_stream_info(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

static int snd_trident_spdif_stream_get(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct snd_trident *trident = snd_kcontrol_chip(kcontrol);

	spin_lock_irq(&trident->reg_lock);
	ucontrol->value.iec958.status[0] = (trident->spdif_pcm_bits >> 0) & 0xff;
	ucontrol->value.iec958.status[1] = (trident->spdif_pcm_bits >> 8) & 0xff;
	ucontrol->value.iec958.status[2] = (trident->spdif_pcm_bits >> 16) & 0xff;
	ucontrol->value.iec958.status[3] = (trident->spdif_pcm_bits >> 24) & 0xff;
	spin_unlock_irq(&trident->reg_lock);
	return 0;
}

static int snd_trident_spdif_stream_put(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
	unsigned int val;
	int change;

	val = (ucontrol->value.iec958.status[0] << 0) |
	      (ucontrol->value.iec958.status[1] << 8) |
	      (ucontrol->value.iec958.status[2] << 16) |
	      (ucontrol->value.iec958.status[3] << 24);
	spin_lock_irq(&trident->reg_lock);
	change = trident->spdif_pcm_bits != val;
	trident->spdif_pcm_bits = val;
	if (trident->spdif != NULL) {
		if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
			outl(trident->spdif_pcm_bits, TRID_REG(trident, NX_SPCSTATUS));
		} else {
			outl(trident->spdif_bits, TRID_REG(trident, SI_SPDIF_CS));
		}
	}
	spin_unlock_irq(&trident->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_trident_spdif_stream __devinitdata =
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE |
			SNDRV_CTL_ELEM_ACCESS_INACTIVE,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,PCM_STREAM),
	.info =		snd_trident_spdif_stream_info,
	.get =		snd_trident_spdif_stream_get,
	.put =		snd_trident_spdif_stream_put
};
/*---------------------------------------------------------------------------
    snd_trident_ac97_control

    Description: enable/disable rear path for ac97
  ---------------------------------------------------------------------------*/

#define snd_trident_ac97_control_info	snd_ctl_boolean_mono_info

static int snd_trident_ac97_control_get(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
	unsigned char val;

	spin_lock_irq(&trident->reg_lock);
	val = trident->ac97_ctrl = inl(TRID_REG(trident, NX_ACR0_AC97_COM_STAT));
	ucontrol->value.integer.value[0] = (val & (1 << kcontrol->private_value)) ? 1 : 0;
	spin_unlock_irq(&trident->reg_lock);
	return 0;
}

static int snd_trident_ac97_control_put(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
	unsigned char val;
	int change = 0;

	spin_lock_irq(&trident->reg_lock);
	val = trident->ac97_ctrl = inl(TRID_REG(trident, NX_ACR0_AC97_COM_STAT));
	val &= ~(1 << kcontrol->private_value);
	if (ucontrol->value.integer.value[0])
		val |= 1 << kcontrol->private_value;
	change = val != trident->ac97_ctrl;
	trident->ac97_ctrl = val;
	outl(trident->ac97_ctrl = val, TRID_REG(trident, NX_ACR0_AC97_COM_STAT));
	spin_unlock_irq(&trident->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_trident_ac97_rear_control __devinitdata =
{
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.name =		"Rear Path",
	.info =		snd_trident_ac97_control_info,
	.get =		snd_trident_ac97_control_get,
	.put =		snd_trident_ac97_control_put,
	.private_value = 4,
};

/*---------------------------------------------------------------------------
    snd_trident_vol_control

    Description: wave & music volume control
  ---------------------------------------------------------------------------*/

static int snd_trident_vol_control_info(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 255;
	return 0;
}

static int snd_trident_vol_control_get(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
	unsigned int val;

	val = trident->musicvol_wavevol;
	ucontrol->value.integer.value[0] = 255 - ((val >> kcontrol->private_value) & 0xff);
	ucontrol->value.integer.value[1] = 255 - ((val >> (kcontrol->private_value + 8)) & 0xff);
	return 0;
}

static const DECLARE_TLV_DB_SCALE(db_scale_gvol, -6375, 25, 0);

static int snd_trident_vol_control_put(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
	unsigned int val;
	int change = 0;

	spin_lock_irq(&trident->reg_lock);
	val = trident->musicvol_wavevol;
	val &= ~(0xffff << kcontrol->private_value);
	val |= ((255 - (ucontrol->value.integer.value[0] & 0xff)) |
		((255 - (ucontrol->value.integer.value[1] & 0xff)) << 8)) << kcontrol->private_value;
	change = val != trident->musicvol_wavevol;
	outl(trident->musicvol_wavevol = val, TRID_REG(trident, T4D_MUSICVOL_WAVEVOL));
	spin_unlock_irq(&trident->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_trident_vol_music_control __devinitdata =
{
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.name =		"Music Playback Volume",
	.info =		snd_trident_vol_control_info,
	.get =		snd_trident_vol_control_get,
	.put =		snd_trident_vol_control_put,
	.private_value = 16,
	.tlv = { .p = db_scale_gvol },
};

static struct snd_kcontrol_new snd_trident_vol_wave_control __devinitdata =
{
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.name =		"Wave Playback Volume",
	.info =		snd_trident_vol_control_info,
	.get =		snd_trident_vol_control_get,
	.put =		snd_trident_vol_control_put,
	.private_value = 0,
	.tlv = { .p = db_scale_gvol },
};
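/*
 * Note: the wave/music volume fields are hardware attenuation values, so
 * user-space values are stored inverted (255 - value); per db_scale_gvol
 * above this spans -63.75 dB .. 0 dB in 0.25 dB steps.
 */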
/*---------------------------------------------------------------------------
    snd_trident_pcm_vol_control

    Description: PCM front volume control
  ---------------------------------------------------------------------------*/

static int snd_trident_pcm_vol_control_info(struct snd_kcontrol *kcontrol,
					    struct snd_ctl_elem_info *uinfo)
{
	struct snd_trident *trident = snd_kcontrol_chip(kcontrol);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 255;
	if (trident->device == TRIDENT_DEVICE_ID_SI7018)
		uinfo->value.integer.max = 1023;
	return 0;
}

static int snd_trident_pcm_vol_control_get(struct snd_kcontrol *kcontrol,
					   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
	struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[snd_ctl_get_ioffnum(kcontrol, &ucontrol->id)];

	if (trident->device == TRIDENT_DEVICE_ID_SI7018) {
		ucontrol->value.integer.value[0] = 1023 - mix->vol;
	} else {
		ucontrol->value.integer.value[0] = 255 - (mix->vol>>2);
	}
	return 0;
}

static int snd_trident_pcm_vol_control_put(struct snd_kcontrol *kcontrol,
					   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
	struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[snd_ctl_get_ioffnum(kcontrol, &ucontrol->id)];
	unsigned int val;
	int change = 0;

	if (trident->device == TRIDENT_DEVICE_ID_SI7018) {
		val = 1023 - (ucontrol->value.integer.value[0] & 1023);
	} else {
		val = (255 - (ucontrol->value.integer.value[0] & 255)) << 2;
	}
	spin_lock_irq(&trident->reg_lock);
	change = val != mix->vol;
	mix->vol = val;
	if (mix->voice != NULL)
		snd_trident_write_vol_reg(trident, mix->voice, val);
	spin_unlock_irq(&trident->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_trident_pcm_vol_control __devinitdata =
{
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.name =		"PCM Front Playback Volume",
	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
	.count =	32,
	.info =		snd_trident_pcm_vol_control_info,
	.get =		snd_trident_pcm_vol_control_get,
	.put =		snd_trident_pcm_vol_control_put,
	/* FIXME: no tlv yet */
};

/*---------------------------------------------------------------------------
    snd_trident_pcm_pan_control

    Description: PCM front pan control
  ---------------------------------------------------------------------------*/

static int snd_trident_pcm_pan_control_info(struct snd_kcontrol *kcontrol,
					    struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 127;
	return 0;
}

static int snd_trident_pcm_pan_control_get(struct snd_kcontrol *kcontrol,
					   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
	struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[snd_ctl_get_ioffnum(kcontrol, &ucontrol->id)];

	ucontrol->value.integer.value[0] = mix->pan;
	if (ucontrol->value.integer.value[0] & 0x40) {
		ucontrol->value.integer.value[0] = (0x3f - (ucontrol->value.integer.value[0] & 0x3f));
	} else {
		ucontrol->value.integer.value[0] |= 0x40;
	}
	return 0;
}

static int snd_trident_pcm_pan_control_put(struct snd_kcontrol *kcontrol,
					   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_trident *trident = snd_kcontrol_chip(kcontrol);
	struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[snd_ctl_get_ioffnum(kcontrol, &ucontrol->id)];
	unsigned char val;
	int change = 0;

	if (ucontrol->value.integer.value[0] & 0x40)
		val = ucontrol->value.integer.value[0] & 0x3f;
	else
		val = (0x3f - (ucontrol->value.integer.value[0] & 0x3f)) | 0x40;
	spin_lock_irq(&trident->reg_lock);
	change = val != mix->pan;
	mix->pan = val;
	if (mix->voice != NULL)
		snd_trident_write_pan_reg(trident, mix->voice, val);
	spin_unlock_irq(&trident->reg_lock);
	return change;
}
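/*
 * Note on the pan encoding used above: bit 6 of the 7-bit hardware pan
 * value apparently selects the side, and the low 6 bits give the amount,
 * inverted on one side -- hence the 0x40/0x3f shuffling in the get/put
 * callbacks to map it onto a linear 0..127 control.
 */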
Control", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE, .count = 32, .info = snd_trident_pcm_pan_control_info, .get = snd_trident_pcm_pan_control_get, .put = snd_trident_pcm_pan_control_put, }; /*--------------------------------------------------------------------------- snd_trident_pcm_rvol_control Description: PCM reverb volume control ---------------------------------------------------------------------------*/ static int snd_trident_pcm_rvol_control_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 127; return 0; } static int snd_trident_pcm_rvol_control_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_trident *trident = snd_kcontrol_chip(kcontrol); struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[snd_ctl_get_ioffnum(kcontrol, &ucontrol->id)]; ucontrol->value.integer.value[0] = 127 - mix->rvol; return 0; } static int snd_trident_pcm_rvol_control_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_trident *trident = snd_kcontrol_chip(kcontrol); struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[snd_ctl_get_ioffnum(kcontrol, &ucontrol->id)]; unsigned short val; int change = 0; val = 0x7f - (ucontrol->value.integer.value[0] & 0x7f); spin_lock_irq(&trident->reg_lock); change = val != mix->rvol; mix->rvol = val; if (mix->voice != NULL) snd_trident_write_rvol_reg(trident, mix->voice, val); spin_unlock_irq(&trident->reg_lock); return change; } static const DECLARE_TLV_DB_SCALE(db_scale_crvol, -3175, 25, 1); static struct snd_kcontrol_new snd_trident_pcm_rvol_control __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "PCM Reverb Playback Volume", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE, .count = 32, .info = snd_trident_pcm_rvol_control_info, .get = snd_trident_pcm_rvol_control_get, .put = snd_trident_pcm_rvol_control_put, .tlv = { .p = db_scale_crvol }, }; /*--------------------------------------------------------------------------- snd_trident_pcm_cvol_control Description: PCM chorus volume control ---------------------------------------------------------------------------*/ static int snd_trident_pcm_cvol_control_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 127; return 0; } static int snd_trident_pcm_cvol_control_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_trident *trident = snd_kcontrol_chip(kcontrol); struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[snd_ctl_get_ioffnum(kcontrol, &ucontrol->id)]; ucontrol->value.integer.value[0] = 127 - mix->cvol; return 0; } static int snd_trident_pcm_cvol_control_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_trident *trident = snd_kcontrol_chip(kcontrol); struct snd_trident_pcm_mixer *mix = &trident->pcm_mixer[snd_ctl_get_ioffnum(kcontrol, &ucontrol->id)]; unsigned short val; int change = 0; val = 0x7f - (ucontrol->value.integer.value[0] & 0x7f); spin_lock_irq(&trident->reg_lock); change = val != mix->cvol; mix->cvol = val; if (mix->voice != NULL) snd_trident_write_cvol_reg(trident, mix->voice, val); spin_unlock_irq(&trident->reg_lock); return change; } static struct snd_kcontrol_new snd_trident_pcm_cvol_control __devinitdata = { .iface 
static struct snd_kcontrol_new snd_trident_pcm_cvol_control __devinitdata =
{
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.name =		"PCM Chorus Playback Volume",
	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
	.count =	32,
	.info =		snd_trident_pcm_cvol_control_info,
	.get =		snd_trident_pcm_cvol_control_get,
	.put =		snd_trident_pcm_cvol_control_put,
	.tlv = { .p = db_scale_crvol },
};

static void snd_trident_notify_pcm_change1(struct snd_card *card,
					   struct snd_kcontrol *kctl,
					   int num, int activate)
{
	struct snd_ctl_elem_id id;

	if (! kctl)
		return;
	if (activate)
		kctl->vd[num].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
	else
		kctl->vd[num].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
	snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE |
		       SNDRV_CTL_EVENT_MASK_INFO,
		       snd_ctl_build_ioff(&id, kctl, num));
}

static void snd_trident_notify_pcm_change(struct snd_trident *trident,
					  struct snd_trident_pcm_mixer *tmix,
					  int num, int activate)
{
	snd_trident_notify_pcm_change1(trident->card, trident->ctl_vol, num, activate);
	snd_trident_notify_pcm_change1(trident->card, trident->ctl_pan, num, activate);
	snd_trident_notify_pcm_change1(trident->card, trident->ctl_rvol, num, activate);
	snd_trident_notify_pcm_change1(trident->card, trident->ctl_cvol, num, activate);
}

static int snd_trident_pcm_mixer_build(struct snd_trident *trident,
				       struct snd_trident_voice *voice,
				       struct snd_pcm_substream *substream)
{
	struct snd_trident_pcm_mixer *tmix;

	if (snd_BUG_ON(!trident || !voice || !substream))
		return -EINVAL;
	tmix = &trident->pcm_mixer[substream->number];
	tmix->voice = voice;
	tmix->vol = T4D_DEFAULT_PCM_VOL;
	tmix->pan = T4D_DEFAULT_PCM_PAN;
	tmix->rvol = T4D_DEFAULT_PCM_RVOL;
	tmix->cvol = T4D_DEFAULT_PCM_CVOL;
	snd_trident_notify_pcm_change(trident, tmix, substream->number, 1);
	return 0;
}

static int snd_trident_pcm_mixer_free(struct snd_trident *trident,
				      struct snd_trident_voice *voice,
				      struct snd_pcm_substream *substream)
{
	struct snd_trident_pcm_mixer *tmix;

	if (snd_BUG_ON(!trident || !substream))
		return -EINVAL;
	tmix = &trident->pcm_mixer[substream->number];
	tmix->voice = NULL;
	snd_trident_notify_pcm_change(trident, tmix, substream->number, 0);
	return 0;
}
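/*
 * Note: the four PCM mixer controls are defined with .count = 32 (one
 * element per substream); snd_trident_notify_pcm_change() above flips the
 * INACTIVE access flag of the matching element when a substream is opened
 * or closed, so only elements bound to a live voice are operable.
 */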
/*---------------------------------------------------------------------------
   snd_trident_mixer

   Description: This routine registers the 4DWave device for mixer support.

   Parameters:  trident - pointer to target device class for 4DWave.

   Returns:     Error status

  ---------------------------------------------------------------------------*/

static int __devinit snd_trident_mixer(struct snd_trident * trident, int pcm_spdif_device)
{
	struct snd_ac97_template _ac97;
	struct snd_card *card = trident->card;
	struct snd_kcontrol *kctl;
	struct snd_ctl_elem_value *uctl;
	int idx, err, retries = 2;
	static struct snd_ac97_bus_ops ops = {
		.write = snd_trident_codec_write,
		.read = snd_trident_codec_read,
	};

	uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
	if (!uctl)
		return -ENOMEM;

	if ((err = snd_ac97_bus(trident->card, 0, &ops, NULL, &trident->ac97_bus)) < 0)
		goto __out;

	memset(&_ac97, 0, sizeof(_ac97));
	_ac97.private_data = trident;
	trident->ac97_detect = 1;

      __again:
	if ((err = snd_ac97_mixer(trident->ac97_bus, &_ac97, &trident->ac97)) < 0) {
		if (trident->device == TRIDENT_DEVICE_ID_SI7018) {
			if ((err = snd_trident_sis_reset(trident)) < 0)
				goto __out;
			if (retries-- > 0)
				goto __again;
			err = -EIO;
		}
		goto __out;
	}

	/* secondary codec? */
	if (trident->device == TRIDENT_DEVICE_ID_SI7018 &&
	    (inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL)) & SI_AC97_SECONDARY_READY) != 0) {
		_ac97.num = 1;
		err = snd_ac97_mixer(trident->ac97_bus, &_ac97, &trident->ac97_sec);
		if (err < 0)
			snd_printk(KERN_ERR "SI7018: the secondary codec - invalid access\n");
#if 0	// only for my testing purpose --jk
		{
			struct snd_ac97 *mc97;
			err = snd_ac97_modem(trident->card, &_ac97, &mc97);
			if (err < 0)
				snd_printk(KERN_ERR "snd_ac97_modem returned error %i\n", err);
		}
#endif
	}

	trident->ac97_detect = 0;

	if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
		if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_trident_vol_wave_control, trident))) < 0)
			goto __out;
		kctl->put(kctl, uctl);
		if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_trident_vol_music_control, trident))) < 0)
			goto __out;
		kctl->put(kctl, uctl);
		outl(trident->musicvol_wavevol = 0x00000000,
		     TRID_REG(trident, T4D_MUSICVOL_WAVEVOL));
	} else {
		outl(trident->musicvol_wavevol = 0xffff0000,
		     TRID_REG(trident, T4D_MUSICVOL_WAVEVOL));
	}

	for (idx = 0; idx < 32; idx++) {
		struct snd_trident_pcm_mixer *tmix;

		tmix = &trident->pcm_mixer[idx];
		tmix->voice = NULL;
	}
	if ((trident->ctl_vol = snd_ctl_new1(&snd_trident_pcm_vol_control, trident)) == NULL)
		goto __nomem;
	if ((err = snd_ctl_add(card, trident->ctl_vol)))
		goto __out;

	if ((trident->ctl_pan = snd_ctl_new1(&snd_trident_pcm_pan_control, trident)) == NULL)
		goto __nomem;
	if ((err = snd_ctl_add(card, trident->ctl_pan)))
		goto __out;

	if ((trident->ctl_rvol = snd_ctl_new1(&snd_trident_pcm_rvol_control, trident)) == NULL)
		goto __nomem;
	if ((err = snd_ctl_add(card, trident->ctl_rvol)))
		goto __out;

	if ((trident->ctl_cvol = snd_ctl_new1(&snd_trident_pcm_cvol_control, trident)) == NULL)
		goto __nomem;
	if ((err = snd_ctl_add(card, trident->ctl_cvol)))
		goto __out;

	if (trident->device == TRIDENT_DEVICE_ID_NX) {
		if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_trident_ac97_rear_control, trident))) < 0)
			goto __out;
		kctl->put(kctl, uctl);
	}
	if (trident->device == TRIDENT_DEVICE_ID_NX ||
	    trident->device == TRIDENT_DEVICE_ID_SI7018) {

		kctl = snd_ctl_new1(&snd_trident_spdif_control, trident);
		if (kctl == NULL) {
			err = -ENOMEM;
			goto __out;
		}
		if (trident->ac97->ext_id & AC97_EI_SPDIF)
			kctl->id.index++;
		if (trident->ac97_sec && (trident->ac97_sec->ext_id & AC97_EI_SPDIF))
			kctl->id.index++;
		idx = kctl->id.index;
		if ((err = snd_ctl_add(card, kctl)) < 0)
			goto __out;
		kctl->put(kctl, uctl);

		kctl = snd_ctl_new1(&snd_trident_spdif_default, trident);
		if (kctl == NULL) {
			err = -ENOMEM;
			goto __out;
		}
		kctl->id.index = idx;
		kctl->id.device = pcm_spdif_device;
		if ((err = snd_ctl_add(card, kctl)) < 0)
			goto __out;

		kctl = snd_ctl_new1(&snd_trident_spdif_mask, trident);
		if (kctl == NULL) {
			err = -ENOMEM;
			goto __out;
		}
		kctl->id.index = idx;
		kctl->id.device = pcm_spdif_device;
		if ((err = snd_ctl_add(card, kctl)) < 0)
			goto __out;

		kctl = snd_ctl_new1(&snd_trident_spdif_stream, trident);
		if (kctl == NULL) {
			err = -ENOMEM;
			goto __out;
		}
		kctl->id.index = idx;
		kctl->id.device = pcm_spdif_device;
		if ((err = snd_ctl_add(card, kctl)) < 0)
			goto __out;
		trident->spdif_pcm_ctl = kctl;
	}

	err = 0;
	goto __out;

 __nomem:
	err = -ENOMEM;

 __out:
	kfree(uctl);

	return err;
}

/*
 * gameport interface
 */

#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))

static unsigned char snd_trident_gameport_read(struct gameport *gameport)
{
	struct snd_trident *chip = gameport_get_port_data(gameport);

	if (snd_BUG_ON(!chip))
		return 0;
	return inb(TRID_REG(chip, GAMEPORT_LEGACY));
}
static void snd_trident_gameport_trigger(struct gameport *gameport)
{
	struct snd_trident *chip = gameport_get_port_data(gameport);

	if (snd_BUG_ON(!chip))
		return;
	outb(0xff, TRID_REG(chip, GAMEPORT_LEGACY));
}

static int snd_trident_gameport_cooked_read(struct gameport *gameport, int *axes, int *buttons)
{
	struct snd_trident *chip = gameport_get_port_data(gameport);
	int i;

	if (snd_BUG_ON(!chip))
		return 0;

	*buttons = (~inb(TRID_REG(chip, GAMEPORT_LEGACY)) >> 4) & 0xf;

	for (i = 0; i < 4; i++) {
		axes[i] = inw(TRID_REG(chip, GAMEPORT_AXES + i * 2));
		if (axes[i] == 0xffff)
			axes[i] = -1;
	}

	return 0;
}

static int snd_trident_gameport_open(struct gameport *gameport, int mode)
{
	struct snd_trident *chip = gameport_get_port_data(gameport);

	if (snd_BUG_ON(!chip))
		return 0;

	switch (mode) {
	case GAMEPORT_MODE_COOKED:
		outb(GAMEPORT_MODE_ADC, TRID_REG(chip, GAMEPORT_GCR));
		msleep(20);
		return 0;
	case GAMEPORT_MODE_RAW:
		outb(0, TRID_REG(chip, GAMEPORT_GCR));
		return 0;
	default:
		return -1;
	}
}

int __devinit snd_trident_create_gameport(struct snd_trident *chip)
{
	struct gameport *gp;

	chip->gameport = gp = gameport_allocate_port();
	if (!gp) {
		printk(KERN_ERR "trident: cannot allocate memory for gameport\n");
		return -ENOMEM;
	}

	gameport_set_name(gp, "Trident 4DWave");
	gameport_set_phys(gp, "pci%s/gameport0", pci_name(chip->pci));
	gameport_set_dev_parent(gp, &chip->pci->dev);

	gameport_set_port_data(gp, chip);
	gp->fuzz = 64;
	gp->read = snd_trident_gameport_read;
	gp->trigger = snd_trident_gameport_trigger;
	gp->cooked_read = snd_trident_gameport_cooked_read;
	gp->open = snd_trident_gameport_open;

	gameport_register_port(gp);

	return 0;
}

static inline void snd_trident_free_gameport(struct snd_trident *chip)
{
	if (chip->gameport) {
		gameport_unregister_port(chip->gameport);
		chip->gameport = NULL;
	}
}
#else
int __devinit snd_trident_create_gameport(struct snd_trident *chip) { return -ENOSYS; }
static inline void snd_trident_free_gameport(struct snd_trident *chip) { }
#endif /* CONFIG_GAMEPORT */

/*
 * delay for 1 tick
 */
static inline void do_delay(struct snd_trident *chip)
{
	schedule_timeout_uninterruptible(1);
}
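/*
 * Note: in cooked mode the gameport is switched to the chip's ADC
 * (GAMEPORT_MODE_ADC) and the four axes are read as 16-bit values, with
 * 0xffff apparently meaning "no axis connected" (reported as -1); raw mode
 * falls back to classic polled timing through the legacy port register.
 */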
/*
 *  SiS reset routine
 */

static int snd_trident_sis_reset(struct snd_trident *trident)
{
	unsigned long end_time;
	unsigned int i;
	int r;

	r = trident->in_suspend ? 0 : 2;	/* count of retries */
      __si7018_retry:
	pci_write_config_byte(trident->pci, 0x46, 0x04);	/* SOFTWARE RESET */
	udelay(100);
	pci_write_config_byte(trident->pci, 0x46, 0x00);
	udelay(100);
	/* disable AC97 GPIO interrupt */
	outb(0x00, TRID_REG(trident, SI_AC97_GPIO));
	/* initialize serial interface, force cold reset */
	i = PCMOUT|SURROUT|CENTEROUT|LFEOUT|SECONDARY_ID|COLD_RESET;
	outl(i, TRID_REG(trident, SI_SERIAL_INTF_CTRL));
	udelay(1000);
	/* remove cold reset */
	i &= ~COLD_RESET;
	outl(i, TRID_REG(trident, SI_SERIAL_INTF_CTRL));
	udelay(2000);
	/* wait, until the codec is ready */
	end_time = (jiffies + (HZ * 3) / 4) + 1;
	do {
		if ((inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL)) & SI_AC97_PRIMARY_READY) != 0)
			goto __si7018_ok;
		do_delay(trident);
	} while (time_after_eq(end_time, jiffies));
	snd_printk(KERN_ERR "AC'97 codec ready error [0x%x]\n",
		   inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL)));
	if (r-- > 0) {
		end_time = jiffies + HZ;
		do {
			do_delay(trident);
		} while (time_after_eq(end_time, jiffies));
		goto __si7018_retry;
	}
      __si7018_ok:
	/* wait for the second codec */
	do {
		if ((inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL)) & SI_AC97_SECONDARY_READY) != 0)
			break;
		do_delay(trident);
	} while (time_after_eq(end_time, jiffies));
	/* enable 64 channel mode */
	outl(BANK_B_EN, TRID_REG(trident, T4D_LFO_GC_CIR));
	return 0;
}

/*
 *  /proc interface
 */

static void snd_trident_proc_read(struct snd_info_entry *entry,
				  struct snd_info_buffer *buffer)
{
	struct snd_trident *trident = entry->private_data;
	char *s;

	switch (trident->device) {
	case TRIDENT_DEVICE_ID_SI7018:
		s = "SiS 7018 Audio";
		break;
	case TRIDENT_DEVICE_ID_DX:
		s = "Trident 4DWave PCI DX";
		break;
	case TRIDENT_DEVICE_ID_NX:
		s = "Trident 4DWave PCI NX";
		break;
	default:
		s = "???";
	}
	snd_iprintf(buffer, "%s\n\n", s);
	snd_iprintf(buffer, "Spurious IRQs    : %d\n", trident->spurious_irq_count);
	snd_iprintf(buffer, "Spurious IRQ dlta: %d\n", trident->spurious_irq_max_delta);
	if (trident->device == TRIDENT_DEVICE_ID_NX ||
	    trident->device == TRIDENT_DEVICE_ID_SI7018)
		snd_iprintf(buffer, "IEC958 Mixer Out : %s\n",
			    trident->spdif_ctrl == 0x28 ? "on" : "off");
	if (trident->device == TRIDENT_DEVICE_ID_NX) {
		snd_iprintf(buffer, "Rear Speakers    : %s\n",
			    trident->ac97_ctrl & 0x00000010 ? "on" : "off");
		if (trident->tlb.entries) {
			snd_iprintf(buffer,"\nVirtual Memory\n");
			snd_iprintf(buffer, "Memory Maximum : %d\n", trident->tlb.memhdr->size);
			snd_iprintf(buffer, "Memory Used    : %d\n", trident->tlb.memhdr->used);
			snd_iprintf(buffer, "Memory Free    : %d\n", snd_util_mem_avail(trident->tlb.memhdr));
		}
	}
}

static void __devinit snd_trident_proc_init(struct snd_trident * trident)
{
	struct snd_info_entry *entry;
	const char *s = "trident";

	if (trident->device == TRIDENT_DEVICE_ID_SI7018)
		s = "sis7018";
	if (! snd_card_proc_new(trident->card, s, &entry))
		snd_info_set_text_ops(entry, trident, snd_trident_proc_read);
}

static int snd_trident_dev_free(struct snd_device *device)
{
	struct snd_trident *trident = device->device_data;
	return snd_trident_free(trident);
}
/*---------------------------------------------------------------------------
   snd_trident_tlb_alloc

   Description: Allocate and set up the TLB page table on 4D NX.
		Each entry has 4 bytes (physical PCI address).

   Parameters:  trident - pointer to target device class for 4DWave.

   Returns:     0 or negative error code

  ---------------------------------------------------------------------------*/

static int __devinit snd_trident_tlb_alloc(struct snd_trident *trident)
{
	int i;

	/* TLB array must be aligned to 16kB !!!
	   so we allocate 32kB region and correct offset when necessary */

	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(trident->pci),
				2 * SNDRV_TRIDENT_MAX_PAGES * 4, &trident->tlb.buffer) < 0) {
		snd_printk(KERN_ERR "trident: unable to allocate TLB buffer\n");
		return -ENOMEM;
	}
	trident->tlb.entries = (unsigned int*)ALIGN((unsigned long)trident->tlb.buffer.area, SNDRV_TRIDENT_MAX_PAGES * 4);
	trident->tlb.entries_dmaaddr = ALIGN(trident->tlb.buffer.addr, SNDRV_TRIDENT_MAX_PAGES * 4);

	/* allocate shadow TLB page table (virtual addresses) */
	trident->tlb.shadow_entries = vmalloc(SNDRV_TRIDENT_MAX_PAGES*sizeof(unsigned long));
	if (trident->tlb.shadow_entries == NULL) {
		snd_printk(KERN_ERR "trident: unable to allocate shadow TLB entries\n");
		return -ENOMEM;
	}

	/* allocate and setup silent page and initialise TLB entries */
	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(trident->pci),
				SNDRV_TRIDENT_PAGE_SIZE, &trident->tlb.silent_page) < 0) {
		snd_printk(KERN_ERR "trident: unable to allocate silent page\n");
		return -ENOMEM;
	}
	memset(trident->tlb.silent_page.area, 0, SNDRV_TRIDENT_PAGE_SIZE);
	for (i = 0; i < SNDRV_TRIDENT_MAX_PAGES; i++) {
		trident->tlb.entries[i] = cpu_to_le32(trident->tlb.silent_page.addr & ~(SNDRV_TRIDENT_PAGE_SIZE-1));
		trident->tlb.shadow_entries[i] = (unsigned long)trident->tlb.silent_page.area;
	}

	/* use emu memory block manager code to manage tlb page allocation */
	trident->tlb.memhdr = snd_util_memhdr_new(SNDRV_TRIDENT_PAGE_SIZE * SNDRV_TRIDENT_MAX_PAGES);
	if (trident->tlb.memhdr == NULL)
		return -ENOMEM;

	trident->tlb.memhdr->block_extra_size = sizeof(struct snd_trident_memblk_arg);
	return 0;
}
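/*
 * Note: the table is allocated at twice its size so that a 16 KB-aligned
 * start can always be carved out with ALIGN(); entries are little-endian
 * physical page addresses for the chip, while the vmalloc'ed shadow table
 * keeps the matching kernel virtual addresses for CPU-side access.  All
 * entries initially point at the zeroed "silent" page.
 */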
pci_write_config_byte(pci, 0x46, 0); /* release reset */ udelay(100); /* warm reset of the AC'97 codec */ outl(0x00000001, TRID_REG(trident, NX_ACR0_AC97_COM_STAT)); udelay(100); outl(0x00000000, TRID_REG(trident, NX_ACR0_AC97_COM_STAT)); /* wait, until the codec is ready */ end_time = (jiffies + (HZ * 3) / 4) + 1; do { if ((inl(TRID_REG(trident, NX_ACR0_AC97_COM_STAT)) & 0x0008) != 0) goto __nx_ok; do_delay(trident); } while (time_after_eq(end_time, jiffies)); snd_printk(KERN_ERR "AC'97 codec ready error [0x%x]\n", inl(TRID_REG(trident, NX_ACR0_AC97_COM_STAT))); return -EIO; __nx_ok: /* DAC on */ trident->ac97_ctrl = 0x00000002; outl(trident->ac97_ctrl, TRID_REG(trident, NX_ACR0_AC97_COM_STAT)); /* disable SB IRQ */ outl(NX_SB_IRQ_DISABLE, TRID_REG(trident, T4D_MISCINT)); snd_trident_stop_all_voices(trident); if (trident->tlb.entries != NULL) { unsigned int i; /* enable virtual addressing via TLB */ i = trident->tlb.entries_dmaaddr; i |= 0x00000001; outl(i, TRID_REG(trident, NX_TLBC)); } else { outl(0, TRID_REG(trident, NX_TLBC)); } /* initialize S/PDIF */ outl(trident->spdif_bits, TRID_REG(trident, NX_SPCSTATUS)); outb(trident->spdif_ctrl, TRID_REG(trident, NX_SPCTRL_SPCSO + 3)); return 0; } /* * initialize sis7018 chip */ static int snd_trident_sis_init(struct snd_trident *trident) { int err; if ((err = snd_trident_sis_reset(trident)) < 0) return err; snd_trident_stop_all_voices(trident); /* initialize S/PDIF */ outl(trident->spdif_bits, TRID_REG(trident, SI_SPDIF_CS)); return 0; } /*--------------------------------------------------------------------------- snd_trident_create Description: This routine will create the device specific class for the 4DWave card. It will also perform basic initialization. Parameters: card - which card to create pci - interface to PCI bus resource info dma1ptr - playback dma buffer dma2ptr - capture dma buffer irqptr - interrupt resource info Returns: 4DWave device class private data ---------------------------------------------------------------------------*/ int __devinit snd_trident_create(struct snd_card *card, struct pci_dev *pci, int pcm_streams, int pcm_spdif_device, int max_wavetable_size, struct snd_trident ** rtrident) { struct snd_trident *trident; int i, err; struct snd_trident_voice *voice; struct snd_trident_pcm_mixer *tmix; static struct snd_device_ops ops = { .dev_free = snd_trident_dev_free, }; *rtrident = NULL; /* enable PCI device */ if ((err = pci_enable_device(pci)) < 0) return err; /* check, if we can restrict PCI DMA transfers to 30 bits */ if (pci_set_dma_mask(pci, DMA_BIT_MASK(30)) < 0 || pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(30)) < 0) { snd_printk(KERN_ERR "architecture does not support 30bit PCI busmaster DMA\n"); pci_disable_device(pci); return -ENXIO; } trident = kzalloc(sizeof(*trident), GFP_KERNEL); if (trident == NULL) { pci_disable_device(pci); return -ENOMEM; } trident->device = (pci->vendor << 16) | pci->device; trident->card = card; trident->pci = pci; spin_lock_init(&trident->reg_lock); spin_lock_init(&trident->event_lock); spin_lock_init(&trident->voice_alloc); if (pcm_streams < 1) pcm_streams = 1; if (pcm_streams > 32) pcm_streams = 32; trident->ChanPCM = pcm_streams; if (max_wavetable_size < 0 ) max_wavetable_size = 0; trident->synth.max_size = max_wavetable_size * 1024; trident->irq = -1; trident->midi_port = TRID_REG(trident, T4D_MPU401_BASE); pci_set_master(pci); if ((err = pci_request_regions(pci, "Trident Audio")) < 0) { kfree(trident); pci_disable_device(pci); return err; } trident->port = 
pci_resource_start(pci, 0); if (request_irq(pci->irq, snd_trident_interrupt, IRQF_SHARED, KBUILD_MODNAME, trident)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_trident_free(trident); return -EBUSY; } trident->irq = pci->irq; /* allocate 16k-aligned TLB for NX cards */ trident->tlb.entries = NULL; trident->tlb.buffer.area = NULL; if (trident->device == TRIDENT_DEVICE_ID_NX) { if ((err = snd_trident_tlb_alloc(trident)) < 0) { snd_trident_free(trident); return err; } } trident->spdif_bits = trident->spdif_pcm_bits = SNDRV_PCM_DEFAULT_CON_SPDIF; /* initialize chip */ switch (trident->device) { case TRIDENT_DEVICE_ID_DX: err = snd_trident_4d_dx_init(trident); break; case TRIDENT_DEVICE_ID_NX: err = snd_trident_4d_nx_init(trident); break; case TRIDENT_DEVICE_ID_SI7018: err = snd_trident_sis_init(trident); break; default: snd_BUG(); break; } if (err < 0) { snd_trident_free(trident); return err; } if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, trident, &ops)) < 0) { snd_trident_free(trident); return err; } if ((err = snd_trident_mixer(trident, pcm_spdif_device)) < 0) return err; /* initialise synth voices */ for (i = 0; i < 64; i++) { voice = &trident->synth.voices[i]; voice->number = i; voice->trident = trident; } /* initialize pcm mixer entries */ for (i = 0; i < 32; i++) { tmix = &trident->pcm_mixer[i]; tmix->vol = T4D_DEFAULT_PCM_VOL; tmix->pan = T4D_DEFAULT_PCM_PAN; tmix->rvol = T4D_DEFAULT_PCM_RVOL; tmix->cvol = T4D_DEFAULT_PCM_CVOL; } snd_trident_enable_eso(trident); snd_trident_proc_init(trident); snd_card_set_dev(card, &pci->dev); *rtrident = trident; return 0; } /*--------------------------------------------------------------------------- snd_trident_free Description: This routine will free the device specific class for the 4DWave card. Parameters: trident - device specific private data for 4DWave card Returns: None. ---------------------------------------------------------------------------*/ static int snd_trident_free(struct snd_trident *trident) { snd_trident_free_gameport(trident); snd_trident_disable_eso(trident); // Disable S/PDIF out if (trident->device == TRIDENT_DEVICE_ID_NX) outb(0x00, TRID_REG(trident, NX_SPCTRL_SPCSO + 3)); else if (trident->device == TRIDENT_DEVICE_ID_SI7018) { outl(0, TRID_REG(trident, SI_SERIAL_INTF_CTRL)); } if (trident->irq >= 0) free_irq(trident->irq, trident); if (trident->tlb.buffer.area) { outl(0, TRID_REG(trident, NX_TLBC)); if (trident->tlb.memhdr) snd_util_memhdr_free(trident->tlb.memhdr); if (trident->tlb.silent_page.area) snd_dma_free_pages(&trident->tlb.silent_page); vfree(trident->tlb.shadow_entries); snd_dma_free_pages(&trident->tlb.buffer); } pci_release_regions(trident->pci); pci_disable_device(trident->pci); kfree(trident); return 0; } /*--------------------------------------------------------------------------- snd_trident_interrupt Description: ISR for Trident 4DWave device Parameters: trident - device specific private data for 4DWave card Problems: It seems that Trident chips generates interrupts more than one time in special cases. The spurious interrupts are detected via sample timer (T4D_STIMER) and computing corresponding delta value. The limits are detected with the method try & fail so it is possible that it won't work on all computers. [jaroslav] Returns: None. 
---------------------------------------------------------------------------*/ static irqreturn_t snd_trident_interrupt(int irq, void *dev_id) { struct snd_trident *trident = dev_id; unsigned int audio_int, chn_int, stimer, channel, mask, tmp; int delta; struct snd_trident_voice *voice; audio_int = inl(TRID_REG(trident, T4D_MISCINT)); if ((audio_int & (ADDRESS_IRQ|MPU401_IRQ)) == 0) return IRQ_NONE; if (audio_int & ADDRESS_IRQ) { // get interrupt status for all channels spin_lock(&trident->reg_lock); stimer = inl(TRID_REG(trident, T4D_STIMER)) & 0x00ffffff; chn_int = inl(TRID_REG(trident, T4D_AINT_A)); if (chn_int == 0) goto __skip1; outl(chn_int, TRID_REG(trident, T4D_AINT_A)); /* ack */ __skip1: chn_int = inl(TRID_REG(trident, T4D_AINT_B)); if (chn_int == 0) goto __skip2; for (channel = 63; channel >= 32; channel--) { mask = 1 << (channel&0x1f); if ((chn_int & mask) == 0) continue; voice = &trident->synth.voices[channel]; if (!voice->pcm || voice->substream == NULL) { outl(mask, TRID_REG(trident, T4D_STOP_B)); continue; } delta = (int)stimer - (int)voice->stimer; if (delta < 0) delta = -delta; if ((unsigned int)delta < voice->spurious_threshold) { /* do some statistics here */ trident->spurious_irq_count++; if (trident->spurious_irq_max_delta < (unsigned int)delta) trident->spurious_irq_max_delta = delta; continue; } voice->stimer = stimer; if (voice->isync) { if (!voice->isync3) { tmp = inw(TRID_REG(trident, T4D_SBBL_SBCL)); if (trident->bDMAStart & 0x40) tmp >>= 1; if (tmp > 0) tmp = voice->isync_max - tmp; } else { tmp = inl(TRID_REG(trident, NX_SPCTRL_SPCSO)) & 0x00ffffff; } if (tmp < voice->isync_mark) { if (tmp > 0x10) tmp = voice->isync_ESO - 7; else tmp = voice->isync_ESO + 2; /* update ESO for IRQ voice to preserve sync */ snd_trident_stop_voice(trident, voice->number); snd_trident_write_eso_reg(trident, voice, tmp); snd_trident_start_voice(trident, voice->number); } } else if (voice->isync2) { voice->isync2 = 0; /* write original ESO and update CSO for IRQ voice to preserve sync */ snd_trident_stop_voice(trident, voice->number); snd_trident_write_cso_reg(trident, voice, voice->isync_mark); snd_trident_write_eso_reg(trident, voice, voice->ESO); snd_trident_start_voice(trident, voice->number); } #if 0 if (voice->extra) { /* update CSO for extra voice to preserve sync */ snd_trident_stop_voice(trident, voice->extra->number); snd_trident_write_cso_reg(trident, voice->extra, 0); snd_trident_start_voice(trident, voice->extra->number); } #endif spin_unlock(&trident->reg_lock); snd_pcm_period_elapsed(voice->substream); spin_lock(&trident->reg_lock); } outl(chn_int, TRID_REG(trident, T4D_AINT_B)); /* ack */ __skip2: spin_unlock(&trident->reg_lock); } if (audio_int & MPU401_IRQ) { if (trident->rmidi) { snd_mpu401_uart_interrupt(irq, trident->rmidi->private_data); } else { inb(TRID_REG(trident, T4D_MPUR0)); } } // outl((ST_TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW), TRID_REG(trident, T4D_MISCINT)); return IRQ_HANDLED; } struct snd_trident_voice *snd_trident_alloc_voice(struct snd_trident * trident, int type, int client, int port) { struct snd_trident_voice *pvoice; unsigned long flags; int idx; spin_lock_irqsave(&trident->voice_alloc, flags); if (type == SNDRV_TRIDENT_VOICE_TYPE_PCM) { idx = snd_trident_allocate_pcm_channel(trident); if(idx < 0) { spin_unlock_irqrestore(&trident->voice_alloc, flags); return NULL; } pvoice = &trident->synth.voices[idx]; pvoice->use = 1; pvoice->pcm = 1; pvoice->capture = 0; pvoice->spdif = 0; pvoice->memblk = NULL; pvoice->substream = NULL; 
spin_unlock_irqrestore(&trident->voice_alloc, flags); return pvoice; } if (type == SNDRV_TRIDENT_VOICE_TYPE_SYNTH) { idx = snd_trident_allocate_synth_channel(trident); if(idx < 0) { spin_unlock_irqrestore(&trident->voice_alloc, flags); return NULL; } pvoice = &trident->synth.voices[idx]; pvoice->use = 1; pvoice->synth = 1; pvoice->client = client; pvoice->port = port; pvoice->memblk = NULL; spin_unlock_irqrestore(&trident->voice_alloc, flags); return pvoice; } if (type == SNDRV_TRIDENT_VOICE_TYPE_MIDI) { } spin_unlock_irqrestore(&trident->voice_alloc, flags); return NULL; } EXPORT_SYMBOL(snd_trident_alloc_voice); void snd_trident_free_voice(struct snd_trident * trident, struct snd_trident_voice *voice) { unsigned long flags; void (*private_free)(struct snd_trident_voice *); void *private_data; if (voice == NULL || !voice->use) return; snd_trident_clear_voices(trident, voice->number, voice->number); spin_lock_irqsave(&trident->voice_alloc, flags); private_free = voice->private_free; private_data = voice->private_data; voice->private_free = NULL; voice->private_data = NULL; if (voice->pcm) snd_trident_free_pcm_channel(trident, voice->number); if (voice->synth) snd_trident_free_synth_channel(trident, voice->number); voice->use = voice->pcm = voice->synth = voice->midi = 0; voice->capture = voice->spdif = 0; voice->sample_ops = NULL; voice->substream = NULL; voice->extra = NULL; spin_unlock_irqrestore(&trident->voice_alloc, flags); if (private_free) private_free(voice); } EXPORT_SYMBOL(snd_trident_free_voice); static void snd_trident_clear_voices(struct snd_trident * trident, unsigned short v_min, unsigned short v_max) { unsigned int i, val, mask[2] = { 0, 0 }; if (snd_BUG_ON(v_min > 63 || v_max > 63)) return; for (i = v_min; i <= v_max; i++) mask[i >> 5] |= 1 << (i & 0x1f); if (mask[0]) { outl(mask[0], TRID_REG(trident, T4D_STOP_A)); val = inl(TRID_REG(trident, T4D_AINTEN_A)); outl(val & ~mask[0], TRID_REG(trident, T4D_AINTEN_A)); } if (mask[1]) { outl(mask[1], TRID_REG(trident, T4D_STOP_B)); val = inl(TRID_REG(trident, T4D_AINTEN_B)); outl(val & ~mask[1], TRID_REG(trident, T4D_AINTEN_B)); } } #ifdef CONFIG_PM int snd_trident_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct snd_trident *trident = card->private_data; trident->in_suspend = 1; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(trident->pcm); snd_pcm_suspend_all(trident->foldback); snd_pcm_suspend_all(trident->spdif); snd_ac97_suspend(trident->ac97); snd_ac97_suspend(trident->ac97_sec); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } int snd_trident_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct snd_trident *trident = card->private_data; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "trident: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); switch (trident->device) { case TRIDENT_DEVICE_ID_DX: snd_trident_4d_dx_init(trident); break; case TRIDENT_DEVICE_ID_NX: snd_trident_4d_nx_init(trident); break; case TRIDENT_DEVICE_ID_SI7018: snd_trident_sis_init(trident); break; } snd_ac97_resume(trident->ac97); snd_ac97_resume(trident->ac97_sec); /* restore some registers */ outl(trident->musicvol_wavevol, TRID_REG(trident, T4D_MUSICVOL_WAVEVOL)); snd_trident_enable_eso(trident); snd_power_change_state(card, SNDRV_CTL_POWER_D0); 
trident->in_suspend = 0; return 0; } #endif /* CONFIG_PM */
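
The snd_trident_tlb_alloc() path above depends on a classic over-allocate-and-align trick: the DMA allocator only guarantees page alignment, so the driver requests twice the 16 KiB-aligned table size and rounds the returned address up with ALIGN(), applying the same rounding to both the CPU pointer and the DMA address so the two views of the table stay in step. A minimal standalone sketch of the same idea, using plain malloc() and hypothetical names rather than the kernel DMA API:

#include <stdint.h>
#include <stdlib.h>

/* Round x up to the next multiple of a (a must be a power of two). */
#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

struct aligned_buf {
	void *raw;	/* original pointer, kept so it can be freed */
	void *aligned;	/* payload rounded up to 'align' bytes */
};

/* Allocate 'size' bytes aligned to 'align' by over-allocating:
 * a region of size + align bytes always contains an aligned
 * sub-region of 'size' bytes. */
static int aligned_buf_alloc(struct aligned_buf *b, size_t size, size_t align)
{
	b->raw = malloc(size + align);
	if (!b->raw)
		return -1;
	b->aligned = (void *)ALIGN_UP((uintptr_t)b->raw, align);
	return 0;
}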
gpl-2.0
AOKP/kernel_htc_m7
net/ceph/debugfs.c
6641
6656
#include <linux/ceph/ceph_debug.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/ceph/libceph.h> #include <linux/ceph/mon_client.h> #include <linux/ceph/auth.h> #include <linux/ceph/debugfs.h> #ifdef CONFIG_DEBUG_FS /* * Implement /sys/kernel/debug/ceph fun * * /sys/kernel/debug/ceph/client* - an instance of the ceph client * .../osdmap - current osdmap * .../monmap - current monmap * .../osdc - active osd requests * .../monc - mon client state * .../dentry_lru - dump contents of dentry lru * .../caps - expose cap (reservation) stats * .../bdi - symlink to ../../bdi/something */ static struct dentry *ceph_debugfs_dir; static int monmap_show(struct seq_file *s, void *p) { int i; struct ceph_client *client = s->private; if (client->monc.monmap == NULL) return 0; seq_printf(s, "epoch %d\n", client->monc.monmap->epoch); for (i = 0; i < client->monc.monmap->num_mon; i++) { struct ceph_entity_inst *inst = &client->monc.monmap->mon_inst[i]; seq_printf(s, "\t%s%lld\t%s\n", ENTITY_NAME(inst->name), ceph_pr_addr(&inst->addr.in_addr)); } return 0; } static int osdmap_show(struct seq_file *s, void *p) { int i; struct ceph_client *client = s->private; struct rb_node *n; if (client->osdc.osdmap == NULL) return 0; seq_printf(s, "epoch %d\n", client->osdc.osdmap->epoch); seq_printf(s, "flags%s%s\n", (client->osdc.osdmap->flags & CEPH_OSDMAP_NEARFULL) ? " NEARFULL" : "", (client->osdc.osdmap->flags & CEPH_OSDMAP_FULL) ? " FULL" : ""); for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) { struct ceph_pg_pool_info *pool = rb_entry(n, struct ceph_pg_pool_info, node); seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n", pool->id, pool->v.pg_num, pool->pg_num_mask, pool->v.lpg_num, pool->lpg_num_mask); } for (i = 0; i < client->osdc.osdmap->max_osd; i++) { struct ceph_entity_addr *addr = &client->osdc.osdmap->osd_addr[i]; int state = client->osdc.osdmap->osd_state[i]; char sb[64]; seq_printf(s, "\tosd%d\t%s\t%3d%%\t(%s)\n", i, ceph_pr_addr(&addr->in_addr), ((client->osdc.osdmap->osd_weight[i]*100) >> 16), ceph_osdmap_state_str(sb, sizeof(sb), state)); } return 0; } static int monc_show(struct seq_file *s, void *p) { struct ceph_client *client = s->private; struct ceph_mon_generic_request *req; struct ceph_mon_client *monc = &client->monc; struct rb_node *rp; mutex_lock(&monc->mutex); if (monc->have_mdsmap) seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap); if (monc->have_osdmap) seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap); if (monc->want_next_osdmap) seq_printf(s, "want next osdmap\n"); for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) { __u16 op; req = rb_entry(rp, struct ceph_mon_generic_request, node); op = le16_to_cpu(req->request->hdr.type); if (op == CEPH_MSG_STATFS) seq_printf(s, "%lld statfs\n", req->tid); else seq_printf(s, "%lld unknown\n", req->tid); } mutex_unlock(&monc->mutex); return 0; } static int osdc_show(struct seq_file *s, void *pp) { struct ceph_client *client = s->private; struct ceph_osd_client *osdc = &client->osdc; struct rb_node *p; mutex_lock(&osdc->request_mutex); for (p = rb_first(&osdc->requests); p; p = rb_next(p)) { struct ceph_osd_request *req; struct ceph_osd_request_head *head; struct ceph_osd_op *op; int num_ops; int opcode, olen; int i; req = rb_entry(p, struct ceph_osd_request, r_node); seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid, req->r_osd ? 
req->r_osd->o_osd : -1, le32_to_cpu(req->r_pgid.pool), le16_to_cpu(req->r_pgid.ps)); head = req->r_request->front.iov_base; op = (void *)(head + 1); num_ops = le16_to_cpu(head->num_ops); olen = le32_to_cpu(head->object_len); seq_printf(s, "%.*s", olen, (const char *)(head->ops + num_ops)); if (req->r_reassert_version.epoch) seq_printf(s, "\t%u'%llu", (unsigned)le32_to_cpu(req->r_reassert_version.epoch), le64_to_cpu(req->r_reassert_version.version)); else seq_printf(s, "\t"); for (i = 0; i < num_ops; i++) { opcode = le16_to_cpu(op->op); seq_printf(s, "\t%s", ceph_osd_op_name(opcode)); op++; } seq_printf(s, "\n"); } mutex_unlock(&osdc->request_mutex); return 0; } CEPH_DEFINE_SHOW_FUNC(monmap_show) CEPH_DEFINE_SHOW_FUNC(osdmap_show) CEPH_DEFINE_SHOW_FUNC(monc_show) CEPH_DEFINE_SHOW_FUNC(osdc_show) int ceph_debugfs_init(void) { ceph_debugfs_dir = debugfs_create_dir("ceph", NULL); if (!ceph_debugfs_dir) return -ENOMEM; return 0; } void ceph_debugfs_cleanup(void) { debugfs_remove(ceph_debugfs_dir); } int ceph_debugfs_client_init(struct ceph_client *client) { int ret = -ENOMEM; char name[80]; snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid, client->monc.auth->global_id); client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir); if (!client->debugfs_dir) goto out; client->monc.debugfs_file = debugfs_create_file("monc", 0600, client->debugfs_dir, client, &monc_show_fops); if (!client->monc.debugfs_file) goto out; client->osdc.debugfs_file = debugfs_create_file("osdc", 0600, client->debugfs_dir, client, &osdc_show_fops); if (!client->osdc.debugfs_file) goto out; client->debugfs_monmap = debugfs_create_file("monmap", 0600, client->debugfs_dir, client, &monmap_show_fops); if (!client->debugfs_monmap) goto out; client->debugfs_osdmap = debugfs_create_file("osdmap", 0600, client->debugfs_dir, client, &osdmap_show_fops); if (!client->debugfs_osdmap) goto out; return 0; out: ceph_debugfs_client_cleanup(client); return ret; } void ceph_debugfs_client_cleanup(struct ceph_client *client) { debugfs_remove(client->debugfs_osdmap); debugfs_remove(client->debugfs_monmap); debugfs_remove(client->osdc.debugfs_file); debugfs_remove(client->monc.debugfs_file); debugfs_remove(client->debugfs_dir); } #else /* CONFIG_DEBUG_FS */ int ceph_debugfs_init(void) { return 0; } void ceph_debugfs_cleanup(void) { } int ceph_debugfs_client_init(struct ceph_client *client) { return 0; } void ceph_debugfs_client_cleanup(struct ceph_client *client) { } #endif /* CONFIG_DEBUG_FS */ EXPORT_SYMBOL(ceph_debugfs_init); EXPORT_SYMBOL(ceph_debugfs_cleanup);
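
The CEPH_DEFINE_SHOW_FUNC() invocations above generate the monmap_show_fops, osdc_show_fops, etc. consumed by ceph_debugfs_client_init(). The macro itself lives in the ceph headers and is not shown in this file; what follows is a hedged sketch of the single_open() boilerplate such a macro conventionally expands to, written out for monmap_show:

/* Sketch of the glue CEPH_DEFINE_SHOW_FUNC plausibly generates; the
 * real macro may differ in detail.  debugfs_create_file() stashes the
 * 'client' argument in inode->i_private, and single_open() forwards
 * it to the show callback as s->private. */
static int monmap_show_open(struct inode *inode, struct file *file)
{
	return single_open(file, monmap_show, inode->i_private);
}

static const struct file_operations monmap_show_fops = {
	.owner   = THIS_MODULE,
	.open    = monmap_show_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};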
gpl-2.0
VilleEvitaCake/android_kernel_htc_msm8960
arch/mips/powertv/asic/prealloc-cronuslite.c
8689
4794
/* * Memory pre-allocations for Cronus Lite boxes. * * Copyright (C) 2005-2009 Scientific-Atlanta, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Author: Ken Eppinett * David Schleef <ds@schleef.org> */ #include <linux/init.h> #include <linux/ioport.h> #include <asm/mach-powertv/asic.h> #include "prealloc.h" /* * NON_DVR_CAPABLE CRONUSLITE RESOURCES */ struct resource non_dvr_cronuslite_resources[] __initdata = { /* * VIDEO2 / LX2 */ /* Delta-Mu 1 image (2MiB) */ PREALLOC_NORMAL("ST231aImage", 0x60000000, 0x60200000-1, IORESOURCE_MEM) /* Delta-Mu 1 monitor (8KiB) */ PREALLOC_NORMAL("ST231aMonitor", 0x60200000, 0x60202000-1, IORESOURCE_MEM) /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */ PREALLOC_NORMAL("MediaMemory1", 0x60202000, 0x62000000-1, IORESOURCE_MEM) /* * Sysaudio Driver */ /* DSP code and data images (1MiB) */ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* ADSC CPU PCM buffer (40KiB) */ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* ADSC AUX buffer (128KiB) */ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* ADSC Main buffer (128KiB) */ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * STAVEM driver/STAPI * * This memory area is used for allocating buffers for Video decoding * purposes. Allocation/De-allocation within this buffer is managed * by the STAVMEM driver of the STAPI. They could be Decimated * Picture Buffers, Intermediate Buffers, as deemed necessary for * video decoding purposes, for any video decoders on Zeus. 
*/ /* 6MiB */ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1, IORESOURCE_MEM) /* * DOCSIS Subsystem */ /* 7MiB */ PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM) /* * GHW HAL Driver */ /* PowerTV Graphics Heap (14MiB) */ PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1, IORESOURCE_MEM) /* * multi com buffer area */ /* 128KiB */ PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1, IORESOURCE_MEM) /* * DMA Ring buffer (don't need recording buffers) */ /* 680KiB */ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * Display bins buffer for unit0 */ /* 4KiB */ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * Display bins buffer for unit1 */ /* 4KiB */ PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1, IORESOURCE_MEM) /* * AVFS: player HAL memory */ /* 945K * 3 for playback */ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * PMEM */ /* Persistent memory for diagnostics (64KiB) */ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * Smartcard */ /* Read and write buffers for Internal/External cards (10KiB) */ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1, IORESOURCE_MEM) /* * KAVNET */ /* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */ PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1, IORESOURCE_MEM) /* NP Image - must be video bank 1 (320KiB) */ PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM) /* NP IPC - must be video bank 2 (512KiB) */ PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM) /* * NAND Flash */ /* 10KiB */ PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1, IORESOURCE_MEM) /* * TFTPBuffer * * This buffer is used in some minimal configurations (e.g. two-way * loader) for storing software images */ PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * Add other resources here */ /* * End of Resource marker */ { .flags = 0, }, };
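
The PREALLOC_NORMAL/PREALLOC_DOCSIS/PREALLOC_PMEM/PREALLOC_TFTP macros come from the local prealloc.h, which is not part of this file. Since the array they populate is a struct resource[], each invocation plausibly expands to a resource initializer along these lines (the exact fields and any variant-specific bookkeeping are assumptions):

/* Hypothetical expansion of PREALLOC_NORMAL; the real macro in
 * prealloc.h may differ, e.g. by encoding the variant (DOCSIS, PMEM,
 * TFTP) in extra flag bits. */
#define PREALLOC_NORMAL(n, s, e, f)	\
	{				\
		.name  = (n),		\
		.start = (s),		\
		.end   = (e),		\
		.flags = (f),		\
	},

Note the trailing comma inside the macro body: it lets the invocations above sit back to back in the array with no separators between them.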
gpl-2.0
UnicronNL/vyos-kernel-utilite
arch/sparc/kernel/sigutil_32.c
8689
3264
#include <linux/kernel.h> #include <linux/types.h> #include <linux/thread_info.h> #include <linux/uaccess.h> #include <linux/sched.h> #include <asm/sigcontext.h> #include <asm/fpumacro.h> #include <asm/ptrace.h> #include <asm/switch_to.h> #include "sigutil.h" int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) { int err = 0; #ifdef CONFIG_SMP if (test_tsk_thread_flag(current, TIF_USEDFPU)) { put_psr(get_psr() | PSR_EF); fpsave(&current->thread.float_regs[0], &current->thread.fsr, &current->thread.fpqueue[0], &current->thread.fpqdepth); regs->psr &= ~(PSR_EF); clear_tsk_thread_flag(current, TIF_USEDFPU); } #else if (current == last_task_used_math) { put_psr(get_psr() | PSR_EF); fpsave(&current->thread.float_regs[0], &current->thread.fsr, &current->thread.fpqueue[0], &current->thread.fpqdepth); last_task_used_math = NULL; regs->psr &= ~(PSR_EF); } #endif err |= __copy_to_user(&fpu->si_float_regs[0], &current->thread.float_regs[0], (sizeof(unsigned long) * 32)); err |= __put_user(current->thread.fsr, &fpu->si_fsr); err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth); if (current->thread.fpqdepth != 0) err |= __copy_to_user(&fpu->si_fpqueue[0], &current->thread.fpqueue[0], ((sizeof(unsigned long) + (sizeof(unsigned long *)))*16)); clear_used_math(); return err; } int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) { int err; #ifdef CONFIG_SMP if (test_tsk_thread_flag(current, TIF_USEDFPU)) regs->psr &= ~PSR_EF; #else if (current == last_task_used_math) { last_task_used_math = NULL; regs->psr &= ~PSR_EF; } #endif set_used_math(); clear_tsk_thread_flag(current, TIF_USEDFPU); if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu))) return -EFAULT; err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0], (sizeof(unsigned long) * 32)); err |= __get_user(current->thread.fsr, &fpu->si_fsr); err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth); if (current->thread.fpqdepth != 0) err |= __copy_from_user(&current->thread.fpqueue[0], &fpu->si_fpqueue[0], ((sizeof(unsigned long) + (sizeof(unsigned long *)))*16)); return err; } int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin) { int i, err = __put_user(wsaved, &rwin->wsaved); for (i = 0; i < wsaved; i++) { struct reg_window32 *rp; unsigned long fp; rp = &current_thread_info()->reg_window[i]; fp = current_thread_info()->rwbuf_stkptrs[i]; err |= copy_to_user(&rwin->reg_window[i], rp, sizeof(struct reg_window32)); err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]); } return err; } int restore_rwin_state(__siginfo_rwin_t __user *rp) { struct thread_info *t = current_thread_info(); int i, wsaved, err; __get_user(wsaved, &rp->wsaved); if (wsaved > NSWINS) return -EFAULT; err = 0; for (i = 0; i < wsaved; i++) { err |= copy_from_user(&t->reg_window[i], &rp->reg_window[i], sizeof(struct reg_window32)); err |= __get_user(t->rwbuf_stkptrs[i], &rp->rwbuf_stkptrs[i]); } if (err) return err; t->w_saved = wsaved; synchronize_user_stack(); if (t->w_saved) return -EFAULT; return 0; }
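
save_fpu_state() and its peers rely on the common kernel idiom of OR-ing the results of many user-copy calls and testing once at the end: each __put_user()/__copy_to_user() returns zero on success, so a single nonzero check covers the whole sequence. The pattern in isolation, using a hypothetical structure:

/* Sketch of the err |= ... accumulation idiom used above.  The field
 * names are made up; only the control flow matters. */
static int save_state_sketch(struct user_state __user *u,
			     const struct kernel_state *k)
{
	int err = 0;

	err |= __put_user(k->flags, &u->flags);
	err |= __copy_to_user(u->regs, k->regs, sizeof(k->regs));
	/* one test at the call site instead of one branch per copy */
	return err;
}

One subtlety: __copy_to_user() returns the number of bytes left uncopied rather than an errno, so the accumulated value is only a success/failure flag, which is how the callers of these helpers treat it.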
gpl-2.0
Split-Screen/android_kernel_samsung_msm7x30-common
arch/sh/boards/mach-migor/lcd_qvga.c
9201
5008
/* * Support for SuperH MigoR Quarter VGA LCD Panel * * Copyright (C) 2008 Magnus Damm * * Based on lcd_powertip.c from Kenati Technologies Pvt Ltd. * Copyright (c) 2007 Ujjwal Pande <ujjwal@kenati.com>, * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/err.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/gpio.h> #include <video/sh_mobile_lcdc.h> #include <cpu/sh7722.h> #include <mach/migor.h> /* LCD Module is a PH240320T according to board schematics. This module * is made up of a 240x320 LCD hooked up to a R61505U (or HX8347-A01?) * Driver IC. This IC is connected to the SH7722 built-in LCDC using a * SYS-80 interface configured in 16 bit mode. * * Index 0: "Device Code Read" returns 0x1505. */ static void reset_lcd_module(void) { gpio_set_value(GPIO_PTH2, 0); mdelay(2); gpio_set_value(GPIO_PTH2, 1); mdelay(1); } /* DB0-DB7 are connected to D1-D8, and DB8-DB15 to D10-D17 */ static unsigned long adjust_reg18(unsigned short data) { unsigned long tmp1, tmp2; tmp1 = (data<<1 | 0x00000001) & 0x000001FF; tmp2 = (data<<2 | 0x00000200) & 0x0003FE00; return tmp1 | tmp2; } static void write_reg(void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops, unsigned short reg, unsigned short data) { sys_ops->write_index(sys_ops_handle, adjust_reg18(reg << 8 | data)); } static void write_reg16(void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops, unsigned short reg, unsigned short data) { sys_ops->write_index(sys_ops_handle, adjust_reg18(reg)); sys_ops->write_data(sys_ops_handle, adjust_reg18(data)); } static unsigned long read_reg16(void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops, unsigned short reg) { unsigned long data; sys_ops->write_index(sys_ops_handle, adjust_reg18(reg)); data = sys_ops->read_data(sys_ops_handle); return ((data >> 1) & 0xff) | ((data >> 2) & 0xff00); } static void migor_lcd_qvga_seq(void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops, unsigned short const *data, int no_data) { int i; for (i = 0; i < no_data; i += 2) write_reg16(sys_ops_handle, sys_ops, data[i], data[i + 1]); } static const unsigned short sync_data[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const unsigned short magic0_data[] = { 0x0060, 0x2700, 0x0008, 0x0808, 0x0090, 0x001A, 0x0007, 0x0001, 0x0017, 0x0001, 0x0019, 0x0000, 0x0010, 0x17B0, 0x0011, 0x0116, 0x0012, 0x0198, 0x0013, 0x1400, 0x0029, 0x000C, 0x0012, 0x01B8, }; static const unsigned short magic1_data[] = { 0x0030, 0x0307, 0x0031, 0x0303, 0x0032, 0x0603, 0x0033, 0x0202, 0x0034, 0x0202, 0x0035, 0x0202, 0x0036, 0x1F1F, 0x0037, 0x0303, 0x0038, 0x0303, 0x0039, 0x0603, 0x003A, 0x0202, 0x003B, 0x0102, 0x003C, 0x0204, 0x003D, 0x0000, 0x0001, 0x0100, 0x0002, 0x0300, 0x0003, 0x5028, 0x0020, 0x00ef, 0x0021, 0x0000, 0x0004, 0x0000, 0x0009, 0x0000, 0x000A, 0x0008, 0x000C, 0x0000, 0x000D, 0x0000, 0x0015, 0x8000, }; static const unsigned short magic2_data[] = { 0x0061, 0x0001, 0x0092, 0x0100, 0x0093, 0x0001, 0x0007, 0x0021, }; static const unsigned short magic3_data[] = { 0x0010, 0x16B0, 0x0011, 0x0111, 0x0007, 0x0061, }; int migor_lcd_qvga_setup(void *sohandle, struct sh_mobile_lcdc_sys_bus_ops *so) { unsigned long xres = 320; unsigned long yres = 240; int k; reset_lcd_module(); migor_lcd_qvga_seq(sohandle, so, sync_data, 
ARRAY_SIZE(sync_data)); if (read_reg16(sohandle, so, 0) != 0x1505) return -ENODEV; pr_info("Migo-R QVGA LCD Module detected.\n"); migor_lcd_qvga_seq(sohandle, so, sync_data, ARRAY_SIZE(sync_data)); write_reg16(sohandle, so, 0x00A4, 0x0001); mdelay(10); migor_lcd_qvga_seq(sohandle, so, magic0_data, ARRAY_SIZE(magic0_data)); mdelay(100); migor_lcd_qvga_seq(sohandle, so, magic1_data, ARRAY_SIZE(magic1_data)); write_reg16(sohandle, so, 0x0050, 0xef - (yres - 1)); write_reg16(sohandle, so, 0x0051, 0x00ef); write_reg16(sohandle, so, 0x0052, 0x0000); write_reg16(sohandle, so, 0x0053, xres - 1); migor_lcd_qvga_seq(sohandle, so, magic2_data, ARRAY_SIZE(magic2_data)); mdelay(10); migor_lcd_qvga_seq(sohandle, so, magic3_data, ARRAY_SIZE(magic3_data)); mdelay(40); /* clear GRAM to avoid displaying garbage */ write_reg16(sohandle, so, 0x0020, 0x0000); /* horiz addr */ write_reg16(sohandle, so, 0x0021, 0x0000); /* vert addr */ for (k = 0; k < (xres * 256); k++) /* yes, 256 words per line */ write_reg16(sohandle, so, 0x0022, 0x0000); write_reg16(sohandle, so, 0x0020, 0x0000); /* reset horiz addr */ write_reg16(sohandle, so, 0x0021, 0x0000); /* reset vert addr */ write_reg16(sohandle, so, 0x0007, 0x0173); mdelay(40); /* enable display */ write_reg(sohandle, so, 0x00, 0x22); mdelay(100); return 0; }
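
adjust_reg18() above spreads a 16-bit register value across the 18-bit SYS-80 bus, placing DB0-DB7 on D1-D8 and DB8-DB15 on D10-D17 (the transform also sets D0 and D9 to one), and read_reg16() undoes the spreading with two shifts. A small host-side check, assuming both transforms are copied faithfully from the driver, that the mapping really round-trips:

#include <assert.h>
#include <stdio.h>

/* Same transform as adjust_reg18(). */
static unsigned long spread18(unsigned short data)
{
	unsigned long lo = (((unsigned long)data << 1) | 0x00001) & 0x001FF;
	unsigned long hi = (((unsigned long)data << 2) | 0x00200) & 0x3FE00;
	return lo | hi;
}

/* Same shifts read_reg16() applies to the value read from the bus. */
static unsigned short unspread18(unsigned long bus)
{
	return ((bus >> 1) & 0x00ff) | ((bus >> 2) & 0xff00);
}

int main(void)
{
	unsigned v;

	for (v = 0; v <= 0xffff; v++)
		assert(unspread18(spread18(v)) == v);
	printf("D1-D8/D10-D17 swizzle round-trips for all 16-bit values\n");
	return 0;
}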
gpl-2.0
crystalfontz/linux-2.6
drivers/net/irda/litelink-sir.c
12529
5436
/********************************************************************* * * Filename: litelink.c * Version: 1.1 * Description: Driver for the Parallax LiteLink dongle * Status: Stable * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Fri May 7 12:50:33 1999 * Modified at: Fri Dec 17 09:14:23 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1999 Dag Brattli, All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * ********************************************************************/ /* * Modified at: Thu Jan 15 2003 * Modified by: Eugene Crosser <crosser@average.org> * * Convert to "new" IRDA infrastructure for kernel 2.6 */ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <net/irda/irda.h> #include "sir-dev.h" #define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */ #define MAX_DELAY 10000 /* 1 ms */ static int litelink_open(struct sir_dev *dev); static int litelink_close(struct sir_dev *dev); static int litelink_change_speed(struct sir_dev *dev, unsigned speed); static int litelink_reset(struct sir_dev *dev); /* These are the baudrates supported - 9600 must be last one! */ static unsigned baud_rates[] = { 115200, 57600, 38400, 19200, 9600 }; static struct dongle_driver litelink = { .owner = THIS_MODULE, .driver_name = "Parallax LiteLink", .type = IRDA_LITELINK_DONGLE, .open = litelink_open, .close = litelink_close, .reset = litelink_reset, .set_speed = litelink_change_speed, }; static int __init litelink_sir_init(void) { return irda_register_dongle(&litelink); } static void __exit litelink_sir_cleanup(void) { irda_unregister_dongle(&litelink); } static int litelink_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; IRDA_DEBUG(2, "%s()\n", __func__); /* Power up dongle */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Set the speeds we can accept */ qos->baud_rate.bits &= IR_115200|IR_57600|IR_38400|IR_19200|IR_9600; qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */ irda_qos_bits_to_value(qos); /* irda thread waits 50 msec for power settling */ return 0; } static int litelink_close(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__); /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); return 0; } /* * Function litelink_change_speed (task) * * Change speed of the Litelink dongle. To cycle through the available * baud rates, pulse RTS low for a few ms. */ static int litelink_change_speed(struct sir_dev *dev, unsigned speed) { int i; IRDA_DEBUG(2, "%s()\n", __func__); /* dongle already reset by irda-thread - current speed (dongle and * port) is the default speed (115200 for litelink!) 
*/ /* Cycle through avaiable baudrates until we reach the correct one */ for (i = 0; baud_rates[i] != speed; i++) { /* end-of-list reached due to invalid speed request */ if (baud_rates[i] == 9600) break; /* Set DTR, clear RTS */ sirdev_set_dtr_rts(dev, FALSE, TRUE); /* Sleep a minimum of 15 us */ udelay(MIN_DELAY); /* Set DTR, Set RTS */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Sleep a minimum of 15 us */ udelay(MIN_DELAY); } dev->speed = baud_rates[i]; /* invalid baudrate should not happen - but if, we return -EINVAL and * the dongle configured for 9600 so the stack has a chance to recover */ return (dev->speed == speed) ? 0 : -EINVAL; } /* * Function litelink_reset (task) * * Reset the Litelink type dongle. * */ static int litelink_reset(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__); /* probably the power-up can be dropped here, but with only * 15 usec delay it's not worth the risk unless somebody with * the hardware confirms it doesn't break anything... */ /* Power on dongle */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Sleep a minimum of 15 us */ udelay(MIN_DELAY); /* Clear RTS to reset dongle */ sirdev_set_dtr_rts(dev, TRUE, FALSE); /* Sleep a minimum of 15 us */ udelay(MIN_DELAY); /* Go back to normal mode */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Sleep a minimum of 15 us */ udelay(MIN_DELAY); /* This dongles speed defaults to 115200 bps */ dev->speed = 115200; return 0; } MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>"); MODULE_DESCRIPTION("Parallax Litelink dongle driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("irda-dongle-5"); /* IRDA_LITELINK_DONGLE */ /* * Function init_module (void) * * Initialize Litelink module * */ module_init(litelink_sir_init); /* * Function cleanup_module (void) * * Cleanup Litelink module * */ module_exit(litelink_sir_cleanup);
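
litelink_change_speed() above works because the dongle powers up at 115200 and steps down one table entry per RTS pulse, so walking the table while pulsing amounts to counting pulses. The selection logic alone, as a plain function:

/* Same table as above; 9600 must stay last, since it doubles as the
 * end-of-list marker for unsupported speed requests. */
static const unsigned sketch_baud_rates[] = { 115200, 57600, 38400, 19200, 9600 };

/* Return the number of RTS pulses needed to step the dongle from its
 * 115200 default down to 'speed'; an unsupported speed lands on the
 * 9600 fallback, mirroring the driver's -EINVAL path. */
static int litelink_pulses_for(unsigned speed)
{
	int i;

	for (i = 0; sketch_baud_rates[i] != speed; i++)
		if (sketch_baud_rates[i] == 9600)
			break;
	return i;
}

So litelink_pulses_for(38400) is 2, and any speed not in the table yields 4, leaving the dongle at 9600 so the stack still has a chance to recover.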
gpl-2.0
NeptunIDE/linux
arch/ia64/mm/extable.c
13297
3019
/* * Kernel exception handling table support. Derived from arch/alpha/mm/extable.c. * * Copyright (C) 1998, 1999, 2001-2002, 2004 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> */ #include <linux/sort.h> #include <asm/uaccess.h> #include <linux/module.h> static int cmp_ex(const void *a, const void *b) { const struct exception_table_entry *l = a, *r = b; u64 lip = (u64) &l->addr + l->addr; u64 rip = (u64) &r->addr + r->addr; /* avoid overflow */ if (lip > rip) return 1; if (lip < rip) return -1; return 0; } static void swap_ex(void *a, void *b, int size) { struct exception_table_entry *l = a, *r = b, tmp; u64 delta = (u64) r - (u64) l; tmp = *l; l->addr = r->addr + delta; l->cont = r->cont + delta; r->addr = tmp.addr - delta; r->cont = tmp.cont - delta; } /* * Sort the exception table. It's usually already sorted, but there * may be unordered entries due to multiple text sections (such as the * .init text section). Note that the exception-table-entries contain * location-relative addresses, which requires a bit of care during * sorting to avoid overflows in the offset members (e.g., it would * not be safe to make a temporary copy of an exception-table entry on * the stack, because the stack may be more than 2GB away from the * exception-table). */ void sort_extable (struct exception_table_entry *start, struct exception_table_entry *finish) { sort(start, finish - start, sizeof(struct exception_table_entry), cmp_ex, swap_ex); } static inline unsigned long ex_to_addr(const struct exception_table_entry *x) { return (unsigned long)&x->addr + x->addr; } #ifdef CONFIG_MODULES /* * Any entry referring to the module init will be at the beginning or * the end. */ void trim_init_extable(struct module *m) { /*trim the beginning*/ while (m->num_exentries && within_module_init(ex_to_addr(&m->extable[0]), m)) { m->extable++; m->num_exentries--; } /*trim the end*/ while (m->num_exentries && within_module_init(ex_to_addr(&m->extable[m->num_exentries-1]), m)) m->num_exentries--; } #endif /* CONFIG_MODULES */ const struct exception_table_entry * search_extable (const struct exception_table_entry *first, const struct exception_table_entry *last, unsigned long ip) { const struct exception_table_entry *mid; unsigned long mid_ip; long diff; while (first <= last) { mid = &first[(last - first)/2]; mid_ip = (u64) &mid->addr + mid->addr; diff = mid_ip - ip; if (diff == 0) return mid; else if (diff < 0) first = mid + 1; else last = mid - 1; } return NULL; } void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e) { long fix = (u64) &e->cont + e->cont; regs->r8 = -EFAULT; if (fix & 4) regs->r9 = 0; regs->cr_iip = fix & ~0xf; ia64_psr(regs)->ri = fix & 0x3; /* set continuation slot number */ }
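
Every address in this exception table is stored as a 32-bit offset relative to the location of the field holding it, which is why decoding is (u64) &e->addr + e->addr, why swap_ex() above must bias entries by the distance they move, and why the sort must never copy an entry through the stack. The encoding in miniature, as a userspace demonstration (the struct name is hypothetical):

#include <assert.h>
#include <stdint.h>

/* A self-relative pointer: the stored value is target minus the
 * address of the field holding it. */
struct self_rel {
	int32_t off;
};

static void self_rel_set(struct self_rel *p, void *target)
{
	p->off = (int32_t)((intptr_t)target - (intptr_t)&p->off);
}

static void *self_rel_get(const struct self_rel *p)
{
	/* mirrors ex_to_addr(): field address plus stored offset */
	return (void *)((intptr_t)&p->off + p->off);
}

int main(void)
{
	/* keep target and the entry in the same stack frame: the 32-bit
	 * offset cannot span more than 2 GB, the same limitation the
	 * sorting comment above calls out */
	int target;
	struct self_rel r;

	self_rel_set(&r, &target);
	assert(self_rel_get(&r) == &target);
	return 0;
}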
gpl-2.0
XperiaSTE/android_kernel_sony_u8500
arch/arm/mach-ixp4xx/wg302v2-setup.c
498
2513
/* * arch/arm/mach-ixp4xx/wg302-setup.c * * Board setup for the Netgear WG302 v2 and WAG302 v2 * * Copyright (C) 2007 Imre Kaloz <Kaloz@openwrt.org> * * based on coyote-setup.c: * Copyright (C) 2003-2005 MontaVista Software, Inc. * * Author: Imre Kaloz <kaloz@openwrt.org> * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/device.h> #include <linux/serial.h> #include <linux/tty.h> #include <linux/serial_8250.h> #include <asm/types.h> #include <asm/setup.h> #include <asm/memory.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> static struct flash_platform_data wg302v2_flash_data = { .map_name = "cfi_probe", .width = 2, }; static struct resource wg302v2_flash_resource = { .flags = IORESOURCE_MEM, }; static struct platform_device wg302v2_flash = { .name = "IXP4XX-Flash", .id = 0, .dev = { .platform_data = &wg302v2_flash_data, }, .num_resources = 1, .resource = &wg302v2_flash_resource, }; static struct resource wg302v2_uart_resource = { .start = IXP4XX_UART2_BASE_PHYS, .end = IXP4XX_UART2_BASE_PHYS + 0x0fff, .flags = IORESOURCE_MEM, }; static struct plat_serial8250_port wg302v2_uart_data[] = { { .mapbase = IXP4XX_UART2_BASE_PHYS, .membase = (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET, .irq = IRQ_IXP4XX_UART2, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, .iotype = UPIO_MEM, .regshift = 2, .uartclk = IXP4XX_UART_XTAL, }, { }, }; static struct platform_device wg302v2_uart = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = wg302v2_uart_data, }, .num_resources = 1, .resource = &wg302v2_uart_resource, }; static struct platform_device *wg302v2_devices[] __initdata = { &wg302v2_flash, &wg302v2_uart, }; static void __init wg302v2_init(void) { ixp4xx_sys_init(); wg302v2_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); wg302v2_flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + SZ_32M - 1; *IXP4XX_EXP_CS0 |= IXP4XX_FLASH_WRITABLE; *IXP4XX_EXP_CS1 = *IXP4XX_EXP_CS0; platform_add_devices(wg302v2_devices, ARRAY_SIZE(wg302v2_devices)); } #ifdef CONFIG_MACH_WG302V2 MACHINE_START(WG302V2, "Netgear WG302 v2 / WAG302 v2") /* Maintainer: Imre Kaloz <kaloz@openwrt.org> */ .map_io = ixp4xx_map_io, .init_irq = ixp4xx_init_irq, .timer = &ixp4xx_timer, .boot_params = 0x0100, .init_machine = wg302v2_init, #if defined(CONFIG_PCI) .dma_zone_size = SZ_64M, #endif MACHINE_END #endif
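
wg302v2_uart_data ends with an empty { } entry because serial8250 platform data carries no explicit count; the consumer walks the array until it reaches a zeroed terminator. A sketch of such a walk, where setup_one_port() is a hypothetical helper and the multi-field terminator test is an assumption (the in-tree 8250 probe loop keys off p->flags):

#include <linux/serial_8250.h>

static void setup_one_port(const struct plat_serial8250_port *p); /* hypothetical */

/* Walk a zero-terminated plat_serial8250_port array; the { } sentinel
 * above zeroes every field, so any populated field means one more
 * port to register. */
static void register_ports_sketch(const struct plat_serial8250_port *p)
{
	for (; p->flags || p->mapbase || p->irq; p++)
		setup_one_port(p);
}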
gpl-2.0
Kwiboo/kernel_samsung_crespo
drivers/net/wireless/ath/ath9k/ar5008_phy.c
754
38174
/* * Copyright (c) 2008-2010 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "hw.h" #include "hw-ops.h" #include "../regd.h" #include "ar9002_phy.h" /* All code below is for non single-chip solutions */ /** * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters * @rfbuf: * @reg32: * @numBits: * @firstBit: * @column: * * Performs analog "swizzling" of parameters into their location. * Used on external AR2133/AR5133 radios. */ static void ar5008_hw_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32, u32 numBits, u32 firstBit, u32 column) { u32 tmp32, mask, arrayEntry, lastBit; int32_t bitPosition, bitsLeft; tmp32 = ath9k_hw_reverse_bits(reg32, numBits); arrayEntry = (firstBit - 1) / 8; bitPosition = (firstBit - 1) % 8; bitsLeft = numBits; while (bitsLeft > 0) { lastBit = (bitPosition + bitsLeft > 8) ? 8 : bitPosition + bitsLeft; mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) << (column * 8); rfBuf[arrayEntry] &= ~mask; rfBuf[arrayEntry] |= ((tmp32 << bitPosition) << (column * 8)) & mask; bitsLeft -= 8 - bitPosition; tmp32 = tmp32 >> (8 - bitPosition); bitPosition = 0; arrayEntry++; } } /* * Fix on 2.4 GHz band for orientation sensitivity issue by increasing * rf_pwd_icsyndiv. * * Theoretical Rules: * if 2 GHz band * if forceBiasAuto * if synth_freq < 2412 * bias = 0 * else if 2412 <= synth_freq <= 2422 * bias = 1 * else // synth_freq > 2422 * bias = 2 * else if forceBias > 0 * bias = forceBias & 7 * else * no change, use value from ini file * else * no change, invalid band * * 1st Mod: * 2422 also uses value of 2 * <approved> * * 2nd Mod: * Less than 2412 uses value of 0, 2412 and above uses value of 2 */ static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq) { struct ath_common *common = ath9k_hw_common(ah); u32 tmp_reg; int reg_writes = 0; u32 new_bias = 0; if (!AR_SREV_5416(ah) || synth_freq >= 3000) return; BUG_ON(AR_SREV_9280_10_OR_LATER(ah)); if (synth_freq < 2412) new_bias = 0; else if (synth_freq < 2422) new_bias = 1; else new_bias = 2; /* pre-reverse this field */ tmp_reg = ath9k_hw_reverse_bits(new_bias, 3); ath_print(common, ATH_DBG_CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n", new_bias, synth_freq); /* swizzle rf_pwd_icsyndiv */ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3); /* write Bank 6 with new params */ REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes); } /** * ar5008_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios * @ah: atheros hardware stucture * @chan: * * For the external AR2133/AR5133 radios, takes the MHz channel value and set * the channel value. Assumes writes enabled to analog bus and bank6 register * cache in ah->analogBank6Data. 
*/ static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath_common *common = ath9k_hw_common(ah); u32 channelSel = 0; u32 bModeSynth = 0; u32 aModeRefSel = 0; u32 reg32 = 0; u16 freq; struct chan_centers centers; ath9k_hw_get_channel_centers(ah, chan, &centers); freq = centers.synth_center; if (freq < 4800) { u32 txctl; if (((freq - 2192) % 5) == 0) { channelSel = ((freq - 672) * 2 - 3040) / 10; bModeSynth = 0; } else if (((freq - 2224) % 5) == 0) { channelSel = ((freq - 704) * 2 - 3040) / 10; bModeSynth = 1; } else { ath_print(common, ATH_DBG_FATAL, "Invalid channel %u MHz\n", freq); return -EINVAL; } channelSel = (channelSel << 2) & 0xff; channelSel = ath9k_hw_reverse_bits(channelSel, 8); txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL); if (freq == 2484) { REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, txctl | AR_PHY_CCK_TX_CTRL_JAPAN); } else { REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN); } } else if ((freq % 20) == 0 && freq >= 5120) { channelSel = ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8); aModeRefSel = ath9k_hw_reverse_bits(1, 2); } else if ((freq % 10) == 0) { channelSel = ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8); if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) aModeRefSel = ath9k_hw_reverse_bits(2, 2); else aModeRefSel = ath9k_hw_reverse_bits(1, 2); } else if ((freq % 5) == 0) { channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8); aModeRefSel = ath9k_hw_reverse_bits(1, 2); } else { ath_print(common, ATH_DBG_FATAL, "Invalid channel %u MHz\n", freq); return -EINVAL; } ar5008_hw_force_bias(ah, freq); reg32 = (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) | (1 << 5) | 0x1; REG_WRITE(ah, AR_PHY(0x37), reg32); ah->curchan = chan; ah->curchan_rad_index = -1; return 0; } /** * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios * @ah: atheros hardware structure * @chan: * * For non single-chip solutions. Converts to baseband spur frequency given the * input channel frequency and compute register settings below. 
*/ static void ar5008_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan) { int bb_spur = AR_NO_SPUR; int bin, cur_bin; int spur_freq_sd; int spur_delta_phase; int denominator; int upper, lower, cur_vit_mask; int tmp, new; int i; int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8, AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60 }; int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10, AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60 }; int inc[4] = { 0, 100, 0, 0 }; int8_t mask_m[123]; int8_t mask_p[123]; int8_t mask_amt; int tmp_mask; int cur_bb_spur; bool is2GHz = IS_CHAN_2GHZ(chan); memset(&mask_m, 0, sizeof(int8_t) * 123); memset(&mask_p, 0, sizeof(int8_t) * 123); for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz); if (AR_NO_SPUR == cur_bb_spur) break; cur_bb_spur = cur_bb_spur - (chan->channel * 10); if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) { bb_spur = cur_bb_spur; break; } } if (AR_NO_SPUR == bb_spur) return; bin = bb_spur * 32; tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0)); new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI | AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER | AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK | AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK); REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new); new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL | AR_PHY_SPUR_REG_ENABLE_MASK_PPM | AR_PHY_SPUR_REG_MASK_RATE_SELECT | AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI | SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH)); REG_WRITE(ah, AR_PHY_SPUR_REG, new); spur_delta_phase = ((bb_spur * 524288) / 100) & AR_PHY_TIMING11_SPUR_DELTA_PHASE; denominator = IS_CHAN_2GHZ(chan) ? 440 : 400; spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff; new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC | SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) | SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE)); REG_WRITE(ah, AR_PHY_TIMING11, new); cur_bin = -6000; upper = bin + 100; lower = bin - 100; for (i = 0; i < 4; i++) { int pilot_mask = 0; int chan_mask = 0; int bp = 0; for (bp = 0; bp < 30; bp++) { if ((cur_bin > lower) && (cur_bin < upper)) { pilot_mask = pilot_mask | 0x1 << bp; chan_mask = chan_mask | 0x1 << bp; } cur_bin += 100; } cur_bin += inc[i]; REG_WRITE(ah, pilot_mask_reg[i], pilot_mask); REG_WRITE(ah, chan_mask_reg[i], chan_mask); } cur_vit_mask = 6100; upper = bin + 120; lower = bin - 120; for (i = 0; i < 123; i++) { if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { /* workaround for gcc bug #37014 */ volatile int tmp_v = abs(cur_vit_mask - bin); if (tmp_v < 75) mask_amt = 1; else mask_amt = 0; if (cur_vit_mask < 0) mask_m[abs(cur_vit_mask / 100)] = mask_amt; else mask_p[cur_vit_mask / 100] = mask_amt; } cur_vit_mask -= 100; } tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28) | (mask_m[48] << 26) | (mask_m[49] << 24) | (mask_m[50] << 22) | (mask_m[51] << 20) | (mask_m[52] << 18) | (mask_m[53] << 16) | (mask_m[54] << 14) | (mask_m[55] << 12) | (mask_m[56] << 10) | (mask_m[57] << 8) | (mask_m[58] << 6) | (mask_m[59] << 4) | (mask_m[60] << 2) | (mask_m[61] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask); REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask); tmp_mask = (mask_m[31] << 28) | (mask_m[32] << 26) | (mask_m[33] << 24) | (mask_m[34] << 22) | (mask_m[35] << 20) | (mask_m[36] << 18) | (mask_m[37] << 16) | (mask_m[48] << 14) | (mask_m[39] << 12) | (mask_m[40] << 10) | (mask_m[41] << 8) | (mask_m[42] << 6) | (mask_m[43] << 4) | (mask_m[44] << 2) | (mask_m[45] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask); REG_WRITE(ah, 
AR_PHY_MASK2_M_31_45, tmp_mask); tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28) | (mask_m[18] << 26) | (mask_m[18] << 24) | (mask_m[20] << 22) | (mask_m[20] << 20) | (mask_m[22] << 18) | (mask_m[22] << 16) | (mask_m[24] << 14) | (mask_m[24] << 12) | (mask_m[25] << 10) | (mask_m[26] << 8) | (mask_m[27] << 6) | (mask_m[28] << 4) | (mask_m[29] << 2) | (mask_m[30] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask); tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28) | (mask_m[2] << 26) | (mask_m[3] << 24) | (mask_m[4] << 22) | (mask_m[5] << 20) | (mask_m[6] << 18) | (mask_m[7] << 16) | (mask_m[8] << 14) | (mask_m[9] << 12) | (mask_m[10] << 10) | (mask_m[11] << 8) | (mask_m[12] << 6) | (mask_m[13] << 4) | (mask_m[14] << 2) | (mask_m[15] << 0); REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask); tmp_mask = (mask_p[15] << 28) | (mask_p[14] << 26) | (mask_p[13] << 24) | (mask_p[12] << 22) | (mask_p[11] << 20) | (mask_p[10] << 18) | (mask_p[9] << 16) | (mask_p[8] << 14) | (mask_p[7] << 12) | (mask_p[6] << 10) | (mask_p[5] << 8) | (mask_p[4] << 6) | (mask_p[3] << 4) | (mask_p[2] << 2) | (mask_p[1] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask); tmp_mask = (mask_p[30] << 28) | (mask_p[29] << 26) | (mask_p[28] << 24) | (mask_p[27] << 22) | (mask_p[26] << 20) | (mask_p[25] << 18) | (mask_p[24] << 16) | (mask_p[23] << 14) | (mask_p[22] << 12) | (mask_p[21] << 10) | (mask_p[20] << 8) | (mask_p[19] << 6) | (mask_p[18] << 4) | (mask_p[17] << 2) | (mask_p[16] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask); tmp_mask = (mask_p[45] << 28) | (mask_p[44] << 26) | (mask_p[43] << 24) | (mask_p[42] << 22) | (mask_p[41] << 20) | (mask_p[40] << 18) | (mask_p[39] << 16) | (mask_p[38] << 14) | (mask_p[37] << 12) | (mask_p[36] << 10) | (mask_p[35] << 8) | (mask_p[34] << 6) | (mask_p[33] << 4) | (mask_p[32] << 2) | (mask_p[31] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask); tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28) | (mask_p[59] << 26) | (mask_p[58] << 24) | (mask_p[57] << 22) | (mask_p[56] << 20) | (mask_p[55] << 18) | (mask_p[54] << 16) | (mask_p[53] << 14) | (mask_p[52] << 12) | (mask_p[51] << 10) | (mask_p[50] << 8) | (mask_p[49] << 6) | (mask_p[48] << 4) | (mask_p[47] << 2) | (mask_p[46] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); } /** * ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming * @ah: atheros hardware structure * * Only required for older devices with external AR2133/AR5133 radios. 
*/ static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah) { #define ATH_ALLOC_BANK(bank, size) do { \ bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \ if (!bank) { \ ath_print(common, ATH_DBG_FATAL, \ "Cannot allocate RF banks\n"); \ return -ENOMEM; \ } \ } while (0) struct ath_common *common = ath9k_hw_common(ah); BUG_ON(AR_SREV_9280_10_OR_LATER(ah)); ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows); ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows); ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows); ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows); ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows); ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows); ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows); ATH_ALLOC_BANK(ah->addac5416_21, ah->iniAddac.ia_rows * ah->iniAddac.ia_columns); ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows); return 0; #undef ATH_ALLOC_BANK } /** * ar5008_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers * @ah: atheros hardware structure * * Only required for the external AR2133/AR5133 radio banks. */ static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah) { #define ATH_FREE_BANK(bank) do { \ kfree(bank); \ bank = NULL; \ } while (0) BUG_ON(AR_SREV_9280_10_OR_LATER(ah)); ATH_FREE_BANK(ah->analogBank0Data); ATH_FREE_BANK(ah->analogBank1Data); ATH_FREE_BANK(ah->analogBank2Data); ATH_FREE_BANK(ah->analogBank3Data); ATH_FREE_BANK(ah->analogBank6Data); ATH_FREE_BANK(ah->analogBank6TPCData); ATH_FREE_BANK(ah->analogBank7Data); ATH_FREE_BANK(ah->addac5416_21); ATH_FREE_BANK(ah->bank6Temp); #undef ATH_FREE_BANK } /** * ar5008_hw_set_rf_regs - programs rf registers based on EEPROM * @ah: atheros hardware structure * @chan: channel to program * @modesIndex: mode-dependent column in the initval tables * * Used for the external AR2133/AR5133 radios. * * Reads the EEPROM header info from the device structure and programs * all rf registers. This routine requires access to the analog * rf device. This is not required for single-chip devices. */ static bool ar5008_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan, u16 modesIndex) { u32 eepMinorRev; u32 ob5GHz = 0, db5GHz = 0; u32 ob2GHz = 0, db2GHz = 0; int regWrites = 0; /* * Software does not need to program bank data * for single chip devices, that is AR9280 or anything * after that.
*/ if (AR_SREV_9280_10_OR_LATER(ah)) return true; /* Setup rf parameters */ eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV); /* Setup Bank 0 Write */ RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1); /* Setup Bank 1 Write */ RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1); /* Setup Bank 2 Write */ RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1); /* Setup Bank 3 Write */ RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3, modesIndex); /* Setup Bank 6 (TPC) Write */ { int i; for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) { ah->analogBank6Data[i] = INI_RA(&ah->iniBank6TPC, i, modesIndex); } } /* Only the 5 or 2 GHz OB/DB need to be set for a mode */ if (eepMinorRev >= 2) { if (IS_CHAN_2GHZ(chan)) { ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2); db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2); ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, ob2GHz, 3, 197, 0); ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, db2GHz, 3, 194, 0); } else { ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5); db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5); ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, ob5GHz, 3, 203, 0); ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, db5GHz, 3, 200, 0); } } /* Setup Bank 7 Write */ RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1); /* Write Analog registers */ REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data, regWrites); REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data, regWrites); REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data, regWrites); REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data, regWrites); REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data, regWrites); REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data, regWrites); return true; } static void ar5008_hw_init_bb(struct ath_hw *ah, struct ath9k_channel *chan) { u32 synthDelay; synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; if (IS_CHAN_B(chan)) synthDelay = (4 * synthDelay) / 22; else synthDelay /= 10; REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); udelay(synthDelay + BASE_ACTIVATE_DELAY); } static void ar5008_hw_init_chain_masks(struct ath_hw *ah) { int rx_chainmask, tx_chainmask; rx_chainmask = ah->rxchainmask; tx_chainmask = ah->txchainmask; ENABLE_REGWRITE_BUFFER(ah); switch (rx_chainmask) { case 0x5: DISABLE_REGWRITE_BUFFER(ah); REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, AR_PHY_SWAP_ALT_CHAIN); ENABLE_REGWRITE_BUFFER(ah); /* fall through */ case 0x3: if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) { REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7); REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7); break; } /* fall through */ case 0x1: case 0x2: case 0x7: REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask); REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask); break; default: break; } REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask); REGWRITE_BUFFER_FLUSH(ah); DISABLE_REGWRITE_BUFFER(ah); if (tx_chainmask == 0x5) { REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, AR_PHY_SWAP_ALT_CHAIN); } if (AR_SREV_9100(ah)) REG_WRITE(ah, AR_PHY_ANALOG_SWAP, REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001); } static void ar5008_hw_override_ini(struct ath_hw *ah, struct ath9k_channel *chan) { u32 val; /* * Set the RX_ABORT and RX_DIS bits and clear them only after * RXE is set for the MAC. This prevents frames with corrupted * descriptor status.
*/ REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); if (AR_SREV_9280_10_OR_LATER(ah)) { val = REG_READ(ah, AR_PCU_MISC_MODE2); if (!AR_SREV_9271(ah)) val &= ~AR_PCU_MISC_MODE2_HWWAR1; if (AR_SREV_9287_10_OR_LATER(ah)) val = val & (~AR_PCU_MISC_MODE2_HWWAR2); REG_WRITE(ah, AR_PCU_MISC_MODE2, val); } if (!AR_SREV_5416_20_OR_LATER(ah) || AR_SREV_9280_10_OR_LATER(ah)) return; /* * Disable BB clock gating * Necessary to avoid issues on AR5416 2.0 */ REG_WRITE(ah, 0x9800 + (651 << 2), 0x11); /* * Disable RIFS search on some chips to avoid baseband * hang issues. */ if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) { val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS); val &= ~AR_PHY_RIFS_INIT_DELAY; REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val); } } static void ar5008_hw_set_channel_regs(struct ath_hw *ah, struct ath9k_channel *chan) { u32 phymode; u32 enableDacFifo = 0; if (AR_SREV_9285_10_OR_LATER(ah)) enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) & AR_PHY_FC_ENABLE_DAC_FIFO); phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40 | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo; if (IS_CHAN_HT40(chan)) { phymode |= AR_PHY_FC_DYN2040_EN; if ((chan->chanmode == CHANNEL_A_HT40PLUS) || (chan->chanmode == CHANNEL_G_HT40PLUS)) phymode |= AR_PHY_FC_DYN2040_PRI_CH; } REG_WRITE(ah, AR_PHY_TURBO, phymode); ath9k_hw_set11nmac2040(ah); ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S); REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S); REGWRITE_BUFFER_FLUSH(ah); DISABLE_REGWRITE_BUFFER(ah); } static int ar5008_hw_process_ini(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); int i, regWrites = 0; struct ieee80211_channel *channel = chan->chan; u32 modesIndex, freqIndex; switch (chan->chanmode) { case CHANNEL_A: case CHANNEL_A_HT20: modesIndex = 1; freqIndex = 1; break; case CHANNEL_A_HT40PLUS: case CHANNEL_A_HT40MINUS: modesIndex = 2; freqIndex = 1; break; case CHANNEL_G: case CHANNEL_G_HT20: case CHANNEL_B: modesIndex = 4; freqIndex = 2; break; case CHANNEL_G_HT40PLUS: case CHANNEL_G_HT40MINUS: modesIndex = 3; freqIndex = 2; break; default: return -EINVAL; } if (AR_SREV_9287_12_OR_LATER(ah)) { /* Enable ASYNC FIFO */ REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL); REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO); REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET); REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET); } /* * Set correct baseband to analog shift setting to * access analog chips. 
*/ REG_WRITE(ah, AR_PHY(0), 0x00000007); /* Write ADDAC shifts */ REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO); ah->eep_ops->set_addac(ah, chan); if (AR_SREV_5416_22_OR_LATER(ah)) { REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites); } else { struct ar5416IniArray temp; u32 addacSize = sizeof(u32) * ah->iniAddac.ia_rows * ah->iniAddac.ia_columns; /* For AR5416 2.0/2.1 */ memcpy(ah->addac5416_21, ah->iniAddac.ia_array, addacSize); /* override CLKDRV value at [row, column] = [31, 1] */ (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0; temp.ia_array = ah->addac5416_21; temp.ia_columns = ah->iniAddac.ia_columns; temp.ia_rows = ah->iniAddac.ia_rows; REG_WRITE_ARRAY(&temp, 1, regWrites); } REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC); ENABLE_REGWRITE_BUFFER(ah); for (i = 0; i < ah->iniModes.ia_rows; i++) { u32 reg = INI_RA(&ah->iniModes, i, 0); u32 val = INI_RA(&ah->iniModes, i, modesIndex); if (reg == AR_AN_TOP2 && ah->need_an_top2_fixup) val &= ~AR_AN_TOP2_PWDCLKIND; REG_WRITE(ah, reg, val); if (reg >= 0x7800 && reg < 0x78a0 && ah->config.analog_shiftreg) { udelay(100); } DO_DELAY(regWrites); } REGWRITE_BUFFER_FLUSH(ah); DISABLE_REGWRITE_BUFFER(ah); if (AR_SREV_9280(ah) || AR_SREV_9287_10_OR_LATER(ah)) REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites); if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) || AR_SREV_9287_10_OR_LATER(ah)) REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites); if (AR_SREV_9271_10(ah)) REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only, modesIndex, regWrites); ENABLE_REGWRITE_BUFFER(ah); /* Write common array parameters */ for (i = 0; i < ah->iniCommon.ia_rows; i++) { u32 reg = INI_RA(&ah->iniCommon, i, 0); u32 val = INI_RA(&ah->iniCommon, i, 1); REG_WRITE(ah, reg, val); if (reg >= 0x7800 && reg < 0x78a0 && ah->config.analog_shiftreg) { udelay(100); } DO_DELAY(regWrites); } REGWRITE_BUFFER_FLUSH(ah); DISABLE_REGWRITE_BUFFER(ah); if (AR_SREV_9271(ah)) { if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == 1) REG_WRITE_ARRAY(&ah->iniModes_high_power_tx_gain_9271, modesIndex, regWrites); else REG_WRITE_ARRAY(&ah->iniModes_normal_power_tx_gain_9271, modesIndex, regWrites); } REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites); if (IS_CHAN_A_FAST_CLOCK(ah, chan)) { REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex, regWrites); } ar5008_hw_override_ini(ah, chan); ar5008_hw_set_channel_regs(ah, chan); ar5008_hw_init_chain_masks(ah); ath9k_olc_init(ah); /* Set TX power */ ah->eep_ops->set_txpower(ah, chan, ath9k_regd_get_ctl(regulatory, chan), channel->max_antenna_gain * 2, channel->max_power * 2, min((u32) MAX_RATE_POWER, (u32) regulatory->power_limit)); /* Write analog registers */ if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) { ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, "ar5416SetRfRegs failed\n"); return -EIO; } return 0; } static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan) { u32 rfMode = 0; if (chan == NULL) return; rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan)) ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM; if (!AR_SREV_9280_10_OR_LATER(ah)) rfMode |= (IS_CHAN_5GHZ(chan)) ? 
AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ; if (IS_CHAN_A_FAST_CLOCK(ah, chan)) rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE); REG_WRITE(ah, AR_PHY_MODE, rfMode); } static void ar5008_hw_mark_phy_inactive(struct ath_hw *ah) { REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS); } static void ar5008_hw_set_delta_slope(struct ath_hw *ah, struct ath9k_channel *chan) { u32 coef_scaled, ds_coef_exp, ds_coef_man; u32 clockMhzScaled = 0x64000000; struct chan_centers centers; if (IS_CHAN_HALF_RATE(chan)) clockMhzScaled = clockMhzScaled >> 1; else if (IS_CHAN_QUARTER_RATE(chan)) clockMhzScaled = clockMhzScaled >> 2; ath9k_hw_get_channel_centers(ah, chan, &centers); coef_scaled = clockMhzScaled / centers.synth_center; ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man, &ds_coef_exp); REG_RMW_FIELD(ah, AR_PHY_TIMING3, AR_PHY_TIMING3_DSC_MAN, ds_coef_man); REG_RMW_FIELD(ah, AR_PHY_TIMING3, AR_PHY_TIMING3_DSC_EXP, ds_coef_exp); coef_scaled = (9 * coef_scaled) / 10; ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man, &ds_coef_exp); REG_RMW_FIELD(ah, AR_PHY_HALFGI, AR_PHY_HALFGI_DSC_MAN, ds_coef_man); REG_RMW_FIELD(ah, AR_PHY_HALFGI, AR_PHY_HALFGI_DSC_EXP, ds_coef_exp); } static bool ar5008_hw_rfbus_req(struct ath_hw *ah) { REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN); return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN, AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT); } static void ar5008_hw_rfbus_done(struct ath_hw *ah) { u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; if (IS_CHAN_B(ah->curchan)) synthDelay = (4 * synthDelay) / 22; else synthDelay /= 10; udelay(synthDelay + BASE_ACTIVATE_DELAY); REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0); } static void ar5008_hw_enable_rfkill(struct ath_hw *ah) { REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_INPUT_EN_VAL_RFSILENT_BB); REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2, AR_GPIO_INPUT_MUX2_RFSILENT); ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio); REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB); } static void ar5008_restore_chainmask(struct ath_hw *ah) { int rx_chainmask = ah->rxchainmask; if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) { REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask); REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask); } } static void ar5008_set_diversity(struct ath_hw *ah, bool value) { u32 v = REG_READ(ah, AR_PHY_CCK_DETECT); if (value) v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV; else v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV; REG_WRITE(ah, AR_PHY_CCK_DETECT, v); } static u32 ar9100_hw_compute_pll_control(struct ath_hw *ah, struct ath9k_channel *chan) { if (chan && IS_CHAN_5GHZ(chan)) return 0x1450; return 0x1458; } static u32 ar9160_hw_compute_pll_control(struct ath_hw *ah, struct ath9k_channel *chan) { u32 pll; pll = SM(0x5, AR_RTC_9160_PLL_REFDIV); if (chan && IS_CHAN_HALF_RATE(chan)) pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL); else if (chan && IS_CHAN_QUARTER_RATE(chan)) pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL); if (chan && IS_CHAN_5GHZ(chan)) pll |= SM(0x50, AR_RTC_9160_PLL_DIV); else pll |= SM(0x58, AR_RTC_9160_PLL_DIV); return pll; } static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah, struct ath9k_channel *chan) { u32 pll; pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2; if (chan && IS_CHAN_HALF_RATE(chan)) pll |= SM(0x1, AR_RTC_PLL_CLKSEL); else if (chan && IS_CHAN_QUARTER_RATE(chan)) pll |= SM(0x2, AR_RTC_PLL_CLKSEL); if (chan && IS_CHAN_5GHZ(chan)) pll |= SM(0xa, AR_RTC_PLL_DIV); else pll |= SM(0xb, AR_RTC_PLL_DIV); return pll; } static bool ar5008_hw_ani_control(struct ath_hw 
*ah, enum ath9k_ani_cmd cmd, int param) { struct ar5416AniState *aniState = ah->curani; struct ath_common *common = ath9k_hw_common(ah); switch (cmd & ah->ani_function) { case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{ u32 level = param; if (level >= ARRAY_SIZE(ah->totalSizeDesired)) { ath_print(common, ATH_DBG_ANI, "level out of range (%u > %u)\n", level, (unsigned)ARRAY_SIZE(ah->totalSizeDesired)); return false; } REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_TOT_DES, ah->totalSizeDesired[level]); REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1, AR_PHY_AGC_CTL1_COARSE_LOW, ah->coarse_low[level]); REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1, AR_PHY_AGC_CTL1_COARSE_HIGH, ah->coarse_high[level]); REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, AR_PHY_FIND_SIG_FIRPWR, ah->firpwr[level]); if (level > aniState->noiseImmunityLevel) ah->stats.ast_ani_niup++; else if (level < aniState->noiseImmunityLevel) ah->stats.ast_ani_nidown++; aniState->noiseImmunityLevel = level; break; } case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{ const int m1ThreshLow[] = { 127, 50 }; const int m2ThreshLow[] = { 127, 40 }; const int m1Thresh[] = { 127, 0x4d }; const int m2Thresh[] = { 127, 0x40 }; const int m2CountThr[] = { 31, 16 }; const int m2CountThrLow[] = { 63, 48 }; u32 on = param ? 1 : 0; REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, AR_PHY_SFCORR_LOW_M1_THRESH_LOW, m1ThreshLow[on]); REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, AR_PHY_SFCORR_LOW_M2_THRESH_LOW, m2ThreshLow[on]); REG_RMW_FIELD(ah, AR_PHY_SFCORR, AR_PHY_SFCORR_M1_THRESH, m1Thresh[on]); REG_RMW_FIELD(ah, AR_PHY_SFCORR, AR_PHY_SFCORR_M2_THRESH, m2Thresh[on]); REG_RMW_FIELD(ah, AR_PHY_SFCORR, AR_PHY_SFCORR_M2COUNT_THR, m2CountThr[on]); REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW, m2CountThrLow[on]); REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLow[on]); REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLow[on]); REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, AR_PHY_SFCORR_EXT_M1_THRESH, m1Thresh[on]); REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, AR_PHY_SFCORR_EXT_M2_THRESH, m2Thresh[on]); if (on) REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); else REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW, AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); if (!on != aniState->ofdmWeakSigDetectOff) { if (on) ah->stats.ast_ani_ofdmon++; else ah->stats.ast_ani_ofdmoff++; aniState->ofdmWeakSigDetectOff = !on; } break; } case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{ const int weakSigThrCck[] = { 8, 6 }; u32 high = param ? 
1 : 0; REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT, AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK, weakSigThrCck[high]); if (high != aniState->cckWeakSigThreshold) { if (high) ah->stats.ast_ani_cckhigh++; else ah->stats.ast_ani_ccklow++; aniState->cckWeakSigThreshold = high; } break; } case ATH9K_ANI_FIRSTEP_LEVEL:{ const int firstep[] = { 0, 4, 8 }; u32 level = param; if (level >= ARRAY_SIZE(firstep)) { ath_print(common, ATH_DBG_ANI, "level out of range (%u > %u)\n", level, (unsigned) ARRAY_SIZE(firstep)); return false; } REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, AR_PHY_FIND_SIG_FIRSTEP, firstep[level]); if (level > aniState->firstepLevel) ah->stats.ast_ani_stepup++; else if (level < aniState->firstepLevel) ah->stats.ast_ani_stepdown++; aniState->firstepLevel = level; break; } case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{ const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 }; u32 level = param; if (level >= ARRAY_SIZE(cycpwrThr1)) { ath_print(common, ATH_DBG_ANI, "level out of range (%u > %u)\n", level, (unsigned) ARRAY_SIZE(cycpwrThr1)); return false; } REG_RMW_FIELD(ah, AR_PHY_TIMING5, AR_PHY_TIMING5_CYCPWR_THR1, cycpwrThr1[level]); if (level > aniState->spurImmunityLevel) ah->stats.ast_ani_spurup++; else if (level < aniState->spurImmunityLevel) ah->stats.ast_ani_spurdown++; aniState->spurImmunityLevel = level; break; } case ATH9K_ANI_PRESENT: break; default: ath_print(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd); return false; } ath_print(common, ATH_DBG_ANI, "ANI parameters:\n"); ath_print(common, ATH_DBG_ANI, "noiseImmunityLevel=%d, spurImmunityLevel=%d, " "ofdmWeakSigDetectOff=%d\n", aniState->noiseImmunityLevel, aniState->spurImmunityLevel, !aniState->ofdmWeakSigDetectOff); ath_print(common, ATH_DBG_ANI, "cckWeakSigThreshold=%d, " "firstepLevel=%d, listenTime=%d\n", aniState->cckWeakSigThreshold, aniState->firstepLevel, aniState->listenTime); ath_print(common, ATH_DBG_ANI, "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n", aniState->cycleCount, aniState->ofdmPhyErrCount, aniState->cckPhyErrCount); return true; } static void ar5008_hw_do_getnf(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]) { struct ath_common *common = ath9k_hw_common(ah); int16_t nf; nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR); if (nf & 0x100) nf = 0 - ((nf ^ 0x1ff) + 1); ath_print(common, ATH_DBG_CALIBRATE, "NF calibrated [ctl] [chain 0] is %d\n", nf); nfarray[0] = nf; nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR); if (nf & 0x100) nf = 0 - ((nf ^ 0x1ff) + 1); ath_print(common, ATH_DBG_CALIBRATE, "NF calibrated [ctl] [chain 1] is %d\n", nf); nfarray[1] = nf; nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR); if (nf & 0x100) nf = 0 - ((nf ^ 0x1ff) + 1); ath_print(common, ATH_DBG_CALIBRATE, "NF calibrated [ctl] [chain 2] is %d\n", nf); nfarray[2] = nf; nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR); if (nf & 0x100) nf = 0 - ((nf ^ 0x1ff) + 1); ath_print(common, ATH_DBG_CALIBRATE, "NF calibrated [ext] [chain 0] is %d\n", nf); nfarray[3] = nf; nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR); if (nf & 0x100) nf = 0 - ((nf ^ 0x1ff) + 1); ath_print(common, ATH_DBG_CALIBRATE, "NF calibrated [ext] [chain 1] is %d\n", nf); nfarray[4] = nf; nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR); if (nf & 0x100) nf = 0 - ((nf ^ 0x1ff) + 1); ath_print(common, ATH_DBG_CALIBRATE, "NF calibrated [ext] [chain 2] is %d\n", nf); nfarray[5] = nf; } static void ar5008_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath9k_nfcal_hist *h; int i, j; 
int32_t val; const u32 ar5416_cca_regs[6] = { AR_PHY_CCA, AR_PHY_CH1_CCA, AR_PHY_CH2_CCA, AR_PHY_EXT_CCA, AR_PHY_CH1_EXT_CCA, AR_PHY_CH2_EXT_CCA }; u8 chainmask, rx_chain_status; rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK); if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) chainmask = 0x9; else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) { if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4)) chainmask = 0x1B; else chainmask = 0x09; } else { if (rx_chain_status & 0x4) chainmask = 0x3F; else if (rx_chain_status & 0x2) chainmask = 0x1B; else chainmask = 0x09; } h = ah->nfCalHist; for (i = 0; i < NUM_NF_READINGS; i++) { if (chainmask & (1 << i)) { val = REG_READ(ah, ar5416_cca_regs[i]); val &= 0xFFFFFE00; val |= (((u32) (h[i].privNF) << 1) & 0x1ff); REG_WRITE(ah, ar5416_cca_regs[i], val); } } REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_ENABLE_NF); REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NO_UPDATE_NF); REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); for (j = 0; j < 5; j++) { if ((REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) == 0) break; udelay(50); } ENABLE_REGWRITE_BUFFER(ah); for (i = 0; i < NUM_NF_READINGS; i++) { if (chainmask & (1 << i)) { val = REG_READ(ah, ar5416_cca_regs[i]); val &= 0xFFFFFE00; val |= (((u32) (-50) << 1) & 0x1ff); REG_WRITE(ah, ar5416_cca_regs[i], val); } } REGWRITE_BUFFER_FLUSH(ah); DISABLE_REGWRITE_BUFFER(ah); } void ar5008_hw_attach_phy_ops(struct ath_hw *ah) { struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); priv_ops->rf_set_freq = ar5008_hw_set_channel; priv_ops->spur_mitigate_freq = ar5008_hw_spur_mitigate; priv_ops->rf_alloc_ext_banks = ar5008_hw_rf_alloc_ext_banks; priv_ops->rf_free_ext_banks = ar5008_hw_rf_free_ext_banks; priv_ops->set_rf_regs = ar5008_hw_set_rf_regs; priv_ops->set_channel_regs = ar5008_hw_set_channel_regs; priv_ops->init_bb = ar5008_hw_init_bb; priv_ops->process_ini = ar5008_hw_process_ini; priv_ops->set_rfmode = ar5008_hw_set_rfmode; priv_ops->mark_phy_inactive = ar5008_hw_mark_phy_inactive; priv_ops->set_delta_slope = ar5008_hw_set_delta_slope; priv_ops->rfbus_req = ar5008_hw_rfbus_req; priv_ops->rfbus_done = ar5008_hw_rfbus_done; priv_ops->enable_rfkill = ar5008_hw_enable_rfkill; priv_ops->restore_chainmask = ar5008_restore_chainmask; priv_ops->set_diversity = ar5008_set_diversity; priv_ops->ani_control = ar5008_hw_ani_control; priv_ops->do_getnf = ar5008_hw_do_getnf; priv_ops->loadnf = ar5008_hw_loadnf; if (AR_SREV_9100(ah)) priv_ops->compute_pll_control = ar9100_hw_compute_pll_control; else if (AR_SREV_9160_10_OR_LATER(ah)) priv_ops->compute_pll_control = ar9160_hw_compute_pll_control; else priv_ops->compute_pll_control = ar5008_hw_compute_pll_control; }
gpl-2.0
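The spur-mitigation routine in the ath9k file above is dominated by register writes, but its core arithmetic is compact: the spur offset (bb_spur, in 100 kHz units relative to the channel centre) is scaled to a bin number (bb_spur * 32), and each of the four pilot/channel mask registers collects one bit per 100-unit bin that falls within +/-100 of that spur bin. The following is a minimal standalone sketch of just that mask walk; the function name and the userspace setting are hypothetical, while the constants mirror ar5008_hw_spur_mitigate above.

#include <stdio.h>
#include <stdint.h>

/*
 * Standalone sketch of the pilot/channel mask walk performed by
 * ar5008_hw_spur_mitigate(). compute_spur_masks() is a hypothetical
 * name; the constants (bin = bb_spur * 32, four registers of 30 bins,
 * 100-unit stride, +/-100 window) are taken from the driver code.
 */
static void compute_spur_masks(int bb_spur, uint32_t pilot_mask[4],
			       uint32_t chan_mask[4])
{
	static const int inc[4] = { 0, 100, 0, 0 };
	int bin = bb_spur * 32;		/* spur offset scaled to bin units */
	int upper = bin + 100;
	int lower = bin - 100;
	int cur_bin = -6000;		/* first bin covered by register 0 */
	int i, bp;

	for (i = 0; i < 4; i++) {
		pilot_mask[i] = 0;
		chan_mask[i] = 0;
		for (bp = 0; bp < 30; bp++) {
			/* one mask bit per bin inside the spur window */
			if (cur_bin > lower && cur_bin < upper) {
				pilot_mask[i] |= 1u << bp;
				chan_mask[i] |= 1u << bp;
			}
			cur_bin += 100;
		}
		cur_bin += inc[i];	/* register 1 skips one extra bin */
	}
}

int main(void)
{
	uint32_t pilot[4], chan[4];
	int i;

	/* a spur 2.0 MHz above the channel centre: bb_spur = 20 */
	compute_spur_masks(20, pilot, chan);
	for (i = 0; i < 4; i++)
		printf("reg %d: pilot=0x%08x chan=0x%08x\n",
		       i, pilot[i], chan[i]);
	return 0;
}

With bb_spur confined to the (-95, 95) range the driver accepts, bin stays within (-3040, 3040), so only one or two adjacent bits ever end up set, in one of the middle registers.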
cile381/s7_flat_kernel
net/netfilter/nf_conntrack_proto_dccp.c
754
29462
/* * DCCP connection tracking protocol helper * * Copyright (c) 2005, 2006, 2008 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/spinlock.h> #include <linux/skbuff.h> #include <linux/dccp.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <linux/netfilter/nfnetlink_conntrack.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_log.h> /* Timeouts are based on values from RFC4340: * * - REQUEST: * * 8.1.2. Client Request * * A client MAY give up on its DCCP-Requests after some time * (3 minutes, for example). * * - RESPOND: * * 8.1.3. Server Response * * It MAY also leave the RESPOND state for CLOSED after a timeout of * not less than 4MSL (8 minutes); * * - PARTOPEN: * * 8.1.5. Handshake Completion * * If the client remains in PARTOPEN for more than 4MSL (8 minutes), * it SHOULD reset the connection with Reset Code 2, "Aborted". * * - OPEN: * * The DCCP timestamp overflows after 11.9 hours. If the connection * stays idle this long the sequence number won't be recognized * as valid anymore. * * - CLOSEREQ/CLOSING: * * 8.3. Termination * * The retransmission timer should initially be set to go off in two * round-trip times and should back off to not less than once every * 64 seconds ... * * - TIMEWAIT: * * 4.3. States * * A server or client socket remains in this state for 2MSL (4 minutes) * after the connection has been town down, ... */ #define DCCP_MSL (2 * 60 * HZ) static const char * const dccp_state_names[] = { [CT_DCCP_NONE] = "NONE", [CT_DCCP_REQUEST] = "REQUEST", [CT_DCCP_RESPOND] = "RESPOND", [CT_DCCP_PARTOPEN] = "PARTOPEN", [CT_DCCP_OPEN] = "OPEN", [CT_DCCP_CLOSEREQ] = "CLOSEREQ", [CT_DCCP_CLOSING] = "CLOSING", [CT_DCCP_TIMEWAIT] = "TIMEWAIT", [CT_DCCP_IGNORE] = "IGNORE", [CT_DCCP_INVALID] = "INVALID", }; #define sNO CT_DCCP_NONE #define sRQ CT_DCCP_REQUEST #define sRS CT_DCCP_RESPOND #define sPO CT_DCCP_PARTOPEN #define sOP CT_DCCP_OPEN #define sCR CT_DCCP_CLOSEREQ #define sCG CT_DCCP_CLOSING #define sTW CT_DCCP_TIMEWAIT #define sIG CT_DCCP_IGNORE #define sIV CT_DCCP_INVALID /* * DCCP state transition table * * The assumption is the same as for TCP tracking: * * We are the man in the middle. All the packets go through us but might * get lost in transit to the destination. It is assumed that the destination * can't receive segments we haven't seen. * * The following states exist: * * NONE: Initial state, expecting Request * REQUEST: Request seen, waiting for Response from server * RESPOND: Response from server seen, waiting for Ack from client * PARTOPEN: Ack after Response seen, waiting for packet other than Response, * Reset or Sync from server * OPEN: Packet other than Response, Reset or Sync seen * CLOSEREQ: CloseReq from server seen, expecting Close from client * CLOSING: Close seen, expecting Reset * TIMEWAIT: Reset seen * IGNORE: Not determinable whether packet is valid * * Some states exist only on one side of the connection: REQUEST, RESPOND, * PARTOPEN, CLOSEREQ. For the other side these states are equivalent to * the one it was in before. 
* * Packets are marked as ignored (sIG) if we don't know if they're valid * (for example a reincarnation of a connection we didn't notice is dead * already) and the server may send back a connection closing Reset or a * Response. They're also used for Sync/SyncAck packets, which we don't * care about. */ static const u_int8_t dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] = { [CT_DCCP_ROLE_CLIENT] = { [DCCP_PKT_REQUEST] = { /* * sNO -> sRQ Regular Request * sRQ -> sRQ Retransmitted Request or reincarnation * sRS -> sRS Retransmitted Request (apparently Response * got lost after we saw it) or reincarnation * sPO -> sIG Ignore, conntrack might be out of sync * sOP -> sIG Ignore, conntrack might be out of sync * sCR -> sIG Ignore, conntrack might be out of sync * sCG -> sIG Ignore, conntrack might be out of sync * sTW -> sRQ Reincarnation * * sNO, sRQ, sRS, sPO. sOP, sCR, sCG, sTW, */ sRQ, sRQ, sRS, sIG, sIG, sIG, sIG, sRQ, }, [DCCP_PKT_RESPONSE] = { /* * sNO -> sIV Invalid * sRQ -> sIG Ignore, might be response to ignored Request * sRS -> sIG Ignore, might be response to ignored Request * sPO -> sIG Ignore, might be response to ignored Request * sOP -> sIG Ignore, might be response to ignored Request * sCR -> sIG Ignore, might be response to ignored Request * sCG -> sIG Ignore, might be response to ignored Request * sTW -> sIV Invalid, reincarnation in reverse direction * goes through sRQ * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIV, }, [DCCP_PKT_ACK] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sPO Ack for Response, move to PARTOPEN (8.1.5.) * sPO -> sPO Retransmitted Ack for Response, remain in PARTOPEN * sOP -> sOP Regular ACK, remain in OPEN * sCR -> sCR Ack in CLOSEREQ MAY be processed (8.3.) * sCG -> sCG Ack in CLOSING MAY be processed (8.3.) * sTW -> sIV * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sPO, sPO, sOP, sCR, sCG, sIV }, [DCCP_PKT_DATA] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sIV No connection * sPO -> sIV MUST use DataAck in PARTOPEN state (8.1.5.) * sOP -> sOP Regular Data packet * sCR -> sCR Data in CLOSEREQ MAY be processed (8.3.) * sCG -> sCG Data in CLOSING MAY be processed (8.3.) * sTW -> sIV * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sIV, sIV, sOP, sCR, sCG, sIV, }, [DCCP_PKT_DATAACK] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sPO Ack for Response, move to PARTOPEN (8.1.5.) * sPO -> sPO Remain in PARTOPEN state * sOP -> sOP Regular DataAck packet in OPEN state * sCR -> sCR DataAck in CLOSEREQ MAY be processed (8.3.) * sCG -> sCG DataAck in CLOSING MAY be processed (8.3.) * sTW -> sIV * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sPO, sPO, sOP, sCR, sCG, sIV }, [DCCP_PKT_CLOSEREQ] = { /* * CLOSEREQ may only be sent by the server. * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }, [DCCP_PKT_CLOSE] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sIV No connection * sPO -> sCG Client-initiated close * sOP -> sCG Client-initiated close * sCR -> sCG Close in response to CloseReq (8.3.) * sCG -> sCG Retransmit * sTW -> sIV Late retransmit, already in TIME_WAIT * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sIV, sCG, sCG, sCG, sIV, sIV }, [DCCP_PKT_RESET] = { /* * sNO -> sIV No connection * sRQ -> sTW Sync received or timeout, SHOULD send Reset (8.1.1.) 
* sRS -> sTW Response received without Request * sPO -> sTW Timeout, SHOULD send Reset (8.1.5.) * sOP -> sTW Connection reset * sCR -> sTW Connection reset * sCG -> sTW Connection reset * sTW -> sIG Ignore (don't refresh timer) * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sTW, sTW, sTW, sTW, sTW, sTW, sIG }, [DCCP_PKT_SYNC] = { /* * We currently ignore Sync packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, [DCCP_PKT_SYNCACK] = { /* * We currently ignore SyncAck packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, }, [CT_DCCP_ROLE_SERVER] = { [DCCP_PKT_REQUEST] = { /* * sNO -> sIV Invalid * sRQ -> sIG Ignore, conntrack might be out of sync * sRS -> sIG Ignore, conntrack might be out of sync * sPO -> sIG Ignore, conntrack might be out of sync * sOP -> sIG Ignore, conntrack might be out of sync * sCR -> sIG Ignore, conntrack might be out of sync * sCG -> sIG Ignore, conntrack might be out of sync * sTW -> sRQ Reincarnation, must reverse roles * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIG, sIG, sIG, sIG, sIG, sIG, sRQ }, [DCCP_PKT_RESPONSE] = { /* * sNO -> sIV Response without Request * sRQ -> sRS Response to clients Request * sRS -> sRS Retransmitted Response (8.1.3. SHOULD NOT) * sPO -> sIG Response to an ignored Request or late retransmit * sOP -> sIG Ignore, might be response to ignored Request * sCR -> sIG Ignore, might be response to ignored Request * sCG -> sIG Ignore, might be response to ignored Request * sTW -> sIV Invalid, Request from client in sTW moves to sRQ * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sRS, sRS, sIG, sIG, sIG, sIG, sIV }, [DCCP_PKT_ACK] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sIV No connection * sPO -> sOP Enter OPEN state (8.1.5.) * sOP -> sOP Regular Ack in OPEN state * sCR -> sIV Waiting for Close from client * sCG -> sCG Ack in CLOSING MAY be processed (8.3.) * sTW -> sIV * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV }, [DCCP_PKT_DATA] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sIV No connection * sPO -> sOP Enter OPEN state (8.1.5.) * sOP -> sOP Regular Data packet in OPEN state * sCR -> sIV Waiting for Close from client * sCG -> sCG Data in CLOSING MAY be processed (8.3.) * sTW -> sIV * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV }, [DCCP_PKT_DATAACK] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sIV No connection * sPO -> sOP Enter OPEN state (8.1.5.) * sOP -> sOP Regular DataAck in OPEN state * sCR -> sIV Waiting for Close from client * sCG -> sCG Data in CLOSING MAY be processed (8.3.) * sTW -> sIV * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV }, [DCCP_PKT_CLOSEREQ] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sIV No connection * sPO -> sOP -> sCR Move directly to CLOSEREQ (8.1.5.) 
* sOP -> sCR CloseReq in OPEN state * sCR -> sCR Retransmit * sCG -> sCR Simultaneous close, client sends another Close * sTW -> sIV Already closed * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sIV, sCR, sCR, sCR, sCR, sIV }, [DCCP_PKT_CLOSE] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sIV No connection * sPO -> sOP -> sCG Move directly to CLOSING * sOP -> sCG Move to CLOSING * sCR -> sIV Close after CloseReq is invalid * sCG -> sCG Retransmit * sTW -> sIV Already closed * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sIV, sCG, sCG, sIV, sCG, sIV }, [DCCP_PKT_RESET] = { /* * sNO -> sIV No connection * sRQ -> sTW Reset in response to Request * sRS -> sTW Timeout, SHOULD send Reset (8.1.3.) * sPO -> sTW Timeout, SHOULD send Reset (8.1.3.) * sOP -> sTW * sCR -> sTW * sCG -> sTW * sTW -> sIG Ignore (don't refresh timer) * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sTW, sTW, sTW, sTW, sTW, sTW, sIG }, [DCCP_PKT_SYNC] = { /* * We currently ignore Sync packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, [DCCP_PKT_SYNCACK] = { /* * We currently ignore SyncAck packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, }, }; /* per-net specifics for this module */ static int dccp_net_id __read_mostly; struct dccp_net { struct nf_proto_net pn; int dccp_loose; unsigned int dccp_timeout[CT_DCCP_MAX + 1]; }; static inline struct dccp_net *dccp_pernet(struct net *net) { return net_generic(net, dccp_net_id); } static bool dccp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, struct nf_conntrack_tuple *tuple) { struct dccp_hdr _hdr, *dh; dh = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); if (dh == NULL) return false; tuple->src.u.dccp.port = dh->dccph_sport; tuple->dst.u.dccp.port = dh->dccph_dport; return true; } static bool dccp_invert_tuple(struct nf_conntrack_tuple *inv, const struct nf_conntrack_tuple *tuple) { inv->src.u.dccp.port = tuple->dst.u.dccp.port; inv->dst.u.dccp.port = tuple->src.u.dccp.port; return true; } static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, unsigned int *timeouts) { struct net *net = nf_ct_net(ct); struct dccp_net *dn; struct dccp_hdr _dh, *dh; const char *msg; u_int8_t state; dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh); BUG_ON(dh == NULL); state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE]; switch (state) { default: dn = dccp_pernet(net); if (dn->dccp_loose == 0) { msg = "nf_ct_dccp: not picking up existing connection "; goto out_invalid; } /* fall through */ case CT_DCCP_REQUEST: break; case CT_DCCP_INVALID: msg = "nf_ct_dccp: invalid state transition "; goto out_invalid; } ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT; ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER; ct->proto.dccp.state = CT_DCCP_NONE; ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST; ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL; ct->proto.dccp.handshake_seq = 0; return true; out_invalid: if (LOG_INVALID(net, IPPROTO_DCCP)) nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, "%s", msg); return false; } static u64 dccp_ack_seq(const struct dccp_hdr *dh) { const struct dccp_hdr_ack_bits *dhack; dhack = (void *)dh + __dccp_basic_hdr_len(dh); return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) + ntohl(dhack->dccph_ack_nr_low); } static unsigned int *dccp_get_timeouts(struct net *net) { return dccp_pernet(net)->dccp_timeout; } static int dccp_packet(struct nf_conn
*ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, unsigned int hooknum, unsigned int *timeouts) { struct net *net = nf_ct_net(ct); enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); struct dccp_hdr _dh, *dh; u_int8_t type, old_state, new_state; enum ct_dccp_roles role; dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh); BUG_ON(dh == NULL); type = dh->dccph_type; if (type == DCCP_PKT_RESET && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { /* Tear down connection immediately if only reply is a RESET */ nf_ct_kill_acct(ct, ctinfo, skb); return NF_ACCEPT; } spin_lock_bh(&ct->lock); role = ct->proto.dccp.role[dir]; old_state = ct->proto.dccp.state; new_state = dccp_state_table[role][type][old_state]; switch (new_state) { case CT_DCCP_REQUEST: if (old_state == CT_DCCP_TIMEWAIT && role == CT_DCCP_ROLE_SERVER) { /* Reincarnation in the reverse direction: reopen and * reverse client/server roles. */ ct->proto.dccp.role[dir] = CT_DCCP_ROLE_CLIENT; ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_SERVER; } break; case CT_DCCP_RESPOND: if (old_state == CT_DCCP_REQUEST) ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh); break; case CT_DCCP_PARTOPEN: if (old_state == CT_DCCP_RESPOND && type == DCCP_PKT_ACK && dccp_ack_seq(dh) == ct->proto.dccp.handshake_seq) set_bit(IPS_ASSURED_BIT, &ct->status); break; case CT_DCCP_IGNORE: /* * Connection tracking might be out of sync, so we ignore * packets that might establish a new connection and resync * if the server responds with a valid Response. */ if (ct->proto.dccp.last_dir == !dir && ct->proto.dccp.last_pkt == DCCP_PKT_REQUEST && type == DCCP_PKT_RESPONSE) { ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_CLIENT; ct->proto.dccp.role[dir] = CT_DCCP_ROLE_SERVER; ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh); new_state = CT_DCCP_RESPOND; break; } ct->proto.dccp.last_dir = dir; ct->proto.dccp.last_pkt = type; spin_unlock_bh(&ct->lock); if (LOG_INVALID(net, IPPROTO_DCCP)) nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "nf_ct_dccp: invalid packet ignored "); return NF_ACCEPT; case CT_DCCP_INVALID: spin_unlock_bh(&ct->lock); if (LOG_INVALID(net, IPPROTO_DCCP)) nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "nf_ct_dccp: invalid state transition "); return -NF_ACCEPT; } ct->proto.dccp.last_dir = dir; ct->proto.dccp.last_pkt = type; ct->proto.dccp.state = new_state; spin_unlock_bh(&ct->lock); if (new_state != old_state) nf_conntrack_event_cache(IPCT_PROTOINFO, ct); nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]); return NF_ACCEPT; } static int dccp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) { struct dccp_hdr _dh, *dh; unsigned int dccp_len = skb->len - dataoff; unsigned int cscov; const char *msg; dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh); if (dh == NULL) { msg = "nf_ct_dccp: short packet "; goto out_invalid; } if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) || dh->dccph_doff * 4 > dccp_len) { msg = "nf_ct_dccp: truncated/malformed packet "; goto out_invalid; } cscov = dccp_len; if (dh->dccph_cscov) { cscov = (dh->dccph_cscov - 1) * 4; if (cscov > dccp_len) { msg = "nf_ct_dccp: bad checksum coverage "; goto out_invalid; } } if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_DCCP, pf)) { msg = "nf_ct_dccp: bad checksum "; goto out_invalid; } if (dh->dccph_type >= DCCP_PKT_INVALID) { msg = "nf_ct_dccp: reserved packet type "; 
goto out_invalid; } return NF_ACCEPT; out_invalid: if (LOG_INVALID(net, IPPROTO_DCCP)) nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg); return -NF_ACCEPT; } static int dccp_print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple) { return seq_printf(s, "sport=%hu dport=%hu ", ntohs(tuple->src.u.dccp.port), ntohs(tuple->dst.u.dccp.port)); } static int dccp_print_conntrack(struct seq_file *s, struct nf_conn *ct) { return seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]); } #if IS_ENABLED(CONFIG_NF_CT_NETLINK) static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, struct nf_conn *ct) { struct nlattr *nest_parms; spin_lock_bh(&ct->lock); nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED); if (!nest_parms) goto nla_put_failure; if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state) || nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE, ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) || nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ, cpu_to_be64(ct->proto.dccp.handshake_seq))) goto nla_put_failure; nla_nest_end(skb, nest_parms); spin_unlock_bh(&ct->lock); return 0; nla_put_failure: spin_unlock_bh(&ct->lock); return -1; } static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = { [CTA_PROTOINFO_DCCP_STATE] = { .type = NLA_U8 }, [CTA_PROTOINFO_DCCP_ROLE] = { .type = NLA_U8 }, [CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ] = { .type = NLA_U64 }, }; static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) { struct nlattr *attr = cda[CTA_PROTOINFO_DCCP]; struct nlattr *tb[CTA_PROTOINFO_DCCP_MAX + 1]; int err; if (!attr) return 0; err = nla_parse_nested(tb, CTA_PROTOINFO_DCCP_MAX, attr, dccp_nla_policy); if (err < 0) return err; if (!tb[CTA_PROTOINFO_DCCP_STATE] || !tb[CTA_PROTOINFO_DCCP_ROLE] || nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) > CT_DCCP_ROLE_MAX || nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]) >= CT_DCCP_IGNORE) { return -EINVAL; } spin_lock_bh(&ct->lock); ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]); if (nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) == CT_DCCP_ROLE_CLIENT) { ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT; ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER; } else { ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_SERVER; ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_CLIENT; } if (tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]) { ct->proto.dccp.handshake_seq = be64_to_cpu(nla_get_be64(tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ])); } spin_unlock_bh(&ct->lock); return 0; } static int dccp_nlattr_size(void) { return nla_total_size(0) /* CTA_PROTOINFO_DCCP */ + nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1); } #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_cttimeout.h> static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], struct net *net, void *data) { struct dccp_net *dn = dccp_pernet(net); unsigned int *timeouts = data; int i; /* set default DCCP timeouts. */ for (i=0; i<CT_DCCP_MAX; i++) timeouts[i] = dn->dccp_timeout[i]; /* there's a 1:1 mapping between attributes and protocol states. 
*/ for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) { if (tb[i]) { timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; } } return 0; } static int dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeouts = data; int i; for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) { if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ))) goto nla_put_failure; } return 0; nla_put_failure: return -ENOSPC; } static const struct nla_policy dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = { [CTA_TIMEOUT_DCCP_REQUEST] = { .type = NLA_U32 }, [CTA_TIMEOUT_DCCP_RESPOND] = { .type = NLA_U32 }, [CTA_TIMEOUT_DCCP_PARTOPEN] = { .type = NLA_U32 }, [CTA_TIMEOUT_DCCP_OPEN] = { .type = NLA_U32 }, [CTA_TIMEOUT_DCCP_CLOSEREQ] = { .type = NLA_U32 }, [CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 }, [CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 }, }; #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ #ifdef CONFIG_SYSCTL /* template, data assigned later */ static struct ctl_table dccp_sysctl_table[] = { { .procname = "nf_conntrack_dccp_timeout_request", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_dccp_timeout_respond", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_dccp_timeout_partopen", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_dccp_timeout_open", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_dccp_timeout_closereq", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_dccp_timeout_closing", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_dccp_timeout_timewait", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_dccp_loose", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { } }; #endif /* CONFIG_SYSCTL */ static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn, struct dccp_net *dn) { #ifdef CONFIG_SYSCTL if (pn->ctl_table) return 0; pn->ctl_table = kmemdup(dccp_sysctl_table, sizeof(dccp_sysctl_table), GFP_KERNEL); if (!pn->ctl_table) return -ENOMEM; pn->ctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST]; pn->ctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND]; pn->ctl_table[2].data = &dn->dccp_timeout[CT_DCCP_PARTOPEN]; pn->ctl_table[3].data = &dn->dccp_timeout[CT_DCCP_OPEN]; pn->ctl_table[4].data = &dn->dccp_timeout[CT_DCCP_CLOSEREQ]; pn->ctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING]; pn->ctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT]; pn->ctl_table[7].data = &dn->dccp_loose; /* Don't export sysctls to unprivileged users */ if (net->user_ns != &init_user_ns) pn->ctl_table[0].procname = NULL; #endif return 0; } static int dccp_init_net(struct net *net, u_int16_t proto) { struct dccp_net *dn = dccp_pernet(net); struct nf_proto_net *pn = &dn->pn; if (!pn->users) { /* default values */ dn->dccp_loose = 1; dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL; dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL; dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL; dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ; dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ; dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ; dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * 
DCCP_MSL; } return dccp_kmemdup_sysctl_table(net, pn, dn); } static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = { .l3proto = AF_INET, .l4proto = IPPROTO_DCCP, .name = "dccp", .pkt_to_tuple = dccp_pkt_to_tuple, .invert_tuple = dccp_invert_tuple, .new = dccp_new, .packet = dccp_packet, .get_timeouts = dccp_get_timeouts, .error = dccp_error, .print_tuple = dccp_print_tuple, .print_conntrack = dccp_print_conntrack, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .to_nlattr = dccp_to_nlattr, .nlattr_size = dccp_nlattr_size, .from_nlattr = nlattr_to_dccp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) .ctnl_timeout = { .nlattr_to_obj = dccp_timeout_nlattr_to_obj, .obj_to_nlattr = dccp_timeout_obj_to_nlattr, .nlattr_max = CTA_TIMEOUT_DCCP_MAX, .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, .nla_policy = dccp_timeout_nla_policy, }, #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ .net_id = &dccp_net_id, .init_net = dccp_init_net, }; static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = { .l3proto = AF_INET6, .l4proto = IPPROTO_DCCP, .name = "dccp", .pkt_to_tuple = dccp_pkt_to_tuple, .invert_tuple = dccp_invert_tuple, .new = dccp_new, .packet = dccp_packet, .get_timeouts = dccp_get_timeouts, .error = dccp_error, .print_tuple = dccp_print_tuple, .print_conntrack = dccp_print_conntrack, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .to_nlattr = dccp_to_nlattr, .nlattr_size = dccp_nlattr_size, .from_nlattr = nlattr_to_dccp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) .ctnl_timeout = { .nlattr_to_obj = dccp_timeout_nlattr_to_obj, .obj_to_nlattr = dccp_timeout_obj_to_nlattr, .nlattr_max = CTA_TIMEOUT_DCCP_MAX, .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, .nla_policy = dccp_timeout_nla_policy, }, #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ .net_id = &dccp_net_id, .init_net = dccp_init_net, }; static __net_init int dccp_net_init(struct net *net) { int ret = 0; ret = nf_ct_l4proto_pernet_register(net, &dccp_proto4); if (ret < 0) { pr_err("nf_conntrack_dccp4: pernet registration failed.\n"); goto out; } ret = nf_ct_l4proto_pernet_register(net, &dccp_proto6); if (ret < 0) { pr_err("nf_conntrack_dccp6: pernet registration failed.\n"); goto cleanup_dccp4; } return 0; cleanup_dccp4: nf_ct_l4proto_pernet_unregister(net, &dccp_proto4); out: return ret; } static __net_exit void dccp_net_exit(struct net *net) { nf_ct_l4proto_pernet_unregister(net, &dccp_proto6); nf_ct_l4proto_pernet_unregister(net, &dccp_proto4); } static struct pernet_operations dccp_net_ops = { .init = dccp_net_init, .exit = dccp_net_exit, .id = &dccp_net_id, .size = sizeof(struct dccp_net), }; static int __init nf_conntrack_proto_dccp_init(void) { int ret; ret = register_pernet_subsys(&dccp_net_ops); if (ret < 0) goto out_pernet; ret = nf_ct_l4proto_register(&dccp_proto4); if (ret < 0) goto out_dccp4; ret = nf_ct_l4proto_register(&dccp_proto6); if (ret < 0) goto out_dccp6; return 0; out_dccp6: nf_ct_l4proto_unregister(&dccp_proto4); out_dccp4: unregister_pernet_subsys(&dccp_net_ops); out_pernet: return ret; } static void __exit nf_conntrack_proto_dccp_fini(void) { nf_ct_l4proto_unregister(&dccp_proto6); nf_ct_l4proto_unregister(&dccp_proto4); 
unregister_pernet_subsys(&dccp_net_ops); } module_init(nf_conntrack_proto_dccp_init); module_exit(nf_conntrack_proto_dccp_fini); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_DESCRIPTION("DCCP connection tracking protocol helper"); MODULE_LICENSE("GPL");
gpl-2.0
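The three-dimensional dccp_state_table in the conntrack helper above reduces, per packet, to a single array lookup: dccp_packet() computes new_state = dccp_state_table[role][type][old_state], treats sIG and sIV as verdicts rather than stored states, and only commits real states back to the conntrack entry. Below is a cut-down userspace model of that lookup; the reduced enums and the three client rows are copied from the table's handshake entries, while the names and the driver-free setting are illustrative only (the real table covers both roles and all ten packet types).

#include <stdio.h>

/*
 * Cut-down userspace model of the dccp_state_table lookup done by
 * dccp_packet() above. The three rows reproduce the client-role
 * handshake entries of the table, truncated to the first five states;
 * everything else here is a hypothetical sketch.
 */
enum state { sNO, sRQ, sRS, sPO, sOP, sIG, sIV };
enum pkt { PKT_REQUEST, PKT_RESPONSE, PKT_ACK, PKT_MAX };

static const enum state client_table[PKT_MAX][sOP + 1] = {
	/*                 sNO  sRQ  sRS  sPO  sOP */
	[PKT_REQUEST]  = { sRQ, sRQ, sRS, sIG, sIG },
	[PKT_RESPONSE] = { sIV, sIG, sIG, sIG, sIG },
	[PKT_ACK]      = { sIV, sIV, sPO, sPO, sOP },
};

static const char *names[] = {
	"NONE", "REQUEST", "RESPOND", "PARTOPEN", "OPEN", "IGNORE", "INVALID",
};

int main(void)
{
	/* client Request opens tracking: sNO -> sRQ */
	enum state st = client_table[PKT_REQUEST][sNO];
	printf("Request in NONE -> %s\n", names[st]);

	/* the server's Response (handled by the server half of the
	 * real table) would move tracking to sRS; model that by hand */
	st = sRS;

	/* client Ack completes the handshake: sRS -> sPO */
	st = client_table[PKT_ACK][st];
	printf("Ack in RESPOND -> %s\n", names[st]);

	/* a stray Ack with no connection is flagged invalid */
	printf("Ack in NONE -> %s\n", names[client_table[PKT_ACK][sNO]]);
	return 0;
}

dccp_new() uses the same table with old_state fixed to CT_DCCP_NONE, which is why only a Request (or, when dccp_loose is set, a picked-up packet of an existing connection) can create a new conntrack entry.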
fhasovic/LG-G2-D802-Kernel
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
1522
52126
/* * Copyright 2007-8 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alex Deucher */ #include "drmP.h" #include "drm_crtc_helper.h" #include "radeon_drm.h" #include "radeon.h" #include "atom.h" #include <linux/backlight.h> #ifdef CONFIG_PMAC_BACKLIGHT #include <asm/backlight.h> #endif static void radeon_legacy_encoder_disable(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder_helper_funcs *encoder_funcs; encoder_funcs = encoder->helper_private; encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); radeon_encoder->active_device = 0; } static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man; int panel_pwr_delay = 2000; bool is_mac = false; uint8_t backlight_level; DRM_DEBUG_KMS("\n"); lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); backlight_level = (lvds_gen_cntl >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; if (radeon_encoder->enc_priv) { if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; panel_pwr_delay = lvds->panel_pwr_delay; if (lvds->bl_dev) backlight_level = lvds->backlight_level; } else { struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; panel_pwr_delay = lvds->panel_pwr_delay; if (lvds->bl_dev) backlight_level = lvds->backlight_level; } } /* macs (and possibly some x86 oem systems?) wire up LVDS strangely * Taken from radeonfb. 
*/ if ((rdev->mode_info.connector_table == CT_IBOOK) || (rdev->mode_info.connector_table == CT_POWERBOOK_EXTERNAL) || (rdev->mode_info.connector_table == CT_POWERBOOK_INTERNAL) || (rdev->mode_info.connector_table == CT_POWERBOOK_VGA)) is_mac = true; switch (mode) { case DRM_MODE_DPMS_ON: disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN); disp_pwr_man |= RADEON_AUTO_PWRUP_EN; WREG32(RADEON_DISP_PWR_MAN, disp_pwr_man); lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); lvds_pll_cntl |= RADEON_LVDS_PLL_EN; WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); mdelay(1); lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET; WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS | RADEON_LVDS_BL_MOD_LEVEL_MASK); lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON | (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT)); if (is_mac) lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN; mdelay(panel_pwr_delay); WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL); WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb); lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; if (is_mac) { lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN; WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_EN); } else { WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON); } mdelay(panel_pwr_delay); WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); mdelay(panel_pwr_delay); break; } if (rdev->is_atom_bios) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); } static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) { struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); DRM_DEBUG("\n"); if (radeon_encoder->enc_priv) { if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; lvds->dpms_mode = mode; } else { struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; lvds->dpms_mode = mode; } } radeon_legacy_lvds_update(encoder, mode); } static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF); } static void radeon_legacy_lvds_commit(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_ON); if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, false); else radeon_combios_output_lock(encoder, false); } static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t lvds_pll_cntl, lvds_gen_cntl, lvds_ss_gen_cntl; DRM_DEBUG_KMS("\n"); lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL); if (rdev->is_atom_bios) { /* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl * need to call that on resume to set up the reg properly. 
*/ radeon_encoder->pixel_clock = adjusted_mode->clock; atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE); lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); } else { struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; if (lvds) { DRM_DEBUG_KMS("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl); lvds_gen_cntl = lvds->lvds_gen_cntl; lvds_ss_gen_cntl &= ~((0xf << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) | (0xf << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); lvds_ss_gen_cntl |= ((lvds->panel_digon_delay << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) | (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); } else lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); } lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_RST_FM); if (ASIC_IS_R300(rdev)) lvds_pll_cntl &= ~(R300_LVDS_SRC_SEL_MASK); if (radeon_crtc->crtc_id == 0) { if (ASIC_IS_R300(rdev)) { if (radeon_encoder->rmx_type != RMX_OFF) lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX; } else lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2; } else { if (ASIC_IS_R300(rdev)) lvds_pll_cntl |= R300_LVDS_SRC_SEL_CRTC2; else lvds_gen_cntl |= RADEON_LVDS_SEL_CRTC2; } WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); WREG32(RADEON_LVDS_SS_GEN_CNTL, lvds_ss_gen_cntl); if (rdev->family == CHIP_RV410) WREG32(RADEON_CLOCK_CNTL_INDEX, 0); if (rdev->is_atom_bios) radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); else radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); /* set the active encoder to connector routing */ radeon_encoder_set_active_device(encoder); drm_mode_set_crtcinfo(adjusted_mode, 0); /* get the native mode for LVDS */ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) radeon_panel_mode_fixup(encoder, adjusted_mode); return true; } static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { .dpms = radeon_legacy_lvds_dpms, .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_lvds_prepare, .mode_set = radeon_legacy_lvds_mode_set, .commit = radeon_legacy_lvds_commit, .disable = radeon_legacy_encoder_disable, }; #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) #define MAX_RADEON_LEVEL 0xFF struct radeon_backlight_privdata { struct radeon_encoder *encoder; uint8_t negative; }; static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd) { struct radeon_backlight_privdata *pdata = bl_get_data(bd); uint8_t level; /* Convert brightness to hardware level */ if (bd->props.brightness < 0) level = 0; else if (bd->props.brightness > MAX_RADEON_LEVEL) level = MAX_RADEON_LEVEL; else level = bd->props.brightness; if (pdata->negative) level = MAX_RADEON_LEVEL - level; return level; } static int radeon_legacy_backlight_update_status(struct backlight_device *bd) { struct radeon_backlight_privdata *pdata = bl_get_data(bd); struct radeon_encoder *radeon_encoder = pdata->encoder; struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; int dpms_mode = DRM_MODE_DPMS_ON; if (radeon_encoder->enc_priv) { if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; dpms_mode = lvds->dpms_mode; lvds->backlight_level = 
radeon_legacy_lvds_level(bd); } else { struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; dpms_mode = lvds->dpms_mode; lvds->backlight_level = radeon_legacy_lvds_level(bd); } } if (bd->props.brightness > 0) radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode); else radeon_legacy_lvds_update(&radeon_encoder->base, DRM_MODE_DPMS_OFF); return 0; } static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd) { struct radeon_backlight_privdata *pdata = bl_get_data(bd); struct radeon_encoder *radeon_encoder = pdata->encoder; struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; uint8_t backlight_level; backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; return pdata->negative ? MAX_RADEON_LEVEL - backlight_level : backlight_level; } static const struct backlight_ops radeon_backlight_ops = { .get_brightness = radeon_legacy_backlight_get_brightness, .update_status = radeon_legacy_backlight_update_status, }; void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, struct drm_connector *drm_connector) { struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct backlight_device *bd; struct backlight_properties props; struct radeon_backlight_privdata *pdata; uint8_t backlight_level; if (!radeon_encoder->enc_priv) return; #ifdef CONFIG_PMAC_BACKLIGHT if (!pmac_has_backlight_type("ati") && !pmac_has_backlight_type("mnca")) return; #endif pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL); if (!pdata) { DRM_ERROR("Memory allocation failed\n"); goto error; } memset(&props, 0, sizeof(props)); /* zero the stack struct so fields not set below do not carry garbage */ props.max_brightness = MAX_RADEON_LEVEL; props.type = BACKLIGHT_RAW; bd = backlight_device_register("radeon_bl", &drm_connector->kdev, pdata, &radeon_backlight_ops, &props); if (IS_ERR(bd)) { DRM_ERROR("Backlight registration failed\n"); goto error; } pdata->encoder = radeon_encoder; backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; /* First, try to detect backlight level sense based on the assumption * that firmware set it up at full brightness */ if (backlight_level == 0) pdata->negative = true; else if (backlight_level == 0xff) pdata->negative = false; else { /* XXX hack... maybe some day we can figure out in what direction * backlight should work on a given panel?
*/ pdata->negative = (rdev->family != CHIP_RV200 && rdev->family != CHIP_RV250 && rdev->family != CHIP_RV280 && rdev->family != CHIP_RV350); #ifdef CONFIG_PMAC_BACKLIGHT pdata->negative = (pdata->negative || of_machine_is_compatible("PowerBook4,3") || of_machine_is_compatible("PowerBook6,3") || of_machine_is_compatible("PowerBook6,5")); #endif } if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; lvds->bl_dev = bd; } else { struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; lvds->bl_dev = bd; } bd->props.brightness = radeon_legacy_backlight_get_brightness(bd); bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); DRM_INFO("radeon legacy LVDS backlight initialized\n"); return; error: kfree(pdata); return; } static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder) { struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct backlight_device *bd = NULL; if (!radeon_encoder->enc_priv) return; if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; bd = lvds->bl_dev; lvds->bl_dev = NULL; } else { struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; bd = lvds->bl_dev; lvds->bl_dev = NULL; } if (bd) { struct radeon_backlight_privdata *pdata; /* the type registered in backlight_init above */ pdata = bl_get_data(bd); backlight_device_unregister(bd); kfree(pdata); DRM_INFO("radeon legacy LVDS backlight unloaded\n"); } } #else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, struct drm_connector *drm_connector) { } static void radeon_legacy_backlight_exit(struct radeon_encoder *encoder) { } #endif static void radeon_lvds_enc_destroy(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->enc_priv) { radeon_legacy_backlight_exit(radeon_encoder); kfree(radeon_encoder->enc_priv); } drm_encoder_cleanup(encoder); kfree(radeon_encoder); } static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = { .destroy = radeon_lvds_enc_destroy, }; static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); uint32_t dac_cntl = RREG32(RADEON_DAC_CNTL); uint32_t dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL); DRM_DEBUG_KMS("\n"); switch (mode) { case DRM_MODE_DPMS_ON: crtc_ext_cntl |= RADEON_CRTC_CRT_ON; dac_cntl &= ~RADEON_DAC_PDWN; dac_macro_cntl &= ~(RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: crtc_ext_cntl &= ~RADEON_CRTC_CRT_ON; dac_cntl |= RADEON_DAC_PDWN; dac_macro_cntl |= (RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B); break; } WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); WREG32(RADEON_DAC_CNTL, dac_cntl); WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ?
true : false); } static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF); } static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_ON); if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, false); else radeon_combios_output_lock(encoder, false); } static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl; DRM_DEBUG_KMS("\n"); if (radeon_crtc->crtc_id == 0) { if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & ~(RADEON_DISP_DAC_SOURCE_MASK); WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); } else { dac2_cntl = RREG32(RADEON_DAC_CNTL2) & ~(RADEON_DAC2_DAC_CLK_SEL); WREG32(RADEON_DAC_CNTL2, dac2_cntl); } } else { if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & ~(RADEON_DISP_DAC_SOURCE_MASK); disp_output_cntl |= RADEON_DISP_DAC_SOURCE_CRTC2; WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); } else { dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC_CLK_SEL; WREG32(RADEON_DAC_CNTL2, dac2_cntl); } } dac_cntl = (RADEON_DAC_MASK_ALL | RADEON_DAC_VGA_ADR_EN | /* TODO 6-bits */ RADEON_DAC_8BIT_EN); WREG32_P(RADEON_DAC_CNTL, dac_cntl, RADEON_DAC_RANGE_CNTL | RADEON_DAC_BLANKING); if (radeon_encoder->enc_priv) { struct radeon_encoder_primary_dac *p_dac = (struct radeon_encoder_primary_dac *)radeon_encoder->enc_priv; dac_macro_cntl = p_dac->ps2_pdac_adj; } else dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL); dac_macro_cntl |= RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B; WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); else radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t vclk_ecp_cntl, crtc_ext_cntl; uint32_t dac_ext_cntl, dac_cntl, dac_macro_cntl, tmp; enum drm_connector_status found = connector_status_disconnected; bool color = true; /* just don't bother on RN50 those chip are often connected to remoting * console hw and often we get failure to load detect those. So to make * everyone happy report the encoder as always connected. 
*/ if (ASIC_IS_RN50(rdev)) { return connector_status_connected; } /* save the regs we need */ vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL); crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL); dac_cntl = RREG32(RADEON_DAC_CNTL); dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL); tmp = vclk_ecp_cntl & ~(RADEON_PIXCLK_ALWAYS_ONb | RADEON_PIXCLK_DAC_ALWAYS_ONb); WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp); tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON; WREG32(RADEON_CRTC_EXT_CNTL, tmp); tmp = RADEON_DAC_FORCE_BLANK_OFF_EN | RADEON_DAC_FORCE_DATA_EN; if (color) tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB; else tmp |= RADEON_DAC_FORCE_DATA_SEL_G; if (ASIC_IS_R300(rdev)) tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT); else tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT); WREG32(RADEON_DAC_EXT_CNTL, tmp); tmp = dac_cntl & ~(RADEON_DAC_RANGE_CNTL_MASK | RADEON_DAC_PDWN); tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN; WREG32(RADEON_DAC_CNTL, tmp); tmp = dac_macro_cntl; tmp &= ~(RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B); WREG32(RADEON_DAC_MACRO_CNTL, tmp); mdelay(2); if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT) found = connector_status_connected; /* restore the regs we used */ WREG32(RADEON_DAC_CNTL, dac_cntl); WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl); WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); WREG32_PLL(RADEON_VCLK_ECP_CNTL, vclk_ecp_cntl); return found; } static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = { .dpms = radeon_legacy_primary_dac_dpms, .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_primary_dac_prepare, .mode_set = radeon_legacy_primary_dac_mode_set, .commit = radeon_legacy_primary_dac_commit, .detect = radeon_legacy_primary_dac_detect, .disable = radeon_legacy_encoder_disable, }; static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = { .destroy = radeon_enc_destroy, }; static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t fp_gen_cntl = RREG32(RADEON_FP_GEN_CNTL); DRM_DEBUG_KMS("\n"); switch (mode) { case DRM_MODE_DPMS_ON: fp_gen_cntl |= (RADEON_FP_FPON | RADEON_FP_TMDS_EN); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN); break; } WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); } static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF); } static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_ON); if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); } static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl; int i; DRM_DEBUG_KMS("\n"); tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL); tmp &= 0xfffff; if (rdev->family == CHIP_RV280) { /* bit 22 of TMDS_PLL_CNTL is read-back inverted */ tmp ^= (1 << 22); tmds_pll_cntl ^= (1 << 22); } if (radeon_encoder->enc_priv) { struct radeon_encoder_int_tmds *tmds = (struct radeon_encoder_int_tmds *)radeon_encoder->enc_priv; for (i = 0; i < 4; i++) { if (tmds->tmds_pll[i].freq == 0) break; if ((uint32_t)(mode->clock / 10) < tmds->tmds_pll[i].freq) { tmp = tmds->tmds_pll[i].value ; break; } } } if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV280)) { if (tmp & 0xfff00000) tmds_pll_cntl = tmp; else { tmds_pll_cntl &= 0xfff00000; tmds_pll_cntl |= tmp; } } else tmds_pll_cntl = tmp; tmds_transmitter_cntl = RREG32(RADEON_TMDS_TRANSMITTER_CNTL) & ~(RADEON_TMDS_TRANSMITTER_PLLRST); if (rdev->family == CHIP_R200 || rdev->family == CHIP_R100 || ASIC_IS_R300(rdev)) tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN); else /* RV chips got this bit reversed */ tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN; fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) | (RADEON_FP_CRTC_DONT_SHADOW_VPAR | RADEON_FP_CRTC_DONT_SHADOW_HEND)); fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN); fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN | RADEON_FP_DFP_SYNC_SEL | RADEON_FP_CRT_SYNC_SEL | RADEON_FP_CRTC_LOCK_8DOT | RADEON_FP_USE_SHADOW_EN | RADEON_FP_CRTC_USE_SHADOW_VEND | RADEON_FP_CRT_SYNC_ALT); if (1) /* FIXME rgbBits == 8 */ fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */ else fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */ if (radeon_crtc->crtc_id == 0) { if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; if (radeon_encoder->rmx_type != RMX_OFF) fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; else fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; } else fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2; } else { if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2; } else fp_gen_cntl |= RADEON_FP_SEL_CRTC2; } WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl); WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl); WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); else radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = { .dpms = 
radeon_legacy_tmds_int_dpms, .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_tmds_int_prepare, .mode_set = radeon_legacy_tmds_int_mode_set, .commit = radeon_legacy_tmds_int_commit, .disable = radeon_legacy_encoder_disable, }; static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = { .destroy = radeon_enc_destroy, }; static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); DRM_DEBUG_KMS("\n"); switch (mode) { case DRM_MODE_DPMS_ON: fp2_gen_cntl &= ~RADEON_FP2_BLANK_EN; fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: fp2_gen_cntl |= RADEON_FP2_BLANK_EN; fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN); break; } WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); } static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF); } static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_ON); if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, false); else radeon_combios_output_lock(encoder, false); } static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t fp2_gen_cntl; DRM_DEBUG_KMS("\n"); if (rdev->is_atom_bios) { radeon_encoder->pixel_clock = adjusted_mode->clock; atombios_dvo_setup(encoder, ATOM_ENABLE); fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); } else { fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); if (1) /* FIXME rgbBits == 8 */ fp2_gen_cntl |= RADEON_FP2_PANEL_FORMAT; /* 24 bit format, */ else fp2_gen_cntl &= ~RADEON_FP2_PANEL_FORMAT;/* 18 bit format, */ fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN | RADEON_FP2_DVO_RATE_SEL_SDR); /* XXX: these are oem specific */ if (ASIC_IS_R300(rdev)) { if ((dev->pdev->device == 0x4850) && (dev->pdev->subsystem_vendor == 0x1028) && (dev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */ fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE; else fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE; /*if (mode->clock > 165000) fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/ } if (!radeon_combios_external_tmds_setup(encoder)) radeon_external_tmds_setup(encoder); } if (radeon_crtc->crtc_id == 0) { if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; if (radeon_encoder->rmx_type != RMX_OFF) fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX; else fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1; } else fp2_gen_cntl &= ~RADEON_FP2_SRC_SEL_CRTC2; } else { if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { fp2_gen_cntl &= 
~R200_FP2_SOURCE_SEL_MASK; fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2; } else fp2_gen_cntl |= RADEON_FP2_SRC_SEL_CRTC2; } WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); else radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); /* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */ kfree(radeon_encoder->enc_priv); drm_encoder_cleanup(encoder); kfree(radeon_encoder); } static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = { .dpms = radeon_legacy_tmds_ext_dpms, .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_tmds_ext_prepare, .mode_set = radeon_legacy_tmds_ext_mode_set, .commit = radeon_legacy_tmds_ext_commit, .disable = radeon_legacy_encoder_disable, }; static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = { .destroy = radeon_ext_tmds_enc_destroy, }; static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0; uint32_t tv_master_cntl = 0; bool is_tv; DRM_DEBUG_KMS("\n"); is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false; if (rdev->family == CHIP_R200) fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); else { if (is_tv) tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL); else crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); } switch (mode) { case DRM_MODE_DPMS_ON: if (rdev->family == CHIP_R200) { fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN); } else { if (is_tv) tv_master_cntl |= RADEON_TV_ON; else crtc2_gen_cntl |= RADEON_CRTC2_CRT2_ON; if (rdev->family == CHIP_R420 || rdev->family == CHIP_R423 || rdev->family == CHIP_RV410) tv_dac_cntl &= ~(R420_TV_DAC_RDACPD | R420_TV_DAC_GDACPD | R420_TV_DAC_BDACPD | RADEON_TV_DAC_BGSLEEP); else tv_dac_cntl &= ~(RADEON_TV_DAC_RDACPD | RADEON_TV_DAC_GDACPD | RADEON_TV_DAC_BDACPD | RADEON_TV_DAC_BGSLEEP); } break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: if (rdev->family == CHIP_R200) fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN); else { if (is_tv) tv_master_cntl &= ~RADEON_TV_ON; else crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON; if (rdev->family == CHIP_R420 || rdev->family == CHIP_R423 || rdev->family == CHIP_RV410) tv_dac_cntl |= (R420_TV_DAC_RDACPD | R420_TV_DAC_GDACPD | R420_TV_DAC_BDACPD | RADEON_TV_DAC_BGSLEEP); else tv_dac_cntl |= (RADEON_TV_DAC_RDACPD | RADEON_TV_DAC_GDACPD | RADEON_TV_DAC_BDACPD | RADEON_TV_DAC_BGSLEEP); } break; } if (rdev->family == CHIP_R200) { WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); } else { if (is_tv) WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl); else WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); } if (rdev->is_atom_bios) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); } static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF); } static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_ON); if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); } static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; uint32_t tv_dac_cntl, gpiopad_a = 0, dac2_cntl, disp_output_cntl = 0; uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0, disp_tv_out_cntl = 0; bool is_tv = false; DRM_DEBUG_KMS("\n"); is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false; if (rdev->family != CHIP_R200) { tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); if (rdev->family == CHIP_R420 || rdev->family == CHIP_R423 || rdev->family == CHIP_RV410) { tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | RADEON_TV_DAC_BGADJ_MASK | R420_TV_DAC_DACADJ_MASK | R420_TV_DAC_RDACPD | R420_TV_DAC_GDACPD | R420_TV_DAC_BDACPD | R420_TV_DAC_TVENABLE); } else { tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | RADEON_TV_DAC_BGADJ_MASK | RADEON_TV_DAC_DACADJ_MASK | RADEON_TV_DAC_RDACPD | RADEON_TV_DAC_GDACPD | RADEON_TV_DAC_BDACPD); } tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD; if (is_tv) { if (tv_dac->tv_std == TV_STD_NTSC || tv_dac->tv_std == TV_STD_NTSC_J || tv_dac->tv_std == TV_STD_PAL_M || tv_dac->tv_std == TV_STD_PAL_60) tv_dac_cntl |= tv_dac->ntsc_tvdac_adj; else tv_dac_cntl |= tv_dac->pal_tvdac_adj; if (tv_dac->tv_std == TV_STD_NTSC || tv_dac->tv_std == TV_STD_NTSC_J) tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC; else tv_dac_cntl |= RADEON_TV_DAC_STD_PAL; } else tv_dac_cntl |= (RADEON_TV_DAC_STD_PS2 | tv_dac->ps2_tvdac_adj); WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); } if (ASIC_IS_R300(rdev)) { gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1; disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); } else if (rdev->family != CHIP_R200) disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); else if (rdev->family == CHIP_R200) fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); if (rdev->family >= CHIP_R200) disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL); if (is_tv) { uint32_t dac_cntl; dac_cntl = RREG32(RADEON_DAC_CNTL); dac_cntl &= ~RADEON_DAC_TVO_EN; WREG32(RADEON_DAC_CNTL, dac_cntl); if (ASIC_IS_R300(rdev)) gpiopad_a = RREG32(RADEON_GPIOPAD_A) & ~1; dac2_cntl = RREG32(RADEON_DAC_CNTL2) & ~RADEON_DAC2_DAC2_CLK_SEL; if (radeon_crtc->crtc_id == 0) { if (ASIC_IS_R300(rdev)) { disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; disp_output_cntl |= (RADEON_DISP_TVDAC_SOURCE_CRTC | RADEON_DISP_TV_SOURCE_CRTC); } if (rdev->family >= CHIP_R200) { disp_tv_out_cntl &= ~RADEON_DISP_TV_PATH_SRC_CRTC2; } else { disp_hw_debug |= RADEON_CRT2_DISP1_SEL; } } else { if (ASIC_IS_R300(rdev)) { disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; disp_output_cntl |= RADEON_DISP_TV_SOURCE_CRTC; } if (rdev->family >= CHIP_R200) { disp_tv_out_cntl |= 
RADEON_DISP_TV_PATH_SRC_CRTC2; } else { disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL; } } WREG32(RADEON_DAC_CNTL2, dac2_cntl); } else { dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC2_CLK_SEL; if (radeon_crtc->crtc_id == 0) { if (ASIC_IS_R300(rdev)) { disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC; } else if (rdev->family == CHIP_R200) { fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK | RADEON_FP2_DVO_RATE_SEL_SDR); } else disp_hw_debug |= RADEON_CRT2_DISP1_SEL; } else { if (ASIC_IS_R300(rdev)) { disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC2; } else if (rdev->family == CHIP_R200) { fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK | RADEON_FP2_DVO_RATE_SEL_SDR); fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2; } else disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL; } WREG32(RADEON_DAC_CNTL2, dac2_cntl); } if (ASIC_IS_R300(rdev)) { WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); } else if (rdev->family != CHIP_R200) WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); else if (rdev->family == CHIP_R200) WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); if (rdev->family >= CHIP_R200) WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl); if (is_tv) radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode); if (rdev->is_atom_bios) radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); else radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static bool r300_legacy_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl; uint32_t disp_output_cntl, gpiopad_a, tmp; bool found = false; /* save regs needed */ gpiopad_a = RREG32(RADEON_GPIOPAD_A); dac_cntl2 = RREG32(RADEON_DAC_CNTL2); crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL); tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); WREG32_P(RADEON_GPIOPAD_A, 0, ~1); WREG32(RADEON_DAC_CNTL2, RADEON_DAC2_DAC2_CLK_SEL); WREG32(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_CRT2_ON | RADEON_CRTC2_VSYNC_TRISTAT); tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK; tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2; WREG32(RADEON_DISP_OUTPUT_CNTL, tmp); WREG32(RADEON_DAC_EXT_CNTL, RADEON_DAC2_FORCE_BLANK_OFF_EN | RADEON_DAC2_FORCE_DATA_EN | RADEON_DAC_FORCE_DATA_SEL_RGB | (0xec << RADEON_DAC_FORCE_DATA_SHIFT)); WREG32(RADEON_TV_DAC_CNTL, RADEON_TV_DAC_STD_NTSC | (8 << RADEON_TV_DAC_BGADJ_SHIFT) | (6 << RADEON_TV_DAC_DACADJ_SHIFT)); RREG32(RADEON_TV_DAC_CNTL); mdelay(4); WREG32(RADEON_TV_DAC_CNTL, RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD | RADEON_TV_MONITOR_DETECT_EN | RADEON_TV_DAC_STD_NTSC | (8 << RADEON_TV_DAC_BGADJ_SHIFT) | (6 << RADEON_TV_DAC_DACADJ_SHIFT)); RREG32(RADEON_TV_DAC_CNTL); mdelay(6); tmp = RREG32(RADEON_TV_DAC_CNTL); if ((tmp & RADEON_TV_DAC_GDACDET) != 0) { found = true; DRM_DEBUG_KMS("S-video TV connection detected\n"); } else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) { found = true; DRM_DEBUG_KMS("Composite TV connection detected\n"); } WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl); WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); WREG32(RADEON_DAC_CNTL2, dac_cntl2); WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); return found; } static bool radeon_legacy_tv_detect(struct 
drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t tv_dac_cntl, dac_cntl2; uint32_t config_cntl, tv_pre_dac_mux_cntl, tv_master_cntl, tmp; bool found = false; if (ASIC_IS_R300(rdev)) return r300_legacy_tv_detect(encoder, connector); dac_cntl2 = RREG32(RADEON_DAC_CNTL2); tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL); tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); config_cntl = RREG32(RADEON_CONFIG_CNTL); tv_pre_dac_mux_cntl = RREG32(RADEON_TV_PRE_DAC_MUX_CNTL); tmp = dac_cntl2 & ~RADEON_DAC2_DAC2_CLK_SEL; WREG32(RADEON_DAC_CNTL2, tmp); tmp = tv_master_cntl | RADEON_TV_ON; tmp &= ~(RADEON_TV_ASYNC_RST | RADEON_RESTART_PHASE_FIX | RADEON_CRT_FIFO_CE_EN | RADEON_TV_FIFO_CE_EN | RADEON_RE_SYNC_NOW_SEL_MASK); tmp |= RADEON_TV_FIFO_ASYNC_RST | RADEON_CRT_ASYNC_RST; WREG32(RADEON_TV_MASTER_CNTL, tmp); tmp = RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD | RADEON_TV_MONITOR_DETECT_EN | RADEON_TV_DAC_STD_NTSC | (8 << RADEON_TV_DAC_BGADJ_SHIFT); if (config_cntl & RADEON_CFG_ATI_REV_ID_MASK) tmp |= (4 << RADEON_TV_DAC_DACADJ_SHIFT); else tmp |= (8 << RADEON_TV_DAC_DACADJ_SHIFT); WREG32(RADEON_TV_DAC_CNTL, tmp); tmp = RADEON_C_GRN_EN | RADEON_CMP_BLU_EN | RADEON_RED_MX_FORCE_DAC_DATA | RADEON_GRN_MX_FORCE_DAC_DATA | RADEON_BLU_MX_FORCE_DAC_DATA | (0x109 << RADEON_TV_FORCE_DAC_DATA_SHIFT); WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tmp); mdelay(3); tmp = RREG32(RADEON_TV_DAC_CNTL); if (tmp & RADEON_TV_DAC_GDACDET) { found = true; DRM_DEBUG_KMS("S-video TV connection detected\n"); } else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) { found = true; DRM_DEBUG_KMS("Composite TV connection detected\n"); } WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tv_pre_dac_mux_cntl); WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl); WREG32(RADEON_DAC_CNTL2, dac_cntl2); return found; } static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl; uint32_t disp_hw_debug, disp_output_cntl, gpiopad_a, pixclks_cntl, tmp; enum drm_connector_status found = connector_status_disconnected; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; bool color = true; struct drm_crtc *crtc; /* find out if crtc2 is in use or if this encoder is using it */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); if ((radeon_crtc->crtc_id == 1) && crtc->enabled) { if (encoder->crtc != crtc) { return connector_status_disconnected; } } } if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO || connector->connector_type == DRM_MODE_CONNECTOR_Composite || connector->connector_type == DRM_MODE_CONNECTOR_9PinDIN) { bool tv_detect; if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT)) return connector_status_disconnected; tv_detect = radeon_legacy_tv_detect(encoder, connector); if (tv_detect && tv_dac) found = connector_status_connected; return found; } /* don't probe if the encoder is being used for something else not CRT related */ if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_CRT_SUPPORT)) { DRM_INFO("not detecting due to %08x\n", radeon_encoder->active_device); return connector_status_disconnected; } /* save 
the regs we need */ pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL); gpiopad_a = ASIC_IS_R300(rdev) ? RREG32(RADEON_GPIOPAD_A) : 0; disp_output_cntl = ASIC_IS_R300(rdev) ? RREG32(RADEON_DISP_OUTPUT_CNTL) : 0; disp_hw_debug = ASIC_IS_R300(rdev) ? 0 : RREG32(RADEON_DISP_HW_DEBUG); crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL); dac_cntl2 = RREG32(RADEON_DAC_CNTL2); tmp = pixclks_cntl & ~(RADEON_PIX2CLK_ALWAYS_ONb | RADEON_PIX2CLK_DAC_ALWAYS_ONb); WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); if (ASIC_IS_R300(rdev)) WREG32_P(RADEON_GPIOPAD_A, 1, ~1); tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK; tmp |= RADEON_CRTC2_CRT2_ON | (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT); WREG32(RADEON_CRTC2_GEN_CNTL, tmp); if (ASIC_IS_R300(rdev)) { tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK; tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2; WREG32(RADEON_DISP_OUTPUT_CNTL, tmp); } else { tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL; WREG32(RADEON_DISP_HW_DEBUG, tmp); } tmp = RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD | RADEON_TV_MONITOR_DETECT_EN | RADEON_TV_DAC_STD_PS2; WREG32(RADEON_TV_DAC_CNTL, tmp); tmp = RADEON_DAC2_FORCE_BLANK_OFF_EN | RADEON_DAC2_FORCE_DATA_EN; if (color) tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB; else tmp |= RADEON_DAC_FORCE_DATA_SEL_G; if (ASIC_IS_R300(rdev)) tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT); else tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT); WREG32(RADEON_DAC_EXT_CNTL, tmp); tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN; WREG32(RADEON_DAC_CNTL2, tmp); mdelay(10); if (ASIC_IS_R300(rdev)) { if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B) found = connector_status_connected; } else { if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUTPUT) found = connector_status_connected; } /* restore regs we used */ WREG32(RADEON_DAC_CNTL2, dac_cntl2); WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl); WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); if (ASIC_IS_R300(rdev)) { WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); } else { WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); } WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); return found; } static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = { .dpms = radeon_legacy_tv_dac_dpms, .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_tv_dac_prepare, .mode_set = radeon_legacy_tv_dac_mode_set, .commit = radeon_legacy_tv_dac_commit, .detect = radeon_legacy_tv_dac_detect, .disable = radeon_legacy_encoder_disable, }; static const struct drm_encoder_funcs radeon_legacy_tv_dac_enc_funcs = { .destroy = radeon_enc_destroy, }; static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder_int_tmds *tmds = NULL; bool ret; tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL); if (!tmds) return NULL; if (rdev->is_atom_bios) ret = radeon_atombios_get_tmds_info(encoder, tmds); else ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds); if (ret == false) radeon_legacy_get_tmds_info_from_table(encoder, tmds); return tmds; } static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder_ext_tmds *tmds = NULL; bool ret; if 
(rdev->is_atom_bios) return NULL; tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL); if (!tmds) return NULL; ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds); if (ret == false) radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds); return tmds; } void radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device) { struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; /* see if we already added it */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->encoder_enum == encoder_enum) { radeon_encoder->devices |= supported_device; return; } } /* add a new one */ radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); if (!radeon_encoder) return; encoder = &radeon_encoder->base; if (rdev->flags & RADEON_SINGLE_CRTC) encoder->possible_crtcs = 0x1; else encoder->possible_crtcs = 0x3; radeon_encoder->enc_priv = NULL; radeon_encoder->encoder_enum = encoder_enum; radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; radeon_encoder->devices = supported_device; radeon_encoder->rmx_type = RMX_OFF; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: encoder->possible_crtcs = 0x1; drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs); if (rdev->is_atom_bios) radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); else radeon_encoder->enc_priv = radeon_combios_get_lvds_info(radeon_encoder); radeon_encoder->rmx_type = RMX_FULL; break; case ENCODER_OBJECT_ID_INTERNAL_TMDS1: drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs); radeon_encoder->enc_priv = radeon_legacy_get_tmds_info(radeon_encoder); break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, DRM_MODE_ENCODER_DAC); drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs); if (rdev->is_atom_bios) radeon_encoder->enc_priv = radeon_atombios_get_primary_dac_info(radeon_encoder); else radeon_encoder->enc_priv = radeon_combios_get_primary_dac_info(radeon_encoder); break; case ENCODER_OBJECT_ID_INTERNAL_DAC2: drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, DRM_MODE_ENCODER_TVDAC); drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs); if (rdev->is_atom_bios) radeon_encoder->enc_priv = radeon_atombios_get_tv_dac_info(radeon_encoder); else radeon_encoder->enc_priv = radeon_combios_get_tv_dac_info(radeon_encoder); break; case ENCODER_OBJECT_ID_INTERNAL_DVO1: drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs); if (!rdev->is_atom_bios) radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder); break; } }
gpl-2.0
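Every DPMS handler in the radeon encoder file above repeats one read-modify-write shape: read a control register, set the block's enable bits for DRM_MODE_DPMS_ON, clear them for the standby/suspend/off cases, and write the value back. Below is a minimal sketch of that shape for orientation; rmw_dpms() is a hypothetical helper that does not exist in the driver, and it assumes the driver's RREG32/WREG32 accessors (which pick up 'rdev' from scope) and bit masks such as RADEON_CRTC_CRT_ON or RADEON_FP_FPON. It is written against the driver's in-kernel context, not as a standalone program.

static void rmw_dpms(struct radeon_device *rdev, u32 reg, u32 on_bits, int mode)
{
	u32 tmp = RREG32(reg);	/* read the current register value */

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		tmp |= on_bits;		/* power the block up */
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		tmp &= ~on_bits;	/* power the block down */
		break;
	}
	WREG32(reg, tmp);	/* write the modified value back */
}

radeon_legacy_primary_dac_dpms() above is this pattern applied to three registers at once, followed by the BIOS scratch-register update.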
TroNit/BlackDome_Kernel_JB
fs/btrfs/print-tree.c
3058
11058
/* * Copyright (C) 2007 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include "ctree.h" #include "disk-io.h" #include "print-tree.h" static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk) { int num_stripes = btrfs_chunk_num_stripes(eb, chunk); int i; printk(KERN_INFO "\t\tchunk length %llu owner %llu type %llu " "num_stripes %d\n", (unsigned long long)btrfs_chunk_length(eb, chunk), (unsigned long long)btrfs_chunk_owner(eb, chunk), (unsigned long long)btrfs_chunk_type(eb, chunk), num_stripes); for (i = 0 ; i < num_stripes ; i++) { printk(KERN_INFO "\t\t\tstripe %d devid %llu offset %llu\n", i, (unsigned long long)btrfs_stripe_devid_nr(eb, chunk, i), (unsigned long long)btrfs_stripe_offset_nr(eb, chunk, i)); } } static void print_dev_item(struct extent_buffer *eb, struct btrfs_dev_item *dev_item) { printk(KERN_INFO "\t\tdev item devid %llu " "total_bytes %llu bytes used %llu\n", (unsigned long long)btrfs_device_id(eb, dev_item), (unsigned long long)btrfs_device_total_bytes(eb, dev_item), (unsigned long long)btrfs_device_bytes_used(eb, dev_item)); } static void print_extent_data_ref(struct extent_buffer *eb, struct btrfs_extent_data_ref *ref) { printk(KERN_INFO "\t\textent data backref root %llu " "objectid %llu offset %llu count %u\n", (unsigned long long)btrfs_extent_data_ref_root(eb, ref), (unsigned long long)btrfs_extent_data_ref_objectid(eb, ref), (unsigned long long)btrfs_extent_data_ref_offset(eb, ref), btrfs_extent_data_ref_count(eb, ref)); } static void print_extent_item(struct extent_buffer *eb, int slot) { struct btrfs_extent_item *ei; struct btrfs_extent_inline_ref *iref; struct btrfs_extent_data_ref *dref; struct btrfs_shared_data_ref *sref; struct btrfs_disk_key key; unsigned long end; unsigned long ptr; int type; u32 item_size = btrfs_item_size_nr(eb, slot); u64 flags; u64 offset; if (item_size < sizeof(*ei)) { #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 struct btrfs_extent_item_v0 *ei0; BUG_ON(item_size != sizeof(*ei0)); ei0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_item_v0); printk(KERN_INFO "\t\textent refs %u\n", btrfs_extent_refs_v0(eb, ei0)); return; #else BUG(); #endif } ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item); flags = btrfs_extent_flags(eb, ei); printk(KERN_INFO "\t\textent refs %llu gen %llu flags %llu\n", (unsigned long long)btrfs_extent_refs(eb, ei), (unsigned long long)btrfs_extent_generation(eb, ei), (unsigned long long)flags); if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { struct btrfs_tree_block_info *info; info = (struct btrfs_tree_block_info *)(ei + 1); btrfs_tree_block_key(eb, info, &key); printk(KERN_INFO "\t\ttree block key (%llu %x %llu) " "level %d\n", (unsigned long long)btrfs_disk_key_objectid(&key), key.type, (unsigned long long)btrfs_disk_key_offset(&key), btrfs_tree_block_level(eb, info)); iref = (struct btrfs_extent_inline_ref *)(info + 1); } else { iref = (struct btrfs_extent_inline_ref *)(ei + 1); } ptr = 
(unsigned long)iref; end = (unsigned long)ei + item_size; while (ptr < end) { iref = (struct btrfs_extent_inline_ref *)ptr; type = btrfs_extent_inline_ref_type(eb, iref); offset = btrfs_extent_inline_ref_offset(eb, iref); switch (type) { case BTRFS_TREE_BLOCK_REF_KEY: printk(KERN_INFO "\t\ttree block backref " "root %llu\n", (unsigned long long)offset); break; case BTRFS_SHARED_BLOCK_REF_KEY: printk(KERN_INFO "\t\tshared block backref " "parent %llu\n", (unsigned long long)offset); break; case BTRFS_EXTENT_DATA_REF_KEY: dref = (struct btrfs_extent_data_ref *)(&iref->offset); print_extent_data_ref(eb, dref); break; case BTRFS_SHARED_DATA_REF_KEY: sref = (struct btrfs_shared_data_ref *)(iref + 1); printk(KERN_INFO "\t\tshared data backref " "parent %llu count %u\n", (unsigned long long)offset, btrfs_shared_data_ref_count(eb, sref)); break; default: BUG(); } ptr += btrfs_extent_inline_ref_size(type); } WARN_ON(ptr > end); } #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 static void print_extent_ref_v0(struct extent_buffer *eb, int slot) { struct btrfs_extent_ref_v0 *ref0; ref0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_ref_v0); printk("\t\textent back ref root %llu gen %llu " "owner %llu num_refs %lu\n", (unsigned long long)btrfs_ref_root_v0(eb, ref0), (unsigned long long)btrfs_ref_generation_v0(eb, ref0), (unsigned long long)btrfs_ref_objectid_v0(eb, ref0), (unsigned long)btrfs_ref_count_v0(eb, ref0)); } #endif void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) { int i; u32 type; u32 nr = btrfs_header_nritems(l); struct btrfs_item *item; struct btrfs_root_item *ri; struct btrfs_dir_item *di; struct btrfs_inode_item *ii; struct btrfs_block_group_item *bi; struct btrfs_file_extent_item *fi; struct btrfs_extent_data_ref *dref; struct btrfs_shared_data_ref *sref; struct btrfs_dev_extent *dev_extent; struct btrfs_key key; struct btrfs_key found_key; printk(KERN_INFO "leaf %llu total ptrs %d free space %d\n", (unsigned long long)btrfs_header_bytenr(l), nr, btrfs_leaf_free_space(root, l)); for (i = 0 ; i < nr ; i++) { item = btrfs_item_nr(l, i); btrfs_item_key_to_cpu(l, &key, i); type = btrfs_key_type(&key); printk(KERN_INFO "\titem %d key (%llu %x %llu) itemoff %d " "itemsize %d\n", i, (unsigned long long)key.objectid, type, (unsigned long long)key.offset, btrfs_item_offset(l, item), btrfs_item_size(l, item)); switch (type) { case BTRFS_INODE_ITEM_KEY: ii = btrfs_item_ptr(l, i, struct btrfs_inode_item); printk(KERN_INFO "\t\tinode generation %llu size %llu " "mode %o\n", (unsigned long long) btrfs_inode_generation(l, ii), (unsigned long long)btrfs_inode_size(l, ii), btrfs_inode_mode(l, ii)); break; case BTRFS_DIR_ITEM_KEY: di = btrfs_item_ptr(l, i, struct btrfs_dir_item); btrfs_dir_item_key_to_cpu(l, di, &found_key); printk(KERN_INFO "\t\tdir oid %llu type %u\n", (unsigned long long)found_key.objectid, btrfs_dir_type(l, di)); break; case BTRFS_ROOT_ITEM_KEY: ri = btrfs_item_ptr(l, i, struct btrfs_root_item); printk(KERN_INFO "\t\troot data bytenr %llu refs %u\n", (unsigned long long) btrfs_disk_root_bytenr(l, ri), btrfs_disk_root_refs(l, ri)); break; case BTRFS_EXTENT_ITEM_KEY: print_extent_item(l, i); break; case BTRFS_TREE_BLOCK_REF_KEY: printk(KERN_INFO "\t\ttree block backref\n"); break; case BTRFS_SHARED_BLOCK_REF_KEY: printk(KERN_INFO "\t\tshared block backref\n"); break; case BTRFS_EXTENT_DATA_REF_KEY: dref = btrfs_item_ptr(l, i, struct btrfs_extent_data_ref); print_extent_data_ref(l, dref); break; case BTRFS_SHARED_DATA_REF_KEY: sref = btrfs_item_ptr(l, i, struct 
btrfs_shared_data_ref); printk(KERN_INFO "\t\tshared data backref count %u\n", btrfs_shared_data_ref_count(l, sref)); break; case BTRFS_EXTENT_DATA_KEY: fi = btrfs_item_ptr(l, i, struct btrfs_file_extent_item); if (btrfs_file_extent_type(l, fi) == BTRFS_FILE_EXTENT_INLINE) { printk(KERN_INFO "\t\tinline extent data " "size %u\n", btrfs_file_extent_inline_len(l, fi)); break; } printk(KERN_INFO "\t\textent data disk bytenr %llu " "nr %llu\n", (unsigned long long) btrfs_file_extent_disk_bytenr(l, fi), (unsigned long long) btrfs_file_extent_disk_num_bytes(l, fi)); printk(KERN_INFO "\t\textent data offset %llu " "nr %llu ram %llu\n", (unsigned long long) btrfs_file_extent_offset(l, fi), (unsigned long long) btrfs_file_extent_num_bytes(l, fi), (unsigned long long) btrfs_file_extent_ram_bytes(l, fi)); break; case BTRFS_EXTENT_REF_V0_KEY: #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 print_extent_ref_v0(l, i); #else BUG(); #endif break; case BTRFS_BLOCK_GROUP_ITEM_KEY: bi = btrfs_item_ptr(l, i, struct btrfs_block_group_item); printk(KERN_INFO "\t\tblock group used %llu\n", (unsigned long long) btrfs_disk_block_group_used(l, bi)); break; case BTRFS_CHUNK_ITEM_KEY: print_chunk(l, btrfs_item_ptr(l, i, struct btrfs_chunk)); break; case BTRFS_DEV_ITEM_KEY: print_dev_item(l, btrfs_item_ptr(l, i, struct btrfs_dev_item)); break; case BTRFS_DEV_EXTENT_KEY: dev_extent = btrfs_item_ptr(l, i, struct btrfs_dev_extent); printk(KERN_INFO "\t\tdev extent chunk_tree %llu\n" "\t\tchunk objectid %llu chunk offset %llu " "length %llu\n", (unsigned long long) btrfs_dev_extent_chunk_tree(l, dev_extent), (unsigned long long) btrfs_dev_extent_chunk_objectid(l, dev_extent), (unsigned long long) btrfs_dev_extent_chunk_offset(l, dev_extent), (unsigned long long) btrfs_dev_extent_length(l, dev_extent)); }; } } void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c) { int i; u32 nr; struct btrfs_key key; int level; if (!c) return; nr = btrfs_header_nritems(c); level = btrfs_header_level(c); if (level == 0) { btrfs_print_leaf(root, c); return; } printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n", (unsigned long long)btrfs_header_bytenr(c), level, nr, (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr); for (i = 0; i < nr; i++) { btrfs_node_key_to_cpu(c, &key, i); printk(KERN_INFO "\tkey %d (%llu %u %llu) block %llu\n", i, (unsigned long long)key.objectid, key.type, (unsigned long long)key.offset, (unsigned long long)btrfs_node_blockptr(c, i)); } for (i = 0; i < nr; i++) { struct extent_buffer *next = read_tree_block(root, btrfs_node_blockptr(c, i), btrfs_level_size(root, level - 1), btrfs_node_ptr_generation(c, i)); if (btrfs_is_leaf(next) && level != 1) BUG(); if (btrfs_header_level(next) != level - 1) BUG(); btrfs_print_tree(root, next); free_extent_buffer(next); } }
gpl-2.0
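print_extent_item() in the btrfs file above walks an item's inline back-references as variable-size records packed end to end: cast the cursor, switch on the record type, then advance the cursor by the size that type implies. The following is a self-contained sketch of that cursor walk using made-up record types, not the btrfs on-disk structures; ref_size() stands in for btrfs_extent_inline_ref_size().

#include <stdio.h>
#include <string.h>

enum ref_type { TREE_REF, DATA_REF };

struct ref_hdr { unsigned char type; };
struct tree_ref { struct ref_hdr hdr; unsigned long long root; };
struct data_ref { struct ref_hdr hdr; unsigned long long root, objectid; };

/* size implied by the record type, as btrfs_extent_inline_ref_size() does */
static size_t ref_size(unsigned char type)
{
	return type == TREE_REF ? sizeof(struct tree_ref) : sizeof(struct data_ref);
}

static void walk_refs(unsigned char *ptr, unsigned char *end)
{
	while (ptr < end) {	/* same guard as the 'while (ptr < end)' loop above */
		struct ref_hdr *hdr = (struct ref_hdr *)ptr;

		if (hdr->type == TREE_REF)
			printf("tree ref root %llu\n", ((struct tree_ref *)hdr)->root);
		else
			printf("data ref objectid %llu\n", ((struct data_ref *)hdr)->objectid);
		ptr += ref_size(hdr->type);	/* advance by this record's size */
	}
}

int main(void)
{
	struct tree_ref t = { { TREE_REF }, 5 };
	struct data_ref d = { { DATA_REF }, 5, 257 };
	unsigned char buf[sizeof(t) + sizeof(d)];

	/* pack the two records end to end, as the inline refs are packed */
	memcpy(buf, &t, sizeof(t));
	memcpy(buf + sizeof(t), &d, sizeof(d));
	walk_refs(buf, buf + sizeof(buf));
	return 0;
}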
TeamEOS/kernel_oppo_msm8960
arch/arm/mach-msm/btpintest.c
3314
5465
/* Copyright (c) 2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/uaccess.h> #include <linux/version.h> #include <linux/debugfs.h> #include <linux/param.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <mach/gpio.h> #include <mach/vreg.h> #include <mach/gpiomux.h> #define VERSION "1.0" struct dentry *pin_debugfs_dent; /* UART GPIO lines for 8660 */ enum uartpins { UARTDM_TX = 53, UARTDM_RX = 54, UARTDM_CTS = 55, UARTDM_RFR = 56 }; /* Aux PCM GPIO lines for 8660 */ enum auxpcmpins { AUX_PCM_CLK = 114, AUX_PCM_SYNC = 113, AUX_PCM_DIN = 112, AUX_PCM_DOUT = 111 }; /* Number of UART and PCM pins */ #define PIN_COUNT 8 static struct gpiomux_setting pin_test_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; /* Static array to initialise the return config */ static struct gpiomux_setting currentconfig[2*PIN_COUNT]; static struct msm_gpiomux_config pin_test_configs[] = { { .gpio = AUX_PCM_DOUT, .settings = { [GPIOMUX_ACTIVE] = &pin_test_config, [GPIOMUX_SUSPENDED] = &pin_test_config, }, }, { .gpio = AUX_PCM_DIN, .settings = { [GPIOMUX_ACTIVE] = &pin_test_config, [GPIOMUX_SUSPENDED] = &pin_test_config, }, }, { .gpio = AUX_PCM_SYNC, .settings = { [GPIOMUX_ACTIVE] = &pin_test_config, [GPIOMUX_SUSPENDED] = &pin_test_config, }, }, { .gpio = AUX_PCM_CLK, .settings = { [GPIOMUX_ACTIVE] = &pin_test_config, [GPIOMUX_SUSPENDED] = &pin_test_config, }, }, { .gpio = UARTDM_TX, .settings = { [GPIOMUX_ACTIVE] = &pin_test_config, [GPIOMUX_SUSPENDED] = &pin_test_config, }, }, { .gpio = UARTDM_RX, .settings = { [GPIOMUX_ACTIVE] = &pin_test_config, [GPIOMUX_SUSPENDED] = &pin_test_config, }, }, { .gpio = UARTDM_CTS, .settings = { [GPIOMUX_ACTIVE] = &pin_test_config, [GPIOMUX_SUSPENDED] = &pin_test_config, }, }, { .gpio = UARTDM_RFR, .settings = { [GPIOMUX_ACTIVE] = &pin_test_config, [GPIOMUX_SUSPENDED] = &pin_test_config, }, }, }; static struct msm_gpiomux_config pin_config[PIN_COUNT]; static int pintest_open(struct inode *inode, struct file *file) { /* non-seekable */ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); return 0; } static int pintest_release(struct inode *inode, struct file *file) { return 0; } static int configure_pins(struct msm_gpiomux_config *config, struct msm_gpiomux_config *oldconfig, unsigned int num_configs) { int rc = 0, j, i; for (i = 0; i < num_configs; i++) { for (j = 0; j < GPIOMUX_NSETTINGS; j++) { (oldconfig + i)->gpio = (config + i)->gpio; rc = msm_gpiomux_write((config + i)->gpio, j, (config + i)->settings[j], (oldconfig + i)->settings[j]); if (rc < 0) break; } } return rc; } static void init_current_config_pointers(void) { int i = 0, j = 0; /* The current config variables will hold the current configuration * which is getting overwritten during a msm_gpiomux_write call */ for (i = 0, j = 0; i < PIN_COUNT; i += 1, j += 2) { pin_config[i].settings[GPIOMUX_ACTIVE] = &currentconfig[j];
pin_config[i].settings[GPIOMUX_SUSPENDED] = &currentconfig[j + 1]; } } static ssize_t pintest_write( struct file *file, const char __user *buff, size_t count, loff_t *ppos) { char mode; int rc = 0; if (count < 1) return -EINVAL; if (buff == NULL) return -EINVAL; /* copy only one byte: 'mode' is a single char, so copying 'count' bytes would overrun it */ if (copy_from_user(&mode, buff, 1)) return -EFAULT; mode = mode - '0'; init_current_config_pointers(); if (mode) { /* Configure all pin test gpios for the custom settings */ rc = configure_pins(pin_test_configs, pin_config, ARRAY_SIZE(pin_test_configs)); if (rc < 0) return rc; } else { /* Configure all pin test gpios for the original settings */ rc = configure_pins(pin_config, pin_test_configs, ARRAY_SIZE(pin_test_configs)); if (rc < 0) return rc; } /* report the bytes consumed so a userspace write() does not see 0 */ return count; } static const struct file_operations pintest_debugfs_fops = { .open = pintest_open, .release = pintest_release, .write = pintest_write, }; static int __init bluepintest_init(void) { pin_debugfs_dent = debugfs_create_dir("btpintest", NULL); if (IS_ERR(pin_debugfs_dent)) { printk(KERN_ERR "%s(%d): debugfs_create_dir fail, error %ld\n", __FILE__, __LINE__, PTR_ERR(pin_debugfs_dent)); return -ENOMEM; } if (debugfs_create_file("enable", 0644, pin_debugfs_dent, 0, &pintest_debugfs_fops) == NULL) { printk(KERN_ERR "%s(%d): debugfs_create_file: index fail\n", __FILE__, __LINE__); return -ENOMEM; } return 0; } static void __exit bluepintest_exit(void) { debugfs_remove_recursive(pin_debugfs_dent); } module_init(bluepintest_init); module_exit(bluepintest_exit); MODULE_DESCRIPTION("Bluetooth Pin Connectivity Test Driver ver " VERSION); MODULE_LICENSE("GPL v2");
gpl-2.0
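A minimal userspace sketch of driving the btpintest node above, assuming debugfs is mounted at /sys/kernel/debug (the btpintest/enable path comes from bluepintest_init()): writing '1' moves the eight UART/aux-PCM pins to the GPIO test configuration, writing '0' restores the settings saved in currentconfig.

/* Hypothetical test client, not part of the driver above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = "/sys/kernel/debug/btpintest/enable"; /* assumed debugfs mount */
	const char *mode = (argc > 1) ? argv[1] : "1"; /* "1" = test config, "0" = restore */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, mode, 1) != 1)
		perror("write");
	close(fd);
	return 0;
}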
genopublic/kernel_u8800
drivers/net/wireless/ath/ath5k/rfkill.c
4082
4100
/* * RFKILL support for ath5k * * Copyright (c) 2009 Tobias Doerffel <tobias.doerffel@gmail.com> * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include "base.h" static inline void ath5k_rfkill_disable(struct ath5k_softc *sc) { ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "rfkill disable (gpio:%d polarity:%d)\n", sc->rf_kill.gpio, sc->rf_kill.polarity); ath5k_hw_set_gpio_output(sc->ah, sc->rf_kill.gpio); ath5k_hw_set_gpio(sc->ah, sc->rf_kill.gpio, !sc->rf_kill.polarity); } static inline void ath5k_rfkill_enable(struct ath5k_softc *sc) { ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "rfkill enable (gpio:%d polarity:%d)\n", sc->rf_kill.gpio, sc->rf_kill.polarity); ath5k_hw_set_gpio_output(sc->ah, sc->rf_kill.gpio); ath5k_hw_set_gpio(sc->ah, sc->rf_kill.gpio, sc->rf_kill.polarity); } static inline void ath5k_rfkill_set_intr(struct ath5k_softc *sc, bool enable) { struct ath5k_hw *ah = sc->ah; u32 curval; ath5k_hw_set_gpio_input(ah, sc->rf_kill.gpio); curval = ath5k_hw_get_gpio(ah, sc->rf_kill.gpio); ath5k_hw_set_gpio_intr(ah, sc->rf_kill.gpio, enable ? 
!!curval : !curval); } static bool ath5k_is_rfkill_set(struct ath5k_softc *sc) { /* configuring GPIO for input for some reason disables rfkill */ /*ath5k_hw_set_gpio_input(sc->ah, sc->rf_kill.gpio);*/ return ath5k_hw_get_gpio(sc->ah, sc->rf_kill.gpio) == sc->rf_kill.polarity; } static void ath5k_tasklet_rfkill_toggle(unsigned long data) { struct ath5k_softc *sc = (void *)data; bool blocked; blocked = ath5k_is_rfkill_set(sc); wiphy_rfkill_set_hw_state(sc->hw->wiphy, blocked); } void ath5k_rfkill_hw_start(struct ath5k_hw *ah) { struct ath5k_softc *sc = ah->ah_sc; /* read rfkill GPIO configuration from EEPROM header */ sc->rf_kill.gpio = ah->ah_capabilities.cap_eeprom.ee_rfkill_pin; sc->rf_kill.polarity = ah->ah_capabilities.cap_eeprom.ee_rfkill_pol; tasklet_init(&sc->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle, (unsigned long)sc); ath5k_rfkill_disable(sc); /* enable interrupt for rfkill switch */ if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header)) ath5k_rfkill_set_intr(sc, true); } void ath5k_rfkill_hw_stop(struct ath5k_hw *ah) { struct ath5k_softc *sc = ah->ah_sc; /* disable interrupt for rfkill switch */ if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header)) ath5k_rfkill_set_intr(sc, false); tasklet_kill(&sc->rf_kill.toggleq); /* enable RFKILL when stopping HW so Wifi LED is turned off */ ath5k_rfkill_enable(sc); }
gpl-2.0
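The rfkill handling above reduces to comparing the switch GPIO level against the active polarity read from the EEPROM header; a standalone restatement with hypothetical helper names (illustration only, not part of ath5k):

#include <stdbool.h>

/* Mirrors ath5k_is_rfkill_set(): the radio is hard-blocked when the GPIO
 * currently reads at its active polarity. */
static bool rfkill_blocked(unsigned int gpio_level, unsigned int polarity)
{
	return gpio_level == polarity;
}

/* Mirrors the trigger-level expression in ath5k_rfkill_set_intr(): the
 * current level is normalised to 0/1, and inverted when the interrupt is
 * being disabled. */
static unsigned int rfkill_intr_level(unsigned int cur_level, bool enable)
{
	return enable ? !!cur_level : !cur_level;
}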
TeamExodus/kernel_asus_fugu
arch/blackfin/kernel/cplbinfo.c
4338
3934
/* * arch/blackfin/kernel/cplbinfo.c - display CPLB status * * Copyright 2004-2008 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/ctype.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <asm/cplbinit.h> #include <asm/blackfin.h> static char const page_strtbl[][4] = { "1K", "4K", "1M", "4M", #ifdef CONFIG_BF60x "16K", "64K", "16M", "64M", #endif }; #define page(flags) (((flags) & 0x70000) >> 16) #define strpage(flags) page_strtbl[page(flags)] struct cplbinfo_data { loff_t pos; char cplb_type; u32 mem_control; struct cplb_entry *tbl; int switched; }; static void cplbinfo_print_header(struct seq_file *m) { seq_printf(m, "Index\tAddress\t\tData\tSize\tU/RD\tU/WR\tS/WR\tSwitch\n"); } static int cplbinfo_nomore(struct cplbinfo_data *cdata) { return cdata->pos >= MAX_CPLBS; } static int cplbinfo_show(struct seq_file *m, void *p) { struct cplbinfo_data *cdata; unsigned long data, addr; loff_t pos; cdata = p; pos = cdata->pos; addr = cdata->tbl[pos].addr; data = cdata->tbl[pos].data; seq_printf(m, "%d\t0x%08lx\t%05lx\t%s\t%c\t%c\t%c\t%c\n", (int)pos, addr, data, strpage(data), (data & CPLB_USER_RD) ? 'Y' : 'N', (data & CPLB_USER_WR) ? 'Y' : 'N', (data & CPLB_SUPV_WR) ? 'Y' : 'N', pos < cdata->switched ? 'N' : 'Y'); return 0; } static void cplbinfo_seq_init(struct cplbinfo_data *cdata, unsigned int cpu) { if (cdata->cplb_type == 'I') { cdata->mem_control = bfin_read_IMEM_CONTROL(); cdata->tbl = icplb_tbl[cpu]; cdata->switched = first_switched_icplb; } else { cdata->mem_control = bfin_read_DMEM_CONTROL(); cdata->tbl = dcplb_tbl[cpu]; cdata->switched = first_switched_dcplb; } } static void *cplbinfo_start(struct seq_file *m, loff_t *pos) { struct cplbinfo_data *cdata = m->private; if (!*pos) { seq_printf(m, "%cCPLBs are %sabled: 0x%x\n", cdata->cplb_type, (cdata->mem_control & ENDCPLB ? "en" : "dis"), cdata->mem_control); cplbinfo_print_header(m); } else if (cplbinfo_nomore(cdata)) return NULL; get_cpu(); return cdata; } static void *cplbinfo_next(struct seq_file *m, void *p, loff_t *pos) { struct cplbinfo_data *cdata = p; cdata->pos = ++(*pos); if (cplbinfo_nomore(cdata)) return NULL; else return cdata; } static void cplbinfo_stop(struct seq_file *m, void *p) { put_cpu(); } static const struct seq_operations cplbinfo_sops = { .start = cplbinfo_start, .next = cplbinfo_next, .stop = cplbinfo_stop, .show = cplbinfo_show, }; #define CPLBINFO_DCPLB_FLAG 0x80000000 static int cplbinfo_open(struct inode *inode, struct file *file) { char cplb_type; unsigned int cpu = (unsigned long)PDE_DATA(file_inode(file)); int ret; struct seq_file *m; struct cplbinfo_data *cdata; cplb_type = cpu & CPLBINFO_DCPLB_FLAG ? 
'D' : 'I'; cpu &= ~CPLBINFO_DCPLB_FLAG; if (!cpu_online(cpu)) return -ENODEV; ret = seq_open_private(file, &cplbinfo_sops, sizeof(*cdata)); if (ret) return ret; m = file->private_data; cdata = m->private; cdata->pos = 0; cdata->cplb_type = cplb_type; cplbinfo_seq_init(cdata, cpu); return 0; } static const struct file_operations cplbinfo_fops = { .open = cplbinfo_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static int __init cplbinfo_init(void) { struct proc_dir_entry *cplb_dir, *cpu_dir; char buf[10]; unsigned int cpu; cplb_dir = proc_mkdir("cplbinfo", NULL); if (!cplb_dir) return -ENOMEM; for_each_possible_cpu(cpu) { sprintf(buf, "cpu%i", cpu); cpu_dir = proc_mkdir(buf, cplb_dir); if (!cpu_dir) return -ENOMEM; proc_create_data("icplb", S_IRUGO, cpu_dir, &cplbinfo_fops, (void *)cpu); proc_create_data("dcplb", S_IRUGO, cpu_dir, &cplbinfo_fops, (void *)(cpu | CPLBINFO_DCPLB_FLAG)); } return 0; } late_initcall(cplbinfo_init);
gpl-2.0
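cplbinfo_init() above creates /proc/cplbinfo/cpuN/icplb and /proc/cplbinfo/cpuN/dcplb for every possible CPU; a small userspace sketch that dumps both CPLB tables for CPU 0 (paths inferred from the proc_mkdir()/proc_create_data() calls, stated here as an assumption):

/* Hypothetical reader, not part of the kernel sources above. */
#include <stdio.h>

static void dump_cplb(const char *path)
{
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	dump_cplb("/proc/cplbinfo/cpu0/icplb");
	dump_cplb("/proc/cplbinfo/cpu0/dcplb");
	return 0;
}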
drewx2/android_kernel_htc_dlx
drivers/pci/hotplug/ibmphp_core.c
4850
35120
/* * IBM Hot Plug Controller Driver * * Written By: Chuck Cole, Jyoti Shah, Tong Yu, Irene Zubarev, IBM Corporation * * Copyright (C) 2001,2003 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001-2003 IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <gregkh@us.ibm.com> * */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/wait.h> #include "../pci.h" #include <asm/pci_x86.h> /* for struct irq_routing_table */ #include "ibmphp.h" #define attn_on(sl) ibmphp_hpc_writeslot (sl, HPC_SLOT_ATTNON) #define attn_off(sl) ibmphp_hpc_writeslot (sl, HPC_SLOT_ATTNOFF) #define attn_LED_blink(sl) ibmphp_hpc_writeslot (sl, HPC_SLOT_BLINKLED) #define get_ctrl_revision(sl, rev) ibmphp_hpc_readslot (sl, READ_REVLEVEL, rev) #define get_hpc_options(sl, opt) ibmphp_hpc_readslot (sl, READ_HPCOPTIONS, opt) #define DRIVER_VERSION "0.6" #define DRIVER_DESC "IBM Hot Plug PCI Controller Driver" int ibmphp_debug; static bool debug; module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC (debug, "Debugging mode enabled or not"); MODULE_LICENSE ("GPL"); MODULE_DESCRIPTION (DRIVER_DESC); struct pci_bus *ibmphp_pci_bus; static int max_slots; static int irqs[16]; /* PIC mode IRQ's we're using so far (in case MPS * tables don't provide default info for empty slots */ static int init_flag; /* static int get_max_adapter_speed_1 (struct hotplug_slot *, u8 *, u8); static inline int get_max_adapter_speed (struct hotplug_slot *hs, u8 *value) { return get_max_adapter_speed_1 (hs, value, 1); } */ static inline int get_cur_bus_info(struct slot **sl) { int rc = 1; struct slot * slot_cur = *sl; debug("options = %x\n", slot_cur->ctrl->options); debug("revision = %x\n", slot_cur->ctrl->revision); if (READ_BUS_STATUS(slot_cur->ctrl)) rc = ibmphp_hpc_readslot(slot_cur, READ_BUSSTATUS, NULL); if (rc) return rc; slot_cur->bus_on->current_speed = CURRENT_BUS_SPEED(slot_cur->busstatus); if (READ_BUS_MODE(slot_cur->ctrl)) slot_cur->bus_on->current_bus_mode = CURRENT_BUS_MODE(slot_cur->busstatus); else slot_cur->bus_on->current_bus_mode = 0xFF; debug("busstatus = %x, bus_speed = %x, bus_mode = %x\n", slot_cur->busstatus, slot_cur->bus_on->current_speed, slot_cur->bus_on->current_bus_mode); *sl = slot_cur; return 0; } static inline int slot_update(struct slot **sl) { int rc; rc = ibmphp_hpc_readslot(*sl, READ_ALLSTAT, NULL); if (rc) return rc; if (!init_flag) rc = get_cur_bus_info(sl); return rc; } static int __init get_max_slots (void) { struct slot * slot_cur; struct list_head * tmp; u8 slot_count = 0; list_for_each(tmp, &ibmphp_slot_head) { slot_cur = list_entry(tmp, struct slot, ibm_slot_list); /* sometimes the hot-pluggable slots start with 4 (not always from 1) */ slot_count = max(slot_count, 
slot_cur->number); } return slot_count; } /* This routine will put the correct slot->device information per slot. It's * called from initialization of the slot structures. It will also assign * interrupt numbers per each slot. * Parameters: struct slot * Returns 0 or errors */ int ibmphp_init_devno(struct slot **cur_slot) { struct irq_routing_table *rtable; int len; int loop; int i; rtable = pcibios_get_irq_routing_table(); if (!rtable) { err("no BIOS routing table...\n"); return -ENOMEM; } len = (rtable->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); if (!len) { kfree(rtable); return -1; } for (loop = 0; loop < len; loop++) { if ((*cur_slot)->number == rtable->slots[loop].slot && (*cur_slot)->bus == rtable->slots[loop].bus) { struct io_apic_irq_attr irq_attr; (*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn); for (i = 0; i < 4; i++) (*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus, (int) (*cur_slot)->device, i, &irq_attr); debug("(*cur_slot)->irq[0] = %x\n", (*cur_slot)->irq[0]); debug("(*cur_slot)->irq[1] = %x\n", (*cur_slot)->irq[1]); debug("(*cur_slot)->irq[2] = %x\n", (*cur_slot)->irq[2]); debug("(*cur_slot)->irq[3] = %x\n", (*cur_slot)->irq[3]); debug("rtable->exlusive_irqs = %x\n", rtable->exclusive_irqs); debug("rtable->slots[loop].irq[0].bitmap = %x\n", rtable->slots[loop].irq[0].bitmap); debug("rtable->slots[loop].irq[1].bitmap = %x\n", rtable->slots[loop].irq[1].bitmap); debug("rtable->slots[loop].irq[2].bitmap = %x\n", rtable->slots[loop].irq[2].bitmap); debug("rtable->slots[loop].irq[3].bitmap = %x\n", rtable->slots[loop].irq[3].bitmap); debug("rtable->slots[loop].irq[0].link = %x\n", rtable->slots[loop].irq[0].link); debug("rtable->slots[loop].irq[1].link = %x\n", rtable->slots[loop].irq[1].link); debug("rtable->slots[loop].irq[2].link = %x\n", rtable->slots[loop].irq[2].link); debug("rtable->slots[loop].irq[3].link = %x\n", rtable->slots[loop].irq[3].link); debug("end of init_devno\n"); kfree(rtable); return 0; } } kfree(rtable); return -1; } static inline int power_on(struct slot *slot_cur) { u8 cmd = HPC_SLOT_ON; int retval; retval = ibmphp_hpc_writeslot(slot_cur, cmd); if (retval) { err("power on failed\n"); return retval; } if (CTLR_RESULT(slot_cur->ctrl->status)) { err("command not completed successfully in power_on\n"); return -EIO; } msleep(3000); /* For ServeRAID cards, and some 66 PCI */ return 0; } static inline int power_off(struct slot *slot_cur) { u8 cmd = HPC_SLOT_OFF; int retval; retval = ibmphp_hpc_writeslot(slot_cur, cmd); if (retval) { err("power off failed\n"); return retval; } if (CTLR_RESULT(slot_cur->ctrl->status)) { err("command not completed successfully in power_off\n"); retval = -EIO; } return retval; } static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value) { int rc = 0; struct slot *pslot; u8 cmd = 0x00; /* avoid compiler warning */ debug("set_attention_status - Entry hotplug_slot[%lx] value[%x]\n", (ulong) hotplug_slot, value); ibmphp_lock_operations(); if (hotplug_slot) { switch (value) { case HPC_SLOT_ATTN_OFF: cmd = HPC_SLOT_ATTNOFF; break; case HPC_SLOT_ATTN_ON: cmd = HPC_SLOT_ATTNON; break; case HPC_SLOT_ATTN_BLINK: cmd = HPC_SLOT_BLINKLED; break; default: rc = -ENODEV; err("set_attention_status - Error : invalid input [%x]\n", value); break; } if (rc == 0) { pslot = hotplug_slot->private; if (pslot) rc = ibmphp_hpc_writeslot(pslot, cmd); else rc = -ENODEV; } } else rc = -ENODEV; ibmphp_unlock_operations(); debug("set_attention_status - Exit rc[%d]\n", rc); return rc; } 
static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value) { int rc = -ENODEV; struct slot *pslot; struct slot myslot; debug("get_attention_status - Entry hotplug_slot[%lx] pvalue[%lx]\n", (ulong) hotplug_slot, (ulong) value); ibmphp_lock_operations(); if (hotplug_slot) { pslot = hotplug_slot->private; if (pslot) { memcpy(&myslot, pslot, sizeof(struct slot)); rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, &(myslot.status)); if (!rc) rc = ibmphp_hpc_readslot(pslot, READ_EXTSLOTSTATUS, &(myslot.ext_status)); if (!rc) *value = SLOT_ATTN(myslot.status, myslot.ext_status); } } ibmphp_unlock_operations(); debug("get_attention_status - Exit rc[%d] value[%x]\n", rc, *value); return rc; } static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 * value) { int rc = -ENODEV; struct slot *pslot; struct slot myslot; debug("get_latch_status - Entry hotplug_slot[%lx] pvalue[%lx]\n", (ulong) hotplug_slot, (ulong) value); ibmphp_lock_operations(); if (hotplug_slot) { pslot = hotplug_slot->private; if (pslot) { memcpy(&myslot, pslot, sizeof(struct slot)); rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, &(myslot.status)); if (!rc) *value = SLOT_LATCH(myslot.status); } } ibmphp_unlock_operations(); debug("get_latch_status - Exit rc[%d] rc[%x] value[%x]\n", rc, rc, *value); return rc; } static int get_power_status(struct hotplug_slot *hotplug_slot, u8 * value) { int rc = -ENODEV; struct slot *pslot; struct slot myslot; debug("get_power_status - Entry hotplug_slot[%lx] pvalue[%lx]\n", (ulong) hotplug_slot, (ulong) value); ibmphp_lock_operations(); if (hotplug_slot) { pslot = hotplug_slot->private; if (pslot) { memcpy(&myslot, pslot, sizeof(struct slot)); rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, &(myslot.status)); if (!rc) *value = SLOT_PWRGD(myslot.status); } } ibmphp_unlock_operations(); debug("get_power_status - Exit rc[%d] rc[%x] value[%x]\n", rc, rc, *value); return rc; } static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 * value) { int rc = -ENODEV; struct slot *pslot; u8 present; struct slot myslot; debug("get_adapter_status - Entry hotplug_slot[%lx] pvalue[%lx]\n", (ulong) hotplug_slot, (ulong) value); ibmphp_lock_operations(); if (hotplug_slot) { pslot = hotplug_slot->private; if (pslot) { memcpy(&myslot, pslot, sizeof(struct slot)); rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, &(myslot.status)); if (!rc) { present = SLOT_PRESENT(myslot.status); if (present == HPC_SLOT_EMPTY) *value = 0; else *value = 1; } } } ibmphp_unlock_operations(); debug("get_adapter_present - Exit rc[%d] value[%x]\n", rc, *value); return rc; } static int get_max_bus_speed(struct slot *slot) { int rc = 0; /* initialise: only the default case below reports an error */ u8 mode = 0; enum pci_bus_speed speed; struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus; debug("%s - Entry slot[%p]\n", __func__, slot); ibmphp_lock_operations(); mode = slot->supported_bus_mode; speed = slot->supported_speed; ibmphp_unlock_operations(); switch (speed) { case BUS_SPEED_33: break; case BUS_SPEED_66: if (mode == BUS_MODE_PCIX) speed += 0x01; break; case BUS_SPEED_100: case BUS_SPEED_133: speed += 0x01; break; default: /* Note (will need to change): there would be soon 256, 512 also */ rc = -ENODEV; } if (!rc) bus->max_bus_speed = speed; debug("%s - Exit rc[%d] speed[%x]\n", __func__, rc, speed); return rc; } /* static int get_max_adapter_speed_1(struct hotplug_slot *hotplug_slot, u8 * value, u8 flag) { int rc = -ENODEV; struct slot *pslot; struct slot myslot; debug("get_max_adapter_speed_1 - Entry hotplug_slot[%lx] pvalue[%lx]\n", (ulong)hotplug_slot,
(ulong) value); if (flag) ibmphp_lock_operations(); if (hotplug_slot && value) { pslot = hotplug_slot->private; if (pslot) { memcpy(&myslot, pslot, sizeof(struct slot)); rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, &(myslot.status)); if (!(SLOT_LATCH (myslot.status)) && (SLOT_PRESENT (myslot.status))) { rc = ibmphp_hpc_readslot(pslot, READ_EXTSLOTSTATUS, &(myslot.ext_status)); if (!rc) *value = SLOT_SPEED(myslot.ext_status); } else *value = MAX_ADAPTER_NONE; } } if (flag) ibmphp_unlock_operations(); debug("get_max_adapter_speed_1 - Exit rc[%d] value[%x]\n", rc, *value); return rc; } static int get_bus_name(struct hotplug_slot *hotplug_slot, char * value) { int rc = -ENODEV; struct slot *pslot = NULL; debug("get_bus_name - Entry hotplug_slot[%lx]\n", (ulong)hotplug_slot); ibmphp_lock_operations(); if (hotplug_slot) { pslot = hotplug_slot->private; if (pslot) { rc = 0; snprintf(value, 100, "Bus %x", pslot->bus); } } else rc = -ENODEV; ibmphp_unlock_operations(); debug("get_bus_name - Exit rc[%d] value[%x]\n", rc, *value); return rc; } */ /**************************************************************************** * This routine will initialize the ops data structure used in the validate * function. It will also power off empty slots that are powered on since BIOS * leaves those on, albeit disconnected ****************************************************************************/ static int __init init_ops(void) { struct slot *slot_cur; struct list_head *tmp; int retval; int rc; list_for_each(tmp, &ibmphp_slot_head) { slot_cur = list_entry(tmp, struct slot, ibm_slot_list); if (!slot_cur) return -ENODEV; debug("BEFORE GETTING SLOT STATUS, slot # %x\n", slot_cur->number); if (slot_cur->ctrl->revision == 0xFF) if (get_ctrl_revision(slot_cur, &slot_cur->ctrl->revision)) return -1; if (slot_cur->bus_on->current_speed == 0xFF) if (get_cur_bus_info(&slot_cur)) return -1; get_max_bus_speed(slot_cur); if (slot_cur->ctrl->options == 0xFF) if (get_hpc_options(slot_cur, &slot_cur->ctrl->options)) return -1; retval = slot_update(&slot_cur); if (retval) return retval; debug("status = %x\n", slot_cur->status); debug("ext_status = %x\n", slot_cur->ext_status); debug("SLOT_POWER = %x\n", SLOT_POWER(slot_cur->status)); debug("SLOT_PRESENT = %x\n", SLOT_PRESENT(slot_cur->status)); debug("SLOT_LATCH = %x\n", SLOT_LATCH(slot_cur->status)); if ((SLOT_PWRGD(slot_cur->status)) && !(SLOT_PRESENT(slot_cur->status)) && !(SLOT_LATCH(slot_cur->status))) { debug("BEFORE POWER OFF COMMAND\n"); rc = power_off(slot_cur); if (rc) return rc; /* retval = slot_update(&slot_cur); * if (retval) * return retval; * ibmphp_update_slot_info(slot_cur); */ } } init_flag = 0; return 0; } /* This operation will check whether the slot is within the bounds and * the operation is valid to perform on that slot * Parameters: slot, operation * Returns: 0 or error codes */ static int validate(struct slot *slot_cur, int opn) { int number; int retval; if (!slot_cur) return -ENODEV; number = slot_cur->number; if ((number > max_slots) || (number < 0)) return -EBADSLT; debug("slot_number in validate is %d\n", slot_cur->number); retval = slot_update(&slot_cur); if (retval) return retval; switch (opn) { case ENABLE: if (!(SLOT_PWRGD(slot_cur->status)) && (SLOT_PRESENT(slot_cur->status)) && !(SLOT_LATCH(slot_cur->status))) return 0; break; case DISABLE: if ((SLOT_PWRGD(slot_cur->status)) && (SLOT_PRESENT(slot_cur->status)) && !(SLOT_LATCH(slot_cur->status))) return 0; break; default: break; } err("validate failed....\n"); return -EINVAL; } 
/**************************************************************************** * This routine is for updating the data structures in the hotplug core * Parameters: struct slot * Returns: 0 or error ****************************************************************************/ int ibmphp_update_slot_info(struct slot *slot_cur) { struct hotplug_slot_info *info; struct pci_bus *bus = slot_cur->hotplug_slot->pci_slot->bus; int rc; u8 bus_speed; u8 mode; info = kmalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL); if (!info) { err("out of system memory\n"); return -ENOMEM; } info->power_status = SLOT_PWRGD(slot_cur->status); info->attention_status = SLOT_ATTN(slot_cur->status, slot_cur->ext_status); info->latch_status = SLOT_LATCH(slot_cur->status); if (!SLOT_PRESENT(slot_cur->status)) { info->adapter_status = 0; /* info->max_adapter_speed_status = MAX_ADAPTER_NONE; */ } else { info->adapter_status = 1; /* get_max_adapter_speed_1(slot_cur->hotplug_slot, &info->max_adapter_speed_status, 0); */ } bus_speed = slot_cur->bus_on->current_speed; mode = slot_cur->bus_on->current_bus_mode; switch (bus_speed) { case BUS_SPEED_33: break; case BUS_SPEED_66: if (mode == BUS_MODE_PCIX) bus_speed += 0x01; else if (mode == BUS_MODE_PCI) ; else bus_speed = PCI_SPEED_UNKNOWN; break; case BUS_SPEED_100: case BUS_SPEED_133: bus_speed += 0x01; break; default: bus_speed = PCI_SPEED_UNKNOWN; } bus->cur_bus_speed = bus_speed; // To do: bus_names rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info); kfree(info); return rc; } /****************************************************************************** * This function will return the pci_func, given bus and devfunc, or NULL. It * is called from visit routines ******************************************************************************/ static struct pci_func *ibm_slot_find(u8 busno, u8 device, u8 function) { struct pci_func *func_cur; struct slot *slot_cur; struct list_head * tmp; list_for_each(tmp, &ibmphp_slot_head) { slot_cur = list_entry(tmp, struct slot, ibm_slot_list); if (slot_cur->func) { func_cur = slot_cur->func; while (func_cur) { if ((func_cur->busno == busno) && (func_cur->device == device) && (func_cur->function == function)) return func_cur; func_cur = func_cur->next; } } } return NULL; } /************************************************************* * This routine frees up memory used by struct slot, including * the pointers to pci_func, bus, hotplug_slot, controller, * and deregistering from the hotplug core *************************************************************/ static void free_slots(void) { struct slot *slot_cur; struct list_head * tmp; struct list_head * next; debug("%s -- enter\n", __func__); list_for_each_safe(tmp, next, &ibmphp_slot_head) { slot_cur = list_entry(tmp, struct slot, ibm_slot_list); pci_hp_deregister(slot_cur->hotplug_slot); } debug("%s -- exit\n", __func__); } static void ibm_unconfigure_device(struct pci_func *func) { struct pci_dev *temp; u8 j; debug("inside %s\n", __func__); debug("func->device = %x, func->function = %x\n", func->device, func->function); debug("func->device << 3 | 0x0 = %x\n", func->device << 3 | 0x0); for (j = 0; j < 0x08; j++) { temp = pci_get_bus_and_slot(func->busno, (func->device << 3) | j); if (temp) { pci_stop_and_remove_bus_device(temp); pci_dev_put(temp); } } pci_dev_put(func->dev); } /* * The following function is to fix kernel bug regarding * getting bus entries, here we manually add those primary * bus entries to kernel bus structure whenever apply */ static u8 bus_structure_fixup(u8 
busno) { struct pci_bus *bus; struct pci_dev *dev; u16 l; if (pci_find_bus(0, busno) || !(ibmphp_find_same_bus_num(busno))) return 1; bus = kmalloc(sizeof(*bus), GFP_KERNEL); if (!bus) { err("%s - out of memory\n", __func__); return 1; } dev = kmalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { kfree(bus); err("%s - out of memory\n", __func__); return 1; } bus->number = busno; bus->ops = ibmphp_pci_bus->ops; dev->bus = bus; for (dev->devfn = 0; dev->devfn < 256; dev->devfn += 8) { if (!pci_read_config_word(dev, PCI_VENDOR_ID, &l) && (l != 0x0000) && (l != 0xffff)) { debug("%s - Inside bus_struture_fixup()\n", __func__); pci_scan_bus(busno, ibmphp_pci_bus->ops, NULL); break; } } kfree(dev); kfree(bus); return 0; } static int ibm_configure_device(struct pci_func *func) { unsigned char bus; struct pci_bus *child; int num; int flag = 0; /* this is to make sure we don't double scan the bus, for bridged devices primarily */ if (!(bus_structure_fixup(func->busno))) flag = 1; if (func->dev == NULL) func->dev = pci_get_bus_and_slot(func->busno, PCI_DEVFN(func->device, func->function)); if (func->dev == NULL) { struct pci_bus *bus = pci_find_bus(0, func->busno); if (!bus) return 0; num = pci_scan_slot(bus, PCI_DEVFN(func->device, func->function)); if (num) pci_bus_add_devices(bus); func->dev = pci_get_bus_and_slot(func->busno, PCI_DEVFN(func->device, func->function)); if (func->dev == NULL) { err("ERROR... : pci_dev still NULL\n"); return 0; } } if (!(flag) && (func->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) { pci_read_config_byte(func->dev, PCI_SECONDARY_BUS, &bus); child = pci_add_new_bus(func->dev->bus, func->dev, bus); pci_do_scan_bus(child); } return 0; } /******************************************************* * Returns whether the bus is empty or not *******************************************************/ static int is_bus_empty(struct slot * slot_cur) { int rc; struct slot * tmp_slot; u8 i = slot_cur->bus_on->slot_min; while (i <= slot_cur->bus_on->slot_max) { if (i == slot_cur->number) { i++; continue; } tmp_slot = ibmphp_get_slot_from_physical_num(i); if (!tmp_slot) return 0; rc = slot_update(&tmp_slot); if (rc) return 0; if (SLOT_PRESENT(tmp_slot->status) && SLOT_PWRGD(tmp_slot->status)) return 0; i++; } return 1; } /*********************************************************** * If the HPC permits and the bus currently empty, tries to set the * bus speed and mode at the maximum card and bus capability * Parameters: slot * Returns: bus is set (0) or error code ***********************************************************/ static int set_bus(struct slot * slot_cur) { int rc; u8 speed; u8 cmd = 0x0; int retval; static struct pci_device_id ciobx[] = { { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, 0x0101) }, { }, }; debug("%s - entry slot # %d\n", __func__, slot_cur->number); if (SET_BUS_STATUS(slot_cur->ctrl) && is_bus_empty(slot_cur)) { rc = slot_update(&slot_cur); if (rc) return rc; speed = SLOT_SPEED(slot_cur->ext_status); debug("ext_status = %x, speed = %x\n", slot_cur->ext_status, speed); switch (speed) { case HPC_SLOT_SPEED_33: cmd = HPC_BUS_33CONVMODE; break; case HPC_SLOT_SPEED_66: if (SLOT_PCIX(slot_cur->ext_status)) { if ((slot_cur->supported_speed >= BUS_SPEED_66) && (slot_cur->supported_bus_mode == BUS_MODE_PCIX)) cmd = HPC_BUS_66PCIXMODE; else if (!SLOT_BUS_MODE(slot_cur->ext_status)) /* if max slot/bus capability is 66 pci and there's no bus mode mismatch, then the adapter supports 66 pci */ cmd = HPC_BUS_66CONVMODE; else cmd = HPC_BUS_33CONVMODE; } else { if (slot_cur->supported_speed >= 
BUS_SPEED_66) cmd = HPC_BUS_66CONVMODE; else cmd = HPC_BUS_33CONVMODE; } break; case HPC_SLOT_SPEED_133: switch (slot_cur->supported_speed) { case BUS_SPEED_33: cmd = HPC_BUS_33CONVMODE; break; case BUS_SPEED_66: if (slot_cur->supported_bus_mode == BUS_MODE_PCIX) cmd = HPC_BUS_66PCIXMODE; else cmd = HPC_BUS_66CONVMODE; break; case BUS_SPEED_100: cmd = HPC_BUS_100PCIXMODE; break; case BUS_SPEED_133: /* This is to take care of the bug in CIOBX chip */ if (pci_dev_present(ciobx)) ibmphp_hpc_writeslot(slot_cur, HPC_BUS_100PCIXMODE); cmd = HPC_BUS_133PCIXMODE; break; default: err("Wrong bus speed\n"); return -ENODEV; } break; default: err("wrong slot speed\n"); return -ENODEV; } debug("setting bus speed for slot %d, cmd %x\n", slot_cur->number, cmd); retval = ibmphp_hpc_writeslot(slot_cur, cmd); if (retval) { err("setting bus speed failed\n"); return retval; } if (CTLR_RESULT(slot_cur->ctrl->status)) { err("command not completed successfully in set_bus\n"); return -EIO; } } /* This is for x440, once Brandon fixes the firmware, will not need this delay */ msleep(1000); debug("%s -Exit\n", __func__); return 0; } /* This routine checks the bus limitations that the slot is on from the BIOS. * This is used in deciding whether or not to power up the slot. * (electrical/spec limitations. For example, >1 133 MHz or >2 66 PCI cards on * same bus) * Parameters: slot * Returns: 0 = no limitations, -EINVAL = exceeded limitations on the bus */ static int check_limitations(struct slot *slot_cur) { u8 i; struct slot * tmp_slot; u8 count = 0; u8 limitation = 0; for (i = slot_cur->bus_on->slot_min; i <= slot_cur->bus_on->slot_max; i++) { tmp_slot = ibmphp_get_slot_from_physical_num(i); if (!tmp_slot) return -ENODEV; if ((SLOT_PWRGD(tmp_slot->status)) && !(SLOT_CONNECT(tmp_slot->status))) count++; } get_cur_bus_info(&slot_cur); switch (slot_cur->bus_on->current_speed) { case BUS_SPEED_33: limitation = slot_cur->bus_on->slots_at_33_conv; break; case BUS_SPEED_66: if (slot_cur->bus_on->current_bus_mode == BUS_MODE_PCIX) limitation = slot_cur->bus_on->slots_at_66_pcix; else limitation = slot_cur->bus_on->slots_at_66_conv; break; case BUS_SPEED_100: limitation = slot_cur->bus_on->slots_at_100_pcix; break; case BUS_SPEED_133: limitation = slot_cur->bus_on->slots_at_133_pcix; break; } if ((count + 1) > limitation) return -EINVAL; return 0; } static inline void print_card_capability(struct slot *slot_cur) { info("capability of the card is "); if ((slot_cur->ext_status & CARD_INFO) == PCIX133) info(" 133 MHz PCI-X\n"); else if ((slot_cur->ext_status & CARD_INFO) == PCIX66) info(" 66 MHz PCI-X\n"); else if ((slot_cur->ext_status & CARD_INFO) == PCI66) info(" 66 MHz PCI\n"); else info(" 33 MHz PCI\n"); } /* This routine will power on the slot, configure the device(s) and find the * drivers for them. 
* Parameters: hotplug_slot * Returns: 0 or failure codes */ static int enable_slot(struct hotplug_slot *hs) { int rc, i, rcpr; struct slot *slot_cur; u8 function; struct pci_func *tmp_func; ibmphp_lock_operations(); debug("ENABLING SLOT........\n"); slot_cur = hs->private; if ((rc = validate(slot_cur, ENABLE))) { err("validate function failed\n"); goto error_nopower; } attn_LED_blink(slot_cur); rc = set_bus(slot_cur); if (rc) { err("was not able to set the bus\n"); goto error_nopower; } /*-----------------debugging------------------------------*/ get_cur_bus_info(&slot_cur); debug("the current bus speed right after set_bus = %x\n", slot_cur->bus_on->current_speed); /*----------------------------------------------------------*/ rc = check_limitations(slot_cur); if (rc) { err("Adding this card exceeds the limitations of this bus.\n"); err("(i.e., >1 133MHz cards running on same bus, or " ">2 66 PCI cards running on same bus.\n"); err("Try hot-adding into another bus\n"); rc = -EINVAL; goto error_nopower; } rc = power_on(slot_cur); if (rc) { err("something wrong when powering up... please see below for details\n"); /* need to turn off before on, otherwise, blinking overwrites */ attn_off(slot_cur); attn_on(slot_cur); if (slot_update(&slot_cur)) { attn_off(slot_cur); attn_on(slot_cur); rc = -ENODEV; goto exit; } /* Check to see the error of why it failed */ if ((SLOT_POWER(slot_cur->status)) && !(SLOT_PWRGD(slot_cur->status))) err("power fault occurred trying to power up\n"); else if (SLOT_BUS_SPEED(slot_cur->status)) { err("bus speed mismatch occurred. please check " "current bus speed and card capability\n"); print_card_capability(slot_cur); } else if (SLOT_BUS_MODE(slot_cur->ext_status)) { err("bus mode mismatch occurred. please check " "current bus mode and card capability\n"); print_card_capability(slot_cur); } ibmphp_update_slot_info(slot_cur); goto exit; } debug("after power_on\n"); /*-----------------------debugging---------------------------*/ get_cur_bus_info(&slot_cur); debug("the current bus speed right after power_on = %x\n", slot_cur->bus_on->current_speed); /*----------------------------------------------------------*/ rc = slot_update(&slot_cur); if (rc) goto error_power; rc = -EINVAL; if (SLOT_POWER(slot_cur->status) && !(SLOT_PWRGD(slot_cur->status))) { err("power fault occurred trying to power up...\n"); goto error_power; } if (SLOT_POWER(slot_cur->status) && (SLOT_BUS_SPEED(slot_cur->status))) { err("bus speed mismatch occurred. please check current bus " "speed and card capability\n"); print_card_capability(slot_cur); goto error_power; } /* Don't think this case will happen after above checks... 
* but just in case, for paranoia sake */ if (!(SLOT_POWER(slot_cur->status))) { err("power on failed...\n"); goto error_power; } slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL); if (!slot_cur->func) { /* We cannot do update_slot_info here, since no memory for * kmalloc n.e.ways, and update_slot_info allocates some */ err("out of system memory\n"); rc = -ENOMEM; goto error_power; } slot_cur->func->busno = slot_cur->bus; slot_cur->func->device = slot_cur->device; for (i = 0; i < 4; i++) slot_cur->func->irq[i] = slot_cur->irq[i]; debug("b4 configure_card, slot_cur->bus = %x, slot_cur->device = %x\n", slot_cur->bus, slot_cur->device); if (ibmphp_configure_card(slot_cur->func, slot_cur->number)) { err("configure_card was unsuccessful...\n"); /* true because don't need to actually deallocate resources, * just remove references */ ibmphp_unconfigure_card(&slot_cur, 1); debug("after unconfigure_card\n"); slot_cur->func = NULL; rc = -ENOMEM; goto error_power; } function = 0x00; do { tmp_func = ibm_slot_find(slot_cur->bus, slot_cur->func->device, function++); if (tmp_func && !(tmp_func->dev)) ibm_configure_device(tmp_func); } while (tmp_func); attn_off(slot_cur); if (slot_update(&slot_cur)) { rc = -EFAULT; goto exit; } ibmphp_print_test(); rc = ibmphp_update_slot_info(slot_cur); exit: ibmphp_unlock_operations(); return rc; error_nopower: attn_off(slot_cur); /* need to turn off if was blinking b4 */ attn_on(slot_cur); error_cont: rcpr = slot_update(&slot_cur); if (rcpr) { rc = rcpr; goto exit; } ibmphp_update_slot_info(slot_cur); goto exit; error_power: attn_off(slot_cur); /* need to turn off if was blinking b4 */ attn_on(slot_cur); rcpr = power_off(slot_cur); if (rcpr) { rc = rcpr; goto exit; } goto error_cont; } /************************************************************** * HOT REMOVING ADAPTER CARD * * INPUT: POINTER TO THE HOTPLUG SLOT STRUCTURE * * OUTPUT: SUCCESS 0 ; FAILURE: UNCONFIGURE , VALIDATE * DISABLE POWER , * **************************************************************/ static int ibmphp_disable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; int rc; ibmphp_lock_operations(); rc = ibmphp_do_disable_slot(slot); ibmphp_unlock_operations(); return rc; } int ibmphp_do_disable_slot(struct slot *slot_cur) { int rc; u8 flag; debug("DISABLING SLOT...\n"); if ((slot_cur == NULL) || (slot_cur->ctrl == NULL)) { return -ENODEV; } flag = slot_cur->flag; slot_cur->flag = 1; if (flag == 1) { rc = validate(slot_cur, DISABLE); /* checking if powered off already & valid slot # */ if (rc) goto error; } attn_LED_blink(slot_cur); if (slot_cur->func == NULL) { /* We need this for fncs's that were there on bootup */ slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL); if (!slot_cur->func) { err("out of system memory\n"); rc = -ENOMEM; goto error; } slot_cur->func->busno = slot_cur->bus; slot_cur->func->device = slot_cur->device; } ibm_unconfigure_device(slot_cur->func); /* If we got here from latch suddenly opening on operating card or a power fault, there's no power to the card, so cannot read from it to determine what resources it occupied. This operation is forbidden anyhow. The best we can do is remove it from kernel lists at least */ if (!flag) { attn_off(slot_cur); return 0; } rc = ibmphp_unconfigure_card(&slot_cur, 0); slot_cur->func = NULL; debug("in disable_slot. 
after unconfigure_card\n"); if (rc) { err("could not unconfigure card.\n"); goto error; } rc = ibmphp_hpc_writeslot(slot_cur, HPC_SLOT_OFF); if (rc) goto error; attn_off(slot_cur); rc = slot_update(&slot_cur); if (rc) goto exit; rc = ibmphp_update_slot_info(slot_cur); ibmphp_print_test(); exit: return rc; error: /* Need to turn off if was blinking b4 */ attn_off(slot_cur); attn_on(slot_cur); if (slot_update(&slot_cur)) { rc = -EFAULT; goto exit; } if (flag) ibmphp_update_slot_info(slot_cur); goto exit; } struct hotplug_slot_ops ibmphp_hotplug_slot_ops = { .set_attention_status = set_attention_status, .enable_slot = enable_slot, .disable_slot = ibmphp_disable_slot, .hardware_test = NULL, .get_power_status = get_power_status, .get_attention_status = get_attention_status, .get_latch_status = get_latch_status, .get_adapter_status = get_adapter_present, /* .get_max_adapter_speed = get_max_adapter_speed, .get_bus_name_status = get_bus_name, */ }; static void ibmphp_unload(void) { free_slots(); debug("after slots\n"); ibmphp_free_resources(); debug("after resources\n"); ibmphp_free_bus_info_queue(); debug("after bus info\n"); ibmphp_free_ebda_hpc_queue(); debug("after ebda hpc\n"); ibmphp_free_ebda_pci_rsrc_queue(); debug("after ebda pci rsrc\n"); kfree(ibmphp_pci_bus); } static int __init ibmphp_init(void) { struct pci_bus *bus; int i = 0; int rc = 0; init_flag = 1; info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); ibmphp_pci_bus = kmalloc(sizeof(*ibmphp_pci_bus), GFP_KERNEL); if (!ibmphp_pci_bus) { err("out of memory\n"); rc = -ENOMEM; goto exit; } bus = pci_find_bus(0, 0); if (!bus) { err("Can't find the root pci bus, can not continue\n"); rc = -ENODEV; goto error; } memcpy(ibmphp_pci_bus, bus, sizeof(*ibmphp_pci_bus)); ibmphp_debug = debug; ibmphp_hpc_initvars(); for (i = 0; i < 16; i++) irqs[i] = 0; if ((rc = ibmphp_access_ebda())) goto error; debug("after ibmphp_access_ebda()\n"); if ((rc = ibmphp_rsrc_init())) goto error; debug("AFTER Resource & EBDA INITIALIZATIONS\n"); max_slots = get_max_slots(); if ((rc = ibmphp_register_pci())) goto error; if (init_ops()) { rc = -ENODEV; goto error; } ibmphp_print_test(); if ((rc = ibmphp_hpc_start_poll_thread())) { goto error; } exit: return rc; error: ibmphp_unload(); goto exit; } static void __exit ibmphp_exit(void) { ibmphp_hpc_stop_poll_thread(); debug("after polling\n"); ibmphp_unload(); debug("done\n"); } module_init(ibmphp_init); module_exit(ibmphp_exit);
gpl-2.0
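The validate() gate in ibmphp_core.c above allows ENABLE only when a card is present, the latch is closed and power is off, and DISABLE only when a card is present, the latch is closed and power is good; a standalone restatement with a hypothetical flag encoding (illustration only, not the driver's SLOT_* macros):

#include <stdbool.h>

enum {
	SLOT_F_PWRGD   = 1 << 0, /* power good */
	SLOT_F_PRESENT = 1 << 1, /* adapter present */
	SLOT_F_LATCH   = 1 << 2, /* latch open */
};

static bool may_enable(unsigned int flags)
{
	return !(flags & SLOT_F_PWRGD) && (flags & SLOT_F_PRESENT) &&
	       !(flags & SLOT_F_LATCH);
}

static bool may_disable(unsigned int flags)
{
	return (flags & SLOT_F_PWRGD) && (flags & SLOT_F_PRESENT) &&
	       !(flags & SLOT_F_LATCH);
}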
sxwzhw/iproj-su640
drivers/infiniband/hw/qib/qib_sdma.c
5106
26414
/* * Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/spinlock.h> #include <linux/netdevice.h> #include <linux/moduleparam.h> #include "qib.h" #include "qib_common.h" /* default pio off, sdma on */ static ushort sdma_descq_cnt = 256; module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO); MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries"); /* * Bits defined in the send DMA descriptor. */ #define SDMA_DESC_LAST (1ULL << 11) #define SDMA_DESC_FIRST (1ULL << 12) #define SDMA_DESC_DMA_HEAD (1ULL << 13) #define SDMA_DESC_USE_LARGE_BUF (1ULL << 14) #define SDMA_DESC_INTR (1ULL << 15) #define SDMA_DESC_COUNT_LSB 16 #define SDMA_DESC_GEN_LSB 30 char *qib_sdma_state_names[] = { [qib_sdma_state_s00_hw_down] = "s00_HwDown", [qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait", [qib_sdma_state_s20_idle] = "s20_Idle", [qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait", [qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait", [qib_sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait", [qib_sdma_state_s99_running] = "s99_Running", }; char *qib_sdma_event_names[] = { [qib_sdma_event_e00_go_hw_down] = "e00_GoHwDown", [qib_sdma_event_e10_go_hw_start] = "e10_GoHwStart", [qib_sdma_event_e20_hw_started] = "e20_HwStarted", [qib_sdma_event_e30_go_running] = "e30_GoRunning", [qib_sdma_event_e40_sw_cleaned] = "e40_SwCleaned", [qib_sdma_event_e50_hw_cleaned] = "e50_HwCleaned", [qib_sdma_event_e60_hw_halted] = "e60_HwHalted", [qib_sdma_event_e70_go_idle] = "e70_GoIdle", [qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted", [qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted", [qib_sdma_event_e90_timer_tick] = "e90_TimerTick", }; /* declare all statics here rather than keep sorting */ static int alloc_sdma(struct qib_pportdata *); static void sdma_complete(struct kref *); static void sdma_finalput(struct qib_sdma_state *); static void sdma_get(struct qib_sdma_state *); static void sdma_put(struct qib_sdma_state *); static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states); static void sdma_start_sw_clean_up(struct qib_pportdata *); static void sdma_sw_clean_up_task(unsigned long); static void unmap_desc(struct qib_pportdata *, unsigned); static void 
sdma_get(struct qib_sdma_state *ss) { kref_get(&ss->kref); } static void sdma_complete(struct kref *kref) { struct qib_sdma_state *ss = container_of(kref, struct qib_sdma_state, kref); complete(&ss->comp); } static void sdma_put(struct qib_sdma_state *ss) { kref_put(&ss->kref, sdma_complete); } static void sdma_finalput(struct qib_sdma_state *ss) { sdma_put(ss); wait_for_completion(&ss->comp); } /* * Complete all the sdma requests on the active list, in the correct * order, and with appropriate processing. Called when cleaning up * after sdma shutdown, and when new sdma requests are submitted for * a link that is down. This matches what is done for requests * that complete normally, it's just the full list. * * Must be called with sdma_lock held */ static void clear_sdma_activelist(struct qib_pportdata *ppd) { struct qib_sdma_txreq *txp, *txp_next; list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) { list_del_init(&txp->list); if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) { unsigned idx; idx = txp->start_idx; while (idx != txp->next_descq_idx) { unmap_desc(ppd, idx); if (++idx == ppd->sdma_descq_cnt) idx = 0; } } if (txp->callback) (*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED); } } static void sdma_sw_clean_up_task(unsigned long opaque) { struct qib_pportdata *ppd = (struct qib_pportdata *) opaque; unsigned long flags; spin_lock_irqsave(&ppd->sdma_lock, flags); /* * At this point, the following should always be true: * - We are halted, so no more descriptors are getting retired. * - We are not running, so no one is submitting new work. * - Only we can send the e40_sw_cleaned, so we can't start * running again until we say so. So, the active list and * descq are ours to play with. */ /* Process all retired requests. */ qib_sdma_make_progress(ppd); clear_sdma_activelist(ppd); /* * Resync count of added and removed. It is VERY important that * sdma_descq_removed NEVER decrement - user_sdma depends on it. */ ppd->sdma_descq_removed = ppd->sdma_descq_added; /* * Reset our notion of head and tail. * Note that the HW registers will be reset when switching states * due to calling __qib_sdma_process_event() below. */ ppd->sdma_descq_tail = 0; ppd->sdma_descq_head = 0; ppd->sdma_head_dma[0] = 0; ppd->sdma_generation = 0; __qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned); spin_unlock_irqrestore(&ppd->sdma_lock, flags); } /* * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait * as a result of send buffer errors or send DMA descriptor errors. * We want to disarm the buffers in these cases. */ static void sdma_hw_start_up(struct qib_pportdata *ppd) { struct qib_sdma_state *ss = &ppd->sdma_state; unsigned bufno; for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno) ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno)); ppd->dd->f_sdma_hw_start_up(ppd); } static void sdma_sw_tear_down(struct qib_pportdata *ppd) { struct qib_sdma_state *ss = &ppd->sdma_state; /* Releasing this reference means the state machine has stopped. 
*/ sdma_put(ss); } static void sdma_start_sw_clean_up(struct qib_pportdata *ppd) { tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task); } static void sdma_set_state(struct qib_pportdata *ppd, enum qib_sdma_states next_state) { struct qib_sdma_state *ss = &ppd->sdma_state; struct sdma_set_state_action *action = ss->set_state_action; unsigned op = 0; /* debugging bookkeeping */ ss->previous_state = ss->current_state; ss->previous_op = ss->current_op; ss->current_state = next_state; if (action[next_state].op_enable) op |= QIB_SDMA_SENDCTRL_OP_ENABLE; if (action[next_state].op_intenable) op |= QIB_SDMA_SENDCTRL_OP_INTENABLE; if (action[next_state].op_halt) op |= QIB_SDMA_SENDCTRL_OP_HALT; if (action[next_state].op_drain) op |= QIB_SDMA_SENDCTRL_OP_DRAIN; if (action[next_state].go_s99_running_tofalse) ss->go_s99_running = 0; if (action[next_state].go_s99_running_totrue) ss->go_s99_running = 1; ss->current_op = op; ppd->dd->f_sdma_sendctrl(ppd, ss->current_op); } static void unmap_desc(struct qib_pportdata *ppd, unsigned head) { __le64 *descqp = &ppd->sdma_descq[head].qw[0]; u64 desc[2]; dma_addr_t addr; size_t len; desc[0] = le64_to_cpu(descqp[0]); desc[1] = le64_to_cpu(descqp[1]); addr = (desc[1] << 32) | (desc[0] >> 32); len = (desc[0] >> 14) & (0x7ffULL << 2); dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE); } static int alloc_sdma(struct qib_pportdata *ppd) { ppd->sdma_descq_cnt = sdma_descq_cnt; if (!ppd->sdma_descq_cnt) ppd->sdma_descq_cnt = 256; /* Allocate memory for SendDMA descriptor FIFO */ ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev, ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys, GFP_KERNEL); if (!ppd->sdma_descq) { qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor " "FIFO memory\n"); goto bail; } /* Allocate memory for DMA of head register to memory */ ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev, PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL); if (!ppd->sdma_head_dma) { qib_dev_err(ppd->dd, "failed to allocate SendDMA " "head memory\n"); goto cleanup_descq; } ppd->sdma_head_dma[0] = 0; return 0; cleanup_descq: dma_free_coherent(&ppd->dd->pcidev->dev, ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq, ppd->sdma_descq_phys); ppd->sdma_descq = NULL; ppd->sdma_descq_phys = 0; bail: ppd->sdma_descq_cnt = 0; return -ENOMEM; } static void free_sdma(struct qib_pportdata *ppd) { struct qib_devdata *dd = ppd->dd; if (ppd->sdma_head_dma) { dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, (void *)ppd->sdma_head_dma, ppd->sdma_head_phys); ppd->sdma_head_dma = NULL; ppd->sdma_head_phys = 0; } if (ppd->sdma_descq) { dma_free_coherent(&dd->pcidev->dev, ppd->sdma_descq_cnt * sizeof(u64[2]), ppd->sdma_descq, ppd->sdma_descq_phys); ppd->sdma_descq = NULL; ppd->sdma_descq_phys = 0; } } static inline void make_sdma_desc(struct qib_pportdata *ppd, u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset) { WARN_ON(addr & 3); /* SDmaPhyAddr[47:32] */ sdmadesc[1] = addr >> 32; /* SDmaPhyAddr[31:0] */ sdmadesc[0] = (addr & 0xfffffffcULL) << 32; /* SDmaGeneration[1:0] */ sdmadesc[0] |= (ppd->sdma_generation & 3ULL) << SDMA_DESC_GEN_LSB; /* SDmaDwordCount[10:0] */ sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB; /* SDmaBufOffset[12:2] */ sdmadesc[0] |= dwoffset & 0x7ffULL; } /* sdma_lock must be held */ int qib_sdma_make_progress(struct qib_pportdata *ppd) { struct list_head *lp = NULL; struct qib_sdma_txreq *txp = NULL; struct qib_devdata *dd = ppd->dd; int progress = 0; u16 hwhead; u16 idx = 0; hwhead = 
dd->f_sdma_gethead(ppd); /* The reason for some of the complexity of this code is that * not all descriptors have corresponding txps. So, we have to * be able to skip over descs until we wander into the range of * the next txp on the list. */ if (!list_empty(&ppd->sdma_activelist)) { lp = ppd->sdma_activelist.next; txp = list_entry(lp, struct qib_sdma_txreq, list); idx = txp->start_idx; } while (ppd->sdma_descq_head != hwhead) { /* if desc is part of this txp, unmap if needed */ if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) && (idx == ppd->sdma_descq_head)) { unmap_desc(ppd, ppd->sdma_descq_head); if (++idx == ppd->sdma_descq_cnt) idx = 0; } /* increment dequed desc count */ ppd->sdma_descq_removed++; /* advance head, wrap if needed */ if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt) ppd->sdma_descq_head = 0; /* if now past this txp's descs, do the callback */ if (txp && txp->next_descq_idx == ppd->sdma_descq_head) { /* remove from active list */ list_del_init(&txp->list); if (txp->callback) (*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK); /* see if there is another txp */ if (list_empty(&ppd->sdma_activelist)) txp = NULL; else { lp = ppd->sdma_activelist.next; txp = list_entry(lp, struct qib_sdma_txreq, list); idx = txp->start_idx; } } progress = 1; } if (progress) qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd)); return progress; } /* * This is called from interrupt context. */ void qib_sdma_intr(struct qib_pportdata *ppd) { unsigned long flags; spin_lock_irqsave(&ppd->sdma_lock, flags); __qib_sdma_intr(ppd); spin_unlock_irqrestore(&ppd->sdma_lock, flags); } void __qib_sdma_intr(struct qib_pportdata *ppd) { if (__qib_sdma_running(ppd)) qib_sdma_make_progress(ppd); } int qib_setup_sdma(struct qib_pportdata *ppd) { struct qib_devdata *dd = ppd->dd; unsigned long flags; int ret = 0; ret = alloc_sdma(ppd); if (ret) goto bail; /* set consistent sdma state */ ppd->dd->f_sdma_init_early(ppd); spin_lock_irqsave(&ppd->sdma_lock, flags); sdma_set_state(ppd, qib_sdma_state_s00_hw_down); spin_unlock_irqrestore(&ppd->sdma_lock, flags); /* set up reference counting */ kref_init(&ppd->sdma_state.kref); init_completion(&ppd->sdma_state.comp); ppd->sdma_generation = 0; ppd->sdma_descq_head = 0; ppd->sdma_descq_removed = 0; ppd->sdma_descq_added = 0; INIT_LIST_HEAD(&ppd->sdma_activelist); tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task, (unsigned long)ppd); ret = dd->f_init_sdma_regs(ppd); if (ret) goto bail_alloc; qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start); return 0; bail_alloc: qib_teardown_sdma(ppd); bail: return ret; } void qib_teardown_sdma(struct qib_pportdata *ppd) { qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down); /* * This waits for the state machine to exit so it is not * necessary to kill the sdma_sw_clean_up_task to make sure * it is not running. */ sdma_finalput(&ppd->sdma_state); free_sdma(ppd); } int qib_sdma_running(struct qib_pportdata *ppd) { unsigned long flags; int ret; spin_lock_irqsave(&ppd->sdma_lock, flags); ret = __qib_sdma_running(ppd); spin_unlock_irqrestore(&ppd->sdma_lock, flags); return ret; } /* * Complete a request when sdma not running; likely only request * but to simplify the code, always queue it, then process the full * activelist. We process the entire list to ensure that this particular * request does get it's callback, but in the correct order. 
* Must be called with sdma_lock held */ static void complete_sdma_err_req(struct qib_pportdata *ppd, struct qib_verbs_txreq *tx) { atomic_inc(&tx->qp->s_dma_busy); /* no sdma descriptors, so no unmap_desc */ tx->txreq.start_idx = 0; tx->txreq.next_descq_idx = 0; list_add_tail(&tx->txreq.list, &ppd->sdma_activelist); clear_sdma_activelist(ppd); } /* * This function queues one IB packet onto the send DMA queue per call. * The caller is responsible for checking: * 1) The number of send DMA descriptor entries is less than the size of * the descriptor queue. * 2) The IB SGE addresses and lengths are 32-bit aligned * (except possibly the last SGE's length) * 3) The SGE addresses are suitable for passing to dma_map_single(). */ int qib_sdma_verbs_send(struct qib_pportdata *ppd, struct qib_sge_state *ss, u32 dwords, struct qib_verbs_txreq *tx) { unsigned long flags; struct qib_sge *sge; struct qib_qp *qp; int ret = 0; u16 tail; __le64 *descqp; u64 sdmadesc[2]; u32 dwoffset; dma_addr_t addr; spin_lock_irqsave(&ppd->sdma_lock, flags); retry: if (unlikely(!__qib_sdma_running(ppd))) { complete_sdma_err_req(ppd, tx); goto unlock; } if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) { if (qib_sdma_make_progress(ppd)) goto retry; if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT) ppd->dd->f_sdma_set_desc_cnt(ppd, ppd->sdma_descq_cnt / 2); goto busy; } dwoffset = tx->hdr_dwords; make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0); sdmadesc[0] |= SDMA_DESC_FIRST; if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF) sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF; /* write to the descq */ tail = ppd->sdma_descq_tail; descqp = &ppd->sdma_descq[tail].qw[0]; *descqp++ = cpu_to_le64(sdmadesc[0]); *descqp++ = cpu_to_le64(sdmadesc[1]); /* increment the tail */ if (++tail == ppd->sdma_descq_cnt) { tail = 0; descqp = &ppd->sdma_descq[0].qw[0]; ++ppd->sdma_generation; } tx->txreq.start_idx = tail; sge = &ss->sge; while (dwords) { u32 dw; u32 len; len = dwords << 2; if (len > sge->length) len = sge->length; if (len > sge->sge_length) len = sge->sge_length; BUG_ON(len == 0); dw = (len + 3) >> 2; addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr, dw << 2, DMA_TO_DEVICE); if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) goto unmap; sdmadesc[0] = 0; make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset); /* SDmaUseLargeBuf has to be set in every descriptor */ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF) sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF; /* write to the descq */ *descqp++ = cpu_to_le64(sdmadesc[0]); *descqp++ = cpu_to_le64(sdmadesc[1]); /* increment the tail */ if (++tail == ppd->sdma_descq_cnt) { tail = 0; descqp = &ppd->sdma_descq[0].qw[0]; ++ppd->sdma_generation; } sge->vaddr += len; sge->length -= len; sge->sge_length -= len; if (sge->sge_length == 0) { if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { if (++sge->n >= QIB_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; } sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; sge->length = sge->mr->map[sge->m]->segs[sge->n].length; } dwoffset += dw; dwords -= dw; } if (!tail) descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0]; descqp -= 2; descqp[0] |= cpu_to_le64(SDMA_DESC_LAST); if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST) descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD); if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ) descqp[0] |= cpu_to_le64(SDMA_DESC_INTR); atomic_inc(&tx->qp->s_dma_busy); tx->txreq.next_descq_idx = tail; ppd->dd->f_sdma_update_tail(ppd, tail); ppd->sdma_descq_added += 
tx->txreq.sg_count; list_add_tail(&tx->txreq.list, &ppd->sdma_activelist); goto unlock; unmap: for (;;) { if (!tail) tail = ppd->sdma_descq_cnt - 1; else tail--; if (tail == ppd->sdma_descq_tail) break; unmap_desc(ppd, tail); } qp = tx->qp; qib_put_txreq(tx); spin_lock(&qp->r_lock); spin_lock(&qp->s_lock); if (qp->ibqp.qp_type == IB_QPT_RC) { /* XXX what about error sending RDMA read responses? */ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) qib_error_qp(qp, IB_WC_GENERAL_ERR); } else if (qp->s_wqe) qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); spin_unlock(&qp->s_lock); spin_unlock(&qp->r_lock); /* return zero to process the next send work request */ goto unlock; busy: qp = tx->qp; spin_lock(&qp->s_lock); if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { struct qib_ibdev *dev; /* * If we couldn't queue the DMA request, save the info * and try again later rather than destroying the * buffer and undoing the side effects of the copy. */ tx->ss = ss; tx->dwords = dwords; qp->s_tx = tx; dev = &ppd->dd->verbs_dev; spin_lock(&dev->pending_lock); if (list_empty(&qp->iowait)) { struct qib_ibport *ibp; ibp = &ppd->ibport_data; ibp->n_dmawait++; qp->s_flags |= QIB_S_WAIT_DMA_DESC; list_add_tail(&qp->iowait, &dev->dmawait); } spin_unlock(&dev->pending_lock); qp->s_flags &= ~QIB_S_BUSY; spin_unlock(&qp->s_lock); ret = -EBUSY; } else { spin_unlock(&qp->s_lock); qib_put_txreq(tx); } unlock: spin_unlock_irqrestore(&ppd->sdma_lock, flags); return ret; } void qib_sdma_process_event(struct qib_pportdata *ppd, enum qib_sdma_events event) { unsigned long flags; spin_lock_irqsave(&ppd->sdma_lock, flags); __qib_sdma_process_event(ppd, event); if (ppd->sdma_state.current_state == qib_sdma_state_s99_running) qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd)); spin_unlock_irqrestore(&ppd->sdma_lock, flags); } void __qib_sdma_process_event(struct qib_pportdata *ppd, enum qib_sdma_events event) { struct qib_sdma_state *ss = &ppd->sdma_state; switch (ss->current_state) { case qib_sdma_state_s00_hw_down: switch (event) { case qib_sdma_event_e00_go_hw_down: break; case qib_sdma_event_e30_go_running: /* * If down, but running requested (usually a result * of link up), then we need to start up. * This can happen when hw down is requested while * bringing the link up with traffic active on * 7220, e.g. */ ss->go_s99_running = 1; /* fall through and start dma engine */ case qib_sdma_event_e10_go_hw_start: /* This reference means the state machine is started */ sdma_get(&ppd->sdma_state); sdma_set_state(ppd, qib_sdma_state_s10_hw_start_up_wait); break; case qib_sdma_event_e20_hw_started: break; case qib_sdma_event_e40_sw_cleaned: sdma_sw_tear_down(ppd); break; case qib_sdma_event_e50_hw_cleaned: break; case qib_sdma_event_e60_hw_halted: break; case qib_sdma_event_e70_go_idle: break; case qib_sdma_event_e7220_err_halted: break; case qib_sdma_event_e7322_err_halted: break; case qib_sdma_event_e90_timer_tick: break; } break; case qib_sdma_state_s10_hw_start_up_wait: switch (event) { case qib_sdma_event_e00_go_hw_down: sdma_set_state(ppd, qib_sdma_state_s00_hw_down); sdma_sw_tear_down(ppd); break; case qib_sdma_event_e10_go_hw_start: break; case qib_sdma_event_e20_hw_started: sdma_set_state(ppd, ss->go_s99_running ?
qib_sdma_state_s99_running : qib_sdma_state_s20_idle); break; case qib_sdma_event_e30_go_running: ss->go_s99_running = 1; break; case qib_sdma_event_e40_sw_cleaned: break; case qib_sdma_event_e50_hw_cleaned: break; case qib_sdma_event_e60_hw_halted: break; case qib_sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case qib_sdma_event_e7220_err_halted: break; case qib_sdma_event_e7322_err_halted: break; case qib_sdma_event_e90_timer_tick: break; } break; case qib_sdma_state_s20_idle: switch (event) { case qib_sdma_event_e00_go_hw_down: sdma_set_state(ppd, qib_sdma_state_s00_hw_down); sdma_sw_tear_down(ppd); break; case qib_sdma_event_e10_go_hw_start: break; case qib_sdma_event_e20_hw_started: break; case qib_sdma_event_e30_go_running: sdma_set_state(ppd, qib_sdma_state_s99_running); ss->go_s99_running = 1; break; case qib_sdma_event_e40_sw_cleaned: break; case qib_sdma_event_e50_hw_cleaned: break; case qib_sdma_event_e60_hw_halted: break; case qib_sdma_event_e70_go_idle: break; case qib_sdma_event_e7220_err_halted: break; case qib_sdma_event_e7322_err_halted: break; case qib_sdma_event_e90_timer_tick: break; } break; case qib_sdma_state_s30_sw_clean_up_wait: switch (event) { case qib_sdma_event_e00_go_hw_down: sdma_set_state(ppd, qib_sdma_state_s00_hw_down); break; case qib_sdma_event_e10_go_hw_start: break; case qib_sdma_event_e20_hw_started: break; case qib_sdma_event_e30_go_running: ss->go_s99_running = 1; break; case qib_sdma_event_e40_sw_cleaned: sdma_set_state(ppd, qib_sdma_state_s10_hw_start_up_wait); sdma_hw_start_up(ppd); break; case qib_sdma_event_e50_hw_cleaned: break; case qib_sdma_event_e60_hw_halted: break; case qib_sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case qib_sdma_event_e7220_err_halted: break; case qib_sdma_event_e7322_err_halted: break; case qib_sdma_event_e90_timer_tick: break; } break; case qib_sdma_state_s40_hw_clean_up_wait: switch (event) { case qib_sdma_event_e00_go_hw_down: sdma_set_state(ppd, qib_sdma_state_s00_hw_down); sdma_start_sw_clean_up(ppd); break; case qib_sdma_event_e10_go_hw_start: break; case qib_sdma_event_e20_hw_started: break; case qib_sdma_event_e30_go_running: ss->go_s99_running = 1; break; case qib_sdma_event_e40_sw_cleaned: break; case qib_sdma_event_e50_hw_cleaned: sdma_set_state(ppd, qib_sdma_state_s30_sw_clean_up_wait); sdma_start_sw_clean_up(ppd); break; case qib_sdma_event_e60_hw_halted: break; case qib_sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case qib_sdma_event_e7220_err_halted: break; case qib_sdma_event_e7322_err_halted: break; case qib_sdma_event_e90_timer_tick: break; } break; case qib_sdma_state_s50_hw_halt_wait: switch (event) { case qib_sdma_event_e00_go_hw_down: sdma_set_state(ppd, qib_sdma_state_s00_hw_down); sdma_start_sw_clean_up(ppd); break; case qib_sdma_event_e10_go_hw_start: break; case qib_sdma_event_e20_hw_started: break; case qib_sdma_event_e30_go_running: ss->go_s99_running = 1; break; case qib_sdma_event_e40_sw_cleaned: break; case qib_sdma_event_e50_hw_cleaned: break; case qib_sdma_event_e60_hw_halted: sdma_set_state(ppd, qib_sdma_state_s40_hw_clean_up_wait); ppd->dd->f_sdma_hw_clean_up(ppd); break; case qib_sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case qib_sdma_event_e7220_err_halted: break; case qib_sdma_event_e7322_err_halted: break; case qib_sdma_event_e90_timer_tick: break; } break; case qib_sdma_state_s99_running: switch (event) { case qib_sdma_event_e00_go_hw_down: sdma_set_state(ppd, qib_sdma_state_s00_hw_down); sdma_start_sw_clean_up(ppd); break; case 
qib_sdma_event_e10_go_hw_start: break; case qib_sdma_event_e20_hw_started: break; case qib_sdma_event_e30_go_running: break; case qib_sdma_event_e40_sw_cleaned: break; case qib_sdma_event_e50_hw_cleaned: break; case qib_sdma_event_e60_hw_halted: sdma_set_state(ppd, qib_sdma_state_s30_sw_clean_up_wait); sdma_start_sw_clean_up(ppd); break; case qib_sdma_event_e70_go_idle: sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait); ss->go_s99_running = 0; break; case qib_sdma_event_e7220_err_halted: sdma_set_state(ppd, qib_sdma_state_s30_sw_clean_up_wait); sdma_start_sw_clean_up(ppd); break; case qib_sdma_event_e7322_err_halted: sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait); break; case qib_sdma_event_e90_timer_tick: break; } break; } ss->last_event = event; }
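/*
 * Illustrative caller (a sketch, not part of the original file; the
 * function name is hypothetical): a chip-specific error path reports a
 * DMA engine halt by feeding the state machine above.  Per the
 * s99_running case, an e60_hw_halted event moves the engine to
 * s30_sw_clean_up_wait and schedules the software clean-up tasklet.
 */
static void example_report_hw_halt(struct qib_pportdata *ppd)
{
	/* takes sdma_lock, then dispatches to __qib_sdma_process_event() */
	qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
}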
gpl-2.0
flar2/bulletproof-m7-5.0
drivers/media/video/pwc/pwc-uncompress.c
9202
3165
/* Linux driver for Philips webcam Decompression frontend. (C) 1999-2003 Nemosoft Unv. (C) 2004-2006 Luc Saillard (luc@saillard.org) NOTE: this version of pwc is an unofficial (modified) release of pwc & pwcx driver and thus may have bugs that are not present in the original version. Please send bug reports and support requests to <luc@saillard.org>. The decompression routines have been implemented by reverse-engineering the Nemosoft binary pwcx module. Caveat emptor. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA vim: set ts=8: */ #include <asm/current.h> #include <asm/types.h> #include "pwc.h" #include "pwc-dec1.h" #include "pwc-dec23.h" int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf) { int n, line, col; void *yuv, *image; u16 *src; u16 *dsty, *dstu, *dstv; image = vb2_plane_vaddr(&fbuf->vb, 0); yuv = fbuf->data + pdev->frame_header_size; /* Skip header */ /* Raw format; that's easy... */ if (pdev->pixfmt != V4L2_PIX_FMT_YUV420) { struct pwc_raw_frame *raw_frame = image; raw_frame->type = cpu_to_le16(pdev->type); raw_frame->vbandlength = cpu_to_le16(pdev->vbandlength); /* cmd_buf is always 4 bytes, but sometimes, only the * first 3 bytes are filled (Nala case). We can * determine this using the type of the webcam. */ memcpy(raw_frame->cmd, pdev->cmd_buf, 4); memcpy(raw_frame+1, yuv, pdev->frame_size); vb2_set_plane_payload(&fbuf->vb, 0, pdev->frame_size + sizeof(struct pwc_raw_frame)); return 0; } vb2_set_plane_payload(&fbuf->vb, 0, pdev->width * pdev->height * 3 / 2); if (pdev->vbandlength == 0) { /* Uncompressed mode. * * We do some byte shuffling here to go from the * native format to YUV420P. */ src = (u16 *)yuv; n = pdev->width * pdev->height; dsty = (u16 *)(image); dstu = (u16 *)(image + n); dstv = (u16 *)(image + n + n / 4); for (line = 0; line < pdev->height; line++) { for (col = 0; col < pdev->width; col += 4) { *dsty++ = *src++; *dsty++ = *src++; if (line & 1) *dstv++ = *src++; else *dstu++ = *src++; } } return 0; } /* * Compressed; * the decompressor routines will write the data in planar format * immediately. */ if (DEVICE_USE_CODEC1(pdev->type)) { /* TODO & FIXME */ PWC_ERROR("This chipset is not supported for now\n"); return -ENXIO; /* No such device or address: missing decompressor */ } else { pwc_dec23_decompress(pdev, yuv, image); } return 0; }
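/*
 * Worked example of the YUV420P layout produced above (example
 * dimensions only, not taken from the driver): for a 640x480 frame,
 * n = 640 * 480 = 307200, so the planes land at
 *   Y: 307200 bytes at offset 0
 *   U:  76800 bytes at offset n       (307200)
 *   V:  76800 bytes at offset n + n/4 (384000)
 * for a total payload of 640 * 480 * 3 / 2 = 460800 bytes, matching
 * the vb2_set_plane_payload() call in pwc_decompress().
 */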
gpl-2.0
TEAM-Gummy/kernel_oppo_msm8974
tools/power/cpupower/debug/i386/centrino-decode.c
10482
2053
/* * (C) 2003 - 2004 Dominik Brodowski <linux@dominikbrodowski.de> * * Licensed under the terms of the GNU GPL License version 2. * * Based on code found in * linux/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c * and originally developed by Jeremy Fitzhardinge. * * USAGE: simply run it to decode the current settings on CPU 0, * or pass the CPU number as argument, or pass the MSR content * as argument. */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <unistd.h> #include <errno.h> #include <fcntl.h> #include <sys/types.h> #include <sys/stat.h> #define MCPU 32 #define MSR_IA32_PERF_STATUS 0x198 static int rdmsr(unsigned int cpu, unsigned int msr, unsigned int *lo, unsigned int *hi) { int fd; char file[20]; unsigned long long val; int retval = -1; *lo = *hi = 0; if (cpu > MCPU) goto err1; sprintf(file, "/dev/cpu/%d/msr", cpu); fd = open(file, O_RDONLY); if (fd < 0) goto err1; if (lseek(fd, msr, SEEK_CUR) == -1) goto err2; if (read(fd, &val, 8) != 8) goto err2; *lo = (uint32_t )(val & 0xffffffffull); *hi = (uint32_t )(val>>32 & 0xffffffffull); retval = 0; err2: close(fd); err1: return retval; } static void decode (unsigned int msr) { unsigned int multiplier; unsigned int mv; multiplier = ((msr >> 8) & 0xFF); mv = (((msr & 0xFF) * 16) + 700); printf("0x%x means multiplier %d @ %d mV\n", msr, multiplier, mv); } static int decode_live(unsigned int cpu) { unsigned int lo, hi; int err; err = rdmsr(cpu, MSR_IA32_PERF_STATUS, &lo, &hi); if (err) { printf("can't get MSR_IA32_PERF_STATUS for cpu %d\n", cpu); printf("Possible trouble: you are not running an Enhanced SpeedStep capable cpu\n"); printf("or you are not root, or the msr driver is not present\n"); return 1; } decode(lo); return 0; } int main (int argc, char **argv) { unsigned int cpu, mode = 0; if (argc < 2) cpu = 0; else { cpu = strtoul(argv[1], NULL, 0); if (cpu >= MCPU) mode = 1; } if (mode) decode(cpu); else decode_live(cpu); return 0; }
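/*
 * Worked example of decode() (hypothetical MSR value, for illustration
 * only): msr = 0x0613 gives
 *   multiplier = (0x0613 >> 8) & 0xFF = 6
 *   mv         = (0x13 * 16) + 700 = 304 + 700 = 1004
 * so the tool prints "0x613 means multiplier 6 @ 1004 mV".
 */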
gpl-2.0
p500-ics-cm9/LGE_3.0_KERNEL
arch/arm/common/locomo.c
12018
24146
/* * linux/arch/arm/common/locomo.c * * Sharp LoCoMo support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This file contains all generic LoCoMo support. * * All initialization functions provided here are intended to be called * from machine specific code with proper arguments when required. * * Based on sa1111.c */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach/irq.h> #include <asm/hardware/locomo.h> /* LoCoMo Interrupts */ #define IRQ_LOCOMO_KEY (0) #define IRQ_LOCOMO_GPIO (1) #define IRQ_LOCOMO_LT (2) #define IRQ_LOCOMO_SPI (3) /* M62332 output channel selection */ #define M62332_EVR_CH 1 /* M62332 volume channel number */ /* 0 : CH.1 , 1 : CH. 2 */ /* DAC send data */ #define M62332_SLAVE_ADDR 0x4e /* Slave address */ #define M62332_W_BIT 0x00 /* W bit (0 only) */ #define M62332_SUB_ADDR 0x00 /* Sub address */ #define M62332_A_BIT 0x00 /* A bit (0 only) */ /* DAC setup and hold times (expressed in us) */ #define DAC_BUS_FREE_TIME 5 /* 4.7 us */ #define DAC_START_SETUP_TIME 5 /* 4.7 us */ #define DAC_STOP_SETUP_TIME 4 /* 4.0 us */ #define DAC_START_HOLD_TIME 5 /* 4.7 us */ #define DAC_SCL_LOW_HOLD_TIME 5 /* 4.7 us */ #define DAC_SCL_HIGH_HOLD_TIME 4 /* 4.0 us */ #define DAC_DATA_SETUP_TIME 1 /* 250 ns */ #define DAC_DATA_HOLD_TIME 1 /* 300 ns */ #define DAC_LOW_SETUP_TIME 1 /* 300 ns */ #define DAC_HIGH_SETUP_TIME 1 /* 1000 ns */ /* the following is the overall data for the locomo chip */ struct locomo { struct device *dev; unsigned long phys; unsigned int irq; int irq_base; spinlock_t lock; void __iomem *base; #ifdef CONFIG_PM void *saved_state; #endif }; struct locomo_dev_info { unsigned long offset; unsigned long length; unsigned int devid; unsigned int irq[1]; const char * name; }; /* All the locomo devices. If offset is non-zero, the mapbase for the * locomo_dev will be set to the chip base plus offset. If offset is * zero, then the mapbase for the locomo_dev will be set to zero. 
An * offset of zero means the device only uses GPIOs or other helper * functions inside this file */ static struct locomo_dev_info locomo_devices[] = { { .devid = LOCOMO_DEVID_KEYBOARD, .irq = { IRQ_LOCOMO_KEY }, .name = "locomo-keyboard", .offset = LOCOMO_KEYBOARD, .length = 16, }, { .devid = LOCOMO_DEVID_FRONTLIGHT, .irq = {}, .name = "locomo-frontlight", .offset = LOCOMO_FRONTLIGHT, .length = 8, }, { .devid = LOCOMO_DEVID_BACKLIGHT, .irq = {}, .name = "locomo-backlight", .offset = LOCOMO_BACKLIGHT, .length = 8, }, { .devid = LOCOMO_DEVID_AUDIO, .irq = {}, .name = "locomo-audio", .offset = LOCOMO_AUDIO, .length = 4, }, { .devid = LOCOMO_DEVID_LED, .irq = {}, .name = "locomo-led", .offset = LOCOMO_LED, .length = 8, }, { .devid = LOCOMO_DEVID_UART, .irq = {}, .name = "locomo-uart", .offset = 0, .length = 0, }, { .devid = LOCOMO_DEVID_SPI, .irq = {}, .name = "locomo-spi", .offset = LOCOMO_SPI, .length = 0x30, }, }; static void locomo_handler(unsigned int irq, struct irq_desc *desc) { struct locomo *lchip = irq_get_chip_data(irq); int req, i; /* Acknowledge the parent IRQ */ desc->irq_data.chip->irq_ack(&desc->irq_data); /* check why this interrupt was generated */ req = locomo_readl(lchip->base + LOCOMO_ICR) & 0x0f00; if (req) { /* generate the next interrupt(s) */ irq = lchip->irq_base; for (i = 0; i <= 3; i++, irq++) { if (req & (0x0100 << i)) { generic_handle_irq(irq); } } } } static void locomo_ack_irq(struct irq_data *d) { } static void locomo_mask_irq(struct irq_data *d) { struct locomo *lchip = irq_data_get_irq_chip_data(d); unsigned int r; r = locomo_readl(lchip->base + LOCOMO_ICR); r &= ~(0x0010 << (d->irq - lchip->irq_base)); locomo_writel(r, lchip->base + LOCOMO_ICR); } static void locomo_unmask_irq(struct irq_data *d) { struct locomo *lchip = irq_data_get_irq_chip_data(d); unsigned int r; r = locomo_readl(lchip->base + LOCOMO_ICR); r |= (0x0010 << (d->irq - lchip->irq_base)); locomo_writel(r, lchip->base + LOCOMO_ICR); } static struct irq_chip locomo_chip = { .name = "LOCOMO", .irq_ack = locomo_ack_irq, .irq_mask = locomo_mask_irq, .irq_unmask = locomo_unmask_irq, }; static void locomo_setup_irq(struct locomo *lchip) { int irq = lchip->irq_base; /* * Install handler for IRQ_LOCOMO_HW. */ irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING); irq_set_chip_data(lchip->irq, lchip); irq_set_chained_handler(lchip->irq, locomo_handler); /* Install handlers for IRQ_LOCOMO_* */ for ( ; irq <= lchip->irq_base + 3; irq++) { irq_set_chip_and_handler(irq, &locomo_chip, handle_level_irq); irq_set_chip_data(irq, lchip); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } } static void locomo_dev_release(struct device *_dev) { struct locomo_dev *dev = LOCOMO_DEV(_dev); kfree(dev); } static int locomo_init_one_child(struct locomo *lchip, struct locomo_dev_info *info) { struct locomo_dev *dev; int ret; dev = kzalloc(sizeof(struct locomo_dev), GFP_KERNEL); if (!dev) { ret = -ENOMEM; goto out; } /* * If the parent device has a DMA mask associated with it, * propagate it down to the children. */ if (lchip->dev->dma_mask) { dev->dma_mask = *lchip->dev->dma_mask; dev->dev.dma_mask = &dev->dma_mask; } dev_set_name(&dev->dev, "%s", info->name); dev->devid = info->devid; dev->dev.parent = lchip->dev; dev->dev.bus = &locomo_bus_type; dev->dev.release = locomo_dev_release; dev->dev.coherent_dma_mask = lchip->dev->coherent_dma_mask; if (info->offset) dev->mapbase = lchip->base + info->offset; else dev->mapbase = 0; dev->length = info->length; dev->irq[0] = (lchip->irq_base == NO_IRQ) ? 
NO_IRQ : lchip->irq_base + info->irq[0]; ret = device_register(&dev->dev); if (ret) { out: kfree(dev); } return ret; } #ifdef CONFIG_PM struct locomo_save_data { u16 LCM_GPO; u16 LCM_SPICT; u16 LCM_GPE; u16 LCM_ASD; u16 LCM_SPIMD; }; static int locomo_suspend(struct platform_device *dev, pm_message_t state) { struct locomo *lchip = platform_get_drvdata(dev); struct locomo_save_data *save; unsigned long flags; save = kmalloc(sizeof(struct locomo_save_data), GFP_KERNEL); if (!save) return -ENOMEM; lchip->saved_state = save; spin_lock_irqsave(&lchip->lock, flags); save->LCM_GPO = locomo_readl(lchip->base + LOCOMO_GPO); /* GPIO */ locomo_writel(0x00, lchip->base + LOCOMO_GPO); save->LCM_SPICT = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPICT); /* SPI */ locomo_writel(0x40, lchip->base + LOCOMO_SPI + LOCOMO_SPICT); save->LCM_GPE = locomo_readl(lchip->base + LOCOMO_GPE); /* GPIO */ locomo_writel(0x00, lchip->base + LOCOMO_GPE); save->LCM_ASD = locomo_readl(lchip->base + LOCOMO_ASD); /* ADSTART */ locomo_writel(0x00, lchip->base + LOCOMO_ASD); save->LCM_SPIMD = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); /* SPI */ locomo_writel(0x3C14, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); locomo_writel(0x00, lchip->base + LOCOMO_PAIF); locomo_writel(0x00, lchip->base + LOCOMO_DAC); locomo_writel(0x00, lchip->base + LOCOMO_BACKLIGHT + LOCOMO_TC); if ((locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT0) & 0x88) && (locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT1) & 0x88)) locomo_writel(0x00, lchip->base + LOCOMO_C32K); /* CLK32 off */ else /* 18MHz already enabled, so no wait */ locomo_writel(0xc1, lchip->base + LOCOMO_C32K); /* CLK32 on */ locomo_writel(0x00, lchip->base + LOCOMO_TADC); /* 18MHz clock off*/ locomo_writel(0x00, lchip->base + LOCOMO_AUDIO + LOCOMO_ACC); /* 22MHz/24MHz clock off */ locomo_writel(0x00, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); /* FL */ spin_unlock_irqrestore(&lchip->lock, flags); return 0; } static int locomo_resume(struct platform_device *dev) { struct locomo *lchip = platform_get_drvdata(dev); struct locomo_save_data *save; unsigned long r; unsigned long flags; save = lchip->saved_state; if (!save) return 0; spin_lock_irqsave(&lchip->lock, flags); locomo_writel(save->LCM_GPO, lchip->base + LOCOMO_GPO); locomo_writel(save->LCM_SPICT, lchip->base + LOCOMO_SPI + LOCOMO_SPICT); locomo_writel(save->LCM_GPE, lchip->base + LOCOMO_GPE); locomo_writel(save->LCM_ASD, lchip->base + LOCOMO_ASD); locomo_writel(save->LCM_SPIMD, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); locomo_writel(0x00, lchip->base + LOCOMO_C32K); locomo_writel(0x90, lchip->base + LOCOMO_TADC); locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KSC); r = locomo_readl(lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); r &= 0xFEFF; locomo_writel(r, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); locomo_writel(0x1, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KCMD); spin_unlock_irqrestore(&lchip->lock, flags); lchip->saved_state = NULL; kfree(save); return 0; } #endif /** * locomo_probe - probe for a single LoCoMo chip. * @phys_addr: physical address of device. * * Probe for a LoCoMo chip. This must be called * before any other locomo-specific code. * * Returns: * %-ENODEV device not found. * %-EBUSY physical address already marked in-use. * %0 successful. 
*/ static int __locomo_probe(struct device *me, struct resource *mem, int irq) { struct locomo_platform_data *pdata = me->platform_data; struct locomo *lchip; unsigned long r; int i, ret = -ENODEV; lchip = kzalloc(sizeof(struct locomo), GFP_KERNEL); if (!lchip) return -ENOMEM; spin_lock_init(&lchip->lock); lchip->dev = me; dev_set_drvdata(lchip->dev, lchip); lchip->phys = mem->start; lchip->irq = irq; lchip->irq_base = (pdata) ? pdata->irq_base : NO_IRQ; /* * Map the whole region. This also maps the * registers for our children. */ lchip->base = ioremap(mem->start, PAGE_SIZE); if (!lchip->base) { ret = -ENOMEM; goto out; } /* locomo initialize */ locomo_writel(0, lchip->base + LOCOMO_ICR); /* KEYBOARD */ locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); /* GPIO */ locomo_writel(0, lchip->base + LOCOMO_GPO); locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) , lchip->base + LOCOMO_GPE); locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) , lchip->base + LOCOMO_GPD); locomo_writel(0, lchip->base + LOCOMO_GIE); /* Frontlight */ locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); /* Longtime timer */ locomo_writel(0, lchip->base + LOCOMO_LTINT); /* SPI */ locomo_writel(0, lchip->base + LOCOMO_SPI + LOCOMO_SPIIE); locomo_writel(6 + 8 + 320 + 30 - 10, lchip->base + LOCOMO_ASD); r = locomo_readl(lchip->base + LOCOMO_ASD); r |= 0x8000; locomo_writel(r, lchip->base + LOCOMO_ASD); locomo_writel(6 + 8 + 320 + 30 - 10 - 128 + 4, lchip->base + LOCOMO_HSD); r = locomo_readl(lchip->base + LOCOMO_HSD); r |= 0x8000; locomo_writel(r, lchip->base + LOCOMO_HSD); locomo_writel(128 / 8, lchip->base + LOCOMO_HSC); /* XON */ locomo_writel(0x80, lchip->base + LOCOMO_TADC); udelay(1000); /* CLK9MEN */ r = locomo_readl(lchip->base + LOCOMO_TADC); r |= 0x10; locomo_writel(r, lchip->base + LOCOMO_TADC); udelay(100); /* init DAC */ r = locomo_readl(lchip->base + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, lchip->base + LOCOMO_DAC); r = locomo_readl(lchip->base + LOCOMO_VER); printk(KERN_INFO "LoCoMo Chip: %lu%lu\n", (r >> 8), (r & 0xff)); /* * The interrupt controller must be initialised before any * other device to ensure that the interrupts are available. */ if (lchip->irq != NO_IRQ && lchip->irq_base != NO_IRQ) locomo_setup_irq(lchip); for (i = 0; i < ARRAY_SIZE(locomo_devices); i++) locomo_init_one_child(lchip, &locomo_devices[i]); return 0; out: kfree(lchip); return ret; } static int locomo_remove_child(struct device *dev, void *data) { device_unregister(dev); return 0; } static void __locomo_remove(struct locomo *lchip) { device_for_each_child(lchip->dev, NULL, locomo_remove_child); if (lchip->irq != NO_IRQ) { irq_set_chained_handler(lchip->irq, NULL); irq_set_handler_data(lchip->irq, NULL); } iounmap(lchip->base); kfree(lchip); } static int locomo_probe(struct platform_device *dev) { struct resource *mem; int irq; mem = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!mem) return -EINVAL; irq = platform_get_irq(dev, 0); if (irq < 0) return -ENXIO; return __locomo_probe(&dev->dev, mem, irq); } static int locomo_remove(struct platform_device *dev) { struct locomo *lchip = platform_get_drvdata(dev); if (lchip) { __locomo_remove(lchip); platform_set_drvdata(dev, NULL); } return 0; } /* * Not sure if this should be on the system bus or not yet. 
* We really want some way to register a system device at * the per-machine level, and then have this driver pick * up the registered devices. */ static struct platform_driver locomo_device_driver = { .probe = locomo_probe, .remove = locomo_remove, #ifdef CONFIG_PM .suspend = locomo_suspend, .resume = locomo_resume, #endif .driver = { .name = "locomo", }, }; /* * Get the parent device driver (us) structure * from a child function device */ static inline struct locomo *locomo_chip_driver(struct locomo_dev *ldev) { return (struct locomo *)dev_get_drvdata(ldev->dev.parent); } void locomo_gpio_set_dir(struct device *dev, unsigned int bits, unsigned int dir) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int r; if (!lchip) return; spin_lock_irqsave(&lchip->lock, flags); r = locomo_readl(lchip->base + LOCOMO_GPD); if (dir) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPD); r = locomo_readl(lchip->base + LOCOMO_GPE); if (dir) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPE); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_gpio_set_dir); int locomo_gpio_read_level(struct device *dev, unsigned int bits) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int ret; if (!lchip) return -ENODEV; spin_lock_irqsave(&lchip->lock, flags); ret = locomo_readl(lchip->base + LOCOMO_GPL); spin_unlock_irqrestore(&lchip->lock, flags); ret &= bits; return ret; } EXPORT_SYMBOL(locomo_gpio_read_level); int locomo_gpio_read_output(struct device *dev, unsigned int bits) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int ret; if (!lchip) return -ENODEV; spin_lock_irqsave(&lchip->lock, flags); ret = locomo_readl(lchip->base + LOCOMO_GPO); spin_unlock_irqrestore(&lchip->lock, flags); ret &= bits; return ret; } EXPORT_SYMBOL(locomo_gpio_read_output); void locomo_gpio_write(struct device *dev, unsigned int bits, unsigned int set) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int r; if (!lchip) return; spin_lock_irqsave(&lchip->lock, flags); r = locomo_readl(lchip->base + LOCOMO_GPO); if (set) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPO); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_gpio_write); static void locomo_m62332_sendbit(void *mapbase, int bit) { unsigned int r; r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ if (bit & 1) { r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ } else { r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ } udelay(DAC_DATA_SETUP_TIME); /* 250 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */ } void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel) { struct locomo *lchip = locomo_chip_driver(ldev); int i; unsigned char data; unsigned int r; void *mapbase = 
lchip->base; unsigned long flags; spin_lock_irqsave(&lchip->lock, flags); /* Start */ udelay(DAC_BUS_FREE_TIME); /* 5.0 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_START_HOLD_TIME); /* 5.0 usec */ udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */ /* Send slave address and W bit (LSB is W bit) */ data = (M62332_SLAVE_ADDR << 1) | M62332_W_BIT; for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 1\n"); goto out; } /* Send Sub address (LSB is channel select) */ /* channel = 0 : ch1 select */ /* = 1 : ch2 select */ data = M62332_SUB_ADDR + channel; for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 2\n"); goto out; } /* Send DAC data */ for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, dac_data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 3\n"); } out: /* stop */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 
usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_m62332_senddata); /* * Frontlight control */ void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf) { unsigned long flags; struct locomo *lchip = locomo_chip_driver(dev); if (vr) locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 1); else locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 0); spin_lock_irqsave(&lchip->lock, flags); locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); udelay(100); locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); locomo_writel(bpwf | LOCOMO_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_frontlight_set); /* * LoCoMo "Register Access Bus." * * We model this as a regular bus type, and hang devices directly * off this. */ static int locomo_match(struct device *_dev, struct device_driver *_drv) { struct locomo_dev *dev = LOCOMO_DEV(_dev); struct locomo_driver *drv = LOCOMO_DRV(_drv); return dev->devid == drv->devid; } static int locomo_bus_suspend(struct device *dev, pm_message_t state) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv && drv->suspend) ret = drv->suspend(ldev, state); return ret; } static int locomo_bus_resume(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv && drv->resume) ret = drv->resume(ldev); return ret; } static int locomo_bus_probe(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = -ENODEV; if (drv->probe) ret = drv->probe(ldev); return ret; } static int locomo_bus_remove(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv->remove) ret = drv->remove(ldev); return ret; } struct bus_type locomo_bus_type = { .name = "locomo-bus", .match = locomo_match, .probe = locomo_bus_probe, .remove = locomo_bus_remove, .suspend = locomo_bus_suspend, .resume = locomo_bus_resume, }; int locomo_driver_register(struct locomo_driver *driver) { driver->drv.bus = &locomo_bus_type; return driver_register(&driver->drv); } EXPORT_SYMBOL(locomo_driver_register); void locomo_driver_unregister(struct locomo_driver *driver) { driver_unregister(&driver->drv); } EXPORT_SYMBOL(locomo_driver_unregister); static int __init locomo_init(void) { int ret = bus_register(&locomo_bus_type); if (ret == 0) platform_driver_register(&locomo_device_driver); return ret; } static void __exit locomo_exit(void) { platform_driver_unregister(&locomo_device_driver); bus_unregister(&locomo_bus_type); } module_init(locomo_init); module_exit(locomo_exit); MODULE_DESCRIPTION("Sharp LoCoMo core driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>");
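/*
 * Minimal sketch of a LoCoMo child driver (hypothetical names; struct
 * locomo_driver is assumed to expose drv/devid/probe as declared in
 * <asm/hardware/locomo.h>).  A driver binds by devid through
 * locomo_match() above and registers with locomo_driver_register():
 */
static int example_backlight_probe(struct locomo_dev *ldev)
{
	/* ldev->mapbase points at the LOCOMO_BACKLIGHT register window */
	return 0;
}

static struct locomo_driver example_backlight_driver = {
	.drv = {
		.name	= "locomo-backlight-example",
	},
	.devid	= LOCOMO_DEVID_BACKLIGHT,
	.probe	= example_backlight_probe,
};

/* a module init function would call locomo_driver_register(&example_backlight_driver); */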
gpl-2.0
TripNRaVeR/tripndroid-m7-unleashed-3.4
arch/arm/common/locomo.c
12018
24146
/* * linux/arch/arm/common/locomo.c * * Sharp LoCoMo support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This file contains all generic LoCoMo support. * * All initialization functions provided here are intended to be called * from machine specific code with proper arguments when required. * * Based on sa1111.c */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach/irq.h> #include <asm/hardware/locomo.h> /* LoCoMo Interrupts */ #define IRQ_LOCOMO_KEY (0) #define IRQ_LOCOMO_GPIO (1) #define IRQ_LOCOMO_LT (2) #define IRQ_LOCOMO_SPI (3) /* M62332 output channel selection */ #define M62332_EVR_CH 1 /* M62332 volume channel number */ /* 0 : CH.1 , 1 : CH. 2 */ /* DAC send data */ #define M62332_SLAVE_ADDR 0x4e /* Slave address */ #define M62332_W_BIT 0x00 /* W bit (0 only) */ #define M62332_SUB_ADDR 0x00 /* Sub address */ #define M62332_A_BIT 0x00 /* A bit (0 only) */ /* DAC setup and hold times (expressed in us) */ #define DAC_BUS_FREE_TIME 5 /* 4.7 us */ #define DAC_START_SETUP_TIME 5 /* 4.7 us */ #define DAC_STOP_SETUP_TIME 4 /* 4.0 us */ #define DAC_START_HOLD_TIME 5 /* 4.7 us */ #define DAC_SCL_LOW_HOLD_TIME 5 /* 4.7 us */ #define DAC_SCL_HIGH_HOLD_TIME 4 /* 4.0 us */ #define DAC_DATA_SETUP_TIME 1 /* 250 ns */ #define DAC_DATA_HOLD_TIME 1 /* 300 ns */ #define DAC_LOW_SETUP_TIME 1 /* 300 ns */ #define DAC_HIGH_SETUP_TIME 1 /* 1000 ns */ /* the following is the overall data for the locomo chip */ struct locomo { struct device *dev; unsigned long phys; unsigned int irq; int irq_base; spinlock_t lock; void __iomem *base; #ifdef CONFIG_PM void *saved_state; #endif }; struct locomo_dev_info { unsigned long offset; unsigned long length; unsigned int devid; unsigned int irq[1]; const char * name; }; /* All the locomo devices. If offset is non-zero, the mapbase for the * locomo_dev will be set to the chip base plus offset. If offset is * zero, then the mapbase for the locomo_dev will be set to zero. 
An * offset of zero means the device only uses GPIOs or other helper * functions inside this file */ static struct locomo_dev_info locomo_devices[] = { { .devid = LOCOMO_DEVID_KEYBOARD, .irq = { IRQ_LOCOMO_KEY }, .name = "locomo-keyboard", .offset = LOCOMO_KEYBOARD, .length = 16, }, { .devid = LOCOMO_DEVID_FRONTLIGHT, .irq = {}, .name = "locomo-frontlight", .offset = LOCOMO_FRONTLIGHT, .length = 8, }, { .devid = LOCOMO_DEVID_BACKLIGHT, .irq = {}, .name = "locomo-backlight", .offset = LOCOMO_BACKLIGHT, .length = 8, }, { .devid = LOCOMO_DEVID_AUDIO, .irq = {}, .name = "locomo-audio", .offset = LOCOMO_AUDIO, .length = 4, }, { .devid = LOCOMO_DEVID_LED, .irq = {}, .name = "locomo-led", .offset = LOCOMO_LED, .length = 8, }, { .devid = LOCOMO_DEVID_UART, .irq = {}, .name = "locomo-uart", .offset = 0, .length = 0, }, { .devid = LOCOMO_DEVID_SPI, .irq = {}, .name = "locomo-spi", .offset = LOCOMO_SPI, .length = 0x30, }, }; static void locomo_handler(unsigned int irq, struct irq_desc *desc) { struct locomo *lchip = irq_get_chip_data(irq); int req, i; /* Acknowledge the parent IRQ */ desc->irq_data.chip->irq_ack(&desc->irq_data); /* check why this interrupt was generated */ req = locomo_readl(lchip->base + LOCOMO_ICR) & 0x0f00; if (req) { /* generate the next interrupt(s) */ irq = lchip->irq_base; for (i = 0; i <= 3; i++, irq++) { if (req & (0x0100 << i)) { generic_handle_irq(irq); } } } } static void locomo_ack_irq(struct irq_data *d) { } static void locomo_mask_irq(struct irq_data *d) { struct locomo *lchip = irq_data_get_irq_chip_data(d); unsigned int r; r = locomo_readl(lchip->base + LOCOMO_ICR); r &= ~(0x0010 << (d->irq - lchip->irq_base)); locomo_writel(r, lchip->base + LOCOMO_ICR); } static void locomo_unmask_irq(struct irq_data *d) { struct locomo *lchip = irq_data_get_irq_chip_data(d); unsigned int r; r = locomo_readl(lchip->base + LOCOMO_ICR); r |= (0x0010 << (d->irq - lchip->irq_base)); locomo_writel(r, lchip->base + LOCOMO_ICR); } static struct irq_chip locomo_chip = { .name = "LOCOMO", .irq_ack = locomo_ack_irq, .irq_mask = locomo_mask_irq, .irq_unmask = locomo_unmask_irq, }; static void locomo_setup_irq(struct locomo *lchip) { int irq = lchip->irq_base; /* * Install handler for IRQ_LOCOMO_HW. */ irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING); irq_set_chip_data(lchip->irq, lchip); irq_set_chained_handler(lchip->irq, locomo_handler); /* Install handlers for IRQ_LOCOMO_* */ for ( ; irq <= lchip->irq_base + 3; irq++) { irq_set_chip_and_handler(irq, &locomo_chip, handle_level_irq); irq_set_chip_data(irq, lchip); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } } static void locomo_dev_release(struct device *_dev) { struct locomo_dev *dev = LOCOMO_DEV(_dev); kfree(dev); } static int locomo_init_one_child(struct locomo *lchip, struct locomo_dev_info *info) { struct locomo_dev *dev; int ret; dev = kzalloc(sizeof(struct locomo_dev), GFP_KERNEL); if (!dev) { ret = -ENOMEM; goto out; } /* * If the parent device has a DMA mask associated with it, * propagate it down to the children. */ if (lchip->dev->dma_mask) { dev->dma_mask = *lchip->dev->dma_mask; dev->dev.dma_mask = &dev->dma_mask; } dev_set_name(&dev->dev, "%s", info->name); dev->devid = info->devid; dev->dev.parent = lchip->dev; dev->dev.bus = &locomo_bus_type; dev->dev.release = locomo_dev_release; dev->dev.coherent_dma_mask = lchip->dev->coherent_dma_mask; if (info->offset) dev->mapbase = lchip->base + info->offset; else dev->mapbase = 0; dev->length = info->length; dev->irq[0] = (lchip->irq_base == NO_IRQ) ? 
NO_IRQ : lchip->irq_base + info->irq[0]; ret = device_register(&dev->dev); if (ret) { out: kfree(dev); } return ret; } #ifdef CONFIG_PM struct locomo_save_data { u16 LCM_GPO; u16 LCM_SPICT; u16 LCM_GPE; u16 LCM_ASD; u16 LCM_SPIMD; }; static int locomo_suspend(struct platform_device *dev, pm_message_t state) { struct locomo *lchip = platform_get_drvdata(dev); struct locomo_save_data *save; unsigned long flags; save = kmalloc(sizeof(struct locomo_save_data), GFP_KERNEL); if (!save) return -ENOMEM; lchip->saved_state = save; spin_lock_irqsave(&lchip->lock, flags); save->LCM_GPO = locomo_readl(lchip->base + LOCOMO_GPO); /* GPIO */ locomo_writel(0x00, lchip->base + LOCOMO_GPO); save->LCM_SPICT = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPICT); /* SPI */ locomo_writel(0x40, lchip->base + LOCOMO_SPI + LOCOMO_SPICT); save->LCM_GPE = locomo_readl(lchip->base + LOCOMO_GPE); /* GPIO */ locomo_writel(0x00, lchip->base + LOCOMO_GPE); save->LCM_ASD = locomo_readl(lchip->base + LOCOMO_ASD); /* ADSTART */ locomo_writel(0x00, lchip->base + LOCOMO_ASD); save->LCM_SPIMD = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); /* SPI */ locomo_writel(0x3C14, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); locomo_writel(0x00, lchip->base + LOCOMO_PAIF); locomo_writel(0x00, lchip->base + LOCOMO_DAC); locomo_writel(0x00, lchip->base + LOCOMO_BACKLIGHT + LOCOMO_TC); if ((locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT0) & 0x88) && (locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT1) & 0x88)) locomo_writel(0x00, lchip->base + LOCOMO_C32K); /* CLK32 off */ else /* 18MHz already enabled, so no wait */ locomo_writel(0xc1, lchip->base + LOCOMO_C32K); /* CLK32 on */ locomo_writel(0x00, lchip->base + LOCOMO_TADC); /* 18MHz clock off*/ locomo_writel(0x00, lchip->base + LOCOMO_AUDIO + LOCOMO_ACC); /* 22MHz/24MHz clock off */ locomo_writel(0x00, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); /* FL */ spin_unlock_irqrestore(&lchip->lock, flags); return 0; } static int locomo_resume(struct platform_device *dev) { struct locomo *lchip = platform_get_drvdata(dev); struct locomo_save_data *save; unsigned long r; unsigned long flags; save = lchip->saved_state; if (!save) return 0; spin_lock_irqsave(&lchip->lock, flags); locomo_writel(save->LCM_GPO, lchip->base + LOCOMO_GPO); locomo_writel(save->LCM_SPICT, lchip->base + LOCOMO_SPI + LOCOMO_SPICT); locomo_writel(save->LCM_GPE, lchip->base + LOCOMO_GPE); locomo_writel(save->LCM_ASD, lchip->base + LOCOMO_ASD); locomo_writel(save->LCM_SPIMD, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); locomo_writel(0x00, lchip->base + LOCOMO_C32K); locomo_writel(0x90, lchip->base + LOCOMO_TADC); locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KSC); r = locomo_readl(lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); r &= 0xFEFF; locomo_writel(r, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); locomo_writel(0x1, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KCMD); spin_unlock_irqrestore(&lchip->lock, flags); lchip->saved_state = NULL; kfree(save); return 0; } #endif /** * locomo_probe - probe for a single LoCoMo chip. * @phys_addr: physical address of device. * * Probe for a LoCoMo chip. This must be called * before any other locomo-specific code. * * Returns: * %-ENODEV device not found. * %-EBUSY physical address already marked in-use. * %0 successful. 
*/ static int __locomo_probe(struct device *me, struct resource *mem, int irq) { struct locomo_platform_data *pdata = me->platform_data; struct locomo *lchip; unsigned long r; int i, ret = -ENODEV; lchip = kzalloc(sizeof(struct locomo), GFP_KERNEL); if (!lchip) return -ENOMEM; spin_lock_init(&lchip->lock); lchip->dev = me; dev_set_drvdata(lchip->dev, lchip); lchip->phys = mem->start; lchip->irq = irq; lchip->irq_base = (pdata) ? pdata->irq_base : NO_IRQ; /* * Map the whole region. This also maps the * registers for our children. */ lchip->base = ioremap(mem->start, PAGE_SIZE); if (!lchip->base) { ret = -ENOMEM; goto out; } /* locomo initialize */ locomo_writel(0, lchip->base + LOCOMO_ICR); /* KEYBOARD */ locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); /* GPIO */ locomo_writel(0, lchip->base + LOCOMO_GPO); locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) , lchip->base + LOCOMO_GPE); locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) , lchip->base + LOCOMO_GPD); locomo_writel(0, lchip->base + LOCOMO_GIE); /* Frontlight */ locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); /* Longtime timer */ locomo_writel(0, lchip->base + LOCOMO_LTINT); /* SPI */ locomo_writel(0, lchip->base + LOCOMO_SPI + LOCOMO_SPIIE); locomo_writel(6 + 8 + 320 + 30 - 10, lchip->base + LOCOMO_ASD); r = locomo_readl(lchip->base + LOCOMO_ASD); r |= 0x8000; locomo_writel(r, lchip->base + LOCOMO_ASD); locomo_writel(6 + 8 + 320 + 30 - 10 - 128 + 4, lchip->base + LOCOMO_HSD); r = locomo_readl(lchip->base + LOCOMO_HSD); r |= 0x8000; locomo_writel(r, lchip->base + LOCOMO_HSD); locomo_writel(128 / 8, lchip->base + LOCOMO_HSC); /* XON */ locomo_writel(0x80, lchip->base + LOCOMO_TADC); udelay(1000); /* CLK9MEN */ r = locomo_readl(lchip->base + LOCOMO_TADC); r |= 0x10; locomo_writel(r, lchip->base + LOCOMO_TADC); udelay(100); /* init DAC */ r = locomo_readl(lchip->base + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, lchip->base + LOCOMO_DAC); r = locomo_readl(lchip->base + LOCOMO_VER); printk(KERN_INFO "LoCoMo Chip: %lu%lu\n", (r >> 8), (r & 0xff)); /* * The interrupt controller must be initialised before any * other device to ensure that the interrupts are available. */ if (lchip->irq != NO_IRQ && lchip->irq_base != NO_IRQ) locomo_setup_irq(lchip); for (i = 0; i < ARRAY_SIZE(locomo_devices); i++) locomo_init_one_child(lchip, &locomo_devices[i]); return 0; out: kfree(lchip); return ret; } static int locomo_remove_child(struct device *dev, void *data) { device_unregister(dev); return 0; } static void __locomo_remove(struct locomo *lchip) { device_for_each_child(lchip->dev, NULL, locomo_remove_child); if (lchip->irq != NO_IRQ) { irq_set_chained_handler(lchip->irq, NULL); irq_set_handler_data(lchip->irq, NULL); } iounmap(lchip->base); kfree(lchip); } static int locomo_probe(struct platform_device *dev) { struct resource *mem; int irq; mem = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!mem) return -EINVAL; irq = platform_get_irq(dev, 0); if (irq < 0) return -ENXIO; return __locomo_probe(&dev->dev, mem, irq); } static int locomo_remove(struct platform_device *dev) { struct locomo *lchip = platform_get_drvdata(dev); if (lchip) { __locomo_remove(lchip); platform_set_drvdata(dev, NULL); } return 0; } /* * Not sure if this should be on the system bus or not yet. 
* We really want some way to register a system device at * the per-machine level, and then have this driver pick * up the registered devices. */ static struct platform_driver locomo_device_driver = { .probe = locomo_probe, .remove = locomo_remove, #ifdef CONFIG_PM .suspend = locomo_suspend, .resume = locomo_resume, #endif .driver = { .name = "locomo", }, }; /* * Get the parent device driver (us) structure * from a child function device */ static inline struct locomo *locomo_chip_driver(struct locomo_dev *ldev) { return (struct locomo *)dev_get_drvdata(ldev->dev.parent); } void locomo_gpio_set_dir(struct device *dev, unsigned int bits, unsigned int dir) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int r; if (!lchip) return; spin_lock_irqsave(&lchip->lock, flags); r = locomo_readl(lchip->base + LOCOMO_GPD); if (dir) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPD); r = locomo_readl(lchip->base + LOCOMO_GPE); if (dir) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPE); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_gpio_set_dir); int locomo_gpio_read_level(struct device *dev, unsigned int bits) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int ret; if (!lchip) return -ENODEV; spin_lock_irqsave(&lchip->lock, flags); ret = locomo_readl(lchip->base + LOCOMO_GPL); spin_unlock_irqrestore(&lchip->lock, flags); ret &= bits; return ret; } EXPORT_SYMBOL(locomo_gpio_read_level); int locomo_gpio_read_output(struct device *dev, unsigned int bits) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int ret; if (!lchip) return -ENODEV; spin_lock_irqsave(&lchip->lock, flags); ret = locomo_readl(lchip->base + LOCOMO_GPO); spin_unlock_irqrestore(&lchip->lock, flags); ret &= bits; return ret; } EXPORT_SYMBOL(locomo_gpio_read_output); void locomo_gpio_write(struct device *dev, unsigned int bits, unsigned int set) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int r; if (!lchip) return; spin_lock_irqsave(&lchip->lock, flags); r = locomo_readl(lchip->base + LOCOMO_GPO); if (set) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPO); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_gpio_write); static void locomo_m62332_sendbit(void *mapbase, int bit) { unsigned int r; r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ if (bit & 1) { r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ } else { r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ } udelay(DAC_DATA_SETUP_TIME); /* 250 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */ } void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel) { struct locomo *lchip = locomo_chip_driver(ldev); int i; unsigned char data; unsigned int r; void *mapbase = 
lchip->base; unsigned long flags; spin_lock_irqsave(&lchip->lock, flags); /* Start */ udelay(DAC_BUS_FREE_TIME); /* 5.0 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_START_HOLD_TIME); /* 5.0 usec */ udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */ /* Send slave address and W bit (LSB is W bit) */ data = (M62332_SLAVE_ADDR << 1) | M62332_W_BIT; for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 1\n"); goto out; } /* Send Sub address (LSB is channel select) */ /* channel = 0 : ch1 select */ /* = 1 : ch2 select */ data = M62332_SUB_ADDR + channel; for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 2\n"); goto out; } /* Send DAC data */ for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, dac_data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 3\n"); } out: /* stop */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 
usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_m62332_senddata); /* * Frontlight control */ void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf) { unsigned long flags; struct locomo *lchip = locomo_chip_driver(dev); if (vr) locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 1); else locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 0); spin_lock_irqsave(&lchip->lock, flags); locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); udelay(100); locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); locomo_writel(bpwf | LOCOMO_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_frontlight_set); /* * LoCoMo "Register Access Bus." * * We model this as a regular bus type, and hang devices directly * off this. */ static int locomo_match(struct device *_dev, struct device_driver *_drv) { struct locomo_dev *dev = LOCOMO_DEV(_dev); struct locomo_driver *drv = LOCOMO_DRV(_drv); return dev->devid == drv->devid; } static int locomo_bus_suspend(struct device *dev, pm_message_t state) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv && drv->suspend) ret = drv->suspend(ldev, state); return ret; } static int locomo_bus_resume(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv && drv->resume) ret = drv->resume(ldev); return ret; } static int locomo_bus_probe(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = -ENODEV; if (drv->probe) ret = drv->probe(ldev); return ret; } static int locomo_bus_remove(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv->remove) ret = drv->remove(ldev); return ret; } struct bus_type locomo_bus_type = { .name = "locomo-bus", .match = locomo_match, .probe = locomo_bus_probe, .remove = locomo_bus_remove, .suspend = locomo_bus_suspend, .resume = locomo_bus_resume, }; int locomo_driver_register(struct locomo_driver *driver) { driver->drv.bus = &locomo_bus_type; return driver_register(&driver->drv); } EXPORT_SYMBOL(locomo_driver_register); void locomo_driver_unregister(struct locomo_driver *driver) { driver_unregister(&driver->drv); } EXPORT_SYMBOL(locomo_driver_unregister); static int __init locomo_init(void) { int ret = bus_register(&locomo_bus_type); if (ret == 0) platform_driver_register(&locomo_device_driver); return ret; } static void __exit locomo_exit(void) { platform_driver_unregister(&locomo_device_driver); bus_unregister(&locomo_bus_type); } module_init(locomo_init); module_exit(locomo_exit); MODULE_DESCRIPTION("Sharp LoCoMo core driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>");
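/*
 * Editor's illustrative sketch, not part of the original file: a minimal
 * child-function driver for the "Register Access Bus" defined above,
 * showing how locomo_driver_register() and the exported GPIO helpers are
 * meant to be used. The devid value (LOCOMO_DEVID_BACKLIGHT) is assumed
 * from the usual LoCoMo headers and the GPIO line (LOCOMO_GPIO_FL_VR) is
 * borrowed from the frontlight code purely for illustration; everything
 * named "example_*" is hypothetical.
 */
static int example_locomo_probe(struct locomo_dev *ldev)
{
	int level;

	/* The exported GPIO helpers take the parent (chip) device. */
	level = locomo_gpio_read_level(ldev->dev.parent, LOCOMO_GPIO_FL_VR);
	if (level < 0)
		return level;
	locomo_gpio_write(ldev->dev.parent, LOCOMO_GPIO_FL_VR, 1);
	return 0;
}

static int example_locomo_remove(struct locomo_dev *ldev)
{
	locomo_gpio_write(ldev->dev.parent, LOCOMO_GPIO_FL_VR, 0);
	return 0;
}

static struct locomo_driver example_locomo_driver = {
	.drv = {
		.name = "locomo-example",
	},
	.devid = LOCOMO_DEVID_BACKLIGHT,
	.probe = example_locomo_probe,
	.remove = example_locomo_remove,
};

/* A client module would call locomo_driver_register(&example_locomo_driver)
 * from its init path and locomo_driver_unregister() on exit; matching against
 * registered devices happens through locomo_match() above. */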
gpl-2.0
jazzk/jgedlte-kk
crypto/compress.c
13810
1378
/* * Cryptographic API. * * Compression operations. * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <linux/types.h> #include <linux/crypto.h> #include <linux/errno.h> #include <linux/string.h> #include "internal.h" static int crypto_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { return tfm->__crt_alg->cra_compress.coa_compress(tfm, src, slen, dst, dlen); } static int crypto_decompress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { return tfm->__crt_alg->cra_compress.coa_decompress(tfm, src, slen, dst, dlen); } int crypto_init_compress_ops(struct crypto_tfm *tfm) { struct compress_tfm *ops = &tfm->crt_compress; ops->cot_compress = crypto_compress; ops->cot_decompress = crypto_decompress; return 0; } void crypto_exit_compress_ops(struct crypto_tfm *tfm) { }
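/*
 * Editor's illustrative sketch, not part of the original file: how the
 * cot_compress/cot_decompress hooks installed above are reached through
 * the crypto_comp interface. The "deflate" algorithm name assumes that
 * algorithm is available in the running kernel; everything named
 * "example_*" is hypothetical.
 */
static int example_compress_buffer(const u8 *src, unsigned int slen,
				   u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int err;

	tfm = crypto_alloc_comp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Dispatches to crypto_compress() via ops->cot_compress. */
	err = crypto_comp_compress(tfm, src, slen, dst, dlen);

	crypto_free_comp(tfm);
	return err;
}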
gpl-2.0
arco/samsung-kernel-msm7x30
drivers/net/wireless/rt2x00/rt2500usb.c
755
62694
/* Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> <http://rt2x00.serialmonkey.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Module: rt2500usb Abstract: rt2500usb device specific routines. Supported chipsets: RT2570. */ #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include "rt2x00.h" #include "rt2x00usb.h" #include "rt2500usb.h" /* * Allow hardware encryption to be disabled. */ static bool modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); /* * Register access. * All access to the CSR registers will go through the methods * rt2500usb_register_read and rt2500usb_register_write. * BBP and RF registers require indirect register access, * and use the CSR registers BBPCSR and RFCSR to achieve this. * These indirect registers work with busy bits, * and we will try a maximum of REGISTER_BUSY_COUNT times to access * the register while taking a REGISTER_BUSY_DELAY us delay * between each attempt. When the busy bit is still set at that time, * the access attempt is considered to have failed, * and we will print an error. * If the csr_mutex is already held then the _lock variants must * be used instead. 
*/ static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u16 *value) { __le16 reg; rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, USB_VENDOR_REQUEST_IN, offset, &reg, sizeof(reg), REGISTER_TIMEOUT); *value = le16_to_cpu(reg); } static inline void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u16 *value) { __le16 reg; rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ, USB_VENDOR_REQUEST_IN, offset, &reg, sizeof(reg), REGISTER_TIMEOUT); *value = le16_to_cpu(reg); } static inline void rt2500usb_register_multiread(struct rt2x00_dev *rt2x00dev, const unsigned int offset, void *value, const u16 length) { rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, USB_VENDOR_REQUEST_IN, offset, value, length, REGISTER_TIMEOUT16(length)); } static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u16 value) { __le16 reg = cpu_to_le16(value); rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, USB_VENDOR_REQUEST_OUT, offset, &reg, sizeof(reg), REGISTER_TIMEOUT); } static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u16 value) { __le16 reg = cpu_to_le16(value); rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_WRITE, USB_VENDOR_REQUEST_OUT, offset, &reg, sizeof(reg), REGISTER_TIMEOUT); } static inline void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev, const unsigned int offset, void *value, const u16 length) { rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, USB_VENDOR_REQUEST_OUT, offset, value, length, REGISTER_TIMEOUT16(length)); } static int rt2500usb_regbusy_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset, struct rt2x00_field16 field, u16 *reg) { unsigned int i; for (i = 0; i < REGISTER_BUSY_COUNT; i++) { rt2500usb_register_read_lock(rt2x00dev, offset, reg); if (!rt2x00_get_field16(*reg, field)) return 1; udelay(REGISTER_BUSY_DELAY); } ERROR(rt2x00dev, "Indirect register access failed: " "offset=0x%.08x, value=0x%.08x\n", offset, *reg); *reg = ~0; return 0; } #define WAIT_FOR_BBP(__dev, __reg) \ rt2500usb_regbusy_read((__dev), PHY_CSR8, PHY_CSR8_BUSY, (__reg)) #define WAIT_FOR_RF(__dev, __reg) \ rt2500usb_regbusy_read((__dev), PHY_CSR10, PHY_CSR10_RF_BUSY, (__reg)) static void rt2500usb_bbp_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u8 value) { u16 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the new data into the register. */ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field16(&reg, PHY_CSR7_DATA, value); rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word); rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 0); rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg); } mutex_unlock(&rt2x00dev->csr_mutex); } static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev, const unsigned int word, u8 *value) { u16 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the read request into the register. * After the data has been written, we wait until hardware * returns the correct value, if at any time the register * doesn't become available in time, reg will be 0xffff * which means we return 0xff to the caller. 
*/ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word); rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 1); rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg); if (WAIT_FOR_BBP(rt2x00dev, &reg)) rt2500usb_register_read_lock(rt2x00dev, PHY_CSR7, &reg); } *value = rt2x00_get_field16(reg, PHY_CSR7_DATA); mutex_unlock(&rt2x00dev->csr_mutex); } static void rt2500usb_rf_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u32 value) { u16 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the RF becomes available, afterwards we * can safely write the new data into the register. */ if (WAIT_FOR_RF(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field16(&reg, PHY_CSR9_RF_VALUE, value); rt2500usb_register_write_lock(rt2x00dev, PHY_CSR9, reg); reg = 0; rt2x00_set_field16(&reg, PHY_CSR10_RF_VALUE, value >> 16); rt2x00_set_field16(&reg, PHY_CSR10_RF_NUMBER_OF_BITS, 20); rt2x00_set_field16(&reg, PHY_CSR10_RF_IF_SELECT, 0); rt2x00_set_field16(&reg, PHY_CSR10_RF_BUSY, 1); rt2500usb_register_write_lock(rt2x00dev, PHY_CSR10, reg); rt2x00_rf_write(rt2x00dev, word, value); } mutex_unlock(&rt2x00dev->csr_mutex); } #ifdef CONFIG_RT2X00_LIB_DEBUGFS static void _rt2500usb_register_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u32 *value) { rt2500usb_register_read(rt2x00dev, offset, (u16 *)value); } static void _rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u32 value) { rt2500usb_register_write(rt2x00dev, offset, value); } static const struct rt2x00debug rt2500usb_rt2x00debug = { .owner = THIS_MODULE, .csr = { .read = _rt2500usb_register_read, .write = _rt2500usb_register_write, .flags = RT2X00DEBUGFS_OFFSET, .word_base = CSR_REG_BASE, .word_size = sizeof(u16), .word_count = CSR_REG_SIZE / sizeof(u16), }, .eeprom = { .read = rt2x00_eeprom_read, .write = rt2x00_eeprom_write, .word_base = EEPROM_BASE, .word_size = sizeof(u16), .word_count = EEPROM_SIZE / sizeof(u16), }, .bbp = { .read = rt2500usb_bbp_read, .write = rt2500usb_bbp_write, .word_base = BBP_BASE, .word_size = sizeof(u8), .word_count = BBP_SIZE / sizeof(u8), }, .rf = { .read = rt2x00_rf_read, .write = rt2500usb_rf_write, .word_base = RF_BASE, .word_size = sizeof(u32), .word_count = RF_SIZE / sizeof(u32), }, }; #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev) { u16 reg; rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg); return rt2x00_get_field16(reg, MAC_CSR19_BIT7); } #ifdef CONFIG_RT2X00_LIB_LEDS static void rt2500usb_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); unsigned int enabled = brightness != LED_OFF; u16 reg; rt2500usb_register_read(led->rt2x00dev, MAC_CSR20, &reg); if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC) rt2x00_set_field16(&reg, MAC_CSR20_LINK, enabled); else if (led->type == LED_TYPE_ACTIVITY) rt2x00_set_field16(&reg, MAC_CSR20_ACTIVITY, enabled); rt2500usb_register_write(led->rt2x00dev, MAC_CSR20, reg); } static int rt2500usb_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); u16 reg; rt2500usb_register_read(led->rt2x00dev, MAC_CSR21, &reg); rt2x00_set_field16(&reg, MAC_CSR21_ON_PERIOD, *delay_on); rt2x00_set_field16(&reg, MAC_CSR21_OFF_PERIOD, *delay_off); rt2500usb_register_write(led->rt2x00dev, MAC_CSR21, reg); return 0; } 
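/*
 * Editor's illustrative sketch, not part of the original driver (unused;
 * for illustration only): the read-modify-write idiom that recurs
 * throughout this file for the 16-bit CSR registers. The register and
 * field names (MAC_CSR20, MAC_CSR20_LINK) are the ones already used by
 * the LED handlers above; the helper itself is hypothetical.
 */
static void example_set_link_led(struct rt2x00_dev *rt2x00dev, int on)
{
	u16 reg;

	rt2500usb_register_read(rt2x00dev, MAC_CSR20, &reg);	/* fetch current word over USB */
	rt2x00_set_field16(&reg, MAC_CSR20_LINK, !!on);		/* update only the LINK bit */
	rt2500usb_register_write(rt2x00dev, MAC_CSR20, reg);	/* write the full word back */
}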
static void rt2500usb_init_led(struct rt2x00_dev *rt2x00dev, struct rt2x00_led *led, enum led_type type) { led->rt2x00dev = rt2x00dev; led->type = type; led->led_dev.brightness_set = rt2500usb_brightness_set; led->led_dev.blink_set = rt2500usb_blink_set; led->flags = LED_INITIALIZED; } #endif /* CONFIG_RT2X00_LIB_LEDS */ /* * Configuration handlers. */ /* * rt2500usb does not differentiate between shared and pairwise * keys, so we should use the same function for both key types. */ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { u32 mask; u16 reg; enum cipher curr_cipher; if (crypto->cmd == SET_KEY) { /* * Disallow setting a WEP key with an index other than 0; * it is known not to work on at least some hardware. * SW crypto will be used in that case. */ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || key->cipher == WLAN_CIPHER_SUITE_WEP104) && key->keyidx != 0) return -EOPNOTSUPP; /* * Pairwise key will always be entry 0, but this * could collide with a shared key in the same * position... */ mask = TXRX_CSR0_KEY_ID.bit_mask; rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg); curr_cipher = rt2x00_get_field16(reg, TXRX_CSR0_ALGORITHM); reg &= mask; if (reg && reg == mask) return -ENOSPC; reg = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID); key->hw_key_idx += reg ? ffz(reg) : 0; /* * Hardware requires that all keys use the same cipher * (e.g. TKIP-only, AES-only, but not TKIP+AES). * If this is not the first key, compare the cipher with the * first one and fall back to SW crypto if not the same. */ if (key->hw_key_idx > 0 && crypto->cipher != curr_cipher) return -EOPNOTSUPP; rt2500usb_register_multiwrite(rt2x00dev, KEY_ENTRY(key->hw_key_idx), crypto->key, sizeof(crypto->key)); /* * The driver does not support the IV/EIV generation * in hardware. However, it demands that the data be provided * both separately and inside the frame. * We already provided the CONFIG_CRYPTO_COPY_IV to rt2x00lib * to ensure rt2x00lib will not strip the data from the * frame after the copy; now we must tell mac80211 * to generate the IV/EIV data. */ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; } /* * TXRX_CSR0_KEY_ID contains only single-bit fields to indicate * a particular key is valid. */ rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg); rt2x00_set_field16(&reg, TXRX_CSR0_ALGORITHM, crypto->cipher); rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER); mask = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID); if (crypto->cmd == SET_KEY) mask |= 1 << key->hw_key_idx; else if (crypto->cmd == DISABLE_KEY) mask &= ~(1 << key->hw_key_idx); rt2x00_set_field16(&reg, TXRX_CSR0_KEY_ID, mask); rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg); return 0; } static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev, const unsigned int filter_flags) { u16 reg; /* * Start configuration steps. * Note that frames with a version error will always be dropped * and broadcast frames will always be accepted, since * there is no filter for them at this time. 
*/ rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CRC, !(filter_flags & FIF_FCSFAIL)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_PHYSICAL, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL, !(filter_flags & FIF_CONTROL)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME, !(filter_flags & FIF_PROMISC_IN_BSS)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS, !(filter_flags & FIF_PROMISC_IN_BSS) && !rt2x00dev->intf_ap_count); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST, !(filter_flags & FIF_ALLMULTI)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_BROADCAST, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); } static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, struct rt2x00intf_conf *conf, const unsigned int flags) { unsigned int bcn_preload; u16 reg; if (flags & CONFIG_UPDATE_TYPE) { /* * Enable beacon config */ bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20); rt2500usb_register_read(rt2x00dev, TXRX_CSR20, &reg); rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET, bcn_preload >> 6); rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW, 2 * (conf->type != NL80211_IFTYPE_STATION)); rt2500usb_register_write(rt2x00dev, TXRX_CSR20, reg); /* * Enable synchronisation. */ rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg); rt2x00_set_field16(&reg, TXRX_CSR18_OFFSET, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg); rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); } if (flags & CONFIG_UPDATE_MAC) rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR2, conf->mac, (3 * sizeof(__le16))); if (flags & CONFIG_UPDATE_BSSID) rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR5, conf->bssid, (3 * sizeof(__le16))); } static void rt2500usb_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp, u32 changed) { u16 reg; if (changed & BSS_CHANGED_ERP_PREAMBLE) { rt2500usb_register_read(rt2x00dev, TXRX_CSR10, &reg); rt2x00_set_field16(&reg, TXRX_CSR10_AUTORESPOND_PREAMBLE, !!erp->short_preamble); rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg); } if (changed & BSS_CHANGED_BASIC_RATES) rt2500usb_register_write(rt2x00dev, TXRX_CSR11, erp->basic_rates); if (changed & BSS_CHANGED_BEACON_INT) { rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg); rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL, erp->beacon_int * 4); rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); } if (changed & BSS_CHANGED_ERP_SLOT) { rt2500usb_register_write(rt2x00dev, MAC_CSR10, erp->slot_time); rt2500usb_register_write(rt2x00dev, MAC_CSR11, erp->sifs); rt2500usb_register_write(rt2x00dev, MAC_CSR12, erp->eifs); } } static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { u8 r2; u8 r14; u16 csr5; u16 csr6; /* * We should never come here because rt2x00lib is supposed * to catch this and send us the correct antenna explicitly. */ BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || ant->tx == ANTENNA_SW_DIVERSITY); rt2500usb_bbp_read(rt2x00dev, 2, &r2); rt2500usb_bbp_read(rt2x00dev, 14, &r14); rt2500usb_register_read(rt2x00dev, PHY_CSR5, &csr5); rt2500usb_register_read(rt2x00dev, PHY_CSR6, &csr6); /* * Configure the TX antenna. 
*/ switch (ant->tx) { case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 1); rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 1); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 1); break; case ANTENNA_A: rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 0); rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 0); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 0); break; case ANTENNA_B: default: rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2); rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 2); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 2); break; } /* * Configure the RX antenna. */ switch (ant->rx) { case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 1); break; case ANTENNA_A: rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0); break; case ANTENNA_B: default: rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2); break; } /* * RT2525E and RT5222 need to flip TX I/Q */ if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) { rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1); /* * RT2525E does not need RX I/Q Flip. */ if (rt2x00_rf(rt2x00dev, RF2525E)) rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); } else { rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 0); } rt2500usb_bbp_write(rt2x00dev, 2, r2); rt2500usb_bbp_write(rt2x00dev, 14, r14); rt2500usb_register_write(rt2x00dev, PHY_CSR5, csr5); rt2500usb_register_write(rt2x00dev, PHY_CSR6, csr6); } static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev, struct rf_channel *rf, const int txpower) { /* * Set TXpower. */ rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); /* * For RT2525E we should first set the channel to half band higher. */ if (rt2x00_rf(rt2x00dev, RF2525E)) { static const u32 vals[] = { 0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2, 0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba, 0x000008ba, 0x000008be, 0x000008b7, 0x00000902, 0x00000902, 0x00000906 }; rt2500usb_rf_write(rt2x00dev, 2, vals[rf->channel - 1]); if (rf->rf4) rt2500usb_rf_write(rt2x00dev, 4, rf->rf4); } rt2500usb_rf_write(rt2x00dev, 1, rf->rf1); rt2500usb_rf_write(rt2x00dev, 2, rf->rf2); rt2500usb_rf_write(rt2x00dev, 3, rf->rf3); if (rf->rf4) rt2500usb_rf_write(rt2x00dev, 4, rf->rf4); } static void rt2500usb_config_txpower(struct rt2x00_dev *rt2x00dev, const int txpower) { u32 rf3; rt2x00_rf_read(rt2x00dev, 3, &rf3); rt2x00_set_field32(&rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); rt2500usb_rf_write(rt2x00dev, 3, rf3); } static void rt2500usb_config_ps(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { enum dev_state state = (libconf->conf->flags & IEEE80211_CONF_PS) ? 
STATE_SLEEP : STATE_AWAKE; u16 reg; if (state == STATE_SLEEP) { rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg); rt2x00_set_field16(&reg, MAC_CSR18_DELAY_AFTER_BEACON, rt2x00dev->beacon_int - 20); rt2x00_set_field16(&reg, MAC_CSR18_BEACONS_BEFORE_WAKEUP, libconf->conf->listen_interval - 1); /* We must first disable autowake before it can be enabled */ rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 1); rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); } else { rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg); rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); } rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); } static void rt2500usb_config(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf, const unsigned int flags) { if (flags & IEEE80211_CONF_CHANGE_CHANNEL) rt2500usb_config_channel(rt2x00dev, &libconf->rf, libconf->conf->power_level); if ((flags & IEEE80211_CONF_CHANGE_POWER) && !(flags & IEEE80211_CONF_CHANGE_CHANNEL)) rt2500usb_config_txpower(rt2x00dev, libconf->conf->power_level); if (flags & IEEE80211_CONF_CHANGE_PS) rt2500usb_config_ps(rt2x00dev, libconf); } /* * Link tuning */ static void rt2500usb_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { u16 reg; /* * Update FCS error count from register. */ rt2500usb_register_read(rt2x00dev, STA_CSR0, &reg); qual->rx_failed = rt2x00_get_field16(reg, STA_CSR0_FCS_ERROR); /* * Update False CCA count from register. */ rt2500usb_register_read(rt2x00dev, STA_CSR3, &reg); qual->false_cca = rt2x00_get_field16(reg, STA_CSR3_FALSE_CCA_ERROR); } static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { u16 eeprom; u16 value; rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &eeprom); value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R24_LOW); rt2500usb_bbp_write(rt2x00dev, 24, value); rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &eeprom); value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R25_LOW); rt2500usb_bbp_write(rt2x00dev, 25, value); rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &eeprom); value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R61_LOW); rt2500usb_bbp_write(rt2x00dev, 61, value); rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &eeprom); value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_VGCUPPER); rt2500usb_bbp_write(rt2x00dev, 17, value); qual->vgc_level = value; } /* * Queue handlers. 
*/ static void rt2500usb_start_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u16 reg; switch (queue->qid) { case QID_RX: rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg); rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); break; case QID_BEACON: rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg); rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1); rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1); rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); break; default: break; } } static void rt2500usb_stop_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u16 reg; switch (queue->qid) { case QID_RX: rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg); rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); break; case QID_BEACON: rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg); rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0); rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0); rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); break; default: break; } } /* * Initialization functions. */ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev) { u16 reg; rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0x0001, USB_MODE_TEST, REGISTER_TIMEOUT); rt2x00usb_vendor_request_sw(rt2x00dev, USB_SINGLE_WRITE, 0x0308, 0x00f0, REGISTER_TIMEOUT); rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg); rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x1111); rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x1e11); rt2500usb_register_read(rt2x00dev, MAC_CSR1, &reg); rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 1); rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 1); rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); rt2500usb_register_read(rt2x00dev, MAC_CSR1, &reg); rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 0); rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 0); rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); rt2500usb_register_read(rt2x00dev, TXRX_CSR5, &reg); rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID0, 13); rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID0_VALID, 1); rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID1, 12); rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID1_VALID, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR5, reg); rt2500usb_register_read(rt2x00dev, TXRX_CSR6, &reg); rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID0, 10); rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID0_VALID, 1); rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID1, 11); rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID1_VALID, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR6, reg); rt2500usb_register_read(rt2x00dev, TXRX_CSR7, &reg); rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID0, 7); rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID0_VALID, 1); rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID1, 6); rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID1_VALID, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR7, reg); rt2500usb_register_read(rt2x00dev, TXRX_CSR8, &reg); rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID0, 5); rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID0_VALID, 1); rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID1, 0); rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID1_VALID, 0); rt2500usb_register_write(rt2x00dev, 
TXRX_CSR8, reg); rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg); rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0); rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, 0); rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0); rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); rt2500usb_register_write(rt2x00dev, TXRX_CSR21, 0xe78f); rt2500usb_register_write(rt2x00dev, MAC_CSR9, 0xff1d); if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) return -EBUSY; rt2500usb_register_read(rt2x00dev, MAC_CSR1, &reg); rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 0); rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 0); rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1); rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); if (rt2x00_rev(rt2x00dev) >= RT2570_VERSION_C) { rt2500usb_register_read(rt2x00dev, PHY_CSR2, &reg); rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0); } else { reg = 0; rt2x00_set_field16(&reg, PHY_CSR2_LNA, 1); rt2x00_set_field16(&reg, PHY_CSR2_LNA_MODE, 3); } rt2500usb_register_write(rt2x00dev, PHY_CSR2, reg); rt2500usb_register_write(rt2x00dev, MAC_CSR11, 0x0002); rt2500usb_register_write(rt2x00dev, MAC_CSR22, 0x0053); rt2500usb_register_write(rt2x00dev, MAC_CSR15, 0x01ee); rt2500usb_register_write(rt2x00dev, MAC_CSR16, 0x0000); rt2500usb_register_read(rt2x00dev, MAC_CSR8, &reg); rt2x00_set_field16(&reg, MAC_CSR8_MAX_FRAME_UNIT, rt2x00dev->rx->data_size); rt2500usb_register_write(rt2x00dev, MAC_CSR8, reg); rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg); rt2x00_set_field16(&reg, TXRX_CSR0_ALGORITHM, CIPHER_NONE); rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER); rt2x00_set_field16(&reg, TXRX_CSR0_KEY_ID, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg); rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg); rt2x00_set_field16(&reg, MAC_CSR18_DELAY_AFTER_BEACON, 90); rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); rt2500usb_register_read(rt2x00dev, PHY_CSR4, &reg); rt2x00_set_field16(&reg, PHY_CSR4_LOW_RF_LE, 1); rt2500usb_register_write(rt2x00dev, PHY_CSR4, reg); rt2500usb_register_read(rt2x00dev, TXRX_CSR1, &reg); rt2x00_set_field16(&reg, TXRX_CSR1_AUTO_SEQUENCE, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR1, reg); return 0; } static int rt2500usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) { unsigned int i; u8 value; for (i = 0; i < REGISTER_BUSY_COUNT; i++) { rt2500usb_bbp_read(rt2x00dev, 0, &value); if ((value != 0xff) && (value != 0x00)) return 0; udelay(REGISTER_BUSY_DELAY); } ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); return -EACCES; } static int rt2500usb_init_bbp(struct rt2x00_dev *rt2x00dev) { unsigned int i; u16 eeprom; u8 value; u8 reg_id; if (unlikely(rt2500usb_wait_bbp_ready(rt2x00dev))) return -EACCES; rt2500usb_bbp_write(rt2x00dev, 3, 0x02); rt2500usb_bbp_write(rt2x00dev, 4, 0x19); rt2500usb_bbp_write(rt2x00dev, 14, 0x1c); rt2500usb_bbp_write(rt2x00dev, 15, 0x30); rt2500usb_bbp_write(rt2x00dev, 16, 0xac); rt2500usb_bbp_write(rt2x00dev, 18, 0x18); rt2500usb_bbp_write(rt2x00dev, 19, 0xff); rt2500usb_bbp_write(rt2x00dev, 20, 0x1e); rt2500usb_bbp_write(rt2x00dev, 21, 0x08); rt2500usb_bbp_write(rt2x00dev, 22, 0x08); rt2500usb_bbp_write(rt2x00dev, 23, 0x08); rt2500usb_bbp_write(rt2x00dev, 24, 0x80); rt2500usb_bbp_write(rt2x00dev, 25, 0x50); rt2500usb_bbp_write(rt2x00dev, 26, 0x08); rt2500usb_bbp_write(rt2x00dev, 27, 0x23); rt2500usb_bbp_write(rt2x00dev, 30, 0x10); rt2500usb_bbp_write(rt2x00dev, 31, 0x2b); rt2500usb_bbp_write(rt2x00dev, 32, 0xb9); rt2500usb_bbp_write(rt2x00dev, 
34, 0x12); rt2500usb_bbp_write(rt2x00dev, 35, 0x50); rt2500usb_bbp_write(rt2x00dev, 39, 0xc4); rt2500usb_bbp_write(rt2x00dev, 40, 0x02); rt2500usb_bbp_write(rt2x00dev, 41, 0x60); rt2500usb_bbp_write(rt2x00dev, 53, 0x10); rt2500usb_bbp_write(rt2x00dev, 54, 0x18); rt2500usb_bbp_write(rt2x00dev, 56, 0x08); rt2500usb_bbp_write(rt2x00dev, 57, 0x10); rt2500usb_bbp_write(rt2x00dev, 58, 0x08); rt2500usb_bbp_write(rt2x00dev, 61, 0x60); rt2500usb_bbp_write(rt2x00dev, 62, 0x10); rt2500usb_bbp_write(rt2x00dev, 75, 0xff); for (i = 0; i < EEPROM_BBP_SIZE; i++) { rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); if (eeprom != 0xffff && eeprom != 0x0000) { reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); rt2500usb_bbp_write(rt2x00dev, reg_id, value); } } return 0; } /* * Device state switch handlers. */ static int rt2500usb_enable_radio(struct rt2x00_dev *rt2x00dev) { /* * Initialize all registers. */ if (unlikely(rt2500usb_init_registers(rt2x00dev) || rt2500usb_init_bbp(rt2x00dev))) return -EIO; return 0; } static void rt2500usb_disable_radio(struct rt2x00_dev *rt2x00dev) { rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x2121); rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x2121); /* * Disable synchronisation. */ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, 0); rt2x00usb_disable_radio(rt2x00dev); } static int rt2500usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { u16 reg; u16 reg2; unsigned int i; char put_to_sleep; char bbp_state; char rf_state; put_to_sleep = (state != STATE_AWAKE); reg = 0; rt2x00_set_field16(&reg, MAC_CSR17_BBP_DESIRE_STATE, state); rt2x00_set_field16(&reg, MAC_CSR17_RF_DESIRE_STATE, state); rt2x00_set_field16(&reg, MAC_CSR17_PUT_TO_SLEEP, put_to_sleep); rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); rt2x00_set_field16(&reg, MAC_CSR17_SET_STATE, 1); rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); /* * Device is not guaranteed to be in the requested state yet. * We must wait until the register indicates that the * device has entered the correct state. */ for (i = 0; i < REGISTER_BUSY_COUNT; i++) { rt2500usb_register_read(rt2x00dev, MAC_CSR17, &reg2); bbp_state = rt2x00_get_field16(reg2, MAC_CSR17_BBP_CURR_STATE); rf_state = rt2x00_get_field16(reg2, MAC_CSR17_RF_CURR_STATE); if (bbp_state == state && rf_state == state) return 0; rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); msleep(30); } return -EBUSY; } static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { int retval = 0; switch (state) { case STATE_RADIO_ON: retval = rt2500usb_enable_radio(rt2x00dev); break; case STATE_RADIO_OFF: rt2500usb_disable_radio(rt2x00dev); break; case STATE_RADIO_IRQ_ON: case STATE_RADIO_IRQ_OFF: /* No support, but no error either */ break; case STATE_DEEP_SLEEP: case STATE_SLEEP: case STATE_STANDBY: case STATE_AWAKE: retval = rt2500usb_set_state(rt2x00dev, state); break; default: retval = -ENOTSUPP; break; } if (unlikely(retval)) ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n", state, retval); return retval; } /* * TX descriptor initialization */ static void rt2500usb_write_tx_desc(struct queue_entry *entry, struct txentry_desc *txdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); __le32 *txd = (__le32 *) entry->skb->data; u32 word; /* * Start writing the descriptor words. 
*/ rt2x00_desc_read(txd, 0, &word); rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit); rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_ACK, test_bit(ENTRY_TXD_ACK, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_OFDM, (txdesc->rate_mode == RATE_MODE_OFDM)); rt2x00_set_field32(&word, TXD_W0_NEW_SEQ, test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length); rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher); rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx); rt2x00_desc_write(txd, 0, word); rt2x00_desc_read(txd, 1, &word); rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset); rt2x00_set_field32(&word, TXD_W1_AIFS, entry->queue->aifs); rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min); rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max); rt2x00_desc_write(txd, 1, word); rt2x00_desc_read(txd, 2, &word); rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal); rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service); rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->u.plcp.length_low); rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->u.plcp.length_high); rt2x00_desc_write(txd, 2, word); if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { _rt2x00_desc_write(txd, 3, skbdesc->iv[0]); _rt2x00_desc_write(txd, 4, skbdesc->iv[1]); } /* * Register descriptor details in skb frame descriptor. */ skbdesc->flags |= SKBDESC_DESC_IN_SKB; skbdesc->desc = txd; skbdesc->desc_len = TXD_DESC_SIZE; } /* * TX data initialization */ static void rt2500usb_beacondone(struct urb *urb); static void rt2500usb_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; int pipe = usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint); int length; u16 reg, reg0; /* * Disable beaconing while we are reloading the beacon data, * otherwise we might be sending out invalid data. */ rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg); rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); /* * Add space for the descriptor in front of the skb. */ skb_push(entry->skb, TXD_DESC_SIZE); memset(entry->skb->data, 0, TXD_DESC_SIZE); /* * Write the TX descriptor for the beacon. */ rt2500usb_write_tx_desc(entry, txdesc); /* * Dump beacon to userspace through debugfs. */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); /* * USB devices cannot blindly pass the skb->len as the * length of the data to usb_fill_bulk_urb. Pass the skb * to the driver to determine what the length should be. */ length = rt2x00dev->ops->lib->get_tx_data_len(entry); usb_fill_bulk_urb(bcn_priv->urb, usb_dev, pipe, entry->skb->data, length, rt2500usb_beacondone, entry); /* * Second, we need to create the guardian byte. * We only need a single byte, so let's recycle * the 'flags' field we are not using for beacons. */ bcn_priv->guardian_data = 0; usb_fill_bulk_urb(bcn_priv->guardian_urb, usb_dev, pipe, &bcn_priv->guardian_data, 1, rt2500usb_beacondone, entry); /* * Send out the guardian byte. 
*/ usb_submit_urb(bcn_priv->guardian_urb, GFP_ATOMIC); /* * Enable beaconing again. */ rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1); rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1); reg0 = reg; rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1); /* * Beacon generation will fail initially. * To prevent this we need to change the TXRX_CSR19 * register several times (reg0 is the same as reg * except for TXRX_CSR19_BEACON_GEN, which is 0 in reg0 * and 1 in reg). */ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); } static int rt2500usb_get_tx_data_len(struct queue_entry *entry) { int length; /* * The length _must_ be a multiple of 2, * but it must _not_ be a multiple of the USB packet size. */ length = roundup(entry->skb->len, 2); length += (2 * !(length % entry->queue->usb_maxpacket)); return length; } /* * RX control handlers */ static void rt2500usb_fill_rxdone(struct queue_entry *entry, struct rxdone_entry_desc *rxdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct queue_entry_priv_usb *entry_priv = entry->priv_data; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); __le32 *rxd = (__le32 *)(entry->skb->data + (entry_priv->urb->actual_length - entry->queue->desc_size)); u32 word0; u32 word1; /* * Copy descriptor to the skbdesc->desc buffer, making it safe from moving of * frame data in rt2x00usb. */ memcpy(skbdesc->desc, rxd, skbdesc->desc_len); rxd = (__le32 *)skbdesc->desc; /* * It is now safe to read the descriptor on all architectures. */ rt2x00_desc_read(rxd, 0, &word0); rt2x00_desc_read(rxd, 1, &word1); if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC; rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER); if (rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR)) rxdesc->cipher_status = RX_CRYPTO_FAIL_KEY; if (rxdesc->cipher != CIPHER_NONE) { _rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]); _rt2x00_desc_read(rxd, 3, &rxdesc->iv[1]); rxdesc->dev_flags |= RXDONE_CRYPTO_IV; /* ICV is located at the end of frame */ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) rxdesc->flags |= RX_FLAG_DECRYPTED; else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) rxdesc->flags |= RX_FLAG_MMIC_ERROR; } /* * Obtain the status about this packet. * When frame was received with an OFDM bitrate, * the signal is the PLCP value. If it was received with * a CCK bitrate the signal is the rate in 100kbit/s. */ rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); rxdesc->rssi = rt2x00_get_field32(word1, RXD_W1_RSSI) - rt2x00dev->rssi_offset; rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); if (rt2x00_get_field32(word0, RXD_W0_OFDM)) rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; else rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE; if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) rxdesc->dev_flags |= RXDONE_MY_BSS; /* * Adjust the skb memory window to the frame boundaries. */ skb_trim(entry->skb, rxdesc->size); } /* * Interrupt functions. 
*/ static void rt2500usb_beacondone(struct urb *urb) { struct queue_entry *entry = (struct queue_entry *)urb->context; struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) return; /* * Check if this was the guardian beacon, * if that was the case we need to send the real beacon now. * Otherwise we should free the sk_buffer, the device * should be doing the rest of the work now. */ if (bcn_priv->guardian_urb == urb) { usb_submit_urb(bcn_priv->urb, GFP_ATOMIC); } else if (bcn_priv->urb == urb) { dev_kfree_skb(entry->skb); entry->skb = NULL; } } /* * Device probe functions. */ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) { u16 word; u8 *mac; u8 bbp; rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE); /* * Start validation of the data that has been read. */ mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); if (!is_valid_ether_addr(mac)) { random_ether_addr(mac); EEPROM(rt2x00dev, "MAC: %pM\n", mac); } rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, ANTENNA_SW_DIVERSITY); rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, ANTENNA_SW_DIVERSITY); rt2x00_set_field16(&word, EEPROM_ANTENNA_LED_MODE, LED_MODE_DEFAULT); rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2522); rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); } rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0); rt2x00_set_field16(&word, EEPROM_NIC_CCK_TX_POWER, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); } rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &word); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI, DEFAULT_RSSI_OFFSET); rt2x00_eeprom_write(rt2x00dev, EEPROM_CALIBRATE_OFFSET, word); EEPROM(rt2x00dev, "Calibrate offset: 0x%04x\n", word); } rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE, &word); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_THRESHOLD, 45); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE, word); EEPROM(rt2x00dev, "BBPtune: 0x%04x\n", word); } /* * Switch lower vgc bound to current BBP R17 value, * lower the value a bit for better quality. 
*/ rt2500usb_bbp_read(rt2x00dev, 17, &bbp); bbp -= 6; rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &word); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCUPPER, 0x40); rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word); EEPROM(rt2x00dev, "BBPtune vgc: 0x%04x\n", word); } else { rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word); } rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &word); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_LOW, 0x48); rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word); EEPROM(rt2x00dev, "BBPtune r17: 0x%04x\n", word); } rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &word); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_LOW, 0x40); rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_HIGH, 0x80); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R24, word); EEPROM(rt2x00dev, "BBPtune r24: 0x%04x\n", word); } rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &word); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_LOW, 0x40); rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_HIGH, 0x50); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R25, word); EEPROM(rt2x00dev, "BBPtune r25: 0x%04x\n", word); } rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &word); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_LOW, 0x60); rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_HIGH, 0x6d); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R61, word); EEPROM(rt2x00dev, "BBPtune r61: 0x%04x\n", word); } return 0; } static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev) { u16 reg; u16 value; u16 eeprom; /* * Read EEPROM word for configuration. */ rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); /* * Identify RF chipset. */ value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); rt2500usb_register_read(rt2x00dev, MAC_CSR0, &reg); rt2x00_set_chip(rt2x00dev, RT2570, value, reg); if (((reg & 0xfff0) != 0) || ((reg & 0x0000000f) == 0)) { ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); return -ENODEV; } if (!rt2x00_rf(rt2x00dev, RF2522) && !rt2x00_rf(rt2x00dev, RF2523) && !rt2x00_rf(rt2x00dev, RF2524) && !rt2x00_rf(rt2x00dev, RF2525) && !rt2x00_rf(rt2x00dev, RF2525E) && !rt2x00_rf(rt2x00dev, RF5222)) { ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); return -ENODEV; } /* * Identify default antenna configuration. */ rt2x00dev->default_ant.tx = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); rt2x00dev->default_ant.rx = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); /* * When the eeprom indicates SW_DIVERSITY use HW_DIVERSITY instead. * I am not 100% sure about this, but the legacy drivers do not * indicate antenna swapping in software is required when * diversity is enabled. */ if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY) rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY; if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY) rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY; /* * Store led mode, for correct led behaviour. 
*/ #ifdef CONFIG_RT2X00_LIB_LEDS value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); if (value == LED_MODE_TXRX_ACTIVITY || value == LED_MODE_DEFAULT || value == LED_MODE_ASUS) rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_ACTIVITY); #endif /* CONFIG_RT2X00_LIB_LEDS */ /* * Detect if this device has a hardware-controlled radio. */ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags); /* * Read the RSSI <-> dBm offset information. */ rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &eeprom); rt2x00dev->rssi_offset = rt2x00_get_field16(eeprom, EEPROM_CALIBRATE_OFFSET_RSSI); return 0; } /* * RF value list for RF2522 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2522[] = { { 1, 0x00002050, 0x000c1fda, 0x00000101, 0 }, { 2, 0x00002050, 0x000c1fee, 0x00000101, 0 }, { 3, 0x00002050, 0x000c2002, 0x00000101, 0 }, { 4, 0x00002050, 0x000c2016, 0x00000101, 0 }, { 5, 0x00002050, 0x000c202a, 0x00000101, 0 }, { 6, 0x00002050, 0x000c203e, 0x00000101, 0 }, { 7, 0x00002050, 0x000c2052, 0x00000101, 0 }, { 8, 0x00002050, 0x000c2066, 0x00000101, 0 }, { 9, 0x00002050, 0x000c207a, 0x00000101, 0 }, { 10, 0x00002050, 0x000c208e, 0x00000101, 0 }, { 11, 0x00002050, 0x000c20a2, 0x00000101, 0 }, { 12, 0x00002050, 0x000c20b6, 0x00000101, 0 }, { 13, 0x00002050, 0x000c20ca, 0x00000101, 0 }, { 14, 0x00002050, 0x000c20fa, 0x00000101, 0 }, }; /* * RF value list for RF2523 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2523[] = { { 1, 0x00022010, 0x00000c9e, 0x000e0111, 0x00000a1b }, { 2, 0x00022010, 0x00000ca2, 0x000e0111, 0x00000a1b }, { 3, 0x00022010, 0x00000ca6, 0x000e0111, 0x00000a1b }, { 4, 0x00022010, 0x00000caa, 0x000e0111, 0x00000a1b }, { 5, 0x00022010, 0x00000cae, 0x000e0111, 0x00000a1b }, { 6, 0x00022010, 0x00000cb2, 0x000e0111, 0x00000a1b }, { 7, 0x00022010, 0x00000cb6, 0x000e0111, 0x00000a1b }, { 8, 0x00022010, 0x00000cba, 0x000e0111, 0x00000a1b }, { 9, 0x00022010, 0x00000cbe, 0x000e0111, 0x00000a1b }, { 10, 0x00022010, 0x00000d02, 0x000e0111, 0x00000a1b }, { 11, 0x00022010, 0x00000d06, 0x000e0111, 0x00000a1b }, { 12, 0x00022010, 0x00000d0a, 0x000e0111, 0x00000a1b }, { 13, 0x00022010, 0x00000d0e, 0x000e0111, 0x00000a1b }, { 14, 0x00022010, 0x00000d1a, 0x000e0111, 0x00000a03 }, }; /* * RF value list for RF2524 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2524[] = { { 1, 0x00032020, 0x00000c9e, 0x00000101, 0x00000a1b }, { 2, 0x00032020, 0x00000ca2, 0x00000101, 0x00000a1b }, { 3, 0x00032020, 0x00000ca6, 0x00000101, 0x00000a1b }, { 4, 0x00032020, 0x00000caa, 0x00000101, 0x00000a1b }, { 5, 0x00032020, 0x00000cae, 0x00000101, 0x00000a1b }, { 6, 0x00032020, 0x00000cb2, 0x00000101, 0x00000a1b }, { 7, 0x00032020, 0x00000cb6, 0x00000101, 0x00000a1b }, { 8, 0x00032020, 0x00000cba, 0x00000101, 0x00000a1b }, { 9, 0x00032020, 0x00000cbe, 0x00000101, 0x00000a1b }, { 10, 0x00032020, 0x00000d02, 0x00000101, 0x00000a1b }, { 11, 0x00032020, 0x00000d06, 0x00000101, 0x00000a1b }, { 12, 0x00032020, 0x00000d0a, 0x00000101, 0x00000a1b }, { 13, 0x00032020, 0x00000d0e, 0x00000101, 0x00000a1b }, { 14, 0x00032020, 0x00000d1a, 0x00000101, 0x00000a03 }, }; /* * RF value list for RF2525 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2525[] = { { 1, 0x00022020, 0x00080c9e, 0x00060111, 0x00000a1b }, { 2, 0x00022020, 0x00080ca2, 0x00060111, 0x00000a1b }, { 3, 0x00022020, 0x00080ca6, 0x00060111, 0x00000a1b }, 
{ 4, 0x00022020, 0x00080caa, 0x00060111, 0x00000a1b }, { 5, 0x00022020, 0x00080cae, 0x00060111, 0x00000a1b }, { 6, 0x00022020, 0x00080cb2, 0x00060111, 0x00000a1b }, { 7, 0x00022020, 0x00080cb6, 0x00060111, 0x00000a1b }, { 8, 0x00022020, 0x00080cba, 0x00060111, 0x00000a1b }, { 9, 0x00022020, 0x00080cbe, 0x00060111, 0x00000a1b }, { 10, 0x00022020, 0x00080d02, 0x00060111, 0x00000a1b }, { 11, 0x00022020, 0x00080d06, 0x00060111, 0x00000a1b }, { 12, 0x00022020, 0x00080d0a, 0x00060111, 0x00000a1b }, { 13, 0x00022020, 0x00080d0e, 0x00060111, 0x00000a1b }, { 14, 0x00022020, 0x00080d1a, 0x00060111, 0x00000a03 }, }; /* * RF value list for RF2525e * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2525e[] = { { 1, 0x00022010, 0x0000089a, 0x00060111, 0x00000e1b }, { 2, 0x00022010, 0x0000089e, 0x00060111, 0x00000e07 }, { 3, 0x00022010, 0x0000089e, 0x00060111, 0x00000e1b }, { 4, 0x00022010, 0x000008a2, 0x00060111, 0x00000e07 }, { 5, 0x00022010, 0x000008a2, 0x00060111, 0x00000e1b }, { 6, 0x00022010, 0x000008a6, 0x00060111, 0x00000e07 }, { 7, 0x00022010, 0x000008a6, 0x00060111, 0x00000e1b }, { 8, 0x00022010, 0x000008aa, 0x00060111, 0x00000e07 }, { 9, 0x00022010, 0x000008aa, 0x00060111, 0x00000e1b }, { 10, 0x00022010, 0x000008ae, 0x00060111, 0x00000e07 }, { 11, 0x00022010, 0x000008ae, 0x00060111, 0x00000e1b }, { 12, 0x00022010, 0x000008b2, 0x00060111, 0x00000e07 }, { 13, 0x00022010, 0x000008b2, 0x00060111, 0x00000e1b }, { 14, 0x00022010, 0x000008b6, 0x00060111, 0x00000e23 }, }; /* * RF value list for RF5222 * Supports: 2.4 GHz & 5.2 GHz */ static const struct rf_channel rf_vals_5222[] = { { 1, 0x00022020, 0x00001136, 0x00000101, 0x00000a0b }, { 2, 0x00022020, 0x0000113a, 0x00000101, 0x00000a0b }, { 3, 0x00022020, 0x0000113e, 0x00000101, 0x00000a0b }, { 4, 0x00022020, 0x00001182, 0x00000101, 0x00000a0b }, { 5, 0x00022020, 0x00001186, 0x00000101, 0x00000a0b }, { 6, 0x00022020, 0x0000118a, 0x00000101, 0x00000a0b }, { 7, 0x00022020, 0x0000118e, 0x00000101, 0x00000a0b }, { 8, 0x00022020, 0x00001192, 0x00000101, 0x00000a0b }, { 9, 0x00022020, 0x00001196, 0x00000101, 0x00000a0b }, { 10, 0x00022020, 0x0000119a, 0x00000101, 0x00000a0b }, { 11, 0x00022020, 0x0000119e, 0x00000101, 0x00000a0b }, { 12, 0x00022020, 0x000011a2, 0x00000101, 0x00000a0b }, { 13, 0x00022020, 0x000011a6, 0x00000101, 0x00000a0b }, { 14, 0x00022020, 0x000011ae, 0x00000101, 0x00000a1b }, /* 802.11 UNI / HyperLan 2 */ { 36, 0x00022010, 0x00018896, 0x00000101, 0x00000a1f }, { 40, 0x00022010, 0x0001889a, 0x00000101, 0x00000a1f }, { 44, 0x00022010, 0x0001889e, 0x00000101, 0x00000a1f }, { 48, 0x00022010, 0x000188a2, 0x00000101, 0x00000a1f }, { 52, 0x00022010, 0x000188a6, 0x00000101, 0x00000a1f }, { 56, 0x00022010, 0x000188aa, 0x00000101, 0x00000a1f }, { 60, 0x00022010, 0x000188ae, 0x00000101, 0x00000a1f }, { 64, 0x00022010, 0x000188b2, 0x00000101, 0x00000a1f }, /* 802.11 HyperLan 2 */ { 100, 0x00022010, 0x00008802, 0x00000101, 0x00000a0f }, { 104, 0x00022010, 0x00008806, 0x00000101, 0x00000a0f }, { 108, 0x00022010, 0x0000880a, 0x00000101, 0x00000a0f }, { 112, 0x00022010, 0x0000880e, 0x00000101, 0x00000a0f }, { 116, 0x00022010, 0x00008812, 0x00000101, 0x00000a0f }, { 120, 0x00022010, 0x00008816, 0x00000101, 0x00000a0f }, { 124, 0x00022010, 0x0000881a, 0x00000101, 0x00000a0f }, { 128, 0x00022010, 0x0000881e, 0x00000101, 0x00000a0f }, { 132, 0x00022010, 0x00008822, 0x00000101, 0x00000a0f }, { 136, 0x00022010, 0x00008826, 0x00000101, 0x00000a0f }, /* 802.11 UNII */ { 140, 0x00022010, 0x0000882a, 0x00000101, 0x00000a0f }, { 149, 0x00022020, 0x000090a6, 0x00000101, 0x00000a07 }, { 153, 0x00022020, 0x000090ae, 0x00000101, 0x00000a07 }, { 157, 0x00022020, 0x000090b6, 0x00000101, 0x00000a07 }, { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 }, }; 
static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; char *tx_power; unsigned int i; /* * Initialize all hw fields. * * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING unless we are * capable of sending the buffered frames out after the DTIM * transmission using rt2x00lib_beacondone. This will send out * multicast and broadcast traffic immediately instead of buffering it * indefinitely and thus dropping it after some time. */ rt2x00dev->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_PS_NULLFUNC_STACK; SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0)); /* * Initialize hw_mode information. */ spec->supported_bands = SUPPORT_BAND_2GHZ; spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; if (rt2x00_rf(rt2x00dev, RF2522)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); spec->channels = rf_vals_bg_2522; } else if (rt2x00_rf(rt2x00dev, RF2523)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523); spec->channels = rf_vals_bg_2523; } else if (rt2x00_rf(rt2x00dev, RF2524)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524); spec->channels = rf_vals_bg_2524; } else if (rt2x00_rf(rt2x00dev, RF2525)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525); spec->channels = rf_vals_bg_2525; } else if (rt2x00_rf(rt2x00dev, RF2525E)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); spec->channels = rf_vals_bg_2525e; } else if (rt2x00_rf(rt2x00dev, RF5222)) { spec->supported_bands |= SUPPORT_BAND_5GHZ; spec->num_channels = ARRAY_SIZE(rf_vals_5222); spec->channels = rf_vals_5222; } /* * Create channel information array */ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; spec->channels_info = info; tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); for (i = 0; i < 14; i++) { info[i].max_power = MAX_TXPOWER; info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); } if (spec->num_channels > 14) { for (i = 14; i < spec->num_channels; i++) { info[i].max_power = MAX_TXPOWER; info[i].default_power1 = DEFAULT_TXPOWER; } } return 0; } static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; u16 reg; /* * Allocate eeprom data. */ retval = rt2500usb_validate_eeprom(rt2x00dev); if (retval) return retval; retval = rt2500usb_init_eeprom(rt2x00dev); if (retval) return retval; /* * Enable rfkill polling by setting GPIO direction of the * rfkill switch GPIO pin correctly. */ rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg); rt2x00_set_field16(&reg, MAC_CSR19_BIT8, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg); /* * Initialize hw specifications. */ retval = rt2500usb_probe_hw_mode(rt2x00dev); if (retval) return retval; /* * This device requires the atim queue */ __set_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags); __set_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags); if (!modparam_nohwcrypt) { __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags); __set_bit(REQUIRE_COPY_IV, &rt2x00dev->cap_flags); } __set_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags); __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags); /* * Set the rssi offset. 
*/ rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; return 0; } static const struct ieee80211_ops rt2500usb_mac80211_ops = { .tx = rt2x00mac_tx, .start = rt2x00mac_start, .stop = rt2x00mac_stop, .add_interface = rt2x00mac_add_interface, .remove_interface = rt2x00mac_remove_interface, .config = rt2x00mac_config, .configure_filter = rt2x00mac_configure_filter, .set_tim = rt2x00mac_set_tim, .set_key = rt2x00mac_set_key, .sw_scan_start = rt2x00mac_sw_scan_start, .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2x00mac_conf_tx, .rfkill_poll = rt2x00mac_rfkill_poll, .flush = rt2x00mac_flush, .set_antenna = rt2x00mac_set_antenna, .get_antenna = rt2x00mac_get_antenna, .get_ringparam = rt2x00mac_get_ringparam, .tx_frames_pending = rt2x00mac_tx_frames_pending, }; static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = { .probe_hw = rt2500usb_probe_hw, .initialize = rt2x00usb_initialize, .uninitialize = rt2x00usb_uninitialize, .clear_entry = rt2x00usb_clear_entry, .set_device_state = rt2500usb_set_device_state, .rfkill_poll = rt2500usb_rfkill_poll, .link_stats = rt2500usb_link_stats, .reset_tuner = rt2500usb_reset_tuner, .watchdog = rt2x00usb_watchdog, .start_queue = rt2500usb_start_queue, .kick_queue = rt2x00usb_kick_queue, .stop_queue = rt2500usb_stop_queue, .flush_queue = rt2x00usb_flush_queue, .write_tx_desc = rt2500usb_write_tx_desc, .write_beacon = rt2500usb_write_beacon, .get_tx_data_len = rt2500usb_get_tx_data_len, .fill_rxdone = rt2500usb_fill_rxdone, .config_shared_key = rt2500usb_config_key, .config_pairwise_key = rt2500usb_config_key, .config_filter = rt2500usb_config_filter, .config_intf = rt2500usb_config_intf, .config_erp = rt2500usb_config_erp, .config_ant = rt2500usb_config_ant, .config = rt2500usb_config, }; static const struct data_queue_desc rt2500usb_queue_rx = { .entry_num = 32, .data_size = DATA_FRAME_SIZE, .desc_size = RXD_DESC_SIZE, .priv_size = sizeof(struct queue_entry_priv_usb), }; static const struct data_queue_desc rt2500usb_queue_tx = { .entry_num = 32, .data_size = DATA_FRAME_SIZE, .desc_size = TXD_DESC_SIZE, .priv_size = sizeof(struct queue_entry_priv_usb), }; static const struct data_queue_desc rt2500usb_queue_bcn = { .entry_num = 1, .data_size = MGMT_FRAME_SIZE, .desc_size = TXD_DESC_SIZE, .priv_size = sizeof(struct queue_entry_priv_usb_bcn), }; static const struct data_queue_desc rt2500usb_queue_atim = { .entry_num = 8, .data_size = DATA_FRAME_SIZE, .desc_size = TXD_DESC_SIZE, .priv_size = sizeof(struct queue_entry_priv_usb), }; static const struct rt2x00_ops rt2500usb_ops = { .name = KBUILD_MODNAME, .max_sta_intf = 1, .max_ap_intf = 1, .eeprom_size = EEPROM_SIZE, .rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, .extra_tx_headroom = TXD_DESC_SIZE, .rx = &rt2500usb_queue_rx, .tx = &rt2500usb_queue_tx, .bcn = &rt2500usb_queue_bcn, .atim = &rt2500usb_queue_atim, .lib = &rt2500usb_rt2x00_ops, .hw = &rt2500usb_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS .debugfs = &rt2500usb_rt2x00debug, #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ }; /* * rt2500usb module information. */ static struct usb_device_id rt2500usb_device_table[] = { /* ASUS */ { USB_DEVICE(0x0b05, 0x1706) }, { USB_DEVICE(0x0b05, 0x1707) }, /* Belkin */ { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050A ver. 
2.x */ { USB_DEVICE(0x050d, 0x7051) }, /* Cisco Systems */ { USB_DEVICE(0x13b1, 0x000d) }, { USB_DEVICE(0x13b1, 0x0011) }, { USB_DEVICE(0x13b1, 0x001a) }, /* Conceptronic */ { USB_DEVICE(0x14b2, 0x3c02) }, /* D-LINK */ { USB_DEVICE(0x2001, 0x3c00) }, /* Gigabyte */ { USB_DEVICE(0x1044, 0x8001) }, { USB_DEVICE(0x1044, 0x8007) }, /* Hercules */ { USB_DEVICE(0x06f8, 0xe000) }, /* Melco */ { USB_DEVICE(0x0411, 0x005e) }, { USB_DEVICE(0x0411, 0x0066) }, { USB_DEVICE(0x0411, 0x0067) }, { USB_DEVICE(0x0411, 0x008b) }, { USB_DEVICE(0x0411, 0x0097) }, /* MSI */ { USB_DEVICE(0x0db0, 0x6861) }, { USB_DEVICE(0x0db0, 0x6865) }, { USB_DEVICE(0x0db0, 0x6869) }, /* Ralink */ { USB_DEVICE(0x148f, 0x1706) }, { USB_DEVICE(0x148f, 0x2570) }, { USB_DEVICE(0x148f, 0x9020) }, /* Sagem */ { USB_DEVICE(0x079b, 0x004b) }, /* Siemens */ { USB_DEVICE(0x0681, 0x3c06) }, /* SMC */ { USB_DEVICE(0x0707, 0xee13) }, /* Spairon */ { USB_DEVICE(0x114b, 0x0110) }, /* SURECOM */ { USB_DEVICE(0x0769, 0x11f3) }, /* Trust */ { USB_DEVICE(0x0eb0, 0x9020) }, /* VTech */ { USB_DEVICE(0x0f88, 0x3012) }, /* Zinwell */ { USB_DEVICE(0x5a57, 0x0260) }, { 0, } }; MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2500 USB Wireless LAN driver."); MODULE_SUPPORTED_DEVICE("Ralink RT2570 USB chipset based cards"); MODULE_DEVICE_TABLE(usb, rt2500usb_device_table); MODULE_LICENSE("GPL"); static int rt2500usb_probe(struct usb_interface *usb_intf, const struct usb_device_id *id) { return rt2x00usb_probe(usb_intf, &rt2500usb_ops); } static struct usb_driver rt2500usb_driver = { .name = KBUILD_MODNAME, .id_table = rt2500usb_device_table, .probe = rt2500usb_probe, .disconnect = rt2x00usb_disconnect, .suspend = rt2x00usb_suspend, .resume = rt2x00usb_resume, }; module_usb_driver(rt2500usb_driver);
gpl-2.0
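A note on the rt2500usb_probe_hw_mode() listing above: the RF tables only carry per-channel register values, while transmit power lives in EEPROM, so the probe pairs them by array index. The first 14 channel_info entries (the 2.4 GHz channels every RF chip supports) take per-channel EEPROM tx power, and any 5 GHz entries an RF5222 adds beyond them fall back to a fixed default. The stand-alone sketch below reproduces just that fill pattern; the struct and both power constants are simplified stand-ins, not the rt2x00 headers.

	/* Stand-alone sketch of the channel-info fill in rt2500usb_probe_hw_mode().
	 * The struct and the two power constants are simplified stand-ins. */
	#include <stdio.h>
	#include <stdlib.h>

	struct channel_info_sketch { int max_power; int default_power1; };

	#define SKETCH_MAX_TXPOWER     31  /* stand-in for MAX_TXPOWER */
	#define SKETCH_DEFAULT_TXPOWER 24  /* stand-in for DEFAULT_TXPOWER */

	int main(void)
	{
		unsigned char eeprom_tx_power[14] = { 10, 10, 11, 11 }; /* fake EEPROM block */
		size_t num_channels = 37; /* 14 x 2.4 GHz + 23 x 5 GHz, as in rf_vals_5222 */
		struct channel_info_sketch *info = calloc(num_channels, sizeof(*info));
		size_t i;

		if (!info)
			return 1;
		/* The first 14 entries (2.4 GHz) take per-channel EEPROM tx power;
		 * every 5 GHz entry after them falls back to a fixed default. */
		for (i = 0; i < 14; i++) {
			info[i].max_power = SKETCH_MAX_TXPOWER;
			info[i].default_power1 = eeprom_tx_power[i];
		}
		for (; i < num_channels; i++) {
			info[i].max_power = SKETCH_MAX_TXPOWER;
			info[i].default_power1 = SKETCH_DEFAULT_TXPOWER;
		}
		printf("ch[0] power %d, ch[14] power %d\n",
		       info[0].default_power1, info[14].default_power1);
		free(info);
		return 0;
	}

Compiled alone, this prints "ch[0] power 10, ch[14] power 24": EEPROM power for a 2.4 GHz slot, the default for the first 5 GHz slot.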
b8e5n/KTG-kernel_es209ra
net/sunrpc/auth_gss/gss_krb5_wrap.c
755
8807
#include <linux/types.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/sunrpc/gss_krb5.h> #include <linux/random.h> #include <linux/pagemap.h> #include <linux/crypto.h> #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_AUTH #endif static inline int gss_krb5_padding(int blocksize, int length) { /* Most of the code is block-size independent but currently we * use only 8: */ BUG_ON(blocksize != 8); return 8 - (length & 7); } static inline void gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize) { int padding = gss_krb5_padding(blocksize, buf->len - offset); char *p; struct kvec *iov; if (buf->page_len || buf->tail[0].iov_len) iov = &buf->tail[0]; else iov = &buf->head[0]; p = iov->iov_base + iov->iov_len; iov->iov_len += padding; buf->len += padding; memset(p, padding, padding); } static inline int gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize) { u8 *ptr; u8 pad; size_t len = buf->len; if (len <= buf->head[0].iov_len) { pad = *(u8 *)(buf->head[0].iov_base + len - 1); if (pad > buf->head[0].iov_len) return -EINVAL; buf->head[0].iov_len -= pad; goto out; } else len -= buf->head[0].iov_len; if (len <= buf->page_len) { unsigned int last = (buf->page_base + len - 1) >>PAGE_CACHE_SHIFT; unsigned int offset = (buf->page_base + len - 1) & (PAGE_CACHE_SIZE - 1); ptr = kmap_atomic(buf->pages[last], KM_USER0); pad = *(ptr + offset); kunmap_atomic(ptr, KM_USER0); goto out; } else len -= buf->page_len; BUG_ON(len > buf->tail[0].iov_len); pad = *(u8 *)(buf->tail[0].iov_base + len - 1); out: /* XXX: NOTE: we do not adjust the page lengths--they represent * a range of data in the real filesystem page cache, and we need * to know that range so the xdr code can properly place read data. * However adjusting the head length, as we do above, is harmless. * In the case of a request that fits into a single page, the server * also uses length and head length together to determine the original * start of the request to copy the request for deferral; so it's * easier on the server if we adjust head and tail length in tandem. * It's not really a problem that we don't fool with the page and * tail lengths, though--at worst badly formed xdr might lead the * server to attempt to parse the padding. * XXX: Document all these weird requirements for gss mechanism * wrap/unwrap functions. */ if (pad > blocksize) return -EINVAL; if (buf->len > pad) buf->len -= pad; else return -EINVAL; return 0; } static void make_confounder(char *p, u32 conflen) { static u64 i = 0; u64 *q = (u64 *)p; /* rfc1964 claims this should be "random". But all that's really * necessary is that it be unique. And not even that is necessary in * our case since our "gssapi" implementation exists only to support * rpcsec_gss, so we know that the only buffers we will ever encrypt * already begin with a unique sequence number. Just to hedge my bets * I'll make a half-hearted attempt at something unique, but ensuring * uniqueness would mean worrying about atomicity and rollover, and I * don't care enough. */ /* initialize to random value */ if (i == 0) { i = random32(); i = (i << 32) | random32(); } switch (conflen) { case 16: *q++ = i++; /* fall through */ case 8: *q++ = i++; break; default: BUG(); } } /* Assumptions: the head and tail of inbuf are ours to play with. * The pages, however, may be real pages in the page cache and we replace * them with scratch pages from **pages before writing to them.
*/ /* XXX: obviously the above should be documentation of wrap interface, * and shouldn't be in this kerberos-specific file. */ /* XXX factor out common code with seal/unseal. */ u32 gss_wrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf, struct page **pages) { struct krb5_ctx *kctx = ctx->internal_ctx_id; char cksumdata[16]; struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; int blocksize = 0, plainlen; unsigned char *ptr, *msg_start; s32 now; int headlen; struct page **tmp_pages; u32 seq_send; dprintk("RPC: gss_wrap_kerberos\n"); now = get_seconds(); blocksize = crypto_blkcipher_blocksize(kctx->enc); gss_krb5_add_padding(buf, offset, blocksize); BUG_ON((buf->len - offset) % blocksize); plainlen = blocksize + buf->len - offset; headlen = g_token_size(&kctx->mech_used, 24 + plainlen) - (buf->len - offset); ptr = buf->head[0].iov_base + offset; /* shift data to make room for header. */ /* XXX Would be cleverer to encrypt while copying. */ /* XXX bounds checking, slack, etc. */ memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset); buf->head[0].iov_len += headlen; buf->len += headlen; BUG_ON((buf->len - offset - headlen) % blocksize); g_make_token_header(&kctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8 + plainlen, &ptr); /* ptr now at header described in rfc 1964, section 1.2.1: */ ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff); ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff); msg_start = ptr + 24; *(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5); memset(ptr + 4, 0xff, 4); *(__be16 *)(ptr + 4) = htons(SEAL_ALG_DES); make_confounder(msg_start, blocksize); /* XXXJBF: UGH!: */ tmp_pages = buf->pages; buf->pages = pages; if (make_checksum("md5", ptr, 8, buf, offset + headlen - blocksize, &md5cksum)) return GSS_S_FAILURE; buf->pages = tmp_pages; if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, md5cksum.data, md5cksum.len)) return GSS_S_FAILURE; memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8); spin_lock(&krb5_seq_lock); seq_send = kctx->seq_send++; spin_unlock(&krb5_seq_lock); /* XXX would probably be more efficient to compute checksum * and encrypt at the same time: */ if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff, seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))) return GSS_S_FAILURE; if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize, pages)) return GSS_S_FAILURE; return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; } u32 gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) { struct krb5_ctx *kctx = ctx->internal_ctx_id; int signalg; int sealalg; char cksumdata[16]; struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; s32 now; int direction; s32 seqnum; unsigned char *ptr; int bodysize; void *data_start, *orig_start; int data_len; int blocksize; dprintk("RPC: gss_unwrap_kerberos\n"); ptr = (u8 *)buf->head[0].iov_base + offset; if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr, buf->len - offset)) return GSS_S_DEFECTIVE_TOKEN; if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) || (ptr[1] != (KG_TOK_WRAP_MSG & 0xff))) return GSS_S_DEFECTIVE_TOKEN; /* XXX sanity-check bodysize?? 
*/ /* get the sign and seal algorithms */ signalg = ptr[2] + (ptr[3] << 8); if (signalg != SGN_ALG_DES_MAC_MD5) return GSS_S_DEFECTIVE_TOKEN; sealalg = ptr[4] + (ptr[5] << 8); if (sealalg != SEAL_ALG_DES) return GSS_S_DEFECTIVE_TOKEN; if ((ptr[6] != 0xff) || (ptr[7] != 0xff)) return GSS_S_DEFECTIVE_TOKEN; if (gss_decrypt_xdr_buf(kctx->enc, buf, ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base)) return GSS_S_DEFECTIVE_TOKEN; if (make_checksum("md5", ptr, 8, buf, ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base, &md5cksum)) return GSS_S_FAILURE; if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, md5cksum.data, md5cksum.len)) return GSS_S_FAILURE; if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8)) return GSS_S_BAD_SIG; /* it got through unscathed. Make sure the context is unexpired */ now = get_seconds(); if (now > kctx->endtime) return GSS_S_CONTEXT_EXPIRED; /* do sequencing checks */ if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, &direction, &seqnum)) return GSS_S_BAD_SIG; if ((kctx->initiate && direction != 0xff) || (!kctx->initiate && direction != 0)) return GSS_S_BAD_SIG; /* Copy the data back to the right position. XXX: Would probably be * better to copy and encrypt at the same time. */ blocksize = crypto_blkcipher_blocksize(kctx->enc); data_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8 + blocksize; orig_start = buf->head[0].iov_base + offset; data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; memmove(orig_start, data_start, data_len); buf->head[0].iov_len -= (data_start - orig_start); buf->len -= (data_start - orig_start); if (gss_krb5_remove_padding(buf, blocksize)) return GSS_S_DEFECTIVE_TOKEN; return GSS_S_COMPLETE; }
gpl-2.0
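One detail of gss_krb5_add_padding()/gss_krb5_remove_padding() in the listing above is worth spelling out: with the hard-coded 8-byte DES block, the pad length is 8 - (len & 7), which is never zero (an already-aligned buffer still gains a full block of pad), and every pad byte stores the pad length itself, so the unwrap side can strip the pad by reading only the last byte. Below is a stand-alone sketch of that arithmetic on a flat buffer; the kernel applies the same rule to an xdr_buf spread across head, pages, and tail.

	/* Stand-alone sketch of the gss_krb5 padding rule for an 8-byte block:
	 * pad length is 8 - (len & 7), each pad byte holds that length, and an
	 * already-aligned buffer still gets a full 8 pad bytes. */
	#include <stdio.h>
	#include <string.h>

	static int krb5_pad_len(int blocksize, int length)
	{
		return blocksize - (length & (blocksize - 1)); /* 1..blocksize, never 0 */
	}

	int main(void)
	{
		unsigned char buf[32] = "thirteen byte"; /* 13 payload bytes */
		int len = 13;
		int pad = krb5_pad_len(8, len);          /* 8 - (13 & 7) = 3 */

		memset(buf + len, pad, pad);             /* append 0x03 0x03 0x03 */
		len += pad;                              /* 16: a multiple of 8 */

		/* Unwrap side: the last byte says how much to strip. */
		printf("padded to %d, strip %d -> %d payload bytes\n",
		       len, buf[len - 1], len - buf[len - 1]);
		return 0;
	}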
mikedanese/linux
drivers/tty/serial/serial_txx9.c
1267
33766
/* * Derived from many drivers using generic_serial interface, * especially serial_tx3912.c by Steven J. Hill and r39xx_serial.c * (was in Linux/VR tree) by Jim Pick. * * Copyright (C) 1999 Harald Koerfgen * Copyright (C) 2000 Jim Pick <jim@jimpick.com> * Copyright (C) 2001 Steven J. Hill (sjhill@realitydiluted.com) * Copyright (C) 2000-2002 Toshiba Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Serial driver for TX3927/TX4927/TX4925/TX4938 internal SIO controller */ #if defined(CONFIG_SERIAL_TXX9_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/pci.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <asm/io.h> static char *serial_version = "1.11"; static char *serial_name = "TX39/49 Serial driver"; #define PASS_LIMIT 256 #if !defined(CONFIG_SERIAL_TXX9_STDSERIAL) /* "ttyS" is used for standard serial driver */ #define TXX9_TTY_NAME "ttyTX" #define TXX9_TTY_MINOR_START 196 #define TXX9_TTY_MAJOR 204 #else /* acts like standard serial driver */ #define TXX9_TTY_NAME "ttyS" #define TXX9_TTY_MINOR_START 64 #define TXX9_TTY_MAJOR TTY_MAJOR #endif /* flag aliases */ #define UPF_TXX9_HAVE_CTS_LINE UPF_BUGGY_UART #define UPF_TXX9_USE_SCLK UPF_MAGIC_MULTIPLIER #ifdef CONFIG_PCI /* support for Toshiba TC86C001 SIO */ #define ENABLE_SERIAL_TXX9_PCI #endif /* * Number of serial ports */ #define UART_NR CONFIG_SERIAL_TXX9_NR_UARTS struct uart_txx9_port { struct uart_port port; /* No additional info for now */ }; #define TXX9_REGION_SIZE 0x24 /* TXX9 Serial Registers */ #define TXX9_SILCR 0x00 #define TXX9_SIDICR 0x04 #define TXX9_SIDISR 0x08 #define TXX9_SICISR 0x0c #define TXX9_SIFCR 0x10 #define TXX9_SIFLCR 0x14 #define TXX9_SIBGR 0x18 #define TXX9_SITFIFO 0x1c #define TXX9_SIRFIFO 0x20 /* SILCR : Line Control */ #define TXX9_SILCR_SCS_MASK 0x00000060 #define TXX9_SILCR_SCS_IMCLK 0x00000000 #define TXX9_SILCR_SCS_IMCLK_BG 0x00000020 #define TXX9_SILCR_SCS_SCLK 0x00000040 #define TXX9_SILCR_SCS_SCLK_BG 0x00000060 #define TXX9_SILCR_UEPS 0x00000010 #define TXX9_SILCR_UPEN 0x00000008 #define TXX9_SILCR_USBL_MASK 0x00000004 #define TXX9_SILCR_USBL_1BIT 0x00000000 #define TXX9_SILCR_USBL_2BIT 0x00000004 #define TXX9_SILCR_UMODE_MASK 0x00000003 #define TXX9_SILCR_UMODE_8BIT 0x00000000 #define TXX9_SILCR_UMODE_7BIT 0x00000001 /* SIDICR : DMA/Int. Control */ #define TXX9_SIDICR_TDE 0x00008000 #define TXX9_SIDICR_RDE 0x00004000 #define TXX9_SIDICR_TIE 0x00002000 #define TXX9_SIDICR_RIE 0x00001000 #define TXX9_SIDICR_SPIE 0x00000800 #define TXX9_SIDICR_CTSAC 0x00000600 #define TXX9_SIDICR_STIE_MASK 0x0000003f #define TXX9_SIDICR_STIE_OERS 0x00000020 #define TXX9_SIDICR_STIE_CTSS 0x00000010 #define TXX9_SIDICR_STIE_RBRKD 0x00000008 #define TXX9_SIDICR_STIE_TRDY 0x00000004 #define TXX9_SIDICR_STIE_TXALS 0x00000002 #define TXX9_SIDICR_STIE_UBRKD 0x00000001 /* SIDISR : DMA/Int. 
Status */ #define TXX9_SIDISR_UBRK 0x00008000 #define TXX9_SIDISR_UVALID 0x00004000 #define TXX9_SIDISR_UFER 0x00002000 #define TXX9_SIDISR_UPER 0x00001000 #define TXX9_SIDISR_UOER 0x00000800 #define TXX9_SIDISR_ERI 0x00000400 #define TXX9_SIDISR_TOUT 0x00000200 #define TXX9_SIDISR_TDIS 0x00000100 #define TXX9_SIDISR_RDIS 0x00000080 #define TXX9_SIDISR_STIS 0x00000040 #define TXX9_SIDISR_RFDN_MASK 0x0000001f /* SICISR : Change Int. Status */ #define TXX9_SICISR_OERS 0x00000020 #define TXX9_SICISR_CTSS 0x00000010 #define TXX9_SICISR_RBRKD 0x00000008 #define TXX9_SICISR_TRDY 0x00000004 #define TXX9_SICISR_TXALS 0x00000002 #define TXX9_SICISR_UBRKD 0x00000001 /* SIFCR : FIFO Control */ #define TXX9_SIFCR_SWRST 0x00008000 #define TXX9_SIFCR_RDIL_MASK 0x00000180 #define TXX9_SIFCR_RDIL_1 0x00000000 #define TXX9_SIFCR_RDIL_4 0x00000080 #define TXX9_SIFCR_RDIL_8 0x00000100 #define TXX9_SIFCR_RDIL_12 0x00000180 #define TXX9_SIFCR_RDIL_MAX 0x00000180 #define TXX9_SIFCR_TDIL_MASK 0x00000018 #define TXX9_SIFCR_TDIL_1 0x00000000 #define TXX9_SIFCR_TDIL_4 0x00000001 #define TXX9_SIFCR_TDIL_8 0x00000010 #define TXX9_SIFCR_TDIL_MAX 0x00000010 #define TXX9_SIFCR_TFRST 0x00000004 #define TXX9_SIFCR_RFRST 0x00000002 #define TXX9_SIFCR_FRSTE 0x00000001 #define TXX9_SIO_TX_FIFO 8 #define TXX9_SIO_RX_FIFO 16 /* SIFLCR : Flow Control */ #define TXX9_SIFLCR_RCS 0x00001000 #define TXX9_SIFLCR_TES 0x00000800 #define TXX9_SIFLCR_RTSSC 0x00000200 #define TXX9_SIFLCR_RSDE 0x00000100 #define TXX9_SIFLCR_TSDE 0x00000080 #define TXX9_SIFLCR_RTSTL_MASK 0x0000001e #define TXX9_SIFLCR_RTSTL_MAX 0x0000001e #define TXX9_SIFLCR_TBRK 0x00000001 /* SIBGR : Baudrate Control */ #define TXX9_SIBGR_BCLK_MASK 0x00000300 #define TXX9_SIBGR_BCLK_T0 0x00000000 #define TXX9_SIBGR_BCLK_T2 0x00000100 #define TXX9_SIBGR_BCLK_T4 0x00000200 #define TXX9_SIBGR_BCLK_T6 0x00000300 #define TXX9_SIBGR_BRD_MASK 0x000000ff static inline unsigned int sio_in(struct uart_txx9_port *up, int offset) { switch (up->port.iotype) { default: return __raw_readl(up->port.membase + offset); case UPIO_PORT: return inl(up->port.iobase + offset); } } static inline void sio_out(struct uart_txx9_port *up, int offset, int value) { switch (up->port.iotype) { default: __raw_writel(value, up->port.membase + offset); break; case UPIO_PORT: outl(value, up->port.iobase + offset); break; } } static inline void sio_mask(struct uart_txx9_port *up, int offset, unsigned int value) { sio_out(up, offset, sio_in(up, offset) & ~value); } static inline void sio_set(struct uart_txx9_port *up, int offset, unsigned int value) { sio_out(up, offset, sio_in(up, offset) | value); } static inline void sio_quot_set(struct uart_txx9_port *up, int quot) { quot >>= 1; if (quot < 256) sio_out(up, TXX9_SIBGR, quot | TXX9_SIBGR_BCLK_T0); else if (quot < (256 << 2)) sio_out(up, TXX9_SIBGR, (quot >> 2) | TXX9_SIBGR_BCLK_T2); else if (quot < (256 << 4)) sio_out(up, TXX9_SIBGR, (quot >> 4) | TXX9_SIBGR_BCLK_T4); else if (quot < (256 << 6)) sio_out(up, TXX9_SIBGR, (quot >> 6) | TXX9_SIBGR_BCLK_T6); else sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6); } static struct uart_txx9_port *to_uart_txx9_port(struct uart_port *port) { return container_of(port, struct uart_txx9_port, port); } static void serial_txx9_stop_tx(struct uart_port *port) { struct uart_txx9_port *up = to_uart_txx9_port(port); sio_mask(up, TXX9_SIDICR, TXX9_SIDICR_TIE); } static void serial_txx9_start_tx(struct uart_port *port) { struct uart_txx9_port *up = to_uart_txx9_port(port); sio_set(up, TXX9_SIDICR, TXX9_SIDICR_TIE); } static 
void serial_txx9_stop_rx(struct uart_port *port) { struct uart_txx9_port *up = to_uart_txx9_port(port); up->port.read_status_mask &= ~TXX9_SIDISR_RDIS; } static void serial_txx9_initialize(struct uart_port *port) { struct uart_txx9_port *up = to_uart_txx9_port(port); unsigned int tmout = 10000; sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST); /* TX4925 BUG WORKAROUND. Accessing SIOC register * immediately after soft reset causes bus error. */ mmiowb(); udelay(1); while ((sio_in(up, TXX9_SIFCR) & TXX9_SIFCR_SWRST) && --tmout) udelay(1); /* TX Int by FIFO Empty, RX Int by Receiving 1 char. */ sio_set(up, TXX9_SIFCR, TXX9_SIFCR_TDIL_MAX | TXX9_SIFCR_RDIL_1); /* initial settings */ sio_out(up, TXX9_SILCR, TXX9_SILCR_UMODE_8BIT | TXX9_SILCR_USBL_1BIT | ((up->port.flags & UPF_TXX9_USE_SCLK) ? TXX9_SILCR_SCS_SCLK_BG : TXX9_SILCR_SCS_IMCLK_BG)); sio_quot_set(up, uart_get_divisor(port, 9600)); sio_out(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSTL_MAX /* 15 */); sio_out(up, TXX9_SIDICR, 0); } static inline void receive_chars(struct uart_txx9_port *up, unsigned int *status) { unsigned char ch; unsigned int disr = *status; int max_count = 256; char flag; unsigned int next_ignore_status_mask; do { ch = sio_in(up, TXX9_SIRFIFO); flag = TTY_NORMAL; up->port.icount.rx++; /* mask out RFDN_MASK bit added by previous overrun */ next_ignore_status_mask = up->port.ignore_status_mask & ~TXX9_SIDISR_RFDN_MASK; if (unlikely(disr & (TXX9_SIDISR_UBRK | TXX9_SIDISR_UPER | TXX9_SIDISR_UFER | TXX9_SIDISR_UOER))) { /* * For statistics only */ if (disr & TXX9_SIDISR_UBRK) { disr &= ~(TXX9_SIDISR_UFER | TXX9_SIDISR_UPER); up->port.icount.brk++; /* * We do the SysRQ and SAK checking * here because otherwise the break * may get masked by ignore_status_mask * or read_status_mask. */ if (uart_handle_break(&up->port)) goto ignore_char; } else if (disr & TXX9_SIDISR_UPER) up->port.icount.parity++; else if (disr & TXX9_SIDISR_UFER) up->port.icount.frame++; if (disr & TXX9_SIDISR_UOER) { up->port.icount.overrun++; /* * The receiver read buffer still holds * a char which caused overrun. * Ignore next char by adding RFDN_MASK * to ignore_status_mask temporarily. */ next_ignore_status_mask |= TXX9_SIDISR_RFDN_MASK; } /* * Mask off conditions which should be ignored.
*/ disr &= up->port.read_status_mask; if (disr & TXX9_SIDISR_UBRK) { flag = TTY_BREAK; } else if (disr & TXX9_SIDISR_UPER) flag = TTY_PARITY; else if (disr & TXX9_SIDISR_UFER) flag = TTY_FRAME; } if (uart_handle_sysrq_char(&up->port, ch)) goto ignore_char; uart_insert_char(&up->port, disr, TXX9_SIDISR_UOER, ch, flag); ignore_char: up->port.ignore_status_mask = next_ignore_status_mask; disr = sio_in(up, TXX9_SIDISR); } while (!(disr & TXX9_SIDISR_UVALID) && (max_count-- > 0)); spin_unlock(&up->port.lock); tty_flip_buffer_push(&up->port.state->port); spin_lock(&up->port.lock); *status = disr; } static inline void transmit_chars(struct uart_txx9_port *up) { struct circ_buf *xmit = &up->port.state->xmit; int count; if (up->port.x_char) { sio_out(up, TXX9_SITFIFO, up->port.x_char); up->port.icount.tx++; up->port.x_char = 0; return; } if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { serial_txx9_stop_tx(&up->port); return; } count = TXX9_SIO_TX_FIFO; do { sio_out(up, TXX9_SITFIFO, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); up->port.icount.tx++; if (uart_circ_empty(xmit)) break; } while (--count > 0); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&up->port); if (uart_circ_empty(xmit)) serial_txx9_stop_tx(&up->port); } static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id) { int pass_counter = 0; struct uart_txx9_port *up = dev_id; unsigned int status; while (1) { spin_lock(&up->port.lock); status = sio_in(up, TXX9_SIDISR); if (!(sio_in(up, TXX9_SIDICR) & TXX9_SIDICR_TIE)) status &= ~TXX9_SIDISR_TDIS; if (!(status & (TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS | TXX9_SIDISR_TOUT))) { spin_unlock(&up->port.lock); break; } if (status & TXX9_SIDISR_RDIS) receive_chars(up, &status); if (status & TXX9_SIDISR_TDIS) transmit_chars(up); /* Clear TX/RX Int. Status */ sio_mask(up, TXX9_SIDISR, TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS | TXX9_SIDISR_TOUT); spin_unlock(&up->port.lock); if (pass_counter++ > PASS_LIMIT) break; } return pass_counter ? IRQ_HANDLED : IRQ_NONE; } static unsigned int serial_txx9_tx_empty(struct uart_port *port) { struct uart_txx9_port *up = to_uart_txx9_port(port); unsigned long flags; unsigned int ret; spin_lock_irqsave(&up->port.lock, flags); ret = (sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS) ? TIOCSER_TEMT : 0; spin_unlock_irqrestore(&up->port.lock, flags); return ret; } static unsigned int serial_txx9_get_mctrl(struct uart_port *port) { struct uart_txx9_port *up = to_uart_txx9_port(port); unsigned int ret; /* no modem control lines */ ret = TIOCM_CAR | TIOCM_DSR; ret |= (sio_in(up, TXX9_SIFLCR) & TXX9_SIFLCR_RTSSC) ? 0 : TIOCM_RTS; ret |= (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS) ? 
0 : TIOCM_CTS; return ret; } static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct uart_txx9_port *up = to_uart_txx9_port(port); if (mctrl & TIOCM_RTS) sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC); else sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC); } static void serial_txx9_break_ctl(struct uart_port *port, int break_state) { struct uart_txx9_port *up = to_uart_txx9_port(port); unsigned long flags; spin_lock_irqsave(&up->port.lock, flags); if (break_state == -1) sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK); else sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK); spin_unlock_irqrestore(&up->port.lock, flags); } #if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL) /* * Wait for transmitter & holding register to empty */ static void wait_for_xmitr(struct uart_txx9_port *up) { unsigned int tmout = 10000; /* Wait up to 10ms for the character(s) to be sent. */ while (--tmout && !(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS)) udelay(1); /* Wait up to 1s for flow control if necessary */ if (up->port.flags & UPF_CONS_FLOW) { tmout = 1000000; while (--tmout && (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS)) udelay(1); } } #endif #ifdef CONFIG_CONSOLE_POLL /* * Console polling routines for writing and reading from the uart while * in an interrupt or debug context. */ static int serial_txx9_get_poll_char(struct uart_port *port) { unsigned int ier; unsigned char c; struct uart_txx9_port *up = to_uart_txx9_port(port); /* * First save the IER then disable the interrupts */ ier = sio_in(up, TXX9_SIDICR); sio_out(up, TXX9_SIDICR, 0); while (sio_in(up, TXX9_SIDISR) & TXX9_SIDISR_UVALID) ; c = sio_in(up, TXX9_SIRFIFO); /* * Finally, clear RX interrupt status * and restore the IER */ sio_mask(up, TXX9_SIDISR, TXX9_SIDISR_RDIS); sio_out(up, TXX9_SIDICR, ier); return c; } static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c) { unsigned int ier; struct uart_txx9_port *up = to_uart_txx9_port(port); /* * First save the IER then disable the interrupts */ ier = sio_in(up, TXX9_SIDICR); sio_out(up, TXX9_SIDICR, 0); wait_for_xmitr(up); /* * Send the character out. */ sio_out(up, TXX9_SITFIFO, c); /* * Finally, wait for transmitter to become empty * and restore the IER */ wait_for_xmitr(up); sio_out(up, TXX9_SIDICR, ier); } #endif /* CONFIG_CONSOLE_POLL */ static int serial_txx9_startup(struct uart_port *port) { struct uart_txx9_port *up = to_uart_txx9_port(port); unsigned long flags; int retval; /* * Clear the FIFO buffers and disable them. * (they will be reenabled in set_termios()) */ sio_set(up, TXX9_SIFCR, TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE); /* clear reset */ sio_mask(up, TXX9_SIFCR, TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE); sio_out(up, TXX9_SIDICR, 0); /* * Clear the interrupt registers. */ sio_out(up, TXX9_SIDISR, 0); retval = request_irq(up->port.irq, serial_txx9_interrupt, IRQF_SHARED, "serial_txx9", up); if (retval) return retval; /* * Now, initialize the UART */ spin_lock_irqsave(&up->port.lock, flags); serial_txx9_set_mctrl(&up->port, up->port.mctrl); spin_unlock_irqrestore(&up->port.lock, flags); /* Enable RX/TX */ sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE); /* * Finally, enable interrupts. 
*/ sio_set(up, TXX9_SIDICR, TXX9_SIDICR_RIE); return 0; } static void serial_txx9_shutdown(struct uart_port *port) { struct uart_txx9_port *up = to_uart_txx9_port(port); unsigned long flags; /* * Disable interrupts from this port */ sio_out(up, TXX9_SIDICR, 0); /* disable all intrs */ spin_lock_irqsave(&up->port.lock, flags); serial_txx9_set_mctrl(&up->port, up->port.mctrl); spin_unlock_irqrestore(&up->port.lock, flags); /* * Disable break condition */ sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK); #ifdef CONFIG_SERIAL_TXX9_CONSOLE if (up->port.cons && up->port.line == up->port.cons->index) { free_irq(up->port.irq, up); return; } #endif /* reset FIFOs */ sio_set(up, TXX9_SIFCR, TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE); /* clear reset */ sio_mask(up, TXX9_SIFCR, TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE); /* Disable RX/TX */ sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE); free_irq(up->port.irq, up); } static void serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct uart_txx9_port *up = to_uart_txx9_port(port); unsigned int cval, fcr = 0; unsigned long flags; unsigned int baud, quot; /* * We don't support modem control lines. */ termios->c_cflag &= ~(HUPCL | CMSPAR); termios->c_cflag |= CLOCAL; cval = sio_in(up, TXX9_SILCR); /* byte size and parity */ cval &= ~TXX9_SILCR_UMODE_MASK; switch (termios->c_cflag & CSIZE) { case CS7: cval |= TXX9_SILCR_UMODE_7BIT; break; default: case CS5: /* not supported */ case CS6: /* not supported */ case CS8: cval |= TXX9_SILCR_UMODE_8BIT; break; } cval &= ~TXX9_SILCR_USBL_MASK; if (termios->c_cflag & CSTOPB) cval |= TXX9_SILCR_USBL_2BIT; else cval |= TXX9_SILCR_USBL_1BIT; cval &= ~(TXX9_SILCR_UPEN | TXX9_SILCR_UEPS); if (termios->c_cflag & PARENB) cval |= TXX9_SILCR_UPEN; if (!(termios->c_cflag & PARODD)) cval |= TXX9_SILCR_UEPS; /* * Ask the core to calculate the divisor for us. */ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16/2); quot = uart_get_divisor(port, baud); /* Set up FIFOs */ /* TX Int by FIFO Empty, RX Int by Receiving 1 char. */ fcr = TXX9_SIFCR_TDIL_MAX | TXX9_SIFCR_RDIL_1; /* * Ok, we're now changing the port state. Do it with * interrupts disabled. */ spin_lock_irqsave(&up->port.lock, flags); /* * Update the per-port timeout. */ uart_update_timeout(port, termios->c_cflag, baud); up->port.read_status_mask = TXX9_SIDISR_UOER | TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS; if (termios->c_iflag & INPCK) up->port.read_status_mask |= TXX9_SIDISR_UFER | TXX9_SIDISR_UPER; if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) up->port.read_status_mask |= TXX9_SIDISR_UBRK; /* * Characters to ignore */ up->port.ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) up->port.ignore_status_mask |= TXX9_SIDISR_UPER | TXX9_SIDISR_UFER; if (termios->c_iflag & IGNBRK) { up->port.ignore_status_mask |= TXX9_SIDISR_UBRK; /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support).
*/ if (termios->c_iflag & IGNPAR) up->port.ignore_status_mask |= TXX9_SIDISR_UOER; } /* * ignore all characters if CREAD is not set */ if ((termios->c_cflag & CREAD) == 0) up->port.ignore_status_mask |= TXX9_SIDISR_RDIS; /* CTS flow control flag */ if ((termios->c_cflag & CRTSCTS) && (up->port.flags & UPF_TXX9_HAVE_CTS_LINE)) { sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_RCS | TXX9_SIFLCR_TES); } else { sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RCS | TXX9_SIFLCR_TES); } sio_out(up, TXX9_SILCR, cval); sio_quot_set(up, quot); sio_out(up, TXX9_SIFCR, fcr); serial_txx9_set_mctrl(&up->port, up->port.mctrl); spin_unlock_irqrestore(&up->port.lock, flags); } static void serial_txx9_pm(struct uart_port *port, unsigned int state, unsigned int oldstate) { /* * If oldstate was -1 this is called from * uart_configure_port(). In this case do not initialize the * port now, because the port was already initialized (for * non-console port) or should not be initialized here (for * console port). If we initialized the port here we lose * serial console settings. */ if (state == 0 && oldstate != -1) serial_txx9_initialize(port); } static int serial_txx9_request_resource(struct uart_txx9_port *up) { unsigned int size = TXX9_REGION_SIZE; int ret = 0; switch (up->port.iotype) { default: if (!up->port.mapbase) break; if (!request_mem_region(up->port.mapbase, size, "serial_txx9")) { ret = -EBUSY; break; } if (up->port.flags & UPF_IOREMAP) { up->port.membase = ioremap(up->port.mapbase, size); if (!up->port.membase) { release_mem_region(up->port.mapbase, size); ret = -ENOMEM; } } break; case UPIO_PORT: if (!request_region(up->port.iobase, size, "serial_txx9")) ret = -EBUSY; break; } return ret; } static void serial_txx9_release_resource(struct uart_txx9_port *up) { unsigned int size = TXX9_REGION_SIZE; switch (up->port.iotype) { default: if (!up->port.mapbase) break; if (up->port.flags & UPF_IOREMAP) { iounmap(up->port.membase); up->port.membase = NULL; } release_mem_region(up->port.mapbase, size); break; case UPIO_PORT: release_region(up->port.iobase, size); break; } } static void serial_txx9_release_port(struct uart_port *port) { struct uart_txx9_port *up = to_uart_txx9_port(port); serial_txx9_release_resource(up); } static int serial_txx9_request_port(struct uart_port *port) { struct uart_txx9_port *up = to_uart_txx9_port(port); return serial_txx9_request_resource(up); } static void serial_txx9_config_port(struct uart_port *port, int uflags) { struct uart_txx9_port *up = to_uart_txx9_port(port); int ret; /* * Find the region that we can probe for. This in turn * tells us whether we can probe for the type of port. 
*/ ret = serial_txx9_request_resource(up); if (ret < 0) return; port->type = PORT_TXX9; up->port.fifosize = TXX9_SIO_TX_FIFO; #ifdef CONFIG_SERIAL_TXX9_CONSOLE if (up->port.line == up->port.cons->index) return; #endif serial_txx9_initialize(port); } static const char * serial_txx9_type(struct uart_port *port) { return "txx9"; } static struct uart_ops serial_txx9_pops = { .tx_empty = serial_txx9_tx_empty, .set_mctrl = serial_txx9_set_mctrl, .get_mctrl = serial_txx9_get_mctrl, .stop_tx = serial_txx9_stop_tx, .start_tx = serial_txx9_start_tx, .stop_rx = serial_txx9_stop_rx, .break_ctl = serial_txx9_break_ctl, .startup = serial_txx9_startup, .shutdown = serial_txx9_shutdown, .set_termios = serial_txx9_set_termios, .pm = serial_txx9_pm, .type = serial_txx9_type, .release_port = serial_txx9_release_port, .request_port = serial_txx9_request_port, .config_port = serial_txx9_config_port, #ifdef CONFIG_CONSOLE_POLL .poll_get_char = serial_txx9_get_poll_char, .poll_put_char = serial_txx9_put_poll_char, #endif }; static struct uart_txx9_port serial_txx9_ports[UART_NR]; static void __init serial_txx9_register_ports(struct uart_driver *drv, struct device *dev) { int i; for (i = 0; i < UART_NR; i++) { struct uart_txx9_port *up = &serial_txx9_ports[i]; up->port.line = i; up->port.ops = &serial_txx9_pops; up->port.dev = dev; if (up->port.iobase || up->port.mapbase) uart_add_one_port(drv, &up->port); } } #ifdef CONFIG_SERIAL_TXX9_CONSOLE static void serial_txx9_console_putchar(struct uart_port *port, int ch) { struct uart_txx9_port *up = to_uart_txx9_port(port); wait_for_xmitr(up); sio_out(up, TXX9_SITFIFO, ch); } /* * Print a string to the serial port trying not to disturb * any possible real use of the port... * * The console_lock must be held when we get here. */ static void serial_txx9_console_write(struct console *co, const char *s, unsigned int count) { struct uart_txx9_port *up = &serial_txx9_ports[co->index]; unsigned int ier, flcr; /* * First save the IER then disable the interrupts */ ier = sio_in(up, TXX9_SIDICR); sio_out(up, TXX9_SIDICR, 0); /* * Disable flow-control if enabled (and unnecessary) */ flcr = sio_in(up, TXX9_SIFLCR); if (!(up->port.flags & UPF_CONS_FLOW) && (flcr & TXX9_SIFLCR_TES)) sio_out(up, TXX9_SIFLCR, flcr & ~TXX9_SIFLCR_TES); uart_console_write(&up->port, s, count, serial_txx9_console_putchar); /* * Finally, wait for transmitter to become empty * and restore the IER */ wait_for_xmitr(up); sio_out(up, TXX9_SIFLCR, flcr); sio_out(up, TXX9_SIDICR, ier); } static int __init serial_txx9_console_setup(struct console *co, char *options) { struct uart_port *port; struct uart_txx9_port *up; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; /* * Check whether an invalid uart number has been specified, and * if so, search for the first available port that does have * console support.
*/ if (co->index >= UART_NR) co->index = 0; up = &serial_txx9_ports[co->index]; port = &up->port; if (!port->ops) return -ENODEV; serial_txx9_initialize(&up->port); if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(port, co, baud, parity, bits, flow); } static struct uart_driver serial_txx9_reg; static struct console serial_txx9_console = { .name = TXX9_TTY_NAME, .write = serial_txx9_console_write, .device = uart_console_device, .setup = serial_txx9_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &serial_txx9_reg, }; static int __init serial_txx9_console_init(void) { register_console(&serial_txx9_console); return 0; } console_initcall(serial_txx9_console_init); #define SERIAL_TXX9_CONSOLE &serial_txx9_console #else #define SERIAL_TXX9_CONSOLE NULL #endif static struct uart_driver serial_txx9_reg = { .owner = THIS_MODULE, .driver_name = "serial_txx9", .dev_name = TXX9_TTY_NAME, .major = TXX9_TTY_MAJOR, .minor = TXX9_TTY_MINOR_START, .nr = UART_NR, .cons = SERIAL_TXX9_CONSOLE, }; int __init early_serial_txx9_setup(struct uart_port *port) { if (port->line >= ARRAY_SIZE(serial_txx9_ports)) return -ENODEV; serial_txx9_ports[port->line].port = *port; serial_txx9_ports[port->line].port.ops = &serial_txx9_pops; serial_txx9_ports[port->line].port.flags |= UPF_BOOT_AUTOCONF | UPF_FIXED_PORT; return 0; } static DEFINE_MUTEX(serial_txx9_mutex); /** * serial_txx9_register_port - register a serial port * @port: serial port template * * Configure the serial port specified by the request. * * The port is then probed and if necessary the IRQ is autodetected. * If this fails an error is returned. * * On success the port is ready to use and the line number is returned. */ static int serial_txx9_register_port(struct uart_port *port) { int i; struct uart_txx9_port *uart; int ret = -ENOSPC; mutex_lock(&serial_txx9_mutex); for (i = 0; i < UART_NR; i++) { uart = &serial_txx9_ports[i]; if (uart_match_port(&uart->port, port)) { uart_remove_one_port(&serial_txx9_reg, &uart->port); break; } } if (i == UART_NR) { /* Find unused port */ for (i = 0; i < UART_NR; i++) { uart = &serial_txx9_ports[i]; if (!(uart->port.iobase || uart->port.mapbase)) break; } } if (i < UART_NR) { uart->port.iobase = port->iobase; uart->port.membase = port->membase; uart->port.irq = port->irq; uart->port.uartclk = port->uartclk; uart->port.iotype = port->iotype; uart->port.flags = port->flags | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT; uart->port.mapbase = port->mapbase; if (port->dev) uart->port.dev = port->dev; ret = uart_add_one_port(&serial_txx9_reg, &uart->port); if (ret == 0) ret = uart->port.line; } mutex_unlock(&serial_txx9_mutex); return ret; } /** * serial_txx9_unregister_port - remove a txx9 serial port at runtime * @line: serial line number * * Remove one serial port. This may not be called from interrupt * context. We hand the port back to our control. */ static void serial_txx9_unregister_port(int line) { struct uart_txx9_port *uart = &serial_txx9_ports[line]; mutex_lock(&serial_txx9_mutex); uart_remove_one_port(&serial_txx9_reg, &uart->port); uart->port.flags = 0; uart->port.type = PORT_UNKNOWN; uart->port.iobase = 0; uart->port.mapbase = 0; uart->port.membase = NULL; uart->port.dev = NULL; mutex_unlock(&serial_txx9_mutex); } /* * Register a set of serial devices attached to a platform device.
*/ static int serial_txx9_probe(struct platform_device *dev) { struct uart_port *p = dev_get_platdata(&dev->dev); struct uart_port port; int ret, i; memset(&port, 0, sizeof(struct uart_port)); for (i = 0; p && p->uartclk != 0; p++, i++) { port.iobase = p->iobase; port.membase = p->membase; port.irq = p->irq; port.uartclk = p->uartclk; port.iotype = p->iotype; port.flags = p->flags; port.mapbase = p->mapbase; port.dev = &dev->dev; ret = serial_txx9_register_port(&port); if (ret < 0) { dev_err(&dev->dev, "unable to register port at index %d " "(IO%lx MEM%llx IRQ%d): %d\n", i, p->iobase, (unsigned long long)p->mapbase, p->irq, ret); } } return 0; } /* * Remove serial ports registered against a platform device. */ static int serial_txx9_remove(struct platform_device *dev) { int i; for (i = 0; i < UART_NR; i++) { struct uart_txx9_port *up = &serial_txx9_ports[i]; if (up->port.dev == &dev->dev) serial_txx9_unregister_port(i); } return 0; } #ifdef CONFIG_PM static int serial_txx9_suspend(struct platform_device *dev, pm_message_t state) { int i; for (i = 0; i < UART_NR; i++) { struct uart_txx9_port *up = &serial_txx9_ports[i]; if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev) uart_suspend_port(&serial_txx9_reg, &up->port); } return 0; } static int serial_txx9_resume(struct platform_device *dev) { int i; for (i = 0; i < UART_NR; i++) { struct uart_txx9_port *up = &serial_txx9_ports[i]; if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev) uart_resume_port(&serial_txx9_reg, &up->port); } return 0; } #endif static struct platform_driver serial_txx9_plat_driver = { .probe = serial_txx9_probe, .remove = serial_txx9_remove, #ifdef CONFIG_PM .suspend = serial_txx9_suspend, .resume = serial_txx9_resume, #endif .driver = { .name = "serial_txx9", }, }; #ifdef ENABLE_SERIAL_TXX9_PCI /* * Probe one serial board. Unfortunately, there is no rhyme nor reason * to the arrangement of serial ports on a PCI card. 
*/ static int pciserial_txx9_init_one(struct pci_dev *dev, const struct pci_device_id *ent) { struct uart_port port; int line; int rc; rc = pci_enable_device(dev); if (rc) return rc; memset(&port, 0, sizeof(port)); port.ops = &serial_txx9_pops; port.flags |= UPF_TXX9_HAVE_CTS_LINE; port.uartclk = 66670000; port.irq = dev->irq; port.iotype = UPIO_PORT; port.iobase = pci_resource_start(dev, 1); port.dev = &dev->dev; line = serial_txx9_register_port(&port); if (line < 0) { printk(KERN_WARNING "Couldn't register serial port %s: %d\n", pci_name(dev), line); pci_disable_device(dev); return line; } pci_set_drvdata(dev, &serial_txx9_ports[line]); return 0; } static void pciserial_txx9_remove_one(struct pci_dev *dev) { struct uart_txx9_port *up = pci_get_drvdata(dev); if (up) { serial_txx9_unregister_port(up->port.line); pci_disable_device(dev); } } #ifdef CONFIG_PM static int pciserial_txx9_suspend_one(struct pci_dev *dev, pm_message_t state) { struct uart_txx9_port *up = pci_get_drvdata(dev); if (up) uart_suspend_port(&serial_txx9_reg, &up->port); pci_save_state(dev); pci_set_power_state(dev, pci_choose_state(dev, state)); return 0; } static int pciserial_txx9_resume_one(struct pci_dev *dev) { struct uart_txx9_port *up = pci_get_drvdata(dev); pci_set_power_state(dev, PCI_D0); pci_restore_state(dev); if (up) uart_resume_port(&serial_txx9_reg, &up->port); return 0; } #endif static const struct pci_device_id serial_txx9_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC) }, { 0, } }; static struct pci_driver serial_txx9_pci_driver = { .name = "serial_txx9", .probe = pciserial_txx9_init_one, .remove = pciserial_txx9_remove_one, #ifdef CONFIG_PM .suspend = pciserial_txx9_suspend_one, .resume = pciserial_txx9_resume_one, #endif .id_table = serial_txx9_pci_tbl, }; MODULE_DEVICE_TABLE(pci, serial_txx9_pci_tbl); #endif /* ENABLE_SERIAL_TXX9_PCI */ static struct platform_device *serial_txx9_plat_devs; static int __init serial_txx9_init(void) { int ret; printk(KERN_INFO "%s version %s\n", serial_name, serial_version); ret = uart_register_driver(&serial_txx9_reg); if (ret) goto out; serial_txx9_plat_devs = platform_device_alloc("serial_txx9", -1); if (!serial_txx9_plat_devs) { ret = -ENOMEM; goto unreg_uart_drv; } ret = platform_device_add(serial_txx9_plat_devs); if (ret) goto put_dev; serial_txx9_register_ports(&serial_txx9_reg, &serial_txx9_plat_devs->dev); ret = platform_driver_register(&serial_txx9_plat_driver); if (ret) goto del_dev; #ifdef ENABLE_SERIAL_TXX9_PCI ret = pci_register_driver(&serial_txx9_pci_driver); #endif if (ret == 0) goto out; del_dev: platform_device_del(serial_txx9_plat_devs); put_dev: platform_device_put(serial_txx9_plat_devs); unreg_uart_drv: uart_unregister_driver(&serial_txx9_reg); out: return ret; } static void __exit serial_txx9_exit(void) { int i; #ifdef ENABLE_SERIAL_TXX9_PCI pci_unregister_driver(&serial_txx9_pci_driver); #endif platform_driver_unregister(&serial_txx9_plat_driver); platform_device_unregister(serial_txx9_plat_devs); for (i = 0; i < UART_NR; i++) { struct uart_txx9_port *up = &serial_txx9_ports[i]; if (up->port.iobase || up->port.mapbase) uart_remove_one_port(&serial_txx9_reg, &up->port); } uart_unregister_driver(&serial_txx9_reg); } module_init(serial_txx9_init); module_exit(serial_txx9_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("TX39/49 serial driver"); MODULE_ALIAS_CHARDEV_MAJOR(TXX9_TTY_MAJOR);
gpl-2.0
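The sio_quot_set() helper in the serial_txx9.c listing above deserves a closer look: SIBGR has only an 8-bit BRD divisor field, so the driver halves the divisor uart_get_divisor() hands it and then steps through the T0/T2/T4/T6 prescaler selects, shifting two more bits at each step until the value fits, clamping at 0xff | T6. Here is a stand-alone sketch of that mapping; the register constants are copied from the listing, and the printf stands in for the register write.

	/* Stand-alone sketch of the divisor-to-SIBGR mapping in sio_quot_set(). */
	#include <stdio.h>

	#define SIBGR_BCLK_T0 0x00000000
	#define SIBGR_BCLK_T2 0x00000100
	#define SIBGR_BCLK_T4 0x00000200
	#define SIBGR_BCLK_T6 0x00000300

	static unsigned int sibgr_for_quot(int quot)
	{
		quot >>= 1;
		if (quot < 256)
			return quot | SIBGR_BCLK_T0;
		if (quot < (256 << 2))
			return (quot >> 2) | SIBGR_BCLK_T2;
		if (quot < (256 << 4))
			return (quot >> 4) | SIBGR_BCLK_T4;
		if (quot < (256 << 6))
			return (quot >> 6) | SIBGR_BCLK_T6;
		return 0xff | SIBGR_BCLK_T6; /* clamp to the slowest rate */
	}

	int main(void)
	{
		/* e.g. the 66.67 MHz TC86C001 clock at 9600 baud */
		int quot = 66670000 / 16 / 9600;                  /* 434 */
		printf("SIBGR = 0x%04x\n", sibgr_for_quot(quot)); /* 0x00d9 */
		return 0;
	}

For that clock and baud rate the divisor is 434; after the halving it already fits in eight bits, so the sketch prints 0x00d9 (prescaler T0, BRD 217).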
shichao-an/linux-2.6.34.7
drivers/ieee1394/csr1212.c
1523
38810
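The csr1212.c listing below opens with csr1212_crc16(), the CRC used over IEEE 1212 ROM blocks: each 32-bit word is consumed in eight 4-bit slices, folding the top nibble of the running CRC into taps at bits 12, 5, and 0, which corresponds to the CCITT polynomial 0x1021 (x^16 + x^12 + x^5 + 1). The following stand-alone sketch reproduces the same loop with the byte-order conversion dropped, so words are taken in host order; the sample words are arbitrary.

	/* Stand-alone sketch of the nibble-at-a-time CRC-16 in csr1212_crc16()
	 * below. Words are processed in host order here for portability. */
	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	static uint16_t crc16_1212(const uint32_t *buf, size_t words)
	{
		uint16_t crc = 0;

		while (words--) {
			uint32_t data = *buf++;
			/* Fold the word in 4-bit slices, high nibble first. */
			for (int shift = 28; shift >= 0; shift -= 4) {
				uint16_t sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
				crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
			}
		}
		return crc;
	}

	int main(void)
	{
		uint32_t rom[2] = { 0x04046aa5, 0x31333934 }; /* arbitrary sample */
		printf("crc = 0x%04x\n", crc16_1212(rom, 2));
		return 0;
	}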
/* * csr1212.c -- IEEE 1212 Control and Status Register support for Linux * * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za> * Steve Kinneberg <kinnebergsteve@acmsystems.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* TODO List: * - Verify interface consistency: i.e., public functions that take a size * parameter expect size to be in bytes. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/kmemcheck.h> #include <linux/string.h> #include <asm/bug.h> #include <asm/byteorder.h> #include "csr1212.h" /* Permitted key type for each key id */ #define __I (1 << CSR1212_KV_TYPE_IMMEDIATE) #define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET) #define __D (1 << CSR1212_KV_TYPE_DIRECTORY) #define __L (1 << CSR1212_KV_TYPE_LEAF) static const u8 csr1212_key_id_type_map[0x30] = { __C, /* used by Apple iSight */ __D | __L, /* Descriptor */ __I | __D | __L, /* Bus_Dependent_Info */ __I | __D | __L, /* Vendor */ __I, /* Hardware_Version */ 0, 0, /* Reserved */ __D | __L | __I, /* Module */ __I, 0, 0, 0, /* used by Apple iSight, Reserved */ __I, /* Node_Capabilities */ __L, /* EUI_64 */ 0, 0, 0, /* Reserved */ __D, /* Unit */ __I, /* Specifier_ID */ __I, /* Version */ __I | __C | __D | __L, /* Dependent_Info */ __L, /* Unit_Location */ 0, /* Reserved */ __I, /* Model */ __D, /* Instance */ __L, /* Keyword */ __D, /* Feature */ __L, /* Extended_ROM */ __I, /* Extended_Key_Specifier_ID */ __I, /* Extended_Key */ __I | __C | __D | __L, /* Extended_Data */ __L, /* Modifiable_Descriptor */ __I, /* Directory_ID */ __I, /* Revision */ }; #undef __I #undef __C #undef __D #undef __L #define quads_to_bytes(_q) ((_q) * sizeof(u32)) #define bytes_to_quads(_b) DIV_ROUND_UP(_b, sizeof(u32)) static void free_keyval(struct csr1212_keyval *kv) { if ((kv->key.type == CSR1212_KV_TYPE_LEAF) && (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)) CSR1212_FREE(kv->value.leaf.data); CSR1212_FREE(kv); } static u16 csr1212_crc16(const u32 *buffer, size_t length) { int shift; u32 data; u16 sum, crc = 0; for (; length; length--) { data = be32_to_cpu(*buffer); buffer++; for (shift = 28; shift >= 0; shift -= 4 ) { sum = ((crc >> 12) ^ (data >> shift)) & 0xf; crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum); } crc &= 
0xffff; } return cpu_to_be16(crc); } /* Microsoft computes the CRC with the bytes in reverse order. */ static u16 csr1212_msft_crc16(const u32 *buffer, size_t length) { int shift; u32 data; u16 sum, crc = 0; for (; length; length--) { data = le32_to_cpu(*buffer); buffer++; for (shift = 28; shift >= 0; shift -= 4 ) { sum = ((crc >> 12) ^ (data >> shift)) & 0xf; crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum); } crc &= 0xffff; } return cpu_to_be16(crc); } static struct csr1212_dentry * csr1212_find_keyval(struct csr1212_keyval *dir, struct csr1212_keyval *kv) { struct csr1212_dentry *pos; for (pos = dir->value.directory.dentries_head; pos != NULL; pos = pos->next) if (pos->kv == kv) return pos; return NULL; } static struct csr1212_keyval * csr1212_find_keyval_offset(struct csr1212_keyval *kv_list, u32 offset) { struct csr1212_keyval *kv; for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next) if (kv->offset == offset) return kv; return NULL; } /* Creation Routines */ struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops, size_t bus_info_size, void *private) { struct csr1212_csr *csr; csr = CSR1212_MALLOC(sizeof(*csr)); if (!csr) return NULL; csr->cache_head = csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET, CSR1212_CONFIG_ROM_SPACE_SIZE); if (!csr->cache_head) { CSR1212_FREE(csr); return NULL; } /* The keyval key id is not used for the root node, but a valid key id * that can be used for a directory needs to be passed to * csr1212_new_directory(). */ csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR); if (!csr->root_kv) { CSR1212_FREE(csr->cache_head); CSR1212_FREE(csr); return NULL; } csr->bus_info_data = csr->cache_head->data; csr->bus_info_len = bus_info_size; csr->crc_len = bus_info_size; csr->ops = ops; csr->private = private; csr->cache_tail = csr->cache_head; return csr; } void csr1212_init_local_csr(struct csr1212_csr *csr, const u32 *bus_info_data, int max_rom) { static const int mr_map[] = { 4, 64, 1024, 0 }; BUG_ON(max_rom & ~0x3); csr->max_rom = mr_map[max_rom]; memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len); } static struct csr1212_keyval *csr1212_new_keyval(u8 type, u8 key) { struct csr1212_keyval *kv; if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0)) return NULL; kv = CSR1212_MALLOC(sizeof(*kv)); if (!kv) return NULL; atomic_set(&kv->refcnt, 1); kv->key.type = type; kv->key.id = key; kv->associate = NULL; kv->next = NULL; kv->prev = NULL; kv->offset = 0; kv->valid = 0; return kv; } struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value) { struct csr1212_keyval *kv; kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key); if (!kv) return NULL; kv->value.immediate = value; kv->valid = 1; return kv; } static struct csr1212_keyval * csr1212_new_leaf(u8 key, const void *data, size_t data_len) { struct csr1212_keyval *kv; kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key); if (!kv) return NULL; if (data_len > 0) { kv->value.leaf.data = CSR1212_MALLOC(data_len); if (!kv->value.leaf.data) { CSR1212_FREE(kv); return NULL; } if (data) memcpy(kv->value.leaf.data, data, data_len); } else { kv->value.leaf.data = NULL; } kv->value.leaf.len = bytes_to_quads(data_len); kv->offset = 0; kv->valid = 1; return kv; } static struct csr1212_keyval * csr1212_new_csr_offset(u8 key, u32 csr_offset) { struct csr1212_keyval *kv; kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key); if (!kv) return NULL; kv->value.csr_offset = csr_offset; kv->offset = 0; kv->valid = 1; return kv; } struct csr1212_keyval 
*csr1212_new_directory(u8 key) { struct csr1212_keyval *kv; kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key); if (!kv) return NULL; kv->value.directory.len = 0; kv->offset = 0; kv->value.directory.dentries_head = NULL; kv->value.directory.dentries_tail = NULL; kv->valid = 1; return kv; } void csr1212_associate_keyval(struct csr1212_keyval *kv, struct csr1212_keyval *associate) { BUG_ON(!kv || !associate || kv->key.id == CSR1212_KV_ID_DESCRIPTOR || (associate->key.id != CSR1212_KV_ID_DESCRIPTOR && associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO && associate->key.id != CSR1212_KV_ID_EXTENDED_KEY && associate->key.id != CSR1212_KV_ID_EXTENDED_DATA && associate->key.id < 0x30) || (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID && associate->key.id != CSR1212_KV_ID_EXTENDED_KEY) || (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY && associate->key.id != CSR1212_KV_ID_EXTENDED_DATA) || (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY && kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) || (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA && kv->key.id != CSR1212_KV_ID_EXTENDED_KEY)); if (kv->associate) csr1212_release_keyval(kv->associate); csr1212_keep_keyval(associate); kv->associate = associate; } static int __csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir, struct csr1212_keyval *kv, bool keep_keyval) { struct csr1212_dentry *dentry; BUG_ON(!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY); dentry = CSR1212_MALLOC(sizeof(*dentry)); if (!dentry) return -ENOMEM; if (keep_keyval) csr1212_keep_keyval(kv); dentry->kv = kv; dentry->next = NULL; dentry->prev = dir->value.directory.dentries_tail; if (!dir->value.directory.dentries_head) dir->value.directory.dentries_head = dentry; if (dir->value.directory.dentries_tail) dir->value.directory.dentries_tail->next = dentry; dir->value.directory.dentries_tail = dentry; return CSR1212_SUCCESS; } int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir, struct csr1212_keyval *kv) { return __csr1212_attach_keyval_to_directory(dir, kv, true); } #define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \ (&((kv)->value.leaf.data[1])) #define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \ ((kv)->value.leaf.data[0] = \ cpu_to_be32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \ ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT))) #define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \ ((kv)->value.leaf.data[0] = \ cpu_to_be32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \ CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \ ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK))) static struct csr1212_keyval * csr1212_new_descriptor_leaf(u8 dtype, u32 specifier_id, const void *data, size_t data_len) { struct csr1212_keyval *kv; kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL, data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD); if (!kv) return NULL; kmemcheck_annotate_variable(kv->value.leaf.data[0]); CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype); CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id); if (data) memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len); return kv; } /* Check if string conforms to minimal ASCII as per IEEE 1212 clause 7.4 */ static int csr1212_check_minimal_ascii(const char *s) { static const char minimal_ascii_table[] = { /* 1 2 4 8 16 32 64 128 */ 128, /* --, --, --, --, --, --, --, 07, */ 4 + 16 + 32, /* --, --, 0a, --, 0C, 0D, --, --, */ 0, /* --, --, --, --, --, --, --, --, */ 0, /* --, --, --, --, --, --, --, --, */ 255 - 8 - 16, /* 20, 21, 22, --, --, 25, 26, 27, */ 255, /* 28, 29, 2a, 2b, 2c, 2d, 2e, 2f, */ 255, 
/* 30, 31, 32, 33, 34, 35, 36, 37, */ 255, /* 38, 39, 3a, 3b, 3c, 3d, 3e, 3f, */ 255, /* 40, 41, 42, 43, 44, 45, 46, 47, */ 255, /* 48, 49, 4a, 4b, 4c, 4d, 4e, 4f, */ 255, /* 50, 51, 52, 53, 54, 55, 56, 57, */ 1 + 2 + 4 + 128, /* 58, 59, 5a, --, --, --, --, 5f, */ 255 - 1, /* --, 61, 62, 63, 64, 65, 66, 67, */ 255, /* 68, 69, 6a, 6b, 6c, 6d, 6e, 6f, */ 255, /* 70, 71, 72, 73, 74, 75, 76, 77, */ 1 + 2 + 4, /* 78, 79, 7a, --, --, --, --, --, */ }; int i, j; for (; *s; s++) { i = *s >> 3; /* i = *s / 8; */ j = 1 << (*s & 7); /* j = 1 << (*s % 8); */ if (i >= ARRAY_SIZE(minimal_ascii_table) || !(minimal_ascii_table[i] & j)) return -EINVAL; } return 0; } /* IEEE 1212 clause 7.5.4.1 textual descriptors (English, minimal ASCII) */ struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s) { struct csr1212_keyval *kv; u32 *text; size_t str_len, quads; if (!s || !*s || csr1212_check_minimal_ascii(s)) return NULL; str_len = strlen(s); quads = bytes_to_quads(str_len); kv = csr1212_new_descriptor_leaf(0, 0, NULL, quads_to_bytes(quads) + CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD); if (!kv) return NULL; kv->value.leaf.data[1] = 0; /* width, character_set, language */ text = CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv); text[quads - 1] = 0; /* padding */ memcpy(text, s, str_len); return kv; } /* Destruction Routines */ void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir, struct csr1212_keyval *kv) { struct csr1212_dentry *dentry; if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY) return; dentry = csr1212_find_keyval(dir, kv); if (!dentry) return; if (dentry->prev) dentry->prev->next = dentry->next; if (dentry->next) dentry->next->prev = dentry->prev; if (dir->value.directory.dentries_head == dentry) dir->value.directory.dentries_head = dentry->next; if (dir->value.directory.dentries_tail == dentry) dir->value.directory.dentries_tail = dentry->prev; CSR1212_FREE(dentry); csr1212_release_keyval(kv); } /* This function is used to free the memory taken by a keyval. If the given * keyval is a directory type, then any keyvals contained in that directory * will be destroyed as well if no one holds a reference on them. By means of * list manipulation, this routine will descend a directory structure in a * non-recursive manner. */ void csr1212_release_keyval(struct csr1212_keyval *kv) { struct csr1212_keyval *k, *a; struct csr1212_dentry dentry; struct csr1212_dentry *head, *tail; if (!atomic_dec_and_test(&kv->refcnt)) return; dentry.kv = kv; dentry.next = NULL; dentry.prev = NULL; head = &dentry; tail = head; while (head) { k = head->kv; while (k) { /* must not dec_and_test kv->refcnt again */ if (k != kv && !atomic_dec_and_test(&k->refcnt)) break; a = k->associate; if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) { /* If the current entry is a directory, move all * the entries to the destruction list.
*/ if (k->value.directory.dentries_head) { tail->next = k->value.directory.dentries_head; k->value.directory.dentries_head->prev = tail; tail = k->value.directory.dentries_tail; } } free_keyval(k); k = a; } head = head->next; if (head) { if (head->prev && head->prev != &dentry) CSR1212_FREE(head->prev); head->prev = NULL; } else if (tail != &dentry) { CSR1212_FREE(tail); } } } void csr1212_destroy_csr(struct csr1212_csr *csr) { struct csr1212_csr_rom_cache *c, *oc; struct csr1212_cache_region *cr, *ocr; csr1212_release_keyval(csr->root_kv); c = csr->cache_head; while (c) { oc = c; cr = c->filled_head; while (cr) { ocr = cr; cr = cr->next; CSR1212_FREE(ocr); } c = c->next; CSR1212_FREE(oc); } CSR1212_FREE(csr); } /* CSR Image Creation */ static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize) { struct csr1212_csr_rom_cache *cache; u64 csr_addr; BUG_ON(!csr || !csr->ops || !csr->ops->allocate_addr_range || !csr->ops->release_addr || csr->max_rom < 1); /* ROM size must be a multiple of csr->max_rom */ romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1); csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom, csr->private); if (csr_addr == CSR1212_INVALID_ADDR_SPACE) return -ENOMEM; if (csr_addr < CSR1212_REGISTER_SPACE_BASE) { /* Invalid address returned from allocate_addr_range(). */ csr->ops->release_addr(csr_addr, csr->private); return -ENOMEM; } cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE, romsize); if (!cache) { csr->ops->release_addr(csr_addr, csr->private); return -ENOMEM; } cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, CSR1212_KV_ID_EXTENDED_ROM); if (!cache->ext_rom) { csr->ops->release_addr(csr_addr, csr->private); CSR1212_FREE(cache); return -ENOMEM; } if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) != CSR1212_SUCCESS) { csr1212_release_keyval(cache->ext_rom); csr->ops->release_addr(csr_addr, csr->private); CSR1212_FREE(cache); return -ENOMEM; } cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE; cache->ext_rom->value.leaf.len = -1; cache->ext_rom->value.leaf.data = cache->data; /* Add cache to tail of cache list */ cache->prev = csr->cache_tail; csr->cache_tail->next = cache; csr->cache_tail = cache; return CSR1212_SUCCESS; } static void csr1212_remove_cache(struct csr1212_csr *csr, struct csr1212_csr_rom_cache *cache) { if (csr->cache_head == cache) csr->cache_head = cache->next; if (csr->cache_tail == cache) csr->cache_tail = cache->prev; if (cache->prev) cache->prev->next = cache->next; if (cache->next) cache->next->prev = cache->prev; if (cache->ext_rom) { csr1212_detach_keyval_from_directory(csr->root_kv, cache->ext_rom); csr1212_release_keyval(cache->ext_rom); } CSR1212_FREE(cache); } static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir, struct csr1212_keyval **layout_tail) { struct csr1212_dentry *dentry; struct csr1212_keyval *dkv; struct csr1212_keyval *last_extkey_spec = NULL; struct csr1212_keyval *last_extkey = NULL; int num_entries = 0; for (dentry = dir->value.directory.dentries_head; dentry; dentry = dentry->next) { for (dkv = dentry->kv; dkv; dkv = dkv->associate) { /* Special Case: Extended Key Specifier_ID */ if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) { if (last_extkey_spec == NULL) last_extkey_spec = dkv; else if (dkv->value.immediate != last_extkey_spec->value.immediate) last_extkey_spec = dkv; else continue; /* Special Case: Extended Key */ } else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) { if (last_extkey == 
NULL) last_extkey = dkv; else if (dkv->value.immediate != last_extkey->value.immediate) last_extkey = dkv; else continue; } num_entries += 1; switch (dkv->key.type) { default: case CSR1212_KV_TYPE_IMMEDIATE: case CSR1212_KV_TYPE_CSR_OFFSET: break; case CSR1212_KV_TYPE_LEAF: case CSR1212_KV_TYPE_DIRECTORY: /* Remove from list */ if (dkv->prev && (dkv->prev->next == dkv)) dkv->prev->next = dkv->next; if (dkv->next && (dkv->next->prev == dkv)) dkv->next->prev = dkv->prev; //if (dkv == *layout_tail) // *layout_tail = dkv->prev; /* Special case: Extended ROM leafs */ if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) { dkv->value.leaf.len = -1; /* Don't add Extended ROM leafs in the * layout list, they are handled * differently. */ break; } /* Add to tail of list */ dkv->next = NULL; dkv->prev = *layout_tail; (*layout_tail)->next = dkv; *layout_tail = dkv; break; } } } return num_entries; } static size_t csr1212_generate_layout_order(struct csr1212_keyval *kv) { struct csr1212_keyval *ltail = kv; size_t agg_size = 0; while (kv) { switch (kv->key.type) { case CSR1212_KV_TYPE_LEAF: /* Add 1 quadlet for crc/len field */ agg_size += kv->value.leaf.len + 1; break; case CSR1212_KV_TYPE_DIRECTORY: kv->value.directory.len = csr1212_generate_layout_subdir(kv, &ltail); /* Add 1 quadlet for crc/len field */ agg_size += kv->value.directory.len + 1; break; } kv = kv->next; } return quads_to_bytes(agg_size); } static struct csr1212_keyval * csr1212_generate_positions(struct csr1212_csr_rom_cache *cache, struct csr1212_keyval *start_kv, int start_pos) { struct csr1212_keyval *kv = start_kv; struct csr1212_keyval *okv = start_kv; int pos = start_pos; int kv_len = 0, okv_len = 0; cache->layout_head = kv; while (kv && pos < cache->size) { /* Special case: Extended ROM leafs */ if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) kv->offset = cache->offset + pos; switch (kv->key.type) { case CSR1212_KV_TYPE_LEAF: kv_len = kv->value.leaf.len; break; case CSR1212_KV_TYPE_DIRECTORY: kv_len = kv->value.directory.len; break; default: /* Should never get here */ WARN_ON(1); break; } pos += quads_to_bytes(kv_len + 1); if (pos <= cache->size) { okv = kv; okv_len = kv_len; kv = kv->next; } } cache->layout_tail = okv; cache->len = okv->offset - cache->offset + quads_to_bytes(okv_len + 1); return kv; } #define CSR1212_KV_KEY_SHIFT 24 #define CSR1212_KV_KEY_TYPE_SHIFT 6 #define CSR1212_KV_KEY_ID_MASK 0x3f #define CSR1212_KV_KEY_TYPE_MASK 0x3 /* after shift */ static void csr1212_generate_tree_subdir(struct csr1212_keyval *dir, u32 *data_buffer) { struct csr1212_dentry *dentry; struct csr1212_keyval *last_extkey_spec = NULL; struct csr1212_keyval *last_extkey = NULL; int index = 0; for (dentry = dir->value.directory.dentries_head; dentry; dentry = dentry->next) { struct csr1212_keyval *a; for (a = dentry->kv; a; a = a->associate) { u32 value = 0; /* Special Case: Extended Key Specifier_ID */ if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) { if (last_extkey_spec == NULL) last_extkey_spec = a; else if (a->value.immediate != last_extkey_spec->value.immediate) last_extkey_spec = a; else continue; /* Special Case: Extended Key */ } else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) { if (last_extkey == NULL) last_extkey = a; else if (a->value.immediate != last_extkey->value.immediate) last_extkey = a; else continue; } switch (a->key.type) { case CSR1212_KV_TYPE_IMMEDIATE: value = a->value.immediate; break; case CSR1212_KV_TYPE_CSR_OFFSET: value = a->value.csr_offset; break; case CSR1212_KV_TYPE_LEAF: value = a->offset; value -= 
dir->offset + quads_to_bytes(1+index); value = bytes_to_quads(value); break; case CSR1212_KV_TYPE_DIRECTORY: value = a->offset; value -= dir->offset + quads_to_bytes(1+index); value = bytes_to_quads(value); break; default: /* Should never get here */ WARN_ON(1); break; } value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) << CSR1212_KV_KEY_SHIFT; value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) << (CSR1212_KV_KEY_SHIFT + CSR1212_KV_KEY_TYPE_SHIFT); data_buffer[index] = cpu_to_be32(value); index++; } } } struct csr1212_keyval_img { u16 length; u16 crc; /* Must be last */ u32 data[0]; /* older gcc can't handle [] which is standard */ }; static void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache) { struct csr1212_keyval *kv, *nkv; struct csr1212_keyval_img *kvi; for (kv = cache->layout_head; kv != cache->layout_tail->next; kv = nkv) { kvi = (struct csr1212_keyval_img *)(cache->data + bytes_to_quads(kv->offset - cache->offset)); switch (kv->key.type) { default: case CSR1212_KV_TYPE_IMMEDIATE: case CSR1212_KV_TYPE_CSR_OFFSET: /* Should never get here */ WARN_ON(1); break; case CSR1212_KV_TYPE_LEAF: /* Don't copy over Extended ROM areas, they are * already filled out! */ if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) memcpy(kvi->data, kv->value.leaf.data, quads_to_bytes(kv->value.leaf.len)); kvi->length = cpu_to_be16(kv->value.leaf.len); kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len); break; case CSR1212_KV_TYPE_DIRECTORY: csr1212_generate_tree_subdir(kv, kvi->data); kvi->length = cpu_to_be16(kv->value.directory.len); kvi->crc = csr1212_crc16(kvi->data, kv->value.directory.len); break; } nkv = kv->next; if (kv->prev) kv->prev->next = NULL; if (kv->next) kv->next->prev = NULL; kv->prev = NULL; kv->next = NULL; } } /* This size is arbitrarily chosen. * The struct overhead is subtracted for more economic allocations. */ #define CSR1212_EXTENDED_ROM_SIZE (2048 - sizeof(struct csr1212_csr_rom_cache)) int csr1212_generate_csr_image(struct csr1212_csr *csr) { struct csr1212_bus_info_block_img *bi; struct csr1212_csr_rom_cache *cache; struct csr1212_keyval *kv; size_t agg_size; int ret; int init_offset; BUG_ON(!csr); cache = csr->cache_head; bi = (struct csr1212_bus_info_block_img*)cache->data; bi->length = bytes_to_quads(csr->bus_info_len) - 1; bi->crc_length = bi->length; bi->crc = csr1212_crc16(bi->data, bi->crc_length); csr->root_kv->next = NULL; csr->root_kv->prev = NULL; agg_size = csr1212_generate_layout_order(csr->root_kv); init_offset = csr->bus_info_len; for (kv = csr->root_kv, cache = csr->cache_head; kv; cache = cache->next) { if (!cache) { /* Estimate approximate number of additional cache * regions needed (it assumes that the cache holding * the first 1K Config ROM space always exists). 
*/ int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE - (2 * sizeof(u32))) + 1; /* Add additional cache regions, extras will be * removed later */ for (; est_c; est_c--) { ret = csr1212_append_new_cache(csr, CSR1212_EXTENDED_ROM_SIZE); if (ret != CSR1212_SUCCESS) return ret; } /* Need to re-layout for additional cache regions */ agg_size = csr1212_generate_layout_order(csr->root_kv); kv = csr->root_kv; cache = csr->cache_head; init_offset = csr->bus_info_len; } kv = csr1212_generate_positions(cache, kv, init_offset); agg_size -= cache->len; init_offset = sizeof(u32); } /* Remove unused, excess cache regions */ while (cache) { struct csr1212_csr_rom_cache *oc = cache; cache = cache->next; csr1212_remove_cache(csr, oc); } /* Go through the list backward so that when done, the correct CRC * will be calculated for the Extended ROM areas. */ for (cache = csr->cache_tail; cache; cache = cache->prev) { /* Only Extended ROM caches should have this set. */ if (cache->ext_rom) { int leaf_size; /* Make sure the Extended ROM leaf is a multiple of * max_rom in size. */ BUG_ON(csr->max_rom < 1); leaf_size = (cache->len + (csr->max_rom - 1)) & ~(csr->max_rom - 1); /* Zero out the unused ROM region */ memset(cache->data + bytes_to_quads(cache->len), 0x00, leaf_size - cache->len); /* Subtract leaf header */ leaf_size -= sizeof(u32); /* Update the Extended ROM leaf length */ cache->ext_rom->value.leaf.len = bytes_to_quads(leaf_size); } else { /* Zero out the unused ROM region */ memset(cache->data + bytes_to_quads(cache->len), 0x00, cache->size - cache->len); } /* Copy the data into the cache buffer */ csr1212_fill_cache(cache); if (cache != csr->cache_head) { /* Set the length and CRC of the extended ROM. */ struct csr1212_keyval_img *kvi = (struct csr1212_keyval_img*)cache->data; u16 len = bytes_to_quads(cache->len) - 1; kvi->length = cpu_to_be16(len); kvi->crc = csr1212_crc16(kvi->data, len); } } return CSR1212_SUCCESS; } int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer, u32 len) { struct csr1212_csr_rom_cache *cache; for (cache = csr->cache_head; cache; cache = cache->next) if (offset >= cache->offset && (offset + len) <= (cache->offset + cache->size)) { memcpy(buffer, &cache->data[ bytes_to_quads(offset - cache->offset)], len); return CSR1212_SUCCESS; } return -ENOENT; } /* * Apparently there are many different wrong implementations of the CRC * algorithm. We don't fail, we just warn... approximately once per GUID. 
*/ static void csr1212_check_crc(const u32 *buffer, size_t length, u16 crc, __be32 *guid) { static u64 last_bad_eui64; u64 eui64 = ((u64)be32_to_cpu(guid[0]) << 32) | be32_to_cpu(guid[1]); if (csr1212_crc16(buffer, length) == crc || csr1212_msft_crc16(buffer, length) == crc || eui64 == last_bad_eui64) return; printk(KERN_DEBUG "ieee1394: config ROM CRC error\n"); last_bad_eui64 = eui64; } /* Parse a chunk of data as a Config ROM */ static int csr1212_parse_bus_info_block(struct csr1212_csr *csr) { struct csr1212_bus_info_block_img *bi; struct csr1212_cache_region *cr; int i; int ret; for (i = 0; i < csr->bus_info_len; i += sizeof(u32)) { ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i, &csr->cache_head->data[bytes_to_quads(i)], csr->private); if (ret != CSR1212_SUCCESS) return ret; /* check ROM header's info_length */ if (i == 0 && be32_to_cpu(csr->cache_head->data[0]) >> 24 != bytes_to_quads(csr->bus_info_len) - 1) return -EINVAL; } bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data; csr->crc_len = quads_to_bytes(bi->crc_length); /* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that * is not always the case, so read the rest of the crc area 1 quadlet at * a time. */ for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(u32)) { ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i, &csr->cache_head->data[bytes_to_quads(i)], csr->private); if (ret != CSR1212_SUCCESS) return ret; } csr1212_check_crc(bi->data, bi->crc_length, bi->crc, &csr->bus_info_data[3]); cr = CSR1212_MALLOC(sizeof(*cr)); if (!cr) return -ENOMEM; cr->next = NULL; cr->prev = NULL; cr->offset_start = 0; cr->offset_end = csr->crc_len + 4; csr->cache_head->filled_head = cr; csr->cache_head->filled_tail = cr; return CSR1212_SUCCESS; } #define CSR1212_KV_KEY(q) (be32_to_cpu(q) >> CSR1212_KV_KEY_SHIFT) #define CSR1212_KV_KEY_TYPE(q) (CSR1212_KV_KEY(q) >> CSR1212_KV_KEY_TYPE_SHIFT) #define CSR1212_KV_KEY_ID(q) (CSR1212_KV_KEY(q) & CSR1212_KV_KEY_ID_MASK) #define CSR1212_KV_VAL_MASK 0xffffff #define CSR1212_KV_VAL(q) (be32_to_cpu(q) & CSR1212_KV_VAL_MASK) static int csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos) { int ret = CSR1212_SUCCESS; struct csr1212_keyval *k = NULL; u32 offset; bool keep_keyval = true; switch (CSR1212_KV_KEY_TYPE(ki)) { case CSR1212_KV_TYPE_IMMEDIATE: k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki), CSR1212_KV_VAL(ki)); if (!k) { ret = -ENOMEM; goto out; } /* Don't keep local reference when parsing. */ keep_keyval = false; break; case CSR1212_KV_TYPE_CSR_OFFSET: k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki), CSR1212_KV_VAL(ki)); if (!k) { ret = -ENOMEM; goto out; } /* Don't keep local reference when parsing. */ keep_keyval = false; break; default: /* Compute the offset from 0xffff f000 0000. */ offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos; if (offset == kv_pos) { /* Uh-oh. Can't have a relative offset of 0 for Leaves * or Directories. The Config ROM image is most likely * messed up, so we'll just abort here. */ ret = -EIO; goto out; } k = csr1212_find_keyval_offset(dir, offset); if (k) break; /* Found it. */ if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY) k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki)); else k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0); if (!k) { ret = -ENOMEM; goto out; } /* Don't keep local reference when parsing. */ keep_keyval = false; /* Contents not read yet so it's not valid. 
*/ k->valid = 0; k->offset = offset; k->prev = dir; k->next = dir->next; dir->next->prev = k; dir->next = k; } ret = __csr1212_attach_keyval_to_directory(dir, k, keep_keyval); out: if (ret != CSR1212_SUCCESS && k != NULL) free_keyval(k); return ret; } int csr1212_parse_keyval(struct csr1212_keyval *kv, struct csr1212_csr_rom_cache *cache) { struct csr1212_keyval_img *kvi; int i; int ret = CSR1212_SUCCESS; int kvi_len; kvi = (struct csr1212_keyval_img*) &cache->data[bytes_to_quads(kv->offset - cache->offset)]; kvi_len = be16_to_cpu(kvi->length); /* GUID is wrong in here in case of extended ROM. We don't care. */ csr1212_check_crc(kvi->data, kvi_len, kvi->crc, &cache->data[3]); switch (kv->key.type) { case CSR1212_KV_TYPE_DIRECTORY: for (i = 0; i < kvi_len; i++) { u32 ki = kvi->data[i]; /* Some devices put null entries in their unit * directories. If we come across such an entry, * then skip it. */ if (ki == 0x0) continue; ret = csr1212_parse_dir_entry(kv, ki, kv->offset + quads_to_bytes(i + 1)); } kv->value.directory.len = kvi_len; break; case CSR1212_KV_TYPE_LEAF: if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) { size_t size = quads_to_bytes(kvi_len); kv->value.leaf.data = CSR1212_MALLOC(size); if (!kv->value.leaf.data) { ret = -ENOMEM; goto out; } kv->value.leaf.len = kvi_len; memcpy(kv->value.leaf.data, kvi->data, size); } break; } kv->valid = 1; out: return ret; } static int csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv) { struct csr1212_cache_region *cr, *ncr, *newcr = NULL; struct csr1212_keyval_img *kvi = NULL; struct csr1212_csr_rom_cache *cache; int cache_index; u64 addr; u32 *cache_ptr; u16 kv_len = 0; BUG_ON(!csr || !kv || csr->max_rom < 1); /* First find which cache the data should be in (or go in if not read * yet). */ for (cache = csr->cache_head; cache; cache = cache->next) if (kv->offset >= cache->offset && kv->offset < (cache->offset + cache->size)) break; if (!cache) { u32 q, cache_size; /* Only create a new cache for Extended ROM leaves. */ if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) return -EINVAL; if (csr->ops->bus_read(csr, CSR1212_REGISTER_SPACE_BASE + kv->offset, &q, csr->private)) return -EIO; kv->value.leaf.len = be32_to_cpu(q) >> 16; cache_size = (quads_to_bytes(kv->value.leaf.len + 1) + (csr->max_rom - 1)) & ~(csr->max_rom - 1); cache = csr1212_rom_cache_malloc(kv->offset, cache_size); if (!cache) return -ENOMEM; kv->value.leaf.data = &cache->data[1]; csr->cache_tail->next = cache; cache->prev = csr->cache_tail; cache->next = NULL; csr->cache_tail = cache; cache->filled_head = CSR1212_MALLOC(sizeof(*cache->filled_head)); if (!cache->filled_head) return -ENOMEM; cache->filled_head->offset_start = 0; cache->filled_head->offset_end = sizeof(u32); cache->filled_tail = cache->filled_head; cache->filled_head->next = NULL; cache->filled_head->prev = NULL; cache->data[0] = q; /* Don't read the entire extended ROM now. Pieces of it will * be read when entries inside it are read. */ return csr1212_parse_keyval(kv, cache); } cache_index = kv->offset - cache->offset; /* Now search the read portions of the cache to see if it is there.
*/ for (cr = cache->filled_head; cr; cr = cr->next) { if (cache_index < cr->offset_start) { newcr = CSR1212_MALLOC(sizeof(*newcr)); if (!newcr) return -ENOMEM; newcr->offset_start = cache_index & ~(csr->max_rom - 1); newcr->offset_end = newcr->offset_start; newcr->next = cr; newcr->prev = cr->prev; cr->prev = newcr; cr = newcr; break; } else if ((cache_index >= cr->offset_start) && (cache_index < cr->offset_end)) { kvi = (struct csr1212_keyval_img*) (&cache->data[bytes_to_quads(cache_index)]); kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1); break; } else if (cache_index == cr->offset_end) { break; } } if (!cr) { cr = cache->filled_tail; newcr = CSR1212_MALLOC(sizeof(*newcr)); if (!newcr) return -ENOMEM; newcr->offset_start = cache_index & ~(csr->max_rom - 1); newcr->offset_end = newcr->offset_start; newcr->prev = cr; newcr->next = cr->next; cr->next = newcr; cr = newcr; cache->filled_tail = newcr; } while(!kvi || cr->offset_end < cache_index + kv_len) { cache_ptr = &cache->data[bytes_to_quads(cr->offset_end & ~(csr->max_rom - 1))]; addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset + cr->offset_end) & ~(csr->max_rom - 1); if (csr->ops->bus_read(csr, addr, cache_ptr, csr->private)) return -EIO; cr->offset_end += csr->max_rom - (cr->offset_end & (csr->max_rom - 1)); if (!kvi && (cr->offset_end > cache_index)) { kvi = (struct csr1212_keyval_img*) (&cache->data[bytes_to_quads(cache_index)]); kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1); } if ((kv_len + (kv->offset - cache->offset)) > cache->size) { /* The Leaf or Directory claims its length extends * beyond the ConfigROM image region and thus beyond the * end of our cache region. Therefore, we abort now * rather than seg faulting later. */ return -EIO; } ncr = cr->next; if (ncr && (cr->offset_end >= ncr->offset_start)) { /* consolidate region entries */ ncr->offset_start = cr->offset_start; if (cr->prev) cr->prev->next = cr->next; ncr->prev = cr->prev; if (cache->filled_head == cr) cache->filled_head = ncr; CSR1212_FREE(cr); cr = ncr; } } return csr1212_parse_keyval(kv, cache); } struct csr1212_keyval * csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv) { if (!kv) return NULL; if (!kv->valid) if (csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS) return NULL; return kv; } int csr1212_parse_csr(struct csr1212_csr *csr) { struct csr1212_dentry *dentry; int ret; BUG_ON(!csr || !csr->ops || !csr->ops->bus_read); ret = csr1212_parse_bus_info_block(csr); if (ret != CSR1212_SUCCESS) return ret; /* * There has been a buggy firmware with bus_info_block.max_rom > 0 * spotted which actually only supported quadlet read requests to the * config ROM. Therefore read everything quadlet by quadlet regardless * of what the bus info block says. */ csr->max_rom = 4; csr->cache_head->layout_head = csr->root_kv; csr->cache_head->layout_tail = csr->root_kv; csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) + csr->bus_info_len; csr->root_kv->valid = 0; csr->root_kv->next = csr->root_kv; csr->root_kv->prev = csr->root_kv; ret = csr1212_read_keyval(csr, csr->root_kv); if (ret != CSR1212_SUCCESS) return ret; /* Scan through the Root directory finding all extended ROM regions * and make cache regions for them */ for (dentry = csr->root_kv->value.directory.dentries_head; dentry; dentry = dentry->next) { if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM && !dentry->kv->valid) { ret = csr1212_read_keyval(csr, dentry->kv); if (ret != CSR1212_SUCCESS) return ret; } } return CSR1212_SUCCESS; }
gpl-2.0
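The directory code in csr1212.c above keeps its dentries on a hand-rolled doubly-linked list with separate head and tail pointers: __csr1212_attach_keyval_to_directory() appends at the tail in O(1), csr1212_detach_keyval_from_directory() unlinks from anywhere, and csr1212_release_keyval() splices whole directories onto a destruction list through the same links to avoid recursion. A minimal userspace sketch of that attach/detach pattern, with illustrative names that are not part of the driver:

#include <stdio.h>

/* Stand-ins for csr1212_dentry and the dentries_head/dentries_tail pair. */
struct dentry {
	int id;
	struct dentry *prev, *next;
};

struct dir {
	struct dentry *head, *tail;
};

/* Tail append, mirroring __csr1212_attach_keyval_to_directory(). */
static void attach(struct dir *d, struct dentry *e)
{
	e->next = NULL;
	e->prev = d->tail;
	if (!d->head)
		d->head = e;
	if (d->tail)
		d->tail->next = e;
	d->tail = e;
}

/* Unlink, mirroring csr1212_detach_keyval_from_directory(). */
static void detach(struct dir *d, struct dentry *e)
{
	if (e->prev)
		e->prev->next = e->next;
	if (e->next)
		e->next->prev = e->prev;
	if (d->head == e)
		d->head = e->next;
	if (d->tail == e)
		d->tail = e->prev;
	e->prev = e->next = NULL;
}

int main(void)
{
	struct dentry a = { 1, NULL, NULL }, b = { 2, NULL, NULL },
		      c = { 3, NULL, NULL };
	struct dir d = { NULL, NULL };

	attach(&d, &a);
	attach(&d, &b);
	attach(&d, &c);
	detach(&d, &b);			/* drop the middle entry */

	for (struct dentry *e = d.head; e; e = e->next)
		printf("%d\n", e->id);	/* prints 1 then 3 */
	return 0;
}

Tracking both ends is what lets the release path treat a directory's children as a queue: it moves dentries_head..dentries_tail onto its work list in one pointer splice instead of recursing.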
Mustaavalkosta/htc7x30-3.0
sound/soc/fsl/mpc5200_dma.c
1523
14989
/* * Freescale MPC5200 PSC DMA * ALSA SoC Platform driver * * Copyright (C) 2008 Secret Lab Technologies Ltd. * Copyright (C) 2009 Jon Smirl, Digispeaker */ #include <linux/module.h> #include <linux/of_device.h> #include <linux/slab.h> #include <linux/of_platform.h> #include <sound/soc.h> #include <sysdev/bestcomm/bestcomm.h> #include <sysdev/bestcomm/gen_bd.h> #include <asm/mpc52xx_psc.h> #include "mpc5200_dma.h" /* * Interrupt handlers */ static irqreturn_t psc_dma_status_irq(int irq, void *_psc_dma) { struct psc_dma *psc_dma = _psc_dma; struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs; u16 isr; isr = in_be16(&regs->mpc52xx_psc_isr); /* Playback underrun error */ if (psc_dma->playback.active && (isr & MPC52xx_PSC_IMR_TXEMP)) psc_dma->stats.underrun_count++; /* Capture overrun error */ if (psc_dma->capture.active && (isr & MPC52xx_PSC_IMR_ORERR)) psc_dma->stats.overrun_count++; out_8(&regs->command, MPC52xx_PSC_RST_ERR_STAT); return IRQ_HANDLED; } /** * psc_dma_bcom_enqueue_next_buffer - Enqueue another audio buffer * @s: pointer to stream private data structure * * Enqueues another audio period buffer into the bestcomm queue. * * Note: The routine must only be called when there is space available in * the queue. Otherwise the enqueue will fail and the audio ring buffer * will get out of sync */ static void psc_dma_bcom_enqueue_next_buffer(struct psc_dma_stream *s) { struct bcom_bd *bd; /* Prepare and enqueue the next buffer descriptor */ bd = bcom_prepare_next_buffer(s->bcom_task); bd->status = s->period_bytes; bd->data[0] = s->runtime->dma_addr + (s->period_next * s->period_bytes); bcom_submit_next_buffer(s->bcom_task, NULL); /* Update for next period */ s->period_next = (s->period_next + 1) % s->runtime->periods; } /* Bestcomm DMA irq handler */ static irqreturn_t psc_dma_bcom_irq(int irq, void *_psc_dma_stream) { struct psc_dma_stream *s = _psc_dma_stream; spin_lock(&s->psc_dma->lock); /* For each finished period, dequeue the completed period buffer * and enqueue a new one in its place. */ while (bcom_buffer_done(s->bcom_task)) { bcom_retrieve_buffer(s->bcom_task, NULL, NULL); s->period_current = (s->period_current+1) % s->runtime->periods; s->period_count++; psc_dma_bcom_enqueue_next_buffer(s); } spin_unlock(&s->psc_dma->lock); /* If the stream is active, then also inform the PCM middle layer * of the period finished event. */ if (s->active) snd_pcm_period_elapsed(s->stream); return IRQ_HANDLED; } static int psc_dma_hw_free(struct snd_pcm_substream *substream) { snd_pcm_set_runtime_buffer(substream, NULL); return 0; } /** * psc_dma_trigger: start and stop the DMA transfer. * * This function is called by ALSA to start, stop, pause, and resume the DMA * transfer of data.
*/ static int psc_dma_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai); struct snd_pcm_runtime *runtime = substream->runtime; struct psc_dma_stream *s = to_psc_dma_stream(substream, psc_dma); struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs; u16 imr; unsigned long flags; int i; switch (cmd) { case SNDRV_PCM_TRIGGER_START: dev_dbg(psc_dma->dev, "START: stream=%i fbits=%u ps=%u #p=%u\n", substream->pstr->stream, runtime->frame_bits, (int)runtime->period_size, runtime->periods); s->period_bytes = frames_to_bytes(runtime, runtime->period_size); s->period_next = 0; s->period_current = 0; s->active = 1; s->period_count = 0; s->runtime = runtime; /* Fill up the bestcomm bd queue and enable DMA. * This will begin filling the PSC's fifo. */ spin_lock_irqsave(&psc_dma->lock, flags); if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) bcom_gen_bd_rx_reset(s->bcom_task); else bcom_gen_bd_tx_reset(s->bcom_task); for (i = 0; i < runtime->periods; i++) if (!bcom_queue_full(s->bcom_task)) psc_dma_bcom_enqueue_next_buffer(s); bcom_enable(s->bcom_task); spin_unlock_irqrestore(&psc_dma->lock, flags); out_8(&regs->command, MPC52xx_PSC_RST_ERR_STAT); break; case SNDRV_PCM_TRIGGER_STOP: dev_dbg(psc_dma->dev, "STOP: stream=%i periods_count=%i\n", substream->pstr->stream, s->period_count); s->active = 0; spin_lock_irqsave(&psc_dma->lock, flags); bcom_disable(s->bcom_task); if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) bcom_gen_bd_rx_reset(s->bcom_task); else bcom_gen_bd_tx_reset(s->bcom_task); spin_unlock_irqrestore(&psc_dma->lock, flags); break; default: dev_dbg(psc_dma->dev, "unhandled trigger: stream=%i cmd=%i\n", substream->pstr->stream, cmd); return -EINVAL; } /* Update interrupt enable settings */ imr = 0; if (psc_dma->playback.active) imr |= MPC52xx_PSC_IMR_TXEMP; if (psc_dma->capture.active) imr |= MPC52xx_PSC_IMR_ORERR; out_be16(&regs->isr_imr.imr, psc_dma->imr | imr); return 0; } /* --------------------------------------------------------------------- * The PSC DMA 'ASoC platform' driver * * Can be referenced by an 'ASoC machine' driver * This driver only deals with the audio bus; it doesn't have any * interaction with the attached codec */ static const struct snd_pcm_hardware psc_dma_hardware = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_BATCH, .formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_S32_BE, .rate_min = 8000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .period_bytes_max = 1024 * 1024, .period_bytes_min = 32, .periods_min = 2, .periods_max = 256, .buffer_bytes_max = 2 * 1024 * 1024, .fifo_size = 512, }; static int psc_dma_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai); struct psc_dma_stream *s; int rc; dev_dbg(psc_dma->dev, "psc_dma_open(substream=%p)\n", substream); if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) s = &psc_dma->capture; else s = &psc_dma->playback; snd_soc_set_runtime_hwparams(substream, &psc_dma_hardware); rc = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (rc < 0) { dev_err(substream->pcm->card->dev, "invalid buffer size\n"); return rc; } s->stream = substream; return 
0; } static int psc_dma_close(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai); struct psc_dma_stream *s; dev_dbg(psc_dma->dev, "psc_dma_close(substream=%p)\n", substream); if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) s = &psc_dma->capture; else s = &psc_dma->playback; if (!psc_dma->playback.active && !psc_dma->capture.active) { /* Disable all interrupts and reset the PSC */ out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr); out_8(&psc_dma->psc_regs->command, 4 << 4); /* reset error */ } s->stream = NULL; return 0; } static snd_pcm_uframes_t psc_dma_pointer(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai); struct psc_dma_stream *s; dma_addr_t count; if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) s = &psc_dma->capture; else s = &psc_dma->playback; count = s->period_current * s->period_bytes; return bytes_to_frames(substream->runtime, count); } static int psc_dma_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); return 0; } static struct snd_pcm_ops psc_dma_ops = { .open = psc_dma_open, .close = psc_dma_close, .hw_free = psc_dma_hw_free, .ioctl = snd_pcm_lib_ioctl, .pointer = psc_dma_pointer, .trigger = psc_dma_trigger, .hw_params = psc_dma_hw_params, }; static u64 psc_dma_dmamask = 0xffffffff; static int psc_dma_new(struct snd_card *card, struct snd_soc_dai *dai, struct snd_pcm *pcm) { struct snd_soc_pcm_runtime *rtd = pcm->private_data; struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai); size_t size = psc_dma_hardware.buffer_bytes_max; int rc = 0; dev_dbg(rtd->platform->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n", card, dai, pcm); if (!card->dev->dma_mask) card->dev->dma_mask = &psc_dma_dmamask; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = 0xffffffff; if (pcm->streams[0].substream) { rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev, size, &pcm->streams[0].substream->dma_buffer); if (rc) goto playback_alloc_err; } if (pcm->streams[1].substream) { rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev, size, &pcm->streams[1].substream->dma_buffer); if (rc) goto capture_alloc_err; } if (rtd->codec->ac97) rtd->codec->ac97->private_data = psc_dma; return 0; capture_alloc_err: if (pcm->streams[0].substream) snd_dma_free_pages(&pcm->streams[0].substream->dma_buffer); playback_alloc_err: dev_err(card->dev, "Cannot allocate buffer(s)\n"); return -ENOMEM; } static void psc_dma_free(struct snd_pcm *pcm) { struct snd_soc_pcm_runtime *rtd = pcm->private_data; struct snd_pcm_substream *substream; int stream; dev_dbg(rtd->platform->dev, "psc_dma_free(pcm=%p)\n", pcm); for (stream = 0; stream < 2; stream++) { substream = pcm->streams[stream].substream; if (substream) { snd_dma_free_pages(&substream->dma_buffer); substream->dma_buffer.area = NULL; substream->dma_buffer.addr = 0; } } } static struct snd_soc_platform_driver mpc5200_audio_dma_platform = { .ops = &psc_dma_ops, .pcm_new = &psc_dma_new, .pcm_free = &psc_dma_free, }; static int mpc5200_hpcd_probe(struct platform_device *op) { phys_addr_t fifo; struct psc_dma *psc_dma; struct resource res; int size, irq, rc; const __be32 *prop; void __iomem *regs; int ret; /* Fetch the registers and IRQ of the PSC */ irq = irq_of_parse_and_map(op->dev.of_node, 0); if 
(of_address_to_resource(op->dev.of_node, 0, &res)) { dev_err(&op->dev, "Missing reg property\n"); return -ENODEV; } regs = ioremap(res.start, 1 + res.end - res.start); if (!regs) { dev_err(&op->dev, "Could not map registers\n"); return -ENODEV; } /* Allocate and initialize the driver private data */ psc_dma = kzalloc(sizeof *psc_dma, GFP_KERNEL); if (!psc_dma) { ret = -ENOMEM; goto out_unmap; } /* Get the PSC ID */ prop = of_get_property(op->dev.of_node, "cell-index", &size); if (!prop || size < sizeof *prop) { ret = -ENODEV; goto out_free; } spin_lock_init(&psc_dma->lock); mutex_init(&psc_dma->mutex); psc_dma->id = be32_to_cpu(*prop); psc_dma->irq = irq; psc_dma->psc_regs = regs; psc_dma->fifo_regs = regs + sizeof *psc_dma->psc_regs; psc_dma->dev = &op->dev; psc_dma->playback.psc_dma = psc_dma; psc_dma->capture.psc_dma = psc_dma; snprintf(psc_dma->name, sizeof psc_dma->name, "PSC%u", psc_dma->id); /* Find the address of the fifo data registers and setup the * DMA tasks */ fifo = res.start + offsetof(struct mpc52xx_psc, buffer.buffer_32); psc_dma->capture.bcom_task = bcom_psc_gen_bd_rx_init(psc_dma->id, 10, fifo, 512); psc_dma->playback.bcom_task = bcom_psc_gen_bd_tx_init(psc_dma->id, 10, fifo); if (!psc_dma->capture.bcom_task || !psc_dma->playback.bcom_task) { dev_err(&op->dev, "Could not allocate bestcomm tasks\n"); ret = -ENODEV; goto out_free; } /* Disable all interrupts and reset the PSC */ out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr); /* reset receiver */ out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_RX); /* reset transmitter */ out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_TX); /* reset error */ out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_ERR_STAT); /* reset mode */ out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_SEL_MODE_REG_1); /* Set up mode register; * First write: RxRdy (FIFO Alarm) generates rx FIFO irq * Second write: register Normal mode for non loopback */ out_8(&psc_dma->psc_regs->mode, 0); out_8(&psc_dma->psc_regs->mode, 0); /* Set the TX and RX fifo alarm thresholds */ out_be16(&psc_dma->fifo_regs->rfalarm, 0x100); out_8(&psc_dma->fifo_regs->rfcntl, 0x4); out_be16(&psc_dma->fifo_regs->tfalarm, 0x100); out_8(&psc_dma->fifo_regs->tfcntl, 0x7); /* Lookup the IRQ numbers */ psc_dma->playback.irq = bcom_get_task_irq(psc_dma->playback.bcom_task); psc_dma->capture.irq = bcom_get_task_irq(psc_dma->capture.bcom_task); rc = request_irq(psc_dma->irq, &psc_dma_status_irq, IRQF_SHARED, "psc-dma-status", psc_dma); rc |= request_irq(psc_dma->capture.irq, &psc_dma_bcom_irq, IRQF_SHARED, "psc-dma-capture", &psc_dma->capture); rc |= request_irq(psc_dma->playback.irq, &psc_dma_bcom_irq, IRQF_SHARED, "psc-dma-playback", &psc_dma->playback); if (rc) { ret = -ENODEV; goto out_irq; } /* Save what we've done so it can be found again later */ dev_set_drvdata(&op->dev, psc_dma); /* Tell the ASoC OF helpers about it */ return snd_soc_register_platform(&op->dev, &mpc5200_audio_dma_platform); out_irq: free_irq(psc_dma->irq, psc_dma); free_irq(psc_dma->capture.irq, &psc_dma->capture); free_irq(psc_dma->playback.irq, &psc_dma->playback); out_free: kfree(psc_dma); out_unmap: iounmap(regs); return ret; } static int mpc5200_hpcd_remove(struct platform_device *op) { struct psc_dma *psc_dma = dev_get_drvdata(&op->dev); dev_dbg(&op->dev, "mpc5200_audio_dma_destroy()\n"); snd_soc_unregister_platform(&op->dev); bcom_gen_bd_rx_release(psc_dma->capture.bcom_task); bcom_gen_bd_tx_release(psc_dma->playback.bcom_task); /* Release irqs */ free_irq(psc_dma->irq, psc_dma); 
free_irq(psc_dma->capture.irq, &psc_dma->capture); free_irq(psc_dma->playback.irq, &psc_dma->playback); iounmap(psc_dma->psc_regs); kfree(psc_dma); dev_set_drvdata(&op->dev, NULL); return 0; } static struct of_device_id mpc5200_hpcd_match[] = { { .compatible = "fsl,mpc5200-pcm", }, {} }; MODULE_DEVICE_TABLE(of, mpc5200_hpcd_match); static struct platform_driver mpc5200_hpcd_of_driver = { .probe = mpc5200_hpcd_probe, .remove = mpc5200_hpcd_remove, .driver = { .owner = THIS_MODULE, .name = "mpc5200-pcm-audio", .of_match_table = mpc5200_hpcd_match, } }; static int __init mpc5200_hpcd_init(void) { return platform_driver_register(&mpc5200_hpcd_of_driver); } module_init(mpc5200_hpcd_init); static void __exit mpc5200_hpcd_exit(void) { platform_driver_unregister(&mpc5200_hpcd_of_driver); } module_exit(mpc5200_hpcd_exit); MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); MODULE_DESCRIPTION("Freescale MPC5200 PSC in DMA mode ASoC Driver"); MODULE_LICENSE("GPL");
gpl-2.0
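The mpc5200 driver above never tracks a byte-accurate hardware pointer; it counts whole periods. period_next advances as bestcomm descriptors are enqueued, period_current as they complete, both wrapping modulo runtime->periods, and psc_dma_pointer() reports period_current * period_bytes back to ALSA. A standalone sketch of that arithmetic; the geometry constants are invented for illustration:

#include <stdio.h>

#define PERIODS		4	/* hypothetical runtime->periods */
#define PERIOD_BYTES	4096	/* hypothetical frames_to_bytes(period_size) */

int main(void)
{
	unsigned int period_next = 0, period_current = 0;

	/* Producer: DMA address offsets derived the same way
	 * psc_dma_bcom_enqueue_next_buffer() derives them. */
	for (int i = 0; i < 6; i++) {
		printf("enqueue period %u at byte offset %u\n",
		       period_next, period_next * PERIOD_BYTES);
		period_next = (period_next + 1) % PERIODS;	/* wrap */
	}

	/* Consumer: one completion moves the reported pointer by a whole
	 * period, as in psc_dma_bcom_irq() and psc_dma_pointer(). */
	period_current = (period_current + 1) % PERIODS;
	printf("hw pointer = %u bytes\n", period_current * PERIOD_BYTES);
	return 0;
}

Because positions only ever move in whole periods, the hardware description advertises SNDRV_PCM_INFO_BATCH, which tells the PCM core not to expect finer-grained pointer updates.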
dh-harald/android_kernel_samsung_codina
scripts/kconfig/util.c
2803
2844
/* * Copyright (C) 2002-2005 Roman Zippel <zippel@linux-m68k.org> * Copyright (C) 2002-2005 Sam Ravnborg <sam@ravnborg.org> * * Released under the terms of the GNU GPL v2.0. */ #include <string.h> #include "lkc.h" /* file already present in list? If not add it */ struct file *file_lookup(const char *name) { struct file *file; const char *file_name = sym_expand_string_value(name); for (file = file_list; file; file = file->next) { if (!strcmp(name, file->name)) { free((void *)file_name); return file; } } file = malloc(sizeof(*file)); memset(file, 0, sizeof(*file)); file->name = file_name; file->next = file_list; file_list = file; return file; } /* write a dependency file as used by kbuild to track dependencies */ int file_write_dep(const char *name) { struct symbol *sym, *env_sym; struct expr *e; struct file *file; FILE *out; if (!name) name = ".kconfig.d"; out = fopen("..config.tmp", "w"); if (!out) return 1; fprintf(out, "deps_config := \\\n"); for (file = file_list; file; file = file->next) { if (file->next) fprintf(out, "\t%s \\\n", file->name); else fprintf(out, "\t%s\n", file->name); } fprintf(out, "\n%s: \\\n" "\t$(deps_config)\n\n", conf_get_autoconfig_name()); expr_list_for_each_sym(sym_env_list, e, sym) { struct property *prop; const char *value; prop = sym_get_env_prop(sym); env_sym = prop_get_symbol(prop); if (!env_sym) continue; value = getenv(env_sym->name); if (!value) value = ""; fprintf(out, "ifneq \"$(%s)\" \"%s\"\n", env_sym->name, value); fprintf(out, "%s: FORCE\n", conf_get_autoconfig_name()); fprintf(out, "endif\n"); } fprintf(out, "\n$(deps_config): ;\n"); fclose(out); rename("..config.tmp", name); return 0; } /* Allocate initial growable string */ struct gstr str_new(void) { struct gstr gs; gs.s = malloc(sizeof(char) * 64); gs.len = 64; gs.max_width = 0; strcpy(gs.s, "\0"); return gs; } /* Allocate and assign growable string */ struct gstr str_assign(const char *s) { struct gstr gs; gs.s = strdup(s); gs.len = strlen(s) + 1; gs.max_width = 0; return gs; } /* Free storage for growable string */ void str_free(struct gstr *gs) { if (gs->s) free(gs->s); gs->s = NULL; gs->len = 0; } /* Append to growable string */ void str_append(struct gstr *gs, const char *s) { size_t l; if (s) { l = strlen(gs->s) + strlen(s) + 1; if (l > gs->len) { gs->s = realloc(gs->s, l); gs->len = l; } strcat(gs->s, s); } } /* Append printf formatted string to growable string */ void str_printf(struct gstr *gs, const char *fmt, ...) { va_list ap; char s[10000]; /* big enough... */ va_start(ap, fmt); vsnprintf(s, sizeof(s), fmt, ap); str_append(gs, s); va_end(ap); } /* Retrieve value of growable string */ const char *str_get(struct gstr *gs) { return gs->s; }
gpl-2.0
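str_append() above is the core of the gstr type: compute the combined length, realloc only when it exceeds the current allocation, then strcat. A compressed userspace re-creation of the same pattern; allocation-failure handling is trimmed for brevity:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct gstr {
	size_t len;	/* allocated size, as in the kconfig struct */
	char *s;
};

static struct gstr str_new(void)
{
	struct gstr gs = { 64, malloc(64) };

	gs.s[0] = '\0';
	return gs;
}

static void str_append(struct gstr *gs, const char *s)
{
	size_t l = strlen(gs->s) + strlen(s) + 1;

	if (l > gs->len) {	/* grow exactly to the needed size */
		gs->s = realloc(gs->s, l);
		gs->len = l;
	}
	strcat(gs->s, s);
}

int main(void)
{
	struct gstr gs = str_new();

	str_append(&gs, "CONFIG_FOO=");
	str_append(&gs, "y");
	printf("%s\n", gs.s);	/* CONFIG_FOO=y */
	free(gs.s);
	return 0;
}

Growing to the exact size rather than doubling makes a long chain of appends quadratic in the worst case, but kconfig strings are short enough that the simplicity wins.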
CyanideL/android_kernel_samsung_jf
arch/arm/mach-kirkwood/cpuidle.c
4851
1922
/* * arch/arm/mach-kirkwood/cpuidle.c * * CPU idle Marvell Kirkwood SoCs * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. * * The cpu idle uses wait-for-interrupt and DDR self refresh in order * to implement two idle states - * #1 wait-for-interrupt * #2 wait-for-interrupt and DDR self refresh */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/cpuidle.h> #include <linux/io.h> #include <linux/export.h> #include <asm/proc-fns.h> #include <asm/cpuidle.h> #include <mach/kirkwood.h> #define KIRKWOOD_MAX_STATES 2 /* Actual code that puts the SoC in different idle states */ static int kirkwood_enter_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { writel(0x7, DDR_OPERATION_BASE); cpu_do_idle(); return index; } static struct cpuidle_driver kirkwood_idle_driver = { .name = "kirkwood_idle", .owner = THIS_MODULE, .en_core_tk_irqen = 1, .states[0] = ARM_CPUIDLE_WFI_STATE, .states[1] = { .enter = kirkwood_enter_idle, .exit_latency = 10, .target_residency = 100000, .flags = CPUIDLE_FLAG_TIME_VALID, .name = "DDR SR", .desc = "WFI and DDR Self Refresh", }, .state_count = KIRKWOOD_MAX_STATES, }; static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device); /* Initialize CPU idle by registering the idle states */ static int kirkwood_init_cpuidle(void) { struct cpuidle_device *device; device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id()); device->state_count = KIRKWOOD_MAX_STATES; cpuidle_register_driver(&kirkwood_idle_driver); if (cpuidle_register_device(device)) { printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n"); return -EIO; } return 0; } device_initcall(kirkwood_init_cpuidle);
gpl-2.0
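The interesting part of the Kirkwood driver is the pair of numbers it attaches to the deeper state: exit_latency (10 us to come back from DDR self refresh) and target_residency (100000 us of idleness before entering it pays off). A cpuidle governor compares those against its idle-duration prediction and latency constraints when choosing a state. The sketch below is a toy model of that selection rule, not the kernel's menu governor:

#include <stdio.h>

struct state {
	const char *name;
	unsigned int exit_latency;	/* us */
	unsigned int target_residency;	/* us */
};

static const struct state states[] = {
	{ "WFI", 1, 1 },		/* roughly ARM_CPUIDLE_WFI_STATE */
	{ "DDR SR", 10, 100000 },	/* values from the driver above */
};

/* Deepest state whose break-even residency fits the prediction and
 * whose wakeup latency is tolerable. */
static int pick_state(unsigned int predicted_us, unsigned int latency_limit_us)
{
	int best = 0;

	for (size_t i = 1; i < sizeof(states) / sizeof(states[0]); i++)
		if (states[i].target_residency <= predicted_us &&
		    states[i].exit_latency <= latency_limit_us)
			best = (int)i;
	return best;
}

int main(void)
{
	printf("%s\n", states[pick_state(500, 100)].name);	/* WFI */
	printf("%s\n", states[pick_state(200000, 100)].name);	/* DDR SR */
	return 0;
}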
civato/SphereEngine-Note8.0
drivers/pnp/pnpbios/core.c
4851
14510
/* * pnpbios -- PnP BIOS driver * * This driver provides access to Plug-'n'-Play services provided by * the PnP BIOS firmware, described in the following documents: * Plug and Play BIOS Specification, Version 1.0A, 5 May 1994 * Plug and Play BIOS Clarification Paper, 6 October 1994 * Compaq Computer Corporation, Phoenix Technologies Ltd., Intel Corp. * * Originally (C) 1998 Christian Schmidt <schmidt@digadd.de> * Modifications (C) 1998 Tom Lees <tom@lpsg.demon.co.uk> * Minor reorganizations by David Hinds <dahinds@users.sourceforge.net> * Further modifications (C) 2001, 2002 by: * Alan Cox <alan@redhat.com> * Thomas Hood * Brian Gerst <bgerst@didntduck.org> * * Ported to the PnP Layer and several additional improvements (C) 2002 * by Adam Belay <ambx1@neo.rr.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Change Log * * Adam Belay - <ambx1@neo.rr.com> - March 16, 2003 * rev 1.01 Only call pnp_bios_dev_node_info once * Added pnpbios_print_status * Added several new error messages and info messages * Added pnpbios_interface_attach_device * integrated core and proc init system * Introduced PNPMODE flags * Removed some useless includes */ #include <linux/types.h> #include <linux/module.h> #include <linux/init.h> #include <linux/linkage.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/pnp.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/slab.h> #include <linux/completion.h> #include <linux/spinlock.h> #include <linux/dmi.h> #include <linux/delay.h> #include <linux/acpi.h> #include <linux/freezer.h> #include <linux/kthread.h> #include <asm/page.h> #include <asm/desc.h> #include <asm/system.h> #include <asm/byteorder.h> #include "../base.h" #include "pnpbios.h" /* * * PnP BIOS INTERFACE * */ static union pnp_bios_install_struct *pnp_bios_install = NULL; int pnp_bios_present(void) { return (pnp_bios_install != NULL); } struct pnp_dev_node_info node_info; /* * * DOCKING FUNCTIONS * */ #ifdef CONFIG_HOTPLUG static struct completion unload_sem; /* * (Much of this belongs in a shared routine somewhere) */ static int pnp_dock_event(int dock, struct pnp_docking_station_info *info) { char *argv[3], **envp, *buf, *scratch; int i = 0, value; if (!(envp = kcalloc(20, sizeof(char *), GFP_KERNEL))) return -ENOMEM; if (!(buf = kzalloc(256, GFP_KERNEL))) { kfree(envp); return -ENOMEM; } /* FIXME: if there are actual users of this, it should be * integrated into the driver core and use the usual infrastructure * like sysfs and uevents */ argv[0] = "/sbin/pnpbios"; argv[1] = "dock"; argv[2] = NULL; /* minimal command environment */ envp[i++] = "HOME=/"; envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; #ifdef DEBUG /* hint that policy agent should enter no-stdout debug mode */ envp[i++] = "DEBUG=kernel"; #endif /* extensible set of named bus-specific parameters, * supporting multiple driver selection algorithms. 
*/ scratch = buf; /* action: add, remove */ envp[i++] = scratch; scratch += sprintf(scratch, "ACTION=%s", dock ? "add" : "remove") + 1; /* Report the ident for the dock */ envp[i++] = scratch; scratch += sprintf(scratch, "DOCK=%x/%x/%x", info->location_id, info->serial, info->capabilities); envp[i] = NULL; value = call_usermodehelper(argv [0], argv, envp, UMH_WAIT_EXEC); kfree(buf); kfree(envp); return 0; } /* * Poll the PnP docking at regular intervals */ static int pnp_dock_thread(void *unused) { static struct pnp_docking_station_info now; int docked = -1, d = 0; set_freezable(); while (1) { int status; /* * Poll every 2 seconds */ msleep_interruptible(2000); if (try_to_freeze()) continue; status = pnp_bios_dock_station_info(&now); switch (status) { /* * No dock to manage */ case PNP_FUNCTION_NOT_SUPPORTED: complete_and_exit(&unload_sem, 0); case PNP_SYSTEM_NOT_DOCKED: d = 0; break; case PNP_SUCCESS: d = 1; break; default: pnpbios_print_status("pnp_dock_thread", status); continue; } if (d != docked) { if (pnp_dock_event(d, &now) == 0) { docked = d; #if 0 printk(KERN_INFO "PnPBIOS: Docking station %stached\n", docked ? "at" : "de"); #endif } } } complete_and_exit(&unload_sem, 0); } #endif /* CONFIG_HOTPLUG */ static int pnpbios_get_resources(struct pnp_dev *dev) { u8 nodenum = dev->number; struct pnp_bios_node *node; if (!pnpbios_is_dynamic(dev)) return -EPERM; pnp_dbg(&dev->dev, "get resources\n"); node = kzalloc(node_info.max_node_size, GFP_KERNEL); if (!node) return -1; if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) { kfree(node); return -ENODEV; } pnpbios_read_resources_from_node(dev, node); dev->active = pnp_is_active(dev); kfree(node); return 0; } static int pnpbios_set_resources(struct pnp_dev *dev) { u8 nodenum = dev->number; struct pnp_bios_node *node; int ret; if (!pnpbios_is_dynamic(dev)) return -EPERM; pnp_dbg(&dev->dev, "set resources\n"); node = kzalloc(node_info.max_node_size, GFP_KERNEL); if (!node) return -1; if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) { kfree(node); return -ENODEV; } if (pnpbios_write_resources_to_node(dev, node) < 0) { kfree(node); return -1; } ret = pnp_bios_set_dev_node(node->handle, (char)PNPMODE_DYNAMIC, node); kfree(node); if (ret > 0) ret = -1; return ret; } static void pnpbios_zero_data_stream(struct pnp_bios_node *node) { unsigned char *p = (char *)node->data; unsigned char *end = (char *)(node->data + node->size); unsigned int len; int i; while ((char *)p < (char *)end) { if (p[0] & 0x80) { /* large tag */ len = (p[2] << 8) | p[1]; p += 3; } else { if (((p[0] >> 3) & 0x0f) == 0x0f) return; len = p[0] & 0x07; p += 1; } for (i = 0; i < len; i++) p[i] = 0; p += len; } printk(KERN_ERR "PnPBIOS: Resource structure did not contain an end tag.\n"); } static int pnpbios_disable_resources(struct pnp_dev *dev) { struct pnp_bios_node *node; u8 nodenum = dev->number; int ret; if (dev->flags & PNPBIOS_NO_DISABLE || !pnpbios_is_dynamic(dev)) return -EPERM; node = kzalloc(node_info.max_node_size, GFP_KERNEL); if (!node) return -ENOMEM; if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) { kfree(node); return -ENODEV; } pnpbios_zero_data_stream(node); ret = pnp_bios_set_dev_node(dev->number, (char)PNPMODE_DYNAMIC, node); kfree(node); if (ret > 0) ret = -1; return ret; } /* PnP Layer support */ struct pnp_protocol pnpbios_protocol = { .name = "Plug and Play BIOS", .get = pnpbios_get_resources, .set = pnpbios_set_resources, .disable = pnpbios_disable_resources, }; static int __init insert_device(struct 
pnp_bios_node *node) { struct list_head *pos; struct pnp_dev *dev; char id[8]; /* check if the device is already added */ list_for_each(pos, &pnpbios_protocol.devices) { dev = list_entry(pos, struct pnp_dev, protocol_list); if (dev->number == node->handle) return -1; } pnp_eisa_id_to_string(node->eisa_id & PNP_EISA_ID_MASK, id); dev = pnp_alloc_dev(&pnpbios_protocol, node->handle, id); if (!dev) return -1; pnpbios_parse_data_stream(dev, node); dev->active = pnp_is_active(dev); dev->flags = node->flags; if (!(dev->flags & PNPBIOS_NO_CONFIG)) dev->capabilities |= PNP_CONFIGURABLE; if (!(dev->flags & PNPBIOS_NO_DISABLE) && pnpbios_is_dynamic(dev)) dev->capabilities |= PNP_DISABLE; dev->capabilities |= PNP_READ; if (pnpbios_is_dynamic(dev)) dev->capabilities |= PNP_WRITE; if (dev->flags & PNPBIOS_REMOVABLE) dev->capabilities |= PNP_REMOVABLE; /* clear out the damaged flags */ if (!dev->active) pnp_init_resources(dev); pnp_add_device(dev); pnpbios_interface_attach_device(node); return 0; } static void __init build_devlist(void) { u8 nodenum; unsigned int nodes_got = 0; unsigned int devs = 0; struct pnp_bios_node *node; node = kzalloc(node_info.max_node_size, GFP_KERNEL); if (!node) return; for (nodenum = 0; nodenum < 0xff;) { u8 thisnodenum = nodenum; /* eventually we will want to use PNPMODE_STATIC here but for now * dynamic will help us catch buggy bioses to add to the blacklist. */ if (!pnpbios_dont_use_current_config) { if (pnp_bios_get_dev_node (&nodenum, (char)PNPMODE_DYNAMIC, node)) break; } else { if (pnp_bios_get_dev_node (&nodenum, (char)PNPMODE_STATIC, node)) break; } nodes_got++; if (insert_device(node) == 0) devs++; if (nodenum <= thisnodenum) { printk(KERN_ERR "PnPBIOS: build_devlist: Node number 0x%x is out of sequence following node 0x%x. Aborting.\n", (unsigned int)nodenum, (unsigned int)thisnodenum); break; } } kfree(node); printk(KERN_INFO "PnPBIOS: %i node%s reported by PnP BIOS; %i recorded by driver\n", nodes_got, nodes_got != 1 ? 
"s" : "", devs); } /* * * INIT AND EXIT * */ static int pnpbios_disabled; int pnpbios_dont_use_current_config; static int __init pnpbios_setup(char *str) { int invert; while ((str != NULL) && (*str != '\0')) { if (strncmp(str, "off", 3) == 0) pnpbios_disabled = 1; if (strncmp(str, "on", 2) == 0) pnpbios_disabled = 0; invert = (strncmp(str, "no-", 3) == 0); if (invert) str += 3; if (strncmp(str, "curr", 4) == 0) pnpbios_dont_use_current_config = invert; str = strchr(str, ','); if (str != NULL) str += strspn(str, ", \t"); } return 1; } __setup("pnpbios=", pnpbios_setup); /* PnP BIOS signature: "$PnP" */ #define PNP_SIGNATURE (('$' << 0) + ('P' << 8) + ('n' << 16) + ('P' << 24)) static int __init pnpbios_probe_system(void) { union pnp_bios_install_struct *check; u8 sum; int length, i; printk(KERN_INFO "PnPBIOS: Scanning system for PnP BIOS support...\n"); /* * Search the defined area (0xf0000-0xffff0) for a valid PnP BIOS * structure and, if one is found, sets up the selectors and * entry points */ for (check = (union pnp_bios_install_struct *)__va(0xf0000); check < (union pnp_bios_install_struct *)__va(0xffff0); check = (void *)check + 16) { if (check->fields.signature != PNP_SIGNATURE) continue; printk(KERN_INFO "PnPBIOS: Found PnP BIOS installation structure at 0x%p\n", check); length = check->fields.length; if (!length) { printk(KERN_ERR "PnPBIOS: installation structure is invalid, skipping\n"); continue; } for (sum = 0, i = 0; i < length; i++) sum += check->chars[i]; if (sum) { printk(KERN_ERR "PnPBIOS: installation structure is corrupted, skipping\n"); continue; } if (check->fields.version < 0x10) { printk(KERN_WARNING "PnPBIOS: PnP BIOS version %d.%d is not supported\n", check->fields.version >> 4, check->fields.version & 15); continue; } printk(KERN_INFO "PnPBIOS: PnP BIOS version %d.%d, entry 0x%x:0x%x, dseg 0x%x\n", check->fields.version >> 4, check->fields.version & 15, check->fields.pm16cseg, check->fields.pm16offset, check->fields.pm16dseg); pnp_bios_install = check; return 1; } printk(KERN_INFO "PnPBIOS: PnP BIOS support was not detected.\n"); return 0; } static int __init exploding_pnp_bios(const struct dmi_system_id *d) { printk(KERN_WARNING "%s detected. Disabling PnPBIOS\n", d->ident); return 0; } static struct dmi_system_id pnpbios_dmi_table[] __initdata = { { /* PnPBIOS GPF on boot */ .callback = exploding_pnp_bios, .ident = "Higraded P14H", .matches = { DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), DMI_MATCH(DMI_BIOS_VERSION, "07.00T"), DMI_MATCH(DMI_SYS_VENDOR, "Higraded"), DMI_MATCH(DMI_PRODUCT_NAME, "P14H"), }, }, { /* PnPBIOS GPF on boot */ .callback = exploding_pnp_bios, .ident = "ASUS P4P800", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_BOARD_NAME, "P4P800"), }, }, {} }; static int __init pnpbios_init(void) { int ret; #if defined(CONFIG_PPC) if (check_legacy_ioport(PNPBIOS_BASE)) return -ENODEV; #endif if (pnpbios_disabled || dmi_check_system(pnpbios_dmi_table) || paravirt_enabled()) { printk(KERN_INFO "PnPBIOS: Disabled\n"); return -ENODEV; } #ifdef CONFIG_PNPACPI if (!acpi_disabled && !pnpacpi_disabled) { pnpbios_disabled = 1; printk(KERN_INFO "PnPBIOS: Disabled by ACPI PNP\n"); return -ENODEV; } #endif /* CONFIG_ACPI */ /* scan the system for pnpbios support */ if (!pnpbios_probe_system()) return -ENODEV; /* make preparations for bios calls */ pnpbios_calls_init(pnp_bios_install); /* read the node info */ ret = pnp_bios_dev_node_info(&node_info); if (ret) { printk(KERN_ERR "PnPBIOS: Unable to get node info. 
Aborting.\n"); return ret; } /* register with the pnp layer */ ret = pnp_register_protocol(&pnpbios_protocol); if (ret) { printk(KERN_ERR "PnPBIOS: Unable to register driver. Aborting.\n"); return ret; } /* start the proc interface */ ret = pnpbios_proc_init(); if (ret) printk(KERN_ERR "PnPBIOS: Failed to create proc interface.\n"); /* scan for pnpbios devices */ build_devlist(); pnp_platform_devices = 1; return 0; } fs_initcall(pnpbios_init); static int __init pnpbios_thread_init(void) { #if defined(CONFIG_PPC) if (check_legacy_ioport(PNPBIOS_BASE)) return 0; #endif if (pnpbios_disabled) return 0; #ifdef CONFIG_HOTPLUG { struct task_struct *task; init_completion(&unload_sem); task = kthread_run(pnp_dock_thread, NULL, "kpnpbiosd"); if (IS_ERR(task)) return PTR_ERR(task); } #endif return 0; } /* Start the kernel thread later: */ module_init(pnpbios_thread_init); EXPORT_SYMBOL(pnpbios_protocol);
gpl-2.0
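pnpbios_zero_data_stream() above shows the PnP resource stream format in miniature: bit 7 of the lead byte selects a large tag, whose 16-bit little-endian length follows in the next two bytes, or a small tag, whose length sits in the low three bits; small tag type 0x0f is the end tag. The same walk, lifted into a standalone form with a fabricated sample stream:

#include <stdio.h>

static void walk(const unsigned char *p, const unsigned char *end)
{
	unsigned int len;

	while (p < end) {
		if (p[0] & 0x80) {	/* large tag: 16-bit LE length */
			len = (p[2] << 8) | p[1];
			printf("large tag 0x%02x, len %u\n", p[0] & 0x7f, len);
			p += 3;
		} else {		/* small tag: 3-bit length */
			if (((p[0] >> 3) & 0x0f) == 0x0f) {
				printf("end tag\n");
				return;
			}
			len = p[0] & 0x07;
			printf("small tag 0x%x, len %u\n",
			       (p[0] >> 3) & 0x0f, len);
			p += 1;
		}
		p += len;		/* skip the tag body */
	}
	printf("no end tag\n");		/* the error case core.c logs */
}

int main(void)
{
	/* One small tag (type 0x4, len 2), one large tag (type 0x10,
	 * len 1), then the end tag and its checksum byte. */
	static const unsigned char stream[] = {
		0x22, 0xaa, 0xbb,
		0x90, 0x01, 0x00, 0xcc,
		0x78, 0x00,
	};

	walk(stream, stream + sizeof(stream));
	return 0;
}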
AltraMayor/XIA-for-Linux
scripts/dtc/libfdt/fdt_empty_tree.c
6131
2903
/* * libfdt - Flat Device Tree manipulation * Copyright (C) 2012 David Gibson, IBM Corporation. * * libfdt is dual licensed: you can use it either under the terms of * the GPL, or the BSD license, at your option. * * a) This library is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, * MA 02110-1301 USA * * Alternatively, * * b) Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "libfdt_env.h" #include <fdt.h> #include <libfdt.h> #include "libfdt_internal.h" int fdt_create_empty_tree(void *buf, int bufsize) { int err; err = fdt_create(buf, bufsize); if (err) return err; err = fdt_finish_reservemap(buf); if (err) return err; err = fdt_begin_node(buf, ""); if (err) return err; err = fdt_end_node(buf); if (err) return err; err = fdt_finish(buf); if (err) return err; return fdt_open_into(buf, buf, bufsize); }
gpl-2.0
emceethemouth/kernel_cancro
drivers/scsi/device_handler/scsi_dh_emc.c
7667
19159
/* * Target driver for EMC CLARiiON AX/CX-series hardware. * Based on code from Lars Marowsky-Bree <lmb@suse.de> * and Ed Goggin <egoggin@emc.com>. * * Copyright (C) 2006 Red Hat, Inc. All rights reserved. * Copyright (C) 2006 Mike Christie * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/slab.h> #include <linux/module.h> #include <scsi/scsi.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dh.h> #include <scsi/scsi_device.h> #define CLARIION_NAME "emc" #define CLARIION_TRESPASS_PAGE 0x22 #define CLARIION_BUFFER_SIZE 0xFC #define CLARIION_TIMEOUT (60 * HZ) #define CLARIION_RETRIES 3 #define CLARIION_UNBOUND_LU -1 #define CLARIION_SP_A 0 #define CLARIION_SP_B 1 /* Flags */ #define CLARIION_SHORT_TRESPASS 1 #define CLARIION_HONOR_RESERVATIONS 2 /* LUN states */ #define CLARIION_LUN_UNINITIALIZED -1 #define CLARIION_LUN_UNBOUND 0 #define CLARIION_LUN_BOUND 1 #define CLARIION_LUN_OWNED 2 static unsigned char long_trespass[] = { 0, 0, 0, 0, 0, 0, 0, 0, CLARIION_TRESPASS_PAGE, /* Page code */ 0x09, /* Page length - 2 */ 0x01, /* Trespass code */ 0xff, 0xff, /* Trespass target */ 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */ }; static unsigned char short_trespass[] = { 0, 0, 0, 0, CLARIION_TRESPASS_PAGE, /* Page code */ 0x02, /* Page length - 2 */ 0x01, /* Trespass code */ 0xff, /* Trespass target */ }; static const char * lun_state[] = { "not bound", "bound", "owned", }; struct clariion_dh_data { /* * Flags: * CLARIION_SHORT_TRESPASS * Use short trespass command (FC-series) or the long version * (default for AX/CX CLARiiON arrays). * * CLARIION_HONOR_RESERVATIONS * Whether or not (default) to honor SCSI reservations when * initiating a switch-over. */ unsigned flags; /* * I/O buffer for both MODE_SELECT and INQUIRY commands. */ unsigned char buffer[CLARIION_BUFFER_SIZE]; /* * SCSI sense buffer for commands -- assumes serial issuance * and completion sequence of all commands for same multipath. */ unsigned char sense[SCSI_SENSE_BUFFERSIZE]; unsigned int senselen; /* * LUN state */ int lun_state; /* * SP Port number */ int port; /* * which SP (A=0,B=1,UNBOUND=-1) is the default SP for this * path's mapped LUN */ int default_sp; /* * which SP (A=0,B=1,UNBOUND=-1) is the active SP for this * path's mapped LUN */ int current_sp; }; static inline struct clariion_dh_data *get_clariion_data(struct scsi_device *sdev) { struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; BUG_ON(scsi_dh_data == NULL); return ((struct clariion_dh_data *) scsi_dh_data->buf); } /* * Parse MODE_SELECT cmd reply. 
*/ static int trespass_endio(struct scsi_device *sdev, char *sense) { int err = SCSI_DH_IO; struct scsi_sense_hdr sshdr; if (!scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) { sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, " "0x%2x, 0x%2x while sending CLARiiON trespass " "command.\n", CLARIION_NAME, sshdr.sense_key, sshdr.asc, sshdr.ascq); if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) && (sshdr.ascq == 0x00)) { /* * Array based copy in progress -- do not send * mode_select or copy will be aborted mid-stream. */ sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in " "progress while sending CLARiiON trespass " "command.\n", CLARIION_NAME); err = SCSI_DH_DEV_TEMP_BUSY; } else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) && (sshdr.ascq == 0x03)) { /* * LUN Not Ready - Manual Intervention Required * indicates in-progress ucode upgrade (NDU). */ sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress " "ucode upgrade NDU operation while sending " "CLARiiON trespass command.\n", CLARIION_NAME); err = SCSI_DH_DEV_TEMP_BUSY; } else err = SCSI_DH_DEV_FAILED; } else { sdev_printk(KERN_INFO, sdev, "%s: failed to send MODE SELECT, no sense available\n", CLARIION_NAME); } return err; } static int parse_sp_info_reply(struct scsi_device *sdev, struct clariion_dh_data *csdev) { int err = SCSI_DH_OK; /* check for in-progress ucode upgrade (NDU) */ if (csdev->buffer[48] != 0) { sdev_printk(KERN_NOTICE, sdev, "%s: Detected in-progress " "ucode upgrade NDU operation while finding " "current active SP.", CLARIION_NAME); err = SCSI_DH_DEV_TEMP_BUSY; goto out; } if (csdev->buffer[4] > 2) { /* Invalid buffer format */ sdev_printk(KERN_NOTICE, sdev, "%s: invalid VPD page 0xC0 format\n", CLARIION_NAME); err = SCSI_DH_NOSYS; goto out; } switch (csdev->buffer[28] & 0x0f) { case 6: sdev_printk(KERN_NOTICE, sdev, "%s: ALUA failover mode detected\n", CLARIION_NAME); break; case 4: /* Linux failover */ break; default: sdev_printk(KERN_WARNING, sdev, "%s: Invalid failover mode %d\n", CLARIION_NAME, csdev->buffer[28] & 0x0f); err = SCSI_DH_NOSYS; goto out; } csdev->default_sp = csdev->buffer[5]; csdev->lun_state = csdev->buffer[4]; csdev->current_sp = csdev->buffer[8]; csdev->port = csdev->buffer[7]; out: return err; } #define emc_default_str "FC (Legacy)" static char * parse_sp_model(struct scsi_device *sdev, unsigned char *buffer) { unsigned char len = buffer[4] + 5; char *sp_model = NULL; unsigned char sp_len, serial_len; if (len < 160) { sdev_printk(KERN_WARNING, sdev, "%s: Invalid information section length %d\n", CLARIION_NAME, len); /* Check for old FC arrays */ if (!strncmp(buffer + 8, "DGC", 3)) { /* Old FC array, not supporting extended information */ sp_model = emc_default_str; } goto out; } /* * Parse extended information for SP model number */ serial_len = buffer[160]; if (serial_len == 0 || serial_len + 161 > len) { sdev_printk(KERN_WARNING, sdev, "%s: Invalid array serial number length %d\n", CLARIION_NAME, serial_len); goto out; } sp_len = buffer[99]; if (sp_len == 0 || serial_len + sp_len + 161 > len) { sdev_printk(KERN_WARNING, sdev, "%s: Invalid model number length %d\n", CLARIION_NAME, sp_len); goto out; } sp_model = &buffer[serial_len + 161]; /* Strip whitespace at the end */ while (sp_len > 1 && sp_model[sp_len - 1] == ' ') sp_len--; sp_model[sp_len] = '\0'; out: return sp_model; } /* * Get block request for REQ_BLOCK_PC command issued to path. Currently * limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands. 
* * Uses data and sense buffers in hardware handler context structure and * assumes serial servicing of commands, both issuance and completion. */ static struct request *get_req(struct scsi_device *sdev, int cmd, unsigned char *buffer) { struct request *rq; int len = 0; rq = blk_get_request(sdev->request_queue, (cmd != INQUIRY) ? WRITE : READ, GFP_NOIO); if (!rq) { sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed"); return NULL; } rq->cmd_len = COMMAND_SIZE(cmd); rq->cmd[0] = cmd; switch (cmd) { case MODE_SELECT: len = sizeof(short_trespass); rq->cmd[1] = 0x10; rq->cmd[4] = len; break; case MODE_SELECT_10: len = sizeof(long_trespass); rq->cmd[1] = 0x10; rq->cmd[8] = len; break; case INQUIRY: len = CLARIION_BUFFER_SIZE; rq->cmd[4] = len; memset(buffer, 0, len); break; default: BUG_ON(1); break; } rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; rq->timeout = CLARIION_TIMEOUT; rq->retries = CLARIION_RETRIES; if (blk_rq_map_kern(rq->q, rq, buffer, len, GFP_NOIO)) { blk_put_request(rq); return NULL; } return rq; } static int send_inquiry_cmd(struct scsi_device *sdev, int page, struct clariion_dh_data *csdev) { struct request *rq = get_req(sdev, INQUIRY, csdev->buffer); int err; if (!rq) return SCSI_DH_RES_TEMP_UNAVAIL; rq->sense = csdev->sense; memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); rq->sense_len = csdev->senselen = 0; rq->cmd[0] = INQUIRY; if (page != 0) { rq->cmd[1] = 1; rq->cmd[2] = page; } err = blk_execute_rq(sdev->request_queue, NULL, rq, 1); if (err == -EIO) { sdev_printk(KERN_INFO, sdev, "%s: failed to send %s INQUIRY: %x\n", CLARIION_NAME, page?"EVPD":"standard", rq->errors); csdev->senselen = rq->sense_len; err = SCSI_DH_IO; } blk_put_request(rq); return err; } static int send_trespass_cmd(struct scsi_device *sdev, struct clariion_dh_data *csdev) { struct request *rq; unsigned char *page22; int err, len, cmd; if (csdev->flags & CLARIION_SHORT_TRESPASS) { page22 = short_trespass; if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS)) /* Set Honor Reservations bit */ page22[6] |= 0x80; len = sizeof(short_trespass); cmd = MODE_SELECT; } else { page22 = long_trespass; if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS)) /* Set Honor Reservations bit */ page22[10] |= 0x80; len = sizeof(long_trespass); cmd = MODE_SELECT_10; } BUG_ON((len > CLARIION_BUFFER_SIZE)); memcpy(csdev->buffer, page22, len); rq = get_req(sdev, cmd, csdev->buffer); if (!rq) return SCSI_DH_RES_TEMP_UNAVAIL; rq->sense = csdev->sense; memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); rq->sense_len = csdev->senselen = 0; err = blk_execute_rq(sdev->request_queue, NULL, rq, 1); if (err == -EIO) { if (rq->sense_len) { err = trespass_endio(sdev, csdev->sense); } else { sdev_printk(KERN_INFO, sdev, "%s: failed to send MODE SELECT: %x\n", CLARIION_NAME, rq->errors); } } blk_put_request(rq); return err; } static int clariion_check_sense(struct scsi_device *sdev, struct scsi_sense_hdr *sense_hdr) { switch (sense_hdr->sense_key) { case NOT_READY: if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x03) /* * LUN Not Ready - Manual Intervention Required * indicates this is a passive path. * * FIXME: However, if this is seen and EVPD C0 * indicates that this is due to a NDU in * progress, we should set FAIL_PATH too. * This indicates we might have to do a SCSI * inquiry in the end_io path. Ugh. * * Can return FAILED only when we want the error * recovery process to kick in. 
*/ return SUCCESS; break; case ILLEGAL_REQUEST: if (sense_hdr->asc == 0x25 && sense_hdr->ascq == 0x01) /* * An array based copy is in progress. Do not * fail the path, do not bypass to another PG, * do not retry. Fail the IO immediately. * (Actually this is the same conclusion as in * the default handler, but lets make sure.) * * Can return FAILED only when we want the error * recovery process to kick in. */ return SUCCESS; break; case UNIT_ATTENTION: if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) /* * Unit Attention Code. This is the first IO * to the new path, so just retry. */ return ADD_TO_MLQUEUE; break; } return SCSI_RETURN_NOT_HANDLED; } static int clariion_prep_fn(struct scsi_device *sdev, struct request *req) { struct clariion_dh_data *h = get_clariion_data(sdev); int ret = BLKPREP_OK; if (h->lun_state != CLARIION_LUN_OWNED) { ret = BLKPREP_KILL; req->cmd_flags |= REQ_QUIET; } return ret; } static int clariion_std_inquiry(struct scsi_device *sdev, struct clariion_dh_data *csdev) { int err; char *sp_model; err = send_inquiry_cmd(sdev, 0, csdev); if (err != SCSI_DH_OK && csdev->senselen) { struct scsi_sense_hdr sshdr; if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) { sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code " "%02x/%02x/%02x\n", CLARIION_NAME, sshdr.sense_key, sshdr.asc, sshdr.ascq); } err = SCSI_DH_IO; goto out; } sp_model = parse_sp_model(sdev, csdev->buffer); if (!sp_model) { err = SCSI_DH_DEV_UNSUPP; goto out; } /* * FC Series arrays do not support long trespass */ if (!strlen(sp_model) || !strncmp(sp_model, "FC",2)) csdev->flags |= CLARIION_SHORT_TRESPASS; sdev_printk(KERN_INFO, sdev, "%s: detected Clariion %s, flags %x\n", CLARIION_NAME, sp_model, csdev->flags); out: return err; } static int clariion_send_inquiry(struct scsi_device *sdev, struct clariion_dh_data *csdev) { int err, retry = CLARIION_RETRIES; retry: err = send_inquiry_cmd(sdev, 0xC0, csdev); if (err != SCSI_DH_OK && csdev->senselen) { struct scsi_sense_hdr sshdr; err = scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE, &sshdr); if (!err) return SCSI_DH_IO; err = clariion_check_sense(sdev, &sshdr); if (retry > 0 && err == ADD_TO_MLQUEUE) { retry--; goto retry; } sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code " "%02x/%02x/%02x\n", CLARIION_NAME, sshdr.sense_key, sshdr.asc, sshdr.ascq); err = SCSI_DH_IO; } else { err = parse_sp_info_reply(sdev, csdev); } return err; } static int clariion_activate(struct scsi_device *sdev, activate_complete fn, void *data) { struct clariion_dh_data *csdev = get_clariion_data(sdev); int result; result = clariion_send_inquiry(sdev, csdev); if (result != SCSI_DH_OK) goto done; if (csdev->lun_state == CLARIION_LUN_OWNED) goto done; result = send_trespass_cmd(sdev, csdev); if (result != SCSI_DH_OK) goto done; sdev_printk(KERN_INFO, sdev,"%s: %s trespass command sent\n", CLARIION_NAME, csdev->flags&CLARIION_SHORT_TRESPASS?"short":"long" ); /* Update status */ result = clariion_send_inquiry(sdev, csdev); if (result != SCSI_DH_OK) goto done; done: sdev_printk(KERN_INFO, sdev, "%s: at SP %c Port %d (%s, default SP %c)\n", CLARIION_NAME, csdev->current_sp + 'A', csdev->port, lun_state[csdev->lun_state], csdev->default_sp + 'A'); if (fn) fn(data, result); return 0; } /* * params - parameters in the following format * "no_of_params\0param1\0param2\0param3\0...\0" * for example, string for 2 parameters with value 10 and 21 * is specified as "2\010\021\0". 
*/ static int clariion_set_params(struct scsi_device *sdev, const char *params) { struct clariion_dh_data *csdev = get_clariion_data(sdev); unsigned int hr = 0, st = 0, argc; const char *p = params; int result = SCSI_DH_OK; if ((sscanf(params, "%u", &argc) != 1) || (argc != 2)) return -EINVAL; while (*p++) ; if ((sscanf(p, "%u", &st) != 1) || (st > 1)) return -EINVAL; while (*p++) ; if ((sscanf(p, "%u", &hr) != 1) || (hr > 1)) return -EINVAL; if (st) csdev->flags |= CLARIION_SHORT_TRESPASS; else csdev->flags &= ~CLARIION_SHORT_TRESPASS; if (hr) csdev->flags |= CLARIION_HONOR_RESERVATIONS; else csdev->flags &= ~CLARIION_HONOR_RESERVATIONS; /* * If this path is owned, we have to send a trespass command * with the new parameters. If not, simply return. Next trespass * command would use the parameters. */ if (csdev->lun_state != CLARIION_LUN_OWNED) goto done; csdev->lun_state = CLARIION_LUN_UNINITIALIZED; result = send_trespass_cmd(sdev, csdev); if (result != SCSI_DH_OK) goto done; /* Update status */ result = clariion_send_inquiry(sdev, csdev); done: return result; } static const struct scsi_dh_devlist clariion_dev_list[] = { {"DGC", "RAID"}, {"DGC", "DISK"}, {"DGC", "VRAID"}, {NULL, NULL}, }; static bool clariion_match(struct scsi_device *sdev) { int i; if (scsi_device_tpgs(sdev)) return false; for (i = 0; clariion_dev_list[i].vendor; i++) { if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor, strlen(clariion_dev_list[i].vendor)) && !strncmp(sdev->model, clariion_dev_list[i].model, strlen(clariion_dev_list[i].model))) { return true; } } return false; } static int clariion_bus_attach(struct scsi_device *sdev); static void clariion_bus_detach(struct scsi_device *sdev); static struct scsi_device_handler clariion_dh = { .name = CLARIION_NAME, .module = THIS_MODULE, .devlist = clariion_dev_list, .attach = clariion_bus_attach, .detach = clariion_bus_detach, .check_sense = clariion_check_sense, .activate = clariion_activate, .prep_fn = clariion_prep_fn, .set_params = clariion_set_params, .match = clariion_match, }; static int clariion_bus_attach(struct scsi_device *sdev) { struct scsi_dh_data *scsi_dh_data; struct clariion_dh_data *h; unsigned long flags; int err; scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) + sizeof(*h) , GFP_KERNEL); if (!scsi_dh_data) { sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n", CLARIION_NAME); return -ENOMEM; } scsi_dh_data->scsi_dh = &clariion_dh; h = (struct clariion_dh_data *) scsi_dh_data->buf; h->lun_state = CLARIION_LUN_UNINITIALIZED; h->default_sp = CLARIION_UNBOUND_LU; h->current_sp = CLARIION_UNBOUND_LU; err = clariion_std_inquiry(sdev, h); if (err != SCSI_DH_OK) goto failed; err = clariion_send_inquiry(sdev, h); if (err != SCSI_DH_OK) goto failed; if (!try_module_get(THIS_MODULE)) goto failed; spin_lock_irqsave(sdev->request_queue->queue_lock, flags); sdev->scsi_dh_data = scsi_dh_data; spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); sdev_printk(KERN_INFO, sdev, "%s: connected to SP %c Port %d (%s, default SP %c)\n", CLARIION_NAME, h->current_sp + 'A', h->port, lun_state[h->lun_state], h->default_sp + 'A'); return 0; failed: kfree(scsi_dh_data); sdev_printk(KERN_ERR, sdev, "%s: not attached\n", CLARIION_NAME); return -EINVAL; } static void clariion_bus_detach(struct scsi_device *sdev) { struct scsi_dh_data *scsi_dh_data; unsigned long flags; spin_lock_irqsave(sdev->request_queue->queue_lock, flags); scsi_dh_data = sdev->scsi_dh_data; sdev->scsi_dh_data = NULL; spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 
sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", CLARIION_NAME); kfree(scsi_dh_data); module_put(THIS_MODULE); } static int __init clariion_init(void) { int r; r = scsi_register_device_handler(&clariion_dh); if (r != 0) printk(KERN_ERR "%s: Failed to register scsi device handler.", CLARIION_NAME); return r; } static void __exit clariion_exit(void) { scsi_unregister_device_handler(&clariion_dh); } module_init(clariion_init); module_exit(clariion_exit); MODULE_DESCRIPTION("EMC CX/AX/FC-family driver"); MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, Chandra Seetharaman <sekharan@us.ibm.com>"); MODULE_LICENSE("GPL");
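One detail of clariion_set_params() worth making concrete: the parameter string is a sequence of NUL-terminated decimal tokens, and the "2\010\021\0" example in the comment above describes raw byte layout, not a literal C escape sequence (in C, \010 is a single octal character, backspace). A hypothetical sketch of building a valid buffer that enables short trespass and disables honoring reservations:

/* Counted parameter string for clariion_set_params():
 * "argc\0param1\0param2\0".  Adjacent string literals keep each
 * "\0" from swallowing the digit that follows it.
 * Bytes: '2' 0 '1' 0 '0' 0  ->  argc=2, st=1 (short trespass),
 * hr=0 (do not honor reservations). */
static const char clariion_params[] = "2" "\0" "1" "\0" "0";

The handler reads each token with sscanf("%u") and advances past its terminating NUL with the while (*p++) ; loop, so the buffer above parses as argc=2, st=1, hr=0.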
gpl-2.0
mmontuori/tegra-olympus
arch/parisc/kernel/inventory.c
7667
16348
/* * inventory.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries) * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard * * These are the routines to discover what hardware exists in this box. * This task is complicated by there being 3 different ways of * performing an inventory, depending largely on the age of the box. * The recommended way to do this is to check to see whether the machine * is a `Snake' first, then try System Map, then try PAT. We try System * Map before checking for a Snake -- this probably doesn't cause any * problems, but... */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/mm.h> #include <asm/hardware.h> #include <asm/io.h> #include <asm/mmzone.h> #include <asm/pdc.h> #include <asm/pdcpat.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/parisc-device.h> /* ** Debug options ** DEBUG_PAT Dump details which PDC PAT provides about ranges/devices. */ #undef DEBUG_PAT int pdc_type __read_mostly = PDC_TYPE_ILLEGAL; void __init setup_pdc(void) { long status; unsigned int bus_id; struct pdc_system_map_mod_info module_result; struct pdc_module_path module_path; struct pdc_model model; #ifdef CONFIG_64BIT struct pdc_pat_cell_num cell_info; #endif /* Determine the pdc "type" used on this machine */ printk(KERN_INFO "Determining PDC firmware type: "); status = pdc_system_map_find_mods(&module_result, &module_path, 0); if (status == PDC_OK) { pdc_type = PDC_TYPE_SYSTEM_MAP; printk("System Map.\n"); return; } /* * If the machine doesn't support PDC_SYSTEM_MAP then either it * is a pdc pat box, or it is an older box. All 64 bit capable * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP. */ /* * TODO: We should test for 64 bit capability and give a * clearer message. */ #ifdef CONFIG_64BIT status = pdc_pat_cell_get_number(&cell_info); if (status == PDC_OK) { pdc_type = PDC_TYPE_PAT; printk("64 bit PAT.\n"); return; } #endif /* Check the CPU's bus ID. There's probably a better test. */ status = pdc_model_info(&model); bus_id = (model.hversion >> (4 + 7)) & 0x1f; switch (bus_id) { case 0x4: /* 720, 730, 750, 735, 755 */ case 0x6: /* 705, 710 */ case 0x7: /* 715, 725 */ case 0x8: /* 745, 747, 742 */ case 0xA: /* 712 and similar */ case 0xC: /* 715/64, at least */ pdc_type = PDC_TYPE_SNAKE; printk("Snake.\n"); return; default: /* Everything else */ printk("Unsupported.\n"); panic("If this is a 64-bit machine, please try a 64-bit kernel.\n"); } } #define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */ static void __init set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start, unsigned long pages4k) { /* Rather than aligning and potentially throwing away * memory, we'll assume that any ranges are already * nicely aligned with any reasonable page size, and * panic if they are not (it's more likely that the * pdc info is bad in this case). 
*/ if (unlikely( ((start & (PAGE_SIZE - 1)) != 0) || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) { panic("Memory range doesn't align with page size!\n"); } pmem_ptr->start_pfn = (start >> PAGE_SHIFT); pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT); } static void __init pagezero_memconfig(void) { unsigned long npages; /* Use the 32 bit information from page zero to create a single * entry in the pmem_ranges[] table. * * We currently don't support machines with contiguous memory * >= 4 Gb, who report that memory using 64 bit only fields * on page zero. It's not worth doing until it can be tested, * and it is not clear we can support those machines for other * reasons. * * If that support is done in the future, this is where it * should be done. */ npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT); set_pmem_entry(pmem_ranges,0UL,npages); npmem_ranges = 1; } #ifdef CONFIG_64BIT /* All of the PDC PAT specific code is 64-bit only */ /* ** The module object is filled via PDC_PAT_CELL[Return Cell Module]. ** If a module is found, register module will get the IODC bytes via ** pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter. ** ** The IO view can be used by PDC_PAT_CELL[Return Cell Module] ** only for SBAs and LBAs. This view will cause an invalid ** argument error for all other cell module types. ** */ static int __init pat_query_module(ulong pcell_loc, ulong mod_index) { pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell; unsigned long bytecnt; unsigned long temp; /* 64-bit scratch value */ long status; /* PDC return value status */ struct parisc_device *dev; pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL); if (!pa_pdc_cell) panic("couldn't allocate memory for PDC_PAT_CELL!"); /* return cell module (PA or Processor view) */ status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index, PA_VIEW, pa_pdc_cell); if (status != PDC_OK) { /* no more cell modules or error; don't leak the cell buffer */ kfree(pa_pdc_cell); return status; } temp = pa_pdc_cell->cba; dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path)); if (!dev) { kfree(pa_pdc_cell); return PDC_OK; } /* alloc_pa_dev sets dev->hpa */ /* ** save parameters in the parisc_device ** (The idea being the device driver will call pdc_pat_cell_module() ** and store the results in its own data structure.) */ dev->pcell_loc = pcell_loc; dev->mod_index = mod_index; /* save generic info returned from the call */ /* REVISIT: who is the consumer of this? not sure yet... */ dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */ dev->pmod_loc = pa_pdc_cell->mod_location; register_parisc_device(dev); /* advertise device */ #ifdef DEBUG_PAT pdc_pat_cell_mod_maddr_block_t io_pdc_cell; /* dump what we see so far... 
*/ switch (PAT_GET_ENTITY(dev->mod_info)) { unsigned long i; case PAT_ENTITY_PROC: printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n", pa_pdc_cell->mod[0]); break; case PAT_ENTITY_MEM: printk(KERN_DEBUG "PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n", pa_pdc_cell->mod[0], pa_pdc_cell->mod[1], pa_pdc_cell->mod[2]); break; case PAT_ENTITY_CA: printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc); break; case PAT_ENTITY_PBC: printk(KERN_DEBUG "PAT_ENTITY_PBC: "); goto print_ranges; case PAT_ENTITY_SBA: printk(KERN_DEBUG "PAT_ENTITY_SBA: "); goto print_ranges; case PAT_ENTITY_LBA: printk(KERN_DEBUG "PAT_ENTITY_LBA: "); print_ranges: pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index, IO_VIEW, &io_pdc_cell); printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]); for (i = 0; i < pa_pdc_cell->mod[1]; i++) { printk(KERN_DEBUG " PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n", i, pa_pdc_cell->mod[2 + i * 3], /* type */ pa_pdc_cell->mod[3 + i * 3], /* start */ pa_pdc_cell->mod[4 + i * 3]); /* finish (ie end) */ printk(KERN_DEBUG " IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n", i, io_pdc_cell.mod[2 + i * 3], /* type */ io_pdc_cell.mod[3 + i * 3], /* start */ io_pdc_cell.mod[4 + i * 3]); /* finish (ie end) */ } printk(KERN_DEBUG "\n"); break; } #endif /* DEBUG_PAT */ kfree(pa_pdc_cell); return PDC_OK; } /* pat pdc can return information about a variety of different * types of memory (e.g. firmware, i/o, etc.) but we only care about * the usable physical ram right now. Since the firmware specific * information is allocated on the stack, we'll be generous, in * case there is a lot of other information we don't care about. */ #define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES) static void __init pat_memconfig(void) { unsigned long actual_len; struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1]; struct pdc_pat_pd_addr_map_entry *mtbl_ptr; physmem_range_t *pmem_ptr; long status; int entries; unsigned long length; int i; length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry); status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L); if ((status != PDC_OK) || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) { /* The above pdc call shouldn't fail, but, just in * case, just use the PAGE0 info. */ printk("\n\n\n"); printk(KERN_WARNING "WARNING! Could not get full memory configuration. " "All memory may not be used!\n\n\n"); pagezero_memconfig(); return; } entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry); if (entries > PAT_MAX_RANGES) { printk(KERN_WARNING "This Machine has more memory ranges than we support!\n"); printk(KERN_WARNING "Some memory may not be used!\n"); } /* Copy information into the firmware independent pmem_ranges * array, skipping types we don't care about. Notice we said * "may" above. We'll use all the entries that were returned. 
*/ npmem_ranges = 0; mtbl_ptr = mem_table; pmem_ptr = pmem_ranges; /* Global firmware independent table */ for (i = 0; i < entries; i++,mtbl_ptr++) { if ( (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR) || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY) || (mtbl_ptr->pages == 0) || ( (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL) && (mtbl_ptr->memory_usage != PAT_MEMUSE_GI) && (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) { continue; } if (npmem_ranges == MAX_PHYSMEM_RANGES) { printk(KERN_WARNING "This Machine has more memory ranges than we support!\n"); printk(KERN_WARNING "Some memory will not be used!\n"); break; } set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages); npmem_ranges++; } } static int __init pat_inventory(void) { int status; ulong mod_index = 0; struct pdc_pat_cell_num cell_info; /* ** Note: Prelude (and it's successors: Lclass, A400/500) only ** implement PDC_PAT_CELL sub-options 0 and 2. */ status = pdc_pat_cell_get_number(&cell_info); if (status != PDC_OK) { return 0; } #ifdef DEBUG_PAT printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num, cell_info.cell_loc); #endif while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) { mod_index++; } return mod_index; } /* We only look for extended memory ranges on a 64 bit capable box */ static void __init sprockets_memconfig(void) { struct pdc_memory_table_raddr r_addr; struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES]; struct pdc_memory_table *mtbl_ptr; physmem_range_t *pmem_ptr; long status; int entries; int i; status = pdc_mem_mem_table(&r_addr,mem_table, (unsigned long)MAX_PHYSMEM_RANGES); if (status != PDC_OK) { /* The above pdc call only works on boxes with sprockets * firmware (newer B,C,J class). Other non PAT PDC machines * do support more than 3.75 Gb of memory, but we don't * support them yet. */ pagezero_memconfig(); return; } if (r_addr.entries_total > MAX_PHYSMEM_RANGES) { printk(KERN_WARNING "This Machine has more memory ranges than we support!\n"); printk(KERN_WARNING "Some memory will not be used!\n"); } entries = (int)r_addr.entries_returned; npmem_ranges = 0; mtbl_ptr = mem_table; pmem_ptr = pmem_ranges; /* Global firmware independent table */ for (i = 0; i < entries; i++,mtbl_ptr++) { set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages); npmem_ranges++; } } #else /* !CONFIG_64BIT */ #define pat_inventory() do { } while (0) #define pat_memconfig() do { } while (0) #define sprockets_memconfig() pagezero_memconfig() #endif /* !CONFIG_64BIT */ #ifndef CONFIG_PA20 /* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */ static struct parisc_device * __init legacy_create_device(struct pdc_memory_map *r_addr, struct pdc_module_path *module_path) { struct parisc_device *dev; int status = pdc_mem_map_hpa(r_addr, module_path); if (status != PDC_OK) return NULL; dev = alloc_pa_dev(r_addr->hpa, &module_path->path); if (dev == NULL) return NULL; register_parisc_device(dev); return dev; } /** * snake_inventory * * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used. * To use it, we initialise the mod_path.bc to 0xff and try all values of * mod to get the HPA for the top-level devices. Bus adapters may have * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the * module, then trying all possible functions. 
*/ static void __init snake_inventory(void) { int mod; for (mod = 0; mod < 16; mod++) { struct parisc_device *dev; struct pdc_module_path module_path; struct pdc_memory_map r_addr; unsigned int func; memset(module_path.path.bc, 0xff, 6); module_path.path.mod = mod; dev = legacy_create_device(&r_addr, &module_path); if ((!dev) || (dev->id.hw_type != HPHW_BA)) continue; memset(module_path.path.bc, 0xff, 4); module_path.path.bc[4] = mod; for (func = 0; func < 16; func++) { module_path.path.bc[5] = 0; module_path.path.mod = func; legacy_create_device(&r_addr, &module_path); } } } #else /* CONFIG_PA20 */ #define snake_inventory() do { } while (0) #endif /* CONFIG_PA20 */ /* Common 32/64 bit based code goes here */ /** * add_system_map_addresses - Add additional addresses to the parisc device. * @dev: The parisc device. * @num_addrs: The number of addresses to add. * @module_instance: The system_map module instance. * * This function adds any additional addresses reported by the system_map * firmware to the parisc device. */ static void __init add_system_map_addresses(struct parisc_device *dev, int num_addrs, int module_instance) { int i; long status; struct pdc_system_map_addr_info addr_result; dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL); if(!dev->addr) { printk(KERN_ERR "%s %s(): memory allocation failure\n", __FILE__, __func__); return; } for(i = 1; i <= num_addrs; ++i) { status = pdc_system_map_find_addrs(&addr_result, module_instance, i); if(PDC_OK == status) { dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr; dev->num_addrs++; } else { printk(KERN_WARNING "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n", status, i); } } } /** * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP. * * This function attempts to retrieve and register all the devices firmware * knows about via the SYSTEM_MAP PDC call. */ static void __init system_map_inventory(void) { int i; long status = PDC_OK; for (i = 0; i < 256; i++) { struct parisc_device *dev; struct pdc_system_map_mod_info module_result; struct pdc_module_path module_path; status = pdc_system_map_find_mods(&module_result, &module_path, i); if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD)) break; if (status != PDC_OK) continue; dev = alloc_pa_dev(module_result.mod_addr, &module_path.path); if (!dev) continue; register_parisc_device(dev); /* if available, get the additional addresses for a module */ if (!module_result.add_addrs) continue; add_system_map_addresses(dev, module_result.add_addrs, i); } walk_central_bus(); return; } void __init do_memory_inventory(void) { switch (pdc_type) { case PDC_TYPE_PAT: pat_memconfig(); break; case PDC_TYPE_SYSTEM_MAP: sprockets_memconfig(); break; case PDC_TYPE_SNAKE: pagezero_memconfig(); return; default: panic("Unknown PDC type!\n"); } if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) { printk(KERN_WARNING "Bad memory configuration returned!\n"); printk(KERN_WARNING "Some memory may not be used!\n"); pagezero_memconfig(); } } void __init do_device_inventory(void) { printk(KERN_INFO "Searching for devices...\n"); init_parisc_bus(); switch (pdc_type) { case PDC_TYPE_PAT: pat_inventory(); break; case PDC_TYPE_SYSTEM_MAP: system_map_inventory(); break; case PDC_TYPE_SNAKE: snake_inventory(); break; default: panic("Unknown PDC type!\n"); } printk(KERN_INFO "Found devices:\n"); print_parisc_devices(); }
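set_pmem_entry() above rescales PDC's fixed 4 KiB page counts into the kernel's own page units through PDC_PAGE_ADJ_SHIFT, panicking when a range cannot be expressed in whole kernel pages. A standalone userspace sketch of that arithmetic (the start address and page count are made-up values; parisc kernels may be configured with 4 KiB, 16 KiB or 64 KiB pages):

#include <stdio.h>

#define PAGE_SHIFT 14UL			/* assumed: 16 KiB kernel pages */
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12)	/* pdc pages are always 4k */

int main(void)
{
	unsigned long start = 0x40000000UL;	/* hypothetical range start */
	unsigned long pages4k = 1024;		/* as reported by PDC */

	/* same alignment check set_pmem_entry() panics on */
	if ((start & (PAGE_SIZE - 1)) ||
	    (pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1))) {
		fprintf(stderr, "range does not align with page size\n");
		return 1;
	}
	/* 1024 pdc pages become 256 kernel pages at PAGE_SHIFT == 14 */
	printf("start_pfn=%lu pages=%lu\n",
	       start >> PAGE_SHIFT, pages4k >> PDC_PAGE_ADJ_SHIFT);
	return 0;
}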
gpl-2.0
thicklizard/Komodo1
arch/microblaze/lib/memset.c
7667
2427
/* * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> * Copyright (C) 2008-2009 PetaLogix * Copyright (C) 2007 John Williams * * Reasonably optimised generic C-code for memset on Microblaze * This is generic C code to do efficient, alignment-aware memset. * * It is based on demo code originally Copyright 2001 by Intel Corp, taken from * http://www.embedded.com/showArticle.jhtml?articleID=19205567 * * Attempts were made, unsuccessfully, to contact the original * author of this code (Michael Morrow, Intel). Below is the original * copyright notice. * * This software has been developed by Intel Corporation. * Intel specifically disclaims all warranties, express or * implied, and all liability, including consequential and * other indirect damages, for the use of this program, including * liability for infringement of any proprietary rights, * and including the warranties of merchantability and fitness * for a particular purpose. Intel does not assume any * responsibility for and errors which may appear in this program * not any responsibility to update it. */ #include <linux/types.h> #include <linux/stddef.h> #include <linux/compiler.h> #include <linux/module.h> #include <linux/string.h> #ifdef __HAVE_ARCH_MEMSET #ifndef CONFIG_OPT_LIB_FUNCTION void *memset(void *v_src, int c, __kernel_size_t n) { char *src = v_src; /* Truncate c to 8 bits */ c = (c & 0xFF); /* Simple, byte oriented memset. */ while (n--) *src++ = c; return v_src; } #else /* CONFIG_OPT_LIB_FUNCTION */ void *memset(void *v_src, int c, __kernel_size_t n) { char *src = v_src; uint32_t *i_src; uint32_t w32 = 0; /* Truncate c to 8 bits */ c = (c & 0xFF); if (unlikely(c)) { /* Make a repeating word out of it */ w32 = c; w32 |= w32 << 8; w32 |= w32 << 16; } if (likely(n >= 4)) { /* Align the destination to a word boundary */ /* This is done in an endian independent manner */ switch ((unsigned) src & 3) { case 1: *src++ = c; --n; /* fall through */ case 2: *src++ = c; --n; /* fall through */ case 3: *src++ = c; --n; } i_src = (void *)src; /* Do as many full-word stores as we can */ for (; n >= 4; n -= 4) *i_src++ = w32; src = (void *)i_src; } /* Simple, byte oriented memset for the rest of the count. */ while (n--) *src++ = c; return v_src; } #endif /* CONFIG_OPT_LIB_FUNCTION */ EXPORT_SYMBOL(memset); #endif /* __HAVE_ARCH_MEMSET */
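The CONFIG_OPT_LIB_FUNCTION variant hinges on replicating the fill byte into a 32-bit word so the aligned middle of the region can be stored a word at a time. A small userspace check of that word-building step (test values are arbitrary):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	int c = 0xAB;
	uint32_t w32;
	uint32_t words[8];
	unsigned char ref[sizeof(words)];
	size_t i;

	c &= 0xFF;		/* truncate to 8 bits, as memset() does */
	w32 = c;
	w32 |= w32 << 8;
	w32 |= w32 << 16;	/* 0xAB -> 0xABABABAB */
	assert(w32 == 0xABABABABu);

	for (i = 0; i < 8; i++)		/* full-word stores */
		words[i] = w32;
	memset(ref, c, sizeof(ref));	/* byte-at-a-time reference */
	assert(memcmp(words, ref, sizeof(ref)) == 0);
	return 0;
}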
gpl-2.0
jacknb1ack/android_kernel_sony_msm8960t-test
fs/ceph/strings.c
8435
3774
/* * Ceph fs string constants */ #include <linux/module.h> #include <linux/ceph/types.h> const char *ceph_mds_state_name(int s) { switch (s) { /* down and out */ case CEPH_MDS_STATE_DNE: return "down:dne"; case CEPH_MDS_STATE_STOPPED: return "down:stopped"; /* up and out */ case CEPH_MDS_STATE_BOOT: return "up:boot"; case CEPH_MDS_STATE_STANDBY: return "up:standby"; case CEPH_MDS_STATE_STANDBY_REPLAY: return "up:standby-replay"; case CEPH_MDS_STATE_CREATING: return "up:creating"; case CEPH_MDS_STATE_STARTING: return "up:starting"; /* up and in */ case CEPH_MDS_STATE_REPLAY: return "up:replay"; case CEPH_MDS_STATE_RESOLVE: return "up:resolve"; case CEPH_MDS_STATE_RECONNECT: return "up:reconnect"; case CEPH_MDS_STATE_REJOIN: return "up:rejoin"; case CEPH_MDS_STATE_CLIENTREPLAY: return "up:clientreplay"; case CEPH_MDS_STATE_ACTIVE: return "up:active"; case CEPH_MDS_STATE_STOPPING: return "up:stopping"; } return "???"; } const char *ceph_session_op_name(int op) { switch (op) { case CEPH_SESSION_REQUEST_OPEN: return "request_open"; case CEPH_SESSION_OPEN: return "open"; case CEPH_SESSION_REQUEST_CLOSE: return "request_close"; case CEPH_SESSION_CLOSE: return "close"; case CEPH_SESSION_REQUEST_RENEWCAPS: return "request_renewcaps"; case CEPH_SESSION_RENEWCAPS: return "renewcaps"; case CEPH_SESSION_STALE: return "stale"; case CEPH_SESSION_RECALL_STATE: return "recall_state"; } return "???"; } const char *ceph_mds_op_name(int op) { switch (op) { case CEPH_MDS_OP_LOOKUP: return "lookup"; case CEPH_MDS_OP_LOOKUPHASH: return "lookuphash"; case CEPH_MDS_OP_LOOKUPPARENT: return "lookupparent"; case CEPH_MDS_OP_GETATTR: return "getattr"; case CEPH_MDS_OP_SETXATTR: return "setxattr"; case CEPH_MDS_OP_SETATTR: return "setattr"; case CEPH_MDS_OP_RMXATTR: return "rmxattr"; case CEPH_MDS_OP_READDIR: return "readdir"; case CEPH_MDS_OP_MKNOD: return "mknod"; case CEPH_MDS_OP_LINK: return "link"; case CEPH_MDS_OP_UNLINK: return "unlink"; case CEPH_MDS_OP_RENAME: return "rename"; case CEPH_MDS_OP_MKDIR: return "mkdir"; case CEPH_MDS_OP_RMDIR: return "rmdir"; case CEPH_MDS_OP_SYMLINK: return "symlink"; case CEPH_MDS_OP_CREATE: return "create"; case CEPH_MDS_OP_OPEN: return "open"; case CEPH_MDS_OP_LOOKUPSNAP: return "lookupsnap"; case CEPH_MDS_OP_LSSNAP: return "lssnap"; case CEPH_MDS_OP_MKSNAP: return "mksnap"; case CEPH_MDS_OP_RMSNAP: return "rmsnap"; case CEPH_MDS_OP_SETFILELOCK: return "setfilelock"; case CEPH_MDS_OP_GETFILELOCK: return "getfilelock"; } return "???"; } const char *ceph_cap_op_name(int op) { switch (op) { case CEPH_CAP_OP_GRANT: return "grant"; case CEPH_CAP_OP_REVOKE: return "revoke"; case CEPH_CAP_OP_TRUNC: return "trunc"; case CEPH_CAP_OP_EXPORT: return "export"; case CEPH_CAP_OP_IMPORT: return "import"; case CEPH_CAP_OP_UPDATE: return "update"; case CEPH_CAP_OP_DROP: return "drop"; case CEPH_CAP_OP_FLUSH: return "flush"; case CEPH_CAP_OP_FLUSH_ACK: return "flush_ack"; case CEPH_CAP_OP_FLUSHSNAP: return "flushsnap"; case CEPH_CAP_OP_FLUSHSNAP_ACK: return "flushsnap_ack"; case CEPH_CAP_OP_RELEASE: return "release"; case CEPH_CAP_OP_RENEW: return "renew"; } return "???"; } const char *ceph_lease_op_name(int o) { switch (o) { case CEPH_MDS_LEASE_REVOKE: return "revoke"; case CEPH_MDS_LEASE_RELEASE: return "release"; case CEPH_MDS_LEASE_RENEW: return "renew"; case CEPH_MDS_LEASE_REVOKE_ACK: return "revoke_ack"; } return "???"; } const char *ceph_snap_op_name(int o) { switch (o) { case CEPH_SNAP_OP_UPDATE: return "update"; case CEPH_SNAP_OP_CREATE: return "create"; case CEPH_SNAP_OP_DESTROY: 
return "destroy"; case CEPH_SNAP_OP_SPLIT: return "split"; } return "???"; }
gpl-2.0
ShinySide/HispAsian_5.1.1
drivers/misc/ibmasm/event.c
9715
4991
/* * IBM ASM Service Processor Device Driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2004 * * Author: Max Asböck <amax@us.ibm.com> * */ #include <linux/sched.h> #include <linux/slab.h> #include "ibmasm.h" #include "lowlevel.h" /* * ASM service processor event handling routines. * * Events are signalled to the device drivers through interrupts. * They have the format of dot commands, with the type field set to * sp_event. * The driver does not interpret the events, it simply stores them in a * circular buffer. */ static void wake_up_event_readers(struct service_processor *sp) { struct event_reader *reader; list_for_each_entry(reader, &sp->event_buffer->readers, node) wake_up_interruptible(&reader->wait); } /** * receive_event * Called by the interrupt handler when a dot command of type sp_event is * received. * Store the event in the circular event buffer, wake up any sleeping * event readers. * There is no reader marker in the buffer, therefore readers are * responsible for keeping up with the writer, or they will lose events. */ void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size) { struct event_buffer *buffer = sp->event_buffer; struct ibmasm_event *event; unsigned long flags; data_size = min(data_size, IBMASM_EVENT_MAX_SIZE); spin_lock_irqsave(&sp->lock, flags); /* copy the event into the next slot in the circular buffer */ event = &buffer->events[buffer->next_index]; memcpy_fromio(event->data, data, data_size); event->data_size = data_size; event->serial_number = buffer->next_serial_number; /* advance indices in the buffer */ buffer->next_index = (buffer->next_index + 1) % IBMASM_NUM_EVENTS; buffer->next_serial_number++; spin_unlock_irqrestore(&sp->lock, flags); wake_up_event_readers(sp); } static inline int event_available(struct event_buffer *b, struct event_reader *r) { return (r->next_serial_number < b->next_serial_number); } /** * get_next_event * Called by event readers (initiated from user space through the file * system). * Sleeps until a new event is available. 
*/ int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader) { struct event_buffer *buffer = sp->event_buffer; struct ibmasm_event *event; unsigned int index; unsigned long flags; reader->cancelled = 0; if (wait_event_interruptible(reader->wait, event_available(buffer, reader) || reader->cancelled)) return -ERESTARTSYS; if (!event_available(buffer, reader)) return 0; spin_lock_irqsave(&sp->lock, flags); index = buffer->next_index; event = &buffer->events[index]; while (event->serial_number < reader->next_serial_number) { index = (index + 1) % IBMASM_NUM_EVENTS; event = &buffer->events[index]; } memcpy(reader->data, event->data, event->data_size); reader->data_size = event->data_size; reader->next_serial_number = event->serial_number + 1; spin_unlock_irqrestore(&sp->lock, flags); return event->data_size; } void ibmasm_cancel_next_event(struct event_reader *reader) { reader->cancelled = 1; wake_up_interruptible(&reader->wait); } void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader) { unsigned long flags; reader->next_serial_number = sp->event_buffer->next_serial_number; init_waitqueue_head(&reader->wait); spin_lock_irqsave(&sp->lock, flags); list_add(&reader->node, &sp->event_buffer->readers); spin_unlock_irqrestore(&sp->lock, flags); } void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader) { unsigned long flags; spin_lock_irqsave(&sp->lock, flags); list_del(&reader->node); spin_unlock_irqrestore(&sp->lock, flags); } int ibmasm_event_buffer_init(struct service_processor *sp) { struct event_buffer *buffer; struct ibmasm_event *event; int i; buffer = kmalloc(sizeof(struct event_buffer), GFP_KERNEL); if (!buffer) return 1; buffer->next_index = 0; buffer->next_serial_number = 1; event = buffer->events; for (i=0; i<IBMASM_NUM_EVENTS; i++, event++) event->serial_number = 0; INIT_LIST_HEAD(&buffer->readers); sp->event_buffer = buffer; return 0; } void ibmasm_event_buffer_exit(struct service_processor *sp) { kfree(sp->event_buffer); }
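Since the circular buffer keeps no per-reader position, each reader owns only a next_serial_number and, as in ibmasm_get_next_event(), scans forward from the writer's next_index (the oldest slot) until it reaches the first event whose serial is not older; a reader that falls more than IBMASM_NUM_EVENTS behind silently resumes at the oldest surviving event. A standalone model of that catch-up scan (names and sizes are illustrative):

#include <stdio.h>

#define NUM_EVENTS 4	/* stands in for IBMASM_NUM_EVENTS */

struct event { unsigned int serial; };

/* Model of the scan in ibmasm_get_next_event(): starting at the
 * writer's next_index (the oldest slot), skip events older than the
 * reader's next_serial.  Caller must first check that an event with
 * serial >= next_serial exists (the event_available() test). */
static unsigned int next_for_reader(const struct event ev[NUM_EVENTS],
				    unsigned int next_index,
				    unsigned int next_serial)
{
	unsigned int index = next_index;

	while (ev[index].serial < next_serial)
		index = (index + 1) % NUM_EVENTS;
	return ev[index].serial;
}

int main(void)
{
	/* writer has produced serials 5..8 into slots 0..3 and its
	 * next_index has wrapped back to 0 */
	struct event ev[NUM_EVENTS] = { {5}, {6}, {7}, {8} };

	/* an up-to-date reader gets 8; a reader expecting serial 2
	 * has lost 2..4 and resumes at the oldest survivor, 5 */
	printf("%u %u\n", next_for_reader(ev, 0, 8),
	       next_for_reader(ev, 0, 2));
	return 0;
}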
gpl-2.0
HardbitCoded/linux-3.4-rtws
net/rds/rdma.c
11251
23017
/* * Copyright (c) 2007 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/rbtree.h> #include <linux/dma-mapping.h> /* for DMA_*_DEVICE */ #include "rds.h" /* * XXX * - build with sparse * - should we limit the size of a mr region? let transport return failure? * - should we detect duplicate keys on a socket? hmm. * - an rdma is an mlock, apply rlimit? */ /* * get the number of pages by looking at the page indices that the start and * end addresses fall in. * * Returns 0 if the vec is invalid. It is invalid if the number of bytes * causes the address to wrap or overflows an unsigned int. This comes * from being stored in the 'length' member of 'struct scatterlist'. */ static unsigned int rds_pages_in_vec(struct rds_iovec *vec) { if ((vec->addr + vec->bytes <= vec->addr) || (vec->bytes > (u64)UINT_MAX)) return 0; return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) - (vec->addr >> PAGE_SHIFT); } static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key, struct rds_mr *insert) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct rds_mr *mr; while (*p) { parent = *p; mr = rb_entry(parent, struct rds_mr, r_rb_node); if (key < mr->r_key) p = &(*p)->rb_left; else if (key > mr->r_key) p = &(*p)->rb_right; else return mr; } if (insert) { rb_link_node(&insert->r_rb_node, parent, p); rb_insert_color(&insert->r_rb_node, root); atomic_inc(&insert->r_refcount); } return NULL; } /* * Destroy the transport-specific part of a MR. 
*/ static void rds_destroy_mr(struct rds_mr *mr) { struct rds_sock *rs = mr->r_sock; void *trans_private = NULL; unsigned long flags; rdsdebug("RDS: destroy mr key is %x refcnt %u\n", mr->r_key, atomic_read(&mr->r_refcount)); if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state)) return; spin_lock_irqsave(&rs->rs_rdma_lock, flags); if (!RB_EMPTY_NODE(&mr->r_rb_node)) rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); trans_private = mr->r_trans_private; mr->r_trans_private = NULL; spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); if (trans_private) mr->r_trans->free_mr(trans_private, mr->r_invalidate); } void __rds_put_mr_final(struct rds_mr *mr) { rds_destroy_mr(mr); kfree(mr); } /* * By the time this is called we can't have any more ioctls called on * the socket so we don't need to worry about racing with others. */ void rds_rdma_drop_keys(struct rds_sock *rs) { struct rds_mr *mr; struct rb_node *node; unsigned long flags; /* Release any MRs associated with this socket */ spin_lock_irqsave(&rs->rs_rdma_lock, flags); while ((node = rb_first(&rs->rs_rdma_keys))) { mr = container_of(node, struct rds_mr, r_rb_node); if (mr->r_trans == rs->rs_transport) mr->r_invalidate = 0; rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); RB_CLEAR_NODE(&mr->r_rb_node); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); rds_destroy_mr(mr); rds_mr_put(mr); spin_lock_irqsave(&rs->rs_rdma_lock, flags); } spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); if (rs->rs_transport && rs->rs_transport->flush_mrs) rs->rs_transport->flush_mrs(); } /* * Helper function to pin user pages. */ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages, struct page **pages, int write) { int ret; ret = get_user_pages_fast(user_addr, nr_pages, write, pages); if (ret >= 0 && ret < nr_pages) { while (ret--) put_page(pages[ret]); ret = -EFAULT; } return ret; } static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, u64 *cookie_ret, struct rds_mr **mr_ret) { struct rds_mr *mr = NULL, *found; unsigned int nr_pages; struct page **pages = NULL; struct scatterlist *sg; void *trans_private; unsigned long flags; rds_rdma_cookie_t cookie; unsigned int nents; long i; int ret; if (rs->rs_bound_addr == 0) { ret = -ENOTCONN; /* XXX not a great errno */ goto out; } if (!rs->rs_transport->get_mr) { ret = -EOPNOTSUPP; goto out; } nr_pages = rds_pages_in_vec(&args->vec); if (nr_pages == 0) { ret = -EINVAL; goto out; } rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n", args->vec.addr, args->vec.bytes, nr_pages); /* XXX clamp nr_pages to limit the size of this alloc? */ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) { ret = -ENOMEM; goto out; } mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL); if (!mr) { ret = -ENOMEM; goto out; } atomic_set(&mr->r_refcount, 1); RB_CLEAR_NODE(&mr->r_rb_node); mr->r_trans = rs->rs_transport; mr->r_sock = rs; if (args->flags & RDS_RDMA_USE_ONCE) mr->r_use_once = 1; if (args->flags & RDS_RDMA_INVALIDATE) mr->r_invalidate = 1; if (args->flags & RDS_RDMA_READWRITE) mr->r_write = 1; /* * Pin the pages that make up the user buffer and transfer the page * pointers to the mr's sg array. We check to see if we've mapped * the whole region after transferring the partial page references * to the sg array so that we can have one page ref cleanup path. * * For now we have no flag that tells us whether the mapping is * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to * the zero page. 
*/ ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1); if (ret < 0) goto out; nents = ret; sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL); if (!sg) { ret = -ENOMEM; goto out; } WARN_ON(!nents); sg_init_table(sg, nents); /* Stick all pages into the scatterlist */ for (i = 0 ; i < nents; i++) sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0); rdsdebug("RDS: trans_private nents is %u\n", nents); /* Obtain a transport specific MR. If this succeeds, the * s/g list is now owned by the MR. * Note that dma_map() implies that pending writes are * flushed to RAM, so no dma_sync is needed here. */ trans_private = rs->rs_transport->get_mr(sg, nents, rs, &mr->r_key); if (IS_ERR(trans_private)) { for (i = 0 ; i < nents; i++) put_page(sg_page(&sg[i])); kfree(sg); ret = PTR_ERR(trans_private); goto out; } mr->r_trans_private = trans_private; rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n", mr->r_key, (void *)(unsigned long) args->cookie_addr); /* The user may pass us an unaligned address, but we can only * map page aligned regions. So we keep the offset, and build * a 64bit cookie containing <R_Key, offset> and pass that * around. */ cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK); if (cookie_ret) *cookie_ret = cookie; if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) { ret = -EFAULT; goto out; } /* Inserting the new MR into the rbtree bumps its * reference count. */ spin_lock_irqsave(&rs->rs_rdma_lock, flags); found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); BUG_ON(found && found != mr); rdsdebug("RDS: get_mr key is %x\n", mr->r_key); if (mr_ret) { atomic_inc(&mr->r_refcount); *mr_ret = mr; } ret = 0; out: kfree(pages); if (mr) rds_mr_put(mr); return ret; } int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen) { struct rds_get_mr_args args; if (optlen != sizeof(struct rds_get_mr_args)) return -EINVAL; if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval, sizeof(struct rds_get_mr_args))) return -EFAULT; return __rds_rdma_map(rs, &args, NULL, NULL); } int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) { struct rds_get_mr_for_dest_args args; struct rds_get_mr_args new_args; if (optlen != sizeof(struct rds_get_mr_for_dest_args)) return -EINVAL; if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval, sizeof(struct rds_get_mr_for_dest_args))) return -EFAULT; /* * Initially, just behave like get_mr(). * TODO: Implement get_mr as wrapper around this * and deprecate it. */ new_args.vec = args.vec; new_args.cookie_addr = args.cookie_addr; new_args.flags = args.flags; return __rds_rdma_map(rs, &new_args, NULL, NULL); } /* * Free the MR indicated by the given R_Key */ int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen) { struct rds_free_mr_args args; struct rds_mr *mr; unsigned long flags; if (optlen != sizeof(struct rds_free_mr_args)) return -EINVAL; if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval, sizeof(struct rds_free_mr_args))) return -EFAULT; /* Special case - a null cookie means flush all unused MRs */ if (args.cookie == 0) { if (!rs->rs_transport || !rs->rs_transport->flush_mrs) return -EINVAL; rs->rs_transport->flush_mrs(); return 0; } /* Look up the MR given its R_key and remove it from the rbtree * so nobody else finds it. * This should also prevent races with rds_rdma_unuse. 
*/ spin_lock_irqsave(&rs->rs_rdma_lock, flags); mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL); if (mr) { rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); RB_CLEAR_NODE(&mr->r_rb_node); if (args.flags & RDS_RDMA_INVALIDATE) mr->r_invalidate = 1; } spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); if (!mr) return -EINVAL; /* * call rds_destroy_mr() ourselves so that we're sure it's done by the time * we return. If we let rds_mr_put() do it it might not happen until * someone else drops their ref. */ rds_destroy_mr(mr); rds_mr_put(mr); return 0; } /* * This is called when we receive an extension header that * tells us this MR was used. It allows us to implement * use_once semantics */ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force) { struct rds_mr *mr; unsigned long flags; int zot_me = 0; spin_lock_irqsave(&rs->rs_rdma_lock, flags); mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL); if (!mr) { printk(KERN_ERR "rds: trying to unuse MR with unknown r_key %u!\n", r_key); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); return; } if (mr->r_use_once || force) { rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); RB_CLEAR_NODE(&mr->r_rb_node); zot_me = 1; } spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); /* May have to issue a dma_sync on this memory region. * Note we could avoid this if the operation was a RDMA READ, * but at this point we can't tell. */ if (mr->r_trans->sync_mr) mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE); /* If the MR was marked as invalidate, this will * trigger an async flush. */ if (zot_me) rds_destroy_mr(mr); rds_mr_put(mr); } void rds_rdma_free_op(struct rm_rdma_op *ro) { unsigned int i; for (i = 0; i < ro->op_nents; i++) { struct page *page = sg_page(&ro->op_sg[i]); /* Mark page dirty if it was possibly modified, which * is the case for a RDMA_READ which copies from remote * to local memory */ if (!ro->op_write) { BUG_ON(irqs_disabled()); set_page_dirty(page); } put_page(page); } kfree(ro->op_notifier); ro->op_notifier = NULL; ro->op_active = 0; } void rds_atomic_free_op(struct rm_atomic_op *ao) { struct page *page = sg_page(ao->op_sg); /* Mark page dirty if it was possibly modified, which * is the case for a RDMA_READ which copies from remote * to local memory */ set_page_dirty(page); put_page(page); kfree(ao->op_notifier); ao->op_notifier = NULL; ao->op_active = 0; } /* * Count the number of pages needed to describe an incoming iovec array. */ static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs) { int tot_pages = 0; unsigned int nr_pages; unsigned int i; /* figure out the number of pages in the vector */ for (i = 0; i < nr_iovecs; i++) { nr_pages = rds_pages_in_vec(&iov[i]); if (nr_pages == 0) return -EINVAL; tot_pages += nr_pages; /* * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1, * so tot_pages cannot overflow without first going negative. 
*/ if (tot_pages < 0) return -EINVAL; } return tot_pages; } int rds_rdma_extra_size(struct rds_rdma_args *args) { struct rds_iovec vec; struct rds_iovec __user *local_vec; int tot_pages = 0; unsigned int nr_pages; unsigned int i; local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr; /* figure out the number of pages in the vector */ for (i = 0; i < args->nr_local; i++) { if (copy_from_user(&vec, &local_vec[i], sizeof(struct rds_iovec))) return -EFAULT; nr_pages = rds_pages_in_vec(&vec); if (nr_pages == 0) return -EINVAL; tot_pages += nr_pages; /* * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1, * so tot_pages cannot overflow without first going negative. */ if (tot_pages < 0) return -EINVAL; } return tot_pages * sizeof(struct scatterlist); } /* * The application asks for a RDMA transfer. * Extract all arguments and set up the rdma_op */ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { struct rds_rdma_args *args; struct rm_rdma_op *op = &rm->rdma; int nr_pages; unsigned int nr_bytes; struct page **pages = NULL; struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack; int iov_size; unsigned int i, j; int ret = 0; if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) || rm->rdma.op_active) return -EINVAL; args = CMSG_DATA(cmsg); if (rs->rs_bound_addr == 0) { ret = -ENOTCONN; /* XXX not a great errno */ goto out; } if (args->nr_local > UIO_MAXIOV) { ret = -EMSGSIZE; goto out; } /* Check whether to allocate the iovec area */ iov_size = args->nr_local * sizeof(struct rds_iovec); if (args->nr_local > UIO_FASTIOV) { iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL); if (!iovs) { ret = -ENOMEM; goto out; } } if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) { ret = -EFAULT; goto out; } nr_pages = rds_rdma_pages(iovs, args->nr_local); if (nr_pages < 0) { ret = -EINVAL; goto out; } pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) { ret = -ENOMEM; goto out; } op->op_write = !!(args->flags & RDS_RDMA_READWRITE); op->op_fence = !!(args->flags & RDS_RDMA_FENCE); op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME); op->op_silent = !!(args->flags & RDS_RDMA_SILENT); op->op_active = 1; op->op_recverr = rs->rs_recverr; WARN_ON(!nr_pages); op->op_sg = rds_message_alloc_sgs(rm, nr_pages); if (!op->op_sg) { ret = -ENOMEM; goto out; } if (op->op_notify || op->op_recverr) { /* We allocate an uninitialized notifier here, because * we don't want to do that in the completion handler. We * would have to use GFP_ATOMIC there, and don't want to deal * with failed allocations. */ op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL); if (!op->op_notifier) { ret = -ENOMEM; goto out; } op->op_notifier->n_user_token = args->user_token; op->op_notifier->n_status = RDS_RDMA_SUCCESS; } /* The cookie contains the R_Key of the remote memory region, and * optionally an offset into it. This is how we implement RDMA into * unaligned memory. 
* When setting up the RDMA, we need to add that offset to the * destination address (which is really an offset into the MR) * FIXME: We may want to move this into ib_rdma.c */ op->op_rkey = rds_rdma_cookie_key(args->cookie); op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie); nr_bytes = 0; rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n", (unsigned long long)args->nr_local, (unsigned long long)args->remote_vec.addr, op->op_rkey); for (i = 0; i < args->nr_local; i++) { struct rds_iovec *iov = &iovs[i]; /* don't need to check, rds_rdma_pages() verified nr will be +nonzero */ unsigned int nr = rds_pages_in_vec(iov); rs->rs_user_addr = iov->addr; rs->rs_user_bytes = iov->bytes; /* If it's a WRITE operation, we want to pin the pages for reading. * If it's a READ operation, we need to pin the pages for writing. */ ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write); if (ret < 0) goto out; rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n", nr_bytes, nr, iov->bytes, iov->addr); nr_bytes += iov->bytes; for (j = 0; j < nr; j++) { unsigned int offset = iov->addr & ~PAGE_MASK; struct scatterlist *sg; sg = &op->op_sg[op->op_nents + j]; sg_set_page(sg, pages[j], min_t(unsigned int, iov->bytes, PAGE_SIZE - offset), offset); rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n", sg->offset, sg->length, iov->addr, iov->bytes); iov->addr += sg->length; iov->bytes -= sg->length; } op->op_nents += nr; } if (nr_bytes > args->remote_vec.bytes) { rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n", nr_bytes, (unsigned int) args->remote_vec.bytes); ret = -EINVAL; goto out; } op->op_bytes = nr_bytes; out: if (iovs != iovstack) sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size); kfree(pages); if (ret) rds_rdma_free_op(op); else rds_stats_inc(s_send_rdma); return ret; } /* * The application wants us to pass an RDMA destination (aka MR) * to the remote */ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { unsigned long flags; struct rds_mr *mr; u32 r_key; int err = 0; if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) || rm->m_rdma_cookie != 0) return -EINVAL; memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie)); /* We are reusing a previously mapped MR here. Most likely, the * application has written to the buffer, so we need to explicitly * flush those writes to RAM. Otherwise the HCA may not see them * when doing a DMA from that buffer. */ r_key = rds_rdma_cookie_key(rm->m_rdma_cookie); spin_lock_irqsave(&rs->rs_rdma_lock, flags); mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL); if (!mr) err = -EINVAL; /* invalid r_key */ else atomic_inc(&mr->r_refcount); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); if (mr) { mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE); rm->rdma.op_rdma_mr = mr; } return err; } /* * The application passes us an address range it wants to enable RDMA * to/from. We map the area, and save the <R_Key,offset> pair * in rm->m_rdma_cookie. This causes it to be sent along to the peer * in an extension header. */ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) || rm->m_rdma_cookie != 0) return -EINVAL; return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr); } /* * Fill in rds_message for an atomic request. 
*/ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { struct page *page = NULL; struct rds_atomic_args *args; int ret = 0; if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args)) || rm->atomic.op_active) return -EINVAL; args = CMSG_DATA(cmsg); /* Nonmasked & masked cmsg ops converted to masked hw ops */ switch (cmsg->cmsg_type) { case RDS_CMSG_ATOMIC_FADD: rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD; rm->atomic.op_m_fadd.add = args->fadd.add; rm->atomic.op_m_fadd.nocarry_mask = 0; break; case RDS_CMSG_MASKED_ATOMIC_FADD: rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD; rm->atomic.op_m_fadd.add = args->m_fadd.add; rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask; break; case RDS_CMSG_ATOMIC_CSWP: rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP; rm->atomic.op_m_cswp.compare = args->cswp.compare; rm->atomic.op_m_cswp.swap = args->cswp.swap; rm->atomic.op_m_cswp.compare_mask = ~0; rm->atomic.op_m_cswp.swap_mask = ~0; break; case RDS_CMSG_MASKED_ATOMIC_CSWP: rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP; rm->atomic.op_m_cswp.compare = args->m_cswp.compare; rm->atomic.op_m_cswp.swap = args->m_cswp.swap; rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask; rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask; break; default: BUG(); /* should never happen */ } rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME); rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT); rm->atomic.op_active = 1; rm->atomic.op_recverr = rs->rs_recverr; rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1); if (!rm->atomic.op_sg) { ret = -ENOMEM; goto err; } /* verify 8 byte-aligned */ if (args->local_addr & 0x7) { ret = -EFAULT; goto err; } ret = rds_pin_pages(args->local_addr, 1, &page, 1); if (ret != 1) goto err; ret = 0; sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr)); if (rm->atomic.op_notify || rm->atomic.op_recverr) { /* We allocate an uninitialized notifier here, because * we don't want to do that in the completion handler. We * would have to use GFP_ATOMIC there, and don't want to deal * with failed allocations. */ rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL); if (!rm->atomic.op_notifier) { ret = -ENOMEM; goto err; } rm->atomic.op_notifier->n_user_token = args->user_token; rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS; } rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie); rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie); return ret; err: if (page) put_page(page); kfree(rm->atomic.op_notifier); return ret; }
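The <R_Key, offset> cookie built in __rds_rdma_map() above and unpacked by rds_rdma_cookie_key()/rds_rdma_cookie_offset() is just two 32-bit values packed into one u64 so they can travel together in an extension header. A minimal standalone sketch of that round-trip, assuming (this layout and all helper names here are hypothetical, for illustration only; the real packing lives in rds_rdma_make_cookie() in the RDS headers, which are not part of this file) that the key occupies the low 32 bits and the in-page offset the high 32 bits:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL	/* assumed page size for this sketch */

/* Hypothetical layout: low 32 bits carry the R_Key, high 32 bits the offset. */
static uint64_t make_cookie(uint32_t r_key, uint32_t offset)
{
	return (uint64_t)offset << 32 | r_key;
}

static uint32_t cookie_key(uint64_t cookie)	{ return (uint32_t)cookie; }
static uint32_t cookie_offset(uint64_t cookie)	{ return (uint32_t)(cookie >> 32); }

int main(void)
{
	uint64_t addr = 0x7f0012345678ULL;	/* unaligned user address */
	/* keep only the in-page offset, as __rds_rdma_map() does with
	 * args->vec.addr & ~PAGE_MASK */
	uint64_t cookie = make_cookie(0xbeef, (uint32_t)(addr & (PAGE_SIZE - 1)));

	/* the receiver recovers both halves independently */
	printf("r_key=%x offset=%llx\n", cookie_key(cookie),
	       (unsigned long long)cookie_offset(cookie));
	return 0;
}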
gpl-2.0
cuboxi/android_kernel_imx6_cuboxi
fs/nls/nls_euc-jp.c
12531
24438
/* * linux/fs/nls/nls_euc-jp.c * * Added `OSF/JVC Recommended Code Set Conversion Specification * between Japanese EUC and Shift-JIS' support: <hirofumi@mail.parknet.co.jp> * (http://www.opengroup.or.jp/jvc/cde/sjis-euc-e.html) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static struct nls_table *p_nls; #define IS_SJIS_LOW_BYTE(l) ((0x40 <= (l)) && ((l) <= 0xFC) && ((l) != 0x7F)) /* JIS X 0208 (include NEC special characters) */ #define IS_SJIS_JISX0208(h, l) ((((0x81 <= (h)) && ((h) <= 0x9F)) \ || ((0xE0 <= (h)) && ((h) <= 0xEA))) \ && IS_SJIS_LOW_BYTE(l)) #define IS_SJIS_JISX0201KANA(c) ((0xA1 <= (c)) && ((c) <= 0xDF)) #define IS_SJIS_UDC_LOW(h, l) (((0xF0 <= (h)) && ((h) <= 0xF4)) \ && IS_SJIS_LOW_BYTE(l)) #define IS_SJIS_UDC_HI(h, l) (((0xF5 <= (h)) && ((h) <= 0xF9)) \ && IS_SJIS_LOW_BYTE(l)) #define IS_SJIS_IBM(h, l) (((0xFA <= (h)) && ((h) <= 0xFC)) \ && IS_SJIS_LOW_BYTE(l)) #define IS_SJIS_NECIBM(h, l) (((0xED <= (h)) && ((h) <= 0xEE)) \ && IS_SJIS_LOW_BYTE(l)) #define MAP_SJIS2EUC(sjis_hi, sjis_lo, sjis_p, euc_hi, euc_lo, euc_p) do { \ if ((sjis_lo) >= 0x9F) { \ (euc_hi) = (sjis_hi) * 2 - (((sjis_p) * 2 - (euc_p)) - 1); \ (euc_lo) = (sjis_lo) + 2; \ } else { \ (euc_hi) = (sjis_hi) * 2 - ((sjis_p) * 2 - (euc_p)); \ (euc_lo) = (sjis_lo) + ((sjis_lo) >= 0x7F ? 0x60 : 0x61); \ } \ } while(0) #define SS2 (0x8E) /* Single Shift 2 */ #define SS3 (0x8F) /* Single Shift 3 */ #define IS_EUC_BYTE(c) ((0xA1 <= (c)) && ((c) <= 0xFE)) #define IS_EUC_JISX0208(h, l) (IS_EUC_BYTE(h) && IS_EUC_BYTE(l)) #define IS_EUC_JISX0201KANA(h, l) (((h) == SS2) && (0xA1 <= (l) && (l) <= 0xDF)) #define IS_EUC_UDC_LOW(h, l) (((0xF5 <= (h)) && ((h) <= 0xFE)) \ && IS_EUC_BYTE(l)) #define IS_EUC_UDC_HI(h, l) IS_EUC_UDC_LOW(h, l) /* G3 block */ #define MAP_EUC2SJIS(euc_hi, euc_lo, euc_p, sjis_hi, sjis_lo, sjis_p) do { \ if ((euc_hi) & 1) { \ (sjis_hi) = (euc_hi) / 2 + ((sjis_p) - (euc_p) / 2); \ (sjis_lo) = (euc_lo) - ((euc_lo) >= 0xE0 ?
0x60 : 0x61); \ } else { \ (sjis_hi) = (euc_hi) / 2 + (((sjis_p) - (euc_p) / 2) - 1); \ (sjis_lo) = (euc_lo) - 2; \ } \ } while(0) /* SJIS IBM extended characters to EUC map */ static const unsigned char sjisibm2euc_map[][2] = { {0xF3, 0xF3}, {0xF3, 0xF4}, {0xF3, 0xF5}, {0xF3, 0xF6}, {0xF3, 0xF7}, {0xF3, 0xF8}, {0xF3, 0xF9}, {0xF3, 0xFA}, {0xF3, 0xFB}, {0xF3, 0xFC}, {0xF3, 0xFD}, {0xF3, 0xFE}, {0xF4, 0xA1}, {0xF4, 0xA2}, {0xF4, 0xA3}, {0xF4, 0xA4}, {0xF4, 0xA5}, {0xF4, 0xA6}, {0xF4, 0xA7}, {0xF4, 0xA8}, {0xA2, 0xCC}, {0xA2, 0xC3}, {0xF4, 0xA9}, {0xF4, 0xAA}, {0xF4, 0xAB}, {0xF4, 0xAC}, {0xF4, 0xAD}, {0xA2, 0xE8}, {0xD4, 0xE3}, {0xDC, 0xDF}, {0xE4, 0xE9}, {0xE3, 0xF8}, {0xD9, 0xA1}, {0xB1, 0xBB}, {0xF4, 0xAE}, {0xC2, 0xAD}, {0xC3, 0xFC}, {0xE4, 0xD0}, {0xC2, 0xBF}, {0xBC, 0xF4}, {0xB0, 0xA9}, {0xB0, 0xC8}, {0xF4, 0xAF}, {0xB0, 0xD2}, {0xB0, 0xD4}, {0xB0, 0xE3}, {0xB0, 0xEE}, {0xB1, 0xA7}, {0xB1, 0xA3}, {0xB1, 0xAC}, {0xB1, 0xA9}, {0xB1, 0xBE}, {0xB1, 0xDF}, {0xB1, 0xD8}, {0xB1, 0xC8}, {0xB1, 0xD7}, {0xB1, 0xE3}, {0xB1, 0xF4}, {0xB1, 0xE1}, {0xB2, 0xA3}, {0xF4, 0xB0}, {0xB2, 0xBB}, {0xB2, 0xE6}, {0x00, 0x00}, {0xB2, 0xED}, {0xB2, 0xF5}, {0xB2, 0xFC}, {0xF4, 0xB1}, {0xB3, 0xB5}, {0xB3, 0xD8}, {0xB3, 0xDB}, {0xB3, 0xE5}, {0xB3, 0xEE}, {0xB3, 0xFB}, {0xF4, 0xB2}, {0xF4, 0xB3}, {0xB4, 0xC0}, {0xB4, 0xC7}, {0xB4, 0xD0}, {0xB4, 0xDE}, {0xF4, 0xB4}, {0xB5, 0xAA}, {0xF4, 0xB5}, {0xB5, 0xAF}, {0xB5, 0xC4}, {0xB5, 0xE8}, {0xF4, 0xB6}, {0xB7, 0xC2}, {0xB7, 0xE4}, {0xB7, 0xE8}, {0xB7, 0xE7}, {0xF4, 0xB7}, {0xF4, 0xB8}, {0xF4, 0xB9}, {0xB8, 0xCE}, {0xB8, 0xE1}, {0xB8, 0xF5}, {0xB8, 0xF7}, {0xB8, 0xF8}, {0xB8, 0xFC}, {0xB9, 0xAF}, {0xB9, 0xB7}, {0xBA, 0xBE}, {0xBA, 0xDB}, {0xCD, 0xAA}, {0xBA, 0xE1}, {0xF4, 0xBA}, {0xBA, 0xEB}, {0xBB, 0xB3}, {0xBB, 0xB8}, {0xF4, 0xBB}, {0xBB, 0xCA}, {0xF4, 0xBC}, {0xF4, 0xBD}, {0xBB, 0xD0}, {0xBB, 0xDE}, {0xBB, 0xF4}, {0xBB, 0xF5}, {0xBB, 0xF9}, {0xBC, 0xE4}, {0xBC, 0xED}, {0xBC, 0xFE}, {0xF4, 0xBE}, {0xBD, 0xC2}, {0xBD, 0xE7}, {0xF4, 0xBF}, {0xBD, 0xF0}, {0xBE, 0xB0}, {0xBE, 0xAC}, {0xF4, 0xC0}, {0xBE, 0xB3}, {0xBE, 0xBD}, {0xBE, 0xCD}, {0xBE, 0xC9}, {0xBE, 0xE4}, {0xBF, 0xA8}, {0xBF, 0xC9}, {0xC0, 0xC4}, {0xC0, 0xE4}, {0xC0, 0xF4}, {0xC1, 0xA6}, {0xF4, 0xC1}, {0xC1, 0xF5}, {0xC1, 0xFC}, {0xF4, 0xC2}, {0xC1, 0xF8}, {0xC2, 0xAB}, {0xC2, 0xA1}, {0xC2, 0xA5}, {0xF4, 0xC3}, {0xC2, 0xB8}, {0xC2, 0xBA}, {0xF4, 0xC4}, {0xC2, 0xC4}, {0xC2, 0xD2}, {0xC2, 0xD7}, {0xC2, 0xDB}, {0xC2, 0xDE}, {0xC2, 0xED}, {0xC2, 0xF0}, {0xF4, 0xC5}, {0xC3, 0xA1}, {0xC3, 0xB5}, {0xC3, 0xC9}, {0xC3, 0xB9}, {0xF4, 0xC6}, {0xC3, 0xD8}, {0xC3, 0xFE}, {0xF4, 0xC7}, {0xC4, 0xCC}, {0xF4, 0xC8}, {0xC4, 0xD9}, {0xC4, 0xEA}, {0xC4, 0xFD}, {0xF4, 0xC9}, {0xC5, 0xA7}, {0xC5, 0xB5}, {0xC5, 0xB6}, {0xF4, 0xCA}, {0xC5, 0xD5}, {0xC6, 0xB8}, {0xC6, 0xD7}, {0xC6, 0xE0}, {0xC6, 0xEA}, {0xC6, 0xE3}, {0xC7, 0xA1}, {0xC7, 0xAB}, {0xC7, 0xC7}, {0xC7, 0xC3}, {0xC7, 0xCB}, {0xC7, 0xCF}, {0xC7, 0xD9}, {0xF4, 0xCB}, {0xF4, 0xCC}, {0xC7, 0xE6}, {0xC7, 0xEE}, {0xC7, 0xFC}, {0xC7, 0xEB}, {0xC7, 0xF0}, {0xC8, 0xB1}, {0xC8, 0xE5}, {0xC8, 0xF8}, {0xC9, 0xA6}, {0xC9, 0xAB}, {0xC9, 0xAD}, {0xF4, 0xCD}, {0xC9, 0xCA}, {0xC9, 0xD3}, {0xC9, 0xE9}, {0xC9, 0xE3}, {0xC9, 0xFC}, {0xC9, 0xF4}, {0xC9, 0xF5}, {0xF4, 0xCE}, {0xCA, 0xB3}, {0xCA, 0xBD}, {0xCA, 0xEF}, {0xCA, 0xF1}, {0xCB, 0xAE}, {0xF4, 0xCF}, {0xCB, 0xCA}, {0xCB, 0xE6}, {0xCB, 0xEA}, {0xCB, 0xF0}, {0xCB, 0xF4}, {0xCB, 0xEE}, {0xCC, 0xA5}, {0xCB, 0xF9}, {0xCC, 0xAB}, {0xCC, 0xAE}, {0xCC, 0xAD}, {0xCC, 0xB2}, {0xCC, 0xC2}, {0xCC, 0xD0}, {0xCC, 0xD9}, {0xF4, 0xD0}, {0xCD, 0xBB}, {0xF4, 
0xD1}, {0xCE, 0xBB}, {0xF4, 0xD2}, {0xCE, 0xBA}, {0xCE, 0xC3}, {0xF4, 0xD3}, {0xCE, 0xF2}, {0xB3, 0xDD}, {0xCF, 0xD5}, {0xCF, 0xE2}, {0xCF, 0xE9}, {0xCF, 0xED}, {0xF4, 0xD4}, {0xF4, 0xD5}, {0xF4, 0xD6}, {0x00, 0x00}, {0xF4, 0xD7}, {0xD0, 0xE5}, {0xF4, 0xD8}, {0xD0, 0xE9}, {0xD1, 0xE8}, {0xF4, 0xD9}, {0xF4, 0xDA}, {0xD1, 0xEC}, {0xD2, 0xBB}, {0xF4, 0xDB}, {0xD3, 0xE1}, {0xD3, 0xE8}, {0xD4, 0xA7}, {0xF4, 0xDC}, {0xF4, 0xDD}, {0xD4, 0xD4}, {0xD4, 0xF2}, {0xD5, 0xAE}, {0xF4, 0xDE}, {0xD7, 0xDE}, {0xF4, 0xDF}, {0xD8, 0xA2}, {0xD8, 0xB7}, {0xD8, 0xC1}, {0xD8, 0xD1}, {0xD8, 0xF4}, {0xD9, 0xC6}, {0xD9, 0xC8}, {0xD9, 0xD1}, {0xF4, 0xE0}, {0xF4, 0xE1}, {0xF4, 0xE2}, {0xF4, 0xE3}, {0xF4, 0xE4}, {0xDC, 0xD3}, {0xDD, 0xC8}, {0xDD, 0xD4}, {0xDD, 0xEA}, {0xDD, 0xFA}, {0xDE, 0xA4}, {0xDE, 0xB0}, {0xF4, 0xE5}, {0xDE, 0xB5}, {0xDE, 0xCB}, {0xF4, 0xE6}, {0xDF, 0xB9}, {0xF4, 0xE7}, {0xDF, 0xC3}, {0xF4, 0xE8}, {0xF4, 0xE9}, {0xE0, 0xD9}, {0xF4, 0xEA}, {0xF4, 0xEB}, {0xE1, 0xE2}, {0xF4, 0xEC}, {0xF4, 0xED}, {0xF4, 0xEE}, {0xE2, 0xC7}, {0xE3, 0xA8}, {0xE3, 0xA6}, {0xE3, 0xA9}, {0xE3, 0xAF}, {0xE3, 0xB0}, {0xE3, 0xAA}, {0xE3, 0xAB}, {0xE3, 0xBC}, {0xE3, 0xC1}, {0xE3, 0xBF}, {0xE3, 0xD5}, {0xE3, 0xD8}, {0xE3, 0xD6}, {0xE3, 0xDF}, {0xE3, 0xE3}, {0xE3, 0xE1}, {0xE3, 0xD4}, {0xE3, 0xE9}, {0xE4, 0xA6}, {0xE3, 0xF1}, {0xE3, 0xF2}, {0xE4, 0xCB}, {0xE4, 0xC1}, {0xE4, 0xC3}, {0xE4, 0xBE}, {0xF4, 0xEF}, {0xE4, 0xC0}, {0xE4, 0xC7}, {0xE4, 0xBF}, {0xE4, 0xE0}, {0xE4, 0xDE}, {0xE4, 0xD1}, {0xF4, 0xF0}, {0xE4, 0xDC}, {0xE4, 0xD2}, {0xE4, 0xDB}, {0xE4, 0xD4}, {0xE4, 0xFA}, {0xE4, 0xEF}, {0xE5, 0xB3}, {0xE5, 0xBF}, {0xE5, 0xC9}, {0xE5, 0xD0}, {0xE5, 0xE2}, {0xE5, 0xEA}, {0xE5, 0xEB}, {0xF4, 0xF1}, {0xF4, 0xF2}, {0xF4, 0xF3}, {0xE6, 0xE8}, {0xE6, 0xEF}, {0xE7, 0xAC}, {0xF4, 0xF4}, {0xE7, 0xAE}, {0xF4, 0xF5}, {0xE7, 0xB1}, {0xF4, 0xF6}, {0xE7, 0xB2}, {0xE8, 0xB1}, {0xE8, 0xB6}, {0xF4, 0xF7}, {0xF4, 0xF8}, {0xE8, 0xDD}, {0xF4, 0xF9}, {0xF4, 0xFA}, {0xE9, 0xD1}, {0xF4, 0xFB}, {0xE9, 0xED}, {0xEA, 0xCD}, {0xF4, 0xFC}, {0xEA, 0xDB}, {0xEA, 0xE6}, {0xEA, 0xEA}, {0xEB, 0xA5}, {0xEB, 0xFB}, {0xEB, 0xFA}, {0xF4, 0xFD}, {0xEC, 0xD6}, {0xF4, 0xFE}, }; #define IS_EUC_IBM2JISX0208(h, l) \ (((h) == 0xA2 && (l) == 0xCC) || ((h) == 0xA2 && (l) == 0xE8)) /* EUC to SJIS IBM extended characters map (G3 JIS X 0212 block) */ static struct { unsigned short euc; unsigned char sjis[2]; } euc2sjisibm_jisx0212_map[] = { {0xA2C3, {0xFA, 0x55}}, {0xB0A9, {0xFA, 0x68}}, {0xB0C8, {0xFA, 0x69}}, {0xB0D2, {0xFA, 0x6B}}, {0xB0D4, {0xFA, 0x6C}}, {0xB0E3, {0xFA, 0x6D}}, {0xB0EE, {0xFA, 0x6E}}, {0xB1A3, {0xFA, 0x70}}, {0xB1A7, {0xFA, 0x6F}}, {0xB1A9, {0xFA, 0x72}}, {0xB1AC, {0xFA, 0x71}}, {0xB1BB, {0xFA, 0x61}}, {0xB1BE, {0xFA, 0x73}}, {0xB1C8, {0xFA, 0x76}}, {0xB1D7, {0xFA, 0x77}}, {0xB1D8, {0xFA, 0x75}}, {0xB1DF, {0xFA, 0x74}}, {0xB1E1, {0xFA, 0x7A}}, {0xB1E3, {0xFA, 0x78}}, {0xB1F4, {0xFA, 0x79}}, {0xB2A3, {0xFA, 0x7B}}, {0xB2BB, {0xFA, 0x7D}}, {0xB2E6, {0xFA, 0x7E}}, {0xB2ED, {0xFA, 0x80}}, {0xB2F5, {0xFA, 0x81}}, {0xB2FC, {0xFA, 0x82}}, {0xB3B5, {0xFA, 0x84}}, {0xB3D8, {0xFA, 0x85}}, {0xB3DB, {0xFA, 0x86}}, {0xB3DD, {0xFB, 0x77}}, {0xB3E5, {0xFA, 0x87}}, {0xB3EE, {0xFA, 0x88}}, {0xB3FB, {0xFA, 0x89}}, {0xB4C0, {0xFA, 0x8C}}, {0xB4C7, {0xFA, 0x8D}}, {0xB4D0, {0xFA, 0x8E}}, {0xB4DE, {0xFA, 0x8F}}, {0xB5AA, {0xFA, 0x91}}, {0xB5AF, {0xFA, 0x93}}, {0xB5C4, {0xFA, 0x94}}, {0xB5E8, {0xFA, 0x95}}, {0xB7C2, {0xFA, 0x97}}, {0xB7E4, {0xFA, 0x98}}, {0xB7E7, {0xFA, 0x9A}}, {0xB7E8, {0xFA, 0x99}}, {0xB8CE, {0xFA, 0x9E}}, {0xB8E1, {0xFA, 0x9F}}, {0xB8F5, {0xFA, 0xA0}}, 
{0xB8F7, {0xFA, 0xA1}}, {0xB8F8, {0xFA, 0xA2}}, {0xB8FC, {0xFA, 0xA3}}, {0xB9AF, {0xFA, 0xA4}}, {0xB9B7, {0xFA, 0xA5}}, {0xBABE, {0xFA, 0xA6}}, {0xBADB, {0xFA, 0xA7}}, {0xBAE1, {0xFA, 0xA9}}, {0xBAEB, {0xFA, 0xAB}}, {0xBBB3, {0xFA, 0xAC}}, {0xBBB8, {0xFA, 0xAD}}, {0xBBCA, {0xFA, 0xAF}}, {0xBBD0, {0xFA, 0xB2}}, {0xBBDE, {0xFA, 0xB3}}, {0xBBF4, {0xFA, 0xB4}}, {0xBBF5, {0xFA, 0xB5}}, {0xBBF9, {0xFA, 0xB6}}, {0xBCE4, {0xFA, 0xB7}}, {0xBCED, {0xFA, 0xB8}}, {0xBCF4, {0xFA, 0x67}}, {0xBCFE, {0xFA, 0xB9}}, {0xBDC2, {0xFA, 0xBB}}, {0xBDE7, {0xFA, 0xBC}}, {0xBDF0, {0xFA, 0xBE}}, {0xBEAC, {0xFA, 0xC0}}, {0xBEB0, {0xFA, 0xBF}}, {0xBEB3, {0xFA, 0xC2}}, {0xBEBD, {0xFA, 0xC3}}, {0xBEC9, {0xFA, 0xC5}}, {0xBECD, {0xFA, 0xC4}}, {0xBEE4, {0xFA, 0xC6}}, {0xBFA8, {0xFA, 0xC7}}, {0xBFC9, {0xFA, 0xC8}}, {0xC0C4, {0xFA, 0xC9}}, {0xC0E4, {0xFA, 0xCA}}, {0xC0F4, {0xFA, 0xCB}}, {0xC1A6, {0xFA, 0xCC}}, {0xC1F5, {0xFA, 0xCE}}, {0xC1F8, {0xFA, 0xD1}}, {0xC1FC, {0xFA, 0xCF}}, {0xC2A1, {0xFA, 0xD3}}, {0xC2A5, {0xFA, 0xD4}}, {0xC2AB, {0xFA, 0xD2}}, {0xC2AD, {0xFA, 0x63}}, {0xC2B8, {0xFA, 0xD6}}, {0xC2BA, {0xFA, 0xD7}}, {0xC2BF, {0xFA, 0x66}}, {0xC2C4, {0xFA, 0xD9}}, {0xC2D2, {0xFA, 0xDA}}, {0xC2D7, {0xFA, 0xDB}}, {0xC2DB, {0xFA, 0xDC}}, {0xC2DE, {0xFA, 0xDD}}, {0xC2ED, {0xFA, 0xDE}}, {0xC2F0, {0xFA, 0xDF}}, {0xC3A1, {0xFA, 0xE1}}, {0xC3B5, {0xFA, 0xE2}}, {0xC3B9, {0xFA, 0xE4}}, {0xC3C9, {0xFA, 0xE3}}, {0xC3D8, {0xFA, 0xE6}}, {0xC3FC, {0xFA, 0x64}}, {0xC3FE, {0xFA, 0xE7}}, {0xC4CC, {0xFA, 0xE9}}, {0xC4D9, {0xFA, 0xEB}}, {0xC4EA, {0xFA, 0xEC}}, {0xC4FD, {0xFA, 0xED}}, {0xC5A7, {0xFA, 0xEF}}, {0xC5B5, {0xFA, 0xF0}}, {0xC5B6, {0xFA, 0xF1}}, {0xC5D5, {0xFA, 0xF3}}, {0xC6B8, {0xFA, 0xF4}}, {0xC6D7, {0xFA, 0xF5}}, {0xC6E0, {0xFA, 0xF6}}, {0xC6E3, {0xFA, 0xF8}}, {0xC6EA, {0xFA, 0xF7}}, {0xC7A1, {0xFA, 0xF9}}, {0xC7AB, {0xFA, 0xFA}}, {0xC7C3, {0xFA, 0xFC}}, {0xC7C7, {0xFA, 0xFB}}, {0xC7CB, {0xFB, 0x40}}, {0xC7CF, {0xFB, 0x41}}, {0xC7D9, {0xFB, 0x42}}, {0xC7E6, {0xFB, 0x45}}, {0xC7EB, {0xFB, 0x48}}, {0xC7EE, {0xFB, 0x46}}, {0xC7F0, {0xFB, 0x49}}, {0xC7FC, {0xFB, 0x47}}, {0xC8B1, {0xFB, 0x4A}}, {0xC8E5, {0xFB, 0x4B}}, {0xC8F8, {0xFB, 0x4C}}, {0xC9A6, {0xFB, 0x4D}}, {0xC9AB, {0xFB, 0x4E}}, {0xC9AD, {0xFB, 0x4F}}, {0xC9CA, {0xFB, 0x51}}, {0xC9D3, {0xFB, 0x52}}, {0xC9E3, {0xFB, 0x54}}, {0xC9E9, {0xFB, 0x53}}, {0xC9F4, {0xFB, 0x56}}, {0xC9F5, {0xFB, 0x57}}, {0xC9FC, {0xFB, 0x55}}, {0xCAB3, {0xFB, 0x59}}, {0xCABD, {0xFB, 0x5A}}, {0xCAEF, {0xFB, 0x5B}}, {0xCAF1, {0xFB, 0x5C}}, {0xCBAE, {0xFB, 0x5D}}, {0xCBCA, {0xFB, 0x5F}}, {0xCBE6, {0xFB, 0x60}}, {0xCBEA, {0xFB, 0x61}}, {0xCBEE, {0xFB, 0x64}}, {0xCBF0, {0xFB, 0x62}}, {0xCBF4, {0xFB, 0x63}}, {0xCBF9, {0xFB, 0x66}}, {0xCCA5, {0xFB, 0x65}}, {0xCCAB, {0xFB, 0x67}}, {0xCCAD, {0xFB, 0x69}}, {0xCCAE, {0xFB, 0x68}}, {0xCCB2, {0xFB, 0x6A}}, {0xCCC2, {0xFB, 0x6B}}, {0xCCD0, {0xFB, 0x6C}}, {0xCCD9, {0xFB, 0x6D}}, {0xCDAA, {0xFA, 0xA8}}, {0xCDBB, {0xFB, 0x6F}}, {0xCEBA, {0xFB, 0x73}}, {0xCEBB, {0xFB, 0x71}}, {0xCEC3, {0xFB, 0x74}}, {0xCEF2, {0xFB, 0x76}}, {0xCFD5, {0xFB, 0x78}}, {0xCFE2, {0xFB, 0x79}}, {0xCFE9, {0xFB, 0x7A}}, {0xCFED, {0xFB, 0x7B}}, {0xD0E5, {0xFB, 0x81}}, {0xD0E9, {0xFB, 0x83}}, {0xD1E8, {0xFB, 0x84}}, {0xD1EC, {0xFB, 0x87}}, {0xD2BB, {0xFB, 0x88}}, {0xD3E1, {0xFB, 0x8A}}, {0xD3E8, {0xFB, 0x8B}}, {0xD4A7, {0xFB, 0x8C}}, {0xD4D4, {0xFB, 0x8F}}, {0xD4E3, {0xFA, 0x5C}}, {0xD4F2, {0xFB, 0x90}}, {0xD5AE, {0xFB, 0x91}}, {0xD7DE, {0xFB, 0x93}}, {0xD8A2, {0xFB, 0x95}}, {0xD8B7, {0xFB, 0x96}}, {0xD8C1, {0xFB, 0x97}}, {0xD8D1, {0xFB, 0x98}}, {0xD8F4, {0xFB, 0x99}}, {0xD9A1, {0xFA, 0x60}}, 
{0xD9C6, {0xFB, 0x9A}}, {0xD9C8, {0xFB, 0x9B}}, {0xD9D1, {0xFB, 0x9C}}, {0xDCD3, {0xFB, 0xA2}}, {0xDCDF, {0xFA, 0x5D}}, {0xDDC8, {0xFB, 0xA3}}, {0xDDD4, {0xFB, 0xA4}}, {0xDDEA, {0xFB, 0xA5}}, {0xDDFA, {0xFB, 0xA6}}, {0xDEA4, {0xFB, 0xA7}}, {0xDEB0, {0xFB, 0xA8}}, {0xDEB5, {0xFB, 0xAA}}, {0xDECB, {0xFB, 0xAB}}, {0xDFB9, {0xFB, 0xAD}}, {0xDFC3, {0xFB, 0xAF}}, {0xE0D9, {0xFB, 0xB2}}, {0xE1E2, {0xFB, 0xB5}}, {0xE2C7, {0xFB, 0xB9}}, {0xE3A6, {0xFB, 0xBB}}, {0xE3A8, {0xFB, 0xBA}}, {0xE3A9, {0xFB, 0xBC}}, {0xE3AA, {0xFB, 0xBF}}, {0xE3AB, {0xFB, 0xC0}}, {0xE3AF, {0xFB, 0xBD}}, {0xE3B0, {0xFB, 0xBE}}, {0xE3BC, {0xFB, 0xC1}}, {0xE3BF, {0xFB, 0xC3}}, {0xE3C1, {0xFB, 0xC2}}, {0xE3D4, {0xFB, 0xCA}}, {0xE3D5, {0xFB, 0xC4}}, {0xE3D6, {0xFB, 0xC6}}, {0xE3D8, {0xFB, 0xC5}}, {0xE3DF, {0xFB, 0xC7}}, {0xE3E1, {0xFB, 0xC9}}, {0xE3E3, {0xFB, 0xC8}}, {0xE3E9, {0xFB, 0xCB}}, {0xE3F1, {0xFB, 0xCD}}, {0xE3F2, {0xFB, 0xCE}}, {0xE3F8, {0xFA, 0x5F}}, {0xE4A6, {0xFB, 0xCC}}, {0xE4BE, {0xFB, 0xD2}}, {0xE4BF, {0xFB, 0xD6}}, {0xE4C0, {0xFB, 0xD4}}, {0xE4C1, {0xFB, 0xD0}}, {0xE4C3, {0xFB, 0xD1}}, {0xE4C7, {0xFB, 0xD5}}, {0xE4CB, {0xFB, 0xCF}}, {0xE4D0, {0xFA, 0x65}}, {0xE4D1, {0xFB, 0xD9}}, {0xE4D2, {0xFB, 0xDC}}, {0xE4D4, {0xFB, 0xDE}}, {0xE4DB, {0xFB, 0xDD}}, {0xE4DC, {0xFB, 0xDB}}, {0xE4DE, {0xFB, 0xD8}}, {0xE4E0, {0xFB, 0xD7}}, {0xE4E9, {0xFA, 0x5E}}, {0xE4EF, {0xFB, 0xE0}}, {0xE4FA, {0xFB, 0xDF}}, {0xE5B3, {0xFB, 0xE1}}, {0xE5BF, {0xFB, 0xE2}}, {0xE5C9, {0xFB, 0xE3}}, {0xE5D0, {0xFB, 0xE4}}, {0xE5E2, {0xFB, 0xE5}}, {0xE5EA, {0xFB, 0xE6}}, {0xE5EB, {0xFB, 0xE7}}, {0xE6E8, {0xFB, 0xEB}}, {0xE6EF, {0xFB, 0xEC}}, {0xE7AC, {0xFB, 0xED}}, {0xE7AE, {0xFB, 0xEF}}, {0xE7B1, {0xFB, 0xF1}}, {0xE7B2, {0xFB, 0xF3}}, {0xE8B1, {0xFB, 0xF4}}, {0xE8B6, {0xFB, 0xF5}}, {0xE8DD, {0xFB, 0xF8}}, {0xE9D1, {0xFB, 0xFB}}, {0xE9ED, {0xFC, 0x40}}, {0xEACD, {0xFC, 0x41}}, {0xEADB, {0xFC, 0x43}}, {0xEAE6, {0xFC, 0x44}}, {0xEAEA, {0xFC, 0x45}}, {0xEBA5, {0xFC, 0x46}}, {0xEBFA, {0xFC, 0x48}}, {0xEBFB, {0xFC, 0x47}}, {0xECD6, {0xFC, 0x4A}}, }; /* EUC to SJIS IBM extended characters map (G3 Upper block) */ static const unsigned char euc2sjisibm_g3upper_map[][2] = { {0xFA, 0x40}, {0xFA, 0x41}, {0xFA, 0x42}, {0xFA, 0x43}, {0xFA, 0x44}, {0xFA, 0x45}, {0xFA, 0x46}, {0xFA, 0x47}, {0xFA, 0x48}, {0xFA, 0x49}, {0xFA, 0x4A}, {0xFA, 0x4B}, {0xFA, 0x4C}, {0xFA, 0x4D}, {0xFA, 0x4E}, {0xFA, 0x4F}, {0xFA, 0x50}, {0xFA, 0x51}, {0xFA, 0x52}, {0xFA, 0x53}, {0xFA, 0x56}, {0xFA, 0x57}, {0xFA, 0x58}, {0xFA, 0x59}, {0xFA, 0x5A}, {0xFA, 0x62}, {0xFA, 0x6A}, {0xFA, 0x7C}, {0xFA, 0x83}, {0xFA, 0x8A}, {0xFA, 0x8B}, {0xFA, 0x90}, {0xFA, 0x92}, {0xFA, 0x96}, {0xFA, 0x9B}, {0xFA, 0x9C}, {0xFA, 0x9D}, {0xFA, 0xAA}, {0xFA, 0xAE}, {0xFA, 0xB0}, {0xFA, 0xB1}, {0xFA, 0xBA}, {0xFA, 0xBD}, {0xFA, 0xC1}, {0xFA, 0xCD}, {0xFA, 0xD0}, {0xFA, 0xD5}, {0xFA, 0xD8}, {0xFA, 0xE0}, {0xFA, 0xE5}, {0xFA, 0xE8}, {0xFA, 0xEA}, {0xFA, 0xEE}, {0xFA, 0xF2}, {0xFB, 0x43}, {0xFB, 0x44}, {0xFB, 0x50}, {0xFB, 0x58}, {0xFB, 0x5E}, {0xFB, 0x6E}, {0xFB, 0x70}, {0xFB, 0x72}, {0xFB, 0x75}, {0xFB, 0x7C}, {0xFB, 0x7D}, {0xFB, 0x7E}, {0xFB, 0x80}, {0xFB, 0x82}, {0xFB, 0x85}, {0xFB, 0x86}, {0xFB, 0x89}, {0xFB, 0x8D}, {0xFB, 0x8E}, {0xFB, 0x92}, {0xFB, 0x94}, {0xFB, 0x9D}, {0xFB, 0x9E}, {0xFB, 0x9F}, {0xFB, 0xA0}, {0xFB, 0xA1}, {0xFB, 0xA9}, {0xFB, 0xAC}, {0xFB, 0xAE}, {0xFB, 0xB0}, {0xFB, 0xB1}, {0xFB, 0xB3}, {0xFB, 0xB4}, {0xFB, 0xB6}, {0xFB, 0xB7}, {0xFB, 0xB8}, {0xFB, 0xD3}, {0xFB, 0xDA}, {0xFB, 0xE8}, {0xFB, 0xE9}, {0xFB, 0xEA}, {0xFB, 0xEE}, {0xFB, 0xF0}, {0xFB, 0xF2}, {0xFB, 0xF6}, {0xFB, 0xF7}, {0xFB, 
0xF9}, {0xFB, 0xFA}, {0xFB, 0xFC}, {0xFC, 0x42}, {0xFC, 0x49}, {0xFC, 0x4B}, }; static inline int sjisibm2euc(unsigned char *euc, const unsigned char sjis_hi, const unsigned char sjis_lo); static inline int euc2sjisibm_jisx0212(unsigned char *sjis, const unsigned char euc_hi, const unsigned char euc_lo); static inline int euc2sjisibm_g3upper(unsigned char *sjis, const unsigned char euc_hi, const unsigned char euc_lo); static inline int euc2sjisibm(unsigned char *sjis, const unsigned char euc_hi, const unsigned char euc_lo); static inline int sjisnec2sjisibm(unsigned char *sjisibm, const unsigned char sjisnec_hi, const unsigned char sjisnec_lo); /* SJIS IBM extended characters to EUC */ static inline int sjisibm2euc(unsigned char *euc, const unsigned char sjis_hi, const unsigned char sjis_lo) { int index; index = ((sjis_hi - 0xFA) * (0xFD - 0x40)) + (sjis_lo - 0x40); if (IS_EUC_IBM2JISX0208(sjisibm2euc_map[index][0], sjisibm2euc_map[index][1])) { euc[0] = sjisibm2euc_map[index][0]; euc[1] = sjisibm2euc_map[index][1]; return 2; } else { euc[0] = SS3; euc[1] = sjisibm2euc_map[index][0]; euc[2] = sjisibm2euc_map[index][1]; return 3; } } /* EUC to SJIS IBM extended characters (G3 JIS X 0212 block) */ static inline int euc2sjisibm_jisx0212(unsigned char *sjis, const unsigned char euc_hi, const unsigned char euc_lo) { int index, min_index, max_index; unsigned short euc; min_index = 0; max_index = ARRAY_SIZE(euc2sjisibm_jisx0212_map) - 1; euc = (euc_hi << 8) | euc_lo; while (min_index <= max_index) { index = (min_index + max_index) / 2; if (euc < euc2sjisibm_jisx0212_map[index].euc) max_index = index - 1; else min_index = index + 1; if (euc == euc2sjisibm_jisx0212_map[index].euc) { sjis[0] = euc2sjisibm_jisx0212_map[index].sjis[0]; sjis[1] = euc2sjisibm_jisx0212_map[index].sjis[1]; return 3; } } return 0; } /* EUC to SJIS IBM extended characters (G3 Upper block) */ static inline int euc2sjisibm_g3upper(unsigned char *sjis, const unsigned char euc_hi, const unsigned char euc_lo) { int index; if (euc_hi == 0xF3) index = ((euc_hi << 8) | euc_lo) - 0xF3F3; else index = ((euc_hi << 8) | euc_lo) - 0xF4A1 + 12; if ((index < 0) || (index >= ARRAY_SIZE(euc2sjisibm_g3upper_map))) return 0; sjis[0] = euc2sjisibm_g3upper_map[index][0]; sjis[1] = euc2sjisibm_g3upper_map[index][1]; return 3; } /* EUC to SJIS IBM extended characters (G3 block) */ static inline int euc2sjisibm(unsigned char *sjis, const unsigned char euc_hi, const unsigned char euc_lo) { int n; #if 0 if ((euc_hi == 0xA2) && (euc_lo == 0xCC)) { sjis[0] = 0xFA; sjis[1] = 0x54; return 2; } else if ((euc_hi == 0xA2) && (euc_lo == 0xE8)) { sjis[0] = 0xFA; sjis[1] = 0x5B; return 2; } #endif if ((n = euc2sjisibm_g3upper(sjis, euc_hi, euc_lo))) { return n; } else if ((n = euc2sjisibm_jisx0212(sjis, euc_hi, euc_lo))) { return n; } return 0; } /* NEC/IBM extended characters to IBM extended characters */ static inline int sjisnec2sjisibm(unsigned char *sjisibm, const unsigned char sjisnec_hi, const unsigned char sjisnec_lo) { int count; if (! IS_SJIS_NECIBM(sjisnec_hi, sjisnec_lo)) return 0; if ((sjisnec_hi == 0xEE) && (sjisnec_lo == 0xF9)) { sjisibm[0] = 0x81; sjisibm[1] = 0xCA; return 2; } if ((sjisnec_hi == 0xEE) && (sjisnec_lo >= 0xEF)) { count = (sjisnec_hi << 8 | sjisnec_lo) - (sjisnec_lo <= 0xF9 ? 
0xEEEF : (0xEEEF - 10)); } else { count = (sjisnec_hi - 0xED) * (0xFC - 0x40) + (sjisnec_lo - 0x40) + (0x5C - 0x40); if (sjisnec_lo >= 0x7F) count--; } sjisibm[0] = 0xFA + (count / (0xFC - 0x40)); sjisibm[1] = 0x40 + (count % (0xFC - 0x40)); if (sjisibm[1] >= 0x7F) sjisibm[1]++; return 2; } static int uni2char(const wchar_t uni, unsigned char *out, int boundlen) { int n; if (!p_nls) return -EINVAL; if ((n = p_nls->uni2char(uni, out, boundlen)) < 0) return n; /* translate SJIS into EUC-JP */ if (n == 1) { if (IS_SJIS_JISX0201KANA(out[0])) { /* JIS X 0201 KANA */ if (boundlen < 2) return -ENAMETOOLONG; out[1] = out[0]; out[0] = SS2; return 2; } } else if (n == 2) { /* NEC/IBM extended characters to IBM extended characters */ sjisnec2sjisibm(out, out[0], out[1]); if (IS_SJIS_UDC_LOW(out[0], out[1])) { /* User defined characters half low */ MAP_SJIS2EUC(out[0], out[1], 0xF0, out[0], out[1], 0xF5); } else if (IS_SJIS_UDC_HI(out[0], out[1])) { /* User defined characters half high */ unsigned char ch, cl; if (boundlen < 3) return -ENAMETOOLONG; n = 3; ch = out[0]; cl = out[1]; out[0] = SS3; MAP_SJIS2EUC(ch, cl, 0xF5, out[1], out[2], 0xF5); } else if (IS_SJIS_IBM(out[0], out[1])) { /* IBM extended characters */ unsigned char euc[3], i; n = sjisibm2euc(euc, out[0], out[1]); if (boundlen < n) return -ENAMETOOLONG; for (i = 0; i < n; i++) out[i] = euc[i]; } else if (IS_SJIS_JISX0208(out[0], out[1])) { /* JIS X 0208 (include NEC special characters) */ out[0] = (out[0]^0xA0)*2 + 0x5F; if (out[1] > 0x9E) out[0]++; if (out[1] < 0x7F) out[1] = out[1] + 0x61; else if (out[1] < 0x9F) out[1] = out[1] + 0x60; else out[1] = out[1] + 0x02; } else { /* Invalid characters */ return -EINVAL; } } else return -EINVAL; return n; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { unsigned char sjis_temp[2]; int euc_offset, n; if (!p_nls) return -EINVAL; if (boundlen <= 0) return -ENAMETOOLONG; /* translate EUC-JP into SJIS */ if (rawstring[0] > 0x7F) { if (rawstring[0] == SS3) { if (boundlen < 3) return -EINVAL; euc_offset = 3; if (IS_EUC_UDC_HI(rawstring[1], rawstring[2])) { /* User defined characters half high */ MAP_EUC2SJIS(rawstring[1], rawstring[2], 0xF5, sjis_temp[0], sjis_temp[1], 0xF5); } else if (euc2sjisibm(sjis_temp, rawstring[1], rawstring[2])) { /* IBM extended characters */ } else { /* JIS X 0212 and Invalid characters */ return -EINVAL; /* 'GETA' with SJIS coding */ /* sjis_temp[0] = 0x81; */ /* sjis_temp[1] = 0xAC; */ } } else { if (boundlen < 2) return -EINVAL; euc_offset = 2; if (IS_EUC_JISX0201KANA(rawstring[0], rawstring[1])) { /* JIS X 0201 KANA */ sjis_temp[0] = rawstring[1]; sjis_temp[1] = 0x00; } else if (IS_EUC_UDC_LOW(rawstring[0], rawstring[1])) { /* User defined characters half low */ MAP_EUC2SJIS(rawstring[0], rawstring[1], 0xF5, sjis_temp[0], sjis_temp[1], 0xF0); } else if (IS_EUC_JISX0208(rawstring[0], rawstring[1])) { /* JIS X 0208 (include NEC special characters) */ sjis_temp[0] = ((rawstring[0]-0x5f)/2) ^ 0xA0; if (!(rawstring[0] & 1)) sjis_temp[1] = rawstring[1] - 0x02; else if (rawstring[1] < 0xE0) sjis_temp[1] = rawstring[1] - 0x61; else sjis_temp[1] = rawstring[1] - 0x60; } else { /* Invalid characters */ return -EINVAL; } } } else { euc_offset = 1; /* JIS X 0201 ROMAJI */ sjis_temp[0] = rawstring[0]; sjis_temp[1] = 0x00; } if ((n = p_nls->char2uni(sjis_temp, sizeof(sjis_temp), uni)) < 0) return n; return euc_offset; } static struct nls_table table = { .charset = "euc-jp", .uni2char = uni2char, .char2uni = char2uni, .owner = THIS_MODULE, }; static
int __init init_nls_euc_jp(void) { p_nls = load_nls("cp932"); if (p_nls) { table.charset2upper = p_nls->charset2upper; table.charset2lower = p_nls->charset2lower; return register_nls(&table); } return -EINVAL; } static void __exit exit_nls_euc_jp(void) { unregister_nls(&table); unload_nls(p_nls); } module_init(init_nls_euc_jp) module_exit(exit_nls_euc_jp) MODULE_LICENSE("Dual BSD/GPL");
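The JIS X 0208 row ("ku") shifting done inline in uni2char() and char2uni() above is easier to follow in isolation. A standalone sketch of the same arithmetic (helper names sjis_to_euc/euc_to_sjis are mine, not from the driver), using hiragana "a" (SJIS 0x82A0, EUC 0xA4A2) as a worked round-trip:

#include <stdio.h>

/* SJIS -> EUC-JP for a JIS X 0208 pair, mirroring uni2char() above:
 * two SJIS rows map onto one pair of EUC rows, split on the low byte. */
static void sjis_to_euc(unsigned char s1, unsigned char s2,
			unsigned char *e1, unsigned char *e2)
{
	*e1 = (s1 ^ 0xA0) * 2 + 0x5F;
	if (s2 > 0x9E)		/* second half of the double-width row */
		(*e1)++;
	if (s2 < 0x7F)
		*e2 = s2 + 0x61;
	else if (s2 < 0x9F)
		*e2 = s2 + 0x60;	/* skip the 0x7F gap */
	else
		*e2 = s2 + 0x02;
}

/* EUC-JP -> SJIS, mirroring char2uni() above (the exact inverse). */
static void euc_to_sjis(unsigned char e1, unsigned char e2,
			unsigned char *s1, unsigned char *s2)
{
	*s1 = ((e1 - 0x5F) / 2) ^ 0xA0;
	if (!(e1 & 1))
		*s2 = e2 - 0x02;
	else if (e2 < 0xE0)
		*s2 = e2 - 0x61;
	else
		*s2 = e2 - 0x60;
}

int main(void)
{
	unsigned char e1, e2, s1, s2;

	sjis_to_euc(0x82, 0xA0, &e1, &e2);	/* hiragana "a" */
	euc_to_sjis(e1, e2, &s1, &s2);
	/* prints: SJIS 82A0 -> EUC A4A2 -> SJIS 82A0 */
	printf("SJIS 82A0 -> EUC %02X%02X -> SJIS %02X%02X\n", e1, e2, s1, s2);
	return 0;
}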
gpl-2.0
Ameisentaetowierer/kernel-p9516-2.6.36.3
drivers/mmc/core/sd_ops.c
244
7946
/* * linux/drivers/mmc/core/sd_ops.c * * Copyright 2006-2007 Pierre Ossman * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. */ #include <linux/types.h> #include <linux/scatterlist.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sd.h> #include "core.h" #include "sd_ops.h" static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card) { int err; struct mmc_command cmd; BUG_ON(!host); BUG_ON(card && (card->host != host)); cmd.opcode = MMC_APP_CMD; if (card) { cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; } else { cmd.arg = 0; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_BCR; } err = mmc_wait_for_cmd(host, &cmd, 0); if (err) return err; /* Check that the card supports application commands */ if (!mmc_host_is_spi(host) && !(cmd.resp[0] & R1_APP_CMD)) return -EOPNOTSUPP; return 0; } /** * mmc_wait_for_app_cmd - start an application command and wait for completion * @host: MMC host to start command * @card: Card to send MMC_APP_CMD to * @cmd: MMC command to start * @retries: maximum number of retries * * Sends an MMC_APP_CMD, checks the card response, sends the command * in the parameter and waits for it to complete. Return any error * that occurred while the command was executing. Do not attempt to * parse the response.
*/ for (i = 0;i <= retries;i++) { err = mmc_app_cmd(host, card); if (err) { /* no point in retrying; no APP commands allowed */ if (mmc_host_is_spi(host)) { if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND) break; } continue; } memset(&mrq, 0, sizeof(struct mmc_request)); memset(cmd->resp, 0, sizeof(cmd->resp)); cmd->retries = 0; mrq.cmd = cmd; cmd->data = NULL; mmc_wait_for_req(host, &mrq); err = cmd->error; if (!cmd->error) break; /* no point in retrying illegal APP commands */ if (mmc_host_is_spi(host)) { if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND) break; } } return err; } EXPORT_SYMBOL(mmc_wait_for_app_cmd); int mmc_app_set_bus_width(struct mmc_card *card, int width) { int err; struct mmc_command cmd; BUG_ON(!card); BUG_ON(!card->host); memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = SD_APP_SET_BUS_WIDTH; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; switch (width) { case MMC_BUS_WIDTH_1: cmd.arg = SD_BUS_WIDTH_1; break; case MMC_BUS_WIDTH_4: cmd.arg = SD_BUS_WIDTH_4; break; default: return -EINVAL; } err = mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES); if (err) return err; return 0; } int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) { struct mmc_command cmd; int i, err = 0; BUG_ON(!host); memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = SD_APP_OP_COND; if (mmc_host_is_spi(host)) cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */ else cmd.arg = ocr; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR; for (i = 100; i; i--) { err = mmc_wait_for_app_cmd(host, NULL, &cmd, MMC_CMD_RETRIES); if (err) break; /* if we're just probing, do a single pass */ if (ocr == 0) break; /* otherwise wait until reset completes */ if (mmc_host_is_spi(host)) { if (!(cmd.resp[0] & R1_SPI_IDLE)) break; } else { if (cmd.resp[0] & MMC_CARD_BUSY) break; } err = -ETIMEDOUT; mmc_delay(10); } if (rocr && !mmc_host_is_spi(host)) *rocr = cmd.resp[0]; return err; } int mmc_send_if_cond(struct mmc_host *host, u32 ocr) { struct mmc_command cmd; int err; static const u8 test_pattern = 0xAA; u8 result_pattern; /* * To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND * before SD_APP_OP_COND. This command will harmlessly fail for * SD 1.0 cards. 
*/ cmd.opcode = SD_SEND_IF_COND; cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern; cmd.flags = MMC_RSP_SPI_R7 | MMC_RSP_R7 | MMC_CMD_BCR; err = mmc_wait_for_cmd(host, &cmd, 0); if (err) return err; if (mmc_host_is_spi(host)) result_pattern = cmd.resp[1] & 0xFF; else result_pattern = cmd.resp[0] & 0xFF; if (result_pattern != test_pattern) return -EIO; return 0; } int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca) { int err; struct mmc_command cmd; BUG_ON(!host); BUG_ON(!rca); memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = SD_SEND_RELATIVE_ADDR; cmd.arg = 0; cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); if (err) return err; *rca = cmd.resp[0] >> 16; return 0; } int mmc_app_send_scr(struct mmc_card *card, u32 *scr) { int err; struct mmc_request mrq; struct mmc_command cmd; struct mmc_data data; struct scatterlist sg; BUG_ON(!card); BUG_ON(!card->host); BUG_ON(!scr); /* NOTE: caller guarantees scr is heap-allocated */ err = mmc_app_cmd(card->host, card); if (err) return err; memset(&mrq, 0, sizeof(struct mmc_request)); memset(&cmd, 0, sizeof(struct mmc_command)); memset(&data, 0, sizeof(struct mmc_data)); mrq.cmd = &cmd; mrq.data = &data; cmd.opcode = SD_APP_SEND_SCR; cmd.arg = 0; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; data.blksz = 8; data.blocks = 1; data.flags = MMC_DATA_READ; data.sg = &sg; data.sg_len = 1; sg_init_one(&sg, scr, 8); mmc_set_data_timeout(&data, card); mmc_wait_for_req(card->host, &mrq); if (cmd.error) return cmd.error; if (data.error) return data.error; scr[0] = be32_to_cpu(scr[0]); scr[1] = be32_to_cpu(scr[1]); return 0; } int mmc_sd_switch(struct mmc_card *card, int mode, int group, u8 value, u8 *resp) { struct mmc_request mrq; struct mmc_command cmd; struct mmc_data data; struct scatterlist sg; BUG_ON(!card); BUG_ON(!card->host); /* NOTE: caller guarantees resp is heap-allocated */ mode = !!mode; value &= 0xF; memset(&mrq, 0, sizeof(struct mmc_request)); memset(&cmd, 0, sizeof(struct mmc_command)); memset(&data, 0, sizeof(struct mmc_data)); mrq.cmd = &cmd; mrq.data = &data; cmd.opcode = SD_SWITCH; cmd.arg = mode << 31 | 0x00FFFFFF; cmd.arg &= ~(0xF << (group * 4)); cmd.arg |= value << (group * 4); cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; data.blksz = 64; data.blocks = 1; data.flags = MMC_DATA_READ; data.sg = &sg; data.sg_len = 1; sg_init_one(&sg, resp, 64); mmc_set_data_timeout(&data, card); mmc_wait_for_req(card->host, &mrq); if (cmd.error) return cmd.error; if (data.error) return data.error; return 0; } int mmc_app_sd_status(struct mmc_card *card, void *ssr) { int err; struct mmc_request mrq; struct mmc_command cmd; struct mmc_data data; struct scatterlist sg; BUG_ON(!card); BUG_ON(!card->host); BUG_ON(!ssr); /* NOTE: caller guarantees ssr is heap-allocated */ err = mmc_app_cmd(card->host, card); if (err) return err; memset(&mrq, 0, sizeof(struct mmc_request)); memset(&cmd, 0, sizeof(struct mmc_command)); memset(&data, 0, sizeof(struct mmc_data)); mrq.cmd = &cmd; mrq.data = &data; cmd.opcode = SD_APP_SD_STATUS; cmd.arg = 0; cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_ADTC; data.blksz = 64; data.blocks = 1; data.flags = MMC_DATA_READ; data.sg = &sg; data.sg_len = 1; sg_init_one(&sg, ssr, 64); mmc_set_data_timeout(&data, card); mmc_wait_for_req(card->host, &mrq); if (cmd.error) return cmd.error; if (data.error) return data.error; return 0; }
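The CMD8 argument built in mmc_send_if_cond() above packs the host's voltage claim and a check pattern into one word; a card that understands CMD8 echoes the pattern back, which lets the host distinguish a real response from line noise. A minimal sketch of that encoding (standalone, with the echo simulated rather than read from a card; build_if_cond_arg is my name for illustration, not a kernel helper):

#include <stdint.h>
#include <stdio.h>

/*
 * CMD8 (SEND_IF_COND) argument as used above: bit 8 signals 2.7-3.6V
 * support (derived from the OCR voltage window), bits 7:0 carry an
 * arbitrary check pattern the card must echo back in its R7 response.
 */
static uint32_t build_if_cond_arg(uint32_t ocr, uint8_t pattern)
{
	return (uint32_t)((ocr & 0xFF8000) != 0) << 8 | pattern;
}

int main(void)
{
	uint32_t arg = build_if_cond_arg(0x00FF8000, 0xAA);
	uint32_t resp = arg;	/* a compliant card echoes the pattern */

	/* prints: CMD8 arg = 0x1AA, pattern ok: 1 */
	printf("CMD8 arg = 0x%03X, pattern ok: %d\n",
	       (unsigned)arg, (resp & 0xFF) == 0xAA);
	return 0;
}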
gpl-2.0
varigit/kernel-VAR-SOM-AMxx
drivers/regulator/as3722-regulator.c
244
27496
/* * Voltage regulator support for AMS AS3722 PMIC * * Copyright (C) 2013 ams * * Author: Florian Lobmaier <florian.lobmaier@ams.com> * Author: Laxman Dewangan <ldewangan@nvidia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mfd/as3722.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/regulator/of_regulator.h> #include <linux/slab.h> /* Regulator IDs */ enum as3722_regulators_id { AS3722_REGULATOR_ID_SD0, AS3722_REGULATOR_ID_SD1, AS3722_REGULATOR_ID_SD2, AS3722_REGULATOR_ID_SD3, AS3722_REGULATOR_ID_SD4, AS3722_REGULATOR_ID_SD5, AS3722_REGULATOR_ID_SD6, AS3722_REGULATOR_ID_LDO0, AS3722_REGULATOR_ID_LDO1, AS3722_REGULATOR_ID_LDO2, AS3722_REGULATOR_ID_LDO3, AS3722_REGULATOR_ID_LDO4, AS3722_REGULATOR_ID_LDO5, AS3722_REGULATOR_ID_LDO6, AS3722_REGULATOR_ID_LDO7, AS3722_REGULATOR_ID_LDO9, AS3722_REGULATOR_ID_LDO10, AS3722_REGULATOR_ID_LDO11, AS3722_REGULATOR_ID_MAX, }; struct as3722_register_mapping { u8 regulator_id; const char *name; const char *sname; u8 vsel_reg; u8 vsel_mask; int n_voltages; u32 enable_reg; u8 enable_mask; u32 control_reg; u8 mode_mask; u32 sleep_ctrl_reg; u8 sleep_ctrl_mask; }; struct as3722_regulator_config_data { struct regulator_init_data *reg_init; bool enable_tracking; int ext_control; }; struct as3722_regulators { struct device *dev; struct as3722 *as3722; struct regulator_dev *rdevs[AS3722_REGULATOR_ID_MAX]; struct regulator_desc desc[AS3722_REGULATOR_ID_MAX]; struct as3722_regulator_config_data reg_config_data[AS3722_REGULATOR_ID_MAX]; }; static const struct as3722_register_mapping as3722_reg_lookup[] = { { .regulator_id = AS3722_REGULATOR_ID_SD0, .name = "as3722-sd0", .vsel_reg = AS3722_SD0_VOLTAGE_REG, .vsel_mask = AS3722_SD_VSEL_MASK, .enable_reg = AS3722_SD_CONTROL_REG, .enable_mask = AS3722_SDn_CTRL(0), .sleep_ctrl_reg = AS3722_ENABLE_CTRL1_REG, .sleep_ctrl_mask = AS3722_SD0_EXT_ENABLE_MASK, .control_reg = AS3722_SD0_CONTROL_REG, .mode_mask = AS3722_SD0_MODE_FAST, }, { .regulator_id = AS3722_REGULATOR_ID_SD1, .name = "as3722-sd1", .vsel_reg = AS3722_SD1_VOLTAGE_REG, .vsel_mask = AS3722_SD_VSEL_MASK, .enable_reg = AS3722_SD_CONTROL_REG, .enable_mask = AS3722_SDn_CTRL(1), .sleep_ctrl_reg = AS3722_ENABLE_CTRL1_REG, .sleep_ctrl_mask = AS3722_SD1_EXT_ENABLE_MASK, .control_reg = AS3722_SD1_CONTROL_REG, .mode_mask = AS3722_SD1_MODE_FAST, }, { .regulator_id = AS3722_REGULATOR_ID_SD2, .name = "as3722-sd2", .sname = "vsup-sd2", .vsel_reg = AS3722_SD2_VOLTAGE_REG, .vsel_mask = AS3722_SD_VSEL_MASK, .enable_reg = AS3722_SD_CONTROL_REG, .enable_mask = AS3722_SDn_CTRL(2), .sleep_ctrl_reg = AS3722_ENABLE_CTRL1_REG, .sleep_ctrl_mask = AS3722_SD2_EXT_ENABLE_MASK, .control_reg = AS3722_SD23_CONTROL_REG, .mode_mask 
= AS3722_SD2_MODE_FAST, .n_voltages = AS3722_SD2_VSEL_MAX + 1, }, { .regulator_id = AS3722_REGULATOR_ID_SD3, .name = "as3722-sd3", .sname = "vsup-sd3", .vsel_reg = AS3722_SD3_VOLTAGE_REG, .vsel_mask = AS3722_SD_VSEL_MASK, .enable_reg = AS3722_SD_CONTROL_REG, .enable_mask = AS3722_SDn_CTRL(3), .sleep_ctrl_reg = AS3722_ENABLE_CTRL1_REG, .sleep_ctrl_mask = AS3722_SD3_EXT_ENABLE_MASK, .control_reg = AS3722_SD23_CONTROL_REG, .mode_mask = AS3722_SD3_MODE_FAST, .n_voltages = AS3722_SD2_VSEL_MAX + 1, }, { .regulator_id = AS3722_REGULATOR_ID_SD4, .name = "as3722-sd4", .sname = "vsup-sd4", .vsel_reg = AS3722_SD4_VOLTAGE_REG, .vsel_mask = AS3722_SD_VSEL_MASK, .enable_reg = AS3722_SD_CONTROL_REG, .enable_mask = AS3722_SDn_CTRL(4), .sleep_ctrl_reg = AS3722_ENABLE_CTRL2_REG, .sleep_ctrl_mask = AS3722_SD4_EXT_ENABLE_MASK, .control_reg = AS3722_SD4_CONTROL_REG, .mode_mask = AS3722_SD4_MODE_FAST, .n_voltages = AS3722_SD2_VSEL_MAX + 1, }, { .regulator_id = AS3722_REGULATOR_ID_SD5, .name = "as3722-sd5", .sname = "vsup-sd5", .vsel_reg = AS3722_SD5_VOLTAGE_REG, .vsel_mask = AS3722_SD_VSEL_MASK, .enable_reg = AS3722_SD_CONTROL_REG, .enable_mask = AS3722_SDn_CTRL(5), .sleep_ctrl_reg = AS3722_ENABLE_CTRL2_REG, .sleep_ctrl_mask = AS3722_SD5_EXT_ENABLE_MASK, .control_reg = AS3722_SD5_CONTROL_REG, .mode_mask = AS3722_SD5_MODE_FAST, .n_voltages = AS3722_SD2_VSEL_MAX + 1, }, { .regulator_id = AS3722_REGULATOR_ID_SD6, .name = "as3722-sd6", .vsel_reg = AS3722_SD6_VOLTAGE_REG, .vsel_mask = AS3722_SD_VSEL_MASK, .enable_reg = AS3722_SD_CONTROL_REG, .enable_mask = AS3722_SDn_CTRL(6), .sleep_ctrl_reg = AS3722_ENABLE_CTRL2_REG, .sleep_ctrl_mask = AS3722_SD6_EXT_ENABLE_MASK, .control_reg = AS3722_SD6_CONTROL_REG, .mode_mask = AS3722_SD6_MODE_FAST, }, { .regulator_id = AS3722_REGULATOR_ID_LDO0, .name = "as3722-ldo0", .sname = "vin-ldo0", .vsel_reg = AS3722_LDO0_VOLTAGE_REG, .vsel_mask = AS3722_LDO0_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL0_REG, .enable_mask = AS3722_LDO0_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL3_REG, .sleep_ctrl_mask = AS3722_LDO0_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO0_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO1, .name = "as3722-ldo1", .sname = "vin-ldo1-6", .vsel_reg = AS3722_LDO1_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL0_REG, .enable_mask = AS3722_LDO1_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL3_REG, .sleep_ctrl_mask = AS3722_LDO1_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO2, .name = "as3722-ldo2", .sname = "vin-ldo2-5-7", .vsel_reg = AS3722_LDO2_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL0_REG, .enable_mask = AS3722_LDO2_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL3_REG, .sleep_ctrl_mask = AS3722_LDO2_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO3, .name = "as3722-ldo3", .sname = "vin-ldo3-4", .vsel_reg = AS3722_LDO3_VOLTAGE_REG, .vsel_mask = AS3722_LDO3_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL0_REG, .enable_mask = AS3722_LDO3_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL3_REG, .sleep_ctrl_mask = AS3722_LDO3_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO3_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO4, .name = "as3722-ldo4", .sname = "vin-ldo3-4", .vsel_reg = AS3722_LDO4_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL0_REG, .enable_mask = AS3722_LDO4_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL4_REG, .sleep_ctrl_mask = AS3722_LDO4_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, },
{ .regulator_id = AS3722_REGULATOR_ID_LDO5, .name = "as3722-ldo5", .sname = "vin-ldo2-5-7", .vsel_reg = AS3722_LDO5_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL0_REG, .enable_mask = AS3722_LDO5_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL4_REG, .sleep_ctrl_mask = AS3722_LDO5_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO6, .name = "as3722-ldo6", .sname = "vin-ldo1-6", .vsel_reg = AS3722_LDO6_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL0_REG, .enable_mask = AS3722_LDO6_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL4_REG, .sleep_ctrl_mask = AS3722_LDO6_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO7, .name = "as3722-ldo7", .sname = "vin-ldo2-5-7", .vsel_reg = AS3722_LDO7_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL0_REG, .enable_mask = AS3722_LDO7_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL4_REG, .sleep_ctrl_mask = AS3722_LDO7_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO9, .name = "as3722-ldo9", .sname = "vin-ldo9-10", .vsel_reg = AS3722_LDO9_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL1_REG, .enable_mask = AS3722_LDO9_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL5_REG, .sleep_ctrl_mask = AS3722_LDO9_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO10, .name = "as3722-ldo10", .sname = "vin-ldo9-10", .vsel_reg = AS3722_LDO10_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL1_REG, .enable_mask = AS3722_LDO10_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL5_REG, .sleep_ctrl_mask = AS3722_LDO10_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, }, { .regulator_id = AS3722_REGULATOR_ID_LDO11, .name = "as3722-ldo11", .sname = "vin-ldo11", .vsel_reg = AS3722_LDO11_VOLTAGE_REG, .vsel_mask = AS3722_LDO_VSEL_MASK, .enable_reg = AS3722_LDOCONTROL1_REG, .enable_mask = AS3722_LDO11_CTRL, .sleep_ctrl_reg = AS3722_ENABLE_CTRL5_REG, .sleep_ctrl_mask = AS3722_LDO11_EXT_ENABLE_MASK, .n_voltages = AS3722_LDO_NUM_VOLT, }, }; static const int as3722_ldo_current[] = { 150000, 300000 }; static const int as3722_sd016_current[] = { 2500000, 3000000, 3500000 }; static int as3722_current_to_index(int min_uA, int max_uA, const int *curr_table, int n_currents) { int i; for (i = n_currents - 1; i >= 0; i--) { if ((min_uA <= curr_table[i]) && (curr_table[i] <= max_uA)) return i; } return -EINVAL; } static int as3722_ldo_get_current_limit(struct regulator_dev *rdev) { struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev); struct as3722 *as3722 = as3722_regs->as3722; int id = rdev_get_id(rdev); u32 val; int ret; ret = as3722_read(as3722, as3722_reg_lookup[id].vsel_reg, &val); if (ret < 0) { dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n", as3722_reg_lookup[id].vsel_reg, ret); return ret; } if (val & AS3722_LDO_ILIMIT_MASK) return 300000; return 150000; } static int as3722_ldo_set_current_limit(struct regulator_dev *rdev, int min_uA, int max_uA) { struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev); struct as3722 *as3722 = as3722_regs->as3722; int id = rdev_get_id(rdev); int ret; u32 reg = 0; ret = as3722_current_to_index(min_uA, max_uA, as3722_ldo_current, ARRAY_SIZE(as3722_ldo_current)); if (ret < 0) { dev_err(as3722_regs->dev, "Current range min:max = %d:%d does not support\n", min_uA, max_uA); return ret; } if (ret) reg = AS3722_LDO_ILIMIT_BIT; 
return as3722_update_bits(as3722, as3722_reg_lookup[id].vsel_reg, AS3722_LDO_ILIMIT_MASK, reg); } static struct regulator_ops as3722_ldo0_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .list_voltage = regulator_list_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_current_limit = as3722_ldo_get_current_limit, .set_current_limit = as3722_ldo_set_current_limit, }; static struct regulator_ops as3722_ldo0_extcntrl_ops = { .list_voltage = regulator_list_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_current_limit = as3722_ldo_get_current_limit, .set_current_limit = as3722_ldo_set_current_limit, }; static int as3722_ldo3_set_tracking_mode(struct as3722_regulators *as3722_reg, int id, u8 mode) { struct as3722 *as3722 = as3722_reg->as3722; switch (mode) { case AS3722_LDO3_MODE_PMOS: case AS3722_LDO3_MODE_PMOS_TRACKING: case AS3722_LDO3_MODE_NMOS: case AS3722_LDO3_MODE_SWITCH: return as3722_update_bits(as3722, as3722_reg_lookup[id].vsel_reg, AS3722_LDO3_MODE_MASK, mode); default: return -EINVAL; } } static int as3722_ldo3_get_current_limit(struct regulator_dev *rdev) { return 150000; } static struct regulator_ops as3722_ldo3_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .list_voltage = regulator_list_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_current_limit = as3722_ldo3_get_current_limit, }; static struct regulator_ops as3722_ldo3_extcntrl_ops = { .list_voltage = regulator_list_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_current_limit = as3722_ldo3_get_current_limit, }; static const struct regulator_linear_range as3722_ldo_ranges[] = { REGULATOR_LINEAR_RANGE(825000, 0x01, 0x24, 25000), REGULATOR_LINEAR_RANGE(1725000, 0x40, 0x7F, 25000), }; static struct regulator_ops as3722_ldo_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .map_voltage = regulator_map_voltage_linear_range, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear_range, .get_current_limit = as3722_ldo_get_current_limit, .set_current_limit = as3722_ldo_set_current_limit, }; static struct regulator_ops as3722_ldo_extcntrl_ops = { .map_voltage = regulator_map_voltage_linear_range, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear_range, .get_current_limit = as3722_ldo_get_current_limit, .set_current_limit = as3722_ldo_set_current_limit, }; static unsigned int as3722_sd_get_mode(struct regulator_dev *rdev) { struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev); struct as3722 *as3722 = as3722_regs->as3722; int id = rdev_get_id(rdev); u32 val; int ret; if (!as3722_reg_lookup[id].control_reg) return -ENOTSUPP; ret = as3722_read(as3722, as3722_reg_lookup[id].control_reg, &val); if (ret < 0) { dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n", as3722_reg_lookup[id].control_reg, ret); return ret; } if (val & as3722_reg_lookup[id].mode_mask) return REGULATOR_MODE_FAST; else return 
REGULATOR_MODE_NORMAL; } static int as3722_sd_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev); struct as3722 *as3722 = as3722_regs->as3722; u8 id = rdev_get_id(rdev); u8 val = 0; int ret; if (!as3722_reg_lookup[id].control_reg) return -ERANGE; switch (mode) { case REGULATOR_MODE_FAST: val = as3722_reg_lookup[id].mode_mask; case REGULATOR_MODE_NORMAL: /* fall down */ break; default: return -EINVAL; } ret = as3722_update_bits(as3722, as3722_reg_lookup[id].control_reg, as3722_reg_lookup[id].mode_mask, val); if (ret < 0) { dev_err(as3722_regs->dev, "Reg 0x%02x update failed: %d\n", as3722_reg_lookup[id].control_reg, ret); return ret; } return ret; } static int as3722_sd016_get_current_limit(struct regulator_dev *rdev) { struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev); struct as3722 *as3722 = as3722_regs->as3722; int id = rdev_get_id(rdev); u32 val, reg; int mask; int ret; switch (id) { case AS3722_REGULATOR_ID_SD0: reg = AS3722_OVCURRENT_REG; mask = AS3722_OVCURRENT_SD0_TRIP_MASK; break; case AS3722_REGULATOR_ID_SD1: reg = AS3722_OVCURRENT_REG; mask = AS3722_OVCURRENT_SD1_TRIP_MASK; break; case AS3722_REGULATOR_ID_SD6: reg = AS3722_OVCURRENT_DEB_REG; mask = AS3722_OVCURRENT_SD6_TRIP_MASK; break; default: return -EINVAL; } ret = as3722_read(as3722, reg, &val); if (ret < 0) { dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n", reg, ret); return ret; } val &= mask; val >>= ffs(mask) - 1; if (val == 3) return -EINVAL; return as3722_sd016_current[val]; } static int as3722_sd016_set_current_limit(struct regulator_dev *rdev, int min_uA, int max_uA) { struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev); struct as3722 *as3722 = as3722_regs->as3722; int id = rdev_get_id(rdev); int ret; int val; int mask; u32 reg; ret = as3722_current_to_index(min_uA, max_uA, as3722_sd016_current, ARRAY_SIZE(as3722_sd016_current)); if (ret < 0) { dev_err(as3722_regs->dev, "Current range min:max = %d:%d does not support\n", min_uA, max_uA); return ret; } switch (id) { case AS3722_REGULATOR_ID_SD0: reg = AS3722_OVCURRENT_REG; mask = AS3722_OVCURRENT_SD0_TRIP_MASK; break; case AS3722_REGULATOR_ID_SD1: reg = AS3722_OVCURRENT_REG; mask = AS3722_OVCURRENT_SD1_TRIP_MASK; break; case AS3722_REGULATOR_ID_SD6: reg = AS3722_OVCURRENT_DEB_REG; mask = AS3722_OVCURRENT_SD6_TRIP_MASK; break; default: return -EINVAL; } ret <<= ffs(mask) - 1; val = ret & mask; return as3722_update_bits(as3722, reg, mask, val); } static bool as3722_sd0_is_low_voltage(struct as3722_regulators *as3722_regs) { int err; unsigned val; err = as3722_read(as3722_regs->as3722, AS3722_FUSE7_REG, &val); if (err < 0) { dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n", AS3722_FUSE7_REG, err); return false; } if (val & AS3722_FUSE7_SD0_LOW_VOLTAGE) return true; return false; } static const struct regulator_linear_range as3722_sd2345_ranges[] = { REGULATOR_LINEAR_RANGE(612500, 0x01, 0x40, 12500), REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000), REGULATOR_LINEAR_RANGE(2650000, 0x71, 0x7F, 50000), }; static struct regulator_ops as3722_sd016_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_current_limit = as3722_sd016_get_current_limit, .set_current_limit = 
as3722_sd016_set_current_limit, .get_mode = as3722_sd_get_mode, .set_mode = as3722_sd_set_mode, }; static struct regulator_ops as3722_sd016_extcntrl_ops = { .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_current_limit = as3722_sd016_get_current_limit, .set_current_limit = as3722_sd016_set_current_limit, .get_mode = as3722_sd_get_mode, .set_mode = as3722_sd_set_mode, }; static struct regulator_ops as3722_sd2345_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .list_voltage = regulator_list_voltage_linear_range, .map_voltage = regulator_map_voltage_linear_range, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .get_mode = as3722_sd_get_mode, .set_mode = as3722_sd_set_mode, }; static struct regulator_ops as3722_sd2345_extcntrl_ops = { .list_voltage = regulator_list_voltage_linear_range, .map_voltage = regulator_map_voltage_linear_range, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .get_mode = as3722_sd_get_mode, .set_mode = as3722_sd_set_mode, }; static int as3722_extreg_init(struct as3722_regulators *as3722_regs, int id, int ext_pwr_ctrl) { int ret; unsigned int val; if ((ext_pwr_ctrl < AS3722_EXT_CONTROL_ENABLE1) || (ext_pwr_ctrl > AS3722_EXT_CONTROL_ENABLE3)) return -EINVAL; val = ext_pwr_ctrl << (ffs(as3722_reg_lookup[id].sleep_ctrl_mask) - 1); ret = as3722_update_bits(as3722_regs->as3722, as3722_reg_lookup[id].sleep_ctrl_reg, as3722_reg_lookup[id].sleep_ctrl_mask, val); if (ret < 0) dev_err(as3722_regs->dev, "Reg 0x%02x update failed: %d\n", as3722_reg_lookup[id].sleep_ctrl_reg, ret); return ret; } static struct of_regulator_match as3722_regulator_matches[] = { { .name = "sd0", }, { .name = "sd1", }, { .name = "sd2", }, { .name = "sd3", }, { .name = "sd4", }, { .name = "sd5", }, { .name = "sd6", }, { .name = "ldo0", }, { .name = "ldo1", }, { .name = "ldo2", }, { .name = "ldo3", }, { .name = "ldo4", }, { .name = "ldo5", }, { .name = "ldo6", }, { .name = "ldo7", }, { .name = "ldo9", }, { .name = "ldo10", }, { .name = "ldo11", }, }; static int as3722_get_regulator_dt_data(struct platform_device *pdev, struct as3722_regulators *as3722_regs) { struct device_node *np; struct as3722_regulator_config_data *reg_config; u32 prop; int id; int ret; np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators"); if (!np) { dev_err(&pdev->dev, "Device does not have a regulators node\n"); return -ENODEV; } pdev->dev.of_node = np; ret = of_regulator_match(&pdev->dev, np, as3722_regulator_matches, ARRAY_SIZE(as3722_regulator_matches)); if (ret < 0) { dev_err(&pdev->dev, "Parsing of regulator node failed: %d\n", ret); return ret; } for (id = 0; id < ARRAY_SIZE(as3722_regulator_matches); ++id) { struct device_node *reg_node; reg_config = &as3722_regs->reg_config_data[id]; reg_config->reg_init = as3722_regulator_matches[id].init_data; reg_node = as3722_regulator_matches[id].of_node; if (!reg_config->reg_init || !reg_node) continue; ret = of_property_read_u32(reg_node, "ams,ext-control", &prop); if (!ret) { if (prop < 3) reg_config->ext_control = prop; else dev_warn(&pdev->dev, "ext-control has invalid option: %u\n", prop); } reg_config->enable_tracking = of_property_read_bool(reg_node, "ams,enable-tracking"); } return 0; } static int as3722_regulator_probe(struct
platform_device *pdev) { struct as3722 *as3722 = dev_get_drvdata(pdev->dev.parent); struct as3722_regulators *as3722_regs; struct as3722_regulator_config_data *reg_config; struct regulator_dev *rdev; struct regulator_config config = { }; struct regulator_ops *ops; int id; int ret; as3722_regs = devm_kzalloc(&pdev->dev, sizeof(*as3722_regs), GFP_KERNEL); if (!as3722_regs) return -ENOMEM; as3722_regs->dev = &pdev->dev; as3722_regs->as3722 = as3722; platform_set_drvdata(pdev, as3722_regs); ret = as3722_get_regulator_dt_data(pdev, as3722_regs); if (ret < 0) return ret; config.dev = &pdev->dev; config.driver_data = as3722_regs; config.regmap = as3722->regmap; for (id = 0; id < AS3722_REGULATOR_ID_MAX; id++) { reg_config = &as3722_regs->reg_config_data[id]; as3722_regs->desc[id].name = as3722_reg_lookup[id].name; as3722_regs->desc[id].supply_name = as3722_reg_lookup[id].sname; as3722_regs->desc[id].id = as3722_reg_lookup[id].regulator_id; as3722_regs->desc[id].n_voltages = as3722_reg_lookup[id].n_voltages; as3722_regs->desc[id].type = REGULATOR_VOLTAGE; as3722_regs->desc[id].owner = THIS_MODULE; as3722_regs->desc[id].enable_reg = as3722_reg_lookup[id].enable_reg; as3722_regs->desc[id].enable_mask = as3722_reg_lookup[id].enable_mask; as3722_regs->desc[id].vsel_reg = as3722_reg_lookup[id].vsel_reg; as3722_regs->desc[id].vsel_mask = as3722_reg_lookup[id].vsel_mask; switch (id) { case AS3722_REGULATOR_ID_LDO0: if (reg_config->ext_control) ops = &as3722_ldo0_extcntrl_ops; else ops = &as3722_ldo0_ops; as3722_regs->desc[id].min_uV = 825000; as3722_regs->desc[id].uV_step = 25000; as3722_regs->desc[id].linear_min_sel = 1; as3722_regs->desc[id].enable_time = 500; break; case AS3722_REGULATOR_ID_LDO3: if (reg_config->ext_control) ops = &as3722_ldo3_extcntrl_ops; else ops = &as3722_ldo3_ops; as3722_regs->desc[id].min_uV = 620000; as3722_regs->desc[id].uV_step = 20000; as3722_regs->desc[id].linear_min_sel = 1; as3722_regs->desc[id].enable_time = 500; if (reg_config->enable_tracking) { ret = as3722_ldo3_set_tracking_mode(as3722_regs, id, AS3722_LDO3_MODE_PMOS_TRACKING); if (ret < 0) { dev_err(&pdev->dev, "LDO3 tracking failed: %d\n", ret); return ret; } } break; case AS3722_REGULATOR_ID_SD0: case AS3722_REGULATOR_ID_SD1: case AS3722_REGULATOR_ID_SD6: if (reg_config->ext_control) ops = &as3722_sd016_extcntrl_ops; else ops = &as3722_sd016_ops; if (id == AS3722_REGULATOR_ID_SD0 && as3722_sd0_is_low_voltage(as3722_regs)) { as3722_regs->desc[id].n_voltages = AS3722_SD0_VSEL_LOW_VOL_MAX + 1; as3722_regs->desc[id].min_uV = 410000; } else { as3722_regs->desc[id].n_voltages = AS3722_SD0_VSEL_MAX + 1; as3722_regs->desc[id].min_uV = 610000; } as3722_regs->desc[id].uV_step = 10000; as3722_regs->desc[id].linear_min_sel = 1; as3722_regs->desc[id].enable_time = 600; break; case AS3722_REGULATOR_ID_SD2: case AS3722_REGULATOR_ID_SD3: case AS3722_REGULATOR_ID_SD4: case AS3722_REGULATOR_ID_SD5: if (reg_config->ext_control) ops = &as3722_sd2345_extcntrl_ops; else ops = &as3722_sd2345_ops; as3722_regs->desc[id].linear_ranges = as3722_sd2345_ranges; as3722_regs->desc[id].n_linear_ranges = ARRAY_SIZE(as3722_sd2345_ranges); break; default: if (reg_config->ext_control) ops = &as3722_ldo_extcntrl_ops; else ops = &as3722_ldo_ops; as3722_regs->desc[id].enable_time = 500; as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges; as3722_regs->desc[id].n_linear_ranges = ARRAY_SIZE(as3722_ldo_ranges); break; } as3722_regs->desc[id].ops = ops; config.init_data = reg_config->reg_init; config.of_node = as3722_regulator_matches[id].of_node; rdev
= devm_regulator_register(&pdev->dev, &as3722_regs->desc[id], &config); if (IS_ERR(rdev)) { ret = PTR_ERR(rdev); dev_err(&pdev->dev, "regulator %d register failed: %d\n", id, ret); return ret; } as3722_regs->rdevs[id] = rdev; if (reg_config->ext_control) { ret = regulator_enable_regmap(rdev); if (ret < 0) { dev_err(&pdev->dev, "Regulator %d enable failed: %d\n", id, ret); return ret; } ret = as3722_extreg_init(as3722_regs, id, reg_config->ext_control); if (ret < 0) { dev_err(&pdev->dev, "AS3722 ext control failed: %d\n", ret); return ret; } } } return 0; } static const struct of_device_id of_as3722_regulator_match[] = { { .compatible = "ams,as3722-regulator", }, {}, }; MODULE_DEVICE_TABLE(of, of_as3722_regulator_match); static struct platform_driver as3722_regulator_driver = { .driver = { .name = "as3722-regulator", .owner = THIS_MODULE, .of_match_table = of_as3722_regulator_match, }, .probe = as3722_regulator_probe, }; module_platform_driver(as3722_regulator_driver); MODULE_ALIAS("platform:as3722-regulator"); MODULE_DESCRIPTION("AS3722 regulator driver"); MODULE_AUTHOR("Florian Lobmaier <florian.lobmaier@ams.com>"); MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); MODULE_LICENSE("GPL");
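/*
 * Illustrative device-tree fragment for this driver. This is a sketch
 * only: the node names come from as3722_regulator_matches[] and the
 * "ams,ext-control" / "ams,enable-tracking" properties are the ones
 * parsed in as3722_get_regulator_dt_data(); the voltage values and the
 * choice of nodes shown are hypothetical, not taken from any real board
 * file.
 *
 *	regulators {
 *		sd0 {
 *			regulator-name = "vdd-cpu";
 *			regulator-min-microvolt = <700000>;
 *			regulator-max-microvolt = <1350000>;
 *			regulator-always-on;
 *			ams,ext-control = <2>;
 *		};
 *		ldo3 {
 *			regulator-name = "vdd-rtc";
 *			regulator-min-microvolt = <1000000>;
 *			regulator-max-microvolt = <1000000>;
 *			ams,enable-tracking;
 *		};
 *	};
 */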
gpl-2.0
gototem/kernel
arch/powerpc/platforms/pasemi/cpufreq.c
756
7713
/* * Copyright (C) 2007 PA Semi, Inc * * Authors: Egor Martovetsky <egor@pasemi.com> * Olof Johansson <olof@lixom.net> * * Maintained by: Olof Johansson <olof@lixom.net> * * Based on arch/powerpc/platforms/cell/cbe_cpufreq.c: * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/cpufreq.h> #include <linux/timer.h> #include <linux/module.h> #include <asm/hw_irq.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/time.h> #include <asm/smp.h> #define SDCASR_REG 0x0100 #define SDCASR_REG_STRIDE 0x1000 #define SDCPWR_CFGA0_REG 0x0100 #define SDCPWR_PWST0_REG 0x0000 #define SDCPWR_GIZTIME_REG 0x0440 /* SDCPWR_GIZTIME_REG fields */ #define SDCPWR_GIZTIME_GR 0x80000000 #define SDCPWR_GIZTIME_LONGLOCK 0x000000ff /* Offset of ASR registers from SDC base */ #define SDCASR_OFFSET 0x120000 static void __iomem *sdcpwr_mapbase; static void __iomem *sdcasr_mapbase; static DEFINE_MUTEX(pas_switch_mutex); /* Current astate is used when waking up from power savings on * one core, in case the other core has switched states during * the idle time. */ static int current_astate; /* We support 5 (A0-A4) power states excluding turbo (A5-A6) modes */ static struct cpufreq_frequency_table pas_freqs[] = { {0, 0}, {1, 0}, {2, 0}, {3, 0}, {4, 0}, {0, CPUFREQ_TABLE_END}, }; static struct freq_attr *pas_cpu_freqs_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; /* * hardware specific functions */ static int get_astate_freq(int astate) { u32 ret; ret = in_le32(sdcpwr_mapbase + SDCPWR_CFGA0_REG + (astate * 0x10)); return ret & 0x3f; } static int get_cur_astate(int cpu) { u32 ret; ret = in_le32(sdcpwr_mapbase + SDCPWR_PWST0_REG); ret = (ret >> (cpu * 4)) & 0x7; return ret; } static int get_gizmo_latency(void) { u32 giztime, ret; giztime = in_le32(sdcpwr_mapbase + SDCPWR_GIZTIME_REG); /* just provide the upper bound */ if (giztime & SDCPWR_GIZTIME_GR) ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 128000; else ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 1000; return ret; } static void set_astate(int cpu, unsigned int astate) { unsigned long flags; /* Return if called before init has run */ if (unlikely(!sdcasr_mapbase)) return; local_irq_save(flags); out_le32(sdcasr_mapbase + SDCASR_REG + SDCASR_REG_STRIDE*cpu, astate); local_irq_restore(flags); } int check_astate(void) { return get_cur_astate(hard_smp_processor_id()); } void restore_astate(int cpu) { set_astate(cpu, current_astate); } /* * cpufreq functions */ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) { const u32 *max_freqp; u32 max_freq; int i, cur_astate; struct resource res; struct device_node *cpu, *dn; int err = -ENODEV; cpu = of_get_cpu_node(policy->cpu, NULL); if (!cpu) goto out; dn = of_find_compatible_node(NULL, NULL, "1682m-sdc"); if (!dn) dn = of_find_compatible_node(NULL, NULL, "pasemi,pwrficient-sdc"); if (!dn) goto out; err =
of_address_to_resource(dn, 0, &res); of_node_put(dn); if (err) goto out; sdcasr_mapbase = ioremap(res.start + SDCASR_OFFSET, 0x2000); if (!sdcasr_mapbase) { err = -EINVAL; goto out; } dn = of_find_compatible_node(NULL, NULL, "1682m-gizmo"); if (!dn) dn = of_find_compatible_node(NULL, NULL, "pasemi,pwrficient-gizmo"); if (!dn) { err = -ENODEV; goto out_unmap_sdcasr; } err = of_address_to_resource(dn, 0, &res); of_node_put(dn); if (err) goto out_unmap_sdcasr; sdcpwr_mapbase = ioremap(res.start, 0x1000); if (!sdcpwr_mapbase) { err = -EINVAL; goto out_unmap_sdcasr; } pr_debug("init cpufreq on CPU %d\n", policy->cpu); max_freqp = of_get_property(cpu, "clock-frequency", NULL); if (!max_freqp) { err = -EINVAL; goto out_unmap_sdcpwr; } /* we need the freq in kHz */ max_freq = *max_freqp / 1000; pr_debug("max clock-frequency is at %u kHz\n", max_freq); pr_debug("initializing frequency table\n"); /* initialize frequency table */ for (i = 0; pas_freqs[i].frequency != CPUFREQ_TABLE_END; i++) { pas_freqs[i].frequency = get_astate_freq(pas_freqs[i].index) * 100000; pr_debug("%d: %d\n", i, pas_freqs[i].frequency); } policy->cpuinfo.transition_latency = get_gizmo_latency(); cur_astate = get_cur_astate(policy->cpu); pr_debug("current astate is at %d\n", cur_astate); policy->cur = pas_freqs[cur_astate].frequency; cpumask_copy(policy->cpus, cpu_online_mask); ppc_proc_freq = policy->cur * 1000ul; cpufreq_frequency_table_get_attr(pas_freqs, policy->cpu); /* this ensures that policy->cpuinfo.min_freq and policy->cpuinfo.max_freq * are set correctly */ return cpufreq_frequency_table_cpuinfo(policy, pas_freqs); out_unmap_sdcpwr: iounmap(sdcpwr_mapbase); out_unmap_sdcasr: iounmap(sdcasr_mapbase); out: return err; } static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy) { /* * We don't support CPU hotplug. Don't unmap after the system * has already made it to a running state.
*/ if (system_state != SYSTEM_BOOTING) return 0; if (sdcasr_mapbase) iounmap(sdcasr_mapbase); if (sdcpwr_mapbase) iounmap(sdcpwr_mapbase); cpufreq_frequency_table_put_attr(policy->cpu); return 0; } static int pas_cpufreq_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, pas_freqs); } static int pas_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { struct cpufreq_freqs freqs; int pas_astate_new; int i; cpufreq_frequency_table_target(policy, pas_freqs, target_freq, relation, &pas_astate_new); freqs.old = policy->cur; freqs.new = pas_freqs[pas_astate_new].frequency; mutex_lock(&pas_switch_mutex); cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n", policy->cpu, pas_freqs[pas_astate_new].frequency, pas_freqs[pas_astate_new].index); current_astate = pas_astate_new; for_each_online_cpu(i) set_astate(i, pas_astate_new); cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); mutex_unlock(&pas_switch_mutex); ppc_proc_freq = freqs.new * 1000ul; return 0; } static struct cpufreq_driver pas_cpufreq_driver = { .name = "pas-cpufreq", .owner = THIS_MODULE, .flags = CPUFREQ_CONST_LOOPS, .init = pas_cpufreq_cpu_init, .exit = pas_cpufreq_cpu_exit, .verify = pas_cpufreq_verify, .target = pas_cpufreq_target, .attr = pas_cpu_freqs_attr, }; /* * module init and destroy */ static int __init pas_cpufreq_init(void) { if (!of_machine_is_compatible("PA6T-1682M") && !of_machine_is_compatible("pasemi,pwrficient")) return -ENODEV; return cpufreq_register_driver(&pas_cpufreq_driver); } static void __exit pas_cpufreq_exit(void) { cpufreq_unregister_driver(&pas_cpufreq_driver); } module_init(pas_cpufreq_init); module_exit(pas_cpufreq_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>, Olof Johansson <olof@lixom.net>");
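/*
 * Worked example of the frequency-table setup in pas_cpufreq_cpu_init()
 * (the multiplier value below is illustrative, not read from real
 * hardware): get_astate_freq() returns a 6-bit multiplier from
 * SDCPWR_CFGA0_REG, and the table stores it scaled to kHz:
 *
 *	pas_freqs[i].frequency = get_astate_freq(pas_freqs[i].index) * 100000;
 *
 * so a multiplier of 18 for astate 4 would yield 1800000 kHz (1.8 GHz),
 * while get_gizmo_latency() supplies the upper bound on the transition
 * latency reported to the cpufreq core for switching between astates.
 */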
gpl-2.0
WaRP7/linux-fslc
drivers/cpuidle/cpuidle-exynos.c
756
3451
/* * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd. * http://www.samsung.com * * Coupled cpuidle support based on the work of: * Colin Cross <ccross@android.com> * Daniel Lezcano <daniel.lezcano@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/cpuidle.h> #include <linux/cpu_pm.h> #include <linux/export.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/platform_data/cpuidle-exynos.h> #include <asm/suspend.h> #include <asm/cpuidle.h> static atomic_t exynos_idle_barrier; static struct cpuidle_exynos_data *exynos_cpuidle_pdata; static void (*exynos_enter_aftr)(void); static int exynos_enter_coupled_lowpower(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { int ret; exynos_cpuidle_pdata->pre_enter_aftr(); /* * Wait for all cpus to reach this point at the same moment */ cpuidle_coupled_parallel_barrier(dev, &exynos_idle_barrier); /* * Both cpus will reach this point at the same time */ ret = dev->cpu ? exynos_cpuidle_pdata->cpu1_powerdown() : exynos_cpuidle_pdata->cpu0_enter_aftr(); if (ret) index = ret; /* * Wait for all cpus to finish the power sequence before going further */ cpuidle_coupled_parallel_barrier(dev, &exynos_idle_barrier); exynos_cpuidle_pdata->post_enter_aftr(); return index; } static int exynos_enter_lowpower(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { int new_index = index; /* AFTR can only be entered when cores other than CPU0 are offline */ if (num_online_cpus() > 1 || dev->cpu != 0) new_index = drv->safe_state_index; if (new_index == 0) return arm_cpuidle_simple_enter(dev, drv, new_index); exynos_enter_aftr(); return new_index; } static struct cpuidle_driver exynos_idle_driver = { .name = "exynos_idle", .owner = THIS_MODULE, .states = { [0] = ARM_CPUIDLE_WFI_STATE, [1] = { .enter = exynos_enter_lowpower, .exit_latency = 300, .target_residency = 100000, .name = "C1", .desc = "ARM power down", }, }, .state_count = 2, .safe_state_index = 0, }; static struct cpuidle_driver exynos_coupled_idle_driver = { .name = "exynos_coupled_idle", .owner = THIS_MODULE, .states = { [0] = ARM_CPUIDLE_WFI_STATE, [1] = { .enter = exynos_enter_coupled_lowpower, .exit_latency = 5000, .target_residency = 10000, .flags = CPUIDLE_FLAG_COUPLED | CPUIDLE_FLAG_TIMER_STOP, .name = "C1", .desc = "ARM power down", }, }, .state_count = 2, .safe_state_index = 0, }; static int exynos_cpuidle_probe(struct platform_device *pdev) { int ret; if (IS_ENABLED(CONFIG_SMP) && of_machine_is_compatible("samsung,exynos4210")) { exynos_cpuidle_pdata = pdev->dev.platform_data; ret = cpuidle_register(&exynos_coupled_idle_driver, cpu_possible_mask); } else { exynos_enter_aftr = (void *)(pdev->dev.platform_data); ret = cpuidle_register(&exynos_idle_driver, NULL); } if (ret) { dev_err(&pdev->dev, "failed to register cpuidle driver\n"); return ret; } return 0; } static struct platform_driver exynos_cpuidle_driver = { .probe = exynos_cpuidle_probe, .driver = { .name = "exynos_cpuidle", }, }; module_platform_driver(exynos_cpuidle_driver);
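/*
 * Minimal sketch of the rendezvous implemented by
 * exynos_enter_coupled_lowpower() above (ordering only; the pre/post
 * hooks and the power-down entry points are board-specific callbacks
 * supplied through platform data):
 *
 *	pre_enter_aftr();
 *	cpuidle_coupled_parallel_barrier(...);	// all cpus arrive together
 *	dev->cpu ? cpu1_powerdown() : cpu0_enter_aftr();
 *	cpuidle_coupled_parallel_barrier(...);	// wait for both sequences
 *	post_enter_aftr();
 */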
gpl-2.0
vathpela/linux-esrt
drivers/media/usb/go7007/go7007-fw.c
1268
40930
/* * Copyright (C) 2005-2006 Micronas USA Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * This file contains code to generate a firmware image for the GO7007SB * encoder. Much of the firmware is read verbatim from a file, but some of * it concerning bitrate control and other things that can be configured at * run-time are generated dynamically. Note that the format headers * generated here do not affect the functioning of the encoder; they are * merely parroted back to the host at the start of each frame. */ #include <linux/module.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/device.h> #include <linux/i2c.h> #include <linux/firmware.h> #include <linux/slab.h> #include <asm/byteorder.h> #include "go7007-priv.h" #define GO7007_FW_NAME "go7007/go7007tv.bin" /* Constants used in the source firmware image to describe code segments */ #define FLAG_MODE_MJPEG (1) #define FLAG_MODE_MPEG1 (1<<1) #define FLAG_MODE_MPEG2 (1<<2) #define FLAG_MODE_MPEG4 (1<<3) #define FLAG_MODE_H263 (1<<4) #define FLAG_MODE_ALL (FLAG_MODE_MJPEG | FLAG_MODE_MPEG1 | \ FLAG_MODE_MPEG2 | FLAG_MODE_MPEG4 | \ FLAG_MODE_H263) #define FLAG_SPECIAL (1<<8) #define SPECIAL_FRM_HEAD 0 #define SPECIAL_BRC_CTRL 1 #define SPECIAL_CONFIG 2 #define SPECIAL_SEQHEAD 3 #define SPECIAL_AV_SYNC 4 #define SPECIAL_FINAL 5 #define SPECIAL_AUDIO 6 #define SPECIAL_MODET 7 /* Little data class for creating MPEG headers bit-by-bit */ struct code_gen { unsigned char *p; /* destination */ u32 a; /* collects bits at the top of the variable */ int b; /* bit position of most recently-written bit */ int len; /* written out so far */ }; #define CODE_GEN(name, dest) struct code_gen name = { dest, 0, 32, 0 } #define CODE_ADD(name, val, length) do { \ name.b -= (length); \ name.a |= (val) << name.b; \ while (name.b <= 24) { \ *name.p = name.a >> 24; \ ++name.p; \ name.a <<= 8; \ name.b += 8; \ name.len += 8; \ } \ } while (0) #define CODE_LENGTH(name) (name.len + (32 - name.b)) /* Tables for creating the bitrate control data */ static const s16 converge_speed_ip[101] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 8, 8, 9, 10, 10, 11, 12, 13, 14, 15, 16, 17, 19, 20, 22, 23, 25, 27, 30, 32, 35, 38, 41, 45, 49, 53, 58, 63, 69, 76, 83, 91, 100 }; static const s16 converge_speed_ipb[101] = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 17, 18, 19, 20, 22, 23, 25, 26, 28, 30, 32, 34, 37, 40, 42, 46, 49, 53, 57, 61, 66, 71, 77, 83, 90, 97, 106, 115, 125, 135, 147, 161, 175, 191, 209, 228, 249, 273, 300 }; static const s16 LAMBDA_table[4][101] = { { 16, 16, 16, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 20, 20, 20, 21, 21, 22, 22, 22, 23, 23, 24, 24, 25, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 31, 31, 32, 32, 33, 33, 34, 35, 35, 36, 37, 37, 38, 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 
63, 64, 65, 67, 68, 69, 70, 72, 73, 74, 76, 77, 78, 80, 81, 83, 84, 86, 87, 89, 90, 92, 94, 96 }, { 20, 20, 20, 21, 21, 21, 22, 22, 23, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 33, 33, 34, 34, 35, 36, 36, 37, 38, 38, 39, 40, 40, 41, 42, 43, 43, 44, 45, 46, 47, 48, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 64, 65, 66, 67, 68, 70, 71, 72, 73, 75, 76, 78, 79, 80, 82, 83, 85, 86, 88, 90, 91, 93, 95, 96, 98, 100, 102, 103, 105, 107, 109, 111, 113, 115, 117, 120 }, { 24, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 33, 33, 34, 34, 35, 36, 36, 37, 38, 38, 39, 40, 41, 41, 42, 43, 44, 44, 45, 46, 47, 48, 49, 50, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 69, 70, 71, 72, 74, 75, 76, 78, 79, 81, 82, 84, 85, 87, 88, 90, 92, 93, 95, 97, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 127, 129, 131, 134, 136, 138, 141, 144 }, { 32, 32, 33, 33, 34, 34, 35, 36, 36, 37, 38, 38, 39, 40, 41, 41, 42, 43, 44, 44, 45, 46, 47, 48, 49, 50, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 69, 70, 71, 72, 74, 75, 76, 78, 79, 81, 82, 84, 85, 87, 88, 90, 92, 93, 95, 97, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 127, 129, 131, 134, 136, 139, 141, 144, 146, 149, 152, 154, 157, 160, 163, 166, 169, 172, 175, 178, 181, 185, 188, 192 } }; /* MPEG blank frame generation tables */ enum mpeg_frame_type { PFRAME, BFRAME_PRE, BFRAME_POST, BFRAME_BIDIR, BFRAME_EMPTY }; static const u32 addrinctab[33][2] = { { 0x01, 1 }, { 0x03, 3 }, { 0x02, 3 }, { 0x03, 4 }, { 0x02, 4 }, { 0x03, 5 }, { 0x02, 5 }, { 0x07, 7 }, { 0x06, 7 }, { 0x0b, 8 }, { 0x0a, 8 }, { 0x09, 8 }, { 0x08, 8 }, { 0x07, 8 }, { 0x06, 8 }, { 0x17, 10 }, { 0x16, 10 }, { 0x15, 10 }, { 0x14, 10 }, { 0x13, 10 }, { 0x12, 10 }, { 0x23, 11 }, { 0x22, 11 }, { 0x21, 11 }, { 0x20, 11 }, { 0x1f, 11 }, { 0x1e, 11 }, { 0x1d, 11 }, { 0x1c, 11 }, { 0x1b, 11 }, { 0x1a, 11 }, { 0x19, 11 }, { 0x18, 11 } }; /* Standard JPEG tables */ static const u8 default_intra_quant_table[] = { 8, 16, 19, 22, 26, 27, 29, 34, 16, 16, 22, 24, 27, 29, 34, 37, 19, 22, 26, 27, 29, 34, 34, 38, 22, 22, 26, 27, 29, 34, 37, 40, 22, 26, 27, 29, 32, 35, 40, 48, 26, 27, 29, 32, 35, 40, 48, 58, 26, 27, 29, 34, 38, 46, 56, 69, 27, 29, 35, 38, 46, 56, 69, 83 }; static const u8 bits_dc_luminance[] = { 0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 }; static const u8 val_dc_luminance[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 }; static const u8 bits_dc_chrominance[] = { 0, 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 }; static const u8 val_dc_chrominance[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 }; static const u8 bits_ac_luminance[] = { 0, 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d }; static const u8 val_ac_luminance[] = { 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08, 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 
0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa }; static const u8 bits_ac_chrominance[] = { 0, 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77 }; static const u8 val_ac_chrominance[] = { 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0, 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34, 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa }; /* Zig-zag mapping for quant table * * OK, let's do this mapping on the actual table above so it doesn't have * to be done on the fly. */ static const int zz[64] = { 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, 12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63 }; static int copy_packages(__le16 *dest, u16 *src, int pkg_cnt, int space) { int i, cnt = pkg_cnt * 32; if (space < cnt) return -1; for (i = 0; i < cnt; ++i) dest[i] = cpu_to_le16p(src + i); return cnt; } static int mjpeg_frame_header(struct go7007 *go, unsigned char *buf, int q) { int i, p = 0; buf[p++] = 0xff; buf[p++] = 0xd8; buf[p++] = 0xff; buf[p++] = 0xdb; buf[p++] = 0; buf[p++] = 2 + 65; buf[p++] = 0; buf[p++] = default_intra_quant_table[0]; for (i = 1; i < 64; ++i) /* buf[p++] = (default_intra_quant_table[i] * q) >> 3; */ buf[p++] = (default_intra_quant_table[zz[i]] * q) >> 3; buf[p++] = 0xff; buf[p++] = 0xc0; buf[p++] = 0; buf[p++] = 17; buf[p++] = 8; buf[p++] = go->height >> 8; buf[p++] = go->height & 0xff; buf[p++] = go->width >> 8; buf[p++] = go->width & 0xff; buf[p++] = 3; buf[p++] = 1; buf[p++] = 0x22; buf[p++] = 0; buf[p++] = 2; buf[p++] = 0x11; buf[p++] = 0; buf[p++] = 3; buf[p++] = 0x11; buf[p++] = 0; buf[p++] = 0xff; buf[p++] = 0xc4; buf[p++] = 418 >> 8; buf[p++] = 418 & 0xff; buf[p++] = 0x00; memcpy(buf + p, bits_dc_luminance + 1, 16); p += 16; memcpy(buf + p, val_dc_luminance, sizeof(val_dc_luminance)); p += sizeof(val_dc_luminance); buf[p++] = 0x01; memcpy(buf + p, bits_dc_chrominance + 1, 16); p += 16; memcpy(buf + p, val_dc_chrominance, sizeof(val_dc_chrominance)); p += sizeof(val_dc_chrominance); buf[p++] = 0x10; memcpy(buf + p, bits_ac_luminance + 1, 16); p += 16; memcpy(buf + p, val_ac_luminance, sizeof(val_ac_luminance)); p += sizeof(val_ac_luminance); buf[p++] = 0x11; memcpy(buf + p, bits_ac_chrominance + 1, 16); p += 16; memcpy(buf + p, val_ac_chrominance, sizeof(val_ac_chrominance)); p += sizeof(val_ac_chrominance); buf[p++] = 0xff; buf[p++] = 0xda; buf[p++] = 0; buf[p++] = 12; buf[p++] = 3; buf[p++] = 1; buf[p++] = 0x00; 
buf[p++] = 2; buf[p++] = 0x11; buf[p++] = 3; buf[p++] = 0x11; buf[p++] = 0; buf[p++] = 63; buf[p++] = 0; return p; } static int gen_mjpeghdr_to_package(struct go7007 *go, __le16 *code, int space) { u8 *buf; u16 mem = 0x3e00; unsigned int addr = 0x19; int size = 0, i, off = 0, chunk; buf = kzalloc(4096, GFP_KERNEL); if (buf == NULL) return -1; for (i = 1; i < 32; ++i) { mjpeg_frame_header(go, buf + size, i); size += 80; } chunk = mjpeg_frame_header(go, buf + size, 1); memmove(buf + size, buf + size + 80, chunk - 80); size += chunk - 80; for (i = 0; i < size; i += chunk * 2) { if (space - off < 32) { off = -1; goto done; } code[off + 1] = __cpu_to_le16(0x8000 | mem); chunk = 28; if (mem + chunk > 0x4000) chunk = 0x4000 - mem; if (i + 2 * chunk > size) chunk = (size - i) / 2; if (chunk < 28) { code[off] = __cpu_to_le16(0x4000 | chunk); code[off + 31] = __cpu_to_le16(addr++); mem = 0x3e00; } else { code[off] = __cpu_to_le16(0x1000 | 28); code[off + 31] = 0; mem += 28; } memcpy(&code[off + 2], buf + i, chunk * 2); off += 32; } done: kfree(buf); return off; } static int mpeg1_frame_header(struct go7007 *go, unsigned char *buf, int modulo, int pict_struct, enum mpeg_frame_type frame) { int i, j, mb_code, mb_len; int rows = go->interlace_coding ? go->height / 32 : go->height / 16; CODE_GEN(c, buf + 6); switch (frame) { case PFRAME: mb_code = 0x1; mb_len = 3; break; case BFRAME_PRE: mb_code = 0x2; mb_len = 4; break; case BFRAME_POST: mb_code = 0x2; mb_len = 3; break; case BFRAME_BIDIR: mb_code = 0x2; mb_len = 2; break; default: /* keep the compiler happy */ mb_code = mb_len = 0; break; } CODE_ADD(c, frame == PFRAME ? 0x2 : 0x3, 13); CODE_ADD(c, 0xffff, 16); CODE_ADD(c, go->format == V4L2_PIX_FMT_MPEG2 ? 0x7 : 0x4, 4); if (frame != PFRAME) CODE_ADD(c, go->format == V4L2_PIX_FMT_MPEG2 ? 0x7 : 0x4, 4); else CODE_ADD(c, 0, 4); /* Is this supposed to be here?? */ CODE_ADD(c, 0, 3); /* What is this?? */ /* Byte-align with zeros */ j = 8 - (CODE_LENGTH(c) % 8); if (j != 8) CODE_ADD(c, 0, j); if (go->format == V4L2_PIX_FMT_MPEG2) { CODE_ADD(c, 0x1, 24); CODE_ADD(c, 0xb5, 8); CODE_ADD(c, 0x844, 12); CODE_ADD(c, frame == PFRAME ? 0xff : 0x44, 8); if (go->interlace_coding) { CODE_ADD(c, pict_struct, 4); if (go->dvd_mode) CODE_ADD(c, 0x000, 11); else CODE_ADD(c, 0x200, 11); } else { CODE_ADD(c, 0x3, 4); CODE_ADD(c, 0x20c, 11); } /* Byte-align with zeros */ j = 8 - (CODE_LENGTH(c) % 8); if (j != 8) CODE_ADD(c, 0, j); } for (i = 0; i < rows; ++i) { CODE_ADD(c, 1, 24); CODE_ADD(c, i + 1, 8); CODE_ADD(c, 0x2, 6); CODE_ADD(c, 0x1, 1); CODE_ADD(c, mb_code, mb_len); if (go->interlace_coding) { CODE_ADD(c, 0x1, 2); CODE_ADD(c, pict_struct == 1 ? 0x0 : 0x1, 1); } if (frame == BFRAME_BIDIR) { CODE_ADD(c, 0x3, 2); if (go->interlace_coding) CODE_ADD(c, pict_struct == 1 ? 0x0 : 0x1, 1); } CODE_ADD(c, 0x3, 2); for (j = (go->width >> 4) - 2; j >= 33; j -= 33) CODE_ADD(c, 0x8, 11); CODE_ADD(c, addrinctab[j][0], addrinctab[j][1]); CODE_ADD(c, mb_code, mb_len); if (go->interlace_coding) { CODE_ADD(c, 0x1, 2); CODE_ADD(c, pict_struct == 1 ? 0x0 : 0x1, 1); } if (frame == BFRAME_BIDIR) { CODE_ADD(c, 0x3, 2); if (go->interlace_coding) CODE_ADD(c, pict_struct == 1 ? 
0x0 : 0x1, 1); } CODE_ADD(c, 0x3, 2); /* Byte-align with zeros */ j = 8 - (CODE_LENGTH(c) % 8); if (j != 8) CODE_ADD(c, 0, j); } i = CODE_LENGTH(c) + 4 * 8; buf[2] = 0x00; buf[3] = 0x00; buf[4] = 0x01; buf[5] = 0x00; return i; } static int mpeg1_sequence_header(struct go7007 *go, unsigned char *buf, int ext) { int i, aspect_ratio, picture_rate; CODE_GEN(c, buf + 6); if (go->format == V4L2_PIX_FMT_MPEG1) { switch (go->aspect_ratio) { case GO7007_RATIO_4_3: aspect_ratio = go->standard == GO7007_STD_NTSC ? 3 : 2; break; case GO7007_RATIO_16_9: aspect_ratio = go->standard == GO7007_STD_NTSC ? 5 : 4; break; default: aspect_ratio = 1; break; } } else { switch (go->aspect_ratio) { case GO7007_RATIO_4_3: aspect_ratio = 2; break; case GO7007_RATIO_16_9: aspect_ratio = 3; break; default: aspect_ratio = 1; break; } } switch (go->sensor_framerate) { case 24000: picture_rate = 1; break; case 24024: picture_rate = 2; break; case 25025: picture_rate = go->interlace_coding ? 6 : 3; break; case 30000: picture_rate = go->interlace_coding ? 7 : 4; break; case 30030: picture_rate = go->interlace_coding ? 8 : 5; break; default: picture_rate = 5; /* 30 fps seems like a reasonable default */ break; } CODE_ADD(c, go->width, 12); CODE_ADD(c, go->height, 12); CODE_ADD(c, aspect_ratio, 4); CODE_ADD(c, picture_rate, 4); CODE_ADD(c, go->format == V4L2_PIX_FMT_MPEG2 ? 20000 : 0x3ffff, 18); CODE_ADD(c, 1, 1); CODE_ADD(c, go->format == V4L2_PIX_FMT_MPEG2 ? 112 : 20, 10); CODE_ADD(c, 0, 3); /* Byte-align with zeros */ i = 8 - (CODE_LENGTH(c) % 8); if (i != 8) CODE_ADD(c, 0, i); if (go->format == V4L2_PIX_FMT_MPEG2) { CODE_ADD(c, 0x1, 24); CODE_ADD(c, 0xb5, 8); CODE_ADD(c, 0x148, 12); if (go->interlace_coding) CODE_ADD(c, 0x20001, 20); else CODE_ADD(c, 0xa0001, 20); CODE_ADD(c, 0, 16); /* Byte-align with zeros */ i = 8 - (CODE_LENGTH(c) % 8); if (i != 8) CODE_ADD(c, 0, i); if (ext) { CODE_ADD(c, 0x1, 24); CODE_ADD(c, 0xb52, 12); CODE_ADD(c, go->standard == GO7007_STD_NTSC ? 
2 : 1, 3); CODE_ADD(c, 0x105, 9); CODE_ADD(c, 0x505, 16); CODE_ADD(c, go->width, 14); CODE_ADD(c, 1, 1); CODE_ADD(c, go->height, 14); /* Byte-align with zeros */ i = 8 - (CODE_LENGTH(c) % 8); if (i != 8) CODE_ADD(c, 0, i); } } i = CODE_LENGTH(c) + 4 * 8; buf[0] = i & 0xff; buf[1] = i >> 8; buf[2] = 0x00; buf[3] = 0x00; buf[4] = 0x01; buf[5] = 0xb3; return i; } static int gen_mpeg1hdr_to_package(struct go7007 *go, __le16 *code, int space, int *framelen) { u8 *buf; u16 mem = 0x3e00; unsigned int addr = 0x19; int i, off = 0, chunk; buf = kzalloc(5120, GFP_KERNEL); if (buf == NULL) return -1; framelen[0] = mpeg1_frame_header(go, buf, 0, 1, PFRAME); if (go->interlace_coding) framelen[0] += mpeg1_frame_header(go, buf + framelen[0] / 8, 0, 2, PFRAME); buf[0] = framelen[0] & 0xff; buf[1] = framelen[0] >> 8; i = 368; framelen[1] = mpeg1_frame_header(go, buf + i, 0, 1, BFRAME_PRE); if (go->interlace_coding) framelen[1] += mpeg1_frame_header(go, buf + i + framelen[1] / 8, 0, 2, BFRAME_PRE); buf[i] = framelen[1] & 0xff; buf[i + 1] = framelen[1] >> 8; i += 1632; framelen[2] = mpeg1_frame_header(go, buf + i, 0, 1, BFRAME_POST); if (go->interlace_coding) framelen[2] += mpeg1_frame_header(go, buf + i + framelen[2] / 8, 0, 2, BFRAME_POST); buf[i] = framelen[2] & 0xff; buf[i + 1] = framelen[2] >> 8; i += 1432; framelen[3] = mpeg1_frame_header(go, buf + i, 0, 1, BFRAME_BIDIR); if (go->interlace_coding) framelen[3] += mpeg1_frame_header(go, buf + i + framelen[3] / 8, 0, 2, BFRAME_BIDIR); buf[i] = framelen[3] & 0xff; buf[i + 1] = framelen[3] >> 8; i += 1632 + 16; mpeg1_sequence_header(go, buf + i, 0); i += 40; for (i = 0; i < 5120; i += chunk * 2) { if (space - off < 32) { off = -1; goto done; } code[off + 1] = __cpu_to_le16(0x8000 | mem); chunk = 28; if (mem + chunk > 0x4000) chunk = 0x4000 - mem; if (i + 2 * chunk > 5120) chunk = (5120 - i) / 2; if (chunk < 28) { code[off] = __cpu_to_le16(0x4000 | chunk); code[off + 31] = __cpu_to_le16(addr); if (mem + chunk == 0x4000) { mem = 0x3e00; ++addr; } } else { code[off] = __cpu_to_le16(0x1000 | 28); code[off + 31] = 0; mem += 28; } memcpy(&code[off + 2], buf + i, chunk * 2); off += 32; } done: kfree(buf); return off; } static int vti_bitlen(struct go7007 *go) { unsigned int i, max_time_incr = go->sensor_framerate / go->fps_scale; for (i = 31; (max_time_incr & ((1 << i) - 1)) == max_time_incr; --i) ; return i + 1; } static int mpeg4_frame_header(struct go7007 *go, unsigned char *buf, int modulo, enum mpeg_frame_type frame) { int i; CODE_GEN(c, buf + 6); int mb_count = (go->width >> 4) * (go->height >> 4); CODE_ADD(c, frame == PFRAME ? 
0x1 : 0x2, 2); if (modulo) CODE_ADD(c, 0x1, 1); CODE_ADD(c, 0x1, 2); CODE_ADD(c, 0, vti_bitlen(go)); CODE_ADD(c, 0x3, 2); if (frame == PFRAME) CODE_ADD(c, 0, 1); CODE_ADD(c, 0xc, 11); if (frame != PFRAME) CODE_ADD(c, 0x4, 3); if (frame != BFRAME_EMPTY) { for (i = 0; i < mb_count; ++i) { switch (frame) { case PFRAME: CODE_ADD(c, 0x1, 1); break; case BFRAME_PRE: CODE_ADD(c, 0x47, 8); break; case BFRAME_POST: CODE_ADD(c, 0x27, 7); break; case BFRAME_BIDIR: CODE_ADD(c, 0x5f, 8); break; case BFRAME_EMPTY: /* keep compiler quiet */ break; } } } /* Byte-align with a zero followed by ones */ i = 8 - (CODE_LENGTH(c) % 8); CODE_ADD(c, 0, 1); CODE_ADD(c, (1 << (i - 1)) - 1, i - 1); i = CODE_LENGTH(c) + 4 * 8; buf[0] = i & 0xff; buf[1] = i >> 8; buf[2] = 0x00; buf[3] = 0x00; buf[4] = 0x01; buf[5] = 0xb6; return i; } static int mpeg4_sequence_header(struct go7007 *go, unsigned char *buf, int ext) { const unsigned char head[] = { 0x00, 0x00, 0x01, 0xb0, go->pali, 0x00, 0x00, 0x01, 0xb5, 0x09, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x20, }; int i, aspect_ratio; int fps = go->sensor_framerate / go->fps_scale; CODE_GEN(c, buf + 2 + sizeof(head)); switch (go->aspect_ratio) { case GO7007_RATIO_4_3: aspect_ratio = go->standard == GO7007_STD_NTSC ? 3 : 2; break; case GO7007_RATIO_16_9: aspect_ratio = go->standard == GO7007_STD_NTSC ? 5 : 4; break; default: aspect_ratio = 1; break; } memcpy(buf + 2, head, sizeof(head)); CODE_ADD(c, 0x191, 17); CODE_ADD(c, aspect_ratio, 4); CODE_ADD(c, 0x1, 4); CODE_ADD(c, fps, 16); CODE_ADD(c, 0x3, 2); CODE_ADD(c, 1001, vti_bitlen(go)); CODE_ADD(c, 1, 1); CODE_ADD(c, go->width, 13); CODE_ADD(c, 1, 1); CODE_ADD(c, go->height, 13); CODE_ADD(c, 0x2830, 14); /* Byte-align */ i = 8 - (CODE_LENGTH(c) % 8); CODE_ADD(c, 0, 1); CODE_ADD(c, (1 << (i - 1)) - 1, i - 1); i = CODE_LENGTH(c) + sizeof(head) * 8; buf[0] = i & 0xff; buf[1] = i >> 8; return i; } static int gen_mpeg4hdr_to_package(struct go7007 *go, __le16 *code, int space, int *framelen) { u8 *buf; u16 mem = 0x3e00; unsigned int addr = 0x19; int i, off = 0, chunk; buf = kzalloc(5120, GFP_KERNEL); if (buf == NULL) return -1; framelen[0] = mpeg4_frame_header(go, buf, 0, PFRAME); i = 368; framelen[1] = mpeg4_frame_header(go, buf + i, 0, BFRAME_PRE); i += 1632; framelen[2] = mpeg4_frame_header(go, buf + i, 0, BFRAME_POST); i += 1432; framelen[3] = mpeg4_frame_header(go, buf + i, 0, BFRAME_BIDIR); i += 1632; mpeg4_frame_header(go, buf + i, 0, BFRAME_EMPTY); i += 16; mpeg4_sequence_header(go, buf + i, 0); i += 40; for (i = 0; i < 5120; i += chunk * 2) { if (space - off < 32) { off = -1; goto done; } code[off + 1] = __cpu_to_le16(0x8000 | mem); chunk = 28; if (mem + chunk > 0x4000) chunk = 0x4000 - mem; if (i + 2 * chunk > 5120) chunk = (5120 - i) / 2; if (chunk < 28) { code[off] = __cpu_to_le16(0x4000 | chunk); code[off + 31] = __cpu_to_le16(addr); if (mem + chunk == 0x4000) { mem = 0x3e00; ++addr; } } else { code[off] = __cpu_to_le16(0x1000 | 28); code[off + 31] = 0; mem += 28; } memcpy(&code[off + 2], buf + i, chunk * 2); off += 32; } mem = 0x3e00; addr = go->ipb ? 
0x14f9 : 0x0af9; memset(buf, 0, 5120); framelen[4] = mpeg4_frame_header(go, buf, 1, PFRAME); i = 368; framelen[5] = mpeg4_frame_header(go, buf + i, 1, BFRAME_PRE); i += 1632; framelen[6] = mpeg4_frame_header(go, buf + i, 1, BFRAME_POST); i += 1432; framelen[7] = mpeg4_frame_header(go, buf + i, 1, BFRAME_BIDIR); i += 1632; mpeg4_frame_header(go, buf + i, 1, BFRAME_EMPTY); i += 16; for (i = 0; i < 5120; i += chunk * 2) { if (space - off < 32) { off = -1; goto done; } code[off + 1] = __cpu_to_le16(0x8000 | mem); chunk = 28; if (mem + chunk > 0x4000) chunk = 0x4000 - mem; if (i + 2 * chunk > 5120) chunk = (5120 - i) / 2; if (chunk < 28) { code[off] = __cpu_to_le16(0x4000 | chunk); code[off + 31] = __cpu_to_le16(addr); if (mem + chunk == 0x4000) { mem = 0x3e00; ++addr; } } else { code[off] = __cpu_to_le16(0x1000 | 28); code[off + 31] = 0; mem += 28; } memcpy(&code[off + 2], buf + i, chunk * 2); off += 32; } done: kfree(buf); return off; } static int brctrl_to_package(struct go7007 *go, __le16 *code, int space, int *framelen) { int converge_speed = 0; int lambda = (go->format == V4L2_PIX_FMT_MJPEG || go->dvd_mode) ? 100 : 0; int peak_rate = 6 * go->bitrate / 5; int vbv_buffer = go->format == V4L2_PIX_FMT_MJPEG ? go->bitrate : (go->dvd_mode ? 900000 : peak_rate); int fps = go->sensor_framerate / go->fps_scale; int q = 0; /* Bizarre math below depends on rounding errors in division */ u32 sgop_expt_addr = go->bitrate / 32 * (go->ipb ? 3 : 1) * 1001 / fps; u32 sgop_peak_addr = peak_rate / 32 * 1001 / fps; u32 total_expt_addr = go->bitrate / 32 * 1000 / fps * (fps / 1000); u32 vbv_alert_addr = vbv_buffer * 3 / (4 * 32); u32 cplx[] = { q > 0 ? sgop_expt_addr * q : 2 * go->width * go->height * (go->ipb ? 6 : 4) / 32, q > 0 ? sgop_expt_addr * q : 2 * go->width * go->height * (go->ipb ? 6 : 4) / 32, q > 0 ? sgop_expt_addr * q : 2 * go->width * go->height * (go->ipb ? 6 : 4) / 32, q > 0 ? sgop_expt_addr * q : 2 * go->width * go->height * (go->ipb ? 6 : 4) / 32, }; u32 calc_q = q > 0 ? q : cplx[0] / sgop_expt_addr; u16 pack[] = { 0x200e, 0x0000, 0xBF20, go->ipb ? converge_speed_ipb[converge_speed] : converge_speed_ip[converge_speed], 0xBF21, go->ipb ? 2 : 0, 0xBF22, go->ipb ? LAMBDA_table[0][lambda / 2 + 50] : 32767, 0xBF23, go->ipb ? LAMBDA_table[1][lambda] : 32767, 0xBF24, 32767, 0xBF25, lambda > 99 ? 32767 : LAMBDA_table[3][lambda], 0xBF26, sgop_expt_addr & 0x0000FFFF, 0xBF27, sgop_expt_addr >> 16, 0xBF28, sgop_peak_addr & 0x0000FFFF, 0xBF29, sgop_peak_addr >> 16, 0xBF2A, vbv_alert_addr & 0x0000FFFF, 0xBF2B, vbv_alert_addr >> 16, 0xBF2C, 0, 0xBF2D, 0, 0, 0, 0x200e, 0x0000, 0xBF2E, vbv_alert_addr & 0x0000FFFF, 0xBF2F, vbv_alert_addr >> 16, 0xBF30, cplx[0] & 0x0000FFFF, 0xBF31, cplx[0] >> 16, 0xBF32, cplx[1] & 0x0000FFFF, 0xBF33, cplx[1] >> 16, 0xBF34, cplx[2] & 0x0000FFFF, 0xBF35, cplx[2] >> 16, 0xBF36, cplx[3] & 0x0000FFFF, 0xBF37, cplx[3] >> 16, 0xBF38, 0, 0xBF39, 0, 0xBF3A, total_expt_addr & 0x0000FFFF, 0xBF3B, total_expt_addr >> 16, 0, 0, 0x200e, 0x0000, 0xBF3C, total_expt_addr & 0x0000FFFF, 0xBF3D, total_expt_addr >> 16, 0xBF3E, 0, 0xBF3F, 0, 0xBF48, 0, 0xBF49, 0, 0xBF4A, calc_q < 4 ? 4 : (calc_q > 124 ? 
124 : calc_q), 0xBF4B, 4, 0xBF4C, 0, 0xBF4D, 0, 0xBF4E, 0, 0xBF4F, 0, 0xBF50, 0, 0xBF51, 0, 0, 0, 0x200e, 0x0000, 0xBF40, sgop_expt_addr & 0x0000FFFF, 0xBF41, sgop_expt_addr >> 16, 0xBF42, 0, 0xBF43, 0, 0xBF44, 0, 0xBF45, 0, 0xBF46, (go->width >> 4) * (go->height >> 4), 0xBF47, 0, 0xBF64, 0, 0xBF65, 0, 0xBF18, framelen[4], 0xBF19, framelen[5], 0xBF1A, framelen[6], 0xBF1B, framelen[7], 0, 0, #if 0 /* Remove once we don't care about matching */ 0x200e, 0x0000, 0xBF56, 4, 0xBF57, 0, 0xBF58, 5, 0xBF59, 0, 0xBF5A, 6, 0xBF5B, 0, 0xBF5C, 8, 0xBF5D, 0, 0xBF5E, 1, 0xBF5F, 0, 0xBF60, 1, 0xBF61, 0, 0xBF62, 0, 0xBF63, 0, 0, 0, #else 0x2008, 0x0000, 0xBF56, 4, 0xBF57, 0, 0xBF58, 5, 0xBF59, 0, 0xBF5A, 6, 0xBF5B, 0, 0xBF5C, 8, 0xBF5D, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, #endif 0x200e, 0x0000, 0xBF10, 0, 0xBF11, 0, 0xBF12, 0, 0xBF13, 0, 0xBF14, 0, 0xBF15, 0, 0xBF16, 0, 0xBF17, 0, 0xBF7E, 0, 0xBF7F, 1, 0xBF52, framelen[0], 0xBF53, framelen[1], 0xBF54, framelen[2], 0xBF55, framelen[3], 0, 0, }; return copy_packages(code, pack, 6, space); } static int config_package(struct go7007 *go, __le16 *code, int space) { int fps = go->sensor_framerate / go->fps_scale / 1000; int rows = go->interlace_coding ? go->height / 32 : go->height / 16; int brc_window_size = fps; int q_min = 2, q_max = 31; int THACCoeffSet0 = 0; u16 pack[] = { 0x200e, 0x0000, 0xc002, 0x14b4, 0xc003, 0x28b4, 0xc004, 0x3c5a, 0xdc05, 0x2a77, 0xc6c3, go->format == V4L2_PIX_FMT_MPEG4 ? 0 : (go->format == V4L2_PIX_FMT_H263 ? 0 : 1), 0xc680, go->format == V4L2_PIX_FMT_MPEG4 ? 0xf1 : (go->format == V4L2_PIX_FMT_H263 ? 0x61 : 0xd3), 0xc780, 0x0140, 0xe009, 0x0001, 0xc60f, 0x0008, 0xd4ff, 0x0002, 0xe403, 2340, 0xe406, 75, 0xd411, 0x0001, 0xd410, 0xa1d6, 0x0001, 0x2801, 0x200d, 0x0000, 0xe402, 0x018b, 0xe401, 0x8b01, 0xd472, (go->board_info->sensor_flags & GO7007_SENSOR_TV) && (!go->interlace_coding) ? 0x01b0 : 0x0170, 0xd475, (go->board_info->sensor_flags & GO7007_SENSOR_TV) && (!go->interlace_coding) ? 0x0008 : 0x0009, 0xc404, go->interlace_coding ? 0x44 : (go->format == V4L2_PIX_FMT_MPEG4 ? 0x11 : (go->format == V4L2_PIX_FMT_MPEG1 ? 0x02 : (go->format == V4L2_PIX_FMT_MPEG2 ? 0x04 : (go->format == V4L2_PIX_FMT_H263 ? 0x08 : 0x20)))), 0xbf0a, (go->format == V4L2_PIX_FMT_MPEG4 ? 8 : (go->format == V4L2_PIX_FMT_MPEG1 ? 1 : (go->format == V4L2_PIX_FMT_MPEG2 ? 2 : (go->format == V4L2_PIX_FMT_H263 ? 4 : 16)))) | ((go->repeat_seqhead ? 1 : 0) << 6) | ((go->dvd_mode ? 1 : 0) << 9) | ((go->gop_header_enable ? 1 : 0) << 10), 0xbf0b, 0, 0xdd5a, go->ipb ? 0x14 : 0x0a, 0xbf0c, 0, 0xbf0d, 0, 0xc683, THACCoeffSet0, 0xc40a, (go->width << 4) | rows, 0xe01a, go->board_info->hpi_buffer_cap, 0, 0, 0, 0, 0x2008, 0, 0xe402, 0x88, 0xe401, 0x8f01, 0xbf6a, 0, 0xbf6b, 0, 0xbf6c, 0, 0xbf6d, 0, 0xbf6e, 0, 0xbf6f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x200e, 0, 0xbf66, brc_window_size, 0xbf67, 0, 0xbf68, q_min, 0xbf69, q_max, 0xbfe0, 0, 0xbfe1, 0, 0xbfe2, 0, 0xbfe3, go->ipb ? 3 : 1, 0xc031, go->board_info->sensor_flags & GO7007_SENSOR_VBI ? 1 : 0, 0xc01c, 0x1f, 0xdd8c, 0x15, 0xdd94, 0x15, 0xdd88, go->ipb ? 0x1401 : 0x0a01, 0xdd90, go->ipb ? 0x1401 : 0x0a01, 0, 0, 0x200e, 0, 0xbfe4, 0, 0xbfe5, 0, 0xbfe6, 0, 0xbfe7, fps << 8, 0xbfe8, 0x3a00, 0xbfe9, 0, 0xbfea, 0, 0xbfeb, 0, 0xbfec, (go->interlace_coding ? 1 << 15 : 0) | (go->modet_enable ? 0xa : 0) | (go->board_info->sensor_flags & GO7007_SENSOR_VBI ? 1 : 0), 0xbfed, 0, 0xbfee, 0, 0xbfef, 0, 0xbff0, go->board_info->sensor_flags & GO7007_SENSOR_TV ? 
0xf060 : 0xb060, 0xbff1, 0, 0, 0, }; return copy_packages(code, pack, 5, space); } static int seqhead_to_package(struct go7007 *go, __le16 *code, int space, int (*sequence_header_func)(struct go7007 *go, unsigned char *buf, int ext)) { int vop_time_increment_bitlength = vti_bitlen(go); int fps = go->sensor_framerate / go->fps_scale * (go->interlace_coding ? 2 : 1); unsigned char buf[40] = { }; int len = sequence_header_func(go, buf, 1); u16 pack[] = { 0x2006, 0, 0xbf08, fps, 0xbf09, 0, 0xbff2, vop_time_increment_bitlength, 0xbff3, (1 << vop_time_increment_bitlength) - 1, 0xbfe6, 0, 0xbfe7, (fps / 1000) << 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x2007, 0, 0xc800, buf[2] << 8 | buf[3], 0xc801, buf[4] << 8 | buf[5], 0xc802, buf[6] << 8 | buf[7], 0xc803, buf[8] << 8 | buf[9], 0xc406, 64, 0xc407, len - 64, 0xc61b, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x200e, 0, 0xc808, buf[10] << 8 | buf[11], 0xc809, buf[12] << 8 | buf[13], 0xc80a, buf[14] << 8 | buf[15], 0xc80b, buf[16] << 8 | buf[17], 0xc80c, buf[18] << 8 | buf[19], 0xc80d, buf[20] << 8 | buf[21], 0xc80e, buf[22] << 8 | buf[23], 0xc80f, buf[24] << 8 | buf[25], 0xc810, buf[26] << 8 | buf[27], 0xc811, buf[28] << 8 | buf[29], 0xc812, buf[30] << 8 | buf[31], 0xc813, buf[32] << 8 | buf[33], 0xc814, buf[34] << 8 | buf[35], 0xc815, buf[36] << 8 | buf[37], 0, 0, 0, 0, 0, 0, }; return copy_packages(code, pack, 3, space); } static int relative_prime(int big, int little) { int remainder; while (little != 0) { remainder = big % little; big = little; little = remainder; } return big; } static int avsync_to_package(struct go7007 *go, __le16 *code, int space) { int arate = go->board_info->audio_rate * 1001 * go->fps_scale; int ratio = arate / go->sensor_framerate; int adjratio = ratio * 215 / 100; int rprime = relative_prime(go->sensor_framerate, arate % go->sensor_framerate); int f1 = (arate % go->sensor_framerate) / rprime; int f2 = (go->sensor_framerate - arate % go->sensor_framerate) / rprime; u16 pack[] = { 0x200e, 0, 0xbf98, (u16)((-adjratio) & 0xffff), 0xbf99, (u16)((-adjratio) >> 16), 0xbf92, 0, 0xbf93, 0, 0xbff4, f1 > f2 ? f1 : f2, 0xbff5, f1 < f2 ? f1 : f2, 0xbff6, f1 < f2 ? ratio : ratio + 1, 0xbff7, f1 > f2 ? ratio : ratio + 1, 0xbff8, 0, 0xbff9, 0, 0xbffa, adjratio & 0xffff, 0xbffb, adjratio >> 16, 0xbf94, 0, 0xbf95, 0, 0, 0, }; return copy_packages(code, pack, 1, space); } static int final_package(struct go7007 *go, __le16 *code, int space) { int rows = go->interlace_coding ? go->height / 32 : go->height / 16; u16 pack[] = { 0x8000, 0, 0, 0, 0, 0, 0, 2, ((go->board_info->sensor_flags & GO7007_SENSOR_TV) && (!go->interlace_coding) ? (1 << 14) | (1 << 9) : 0) | ((go->encoder_subsample ? 1 : 0) << 8) | (go->board_info->sensor_flags & GO7007_SENSOR_CONFIG_MASK), ((go->encoder_v_halve ? 1 : 0) << 14) | (go->encoder_v_halve ? rows << 9 : rows << 8) | (go->encoder_h_halve ? 1 << 6 : 0) | (go->encoder_h_halve ? go->width >> 3 : go->width >> 4), (1 << 15) | (go->encoder_v_offset << 6) | (1 << 7) | (go->encoder_h_offset >> 2), (1 << 6), 0, 0, ((go->fps_scale - 1) << 8) | (go->board_info->sensor_flags & GO7007_SENSOR_TV ? (1 << 7) : 0) | 0x41, go->ipb ? 0xd4c : 0x36b, (rows << 8) | (go->width >> 4), go->format == V4L2_PIX_FMT_MPEG4 ? 0x0404 : 0, (1 << 15) | ((go->interlace_coding ? 1 : 0) << 13) | ((go->closed_gop ? 1 : 0) << 12) | ((go->format == V4L2_PIX_FMT_MPEG4 ? 1 : 0) << 11) | /* (1 << 9) | */ ((go->ipb ? 3 : 0) << 7) | ((go->modet_enable ? 1 : 0) << 2) | ((go->dvd_mode ? 
1 : 0) << 1) | 1, (go->format == V4L2_PIX_FMT_MPEG1 ? 0x89a0 : (go->format == V4L2_PIX_FMT_MPEG2 ? 0x89a0 : (go->format == V4L2_PIX_FMT_MJPEG ? 0x89a0 : (go->format == V4L2_PIX_FMT_MPEG4 ? 0x8920 : (go->format == V4L2_PIX_FMT_H263 ? 0x8920 : 0))))), go->ipb ? 0x1f15 : 0x1f0b, go->ipb ? 0x0015 : 0x000b, go->ipb ? 0xa800 : 0x5800, 0xffff, 0x0020 + 0x034b * 0, 0x0020 + 0x034b * 1, 0x0020 + 0x034b * 2, 0x0020 + 0x034b * 3, 0x0020 + 0x034b * 4, 0x0020 + 0x034b * 5, go->ipb ? (go->gop_size / 3) : go->gop_size, (go->height >> 4) * (go->width >> 4) * 110 / 100, }; return copy_packages(code, pack, 1, space); } static int audio_to_package(struct go7007 *go, __le16 *code, int space) { int clock_config = ((go->board_info->audio_flags & GO7007_AUDIO_I2S_MASTER ? 1 : 0) << 11) | ((go->board_info->audio_flags & GO7007_AUDIO_OKI_MODE ? 1 : 0) << 8) | (((go->board_info->audio_bclk_div / 4) - 1) << 4) | (go->board_info->audio_main_div - 1); u16 pack[] = { 0x200d, 0, 0x9002, 0, 0x9002, 0, 0x9031, 0, 0x9032, 0, 0x9033, 0, 0x9034, 0, 0x9035, 0, 0x9036, 0, 0x9037, 0, 0x9040, 0, 0x9000, clock_config, 0x9001, (go->board_info->audio_flags & 0xffff) | (1 << 9), 0x9000, ((go->board_info->audio_flags & GO7007_AUDIO_I2S_MASTER ? 1 : 0) << 10) | clock_config, 0, 0, 0, 0, 0x2005, 0, 0x9041, 0, 0x9042, 256, 0x9043, 0, 0x9044, 16, 0x9045, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; return copy_packages(code, pack, 2, space); } static int modet_to_package(struct go7007 *go, __le16 *code, int space) { bool has_modet0 = go->modet[0].enable; bool has_modet1 = go->modet[1].enable; bool has_modet2 = go->modet[2].enable; bool has_modet3 = go->modet[3].enable; int ret, mb, i, addr, cnt = 0; u16 pack[32]; u16 thresholds[] = { 0x200e, 0, 0xbf82, has_modet0 ? go->modet[0].pixel_threshold : 32767, 0xbf83, has_modet1 ? go->modet[1].pixel_threshold : 32767, 0xbf84, has_modet2 ? go->modet[2].pixel_threshold : 32767, 0xbf85, has_modet3 ? go->modet[3].pixel_threshold : 32767, 0xbf86, has_modet0 ? go->modet[0].motion_threshold : 32767, 0xbf87, has_modet1 ? go->modet[1].motion_threshold : 32767, 0xbf88, has_modet2 ? go->modet[2].motion_threshold : 32767, 0xbf89, has_modet3 ? go->modet[3].motion_threshold : 32767, 0xbf8a, has_modet0 ? go->modet[0].mb_threshold : 32767, 0xbf8b, has_modet1 ? go->modet[1].mb_threshold : 32767, 0xbf8c, has_modet2 ? go->modet[2].mb_threshold : 32767, 0xbf8d, has_modet3 ? 
go->modet[3].mb_threshold : 32767, 0xbf8e, 0, 0xbf8f, 0, 0, 0, }; ret = copy_packages(code, thresholds, 1, space); if (ret < 0) return -1; cnt += ret; addr = 0xbac0; memset(pack, 0, 64); i = 0; for (mb = 0; mb < 1624; ++mb) { pack[i * 2 + 3] <<= 2; pack[i * 2 + 3] |= go->modet_map[mb]; if (mb % 8 != 7) continue; pack[i * 2 + 2] = addr++; ++i; if (i == 10 || mb == 1623) { pack[0] = 0x2000 | i; ret = copy_packages(code + cnt, pack, 1, space - cnt); if (ret < 0) return -1; cnt += ret; i = 0; memset(pack, 0, 64); } pack[i * 2 + 3] = 0; } memset(pack, 0, 64); i = 0; for (addr = 0xbb90; addr < 0xbbfa; ++addr) { pack[i * 2 + 2] = addr; pack[i * 2 + 3] = 0; ++i; if (i == 10 || addr == 0xbbf9) { pack[0] = 0x2000 | i; ret = copy_packages(code + cnt, pack, 1, space - cnt); if (ret < 0) return -1; cnt += ret; i = 0; memset(pack, 0, 64); } } return cnt; } static int do_special(struct go7007 *go, u16 type, __le16 *code, int space, int *framelen) { switch (type) { case SPECIAL_FRM_HEAD: switch (go->format) { case V4L2_PIX_FMT_MJPEG: return gen_mjpeghdr_to_package(go, code, space); case V4L2_PIX_FMT_MPEG1: case V4L2_PIX_FMT_MPEG2: return gen_mpeg1hdr_to_package(go, code, space, framelen); case V4L2_PIX_FMT_MPEG4: return gen_mpeg4hdr_to_package(go, code, space, framelen); } case SPECIAL_BRC_CTRL: return brctrl_to_package(go, code, space, framelen); case SPECIAL_CONFIG: return config_package(go, code, space); case SPECIAL_SEQHEAD: switch (go->format) { case V4L2_PIX_FMT_MPEG1: case V4L2_PIX_FMT_MPEG2: return seqhead_to_package(go, code, space, mpeg1_sequence_header); case V4L2_PIX_FMT_MPEG4: return seqhead_to_package(go, code, space, mpeg4_sequence_header); default: return 0; } case SPECIAL_AV_SYNC: return avsync_to_package(go, code, space); case SPECIAL_FINAL: return final_package(go, code, space); case SPECIAL_AUDIO: return audio_to_package(go, code, space); case SPECIAL_MODET: return modet_to_package(go, code, space); } dev_err(go->dev, "firmware file contains unsupported feature %04x\n", type); return -1; } int go7007_construct_fw_image(struct go7007 *go, u8 **fw, int *fwlen) { const struct firmware *fw_entry; __le16 *code, *src; int framelen[8] = { }; /* holds the lengths of empty frame templates */ int codespace = 64 * 1024, i = 0, srclen, chunk_len, chunk_flags; int mode_flag; int ret; switch (go->format) { case V4L2_PIX_FMT_MJPEG: mode_flag = FLAG_MODE_MJPEG; break; case V4L2_PIX_FMT_MPEG1: mode_flag = FLAG_MODE_MPEG1; break; case V4L2_PIX_FMT_MPEG2: mode_flag = FLAG_MODE_MPEG2; break; case V4L2_PIX_FMT_MPEG4: mode_flag = FLAG_MODE_MPEG4; break; default: return -1; } if (request_firmware(&fw_entry, GO7007_FW_NAME, go->dev)) { dev_err(go->dev, "unable to load firmware from file \"%s\"\n", GO7007_FW_NAME); return -1; } code = kzalloc(codespace * 2, GFP_KERNEL); if (code == NULL) goto fw_failed; src = (__le16 *)fw_entry->data; srclen = fw_entry->size / 2; while (srclen >= 2) { chunk_flags = __le16_to_cpu(src[0]); chunk_len = __le16_to_cpu(src[1]); if (chunk_len + 2 > srclen) { dev_err(go->dev, "firmware file \"%s\" appears to be corrupted\n", GO7007_FW_NAME); goto fw_failed; } if (chunk_flags & mode_flag) { if (chunk_flags & FLAG_SPECIAL) { ret = do_special(go, __le16_to_cpu(src[2]), &code[i], codespace - i, framelen); if (ret < 0) { dev_err(go->dev, "insufficient memory for firmware construction\n"); goto fw_failed; } i += ret; } else { if (codespace - i < chunk_len) { dev_err(go->dev, "insufficient memory for firmware construction\n"); goto fw_failed; } memcpy(&code[i], &src[2], chunk_len * 2); i += 
chunk_len; } } srclen -= chunk_len + 2; src += chunk_len + 2; } release_firmware(fw_entry); *fw = (u8 *)code; *fwlen = i * 2; return 0; fw_failed: kfree(code); release_firmware(fw_entry); return -1; } MODULE_FIRMWARE(GO7007_FW_NAME);
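/*
 * Usage sketch for the CODE_GEN()/CODE_ADD() bit packer defined above;
 * the buffer and values here are hypothetical and only illustrate the
 * MSB-first packing behaviour:
 *
 *	unsigned char hdr[8];
 *	CODE_GEN(c, hdr);
 *	CODE_ADD(c, 0x1, 24);	// emits 00 00 01 (a start-code prefix)
 *	CODE_ADD(c, 0xb3, 8);	// emits b3 (sequence header code)
 *	// CODE_LENGTH(c) == 32; hdr now holds 00 00 01 b3
 */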
gpl-2.0
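The firmware constructor in the go7007 entry above consumes a stream of 16-bit-word chunks: word 0 carries the flags (the FLAG_MODE_* bit for the active codec plus FLAG_SPECIAL), word 1 the payload length in words, followed by the payload, which is either copied verbatim or synthesized by do_special(). Below is a minimal sketch of that framing and its bounds check; the names and the flag value are hypothetical placeholders, and only the chunk layout and the chunk_len + 2 > srclen validation are taken from the code above.

#include <stdint.h>
#include <stddef.h>

#define DEMO_FLAG_SPECIAL 0x0001	/* placeholder; not the driver's value */

/* Walk a chunked image laid out as [flags][len][len payload words]...,
 * rejecting any chunk whose declared length overruns the buffer. */
static int demo_walk_chunks(const uint16_t *src, size_t words,
			    uint16_t mode_flag)
{
	while (words >= 2) {
		uint16_t flags = src[0];
		uint16_t len = src[1];

		if ((size_t)len + 2 > words)
			return -1;		/* corrupted image */
		if (flags & mode_flag) {
			if (flags & DEMO_FLAG_SPECIAL) {
				/* src[2] selects a generated package */
			} else {
				/* payload at src[2..2+len) is copied as-is */
			}
		}
		src += len + 2;
		words -= len + 2;
	}
	return 0;
}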
BruceBushby/linux-4.1
drivers/w1/slaves/w1_ds2406.c
1524
3645
/* * w1_ds2406.c - w1 family 12 (DS2406) driver * based on w1_ds2413.c by Mariusz Bialonczyk <manio@skyboo.net> * * Copyright (c) 2014 Scott Alfter <scott@alfter.us> * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/crc16.h> #include "../w1.h" #include "../w1_int.h" #include "../w1_family.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Scott Alfter <scott@alfter.us>"); MODULE_DESCRIPTION("w1 family 12 driver for DS2406 2 Pin IO"); #define W1_F12_FUNC_READ_STATUS 0xAA #define W1_F12_FUNC_WRITE_STATUS 0x55 static ssize_t w1_f12_read_state( struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { u8 w1_buf[6]={W1_F12_FUNC_READ_STATUS, 7, 0, 0, 0, 0}; struct w1_slave *sl = kobj_to_w1_slave(kobj); u16 crc=0; int i; ssize_t rtnval=1; if (off != 0) return 0; if (!buf) return -EINVAL; mutex_lock(&sl->master->bus_mutex); if (w1_reset_select_slave(sl)) { mutex_unlock(&sl->master->bus_mutex); return -EIO; } w1_write_block(sl->master, w1_buf, 3); w1_read_block(sl->master, w1_buf+3, 3); for (i=0; i<6; i++) crc=crc16_byte(crc, w1_buf[i]); if (crc==0xb001) /* good read? */ *buf=((w1_buf[3]>>5)&3)|0x30; else rtnval=-EIO; mutex_unlock(&sl->master->bus_mutex); return rtnval; } static ssize_t w1_f12_write_output( struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct w1_slave *sl = kobj_to_w1_slave(kobj); u8 w1_buf[6]={W1_F12_FUNC_WRITE_STATUS, 7, 0, 0, 0, 0}; u16 crc=0; int i; ssize_t rtnval=1; if (count != 1 || off != 0) return -EFAULT; mutex_lock(&sl->master->bus_mutex); if (w1_reset_select_slave(sl)) { mutex_unlock(&sl->master->bus_mutex); return -EIO; } w1_buf[3] = (((*buf)&3)<<5)|0x1F; w1_write_block(sl->master, w1_buf, 4); w1_read_block(sl->master, w1_buf+4, 2); for (i=0; i<6; i++) crc=crc16_byte(crc, w1_buf[i]); if (crc==0xb001) /* good read? */ w1_write_8(sl->master, 0xFF); else rtnval=-EIO; mutex_unlock(&sl->master->bus_mutex); return rtnval; } #define NB_SYSFS_BIN_FILES 2 static struct bin_attribute w1_f12_sysfs_bin_files[NB_SYSFS_BIN_FILES] = { { .attr = { .name = "state", .mode = S_IRUGO, }, .size = 1, .read = w1_f12_read_state, }, { .attr = { .name = "output", .mode = S_IRUGO | S_IWUSR | S_IWGRP, }, .size = 1, .write = w1_f12_write_output, } }; static int w1_f12_add_slave(struct w1_slave *sl) { int err = 0; int i; for (i = 0; i < NB_SYSFS_BIN_FILES && !err; ++i) err = sysfs_create_bin_file( &sl->dev.kobj, &(w1_f12_sysfs_bin_files[i])); if (err) while (--i >= 0) sysfs_remove_bin_file(&sl->dev.kobj, &(w1_f12_sysfs_bin_files[i])); return err; } static void w1_f12_remove_slave(struct w1_slave *sl) { int i; for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i) sysfs_remove_bin_file(&sl->dev.kobj, &(w1_f12_sysfs_bin_files[i])); } static struct w1_family_ops w1_f12_fops = { .add_slave = w1_f12_add_slave, .remove_slave = w1_f12_remove_slave, }; static struct w1_family w1_family_12 = { .fid = W1_FAMILY_DS2406, .fops = &w1_f12_fops, }; static int __init w1_f12_init(void) { return w1_register_family(&w1_family_12); } static void __exit w1_f12_exit(void) { w1_unregister_family(&w1_family_12); } module_init(w1_f12_init); module_exit(w1_f12_exit);
gpl-2.0
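Both transfer paths in the w1_ds2406 entry above validate the bus exchange the same way: CRC-16 is accumulated over the command/data bytes plus the CRC the device returns, and a clean transfer leaves the fixed residue 0xb001, because 1-Wire devices transmit the bit-inverted CRC. A self-contained sketch of that check follows, assuming the kernel's CRC-16 parameters from lib/crc16.c (reflected polynomial 0xA001, zero init); demo_crc16_byte() is a bitwise equivalent of the table-driven crc16_byte().

#include <stdint.h>

/* Bit-by-bit CRC-16, reflected poly 0xA001, init 0. */
static uint16_t demo_crc16_byte(uint16_t crc, uint8_t data)
{
	int i;

	crc ^= data;
	for (i = 0; i < 8; i++)
		crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	return crc;
}

/* Run the CRC over the sent bytes plus the device's inverted CRC;
 * a good transfer always leaves the residue 0xB001, which is the
 * check the driver performs. */
static int demo_transfer_ok(const uint8_t *buf, int len)
{
	uint16_t crc = 0;
	int i;

	for (i = 0; i < len; i++)
		crc = demo_crc16_byte(crc, buf[i]);
	return crc == 0xB001;
}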
kevin0100/android_kernel_qcom_msm8916
net/vmw_vsock/af_vsock.c
1524
48493
/* * VMware vSockets Driver * * Copyright (C) 2007-2013 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ /* Implementation notes: * * - There are two kinds of sockets: those created by user action (such as * calling socket(2)) and those created by incoming connection request packets. * * - There are two "global" tables, one for bound sockets (sockets that have * specified an address that they are responsible for) and one for connected * sockets (sockets that have established a connection with another socket). * These tables are "global" in that all sockets on the system are placed * within them. - Note, though, that the bound table contains an extra entry * for a list of unbound sockets and SOCK_DGRAM sockets will always remain in * that list. The bound table is used solely for lookup of sockets when packets * are received and that's not necessary for SOCK_DGRAM sockets since we create * a datagram handle for each and need not perform a lookup. Keeping SOCK_DGRAM * sockets out of the bound hash buckets will reduce the chance of collisions * when looking for SOCK_STREAM sockets and prevents us from having to check the * socket type in the hash table lookups. * * - Sockets created by user action will either be "client" sockets that * initiate a connection or "server" sockets that listen for connections; we do * not support simultaneous connects (two "client" sockets connecting). * * - "Server" sockets are referred to as listener sockets throughout this * implementation because they are in the SS_LISTEN state. When a connection * request is received (the second kind of socket mentioned above), we create a * new socket and refer to it as a pending socket. These pending sockets are * placed on the pending connection list of the listener socket. When future * packets are received for the address the listener socket is bound to, we * check if the source of the packet is from one that has an existing pending * connection. If it does, we process the packet for the pending socket. When * that socket reaches the connected state, it is removed from the listener * socket's pending list and enqueued in the listener socket's accept queue. * Callers of accept(2) will accept connected sockets from the listener socket's * accept queue. If the socket cannot be accepted for some reason then it is * marked rejected. Once the connection is accepted, it is owned by the user * process and the responsibility for cleanup falls with that user process. * * - It is possible that these pending sockets will never reach the connected * state; in fact, we may never receive another packet after the connection * request. Because of this, we must schedule a cleanup function to run in the * future, after some amount of time passes where a connection should have been * established. This function ensures that the socket is off all lists so it * cannot be retrieved, then drops all references to the socket so it is cleaned * up (sock_put() -> sk_free() -> our sk_destruct implementation). 
Note this * function will also cleanup rejected sockets, those that reach the connected * state but leave it before they have been accepted. * * - Sockets created by user action will be cleaned up when the user process * calls close(2), causing our release implementation to be called. Our release * implementation will perform some cleanup then drop the last reference so our * sk_destruct implementation is invoked. Our sk_destruct implementation will * perform additional cleanup that's common for both types of sockets. * * - A socket's reference count is what ensures that the structure won't be * freed. Each entry in a list (such as the "global" bound and connected tables * and the listener socket's pending list and connected queue) ensures a * reference. When we defer work until process context and pass a socket as our * argument, we must ensure the reference count is increased to ensure the * socket isn't freed before the function is run; the deferred function will * then drop the reference. */ #include <linux/types.h> #include <linux/bitops.h> #include <linux/cred.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/list.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/net.h> #include <linux/poll.h> #include <linux/skbuff.h> #include <linux/smp.h> #include <linux/socket.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <net/sock.h> #include "af_vsock.h" static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr); static void vsock_sk_destruct(struct sock *sk); static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); /* Protocol family. */ static struct proto vsock_proto = { .name = "AF_VSOCK", .owner = THIS_MODULE, .obj_size = sizeof(struct vsock_sock), }; /* The default peer timeout indicates how long we will wait for a peer response * to a control message. */ #define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ) #define SS_LISTEN 255 static const struct vsock_transport *transport; static DEFINE_MUTEX(vsock_register_mutex); /**** EXPORTS ****/ /* Get the ID of the local context. This is transport dependent. */ int vm_sockets_get_local_cid(void) { return transport->get_local_cid(); } EXPORT_SYMBOL_GPL(vm_sockets_get_local_cid); /**** UTILS ****/ /* Each bound VSocket is stored in the bind hash table and each connected * VSocket is stored in the connected hash table. * * Unbound sockets are all put on the same list attached to the end of the hash * table (vsock_unbound_sockets). Bound sockets are added to the hash table in * the bucket that their local address hashes to (vsock_bound_sockets(addr) * represents the list that addr hashes to). * * Specifically, we initialize the vsock_bind_table array to a size of * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function * mods with VSOCK_HASH_SIZE - 1 to ensure this. */ #define VSOCK_HASH_SIZE 251 #define MAX_PORT_RETRIES 24 #define VSOCK_HASH(addr) ((addr)->svm_port % (VSOCK_HASH_SIZE - 1)) #define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)]) #define vsock_unbound_sockets (&vsock_bind_table[VSOCK_HASH_SIZE]) /* XXX This can probably be implemented in a better way. 
*/ #define VSOCK_CONN_HASH(src, dst) \ (((src)->svm_cid ^ (dst)->svm_port) % (VSOCK_HASH_SIZE - 1)) #define vsock_connected_sockets(src, dst) \ (&vsock_connected_table[VSOCK_CONN_HASH(src, dst)]) #define vsock_connected_sockets_vsk(vsk) \ vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr) static struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1]; static struct list_head vsock_connected_table[VSOCK_HASH_SIZE]; static DEFINE_SPINLOCK(vsock_table_lock); static void vsock_init_tables(void) { int i; for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++) INIT_LIST_HEAD(&vsock_bind_table[i]); for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) INIT_LIST_HEAD(&vsock_connected_table[i]); } static void __vsock_insert_bound(struct list_head *list, struct vsock_sock *vsk) { sock_hold(&vsk->sk); list_add(&vsk->bound_table, list); } static void __vsock_insert_connected(struct list_head *list, struct vsock_sock *vsk) { sock_hold(&vsk->sk); list_add(&vsk->connected_table, list); } static void __vsock_remove_bound(struct vsock_sock *vsk) { list_del_init(&vsk->bound_table); sock_put(&vsk->sk); } static void __vsock_remove_connected(struct vsock_sock *vsk) { list_del_init(&vsk->connected_table); sock_put(&vsk->sk); } static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr) { struct vsock_sock *vsk; list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) if (addr->svm_port == vsk->local_addr.svm_port) return sk_vsock(vsk); return NULL; } static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src, struct sockaddr_vm *dst) { struct vsock_sock *vsk; list_for_each_entry(vsk, vsock_connected_sockets(src, dst), connected_table) { if (vsock_addr_equals_addr(src, &vsk->remote_addr) && dst->svm_port == vsk->local_addr.svm_port) { return sk_vsock(vsk); } } return NULL; } static bool __vsock_in_bound_table(struct vsock_sock *vsk) { return !list_empty(&vsk->bound_table); } static bool __vsock_in_connected_table(struct vsock_sock *vsk) { return !list_empty(&vsk->connected_table); } static void vsock_insert_unbound(struct vsock_sock *vsk) { spin_lock_bh(&vsock_table_lock); __vsock_insert_bound(vsock_unbound_sockets, vsk); spin_unlock_bh(&vsock_table_lock); } void vsock_insert_connected(struct vsock_sock *vsk) { struct list_head *list = vsock_connected_sockets( &vsk->remote_addr, &vsk->local_addr); spin_lock_bh(&vsock_table_lock); __vsock_insert_connected(list, vsk); spin_unlock_bh(&vsock_table_lock); } EXPORT_SYMBOL_GPL(vsock_insert_connected); void vsock_remove_bound(struct vsock_sock *vsk) { spin_lock_bh(&vsock_table_lock); __vsock_remove_bound(vsk); spin_unlock_bh(&vsock_table_lock); } EXPORT_SYMBOL_GPL(vsock_remove_bound); void vsock_remove_connected(struct vsock_sock *vsk) { spin_lock_bh(&vsock_table_lock); __vsock_remove_connected(vsk); spin_unlock_bh(&vsock_table_lock); } EXPORT_SYMBOL_GPL(vsock_remove_connected); struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr) { struct sock *sk; spin_lock_bh(&vsock_table_lock); sk = __vsock_find_bound_socket(addr); if (sk) sock_hold(sk); spin_unlock_bh(&vsock_table_lock); return sk; } EXPORT_SYMBOL_GPL(vsock_find_bound_socket); struct sock *vsock_find_connected_socket(struct sockaddr_vm *src, struct sockaddr_vm *dst) { struct sock *sk; spin_lock_bh(&vsock_table_lock); sk = __vsock_find_connected_socket(src, dst); if (sk) sock_hold(sk); spin_unlock_bh(&vsock_table_lock); return sk; } EXPORT_SYMBOL_GPL(vsock_find_connected_socket); static bool vsock_in_bound_table(struct vsock_sock *vsk) { bool ret; 
spin_lock_bh(&vsock_table_lock); ret = __vsock_in_bound_table(vsk); spin_unlock_bh(&vsock_table_lock); return ret; } static bool vsock_in_connected_table(struct vsock_sock *vsk) { bool ret; spin_lock_bh(&vsock_table_lock); ret = __vsock_in_connected_table(vsk); spin_unlock_bh(&vsock_table_lock); return ret; } void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)) { int i; spin_lock_bh(&vsock_table_lock); for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { struct vsock_sock *vsk; list_for_each_entry(vsk, &vsock_connected_table[i], connected_table) fn(sk_vsock(vsk)); } spin_unlock_bh(&vsock_table_lock); } EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket); void vsock_add_pending(struct sock *listener, struct sock *pending) { struct vsock_sock *vlistener; struct vsock_sock *vpending; vlistener = vsock_sk(listener); vpending = vsock_sk(pending); sock_hold(pending); sock_hold(listener); list_add_tail(&vpending->pending_links, &vlistener->pending_links); } EXPORT_SYMBOL_GPL(vsock_add_pending); void vsock_remove_pending(struct sock *listener, struct sock *pending) { struct vsock_sock *vpending = vsock_sk(pending); list_del_init(&vpending->pending_links); sock_put(listener); sock_put(pending); } EXPORT_SYMBOL_GPL(vsock_remove_pending); void vsock_enqueue_accept(struct sock *listener, struct sock *connected) { struct vsock_sock *vlistener; struct vsock_sock *vconnected; vlistener = vsock_sk(listener); vconnected = vsock_sk(connected); sock_hold(connected); sock_hold(listener); list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue); } EXPORT_SYMBOL_GPL(vsock_enqueue_accept); static struct sock *vsock_dequeue_accept(struct sock *listener) { struct vsock_sock *vlistener; struct vsock_sock *vconnected; vlistener = vsock_sk(listener); if (list_empty(&vlistener->accept_queue)) return NULL; vconnected = list_entry(vlistener->accept_queue.next, struct vsock_sock, accept_queue); list_del_init(&vconnected->accept_queue); sock_put(listener); /* The caller will need a reference on the connected socket so we let * it call sock_put(). */ return sk_vsock(vconnected); } static bool vsock_is_accept_queue_empty(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); return list_empty(&vsk->accept_queue); } static bool vsock_is_pending(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); return !list_empty(&vsk->pending_links); } static int vsock_send_shutdown(struct sock *sk, int mode) { return transport->shutdown(vsock_sk(sk), mode); } void vsock_pending_work(struct work_struct *work) { struct sock *sk; struct sock *listener; struct vsock_sock *vsk; bool cleanup; vsk = container_of(work, struct vsock_sock, dwork.work); sk = sk_vsock(vsk); listener = vsk->listener; cleanup = true; lock_sock(listener); lock_sock(sk); if (vsock_is_pending(sk)) { vsock_remove_pending(listener, sk); } else if (!vsk->rejected) { /* We are not on the pending list and accept() did not reject * us, so we must have been accepted by our user process. We * just need to drop our references to the sockets and be on * our way. */ cleanup = false; goto out; } listener->sk_ack_backlog--; /* We need to remove ourself from the global connected sockets list so * incoming packets can't find this socket, and to reduce the reference * count. 
*/ if (vsock_in_connected_table(vsk)) vsock_remove_connected(vsk); sk->sk_state = SS_FREE; out: release_sock(sk); release_sock(listener); if (cleanup) sock_put(sk); sock_put(sk); sock_put(listener); } EXPORT_SYMBOL_GPL(vsock_pending_work); /**** SOCKET OPERATIONS ****/ static int __vsock_bind_stream(struct vsock_sock *vsk, struct sockaddr_vm *addr) { static u32 port = LAST_RESERVED_PORT + 1; struct sockaddr_vm new_addr; vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port); if (addr->svm_port == VMADDR_PORT_ANY) { bool found = false; unsigned int i; for (i = 0; i < MAX_PORT_RETRIES; i++) { if (port <= LAST_RESERVED_PORT) port = LAST_RESERVED_PORT + 1; new_addr.svm_port = port++; if (!__vsock_find_bound_socket(&new_addr)) { found = true; break; } } if (!found) return -EADDRNOTAVAIL; } else { /* If port is in reserved range, ensure caller * has necessary privileges. */ if (addr->svm_port <= LAST_RESERVED_PORT && !capable(CAP_NET_BIND_SERVICE)) { return -EACCES; } if (__vsock_find_bound_socket(&new_addr)) return -EADDRINUSE; } vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port); /* Remove stream sockets from the unbound list and add them to the hash * table for easy lookup by its address. The unbound list is simply an * extra entry at the end of the hash table, a trick used by AF_UNIX. */ __vsock_remove_bound(vsk); __vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk); return 0; } static int __vsock_bind_dgram(struct vsock_sock *vsk, struct sockaddr_vm *addr) { return transport->dgram_bind(vsk, addr); } static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr) { struct vsock_sock *vsk = vsock_sk(sk); u32 cid; int retval; /* First ensure this socket isn't already bound. */ if (vsock_addr_bound(&vsk->local_addr)) return -EINVAL; /* Now bind to the provided address or select appropriate values if * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that * like AF_INET prevents binding to a non-local IP address (in most * cases), we only allow binding to the local CID. */ cid = transport->get_local_cid(); if (addr->svm_cid != cid && addr->svm_cid != VMADDR_CID_ANY) return -EADDRNOTAVAIL; switch (sk->sk_socket->type) { case SOCK_STREAM: spin_lock_bh(&vsock_table_lock); retval = __vsock_bind_stream(vsk, addr); spin_unlock_bh(&vsock_table_lock); break; case SOCK_DGRAM: retval = __vsock_bind_dgram(vsk, addr); break; default: retval = -EINVAL; break; } return retval; } struct sock *__vsock_create(struct net *net, struct socket *sock, struct sock *parent, gfp_t priority, unsigned short type) { struct sock *sk; struct vsock_sock *psk; struct vsock_sock *vsk; sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto); if (!sk) return NULL; sock_init_data(sock, sk); /* sk->sk_type is normally set in sock_init_data, but only if sock is * non-NULL. We make sure that our sockets always have a type by * setting it here if needed. */ if (!sock) sk->sk_type = type; vsk = vsock_sk(sk); vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); sk->sk_destruct = vsock_sk_destruct; sk->sk_backlog_rcv = vsock_queue_rcv_skb; sk->sk_state = 0; sock_reset_flag(sk, SOCK_DONE); INIT_LIST_HEAD(&vsk->bound_table); INIT_LIST_HEAD(&vsk->connected_table); vsk->listener = NULL; INIT_LIST_HEAD(&vsk->pending_links); INIT_LIST_HEAD(&vsk->accept_queue); vsk->rejected = false; vsk->sent_request = false; vsk->ignore_connecting_rst = false; vsk->peer_shutdown = 0; psk = parent ? 
vsock_sk(parent) : NULL; if (parent) { vsk->trusted = psk->trusted; vsk->owner = get_cred(psk->owner); vsk->connect_timeout = psk->connect_timeout; } else { vsk->trusted = capable(CAP_NET_ADMIN); vsk->owner = get_current_cred(); vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT; } if (transport->init(vsk, psk) < 0) { sk_free(sk); return NULL; } if (sock) vsock_insert_unbound(vsk); return sk; } EXPORT_SYMBOL_GPL(__vsock_create); static void __vsock_release(struct sock *sk) { if (sk) { struct sk_buff *skb; struct sock *pending; struct vsock_sock *vsk; vsk = vsock_sk(sk); pending = NULL; /* Compiler warning. */ if (vsock_in_bound_table(vsk)) vsock_remove_bound(vsk); if (vsock_in_connected_table(vsk)) vsock_remove_connected(vsk); transport->release(vsk); lock_sock(sk); sock_orphan(sk); sk->sk_shutdown = SHUTDOWN_MASK; while ((skb = skb_dequeue(&sk->sk_receive_queue))) kfree_skb(skb); /* Clean up any sockets that never were accepted. */ while ((pending = vsock_dequeue_accept(sk)) != NULL) { __vsock_release(pending); sock_put(pending); } release_sock(sk); sock_put(sk); } } static void vsock_sk_destruct(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); transport->destruct(vsk); /* When clearing these addresses, there's no need to set the family and * possibly register the address family with the kernel. */ vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); put_cred(vsk->owner); } static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int err; err = sock_queue_rcv_skb(sk, skb); if (err) kfree_skb(skb); return err; } s64 vsock_stream_has_data(struct vsock_sock *vsk) { return transport->stream_has_data(vsk); } EXPORT_SYMBOL_GPL(vsock_stream_has_data); s64 vsock_stream_has_space(struct vsock_sock *vsk) { return transport->stream_has_space(vsk); } EXPORT_SYMBOL_GPL(vsock_stream_has_space); static int vsock_release(struct socket *sock) { __vsock_release(sock->sk); sock->sk = NULL; sock->state = SS_FREE; return 0; } static int vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { int err; struct sock *sk; struct sockaddr_vm *vm_addr; sk = sock->sk; if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0) return -EINVAL; lock_sock(sk); err = __vsock_bind(sk, vm_addr); release_sock(sk); return err; } static int vsock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer) { int err; struct sock *sk; struct vsock_sock *vsk; struct sockaddr_vm *vm_addr; sk = sock->sk; vsk = vsock_sk(sk); err = 0; lock_sock(sk); if (peer) { if (sock->state != SS_CONNECTED) { err = -ENOTCONN; goto out; } vm_addr = &vsk->remote_addr; } else { vm_addr = &vsk->local_addr; } if (!vm_addr) { err = -EINVAL; goto out; } /* sys_getsockname() and sys_getpeername() pass us a * MAX_SOCK_ADDR-sized buffer and don't set addr_len. Unfortunately * that macro is defined in socket.c instead of .h, so we hardcode its * value here. */ BUILD_BUG_ON(sizeof(*vm_addr) > 128); memcpy(addr, vm_addr, sizeof(*vm_addr)); *addr_len = sizeof(*vm_addr); out: release_sock(sk); return err; } static int vsock_shutdown(struct socket *sock, int mode) { int err; struct sock *sk; /* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode * here like the other address families do. Note also that the * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3), * which is what we want. 
*/ mode++; if ((mode & ~SHUTDOWN_MASK) || !mode) return -EINVAL; /* If this is a STREAM socket and it is not connected then bail out * immediately. If it is a DGRAM socket then we must first kick the * socket so that it wakes up from any sleeping calls, for example * recv(), and then afterwards return the error. */ sk = sock->sk; if (sock->state == SS_UNCONNECTED) { err = -ENOTCONN; if (sk->sk_type == SOCK_STREAM) return err; } else { sock->state = SS_DISCONNECTING; err = 0; } /* Receive and send shutdowns are treated alike. */ mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN); if (mode) { lock_sock(sk); sk->sk_shutdown |= mode; sk->sk_state_change(sk); release_sock(sk); if (sk->sk_type == SOCK_STREAM) { sock_reset_flag(sk, SOCK_DONE); vsock_send_shutdown(sk, mode); } } return err; } static unsigned int vsock_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk; unsigned int mask; struct vsock_sock *vsk; sk = sock->sk; vsk = vsock_sk(sk); poll_wait(file, sk_sleep(sk), wait); mask = 0; if (sk->sk_err) /* Signify that there has been an error on this socket. */ mask |= POLLERR; /* INET sockets treat local write shutdown and peer write shutdown as a * case of POLLHUP set. */ if ((sk->sk_shutdown == SHUTDOWN_MASK) || ((sk->sk_shutdown & SEND_SHUTDOWN) && (vsk->peer_shutdown & SEND_SHUTDOWN))) { mask |= POLLHUP; } if (sk->sk_shutdown & RCV_SHUTDOWN || vsk->peer_shutdown & SEND_SHUTDOWN) { mask |= POLLRDHUP; } if (sock->type == SOCK_DGRAM) { /* For datagram sockets we can read if there is something in * the queue and write as long as the socket isn't shutdown for * sending. */ if (!skb_queue_empty(&sk->sk_receive_queue) || (sk->sk_shutdown & RCV_SHUTDOWN)) { mask |= POLLIN | POLLRDNORM; } if (!(sk->sk_shutdown & SEND_SHUTDOWN)) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; } else if (sock->type == SOCK_STREAM) { lock_sock(sk); /* Listening sockets that have connections in their accept * queue can be read. */ if (sk->sk_state == SS_LISTEN && !vsock_is_accept_queue_empty(sk)) mask |= POLLIN | POLLRDNORM; /* If there is something in the queue then we can read. */ if (transport->stream_is_active(vsk) && !(sk->sk_shutdown & RCV_SHUTDOWN)) { bool data_ready_now = false; int ret = transport->notify_poll_in( vsk, 1, &data_ready_now); if (ret < 0) { mask |= POLLERR; } else { if (data_ready_now) mask |= POLLIN | POLLRDNORM; } } /* Sockets whose connections have been closed, reset, or * terminated should also be considered read, and we check the * shutdown flag for that. */ if (sk->sk_shutdown & RCV_SHUTDOWN || vsk->peer_shutdown & SEND_SHUTDOWN) { mask |= POLLIN | POLLRDNORM; } /* Connected sockets that can produce data can be written. */ if (sk->sk_state == SS_CONNECTED) { if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { bool space_avail_now = false; int ret = transport->notify_poll_out( vsk, 1, &space_avail_now); if (ret < 0) { mask |= POLLERR; } else { if (space_avail_now) /* Remove POLLWRBAND since INET * sockets are not setting it. */ mask |= POLLOUT | POLLWRNORM; } } } /* Simulate INET socket poll behaviors, which sets * POLLOUT|POLLWRNORM when peer is closed and nothing to read, * but local send is not shutdown. 
*/ if (sk->sk_state == SS_UNCONNECTED) { if (!(sk->sk_shutdown & SEND_SHUTDOWN)) mask |= POLLOUT | POLLWRNORM; } release_sock(sk); } return mask; } static int vsock_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { int err; struct sock *sk; struct vsock_sock *vsk; struct sockaddr_vm *remote_addr; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; /* For now, MSG_DONTWAIT is always assumed... */ err = 0; sk = sock->sk; vsk = vsock_sk(sk); lock_sock(sk); if (!vsock_addr_bound(&vsk->local_addr)) { struct sockaddr_vm local_addr; vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); err = __vsock_bind(sk, &local_addr); if (err != 0) goto out; } /* If the provided message contains an address, use that. Otherwise * fall back on the socket's remote handle (if it has been connected). */ if (msg->msg_name && vsock_addr_cast(msg->msg_name, msg->msg_namelen, &remote_addr) == 0) { /* Ensure this address is of the right type and is a valid * destination. */ if (remote_addr->svm_cid == VMADDR_CID_ANY) remote_addr->svm_cid = transport->get_local_cid(); if (!vsock_addr_bound(remote_addr)) { err = -EINVAL; goto out; } } else if (sock->state == SS_CONNECTED) { remote_addr = &vsk->remote_addr; if (remote_addr->svm_cid == VMADDR_CID_ANY) remote_addr->svm_cid = transport->get_local_cid(); /* XXX Should connect() or this function ensure remote_addr is * bound? */ if (!vsock_addr_bound(&vsk->remote_addr)) { err = -EINVAL; goto out; } } else { err = -EINVAL; goto out; } if (!transport->dgram_allow(remote_addr->svm_cid, remote_addr->svm_port)) { err = -EINVAL; goto out; } err = transport->dgram_enqueue(vsk, remote_addr, msg->msg_iov, len); out: release_sock(sk); return err; } static int vsock_dgram_connect(struct socket *sock, struct sockaddr *addr, int addr_len, int flags) { int err; struct sock *sk; struct vsock_sock *vsk; struct sockaddr_vm *remote_addr; sk = sock->sk; vsk = vsock_sk(sk); err = vsock_addr_cast(addr, addr_len, &remote_addr); if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) { lock_sock(sk); vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); sock->state = SS_UNCONNECTED; release_sock(sk); return 0; } else if (err != 0) return -EINVAL; lock_sock(sk); if (!vsock_addr_bound(&vsk->local_addr)) { struct sockaddr_vm local_addr; vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); err = __vsock_bind(sk, &local_addr); if (err != 0) goto out; } if (!transport->dgram_allow(remote_addr->svm_cid, remote_addr->svm_port)) { err = -EINVAL; goto out; } memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr)); sock->state = SS_CONNECTED; out: release_sock(sk); return err; } static int vsock_dgram_recvmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { return transport->dgram_dequeue(kiocb, vsock_sk(sock->sk), msg, len, flags); } static const struct proto_ops vsock_dgram_ops = { .family = PF_VSOCK, .owner = THIS_MODULE, .release = vsock_release, .bind = vsock_bind, .connect = vsock_dgram_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = vsock_getname, .poll = vsock_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = vsock_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = vsock_dgram_sendmsg, .recvmsg = vsock_dgram_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static void vsock_connect_timeout(struct work_struct *work) { struct sock *sk; struct vsock_sock *vsk; vsk = 
container_of(work, struct vsock_sock, dwork.work); sk = sk_vsock(vsk); lock_sock(sk); if (sk->sk_state == SS_CONNECTING && (sk->sk_shutdown != SHUTDOWN_MASK)) { sk->sk_state = SS_UNCONNECTED; sk->sk_err = ETIMEDOUT; sk->sk_error_report(sk); } release_sock(sk); sock_put(sk); } static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr, int addr_len, int flags) { int err; struct sock *sk; struct vsock_sock *vsk; struct sockaddr_vm *remote_addr; long timeout; DEFINE_WAIT(wait); err = 0; sk = sock->sk; vsk = vsock_sk(sk); lock_sock(sk); /* XXX AF_UNSPEC should make us disconnect like AF_INET. */ switch (sock->state) { case SS_CONNECTED: err = -EISCONN; goto out; case SS_DISCONNECTING: err = -EINVAL; goto out; case SS_CONNECTING: /* This continues on so we can move sock into the SS_CONNECTED * state once the connection has completed (at which point err * will be set to zero also). Otherwise, we will either wait * for the connection or return -EALREADY should this be a * non-blocking call. */ err = -EALREADY; break; default: if ((sk->sk_state == SS_LISTEN) || vsock_addr_cast(addr, addr_len, &remote_addr) != 0) { err = -EINVAL; goto out; } /* The hypervisor and well-known contexts do not have socket * endpoints. */ if (!transport->stream_allow(remote_addr->svm_cid, remote_addr->svm_port)) { err = -ENETUNREACH; goto out; } /* Set the remote address that we are connecting to. */ memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr)); /* Autobind this socket to the local address if necessary. */ if (!vsock_addr_bound(&vsk->local_addr)) { struct sockaddr_vm local_addr; vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); err = __vsock_bind(sk, &local_addr); if (err != 0) goto out; } sk->sk_state = SS_CONNECTING; err = transport->connect(vsk); if (err < 0) goto out; /* Mark sock as connecting and set the error code to in * progress in case this is a non-blocking connect. */ sock->state = SS_CONNECTING; err = -EINPROGRESS; } /* The receive path will handle all communication until we are able to * enter the connected state. Here we wait for the connection to be * completed or a notification of an error. */ timeout = vsk->connect_timeout; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); while (sk->sk_state != SS_CONNECTED && sk->sk_err == 0) { if (flags & O_NONBLOCK) { /* If we're not going to block, we schedule a timeout * function to generate a timeout on the connection * attempt, in case the peer doesn't respond in a * timely manner. We hold on to the socket until the * timeout fires. */ sock_hold(sk); INIT_DELAYED_WORK(&vsk->dwork, vsock_connect_timeout); schedule_delayed_work(&vsk->dwork, timeout); /* Skip ahead to preserve error code set above. 
*/ goto out_wait; } release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); if (signal_pending(current)) { err = sock_intr_errno(timeout); goto out_wait_error; } else if (timeout == 0) { err = -ETIMEDOUT; goto out_wait_error; } prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } if (sk->sk_err) { err = -sk->sk_err; goto out_wait_error; } else err = 0; out_wait: finish_wait(sk_sleep(sk), &wait); out: release_sock(sk); return err; out_wait_error: sk->sk_state = SS_UNCONNECTED; sock->state = SS_UNCONNECTED; goto out_wait; } static int vsock_accept(struct socket *sock, struct socket *newsock, int flags) { struct sock *listener; int err; struct sock *connected; struct vsock_sock *vconnected; long timeout; DEFINE_WAIT(wait); err = 0; listener = sock->sk; lock_sock(listener); if (sock->type != SOCK_STREAM) { err = -EOPNOTSUPP; goto out; } if (listener->sk_state != SS_LISTEN) { err = -EINVAL; goto out; } /* Wait for children sockets to appear; these are the new sockets * created upon connection establishment. */ timeout = sock_sndtimeo(listener, flags & O_NONBLOCK); prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); while ((connected = vsock_dequeue_accept(listener)) == NULL && listener->sk_err == 0) { release_sock(listener); timeout = schedule_timeout(timeout); lock_sock(listener); if (signal_pending(current)) { err = sock_intr_errno(timeout); goto out_wait; } else if (timeout == 0) { err = -EAGAIN; goto out_wait; } prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); } if (listener->sk_err) err = -listener->sk_err; if (connected) { listener->sk_ack_backlog--; lock_sock(connected); vconnected = vsock_sk(connected); /* If the listener socket has received an error, then we should * reject this socket and return. Note that we simply mark the * socket rejected, drop our reference, and let the cleanup * function handle the cleanup; the fact that we found it in * the listener's accept queue guarantees that the cleanup * function hasn't run yet. 
*/ if (err) { vconnected->rejected = true; release_sock(connected); sock_put(connected); goto out_wait; } newsock->state = SS_CONNECTED; sock_graft(connected, newsock); release_sock(connected); sock_put(connected); } out_wait: finish_wait(sk_sleep(listener), &wait); out: release_sock(listener); return err; } static int vsock_listen(struct socket *sock, int backlog) { int err; struct sock *sk; struct vsock_sock *vsk; sk = sock->sk; lock_sock(sk); if (sock->type != SOCK_STREAM) { err = -EOPNOTSUPP; goto out; } if (sock->state != SS_UNCONNECTED) { err = -EINVAL; goto out; } vsk = vsock_sk(sk); if (!vsock_addr_bound(&vsk->local_addr)) { err = -EINVAL; goto out; } sk->sk_max_ack_backlog = backlog; sk->sk_state = SS_LISTEN; err = 0; out: release_sock(sk); return err; } static int vsock_stream_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { int err; struct sock *sk; struct vsock_sock *vsk; u64 val; if (level != AF_VSOCK) return -ENOPROTOOPT; #define COPY_IN(_v) \ do { \ if (optlen < sizeof(_v)) { \ err = -EINVAL; \ goto exit; \ } \ if (copy_from_user(&_v, optval, sizeof(_v)) != 0) { \ err = -EFAULT; \ goto exit; \ } \ } while (0) err = 0; sk = sock->sk; vsk = vsock_sk(sk); lock_sock(sk); switch (optname) { case SO_VM_SOCKETS_BUFFER_SIZE: COPY_IN(val); transport->set_buffer_size(vsk, val); break; case SO_VM_SOCKETS_BUFFER_MAX_SIZE: COPY_IN(val); transport->set_max_buffer_size(vsk, val); break; case SO_VM_SOCKETS_BUFFER_MIN_SIZE: COPY_IN(val); transport->set_min_buffer_size(vsk, val); break; case SO_VM_SOCKETS_CONNECT_TIMEOUT: { struct timeval tv; COPY_IN(tv); if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC && tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) { vsk->connect_timeout = tv.tv_sec * HZ + DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ)); if (vsk->connect_timeout == 0) vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT; } else { err = -ERANGE; } break; } default: err = -ENOPROTOOPT; break; } #undef COPY_IN exit: release_sock(sk); return err; } static int vsock_stream_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { int err; int len; struct sock *sk; struct vsock_sock *vsk; u64 val; if (level != AF_VSOCK) return -ENOPROTOOPT; err = get_user(len, optlen); if (err != 0) return err; #define COPY_OUT(_v) \ do { \ if (len < sizeof(_v)) \ return -EINVAL; \ \ len = sizeof(_v); \ if (copy_to_user(optval, &_v, len) != 0) \ return -EFAULT; \ \ } while (0) err = 0; sk = sock->sk; vsk = vsock_sk(sk); switch (optname) { case SO_VM_SOCKETS_BUFFER_SIZE: val = transport->get_buffer_size(vsk); COPY_OUT(val); break; case SO_VM_SOCKETS_BUFFER_MAX_SIZE: val = transport->get_max_buffer_size(vsk); COPY_OUT(val); break; case SO_VM_SOCKETS_BUFFER_MIN_SIZE: val = transport->get_min_buffer_size(vsk); COPY_OUT(val); break; case SO_VM_SOCKETS_CONNECT_TIMEOUT: { struct timeval tv; tv.tv_sec = vsk->connect_timeout / HZ; tv.tv_usec = (vsk->connect_timeout - tv.tv_sec * HZ) * (1000000 / HZ); COPY_OUT(tv); break; } default: return -ENOPROTOOPT; } err = put_user(len, optlen); if (err != 0) return -EFAULT; #undef COPY_OUT return 0; } static int vsock_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk; struct vsock_sock *vsk; ssize_t total_written; long timeout; int err; struct vsock_transport_send_notify_data send_data; DEFINE_WAIT(wait); sk = sock->sk; vsk = vsock_sk(sk); total_written = 0; err = 0; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; lock_sock(sk); /* Callers 
should not provide a destination with stream sockets. */ if (msg->msg_namelen) { err = sk->sk_state == SS_CONNECTED ? -EISCONN : -EOPNOTSUPP; goto out; } /* Send data only if both sides are not shutdown in the direction. */ if (sk->sk_shutdown & SEND_SHUTDOWN || vsk->peer_shutdown & RCV_SHUTDOWN) { err = -EPIPE; goto out; } if (sk->sk_state != SS_CONNECTED || !vsock_addr_bound(&vsk->local_addr)) { err = -ENOTCONN; goto out; } if (!vsock_addr_bound(&vsk->remote_addr)) { err = -EDESTADDRREQ; goto out; } /* Wait for room in the produce queue to enqueue our user's data. */ timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); err = transport->notify_send_init(vsk, &send_data); if (err < 0) goto out; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); while (total_written < len) { ssize_t written; while (vsock_stream_has_space(vsk) == 0 && sk->sk_err == 0 && !(sk->sk_shutdown & SEND_SHUTDOWN) && !(vsk->peer_shutdown & RCV_SHUTDOWN)) { /* Don't wait for non-blocking sockets. */ if (timeout == 0) { err = -EAGAIN; goto out_wait; } err = transport->notify_send_pre_block(vsk, &send_data); if (err < 0) goto out_wait; release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); if (signal_pending(current)) { err = sock_intr_errno(timeout); goto out_wait; } else if (timeout == 0) { err = -EAGAIN; goto out_wait; } prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } /* These checks occur both as part of and after the loop * conditional since we need to check before and after * sleeping. */ if (sk->sk_err) { err = -sk->sk_err; goto out_wait; } else if ((sk->sk_shutdown & SEND_SHUTDOWN) || (vsk->peer_shutdown & RCV_SHUTDOWN)) { err = -EPIPE; goto out_wait; } err = transport->notify_send_pre_enqueue(vsk, &send_data); if (err < 0) goto out_wait; /* Note that enqueue will only write as many bytes as are free * in the produce queue, so we don't need to ensure len is * smaller than the queue size. It is the caller's * responsibility to check how many bytes we were able to send. */ written = transport->stream_enqueue( vsk, msg->msg_iov, len - total_written); if (written < 0) { err = -ENOMEM; goto out_wait; } total_written += written; err = transport->notify_send_post_enqueue( vsk, written, &send_data); if (err < 0) goto out_wait; } out_wait: if (total_written > 0) err = total_written; finish_wait(sk_sleep(sk), &wait); out: release_sock(sk); return err; } static int vsock_stream_recvmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk; struct vsock_sock *vsk; int err; size_t target; ssize_t copied; long timeout; struct vsock_transport_recv_notify_data recv_data; DEFINE_WAIT(wait); sk = sock->sk; vsk = vsock_sk(sk); err = 0; lock_sock(sk); if (sk->sk_state != SS_CONNECTED) { /* Recvmsg is supposed to return 0 if a peer performs an * orderly shutdown. Differentiate between that case and when a * peer has not connected or a local shutdown occured with the * SOCK_DONE flag. */ if (sock_flag(sk, SOCK_DONE)) err = 0; else err = -ENOTCONN; goto out; } if (flags & MSG_OOB) { err = -EOPNOTSUPP; goto out; } /* We don't check peer_shutdown flag here since peer may actually shut * down, but there can be data in the queue that a local socket can * receive. */ if (sk->sk_shutdown & RCV_SHUTDOWN) { err = 0; goto out; } /* It is valid on Linux to pass in a zero-length receive buffer. This * is not an error. We may as well bail out now. 
*/ if (!len) { err = 0; goto out; } /* We must not copy less than target bytes into the user's buffer * before returning successfully, so we wait for the consume queue to * have that much data to consume before dequeueing. Note that this * makes it impossible to handle cases where target is greater than the * queue size. */ target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); if (target >= transport->stream_rcvhiwat(vsk)) { err = -ENOMEM; goto out; } timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); copied = 0; err = transport->notify_recv_init(vsk, target, &recv_data); if (err < 0) goto out; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); while (1) { s64 ready = vsock_stream_has_data(vsk); if (ready < 0) { /* Invalid queue pair content. XXX This should be * changed to a connection reset in a later change. */ err = -ENOMEM; goto out_wait; } else if (ready > 0) { ssize_t read; err = transport->notify_recv_pre_dequeue( vsk, target, &recv_data); if (err < 0) break; read = transport->stream_dequeue( vsk, msg->msg_iov, len - copied, flags); if (read < 0) { err = -ENOMEM; break; } copied += read; err = transport->notify_recv_post_dequeue( vsk, target, read, !(flags & MSG_PEEK), &recv_data); if (err < 0) goto out_wait; if (read >= target || flags & MSG_PEEK) break; target -= read; } else { if (sk->sk_err != 0 || (sk->sk_shutdown & RCV_SHUTDOWN) || (vsk->peer_shutdown & SEND_SHUTDOWN)) { break; } /* Don't wait for non-blocking sockets. */ if (timeout == 0) { err = -EAGAIN; break; } err = transport->notify_recv_pre_block( vsk, target, &recv_data); if (err < 0) break; release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); if (signal_pending(current)) { err = sock_intr_errno(timeout); break; } else if (timeout == 0) { err = -EAGAIN; break; } prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } } if (sk->sk_err) err = -sk->sk_err; else if (sk->sk_shutdown & RCV_SHUTDOWN) err = 0; if (copied > 0) { /* We only do these additional bookkeeping/notification steps * if we actually copied something out of the queue pair * instead of just peeking ahead. */ if (!(flags & MSG_PEEK)) { /* If the other side has shutdown for sending and there * is nothing more to read, then modify the socket * state. */ if (vsk->peer_shutdown & SEND_SHUTDOWN) { if (vsock_stream_has_data(vsk) <= 0) { sk->sk_state = SS_UNCONNECTED; sock_set_flag(sk, SOCK_DONE); sk->sk_state_change(sk); } } } err = copied; } out_wait: finish_wait(sk_sleep(sk), &wait); out: release_sock(sk); return err; } static const struct proto_ops vsock_stream_ops = { .family = PF_VSOCK, .owner = THIS_MODULE, .release = vsock_release, .bind = vsock_bind, .connect = vsock_stream_connect, .socketpair = sock_no_socketpair, .accept = vsock_accept, .getname = vsock_getname, .poll = vsock_poll, .ioctl = sock_no_ioctl, .listen = vsock_listen, .shutdown = vsock_shutdown, .setsockopt = vsock_stream_setsockopt, .getsockopt = vsock_stream_getsockopt, .sendmsg = vsock_stream_sendmsg, .recvmsg = vsock_stream_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static int vsock_create(struct net *net, struct socket *sock, int protocol, int kern) { if (!sock) return -EINVAL; if (protocol && protocol != PF_VSOCK) return -EPROTONOSUPPORT; switch (sock->type) { case SOCK_DGRAM: sock->ops = &vsock_dgram_ops; break; case SOCK_STREAM: sock->ops = &vsock_stream_ops; break; default: return -ESOCKTNOSUPPORT; } sock->state = SS_UNCONNECTED; return __vsock_create(net, sock, NULL, GFP_KERNEL, 0) ? 
0 : -ENOMEM; } static const struct net_proto_family vsock_family_ops = { .family = AF_VSOCK, .create = vsock_create, .owner = THIS_MODULE, }; static long vsock_dev_do_ioctl(struct file *filp, unsigned int cmd, void __user *ptr) { u32 __user *p = ptr; int retval = 0; switch (cmd) { case IOCTL_VM_SOCKETS_GET_LOCAL_CID: if (put_user(transport->get_local_cid(), p) != 0) retval = -EFAULT; break; default: pr_err("Unknown ioctl %d\n", cmd); retval = -EINVAL; } return retval; } static long vsock_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg); } #ifdef CONFIG_COMPAT static long vsock_dev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg)); } #endif static const struct file_operations vsock_device_ops = { .owner = THIS_MODULE, .unlocked_ioctl = vsock_dev_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = vsock_dev_compat_ioctl, #endif .open = nonseekable_open, }; static struct miscdevice vsock_device = { .name = "vsock", .fops = &vsock_device_ops, }; static int __vsock_core_init(void) { int err; vsock_init_tables(); vsock_device.minor = MISC_DYNAMIC_MINOR; err = misc_register(&vsock_device); if (err) { pr_err("Failed to register misc device\n"); return -ENOENT; } err = proto_register(&vsock_proto, 1); /* we want our slab */ if (err) { pr_err("Cannot register vsock protocol\n"); goto err_misc_deregister; } err = sock_register(&vsock_family_ops); if (err) { pr_err("could not register af_vsock (%d) address family: %d\n", AF_VSOCK, err); goto err_unregister_proto; } return 0; err_unregister_proto: proto_unregister(&vsock_proto); err_misc_deregister: misc_deregister(&vsock_device); return err; } int vsock_core_init(const struct vsock_transport *t) { int retval = mutex_lock_interruptible(&vsock_register_mutex); if (retval) return retval; if (transport) { retval = -EBUSY; goto out; } transport = t; retval = __vsock_core_init(); if (retval) transport = NULL; out: mutex_unlock(&vsock_register_mutex); return retval; } EXPORT_SYMBOL_GPL(vsock_core_init); void vsock_core_exit(void) { mutex_lock(&vsock_register_mutex); misc_deregister(&vsock_device); sock_unregister(AF_VSOCK); proto_unregister(&vsock_proto); /* We do not want the assignment below re-ordered. */ mb(); transport = NULL; mutex_unlock(&vsock_register_mutex); } EXPORT_SYMBOL_GPL(vsock_core_exit); MODULE_AUTHOR("VMware, Inc."); MODULE_DESCRIPTION("VMware Virtual Socket Family"); MODULE_VERSION("1.0.0.0-k"); MODULE_LICENSE("GPL v2");
gpl-2.0
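From userspace, the address family registered by the af_vsock entry above is driven through the ordinary socket calls; endpoints are (CID, port) pairs carried in struct sockaddr_vm, and binding to a port at or below LAST_RESERVED_PORT requires CAP_NET_BIND_SERVICE, as __vsock_bind_stream() enforces. A minimal stream-client sketch follows; the port number is an arbitrary placeholder, and AF_VSOCK plus struct sockaddr_vm are assumed to come from reasonably recent kernel headers and glibc.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>	/* struct sockaddr_vm, VMADDR_CID_HOST */

int main(void)
{
	struct sockaddr_vm addr;
	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");	/* no vsock transport loaded? */
		return 1;
	}
	memset(&addr, 0, sizeof(addr));
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = VMADDR_CID_HOST;	/* CID 2: the hypervisor host */
	addr.svm_port = 1234;		/* placeholder, above the reserved range */

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("connect");
	else
		write(fd, "ping", 4);
	close(fd);
	return 0;
}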
Androguide/android_kernel_samsung_smdk4412
arch/arm/mach-s3c2412/mach-jive.c
2292
17169
/* linux/arch/arm/mach-s3c2410/mach-jive.c * * Copyright 2007 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * http://armlinux.simtec.co.uk/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/gpio.h> #include <linux/syscore_ops.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <video/ili9320.h> #include <linux/spi/spi.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <plat/regs-serial.h> #include <plat/nand.h> #include <plat/iic.h> #include <mach/regs-power.h> #include <mach/regs-gpio.h> #include <mach/regs-mem.h> #include <mach/regs-lcd.h> #include <mach/spi-gpio.h> #include <mach/fb.h> #include <asm/mach-types.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <plat/gpio-cfg.h> #include <plat/clock.h> #include <plat/devs.h> #include <plat/cpu.h> #include <plat/pm.h> #include <plat/udc.h> static struct map_desc jive_iodesc[] __initdata = { }; #define UCON S3C2410_UCON_DEFAULT #define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE #define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE static struct s3c2410_uartcfg jive_uartcfgs[] = { [0] = { .hwport = 0, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [1] = { .hwport = 1, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [2] = { .hwport = 2, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, } }; /* Jive flash assignment * * 0x00000000-0x00028000 : uboot * 0x00028000-0x0002c000 : uboot env * 0x0002c000-0x00030000 : spare * 0x00030000-0x00200000 : zimage A * 0x00200000-0x01600000 : cramfs A * 0x01600000-0x017d0000 : zimage B * 0x017d0000-0x02bd0000 : cramfs B * 0x02bd0000-0x03fd0000 : yaffs */ static struct mtd_partition __initdata jive_imageA_nand_part[] = { #ifdef CONFIG_MACH_JIVE_SHOW_BOOTLOADER /* Don't allow access to the bootloader from linux */ { .name = "uboot", .offset = 0, .size = (160 * SZ_1K), .mask_flags = MTD_WRITEABLE, /* force read-only */ }, /* spare */ { .name = "spare", .offset = (176 * SZ_1K), .size = (16 * SZ_1K), }, #endif /* booted images */ { .name = "kernel (ro)", .offset = (192 * SZ_1K), .size = (SZ_2M) - (192 * SZ_1K), .mask_flags = MTD_WRITEABLE, /* force read-only */ }, { .name = "root (ro)", .offset = (SZ_2M), .size = (20 * SZ_1M), .mask_flags = MTD_WRITEABLE, /* force read-only */ }, /* yaffs */ { .name = "yaffs", .offset = (44 * SZ_1M), .size = (20 * SZ_1M), }, /* bootloader environment */ { .name = "env", .offset = (160 * SZ_1K), .size = (16 * SZ_1K), }, /* upgrade images */ { .name = "zimage", .offset = (22 * SZ_1M), .size = (2 * SZ_1M) - (192 * SZ_1K), }, { .name = "cramfs", .offset = (24 * SZ_1M) - (192*SZ_1K), .size = (20 * SZ_1M), }, }; static struct mtd_partition __initdata jive_imageB_nand_part[] = { #ifdef CONFIG_MACH_JIVE_SHOW_BOOTLOADER /* Don't allow access to the bootloader from linux */ { .name = "uboot", .offset = 0, .size = (160 * SZ_1K), .mask_flags = MTD_WRITEABLE, /* force read-only */ }, /* spare */ { .name = "spare", .offset = (176 * SZ_1K), .size = (16 * SZ_1K), }, #endif /* booted images */ { .name = "kernel (ro)", .offset = (22 * SZ_1M), .size = (2 * SZ_1M) - (192 * SZ_1K), 
.mask_flags = MTD_WRITEABLE, /* force read-only */ }, { .name = "root (ro)", .offset = (24 * SZ_1M) - (192 * SZ_1K), .size = (20 * SZ_1M), .mask_flags = MTD_WRITEABLE, /* force read-only */ }, /* yaffs */ { .name = "yaffs", .offset = (44 * SZ_1M), .size = (20 * SZ_1M), }, /* bootloader environment */ { .name = "env", .offset = (160 * SZ_1K), .size = (16 * SZ_1K), }, /* upgrade images */ { .name = "zimage", .offset = (192 * SZ_1K), .size = (2 * SZ_1M) - (192 * SZ_1K), }, { .name = "cramfs", .offset = (2 * SZ_1M), .size = (20 * SZ_1M), }, }; static struct s3c2410_nand_set __initdata jive_nand_sets[] = { [0] = { .name = "flash", .nr_chips = 1, .nr_partitions = ARRAY_SIZE(jive_imageA_nand_part), .partitions = jive_imageA_nand_part, }, }; static struct s3c2410_platform_nand __initdata jive_nand_info = { /* set taken from osiris nand timings, possibly still conservative */ .tacls = 30, .twrph0 = 55, .twrph1 = 40, .sets = jive_nand_sets, .nr_sets = ARRAY_SIZE(jive_nand_sets), }; static int __init jive_mtdset(char *options) { struct s3c2410_nand_set *nand = &jive_nand_sets[0]; unsigned long set; if (options == NULL || options[0] == '\0') return 0; if (strict_strtoul(options, 10, &set)) { printk(KERN_ERR "failed to parse mtdset=%s\n", options); return 0; } switch (set) { case 1: nand->nr_partitions = ARRAY_SIZE(jive_imageB_nand_part); nand->partitions = jive_imageB_nand_part; case 0: /* this is already setup in the nand info */ break; default: printk(KERN_ERR "Unknown mtd set %ld specified," "using default.", set); } return 0; } /* parse the mtdset= option given to the kernel command line */ __setup("mtdset=", jive_mtdset); /* LCD timing and setup */ #define LCD_XRES (240) #define LCD_YRES (320) #define LCD_LEFT_MARGIN (12) #define LCD_RIGHT_MARGIN (12) #define LCD_LOWER_MARGIN (12) #define LCD_UPPER_MARGIN (12) #define LCD_VSYNC (2) #define LCD_HSYNC (2) #define LCD_REFRESH (60) #define LCD_HTOT (LCD_HSYNC + LCD_LEFT_MARGIN + LCD_XRES + LCD_RIGHT_MARGIN) #define LCD_VTOT (LCD_VSYNC + LCD_LOWER_MARGIN + LCD_YRES + LCD_UPPER_MARGIN) static struct s3c2410fb_display jive_vgg2432a4_display[] = { [0] = { .width = LCD_XRES, .height = LCD_YRES, .xres = LCD_XRES, .yres = LCD_YRES, .left_margin = LCD_LEFT_MARGIN, .right_margin = LCD_RIGHT_MARGIN, .upper_margin = LCD_UPPER_MARGIN, .lower_margin = LCD_LOWER_MARGIN, .hsync_len = LCD_HSYNC, .vsync_len = LCD_VSYNC, .pixclock = (1000000000000LL / (LCD_REFRESH * LCD_HTOT * LCD_VTOT)), .bpp = 16, .type = (S3C2410_LCDCON1_TFT16BPP | S3C2410_LCDCON1_TFT), .lcdcon5 = (S3C2410_LCDCON5_FRM565 | S3C2410_LCDCON5_INVVLINE | S3C2410_LCDCON5_INVVFRAME | S3C2410_LCDCON5_INVVDEN | S3C2410_LCDCON5_PWREN), }, }; /* todo - put into gpio header */ #define S3C2410_GPCCON_MASK(x) (3 << ((x) * 2)) #define S3C2410_GPDCON_MASK(x) (3 << ((x) * 2)) static struct s3c2410fb_mach_info jive_lcd_config = { .displays = jive_vgg2432a4_display, .num_displays = ARRAY_SIZE(jive_vgg2432a4_display), .default_display = 0, /* Enable VD[2..7], VD[10..15], VD[18..23] and VCLK, syncs, VDEN * and disable the pull down resistors on pins we are using for LCD * data. 
*/ .gpcup = (0xf << 1) | (0x3f << 10), .gpccon = (S3C2410_GPC1_VCLK | S3C2410_GPC2_VLINE | S3C2410_GPC3_VFRAME | S3C2410_GPC4_VM | S3C2410_GPC10_VD2 | S3C2410_GPC11_VD3 | S3C2410_GPC12_VD4 | S3C2410_GPC13_VD5 | S3C2410_GPC14_VD6 | S3C2410_GPC15_VD7), .gpccon_mask = (S3C2410_GPCCON_MASK(1) | S3C2410_GPCCON_MASK(2) | S3C2410_GPCCON_MASK(3) | S3C2410_GPCCON_MASK(4) | S3C2410_GPCCON_MASK(10) | S3C2410_GPCCON_MASK(11) | S3C2410_GPCCON_MASK(12) | S3C2410_GPCCON_MASK(13) | S3C2410_GPCCON_MASK(14) | S3C2410_GPCCON_MASK(15)), .gpdup = (0x3f << 2) | (0x3f << 10), .gpdcon = (S3C2410_GPD2_VD10 | S3C2410_GPD3_VD11 | S3C2410_GPD4_VD12 | S3C2410_GPD5_VD13 | S3C2410_GPD6_VD14 | S3C2410_GPD7_VD15 | S3C2410_GPD10_VD18 | S3C2410_GPD11_VD19 | S3C2410_GPD12_VD20 | S3C2410_GPD13_VD21 | S3C2410_GPD14_VD22 | S3C2410_GPD15_VD23), .gpdcon_mask = (S3C2410_GPDCON_MASK(2) | S3C2410_GPDCON_MASK(3) | S3C2410_GPDCON_MASK(4) | S3C2410_GPDCON_MASK(5) | S3C2410_GPDCON_MASK(6) | S3C2410_GPDCON_MASK(7) | S3C2410_GPDCON_MASK(10) | S3C2410_GPDCON_MASK(11)| S3C2410_GPDCON_MASK(12) | S3C2410_GPDCON_MASK(13)| S3C2410_GPDCON_MASK(14) | S3C2410_GPDCON_MASK(15)), }; /* ILI9320 support. */ static void jive_lcm_reset(unsigned int set) { printk(KERN_DEBUG "%s(%d)\n", __func__, set); gpio_set_value(S3C2410_GPG(13), set); } #undef LCD_UPPER_MARGIN #define LCD_UPPER_MARGIN 2 static struct ili9320_platdata jive_lcm_config = { .hsize = LCD_XRES, .vsize = LCD_YRES, .reset = jive_lcm_reset, .suspend = ILI9320_SUSPEND_DEEP, .entry_mode = ILI9320_ENTRYMODE_ID(3) | ILI9320_ENTRYMODE_BGR, .display2 = (ILI9320_DISPLAY2_FP(LCD_UPPER_MARGIN) | ILI9320_DISPLAY2_BP(LCD_LOWER_MARGIN)), .display3 = 0x0, .display4 = 0x0, .rgb_if1 = (ILI9320_RGBIF1_RIM_RGB18 | ILI9320_RGBIF1_RM | ILI9320_RGBIF1_CLK_RGBIF), .rgb_if2 = ILI9320_RGBIF2_DPL, .interface2 = 0x0, .interface3 = 0x3, .interface4 = (ILI9320_INTERFACE4_RTNE(16) | ILI9320_INTERFACE4_DIVE(1)), .interface5 = 0x0, .interface6 = 0x0, }; /* LCD SPI support */ static void jive_lcd_spi_chipselect(struct s3c2410_spigpio_info *spi, int cs) { gpio_set_value(S3C2410_GPB(7), cs ? 0 : 1); } static struct s3c2410_spigpio_info jive_lcd_spi = { .bus_num = 1, .pin_clk = S3C2410_GPG(8), .pin_mosi = S3C2410_GPB(8), .num_chipselect = 1, .chip_select = jive_lcd_spi_chipselect, }; static struct platform_device jive_device_lcdspi = { .name = "spi_s3c24xx_gpio", .id = 1, .num_resources = 0, .dev.platform_data = &jive_lcd_spi, }; /* WM8750 audio code SPI definition */ static void jive_wm8750_chipselect(struct s3c2410_spigpio_info *spi, int cs) { gpio_set_value(S3C2410_GPH(10), cs ? 0 : 1); } static struct s3c2410_spigpio_info jive_wm8750_spi = { .bus_num = 2, .pin_clk = S3C2410_GPB(4), .pin_mosi = S3C2410_GPB(9), .num_chipselect = 1, .chip_select = jive_wm8750_chipselect, }; static struct platform_device jive_device_wm8750 = { .name = "spi_s3c24xx_gpio", .id = 2, .num_resources = 0, .dev.platform_data = &jive_wm8750_spi, }; /* JIVE SPI devices. */ static struct spi_board_info __initdata jive_spi_devs[] = { [0] = { .modalias = "VGG2432A4", .bus_num = 1, .chip_select = 0, .mode = SPI_MODE_3, /* CPOL=1, CPHA=1 */ .max_speed_hz = 100000, .platform_data = &jive_lcm_config, }, { .modalias = "WM8750", .bus_num = 2, .chip_select = 0, .mode = SPI_MODE_0, /* CPOL=0, CPHA=0 */ .max_speed_hz = 100000, }, }; /* I2C bus and device configuration. 
*/ static struct s3c2410_platform_i2c jive_i2c_cfg __initdata = { .frequency = 80 * 1000, .flags = S3C_IICFLG_FILTER, .sda_delay = 2, }; static struct i2c_board_info jive_i2c_devs[] __initdata = { [0] = { I2C_BOARD_INFO("lis302dl", 0x1c), .irq = IRQ_EINT14, }, }; /* The platform devices being used. */ static struct platform_device *jive_devices[] __initdata = { &s3c_device_ohci, &s3c_device_rtc, &s3c_device_wdt, &s3c_device_i2c0, &s3c_device_lcd, &jive_device_lcdspi, &jive_device_wm8750, &s3c_device_nand, &s3c_device_usbgadget, }; static struct s3c2410_udc_mach_info jive_udc_cfg __initdata = { .vbus_pin = S3C2410_GPG(1), /* detect is on GPG1 */ }; /* Jive power management device */ #ifdef CONFIG_PM static int jive_pm_suspend(void) { /* Write the magic value u-boot uses to check for resume into * the INFORM0 register, and ensure INFORM1 is set to the * correct address to resume from. */ __raw_writel(0x2BED, S3C2412_INFORM0); __raw_writel(virt_to_phys(s3c_cpu_resume), S3C2412_INFORM1); return 0; } static void jive_pm_resume(void) { __raw_writel(0x0, S3C2412_INFORM0); } #else #define jive_pm_suspend NULL #define jive_pm_resume NULL #endif static struct syscore_ops jive_pm_syscore_ops = { .suspend = jive_pm_suspend, .resume = jive_pm_resume, }; static void __init jive_map_io(void) { s3c24xx_init_io(jive_iodesc, ARRAY_SIZE(jive_iodesc)); s3c24xx_init_clocks(12000000); s3c24xx_init_uarts(jive_uartcfgs, ARRAY_SIZE(jive_uartcfgs)); } static void jive_power_off(void) { printk(KERN_INFO "powering system down...\n"); s3c2410_gpio_setpin(S3C2410_GPC(5), 1); s3c_gpio_cfgpin(S3C2410_GPC(5), S3C2410_GPIO_OUTPUT); } static void __init jive_machine_init(void) { /* register system core operations for managing low level suspend */ register_syscore_ops(&jive_pm_syscore_ops); /* write our sleep configurations for the IO. Pull down all unused * IO, ensure that we have turned off all peripherals we do not * need, and configure the ones we do need. 
*/ /* Port B sleep */ __raw_writel(S3C2412_SLPCON_IN(0) | S3C2412_SLPCON_PULL(1) | S3C2412_SLPCON_HIGH(2) | S3C2412_SLPCON_PULL(3) | S3C2412_SLPCON_PULL(4) | S3C2412_SLPCON_PULL(5) | S3C2412_SLPCON_PULL(6) | S3C2412_SLPCON_HIGH(7) | S3C2412_SLPCON_PULL(8) | S3C2412_SLPCON_PULL(9) | S3C2412_SLPCON_PULL(10), S3C2412_GPBSLPCON); /* Port C sleep */ __raw_writel(S3C2412_SLPCON_PULL(0) | S3C2412_SLPCON_PULL(1) | S3C2412_SLPCON_PULL(2) | S3C2412_SLPCON_PULL(3) | S3C2412_SLPCON_PULL(4) | S3C2412_SLPCON_PULL(5) | S3C2412_SLPCON_LOW(6) | S3C2412_SLPCON_PULL(6) | S3C2412_SLPCON_PULL(7) | S3C2412_SLPCON_PULL(8) | S3C2412_SLPCON_PULL(9) | S3C2412_SLPCON_PULL(10) | S3C2412_SLPCON_PULL(11) | S3C2412_SLPCON_PULL(12) | S3C2412_SLPCON_PULL(13) | S3C2412_SLPCON_PULL(14) | S3C2412_SLPCON_PULL(15), S3C2412_GPCSLPCON); /* Port D sleep */ __raw_writel(S3C2412_SLPCON_ALL_PULL, S3C2412_GPDSLPCON); /* Port F sleep */ __raw_writel(S3C2412_SLPCON_LOW(0) | S3C2412_SLPCON_LOW(1) | S3C2412_SLPCON_LOW(2) | S3C2412_SLPCON_EINT(3) | S3C2412_SLPCON_EINT(4) | S3C2412_SLPCON_EINT(5) | S3C2412_SLPCON_EINT(6) | S3C2412_SLPCON_EINT(7), S3C2412_GPFSLPCON); /* Port G sleep */ __raw_writel(S3C2412_SLPCON_IN(0) | S3C2412_SLPCON_IN(1) | S3C2412_SLPCON_IN(2) | S3C2412_SLPCON_IN(3) | S3C2412_SLPCON_IN(4) | S3C2412_SLPCON_IN(5) | S3C2412_SLPCON_IN(6) | S3C2412_SLPCON_IN(7) | S3C2412_SLPCON_PULL(8) | S3C2412_SLPCON_PULL(9) | S3C2412_SLPCON_IN(10) | S3C2412_SLPCON_PULL(11) | S3C2412_SLPCON_PULL(12) | S3C2412_SLPCON_PULL(13) | S3C2412_SLPCON_IN(14) | S3C2412_SLPCON_PULL(15), S3C2412_GPGSLPCON); /* Port H sleep */ __raw_writel(S3C2412_SLPCON_PULL(0) | S3C2412_SLPCON_PULL(1) | S3C2412_SLPCON_PULL(2) | S3C2412_SLPCON_PULL(3) | S3C2412_SLPCON_PULL(4) | S3C2412_SLPCON_PULL(5) | S3C2412_SLPCON_PULL(6) | S3C2412_SLPCON_IN(7) | S3C2412_SLPCON_IN(8) | S3C2412_SLPCON_PULL(9) | S3C2412_SLPCON_IN(10), S3C2412_GPHSLPCON); /* initialise the power management now we've setup everything. */ s3c_pm_init(); /** TODO - check that this is after the cmdline option! */ s3c_nand_set_platdata(&jive_nand_info); /* initialise the spi */ gpio_request(S3C2410_GPG(13), "lcm reset"); gpio_direction_output(S3C2410_GPG(13), 0); gpio_request(S3C2410_GPB(7), "jive spi"); gpio_direction_output(S3C2410_GPB(7), 1); s3c2410_gpio_setpin(S3C2410_GPB(6), 0); s3c_gpio_cfgpin(S3C2410_GPB(6), S3C2410_GPIO_OUTPUT); s3c2410_gpio_setpin(S3C2410_GPG(8), 1); s3c_gpio_cfgpin(S3C2410_GPG(8), S3C2410_GPIO_OUTPUT); /* initialise the WM8750 spi */ gpio_request(S3C2410_GPH(10), "jive wm8750 spi"); gpio_direction_output(S3C2410_GPH(10), 1); /* Turn off suspend on both USB ports, and switch the * selectable USB port to USB device mode. */ s3c2410_modify_misccr(S3C2410_MISCCR_USBHOST | S3C2410_MISCCR_USBSUSPND0 | S3C2410_MISCCR_USBSUSPND1, 0x0); s3c24xx_udc_set_platdata(&jive_udc_cfg); s3c24xx_fb_set_platdata(&jive_lcd_config); spi_register_board_info(jive_spi_devs, ARRAY_SIZE(jive_spi_devs)); s3c_i2c0_set_platdata(&jive_i2c_cfg); i2c_register_board_info(0, jive_i2c_devs, ARRAY_SIZE(jive_i2c_devs)); pm_power_off = jive_power_off; platform_add_devices(jive_devices, ARRAY_SIZE(jive_devices)); } MACHINE_START(JIVE, "JIVE") /* Maintainer: Ben Dooks <ben-linux@fluff.org> */ .boot_params = S3C2410_SDRAM_PA + 0x100, .init_irq = s3c24xx_init_irq, .map_io = jive_map_io, .init_machine = jive_machine_init, .timer = &s3c24xx_timer, MACHINE_END
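/*
 * Illustrative sketch (not part of the board file): the s3c2410fb pixclock
 * field in jive_vgg2432a4_display above is a pixel period in picoseconds
 * derived from the refresh rate and the total line/frame geometry,
 * pixclock = 10^12 / (refresh * htot * vtot). This standalone userspace
 * program reproduces the arithmetic with the Jive panel values.
 */
#include <stdio.h>

int main(void)
{
	const long long xres = 240, yres = 320;
	const long long hsync = 2, vsync = 2;
	const long long margin = 12;		/* left/right/upper/lower */
	const long long refresh = 60;

	long long htot = hsync + margin + xres + margin;   /* 266 clocks */
	long long vtot = vsync + margin + yres + margin;   /* 346 lines  */
	long long pixclock = 1000000000000LL / (refresh * htot * vtot);

	/* prints 181088 ps, i.e. a dot clock of roughly 5.52 MHz */
	printf("htot=%lld vtot=%lld pixclock=%lld ps\n", htot, vtot, pixclock);
	return 0;
}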
gpl-2.0
cargabsj175/android_kernel_alcatel_4012a
arch/arm/mach-shmobile/clock-sh73a0.c
2292
13415
/* * sh73a0 clock framework support * * Copyright (C) 2010 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/sh_clk.h> #include <linux/clkdev.h> #include <mach/common.h> #define FRQCRA 0xe6150000 #define FRQCRB 0xe6150004 #define FRQCRD 0xe61500e4 #define VCLKCR1 0xe6150008 #define VCLKCR2 0xe615000C #define VCLKCR3 0xe615001C #define ZBCKCR 0xe6150010 #define FLCKCR 0xe6150014 #define SD0CKCR 0xe6150074 #define SD1CKCR 0xe6150078 #define SD2CKCR 0xe615007C #define FSIACKCR 0xe6150018 #define FSIBCKCR 0xe6150090 #define SUBCKCR 0xe6150080 #define SPUACKCR 0xe6150084 #define SPUVCKCR 0xe6150094 #define MSUCKCR 0xe6150088 #define HSICKCR 0xe615008C #define MFCK1CR 0xe6150098 #define MFCK2CR 0xe615009C #define DSITCKCR 0xe6150060 #define DSI0PCKCR 0xe6150064 #define DSI1PCKCR 0xe6150068 #define DSI0PHYCR 0xe615006C #define DSI1PHYCR 0xe6150070 #define PLLECR 0xe61500d0 #define PLL0CR 0xe61500d8 #define PLL1CR 0xe6150028 #define PLL2CR 0xe615002c #define PLL3CR 0xe61500dc #define SMSTPCR0 0xe6150130 #define SMSTPCR1 0xe6150134 #define SMSTPCR2 0xe6150138 #define SMSTPCR3 0xe615013c #define SMSTPCR4 0xe6150140 #define SMSTPCR5 0xe6150144 #define CKSCR 0xe61500c0 /* Fixed 32 KHz root clock from EXTALR pin */ static struct clk r_clk = { .rate = 32768, }; /* * 26MHz default rate for the EXTAL1 root input clock. * If needed, reset this with clk_set_rate() from the platform code. */ struct clk sh73a0_extal1_clk = { .rate = 26000000, }; /* * 48MHz default rate for the EXTAL2 root input clock. * If needed, reset this with clk_set_rate() from the platform code. 
*/ struct clk sh73a0_extal2_clk = { .rate = 48000000, }; /* A fixed divide-by-2 block */ static unsigned long div2_recalc(struct clk *clk) { return clk->parent->rate / 2; } static struct clk_ops div2_clk_ops = { .recalc = div2_recalc, }; /* Divide extal1 by two */ static struct clk extal1_div2_clk = { .ops = &div2_clk_ops, .parent = &sh73a0_extal1_clk, }; /* Divide extal2 by two */ static struct clk extal2_div2_clk = { .ops = &div2_clk_ops, .parent = &sh73a0_extal2_clk, }; static struct clk_ops main_clk_ops = { .recalc = followparent_recalc, }; /* Main clock */ static struct clk main_clk = { .ops = &main_clk_ops, }; /* PLL0, PLL1, PLL2, PLL3 */ static unsigned long pll_recalc(struct clk *clk) { unsigned long mult = 1; if (__raw_readl(PLLECR) & (1 << clk->enable_bit)) { mult = (((__raw_readl(clk->enable_reg) >> 24) & 0x3f) + 1); /* handle CFG bit for PLL1 and PLL2 */ switch (clk->enable_bit) { case 1: case 2: if (__raw_readl(clk->enable_reg) & (1 << 20)) mult *= 2; } } return clk->parent->rate * mult; } static struct clk_ops pll_clk_ops = { .recalc = pll_recalc, }; static struct clk pll0_clk = { .ops = &pll_clk_ops, .flags = CLK_ENABLE_ON_INIT, .parent = &main_clk, .enable_reg = (void __iomem *)PLL0CR, .enable_bit = 0, }; static struct clk pll1_clk = { .ops = &pll_clk_ops, .flags = CLK_ENABLE_ON_INIT, .parent = &main_clk, .enable_reg = (void __iomem *)PLL1CR, .enable_bit = 1, }; static struct clk pll2_clk = { .ops = &pll_clk_ops, .flags = CLK_ENABLE_ON_INIT, .parent = &main_clk, .enable_reg = (void __iomem *)PLL2CR, .enable_bit = 2, }; static struct clk pll3_clk = { .ops = &pll_clk_ops, .flags = CLK_ENABLE_ON_INIT, .parent = &main_clk, .enable_reg = (void __iomem *)PLL3CR, .enable_bit = 3, }; /* Divide PLL1 by two */ static struct clk pll1_div2_clk = { .ops = &div2_clk_ops, .parent = &pll1_clk, }; static struct clk *main_clks[] = { &r_clk, &sh73a0_extal1_clk, &sh73a0_extal2_clk, &extal1_div2_clk, &extal2_div2_clk, &main_clk, &pll0_clk, &pll1_clk, &pll2_clk, &pll3_clk, &pll1_div2_clk, }; static void div4_kick(struct clk *clk) { unsigned long value; /* set KICK bit in FRQCRB to update hardware setting */ value = __raw_readl(FRQCRB); value |= (1 << 31); __raw_writel(value, FRQCRB); } static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 18, 24, 0, 36, 48, 7 }; static struct clk_div_mult_table div4_div_mult_table = { .divisors = divisors, .nr_divisors = ARRAY_SIZE(divisors), }; static struct clk_div4_table div4_table = { .div_mult_table = &div4_div_mult_table, .kick = div4_kick, }; enum { DIV4_I, DIV4_ZG, DIV4_M3, DIV4_B, DIV4_M1, DIV4_M2, DIV4_Z, DIV4_ZTR, DIV4_ZT, DIV4_ZX, DIV4_HP, DIV4_NR }; #define DIV4(_reg, _bit, _mask, _flags) \ SH_CLK_DIV4(&pll1_clk, _reg, _bit, _mask, _flags) static struct clk div4_clks[DIV4_NR] = { [DIV4_I] = DIV4(FRQCRA, 20, 0xfff, CLK_ENABLE_ON_INIT), [DIV4_ZG] = DIV4(FRQCRA, 16, 0xbff, CLK_ENABLE_ON_INIT), [DIV4_M3] = DIV4(FRQCRA, 12, 0xfff, CLK_ENABLE_ON_INIT), [DIV4_B] = DIV4(FRQCRA, 8, 0xfff, CLK_ENABLE_ON_INIT), [DIV4_M1] = DIV4(FRQCRA, 4, 0xfff, 0), [DIV4_M2] = DIV4(FRQCRA, 0, 0xfff, 0), [DIV4_Z] = DIV4(FRQCRB, 24, 0xbff, 0), [DIV4_ZTR] = DIV4(FRQCRB, 20, 0xfff, 0), [DIV4_ZT] = DIV4(FRQCRB, 16, 0xfff, 0), [DIV4_ZX] = DIV4(FRQCRB, 12, 0xfff, 0), [DIV4_HP] = DIV4(FRQCRB, 4, 0xfff, 0), }; enum { DIV6_VCK1, DIV6_VCK2, DIV6_VCK3, DIV6_ZB1, DIV6_FLCTL, DIV6_SDHI0, DIV6_SDHI1, DIV6_SDHI2, DIV6_FSIA, DIV6_FSIB, DIV6_SUB, DIV6_SPUA, DIV6_SPUV, DIV6_MSU, DIV6_HSI, DIV6_MFG1, DIV6_MFG2, DIV6_DSIT, DIV6_DSI0P, DIV6_DSI1P, DIV6_NR }; static struct clk div6_clks[DIV6_NR] = { 
[DIV6_VCK1] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR1, 0), [DIV6_VCK2] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR2, 0), [DIV6_VCK3] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR3, 0), [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, 0), [DIV6_FLCTL] = SH_CLK_DIV6(&pll1_div2_clk, FLCKCR, 0), [DIV6_SDHI0] = SH_CLK_DIV6(&pll1_div2_clk, SD0CKCR, 0), [DIV6_SDHI1] = SH_CLK_DIV6(&pll1_div2_clk, SD1CKCR, 0), [DIV6_SDHI2] = SH_CLK_DIV6(&pll1_div2_clk, SD2CKCR, 0), [DIV6_FSIA] = SH_CLK_DIV6(&pll1_div2_clk, FSIACKCR, 0), [DIV6_FSIB] = SH_CLK_DIV6(&pll1_div2_clk, FSIBCKCR, 0), [DIV6_SUB] = SH_CLK_DIV6(&sh73a0_extal2_clk, SUBCKCR, 0), [DIV6_SPUA] = SH_CLK_DIV6(&pll1_div2_clk, SPUACKCR, 0), [DIV6_SPUV] = SH_CLK_DIV6(&pll1_div2_clk, SPUVCKCR, 0), [DIV6_MSU] = SH_CLK_DIV6(&pll1_div2_clk, MSUCKCR, 0), [DIV6_HSI] = SH_CLK_DIV6(&pll1_div2_clk, HSICKCR, 0), [DIV6_MFG1] = SH_CLK_DIV6(&pll1_div2_clk, MFCK1CR, 0), [DIV6_MFG2] = SH_CLK_DIV6(&pll1_div2_clk, MFCK2CR, 0), [DIV6_DSIT] = SH_CLK_DIV6(&pll1_div2_clk, DSITCKCR, 0), [DIV6_DSI0P] = SH_CLK_DIV6(&pll1_div2_clk, DSI0PCKCR, 0), [DIV6_DSI1P] = SH_CLK_DIV6(&pll1_div2_clk, DSI1PCKCR, 0), }; enum { MSTP001, MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100, MSTP219, MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, MSTP331, MSTP329, MSTP325, MSTP323, MSTP318, MSTP314, MSTP313, MSTP312, MSTP311, MSTP411, MSTP410, MSTP403, MSTP_NR }; #define MSTP(_parent, _reg, _bit, _flags) \ SH_CLK_MSTP32(_parent, _reg, _bit, _flags) static struct clk mstp_clks[MSTP_NR] = { [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */ [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */ [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */ [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */ [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */ [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */ [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */ [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */ [MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */ [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */ [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */ [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */ [MSTP203] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 3, 0), /* SCIFA1 */ [MSTP202] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 2, 0), /* SCIFA2 */ [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */ [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */ [MSTP331] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 31, 0), /* SCIFA6 */ [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */ [MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */ [MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */ [MSTP318] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 18, 0), /* SY-DMAC */ [MSTP314] = MSTP(&div6_clks[DIV6_SDHI0], SMSTPCR3, 14, 0), /* SDHI0 */ [MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */ [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */ [MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */ [MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */ [MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */ [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */ }; #define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk } 
#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk } #define CLKDEV_ICK_ID(_cid, _did, _clk) { .con_id = _cid, .dev_id = _did, .clk = _clk } static struct clk_lookup lookups[] = { /* main clocks */ CLKDEV_CON_ID("r_clk", &r_clk), /* DIV6 clocks */ CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]), CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]), CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]), CLKDEV_CON_ID("sdhi0_clk", &div6_clks[DIV6_SDHI0]), CLKDEV_CON_ID("sdhi1_clk", &div6_clks[DIV6_SDHI1]), CLKDEV_CON_ID("sdhi2_clk", &div6_clks[DIV6_SDHI2]), CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]), CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]), CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]), CLKDEV_ICK_ID("dsi1p_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSI1P]), /* MSTP32 clocks */ CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */ CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */ CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */ CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */ CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */ CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */ CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */ CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */ CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */ CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */ CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */ CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */ CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), /* SCIFA2 */ CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */ CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */ CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP331]), /* SCIFA6 */ CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */ CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */ CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */ CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP318]), /* SY-DMAC */ CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */ CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */ CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */ CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */ CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */ CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */ CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */ }; void __init sh73a0_clock_init(void) { int k, ret = 0; /* Set SDHI clocks to a known state */ __raw_writel(0x108, SD0CKCR); __raw_writel(0x108, SD1CKCR); __raw_writel(0x108, SD2CKCR); /* detect main clock parent */ switch ((__raw_readl(CKSCR) >> 24) & 0x03) { case 0: main_clk.parent = &sh73a0_extal1_clk; break; case 1: main_clk.parent = &extal1_div2_clk; break; case 2: main_clk.parent = &sh73a0_extal2_clk; break; case 3: main_clk.parent = &extal2_div2_clk; break; } for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) ret = clk_register(main_clks[k]); if (!ret) ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); if (!ret) ret = sh_clk_div6_register(div6_clks, DIV6_NR); if (!ret) ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR); 
clkdev_add_table(lookups, ARRAY_SIZE(lookups)); if (!ret) clk_init(); else panic("failed to setup sh73a0 clocks\n"); }
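/*
 * Illustrative sketch (not part of the clock driver): pll_recalc() above
 * derives a PLL rate from the multiplier in bits [29:24] of the PLLnCR
 * register, doubled for PLL1/PLL2 when the CFG bit (bit 20) is set. This
 * userspace program repeats that calculation; the sample register value is
 * invented for the demonstration.
 */
#include <stdio.h>

static unsigned long pll_rate(unsigned long parent_rate, unsigned int cr,
			      int has_cfg_bit)
{
	unsigned long mult = ((cr >> 24) & 0x3f) + 1;

	if (has_cfg_bit && (cr & (1 << 20)))
		mult *= 2;	/* CFG doubles the PLL1/PLL2 output */

	return parent_rate * mult;
}

int main(void)
{
	/* hypothetical PLL1CR value: x24 multiplier with the CFG bit set */
	unsigned int pll1cr = (23u << 24) | (1u << 20);

	/* 26 MHz EXTAL1 -> 1248 MHz */
	printf("PLL1: %lu Hz\n", pll_rate(26000000UL, pll1cr, 1));
	return 0;
}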
gpl-2.0
TeamRegular/android_kernel_tegra
drivers/net/wireless/ath/ath9k/htc_hst.c
2292
12104
/* * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "htc.h" static int htc_issue_send(struct htc_target *target, struct sk_buff* skb, u16 len, u8 flags, u8 epid) { struct htc_frame_hdr *hdr; struct htc_endpoint *endpoint = &target->endpoint[epid]; int status; hdr = (struct htc_frame_hdr *) skb_push(skb, sizeof(struct htc_frame_hdr)); hdr->endpoint_id = epid; hdr->flags = flags; hdr->payload_len = cpu_to_be16(len); status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb); return status; } static struct htc_endpoint *get_next_avail_ep(struct htc_endpoint *endpoint) { enum htc_endpoint_id avail_epid; for (avail_epid = (ENDPOINT_MAX - 1); avail_epid > ENDPOINT0; avail_epid--) if (endpoint[avail_epid].service_id == 0) return &endpoint[avail_epid]; return NULL; } static u8 service_to_ulpipe(u16 service_id) { switch (service_id) { case WMI_CONTROL_SVC: return 4; case WMI_BEACON_SVC: case WMI_CAB_SVC: case WMI_UAPSD_SVC: case WMI_MGMT_SVC: case WMI_DATA_VO_SVC: case WMI_DATA_VI_SVC: case WMI_DATA_BE_SVC: case WMI_DATA_BK_SVC: return 1; default: return 0; } } static u8 service_to_dlpipe(u16 service_id) { switch (service_id) { case WMI_CONTROL_SVC: return 3; case WMI_BEACON_SVC: case WMI_CAB_SVC: case WMI_UAPSD_SVC: case WMI_MGMT_SVC: case WMI_DATA_VO_SVC: case WMI_DATA_VI_SVC: case WMI_DATA_BE_SVC: case WMI_DATA_BK_SVC: return 2; default: return 0; } } static void htc_process_target_rdy(struct htc_target *target, void *buf) { struct htc_endpoint *endpoint; struct htc_ready_msg *htc_ready_msg = (struct htc_ready_msg *) buf; target->credit_size = be16_to_cpu(htc_ready_msg->credit_size); endpoint = &target->endpoint[ENDPOINT0]; endpoint->service_id = HTC_CTRL_RSVD_SVC; endpoint->max_msglen = HTC_MAX_CONTROL_MESSAGE_LENGTH; atomic_inc(&target->tgt_ready); complete(&target->target_wait); } static void htc_process_conn_rsp(struct htc_target *target, struct htc_frame_hdr *htc_hdr) { struct htc_conn_svc_rspmsg *svc_rspmsg; struct htc_endpoint *endpoint, *tmp_endpoint = NULL; u16 service_id; u16 max_msglen; enum htc_endpoint_id epid, tepid; svc_rspmsg = (struct htc_conn_svc_rspmsg *) ((void *) htc_hdr + sizeof(struct htc_frame_hdr)); if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) { epid = svc_rspmsg->endpoint_id; service_id = be16_to_cpu(svc_rspmsg->service_id); max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len); endpoint = &target->endpoint[epid]; for (tepid = (ENDPOINT_MAX - 1); tepid > ENDPOINT0; tepid--) { tmp_endpoint = &target->endpoint[tepid]; if (tmp_endpoint->service_id == service_id) { tmp_endpoint->service_id = 0; break; } } if (tepid == ENDPOINT0) return; endpoint->service_id = service_id; endpoint->max_txqdepth = tmp_endpoint->max_txqdepth; endpoint->ep_callbacks = tmp_endpoint->ep_callbacks; endpoint->ul_pipeid = tmp_endpoint->ul_pipeid; endpoint->dl_pipeid = 
tmp_endpoint->dl_pipeid; endpoint->max_msglen = max_msglen; target->conn_rsp_epid = epid; complete(&target->cmd_wait); } else { target->conn_rsp_epid = ENDPOINT_UNUSED; } } static int htc_config_pipe_credits(struct htc_target *target) { struct sk_buff *skb; struct htc_config_pipe_msg *cp_msg; int ret, time_left; skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC); if (!skb) { dev_err(target->dev, "failed to allocate send buffer\n"); return -ENOMEM; } skb_reserve(skb, sizeof(struct htc_frame_hdr)); cp_msg = (struct htc_config_pipe_msg *) skb_put(skb, sizeof(struct htc_config_pipe_msg)); cp_msg->message_id = cpu_to_be16(HTC_MSG_CONFIG_PIPE_ID); cp_msg->pipe_id = USB_WLAN_TX_PIPE; cp_msg->credits = target->credits; target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS; ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0); if (ret) goto err; time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, "HTC credit config timeout\n"); return -ETIMEDOUT; } return 0; err: kfree_skb(skb); return -EINVAL; } static int htc_setup_complete(struct htc_target *target) { struct sk_buff *skb; struct htc_comp_msg *comp_msg; int ret = 0, time_left; skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC); if (!skb) { dev_err(target->dev, "failed to allocate send buffer\n"); return -ENOMEM; } skb_reserve(skb, sizeof(struct htc_frame_hdr)); comp_msg = (struct htc_comp_msg *) skb_put(skb, sizeof(struct htc_comp_msg)); comp_msg->msg_id = cpu_to_be16(HTC_MSG_SETUP_COMPLETE_ID); target->htc_flags |= HTC_OP_START_WAIT; ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0); if (ret) goto err; time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, "HTC start timeout\n"); return -ETIMEDOUT; } return 0; err: kfree_skb(skb); return -EINVAL; } /* HTC APIs */ int htc_init(struct htc_target *target) { int ret; ret = htc_config_pipe_credits(target); if (ret) return ret; return htc_setup_complete(target); } int htc_connect_service(struct htc_target *target, struct htc_service_connreq *service_connreq, enum htc_endpoint_id *conn_rsp_epid) { struct sk_buff *skb; struct htc_endpoint *endpoint; struct htc_conn_svc_msg *conn_msg; int ret, time_left; /* Find an available endpoint */ endpoint = get_next_avail_ep(target->endpoint); if (!endpoint) { dev_err(target->dev, "Endpoint is not available for" "service %d\n", service_connreq->service_id); return -EINVAL; } endpoint->service_id = service_connreq->service_id; endpoint->max_txqdepth = service_connreq->max_send_qdepth; endpoint->ul_pipeid = service_to_ulpipe(service_connreq->service_id); endpoint->dl_pipeid = service_to_dlpipe(service_connreq->service_id); endpoint->ep_callbacks = service_connreq->ep_callbacks; skb = alloc_skb(sizeof(struct htc_conn_svc_msg) + sizeof(struct htc_frame_hdr), GFP_ATOMIC); if (!skb) { dev_err(target->dev, "Failed to allocate buf to send" "service connect req\n"); return -ENOMEM; } skb_reserve(skb, sizeof(struct htc_frame_hdr)); conn_msg = (struct htc_conn_svc_msg *) skb_put(skb, sizeof(struct htc_conn_svc_msg)); conn_msg->service_id = cpu_to_be16(service_connreq->service_id); conn_msg->msg_id = cpu_to_be16(HTC_MSG_CONNECT_SERVICE_ID); conn_msg->con_flags = cpu_to_be16(service_connreq->con_flags); conn_msg->dl_pipeid = endpoint->dl_pipeid; conn_msg->ul_pipeid = endpoint->ul_pipeid; ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0); if (ret) goto err; time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { 
dev_err(target->dev, "Service connection timeout for: %d\n", service_connreq->service_id); return -ETIMEDOUT; } *conn_rsp_epid = target->conn_rsp_epid; return 0; err: kfree_skb(skb); return ret; } int htc_send(struct htc_target *target, struct sk_buff *skb) { struct ath9k_htc_tx_ctl *tx_ctl; tx_ctl = HTC_SKB_CB(skb); return htc_issue_send(target, skb, skb->len, 0, tx_ctl->epid); } int htc_send_epid(struct htc_target *target, struct sk_buff *skb, enum htc_endpoint_id epid) { return htc_issue_send(target, skb, skb->len, 0, epid); } void htc_stop(struct htc_target *target) { target->hif->stop(target->hif_dev); } void htc_start(struct htc_target *target) { target->hif->start(target->hif_dev); } void htc_sta_drain(struct htc_target *target, u8 idx) { target->hif->sta_drain(target->hif_dev, idx); } void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle, struct sk_buff *skb, bool txok) { struct htc_endpoint *endpoint; struct htc_frame_hdr *htc_hdr = NULL; if (htc_handle->htc_flags & HTC_OP_CONFIG_PIPE_CREDITS) { complete(&htc_handle->cmd_wait); htc_handle->htc_flags &= ~HTC_OP_CONFIG_PIPE_CREDITS; goto ret; } if (htc_handle->htc_flags & HTC_OP_START_WAIT) { complete(&htc_handle->cmd_wait); htc_handle->htc_flags &= ~HTC_OP_START_WAIT; goto ret; } if (skb) { htc_hdr = (struct htc_frame_hdr *) skb->data; endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id]; skb_pull(skb, sizeof(struct htc_frame_hdr)); if (endpoint->ep_callbacks.tx) { endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv, skb, htc_hdr->endpoint_id, txok); } } return; ret: /* HTC-generated packets are freed here. */ if (htc_hdr && htc_hdr->endpoint_id != ENDPOINT0) dev_kfree_skb_any(skb); else kfree_skb(skb); } /* * HTC Messages are handled directly here and the obtained SKB * is freed. * * Service messages (Data, WMI) passed to the corresponding * endpoint RX handlers, which have to free the SKB. 
*/ void ath9k_htc_rx_msg(struct htc_target *htc_handle, struct sk_buff *skb, u32 len, u8 pipe_id) { struct htc_frame_hdr *htc_hdr; enum htc_endpoint_id epid; struct htc_endpoint *endpoint; __be16 *msg_id; if (!htc_handle || !skb) return; htc_hdr = (struct htc_frame_hdr *) skb->data; epid = htc_hdr->endpoint_id; if (epid >= ENDPOINT_MAX) { if (pipe_id != USB_REG_IN_PIPE) dev_kfree_skb_any(skb); else kfree_skb(skb); return; } if (epid == ENDPOINT0) { /* Handle trailer */ if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) { if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000) /* Move past the Watchdog pattern */ htc_hdr = (struct htc_frame_hdr *)(skb->data + 4); } /* Get the message ID */ msg_id = (__be16 *) ((void *) htc_hdr + sizeof(struct htc_frame_hdr)); /* Now process HTC messages */ switch (be16_to_cpu(*msg_id)) { case HTC_MSG_READY_ID: htc_process_target_rdy(htc_handle, htc_hdr); break; case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID: htc_process_conn_rsp(htc_handle, htc_hdr); break; default: break; } kfree_skb(skb); } else { if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) skb_trim(skb, len - htc_hdr->control[0]); skb_pull(skb, sizeof(struct htc_frame_hdr)); endpoint = &htc_handle->endpoint[epid]; if (endpoint->ep_callbacks.rx) endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv, skb, epid); } } struct htc_target *ath9k_htc_hw_alloc(void *hif_handle, struct ath9k_htc_hif *hif, struct device *dev) { struct htc_endpoint *endpoint; struct htc_target *target; target = kzalloc(sizeof(struct htc_target), GFP_KERNEL); if (!target) { printk(KERN_ERR "Unable to allocate memory for" "target device\n"); return NULL; } init_completion(&target->target_wait); init_completion(&target->cmd_wait); target->hif = hif; target->hif_dev = hif_handle; target->dev = dev; /* Assign control endpoint pipe IDs */ endpoint = &target->endpoint[ENDPOINT0]; endpoint->ul_pipeid = hif->control_ul_pipe; endpoint->dl_pipeid = hif->control_dl_pipe; atomic_set(&target->tgt_ready, 0); return target; } void ath9k_htc_hw_free(struct htc_target *htc) { kfree(htc); } int ath9k_htc_hw_init(struct htc_target *target, struct device *dev, u16 devid, char *product, u32 drv_info) { if (ath9k_htc_probe_device(target, dev, devid, product, drv_info)) { printk(KERN_ERR "Failed to initialize the device\n"); return -ENODEV; } return 0; } void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug) { if (target) ath9k_htc_disconnect_device(target, hot_unplug); }
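/*
 * Illustrative sketch (not part of the driver): htc_issue_send() above
 * stores the 16-bit payload length of the HTC frame header big-endian via
 * cpu_to_be16(). This userspace program shows the equivalent htons()
 * serialization; the three-field header below only loosely mirrors
 * struct htc_frame_hdr and is an assumption made for the example.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct demo_htc_hdr {
	uint8_t  endpoint_id;
	uint8_t  flags;
	uint16_t payload_len;		/* big-endian on the wire */
} __attribute__((packed));

int main(void)
{
	unsigned char frame[4];
	struct demo_htc_hdr hdr = {
		.endpoint_id = 1,
		.flags = 0,
		.payload_len = htons(60),
	};

	memcpy(frame, &hdr, sizeof(hdr));
	/* prints 01 00 00 3c regardless of host endianness */
	printf("%02x %02x %02x %02x\n",
	       frame[0], frame[1], frame[2], frame[3]);
	return 0;
}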
gpl-2.0
Marvell-Semi/PXA168_kernel
fs/fscache/stats.c
3572
10245
/* FS-Cache statistics * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define FSCACHE_DEBUG_LEVEL THREAD #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include "internal.h" /* * operation counters */ atomic_t fscache_n_op_pend; atomic_t fscache_n_op_run; atomic_t fscache_n_op_enqueue; atomic_t fscache_n_op_requeue; atomic_t fscache_n_op_deferred_release; atomic_t fscache_n_op_release; atomic_t fscache_n_op_gc; atomic_t fscache_n_op_cancelled; atomic_t fscache_n_op_rejected; atomic_t fscache_n_attr_changed; atomic_t fscache_n_attr_changed_ok; atomic_t fscache_n_attr_changed_nobufs; atomic_t fscache_n_attr_changed_nomem; atomic_t fscache_n_attr_changed_calls; atomic_t fscache_n_allocs; atomic_t fscache_n_allocs_ok; atomic_t fscache_n_allocs_wait; atomic_t fscache_n_allocs_nobufs; atomic_t fscache_n_allocs_intr; atomic_t fscache_n_allocs_object_dead; atomic_t fscache_n_alloc_ops; atomic_t fscache_n_alloc_op_waits; atomic_t fscache_n_retrievals; atomic_t fscache_n_retrievals_ok; atomic_t fscache_n_retrievals_wait; atomic_t fscache_n_retrievals_nodata; atomic_t fscache_n_retrievals_nobufs; atomic_t fscache_n_retrievals_intr; atomic_t fscache_n_retrievals_nomem; atomic_t fscache_n_retrievals_object_dead; atomic_t fscache_n_retrieval_ops; atomic_t fscache_n_retrieval_op_waits; atomic_t fscache_n_stores; atomic_t fscache_n_stores_ok; atomic_t fscache_n_stores_again; atomic_t fscache_n_stores_nobufs; atomic_t fscache_n_stores_oom; atomic_t fscache_n_store_ops; atomic_t fscache_n_store_calls; atomic_t fscache_n_store_pages; atomic_t fscache_n_store_radix_deletes; atomic_t fscache_n_store_pages_over_limit; atomic_t fscache_n_store_vmscan_not_storing; atomic_t fscache_n_store_vmscan_gone; atomic_t fscache_n_store_vmscan_busy; atomic_t fscache_n_store_vmscan_cancelled; atomic_t fscache_n_store_vmscan_wait; atomic_t fscache_n_marks; atomic_t fscache_n_uncaches; atomic_t fscache_n_acquires; atomic_t fscache_n_acquires_null; atomic_t fscache_n_acquires_no_cache; atomic_t fscache_n_acquires_ok; atomic_t fscache_n_acquires_nobufs; atomic_t fscache_n_acquires_oom; atomic_t fscache_n_invalidates; atomic_t fscache_n_invalidates_run; atomic_t fscache_n_updates; atomic_t fscache_n_updates_null; atomic_t fscache_n_updates_run; atomic_t fscache_n_relinquishes; atomic_t fscache_n_relinquishes_null; atomic_t fscache_n_relinquishes_waitcrt; atomic_t fscache_n_relinquishes_retire; atomic_t fscache_n_cookie_index; atomic_t fscache_n_cookie_data; atomic_t fscache_n_cookie_special; atomic_t fscache_n_object_alloc; atomic_t fscache_n_object_no_alloc; atomic_t fscache_n_object_lookups; atomic_t fscache_n_object_lookups_negative; atomic_t fscache_n_object_lookups_positive; atomic_t fscache_n_object_lookups_timed_out; atomic_t fscache_n_object_created; atomic_t fscache_n_object_avail; atomic_t fscache_n_object_dead; atomic_t fscache_n_checkaux_none; atomic_t fscache_n_checkaux_okay; atomic_t fscache_n_checkaux_update; atomic_t fscache_n_checkaux_obsolete; atomic_t fscache_n_cop_alloc_object; atomic_t fscache_n_cop_lookup_object; atomic_t fscache_n_cop_lookup_complete; atomic_t fscache_n_cop_grab_object; atomic_t fscache_n_cop_invalidate_object; atomic_t fscache_n_cop_update_object; 
atomic_t fscache_n_cop_drop_object; atomic_t fscache_n_cop_put_object; atomic_t fscache_n_cop_sync_cache; atomic_t fscache_n_cop_attr_changed; atomic_t fscache_n_cop_read_or_alloc_page; atomic_t fscache_n_cop_read_or_alloc_pages; atomic_t fscache_n_cop_allocate_page; atomic_t fscache_n_cop_allocate_pages; atomic_t fscache_n_cop_write_page; atomic_t fscache_n_cop_uncache_page; atomic_t fscache_n_cop_dissociate_pages; /* * display the general statistics */ static int fscache_stats_show(struct seq_file *m, void *v) { seq_puts(m, "FS-Cache statistics\n"); seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n", atomic_read(&fscache_n_cookie_index), atomic_read(&fscache_n_cookie_data), atomic_read(&fscache_n_cookie_special)); seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n", atomic_read(&fscache_n_object_alloc), atomic_read(&fscache_n_object_no_alloc), atomic_read(&fscache_n_object_avail), atomic_read(&fscache_n_object_dead)); seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n", atomic_read(&fscache_n_checkaux_none), atomic_read(&fscache_n_checkaux_okay), atomic_read(&fscache_n_checkaux_update), atomic_read(&fscache_n_checkaux_obsolete)); seq_printf(m, "Pages : mrk=%u unc=%u\n", atomic_read(&fscache_n_marks), atomic_read(&fscache_n_uncaches)); seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u" " oom=%u\n", atomic_read(&fscache_n_acquires), atomic_read(&fscache_n_acquires_null), atomic_read(&fscache_n_acquires_no_cache), atomic_read(&fscache_n_acquires_ok), atomic_read(&fscache_n_acquires_nobufs), atomic_read(&fscache_n_acquires_oom)); seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n", atomic_read(&fscache_n_object_lookups), atomic_read(&fscache_n_object_lookups_negative), atomic_read(&fscache_n_object_lookups_positive), atomic_read(&fscache_n_object_created), atomic_read(&fscache_n_object_lookups_timed_out)); seq_printf(m, "Invals : n=%u run=%u\n", atomic_read(&fscache_n_invalidates), atomic_read(&fscache_n_invalidates_run)); seq_printf(m, "Updates: n=%u nul=%u run=%u\n", atomic_read(&fscache_n_updates), atomic_read(&fscache_n_updates_null), atomic_read(&fscache_n_updates_run)); seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n", atomic_read(&fscache_n_relinquishes), atomic_read(&fscache_n_relinquishes_null), atomic_read(&fscache_n_relinquishes_waitcrt), atomic_read(&fscache_n_relinquishes_retire)); seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n", atomic_read(&fscache_n_attr_changed), atomic_read(&fscache_n_attr_changed_ok), atomic_read(&fscache_n_attr_changed_nobufs), atomic_read(&fscache_n_attr_changed_nomem), atomic_read(&fscache_n_attr_changed_calls)); seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n", atomic_read(&fscache_n_allocs), atomic_read(&fscache_n_allocs_ok), atomic_read(&fscache_n_allocs_wait), atomic_read(&fscache_n_allocs_nobufs), atomic_read(&fscache_n_allocs_intr)); seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n", atomic_read(&fscache_n_alloc_ops), atomic_read(&fscache_n_alloc_op_waits), atomic_read(&fscache_n_allocs_object_dead)); seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u" " int=%u oom=%u\n", atomic_read(&fscache_n_retrievals), atomic_read(&fscache_n_retrievals_ok), atomic_read(&fscache_n_retrievals_wait), atomic_read(&fscache_n_retrievals_nodata), atomic_read(&fscache_n_retrievals_nobufs), atomic_read(&fscache_n_retrievals_intr), atomic_read(&fscache_n_retrievals_nomem)); seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n", atomic_read(&fscache_n_retrieval_ops), atomic_read(&fscache_n_retrieval_op_waits), 
atomic_read(&fscache_n_retrievals_object_dead)); seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n", atomic_read(&fscache_n_stores), atomic_read(&fscache_n_stores_ok), atomic_read(&fscache_n_stores_again), atomic_read(&fscache_n_stores_nobufs), atomic_read(&fscache_n_stores_oom)); seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n", atomic_read(&fscache_n_store_ops), atomic_read(&fscache_n_store_calls), atomic_read(&fscache_n_store_pages), atomic_read(&fscache_n_store_radix_deletes), atomic_read(&fscache_n_store_pages_over_limit)); seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n", atomic_read(&fscache_n_store_vmscan_not_storing), atomic_read(&fscache_n_store_vmscan_gone), atomic_read(&fscache_n_store_vmscan_busy), atomic_read(&fscache_n_store_vmscan_cancelled), atomic_read(&fscache_n_store_vmscan_wait)); seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n", atomic_read(&fscache_n_op_pend), atomic_read(&fscache_n_op_run), atomic_read(&fscache_n_op_enqueue), atomic_read(&fscache_n_op_cancelled), atomic_read(&fscache_n_op_rejected)); seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n", atomic_read(&fscache_n_op_deferred_release), atomic_read(&fscache_n_op_release), atomic_read(&fscache_n_op_gc)); seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n", atomic_read(&fscache_n_cop_alloc_object), atomic_read(&fscache_n_cop_lookup_object), atomic_read(&fscache_n_cop_lookup_complete), atomic_read(&fscache_n_cop_grab_object)); seq_printf(m, "CacheOp: inv=%d upo=%d dro=%d pto=%d atc=%d syn=%d\n", atomic_read(&fscache_n_cop_invalidate_object), atomic_read(&fscache_n_cop_update_object), atomic_read(&fscache_n_cop_drop_object), atomic_read(&fscache_n_cop_put_object), atomic_read(&fscache_n_cop_attr_changed), atomic_read(&fscache_n_cop_sync_cache)); seq_printf(m, "CacheOp: rap=%d ras=%d alp=%d als=%d wrp=%d ucp=%d dsp=%d\n", atomic_read(&fscache_n_cop_read_or_alloc_page), atomic_read(&fscache_n_cop_read_or_alloc_pages), atomic_read(&fscache_n_cop_allocate_page), atomic_read(&fscache_n_cop_allocate_pages), atomic_read(&fscache_n_cop_write_page), atomic_read(&fscache_n_cop_uncache_page), atomic_read(&fscache_n_cop_dissociate_pages)); return 0; } /* * open "/proc/fs/fscache/stats" allowing provision of a statistical summary */ static int fscache_stats_open(struct inode *inode, struct file *file) { return single_open(file, fscache_stats_show, NULL); } const struct file_operations fscache_stats_fops = { .owner = THIS_MODULE, .open = fscache_stats_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, };
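/*
 * Illustrative sketch (not a file from this tree): the stats file above is
 * the canonical single_open()/seq_file pattern, reduced here to a single
 * counter. The "demo" names are hypothetical; hooking the fops up with
 * proc_create() is left out, as fscache does that in its own proc setup.
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/atomic.h>

static atomic_t demo_n_opens = ATOMIC_INIT(0);

static int demo_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "Opens  : n=%u\n", atomic_read(&demo_n_opens));
	return 0;
}

static int demo_stats_open(struct inode *inode, struct file *file)
{
	atomic_inc(&demo_n_opens);
	return single_open(file, demo_stats_show, NULL);
}

const struct file_operations demo_stats_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_stats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};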
gpl-2.0
Ander-Alvarez/UltraKernel
arch/alpha/kernel/binfmt_loader.c
4596
1120
#include <linux/init.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/mm_types.h> #include <linux/binfmts.h> #include <linux/a.out.h> static int load_binary(struct linux_binprm *bprm, struct pt_regs *regs) { struct exec *eh = (struct exec *)bprm->buf; unsigned long loader; struct file *file; int retval; if (eh->fh.f_magic != 0x183 || (eh->fh.f_flags & 0x3000) != 0x3000) return -ENOEXEC; if (bprm->loader) return -ENOEXEC; allow_write_access(bprm->file); fput(bprm->file); bprm->file = NULL; loader = bprm->vma->vm_end - sizeof(void *); file = open_exec("/sbin/loader"); retval = PTR_ERR(file); if (IS_ERR(file)) return retval; /* Remember if the application is TASO. */ bprm->taso = eh->ah.entry < 0x100000000UL; bprm->file = file; bprm->loader = loader; retval = prepare_binprm(bprm); if (retval < 0) return retval; return search_binary_handler(bprm,regs); } static struct linux_binfmt loader_format = { .load_binary = load_binary, }; static int __init init_loader_binfmt(void) { insert_binfmt(&loader_format); return 0; } arch_initcall(init_loader_binfmt);
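/*
 * Illustrative userspace sketch (not part of the kernel): the magic test in
 * load_binary() above accepts only dynamically linked Alpha ECOFF images
 * (f_magic == 0x183 with both 0x3000 flag bits set). The constants come
 * from the code; the little-endian byte offsets of f_magic (0) and f_flags
 * (22) follow the Alpha ECOFF filehdr layout and should be treated as an
 * assumption of this sketch.
 */
#include <stdio.h>
#include <stdint.h>

int main(int argc, char **argv)
{
	unsigned char buf[24];
	uint16_t f_magic, f_flags;
	FILE *f;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(buf, 1, sizeof(buf), f) != sizeof(buf)) {
		fclose(f);
		return 1;
	}
	fclose(f);

	f_magic = buf[0] | (buf[1] << 8);
	f_flags = buf[22] | (buf[23] << 8);

	if (f_magic != 0x183 || (f_flags & 0x3000) != 0x3000)
		printf("%s: would be rejected with -ENOEXEC\n", argv[1]);
	else
		printf("%s: would be handed to /sbin/loader\n", argv[1]);
	return 0;
}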
gpl-2.0
MassStash/htc_m8_kernel_GPE_5.0.1
fs/dlm/dir.c
4852
10634
/****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. ** ** This copyrighted material is made available to anyone wishing to use, ** modify, copy, or redistribute it subject to the terms and conditions ** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ #include "dlm_internal.h" #include "lockspace.h" #include "member.h" #include "lowcomms.h" #include "rcom.h" #include "config.h" #include "memory.h" #include "recover.h" #include "util.h" #include "lock.h" #include "dir.h" static void put_free_de(struct dlm_ls *ls, struct dlm_direntry *de) { spin_lock(&ls->ls_recover_list_lock); list_add(&de->list, &ls->ls_recover_list); spin_unlock(&ls->ls_recover_list_lock); } static struct dlm_direntry *get_free_de(struct dlm_ls *ls, int len) { int found = 0; struct dlm_direntry *de; spin_lock(&ls->ls_recover_list_lock); list_for_each_entry(de, &ls->ls_recover_list, list) { if (de->length == len) { list_del(&de->list); de->master_nodeid = 0; memset(de->name, 0, len); found = 1; break; } } spin_unlock(&ls->ls_recover_list_lock); if (!found) de = kzalloc(sizeof(struct dlm_direntry) + len, GFP_NOFS); return de; } void dlm_clear_free_entries(struct dlm_ls *ls) { struct dlm_direntry *de; spin_lock(&ls->ls_recover_list_lock); while (!list_empty(&ls->ls_recover_list)) { de = list_entry(ls->ls_recover_list.next, struct dlm_direntry, list); list_del(&de->list); kfree(de); } spin_unlock(&ls->ls_recover_list_lock); } /* * We use the upper 16 bits of the hash value to select the directory node. * Low bits are used for distribution of rsb's among hash buckets on each node. * * To give the exact range wanted (0 to num_nodes-1), we apply a modulus of * num_nodes to the hash value. This value in the desired range is used as an * offset into the sorted list of nodeid's to give the particular nodeid. */ int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash) { struct list_head *tmp; struct dlm_member *memb = NULL; uint32_t node, n = 0; int nodeid; if (ls->ls_num_nodes == 1) { nodeid = dlm_our_nodeid(); goto out; } if (ls->ls_node_array) { node = (hash >> 16) % ls->ls_total_weight; nodeid = ls->ls_node_array[node]; goto out; } /* make_member_array() failed to kmalloc ls_node_array... 
*/ node = (hash >> 16) % ls->ls_num_nodes; list_for_each(tmp, &ls->ls_nodes) { if (n++ != node) continue; memb = list_entry(tmp, struct dlm_member, list); break; } DLM_ASSERT(memb , printk("num_nodes=%u n=%u node=%u\n", ls->ls_num_nodes, n, node);); nodeid = memb->nodeid; out: return nodeid; } int dlm_dir_nodeid(struct dlm_rsb *r) { return dlm_hash2nodeid(r->res_ls, r->res_hash); } static inline uint32_t dir_hash(struct dlm_ls *ls, char *name, int len) { uint32_t val; val = jhash(name, len, 0); val &= (ls->ls_dirtbl_size - 1); return val; } static void add_entry_to_hash(struct dlm_ls *ls, struct dlm_direntry *de) { uint32_t bucket; bucket = dir_hash(ls, de->name, de->length); list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list); } static struct dlm_direntry *search_bucket(struct dlm_ls *ls, char *name, int namelen, uint32_t bucket) { struct dlm_direntry *de; list_for_each_entry(de, &ls->ls_dirtbl[bucket].list, list) { if (de->length == namelen && !memcmp(name, de->name, namelen)) goto out; } de = NULL; out: return de; } void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen) { struct dlm_direntry *de; uint32_t bucket; bucket = dir_hash(ls, name, namelen); spin_lock(&ls->ls_dirtbl[bucket].lock); de = search_bucket(ls, name, namelen, bucket); if (!de) { log_error(ls, "remove fr %u none", nodeid); goto out; } if (de->master_nodeid != nodeid) { log_error(ls, "remove fr %u ID %u", nodeid, de->master_nodeid); goto out; } list_del(&de->list); kfree(de); out: spin_unlock(&ls->ls_dirtbl[bucket].lock); } void dlm_dir_clear(struct dlm_ls *ls) { struct list_head *head; struct dlm_direntry *de; int i; DLM_ASSERT(list_empty(&ls->ls_recover_list), ); for (i = 0; i < ls->ls_dirtbl_size; i++) { spin_lock(&ls->ls_dirtbl[i].lock); head = &ls->ls_dirtbl[i].list; while (!list_empty(head)) { de = list_entry(head->next, struct dlm_direntry, list); list_del(&de->list); put_free_de(ls, de); } spin_unlock(&ls->ls_dirtbl[i].lock); } } int dlm_recover_directory(struct dlm_ls *ls) { struct dlm_member *memb; struct dlm_direntry *de; char *b, *last_name = NULL; int error = -ENOMEM, last_len, count = 0; uint16_t namelen; log_debug(ls, "dlm_recover_directory"); if (dlm_no_directory(ls)) goto out_status; dlm_dir_clear(ls); last_name = kmalloc(DLM_RESNAME_MAXLEN, GFP_NOFS); if (!last_name) goto out; list_for_each_entry(memb, &ls->ls_nodes, list) { memset(last_name, 0, DLM_RESNAME_MAXLEN); last_len = 0; for (;;) { int left; error = dlm_recovery_stopped(ls); if (error) goto out_free; error = dlm_rcom_names(ls, memb->nodeid, last_name, last_len); if (error) goto out_free; schedule(); /* * pick namelen/name pairs out of received buffer */ b = ls->ls_recover_buf->rc_buf; left = ls->ls_recover_buf->rc_header.h_length; left -= sizeof(struct dlm_rcom); for (;;) { __be16 v; error = -EINVAL; if (left < sizeof(__be16)) goto out_free; memcpy(&v, b, sizeof(__be16)); namelen = be16_to_cpu(v); b += sizeof(__be16); left -= sizeof(__be16); /* namelen of 0xFFFFF marks end of names for this node; namelen of 0 marks end of the buffer */ if (namelen == 0xFFFF) goto done; if (!namelen) break; if (namelen > left) goto out_free; if (namelen > DLM_RESNAME_MAXLEN) goto out_free; error = -ENOMEM; de = get_free_de(ls, namelen); if (!de) goto out_free; de->master_nodeid = memb->nodeid; de->length = namelen; last_len = namelen; memcpy(de->name, b, namelen); memcpy(last_name, b, namelen); b += namelen; left -= namelen; add_entry_to_hash(ls, de); count++; } } done: ; } out_status: error = 0; log_debug(ls, "dlm_recover_directory 
%d entries", count); out_free: kfree(last_name); out: dlm_clear_free_entries(ls); return error; } static int get_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen, int *r_nodeid) { struct dlm_direntry *de, *tmp; uint32_t bucket; bucket = dir_hash(ls, name, namelen); spin_lock(&ls->ls_dirtbl[bucket].lock); de = search_bucket(ls, name, namelen, bucket); if (de) { *r_nodeid = de->master_nodeid; spin_unlock(&ls->ls_dirtbl[bucket].lock); if (*r_nodeid == nodeid) return -EEXIST; return 0; } spin_unlock(&ls->ls_dirtbl[bucket].lock); if (namelen > DLM_RESNAME_MAXLEN) return -EINVAL; de = kzalloc(sizeof(struct dlm_direntry) + namelen, GFP_NOFS); if (!de) return -ENOMEM; de->master_nodeid = nodeid; de->length = namelen; memcpy(de->name, name, namelen); spin_lock(&ls->ls_dirtbl[bucket].lock); tmp = search_bucket(ls, name, namelen, bucket); if (tmp) { kfree(de); de = tmp; } else { list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list); } *r_nodeid = de->master_nodeid; spin_unlock(&ls->ls_dirtbl[bucket].lock); return 0; } int dlm_dir_lookup(struct dlm_ls *ls, int nodeid, char *name, int namelen, int *r_nodeid) { return get_entry(ls, nodeid, name, namelen, r_nodeid); } static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, char *name, int len) { struct dlm_rsb *r; uint32_t hash, bucket; int rv; hash = jhash(name, len, 0); bucket = hash & (ls->ls_rsbtbl_size - 1); spin_lock(&ls->ls_rsbtbl[bucket].lock); rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, 0, &r); if (rv) rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss, name, len, 0, &r); spin_unlock(&ls->ls_rsbtbl[bucket].lock); if (!rv) return r; down_read(&ls->ls_root_sem); list_for_each_entry(r, &ls->ls_root_list, res_root_list) { if (len == r->res_length && !memcmp(name, r->res_name, len)) { up_read(&ls->ls_root_sem); log_error(ls, "find_rsb_root revert to root_list %s", r->res_name); return r; } } up_read(&ls->ls_root_sem); return NULL; } /* Find the rsb where we left off (or start again), then send rsb names for rsb's we're master of and whose directory node matches the requesting node. inbuf is the rsb name last sent, inlen is the name's length */ void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen, char *outbuf, int outlen, int nodeid) { struct list_head *list; struct dlm_rsb *r; int offset = 0, dir_nodeid; __be16 be_namelen; down_read(&ls->ls_root_sem); if (inlen > 1) { r = find_rsb_root(ls, inbuf, inlen); if (!r) { inbuf[inlen - 1] = '\0'; log_error(ls, "copy_master_names from %d start %d %s", nodeid, inlen, inbuf); goto out; } list = r->res_root_list.next; } else { list = ls->ls_root_list.next; } for (offset = 0; list != &ls->ls_root_list; list = list->next) { r = list_entry(list, struct dlm_rsb, res_root_list); if (r->res_nodeid) continue; dir_nodeid = dlm_dir_nodeid(r); if (dir_nodeid != nodeid) continue; /* * The block ends when we can't fit the following in the * remaining buffer space: * namelen (uint16_t) + * name (r->res_length) + * end-of-block record 0x0000 (uint16_t) */ if (offset + sizeof(uint16_t)*2 + r->res_length > outlen) { /* Write end-of-block record */ be_namelen = cpu_to_be16(0); memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); offset += sizeof(__be16); goto out; } be_namelen = cpu_to_be16(r->res_length); memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); offset += sizeof(__be16); memcpy(outbuf + offset, r->res_name, r->res_length); offset += r->res_length; } /* * If we've reached the end of the list (and there's room) write a * terminating record. 
*/ if ((list == &ls->ls_root_list) && (offset + sizeof(uint16_t) <= outlen)) { be_namelen = cpu_to_be16(0xFFFF); memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); offset += sizeof(__be16); } out: up_read(&ls->ls_root_sem); }
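/*
 * Illustrative sketch (not part of the DLM): the recovery code above
 * exchanges directory entries as a stream of big-endian 16-bit lengths,
 * each followed by that many name bytes; a zero length ends one buffer and
 * 0xFFFF ends the whole listing. This standalone parser walks the same
 * framing; the buffer contents are invented.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void parse_names(const unsigned char *buf, size_t left)
{
	while (left >= 2) {
		uint16_t namelen = (buf[0] << 8) | buf[1];	/* be16 */

		buf += 2;
		left -= 2;

		if (namelen == 0xFFFF) {
			printf("end of names\n");
			return;
		}
		if (namelen == 0) {
			printf("end of buffer, request the next block\n");
			return;
		}
		if (namelen > left) {
			printf("malformed entry\n");
			return;
		}
		printf("resource: %.*s\n", (int)namelen, buf);
		buf += namelen;
		left -= namelen;
	}
}

int main(void)
{
	/* two names followed by the 0xFFFF end-of-names record */
	unsigned char buf[] = { 0x00, 0x05, 'l', 'o', 'c', 'k', 'A',
				0x00, 0x05, 'l', 'o', 'c', 'k', 'B',
				0xff, 0xff };

	parse_names(buf, sizeof(buf));
	return 0;
}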
gpl-2.0
KylinUI/android_kernel_htc_msm8960
drivers/scsi/aacraid/comminit.c
4852
14963
/* * Adaptec AAC series RAID controller driver * (c) Copyright 2001 Red Hat Inc. * * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * Module Name: * comminit.c * * Abstract: This supports the initialization of the host adapter commuication interface. * This is a platform dependent module for the pci cyclone board. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/completion.h> #include <linux/mm.h> #include <scsi/scsi_host.h> #include "aacraid.h" struct aac_common aac_config = { .irq_mod = 1 }; static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign) { unsigned char *base; unsigned long size, align; const unsigned long fibsize = 4096; const unsigned long printfbufsiz = 256; unsigned long host_rrq_size = 0; struct aac_init *init; dma_addr_t phys; unsigned long aac_max_hostphysmempages; if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) host_rrq_size = (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) * sizeof(u32); size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz + host_rrq_size; base = pci_alloc_consistent(dev->pdev, size, &phys); if(base == NULL) { printk(KERN_ERR "aacraid: unable to create mapping.\n"); return 0; } dev->comm_addr = (void *)base; dev->comm_phys = phys; dev->comm_size = size; if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { dev->host_rrq = (u32 *)(base + fibsize); dev->host_rrq_pa = phys + fibsize; memset(dev->host_rrq, 0, host_rrq_size); } dev->init = (struct aac_init *)(base + fibsize + host_rrq_size); dev->init_pa = phys + fibsize + host_rrq_size; init = dev->init; init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION); if (dev->max_fib_size != sizeof(struct hw_fib)) init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4); init->MiniPortRevision = cpu_to_le32(Sa_MINIPORT_REVISION); init->fsrev = cpu_to_le32(dev->fsrev); /* * Adapter Fibs are the first thing allocated so that they * start page aligned */ dev->aif_base_va = (struct hw_fib *)base; init->AdapterFibsVirtualAddress = 0; init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys); init->AdapterFibsSize = cpu_to_le32(fibsize); init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib)); /* * number of 4k pages of host physical memory. The aacraid fw needs * this number to be less than 4gb worth of pages. New firmware doesn't * have any issues with the mapping system, but older Firmware did, and * had *troubles* dealing with the math overloading past 32 bits, thus * we must limit this field. 
*/ aac_max_hostphysmempages = dma_get_required_mask(&dev->pdev->dev) >> 12; if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES) init->HostPhysMemPages = cpu_to_le32(aac_max_hostphysmempages); else init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES); init->InitFlags = 0; if (dev->comm_interface == AAC_COMM_MESSAGE) { init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n")); } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6); init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_TYPE1_SUPPORTED); dprintk((KERN_WARNING "aacraid: New Comm Interface type1 enabled\n")); } init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME | INITFLAGS_DRIVER_SUPPORTS_PM); init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); init->MaxFibSize = cpu_to_le32(dev->max_fib_size); init->MaxNumAif = cpu_to_le32(dev->max_num_aif); init->HostRRQ_AddrHigh = (u32)((u64)dev->host_rrq_pa >> 32); init->HostRRQ_AddrLow = (u32)(dev->host_rrq_pa & 0xffffffff); /* * Increment the base address by the amount already used */ base = base + fibsize + host_rrq_size + sizeof(struct aac_init); phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size + sizeof(struct aac_init)); /* * Align the beginning of Headers to commalign */ align = (commalign - ((uintptr_t)(base) & (commalign - 1))); base = base + align; phys = phys + align; /* * Fill in addresses of the Comm Area Headers and Queues */ *commaddr = base; init->CommHeaderAddress = cpu_to_le32((u32)phys); /* * Increment the base address by the size of the CommArea */ base = base + commsize; phys = phys + commsize; /* * Place the Printf buffer area after the Fast I/O comm area. */ dev->printfbuf = (void *)base; init->printfbuf = cpu_to_le32(phys); init->printfbufsiz = cpu_to_le32(printfbufsiz); memset(base, 0, printfbufsiz); return 1; } static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize) { q->numpending = 0; q->dev = dev; init_waitqueue_head(&q->cmdready); INIT_LIST_HEAD(&q->cmdq); init_waitqueue_head(&q->qfull); spin_lock_init(&q->lockdata); q->lock = &q->lockdata; q->headers.producer = (__le32 *)mem; q->headers.consumer = (__le32 *)(mem+1); *(q->headers.producer) = cpu_to_le32(qsize); *(q->headers.consumer) = cpu_to_le32(qsize); q->entries = qsize; } /** * aac_send_shutdown - shutdown an adapter * @dev: Adapter to shutdown * * This routine will send a VM_CloseAll (shutdown) request to the adapter. */ int aac_send_shutdown(struct aac_dev * dev) { struct fib * fibctx; struct aac_close *cmd; int status; fibctx = aac_fib_alloc(dev); if (!fibctx) return -ENOMEM; aac_fib_init(fibctx); cmd = (struct aac_close *) fib_data(fibctx); cmd->command = cpu_to_le32(VM_CloseAll); cmd->cid = cpu_to_le32(0xffffffff); status = aac_fib_send(ContainerCommand, fibctx, sizeof(struct aac_close), FsaNormal, -2 /* Timeout silently */, 1, NULL, NULL); if (status >= 0) aac_fib_complete(fibctx); /* FIB should be freed only after getting the response from the F/W */ if (status != -ERESTARTSYS) aac_fib_free(fibctx); return status; } /** * aac_comm_init - Initialise FSA data structures * @dev: Adapter to initialise * * Initializes the data structures that are required for the FSA commuication * interface to operate. * Returns * 1 - if we were able to init the commuication interface. 
* 0 - If there were errors initializing. This is a fatal error. */ static int aac_comm_init(struct aac_dev * dev) { unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2; unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES; u32 *headers; struct aac_entry * queues; unsigned long size; struct aac_queue_block * comm = dev->queues; /* * Now allocate and initialize the zone structures used as our * pool of FIB context records. The size of the zone is based * on the system memory size. We also initialize the mutex used * to protect the zone. */ spin_lock_init(&dev->fib_lock); /* * Allocate the physically contiguous space for the communication * queue headers. */ size = hdrsize + queuesize; if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT)) return -ENOMEM; queues = (struct aac_entry *)(((ulong)headers) + hdrsize); /* Adapter to Host normal priority Command queue */ comm->queue[HostNormCmdQueue].base = queues; aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES); queues += HOST_NORM_CMD_ENTRIES; headers += 2; /* Adapter to Host high priority command queue */ comm->queue[HostHighCmdQueue].base = queues; aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES); queues += HOST_HIGH_CMD_ENTRIES; headers += 2; /* Host to adapter normal priority command queue */ comm->queue[AdapNormCmdQueue].base = queues; aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES); queues += ADAP_NORM_CMD_ENTRIES; headers += 2; /* host to adapter high priority command queue */ comm->queue[AdapHighCmdQueue].base = queues; aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES); queues += ADAP_HIGH_CMD_ENTRIES; headers += 2; /* adapter to host normal priority response queue */ comm->queue[HostNormRespQueue].base = queues; aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES); queues += HOST_NORM_RESP_ENTRIES; headers += 2; /* adapter to host high priority response queue */ comm->queue[HostHighRespQueue].base = queues; aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES); queues += HOST_HIGH_RESP_ENTRIES; headers += 2; /* host to adapter normal priority response queue */ comm->queue[AdapNormRespQueue].base = queues; aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES); queues += ADAP_NORM_RESP_ENTRIES; headers += 2; /* host to adapter high priority response queue */ comm->queue[AdapHighRespQueue].base = queues; aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES); comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock; comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock; comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock; comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock; return 0; } struct aac_dev *aac_init_adapter(struct aac_dev *dev) { u32 status[5]; struct Scsi_Host * host = dev->scsi_host_ptr; extern int aac_sync_mode; /* * Check the preferred comm settings, defaults from template.
*/ dev->management_fib_count = 0; spin_lock_init(&dev->manage_lock); spin_lock_init(&dev->sync_lock); dev->max_fib_size = sizeof(struct hw_fib); dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size - sizeof(struct aac_fibhdr) - sizeof(struct aac_write) + sizeof(struct sgentry)) / sizeof(struct sgentry); dev->comm_interface = AAC_COMM_PRODUCER; dev->raw_io_interface = dev->raw_io_64 = 0; if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) && (status[0] == 0x00000001)) { if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64)) dev->raw_io_64 = 1; dev->sync_mode = aac_sync_mode; if (dev->a_ops.adapter_comm && (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) { dev->comm_interface = AAC_COMM_MESSAGE; dev->raw_io_interface = 1; if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1))) { /* driver supports TYPE1 (Tupelo) */ dev->comm_interface = AAC_COMM_MESSAGE_TYPE1; } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE4)) || (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3)) || (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) { /* driver doesn't support TYPE2 (Series7), TYPE3 and TYPE4 */ /* switch to sync. mode */ dev->comm_interface = AAC_COMM_MESSAGE_TYPE1; dev->sync_mode = 1; } } if ((dev->comm_interface == AAC_COMM_MESSAGE) && (status[2] > dev->base_size)) { aac_adapter_ioremap(dev, 0); dev->base_size = status[2]; if (aac_adapter_ioremap(dev, status[2])) { /* remap failed, go back ... */ dev->comm_interface = AAC_COMM_PRODUCER; if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) { printk(KERN_WARNING "aacraid: unable to map adapter.\n"); return NULL; } } } } if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS, 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, status+3, status+4)) && (status[0] == 0x00000001)) { /* * status[1] >> 16 maximum command size in KB * status[1] & 0xFFFF maximum FIB size * status[2] >> 16 maximum SG elements to driver * status[2] & 0xFFFF maximum SG elements from driver * status[3] & 0xFFFF maximum number FIBs outstanding */ host->max_sectors = (status[1] >> 16) << 1; /* Multiple of 32 for PMC */ dev->max_fib_size = status[1] & 0xFFE0; host->sg_tablesize = status[2] >> 16; dev->sg_tablesize = status[2] & 0xFFFF; host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB; dev->max_num_aif = status[4] & 0xFFFF; /* * NOTE: * All these overrides are based on a fixed internal * knowledge and understanding of existing adapters, * acbsize should be set with caution. 
*/ if (acbsize == 512) { host->max_sectors = AAC_MAX_32BIT_SGBCOUNT; dev->max_fib_size = 512; dev->sg_tablesize = host->sg_tablesize = (512 - sizeof(struct aac_fibhdr) - sizeof(struct aac_write) + sizeof(struct sgentry)) / sizeof(struct sgentry); host->can_queue = AAC_NUM_IO_FIB; } else if (acbsize == 2048) { host->max_sectors = 512; dev->max_fib_size = 2048; host->sg_tablesize = 65; dev->sg_tablesize = 81; host->can_queue = 512 - AAC_NUM_MGT_FIB; } else if (acbsize == 4096) { host->max_sectors = 1024; dev->max_fib_size = 4096; host->sg_tablesize = 129; dev->sg_tablesize = 166; host->can_queue = 256 - AAC_NUM_MGT_FIB; } else if (acbsize == 8192) { host->max_sectors = 2048; dev->max_fib_size = 8192; host->sg_tablesize = 257; dev->sg_tablesize = 337; host->can_queue = 128 - AAC_NUM_MGT_FIB; } else if (acbsize > 0) { printk("Illegal acbsize=%d ignored\n", acbsize); } } { if (numacb > 0) { if (numacb < host->can_queue) host->can_queue = numacb; else printk("numacb=%d ignored\n", numacb); } } /* * Ok now init the communication subsystem */ dev->queues = kzalloc(sizeof(struct aac_queue_block), GFP_KERNEL); if (dev->queues == NULL) { printk(KERN_ERR "Error could not allocate comm region.\n"); return NULL; } if (aac_comm_init(dev)<0){ kfree(dev->queues); return NULL; } /* * Initialize the list of fibs */ if (aac_fib_setup(dev) < 0) { kfree(dev->queues); return NULL; } INIT_LIST_HEAD(&dev->fib_list); INIT_LIST_HEAD(&dev->sync_fib_list); return dev; }
gpl-2.0
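A note on the carving arithmetic in aac_alloc_comm() above: the driver bumps one contiguous DMA allocation past the fib area and init struct, then rounds the running pointer up to the next commalign boundary with (commalign - (base & (commalign - 1))). Below is a minimal userspace sketch of that bump-and-align pattern, with made-up sizes and names (standard C, not the driver's API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const size_t fibsize = 4096, commsize = 8192, commalign = 2048;
	/* One contiguous block, standing in for the pci_alloc_consistent()
	 * region that aac_alloc_comm() carves up. */
	unsigned char *block = malloc(fibsize + commsize + commalign);
	if (!block)
		return 1;

	/* Bump past the fib area, as the driver does... */
	unsigned char *base = block + fibsize;
	/* ...then round up to the next commalign boundary (power of two).
	 * Like the driver's arithmetic, this always advances, even when
	 * base happens to be aligned already. */
	size_t align = commalign - ((uintptr_t)base & (commalign - 1));
	base += align;

	printf("comm area at offset %zu, aligned: %s\n",
	       (size_t)(base - block),
	       (uintptr_t)base % commalign == 0 ? "yes" : "no");
	free(block);
	return 0;
}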
talnoah/msm8960_Carbon-Kernel
arch/arm/plat-omap/fb.c
4852
2636
/* * File: arch/arm/plat-omap/fb.c * * Framebuffer device registration for TI OMAP platforms * * Copyright (C) 2006 Nokia Corporation * Author: Imre Deak <imre.deak@nokia.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/memblock.h> #include <linux/io.h> #include <linux/omapfb.h> #include <mach/hardware.h> #include <asm/mach/map.h> #include <plat/board.h> #if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE) static bool omapfb_lcd_configured; static struct omapfb_platform_data omapfb_config; static u64 omap_fb_dma_mask = ~(u32)0; static struct platform_device omap_fb_device = { .name = "omapfb", .id = -1, .dev = { .dma_mask = &omap_fb_dma_mask, .coherent_dma_mask = ~(u32)0, .platform_data = &omapfb_config, }, .num_resources = 0, }; void __init omapfb_set_lcd_config(const struct omap_lcd_config *config) { omapfb_config.lcd = *config; omapfb_lcd_configured = true; } static int __init omap_init_fb(void) { /* * If the board file has not set the lcd config with * omapfb_set_lcd_config(), don't bother registering the omapfb device */ if (!omapfb_lcd_configured) return 0; return platform_device_register(&omap_fb_device); } arch_initcall(omap_init_fb); #elif defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE) static u64 omap_fb_dma_mask = ~(u32)0; static struct omapfb_platform_data omapfb_config; static struct platform_device omap_fb_device = { .name = "omapfb", .id = -1, .dev = { .dma_mask = &omap_fb_dma_mask, .coherent_dma_mask = ~(u32)0, .platform_data = &omapfb_config, }, .num_resources = 0, }; static int __init omap_init_fb(void) { return platform_device_register(&omap_fb_device); } arch_initcall(omap_init_fb); #else void __init omapfb_set_lcd_config(const struct omap_lcd_config *config) { } #endif
gpl-2.0
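The omapfb registration above hinges on init ordering: the board file must call omapfb_set_lcd_config() before the arch_initcall fires, and omap_init_fb() silently does nothing otherwise. A small standalone sketch of that configure-then-register gate, with illustrative names (plain C, not the platform-device API):

#include <stdbool.h>
#include <stdio.h>

struct lcd_config { int width, height; };

static bool lcd_configured;
static struct lcd_config lcd_config;

/* Stand-in for omapfb_set_lcd_config(): board code records its panel. */
static void set_lcd_config(const struct lcd_config *cfg)
{
	lcd_config = *cfg;
	lcd_configured = true;
}

/* Stand-in for the arch_initcall: register only when configured.
 * Returning 0 on the unconfigured path mirrors the driver, which
 * treats a missing LCD config as "nothing to do", not an error. */
static int init_fb(void)
{
	if (!lcd_configured)
		return 0;
	printf("registering fb device, panel %dx%d\n",
	       lcd_config.width, lcd_config.height);
	return 0;
}

int main(void)
{
	struct lcd_config cfg = { 800, 600 };
	set_lcd_config(&cfg);	/* board file runs before the initcall */
	return init_fb();
}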
TV-LP51-Devices/kernel_sony_msm8974
drivers/acpi/acpica/nsdumpdv.c
4852
4690
/****************************************************************************** * * Module Name: nsdump - table dumping routines for debug * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" /* TBD: This entire module is apparently obsolete and should be removed */ #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsdumpdv") #ifdef ACPI_OBSOLETE_FUNCTIONS #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) #include "acnamesp.h" /******************************************************************************* * * FUNCTION: acpi_ns_dump_one_device * * PARAMETERS: Handle - Node to be dumped * Level - Nesting level of the handle * Context - Passed into walk_namespace * return_value - Not used * * RETURN: Status * * DESCRIPTION: Dump a single Node that represents a device * This procedure is a user_function called by acpi_ns_walk_namespace. 
* ******************************************************************************/ static acpi_status acpi_ns_dump_one_device(acpi_handle obj_handle, u32 level, void *context, void **return_value) { struct acpi_device_info *info; acpi_status status; u32 i; ACPI_FUNCTION_NAME(ns_dump_one_device); status = acpi_ns_dump_one_object(obj_handle, level, context, return_value); status = acpi_get_object_info(obj_handle, &info); if (ACPI_SUCCESS(status)) { for (i = 0; i < level; i++) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES, " ")); } ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES, " HID: %s, ADR: %8.8X%8.8X, Status: %X\n", info->hardware_id.string, ACPI_FORMAT_UINT64(info->address), info->current_status)); ACPI_FREE(info); } return (status); } /******************************************************************************* * * FUNCTION: acpi_ns_dump_root_devices * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: Dump all objects of type "device" * ******************************************************************************/ void acpi_ns_dump_root_devices(void) { acpi_handle sys_bus_handle; acpi_status status; ACPI_FUNCTION_NAME(ns_dump_root_devices); /* Only dump the table if tracing is enabled */ if (!(ACPI_LV_TABLES & acpi_dbg_level)) { return; } status = acpi_get_handle(NULL, METHOD_NAME__SB_, &sys_bus_handle); if (ACPI_FAILURE(status)) { return; } ACPI_DEBUG_PRINT((ACPI_DB_TABLES, "Display of all devices in the namespace:\n")); status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, sys_bus_handle, ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, acpi_ns_dump_one_device, NULL, NULL, NULL); } #endif #endif
gpl-2.0
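acpi_ns_dump_one_device() above is the user_function handed to acpi_ns_walk_namespace(), which invokes it once per device node together with the node's nesting level. A self-contained sketch of that walk-with-callback shape over a toy tree (hypothetical node type, not ACPICA's):

#include <stdio.h>

struct node {
	const char *name;
	struct node *child;	/* first child */
	struct node *sibling;	/* next sibling */
};

typedef void (*walk_cb)(struct node *n, unsigned int level, void *ctx);

/* Depth-first walk that calls the callback once per node with its
 * nesting level -- the contract acpi_ns_walk_namespace() gives its
 * user_function. */
static void walk(struct node *n, unsigned int level, walk_cb cb, void *ctx)
{
	for (; n; n = n->sibling) {
		cb(n, level, ctx);
		walk(n->child, level + 1, cb, ctx);
	}
}

static void dump_one(struct node *n, unsigned int level, void *ctx)
{
	(void)ctx;
	printf("%*s%s\n", (int)(level * 2), "", n->name);	/* indent by level */
}

int main(void)
{
	struct node cpu = { "CPU0", NULL, NULL };
	struct node pci = { "PCI0", NULL, &cpu };
	struct node sb  = { "_SB_", &pci, NULL };

	walk(&sb, 0, dump_one, NULL);
	return 0;
}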
TeamVilleC2/android_kernel_htc_liberty-villec2
drivers/acpi/acpica/evxface.c
4852
28505
/****************************************************************************** * * Module Name: evxface - External interfaces for ACPI events * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <linux/export.h> #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #include "acevents.h" #include "acinterp.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evxface") /******************************************************************************* * * FUNCTION: acpi_populate_handler_object * * PARAMETERS: handler_obj - Handler object to populate * handler_type - The type of handler: * ACPI_SYSTEM_NOTIFY: system_handler (00-7f) * ACPI_DEVICE_NOTIFY: driver_handler (80-ff) * ACPI_ALL_NOTIFY: both system and device * handler - Address of the handler * context - Value passed to the handler on each GPE * next - Address of a handler object to link to * * RETURN: None * * DESCRIPTION: Populate a handler object. * ******************************************************************************/ static void acpi_populate_handler_object(struct acpi_object_notify_handler *handler_obj, u32 handler_type, acpi_notify_handler handler, void *context, struct acpi_object_notify_handler *next) { handler_obj->handler_type = handler_type; handler_obj->handler = handler; handler_obj->context = context; handler_obj->next = next; } /******************************************************************************* * * FUNCTION: acpi_add_handler_object * * PARAMETERS: parent_obj - Parent of the new object * handler - Address of the handler * context - Value passed to the handler on each GPE * * RETURN: Status * * DESCRIPTION: Create a new handler object and populate it. 
* ******************************************************************************/ static acpi_status acpi_add_handler_object(struct acpi_object_notify_handler *parent_obj, acpi_notify_handler handler, void *context) { struct acpi_object_notify_handler *handler_obj; /* The parent must not be a device notify handler object. */ if (parent_obj->handler_type & ACPI_DEVICE_NOTIFY) return AE_BAD_PARAMETER; handler_obj = ACPI_ALLOCATE_ZEROED(sizeof(*handler_obj)); if (!handler_obj) return AE_NO_MEMORY; acpi_populate_handler_object(handler_obj, ACPI_SYSTEM_NOTIFY, handler, context, parent_obj->next); parent_obj->next = handler_obj; return AE_OK; } /******************************************************************************* * * FUNCTION: acpi_install_notify_handler * * PARAMETERS: Device - The device for which notifies will be handled * handler_type - The type of handler: * ACPI_SYSTEM_NOTIFY: system_handler (00-7f) * ACPI_DEVICE_NOTIFY: driver_handler (80-ff) * ACPI_ALL_NOTIFY: both system and device * Handler - Address of the handler * Context - Value passed to the handler on each GPE * * RETURN: Status * * DESCRIPTION: Install a handler for notifies on an ACPI device * ******************************************************************************/ acpi_status acpi_install_notify_handler(acpi_handle device, u32 handler_type, acpi_notify_handler handler, void *context) { union acpi_operand_object *obj_desc; union acpi_operand_object *notify_obj; struct acpi_namespace_node *node; acpi_status status; ACPI_FUNCTION_TRACE(acpi_install_notify_handler); /* Parameter validation */ if ((!device) || (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) { return_ACPI_STATUS(AE_BAD_PARAMETER); } status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Convert and validate the device handle */ node = acpi_ns_validate_handle(device); if (!node) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } /* * Root Object: * Registering a notify handler on the root object indicates that the * caller wishes to receive notifications for all objects. Note that * only one <external> global handler can be registered (per notify type). */ if (device == ACPI_ROOT_OBJECT) { /* Make sure the handler is not already installed */ if (((handler_type & ACPI_SYSTEM_NOTIFY) && acpi_gbl_system_notify.handler) || ((handler_type & ACPI_DEVICE_NOTIFY) && acpi_gbl_device_notify.handler)) { status = AE_ALREADY_EXISTS; goto unlock_and_exit; } if (handler_type & ACPI_SYSTEM_NOTIFY) { acpi_gbl_system_notify.node = node; acpi_gbl_system_notify.handler = handler; acpi_gbl_system_notify.context = context; } if (handler_type & ACPI_DEVICE_NOTIFY) { acpi_gbl_device_notify.node = node; acpi_gbl_device_notify.handler = handler; acpi_gbl_device_notify.context = context; } /* Global notify handler installed */ } /* * All Other Objects: * Caller will only receive notifications specific to the target object. * Note that only certain object types can receive notifications. */ else { /* Notifies allowed on this object? */ if (!acpi_ev_is_notify_object(node)) { status = AE_TYPE; goto unlock_and_exit; } /* Check for an existing internal object */ obj_desc = acpi_ns_get_attached_object(node); if (obj_desc) { /* Object exists. */ /* For a device notify, make sure there's no handler. */ if ((handler_type & ACPI_DEVICE_NOTIFY) && obj_desc->common_notify.device_notify) { status = AE_ALREADY_EXISTS; goto unlock_and_exit; } /* System notifies may have more handlers installed.
*/ notify_obj = obj_desc->common_notify.system_notify; if ((handler_type & ACPI_SYSTEM_NOTIFY) && notify_obj) { struct acpi_object_notify_handler *parent_obj; if (handler_type & ACPI_DEVICE_NOTIFY) { status = AE_ALREADY_EXISTS; goto unlock_and_exit; } parent_obj = &notify_obj->notify; status = acpi_add_handler_object(parent_obj, handler, context); goto unlock_and_exit; } } else { /* Create a new object */ obj_desc = acpi_ut_create_internal_object(node->type); if (!obj_desc) { status = AE_NO_MEMORY; goto unlock_and_exit; } /* Attach new object to the Node */ status = acpi_ns_attach_object(device, obj_desc, node->type); /* Remove local reference to the object */ acpi_ut_remove_reference(obj_desc); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } } /* Install the handler */ notify_obj = acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_NOTIFY); if (!notify_obj) { status = AE_NO_MEMORY; goto unlock_and_exit; } acpi_populate_handler_object(&notify_obj->notify, handler_type, handler, context, NULL); if (handler_type & ACPI_SYSTEM_NOTIFY) { obj_desc->common_notify.system_notify = notify_obj; } if (handler_type & ACPI_DEVICE_NOTIFY) { obj_desc->common_notify.device_notify = notify_obj; } if (handler_type == ACPI_ALL_NOTIFY) { /* Extra ref if installed in both */ acpi_ut_add_reference(notify_obj); } } unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_install_notify_handler) /******************************************************************************* * * FUNCTION: acpi_remove_notify_handler * * PARAMETERS: Device - The device for which notifies will be handled * handler_type - The type of handler: * ACPI_SYSTEM_NOTIFY: system_handler (00-7f) * ACPI_DEVICE_NOTIFY: driver_handler (80-ff) * ACPI_ALL_NOTIFY: both system and device * Handler - Address of the handler * * RETURN: Status * * DESCRIPTION: Remove a handler for notifies on an ACPI device * ******************************************************************************/ acpi_status acpi_remove_notify_handler(acpi_handle device, u32 handler_type, acpi_notify_handler handler) { union acpi_operand_object *notify_obj; union acpi_operand_object *obj_desc; struct acpi_namespace_node *node; acpi_status status; ACPI_FUNCTION_TRACE(acpi_remove_notify_handler); /* Parameter validation */ if ((!device) || (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) { status = AE_BAD_PARAMETER; goto exit; } /* Make sure all deferred tasks are completed */ acpi_os_wait_events_complete(NULL); status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { goto exit; } /* Convert and validate the device handle */ node = acpi_ns_validate_handle(device); if (!node) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } /* Root Object */ if (device == ACPI_ROOT_OBJECT) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Removing notify handler for namespace root object\n")); if (((handler_type & ACPI_SYSTEM_NOTIFY) && !acpi_gbl_system_notify.handler) || ((handler_type & ACPI_DEVICE_NOTIFY) && !acpi_gbl_device_notify.handler)) { status = AE_NOT_EXIST; goto unlock_and_exit; } if (handler_type & ACPI_SYSTEM_NOTIFY) { acpi_gbl_system_notify.node = NULL; acpi_gbl_system_notify.handler = NULL; acpi_gbl_system_notify.context = NULL; } if (handler_type & ACPI_DEVICE_NOTIFY) { acpi_gbl_device_notify.node = NULL; acpi_gbl_device_notify.handler = NULL; acpi_gbl_device_notify.context = NULL; } } /* All Other Objects */ else { /* Notifies allowed on this object? 
*/ if (!acpi_ev_is_notify_object(node)) { status = AE_TYPE; goto unlock_and_exit; } /* Check for an existing internal object */ obj_desc = acpi_ns_get_attached_object(node); if (!obj_desc) { status = AE_NOT_EXIST; goto unlock_and_exit; } /* Object exists - make sure there's an existing handler */ if (handler_type & ACPI_SYSTEM_NOTIFY) { struct acpi_object_notify_handler *handler_obj; struct acpi_object_notify_handler *parent_obj; notify_obj = obj_desc->common_notify.system_notify; if (!notify_obj) { status = AE_NOT_EXIST; goto unlock_and_exit; } handler_obj = &notify_obj->notify; parent_obj = NULL; while (handler_obj->handler != handler) { if (handler_obj->next) { parent_obj = handler_obj; handler_obj = handler_obj->next; } else { break; } } if (handler_obj->handler != handler) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } /* * Remove the handler. There are three possible cases. * First, we may need to remove a non-embedded object. * Second, we may need to remove the embedded object's * handler data, while non-embedded objects exist. * Finally, we may need to remove the embedded object * entirely along with its container. */ if (parent_obj) { /* Non-embedded object is being removed. */ parent_obj->next = handler_obj->next; ACPI_FREE(handler_obj); } else if (notify_obj->notify.next) { /* * The handler matches the embedded object, but * there are more handler objects in the list. * Replace the embedded object's data with the * first next object's data and remove that * object. */ parent_obj = &notify_obj->notify; handler_obj = notify_obj->notify.next; *parent_obj = *handler_obj; ACPI_FREE(handler_obj); } else { /* No more handler objects in the list. */ obj_desc->common_notify.system_notify = NULL; acpi_ut_remove_reference(notify_obj); } } if (handler_type & ACPI_DEVICE_NOTIFY) { notify_obj = obj_desc->common_notify.device_notify; if (!notify_obj) { status = AE_NOT_EXIST; goto unlock_and_exit; } if (notify_obj->notify.handler != handler) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } /* Remove the handler */ obj_desc->common_notify.device_notify = NULL; acpi_ut_remove_reference(notify_obj); } } unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); exit: if (ACPI_FAILURE(status)) ACPI_EXCEPTION((AE_INFO, status, "Removing notify handler")); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler) /******************************************************************************* * * FUNCTION: acpi_install_exception_handler * * PARAMETERS: Handler - Pointer to the handler function for the * event * * RETURN: Status * * DESCRIPTION: Saves the pointer to the handler function * ******************************************************************************/ #ifdef ACPI_FUTURE_USAGE acpi_status acpi_install_exception_handler(acpi_exception_handler handler) { acpi_status status; ACPI_FUNCTION_TRACE(acpi_install_exception_handler); status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Don't allow two handlers. 
*/ if (acpi_gbl_exception_handler) { status = AE_ALREADY_EXISTS; goto cleanup; } /* Install the handler */ acpi_gbl_exception_handler = handler; cleanup: (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_install_exception_handler) #endif /* ACPI_FUTURE_USAGE */ #if (!ACPI_REDUCED_HARDWARE) /******************************************************************************* * * FUNCTION: acpi_install_global_event_handler * * PARAMETERS: Handler - Pointer to the global event handler function * Context - Value passed to the handler on each event * * RETURN: Status * * DESCRIPTION: Saves the pointer to the handler function. The global handler * is invoked upon each incoming GPE and Fixed Event. It is * invoked at interrupt level at the time of the event dispatch. * Can be used to update event counters, etc. * ******************************************************************************/ acpi_status acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context) { acpi_status status; ACPI_FUNCTION_TRACE(acpi_install_global_event_handler); /* Parameter validation */ if (!handler) { return_ACPI_STATUS(AE_BAD_PARAMETER); } status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Don't allow two handlers. */ if (acpi_gbl_global_event_handler) { status = AE_ALREADY_EXISTS; goto cleanup; } acpi_gbl_global_event_handler = handler; acpi_gbl_global_event_handler_context = context; cleanup: (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler) /******************************************************************************* * * FUNCTION: acpi_install_fixed_event_handler * * PARAMETERS: Event - Event type to enable. * Handler - Pointer to the handler function for the * event * Context - Value passed to the handler on each GPE * * RETURN: Status * * DESCRIPTION: Saves the pointer to the handler function and then enables the * event. * ******************************************************************************/ acpi_status acpi_install_fixed_event_handler(u32 event, acpi_event_handler handler, void *context) { acpi_status status; ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler); /* Parameter validation */ if (event > ACPI_EVENT_MAX) { return_ACPI_STATUS(AE_BAD_PARAMETER); } status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Don't allow two handlers. */ if (NULL != acpi_gbl_fixed_event_handlers[event].handler) { status = AE_ALREADY_EXISTS; goto cleanup; } /* Install the handler before enabling the event */ acpi_gbl_fixed_event_handlers[event].handler = handler; acpi_gbl_fixed_event_handlers[event].context = context; status = acpi_clear_event(event); if (ACPI_SUCCESS(status)) status = acpi_enable_event(event, 0); if (ACPI_FAILURE(status)) { ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X", event)); /* Remove the handler */ acpi_gbl_fixed_event_handlers[event].handler = NULL; acpi_gbl_fixed_event_handlers[event].context = NULL; } else { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Enabled fixed event %X, Handler=%p\n", event, handler)); } cleanup: (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler) /******************************************************************************* * * FUNCTION: acpi_remove_fixed_event_handler * * PARAMETERS: Event - Event type to disable. 
* Handler - Address of the handler * * RETURN: Status * * DESCRIPTION: Disables the event and unregisters the event handler. * ******************************************************************************/ acpi_status acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler); /* Parameter validation */ if (event > ACPI_EVENT_MAX) { return_ACPI_STATUS(AE_BAD_PARAMETER); } status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Disable the event before removing the handler */ status = acpi_disable_event(event, 0); /* Always Remove the handler */ acpi_gbl_fixed_event_handlers[event].handler = NULL; acpi_gbl_fixed_event_handlers[event].context = NULL; if (ACPI_FAILURE(status)) { ACPI_WARNING((AE_INFO, "Could not write to fixed event enable register 0x%X", event)); } else { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n", event)); } (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler) /******************************************************************************* * * FUNCTION: acpi_install_gpe_handler * * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT * defined GPEs) * gpe_number - The GPE number within the GPE block * Type - Whether this GPE should be treated as an * edge- or level-triggered interrupt. * Address - Address of the handler * Context - Value passed to the handler on each GPE * * RETURN: Status * * DESCRIPTION: Install a handler for a General Purpose Event. * ******************************************************************************/ acpi_status acpi_install_gpe_handler(acpi_handle gpe_device, u32 gpe_number, u32 type, acpi_gpe_handler address, void *context) { struct acpi_gpe_event_info *gpe_event_info; struct acpi_gpe_handler_info *handler; acpi_status status; acpi_cpu_flags flags; ACPI_FUNCTION_TRACE(acpi_install_gpe_handler); /* Parameter validation */ if ((!address) || (type & ~ACPI_GPE_XRUPT_TYPE_MASK)) { return_ACPI_STATUS(AE_BAD_PARAMETER); } status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Allocate memory for the handler object */ handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_handler_info)); if (!handler) { status = AE_NO_MEMORY; goto unlock_and_exit; } flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); /* Ensure that we have a valid GPE number */ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); if (!gpe_event_info) { status = AE_BAD_PARAMETER; goto free_and_exit; } /* Make sure that there isn't a handler there already */ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_HANDLER) { status = AE_ALREADY_EXISTS; goto free_and_exit; } /* Allocate and init handler object */ handler->address = address; handler->context = context; handler->method_node = gpe_event_info->dispatch.method_node; handler->original_flags = gpe_event_info->flags & (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); /* * If the GPE is associated with a method, it might have been enabled * automatically during initialization, in which case it has to be * disabled now to avoid spurious execution of the handler. 
*/ if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) && gpe_event_info->runtime_count) { handler->originally_enabled = 1; (void)acpi_ev_remove_gpe_reference(gpe_event_info); } /* Install the handler */ gpe_event_info->dispatch.handler = handler; /* Setup up dispatch flags to indicate handler (vs. method) */ gpe_event_info->flags &= ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER); acpi_os_release_lock(acpi_gbl_gpe_lock, flags); unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); return_ACPI_STATUS(status); free_and_exit: acpi_os_release_lock(acpi_gbl_gpe_lock, flags); ACPI_FREE(handler); goto unlock_and_exit; } ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler) /******************************************************************************* * * FUNCTION: acpi_remove_gpe_handler * * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT * defined GPEs) * gpe_number - The event to remove a handler * Address - Address of the handler * * RETURN: Status * * DESCRIPTION: Remove a handler for a General Purpose acpi_event. * ******************************************************************************/ acpi_status acpi_remove_gpe_handler(acpi_handle gpe_device, u32 gpe_number, acpi_gpe_handler address) { struct acpi_gpe_event_info *gpe_event_info; struct acpi_gpe_handler_info *handler; acpi_status status; acpi_cpu_flags flags; ACPI_FUNCTION_TRACE(acpi_remove_gpe_handler); /* Parameter validation */ if (!address) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Make sure all deferred tasks are completed */ acpi_os_wait_events_complete(NULL); status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); /* Ensure that we have a valid GPE number */ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); if (!gpe_event_info) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } /* Make sure that a handler is indeed installed */ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) != ACPI_GPE_DISPATCH_HANDLER) { status = AE_NOT_EXIST; goto unlock_and_exit; } /* Make sure that the installed handler is the same */ if (gpe_event_info->dispatch.handler->address != address) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } /* Remove the handler */ handler = gpe_event_info->dispatch.handler; /* Restore Method node (if any), set dispatch flags */ gpe_event_info->dispatch.method_node = handler->method_node; gpe_event_info->flags &= ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); gpe_event_info->flags |= handler->original_flags; /* * If the GPE was previously associated with a method and it was * enabled, it should be enabled at this point to restore the * post-initialization configuration. 
*/ if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) && handler->originally_enabled) (void)acpi_ev_add_gpe_reference(gpe_event_info); /* Now we can free the handler object */ ACPI_FREE(handler); unlock_and_exit: acpi_os_release_lock(acpi_gbl_gpe_lock, flags); (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler) /******************************************************************************* * * FUNCTION: acpi_acquire_global_lock * * PARAMETERS: Timeout - How long the caller is willing to wait * Handle - Where the handle to the lock is returned * (if acquired) * * RETURN: Status * * DESCRIPTION: Acquire the ACPI Global Lock * * Note: Allows callers with the same thread ID to acquire the global lock * multiple times. In other words, externally, the behavior of the global lock * is identical to an AML mutex. On the first acquire, a new handle is * returned. On any subsequent calls to acquire by the same thread, the same * handle is returned. * ******************************************************************************/ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle) { acpi_status status; if (!handle) { return (AE_BAD_PARAMETER); } /* Must lock interpreter to prevent race conditions */ acpi_ex_enter_interpreter(); status = acpi_ex_acquire_mutex_object(timeout, acpi_gbl_global_lock_mutex, acpi_os_get_thread_id()); if (ACPI_SUCCESS(status)) { /* Return the global lock handle (updated in acpi_ev_acquire_global_lock) */ *handle = acpi_gbl_global_lock_handle; } acpi_ex_exit_interpreter(); return (status); } ACPI_EXPORT_SYMBOL(acpi_acquire_global_lock) /******************************************************************************* * * FUNCTION: acpi_release_global_lock * * PARAMETERS: Handle - Returned from acpi_acquire_global_lock * * RETURN: Status * * DESCRIPTION: Release the ACPI Global Lock. The handle must be valid. * ******************************************************************************/ acpi_status acpi_release_global_lock(u32 handle) { acpi_status status; if (!handle || (handle != acpi_gbl_global_lock_handle)) { return (AE_NOT_ACQUIRED); } status = acpi_ex_release_mutex_object(acpi_gbl_global_lock_mutex); return (status); } ACPI_EXPORT_SYMBOL(acpi_release_global_lock) #endif /* !ACPI_REDUCED_HARDWARE */
gpl-2.0
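In the removal path above, the first system-notify handler lives embedded in the notify object itself, so acpi_remove_notify_handler() has three cases: unlink an ordinary list node, copy a successor's data over the embedded head, or clear the head when it is the last handler. A compact userspace sketch of that embedded-head list, with illustrative types (not the ACPICA structures):

#include <stdio.h>
#include <stdlib.h>

struct handler {
	void (*fn)(void);
	struct handler *next;
};

/* Remove fn from a list whose head node is embedded in its owner and
 * can never itself be freed -- the three cases of the removal path
 * above. Returns 0 on success, -1 if fn is not installed. */
static int remove_handler(struct handler *head, void (*fn)(void))
{
	struct handler *cur = head, *prev = NULL;

	while (cur && cur->fn != fn) {
		prev = cur;
		cur = cur->next;
	}
	if (!cur)
		return -1;
	if (prev) {			/* case 1: ordinary list node */
		prev->next = cur->next;
		free(cur);
	} else if (head->next) {	/* case 2: head matches, has successor */
		struct handler *victim = head->next;
		*head = *victim;	/* copy successor's data over the head */
		free(victim);
	} else {			/* case 3: last handler */
		head->fn = NULL;
	}
	return 0;
}

static void h1(void) { }
static void h2(void) { }

int main(void)
{
	struct handler head = { h1, NULL };
	struct handler *second = malloc(sizeof(*second));

	if (!second)
		return 1;
	second->fn = h2;
	second->next = NULL;
	head.next = second;

	remove_handler(&head, h1);	/* h2's node is folded into the head */
	printf("head now holds h2: %s\n", head.fn == h2 ? "yes" : "no");
	remove_handler(&head, h2);	/* last one: head is cleared */
	printf("list empty: %s\n", head.fn == NULL ? "yes" : "no");
	return 0;
}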
fivestars/ubuntu-nexus7
net/sched/sch_fifo.c
7668
4215
/* * net/sched/sch_fifo.c The simplest FIFO queue. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <net/pkt_sched.h> /* 1 band FIFO pseudo-"scheduler" */ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) { if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit)) return qdisc_enqueue_tail(skb, sch); return qdisc_reshape_fail(skb, sch); } static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) { if (likely(skb_queue_len(&sch->q) < sch->limit)) return qdisc_enqueue_tail(skb, sch); return qdisc_reshape_fail(skb, sch); } static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch) { if (likely(skb_queue_len(&sch->q) < sch->limit)) return qdisc_enqueue_tail(skb, sch); /* queue full, remove one skb to fulfill the limit */ __qdisc_queue_drop_head(sch, &sch->q); sch->qstats.drops++; qdisc_enqueue_tail(skb, sch); return NET_XMIT_CN; } static int fifo_init(struct Qdisc *sch, struct nlattr *opt) { bool bypass; bool is_bfifo = sch->ops == &bfifo_qdisc_ops; if (opt == NULL) { u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1; if (is_bfifo) limit *= psched_mtu(qdisc_dev(sch)); sch->limit = limit; } else { struct tc_fifo_qopt *ctl = nla_data(opt); if (nla_len(opt) < sizeof(*ctl)) return -EINVAL; sch->limit = ctl->limit; } if (is_bfifo) bypass = sch->limit >= psched_mtu(qdisc_dev(sch)); else bypass = sch->limit >= 1; if (bypass) sch->flags |= TCQ_F_CAN_BYPASS; else sch->flags &= ~TCQ_F_CAN_BYPASS; return 0; } static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb) { struct tc_fifo_qopt opt = { .limit = sch->limit }; NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); return skb->len; nla_put_failure: return -1; } struct Qdisc_ops pfifo_qdisc_ops __read_mostly = { .id = "pfifo", .priv_size = 0, .enqueue = pfifo_enqueue, .dequeue = qdisc_dequeue_head, .peek = qdisc_peek_head, .drop = qdisc_queue_drop, .init = fifo_init, .reset = qdisc_reset_queue, .change = fifo_init, .dump = fifo_dump, .owner = THIS_MODULE, }; EXPORT_SYMBOL(pfifo_qdisc_ops); struct Qdisc_ops bfifo_qdisc_ops __read_mostly = { .id = "bfifo", .priv_size = 0, .enqueue = bfifo_enqueue, .dequeue = qdisc_dequeue_head, .peek = qdisc_peek_head, .drop = qdisc_queue_drop, .init = fifo_init, .reset = qdisc_reset_queue, .change = fifo_init, .dump = fifo_dump, .owner = THIS_MODULE, }; EXPORT_SYMBOL(bfifo_qdisc_ops); struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = { .id = "pfifo_head_drop", .priv_size = 0, .enqueue = pfifo_tail_enqueue, .dequeue = qdisc_dequeue_head, .peek = qdisc_peek_head, .drop = qdisc_queue_drop_head, .init = fifo_init, .reset = qdisc_reset_queue, .change = fifo_init, .dump = fifo_dump, .owner = THIS_MODULE, }; /* Pass size change message down to embedded FIFO */ int fifo_set_limit(struct Qdisc *q, unsigned int limit) { struct nlattr *nla; int ret = -ENOMEM; /* Hack to avoid sending change message to non-FIFO */ if (strncmp(q->ops->id + 1, "fifo", 4) != 0) return 0; nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); if (nla) { nla->nla_type = RTM_NEWQDISC; nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt)); ((struct tc_fifo_qopt 
*)nla_data(nla))->limit = limit; ret = q->ops->change(q, nla); kfree(nla); } return ret; } EXPORT_SYMBOL(fifo_set_limit); struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops, unsigned int limit) { struct Qdisc *q; int err = -ENOMEM; q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1)); if (q) { err = fifo_set_limit(q, limit); if (err < 0) { qdisc_destroy(q); q = NULL; } } return q ? : ERR_PTR(err); } EXPORT_SYMBOL(fifo_create_dflt);
gpl-2.0
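The difference between pfifo_enqueue() and pfifo_tail_enqueue() above is the full-queue policy: plain pfifo refuses the new packet, while pfifo_head_drop evicts the oldest one to make room. A tiny ring-buffer sketch of the two policies, with a made-up limit (not the qdisc API):

#include <stdio.h>

#define LIMIT 4

static int q[LIMIT];
static unsigned int head, count;	/* oldest slot and fill level */

/* pfifo policy: refuse the newcomer when the queue is full. */
static int enqueue_tail(int v)
{
	if (count >= LIMIT)
		return -1;
	q[(head + count) % LIMIT] = v;
	count++;
	return 0;
}

/* pfifo_head_drop policy: evict the oldest item, then queue. */
static int enqueue_head_drop(int v)
{
	if (count >= LIMIT) {
		head = (head + 1) % LIMIT;	/* drop from the head */
		count--;
	}
	return enqueue_tail(v);			/* now guaranteed to fit */
}

int main(void)
{
	for (int i = 1; i <= 6; i++)
		enqueue_head_drop(i);
	/* With limit 4 and six enqueues, 1 and 2 were evicted: 3 4 5 6 */
	for (unsigned int i = 0; i < count; i++)
		printf("%d ", q[(head + i) % LIMIT]);
	printf("\n");
	return 0;
}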
Eliminater74/temp-kernel
arch/alpha/oprofile/op_model_ev67.c
7924
7162
/** * @file arch/alpha/oprofile/op_model_ev67.c * * @remark Copyright 2002 OProfile authors * @remark Read the file COPYING * * @author Richard Henderson <rth@twiddle.net> * @author Falk Hueffner <falk@debian.org> */ #include <linux/oprofile.h> #include <linux/init.h> #include <linux/smp.h> #include <asm/ptrace.h> #include "op_impl.h" /* Compute all of the registers in preparation for enabling profiling. */ static void ev67_reg_setup(struct op_register_config *reg, struct op_counter_config *ctr, struct op_system_config *sys) { unsigned long ctl, reset, need_reset, i; /* Select desired events. */ ctl = 1UL << 4; /* Enable ProfileMe mode. */ /* The event numbers are chosen so we can use them directly if PCTR1 is enabled. */ if (ctr[1].enabled) { ctl |= (ctr[1].event & 3) << 2; } else { if (ctr[0].event == 0) /* cycles */ ctl |= 1UL << 2; } reg->mux_select = ctl; /* Select logging options. */ /* ??? Need to come up with some mechanism to trace only selected processes. EV67 does not have a mechanism to select kernel or user mode only. For now, enable always. */ reg->proc_mode = 0; /* EV67 cannot change the width of the counters as with the other implementations. But fortunately, we can write to the counters and set the value such that it will overflow at the right time. */ reset = need_reset = 0; for (i = 0; i < 2; ++i) { unsigned long count = ctr[i].count; if (!ctr[i].enabled) continue; if (count > 0x100000) count = 0x100000; ctr[i].count = count; reset |= (0x100000 - count) << (i ? 6 : 28); if (count != 0x100000) need_reset |= 1 << i; } reg->reset_values = reset; reg->need_reset = need_reset; } /* Program all of the registers in preparation for enabling profiling. */ static void ev67_cpu_setup (void *x) { struct op_register_config *reg = x; wrperfmon(2, reg->mux_select); wrperfmon(3, reg->proc_mode); wrperfmon(6, reg->reset_values | 3); } /* CTR is a counter for which the user has requested an interrupt count in between one of the widths selectable in hardware. Reset the count for CTR to the value stored in REG->RESET_VALUES. */ static void ev67_reset_ctr(struct op_register_config *reg, unsigned long ctr) { wrperfmon(6, reg->reset_values | (1 << ctr)); } /* ProfileMe conditions which will show up as counters. We can also detect the following, but it seems unlikely that anybody is interested in counting them: * Reset * MT_FPCR (write to floating point control register) * Arithmetic trap * Dstream Fault * Machine Check (ECC fault, etc.) * OPCDEC (illegal opcode) * Floating point disabled * Differentiate between DTB single/double misses and 3 or 4 level page tables * Istream access violation * Interrupt * Icache Parity Error. * Instruction killed (nop, trapb) Unfortunately, there seems to be no way to detect Dcache and Bcache misses; the latter could be approximated by making the counter count Bcache misses, but that is not precise. 
We model this as 20 counters: * PCTR0 * PCTR1 * 9 ProfileMe events, induced by PCTR0 * 9 ProfileMe events, induced by PCTR1 */ enum profileme_counters { PM_STALLED, /* Stalled for at least one cycle between the fetch and map stages */ PM_TAKEN, /* Conditional branch taken */ PM_MISPREDICT, /* Branch caused mispredict trap */ PM_ITB_MISS, /* ITB miss */ PM_DTB_MISS, /* DTB miss */ PM_REPLAY, /* Replay trap */ PM_LOAD_STORE, /* Load-store order trap */ PM_ICACHE_MISS, /* Icache miss */ PM_UNALIGNED, /* Unaligned Load/Store */ PM_NUM_COUNTERS }; static inline void op_add_pm(unsigned long pc, int kern, unsigned long counter, struct op_counter_config *ctr, unsigned long event) { unsigned long fake_counter = 2 + event; if (counter == 1) fake_counter += PM_NUM_COUNTERS; if (ctr[fake_counter].enabled) oprofile_add_pc(pc, kern, fake_counter); } static void ev67_handle_interrupt(unsigned long which, struct pt_regs *regs, struct op_counter_config *ctr) { unsigned long pmpc, pctr_ctl; int kern = !user_mode(regs); int mispredict = 0; union { unsigned long v; struct { unsigned reserved: 30; /* 0-29 */ unsigned overcount: 3; /* 30-32 */ unsigned icache_miss: 1; /* 33 */ unsigned trap_type: 4; /* 34-37 */ unsigned load_store: 1; /* 38 */ unsigned trap: 1; /* 39 */ unsigned mispredict: 1; /* 40 */ } fields; } i_stat; enum trap_types { TRAP_REPLAY, TRAP_INVALID0, TRAP_DTB_DOUBLE_MISS_3, TRAP_DTB_DOUBLE_MISS_4, TRAP_FP_DISABLED, TRAP_UNALIGNED, TRAP_DTB_SINGLE_MISS, TRAP_DSTREAM_FAULT, TRAP_OPCDEC, TRAP_INVALID1, TRAP_MACHINE_CHECK, TRAP_INVALID2, TRAP_ARITHMETIC, TRAP_INVALID3, TRAP_MT_FPCR, TRAP_RESET }; pmpc = wrperfmon(9, 0); /* ??? Don't know how to handle physical-mode PALcode address. */ if (pmpc & 1) return; pmpc &= ~2; /* clear reserved bit */ i_stat.v = wrperfmon(8, 0); if (i_stat.fields.trap) { switch (i_stat.fields.trap_type) { case TRAP_INVALID1: case TRAP_INVALID2: case TRAP_INVALID3: /* Pipeline redirection occurred. PMPC points to PALcode. Recognize ITB miss by PALcode offset address, and get actual PC from EXC_ADDR. */ oprofile_add_pc(regs->pc, kern, which); if ((pmpc & ((1 << 15) - 1)) == 581) op_add_pm(regs->pc, kern, which, ctr, PM_ITB_MISS); /* Most other bit and counter values will be those for the first instruction in the fault handler, so we're done. */ return; case TRAP_REPLAY: op_add_pm(pmpc, kern, which, ctr, (i_stat.fields.load_store ? PM_LOAD_STORE : PM_REPLAY)); break; case TRAP_DTB_DOUBLE_MISS_3: case TRAP_DTB_DOUBLE_MISS_4: case TRAP_DTB_SINGLE_MISS: op_add_pm(pmpc, kern, which, ctr, PM_DTB_MISS); break; case TRAP_UNALIGNED: op_add_pm(pmpc, kern, which, ctr, PM_UNALIGNED); break; case TRAP_INVALID0: case TRAP_FP_DISABLED: case TRAP_DSTREAM_FAULT: case TRAP_OPCDEC: case TRAP_MACHINE_CHECK: case TRAP_ARITHMETIC: case TRAP_MT_FPCR: case TRAP_RESET: break; } /* ??? JSR/JMP/RET/COR or HW_JSR/HW_JMP/HW_RET/HW_COR mispredicts do not set this bit but can be recognized by the presence of one of these instructions at the PMPC location with bit 39 set. */ if (i_stat.fields.mispredict) { mispredict = 1; op_add_pm(pmpc, kern, which, ctr, PM_MISPREDICT); } } oprofile_add_pc(pmpc, kern, which); pctr_ctl = wrperfmon(5, 0); if (pctr_ctl & (1UL << 27)) op_add_pm(pmpc, kern, which, ctr, PM_STALLED); /* Unfortunately, TAK is undefined on mispredicted branches. ??? It is also undefined for non-cbranch insns, should check that. 
*/ if (!mispredict && pctr_ctl & (1UL << 0)) op_add_pm(pmpc, kern, which, ctr, PM_TAKEN); } struct op_axp_model op_model_ev67 = { .reg_setup = ev67_reg_setup, .cpu_setup = ev67_cpu_setup, .reset_ctr = ev67_reset_ctr, .handle_interrupt = ev67_handle_interrupt, .cpu_type = "alpha/ev67", .num_counters = 20, .can_set_proc_mode = 0, };
gpl-2.0
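ev67_reg_setup() above works around the fixed-width 20-bit counters by preloading 0x100000 - count, so the overflow interrupt arrives after exactly count events; the shifts that pack the two preloads into reset_values are omitted here. A small sketch of the preload arithmetic with a simulated counter (plain C):

#include <stdio.h>

#define CTR_MAX 0x100000UL	/* the 20-bit counter wraps at 2^20 */

/* Preload value so the counter overflows after 'count' events,
 * clamped the way ev67_reg_setup() clamps oversized requests. */
static unsigned long preload_for(unsigned long count)
{
	if (count > CTR_MAX)
		count = CTR_MAX;
	return CTR_MAX - count;
}

int main(void)
{
	unsigned long count = 50000;
	unsigned long ctr = preload_for(count);
	unsigned long events = 0;

	/* Simulate one increment per profiled event until overflow. */
	while (ctr < CTR_MAX) {
		ctr++;
		events++;
	}
	printf("overflow after %lu events (wanted %lu)\n", events, count);
	return 0;
}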
palloteos/exoduscaf
drivers/w1/slaves/w1_smem.c
7924
1784
/* * w1_smem.c * * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <asm/types.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/types.h> #include "../w1.h" #include "../w1_int.h" #include "../w1_family.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family."); static struct w1_family w1_smem_family_01 = { .fid = W1_FAMILY_SMEM_01, }; static struct w1_family w1_smem_family_81 = { .fid = W1_FAMILY_SMEM_81, }; static int __init w1_smem_init(void) { int err; err = w1_register_family(&w1_smem_family_01); if (err) return err; err = w1_register_family(&w1_smem_family_81); if (err) { w1_unregister_family(&w1_smem_family_01); return err; } return 0; } static void __exit w1_smem_fini(void) { w1_unregister_family(&w1_smem_family_01); w1_unregister_family(&w1_smem_family_81); } module_init(w1_smem_init); module_exit(w1_smem_fini);
gpl-2.0
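sndpkt() in the packet.c record below addresses the shared-RAM send area as a ring: each slot's offset is next_sendbuf * BUFFER_SIZE + first_sendbuf, the index wraps after num_sendbufs, and free_sendbufs throttles callers until completions return slots. A standalone sketch of that slot bookkeeping with invented sizes (not the card's actual layout):

#include <stdio.h>

#define BUFFER_SIZE	1024
#define NUM_SENDBUFS	4
#define FIRST_SENDBUF	0x2000UL	/* base offset of the send area */

static unsigned int next_sendbuf;
static unsigned int free_sendbufs = NUM_SENDBUFS;

/* Claim the next send slot's offset, wrapping the index as sndpkt()
 * does. Returns 0 and stores the offset, or -1 when none is free
 * (a slot is returned to the pool when the card acks the send). */
static int claim_sendbuf(unsigned long *offset)
{
	if (!free_sendbufs)
		return -1;
	*offset = (unsigned long)next_sendbuf * BUFFER_SIZE + FIRST_SENDBUF;
	next_sendbuf = (next_sendbuf + 1 == NUM_SENDBUFS) ?
			0 : next_sendbuf + 1;
	free_sendbufs--;
	return 0;
}

int main(void)
{
	unsigned long off;

	while (claim_sendbuf(&off) == 0)
		printf("slot at 0x%lx\n", off);
	printf("ring exhausted\n");
	return 0;
}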
holyangel/HTC_M8_Sense-4.4.4
drivers/isdn/sc/packet.c
9204
6731
/* $Id: packet.c,v 1.5.8.1 2001/09/23 22:24:59 kai Exp $
 *
 * Copyright (C) 1996 SpellCaster Telecommunications Inc.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * For more information, please contact gpl-info@spellcast.com or write:
 *
 *     SpellCaster Telecommunications Inc.
 *     5621 Finch Avenue East, Unit #3
 *     Scarborough, Ontario  Canada
 *     M1B 2T9
 *     +1 (416) 297-8565
 *     +1 (416) 297-6433 Facsimile
 */

#include "includes.h"
#include "hardware.h"
#include "message.h"
#include "card.h"

int sndpkt(int devId, int channel, int ack, struct sk_buff *data)
{
	LLData ReqLnkWrite;
	int status;
	int card;
	unsigned long len;

	card = get_card_from_id(devId);

	if (!IS_VALID_CARD(card)) {
		pr_debug("invalid param: %d is not a valid card id\n", card);
		return -ENODEV;
	}

	pr_debug("%s: sndpkt: frst = 0x%lx nxt = %d f = %d n = %d\n",
		 sc_adapter[card]->devicename,
		 sc_adapter[card]->channel[channel].first_sendbuf,
		 sc_adapter[card]->channel[channel].next_sendbuf,
		 sc_adapter[card]->channel[channel].free_sendbufs,
		 sc_adapter[card]->channel[channel].num_sendbufs);

	if (!sc_adapter[card]->channel[channel].free_sendbufs) {
		pr_debug("%s: out of TX buffers\n",
			 sc_adapter[card]->devicename);
		return -EINVAL;
	}

	if (data->len > BUFFER_SIZE) {
		pr_debug("%s: data overflows buffer size (data > buffer)\n",
			 sc_adapter[card]->devicename);
		return -EINVAL;
	}

	ReqLnkWrite.buff_offset = sc_adapter[card]->channel[channel].next_sendbuf *
		BUFFER_SIZE + sc_adapter[card]->channel[channel].first_sendbuf;
	ReqLnkWrite.msg_len = data->len; /* sk_buff size */
	pr_debug("%s: writing %d bytes to buffer offset 0x%lx\n",
		 sc_adapter[card]->devicename,
		 ReqLnkWrite.msg_len, ReqLnkWrite.buff_offset);
	memcpy_toshmem(card, (char *)ReqLnkWrite.buff_offset, data->data,
		       ReqLnkWrite.msg_len);

	/*
	 * sendmessage
	 */
	pr_debug("%s: sndpkt size=%d, buf_offset=0x%lx buf_indx=%d\n",
		 sc_adapter[card]->devicename,
		 ReqLnkWrite.msg_len, ReqLnkWrite.buff_offset,
		 sc_adapter[card]->channel[channel].next_sendbuf);

	status = sendmessage(card, CEPID, ceReqTypeLnk, ceReqClass1, ceReqLnkWrite,
			     channel + 1, sizeof(LLData), (unsigned int *)&ReqLnkWrite);
	len = data->len;
	if (status) {
		pr_debug("%s: failed to send packet, status = %d\n",
			 sc_adapter[card]->devicename, status);
		return -1;
	} else {
		sc_adapter[card]->channel[channel].free_sendbufs--;
		/* advance the TX ring index, wrapping past the last buffer */
		sc_adapter[card]->channel[channel].next_sendbuf =
			sc_adapter[card]->channel[channel].next_sendbuf + 1 ==
			sc_adapter[card]->channel[channel].num_sendbufs ?
			0 : sc_adapter[card]->channel[channel].next_sendbuf + 1;
		pr_debug("%s: packet sent successfully\n",
			 sc_adapter[card]->devicename);
		dev_kfree_skb(data);
		indicate_status(card, ISDN_STAT_BSENT, channel, (char *)&len);
	}
	return len;
}

void rcvpkt(int card, RspMessage *rcvmsg)
{
	LLData newll;
	struct sk_buff *skb;

	if (!IS_VALID_CARD(card)) {
		pr_debug("invalid param: %d is not a valid card id\n", card);
		return;
	}

	switch (rcvmsg->rsp_status) {
	case 0x01:
	case 0x02:
	case 0x70:
		pr_debug("%s: error status code: 0x%x\n",
			 sc_adapter[card]->devicename, rcvmsg->rsp_status);
		return;
	case 0x00:
		if (!(skb = dev_alloc_skb(rcvmsg->msg_data.response.msg_len))) {
			printk(KERN_WARNING "%s: rcvpkt out of memory, dropping packet\n",
			       sc_adapter[card]->devicename);
			return;
		}
		skb_put(skb, rcvmsg->msg_data.response.msg_len);
		pr_debug("%s: getting data from offset: 0x%lx\n",
			 sc_adapter[card]->devicename,
			 rcvmsg->msg_data.response.buff_offset);
		/* skb_put() above already reserved msg_len bytes; copying
		 * through a second skb_put() would overrun the allocation */
		memcpy_fromshmem(card,
				 skb->data,
				 (char *)rcvmsg->msg_data.response.buff_offset,
				 rcvmsg->msg_data.response.msg_len);
		sc_adapter[card]->card->rcvcallb_skb(sc_adapter[card]->driverId,
						     rcvmsg->phy_link_no - 1, skb);
		/* fall through: hand the buffer back to the board for reuse */
	case 0x03:
		/*
		 * Recycle the buffer
		 */
		pr_debug("%s: buffer size : %d\n",
			 sc_adapter[card]->devicename, BUFFER_SIZE);
		/* memset_shmem(card, rcvmsg->msg_data.response.buff_offset, 0, BUFFER_SIZE); */
		newll.buff_offset = rcvmsg->msg_data.response.buff_offset;
		newll.msg_len = BUFFER_SIZE;
		pr_debug("%s: recycled buffer at offset 0x%lx size %d\n",
			 sc_adapter[card]->devicename,
			 newll.buff_offset, newll.msg_len);
		sendmessage(card, CEPID, ceReqTypeLnk, ceReqClass1, ceReqLnkRead,
			    rcvmsg->phy_link_no, sizeof(LLData), (unsigned int *)&newll);
	}
}

int setup_buffers(int card, int c)
{
	unsigned int nBuffers, i, cBase;
	unsigned int buffer_size;
	LLData RcvBuffOffset;

	if (!IS_VALID_CARD(card)) {
		pr_debug("invalid param: %d is not a valid card id\n", card);
		return -ENODEV;
	}

	/*
	 * Calculate the buffer offsets (send/recv/send/recv)
	 */
	pr_debug("%s: setting up channel buffer space in shared RAM\n",
		 sc_adapter[card]->devicename);
	buffer_size = BUFFER_SIZE;
	nBuffers = ((sc_adapter[card]->ramsize - BUFFER_BASE) / buffer_size) / 2;
	nBuffers = nBuffers > BUFFERS_MAX ? BUFFERS_MAX : nBuffers;
	pr_debug("%s: calculating buffer space: %d buffers, %d big\n",
		 sc_adapter[card]->devicename,
		 nBuffers, buffer_size);
	if (nBuffers < 2) {
		pr_debug("%s: not enough buffer space\n",
			 sc_adapter[card]->devicename);
		return -1;
	}
	cBase = (nBuffers * buffer_size) * (c - 1);
	pr_debug("%s: channel buffer offset from shared RAM: 0x%x\n",
		 sc_adapter[card]->devicename, cBase);
	sc_adapter[card]->channel[c - 1].first_sendbuf = BUFFER_BASE + cBase;
	sc_adapter[card]->channel[c - 1].num_sendbufs = nBuffers / 2;
	sc_adapter[card]->channel[c - 1].free_sendbufs = nBuffers / 2;
	sc_adapter[card]->channel[c - 1].next_sendbuf = 0;
	pr_debug("%s: send buffer setup complete: first=0x%lx n=%d f=%d, nxt=%d\n",
		 sc_adapter[card]->devicename,
		 sc_adapter[card]->channel[c - 1].first_sendbuf,
		 sc_adapter[card]->channel[c - 1].num_sendbufs,
		 sc_adapter[card]->channel[c - 1].free_sendbufs,
		 sc_adapter[card]->channel[c - 1].next_sendbuf);

	/*
	 * Prep the receive buffers
	 */
	pr_debug("%s: adding %d RecvBuffers:\n",
		 sc_adapter[card]->devicename, nBuffers / 2);
	for (i = 0; i < nBuffers / 2; i++) {
		RcvBuffOffset.buff_offset =
			((sc_adapter[card]->channel[c - 1].first_sendbuf +
			  (nBuffers / 2) * buffer_size) + (buffer_size * i));
		RcvBuffOffset.msg_len = buffer_size;
		pr_debug("%s: adding RcvBuffer #%d offset=0x%lx sz=%d bufsz:%d\n",
			 sc_adapter[card]->devicename,
			 i + 1, RcvBuffOffset.buff_offset,
			 RcvBuffOffset.msg_len, buffer_size);
		sendmessage(card, CEPID, ceReqTypeLnk, ceReqClass1, ceReqLnkRead,
			    c, sizeof(LLData), (unsigned int *)&RcvBuffOffset);
	}
	return 0;
}
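The send path above is a fixed-slot ring over shared card RAM: setup_buffers() carves each channel's region into equal BUFFER_SIZE slots, and sndpkt() fills the slot at next_sendbuf, then advances the index with wrap-around. A minimal, self-contained sketch of that index arithmetic, assuming the same slot layout; the sc_ring names and the SLOT_SIZE value are illustrative, not part of the driver:

#include <stdio.h>

#define SLOT_SIZE 0x800u	/* stand-in for the driver's BUFFER_SIZE */

struct sc_ring {
	unsigned long first;	/* shared-RAM offset of slot 0 */
	unsigned int num;	/* number of send slots */
	unsigned int next;	/* slot the next packet lands in */
};

/* Offset the next send will write to, mirroring sndpkt()'s computation. */
static unsigned long sc_ring_offset(const struct sc_ring *r)
{
	return r->first + (unsigned long)r->next * SLOT_SIZE;
}

/* Advance with wrap-around, as sndpkt() does after a successful write. */
static void sc_ring_advance(struct sc_ring *r)
{
	r->next = (r->next + 1 == r->num) ? 0 : r->next + 1;
}

int main(void)
{
	struct sc_ring r = { 0x1000, 4, 0 };
	int i;

	for (i = 0; i < 6; i++) {
		printf("send %d -> offset 0x%lx\n", i, sc_ring_offset(&r));
		sc_ring_advance(&r);
	}
	return 0;	/* offsets cycle 0x1000, 0x1800, 0x2000, 0x2800, ... */
}

A power-of-two slot count would allow masking instead of the compare, but the driver's buffer count depends on the card's ramsize, so the compare-and-wrap form is the safe general case.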
gpl-2.0
ResurrectionRemix-Devices/android_kernel_samsung_smdk4412
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
245
34264
/************************************************************************** * * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #include <linux/module.h> #include "drmP.h" #include "vmwgfx_drv.h" #include "ttm/ttm_placement.h" #include "ttm/ttm_bo_driver.h" #include "ttm/ttm_object.h" #include "ttm/ttm_module.h" #define VMWGFX_DRIVER_NAME "vmwgfx" #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" #define VMWGFX_CHIP_SVGAII 0 #define VMW_FB_RESERVATION 0 #define VMW_MIN_INITIAL_WIDTH 800 #define VMW_MIN_INITIAL_HEIGHT 600 /** * Fully encoded drm commands. 
Might move to vmw_drm.h */ #define DRM_IOCTL_VMW_GET_PARAM \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \ struct drm_vmw_getparam_arg) #define DRM_IOCTL_VMW_ALLOC_DMABUF \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \ union drm_vmw_alloc_dmabuf_arg) #define DRM_IOCTL_VMW_UNREF_DMABUF \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \ struct drm_vmw_unref_dmabuf_arg) #define DRM_IOCTL_VMW_CURSOR_BYPASS \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \ struct drm_vmw_cursor_bypass_arg) #define DRM_IOCTL_VMW_CONTROL_STREAM \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \ struct drm_vmw_control_stream_arg) #define DRM_IOCTL_VMW_CLAIM_STREAM \ DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \ struct drm_vmw_stream_arg) #define DRM_IOCTL_VMW_UNREF_STREAM \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \ struct drm_vmw_stream_arg) #define DRM_IOCTL_VMW_CREATE_CONTEXT \ DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \ struct drm_vmw_context_arg) #define DRM_IOCTL_VMW_UNREF_CONTEXT \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \ struct drm_vmw_context_arg) #define DRM_IOCTL_VMW_CREATE_SURFACE \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \ union drm_vmw_surface_create_arg) #define DRM_IOCTL_VMW_UNREF_SURFACE \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \ struct drm_vmw_surface_arg) #define DRM_IOCTL_VMW_REF_SURFACE \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \ union drm_vmw_surface_reference_arg) #define DRM_IOCTL_VMW_EXECBUF \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \ struct drm_vmw_execbuf_arg) #define DRM_IOCTL_VMW_GET_3D_CAP \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \ struct drm_vmw_get_3d_cap_arg) #define DRM_IOCTL_VMW_FENCE_WAIT \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ struct drm_vmw_fence_wait_arg) #define DRM_IOCTL_VMW_FENCE_SIGNALED \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \ struct drm_vmw_fence_signaled_arg) #define DRM_IOCTL_VMW_FENCE_UNREF \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \ struct drm_vmw_fence_arg) #define DRM_IOCTL_VMW_FENCE_EVENT \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \ struct drm_vmw_fence_event_arg) #define DRM_IOCTL_VMW_PRESENT \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \ struct drm_vmw_present_arg) #define DRM_IOCTL_VMW_PRESENT_READBACK \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ struct drm_vmw_present_readback_arg) #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ struct drm_vmw_update_layout_arg) /** * The core DRM version of this macro doesn't account for * DRM_COMMAND_BASE. */ #define VMW_IOCTL_DEF(ioctl, func, flags) \ [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl} /** * Ioctl definitions. 
*/ static struct drm_ioctl_desc vmw_ioctls[] = { VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, vmw_kms_cursor_bypass_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, vmw_fence_obj_signaled_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, DRM_AUTH | DRM_UNLOCKED), /* these allow direct access to the framebuffers mark as master only */ VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_PRESENT_READBACK, vmw_present_readback_ioctl, DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, DRM_MASTER | DRM_UNLOCKED), }; static struct pci_device_id vmw_pci_id_list[] = { {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, {0, 0, 0} }; static int enable_fbdev; static int vmw_probe(struct pci_dev *, const struct pci_device_id *); static void vmw_master_init(struct vmw_master *); static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, void *ptr); MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); module_param_named(enable_fbdev, enable_fbdev, int, 0600); static void vmw_print_capabilities(uint32_t capabilities) { DRM_INFO("Capabilities:\n"); if (capabilities & SVGA_CAP_RECT_COPY) DRM_INFO(" Rect copy.\n"); if (capabilities & SVGA_CAP_CURSOR) DRM_INFO(" Cursor.\n"); if (capabilities & SVGA_CAP_CURSOR_BYPASS) DRM_INFO(" Cursor bypass.\n"); if (capabilities & SVGA_CAP_CURSOR_BYPASS_2) DRM_INFO(" Cursor bypass 2.\n"); if (capabilities & SVGA_CAP_8BIT_EMULATION) DRM_INFO(" 8bit emulation.\n"); if (capabilities & SVGA_CAP_ALPHA_CURSOR) DRM_INFO(" Alpha cursor.\n"); if (capabilities & SVGA_CAP_3D) DRM_INFO(" 3D.\n"); if (capabilities & SVGA_CAP_EXTENDED_FIFO) DRM_INFO(" Extended Fifo.\n"); if (capabilities & SVGA_CAP_MULTIMON) DRM_INFO(" Multimon.\n"); if (capabilities & SVGA_CAP_PITCHLOCK) DRM_INFO(" Pitchlock.\n"); if (capabilities & SVGA_CAP_IRQMASK) DRM_INFO(" Irq mask.\n"); if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) DRM_INFO(" Display Topology.\n"); if (capabilities & SVGA_CAP_GMR) DRM_INFO(" GMR.\n"); if (capabilities & SVGA_CAP_TRACES) DRM_INFO(" Traces.\n"); if (capabilities & SVGA_CAP_GMR2) DRM_INFO(" GMR2.\n"); if (capabilities 
& SVGA_CAP_SCREEN_OBJECT_2) DRM_INFO(" Screen Object 2.\n"); } /** * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at * the start of a buffer object. * * @dev_priv: The device private structure. * * This function will idle the buffer using an uninterruptible wait, then * map the first page and initialize a pending occlusion query result structure, * Finally it will unmap the buffer. * * TODO: Since we're only mapping a single page, we should optimize the map * to use kmap_atomic / iomap_atomic. */ static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv) { struct ttm_bo_kmap_obj map; volatile SVGA3dQueryResult *result; bool dummy; int ret; struct ttm_bo_device *bdev = &dev_priv->bdev; struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; ttm_bo_reserve(bo, false, false, false, 0); spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, false, false); spin_unlock(&bdev->fence_lock); if (unlikely(ret != 0)) (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ); ret = ttm_bo_kmap(bo, 0, 1, &map); if (likely(ret == 0)) { result = ttm_kmap_obj_virtual(&map, &dummy); result->totalSize = sizeof(*result); result->state = SVGA3D_QUERYSTATE_PENDING; result->result32 = 0xff; ttm_bo_kunmap(&map); } else DRM_ERROR("Dummy query buffer map failed.\n"); ttm_bo_unreserve(bo); } /** * vmw_dummy_query_bo_create - create a bo to hold a dummy query result * * @dev_priv: A device private structure. * * This function creates a small buffer object that holds the query * result for dummy queries emitted as query barriers. * No interruptible waits are done within this function. * * Returns an error if bo creation fails. */ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) { return ttm_bo_create(&dev_priv->bdev, PAGE_SIZE, ttm_bo_type_device, &vmw_vram_sys_placement, 0, 0, false, NULL, &dev_priv->dummy_query_bo); } static int vmw_request_device(struct vmw_private *dev_priv) { int ret; ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); if (unlikely(ret != 0)) { DRM_ERROR("Unable to initialize FIFO.\n"); return ret; } vmw_fence_fifo_up(dev_priv->fman); ret = vmw_dummy_query_bo_create(dev_priv); if (unlikely(ret != 0)) goto out_no_query_bo; vmw_dummy_query_bo_prepare(dev_priv); return 0; out_no_query_bo: vmw_fence_fifo_down(dev_priv->fman); vmw_fifo_release(dev_priv, &dev_priv->fifo); return ret; } static void vmw_release_device(struct vmw_private *dev_priv) { /* * Previous destructions should've released * the pinned bo. */ BUG_ON(dev_priv->pinned_bo != NULL); ttm_bo_unref(&dev_priv->dummy_query_bo); vmw_fence_fifo_down(dev_priv->fman); vmw_fifo_release(dev_priv, &dev_priv->fifo); } /** * Increase the 3d resource refcount. * If the count was prevously zero, initialize the fifo, switching to svga * mode. Note that the master holds a ref as well, and may request an * explicit switch to svga mode if fb is not running, using @unhide_svga. */ int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga) { int ret = 0; mutex_lock(&dev_priv->release_mutex); if (unlikely(dev_priv->num_3d_resources++ == 0)) { ret = vmw_request_device(dev_priv); if (unlikely(ret != 0)) --dev_priv->num_3d_resources; } else if (unhide_svga) { mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_ENABLE, vmw_read(dev_priv, SVGA_REG_ENABLE) & ~SVGA_REG_ENABLE_HIDE); mutex_unlock(&dev_priv->hw_mutex); } mutex_unlock(&dev_priv->release_mutex); return ret; } /** * Decrease the 3d resource refcount. * If the count reaches zero, disable the fifo, switching to vga mode. 
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
			 bool hide_svga)
{
	int32_t n3d;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		vmw_release_device(dev_priv);
	else if (hide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
			  SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}
	memset(dev_priv, 0, sizeof(*dev_priv));

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	rwlock_init(&dev_priv->resource_lock);
	idr_init(&dev_priv->context_idr);
	idr_init(&dev_priv->surface_idr);
	idr_init(&dev_priv->stream_idr);
	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	atomic_set(&dev_priv->fifo_queue_waiters, 0);
	INIT_LIST_HEAD(&dev_priv->surface_lru);
	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
dev_priv->max_gmr_ids = vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); } if (dev_priv->capabilities & SVGA_CAP_GMR2) { dev_priv->max_gmr_pages = vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); dev_priv->memory_size = vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE); dev_priv->memory_size -= dev_priv->vram_size; } else { /* * An arbitrary limit of 512MiB on surface * memory. But all HWV8 hardware supports GMR2. */ dev_priv->memory_size = 512*1024*1024; } mutex_unlock(&dev_priv->hw_mutex); vmw_print_capabilities(dev_priv->capabilities); if (dev_priv->capabilities & SVGA_CAP_GMR) { DRM_INFO("Max GMR ids is %u\n", (unsigned)dev_priv->max_gmr_ids); DRM_INFO("Max GMR descriptors is %u\n", (unsigned)dev_priv->max_gmr_descriptors); } if (dev_priv->capabilities & SVGA_CAP_GMR2) { DRM_INFO("Max number of GMR pages is %u\n", (unsigned)dev_priv->max_gmr_pages); DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", (unsigned)dev_priv->memory_size / 1024); } DRM_INFO("VRAM at 0x%08x size is %u kiB\n", dev_priv->vram_start, dev_priv->vram_size / 1024); DRM_INFO("MMIO at 0x%08x size is %u kiB\n", dev_priv->mmio_start, dev_priv->mmio_size / 1024); ret = vmw_ttm_global_init(dev_priv); if (unlikely(ret != 0)) goto out_err0; vmw_master_init(&dev_priv->fbdev_master); ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); dev_priv->active_master = &dev_priv->fbdev_master; ret = ttm_bo_device_init(&dev_priv->bdev, dev_priv->bo_global_ref.ref.object, &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET, false); if (unlikely(ret != 0)) { DRM_ERROR("Failed initializing TTM buffer object driver.\n"); goto out_err1; } ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, (dev_priv->vram_size >> PAGE_SHIFT)); if (unlikely(ret != 0)) { DRM_ERROR("Failed initializing memory manager for VRAM.\n"); goto out_err2; } dev_priv->has_gmr = true; if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, dev_priv->max_gmr_ids) != 0) { DRM_INFO("No GMR memory available. " "Graphics memory resources are very limited.\n"); dev_priv->has_gmr = false; } dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start, dev_priv->mmio_size, DRM_MTRR_WC); dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start, dev_priv->mmio_size); if (unlikely(dev_priv->mmio_virt == NULL)) { ret = -ENOMEM; DRM_ERROR("Failed mapping MMIO.\n"); goto out_err3; } /* Need mmio memory to check for fifo pitchlock cap. */ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && !vmw_fifo_have_pitchlock(dev_priv)) { ret = -ENOSYS; DRM_ERROR("Hardware has no pitchlock\n"); goto out_err4; } dev_priv->tdev = ttm_object_device_init (dev_priv->mem_global_ref.object, 12); if (unlikely(dev_priv->tdev == NULL)) { DRM_ERROR("Unable to initialize TTM object management.\n"); ret = -ENOMEM; goto out_err4; } dev->dev_private = dev_priv; ret = pci_request_regions(dev->pdev, "vmwgfx probe"); dev_priv->stealth = (ret != 0); if (dev_priv->stealth) { /** * Request at least the mmio PCI resource. */ DRM_INFO("It appears like vesafb is loaded. 
" "Ignore above error if any.\n"); ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); if (unlikely(ret != 0)) { DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); goto out_no_device; } } dev_priv->fman = vmw_fence_manager_init(dev_priv); if (unlikely(dev_priv->fman == NULL)) goto out_no_fman; /* Need to start the fifo to check if we can do screen objects */ ret = vmw_3d_resource_inc(dev_priv, true); if (unlikely(ret != 0)) goto out_no_fifo; vmw_kms_save_vga(dev_priv); /* Start kms and overlay systems, needs fifo. */ ret = vmw_kms_init(dev_priv); if (unlikely(ret != 0)) goto out_no_kms; vmw_overlay_init(dev_priv); /* 3D Depends on Screen Objects being used. */ DRM_INFO("Detected %sdevice 3D availability.\n", vmw_fifo_have_3d(dev_priv) ? "" : "no "); /* We might be done with the fifo now */ if (dev_priv->enable_fb) { vmw_fb_init(dev_priv); } else { vmw_kms_restore_vga(dev_priv); vmw_3d_resource_dec(dev_priv, true); } if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { ret = drm_irq_install(dev); if (unlikely(ret != 0)) { DRM_ERROR("Failed installing irq: %d\n", ret); goto out_no_irq; } } dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; register_pm_notifier(&dev_priv->pm_nb); return 0; out_no_irq: if (dev_priv->enable_fb) vmw_fb_close(dev_priv); vmw_overlay_close(dev_priv); vmw_kms_close(dev_priv); out_no_kms: /* We still have a 3D resource reference held */ if (dev_priv->enable_fb) { vmw_kms_restore_vga(dev_priv); vmw_3d_resource_dec(dev_priv, false); } out_no_fifo: vmw_fence_manager_takedown(dev_priv->fman); out_no_fman: if (dev_priv->stealth) pci_release_region(dev->pdev, 2); else pci_release_regions(dev->pdev); out_no_device: ttm_object_device_release(&dev_priv->tdev); out_err4: iounmap(dev_priv->mmio_virt); out_err3: drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, dev_priv->mmio_size, DRM_MTRR_WC); if (dev_priv->has_gmr) (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); out_err2: (void)ttm_bo_device_release(&dev_priv->bdev); out_err1: vmw_ttm_global_release(dev_priv); out_err0: idr_destroy(&dev_priv->surface_idr); idr_destroy(&dev_priv->context_idr); idr_destroy(&dev_priv->stream_idr); kfree(dev_priv); return ret; } static int vmw_driver_unload(struct drm_device *dev) { struct vmw_private *dev_priv = vmw_priv(dev); unregister_pm_notifier(&dev_priv->pm_nb); if (dev_priv->ctx.cmd_bounce) vfree(dev_priv->ctx.cmd_bounce); if (dev_priv->capabilities & SVGA_CAP_IRQMASK) drm_irq_uninstall(dev_priv->dev); if (dev_priv->enable_fb) { vmw_fb_close(dev_priv); vmw_kms_restore_vga(dev_priv); vmw_3d_resource_dec(dev_priv, false); } vmw_kms_close(dev_priv); vmw_overlay_close(dev_priv); vmw_fence_manager_takedown(dev_priv->fman); if (dev_priv->stealth) pci_release_region(dev->pdev, 2); else pci_release_regions(dev->pdev); ttm_object_device_release(&dev_priv->tdev); iounmap(dev_priv->mmio_virt); drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, dev_priv->mmio_size, DRM_MTRR_WC); if (dev_priv->has_gmr) (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); (void)ttm_bo_device_release(&dev_priv->bdev); vmw_ttm_global_release(dev_priv); idr_destroy(&dev_priv->surface_idr); idr_destroy(&dev_priv->context_idr); idr_destroy(&dev_priv->stream_idr); kfree(dev_priv); return 0; } static void vmw_preclose(struct drm_device *dev, struct drm_file *file_priv) { struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); struct vmw_private *dev_priv = vmw_priv(dev); 
vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events); } static void vmw_postclose(struct drm_device *dev, struct drm_file *file_priv) { struct vmw_fpriv *vmw_fp; vmw_fp = vmw_fpriv(file_priv); ttm_object_file_release(&vmw_fp->tfile); if (vmw_fp->locked_master) drm_master_put(&vmw_fp->locked_master); kfree(vmw_fp); } static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_fpriv *vmw_fp; int ret = -ENOMEM; vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); if (unlikely(vmw_fp == NULL)) return ret; INIT_LIST_HEAD(&vmw_fp->fence_events); vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); if (unlikely(vmw_fp->tfile == NULL)) goto out_no_tfile; file_priv->driver_priv = vmw_fp; if (unlikely(dev_priv->bdev.dev_mapping == NULL)) dev_priv->bdev.dev_mapping = file_priv->filp->f_path.dentry->d_inode->i_mapping; return 0; out_no_tfile: kfree(vmw_fp); return ret; } static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct drm_file *file_priv = filp->private_data; struct drm_device *dev = file_priv->minor->dev; unsigned int nr = DRM_IOCTL_NR(cmd); /* * Do extra checking on driver private ioctls. */ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { struct drm_ioctl_desc *ioctl = &vmw_ioctls[nr - DRM_COMMAND_BASE]; if (unlikely(ioctl->cmd_drv != cmd)) { DRM_ERROR("Invalid command format, ioctl %d\n", nr - DRM_COMMAND_BASE); return -EINVAL; } } return drm_ioctl(filp, cmd, arg); } static int vmw_firstopen(struct drm_device *dev) { struct vmw_private *dev_priv = vmw_priv(dev); dev_priv->is_opened = true; return 0; } static void vmw_lastclose(struct drm_device *dev) { struct vmw_private *dev_priv = vmw_priv(dev); struct drm_crtc *crtc; struct drm_mode_set set; int ret; /** * Do nothing on the lastclose call from drm_unload. 
*/ if (!dev_priv->is_opened) return; dev_priv->is_opened = false; set.x = 0; set.y = 0; set.fb = NULL; set.mode = NULL; set.connectors = NULL; set.num_connectors = 0; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { set.crtc = crtc; ret = crtc->funcs->set_config(&set); WARN_ON(ret != 0); } } static void vmw_master_init(struct vmw_master *vmaster) { ttm_lock_init(&vmaster->lock); INIT_LIST_HEAD(&vmaster->fb_surf); mutex_init(&vmaster->fb_surf_mutex); } static int vmw_master_create(struct drm_device *dev, struct drm_master *master) { struct vmw_master *vmaster; vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); if (unlikely(vmaster == NULL)) return -ENOMEM; vmw_master_init(vmaster); ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); master->driver_priv = vmaster; return 0; } static void vmw_master_destroy(struct drm_device *dev, struct drm_master *master) { struct vmw_master *vmaster = vmw_master(master); master->driver_priv = NULL; kfree(vmaster); } static int vmw_master_set(struct drm_device *dev, struct drm_file *file_priv, bool from_open) { struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); struct vmw_master *active = dev_priv->active_master; struct vmw_master *vmaster = vmw_master(file_priv->master); int ret = 0; if (!dev_priv->enable_fb) { ret = vmw_3d_resource_inc(dev_priv, true); if (unlikely(ret != 0)) return ret; vmw_kms_save_vga(dev_priv); mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 0); mutex_unlock(&dev_priv->hw_mutex); } if (active) { BUG_ON(active != &dev_priv->fbdev_master); ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); if (unlikely(ret != 0)) goto out_no_active_lock; ttm_lock_set_kill(&active->lock, true, SIGTERM); ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); if (unlikely(ret != 0)) { DRM_ERROR("Unable to clean VRAM on " "master drop.\n"); } dev_priv->active_master = NULL; } ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); if (!from_open) { ttm_vt_unlock(&vmaster->lock); BUG_ON(vmw_fp->locked_master != file_priv->master); drm_master_put(&vmw_fp->locked_master); } dev_priv->active_master = vmaster; return 0; out_no_active_lock: if (!dev_priv->enable_fb) { mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 1); mutex_unlock(&dev_priv->hw_mutex); vmw_kms_restore_vga(dev_priv); vmw_3d_resource_dec(dev_priv, true); } return ret; } static void vmw_master_drop(struct drm_device *dev, struct drm_file *file_priv, bool from_release) { struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); struct vmw_master *vmaster = vmw_master(file_priv->master); int ret; /** * Make sure the master doesn't disappear while we have * it locked. 
*/ vmw_fp->locked_master = drm_master_get(file_priv->master); ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); vmw_execbuf_release_pinned_bo(dev_priv, false, 0); if (unlikely((ret != 0))) { DRM_ERROR("Unable to lock TTM at VT switch.\n"); drm_master_put(&vmw_fp->locked_master); } ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); if (!dev_priv->enable_fb) { ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); if (unlikely(ret != 0)) DRM_ERROR("Unable to clean VRAM on master drop.\n"); mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 1); mutex_unlock(&dev_priv->hw_mutex); vmw_kms_restore_vga(dev_priv); vmw_3d_resource_dec(dev_priv, true); } dev_priv->active_master = &dev_priv->fbdev_master; ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); ttm_vt_unlock(&dev_priv->fbdev_master.lock); if (dev_priv->enable_fb) vmw_fb_on(dev_priv); } static void vmw_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); drm_put_dev(dev); } static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, void *ptr) { struct vmw_private *dev_priv = container_of(nb, struct vmw_private, pm_nb); struct vmw_master *vmaster = dev_priv->active_master; switch (val) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: ttm_suspend_lock(&vmaster->lock); /** * This empties VRAM and unbinds all GMR bindings. * Buffer contents is moved to swappable memory. */ vmw_execbuf_release_pinned_bo(dev_priv, false, 0); ttm_bo_swapout_all(&dev_priv->bdev); break; case PM_POST_HIBERNATION: case PM_POST_SUSPEND: case PM_POST_RESTORE: ttm_suspend_unlock(&vmaster->lock); break; case PM_RESTORE_PREPARE: break; default: break; } return 0; } /** * These might not be needed with the virtual SVGA device. */ static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); if (dev_priv->num_3d_resources != 0) { DRM_INFO("Can't suspend or hibernate " "while 3D resources are active.\n"); return -EBUSY; } pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); return 0; } static int vmw_pci_resume(struct pci_dev *pdev) { pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); return pci_enable_device(pdev); } static int vmw_pm_suspend(struct device *kdev) { struct pci_dev *pdev = to_pci_dev(kdev); struct pm_message dummy; dummy.event = 0; return vmw_pci_suspend(pdev, dummy); } static int vmw_pm_resume(struct device *kdev) { struct pci_dev *pdev = to_pci_dev(kdev); return vmw_pci_resume(pdev); } static int vmw_pm_prepare(struct device *kdev) { struct pci_dev *pdev = to_pci_dev(kdev); struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); /** * Release 3d reference held by fbdev and potentially * stop fifo. */ dev_priv->suspended = true; if (dev_priv->enable_fb) vmw_3d_resource_dec(dev_priv, true); if (dev_priv->num_3d_resources != 0) { DRM_INFO("Can't suspend or hibernate " "while 3D resources are active.\n"); if (dev_priv->enable_fb) vmw_3d_resource_inc(dev_priv, true); dev_priv->suspended = false; return -EBUSY; } return 0; } static void vmw_pm_complete(struct device *kdev) { struct pci_dev *pdev = to_pci_dev(kdev); struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); /** * Reclaim 3d reference held by fbdev and potentially * start fifo. 
*/ if (dev_priv->enable_fb) vmw_3d_resource_inc(dev_priv, false); dev_priv->suspended = false; } static const struct dev_pm_ops vmw_pm_ops = { .prepare = vmw_pm_prepare, .complete = vmw_pm_complete, .suspend = vmw_pm_suspend, .resume = vmw_pm_resume, }; static const struct file_operations vmwgfx_driver_fops = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, .unlocked_ioctl = vmw_unlocked_ioctl, .mmap = vmw_mmap, .poll = vmw_fops_poll, .read = vmw_fops_read, .fasync = drm_fasync, #if defined(CONFIG_COMPAT) .compat_ioctl = drm_compat_ioctl, #endif .llseek = noop_llseek, }; static struct drm_driver driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET, .load = vmw_driver_load, .unload = vmw_driver_unload, .firstopen = vmw_firstopen, .lastclose = vmw_lastclose, .irq_preinstall = vmw_irq_preinstall, .irq_postinstall = vmw_irq_postinstall, .irq_uninstall = vmw_irq_uninstall, .irq_handler = vmw_irq_handler, .get_vblank_counter = vmw_get_vblank_counter, .enable_vblank = vmw_enable_vblank, .disable_vblank = vmw_disable_vblank, .reclaim_buffers_locked = NULL, .ioctls = vmw_ioctls, .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), .dma_quiescent = NULL, /*vmw_dma_quiescent, */ .master_create = vmw_master_create, .master_destroy = vmw_master_destroy, .master_set = vmw_master_set, .master_drop = vmw_master_drop, .open = vmw_driver_open, .preclose = vmw_preclose, .postclose = vmw_postclose, .fops = &vmwgfx_driver_fops, .name = VMWGFX_DRIVER_NAME, .desc = VMWGFX_DRIVER_DESC, .date = VMWGFX_DRIVER_DATE, .major = VMWGFX_DRIVER_MAJOR, .minor = VMWGFX_DRIVER_MINOR, .patchlevel = VMWGFX_DRIVER_PATCHLEVEL }; static struct pci_driver vmw_pci_driver = { .name = VMWGFX_DRIVER_NAME, .id_table = vmw_pci_id_list, .probe = vmw_probe, .remove = vmw_remove, .driver = { .pm = &vmw_pm_ops } }; static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { return drm_get_pci_dev(pdev, ent, &driver); } static int __init vmwgfx_init(void) { int ret; ret = drm_pci_init(&driver, &vmw_pci_driver); if (ret) DRM_ERROR("Failed initializing DRM.\n"); return ret; } static void __exit vmwgfx_exit(void) { drm_pci_exit(&driver, &vmw_pci_driver); } module_init(vmwgfx_init); module_exit(vmwgfx_exit); MODULE_AUTHOR("VMware Inc. and others"); MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); MODULE_LICENSE("GPL and additional rights"); MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "." __stringify(VMWGFX_DRIVER_MINOR) "." __stringify(VMWGFX_DRIVER_PATCHLEVEL) "." "0");
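vmw_3d_resource_inc() and vmw_3d_resource_dec() above gate FIFO bring-up on a reference count: the first user initializes the device, the last one releases it, and release_mutex makes the count-and-act step atomic. A compact, self-contained model of that first-on/last-off pattern; the res_gate names and the pthread locking are stand-ins for the driver's private structure and mutex, not part of the source:

#include <pthread.h>
#include <stdio.h>

struct res_gate {
	pthread_mutex_t lock;
	unsigned int users;
};

/* First caller pays for bring-up; the count is rolled back on failure. */
static int gate_get(struct res_gate *g, int (*bring_up)(void))
{
	int ret = 0;

	pthread_mutex_lock(&g->lock);
	if (g->users++ == 0) {
		ret = bring_up();
		if (ret)
			g->users--;
	}
	pthread_mutex_unlock(&g->lock);
	return ret;
}

/* Last caller out performs teardown under the same lock. */
static void gate_put(struct res_gate *g, void (*tear_down)(void))
{
	pthread_mutex_lock(&g->lock);
	if (--g->users == 0)
		tear_down();
	pthread_mutex_unlock(&g->lock);
}

static int fifo_up(void) { puts("fifo up"); return 0; }
static void fifo_down(void) { puts("fifo down"); }

int main(void)
{
	struct res_gate g = { PTHREAD_MUTEX_INITIALIZER, 0 };

	gate_get(&g, fifo_up);		/* prints "fifo up" */
	gate_get(&g, fifo_up);		/* no-op, already up */
	gate_put(&g, fifo_down);	/* still one user */
	gate_put(&g, fifo_down);	/* prints "fifo down" */
	return 0;
}

The rollback in gate_get() mirrors the driver decrementing num_3d_resources when vmw_request_device() fails, which keeps a failed bring-up from wedging the next attempt.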
gpl-2.0
MicronSW/linux-zynq
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
245
60014
/* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2013 QLogic Corporation * * See LICENSE.qlcnic for copyright and licensing details. */ #include "qlcnic_sriov.h" #include "qlcnic.h" #include "qlcnic_hw.h" /* Reset template definitions */ #define QLC_83XX_RESTART_TEMPLATE_SIZE 0x2000 #define QLC_83XX_RESET_TEMPLATE_ADDR 0x4F0000 #define QLC_83XX_RESET_SEQ_VERSION 0x0101 #define QLC_83XX_OPCODE_NOP 0x0000 #define QLC_83XX_OPCODE_WRITE_LIST 0x0001 #define QLC_83XX_OPCODE_READ_WRITE_LIST 0x0002 #define QLC_83XX_OPCODE_POLL_LIST 0x0004 #define QLC_83XX_OPCODE_POLL_WRITE_LIST 0x0008 #define QLC_83XX_OPCODE_READ_MODIFY_WRITE 0x0010 #define QLC_83XX_OPCODE_SEQ_PAUSE 0x0020 #define QLC_83XX_OPCODE_SEQ_END 0x0040 #define QLC_83XX_OPCODE_TMPL_END 0x0080 #define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100 /* EPORT control registers */ #define QLC_83XX_RESET_CONTROL 0x28084E50 #define QLC_83XX_RESET_REG 0x28084E60 #define QLC_83XX_RESET_PORT0 0x28084E70 #define QLC_83XX_RESET_PORT1 0x28084E80 #define QLC_83XX_RESET_PORT2 0x28084E90 #define QLC_83XX_RESET_PORT3 0x28084EA0 #define QLC_83XX_RESET_SRESHIM 0x28084EB0 #define QLC_83XX_RESET_EPGSHIM 0x28084EC0 #define QLC_83XX_RESET_ETHERPCS 0x28084ED0 static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter); static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev); static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter); static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev); static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *); static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *); /* Template header */ struct qlc_83xx_reset_hdr { #if defined(__LITTLE_ENDIAN) u16 version; u16 signature; u16 size; u16 entries; u16 hdr_size; u16 checksum; u16 init_offset; u16 start_offset; #elif defined(__BIG_ENDIAN) u16 signature; u16 version; u16 entries; u16 size; u16 checksum; u16 hdr_size; u16 start_offset; u16 init_offset; #endif } __packed; /* Command entry header. 
*/ struct qlc_83xx_entry_hdr { #if defined(__LITTLE_ENDIAN) u16 cmd; u16 size; u16 count; u16 delay; #elif defined(__BIG_ENDIAN) u16 size; u16 cmd; u16 delay; u16 count; #endif } __packed; /* Generic poll command */ struct qlc_83xx_poll { u32 mask; u32 status; } __packed; /* Read modify write command */ struct qlc_83xx_rmw { u32 mask; u32 xor_value; u32 or_value; #if defined(__LITTLE_ENDIAN) u8 shl; u8 shr; u8 index_a; u8 rsvd; #elif defined(__BIG_ENDIAN) u8 rsvd; u8 index_a; u8 shr; u8 shl; #endif } __packed; /* Generic command with 2 DWORD */ struct qlc_83xx_entry { u32 arg1; u32 arg2; } __packed; /* Generic command with 4 DWORD */ struct qlc_83xx_quad_entry { u32 dr_addr; u32 dr_value; u32 ar_addr; u32 ar_value; } __packed; static const char *const qlc_83xx_idc_states[] = { "Unknown", "Cold", "Init", "Ready", "Need Reset", "Need Quiesce", "Failed", "Quiesce" }; static int qlcnic_83xx_idc_check_driver_presence_reg(struct qlcnic_adapter *adapter) { u32 val; val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); if ((val & 0xFFFF)) return 1; else return 0; } static void qlcnic_83xx_idc_log_state_history(struct qlcnic_adapter *adapter) { u32 cur, prev; cur = adapter->ahw->idc.curr_state; prev = adapter->ahw->idc.prev_state; dev_info(&adapter->pdev->dev, "current state = %s, prev state = %s\n", adapter->ahw->idc.name[cur], adapter->ahw->idc.name[prev]); } static int qlcnic_83xx_idc_update_audit_reg(struct qlcnic_adapter *adapter, u8 mode, int lock) { u32 val; int seconds; if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT); val |= (adapter->portnum & 0xf); val |= mode << 7; if (mode) seconds = jiffies / HZ - adapter->ahw->idc.sec_counter; else seconds = jiffies / HZ; val |= seconds << 8; QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT, val); adapter->ahw->idc.sec_counter = jiffies / HZ; if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } static void qlcnic_83xx_idc_update_minor_version(struct qlcnic_adapter *adapter) { u32 val; val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION); val = val & ~(0x3 << (adapter->portnum * 2)); val = val | (QLC_83XX_IDC_MINOR_VERSION << (adapter->portnum * 2)); QLCWRX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION, val); } static int qlcnic_83xx_idc_update_major_version(struct qlcnic_adapter *adapter, int lock) { u32 val; if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION); val = val & ~0xFF; val = val | QLC_83XX_IDC_MAJOR_VERSION; QLCWRX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION, val); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } static int qlcnic_83xx_idc_update_drv_presence_reg(struct qlcnic_adapter *adapter, int status, int lock) { u32 val; if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); if (status) val = val | (1 << adapter->portnum); else val = val & ~(1 << adapter->portnum); QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val); qlcnic_83xx_idc_update_minor_version(adapter); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } static int qlcnic_83xx_idc_check_major_version(struct qlcnic_adapter *adapter) { u32 val; u8 version; val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION); version = val & 0xFF; if (version != QLC_83XX_IDC_MAJOR_VERSION) { dev_info(&adapter->pdev->dev, "%s:mismatch. 
version 0x%x, expected version 0x%x\n", __func__, version, QLC_83XX_IDC_MAJOR_VERSION); return -EIO; } return 0; } static int qlcnic_83xx_idc_clear_registers(struct qlcnic_adapter *adapter, int lock) { u32 val; if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, 0); /* Clear gracefull reset bit */ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); val &= ~QLC_83XX_IDC_GRACEFULL_RESET; QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } static int qlcnic_83xx_idc_update_drv_ack_reg(struct qlcnic_adapter *adapter, int flag, int lock) { u32 val; if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK); if (flag) val = val | (1 << adapter->portnum); else val = val & ~(1 << adapter->portnum); QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, val); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } static int qlcnic_83xx_idc_check_timeout(struct qlcnic_adapter *adapter, int time_limit) { u64 seconds; seconds = jiffies / HZ - adapter->ahw->idc.sec_counter; if (seconds <= time_limit) return 0; else return -EBUSY; } /** * qlcnic_83xx_idc_check_reset_ack_reg * * @adapter: adapter structure * * Check ACK wait limit and clear the functions which failed to ACK * * Return 0 if all functions have acknowledged the reset request. **/ static int qlcnic_83xx_idc_check_reset_ack_reg(struct qlcnic_adapter *adapter) { int timeout; u32 ack, presence, val; timeout = QLC_83XX_IDC_RESET_TIMEOUT_SECS; ack = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK); presence = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); dev_info(&adapter->pdev->dev, "%s: ack = 0x%x, presence = 0x%x\n", __func__, ack, presence); if (!((ack & presence) == presence)) { if (qlcnic_83xx_idc_check_timeout(adapter, timeout)) { /* Clear functions which failed to ACK */ dev_info(&adapter->pdev->dev, "%s: ACK wait exceeds time limit\n", __func__); val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); val = val & ~(ack ^ presence); if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val); dev_info(&adapter->pdev->dev, "%s: updated drv presence reg = 0x%x\n", __func__, val); qlcnic_83xx_unlock_driver(adapter); return 0; } else { return 1; } } else { dev_info(&adapter->pdev->dev, "%s: Reset ACK received from all functions\n", __func__); return 0; } } /** * qlcnic_83xx_idc_tx_soft_reset * * @adapter: adapter structure * * Handle context deletion and recreation request from transmit routine * * Returns -EBUSY or Success (0) * **/ static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; netif_device_detach(netdev); qlcnic_down(adapter, netdev); qlcnic_up(adapter, netdev); netif_device_attach(netdev); clear_bit(__QLCNIC_RESETTING, &adapter->state); netdev_info(adapter->netdev, "%s: soft reset complete.\n", __func__); return 0; } /** * qlcnic_83xx_idc_detach_driver * * @adapter: adapter structure * Detach net interface, stop TX and cleanup resources before the HW reset. 
* Returns: None * **/ static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter) { int i; struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); qlcnic_83xx_detach_mailbox_work(adapter); /* Disable mailbox interrupt */ qlcnic_83xx_disable_mbx_intr(adapter); qlcnic_down(adapter, netdev); for (i = 0; i < adapter->ahw->num_msix; i++) { adapter->ahw->intr_tbl[i].id = i; adapter->ahw->intr_tbl[i].enabled = 0; adapter->ahw->intr_tbl[i].src = 0; } if (qlcnic_sriov_pf_check(adapter)) qlcnic_sriov_pf_reset(adapter); } /** * qlcnic_83xx_idc_attach_driver * * @adapter: adapter structure * * Re-attach and re-enable net interface * Returns: None * **/ static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; if (netif_running(netdev)) { if (qlcnic_up(adapter, netdev)) goto done; qlcnic_restore_indev_addr(netdev, NETDEV_UP); } done: netif_device_attach(netdev); } static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter, int lock) { if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } qlcnic_83xx_idc_clear_registers(adapter, 0); QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_FAILED); if (lock) qlcnic_83xx_unlock_driver(adapter); qlcnic_83xx_idc_log_state_history(adapter); dev_info(&adapter->pdev->dev, "Device will enter failed state\n"); return 0; } static int qlcnic_83xx_idc_enter_init_state(struct qlcnic_adapter *adapter, int lock) { if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_INIT); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } static int qlcnic_83xx_idc_enter_need_quiesce(struct qlcnic_adapter *adapter, int lock) { if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_NEED_QUISCENT); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } static int qlcnic_83xx_idc_enter_need_reset_state(struct qlcnic_adapter *adapter, int lock) { if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_NEED_RESET); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } static int qlcnic_83xx_idc_enter_ready_state(struct qlcnic_adapter *adapter, int lock) { if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_READY); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } /** * qlcnic_83xx_idc_find_reset_owner_id * * @adapter: adapter structure * * NIC gets precedence over ISCSI and ISCSI has precedence over FCOE. 
* Within the same class, function with lowest PCI ID assumes ownership * * Returns: reset owner id or failure indication (-EIO) * **/ static int qlcnic_83xx_idc_find_reset_owner_id(struct qlcnic_adapter *adapter) { u32 reg, reg1, reg2, i, j, owner, class; reg1 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_1); reg2 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_2); owner = QLCNIC_TYPE_NIC; i = 0; j = 0; reg = reg1; do { class = (((reg & (0xF << j * 4)) >> j * 4) & 0x3); if (class == owner) break; if (i == (QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO - 1)) { reg = reg2; j = 0; } else { j++; } if (i == (QLC_83XX_IDC_MAX_CNA_FUNCTIONS - 1)) { if (owner == QLCNIC_TYPE_NIC) owner = QLCNIC_TYPE_ISCSI; else if (owner == QLCNIC_TYPE_ISCSI) owner = QLCNIC_TYPE_FCOE; else if (owner == QLCNIC_TYPE_FCOE) return -EIO; reg = reg1; j = 0; i = 0; } } while (i++ < QLC_83XX_IDC_MAX_CNA_FUNCTIONS); return i; } static int qlcnic_83xx_idc_restart_hw(struct qlcnic_adapter *adapter, int lock) { int ret = 0; ret = qlcnic_83xx_restart_hw(adapter); if (ret) { qlcnic_83xx_idc_enter_failed_state(adapter, lock); } else { qlcnic_83xx_idc_clear_registers(adapter, lock); ret = qlcnic_83xx_idc_enter_ready_state(adapter, lock); } return ret; } static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter) { u32 status; status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1); if (status & QLCNIC_RCODE_FATAL_ERROR) { dev_err(&adapter->pdev->dev, "peg halt status1=0x%x\n", status); if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) { dev_err(&adapter->pdev->dev, "On board active cooling fan failed. " "Device has been halted.\n"); dev_err(&adapter->pdev->dev, "Replace the adapter.\n"); return -EIO; } } return 0; } int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) { int err; qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox); qlcnic_83xx_enable_mbx_interrupt(adapter); qlcnic_83xx_initialize_nic(adapter, 1); err = qlcnic_sriov_pf_reinit(adapter); if (err) return err; qlcnic_83xx_enable_mbx_interrupt(adapter); if (qlcnic_83xx_configure_opmode(adapter)) { qlcnic_83xx_idc_enter_failed_state(adapter, 1); return -EIO; } if (adapter->nic_ops->init_driver(adapter)) { qlcnic_83xx_idc_enter_failed_state(adapter, 1); return -EIO; } if (adapter->portnum == 0) qlcnic_set_drv_version(adapter); qlcnic_dcb_get_info(adapter->dcb); qlcnic_83xx_idc_attach_driver(adapter); return 0; } static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1); qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); ahw->idc.quiesce_req = 0; ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; ahw->idc.err_code = 0; ahw->idc.collect_dump = 0; ahw->reset_context = 0; adapter->tx_timeo_cnt = 0; ahw->idc.delay_reset = 0; clear_bit(__QLCNIC_RESETTING, &adapter->state); } /** * qlcnic_83xx_idc_ready_state_entry * * @adapter: adapter structure * * Perform ready state initialization, this routine will get invoked only * once from READY state. 
 *
 * Returns: Error code or Success(0)
 *
 **/
int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;

	if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY) {
		qlcnic_83xx_idc_update_idc_params(adapter);

		/* Re-attach the device if required */
		if ((ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
		    (ahw->idc.prev_state == QLC_83XX_IDC_DEV_INIT)) {
			if (qlcnic_83xx_idc_reattach_driver(adapter))
				return -EIO;
		}
	}

	return 0;
}

/**
 * qlcnic_83xx_idc_vnic_pf_entry
 *
 * @adapter: adapter structure
 *
 * Ensure vNIC mode privileged function starts only after vNIC mode is
 * enabled by management function.
 * If vNIC mode is ready, start initialization.
 *
 * Returns: -EIO or 0
 *
 **/
int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *adapter)
{
	u32 state;
	struct qlcnic_hardware_context *ahw = adapter->ahw;

	/* Privileged function waits till mgmt function enables VNIC mode */
	state = QLCRDX(adapter->ahw, QLC_83XX_VNIC_STATE);
	if (state != QLCNIC_DEV_NPAR_OPER) {
		if (!ahw->idc.vnic_wait_limit--) {
			qlcnic_83xx_idc_enter_failed_state(adapter, 1);
			return -EIO;
		}
		dev_info(&adapter->pdev->dev, "vNIC mode disabled\n");
		return -EIO;
	} else {
		/* Perform one time initialization from ready state */
		if (ahw->idc.vnic_state != QLCNIC_DEV_NPAR_OPER) {
			qlcnic_83xx_idc_update_idc_params(adapter);

			/* If the previous state is UNKNOWN, the device was
			 * already attached properly by the init routine */
			if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_UNKNOWN) {
				if (qlcnic_83xx_idc_reattach_driver(adapter))
					return -EIO;
			}
			adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_OPER;
			dev_info(&adapter->pdev->dev, "vNIC mode enabled\n");
		}
	}

	return 0;
}

static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
{
	adapter->ahw->idc.err_code = -EIO;
	dev_err(&adapter->pdev->dev,
		"%s: Device in unknown state\n", __func__);
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return 0;
}

/**
 * qlcnic_83xx_idc_cold_state
 *
 * @adapter: adapter structure
 *
 * If HW is up and running device will enter READY state.
 * If firmware image from host needs to be loaded, device is
 * forced to start with the file firmware image.
 *
 * Returns: Error code or Success(0)
 *
 **/
static int qlcnic_83xx_idc_cold_state_handler(struct qlcnic_adapter *adapter)
{
	qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 0);
	qlcnic_83xx_idc_update_audit_reg(adapter, 1, 0);

	if (qlcnic_load_fw_file) {
		qlcnic_83xx_idc_restart_hw(adapter, 0);
	} else {
		if (qlcnic_83xx_check_hw_status(adapter)) {
			qlcnic_83xx_idc_enter_failed_state(adapter, 0);
			return -EIO;
		} else {
			qlcnic_83xx_idc_enter_ready_state(adapter, 0);
		}
	}
	return 0;
}

/**
 * qlcnic_83xx_idc_init_state
 *
 * @adapter: adapter structure
 *
 * Reset owner will restart the device from this state.
 * Device will enter failed state if it remains
 * in this state for more than DEV_INIT time limit.
 *
 * Returns: Error code or Success(0)
 *
 **/
static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
{
	int timeout, ret = 0;
	u32 owner;

	timeout = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
	if (adapter->ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) {
		owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
		if (adapter->ahw->pci_func == owner)
			ret = qlcnic_83xx_idc_restart_hw(adapter, 1);
	} else {
		ret = qlcnic_83xx_idc_check_timeout(adapter, timeout);
	}

	return ret;
}

/**
 * qlcnic_83xx_idc_ready_state
 *
 * @adapter: adapter structure
 *
 * Perform IDC protocol specified actions after monitoring device state and
 * events.
 *
 * Returns: Error code or Success(0)
 *
 **/
static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	int ret = 0;
	u32 val;

	/* Perform NIC configuration based ready state entry actions */
	if (ahw->idc.state_entry(adapter))
		return -EIO;

	if (qlcnic_check_temp(adapter)) {
		if (ahw->temp == QLCNIC_TEMP_PANIC) {
			qlcnic_83xx_idc_check_fan_failure(adapter);
			dev_err(&adapter->pdev->dev,
				"Error: device temperature %d above limits\n",
				adapter->ahw->temp);
			clear_bit(QLC_83XX_MBX_READY, &mbx->status);
			set_bit(__QLCNIC_RESETTING, &adapter->state);
			qlcnic_83xx_idc_detach_driver(adapter);
			qlcnic_83xx_idc_enter_failed_state(adapter, 1);
			return -EIO;
		}
	}

	val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
	ret = qlcnic_83xx_check_heartbeat(adapter);
	if (ret) {
		adapter->flags |= QLCNIC_FW_HANG;
		if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
			clear_bit(QLC_83XX_MBX_READY, &mbx->status);
			set_bit(__QLCNIC_RESETTING, &adapter->state);
			qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
		} else {
			netdev_info(adapter->netdev,
				    "%s: Auto firmware recovery is disabled\n",
				    __func__);
			qlcnic_83xx_idc_enter_failed_state(adapter, 1);
		}
		return -EIO;
	}

	if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);

		/* Move to need reset state and prepare for reset */
		qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
		return ret;
	}

	/* Check for soft reset request */
	if (ahw->reset_context &&
	    !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
		adapter->ahw->reset_context = 0;
		qlcnic_83xx_idc_tx_soft_reset(adapter);
		return ret;
	}

	/* Move to need quiesce state if requested */
	if (adapter->ahw->idc.quiesce_req) {
		qlcnic_83xx_idc_enter_need_quiesce(adapter, 1);
		qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
		return ret;
	}

	return ret;
}

/**
 * qlcnic_83xx_idc_need_reset_state
 *
 * @adapter: adapter structure
 *
 * Device will remain in this state until:
 *	Reset request ACKs are received from all the functions
 *	Wait time exceeds max time limit
 *
 * Returns: Error code or Success(0)
 *
 **/
static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
{
	struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
	int ret = 0;

	if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
		qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
		set_bit(__QLCNIC_RESETTING, &adapter->state);
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
		if (adapter->ahw->nic_mode == QLCNIC_VNIC_MODE)
			qlcnic_83xx_disable_vnic_mode(adapter, 1);

		if (qlcnic_check_diag_status(adapter)) {
			dev_info(&adapter->pdev->dev,
				 "%s: Wait for diag completion\n", __func__);
			adapter->ahw->idc.delay_reset = 1;
			return 0;
		} else {
			qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
			qlcnic_83xx_idc_detach_driver(adapter);
		}
	}

	if (qlcnic_check_diag_status(adapter)) {
		dev_info(&adapter->pdev->dev,
			 "%s: Wait for diag completion\n", __func__);
		return -1;
	} else {
		if (adapter->ahw->idc.delay_reset) {
			qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
			qlcnic_83xx_idc_detach_driver(adapter);
			adapter->ahw->idc.delay_reset = 0;
		}

		/* Check for ACK from other functions */
		ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter);
		if (ret) {
			dev_info(&adapter->pdev->dev,
				 "%s: Waiting for reset ACK\n", __func__);
			return -1;
		}
	}

	/* Transition to INIT state and restart the HW */
	qlcnic_83xx_idc_enter_init_state(adapter, 1);

	return ret;
}

static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
{
	dev_err(&adapter->pdev->dev,
"%s: TBD\n", __func__); return 0; } static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; u32 val, owner; val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); if (ahw->pci_func == owner) { qlcnic_83xx_stop_hw(adapter); qlcnic_dump_fw(adapter); } } netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n", __func__); clear_bit(__QLCNIC_RESETTING, &adapter->state); ahw->idc.err_code = -EIO; return; } static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter) { dev_info(&adapter->pdev->dev, "%s: TBD\n", __func__); return 0; } static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter, u32 state) { u32 cur, prev, next; cur = adapter->ahw->idc.curr_state; prev = adapter->ahw->idc.prev_state; next = state; if ((next < QLC_83XX_IDC_DEV_COLD) || (next > QLC_83XX_IDC_DEV_QUISCENT)) { dev_err(&adapter->pdev->dev, "%s: curr %d, prev %d, next state %d is invalid\n", __func__, cur, prev, state); return 1; } if ((cur == QLC_83XX_IDC_DEV_UNKNOWN) && (prev == QLC_83XX_IDC_DEV_UNKNOWN)) { if ((next != QLC_83XX_IDC_DEV_COLD) && (next != QLC_83XX_IDC_DEV_READY)) { dev_err(&adapter->pdev->dev, "%s: failed, cur %d prev %d next %d\n", __func__, cur, prev, next); return 1; } } if (next == QLC_83XX_IDC_DEV_INIT) { if ((prev != QLC_83XX_IDC_DEV_INIT) && (prev != QLC_83XX_IDC_DEV_COLD) && (prev != QLC_83XX_IDC_DEV_NEED_RESET)) { dev_err(&adapter->pdev->dev, "%s: failed, cur %d prev %d next %d\n", __func__, cur, prev, next); return 1; } } return 0; } static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter) { if (adapter->fhash.fnum) qlcnic_prune_lb_filters(adapter); } /** * qlcnic_83xx_idc_poll_dev_state * * @work: kernel work queue structure used to schedule the function * * Poll device state periodically and perform state specific * actions defined by Inter Driver Communication (IDC) protocol. 
* * Returns: None * **/ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work) { struct qlcnic_adapter *adapter; u32 state; adapter = container_of(work, struct qlcnic_adapter, fw_work.work); state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); if (qlcnic_83xx_idc_check_state_validity(adapter, state)) { qlcnic_83xx_idc_log_state_history(adapter); adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN; } else { adapter->ahw->idc.curr_state = state; } switch (adapter->ahw->idc.curr_state) { case QLC_83XX_IDC_DEV_READY: qlcnic_83xx_idc_ready_state(adapter); break; case QLC_83XX_IDC_DEV_NEED_RESET: qlcnic_83xx_idc_need_reset_state(adapter); break; case QLC_83XX_IDC_DEV_NEED_QUISCENT: qlcnic_83xx_idc_need_quiesce_state(adapter); break; case QLC_83XX_IDC_DEV_FAILED: qlcnic_83xx_idc_failed_state(adapter); return; case QLC_83XX_IDC_DEV_INIT: qlcnic_83xx_idc_init_state(adapter); break; case QLC_83XX_IDC_DEV_QUISCENT: qlcnic_83xx_idc_quiesce_state(adapter); break; default: qlcnic_83xx_idc_unknown_state(adapter); return; } adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; qlcnic_83xx_periodic_tasks(adapter); /* Re-schedule the function */ if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, adapter->ahw->idc.delay); } static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter) { u32 idc_params, val; if (qlcnic_83xx_lockless_flash_read32(adapter, QLC_83XX_IDC_FLASH_PARAM_ADDR, (u8 *)&idc_params, 1)) { dev_info(&adapter->pdev->dev, "%s:failed to get IDC params from flash\n", __func__); adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS; adapter->reset_ack_timeo = QLC_83XX_IDC_RESET_TIMEOUT_SECS; } else { adapter->dev_init_timeo = idc_params & 0xFFFF; adapter->reset_ack_timeo = ((idc_params >> 16) & 0xFFFF); } adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN; adapter->ahw->idc.prev_state = QLC_83XX_IDC_DEV_UNKNOWN; adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; adapter->ahw->idc.err_code = 0; adapter->ahw->idc.collect_dump = 0; adapter->ahw->idc.name = (char **)qlc_83xx_idc_states; clear_bit(__QLCNIC_RESETTING, &adapter->state); set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); /* Check if reset recovery is disabled */ if (!qlcnic_auto_fw_reset) { /* Propagate do not reset request to other functions */ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); val = val | QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY; QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); } } static int qlcnic_83xx_idc_first_to_load_function_handler(struct qlcnic_adapter *adapter) { u32 state, val; if (qlcnic_83xx_lock_driver(adapter)) return -EIO; /* Clear driver lock register */ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, 0); if (qlcnic_83xx_idc_update_major_version(adapter, 0)) { qlcnic_83xx_unlock_driver(adapter); return -EIO; } state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); if (qlcnic_83xx_idc_check_state_validity(adapter, state)) { qlcnic_83xx_unlock_driver(adapter); return -EIO; } if (state != QLC_83XX_IDC_DEV_COLD && qlcnic_load_fw_file) { QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_COLD); state = QLC_83XX_IDC_DEV_COLD; } adapter->ahw->idc.curr_state = state; /* First to load function should cold boot the device */ if (state == QLC_83XX_IDC_DEV_COLD) qlcnic_83xx_idc_cold_state_handler(adapter); /* Check if reset recovery is enabled */ if (qlcnic_auto_fw_reset) { val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); val = val & ~QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY; 
QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); } qlcnic_83xx_unlock_driver(adapter); return 0; } int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter) { int ret = -EIO; qlcnic_83xx_setup_idc_parameters(adapter); if (qlcnic_83xx_get_reset_instruction_template(adapter)) return ret; if (!qlcnic_83xx_idc_check_driver_presence_reg(adapter)) { if (qlcnic_83xx_idc_first_to_load_function_handler(adapter)) return -EIO; } else { if (qlcnic_83xx_idc_check_major_version(adapter)) return -EIO; } qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); return 0; } void qlcnic_83xx_idc_exit(struct qlcnic_adapter *adapter) { int id; u32 val; while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) usleep_range(10000, 11000); id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); id = id & 0xFF; if (id == adapter->portnum) { dev_err(&adapter->pdev->dev, "%s: wait for lock recovery.. %d\n", __func__, id); msleep(20); id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); id = id & 0xFF; } /* Clear driver presence bit */ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); val = val & ~(1 << adapter->portnum); QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val); clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); clear_bit(__QLCNIC_RESETTING, &adapter->state); cancel_delayed_work_sync(&adapter->fw_work); } void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key) { u32 val; if (qlcnic_sriov_vf_check(adapter)) return; if (qlcnic_83xx_lock_driver(adapter)) { dev_err(&adapter->pdev->dev, "%s:failed, please retry\n", __func__); return; } val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", __func__); qlcnic_83xx_idc_enter_failed_state(adapter, 0); qlcnic_83xx_unlock_driver(adapter); return; } if (key == QLCNIC_FORCE_FW_RESET) { val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); val = val | QLC_83XX_IDC_GRACEFULL_RESET; QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); } else if (key == QLCNIC_FORCE_FW_DUMP_KEY) { adapter->ahw->idc.collect_dump = 1; } qlcnic_83xx_unlock_driver(adapter); return; } static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter) { u8 *p_cache; u32 src, size; u64 dest; int ret = -EIO; src = QLC_83XX_BOOTLOADER_FLASH_ADDR; dest = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_ADDR); size = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_SIZE); /* alignment check */ if (size & 0xF) size = (size + 16) & ~0xF; p_cache = vzalloc(size); if (p_cache == NULL) return -ENOMEM; ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache, size / sizeof(u32)); if (ret) { vfree(p_cache); return ret; } /* 16 byte write to MS memory */ ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache, size / 16); if (ret) { vfree(p_cache); return ret; } vfree(p_cache); return ret; } static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) { struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; const struct firmware *fw = fw_info->fw; u32 dest, *p_cache; int i, ret = -EIO; u8 data[16]; size_t size; u64 addr; dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR); size = (fw->size & ~0xF); p_cache = (u32 *)fw->data; addr = (u64)dest; ret = qlcnic_83xx_ms_mem_write128(adapter, addr, (u32 *)p_cache, size / 16); if (ret) { dev_err(&adapter->pdev->dev, "MS memory write failed\n"); release_firmware(fw); fw_info->fw = NULL; return -EIO; } /* alignment check */ if (fw->size & 0xF) { addr = dest + size; for (i = 0; i < (fw->size & 0xF); i++) data[i] = fw->data[size + i]; for (; i < 
16; i++) data[i] = 0; ret = qlcnic_83xx_ms_mem_write128(adapter, addr, (u32 *)data, 1); if (ret) { dev_err(&adapter->pdev->dev, "MS memory write failed\n"); release_firmware(fw); fw_info->fw = NULL; return -EIO; } } release_firmware(fw); fw_info->fw = NULL; return 0; } static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter) { int i, j; u32 val = 0, val1 = 0, reg = 0; int err = 0; val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG, &err); if (err == -EIO) return; dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val); for (j = 0; j < 2; j++) { if (j == 0) { dev_info(&adapter->pdev->dev, "Port 0 RxB Pause Threshold Regs[TC7..TC0]:"); reg = QLC_83XX_PORT0_THRESHOLD; } else if (j == 1) { dev_info(&adapter->pdev->dev, "Port 1 RxB Pause Threshold Regs[TC7..TC0]:"); reg = QLC_83XX_PORT1_THRESHOLD; } for (i = 0; i < 8; i++) { val = QLCRD32(adapter, reg + (i * 0x4), &err); if (err == -EIO) return; dev_info(&adapter->pdev->dev, "0x%x ", val); } dev_info(&adapter->pdev->dev, "\n"); } for (j = 0; j < 2; j++) { if (j == 0) { dev_info(&adapter->pdev->dev, "Port 0 RxB TC Max Cell Registers[4..1]:"); reg = QLC_83XX_PORT0_TC_MC_REG; } else if (j == 1) { dev_info(&adapter->pdev->dev, "Port 1 RxB TC Max Cell Registers[4..1]:"); reg = QLC_83XX_PORT1_TC_MC_REG; } for (i = 0; i < 4; i++) { val = QLCRD32(adapter, reg + (i * 0x4), &err); if (err == -EIO) return; dev_info(&adapter->pdev->dev, "0x%x ", val); } dev_info(&adapter->pdev->dev, "\n"); } for (j = 0; j < 2; j++) { if (j == 0) { dev_info(&adapter->pdev->dev, "Port 0 RxB Rx TC Stats[TC7..TC0]:"); reg = QLC_83XX_PORT0_TC_STATS; } else if (j == 1) { dev_info(&adapter->pdev->dev, "Port 1 RxB Rx TC Stats[TC7..TC0]:"); reg = QLC_83XX_PORT1_TC_STATS; } for (i = 7; i >= 0; i--) { val = QLCRD32(adapter, reg, &err); if (err == -EIO) return; val &= ~(0x7 << 29); /* Reset bits 29 to 31 */ QLCWR32(adapter, reg, (val | (i << 29))); val = QLCRD32(adapter, reg, &err); if (err == -EIO) return; dev_info(&adapter->pdev->dev, "0x%x ", val); } dev_info(&adapter->pdev->dev, "\n"); } val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, &err); if (err == -EIO) return; val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, &err); if (err == -EIO) return; dev_info(&adapter->pdev->dev, "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n", val, val1); } static void qlcnic_83xx_disable_pause_frames(struct qlcnic_adapter *adapter) { u32 reg = 0, i, j; if (qlcnic_83xx_lock_driver(adapter)) { dev_err(&adapter->pdev->dev, "%s:failed to acquire driver lock\n", __func__); return; } qlcnic_83xx_dump_pause_control_regs(adapter); QLCWR32(adapter, QLC_83XX_SRE_SHIM_REG, 0x0); for (j = 0; j < 2; j++) { if (j == 0) reg = QLC_83XX_PORT0_THRESHOLD; else if (j == 1) reg = QLC_83XX_PORT1_THRESHOLD; for (i = 0; i < 8; i++) QLCWR32(adapter, reg + (i * 0x4), 0x0); } for (j = 0; j < 2; j++) { if (j == 0) reg = QLC_83XX_PORT0_TC_MC_REG; else if (j == 1) reg = QLC_83XX_PORT1_TC_MC_REG; for (i = 0; i < 4; i++) QLCWR32(adapter, reg + (i * 0x4), 0x03FF03FF); } QLCWR32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, 0); QLCWR32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, 0); dev_info(&adapter->pdev->dev, "Disabled pause frames successfully on all ports\n"); qlcnic_83xx_unlock_driver(adapter); } static void qlcnic_83xx_take_eport_out_of_reset(struct qlcnic_adapter *adapter) { QLCWR32(adapter, QLC_83XX_RESET_REG, 0); QLCWR32(adapter, QLC_83XX_RESET_PORT0, 0); QLCWR32(adapter, QLC_83XX_RESET_PORT1, 0); QLCWR32(adapter, QLC_83XX_RESET_PORT2, 0); QLCWR32(adapter, QLC_83XX_RESET_PORT3, 0); QLCWR32(adapter, 
QLC_83XX_RESET_SRESHIM, 0); QLCWR32(adapter, QLC_83XX_RESET_EPGSHIM, 0); QLCWR32(adapter, QLC_83XX_RESET_ETHERPCS, 0); QLCWR32(adapter, QLC_83XX_RESET_CONTROL, 1); } static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev) { u32 heartbeat, peg_status; int retries, ret = -EIO, err = 0; retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT; p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_ALIVE_COUNTER); do { msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS); heartbeat = QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_ALIVE_COUNTER); if (heartbeat != p_dev->heartbeat) { ret = QLCNIC_RCODE_SUCCESS; break; } } while (--retries); if (ret) { dev_err(&p_dev->pdev->dev, "firmware hang detected\n"); qlcnic_83xx_take_eport_out_of_reset(p_dev); qlcnic_83xx_disable_pause_frames(p_dev); peg_status = QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS1); dev_info(&p_dev->pdev->dev, "Dumping HW/FW registers\n" "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" "PEG_NET_4_PC: 0x%x\n", peg_status, QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2), QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0, &err), QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1, &err), QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2, &err), QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3, &err), QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4, &err)); if (QLCNIC_FWERROR_CODE(peg_status) == 0x67) dev_err(&p_dev->pdev->dev, "Device is being reset err code 0x00006700.\n"); } return ret; } static int qlcnic_83xx_check_cmd_peg_status(struct qlcnic_adapter *p_dev) { int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT; u32 val; do { val = QLC_SHARED_REG_RD32(p_dev, QLCNIC_CMDPEG_STATE); if (val == QLC_83XX_CMDPEG_COMPLETE) return 0; msleep(QLCNIC_CMDPEG_CHECK_DELAY); } while (--retries); dev_err(&p_dev->pdev->dev, "%s: failed, state = 0x%x\n", __func__, val); return -EIO; } static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev) { int err; err = qlcnic_83xx_check_cmd_peg_status(p_dev); if (err) return err; err = qlcnic_83xx_check_heartbeat(p_dev); if (err) return err; return err; } static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr, int duration, u32 mask, u32 status) { int timeout_error, err = 0; u32 value; u8 retries; value = QLCRD32(p_dev, addr, &err); if (err == -EIO) return err; retries = duration / 10; do { if ((value & mask) != status) { timeout_error = 1; msleep(duration / 10); value = QLCRD32(p_dev, addr, &err); if (err == -EIO) return err; } else { timeout_error = 0; break; } } while (retries--); if (timeout_error) { p_dev->ahw->reset.seq_error++; dev_err(&p_dev->pdev->dev, "%s: Timeout Err, entry_num = %d\n", __func__, p_dev->ahw->reset.seq_index); dev_err(&p_dev->pdev->dev, "0x%08x 0x%08x 0x%08x\n", value, mask, status); } return timeout_error; } static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev) { u32 sum = 0; u16 *buff = (u16 *)p_dev->ahw->reset.buff; int count = p_dev->ahw->reset.hdr->size / sizeof(u16); while (count-- > 0) sum += *buff++; while (sum >> 16) sum = (sum & 0xFFFF) + (sum >> 16); if (~sum) { return 0; } else { dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); return -1; } } static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev) { struct qlcnic_hardware_context *ahw = p_dev->ahw; u32 addr, count, prev_ver, curr_ver; u8 *p_buff; if (ahw->reset.buff != NULL) { prev_ver = p_dev->fw_version; curr_ver = qlcnic_83xx_get_fw_version(p_dev); if (curr_ver > prev_ver) kfree(ahw->reset.buff); else return 0; } 
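/* No usable cached template at this point: either none has been read * yet, or a newer firmware version was detected above and the stale * buffer was freed. Allocate a fresh buffer and re-read the template * header and body from flash. */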
ahw->reset.seq_error = 0; ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL); if (p_dev->ahw->reset.buff == NULL) return -ENOMEM; p_buff = p_dev->ahw->reset.buff; addr = QLC_83XX_RESET_TEMPLATE_ADDR; count = sizeof(struct qlc_83xx_reset_hdr) / sizeof(u32); /* Copy template header from flash */ if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) { dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__); return -EIO; } ahw->reset.hdr = (struct qlc_83xx_reset_hdr *)ahw->reset.buff; addr = QLC_83XX_RESET_TEMPLATE_ADDR + ahw->reset.hdr->hdr_size; p_buff = ahw->reset.buff + ahw->reset.hdr->hdr_size; count = (ahw->reset.hdr->size - ahw->reset.hdr->hdr_size) / sizeof(u32); /* Copy rest of the template */ if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) { dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__); return -EIO; } if (qlcnic_83xx_reset_template_checksum(p_dev)) return -EIO; /* Get Stop, Start and Init command offsets */ ahw->reset.init_offset = ahw->reset.buff + ahw->reset.hdr->init_offset; ahw->reset.start_offset = ahw->reset.buff + ahw->reset.hdr->start_offset; ahw->reset.stop_offset = ahw->reset.buff + ahw->reset.hdr->hdr_size; return 0; } /* Read Write HW register command */ static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev, u32 raddr, u32 waddr) { int err = 0; u32 value; value = QLCRD32(p_dev, raddr, &err); if (err == -EIO) return; qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value); } /* Read Modify Write HW register command */ static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev, u32 raddr, u32 waddr, struct qlc_83xx_rmw *p_rmw_hdr) { int err = 0; u32 value; if (p_rmw_hdr->index_a) { value = p_dev->ahw->reset.array[p_rmw_hdr->index_a]; } else { value = QLCRD32(p_dev, raddr, &err); if (err == -EIO) return; } value &= p_rmw_hdr->mask; value <<= p_rmw_hdr->shl; value >>= p_rmw_hdr->shr; value |= p_rmw_hdr->or_value; value ^= p_rmw_hdr->xor_value; qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value); } /* Write HW register command */ static void qlcnic_83xx_write_list(struct qlcnic_adapter *p_dev, struct qlc_83xx_entry_hdr *p_hdr) { int i; struct qlc_83xx_entry *entry; entry = (struct qlc_83xx_entry *)((char *)p_hdr + sizeof(struct qlc_83xx_entry_hdr)); for (i = 0; i < p_hdr->count; i++, entry++) { qlcnic_83xx_wrt_reg_indirect(p_dev, entry->arg1, entry->arg2); if (p_hdr->delay) udelay((u32)(p_hdr->delay)); } } /* Read and Write instruction */ static void qlcnic_83xx_read_write_list(struct qlcnic_adapter *p_dev, struct qlc_83xx_entry_hdr *p_hdr) { int i; struct qlc_83xx_entry *entry; entry = (struct qlc_83xx_entry *)((char *)p_hdr + sizeof(struct qlc_83xx_entry_hdr)); for (i = 0; i < p_hdr->count; i++, entry++) { qlcnic_83xx_read_write_crb_reg(p_dev, entry->arg1, entry->arg2); if (p_hdr->delay) udelay((u32)(p_hdr->delay)); } } /* Poll HW register command */ static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev, struct qlc_83xx_entry_hdr *p_hdr) { long delay; struct qlc_83xx_entry *entry; struct qlc_83xx_poll *poll; int i, err = 0; unsigned long arg1, arg2; poll = (struct qlc_83xx_poll *)((char *)p_hdr + sizeof(struct qlc_83xx_entry_hdr)); entry = (struct qlc_83xx_entry *)((char *)poll + sizeof(struct qlc_83xx_poll)); delay = (long)p_hdr->delay; if (!delay) { for (i = 0; i < p_hdr->count; i++, entry++) qlcnic_83xx_poll_reg(p_dev, entry->arg1, delay, poll->mask, poll->status); } else { for (i = 0; i < p_hdr->count; i++, entry++) { arg1 = entry->arg1; arg2 = entry->arg2; if (delay) { if 
(qlcnic_83xx_poll_reg(p_dev, arg1, delay, poll->mask, poll->status)) { QLCRD32(p_dev, arg1, &err); if (err == -EIO) return; QLCRD32(p_dev, arg2, &err); if (err == -EIO) return; } } } } } /* Poll and write HW register command */ static void qlcnic_83xx_poll_write_list(struct qlcnic_adapter *p_dev, struct qlc_83xx_entry_hdr *p_hdr) { int i; long delay; struct qlc_83xx_quad_entry *entry; struct qlc_83xx_poll *poll; poll = (struct qlc_83xx_poll *)((char *)p_hdr + sizeof(struct qlc_83xx_entry_hdr)); entry = (struct qlc_83xx_quad_entry *)((char *)poll + sizeof(struct qlc_83xx_poll)); delay = (long)p_hdr->delay; for (i = 0; i < p_hdr->count; i++, entry++) { qlcnic_83xx_wrt_reg_indirect(p_dev, entry->dr_addr, entry->dr_value); qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr, entry->ar_value); if (delay) qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay, poll->mask, poll->status); } } /* Read Modify Write register command */ static void qlcnic_83xx_read_modify_write(struct qlcnic_adapter *p_dev, struct qlc_83xx_entry_hdr *p_hdr) { int i; struct qlc_83xx_entry *entry; struct qlc_83xx_rmw *rmw_hdr; rmw_hdr = (struct qlc_83xx_rmw *)((char *)p_hdr + sizeof(struct qlc_83xx_entry_hdr)); entry = (struct qlc_83xx_entry *)((char *)rmw_hdr + sizeof(struct qlc_83xx_rmw)); for (i = 0; i < p_hdr->count; i++, entry++) { qlcnic_83xx_rmw_crb_reg(p_dev, entry->arg1, entry->arg2, rmw_hdr); if (p_hdr->delay) udelay((u32)(p_hdr->delay)); } } static void qlcnic_83xx_pause(struct qlc_83xx_entry_hdr *p_hdr) { if (p_hdr->delay) mdelay((u32)((long)p_hdr->delay)); } /* Read and poll register command */ static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev, struct qlc_83xx_entry_hdr *p_hdr) { long delay; int index, i, j, err; struct qlc_83xx_quad_entry *entry; struct qlc_83xx_poll *poll; unsigned long addr; poll = (struct qlc_83xx_poll *)((char *)p_hdr + sizeof(struct qlc_83xx_entry_hdr)); entry = (struct qlc_83xx_quad_entry *)((char *)poll + sizeof(struct qlc_83xx_poll)); delay = (long)p_hdr->delay; for (i = 0; i < p_hdr->count; i++, entry++) { qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr, entry->ar_value); if (delay) { if (!qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay, poll->mask, poll->status)) { index = p_dev->ahw->reset.array_index; addr = entry->dr_addr; j = QLCRD32(p_dev, addr, &err); if (err == -EIO) return; p_dev->ahw->reset.array[index++] = j; if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES) p_dev->ahw->reset.array_index = 1; } } } } static inline void qlcnic_83xx_seq_end(struct qlcnic_adapter *p_dev) { p_dev->ahw->reset.seq_end = 1; } static void qlcnic_83xx_template_end(struct qlcnic_adapter *p_dev) { p_dev->ahw->reset.template_end = 1; if (p_dev->ahw->reset.seq_error == 0) dev_err(&p_dev->pdev->dev, "HW restart process completed successfully.\n"); else dev_err(&p_dev->pdev->dev, "HW restart completed with timeout errors.\n"); } /** * qlcnic_83xx_exec_template_cmd * * @p_dev: adapter structure * @p_buff: Pointer to instruction template * * Template provides instructions to stop, restart and initialize firmware. * These instructions are abstracted as a series of read, write and * poll operations on hardware registers. Register information and operation * specifics are not exposed to the driver. Driver reads the template from * flash and executes the instructions located at pre-defined offsets.
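* * Opcode dispatch summary (a restatement of the switch statement * below): WRITE_LIST, READ_WRITE_LIST, POLL_LIST, POLL_WRITE_LIST, * READ_MODIFY_WRITE and POLL_READ_LIST map to the matching * qlcnic_83xx_*() helpers, SEQ_PAUSE delays the sequence, and * SEQ_END / TMPL_END mark the end of a sequence and of the whole * template respectively.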
* * Returns: None * */ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev, char *p_buff) { int index, entries; struct qlc_83xx_entry_hdr *p_hdr; char *entry = p_buff; p_dev->ahw->reset.seq_end = 0; p_dev->ahw->reset.template_end = 0; entries = p_dev->ahw->reset.hdr->entries; index = p_dev->ahw->reset.seq_index; for (; (!p_dev->ahw->reset.seq_end) && (index < entries); index++) { p_hdr = (struct qlc_83xx_entry_hdr *)entry; switch (p_hdr->cmd) { case QLC_83XX_OPCODE_NOP: break; case QLC_83XX_OPCODE_WRITE_LIST: qlcnic_83xx_write_list(p_dev, p_hdr); break; case QLC_83XX_OPCODE_READ_WRITE_LIST: qlcnic_83xx_read_write_list(p_dev, p_hdr); break; case QLC_83XX_OPCODE_POLL_LIST: qlcnic_83xx_poll_list(p_dev, p_hdr); break; case QLC_83XX_OPCODE_POLL_WRITE_LIST: qlcnic_83xx_poll_write_list(p_dev, p_hdr); break; case QLC_83XX_OPCODE_READ_MODIFY_WRITE: qlcnic_83xx_read_modify_write(p_dev, p_hdr); break; case QLC_83XX_OPCODE_SEQ_PAUSE: qlcnic_83xx_pause(p_hdr); break; case QLC_83XX_OPCODE_SEQ_END: qlcnic_83xx_seq_end(p_dev); break; case QLC_83XX_OPCODE_TMPL_END: qlcnic_83xx_template_end(p_dev); break; case QLC_83XX_OPCODE_POLL_READ_LIST: qlcnic_83xx_poll_read_list(p_dev, p_hdr); break; default: dev_err(&p_dev->pdev->dev, "%s: Unknown opcode 0x%04x in template %d\n", __func__, p_hdr->cmd, index); break; } entry += p_hdr->size; } p_dev->ahw->reset.seq_index = index; } static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev) { p_dev->ahw->reset.seq_index = 0; qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.stop_offset); if (p_dev->ahw->reset.seq_end != 1) dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); } static void qlcnic_83xx_start_hw(struct qlcnic_adapter *p_dev) { qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.start_offset); if (p_dev->ahw->reset.template_end != 1) dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); } static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev) { qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.init_offset); if (p_dev->ahw->reset.seq_end != 1) dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); } static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter) { struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; int err = -EIO; if (request_firmware(&fw_info->fw, fw_info->fw_file_name, &(adapter->pdev->dev))) { dev_err(&adapter->pdev->dev, "No file FW image, loading flash FW image.\n"); QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, QLC_83XX_BOOT_FROM_FLASH); } else { if (qlcnic_83xx_copy_fw_file(adapter)) return err; QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, QLC_83XX_BOOT_FROM_FILE); } return 0; } static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter) { u32 val; int err = -EIO; qlcnic_83xx_stop_hw(adapter); /* Collect FW register dump if required */ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); if (!(val & QLC_83XX_IDC_GRACEFULL_RESET)) qlcnic_dump_fw(adapter); if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", __func__); qlcnic_83xx_idc_enter_failed_state(adapter, 1); return err; } qlcnic_83xx_init_hw(adapter); if (qlcnic_83xx_copy_bootloader(adapter)) return err; /* Boot either flash image or firmware image from host file system */ if (qlcnic_load_fw_file) { if (qlcnic_83xx_load_fw_image_from_host(adapter)) return err; } else { QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, QLC_83XX_BOOT_FROM_FLASH); } qlcnic_83xx_start_hw(adapter); if (qlcnic_83xx_check_hw_status(adapter)) return -EIO; return 
0; } static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter) { int err; struct qlcnic_info nic_info; struct qlcnic_hardware_context *ahw = adapter->ahw; memset(&nic_info, 0, sizeof(struct qlcnic_info)); err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func); if (err) return -EIO; ahw->physical_port = (u8) nic_info.phys_port; ahw->switch_mode = nic_info.switch_mode; ahw->max_tx_ques = nic_info.max_tx_ques; ahw->max_rx_ques = nic_info.max_rx_ques; ahw->capabilities = nic_info.capabilities; ahw->max_mac_filters = nic_info.max_mac_filters; ahw->max_mtu = nic_info.max_mtu; adapter->max_tx_rings = ahw->max_tx_ques; adapter->max_sds_rings = ahw->max_rx_ques; /* eSwitch capability indicates vNIC mode. * vNIC and SRIOV are mutually exclusive operational modes. * If SR-IOV capability is detected, SR-IOV physical function * will get initialized in default mode. * SR-IOV virtual function initialization follows a * different code path and opmode. * SRIOV mode has precedence over vNIC mode. */ if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) return QLC_83XX_DEFAULT_OPMODE; if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) return QLCNIC_VNIC_MODE; return QLC_83XX_DEFAULT_OPMODE; } int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; int ret; ret = qlcnic_83xx_get_nic_configuration(adapter); if (ret == -EIO) return -EIO; if (ret == QLCNIC_VNIC_MODE) { ahw->nic_mode = QLCNIC_VNIC_MODE; if (qlcnic_83xx_config_vnic_opmode(adapter)) return -EIO; adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS; adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS; } else if (ret == QLC_83XX_DEFAULT_OPMODE) { ahw->nic_mode = QLCNIC_DEFAULT_MODE; adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS; } else { return -EIO; } return 0; } static void qlcnic_83xx_config_buff_descriptors(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; if (ahw->port_type == QLCNIC_XGBE) { adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G; adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; } else if (ahw->port_type == QLCNIC_GBE) { adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G; } adapter->num_txd = MAX_CMD_DESCRIPTORS; adapter->max_rds_rings = MAX_RDS_RINGS; } static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter) { int err = -EIO; qlcnic_83xx_get_minidump_template(adapter); if (qlcnic_83xx_get_port_info(adapter)) return err; qlcnic_83xx_config_buff_descriptors(adapter); adapter->ahw->msix_supported = !!qlcnic_use_msi_x; adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; dev_info(&adapter->pdev->dev, "HAL Version: %d\n", adapter->ahw->fw_hal_version); return 0; } #define IS_QLC_83XX_USED(a, b, c) (((1 << a->portnum) & b) || ((c >> 6) & 0x1)) static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter) { struct qlcnic_cmd_args cmd; u32 presence_mask, audit_mask; int status; presence_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); audit_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT); if (IS_QLC_83XX_USED(adapter, presence_mask, audit_mask)) { status = 
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC); if (status) return; cmd.req.arg[1] = BIT_31; status = qlcnic_issue_cmd(adapter, &cmd); if (status) dev_err(&adapter->pdev->dev, "Failed to clean up the function resources\n"); qlcnic_free_mbx_args(&cmd); } } static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct pci_dev *pdev = adapter->pdev; struct qlc_83xx_fw_info *fw_info; int err = 0; ahw->fw_info = kzalloc(sizeof(*fw_info), GFP_KERNEL); if (!ahw->fw_info) { err = -ENOMEM; } else { fw_info = ahw->fw_info; switch (pdev->device) { case PCI_DEVICE_ID_QLOGIC_QLE834X: strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME, QLC_FW_FILE_NAME_LEN); break; case PCI_DEVICE_ID_QLOGIC_QLE844X: strncpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME, QLC_FW_FILE_NAME_LEN); break; default: dev_err(&pdev->dev, "%s: Invalid device id\n", __func__); err = -EINVAL; break; } } return err; } static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter) { u8 rx_cnt = QLCNIC_DEF_SDS_RINGS; u8 tx_cnt = QLCNIC_DEF_TX_RINGS; adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS; adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; if (!adapter->ahw->msix_supported) { rx_cnt = QLCNIC_SINGLE_RING; tx_cnt = QLCNIC_SINGLE_RING; } /* compute and set drv sds rings */ qlcnic_set_tx_ring_count(adapter, tx_cnt); qlcnic_set_sds_ring_count(adapter, rx_cnt); } int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) { struct qlcnic_hardware_context *ahw = adapter->ahw; int err = 0; adapter->rx_mac_learn = false; ahw->msix_supported = !!qlcnic_use_msi_x; qlcnic_83xx_init_rings(adapter); err = qlcnic_83xx_init_mailbox_work(adapter); if (err) goto exit; if (qlcnic_sriov_vf_check(adapter)) { err = qlcnic_sriov_vf_init(adapter, pci_using_dac); if (err) goto detach_mbx; else return err; } if (qlcnic_83xx_read_flash_descriptor_table(adapter) || qlcnic_83xx_read_flash_mfg_id(adapter)) { dev_err(&adapter->pdev->dev, "Failed reading flash mfg id\n"); err = -ENOTRECOVERABLE; goto detach_mbx; } err = qlcnic_83xx_check_hw_status(adapter); if (err) goto detach_mbx; err = qlcnic_83xx_get_fw_info(adapter); if (err) goto detach_mbx; err = qlcnic_83xx_idc_init(adapter); if (err) goto detach_mbx; err = qlcnic_setup_intr(adapter); if (err) { dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); goto disable_intr; } err = qlcnic_83xx_setup_mbx_intr(adapter); if (err) goto disable_mbx_intr; qlcnic_83xx_clear_function_resources(adapter); INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work); qlcnic_83xx_initialize_nic(adapter, 1); /* Configure default, SR-IOV or Virtual NIC mode of operation */ err = qlcnic_83xx_configure_opmode(adapter); if (err) goto disable_mbx_intr; /* Perform operating mode specific initialization */ err = adapter->nic_ops->init_driver(adapter); if (err) goto disable_mbx_intr; /* Periodically monitor device status */ qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work); return 0; disable_mbx_intr: qlcnic_83xx_free_mbx_intr(adapter); disable_intr: qlcnic_teardown_intr(adapter); detach_mbx: qlcnic_83xx_detach_mailbox_work(adapter); qlcnic_83xx_free_mailbox(ahw->mailbox); ahw->mailbox = NULL; exit: return err; } void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlc_83xx_idc *idc = &ahw->idc; clear_bit(QLC_83XX_MBX_READY, &idc->status); cancel_delayed_work_sync(&adapter->fw_work); if (ahw->nic_mode == QLCNIC_VNIC_MODE) 
qlcnic_83xx_disable_vnic_mode(adapter, 1); qlcnic_83xx_idc_detach_driver(adapter); qlcnic_83xx_initialize_nic(adapter, 0); cancel_delayed_work_sync(&adapter->idc_aen_work); } int qlcnic_83xx_aer_reset(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlc_83xx_idc *idc = &ahw->idc; int ret = 0; u32 owner; /* Mark the previous IDC state as NEED_RESET so * that state_entry() will perform the reattachment * and bringup the device */ idc->prev_state = QLC_83XX_IDC_DEV_NEED_RESET; owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); if (ahw->pci_func == owner) { ret = qlcnic_83xx_restart_hw(adapter); if (ret < 0) return ret; qlcnic_83xx_idc_clear_registers(adapter, 0); } ret = idc->state_entry(adapter); return ret; } void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlc_83xx_idc *idc = &ahw->idc; u32 owner; idc->prev_state = QLC_83XX_IDC_DEV_READY; owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); if (ahw->pci_func == owner) qlcnic_83xx_idc_enter_ready_state(adapter, 0); qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 0); }
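/* * AER recovery flow, summarizing the three helpers above: * 1. qlcnic_83xx_aer_stop_poll_work() stops IDC polling and detaches * the driver. * 2. qlcnic_83xx_aer_reset() marks the previous state as NEED_RESET, * lets the reset owner restart the hardware and re-enters the IDC * state machine via state_entry(). * 3. qlcnic_83xx_aer_start_poll_work() returns to the READY state and * re-schedules qlcnic_83xx_idc_poll_dev_state(). */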
gpl-2.0
AndroidGX/SimpleGX-MM-6.0_H811
net/netfilter/ipvs/ip_vs_sync.c
245
52750
/* * IPVS An implementation of the IP virtual server support for the * LINUX operating system. IPVS is now implemented as a module * over the NetFilter framework. IPVS can be used to build a * high-performance and highly available server based on a * cluster of servers. * * Version 1 is capable of handling both version 0 and 1 messages. * Version 0 is the plain old format. * Note: Version 0 receivers will just drop Ver 1 messages. * Version 1 is capable of handling IPv6, persistence data, * time-outs, and firewall marks. * In ver. 1 "ip_vs_sync_conn_options" will be sent in network order. * Ver. 0 can be turned on by sysctl -w net.ipv4.vs.sync_version=0 * * Definitions: Message: a complete datagram * Sync_conn: a part of a Message * Param Data: an option to a Sync_conn. * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * * ip_vs_sync: sync connection info from master load balancer to backups * through multicast * * Changes: * Alexandre Cassen : Added master & backup support at a time. * Alexandre Cassen : Added SyncID support for incoming sync * messages filtering. * Justin Ossevoort : Fix endian problem on sync message size. * Hans Schillstrom : Added Version 1: i.e. IPv6, * Persistence support, fwmark and time-out. */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/inetdevice.h> #include <linux/net.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/in.h> #include <linux/igmp.h> /* for ip_mc_join_group */ #include <linux/udp.h> #include <linux/err.h> #include <linux/kthread.h> #include <linux/wait.h> #include <linux/kernel.h> #include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */ #include <net/ip.h> #include <net/sock.h> #include <net/ip_vs.h> #define IP_VS_SYNC_GROUP 0xe0000051 /* multicast addr - 224.0.0.81 */ #define IP_VS_SYNC_PORT 8848 /* multicast port */ #define SYNC_PROTO_VER 1 /* Protocol version in header */ static struct lock_class_key __ipvs_sync_key; /* * IPVS sync connection entry * Version 0, i.e. original version. */ struct ip_vs_sync_conn_v0 { __u8 reserved; /* Protocol, addresses and port numbers */ __u8 protocol; /* Which protocol (TCP/UDP) */ __be16 cport; __be16 vport; __be16 dport; __be32 caddr; /* client address */ __be32 vaddr; /* virtual address */ __be32 daddr; /* destination address */ /* Flags and state transition */ __be16 flags; /* status flags */ __be16 state; /* state info */ /* The sequence options start here */ }; struct ip_vs_sync_conn_options { struct ip_vs_seq in_seq; /* incoming seq. struct */ struct ip_vs_seq out_seq; /* outgoing seq. struct */ }; /* Sync Connection format (sync_conn) 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Protocol | Ver. | Size | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Flags | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | State | cport | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | vport | dport | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | fwmark | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | timeout (in sec.) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ... | | IP-Addresses (v4 or v6) | | ... | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Optional Parameters.
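Each optional parameter is a TLV: a one-byte Type, a one-byte Length, then Length bytes of data, as parsed by ip_vs_proc_sync_conn():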
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Param. Type | Param. Length | Param. data | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | ... | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | Param Type | Param. Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Param data | | Last Param data should be padded for 32 bit alignment | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ /* * Type 0, IPv4 sync connection format */ struct ip_vs_sync_v4 { __u8 type; __u8 protocol; /* Which protocol (TCP/UDP) */ __be16 ver_size; /* Version msb 4 bits */ /* Flags and state transition */ __be32 flags; /* status flags */ __be16 state; /* state info */ /* Protocol, addresses and port numbers */ __be16 cport; __be16 vport; __be16 dport; __be32 fwmark; /* Firewall mark from skb */ __be32 timeout; /* cp timeout */ __be32 caddr; /* client address */ __be32 vaddr; /* virtual address */ __be32 daddr; /* destination address */ /* The sequence options start here */ /* PE data padded to 32bit alignment after seq. options */ }; /* * Type 2 messages IPv6 */ struct ip_vs_sync_v6 { __u8 type; __u8 protocol; /* Which protocol (TCP/UDP) */ __be16 ver_size; /* Version msb 4 bits */ /* Flags and state transition */ __be32 flags; /* status flags */ __be16 state; /* state info */ /* Protocol, addresses and port numbers */ __be16 cport; __be16 vport; __be16 dport; __be32 fwmark; /* Firewall mark from skb */ __be32 timeout; /* cp timeout */ struct in6_addr caddr; /* client address */ struct in6_addr vaddr; /* virtual address */ struct in6_addr daddr; /* destination address */ /* The sequence options start here */ /* PE data padded to 32bit alignment after seq. options */ }; union ip_vs_sync_conn { struct ip_vs_sync_v4 v4; struct ip_vs_sync_v6 v6; }; /* Bits in Type field in above */ #define STYPE_INET6 0 #define STYPE_F_INET6 (1 << STYPE_INET6) #define SVER_SHIFT 12 /* Shift to get version */ #define SVER_MASK 0x0fff /* Mask to strip version */ #define IPVS_OPT_SEQ_DATA 1 #define IPVS_OPT_PE_DATA 2 #define IPVS_OPT_PE_NAME 3 #define IPVS_OPT_PARAM 7 #define IPVS_OPT_F_SEQ_DATA (1 << (IPVS_OPT_SEQ_DATA-1)) #define IPVS_OPT_F_PE_DATA (1 << (IPVS_OPT_PE_DATA-1)) #define IPVS_OPT_F_PE_NAME (1 << (IPVS_OPT_PE_NAME-1)) #define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1)) struct ip_vs_sync_thread_data { struct net *net; struct socket *sock; char *buf; int id; }; /* Version 0 definition of packet sizes */ #define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn_v0)) #define FULL_CONN_SIZE \ (sizeof(struct ip_vs_sync_conn_v0) + sizeof(struct ip_vs_sync_conn_options)) /* The master multicasts messages (Datagrams) to the backup load balancers in the following format. Version 1: Note: first byte should be Zero, so ver 0 receivers will drop the packet. 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 0 | SyncID | Size | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Count Conns | Version | Reserved, set to Zero | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | IPVS Sync Connection (1) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | . | ~ . ~ | .
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | IPVS Sync Connection (n) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Version 0 Header 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Count Conns | SyncID | Size | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | IPVS Sync Connection (1) | */ #define SYNC_MESG_HEADER_LEN 4 #define MAX_CONNS_PER_SYNCBUFF 255 /* nr_conns in ip_vs_sync_mesg is 8 bit */ /* Version 0 header */ struct ip_vs_sync_mesg_v0 { __u8 nr_conns; __u8 syncid; __be16 size; /* ip_vs_sync_conn entries start here */ }; /* Version 1 header */ struct ip_vs_sync_mesg { __u8 reserved; /* must be zero */ __u8 syncid; __be16 size; __u8 nr_conns; __s8 version; /* SYNC_PROTO_VER */ __u16 spare; /* ip_vs_sync_conn entries start here */ }; struct ip_vs_sync_buff { struct list_head list; unsigned long firstuse; /* pointers for the message data */ struct ip_vs_sync_mesg *mesg; unsigned char *head; unsigned char *end; }; /* * Copy of struct ip_vs_seq * From unaligned network order to aligned host order */ static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho) { ho->init_seq = get_unaligned_be32(&no->init_seq); ho->delta = get_unaligned_be32(&no->delta); ho->previous_delta = get_unaligned_be32(&no->previous_delta); } /* * Copy of struct ip_vs_seq * From Aligned host order to unaligned network order */ static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no) { put_unaligned_be32(ho->init_seq, &no->init_seq); put_unaligned_be32(ho->delta, &no->delta); put_unaligned_be32(ho->previous_delta, &no->previous_delta); } static inline struct ip_vs_sync_buff * sb_dequeue(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) { struct ip_vs_sync_buff *sb; spin_lock_bh(&ipvs->sync_lock); if (list_empty(&ms->sync_queue)) { sb = NULL; __set_current_state(TASK_INTERRUPTIBLE); } else { sb = list_entry(ms->sync_queue.next, struct ip_vs_sync_buff, list); list_del(&sb->list); ms->sync_queue_len--; if (!ms->sync_queue_len) ms->sync_queue_delay = 0; } spin_unlock_bh(&ipvs->sync_lock); return sb; } /* * Create a new sync buffer for Version 1 proto. */ static inline struct ip_vs_sync_buff * ip_vs_sync_buff_create(struct netns_ipvs *ipvs) { struct ip_vs_sync_buff *sb; if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC))) return NULL; sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC); if (!sb->mesg) { kfree(sb); return NULL; } sb->mesg->reserved = 0; /* old nr_conns i.e. 
must be zero now */ sb->mesg->version = SYNC_PROTO_VER; sb->mesg->syncid = ipvs->master_syncid; sb->mesg->size = htons(sizeof(struct ip_vs_sync_mesg)); sb->mesg->nr_conns = 0; sb->mesg->spare = 0; sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg); sb->end = (unsigned char *)sb->mesg + ipvs->send_mesg_maxlen; sb->firstuse = jiffies; return sb; } static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb) { kfree(sb->mesg); kfree(sb); } static inline void sb_queue_tail(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) { struct ip_vs_sync_buff *sb = ms->sync_buff; spin_lock(&ipvs->sync_lock); if (ipvs->sync_state & IP_VS_STATE_MASTER && ms->sync_queue_len < sysctl_sync_qlen_max(ipvs)) { if (!ms->sync_queue_len) schedule_delayed_work(&ms->master_wakeup_work, max(IPVS_SYNC_SEND_DELAY, 1)); ms->sync_queue_len++; list_add_tail(&sb->list, &ms->sync_queue); if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) wake_up_process(ms->master_thread); } else ip_vs_sync_buff_release(sb); spin_unlock(&ipvs->sync_lock); } /* * Get the current sync buffer if it has been created for more * than the specified time or the specified time is zero. */ static inline struct ip_vs_sync_buff * get_curr_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms, unsigned long time) { struct ip_vs_sync_buff *sb; spin_lock_bh(&ipvs->sync_buff_lock); sb = ms->sync_buff; if (sb && time_after_eq(jiffies - sb->firstuse, time)) { ms->sync_buff = NULL; __set_current_state(TASK_RUNNING); } else sb = NULL; spin_unlock_bh(&ipvs->sync_buff_lock); return sb; } static inline int select_master_thread_id(struct netns_ipvs *ipvs, struct ip_vs_conn *cp) { return ((long) cp >> (1 + ilog2(sizeof(*cp)))) & ipvs->threads_mask; } /* * Create a new sync buffer for Version 0 proto. */ static inline struct ip_vs_sync_buff * ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs) { struct ip_vs_sync_buff *sb; struct ip_vs_sync_mesg_v0 *mesg; if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC))) return NULL; sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC); if (!sb->mesg) { kfree(sb); return NULL; } mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg; mesg->nr_conns = 0; mesg->syncid = ipvs->master_syncid; mesg->size = htons(sizeof(struct ip_vs_sync_mesg_v0)); sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0); sb->end = (unsigned char *)mesg + ipvs->send_mesg_maxlen; sb->firstuse = jiffies; return sb; } /* Check if conn should be synced. * pkts: conn packets, use sysctl_sync_threshold to avoid packet check * - (1) sync_refresh_period: reduce sync rate. 
Additionally, retry * sync_retries times with period of sync_refresh_period/8 * - (2) if both sync_refresh_period and sync_period are 0 send sync only * for state changes or only once when pkts matches sync_threshold * - (3) templates: rate can be reduced only with sync_refresh_period or * with (2) */ static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts) { unsigned long orig = ACCESS_ONCE(cp->sync_endtime); unsigned long now = jiffies; unsigned long n = (now + cp->timeout) & ~3UL; unsigned int sync_refresh_period; int sync_period; int force; /* Check if we sync in current state */ if (unlikely(cp->flags & IP_VS_CONN_F_TEMPLATE)) force = 0; else if (likely(cp->protocol == IPPROTO_TCP)) { if (!((1 << cp->state) & ((1 << IP_VS_TCP_S_ESTABLISHED) | (1 << IP_VS_TCP_S_FIN_WAIT) | (1 << IP_VS_TCP_S_CLOSE) | (1 << IP_VS_TCP_S_CLOSE_WAIT) | (1 << IP_VS_TCP_S_TIME_WAIT)))) return 0; force = cp->state != cp->old_state; if (force && cp->state != IP_VS_TCP_S_ESTABLISHED) goto set; } else if (unlikely(cp->protocol == IPPROTO_SCTP)) { if (!((1 << cp->state) & ((1 << IP_VS_SCTP_S_ESTABLISHED) | (1 << IP_VS_SCTP_S_CLOSED) | (1 << IP_VS_SCTP_S_SHUT_ACK_CLI) | (1 << IP_VS_SCTP_S_SHUT_ACK_SER)))) return 0; force = cp->state != cp->old_state; if (force && cp->state != IP_VS_SCTP_S_ESTABLISHED) goto set; } else { /* UDP or another protocol with a single state */ force = 0; } sync_refresh_period = sysctl_sync_refresh_period(ipvs); if (sync_refresh_period > 0) { long diff = n - orig; long min_diff = max(cp->timeout >> 1, 10UL * HZ); /* Avoid sync if difference is below sync_refresh_period * and below half of the timeout. */ if (abs(diff) < min_t(long, sync_refresh_period, min_diff)) { int retries = orig & 3; if (retries >= sysctl_sync_retries(ipvs)) return 0; if (time_before(now, orig - cp->timeout + (sync_refresh_period >> 3))) return 0; n |= retries + 1; } } sync_period = sysctl_sync_period(ipvs); if (sync_period > 0) { if (!(cp->flags & IP_VS_CONN_F_TEMPLATE) && pkts % sync_period != sysctl_sync_threshold(ipvs)) return 0; } else if (sync_refresh_period <= 0 && pkts != sysctl_sync_threshold(ipvs)) return 0; set: cp->old_state = cp->state; n = cmpxchg(&cp->sync_endtime, orig, n); return n == orig || force; } /* * Version 0, can be switched on by sysctl (net.ipv4.vs.sync_version=0). * Add ip_vs_conn information into the current sync_buff. */ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp, int pkts) { struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_sync_mesg_v0 *m; struct ip_vs_sync_conn_v0 *s; struct ip_vs_sync_buff *buff; struct ipvs_master_sync_state *ms; int id; int len; if (unlikely(cp->af != AF_INET)) return; /* Do not sync ONE PACKET */ if (cp->flags & IP_VS_CONN_F_ONE_PACKET) return; if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) return; spin_lock_bh(&ipvs->sync_buff_lock); if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { spin_unlock_bh(&ipvs->sync_buff_lock); return; } id = select_master_thread_id(ipvs, cp); ms = &ipvs->ms[id]; buff = ms->sync_buff; if (buff) { m = (struct ip_vs_sync_mesg_v0 *) buff->mesg; /* Send buffer if it is for v1 */ if (!m->nr_conns) { sb_queue_tail(ipvs, ms); ms->sync_buff = NULL; buff = NULL; } } if (!buff) { buff = ip_vs_sync_buff_create_v0(ipvs); if (!buff) { spin_unlock_bh(&ipvs->sync_buff_lock); pr_err("ip_vs_sync_buff_create failed.\n"); return; } ms->sync_buff = buff; } len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ?
FULL_CONN_SIZE : SIMPLE_CONN_SIZE; m = (struct ip_vs_sync_mesg_v0 *) buff->mesg; s = (struct ip_vs_sync_conn_v0 *) buff->head; /* copy members */ s->reserved = 0; s->protocol = cp->protocol; s->cport = cp->cport; s->vport = cp->vport; s->dport = cp->dport; s->caddr = cp->caddr.ip; s->vaddr = cp->vaddr.ip; s->daddr = cp->daddr.ip; s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED); s->state = htons(cp->state); if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { struct ip_vs_sync_conn_options *opt = (struct ip_vs_sync_conn_options *)&s[1]; memcpy(opt, &cp->in_seq, sizeof(*opt)); } m->nr_conns++; m->size = htons(ntohs(m->size) + len); buff->head += len; /* check if there is space for the next one */ if (buff->head + FULL_CONN_SIZE > buff->end) { sb_queue_tail(ipvs, ms); ms->sync_buff = NULL; } spin_unlock_bh(&ipvs->sync_buff_lock); /* synchronize its controller, if it has one */ cp = cp->control; if (cp) { if (cp->flags & IP_VS_CONN_F_TEMPLATE) pkts = atomic_add_return(1, &cp->in_pkts); else pkts = sysctl_sync_threshold(ipvs); ip_vs_sync_conn(net, cp->control, pkts); } } /* * Add ip_vs_conn information into the current sync_buff. * Called by ip_vs_in. * Sends Version 1 messages. */ void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts) { struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_sync_mesg *m; union ip_vs_sync_conn *s; struct ip_vs_sync_buff *buff; struct ipvs_master_sync_state *ms; int id; __u8 *p; unsigned int len, pe_name_len, pad; /* Handle old version of the protocol */ if (sysctl_sync_ver(ipvs) == 0) { ip_vs_sync_conn_v0(net, cp, pkts); return; } /* Do not sync ONE PACKET */ if (cp->flags & IP_VS_CONN_F_ONE_PACKET) goto control; sloop: if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) goto control; /* Sanity checks */ pe_name_len = 0; if (cp->pe_data_len) { if (!cp->pe_data || !cp->dest) { IP_VS_ERR_RL("SYNC, connection pe_data invalid\n"); return; } pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN); } spin_lock_bh(&ipvs->sync_buff_lock); if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { spin_unlock_bh(&ipvs->sync_buff_lock); return; } id = select_master_thread_id(ipvs, cp); ms = &ipvs->ms[id]; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) len = sizeof(struct ip_vs_sync_v6); else #endif len = sizeof(struct ip_vs_sync_v4); if (cp->flags & IP_VS_CONN_F_SEQ_MASK) len += sizeof(struct ip_vs_sync_conn_options) + 2; if (cp->pe_data_len) len += cp->pe_data_len + 2; /* + Param hdr field */ if (pe_name_len) len += pe_name_len + 2; /* check if there is space for this one */ pad = 0; buff = ms->sync_buff; if (buff) { m = buff->mesg; pad = (4 - (size_t) buff->head) & 3; /* Send buffer if it is for v0 */ if (buff->head + len + pad > buff->end || m->reserved) { sb_queue_tail(ipvs, ms); ms->sync_buff = NULL; buff = NULL; pad = 0; } } if (!buff) { buff = ip_vs_sync_buff_create(ipvs); if (!buff) { spin_unlock_bh(&ipvs->sync_buff_lock); pr_err("ip_vs_sync_buff_create failed.\n"); return; } ms->sync_buff = buff; m = buff->mesg; } p = buff->head; buff->head += pad + len; m->size = htons(ntohs(m->size) + pad + len); /* Add any padding carried over from the previous sync_conn */ while (pad--) *(p++) = 0; s = (union ip_vs_sync_conn *)p; /* Set message type & copy members */ s->v4.type = (cp->af == AF_INET6 ?
STYPE_F_INET6 : 0); s->v4.ver_size = htons(len & SVER_MASK); /* Version 0 */ s->v4.flags = htonl(cp->flags & ~IP_VS_CONN_F_HASHED); s->v4.state = htons(cp->state); s->v4.protocol = cp->protocol; s->v4.cport = cp->cport; s->v4.vport = cp->vport; s->v4.dport = cp->dport; s->v4.fwmark = htonl(cp->fwmark); s->v4.timeout = htonl(cp->timeout / HZ); m->nr_conns++; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) { p += sizeof(struct ip_vs_sync_v6); s->v6.caddr = cp->caddr.in6; s->v6.vaddr = cp->vaddr.in6; s->v6.daddr = cp->daddr.in6; } else #endif { p += sizeof(struct ip_vs_sync_v4); /* options ptr */ s->v4.caddr = cp->caddr.ip; s->v4.vaddr = cp->vaddr.ip; s->v4.daddr = cp->daddr.ip; } if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { *(p++) = IPVS_OPT_SEQ_DATA; *(p++) = sizeof(struct ip_vs_sync_conn_options); hton_seq((struct ip_vs_seq *)p, &cp->in_seq); p += sizeof(struct ip_vs_seq); hton_seq((struct ip_vs_seq *)p, &cp->out_seq); p += sizeof(struct ip_vs_seq); } /* Handle pe data */ if (cp->pe_data_len && cp->pe_data) { *(p++) = IPVS_OPT_PE_DATA; *(p++) = cp->pe_data_len; memcpy(p, cp->pe_data, cp->pe_data_len); p += cp->pe_data_len; if (pe_name_len) { /* Add PE_NAME */ *(p++) = IPVS_OPT_PE_NAME; *(p++) = pe_name_len; memcpy(p, cp->pe->name, pe_name_len); p += pe_name_len; } } spin_unlock_bh(&ipvs->sync_buff_lock); control: /* synchronize its controller, if it has one */ cp = cp->control; if (!cp) return; if (cp->flags & IP_VS_CONN_F_TEMPLATE) pkts = atomic_add_return(1, &cp->in_pkts); else pkts = sysctl_sync_threshold(ipvs); goto sloop; } /* * fill_param helper used by the version 1 receiver */ static inline int ip_vs_conn_fill_param_sync(struct net *net, int af, union ip_vs_sync_conn *sc, struct ip_vs_conn_param *p, __u8 *pe_data, unsigned int pe_data_len, __u8 *pe_name, unsigned int pe_name_len) { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) ip_vs_conn_fill_param(net, af, sc->v6.protocol, (const union nf_inet_addr *)&sc->v6.caddr, sc->v6.cport, (const union nf_inet_addr *)&sc->v6.vaddr, sc->v6.vport, p); else #endif ip_vs_conn_fill_param(net, af, sc->v4.protocol, (const union nf_inet_addr *)&sc->v4.caddr, sc->v4.cport, (const union nf_inet_addr *)&sc->v4.vaddr, sc->v4.vport, p); /* Handle pe data */ if (pe_data_len) { if (pe_name_len) { char buff[IP_VS_PENAME_MAXLEN+1]; memcpy(buff, pe_name, pe_name_len); buff[pe_name_len]=0; p->pe = __ip_vs_pe_getbyname(buff); if (!p->pe) { IP_VS_DBG(3, "BACKUP, no %s engine found/loaded\n", buff); return 1; } } else { IP_VS_ERR_RL("BACKUP, Invalid PE parameters\n"); return 1; } p->pe_data = kmemdup(pe_data, pe_data_len, GFP_ATOMIC); if (!p->pe_data) { if (p->pe->module) module_put(p->pe->module); return -ENOMEM; } p->pe_data_len = pe_data_len; } return 0; } /* * Connection Add / Update. * Common for version 0 and 1 reception of backup sync_conns. * Param: ... * timeout is in sec.
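* A timeout of zero selects the per-protocol, per-state default from * the protocol's timeout table (with a 3 minute fallback); non-zero * values are capped at MAX_SCHEDULE_TIMEOUT / HZ, as done below.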
*/ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, unsigned int flags, unsigned int state, unsigned int protocol, unsigned int type, const union nf_inet_addr *daddr, __be16 dport, unsigned long timeout, __u32 fwmark, struct ip_vs_sync_conn_options *opt) { struct ip_vs_dest *dest; struct ip_vs_conn *cp; struct netns_ipvs *ipvs = net_ipvs(net); if (!(flags & IP_VS_CONN_F_TEMPLATE)) cp = ip_vs_conn_in_get(param); else cp = ip_vs_ct_in_get(param); if (cp) { /* Free pe_data */ kfree(param->pe_data); dest = cp->dest; spin_lock_bh(&cp->lock); if ((cp->flags ^ flags) & IP_VS_CONN_F_INACTIVE && !(flags & IP_VS_CONN_F_TEMPLATE) && dest) { if (flags & IP_VS_CONN_F_INACTIVE) { atomic_dec(&dest->activeconns); atomic_inc(&dest->inactconns); } else { atomic_inc(&dest->activeconns); atomic_dec(&dest->inactconns); } } flags &= IP_VS_CONN_F_BACKUP_UPD_MASK; flags |= cp->flags & ~IP_VS_CONN_F_BACKUP_UPD_MASK; cp->flags = flags; spin_unlock_bh(&cp->lock); if (!dest) ip_vs_try_bind_dest(cp); } else { /* * Find the appropriate destination for the connection. * If it is not found the connection will remain unbound * but still handled. */ rcu_read_lock(); dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr, param->vport, protocol, fwmark, flags); cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark); rcu_read_unlock(); if (!cp) { if (param->pe_data) kfree(param->pe_data); IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); return; } if (!(flags & IP_VS_CONN_F_TEMPLATE)) kfree(param->pe_data); } if (opt) memcpy(&cp->in_seq, opt, sizeof(*opt)); atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); cp->state = state; cp->old_state = cp->state; /* * For Ver 0 messages style * - Not possible to recover the right timeout for templates * - can not find the right fwmark * virtual service. If needed, we can do it for * non-fwmark persistent services. * Ver 1 messages style. * - No problem. 
*/ if (timeout) { if (timeout > MAX_SCHEDULE_TIMEOUT / HZ) timeout = MAX_SCHEDULE_TIMEOUT / HZ; cp->timeout = timeout*HZ; } else { struct ip_vs_proto_data *pd; pd = ip_vs_proto_data_get(net, protocol); if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table) cp->timeout = pd->timeout_table[state]; else cp->timeout = (3*60*HZ); } ip_vs_conn_put(cp); } /* * Process received multicast message for Version 0 */ static void ip_vs_process_message_v0(struct net *net, const char *buffer, const size_t buflen) { struct ip_vs_sync_mesg_v0 *m = (struct ip_vs_sync_mesg_v0 *)buffer; struct ip_vs_sync_conn_v0 *s; struct ip_vs_sync_conn_options *opt; struct ip_vs_protocol *pp; struct ip_vs_conn_param param; char *p; int i; p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0); for (i=0; i<m->nr_conns; i++) { unsigned int flags, state; if (p + SIMPLE_CONN_SIZE > buffer+buflen) { IP_VS_ERR_RL("BACKUP v0, bogus conn\n"); return; } s = (struct ip_vs_sync_conn_v0 *) p; flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC; flags &= ~IP_VS_CONN_F_HASHED; if (flags & IP_VS_CONN_F_SEQ_MASK) { opt = (struct ip_vs_sync_conn_options *)&s[1]; p += FULL_CONN_SIZE; if (p > buffer+buflen) { IP_VS_ERR_RL("BACKUP v0, Dropping buffer bogus conn options\n"); return; } } else { opt = NULL; p += SIMPLE_CONN_SIZE; } state = ntohs(s->state); if (!(flags & IP_VS_CONN_F_TEMPLATE)) { pp = ip_vs_proto_get(s->protocol); if (!pp) { IP_VS_DBG(2, "BACKUP v0, Unsupported protocol %u\n", s->protocol); continue; } if (state >= pp->num_states) { IP_VS_DBG(2, "BACKUP v0, Invalid %s state %u\n", pp->name, state); continue; } } else { /* protocol in templates is not used for state/timeout */ if (state > 0) { IP_VS_DBG(2, "BACKUP v0, Invalid template state %u\n", state); state = 0; } } ip_vs_conn_fill_param(net, AF_INET, s->protocol, (const union nf_inet_addr *)&s->caddr, s->cport, (const union nf_inet_addr *)&s->vaddr, s->vport, &param); /* Send timeout as Zero */ ip_vs_proc_conn(net, &param, flags, state, s->protocol, AF_INET, (union nf_inet_addr *)&s->daddr, s->dport, 0, 0, opt); } } /* * Handle options */ static inline int ip_vs_proc_seqopt(__u8 *p, unsigned int plen, __u32 *opt_flags, struct ip_vs_sync_conn_options *opt) { struct ip_vs_sync_conn_options *topt; topt = (struct ip_vs_sync_conn_options *)p; if (plen != sizeof(struct ip_vs_sync_conn_options)) { IP_VS_DBG(2, "BACKUP, bogus conn options length\n"); return -EINVAL; } if (*opt_flags & IPVS_OPT_F_SEQ_DATA) { IP_VS_DBG(2, "BACKUP, conn options found twice\n"); return -EINVAL; } ntoh_seq(&topt->in_seq, &opt->in_seq); ntoh_seq(&topt->out_seq, &opt->out_seq); *opt_flags |= IPVS_OPT_F_SEQ_DATA; return 0; } static int ip_vs_proc_str(__u8 *p, unsigned int plen, unsigned int *data_len, __u8 **data, unsigned int maxlen, __u32 *opt_flags, __u32 flag) { if (plen > maxlen) { IP_VS_DBG(2, "BACKUP, bogus par.data len > %d\n", maxlen); return -EINVAL; } if (*opt_flags & flag) { IP_VS_DBG(2, "BACKUP, Par.data found twice 0x%x\n", flag); return -EINVAL; } *data_len = plen; *data = p; *opt_flags |= flag; return 0; } /* * Process a Version 1 sync. 
connection */ static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end) { struct ip_vs_sync_conn_options opt; union ip_vs_sync_conn *s; struct ip_vs_protocol *pp; struct ip_vs_conn_param param; __u32 flags; unsigned int af, state, pe_data_len=0, pe_name_len=0; __u8 *pe_data=NULL, *pe_name=NULL; __u32 opt_flags=0; int retc=0; s = (union ip_vs_sync_conn *) p; if (s->v6.type & STYPE_F_INET6) { #ifdef CONFIG_IP_VS_IPV6 af = AF_INET6; p += sizeof(struct ip_vs_sync_v6); #else IP_VS_DBG(3,"BACKUP, IPv6 msg received, and IPVS is not compiled for IPv6\n"); retc = 10; goto out; #endif } else if (!s->v4.type) { af = AF_INET; p += sizeof(struct ip_vs_sync_v4); } else { return -10; } if (p > msg_end) return -20; /* Process optional params, check Type & Len. */ while (p < msg_end) { int ptype; int plen; if (p+2 > msg_end) return -30; ptype = *(p++); plen = *(p++); if (!plen || ((p + plen) > msg_end)) return -40; /* Handle seq option p = param data */ switch (ptype & ~IPVS_OPT_F_PARAM) { case IPVS_OPT_SEQ_DATA: if (ip_vs_proc_seqopt(p, plen, &opt_flags, &opt)) return -50; break; case IPVS_OPT_PE_DATA: if (ip_vs_proc_str(p, plen, &pe_data_len, &pe_data, IP_VS_PEDATA_MAXLEN, &opt_flags, IPVS_OPT_F_PE_DATA)) return -60; break; case IPVS_OPT_PE_NAME: if (ip_vs_proc_str(p, plen, &pe_name_len, &pe_name, IP_VS_PENAME_MAXLEN, &opt_flags, IPVS_OPT_F_PE_NAME)) return -70; break; default: /* Param data mandatory? */ if (!(ptype & IPVS_OPT_F_PARAM)) { IP_VS_DBG(3, "BACKUP, Unknown mandatory param %d found\n", ptype & ~IPVS_OPT_F_PARAM); retc = 20; goto out; } } p += plen; /* Next option */ } /* Get flags and mask off unsupported */ flags = ntohl(s->v4.flags) & IP_VS_CONN_F_BACKUP_MASK; flags |= IP_VS_CONN_F_SYNC; state = ntohs(s->v4.state); if (!(flags & IP_VS_CONN_F_TEMPLATE)) { pp = ip_vs_proto_get(s->v4.protocol); if (!pp) { IP_VS_DBG(3,"BACKUP, Unsupported protocol %u\n", s->v4.protocol); retc = 30; goto out; } if (state >= pp->num_states) { IP_VS_DBG(3, "BACKUP, Invalid %s state %u\n", pp->name, state); retc = 40; goto out; } } else { /* protocol in templates is not used for state/timeout */ if (state > 0) { IP_VS_DBG(3, "BACKUP, Invalid template state %u\n", state); state = 0; } } if (ip_vs_conn_fill_param_sync(net, af, s, &param, pe_data, pe_data_len, pe_name, pe_name_len)) { retc = 50; goto out; } /* If only IPv4, just silently skip IPv6 */ if (af == AF_INET) ip_vs_proc_conn(net, &param, flags, state, s->v4.protocol, af, (union nf_inet_addr *)&s->v4.daddr, s->v4.dport, ntohl(s->v4.timeout), ntohl(s->v4.fwmark), (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) ); #ifdef CONFIG_IP_VS_IPV6 else ip_vs_proc_conn(net, &param, flags, state, s->v6.protocol, af, (union nf_inet_addr *)&s->v6.daddr, s->v6.dport, ntohl(s->v6.timeout), ntohl(s->v6.fwmark), (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) ); #endif ip_vs_pe_put(param.pe); return 0; /* Error exit */ out: IP_VS_DBG(2, "BACKUP, Single msg dropped err:%d\n", retc); return retc; } /* * Process received multicast message and create the corresponding * ip_vs_conn entries.
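 *
 * (Editor's sketch of the Version 1 wire format consumed above: each
 * connection entry begins with a ver_size word whose low SVER_MASK
 * bits give the entry length, followed by the v4 or v6 body and then
 * optional parameters encoded as simple TLVs - one type byte, one
 * length byte, then plen bytes of data for IPVS_OPT_SEQ_DATA,
 * IPVS_OPT_PE_DATA or IPVS_OPT_PE_NAME. Entries are padded to 32-bit
 * alignment by the caller below.)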
* Handles Version 0 & 1 */ static void ip_vs_process_message(struct net *net, __u8 *buffer, const size_t buflen) { struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_sync_mesg *m2 = (struct ip_vs_sync_mesg *)buffer; __u8 *p, *msg_end; int i, nr_conns; if (buflen < sizeof(struct ip_vs_sync_mesg_v0)) { IP_VS_DBG(2, "BACKUP, message header too short\n"); return; } if (buflen != ntohs(m2->size)) { IP_VS_DBG(2, "BACKUP, bogus message size\n"); return; } /* SyncID sanity check */ if (ipvs->backup_syncid != 0 && m2->syncid != ipvs->backup_syncid) { IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid); return; } /* Handle version 1 message */ if ((m2->version == SYNC_PROTO_VER) && (m2->reserved == 0) && (m2->spare == 0)) { msg_end = buffer + sizeof(struct ip_vs_sync_mesg); nr_conns = m2->nr_conns; for (i=0; i<nr_conns; i++) { union ip_vs_sync_conn *s; unsigned int size; int retc; p = msg_end; if (p + sizeof(s->v4) > buffer+buflen) { IP_VS_ERR_RL("BACKUP, Dropping buffer, too small\n"); return; } s = (union ip_vs_sync_conn *)p; size = ntohs(s->v4.ver_size) & SVER_MASK; msg_end = p + size; /* Basic sanity checks */ if (msg_end > buffer+buflen) { IP_VS_ERR_RL("BACKUP, Dropping buffer, msg > buffer\n"); return; } if (ntohs(s->v4.ver_size) >> SVER_SHIFT) { IP_VS_ERR_RL("BACKUP, Dropping buffer, Unknown version %d\n", ntohs(s->v4.ver_size) >> SVER_SHIFT); return; } /* Process a single sync_conn */ retc = ip_vs_proc_sync_conn(net, p, msg_end); if (retc < 0) { IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n", retc); return; } /* Make sure we have 32 bit alignment */ msg_end = p + ((size + 3) & ~3); } } else { /* Old type of message */ ip_vs_process_message_v0(net, buffer, buflen); return; } } /* * Setup sndbuf (mode=1) or rcvbuf (mode=0) */ static void set_sock_size(struct sock *sk, int mode, int val) { /* setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val)); */ /* setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val)); */ lock_sock(sk); if (mode) { val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2, sysctl_wmem_max); sk->sk_sndbuf = val * 2; sk->sk_userlocks |= SOCK_SNDBUF_LOCK; } else { val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2, sysctl_rmem_max); sk->sk_rcvbuf = val * 2; sk->sk_userlocks |= SOCK_RCVBUF_LOCK; } release_sock(sk); } /* * Setup loopback of outgoing multicasts on a sending socket */ static void set_mcast_loop(struct sock *sk, u_char loop) { struct inet_sock *inet = inet_sk(sk); /* setsockopt(sock, SOL_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop)); */ lock_sock(sk); inet->mc_loop = loop ? 1 : 0; release_sock(sk); } /* * Specify TTL for outgoing multicasts on a sending socket */ static void set_mcast_ttl(struct sock *sk, u_char ttl) { struct inet_sock *inet = inet_sk(sk); /* setsockopt(sock, SOL_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl)); */ lock_sock(sk); inet->mc_ttl = ttl; release_sock(sk); } /* * Specify default interface for outgoing multicasts */ static int set_mcast_if(struct sock *sk, char *ifname) { struct net_device *dev; struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); dev = __dev_get_by_name(net, ifname); if (!dev) return -ENODEV; if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) return -EINVAL; lock_sock(sk); inet->mc_index = dev->ifindex; /* inet->mc_addr = 0; */ release_sock(sk); return 0; } /* * Set the maximum length of sync message according to the * specified interface's MTU.
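 *
 * Worked example (editor's illustration; assumes the usual sizes
 * SYNC_MESG_HEADER_LEN = 4 and SIMPLE_CONN_SIZE = 24): on a device
 * with a 1500 byte MTU, num = (1500 - 20 - 8 - 4 - 20) / 24 = 60,
 * so send_mesg_maxlen = 4 + 24 * 60 = 1444 bytes and every sync
 * datagram fits in a single unfragmented packet.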
*/ static int set_sync_mesg_maxlen(struct net *net, int sync_state) { struct netns_ipvs *ipvs = net_ipvs(net); struct net_device *dev; int num; if (sync_state == IP_VS_STATE_MASTER) { dev = __dev_get_by_name(net, ipvs->master_mcast_ifn); if (!dev) return -ENODEV; num = (dev->mtu - sizeof(struct iphdr) - sizeof(struct udphdr) - SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE; ipvs->send_mesg_maxlen = SYNC_MESG_HEADER_LEN + SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF); IP_VS_DBG(7, "setting the maximum length of sync sending " "message %d.\n", ipvs->send_mesg_maxlen); } else if (sync_state == IP_VS_STATE_BACKUP) { dev = __dev_get_by_name(net, ipvs->backup_mcast_ifn); if (!dev) return -ENODEV; ipvs->recv_mesg_maxlen = dev->mtu - sizeof(struct iphdr) - sizeof(struct udphdr); IP_VS_DBG(7, "setting the maximum length of sync receiving " "message %d.\n", ipvs->recv_mesg_maxlen); } return 0; } /* * Join a multicast group. * the group is specified by a class D multicast address 224.0.0.0/8 * in the in_addr structure passed in as a parameter. */ static int join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname) { struct net *net = sock_net(sk); struct ip_mreqn mreq; struct net_device *dev; int ret; memset(&mreq, 0, sizeof(mreq)); memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr)); dev = __dev_get_by_name(net, ifname); if (!dev) return -ENODEV; if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) return -EINVAL; mreq.imr_ifindex = dev->ifindex; lock_sock(sk); ret = ip_mc_join_group(sk, &mreq); release_sock(sk); return ret; } static int bind_mcastif_addr(struct socket *sock, char *ifname) { struct net *net = sock_net(sock->sk); struct net_device *dev; __be32 addr; struct sockaddr_in sin; dev = __dev_get_by_name(net, ifname); if (!dev) return -ENODEV; addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); if (!addr) pr_err("You probably need to specify IP address on " "multicast interface.\n"); IP_VS_DBG(7, "binding socket with (%s) %pI4\n", ifname, &addr); /* Now bind the socket with the address of multicast interface */ sin.sin_family = AF_INET; sin.sin_addr.s_addr = addr; sin.sin_port = 0; return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin)); } /* * Set up sending multicast socket over UDP */ static struct socket *make_send_sock(struct net *net, int id) { struct netns_ipvs *ipvs = net_ipvs(net); /* multicast addr */ struct sockaddr_in mcast_addr = { .sin_family = AF_INET, .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id), .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), }; struct socket *sock; int result; /* First create a socket move it to right name space later */ result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); if (result < 0) { pr_err("Error during creation of socket; terminating\n"); return ERR_PTR(result); } /* * Kernel sockets that are a part of a namespace, should not * hold a reference to a namespace in order to allow to stop it. * After sk_change_net should be released using sk_release_kernel. 
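 *
 * For reference (editor's sketch, not part of the original source),
 * the kernel-side setup below mirrors what a userspace sender would
 * do with plain sockets:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, &mreqn, sizeof(mreqn));
 *	setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl));
 *	setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop));
 *	bind(fd, (struct sockaddr *)&ifaddr, sizeof(ifaddr));
 *	connect(fd, (struct sockaddr *)&mcast_addr, sizeof(mcast_addr));
 *
 * with ttl = 1, loop = 0, ifaddr the address of the multicast
 * interface, and mcast_addr the sync group 224.0.0.81 at port
 * IP_VS_SYNC_PORT + id.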
*/ sk_change_net(sock->sk, net); result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn); if (result < 0) { pr_err("Error setting outbound mcast interface\n"); goto error; } set_mcast_loop(sock->sk, 0); set_mcast_ttl(sock->sk, 1); result = sysctl_sync_sock_size(ipvs); if (result > 0) set_sock_size(sock->sk, 1, result); result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn); if (result < 0) { pr_err("Error binding address of the mcast interface\n"); goto error; } result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr, sizeof(struct sockaddr), 0); if (result < 0) { pr_err("Error connecting to the multicast addr\n"); goto error; } return sock; error: sk_release_kernel(sock->sk); return ERR_PTR(result); } /* * Set up receiving multicast socket over UDP */ static struct socket *make_receive_sock(struct net *net, int id) { struct netns_ipvs *ipvs = net_ipvs(net); /* multicast addr */ struct sockaddr_in mcast_addr = { .sin_family = AF_INET, .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id), .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), }; struct socket *sock; int result; /* First create a socket */ result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); if (result < 0) { pr_err("Error during creation of socket; terminating\n"); return ERR_PTR(result); } /* * Kernel sockets that are a part of a namespace, should not * hold a reference to a namespace in order to allow to stop it. * After sk_change_net should be released using sk_release_kernel. */ sk_change_net(sock->sk, net); /* it is equivalent to the REUSEADDR option in user-space */ sock->sk->sk_reuse = SK_CAN_REUSE; result = sysctl_sync_sock_size(ipvs); if (result > 0) set_sock_size(sock->sk, 0, result); result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr, sizeof(struct sockaddr)); if (result < 0) { pr_err("Error binding to the multicast addr\n"); goto error; } /* join the multicast group */ result = join_mcast_group(sock->sk, (struct in_addr *) &mcast_addr.sin_addr, ipvs->backup_mcast_ifn); if (result < 0) { pr_err("Error joining to the multicast group\n"); goto error; } return sock; error: sk_release_kernel(sock->sk); return ERR_PTR(result); } static int ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length) { struct msghdr msg = {.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL}; struct kvec iov; int len; EnterFunction(7); iov.iov_base = (void *)buffer; iov.iov_len = length; len = kernel_sendmsg(sock, &msg, &iov, 1, (size_t)(length)); LeaveFunction(7); return len; } static int ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg) { int msize; int ret; msize = ntohs(msg->size); ret = ip_vs_send_async(sock, (char *)msg, msize); if (ret >= 0 || ret == -EAGAIN) return ret; pr_err("ip_vs_send_async error %d\n", ret); return 0; } static int ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen) { struct msghdr msg = {NULL,}; struct kvec iov; int len; EnterFunction(7); /* Receive a packet */ iov.iov_base = buffer; iov.iov_len = (size_t)buflen; len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, MSG_DONTWAIT); if (len < 0) return len; LeaveFunction(7); return len; } /* Wakeup the master thread for sending */ static void master_wakeup_work_handler(struct work_struct *work) { struct ipvs_master_sync_state *ms = container_of(work, struct ipvs_master_sync_state, master_wakeup_work.work); struct netns_ipvs *ipvs = ms->ipvs; spin_lock_bh(&ipvs->sync_lock); if (ms->sync_queue_len && ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) { ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE; 
wake_up_process(ms->master_thread); } spin_unlock_bh(&ipvs->sync_lock); } /* Get next buffer to send */ static inline struct ip_vs_sync_buff * next_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) { struct ip_vs_sync_buff *sb; sb = sb_dequeue(ipvs, ms); if (sb) return sb; /* Do not delay entries in buffer for more than 2 seconds */ return get_curr_sync_buff(ipvs, ms, IPVS_SYNC_FLUSH_TIME); } static int sync_thread_master(void *data) { struct ip_vs_sync_thread_data *tinfo = data; struct netns_ipvs *ipvs = net_ipvs(tinfo->net); struct ipvs_master_sync_state *ms = &ipvs->ms[tinfo->id]; struct sock *sk = tinfo->sock->sk; struct ip_vs_sync_buff *sb; pr_info("sync thread started: state = MASTER, mcast_ifn = %s, " "syncid = %d, id = %d\n", ipvs->master_mcast_ifn, ipvs->master_syncid, tinfo->id); for (;;) { sb = next_sync_buff(ipvs, ms); if (unlikely(kthread_should_stop())) break; if (!sb) { schedule_timeout(IPVS_SYNC_CHECK_PERIOD); continue; } while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) { int ret = 0; __wait_event_interruptible(*sk_sleep(sk), sock_writeable(sk) || kthread_should_stop(), ret); if (unlikely(kthread_should_stop())) goto done; } ip_vs_sync_buff_release(sb); } done: __set_current_state(TASK_RUNNING); if (sb) ip_vs_sync_buff_release(sb); /* clean up the sync_buff queue */ while ((sb = sb_dequeue(ipvs, ms))) ip_vs_sync_buff_release(sb); __set_current_state(TASK_RUNNING); /* clean up the current sync_buff */ sb = get_curr_sync_buff(ipvs, ms, 0); if (sb) ip_vs_sync_buff_release(sb); /* release the sending multicast socket */ sk_release_kernel(tinfo->sock->sk); kfree(tinfo); return 0; } static int sync_thread_backup(void *data) { struct ip_vs_sync_thread_data *tinfo = data; struct netns_ipvs *ipvs = net_ipvs(tinfo->net); int len; pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, " "syncid = %d, id = %d\n", ipvs->backup_mcast_ifn, ipvs->backup_syncid, tinfo->id); while (!kthread_should_stop()) { wait_event_interruptible(*sk_sleep(tinfo->sock->sk), !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue) || kthread_should_stop()); /* do we have data now? 
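 * (The loop below drains the receive queue with non-blocking reads;
 * ip_vs_receive() passes MSG_DONTWAIT, so -EAGAIN simply means the
 * queue is empty and we go back to sleeping on sk_sleep().)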
*/ while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) { len = ip_vs_receive(tinfo->sock, tinfo->buf, ipvs->recv_mesg_maxlen); if (len <= 0) { if (len != -EAGAIN) pr_err("receiving message error\n"); break; } ip_vs_process_message(tinfo->net, tinfo->buf, len); } } /* release the receiving multicast socket */ sk_release_kernel(tinfo->sock->sk); kfree(tinfo->buf); kfree(tinfo); return 0; } int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) { struct ip_vs_sync_thread_data *tinfo; struct task_struct **array = NULL, *task; struct socket *sock; struct netns_ipvs *ipvs = net_ipvs(net); char *name; int (*threadfn)(void *data); int id, count; int result = -ENOMEM; IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", sizeof(struct ip_vs_sync_conn_v0)); if (!ipvs->sync_state) { count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX); ipvs->threads_mask = count - 1; } else count = ipvs->threads_mask + 1; if (state == IP_VS_STATE_MASTER) { if (ipvs->ms) return -EEXIST; strlcpy(ipvs->master_mcast_ifn, mcast_ifn, sizeof(ipvs->master_mcast_ifn)); ipvs->master_syncid = syncid; name = "ipvs-m:%d:%d"; threadfn = sync_thread_master; } else if (state == IP_VS_STATE_BACKUP) { if (ipvs->backup_threads) return -EEXIST; strlcpy(ipvs->backup_mcast_ifn, mcast_ifn, sizeof(ipvs->backup_mcast_ifn)); ipvs->backup_syncid = syncid; name = "ipvs-b:%d:%d"; threadfn = sync_thread_backup; } else { return -EINVAL; } if (state == IP_VS_STATE_MASTER) { struct ipvs_master_sync_state *ms; ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL); if (!ipvs->ms) goto out; ms = ipvs->ms; for (id = 0; id < count; id++, ms++) { INIT_LIST_HEAD(&ms->sync_queue); ms->sync_queue_len = 0; ms->sync_queue_delay = 0; INIT_DELAYED_WORK(&ms->master_wakeup_work, master_wakeup_work_handler); ms->ipvs = ipvs; } } else { array = kzalloc(count * sizeof(struct task_struct *), GFP_KERNEL); if (!array) goto out; } set_sync_mesg_maxlen(net, state); tinfo = NULL; for (id = 0; id < count; id++) { if (state == IP_VS_STATE_MASTER) sock = make_send_sock(net, id); else sock = make_receive_sock(net, id); if (IS_ERR(sock)) { result = PTR_ERR(sock); goto outtinfo; } tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL); if (!tinfo) goto outsocket; tinfo->net = net; tinfo->sock = sock; if (state == IP_VS_STATE_BACKUP) { tinfo->buf = kmalloc(ipvs->recv_mesg_maxlen, GFP_KERNEL); if (!tinfo->buf) goto outtinfo; } else { tinfo->buf = NULL; } tinfo->id = id; task = kthread_run(threadfn, tinfo, name, ipvs->gen, id); if (IS_ERR(task)) { result = PTR_ERR(task); goto outtinfo; } tinfo = NULL; if (state == IP_VS_STATE_MASTER) ipvs->ms[id].master_thread = task; else array[id] = task; } /* mark as active */ if (state == IP_VS_STATE_BACKUP) ipvs->backup_threads = array; spin_lock_bh(&ipvs->sync_buff_lock); ipvs->sync_state |= state; spin_unlock_bh(&ipvs->sync_buff_lock); /* increase the module use count */ ip_vs_use_count_inc(); return 0; outsocket: sk_release_kernel(sock->sk); outtinfo: if (tinfo) { sk_release_kernel(tinfo->sock->sk); kfree(tinfo->buf); kfree(tinfo); } count = id; while (count-- > 0) { if (state == IP_VS_STATE_MASTER) kthread_stop(ipvs->ms[count].master_thread); else kthread_stop(array[count]); } kfree(array); out: if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { kfree(ipvs->ms); ipvs->ms = NULL; } return result; } int stop_sync_thread(struct net *net, int state) { struct netns_ipvs *ipvs = net_ipvs(net); struct task_struct **array; int id; int retc =
-EINVAL; IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); if (state == IP_VS_STATE_MASTER) { if (!ipvs->ms) return -ESRCH; /* * The lock synchronizes with sb_queue_tail(), so that we don't * add sync buffers to the queue, when we are already in * progress of stopping the master sync daemon. */ spin_lock_bh(&ipvs->sync_buff_lock); spin_lock(&ipvs->sync_lock); ipvs->sync_state &= ~IP_VS_STATE_MASTER; spin_unlock(&ipvs->sync_lock); spin_unlock_bh(&ipvs->sync_buff_lock); retc = 0; for (id = ipvs->threads_mask; id >= 0; id--) { struct ipvs_master_sync_state *ms = &ipvs->ms[id]; int ret; pr_info("stopping master sync thread %d ...\n", task_pid_nr(ms->master_thread)); cancel_delayed_work_sync(&ms->master_wakeup_work); ret = kthread_stop(ms->master_thread); if (retc >= 0) retc = ret; } kfree(ipvs->ms); ipvs->ms = NULL; } else if (state == IP_VS_STATE_BACKUP) { if (!ipvs->backup_threads) return -ESRCH; ipvs->sync_state &= ~IP_VS_STATE_BACKUP; array = ipvs->backup_threads; retc = 0; for (id = ipvs->threads_mask; id >= 0; id--) { int ret; pr_info("stopping backup sync thread %d ...\n", task_pid_nr(array[id])); ret = kthread_stop(array[id]); if (retc >= 0) retc = ret; } kfree(array); ipvs->backup_threads = NULL; } /* decrease the module use count */ ip_vs_use_count_dec(); return retc; } /* * Initialize data struct for each netns */ int __net_init ip_vs_sync_net_init(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key); spin_lock_init(&ipvs->sync_lock); spin_lock_init(&ipvs->sync_buff_lock); return 0; } void ip_vs_sync_net_cleanup(struct net *net) { int retc; struct netns_ipvs *ipvs = net_ipvs(net); mutex_lock(&ipvs->sync_mutex); retc = stop_sync_thread(net, IP_VS_STATE_MASTER); if (retc && retc != -ESRCH) pr_err("Failed to stop Master Daemon\n"); retc = stop_sync_thread(net, IP_VS_STATE_BACKUP); if (retc && retc != -ESRCH) pr_err("Failed to stop Backup Daemon\n"); mutex_unlock(&ipvs->sync_mutex); }
gpl-2.0
embeddedarm/linux-3.14-pxa16x
arch/x86/kernel/cpu/mcheck/mce.c
245
59078
/* * Machine check handler. * * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. * Rest from unknown author(s). * 2004 Andi Kleen. Rewrote most of it. * Copyright 2008 Intel Corporation * Author: Andi Kleen */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/thread_info.h> #include <linux/capability.h> #include <linux/miscdevice.h> #include <linux/ratelimit.h> #include <linux/kallsyms.h> #include <linux/rcupdate.h> #include <linux/kobject.h> #include <linux/uaccess.h> #include <linux/kdebug.h> #include <linux/kernel.h> #include <linux/percpu.h> #include <linux/string.h> #include <linux/device.h> #include <linux/syscore_ops.h> #include <linux/delay.h> #include <linux/ctype.h> #include <linux/sched.h> #include <linux/sysfs.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/poll.h> #include <linux/nmi.h> #include <linux/cpu.h> #include <linux/smp.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/debugfs.h> #include <linux/irq_work.h> #include <linux/export.h> #include <asm/processor.h> #include <asm/mce.h> #include <asm/msr.h> #include "mce-internal.h" static DEFINE_MUTEX(mce_chrdev_read_mutex); #define rcu_dereference_check_mce(p) \ rcu_dereference_index_check((p), \ rcu_read_lock_sched_held() || \ lockdep_is_held(&mce_chrdev_read_mutex)) #define CREATE_TRACE_POINTS #include <trace/events/mce.h> #define SPINUNIT 100 /* 100ns */ atomic_t mce_entry; DEFINE_PER_CPU(unsigned, mce_exception_count); struct mce_bank *mce_banks __read_mostly; struct mca_config mca_cfg __read_mostly = { .bootlog = -1, /* * Tolerant levels: * 0: always panic on uncorrected errors, log corrected errors * 1: panic or SIGBUS on uncorrected errors, log corrected errors * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors * 3: never panic or SIGBUS, log all errors (for testing only) */ .tolerant = 1, .monarch_timeout = -1 }; /* User mode helper program triggered by machine check event */ static unsigned long mce_need_notify; static char mce_helper[128]; static char *mce_helper_argv[2] = { mce_helper, NULL }; static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait); static DEFINE_PER_CPU(struct mce, mces_seen); static int cpu_missing; /* * MCA banks polled by the period polling timer for corrected events. * With Intel CMCI, this only has MCA banks which do not support CMCI (if any). */ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL }; /* * MCA banks controlled through firmware first for corrected errors. * This is a global list of banks for which we won't enable CMCI and we * won't poll. Firmware controls these banks and is responsible for * reporting corrected errors through GHES. Uncorrected/recoverable * errors are still notified through a machine check. */ mce_banks_t mce_banks_ce_disabled; static DEFINE_PER_CPU(struct work_struct, mce_work); static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); /* * CPU/chipset specific EDAC code can register a notifier call here to print * MCE errors in a human-readable form. 
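 *
 * Minimal registration sketch (editor's illustration; the callback
 * and notifier names are invented):
 *
 *	static int my_decoder(struct notifier_block *nb,
 *			      unsigned long val, void *data)
 *	{
 *		struct mce *m = data;
 *
 *		pr_info("bank %d status 0x%llx\n", m->bank, m->status);
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = {
 *		.notifier_call	= my_decoder,
 *	};
 *	mce_register_decode_chain(&my_nb);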
*/ ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); /* Do initial initialization of a struct mce */ void mce_setup(struct mce *m) { memset(m, 0, sizeof(struct mce)); m->cpu = m->extcpu = smp_processor_id(); rdtscll(m->tsc); /* We hope get_seconds stays lockless */ m->time = get_seconds(); m->cpuvendor = boot_cpu_data.x86_vendor; m->cpuid = cpuid_eax(1); m->socketid = cpu_data(m->extcpu).phys_proc_id; m->apicid = cpu_data(m->extcpu).initial_apicid; rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap); } DEFINE_PER_CPU(struct mce, injectm); EXPORT_PER_CPU_SYMBOL_GPL(injectm); /* * Lockless MCE logging infrastructure. * This avoids deadlocks on printk locks without having to break locks. Also * separate MCEs from kernel messages to avoid bogus bug reports. */ static struct mce_log mcelog = { .signature = MCE_LOG_SIGNATURE, .len = MCE_LOG_LEN, .recordlen = sizeof(struct mce), }; void mce_log(struct mce *mce) { unsigned next, entry; int ret = 0; /* Emit the trace record: */ trace_mce_record(mce); ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce); if (ret == NOTIFY_STOP) return; mce->finished = 0; wmb(); for (;;) { entry = rcu_dereference_check_mce(mcelog.next); for (;;) { /* * When the buffer fills up discard new entries. * Assume that the earlier errors are the more * interesting ones: */ if (entry >= MCE_LOG_LEN) { set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags); return; } /* Old left over entry. Skip: */ if (mcelog.entry[entry].finished) { entry++; continue; } break; } smp_rmb(); next = entry + 1; if (cmpxchg(&mcelog.next, entry, next) == entry) break; } memcpy(mcelog.entry + entry, mce, sizeof(struct mce)); wmb(); mcelog.entry[entry].finished = 1; wmb(); mce->finished = 1; set_bit(0, &mce_need_notify); } static void drain_mcelog_buffer(void) { unsigned int next, i, prev = 0; next = ACCESS_ONCE(mcelog.next); do { struct mce *m; /* drain what was logged during boot */ for (i = prev; i < next; i++) { unsigned long start = jiffies; unsigned retries = 1; m = &mcelog.entry[i]; while (!m->finished) { if (time_after_eq(jiffies, start + 2*retries)) retries++; cpu_relax(); if (!m->finished && retries >= 4) { pr_err("skipping error being logged currently!\n"); break; } } smp_rmb(); atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m); } memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m)); prev = next; next = cmpxchg(&mcelog.next, prev, 0); } while (next != prev); } void mce_register_decode_chain(struct notifier_block *nb) { atomic_notifier_chain_register(&x86_mce_decoder_chain, nb); drain_mcelog_buffer(); } EXPORT_SYMBOL_GPL(mce_register_decode_chain); void mce_unregister_decode_chain(struct notifier_block *nb) { atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb); } EXPORT_SYMBOL_GPL(mce_unregister_decode_chain); static void print_mce(struct mce *m) { int ret = 0; pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n", m->extcpu, m->mcgstatus, m->bank, m->status); if (m->ip) { pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ", !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", m->cs, m->ip); if (m->cs == __KERNEL_CS) print_symbol("{%s}", m->ip); pr_cont("\n"); } pr_emerg(HW_ERR "TSC %llx ", m->tsc); if (m->addr) pr_cont("ADDR %llx ", m->addr); if (m->misc) pr_cont("MISC %llx ", m->misc); pr_cont("\n"); /* * Note this output is parsed by external tools and old fields * should not be changed. 
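 *
 * A rendered sample of the line printed below (all values invented
 * for illustration):
 *
 *	PROCESSOR 0:306c3 TIME 1391744285 SOCKET 0 APIC 0 microcode 19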
*/ pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n", m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid, cpu_data(m->extcpu).microcode); /* * Print out human-readable details about the MCE error, * (if the CPU has an implementation for that) */ ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m); if (ret == NOTIFY_STOP) return; pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n"); } #define PANIC_TIMEOUT 5 /* 5 seconds */ static atomic_t mce_paniced; static int fake_panic; static atomic_t mce_fake_paniced; /* Panic in progress. Enable interrupts and wait for final IPI */ static void wait_for_panic(void) { long timeout = PANIC_TIMEOUT*USEC_PER_SEC; preempt_disable(); local_irq_enable(); while (timeout-- > 0) udelay(1); if (panic_timeout == 0) panic_timeout = mca_cfg.panic_timeout; panic("Panicing machine check CPU died"); } static void mce_panic(char *msg, struct mce *final, char *exp) { int i, apei_err = 0; if (!fake_panic) { /* * Make sure only one CPU runs in machine check panic */ if (atomic_inc_return(&mce_paniced) > 1) wait_for_panic(); barrier(); bust_spinlocks(1); console_verbose(); } else { /* Don't log too much for fake panic */ if (atomic_inc_return(&mce_fake_paniced) > 1) return; } /* First print corrected ones that are still unlogged */ for (i = 0; i < MCE_LOG_LEN; i++) { struct mce *m = &mcelog.entry[i]; if (!(m->status & MCI_STATUS_VAL)) continue; if (!(m->status & MCI_STATUS_UC)) { print_mce(m); if (!apei_err) apei_err = apei_write_mce(m); } } /* Now print uncorrected but with the final one last */ for (i = 0; i < MCE_LOG_LEN; i++) { struct mce *m = &mcelog.entry[i]; if (!(m->status & MCI_STATUS_VAL)) continue; if (!(m->status & MCI_STATUS_UC)) continue; if (!final || memcmp(m, final, sizeof(struct mce))) { print_mce(m); if (!apei_err) apei_err = apei_write_mce(m); } } if (final) { print_mce(final); if (!apei_err) apei_err = apei_write_mce(final); } if (cpu_missing) pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n"); if (exp) pr_emerg(HW_ERR "Machine check: %s\n", exp); if (!fake_panic) { if (panic_timeout == 0) panic_timeout = mca_cfg.panic_timeout; panic(msg); } else pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg); } /* Support code for software error injection */ static int msr_to_offset(u32 msr) { unsigned bank = __this_cpu_read(injectm.bank); if (msr == mca_cfg.rip_msr) return offsetof(struct mce, ip); if (msr == MSR_IA32_MCx_STATUS(bank)) return offsetof(struct mce, status); if (msr == MSR_IA32_MCx_ADDR(bank)) return offsetof(struct mce, addr); if (msr == MSR_IA32_MCx_MISC(bank)) return offsetof(struct mce, misc); if (msr == MSR_IA32_MCG_STATUS) return offsetof(struct mce, mcgstatus); return -1; } /* MSR access wrappers used for error injection */ static u64 mce_rdmsrl(u32 msr) { u64 v; if (__this_cpu_read(injectm.finished)) { int offset = msr_to_offset(msr); if (offset < 0) return 0; return *(u64 *)((char *)&__get_cpu_var(injectm) + offset); } if (rdmsrl_safe(msr, &v)) { WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr); /* * Return zero in case the access faulted. This should * not happen normally but can happen if the CPU does * something weird, or if the code is buggy. */ v = 0; } return v; } static void mce_wrmsrl(u32 msr, u64 v) { if (__this_cpu_read(injectm.finished)) { int offset = msr_to_offset(msr); if (offset >= 0) *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v; return; } wrmsrl(msr, v); } /* * Collect all global (w.r.t. 
this processor) status about this machine * check into our "mce" struct so that we can use it later to assess * the severity of the problem as we read per-bank specific details. */ static inline void mce_gather_info(struct mce *m, struct pt_regs *regs) { mce_setup(m); m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); if (regs) { /* * Get the address of the instruction at the time of * the machine check error. */ if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) { m->ip = regs->ip; m->cs = regs->cs; /* * When in VM86 mode make the cs look like ring 3 * always. This is a lie, but it's better than passing * the additional vm86 bit around everywhere. */ if (v8086_mode(regs)) m->cs |= 3; } /* Use accurate RIP reporting if available. */ if (mca_cfg.rip_msr) m->ip = mce_rdmsrl(mca_cfg.rip_msr); } } /* * Simple lockless ring to communicate PFNs from the exception handler with the * process context work function. This is vastly simplified because there's * only a single reader and a single writer. */ #define MCE_RING_SIZE 16 /* we use one entry less */ struct mce_ring { unsigned short start; unsigned short end; unsigned long ring[MCE_RING_SIZE]; }; static DEFINE_PER_CPU(struct mce_ring, mce_ring); /* Runs with CPU affinity in workqueue */ static int mce_ring_empty(void) { struct mce_ring *r = &__get_cpu_var(mce_ring); return r->start == r->end; } static int mce_ring_get(unsigned long *pfn) { struct mce_ring *r; int ret = 0; *pfn = 0; get_cpu(); r = &__get_cpu_var(mce_ring); if (r->start == r->end) goto out; *pfn = r->ring[r->start]; r->start = (r->start + 1) % MCE_RING_SIZE; ret = 1; out: put_cpu(); return ret; } /* Always runs in MCE context with preempt off */ static int mce_ring_add(unsigned long pfn) { struct mce_ring *r = &__get_cpu_var(mce_ring); unsigned next; next = (r->end + 1) % MCE_RING_SIZE; if (next == r->start) return -1; r->ring[r->end] = pfn; wmb(); r->end = next; return 0; } int mce_available(struct cpuinfo_x86 *c) { if (mca_cfg.disabled) return 0; return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA); } static void mce_schedule_work(void) { if (!mce_ring_empty()) schedule_work(&__get_cpu_var(mce_work)); } DEFINE_PER_CPU(struct irq_work, mce_irq_work); static void mce_irq_work_cb(struct irq_work *entry) { mce_notify_irq(); mce_schedule_work(); } static void mce_report_event(struct pt_regs *regs) { if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) { mce_notify_irq(); /* * Triggering the work queue here is just an insurance * policy in case the syscall exit notify handler * doesn't run soon enough or ends up running on the * wrong CPU (can happen when audit sleeps) */ mce_schedule_work(); return; } irq_work_queue(&__get_cpu_var(mce_irq_work)); } /* * Read ADDR and MISC registers. */ static void mce_read_aux(struct mce *m, int i) { if (m->status & MCI_STATUS_MISCV) m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i)); if (m->status & MCI_STATUS_ADDRV) { m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i)); /* * Mask the reported address by the reported granularity. */ if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) { u8 shift = MCI_MISC_ADDR_LSB(m->misc); m->addr >>= shift; m->addr <<= shift; } } } DEFINE_PER_CPU(unsigned, mce_poll_count); /* * Poll for corrected events or events that happened before reset. * Those are just logged through /dev/mcelog. * * This is executed in standard interrupt context. * * Note: spec recommends to panic for fatal unsignalled * errors here. 
However this would be quite problematic -- * we would need to reimplement the Monarch handling and * it would mess up the exclusion between exception handler * and poll handler -- * so we skip this for now. * These cases should not happen anyway, or only when the CPU * is already totally * confused. In this case it's likely it will * not fully execute the machine check handler either. */ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) { struct mce m; int i; this_cpu_inc(mce_poll_count); mce_gather_info(&m, NULL); for (i = 0; i < mca_cfg.banks; i++) { if (!mce_banks[i].ctl || !test_bit(i, *b)) continue; m.misc = 0; m.addr = 0; m.bank = i; m.tsc = 0; barrier(); m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); if (!(m.status & MCI_STATUS_VAL)) continue; /* * Uncorrected or signalled events are handled by the exception * handler when it is enabled, so don't process those here. * * TBD do the same check for MCI_STATUS_EN here? */ if (!(flags & MCP_UC) && (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC))) continue; mce_read_aux(&m, i); if (!(flags & MCP_TIMESTAMP)) m.tsc = 0; /* * Don't get the IP here because it's unlikely to * have anything to do with the actual error location. */ if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce) mce_log(&m); /* * Clear state for this bank. */ mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0); } /* * Don't clear MCG_STATUS here because it's only defined for * exceptions. */ sync_core(); } EXPORT_SYMBOL_GPL(machine_check_poll); /* * Do a quick check if any of the events requires a panic. * This decides if we keep the events around or clear them. */ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, struct pt_regs *regs) { int i, ret = 0; for (i = 0; i < mca_cfg.banks; i++) { m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); if (m->status & MCI_STATUS_VAL) { __set_bit(i, validp); if (quirk_no_way_out) quirk_no_way_out(i, m, regs); } if (mce_severity(m, mca_cfg.tolerant, msg) >= MCE_PANIC_SEVERITY) ret = 1; } return ret; } /* * Variable to establish order between CPUs while scanning. * Each CPU spins initially until executing is equal to its number. */ static atomic_t mce_executing; /* * Defines order of CPUs on entry. First CPU becomes Monarch. */ static atomic_t mce_callin; /* * Check if a timeout waiting for other CPUs happened. */ static int mce_timed_out(u64 *t) { /* * The others already did panic for some reason. * Bail out like in a timeout. * rmb() to tell the compiler that system_state * might have been modified by someone else. */ rmb(); if (atomic_read(&mce_paniced)) wait_for_panic(); if (!mca_cfg.monarch_timeout) goto out; if ((s64)*t < SPINUNIT) { /* CHECKME: Make panic default for 1 too? */ if (mca_cfg.tolerant < 1) mce_panic("Timeout synchronizing machine check over CPUs", NULL, NULL); cpu_missing = 1; return 1; } *t -= SPINUNIT; out: touch_nmi_watchdog(); return 0; } /* * The Monarch's reign. The Monarch is the CPU who entered * the machine check handler first. It waits for the others to * raise the exception too and then grades them. When any * error is fatal, panic. Only then let the others continue. * * The other CPUs entering the MCE handler will be controlled by the * Monarch. They are called Subjects. * * This way we prevent any potential data corruption in an unrecoverable case * and also makes sure all CPUs' errors are always examined.
* * Also this detects the case of a machine check event coming from outer * space (not detected by any CPUs) In this case some external agent wants * us to shut down, so panic too. * * The other CPUs might still decide to panic if the handler happens * in a unrecoverable place, but in this case the system is in a semi-stable * state and won't corrupt anything by itself. It's ok to let the others * continue for a bit first. * * All the spin loops have timeouts; when a timeout happens a CPU * typically elects itself to be Monarch. */ static void mce_reign(void) { int cpu; struct mce *m = NULL; int global_worst = 0; char *msg = NULL; char *nmsg = NULL; /* * This CPU is the Monarch and the other CPUs have run * through their handlers. * Grade the severity of the errors of all the CPUs. */ for_each_possible_cpu(cpu) { int severity = mce_severity(&per_cpu(mces_seen, cpu), mca_cfg.tolerant, &nmsg); if (severity > global_worst) { msg = nmsg; global_worst = severity; m = &per_cpu(mces_seen, cpu); } } /* * Cannot recover? Panic here then. * This dumps all the mces in the log buffer and stops the * other CPUs. */ if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) mce_panic("Fatal Machine check", m, msg); /* * For UC somewhere we let the CPU who detects it handle it. * Also must let continue the others, otherwise the handling * CPU could deadlock on a lock. */ /* * No machine check event found. Must be some external * source or one CPU is hung. Panic. */ if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3) mce_panic("Machine check from unknown source", NULL, NULL); /* * Now clear all the mces_seen so that they don't reappear on * the next mce. */ for_each_possible_cpu(cpu) memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce)); } static atomic_t global_nwo; /* * Start of Monarch synchronization. This waits until all CPUs have * entered the exception handler and then determines if any of them * saw a fatal event that requires panic. Then it executes them * in the entry order. * TBD double check parallel CPU hotunplug */ static int mce_start(int *no_way_out) { int order; int cpus = num_online_cpus(); u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; if (!timeout) return -1; atomic_add(*no_way_out, &global_nwo); /* * global_nwo should be updated before mce_callin */ smp_wmb(); order = atomic_inc_return(&mce_callin); /* * Wait for everyone. */ while (atomic_read(&mce_callin) != cpus) { if (mce_timed_out(&timeout)) { atomic_set(&global_nwo, 0); return -1; } ndelay(SPINUNIT); } /* * mce_callin should be read before global_nwo */ smp_rmb(); if (order == 1) { /* * Monarch: Starts executing now, the others wait. */ atomic_set(&mce_executing, 1); } else { /* * Subject: Now start the scanning loop one by one in * the original callin order. * This way when there are any shared banks it will be * only seen by one CPU before cleared, avoiding duplicates. */ while (atomic_read(&mce_executing) < order) { if (mce_timed_out(&timeout)) { atomic_set(&global_nwo, 0); return -1; } ndelay(SPINUNIT); } } /* * Cache the global no_way_out state. */ *no_way_out = atomic_read(&global_nwo); return order; } /* * Synchronize between CPUs after main scanning loop. * This invokes the bulk of the Monarch processing. */ static int mce_end(int order) { int ret = -1; u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; if (!timeout) goto reset; if (order < 0) goto reset; /* * Allow others to run. 
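 * (Editor's example with 3 online CPUs: mce_callin hands out orders
 * 1, 2, 3; order 1 is the Monarch and scans first. Each CPU bumps
 * mce_executing here when its scan is done, which releases the next
 * Subject in callin order; once the count exceeds num_online_cpus()
 * the Monarch runs mce_reign() and resets the globals so the
 * Subjects can leave.)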
*/ atomic_inc(&mce_executing); if (order == 1) { /* CHECKME: Can this race with a parallel hotplug? */ int cpus = num_online_cpus(); /* * Monarch: Wait for everyone to go through their scanning * loops. */ while (atomic_read(&mce_executing) <= cpus) { if (mce_timed_out(&timeout)) goto reset; ndelay(SPINUNIT); } mce_reign(); barrier(); ret = 0; } else { /* * Subject: Wait for Monarch to finish. */ while (atomic_read(&mce_executing) != 0) { if (mce_timed_out(&timeout)) goto reset; ndelay(SPINUNIT); } /* * Don't reset anything. That's done by the Monarch. */ return 0; } /* * Reset all global state. */ reset: atomic_set(&global_nwo, 0); atomic_set(&mce_callin, 0); barrier(); /* * Let others run again. */ atomic_set(&mce_executing, 0); return ret; } /* * Check if the address reported by the CPU is in a format we can parse. * It would be possible to add code for most other cases, but all would * be somewhat complicated (e.g. segment offset would require an instruction * parser). So only support physical addresses up to page granuality for now. */ static int mce_usable_address(struct mce *m) { if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV)) return 0; if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT) return 0; if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS) return 0; return 1; } static void mce_clear_state(unsigned long *toclear) { int i; for (i = 0; i < mca_cfg.banks; i++) { if (test_bit(i, toclear)) mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0); } } /* * Need to save faulting physical address associated with a process * in the machine check handler some place where we can grab it back * later in mce_notify_process() */ #define MCE_INFO_MAX 16 struct mce_info { atomic_t inuse; struct task_struct *t; __u64 paddr; int restartable; } mce_info[MCE_INFO_MAX]; static void mce_save_info(__u64 addr, int c) { struct mce_info *mi; for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++) { if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) { mi->t = current; mi->paddr = addr; mi->restartable = c; return; } } mce_panic("Too many concurrent recoverable errors", NULL, NULL); } static struct mce_info *mce_find_info(void) { struct mce_info *mi; for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++) if (atomic_read(&mi->inuse) && mi->t == current) return mi; return NULL; } static void mce_clear_info(struct mce_info *mi) { atomic_set(&mi->inuse, 0); } /* * The actual machine check handler. This only handles real * exceptions when something got corrupted coming in through int 18. * * This is executed in NMI context not subject to normal locking rules. This * implies that most kernel services cannot be safely used. Don't even * think about putting a printk in there! * * On Intel systems this is entered on all CPUs in parallel through * MCE broadcast. However some CPUs might be broken beyond repair, * so be always careful when synchronizing with others. */ void do_machine_check(struct pt_regs *regs, long error_code) { struct mca_config *cfg = &mca_cfg; struct mce m, *final; int i; int worst = 0; int severity; /* * Establish sequential order between the CPUs entering the machine * check handler. */ int order; /* * If no_way_out gets set, there is no safe way to recover from this * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway. */ int no_way_out = 0; /* * If kill_it gets set, there might be a way to recover from this * error. 
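 * (For instance, an action-required data fault with a usable,
 * recoverable address is queued for user-context handling via
 * TIF_MCE_NOTIFY further below rather than getting an immediate
 * SIGBUS.)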
*/ int kill_it = 0; DECLARE_BITMAP(toclear, MAX_NR_BANKS); DECLARE_BITMAP(valid_banks, MAX_NR_BANKS); char *msg = "Unknown"; atomic_inc(&mce_entry); this_cpu_inc(mce_exception_count); if (!cfg->banks) goto out; mce_gather_info(&m, regs); final = &__get_cpu_var(mces_seen); *final = m; memset(valid_banks, 0, sizeof(valid_banks)); no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs); barrier(); /* * When no restart IP might need to kill or panic. * Assume the worst for now, but if we find the * severity is MCE_AR_SEVERITY we have other options. */ if (!(m.mcgstatus & MCG_STATUS_RIPV)) kill_it = 1; /* * Go through all the banks in exclusion of the other CPUs. * This way we don't report duplicated events on shared banks * because the first one to see it will clear it. */ order = mce_start(&no_way_out); for (i = 0; i < cfg->banks; i++) { __clear_bit(i, toclear); if (!test_bit(i, valid_banks)) continue; if (!mce_banks[i].ctl) continue; m.misc = 0; m.addr = 0; m.bank = i; m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); if ((m.status & MCI_STATUS_VAL) == 0) continue; /* * Non uncorrected or non signaled errors are handled by * machine_check_poll. Leave them alone, unless this panics. */ if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) && !no_way_out) continue; /* * Set taint even when machine check was not enabled. */ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); severity = mce_severity(&m, cfg->tolerant, NULL); /* * When machine check was for corrected handler don't touch, * unless we're panicing. */ if (severity == MCE_KEEP_SEVERITY && !no_way_out) continue; __set_bit(i, toclear); if (severity == MCE_NO_SEVERITY) { /* * Machine check event was not enabled. Clear, but * ignore. */ continue; } mce_read_aux(&m, i); /* * Action optional error. Queue address for later processing. * When the ring overflows we just ignore the AO error. * RED-PEN add some logging mechanism when * usable_address or mce_add_ring fails. * RED-PEN don't ignore overflow for mca_cfg.tolerant == 0 */ if (severity == MCE_AO_SEVERITY && mce_usable_address(&m)) mce_ring_add(m.addr >> PAGE_SHIFT); mce_log(&m); if (severity > worst) { *final = m; worst = severity; } } /* mce_clear_state will clear *final, save locally for use later */ m = *final; if (!no_way_out) mce_clear_state(toclear); /* * Do most of the synchronization with other CPUs. * When there's any problem use only local no_way_out state. */ if (mce_end(order) < 0) no_way_out = worst >= MCE_PANIC_SEVERITY; /* * At insane "tolerant" levels we take no action. Otherwise * we only die if we have no other choice. For less serious * issues we try to recover, or limit damage to the current * process. 
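 * (Editor's summary of the branch below: with tolerant < 3, a
 * no_way_out event panics, worst == MCE_AR_SEVERITY schedules
 * user-context recovery via TIF_MCE_NOTIFY, and otherwise kill_it
 * sends SIGBUS to current; tolerant == 3 suppresses all of these.)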
*/ if (cfg->tolerant < 3) { if (no_way_out) mce_panic("Fatal machine check on current CPU", &m, msg); if (worst == MCE_AR_SEVERITY) { /* schedule action before return to userland */ mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV); set_thread_flag(TIF_MCE_NOTIFY); } else if (kill_it) { force_sig(SIGBUS, current); } } if (worst > 0) mce_report_event(regs); mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); out: atomic_dec(&mce_entry); sync_core(); } EXPORT_SYMBOL_GPL(do_machine_check); #ifndef CONFIG_MEMORY_FAILURE int memory_failure(unsigned long pfn, int vector, int flags) { /* mce_severity() should not hand us an ACTION_REQUIRED error */ BUG_ON(flags & MF_ACTION_REQUIRED); pr_err("Uncorrected memory error in page 0x%lx ignored\n" "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n", pfn); return 0; } #endif /* * Called in process context that interrupted by MCE and marked with * TIF_MCE_NOTIFY, just before returning to erroneous userland. * This code is allowed to sleep. * Attempt possible recovery such as calling the high level VM handler to * process any corrupted pages, and kill/signal current process if required. * Action required errors are handled here. */ void mce_notify_process(void) { unsigned long pfn; struct mce_info *mi = mce_find_info(); int flags = MF_ACTION_REQUIRED; if (!mi) mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL); pfn = mi->paddr >> PAGE_SHIFT; clear_thread_flag(TIF_MCE_NOTIFY); pr_err("Uncorrected hardware memory error in user-access at %llx", mi->paddr); /* * We must call memory_failure() here even if the current process is * doomed. We still need to mark the page as poisoned and alert any * other users of the page. */ if (!mi->restartable) flags |= MF_MUST_KILL; if (memory_failure(pfn, MCE_VECTOR, flags) < 0) { pr_err("Memory error not recovered"); force_sig(SIGBUS, current); } mce_clear_info(mi); } /* * Action optional processing happens here (picking up * from the list of faulting pages that do_machine_check() * placed into the "ring"). */ static void mce_process_work(struct work_struct *dummy) { unsigned long pfn; while (mce_ring_get(&pfn)) memory_failure(pfn, MCE_VECTOR, 0); } #ifdef CONFIG_X86_MCE_INTEL /*** * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog * @cpu: The CPU on which the event occurred. * @status: Event status information * * This function should be called by the thermal interrupt after the * event has been processed and the decision was made to log the event * further. * * The status parameter will be saved to the 'status' field of 'struct mce' * and historically has been the register value of the * MSR_IA32_THERMAL_STATUS (Intel) msr. */ void mce_log_therm_throt_event(__u64 status) { struct mce m; mce_setup(&m); m.bank = MCE_THERMAL_BANK; m.status = status; mce_log(&m); } #endif /* CONFIG_X86_MCE_INTEL */ /* * Periodic polling timer for "silent" machine check errors. If the * poller finds an MCE, poll 2x faster. When the poller finds no more * errors, poll 2x slower (up to check_interval seconds). 
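 * (Worked example, assuming HZ = 1000: polling starts at
 * check_interval = 5 minutes, halves on each logged event down to a
 * floor of HZ/100 = 10ms, and doubles again after quiet polls up to
 * the 5 minute ceiling.)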
*/ static unsigned long check_interval = 5 * 60; /* 5 minutes */ static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ static DEFINE_PER_CPU(struct timer_list, mce_timer); static unsigned long mce_adjust_timer_default(unsigned long interval) { return interval; } static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; static void mce_timer_fn(unsigned long data) { struct timer_list *t = &__get_cpu_var(mce_timer); unsigned long iv; WARN_ON(smp_processor_id() != data); if (mce_available(__this_cpu_ptr(&cpu_info))) { machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_poll_banks)); mce_intel_cmci_poll(); } /* * Alert userspace if needed. If we logged an MCE, reduce the * polling interval, otherwise increase the polling interval. */ iv = __this_cpu_read(mce_next_interval); if (mce_notify_irq()) { iv = max(iv / 2, (unsigned long) HZ/100); } else { iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); iv = mce_adjust_timer(iv); } __this_cpu_write(mce_next_interval, iv); /* Might have become 0 after CMCI storm subsided */ if (iv) { t->expires = jiffies + iv; add_timer_on(t, smp_processor_id()); } } /* * Ensure that the timer is firing in @interval from now. */ void mce_timer_kick(unsigned long interval) { struct timer_list *t = &__get_cpu_var(mce_timer); unsigned long when = jiffies + interval; unsigned long iv = __this_cpu_read(mce_next_interval); if (timer_pending(t)) { if (time_before(when, t->expires)) mod_timer_pinned(t, when); } else { t->expires = round_jiffies(when); add_timer_on(t, smp_processor_id()); } if (interval < iv) __this_cpu_write(mce_next_interval, interval); } /* Must not be called in IRQ context where del_timer_sync() can deadlock */ static void mce_timer_delete_all(void) { int cpu; for_each_online_cpu(cpu) del_timer_sync(&per_cpu(mce_timer, cpu)); } static void mce_do_trigger(struct work_struct *work) { call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT); } static DECLARE_WORK(mce_trigger_work, mce_do_trigger); /* * Notify the user(s) about new machine check events. * Can be called from interrupt context, but not from machine check/NMI * context. */ int mce_notify_irq(void) { /* Not more than two messages every minute */ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); if (test_and_clear_bit(0, &mce_need_notify)) { /* wake processes polling /dev/mcelog */ wake_up_interruptible(&mce_chrdev_wait); if (mce_helper[0]) schedule_work(&mce_trigger_work); if (__ratelimit(&ratelimit)) pr_info(HW_ERR "Machine check events logged\n"); return 1; } return 0; } EXPORT_SYMBOL_GPL(mce_notify_irq); static int __mcheck_cpu_mce_banks_init(void) { int i; u8 num_banks = mca_cfg.banks; mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL); if (!mce_banks) return -ENOMEM; for (i = 0; i < num_banks; i++) { struct mce_bank *b = &mce_banks[i]; b->ctl = -1ULL; b->init = 1; } return 0; } /* * Initialize Machine Checks for a CPU. */ static int __mcheck_cpu_cap_init(void) { unsigned b; u64 cap; rdmsrl(MSR_IA32_MCG_CAP, cap); b = cap & MCG_BANKCNT_MASK; if (!mca_cfg.banks) pr_info("CPU supports %d MCE banks\n", b); if (b > MAX_NR_BANKS) { pr_warn("Using only %u machine check banks out of %u\n", MAX_NR_BANKS, b); b = MAX_NR_BANKS; } /* Don't support asymmetric configurations today */ WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks); mca_cfg.banks = b; if (!mce_banks) { int err = __mcheck_cpu_mce_banks_init(); if (err) return err; } /* Use accurate RIP reporting if available. 
*/ if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9) mca_cfg.rip_msr = MSR_IA32_MCG_EIP; if (cap & MCG_SER_P) mca_cfg.ser = true; return 0; } static void __mcheck_cpu_init_generic(void) { enum mcp_flags m_fl = 0; mce_banks_t all_banks; u64 cap; int i; if (!mca_cfg.bootlog) m_fl = MCP_DONTLOG; /* * Log the machine checks left over from the previous reset. */ bitmap_fill(all_banks, MAX_NR_BANKS); machine_check_poll(MCP_UC | m_fl, &all_banks); set_in_cr4(X86_CR4_MCE); rdmsrl(MSR_IA32_MCG_CAP, cap); if (cap & MCG_CTL_P) wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); for (i = 0; i < mca_cfg.banks; i++) { struct mce_bank *b = &mce_banks[i]; if (!b->init) continue; wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); wrmsrl(MSR_IA32_MCx_STATUS(i), 0); } } /* * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM * Vol 3B Table 15-20). But this confuses both the code that determines * whether the machine check occurred in kernel or user mode, and also * the severity assessment code. Pretend that EIPV was set, and take the * ip/cs values from the pt_regs that mce_gather_info() ignored earlier. */ static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs) { if (bank != 0) return; if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0) return; if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC| MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV| MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR| MCACOD)) != (MCI_STATUS_UC|MCI_STATUS_EN| MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S| MCI_STATUS_AR|MCACOD_INSTR)) return; m->mcgstatus |= MCG_STATUS_EIPV; m->ip = regs->ip; m->cs = regs->cs; } /* Add per CPU specific workarounds here */ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) { struct mca_config *cfg = &mca_cfg; if (c->x86_vendor == X86_VENDOR_UNKNOWN) { pr_info("unknown CPU type - not enabling MCE support\n"); return -EOPNOTSUPP; } /* This should be disabled by the BIOS, but isn't always */ if (c->x86_vendor == X86_VENDOR_AMD) { if (c->x86 == 15 && cfg->banks > 4) { /* * disable GART TBL walk error reporting, which * trips off incorrectly with the IOMMU & 3ware * & Cerberus: */ clear_bit(10, (unsigned long *)&mce_banks[4].ctl); } if (c->x86 <= 17 && cfg->bootlog < 0) { /* * Lots of broken BIOS around that don't clear them * by default and leave crap in there. Don't log: */ cfg->bootlog = 0; } /* * Various K7s with broken bank 0 around. Always disable * by default. */ if (c->x86 == 6 && cfg->banks > 0) mce_banks[0].ctl = 0; /* * Turn off MC4_MISC thresholding banks on those models since * they're not supported there. */ if (c->x86 == 0x15 && (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) { int i; u64 val, hwcr; bool need_toggle; u32 msrs[] = { 0x00000413, /* MC4_MISC0 */ 0xc0000408, /* MC4_MISC1 */ }; rdmsrl(MSR_K7_HWCR, hwcr); /* McStatusWrEn has to be set */ need_toggle = !(hwcr & BIT(18)); if (need_toggle) wrmsrl(MSR_K7_HWCR, hwcr | BIT(18)); for (i = 0; i < ARRAY_SIZE(msrs); i++) { rdmsrl(msrs[i], val); /* CntP bit set? */ if (val & BIT_64(62)) { val &= ~BIT_64(62); wrmsrl(msrs[i], val); } } /* restore old settings */ if (need_toggle) wrmsrl(MSR_K7_HWCR, hwcr); } } if (c->x86_vendor == X86_VENDOR_INTEL) { /* * SDM documents that on family 6 bank 0 should not be written * because it aliases to another special BIOS controlled * register. * But it's not aliased anymore on model 0x1a+ * Don't ignore bank 0 completely because there could be a * valid event later, merely don't write CTL0. 
*/ if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0) mce_banks[0].init = 0; /* * All newer Intel systems support MCE broadcasting. Enable * synchronization with a one second timeout. */ if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) && cfg->monarch_timeout < 0) cfg->monarch_timeout = USEC_PER_SEC; /* * There are also broken BIOSes on some Pentium M and * earlier systems: */ if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0) cfg->bootlog = 0; if (c->x86 == 6 && c->x86_model == 45) quirk_no_way_out = quirk_sandybridge_ifu; } if (cfg->monarch_timeout < 0) cfg->monarch_timeout = 0; if (cfg->bootlog != 0) cfg->panic_timeout = 30; return 0; } static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) { if (c->x86 != 5) return 0; switch (c->x86_vendor) { case X86_VENDOR_INTEL: intel_p5_mcheck_init(c); return 1; break; case X86_VENDOR_CENTAUR: winchip_mcheck_init(c); return 1; break; } return 0; } static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) { switch (c->x86_vendor) { case X86_VENDOR_INTEL: mce_intel_feature_init(c); mce_adjust_timer = mce_intel_adjust_timer; break; case X86_VENDOR_AMD: mce_amd_feature_init(c); break; default: break; } } static void mce_start_timer(unsigned int cpu, struct timer_list *t) { unsigned long iv = check_interval * HZ; if (mca_cfg.ignore_ce || !iv) return; per_cpu(mce_next_interval, cpu) = iv; t->expires = round_jiffies(jiffies + iv); add_timer_on(t, cpu); } static void __mcheck_cpu_init_timer(void) { struct timer_list *t = &__get_cpu_var(mce_timer); unsigned int cpu = smp_processor_id(); setup_timer(t, mce_timer_fn, cpu); mce_start_timer(cpu, t); } /* Handle unconfigured int18 (should never happen) */ static void unexpected_machine_check(struct pt_regs *regs, long error_code) { pr_err("CPU#%d: Unexpected int18 (Machine Check)\n", smp_processor_id()); } /* Call the installed machine check handler for this CPU setup. */ void (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check; /* * Called for each booted CPU to set up machine checks. * Must be called with preempt off: */ void mcheck_cpu_init(struct cpuinfo_x86 *c) { if (mca_cfg.disabled) return; if (__mcheck_cpu_ancient_init(c)) return; if (!mce_available(c)) return; if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) { mca_cfg.disabled = true; return; } machine_check_vector = do_machine_check; __mcheck_cpu_init_generic(); __mcheck_cpu_init_vendor(c); __mcheck_cpu_init_timer(); INIT_WORK(&__get_cpu_var(mce_work), mce_process_work); init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb); } /* * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log. */ static DEFINE_SPINLOCK(mce_chrdev_state_lock); static int mce_chrdev_open_count; /* #times opened */ static int mce_chrdev_open_exclu; /* already open exclusive? 
*/ static int mce_chrdev_open(struct inode *inode, struct file *file) { spin_lock(&mce_chrdev_state_lock); if (mce_chrdev_open_exclu || (mce_chrdev_open_count && (file->f_flags & O_EXCL))) { spin_unlock(&mce_chrdev_state_lock); return -EBUSY; } if (file->f_flags & O_EXCL) mce_chrdev_open_exclu = 1; mce_chrdev_open_count++; spin_unlock(&mce_chrdev_state_lock); return nonseekable_open(inode, file); } static int mce_chrdev_release(struct inode *inode, struct file *file) { spin_lock(&mce_chrdev_state_lock); mce_chrdev_open_count--; mce_chrdev_open_exclu = 0; spin_unlock(&mce_chrdev_state_lock); return 0; } static void collect_tscs(void *data) { unsigned long *cpu_tsc = (unsigned long *)data; rdtscll(cpu_tsc[smp_processor_id()]); } static int mce_apei_read_done; /* Collect MCE record of previous boot in persistent storage via APEI ERST. */ static int __mce_read_apei(char __user **ubuf, size_t usize) { int rc; u64 record_id; struct mce m; if (usize < sizeof(struct mce)) return -EINVAL; rc = apei_read_mce(&m, &record_id); /* Error or no more MCE record */ if (rc <= 0) { mce_apei_read_done = 1; /* * When ERST is disabled, mce_chrdev_read() should return * "no record" instead of "no device." */ if (rc == -ENODEV) return 0; return rc; } rc = -EFAULT; if (copy_to_user(*ubuf, &m, sizeof(struct mce))) return rc; /* * In fact, we should have cleared the record after that has * been flushed to the disk or sent to network in * /sbin/mcelog, but we have no interface to support that now, * so just clear it to avoid duplication. */ rc = apei_clear_mce(record_id); if (rc) { mce_apei_read_done = 1; return rc; } *ubuf += sizeof(struct mce); return 0; } static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off) { char __user *buf = ubuf; unsigned long *cpu_tsc; unsigned prev, next; int i, err; cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL); if (!cpu_tsc) return -ENOMEM; mutex_lock(&mce_chrdev_read_mutex); if (!mce_apei_read_done) { err = __mce_read_apei(&buf, usize); if (err || buf != ubuf) goto out; } next = rcu_dereference_check_mce(mcelog.next); /* Only supports full reads right now */ err = -EINVAL; if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) goto out; err = 0; prev = 0; do { for (i = prev; i < next; i++) { unsigned long start = jiffies; struct mce *m = &mcelog.entry[i]; while (!m->finished) { if (time_after_eq(jiffies, start + 2)) { memset(m, 0, sizeof(*m)); goto timeout; } cpu_relax(); } smp_rmb(); err |= copy_to_user(buf, m, sizeof(*m)); buf += sizeof(*m); timeout: ; } memset(mcelog.entry + prev, 0, (next - prev) * sizeof(struct mce)); prev = next; next = cmpxchg(&mcelog.next, prev, 0); } while (next != prev); synchronize_sched(); /* * Collect entries that were still getting written before the * synchronize. */ on_each_cpu(collect_tscs, cpu_tsc, 1); for (i = next; i < MCE_LOG_LEN; i++) { struct mce *m = &mcelog.entry[i]; if (m->finished && m->tsc < cpu_tsc[m->cpu]) { err |= copy_to_user(buf, m, sizeof(*m)); smp_rmb(); buf += sizeof(*m); memset(m, 0, sizeof(*m)); } } if (err) err = -EFAULT; out: mutex_unlock(&mce_chrdev_read_mutex); kfree(cpu_tsc); return err ? 
err : buf - ubuf; } static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait) { poll_wait(file, &mce_chrdev_wait, wait); if (rcu_access_index(mcelog.next)) return POLLIN | POLLRDNORM; if (!mce_apei_read_done && apei_check_mce()) return POLLIN | POLLRDNORM; return 0; } static long mce_chrdev_ioctl(struct file *f, unsigned int cmd, unsigned long arg) { int __user *p = (int __user *)arg; if (!capable(CAP_SYS_ADMIN)) return -EPERM; switch (cmd) { case MCE_GET_RECORD_LEN: return put_user(sizeof(struct mce), p); case MCE_GET_LOG_LEN: return put_user(MCE_LOG_LEN, p); case MCE_GETCLEAR_FLAGS: { unsigned flags; do { flags = mcelog.flags; } while (cmpxchg(&mcelog.flags, flags, 0) != flags); return put_user(flags, p); } default: return -ENOTTY; } } static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf, size_t usize, loff_t *off); void register_mce_write_callback(ssize_t (*fn)(struct file *filp, const char __user *ubuf, size_t usize, loff_t *off)) { mce_write = fn; } EXPORT_SYMBOL_GPL(register_mce_write_callback); ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf, size_t usize, loff_t *off) { if (mce_write) return mce_write(filp, ubuf, usize, off); else return -EINVAL; } static const struct file_operations mce_chrdev_ops = { .open = mce_chrdev_open, .release = mce_chrdev_release, .read = mce_chrdev_read, .write = mce_chrdev_write, .poll = mce_chrdev_poll, .unlocked_ioctl = mce_chrdev_ioctl, .llseek = no_llseek, }; static struct miscdevice mce_chrdev_device = { MISC_MCELOG_MINOR, "mcelog", &mce_chrdev_ops, }; static void __mce_disable_bank(void *arg) { int bank = *((int *)arg); __clear_bit(bank, __get_cpu_var(mce_poll_banks)); cmci_disable_bank(bank); } void mce_disable_bank(int bank) { if (bank >= mca_cfg.banks) { pr_warn(FW_BUG "Ignoring request to disable invalid MCA bank %d.\n", bank); return; } set_bit(bank, mce_banks_ce_disabled); on_each_cpu(__mce_disable_bank, &bank, 1); } /* * mce=off Disables machine check * mce=no_cmci Disables CMCI * mce=dont_log_ce Clears corrected events silently, no log created for CEs. * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared. * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above) * monarchtimeout is how long to wait for other CPUs on machine * check, or 0 to not wait * mce=bootlog Log MCEs from before booting. Disabled by default on AMD. * mce=nobootlog Don't log MCEs from before booting. * mce=bios_cmci_threshold Don't program the CMCI threshold */ static int __init mcheck_enable(char *str) { struct mca_config *cfg = &mca_cfg; if (*str == 0) { enable_p5_mce(); return 1; } if (*str == '=') str++; if (!strcmp(str, "off")) cfg->disabled = true; else if (!strcmp(str, "no_cmci")) cfg->cmci_disabled = true; else if (!strcmp(str, "dont_log_ce")) cfg->dont_log_ce = true; else if (!strcmp(str, "ignore_ce")) cfg->ignore_ce = true; else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog")) cfg->bootlog = (str[0] == 'b'); else if (!strcmp(str, "bios_cmci_threshold")) cfg->bios_cmci_threshold = true; else if (isdigit(str[0])) { get_option(&str, &(cfg->tolerant)); if (*str == ',') { ++str; get_option(&str, &(cfg->monarch_timeout)); } } else { pr_info("mce argument %s ignored. Please use /sys\n", str); return 0; } return 1; } __setup("mce", mcheck_enable); int __init mcheck_init(void) { mcheck_intel_therm_init(); return 0; } /* * mce_syscore: PM support */ /* * Disable machine checks on suspend and shutdown. We can't really handle * them later. 
*/ static int mce_disable_error_reporting(void) { int i; for (i = 0; i < mca_cfg.banks; i++) { struct mce_bank *b = &mce_banks[i]; if (b->init) wrmsrl(MSR_IA32_MCx_CTL(i), 0); } return 0; } static int mce_syscore_suspend(void) { return mce_disable_error_reporting(); } static void mce_syscore_shutdown(void) { mce_disable_error_reporting(); } /* * On resume clear all MCE state. Don't want to see leftovers from the BIOS. * Only one CPU is active at this time, the others get re-added later using * CPU hotplug: */ static void mce_syscore_resume(void) { __mcheck_cpu_init_generic(); __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info)); } static struct syscore_ops mce_syscore_ops = { .suspend = mce_syscore_suspend, .shutdown = mce_syscore_shutdown, .resume = mce_syscore_resume, }; /* * mce_device: Sysfs support */ static void mce_cpu_restart(void *data) { if (!mce_available(__this_cpu_ptr(&cpu_info))) return; __mcheck_cpu_init_generic(); __mcheck_cpu_init_timer(); } /* Reinit MCEs after user configuration changes */ static void mce_restart(void) { mce_timer_delete_all(); on_each_cpu(mce_cpu_restart, NULL, 1); } /* Toggle features for corrected errors */ static void mce_disable_cmci(void *data) { if (!mce_available(__this_cpu_ptr(&cpu_info))) return; cmci_clear(); } static void mce_enable_ce(void *all) { if (!mce_available(__this_cpu_ptr(&cpu_info))) return; cmci_reenable(); cmci_recheck(); if (all) __mcheck_cpu_init_timer(); } static struct bus_type mce_subsys = { .name = "machinecheck", .dev_name = "machinecheck", }; DEFINE_PER_CPU(struct device *, mce_device); void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); static inline struct mce_bank *attr_to_bank(struct device_attribute *attr) { return container_of(attr, struct mce_bank, attr); } static ssize_t show_bank(struct device *s, struct device_attribute *attr, char *buf) { return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl); } static ssize_t set_bank(struct device *s, struct device_attribute *attr, const char *buf, size_t size) { u64 new; if (strict_strtoull(buf, 0, &new) < 0) return -EINVAL; attr_to_bank(attr)->ctl = new; mce_restart(); return size; } static ssize_t show_trigger(struct device *s, struct device_attribute *attr, char *buf) { strcpy(buf, mce_helper); strcat(buf, "\n"); return strlen(mce_helper) + 1; } static ssize_t set_trigger(struct device *s, struct device_attribute *attr, const char *buf, size_t siz) { char *p; strncpy(mce_helper, buf, sizeof(mce_helper)); mce_helper[sizeof(mce_helper)-1] = 0; p = strchr(mce_helper, '\n'); if (p) *p = 0; return strlen(mce_helper) + !!p; } static ssize_t set_ignore_ce(struct device *s, struct device_attribute *attr, const char *buf, size_t size) { u64 new; if (strict_strtoull(buf, 0, &new) < 0) return -EINVAL; if (mca_cfg.ignore_ce ^ !!new) { if (new) { /* disable ce features */ mce_timer_delete_all(); on_each_cpu(mce_disable_cmci, NULL, 1); mca_cfg.ignore_ce = true; } else { /* enable ce features */ mca_cfg.ignore_ce = false; on_each_cpu(mce_enable_ce, (void *)1, 1); } } return size; } static ssize_t set_cmci_disabled(struct device *s, struct device_attribute *attr, const char *buf, size_t size) { u64 new; if (strict_strtoull(buf, 0, &new) < 0) return -EINVAL; if (mca_cfg.cmci_disabled ^ !!new) { if (new) { /* disable cmci */ on_each_cpu(mce_disable_cmci, NULL, 1); mca_cfg.cmci_disabled = true; } else { /* enable cmci */ mca_cfg.cmci_disabled = false; on_each_cpu(mce_enable_ce, NULL, 1); } } return size; } static ssize_t store_int_with_restart(struct device *s, struct 
device_attribute *attr, const char *buf, size_t size) { ssize_t ret = device_store_int(s, attr, buf, size); mce_restart(); return ret; } static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger); static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant); static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout); static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce); static struct dev_ext_attribute dev_attr_check_interval = { __ATTR(check_interval, 0644, device_show_int, store_int_with_restart), &check_interval }; static struct dev_ext_attribute dev_attr_ignore_ce = { __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce), &mca_cfg.ignore_ce }; static struct dev_ext_attribute dev_attr_cmci_disabled = { __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled), &mca_cfg.cmci_disabled }; static struct device_attribute *mce_device_attrs[] = { &dev_attr_tolerant.attr, &dev_attr_check_interval.attr, &dev_attr_trigger, &dev_attr_monarch_timeout.attr, &dev_attr_dont_log_ce.attr, &dev_attr_ignore_ce.attr, &dev_attr_cmci_disabled.attr, NULL }; static cpumask_var_t mce_device_initialized; static void mce_device_release(struct device *dev) { kfree(dev); } /* Per cpu device init. All of the cpus still share the same ctrl bank: */ static int mce_device_create(unsigned int cpu) { struct device *dev; int err; int i, j; if (!mce_available(&boot_cpu_data)) return -EIO; dev = kzalloc(sizeof *dev, GFP_KERNEL); if (!dev) return -ENOMEM; dev->id = cpu; dev->bus = &mce_subsys; dev->release = &mce_device_release; err = device_register(dev); if (err) { put_device(dev); return err; } for (i = 0; mce_device_attrs[i]; i++) { err = device_create_file(dev, mce_device_attrs[i]); if (err) goto error; } for (j = 0; j < mca_cfg.banks; j++) { err = device_create_file(dev, &mce_banks[j].attr); if (err) goto error2; } cpumask_set_cpu(cpu, mce_device_initialized); per_cpu(mce_device, cpu) = dev; return 0; error2: while (--j >= 0) device_remove_file(dev, &mce_banks[j].attr); error: while (--i >= 0) device_remove_file(dev, mce_device_attrs[i]); device_unregister(dev); return err; } static void mce_device_remove(unsigned int cpu) { struct device *dev = per_cpu(mce_device, cpu); int i; if (!cpumask_test_cpu(cpu, mce_device_initialized)) return; for (i = 0; mce_device_attrs[i]; i++) device_remove_file(dev, mce_device_attrs[i]); for (i = 0; i < mca_cfg.banks; i++) device_remove_file(dev, &mce_banks[i].attr); device_unregister(dev); cpumask_clear_cpu(cpu, mce_device_initialized); per_cpu(mce_device, cpu) = NULL; } /* Make sure there are no machine checks on offlined CPUs. */ static void mce_disable_cpu(void *h) { unsigned long action = *(unsigned long *)h; int i; if (!mce_available(__this_cpu_ptr(&cpu_info))) return; if (!(action & CPU_TASKS_FROZEN)) cmci_clear(); for (i = 0; i < mca_cfg.banks; i++) { struct mce_bank *b = &mce_banks[i]; if (b->init) wrmsrl(MSR_IA32_MCx_CTL(i), 0); } } static void mce_reenable_cpu(void *h) { unsigned long action = *(unsigned long *)h; int i; if (!mce_available(__this_cpu_ptr(&cpu_info))) return; if (!(action & CPU_TASKS_FROZEN)) cmci_reenable(); for (i = 0; i < mca_cfg.banks; i++) { struct mce_bank *b = &mce_banks[i]; if (b->init) wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); } } /* Get notified when a cpu comes on/off. Be hotplug friendly. 
*/ static int mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; struct timer_list *t = &per_cpu(mce_timer, cpu); switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: mce_device_create(cpu); if (threshold_cpu_callback) threshold_cpu_callback(action, cpu); break; case CPU_DEAD: if (threshold_cpu_callback) threshold_cpu_callback(action, cpu); mce_device_remove(cpu); mce_intel_hcpu_update(cpu); break; case CPU_DOWN_PREPARE: smp_call_function_single(cpu, mce_disable_cpu, &action, 1); del_timer_sync(t); break; case CPU_DOWN_FAILED: smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); mce_start_timer(cpu, t); break; } if (action == CPU_POST_DEAD) { /* intentionally ignoring frozen here */ cmci_rediscover(); } return NOTIFY_OK; } static struct notifier_block mce_cpu_notifier = { .notifier_call = mce_cpu_callback, }; static __init void mce_init_banks(void) { int i; for (i = 0; i < mca_cfg.banks; i++) { struct mce_bank *b = &mce_banks[i]; struct device_attribute *a = &b->attr; sysfs_attr_init(&a->attr); a->attr.name = b->attrname; snprintf(b->attrname, ATTR_LEN, "bank%d", i); a->attr.mode = 0644; a->show = show_bank; a->store = set_bank; } } static __init int mcheck_init_device(void) { int err; int i = 0; if (!mce_available(&boot_cpu_data)) return -EIO; zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); mce_init_banks(); err = subsys_system_register(&mce_subsys, NULL); if (err) return err; for_each_online_cpu(i) { err = mce_device_create(i); if (err) return err; } register_syscore_ops(&mce_syscore_ops); register_hotcpu_notifier(&mce_cpu_notifier); /* register character device /dev/mcelog */ misc_register(&mce_chrdev_device); return err; } device_initcall_sync(mcheck_init_device); /* * Old style boot options parsing. Only for compatibility. */ static int __init mcheck_disable(char *str) { mca_cfg.disabled = true; return 1; } __setup("nomce", mcheck_disable); #ifdef CONFIG_DEBUG_FS struct dentry *mce_get_debugfs_dir(void) { static struct dentry *dmce; if (!dmce) dmce = debugfs_create_dir("mce", NULL); return dmce; } static void mce_reset(void) { cpu_missing = 0; atomic_set(&mce_fake_paniced, 0); atomic_set(&mce_executing, 0); atomic_set(&mce_callin, 0); atomic_set(&global_nwo, 0); } static int fake_panic_get(void *data, u64 *val) { *val = fake_panic; return 0; } static int fake_panic_set(void *data, u64 val) { mce_reset(); fake_panic = val; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get, fake_panic_set, "%llu\n"); static int __init mcheck_debugfs_init(void) { struct dentry *dmce, *ffake_panic; dmce = mce_get_debugfs_dir(); if (!dmce) return -ENOMEM; ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL, &fake_panic_fops); if (!ffake_panic) return -ENOMEM; return 0; } late_initcall(mcheck_debugfs_init); #endif
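/*
 * Standalone user-space sketch (compile separately; not part of the file
 * above). It mirrors the adaptive polling in mce_timer_fn(): the interval
 * is halved after a logged event and doubled after a clean poll, clamped
 * between HZ/100 and check_interval * HZ. round_jiffies_relative() and the
 * CMCI adjustment hook are omitted; all names below are invented.
 */
#include <stdio.h>

#define DEMO_HZ			100
#define DEMO_CHECK_INTERVAL	(5 * 60)	/* seconds, as in the driver */

static unsigned long adapt_interval(unsigned long iv, int logged)
{
	unsigned long max_iv = DEMO_CHECK_INTERVAL * DEMO_HZ;
	unsigned long min_iv = DEMO_HZ / 100 ? DEMO_HZ / 100 : 1;

	if (logged)
		return iv / 2 > min_iv ? iv / 2 : min_iv;	/* poll faster */
	return iv * 2 < max_iv ? iv * 2 : max_iv;		/* back off */
}

int main(void)
{
	unsigned long iv = DEMO_CHECK_INTERVAL * DEMO_HZ;
	int logged[] = { 1, 1, 1, 0, 0, 0, 0 };
	unsigned int i;

	/* Three MCE bursts, then four clean polls: watch iv shrink, then grow back. */
	for (i = 0; i < sizeof(logged) / sizeof(logged[0]); i++) {
		iv = adapt_interval(iv, logged[i]);
		printf("poll %u: next timer in %lu jiffies\n", i, iv);
	}
	return 0;
}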
gpl-2.0
raulherbster/goldfish
arch/m68knommu/kernel/ptrace.c
757
8372
/* * linux/arch/m68knommu/kernel/ptrace.c * * Copyright (C) 1994 by Hamish Macdonald * Taken from linux/kernel/ptrace.c and modified for M680x0. * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds * * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of * this archive for more details. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/signal.h> #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/system.h> #include <asm/processor.h> /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ /* determines which bits in the SR the user has access to. */ /* 1 = access 0 = no access */ #define SR_MASK 0x001f /* sets the trace bits. */ #define TRACE_BITS 0x8000 /* Find the stack offset for a register, relative to thread.esp0. */ #define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg) #define SW_REG(reg) ((long)&((struct switch_stack *)0)->reg \ - sizeof(struct switch_stack)) /* Mapping from PT_xxx to the stack offset at which the register is saved. Notice that usp has no stack-slot and needs to be treated specially (see get_reg/put_reg below). */ static int regoff[] = { PT_REG(d1), PT_REG(d2), PT_REG(d3), PT_REG(d4), PT_REG(d5), SW_REG(d6), SW_REG(d7), PT_REG(a0), PT_REG(a1), PT_REG(a2), SW_REG(a3), SW_REG(a4), SW_REG(a5), SW_REG(a6), PT_REG(d0), -1, PT_REG(orig_d0), PT_REG(sr), PT_REG(pc), }; /* * Get contents of register REGNO in task TASK. */ static inline long get_reg(struct task_struct *task, int regno) { unsigned long *addr; if (regno == PT_USP) addr = &task->thread.usp; else if (regno < ARRAY_SIZE(regoff)) addr = (unsigned long *)(task->thread.esp0 + regoff[regno]); else return 0; return *addr; } /* * Write contents of register REGNO in task TASK. */ static inline int put_reg(struct task_struct *task, int regno, unsigned long data) { unsigned long *addr; if (regno == PT_USP) addr = &task->thread.usp; else if (regno < ARRAY_SIZE(regoff)) addr = (unsigned long *) (task->thread.esp0 + regoff[regno]); else return -1; *addr = data; return 0; } /* * Called by kernel/ptrace.c when detaching.. * * Make sure the single step bit is not set. */ void ptrace_disable(struct task_struct *child) { unsigned long tmp; /* make sure the single step bit is not set. */ tmp = get_reg(child, PT_SR) & ~(TRACE_BITS << 16); put_reg(child, PT_SR, tmp); } long arch_ptrace(struct task_struct *child, long request, long addr, long data) { int ret; switch (request) { /* when I and D space are separate, these will need to be fixed. */ case PTRACE_PEEKTEXT: /* read word at location addr. */ case PTRACE_PEEKDATA: ret = generic_ptrace_peekdata(child, addr, data); break; /* read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { unsigned long tmp; ret = -EIO; if ((addr & 3) || addr < 0 || addr > sizeof(struct user) - 3) break; tmp = 0; /* Default return condition */ addr = addr >> 2; /* temporary hack. 
*/ ret = -EIO; if (addr < 19) { tmp = get_reg(child, addr); if (addr == PT_SR) tmp >>= 16; } else if (addr >= 21 && addr < 49) { tmp = child->thread.fp[addr - 21]; #ifdef CONFIG_M68KFPU_EMU /* Convert internal fpu reg representation * into long double format */ if (FPU_IS_EMU && (addr < 45) && !(addr % 3)) tmp = ((tmp & 0xffff0000) << 15) | ((tmp & 0x0000ffff) << 16); #endif } else if (addr == 49) { tmp = child->mm->start_code; } else if (addr == 50) { tmp = child->mm->start_data; } else if (addr == 51) { tmp = child->mm->end_code; } else break; ret = put_user(tmp,(unsigned long *) data); break; } /* when I and D space are separate, this will have to be fixed. */ case PTRACE_POKETEXT: /* write the word at location addr. */ case PTRACE_POKEDATA: ret = generic_ptrace_pokedata(child, addr, data); break; case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ ret = -EIO; if ((addr & 3) || addr < 0 || addr > sizeof(struct user) - 3) break; addr = addr >> 2; /* temporary hack. */ if (addr == PT_SR) { data &= SR_MASK; data <<= 16; data |= get_reg(child, PT_SR) & ~(SR_MASK << 16); } if (addr < 19) { if (put_reg(child, addr, data)) break; ret = 0; break; } if (addr >= 21 && addr < 48) { #ifdef CONFIG_M68KFPU_EMU /* Convert long double format * into internal fpu reg representation */ if (FPU_IS_EMU && (addr < 45) && !(addr % 3)) { data = (unsigned long)data << 15; data = (data & 0xffff0000) | ((data & 0x0000ffff) >> 1); } #endif child->thread.fp[addr - 21] = data; ret = 0; } break; case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ case PTRACE_CONT: { /* restart after signal. */ long tmp; ret = -EIO; if (!valid_signal(data)) break; if (request == PTRACE_SYSCALL) set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); else clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); child->exit_code = data; /* make sure the single step bit is not set. */ tmp = get_reg(child, PT_SR) & ~(TRACE_BITS << 16); put_reg(child, PT_SR, tmp); wake_up_process(child); ret = 0; break; } /* * make the child exit. Best I can do is send it a sigkill. * perhaps it should be put in the status that it wants to * exit. */ case PTRACE_KILL: { long tmp; ret = 0; if (child->exit_state == EXIT_ZOMBIE) /* already dead */ break; child->exit_code = SIGKILL; /* make sure the single step bit is not set. */ tmp = get_reg(child, PT_SR) & ~(TRACE_BITS << 16); put_reg(child, PT_SR, tmp); wake_up_process(child); break; } case PTRACE_SINGLESTEP: { /* set the trap flag. */ long tmp; ret = -EIO; if (!valid_signal(data)) break; clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); tmp = get_reg(child, PT_SR) | (TRACE_BITS << 16); put_reg(child, PT_SR, tmp); child->exit_code = data; /* give it a chance to run. */ wake_up_process(child); ret = 0; break; } case PTRACE_DETACH: /* detach a process that was attached. */ ret = ptrace_detach(child, data); break; case PTRACE_GETREGS: { /* Get all gp regs from the child. */ int i; unsigned long tmp; for (i = 0; i < 19; i++) { tmp = get_reg(child, i); if (i == PT_SR) tmp >>= 16; if (put_user(tmp, (unsigned long *) data)) { ret = -EFAULT; break; } data += sizeof(long); } ret = 0; break; } case PTRACE_SETREGS: { /* Set all gp regs in the child. 
*/ int i; unsigned long tmp; for (i = 0; i < 19; i++) { if (get_user(tmp, (unsigned long *) data)) { ret = -EFAULT; break; } if (i == PT_SR) { tmp &= SR_MASK; tmp <<= 16; tmp |= get_reg(child, PT_SR) & ~(SR_MASK << 16); } put_reg(child, i, tmp); data += sizeof(long); } ret = 0; break; } #ifdef PTRACE_GETFPREGS case PTRACE_GETFPREGS: { /* Get the child FPU state. */ ret = 0; if (copy_to_user((void *)data, &child->thread.fp, sizeof(struct user_m68kfp_struct))) ret = -EFAULT; break; } #endif #ifdef PTRACE_SETFPREGS case PTRACE_SETFPREGS: { /* Set the child FPU state. */ ret = 0; if (copy_from_user(&child->thread.fp, (void *)data, sizeof(struct user_m68kfp_struct))) ret = -EFAULT; break; } #endif default: ret = -EIO; break; } return ret; } asmlinkage void syscall_trace(void) { if (!test_thread_flag(TIF_SYSCALL_TRACE)) return; if (!(current->ptrace & PT_PTRACED)) return; ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); /* * this isn't the same as continuing with a signal, but it will do * for normal use. strace only continues with a signal if the * stopping signal is not SIGTRAP. -brl */ if (current->exit_code) { send_sig(current->exit_code, current, 1); current->exit_code = 0; } }
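/*
 * Standalone user-space sketch (compile separately; not part of the file
 * above). The PT_REG()/SW_REG() macros compute a saved register's byte
 * offset by "dereferencing" a NULL struct pointer -- a hand-rolled
 * offsetof(). The struct here is an invented stand-in for the m68k pt_regs
 * layout; offsetof() from <stddef.h> is the portable spelling of the same
 * idea, as the comparison below shows.
 */
#include <stdio.h>
#include <stddef.h>

struct demo_regs {
	long d1, d2, d3, d4, d5;
	long sr;
	long pc;
};

#define DEMO_REG(reg)	((long)&((struct demo_regs *)0)->reg)

int main(void)
{
	printf("sr: macro=%ld offsetof=%zu\n",
	       DEMO_REG(sr), offsetof(struct demo_regs, sr));
	printf("pc: macro=%ld offsetof=%zu\n",
	       DEMO_REG(pc), offsetof(struct demo_regs, pc));
	return 0;
}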
gpl-2.0
fyu1/test
drivers/staging/iio/light/isl29018.c
757
21046
/* * A iio driver for the light sensor ISL 29018/29023/29035. * * IIO driver for monitoring ambient light intensity in luxi, proximity * sensing and infrared sensing. * * Copyright (c) 2010, NVIDIA Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/acpi.h> #define CONVERSION_TIME_MS 100 #define ISL29018_REG_ADD_COMMAND1 0x00 #define COMMMAND1_OPMODE_SHIFT 5 #define COMMMAND1_OPMODE_MASK (7 << COMMMAND1_OPMODE_SHIFT) #define COMMMAND1_OPMODE_POWER_DOWN 0 #define COMMMAND1_OPMODE_ALS_ONCE 1 #define COMMMAND1_OPMODE_IR_ONCE 2 #define COMMMAND1_OPMODE_PROX_ONCE 3 #define ISL29018_REG_ADD_COMMANDII 0x01 #define COMMANDII_RESOLUTION_SHIFT 2 #define COMMANDII_RESOLUTION_MASK (0x3 << COMMANDII_RESOLUTION_SHIFT) #define COMMANDII_RANGE_SHIFT 0 #define COMMANDII_RANGE_MASK (0x3 << COMMANDII_RANGE_SHIFT) #define COMMANDII_SCHEME_SHIFT 7 #define COMMANDII_SCHEME_MASK (0x1 << COMMANDII_SCHEME_SHIFT) #define ISL29018_REG_ADD_DATA_LSB 0x02 #define ISL29018_REG_ADD_DATA_MSB 0x03 #define ISL29018_REG_TEST 0x08 #define ISL29018_TEST_SHIFT 0 #define ISL29018_TEST_MASK (0xFF << ISL29018_TEST_SHIFT) #define ISL29035_REG_DEVICE_ID 0x0F #define ISL29035_DEVICE_ID_SHIFT 0x03 #define ISL29035_DEVICE_ID_MASK (0x7 << ISL29035_DEVICE_ID_SHIFT) #define ISL29035_DEVICE_ID 0x5 #define ISL29035_BOUT_SHIFT 0x07 #define ISL29035_BOUT_MASK (0x01 << ISL29035_BOUT_SHIFT) struct isl29018_chip { struct device *dev; struct regmap *regmap; struct mutex lock; int type; unsigned int lux_scale; unsigned int lux_uscale; unsigned int range; unsigned int adc_bit; int prox_scheme; bool suspended; }; static int isl29018_set_range(struct isl29018_chip *chip, unsigned long range, unsigned int *new_range) { static const unsigned long supp_ranges[] = {1000, 4000, 16000, 64000}; int i; for (i = 0; i < ARRAY_SIZE(supp_ranges); ++i) { if (range <= supp_ranges[i]) { *new_range = (unsigned int)supp_ranges[i]; break; } } if (i >= ARRAY_SIZE(supp_ranges)) return -EINVAL; return regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMANDII, COMMANDII_RANGE_MASK, i << COMMANDII_RANGE_SHIFT); } static int isl29018_set_resolution(struct isl29018_chip *chip, unsigned long adcbit, unsigned int *conf_adc_bit) { static const unsigned long supp_adcbit[] = {16, 12, 8, 4}; int i; for (i = 0; i < ARRAY_SIZE(supp_adcbit); ++i) { if (adcbit >= supp_adcbit[i]) { *conf_adc_bit = (unsigned int)supp_adcbit[i]; break; } } if (i >= ARRAY_SIZE(supp_adcbit)) return -EINVAL; return regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMANDII, COMMANDII_RESOLUTION_MASK, i << COMMANDII_RESOLUTION_SHIFT); } static int isl29018_read_sensor_input(struct isl29018_chip *chip, int 
mode) { int status; unsigned int lsb; unsigned int msb; /* Set mode */ status = regmap_write(chip->regmap, ISL29018_REG_ADD_COMMAND1, mode << COMMMAND1_OPMODE_SHIFT); if (status) { dev_err(chip->dev, "Error in setting operating mode err %d\n", status); return status; } msleep(CONVERSION_TIME_MS); status = regmap_read(chip->regmap, ISL29018_REG_ADD_DATA_LSB, &lsb); if (status < 0) { dev_err(chip->dev, "Error in reading LSB DATA with err %d\n", status); return status; } status = regmap_read(chip->regmap, ISL29018_REG_ADD_DATA_MSB, &msb); if (status < 0) { dev_err(chip->dev, "Error in reading MSB DATA with error %d\n", status); return status; } dev_vdbg(chip->dev, "MSB 0x%x and LSB 0x%x\n", msb, lsb); return (msb << 8) | lsb; } static int isl29018_read_lux(struct isl29018_chip *chip, int *lux) { int lux_data; unsigned int data_x_range, lux_unshifted; lux_data = isl29018_read_sensor_input(chip, COMMMAND1_OPMODE_ALS_ONCE); if (lux_data < 0) return lux_data; /* To support fractional scaling, separate the unshifted lux * into two calculations: int scaling and micro-scaling. * lux_uscale ranges from 0-999999, so about 20 bits. Split * the /1,000,000 in two to reduce the risk of over/underflow. */ data_x_range = lux_data * chip->range; lux_unshifted = data_x_range * chip->lux_scale; lux_unshifted += data_x_range / 1000 * chip->lux_uscale / 1000; *lux = lux_unshifted >> chip->adc_bit; return 0; } static int isl29018_read_ir(struct isl29018_chip *chip, int *ir) { int ir_data; ir_data = isl29018_read_sensor_input(chip, COMMMAND1_OPMODE_IR_ONCE); if (ir_data < 0) return ir_data; *ir = ir_data; return 0; } static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme, int *near_ir) { int status; int prox_data = -1; int ir_data = -1; /* Do proximity sensing with required scheme */ status = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMANDII, COMMANDII_SCHEME_MASK, scheme << COMMANDII_SCHEME_SHIFT); if (status) { dev_err(chip->dev, "Error in setting operating mode\n"); return status; } prox_data = isl29018_read_sensor_input(chip, COMMMAND1_OPMODE_PROX_ONCE); if (prox_data < 0) return prox_data; if (scheme == 1) { *near_ir = prox_data; return 0; } ir_data = isl29018_read_sensor_input(chip, COMMMAND1_OPMODE_IR_ONCE); if (ir_data < 0) return ir_data; if (prox_data >= ir_data) *near_ir = prox_data - ir_data; else *near_ir = 0; return 0; } /* Sysfs interface */ /* range */ static ssize_t show_range(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct isl29018_chip *chip = iio_priv(indio_dev); return sprintf(buf, "%u\n", chip->range); } static ssize_t store_range(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct isl29018_chip *chip = iio_priv(indio_dev); int status; unsigned long lval; unsigned int new_range; if (kstrtoul(buf, 10, &lval)) return -EINVAL; if (!(lval == 1000UL || lval == 4000UL || lval == 16000UL || lval == 64000UL)) { dev_err(dev, "The range is not supported\n"); return -EINVAL; } mutex_lock(&chip->lock); status = isl29018_set_range(chip, lval, &new_range); if (status < 0) { mutex_unlock(&chip->lock); dev_err(dev, "Error in setting max range with err %d\n", status); return status; } chip->range = new_range; mutex_unlock(&chip->lock); return count; } /* resolution */ static ssize_t show_resolution(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct 
isl29018_chip *chip = iio_priv(indio_dev); return sprintf(buf, "%u\n", chip->adc_bit); } static ssize_t store_resolution(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct isl29018_chip *chip = iio_priv(indio_dev); int status; unsigned int val; unsigned int new_adc_bit; if (kstrtouint(buf, 10, &val)) return -EINVAL; if (!(val == 4 || val == 8 || val == 12 || val == 16)) { dev_err(dev, "The resolution is not supported\n"); return -EINVAL; } mutex_lock(&chip->lock); status = isl29018_set_resolution(chip, val, &new_adc_bit); if (status < 0) { mutex_unlock(&chip->lock); dev_err(dev, "Error in setting resolution\n"); return status; } chip->adc_bit = new_adc_bit; mutex_unlock(&chip->lock); return count; } /* proximity scheme */ static ssize_t show_prox_infrared_suppression(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct isl29018_chip *chip = iio_priv(indio_dev); /* return the "proximity scheme" i.e. if the chip does on chip infrared suppression (1 means perform on chip suppression) */ return sprintf(buf, "%d\n", chip->prox_scheme); } static ssize_t store_prox_infrared_suppression(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct isl29018_chip *chip = iio_priv(indio_dev); int val; if (kstrtoint(buf, 10, &val)) return -EINVAL; if (!(val == 0 || val == 1)) { dev_err(dev, "The mode is not supported\n"); return -EINVAL; } /* get the "proximity scheme" i.e. if the chip does on chip infrared suppression (1 means perform on chip suppression) */ mutex_lock(&chip->lock); chip->prox_scheme = val; mutex_unlock(&chip->lock); return count; } /* Channel IO */ static int isl29018_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { struct isl29018_chip *chip = iio_priv(indio_dev); int ret = -EINVAL; mutex_lock(&chip->lock); if (mask == IIO_CHAN_INFO_CALIBSCALE && chan->type == IIO_LIGHT) { chip->lux_scale = val; /* With no write_raw_get_fmt(), val2 is a MICRO fraction. 
*/ chip->lux_uscale = val2; ret = 0; } mutex_unlock(&chip->lock); return ret; } static int isl29018_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { int ret = -EINVAL; struct isl29018_chip *chip = iio_priv(indio_dev); mutex_lock(&chip->lock); if (chip->suspended) { mutex_unlock(&chip->lock); return -EBUSY; } switch (mask) { case IIO_CHAN_INFO_RAW: case IIO_CHAN_INFO_PROCESSED: switch (chan->type) { case IIO_LIGHT: ret = isl29018_read_lux(chip, val); break; case IIO_INTENSITY: ret = isl29018_read_ir(chip, val); break; case IIO_PROXIMITY: ret = isl29018_read_proximity_ir(chip, chip->prox_scheme, val); break; default: break; } if (!ret) ret = IIO_VAL_INT; break; case IIO_CHAN_INFO_CALIBSCALE: if (chan->type == IIO_LIGHT) { *val = chip->lux_scale; *val2 = chip->lux_uscale; ret = IIO_VAL_INT_PLUS_MICRO; } break; default: break; } mutex_unlock(&chip->lock); return ret; } #define ISL29018_LIGHT_CHANNEL { \ .type = IIO_LIGHT, \ .indexed = 1, \ .channel = 0, \ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | \ BIT(IIO_CHAN_INFO_CALIBSCALE), \ } #define ISL29018_IR_CHANNEL { \ .type = IIO_INTENSITY, \ .modified = 1, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .channel2 = IIO_MOD_LIGHT_IR, \ } #define ISL29018_PROXIMITY_CHANNEL { \ .type = IIO_PROXIMITY, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ } static const struct iio_chan_spec isl29018_channels[] = { ISL29018_LIGHT_CHANNEL, ISL29018_IR_CHANNEL, ISL29018_PROXIMITY_CHANNEL, }; static const struct iio_chan_spec isl29023_channels[] = { ISL29018_LIGHT_CHANNEL, ISL29018_IR_CHANNEL, }; static IIO_DEVICE_ATTR(range, S_IRUGO | S_IWUSR, show_range, store_range, 0); static IIO_CONST_ATTR(range_available, "1000 4000 16000 64000"); static IIO_CONST_ATTR(adc_resolution_available, "4 8 12 16"); static IIO_DEVICE_ATTR(adc_resolution, S_IRUGO | S_IWUSR, show_resolution, store_resolution, 0); static IIO_DEVICE_ATTR(proximity_on_chip_ambient_infrared_suppression, S_IRUGO | S_IWUSR, show_prox_infrared_suppression, store_prox_infrared_suppression, 0); #define ISL29018_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr) #define ISL29018_CONST_ATTR(name) (&iio_const_attr_##name.dev_attr.attr) static struct attribute *isl29018_attributes[] = { ISL29018_DEV_ATTR(range), ISL29018_CONST_ATTR(range_available), ISL29018_DEV_ATTR(adc_resolution), ISL29018_CONST_ATTR(adc_resolution_available), ISL29018_DEV_ATTR(proximity_on_chip_ambient_infrared_suppression), NULL }; static struct attribute *isl29023_attributes[] = { ISL29018_DEV_ATTR(range), ISL29018_CONST_ATTR(range_available), ISL29018_DEV_ATTR(adc_resolution), ISL29018_CONST_ATTR(adc_resolution_available), NULL }; static const struct attribute_group isl29018_group = { .attrs = isl29018_attributes, }; static const struct attribute_group isl29023_group = { .attrs = isl29023_attributes, }; static int isl29035_detect(struct isl29018_chip *chip) { int status; unsigned int id; status = regmap_read(chip->regmap, ISL29035_REG_DEVICE_ID, &id); if (status < 0) { dev_err(chip->dev, "Error reading ID register with error %d\n", status); return status; } id = (id & ISL29035_DEVICE_ID_MASK) >> ISL29035_DEVICE_ID_SHIFT; if (id != ISL29035_DEVICE_ID) return -ENODEV; /* clear out brownout bit */ return regmap_update_bits(chip->regmap, ISL29035_REG_DEVICE_ID, ISL29035_BOUT_MASK, 0); } enum { isl29018, isl29023, isl29035, }; static int isl29018_chip_init(struct isl29018_chip *chip) { int status; unsigned int new_adc_bit; unsigned int new_range; if (chip->type == 
isl29035) { status = isl29035_detect(chip); if (status < 0) return status; } /* Code added per Intersil Application Note 1534: * When VDD sinks to approximately 1.8V or below, some of * the part's registers may change their state. When VDD * recovers to 2.25V (or greater), the part may thus be in an * unknown mode of operation. The user can return the part to * a known mode of operation either by (a) setting VDD = 0V for * 1 second or more and then powering back up with a slew rate * of 0.5V/ms or greater, or (b) via I2C disable all ALS/PROX * conversions, clear the test registers, and then rewrite all * registers to the desired values. * ... * FOR ISL29011, ISL29018, ISL29021, ISL29023 * 1. Write 0x00 to register 0x08 (TEST) * 2. Write 0x00 to register 0x00 (CMD1) * 3. Rewrite all registers to the desired values * * ISL29018 Data Sheet (FN6619.1, Feb 11, 2010) essentially says * the same thing EXCEPT the data sheet asks for a 1ms delay after * writing the CMD1 register. */ status = regmap_write(chip->regmap, ISL29018_REG_TEST, 0x0); if (status < 0) { dev_err(chip->dev, "Failed to clear isl29018 TEST reg.(%d)\n", status); return status; } /* See Intersil AN1534 comments above. * "Operating Mode" (COMMAND1) register is reprogrammed when * data is read from the device. */ status = regmap_write(chip->regmap, ISL29018_REG_ADD_COMMAND1, 0); if (status < 0) { dev_err(chip->dev, "Failed to clear isl29018 CMD1 reg.(%d)\n", status); return status; } usleep_range(1000, 2000); /* per data sheet, page 10 */ /* set defaults */ status = isl29018_set_range(chip, chip->range, &new_range); if (status < 0) { dev_err(chip->dev, "Init of isl29018 fails\n"); return status; } status = isl29018_set_resolution(chip, chip->adc_bit, &new_adc_bit); return 0; } static const struct iio_info isl29018_info = { .attrs = &isl29018_group, .driver_module = THIS_MODULE, .read_raw = &isl29018_read_raw, .write_raw = &isl29018_write_raw, }; static const struct iio_info isl29023_info = { .attrs = &isl29023_group, .driver_module = THIS_MODULE, .read_raw = &isl29018_read_raw, .write_raw = &isl29018_write_raw, }; static bool is_volatile_reg(struct device *dev, unsigned int reg) { switch (reg) { case ISL29018_REG_ADD_DATA_LSB: case ISL29018_REG_ADD_DATA_MSB: case ISL29018_REG_ADD_COMMAND1: case ISL29018_REG_TEST: case ISL29035_REG_DEVICE_ID: return true; default: return false; } } /* * isl29018_regmap_config: regmap configuration. * Use RBTREE mechanism for caching. 
*/ static const struct regmap_config isl29018_regmap_config = { .reg_bits = 8, .val_bits = 8, .volatile_reg = is_volatile_reg, .max_register = ISL29018_REG_TEST, .num_reg_defaults_raw = ISL29018_REG_TEST + 1, .cache_type = REGCACHE_RBTREE, }; /* isl29035_regmap_config: regmap configuration for ISL29035 */ static const struct regmap_config isl29035_regmap_config = { .reg_bits = 8, .val_bits = 8, .volatile_reg = is_volatile_reg, .max_register = ISL29035_REG_DEVICE_ID, .num_reg_defaults_raw = ISL29035_REG_DEVICE_ID + 1, .cache_type = REGCACHE_RBTREE, }; struct chip_info { const struct iio_chan_spec *channels; int num_channels; const struct iio_info *indio_info; const struct regmap_config *regmap_cfg; }; static const struct chip_info chip_info_tbl[] = { [isl29018] = { .channels = isl29018_channels, .num_channels = ARRAY_SIZE(isl29018_channels), .indio_info = &isl29018_info, .regmap_cfg = &isl29018_regmap_config, }, [isl29023] = { .channels = isl29023_channels, .num_channels = ARRAY_SIZE(isl29023_channels), .indio_info = &isl29023_info, .regmap_cfg = &isl29018_regmap_config, }, [isl29035] = { .channels = isl29023_channels, .num_channels = ARRAY_SIZE(isl29023_channels), .indio_info = &isl29023_info, .regmap_cfg = &isl29035_regmap_config, }, }; static const char *isl29018_match_acpi_device(struct device *dev, int *data) { const struct acpi_device_id *id; id = acpi_match_device(dev->driver->acpi_match_table, dev); if (!id) return NULL; *data = (int) id->driver_data; return dev_name(dev); } static int isl29018_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct isl29018_chip *chip; struct iio_dev *indio_dev; int err; const char *name = NULL; int dev_id = 0; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip)); if (indio_dev == NULL) { dev_err(&client->dev, "iio allocation fails\n"); return -ENOMEM; } chip = iio_priv(indio_dev); i2c_set_clientdata(client, indio_dev); chip->dev = &client->dev; if (id) { name = id->name; dev_id = id->driver_data; } if (ACPI_HANDLE(&client->dev)) name = isl29018_match_acpi_device(&client->dev, &dev_id); mutex_init(&chip->lock); chip->type = dev_id; chip->lux_scale = 1; chip->lux_uscale = 0; chip->range = 1000; chip->adc_bit = 16; chip->suspended = false; chip->regmap = devm_regmap_init_i2c(client, chip_info_tbl[dev_id].regmap_cfg); if (IS_ERR(chip->regmap)) { err = PTR_ERR(chip->regmap); dev_err(chip->dev, "regmap initialization failed: %d\n", err); return err; } err = isl29018_chip_init(chip); if (err) return err; indio_dev->info = chip_info_tbl[dev_id].indio_info; indio_dev->channels = chip_info_tbl[dev_id].channels; indio_dev->num_channels = chip_info_tbl[dev_id].num_channels; indio_dev->name = name; indio_dev->dev.parent = &client->dev; indio_dev->modes = INDIO_DIRECT_MODE; err = devm_iio_device_register(&client->dev, indio_dev); if (err) { dev_err(&client->dev, "iio registration fails\n"); return err; } return 0; } #ifdef CONFIG_PM_SLEEP static int isl29018_suspend(struct device *dev) { struct isl29018_chip *chip = iio_priv(dev_get_drvdata(dev)); mutex_lock(&chip->lock); /* Since this driver uses only polling commands, we are by default in * auto shutdown (ie, power-down) mode. * So we do not have much to do here. 
*/ chip->suspended = true; mutex_unlock(&chip->lock); return 0; } static int isl29018_resume(struct device *dev) { struct isl29018_chip *chip = iio_priv(dev_get_drvdata(dev)); int err; mutex_lock(&chip->lock); err = isl29018_chip_init(chip); if (!err) chip->suspended = false; mutex_unlock(&chip->lock); return err; } static SIMPLE_DEV_PM_OPS(isl29018_pm_ops, isl29018_suspend, isl29018_resume); #define ISL29018_PM_OPS (&isl29018_pm_ops) #else #define ISL29018_PM_OPS NULL #endif static const struct acpi_device_id isl29018_acpi_match[] = { {"ISL29018", isl29018}, {"ISL29023", isl29023}, {"ISL29035", isl29035}, {}, }; MODULE_DEVICE_TABLE(acpi, isl29018_acpi_match); static const struct i2c_device_id isl29018_id[] = { {"isl29018", isl29018}, {"isl29023", isl29023}, {"isl29035", isl29035}, {} }; MODULE_DEVICE_TABLE(i2c, isl29018_id); static const struct of_device_id isl29018_of_match[] = { { .compatible = "isil,isl29018", }, { .compatible = "isil,isl29023", }, { .compatible = "isil,isl29035", }, { }, }; MODULE_DEVICE_TABLE(of, isl29018_of_match); static struct i2c_driver isl29018_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "isl29018", .acpi_match_table = ACPI_PTR(isl29018_acpi_match), .pm = ISL29018_PM_OPS, .owner = THIS_MODULE, .of_match_table = isl29018_of_match, }, .probe = isl29018_probe, .id_table = isl29018_id, }; module_i2c_driver(isl29018_driver); MODULE_DESCRIPTION("ISL29018 Ambient Light Sensor driver"); MODULE_LICENSE("GPL");
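/*
 * Standalone user-space sketch (compile separately; not part of the driver
 * above). isl29018_read_lux() applies a fractional calibration scale using
 * integer math only: the multiplier is split into an integer part and a
 * 0..999999 "micro" part, and the /1000000 is performed as two /1000 steps
 * to reduce the overflow risk. The numbers in main() are invented and
 * chosen to stay within the 32-bit arithmetic the driver uses.
 */
#include <stdio.h>

static unsigned int scale_lux(unsigned int raw, unsigned int range,
			      unsigned int adc_bit,
			      unsigned int scale, unsigned int uscale)
{
	unsigned int data_x_range = raw * range;
	unsigned int lux = data_x_range * scale;

	/* fractional part: data_x_range * uscale / 1000000, split in two */
	lux += data_x_range / 1000 * uscale / 1000;

	return lux >> adc_bit;	/* full scale is 2^adc_bit ADC counts */
}

int main(void)
{
	/* hypothetical: 16-bit ADC, 4000 lux range, calibration scale 1.25 */
	printf("lux = %u\n", scale_lux(1000, 4000, 16, 1, 250000));
	return 0;
}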
gpl-2.0
Entropy512/linux_kernel_galaxyplayer
drivers/scsi/gvp11.c
1013
10779
#include <linux/types.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/zorro.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/amigaints.h> #include <asm/amigahw.h> #include "scsi.h" #include "wd33c93.h" #include "gvp11.h" #define CHECK_WD33C93 struct gvp11_hostdata { struct WD33C93_hostdata wh; struct gvp11_scsiregs *regs; }; static irqreturn_t gvp11_intr(int irq, void *data) { struct Scsi_Host *instance = data; struct gvp11_hostdata *hdata = shost_priv(instance); unsigned int status = hdata->regs->CNTR; unsigned long flags; if (!(status & GVP11_DMAC_INT_PENDING)) return IRQ_NONE; spin_lock_irqsave(instance->host_lock, flags); wd33c93_intr(instance); spin_unlock_irqrestore(instance->host_lock, flags); return IRQ_HANDLED; } static int gvp11_xfer_mask = 0; void gvp11_setup(char *str, int *ints) { gvp11_xfer_mask = ints[1]; } static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { struct Scsi_Host *instance = cmd->device->host; struct gvp11_hostdata *hdata = shost_priv(instance); struct WD33C93_hostdata *wh = &hdata->wh; struct gvp11_scsiregs *regs = hdata->regs; unsigned short cntr = GVP11_DMAC_INT_ENABLE; unsigned long addr = virt_to_bus(cmd->SCp.ptr); int bank_mask; static int scsi_alloc_out_of_range = 0; /* use bounce buffer if the physical address is bad */ if (addr & wh->dma_xfer_mask) { wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; if (!scsi_alloc_out_of_range) { wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len, GFP_KERNEL); wh->dma_buffer_pool = BUF_SCSI_ALLOCED; } if (scsi_alloc_out_of_range || !wh->dma_bounce_buffer) { wh->dma_bounce_buffer = amiga_chip_alloc(wh->dma_bounce_len, "GVP II SCSI Bounce Buffer"); if (!wh->dma_bounce_buffer) { wh->dma_bounce_len = 0; return 1; } wh->dma_buffer_pool = BUF_CHIP_ALLOCED; } /* check if the address of the bounce buffer is OK */ addr = virt_to_bus(wh->dma_bounce_buffer); if (addr & wh->dma_xfer_mask) { /* fall back to Chip RAM if address out of range */ if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) { kfree(wh->dma_bounce_buffer); scsi_alloc_out_of_range = 1; } else { amiga_chip_free(wh->dma_bounce_buffer); } wh->dma_bounce_buffer = amiga_chip_alloc(wh->dma_bounce_len, "GVP II SCSI Bounce Buffer"); if (!wh->dma_bounce_buffer) { wh->dma_bounce_len = 0; return 1; } addr = virt_to_bus(wh->dma_bounce_buffer); wh->dma_buffer_pool = BUF_CHIP_ALLOCED; } if (!dir_in) { /* copy to bounce buffer for a write */ memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr, cmd->SCp.this_residual); } } /* setup dma direction */ if (!dir_in) cntr |= GVP11_DMAC_DIR_WRITE; wh->dma_dir = dir_in; regs->CNTR = cntr; /* setup DMA *physical* address */ regs->ACR = addr; if (dir_in) { /* invalidate any cache */ cache_clear(addr, cmd->SCp.this_residual); } else { /* push any dirty cache */ cache_push(addr, cmd->SCp.this_residual); } bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0; if (bank_mask) regs->BANK = bank_mask & (addr >> 18); /* start DMA */ regs->ST_DMA = 1; /* return success */ return 0; } static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int status) { struct gvp11_hostdata *hdata = shost_priv(instance); struct WD33C93_hostdata *wh = &hdata->wh; struct gvp11_scsiregs *regs = hdata->regs; /* stop DMA */ regs->SP_DMA = 1; /* remove write bit from CONTROL bits */ regs->CNTR = GVP11_DMAC_INT_ENABLE; /* copy from a bounce buffer, if necessary */ if (status && wh->dma_bounce_buffer) { if (wh->dma_dir && SCpnt) 
memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer, SCpnt->SCp.this_residual); if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) kfree(wh->dma_bounce_buffer); else amiga_chip_free(wh->dma_bounce_buffer); wh->dma_bounce_buffer = NULL; wh->dma_bounce_len = 0; } } static int gvp11_bus_reset(struct scsi_cmnd *cmd) { struct Scsi_Host *instance = cmd->device->host; /* FIXME perform bus-specific reset */ /* FIXME 2: shouldn't we no-op this function (return FAILED), and fall back to host reset function, wd33c93_host_reset ? */ spin_lock_irq(instance->host_lock); wd33c93_host_reset(cmd); spin_unlock_irq(instance->host_lock); return SUCCESS; } static struct scsi_host_template gvp11_scsi_template = { .module = THIS_MODULE, .name = "GVP Series II SCSI", .proc_info = wd33c93_proc_info, .proc_name = "GVP11", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, .eh_bus_reset_handler = gvp11_bus_reset, .eh_host_reset_handler = wd33c93_host_reset, .can_queue = CAN_QUEUE, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = CMD_PER_LUN, .use_clustering = DISABLE_CLUSTERING }; static int __devinit check_wd33c93(struct gvp11_scsiregs *regs) { #ifdef CHECK_WD33C93 volatile unsigned char *sasr_3393, *scmd_3393; unsigned char save_sasr; unsigned char q, qq; /* * These darn GVP boards are a problem - it can be tough to tell * whether or not they include a SCSI controller. This is the * ultimate Yet-Another-GVP-Detection-Hack in that it actually * probes for a WD33c93 chip: If we find one, it's extremely * likely that this card supports SCSI, regardless of Product_ * Code, Board_Size, etc. */ /* Get pointers to the presumed register locations and save contents */ sasr_3393 = &regs->SASR; scmd_3393 = &regs->SCMD; save_sasr = *sasr_3393; /* First test the AuxStatus Reg */ q = *sasr_3393; /* read it */ if (q & 0x08) /* bit 3 should always be clear */ return -ENODEV; *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */ if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */ *sasr_3393 = save_sasr; /* Oops - restore this byte */ return -ENODEV; } if (*sasr_3393 != q) { /* should still read the same */ *sasr_3393 = save_sasr; /* Oops - restore this byte */ return -ENODEV; } if (*scmd_3393 != q) /* and so should the image at 0x1f */ return -ENODEV; /* * Ok, we probably have a wd33c93, but let's check a few other places * for good measure. Make sure that this works for both 'A and 'B * chip versions. 
*/ *sasr_3393 = WD_SCSI_STATUS; q = *scmd_3393; *sasr_3393 = WD_SCSI_STATUS; *scmd_3393 = ~q; *sasr_3393 = WD_SCSI_STATUS; qq = *scmd_3393; *sasr_3393 = WD_SCSI_STATUS; *scmd_3393 = q; if (qq != q) /* should be read only */ return -ENODEV; *sasr_3393 = 0x1e; /* this register is unimplemented */ q = *scmd_3393; *sasr_3393 = 0x1e; *scmd_3393 = ~q; *sasr_3393 = 0x1e; qq = *scmd_3393; *sasr_3393 = 0x1e; *scmd_3393 = q; if (qq != q || qq != 0xff) /* should be read only, all 1's */ return -ENODEV; *sasr_3393 = WD_TIMEOUT_PERIOD; q = *scmd_3393; *sasr_3393 = WD_TIMEOUT_PERIOD; *scmd_3393 = ~q; *sasr_3393 = WD_TIMEOUT_PERIOD; qq = *scmd_3393; *sasr_3393 = WD_TIMEOUT_PERIOD; *scmd_3393 = q; if (qq != (~q & 0xff)) /* should be read/write */ return -ENODEV; #endif /* CHECK_WD33C93 */ return 0; } static int __devinit gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent) { struct Scsi_Host *instance; unsigned long address; int error; unsigned int epc; unsigned int default_dma_xfer_mask; struct gvp11_hostdata *hdata; struct gvp11_scsiregs *regs; wd33c93_regs wdregs; default_dma_xfer_mask = ent->driver_data; /* * Rumors state that some GVP ram boards use the same product * code as the SCSI controllers. Therefore if the board-size * is not 64KB we asume it is a ram board and bail out. */ if (zorro_resource_len(z) != 0x10000) return -ENODEV; address = z->resource.start; if (!request_mem_region(address, 256, "wd33c93")) return -EBUSY; regs = (struct gvp11_scsiregs *)(ZTWO_VADDR(address)); error = check_wd33c93(regs); if (error) goto fail_check_or_alloc; instance = scsi_host_alloc(&gvp11_scsi_template, sizeof(struct gvp11_hostdata)); if (!instance) { error = -ENOMEM; goto fail_check_or_alloc; } instance->irq = IRQ_AMIGA_PORTS; instance->unique_id = z->slotaddr; regs->secret2 = 1; regs->secret1 = 0; regs->secret3 = 15; while (regs->CNTR & GVP11_DMAC_BUSY) ; regs->CNTR = 0; regs->BANK = 0; wdregs.SASR = &regs->SASR; wdregs.SCMD = &regs->SCMD; hdata = shost_priv(instance); if (gvp11_xfer_mask) hdata->wh.dma_xfer_mask = gvp11_xfer_mask; else hdata->wh.dma_xfer_mask = default_dma_xfer_mask; hdata->wh.no_sync = 0xff; hdata->wh.fast = 0; hdata->wh.dma_mode = CTRL_DMA; hdata->regs = regs; /* * Check for 14MHz SCSI clock */ epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000); wd33c93_init(instance, wdregs, dma_setup, dma_stop, (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10 : WD33C93_FS_12_15); error = request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, "GVP11 SCSI", instance); if (error) goto fail_irq; regs->CNTR = GVP11_DMAC_INT_ENABLE; error = scsi_add_host(instance, NULL); if (error) goto fail_host; zorro_set_drvdata(z, instance); scsi_scan_host(instance); return 0; fail_host: free_irq(IRQ_AMIGA_PORTS, instance); fail_irq: scsi_host_put(instance); fail_check_or_alloc: release_mem_region(address, 256); return error; } static void __devexit gvp11_remove(struct zorro_dev *z) { struct Scsi_Host *instance = zorro_get_drvdata(z); struct gvp11_hostdata *hdata = shost_priv(instance); hdata->regs->CNTR = 0; scsi_remove_host(instance); free_irq(IRQ_AMIGA_PORTS, instance); scsi_host_put(instance); release_mem_region(z->resource.start, 256); } /* * This should (hopefully) be the correct way to identify * all the different GVP SCSI controllers (except for the * SERIES I though). 
*/ static struct zorro_device_id gvp11_zorro_tbl[] __devinitdata = { { ZORRO_PROD_GVP_COMBO_030_R3_SCSI, ~0x00ffffff }, { ZORRO_PROD_GVP_SERIES_II, ~0x00ffffff }, { ZORRO_PROD_GVP_GFORCE_030_SCSI, ~0x01ffffff }, { ZORRO_PROD_GVP_A530_SCSI, ~0x01ffffff }, { ZORRO_PROD_GVP_COMBO_030_R4_SCSI, ~0x01ffffff }, { ZORRO_PROD_GVP_A1291, ~0x07ffffff }, { ZORRO_PROD_GVP_GFORCE_040_SCSI_1, ~0x07ffffff }, { 0 } }; MODULE_DEVICE_TABLE(zorro, gvp11_zorro_tbl); static struct zorro_driver gvp11_driver = { .name = "gvp11", .id_table = gvp11_zorro_tbl, .probe = gvp11_probe, .remove = __devexit_p(gvp11_remove), }; static int __init gvp11_init(void) { return zorro_register_driver(&gvp11_driver); } module_init(gvp11_init); static void __exit gvp11_exit(void) { zorro_unregister_driver(&gvp11_driver); } module_exit(gvp11_exit); MODULE_DESCRIPTION("GVP Series II SCSI"); MODULE_LICENSE("GPL");
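/*
 * Standalone user-space sketch (compile separately; not part of the driver
 * above). dma_setup() falls back to a bounce buffer whenever the buffer's
 * bus address has bits set that the DMA engine cannot drive
 * (addr & dma_xfer_mask). Here malloc() pointers stand in for bus
 * addresses, so on a 64-bit host the fallback path will almost always
 * trigger; the 24-bit reach and all names are invented for illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define DEMO_XFER_MASK	(~0x00ffffffUL)	/* engine drives only the low 24 bits */

/* Return a DMA-safe source for @len bytes at @buf; sets *bounce if copied. */
static void *dma_safe_src(void *buf, size_t len, void **bounce)
{
	*bounce = NULL;
	if (!((uintptr_t)buf & DEMO_XFER_MASK))
		return buf;			/* directly reachable */

	*bounce = malloc(len);			/* stand-in for amiga_chip_alloc() */
	if (!*bounce)
		return NULL;
	memcpy(*bounce, buf, len);		/* stage the write data */
	return *bounce;
}

int main(void)
{
	char payload[] = "scsi write payload";
	void *bounce;
	void *src = dma_safe_src(payload, sizeof(payload), &bounce);

	if (src)
		printf("DMA from %p (%s)\n", src, bounce ? "bounced" : "direct");
	free(bounce);
	return 0;
}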
gpl-2.0
mhmtemnacr/android_kernel_samsung_matissewifi
drivers/net/ethernet/chelsio/cxgb3/sge.c
5109
93465
/* * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/prefetch.h> #include <net/arp.h> #include "common.h" #include "regs.h" #include "sge_defs.h" #include "t3_cpl.h" #include "firmware_exports.h" #include "cxgb3_offload.h" #define USE_GTS 0 #define SGE_RX_SM_BUF_SIZE 1536 #define SGE_RX_COPY_THRES 256 #define SGE_RX_PULL_LEN 128 #define SGE_PG_RSVD SMP_CACHE_BYTES /* * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks. * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs * directly. */ #define FL0_PG_CHUNK_SIZE 2048 #define FL0_PG_ORDER 0 #define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER) #define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192) #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1) #define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER) #define SGE_RX_DROP_THRES 16 #define RX_RECLAIM_PERIOD (HZ/4) /* * Max number of Rx buffers we replenish at a time. */ #define MAX_RX_REFILL 16U /* * Period of the Tx buffer reclaim timer. This timer does not need to run * frequently as Tx buffers are usually reclaimed by new Tx packets. */ #define TX_RECLAIM_PERIOD (HZ / 4) #define TX_RECLAIM_TIMER_CHUNK 64U #define TX_RECLAIM_CHUNK 16U /* WR size in bytes */ #define WR_LEN (WR_FLITS * 8) /* * Types of Tx queues in each queue set. Order here matters, do not change. 
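* * For illustration only: these enum values are also used as bit numbers in * sge_qset.txq_stopped and line up with the hardware's TXQ0/TXQ1/TXQ2 credit * fields decoded in handle_rsp_cntrl_info(), so renumbering them would break * both uses.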
*/ enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL }; /* Values for sge_txq.flags */ enum { TXQ_RUNNING = 1 << 0, /* fetch engine is running */ TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */ }; struct tx_desc { __be64 flit[TX_DESC_FLITS]; }; struct rx_desc { __be32 addr_lo; __be32 len_gen; __be32 gen2; __be32 addr_hi; }; struct tx_sw_desc { /* SW state per Tx descriptor */ struct sk_buff *skb; u8 eop; /* set if last descriptor for packet */ u8 addr_idx; /* buffer index of first SGL entry in descriptor */ u8 fragidx; /* first page fragment associated with descriptor */ s8 sflit; /* start flit of first SGL entry in descriptor */ }; struct rx_sw_desc { /* SW state per Rx descriptor */ union { struct sk_buff *skb; struct fl_pg_chunk pg_chunk; }; DEFINE_DMA_UNMAP_ADDR(dma_addr); }; struct rsp_desc { /* response queue descriptor */ struct rss_header rss_hdr; __be32 flags; __be32 len_cq; u8 imm_data[47]; u8 intr_gen; }; /* * Holds unmapping information for Tx packets that need deferred unmapping. * This structure lives at skb->head and must be allocated by callers. */ struct deferred_unmap_info { struct pci_dev *pdev; dma_addr_t addr[MAX_SKB_FRAGS + 1]; }; /* * Maps a number of flits to the number of Tx descriptors that can hold them. * The formula is * * desc = 1 + (flits - 2) / (WR_FLITS - 1). * * HW allows up to 4 descriptors to be combined into a WR. */ static u8 flit_desc_map[] = { 0, #if SGE_NUM_GENBITS == 1 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 #elif SGE_NUM_GENBITS == 2 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, #else # error "SGE_NUM_GENBITS must be 1 or 2" #endif }; static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx) { return container_of(q, struct sge_qset, fl[qidx]); } static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q) { return container_of(q, struct sge_qset, rspq); } static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx) { return container_of(q, struct sge_qset, txq[qidx]); } /** * refill_rspq - replenish an SGE response queue * @adapter: the adapter * @q: the response queue to replenish * @credits: how many new responses to make available * * Replenishes a response queue by making the supplied number of responses * available to HW. */ static inline void refill_rspq(struct adapter *adapter, const struct sge_rspq *q, unsigned int credits) { rmb(); t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN, V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); } /** * need_skb_unmap - does the platform need unmapping of sk_buffs? * * Returns true if the platform needs sk_buff unmapping. The compiler * optimizes away unnecessary code if this returns true. */ static inline int need_skb_unmap(void) { #ifdef CONFIG_NEED_DMA_MAP_STATE return 1; #else return 0; #endif } /** * unmap_skb - unmap a packet main body and its page fragments * @skb: the packet * @q: the Tx queue containing Tx descriptors for the packet * @cidx: index of Tx descriptor * @pdev: the PCI device * * Unmap the main body of an sk_buff and its page fragments, if any. 
* Because of the fairly complicated structure of our SGLs and the desire * to conserve space for metadata, the information necessary to unmap an * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx * descriptors (the physical addresses of the various data buffers), and * the SW descriptor state (assorted indices). The send functions * initialize the indices for the first packet descriptor so we can unmap * the buffers held in the first Tx descriptor here, and we have enough * information at this point to set the state for the next Tx descriptor. * * Note that it is possible to clean up the first descriptor of a packet * before the send routines have written the next descriptors, but this * race does not cause any problem. We just end up writing the unmapping * info for the descriptor first. */ static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, unsigned int cidx, struct pci_dev *pdev) { const struct sg_ent *sgp; struct tx_sw_desc *d = &q->sdesc[cidx]; int nfrags, frag_idx, curflit, j = d->addr_idx; sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit]; frag_idx = d->fragidx; if (frag_idx == 0 && skb_headlen(skb)) { pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), skb_headlen(skb), PCI_DMA_TODEVICE); j = 1; } curflit = d->sflit + 1 + j; nfrags = skb_shinfo(skb)->nr_frags; while (frag_idx < nfrags && curflit < WR_FLITS) { pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]), skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]), PCI_DMA_TODEVICE); j ^= 1; if (j == 0) { sgp++; curflit++; } curflit++; frag_idx++; } if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */ d = cidx + 1 == q->size ? q->sdesc : d + 1; d->fragidx = frag_idx; d->addr_idx = j; d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */ } } /** * free_tx_desc - reclaims Tx descriptors and their buffers * @adapter: the adapter * @q: the Tx queue to reclaim descriptors from * @n: the number of descriptors to reclaim * * Reclaims Tx descriptors from an SGE Tx queue and frees the associated * Tx buffers. Called with the Tx queue lock held. */ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q, unsigned int n) { struct tx_sw_desc *d; struct pci_dev *pdev = adapter->pdev; unsigned int cidx = q->cidx; const int need_unmap = need_skb_unmap() && q->cntxt_id >= FW_TUNNEL_SGEEC_START; d = &q->sdesc[cidx]; while (n--) { if (d->skb) { /* an SGL is present */ if (need_unmap) unmap_skb(d->skb, q, cidx, pdev); if (d->eop) { kfree_skb(d->skb); d->skb = NULL; } } ++d; if (++cidx == q->size) { cidx = 0; d = q->sdesc; } } q->cidx = cidx; } /** * reclaim_completed_tx - reclaims completed Tx descriptors * @adapter: the adapter * @q: the Tx queue to reclaim completed descriptors from * @chunk: maximum number of descriptors to reclaim * * Reclaims Tx descriptors that the SGE has indicated it has processed, * and frees the associated buffers if possible. Called with the Tx * queue's lock held. */ static inline unsigned int reclaim_completed_tx(struct adapter *adapter, struct sge_txq *q, unsigned int chunk) { unsigned int reclaim = q->processed - q->cleaned; reclaim = min(chunk, reclaim); if (reclaim) { free_tx_desc(adapter, q, reclaim); q->cleaned += reclaim; q->in_use -= reclaim; } return q->processed - q->cleaned; } /** * should_restart_tx - are there enough resources to restart a Tx queue? * @q: the Tx queue * * Checks if there are enough descriptors to restart a suspended Tx queue. 
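* * For illustration: the check below counts processed-but-not-yet-cleaned * descriptors as free, so a suspended queue is only deemed restartable once * more than half of its ring would be available after a reclaim.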
*/ static inline int should_restart_tx(const struct sge_txq *q) { unsigned int r = q->processed - q->cleaned; return q->in_use - r < (q->size >> 1); } static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, struct rx_sw_desc *d) { if (q->use_pages && d->pg_chunk.page) { (*d->pg_chunk.p_cnt)--; if (!*d->pg_chunk.p_cnt) pci_unmap_page(pdev, d->pg_chunk.mapping, q->alloc_size, PCI_DMA_FROMDEVICE); put_page(d->pg_chunk.page); d->pg_chunk.page = NULL; } else { pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr), q->buf_size, PCI_DMA_FROMDEVICE); kfree_skb(d->skb); d->skb = NULL; } } /** * free_rx_bufs - free the Rx buffers on an SGE free list * @pdev: the PCI device associated with the adapter * @rxq: the SGE free list to clean up * * Release the buffers on an SGE free-buffer Rx queue. HW fetching from * this queue should be stopped before calling this function. */ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q) { unsigned int cidx = q->cidx; while (q->credits--) { struct rx_sw_desc *d = &q->sdesc[cidx]; clear_rx_desc(pdev, q, d); if (++cidx == q->size) cidx = 0; } if (q->pg_chunk.page) { __free_pages(q->pg_chunk.page, q->order); q->pg_chunk.page = NULL; } } /** * add_one_rx_buf - add a packet buffer to a free-buffer list * @va: buffer start VA * @len: the buffer length * @d: the HW Rx descriptor to write * @sd: the SW Rx descriptor to write * @gen: the generation bit value * @pdev: the PCI device associated with the adapter * * Add a buffer of the given length to the supplied HW and SW Rx * descriptors. */ static inline int add_one_rx_buf(void *va, unsigned int len, struct rx_desc *d, struct rx_sw_desc *sd, unsigned int gen, struct pci_dev *pdev) { dma_addr_t mapping; mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); if (unlikely(pci_dma_mapping_error(pdev, mapping))) return -ENOMEM; dma_unmap_addr_set(sd, dma_addr, mapping); d->addr_lo = cpu_to_be32(mapping); d->addr_hi = cpu_to_be32((u64) mapping >> 32); wmb(); d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); return 0; } static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d, unsigned int gen) { d->addr_lo = cpu_to_be32(mapping); d->addr_hi = cpu_to_be32((u64) mapping >> 32); wmb(); d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); return 0; } static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp, unsigned int order) { if (!q->pg_chunk.page) { dma_addr_t mapping; q->pg_chunk.page = alloc_pages(gfp, order); if (unlikely(!q->pg_chunk.page)) return -ENOMEM; q->pg_chunk.va = page_address(q->pg_chunk.page); q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) - SGE_PG_RSVD; q->pg_chunk.offset = 0; mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 0, q->alloc_size, PCI_DMA_FROMDEVICE); q->pg_chunk.mapping = mapping; } sd->pg_chunk = q->pg_chunk; prefetch(sd->pg_chunk.p_cnt); q->pg_chunk.offset += q->buf_size; if (q->pg_chunk.offset == (PAGE_SIZE << order)) q->pg_chunk.page = NULL; else { q->pg_chunk.va += q->buf_size; get_page(q->pg_chunk.page); } if (sd->pg_chunk.offset == 0) *sd->pg_chunk.p_cnt = 1; else *sd->pg_chunk.p_cnt += 1; return 0; } static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) { if (q->pend_cred >= q->credits / 4) { q->pend_cred = 0; wmb(); t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); } } /** * refill_fl - refill an SGE free-buffer list * @adapter: the adapter * @q: the free-list to refill * @n: the number of new 
buffers to allocate * @gfp: the gfp flags for allocating new buffers * * (Re)populate an SGE free-buffer list with up to @n new packet buffers, * allocated with the supplied gfp flags. The caller must assure that * @n does not exceed the queue's capacity. */ static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) { struct rx_sw_desc *sd = &q->sdesc[q->pidx]; struct rx_desc *d = &q->desc[q->pidx]; unsigned int count = 0; while (n--) { dma_addr_t mapping; int err; if (q->use_pages) { if (unlikely(alloc_pg_chunk(adap, q, sd, gfp, q->order))) { nomem: q->alloc_failed++; break; } mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset; dma_unmap_addr_set(sd, dma_addr, mapping); add_one_rx_chunk(mapping, d, q->gen); pci_dma_sync_single_for_device(adap->pdev, mapping, q->buf_size - SGE_PG_RSVD, PCI_DMA_FROMDEVICE); } else { void *buf_start; struct sk_buff *skb = alloc_skb(q->buf_size, gfp); if (!skb) goto nomem; sd->skb = skb; buf_start = skb->data; err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen, adap->pdev); if (unlikely(err)) { clear_rx_desc(adap->pdev, q, sd); break; } } d++; sd++; if (++q->pidx == q->size) { q->pidx = 0; q->gen ^= 1; sd = q->sdesc; d = q->desc; } count++; } q->credits += count; q->pend_cred += count; ring_fl_db(adap, q); return count; } static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) { refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits), GFP_ATOMIC | __GFP_COMP); } /** * recycle_rx_buf - recycle a receive buffer * @adapter: the adapter * @q: the SGE free list * @idx: index of buffer to recycle * * Recycles the specified buffer on the given free list by adding it at * the next available slot on the list. */ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q, unsigned int idx) { struct rx_desc *from = &q->desc[idx]; struct rx_desc *to = &q->desc[q->pidx]; q->sdesc[q->pidx] = q->sdesc[idx]; to->addr_lo = from->addr_lo; /* already big endian */ to->addr_hi = from->addr_hi; /* likewise */ wmb(); to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen)); to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen)); if (++q->pidx == q->size) { q->pidx = 0; q->gen ^= 1; } q->credits++; q->pend_cred++; ring_fl_db(adap, q); } /** * alloc_ring - allocate resources for an SGE descriptor ring * @pdev: the PCI device * @nelem: the number of descriptors * @elem_size: the size of each descriptor * @sw_size: the size of the SW state associated with each ring element * @phys: the physical address of the allocated ring * @metadata: address of the array holding the SW state for the ring * * Allocates resources for an SGE descriptor ring, such as Tx queues, * free buffer lists, or response queues. Each SGE ring requires * space for its HW descriptors plus, optionally, space for the SW state * associated with each HW entry (the metadata). The function returns * three values: the virtual address for the HW ring (the return value * of the function), the physical address of the HW ring, and the address * of the SW ring. 
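* * Hypothetical call sketch (mirroring how the rings are set up later in * this file, e.g. in t3_sge_alloc_qset()): * * q->desc = alloc_ring(pdev, q->size, sizeof(struct rx_desc), * sizeof(struct rx_sw_desc), &q->phys_addr, &q->sdesc);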
*/ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, size_t sw_size, dma_addr_t * phys, void *metadata) { size_t len = nelem * elem_size; void *s = NULL; void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); if (!p) return NULL; if (sw_size && metadata) { s = kcalloc(nelem, sw_size, GFP_KERNEL); if (!s) { dma_free_coherent(&pdev->dev, len, p, *phys); return NULL; } *(void **)metadata = s; } memset(p, 0, len); return p; } /** * t3_reset_qset - reset a sge qset * @q: the queue set * * Reset the qset structure. * the NAPI structure is preserved in the event of * the qset's reincarnation, for example during EEH recovery. */ static void t3_reset_qset(struct sge_qset *q) { if (q->adap && !(q->adap->flags & NAPI_INIT)) { memset(q, 0, sizeof(*q)); return; } q->adap = NULL; memset(&q->rspq, 0, sizeof(q->rspq)); memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); q->txq_stopped = 0; q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ q->rx_reclaim_timer.function = NULL; q->nomem = 0; napi_free_frags(&q->napi); } /** * free_qset - free the resources of an SGE queue set * @adapter: the adapter owning the queue set * @q: the queue set * * Release the HW and SW resources associated with an SGE queue set, such * as HW contexts, packet buffers, and descriptor rings. Traffic to the * queue set must be quiesced prior to calling this. */ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q) { int i; struct pci_dev *pdev = adapter->pdev; for (i = 0; i < SGE_RXQ_PER_SET; ++i) if (q->fl[i].desc) { spin_lock_irq(&adapter->sge.reg_lock); t3_sge_disable_fl(adapter, q->fl[i].cntxt_id); spin_unlock_irq(&adapter->sge.reg_lock); free_rx_bufs(pdev, &q->fl[i]); kfree(q->fl[i].sdesc); dma_free_coherent(&pdev->dev, q->fl[i].size * sizeof(struct rx_desc), q->fl[i].desc, q->fl[i].phys_addr); } for (i = 0; i < SGE_TXQ_PER_SET; ++i) if (q->txq[i].desc) { spin_lock_irq(&adapter->sge.reg_lock); t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); spin_unlock_irq(&adapter->sge.reg_lock); if (q->txq[i].sdesc) { free_tx_desc(adapter, &q->txq[i], q->txq[i].in_use); kfree(q->txq[i].sdesc); } dma_free_coherent(&pdev->dev, q->txq[i].size * sizeof(struct tx_desc), q->txq[i].desc, q->txq[i].phys_addr); __skb_queue_purge(&q->txq[i].sendq); } if (q->rspq.desc) { spin_lock_irq(&adapter->sge.reg_lock); t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id); spin_unlock_irq(&adapter->sge.reg_lock); dma_free_coherent(&pdev->dev, q->rspq.size * sizeof(struct rsp_desc), q->rspq.desc, q->rspq.phys_addr); } t3_reset_qset(q); } /** * init_qset_cntxt - initialize an SGE queue set context info * @qs: the queue set * @id: the queue set id * * Initializes the TIDs and context ids for the queues of a queue set. */ static void init_qset_cntxt(struct sge_qset *qs, unsigned int id) { qs->rspq.cntxt_id = id; qs->fl[0].cntxt_id = 2 * id; qs->fl[1].cntxt_id = 2 * id + 1; qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id; qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id; qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id; qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id; qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id; } /** * sgl_len - calculates the size of an SGL of the given capacity * @n: the number of SGL entries * * Calculates the number of flits needed for a scatter/gather list that * can hold the given number of entries. 
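* * Worked example: an SGL entry is an 8-byte address plus a 4-byte length and * entries are packed two per three flits, so sgl_len(1) = 2, sgl_len(2) = 3 * and sgl_len(3) = 5.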
*/ static inline unsigned int sgl_len(unsigned int n) { /* alternatively: 3 * (n / 2) + 2 * (n & 1) */ return (3 * n) / 2 + (n & 1); } /** * flits_to_desc - returns the num of Tx descriptors for the given flits * @n: the number of flits * * Calculates the number of Tx descriptors needed for the supplied number * of flits. */ static inline unsigned int flits_to_desc(unsigned int n) { BUG_ON(n >= ARRAY_SIZE(flit_desc_map)); return flit_desc_map[n]; } /** * get_packet - return the next ingress packet buffer from a free list * @adap: the adapter that received the packet * @fl: the SGE free list holding the packet * @len: the packet length including any SGE padding * @drop_thres: # of remaining buffers before we start dropping packets * * Get the next packet from a free list and complete setup of the * sk_buff. If the packet is small we make a copy and recycle the * original buffer, otherwise we use the original buffer itself. If a * positive drop threshold is supplied packets are dropped and their * buffers recycled if (a) the number of remaining buffers is under the * threshold and the packet is too big to copy, or (b) the packet should * be copied but there is no memory for the copy. */ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl, unsigned int len, unsigned int drop_thres) { struct sk_buff *skb = NULL; struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; prefetch(sd->skb->data); fl->credits--; if (len <= SGE_RX_COPY_THRES) { skb = alloc_skb(len, GFP_ATOMIC); if (likely(skb != NULL)) { __skb_put(skb, len); pci_dma_sync_single_for_cpu(adap->pdev, dma_unmap_addr(sd, dma_addr), len, PCI_DMA_FROMDEVICE); memcpy(skb->data, sd->skb->data, len); pci_dma_sync_single_for_device(adap->pdev, dma_unmap_addr(sd, dma_addr), len, PCI_DMA_FROMDEVICE); } else if (!drop_thres) goto use_orig_buf; recycle: recycle_rx_buf(adap, fl, fl->cidx); return skb; } if (unlikely(fl->credits < drop_thres) && refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1), GFP_ATOMIC | __GFP_COMP) == 0) goto recycle; use_orig_buf: pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr), fl->buf_size, PCI_DMA_FROMDEVICE); skb = sd->skb; skb_put(skb, len); __refill_fl(adap, fl); return skb; } /** * get_packet_pg - return the next ingress packet buffer from a free list * @adap: the adapter that received the packet * @fl: the SGE free list holding the packet * @len: the packet length including any SGE padding * @drop_thres: # of remaining buffers before we start dropping packets * * Get the next packet from a free list populated with page chunks. * If the packet is small we make a copy and recycle the original buffer, * otherwise we attach the original buffer as a page fragment to a fresh * sk_buff. If a positive drop threshold is supplied packets are dropped * and their buffers recycled if (a) the number of remaining buffers is * under the threshold and the packet is too big to copy, or (b) there's * no system memory. * * Note: this function is similar to @get_packet but deals with Rx buffers * that are page chunks rather than sk_buffs. 
*/ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl, struct sge_rspq *q, unsigned int len, unsigned int drop_thres) { struct sk_buff *newskb, *skb; struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr); newskb = skb = q->pg_skb; if (!skb && (len <= SGE_RX_COPY_THRES)) { newskb = alloc_skb(len, GFP_ATOMIC); if (likely(newskb != NULL)) { __skb_put(newskb, len); pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); memcpy(newskb->data, sd->pg_chunk.va, len); pci_dma_sync_single_for_device(adap->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); } else if (!drop_thres) return NULL; recycle: fl->credits--; recycle_rx_buf(adap, fl, fl->cidx); q->rx_recycle_buf++; return newskb; } if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres))) goto recycle; prefetch(sd->pg_chunk.p_cnt); if (!skb) newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC); if (unlikely(!newskb)) { if (!drop_thres) return NULL; goto recycle; } pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); (*sd->pg_chunk.p_cnt)--; if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) pci_unmap_page(adap->pdev, sd->pg_chunk.mapping, fl->alloc_size, PCI_DMA_FROMDEVICE); if (!skb) { __skb_put(newskb, SGE_RX_PULL_LEN); memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN); skb_fill_page_desc(newskb, 0, sd->pg_chunk.page, sd->pg_chunk.offset + SGE_RX_PULL_LEN, len - SGE_RX_PULL_LEN); newskb->len = len; newskb->data_len = len - SGE_RX_PULL_LEN; newskb->truesize += newskb->data_len; } else { skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags, sd->pg_chunk.page, sd->pg_chunk.offset, len); newskb->len += len; newskb->data_len += len; newskb->truesize += len; } fl->credits--; /* * We do not refill FLs here, we let the caller do it to overlap a * prefetch. */ return newskb; } /** * get_imm_packet - return the next ingress packet buffer from a response * @resp: the response descriptor containing the packet data * * Return a packet containing the immediate data of the given response. */ static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp) { struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC); if (skb) { __skb_put(skb, IMMED_PKT_SIZE); skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE); } return skb; } /** * calc_tx_descs - calculate the number of Tx descriptors for a packet * @skb: the packet * * Returns the number of Tx descriptors needed for the given Ethernet * packet. Ethernet packets require addition of WR and CPL headers. */ static inline unsigned int calc_tx_descs(const struct sk_buff *skb) { unsigned int flits; if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt)) return 1; flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2; if (skb_shinfo(skb)->gso_size) flits++; return flits_to_desc(flits); } /** * make_sgl - populate a scatter/gather list for a packet * @skb: the packet * @sgp: the SGL to populate * @start: start address of skb main body data to include in the SGL * @len: length of skb main body data to include in the SGL * @pdev: the PCI device * * Generates a scatter/gather list for the buffers that make up a packet * and returns the SGL size in 8-byte words. The caller must size the SGL * appropriately. 
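* * Worked example: a packet with a non-empty main body and two page fragments * produces three SGL entries, i.e. ((2 + 1) * 3) / 2 + 1 = 5 flits, matching * sgl_len(3).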
*/ static inline unsigned int make_sgl(const struct sk_buff *skb, struct sg_ent *sgp, unsigned char *start, unsigned int len, struct pci_dev *pdev) { dma_addr_t mapping; unsigned int i, j = 0, nfrags; if (len) { mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE); sgp->len[0] = cpu_to_be32(len); sgp->addr[0] = cpu_to_be64(mapping); j = 1; } nfrags = skb_shinfo(skb)->nr_frags; for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); sgp->addr[j] = cpu_to_be64(mapping); j ^= 1; if (j == 0) ++sgp; } if (j) sgp->len[j] = 0; return ((nfrags + (len != 0)) * 3) / 2 + j; } /** * check_ring_tx_db - check and potentially ring a Tx queue's doorbell * @adap: the adapter * @q: the Tx queue * * Ring the doorbell if a Tx queue is asleep. There is a natural race, * where the HW is going to sleep just after we checked, however, * then the interrupt handler will detect the outstanding TX packet * and ring the doorbell for us. * * When GTS is disabled we unconditionally ring the doorbell. */ static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q) { #if USE_GTS clear_bit(TXQ_LAST_PKT_DB, &q->flags); if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) { set_bit(TXQ_LAST_PKT_DB, &q->flags); t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); } #else wmb(); /* write descriptors before telling HW */ t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); #endif } static inline void wr_gen2(struct tx_desc *d, unsigned int gen) { #if SGE_NUM_GENBITS == 2 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen); #endif } /** * write_wr_hdr_sgl - write a WR header and, optionally, SGL * @ndesc: number of Tx descriptors spanned by the SGL * @skb: the packet corresponding to the WR * @d: first Tx descriptor to be written * @pidx: index of above descriptors * @q: the SGE Tx queue * @sgl: the SGL * @flits: number of flits to the start of the SGL in the first descriptor * @sgl_flits: the SGL size in flits * @gen: the Tx descriptor generation * @wr_hi: top 32 bits of WR header based on WR type (big endian) * @wr_lo: low 32 bits of WR header based on WR type (big endian) * * Write a work request header and an associated SGL. If the SGL is * small enough to fit into one Tx descriptor it has already been written * and we just need to write the WR header. Otherwise we distribute the * SGL across the number of descriptors it spans.
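* * Ordering note, for illustration: in the multi-descriptor case the first * descriptor's wr_lo keeps the original generation (ogen) and is written * last, behind a wmb(), so the SGE never sees a partially written request.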
*/ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb, struct tx_desc *d, unsigned int pidx, const struct sge_txq *q, const struct sg_ent *sgl, unsigned int flits, unsigned int sgl_flits, unsigned int gen, __be32 wr_hi, __be32 wr_lo) { struct work_request_hdr *wrp = (struct work_request_hdr *)d; struct tx_sw_desc *sd = &q->sdesc[pidx]; sd->skb = skb; if (need_skb_unmap()) { sd->fragidx = 0; sd->addr_idx = 0; sd->sflit = flits; } if (likely(ndesc == 1)) { sd->eop = 1; wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) | V_WR_SGLSFLT(flits)) | wr_hi; wmb(); wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) | V_WR_GEN(gen)) | wr_lo; wr_gen2(d, gen); } else { unsigned int ogen = gen; const u64 *fp = (const u64 *)sgl; struct work_request_hdr *wp = wrp; wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) | V_WR_SGLSFLT(flits)) | wr_hi; while (sgl_flits) { unsigned int avail = WR_FLITS - flits; if (avail > sgl_flits) avail = sgl_flits; memcpy(&d->flit[flits], fp, avail * sizeof(*fp)); sgl_flits -= avail; ndesc--; if (!sgl_flits) break; fp += avail; d++; sd->eop = 0; sd++; if (++pidx == q->size) { pidx = 0; gen ^= 1; d = q->desc; sd = q->sdesc; } sd->skb = skb; wrp = (struct work_request_hdr *)d; wrp->wr_hi = htonl(V_WR_DATATYPE(1) | V_WR_SGLSFLT(1)) | wr_hi; wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS, sgl_flits + 1)) | V_WR_GEN(gen)) | wr_lo; wr_gen2(d, gen); flits = 1; } sd->eop = 1; wrp->wr_hi |= htonl(F_WR_EOP); wmb(); wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo; wr_gen2((struct tx_desc *)wp, ogen); WARN_ON(ndesc != 0); } } /** * write_tx_pkt_wr - write a TX_PKT work request * @adap: the adapter * @skb: the packet to send * @pi: the egress interface * @pidx: index of the first Tx descriptor to write * @gen: the generation value to use * @q: the Tx queue * @ndesc: number of descriptors the packet will occupy * @compl: the value of the COMPL bit to use * * Generate a TX_PKT work request to send the supplied packet. */ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, const struct port_info *pi, unsigned int pidx, unsigned int gen, struct sge_txq *q, unsigned int ndesc, unsigned int compl) { unsigned int flits, sgl_flits, cntrl, tso_info; struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; struct tx_desc *d = &q->desc[pidx]; struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d; cpl->len = htonl(skb->len); cntrl = V_TXPKT_INTF(pi->port_id); if (vlan_tx_tag_present(skb)) cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb)); tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size); if (tso_info) { int eth_type; struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl; d->flit[2] = 0; cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO); hdr->cntrl = htonl(cntrl); eth_type = skb_network_offset(skb) == ETH_HLEN ? 
CPL_ETH_II : CPL_ETH_II_VLAN; tso_info |= V_LSO_ETH_TYPE(eth_type) | V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) | V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff); hdr->lso_info = htonl(tso_info); flits = 3; } else { cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT); cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */ cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL); cpl->cntrl = htonl(cntrl); if (skb->len <= WR_LEN - sizeof(*cpl)) { q->sdesc[pidx].skb = NULL; if (!skb->data_len) skb_copy_from_linear_data(skb, &d->flit[2], skb->len); else skb_copy_bits(skb, 0, &d->flit[2], skb->len); flits = (skb->len + 7) / 8 + 2; cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) | V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | F_WR_SOP | F_WR_EOP | compl); wmb(); cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) | V_WR_TID(q->token)); wr_gen2(d, gen); kfree_skb(skb); return; } flits = 2; } sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), htonl(V_WR_TID(q->token))); } static inline void t3_stop_tx_queue(struct netdev_queue *txq, struct sge_qset *qs, struct sge_txq *q) { netif_tx_stop_queue(txq); set_bit(TXQ_ETH, &qs->txq_stopped); q->stops++; } /** * eth_xmit - add a packet to the Ethernet Tx queue * @skb: the packet * @dev: the egress net device * * Add a packet to an SGE Tx queue. Runs with softirqs disabled. */ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) { int qidx; unsigned int ndesc, pidx, credits, gen, compl; const struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; struct netdev_queue *txq; struct sge_qset *qs; struct sge_txq *q; /* * The chip min packet length is 9 octets but play safe and reject * anything shorter than an Ethernet header. */ if (unlikely(skb->len < ETH_HLEN)) { dev_kfree_skb(skb); return NETDEV_TX_OK; } qidx = skb_get_queue_mapping(skb); qs = &pi->qs[qidx]; q = &qs->txq[TXQ_ETH]; txq = netdev_get_tx_queue(dev, qidx); reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); credits = q->size - q->in_use; ndesc = calc_tx_descs(skb); if (unlikely(credits < ndesc)) { t3_stop_tx_queue(txq, qs, q); dev_err(&adap->pdev->dev, "%s: Tx ring %u full while queue awake!\n", dev->name, q->cntxt_id & 7); return NETDEV_TX_BUSY; } q->in_use += ndesc; if (unlikely(credits - ndesc < q->stop_thres)) { t3_stop_tx_queue(txq, qs, q); if (should_restart_tx(q) && test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { q->restarts++; netif_tx_start_queue(txq); } } gen = q->gen; q->unacked += ndesc; compl = (q->unacked & 8) << (S_WR_COMPL - 3); q->unacked &= 7; pidx = q->pidx; q->pidx += ndesc; if (q->pidx >= q->size) { q->pidx -= q->size; q->gen ^= 1; } /* update port statistics */ if (skb->ip_summed == CHECKSUM_COMPLETE) qs->port_stats[SGE_PSTAT_TX_CSUM]++; if (skb_shinfo(skb)->gso_size) qs->port_stats[SGE_PSTAT_TSO]++; if (vlan_tx_tag_present(skb)) qs->port_stats[SGE_PSTAT_VLANINS]++; /* * We do not use Tx completion interrupts to free DMAd Tx packets. * This is good for performance but means that we rely on new Tx * packets arriving to run the destructors of completed packets, * which open up space in their sockets' send queues. Sometimes * we do not get such new packets causing Tx to stall. A single * UDP transmitter is a good example of this situation. 
We have * a clean up timer that periodically reclaims completed packets * but it doesn't run often enough (nor do we want it to) to prevent * lengthy stalls. A solution to this problem is to run the * destructor early, after the packet is queued but before it's DMAd. * A downside is that we lie to socket memory accounting, but the amount * of extra memory is reasonable (limited by the number of Tx * descriptors), the packets do actually get freed quickly by new * packets almost always, and for protocols like TCP that wait for * acks to really free up the data the extra memory is even less. * On the positive side we run the destructors on the sending CPU * rather than on a potentially different completing CPU, usually a * good thing. We also run them without holding our Tx queue lock, * unlike what reclaim_completed_tx() would otherwise do. * * Run the destructor before telling the DMA engine about the packet * to make sure it doesn't complete and get freed prematurely. */ if (likely(!skb_shared(skb))) skb_orphan(skb); write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); check_ring_tx_db(adap, q); return NETDEV_TX_OK; } /** * write_imm - write a packet into a Tx descriptor as immediate data * @d: the Tx descriptor to write * @skb: the packet * @len: the length of packet data to write as immediate data * @gen: the generation bit value to write * * Writes a packet as immediate data into a Tx descriptor. The packet * contains a work request at its beginning. We must write the packet * carefully so the SGE doesn't read it accidentally before it's written * in its entirety. */ static inline void write_imm(struct tx_desc *d, struct sk_buff *skb, unsigned int len, unsigned int gen) { struct work_request_hdr *from = (struct work_request_hdr *)skb->data; struct work_request_hdr *to = (struct work_request_hdr *)d; if (likely(!skb->data_len)) memcpy(&to[1], &from[1], len - sizeof(*from)); else skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from)); to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP | V_WR_BCNTLFLT(len & 7)); wmb(); to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) | V_WR_LEN((len + 7) / 8)); wr_gen2(d, gen); kfree_skb(skb); } /** * check_desc_avail - check descriptor availability on a send queue * @adap: the adapter * @q: the send queue * @skb: the packet needing the descriptors * @ndesc: the number of Tx descriptors needed * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL) * * Checks if the requested number of Tx descriptors is available on an * SGE send queue. If the queue is already suspended or not enough * descriptors are available the packet is queued for later transmission. * Must be called with the Tx queue locked. * * Returns 0 if enough descriptors are available, 1 if there aren't * enough descriptors and the packet has been queued, and 2 if the caller * needs to retry because there weren't enough descriptors at the * beginning of the call but some freed up in the meantime.
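* * Typical caller pattern, for illustration (compare ctrl_xmit() and * ofld_xmit() below): reclaim completed descriptors, call * check_desc_avail(), retry from the reclaim on a return of 2, and back * off with NET_XMIT_CN on a return of 1 since the packet is already queued.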
*/ static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q, struct sk_buff *skb, unsigned int ndesc, unsigned int qid) { if (unlikely(!skb_queue_empty(&q->sendq))) { addq_exit:__skb_queue_tail(&q->sendq, skb); return 1; } if (unlikely(q->size - q->in_use < ndesc)) { struct sge_qset *qs = txq_to_qset(q, qid); set_bit(qid, &qs->txq_stopped); smp_mb__after_clear_bit(); if (should_restart_tx(q) && test_and_clear_bit(qid, &qs->txq_stopped)) return 2; q->stops++; goto addq_exit; } return 0; } /** * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs * @q: the SGE control Tx queue * * This is a variant of reclaim_completed_tx() that is used for Tx queues * that send only immediate data (presently just the control queues) and * thus do not have any sk_buffs to release. */ static inline void reclaim_completed_tx_imm(struct sge_txq *q) { unsigned int reclaim = q->processed - q->cleaned; q->in_use -= reclaim; q->cleaned += reclaim; } static inline int immediate(const struct sk_buff *skb) { return skb->len <= WR_LEN; } /** * ctrl_xmit - send a packet through an SGE control Tx queue * @adap: the adapter * @q: the control queue * @skb: the packet * * Send a packet through an SGE control Tx queue. Packets sent through * a control queue must fit entirely as immediate data in a single Tx * descriptor and have no page fragments. */ static int ctrl_xmit(struct adapter *adap, struct sge_txq *q, struct sk_buff *skb) { int ret; struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data; if (unlikely(!immediate(skb))) { WARN_ON(1); dev_kfree_skb(skb); return NET_XMIT_SUCCESS; } wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP); wrp->wr_lo = htonl(V_WR_TID(q->token)); spin_lock(&q->lock); again:reclaim_completed_tx_imm(q); ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL); if (unlikely(ret)) { if (ret == 1) { spin_unlock(&q->lock); return NET_XMIT_CN; } goto again; } write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); q->in_use++; if (++q->pidx >= q->size) { q->pidx = 0; q->gen ^= 1; } spin_unlock(&q->lock); wmb(); t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); return NET_XMIT_SUCCESS; } /** * restart_ctrlq - restart a suspended control queue * @qs: the queue set containing the control queue * * Resumes transmission on a suspended Tx control queue. */ static void restart_ctrlq(unsigned long data) { struct sk_buff *skb; struct sge_qset *qs = (struct sge_qset *)data; struct sge_txq *q = &qs->txq[TXQ_CTRL]; spin_lock(&q->lock); again:reclaim_completed_tx_imm(q); while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) { write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); if (++q->pidx >= q->size) { q->pidx = 0; q->gen ^= 1; } q->in_use++; } if (!skb_queue_empty(&q->sendq)) { set_bit(TXQ_CTRL, &qs->txq_stopped); smp_mb__after_clear_bit(); if (should_restart_tx(q) && test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) goto again; q->stops++; } spin_unlock(&q->lock); wmb(); t3_write_reg(qs->adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); } /* * Send a management message through control queue 0 */ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb) { int ret; local_bh_disable(); ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb); local_bh_enable(); return ret; } /** * deferred_unmap_destructor - unmap a packet when it is freed * @skb: the packet * * This is the packet destructor used for Tx packets that need to remain * mapped until they are freed rather than until their Tx descriptors are * freed.
*/ static void deferred_unmap_destructor(struct sk_buff *skb) { int i; const dma_addr_t *p; const struct skb_shared_info *si; const struct deferred_unmap_info *dui; dui = (struct deferred_unmap_info *)skb->head; p = dui->addr; if (skb->tail - skb->transport_header) pci_unmap_single(dui->pdev, *p++, skb->tail - skb->transport_header, PCI_DMA_TODEVICE); si = skb_shinfo(skb); for (i = 0; i < si->nr_frags; i++) pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]), PCI_DMA_TODEVICE); } static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, const struct sg_ent *sgl, int sgl_flits) { dma_addr_t *p; struct deferred_unmap_info *dui; dui = (struct deferred_unmap_info *)skb->head; dui->pdev = pdev; for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) { *p++ = be64_to_cpu(sgl->addr[0]); *p++ = be64_to_cpu(sgl->addr[1]); } if (sgl_flits) *p = be64_to_cpu(sgl->addr[0]); } /** * write_ofld_wr - write an offload work request * @adap: the adapter * @skb: the packet to send * @q: the Tx queue * @pidx: index of the first Tx descriptor to write * @gen: the generation value to use * @ndesc: number of descriptors the packet will occupy * * Write an offload work request to send the supplied packet. The packet * data already carry the work request with most fields populated. */ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, struct sge_txq *q, unsigned int pidx, unsigned int gen, unsigned int ndesc) { unsigned int sgl_flits, flits; struct work_request_hdr *from; struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; struct tx_desc *d = &q->desc[pidx]; if (immediate(skb)) { q->sdesc[pidx].skb = NULL; write_imm(d, skb, skb->len, gen); return; } /* Only TX_DATA builds SGLs */ from = (struct work_request_hdr *)skb->data; memcpy(&d->flit[1], &from[1], skb_transport_offset(skb) - sizeof(*from)); flits = skb_transport_offset(skb) / 8; sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), skb->tail - skb->transport_header, adap->pdev); if (need_skb_unmap()) { setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); skb->destructor = deferred_unmap_destructor; } write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, from->wr_hi, from->wr_lo); } /** * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet * @skb: the packet * * Returns the number of Tx descriptors needed for the given offload * packet. These packets are already fully constructed. */ static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb) { unsigned int flits, cnt; if (skb->len <= WR_LEN) return 1; /* packet fits as immediate data */ flits = skb_transport_offset(skb) / 8; /* headers */ cnt = skb_shinfo(skb)->nr_frags; if (skb->tail != skb->transport_header) cnt++; return flits_to_desc(flits + sgl_len(cnt)); } /** * ofld_xmit - send a packet through an offload queue * @adap: the adapter * @q: the Tx offload queue * @skb: the packet * * Send an offload packet through an SGE offload queue. 
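* * Note, for illustration: when the queue is full the descriptor count is * stashed in skb->priority so that restart_offloadq() can reuse it without * recomputing calc_tx_descs_ofld().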
*/ static int ofld_xmit(struct adapter *adap, struct sge_txq *q, struct sk_buff *skb) { int ret; unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen; spin_lock(&q->lock); again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD); if (unlikely(ret)) { if (ret == 1) { skb->priority = ndesc; /* save for restart */ spin_unlock(&q->lock); return NET_XMIT_CN; } goto again; } gen = q->gen; q->in_use += ndesc; pidx = q->pidx; q->pidx += ndesc; if (q->pidx >= q->size) { q->pidx -= q->size; q->gen ^= 1; } spin_unlock(&q->lock); write_ofld_wr(adap, skb, q, pidx, gen, ndesc); check_ring_tx_db(adap, q); return NET_XMIT_SUCCESS; } /** * restart_offloadq - restart a suspended offload queue * @qs: the queue set containing the offload queue * * Resumes transmission on a suspended Tx offload queue. */ static void restart_offloadq(unsigned long data) { struct sk_buff *skb; struct sge_qset *qs = (struct sge_qset *)data; struct sge_txq *q = &qs->txq[TXQ_OFLD]; const struct port_info *pi = netdev_priv(qs->netdev); struct adapter *adap = pi->adapter; spin_lock(&q->lock); again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); while ((skb = skb_peek(&q->sendq)) != NULL) { unsigned int gen, pidx; unsigned int ndesc = skb->priority; if (unlikely(q->size - q->in_use < ndesc)) { set_bit(TXQ_OFLD, &qs->txq_stopped); smp_mb__after_clear_bit(); if (should_restart_tx(q) && test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) goto again; q->stops++; break; } gen = q->gen; q->in_use += ndesc; pidx = q->pidx; q->pidx += ndesc; if (q->pidx >= q->size) { q->pidx -= q->size; q->gen ^= 1; } __skb_unlink(skb, &q->sendq); spin_unlock(&q->lock); write_ofld_wr(adap, skb, q, pidx, gen, ndesc); spin_lock(&q->lock); } spin_unlock(&q->lock); #if USE_GTS set_bit(TXQ_RUNNING, &q->flags); set_bit(TXQ_LAST_PKT_DB, &q->flags); #endif wmb(); t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); } /** * queue_set - return the queue set a packet should use * @skb: the packet * * Maps a packet to the SGE queue set it should use. The desired queue * set is carried in bits 1-3 in the packet's priority. */ static inline int queue_set(const struct sk_buff *skb) { return skb->priority >> 1; } /** * is_ctrl_pkt - return whether an offload packet is a control packet * @skb: the packet * * Determines whether an offload packet should use an OFLD or a CTRL * Tx queue. This is indicated by bit 0 in the packet's priority. */ static inline int is_ctrl_pkt(const struct sk_buff *skb) { return skb->priority & 1; } /** * t3_offload_tx - send an offload packet * @tdev: the offload device to send to * @skb: the packet * * Sends an offload packet. We use the packet priority to select the * appropriate Tx queue as follows: bit 0 indicates whether the packet * should be sent as regular or control, bits 1-3 select the queue set. */ int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb) { struct adapter *adap = tdev2adap(tdev); struct sge_qset *qs = &adap->sge.qs[queue_set(skb)]; if (unlikely(is_ctrl_pkt(skb))) return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb); return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb); } /** * offload_enqueue - add an offload packet to an SGE offload receive queue * @q: the SGE response queue * @skb: the packet * * Add a new offload packet to an SGE response queue's offload packet * queue. If the packet is the first on the queue it schedules the RX * softirq to process the queue.
*/ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) { int was_empty = skb_queue_empty(&q->rx_queue); __skb_queue_tail(&q->rx_queue, skb); if (was_empty) { struct sge_qset *qs = rspq_to_qset(q); napi_schedule(&qs->napi); } } /** * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts * @tdev: the offload device that will be receiving the packets * @q: the SGE response queue that assembled the bundle * @skbs: the partial bundle * @n: the number of packets in the bundle * * Delivers a (partial) bundle of Rx offload packets to an offload device. */ static inline void deliver_partial_bundle(struct t3cdev *tdev, struct sge_rspq *q, struct sk_buff *skbs[], int n) { if (n) { q->offload_bundles++; tdev->recv(tdev, skbs, n); } } /** * ofld_poll - NAPI handler for offload packets in interrupt mode * @dev: the network device doing the polling * @budget: polling budget * * The NAPI handler for offload packets when a response queue is serviced * by the hard interrupt handler, i.e., when it's operating in non-polling * mode. Creates small packet batches and sends them through the offload * receive handler. Batches need to be of modest size as we do prefetches * on the packets in each. */ static int ofld_poll(struct napi_struct *napi, int budget) { struct sge_qset *qs = container_of(napi, struct sge_qset, napi); struct sge_rspq *q = &qs->rspq; struct adapter *adapter = qs->adap; int work_done = 0; while (work_done < budget) { struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE]; struct sk_buff_head queue; int ngathered; spin_lock_irq(&q->lock); __skb_queue_head_init(&queue); skb_queue_splice_init(&q->rx_queue, &queue); if (skb_queue_empty(&queue)) { napi_complete(napi); spin_unlock_irq(&q->lock); return work_done; } spin_unlock_irq(&q->lock); ngathered = 0; skb_queue_walk_safe(&queue, skb, tmp) { if (work_done >= budget) break; work_done++; __skb_unlink(skb, &queue); prefetch(skb->data); skbs[ngathered] = skb; if (++ngathered == RX_BUNDLE_SIZE) { q->offload_bundles++; adapter->tdev.recv(&adapter->tdev, skbs, ngathered); ngathered = 0; } } if (!skb_queue_empty(&queue)) { /* splice remaining packets back onto Rx queue */ spin_lock_irq(&q->lock); skb_queue_splice(&queue, &q->rx_queue); spin_unlock_irq(&q->lock); } deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); } return work_done; } /** * rx_offload - process a received offload packet * @tdev: the offload device receiving the packet * @rq: the response queue that received the packet * @skb: the packet * @rx_gather: a gather list of packets if we are building a bundle * @gather_idx: index of the next available slot in the bundle * * Process an ingress offload packet and add it to the offload ingress * queue. Returns the index of the next available slot in the bundle. */ static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq, struct sk_buff *skb, struct sk_buff *rx_gather[], unsigned int gather_idx) { skb_reset_mac_header(skb); skb_reset_network_header(skb); skb_reset_transport_header(skb); if (rq->polling) { rx_gather[gather_idx++] = skb; if (gather_idx == RX_BUNDLE_SIZE) { tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE); gather_idx = 0; rq->offload_bundles++; } } else offload_enqueue(rq, skb); return gather_idx; } /** * restart_tx - check whether to restart suspended Tx queues * @qs: the queue set to resume * * Restarts suspended Tx queues of an SGE queue set if they have enough * free resources to resume operation.
*/ static void restart_tx(struct sge_qset *qs) { if (test_bit(TXQ_ETH, &qs->txq_stopped) && should_restart_tx(&qs->txq[TXQ_ETH]) && test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { qs->txq[TXQ_ETH].restarts++; if (netif_running(qs->netdev)) netif_tx_wake_queue(qs->tx_q); } if (test_bit(TXQ_OFLD, &qs->txq_stopped) && should_restart_tx(&qs->txq[TXQ_OFLD]) && test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) { qs->txq[TXQ_OFLD].restarts++; tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk); } if (test_bit(TXQ_CTRL, &qs->txq_stopped) && should_restart_tx(&qs->txq[TXQ_CTRL]) && test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) { qs->txq[TXQ_CTRL].restarts++; tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk); } } /** * cxgb3_arp_process - process an ARP request probing a private IP address * @adapter: the adapter * @skb: the skbuff containing the ARP request * * Check if the ARP request is probing the private IP address * dedicated to iSCSI, generate an ARP reply if so. */ static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb) { struct net_device *dev = skb->dev; struct arphdr *arp; unsigned char *arp_ptr; unsigned char *sha; __be32 sip, tip; if (!dev) return; skb_reset_network_header(skb); arp = arp_hdr(skb); if (arp->ar_op != htons(ARPOP_REQUEST)) return; arp_ptr = (unsigned char *)(arp + 1); sha = arp_ptr; arp_ptr += dev->addr_len; memcpy(&sip, arp_ptr, sizeof(sip)); arp_ptr += sizeof(sip); arp_ptr += dev->addr_len; memcpy(&tip, arp_ptr, sizeof(tip)); if (tip != pi->iscsi_ipv4addr) return; arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, pi->iscsic.mac_addr, sha); } static inline int is_arp(struct sk_buff *skb) { return skb->protocol == htons(ETH_P_ARP); } static void cxgb3_process_iscsi_prov_pack(struct port_info *pi, struct sk_buff *skb) { if (is_arp(skb)) { cxgb3_arp_process(pi, skb); return; } if (pi->iscsic.recv) pi->iscsic.recv(pi, skb); } /** * rx_eth - process an ingress ethernet packet * @adap: the adapter * @rq: the response queue that received the packet * @skb: the packet * @pad: amount of padding at the start of the buffer * * Process an ingress ethernet packet and deliver it to the stack. * The padding is 2 if the packet was delivered in an Rx buffer and 0 * if it was immediate data in a response.
*/ static void rx_eth(struct adapter *adap, struct sge_rspq *rq, struct sk_buff *skb, int pad, int lro) { struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad); struct sge_qset *qs = rspq_to_qset(rq); struct port_info *pi; skb_pull(skb, sizeof(*p) + pad); skb->protocol = eth_type_trans(skb, adap->port[p->iff]); pi = netdev_priv(skb->dev); if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid && p->csum == htons(0xffff) && !p->fragment) { qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; skb->ip_summed = CHECKSUM_UNNECESSARY; } else skb_checksum_none_assert(skb); skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); if (p->vlan_valid) { qs->port_stats[SGE_PSTAT_VLANEX]++; __vlan_hwaccel_put_tag(skb, ntohs(p->vlan)); } if (rq->polling) { if (lro) napi_gro_receive(&qs->napi, skb); else { if (unlikely(pi->iscsic.flags)) cxgb3_process_iscsi_prov_pack(pi, skb); netif_receive_skb(skb); } } else netif_rx(skb); } static inline int is_eth_tcp(u32 rss) { return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE; } /** * lro_add_page - add a page chunk to an LRO session * @adap: the adapter * @qs: the associated queue set * @fl: the free list containing the page chunk to add * @len: packet length * @complete: Indicates the last fragment of a frame * * Add a received packet contained in a page chunk to an existing LRO * session. */ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, struct sge_fl *fl, int len, int complete) { struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; struct port_info *pi = netdev_priv(qs->netdev); struct sk_buff *skb = NULL; struct cpl_rx_pkt *cpl; struct skb_frag_struct *rx_frag; int nr_frags; int offset = 0; if (!qs->nomem) { skb = napi_get_frags(&qs->napi); qs->nomem = !skb; } fl->credits--; pci_dma_sync_single_for_cpu(adap->pdev, dma_unmap_addr(sd, dma_addr), fl->buf_size - SGE_PG_RSVD, PCI_DMA_FROMDEVICE); (*sd->pg_chunk.p_cnt)--; if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) pci_unmap_page(adap->pdev, sd->pg_chunk.mapping, fl->alloc_size, PCI_DMA_FROMDEVICE); if (!skb) { put_page(sd->pg_chunk.page); if (complete) qs->nomem = 0; return; } rx_frag = skb_shinfo(skb)->frags; nr_frags = skb_shinfo(skb)->nr_frags; if (!nr_frags) { offset = 2 + sizeof(struct cpl_rx_pkt); cpl = qs->lro_va = sd->pg_chunk.va + 2; if ((qs->netdev->features & NETIF_F_RXCSUM) && cpl->csum_valid && cpl->csum == htons(0xffff)) { skb->ip_summed = CHECKSUM_UNNECESSARY; qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; } else skb->ip_summed = CHECKSUM_NONE; } else cpl = qs->lro_va; len -= offset; rx_frag += nr_frags; __skb_frag_set_page(rx_frag, sd->pg_chunk.page); rx_frag->page_offset = sd->pg_chunk.offset + offset; skb_frag_size_set(rx_frag, len); skb->len += len; skb->data_len += len; skb->truesize += len; skb_shinfo(skb)->nr_frags++; if (!complete) return; skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); if (cpl->vlan_valid) __vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan)); napi_gro_frags(&qs->napi); } /** * handle_rsp_cntrl_info - handles control information in a response * @qs: the queue set corresponding to the response * @flags: the response control flags * * Handles the control information of an SGE response, such as GTS * indications and completion credits for the queue set's Tx queues. * HW coalesces credits, we don't do any extra SW coalescing. 
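* * For illustration: the TXQ0, TXQ1 and TXQ2 credit fields in @flags feed * the TXQ_ETH, TXQ_OFLD and TXQ_CTRL queues respectively, as decoded below.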
*/ static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags) { unsigned int credits; #if USE_GTS if (flags & F_RSPD_TXQ0_GTS) clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags); #endif credits = G_RSPD_TXQ0_CR(flags); if (credits) qs->txq[TXQ_ETH].processed += credits; credits = G_RSPD_TXQ2_CR(flags); if (credits) qs->txq[TXQ_CTRL].processed += credits; # if USE_GTS if (flags & F_RSPD_TXQ1_GTS) clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags); # endif credits = G_RSPD_TXQ1_CR(flags); if (credits) qs->txq[TXQ_OFLD].processed += credits; } /** * check_ring_db - check if we need to ring any doorbells * @adapter: the adapter * @qs: the queue set whose Tx queues are to be examined * @sleeping: indicates which Tx queue sent GTS * * Checks if some of a queue set's Tx queues need to ring their doorbells * to resume transmission after idling while they still have unprocessed * descriptors. */ static void check_ring_db(struct adapter *adap, struct sge_qset *qs, unsigned int sleeping) { if (sleeping & F_RSPD_TXQ0_GTS) { struct sge_txq *txq = &qs->txq[TXQ_ETH]; if (txq->cleaned + txq->in_use != txq->processed && !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { set_bit(TXQ_RUNNING, &txq->flags); t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(txq->cntxt_id)); } } if (sleeping & F_RSPD_TXQ1_GTS) { struct sge_txq *txq = &qs->txq[TXQ_OFLD]; if (txq->cleaned + txq->in_use != txq->processed && !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { set_bit(TXQ_RUNNING, &txq->flags); t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(txq->cntxt_id)); } } } /** * is_new_response - check if a response is newly written * @r: the response descriptor * @q: the response queue * * Returns true if a response descriptor contains a yet unprocessed * response. */ static inline int is_new_response(const struct rsp_desc *r, const struct sge_rspq *q) { return (r->intr_gen & F_RSPD_GEN2) == q->gen; } static inline void clear_rspq_bufstate(struct sge_rspq * const q) { q->pg_skb = NULL; q->rx_recycle_buf = 0; } #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS) #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \ V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \ V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \ V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR)) /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */ #define NOMEM_INTR_DELAY 2500 /** * process_responses - process responses from an SGE response queue * @adap: the adapter * @qs: the queue set to which the response queue belongs * @budget: how many responses can be processed in this round * * Process responses from an SGE response queue up to the supplied budget. * Responses include received packets as well as credits and other events * for the queues that belong to the response queue's queue set. * A negative budget is effectively unlimited. * * Additionally choose the interrupt holdoff time for the next interrupt * on this queue. If the system is under memory shortage use a fairly * long delay to help recovery. 
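* * Illustrative usage with hypothetical values: the NAPI poll path calls process_responses(adap, qs, 64) and compares the return value with its budget to decide whether it will be polled again, while the MSI-X hard-irq path passes a budget of -1 so the loop ends only when is_new_response() reports no more work.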
*/ static int process_responses(struct adapter *adap, struct sge_qset *qs, int budget) { struct sge_rspq *q = &qs->rspq; struct rsp_desc *r = &q->desc[q->cidx]; int budget_left = budget; unsigned int sleeping = 0; struct sk_buff *offload_skbs[RX_BUNDLE_SIZE]; int ngathered = 0; q->next_holdoff = q->holdoff_tmr; while (likely(budget_left && is_new_response(r, q))) { int packet_complete, eth, ethpad = 2; int lro = !!(qs->netdev->features & NETIF_F_GRO); struct sk_buff *skb = NULL; u32 len, flags; __be32 rss_hi, rss_lo; rmb(); eth = r->rss_hdr.opcode == CPL_RX_PKT; rss_hi = *(const __be32 *)r; rss_lo = r->rss_hdr.rss_hash_val; flags = ntohl(r->flags); if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) { skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC); if (!skb) goto no_mem; memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE); skb->data[0] = CPL_ASYNC_NOTIF; rss_hi = htonl(CPL_ASYNC_NOTIF << 24); q->async_notif++; } else if (flags & F_RSPD_IMM_DATA_VALID) { skb = get_imm_packet(r); if (unlikely(!skb)) { no_mem: q->next_holdoff = NOMEM_INTR_DELAY; q->nomem++; /* consume one credit since we tried */ budget_left--; break; } q->imm_data++; ethpad = 0; } else if ((len = ntohl(r->len_cq)) != 0) { struct sge_fl *fl; lro &= eth && is_eth_tcp(rss_hi); fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; if (fl->use_pages) { void *addr = fl->sdesc[fl->cidx].pg_chunk.va; prefetch(addr); #if L1_CACHE_BYTES < 128 prefetch(addr + L1_CACHE_BYTES); #endif __refill_fl(adap, fl); if (lro > 0) { lro_add_page(adap, qs, fl, G_RSPD_LEN(len), flags & F_RSPD_EOP); goto next_fl; } skb = get_packet_pg(adap, fl, q, G_RSPD_LEN(len), eth ? SGE_RX_DROP_THRES : 0); q->pg_skb = skb; } else skb = get_packet(adap, fl, G_RSPD_LEN(len), eth ? SGE_RX_DROP_THRES : 0); if (unlikely(!skb)) { if (!eth) goto no_mem; q->rx_drops++; } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT)) __skb_pull(skb, 2); next_fl: if (++fl->cidx == fl->size) fl->cidx = 0; } else q->pure_rsps++; if (flags & RSPD_CTRL_MASK) { sleeping |= flags & RSPD_GTS_MASK; handle_rsp_cntrl_info(qs, flags); } r++; if (unlikely(++q->cidx == q->size)) { q->cidx = 0; q->gen ^= 1; r = q->desc; } prefetch(r); if (++q->credits >= (q->size / 4)) { refill_rspq(adap, q, q->credits); q->credits = 0; } packet_complete = flags & (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID | F_RSPD_ASYNC_NOTIF); if (skb != NULL && packet_complete) { if (eth) rx_eth(adap, q, skb, ethpad, lro); else { q->offload_pkts++; /* Preserve the RSS info in csum & priority */ skb->csum = rss_hi; skb->priority = rss_lo; ngathered = rx_offload(&adap->tdev, q, skb, offload_skbs, ngathered); } if (flags & F_RSPD_EOP) clear_rspq_bufstate(q); } --budget_left; } deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); if (sleeping) check_ring_db(adap, qs, sleeping); smp_mb(); /* commit Tx queue .processed updates */ if (unlikely(qs->txq_stopped != 0)) restart_tx(qs); budget -= budget_left; return budget; } static inline int is_pure_response(const struct rsp_desc *r) { __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID); return (n | r->len_cq) == 0; } /** * napi_rx_handler - the NAPI handler for Rx processing * @napi: the napi instance * @budget: how many packets we can process in this round * * Handler for new data events when using NAPI. 
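* * A minimal sketch of the NAPI contract this handler follows (illustration only): work = napi_rx_handler(napi, budget); if work < budget the queue was drained, napi_complete() has run and the GTS write below re-arms the interrupt; otherwise the budget was exhausted and the networking core will poll again.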
*/ static int napi_rx_handler(struct napi_struct *napi, int budget) { struct sge_qset *qs = container_of(napi, struct sge_qset, napi); struct adapter *adap = qs->adap; int work_done = process_responses(adap, qs, budget); if (likely(work_done < budget)) { napi_complete(napi); /* * Because we don't atomically flush the following * write it is possible that in very rare cases it can * reach the device in a way that races with a new * response being written plus an error interrupt * causing the NAPI interrupt handler below to return * unhandled status to the OS. To protect against * this would require flushing the write and doing * both the write and the flush with interrupts off. * Way too expensive and unjustifiable given the * rarity of the race. * * The race cannot happen at all with MSI-X. */ t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | V_NEWTIMER(qs->rspq.next_holdoff) | V_NEWINDEX(qs->rspq.cidx)); } return work_done; } /* * Returns true if the device is already scheduled for polling. */ static inline int napi_is_scheduled(struct napi_struct *napi) { return test_bit(NAPI_STATE_SCHED, &napi->state); } /** * process_pure_responses - process pure responses from a response queue * @adap: the adapter * @qs: the queue set owning the response queue * @r: the first pure response to process * * A simpler version of process_responses() that handles only pure (i.e., * non data-carrying) responses. Such responses are too light-weight to * justify calling a softirq under NAPI, so we handle them specially in * the interrupt handler. The function is called with a pointer to a * response, which the caller must ensure is a valid pure response. * * Returns 1 if it encounters a valid data-carrying response, 0 otherwise. */ static int process_pure_responses(struct adapter *adap, struct sge_qset *qs, struct rsp_desc *r) { struct sge_rspq *q = &qs->rspq; unsigned int sleeping = 0; do { u32 flags = ntohl(r->flags); r++; if (unlikely(++q->cidx == q->size)) { q->cidx = 0; q->gen ^= 1; r = q->desc; } prefetch(r); if (flags & RSPD_CTRL_MASK) { sleeping |= flags & RSPD_GTS_MASK; handle_rsp_cntrl_info(qs, flags); } q->pure_rsps++; if (++q->credits >= (q->size / 4)) { refill_rspq(adap, q, q->credits); q->credits = 0; } if (!is_new_response(r, q)) break; rmb(); } while (is_pure_response(r)); if (sleeping) check_ring_db(adap, qs, sleeping); smp_mb(); /* commit Tx queue .processed updates */ if (unlikely(qs->txq_stopped != 0)) restart_tx(qs); return is_new_response(r, q); } /** * handle_responses - decide what to do with new responses in NAPI mode * @adap: the adapter * @q: the response queue * * This is used by the NAPI interrupt handlers to decide what to do with * new SGE responses. If there are no new responses it returns -1. If * there are new responses and they are pure (i.e., non-data carrying) * it handles them straight in hard interrupt context as they are very * cheap and don't deliver any packets. Finally, if there are any data * signaling responses it schedules the NAPI handler. Returns 1 if it * schedules NAPI, 0 if all new responses were pure. * * The caller must ascertain NAPI is not already running.
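* * In table form (derived from the description above): no new responses -> do nothing, return -1; new responses, all pure -> handled in hard irq context, return 0; new responses including data-carrying ones -> napi_schedule(), return 1.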
*/ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) { struct sge_qset *qs = rspq_to_qset(q); struct rsp_desc *r = &q->desc[q->cidx]; if (!is_new_response(r, q)) return -1; rmb(); if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) { t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); return 0; } napi_schedule(&qs->napi); return 1; } /* * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case * (i.e., response queue serviced in hard interrupt). */ static irqreturn_t t3_sge_intr_msix(int irq, void *cookie) { struct sge_qset *qs = cookie; struct adapter *adap = qs->adap; struct sge_rspq *q = &qs->rspq; spin_lock(&q->lock); if (process_responses(adap, qs, -1) == 0) q->unhandled_irqs++; t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); spin_unlock(&q->lock); return IRQ_HANDLED; } /* * The MSI-X interrupt handler for an SGE response queue for the NAPI case * (i.e., response queue serviced by NAPI polling). */ static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie) { struct sge_qset *qs = cookie; struct sge_rspq *q = &qs->rspq; spin_lock(&q->lock); if (handle_responses(qs->adap, q) < 0) q->unhandled_irqs++; spin_unlock(&q->lock); return IRQ_HANDLED; } /* * The non-NAPI MSI interrupt handler. This needs to handle data events from * SGE response queues as well as error and other async events as they all use * the same MSI vector. We use one SGE response queue per port in this mode * and protect all response queues with queue 0's lock. */ static irqreturn_t t3_intr_msi(int irq, void *cookie) { int new_packets = 0; struct adapter *adap = cookie; struct sge_rspq *q = &adap->sge.qs[0].rspq; spin_lock(&q->lock); if (process_responses(adap, &adap->sge.qs[0], -1)) { t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); new_packets = 1; } if (adap->params.nports == 2 && process_responses(adap, &adap->sge.qs[1], -1)) { struct sge_rspq *q1 = &adap->sge.qs[1].rspq; t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) | V_NEWTIMER(q1->next_holdoff) | V_NEWINDEX(q1->cidx)); new_packets = 1; } if (!new_packets && t3_slow_intr_handler(adap) == 0) q->unhandled_irqs++; spin_unlock(&q->lock); return IRQ_HANDLED; } static int rspq_check_napi(struct sge_qset *qs) { struct sge_rspq *q = &qs->rspq; if (!napi_is_scheduled(&qs->napi) && is_new_response(&q->desc[q->cidx], q)) { napi_schedule(&qs->napi); return 1; } return 0; } /* * The MSI interrupt handler for the NAPI case (i.e., response queues serviced * by NAPI polling). Handles data events from SGE response queues as well as * error and other async events as they all use the same MSI vector. We use * one SGE response queue per port in this mode and protect all response * queues with queue 0's lock. */ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie) { int new_packets; struct adapter *adap = cookie; struct sge_rspq *q = &adap->sge.qs[0].rspq; spin_lock(&q->lock); new_packets = rspq_check_napi(&adap->sge.qs[0]); if (adap->params.nports == 2) new_packets += rspq_check_napi(&adap->sge.qs[1]); if (!new_packets && t3_slow_intr_handler(adap) == 0) q->unhandled_irqs++; spin_unlock(&q->lock); return IRQ_HANDLED; } /* * A helper function that processes responses and issues GTS. 
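* A typical call site (see t3_intr() below) is simply: if (likely(w0)) process_responses_gts(adap, q0); i.e. the helper both drains the queue and re-arms it through the A_SG_GTS write in one step, so the legacy INTx paths don't have to repeat the register write.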
*/ static inline int process_responses_gts(struct adapter *adap, struct sge_rspq *rq) { int work; work = process_responses(adap, rspq_to_qset(rq), -1); t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) | V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx)); return work; } /* * The legacy INTx interrupt handler. This needs to handle data events from * SGE response queues as well as error and other async events as they all use * the same interrupt pin. We use one SGE response queue per port in this mode * and protect all response queues with queue 0's lock. */ static irqreturn_t t3_intr(int irq, void *cookie) { int work_done, w0, w1; struct adapter *adap = cookie; struct sge_rspq *q0 = &adap->sge.qs[0].rspq; struct sge_rspq *q1 = &adap->sge.qs[1].rspq; spin_lock(&q0->lock); w0 = is_new_response(&q0->desc[q0->cidx], q0); w1 = adap->params.nports == 2 && is_new_response(&q1->desc[q1->cidx], q1); if (likely(w0 | w1)) { t3_write_reg(adap, A_PL_CLI, 0); t3_read_reg(adap, A_PL_CLI); /* flush */ if (likely(w0)) process_responses_gts(adap, q0); if (w1) process_responses_gts(adap, q1); work_done = w0 | w1; } else work_done = t3_slow_intr_handler(adap); spin_unlock(&q0->lock); return IRQ_RETVAL(work_done != 0); } /* * Interrupt handler for legacy INTx interrupts for T3B-based cards. * Handles data events from SGE response queues as well as error and other * async events as they all use the same interrupt pin. We use one SGE * response queue per port in this mode and protect all response queues with * queue 0's lock. */ static irqreturn_t t3b_intr(int irq, void *cookie) { u32 map; struct adapter *adap = cookie; struct sge_rspq *q0 = &adap->sge.qs[0].rspq; t3_write_reg(adap, A_PL_CLI, 0); map = t3_read_reg(adap, A_SG_DATA_INTR); if (unlikely(!map)) /* shared interrupt, most likely */ return IRQ_NONE; spin_lock(&q0->lock); if (unlikely(map & F_ERRINTR)) t3_slow_intr_handler(adap); if (likely(map & 1)) process_responses_gts(adap, q0); if (map & 2) process_responses_gts(adap, &adap->sge.qs[1].rspq); spin_unlock(&q0->lock); return IRQ_HANDLED; } /* * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards. * Handles data events from SGE response queues as well as error and other * async events as they all use the same interrupt pin. We use one SGE * response queue per port in this mode and protect all response queues with * queue 0's lock. */ static irqreturn_t t3b_intr_napi(int irq, void *cookie) { u32 map; struct adapter *adap = cookie; struct sge_qset *qs0 = &adap->sge.qs[0]; struct sge_rspq *q0 = &qs0->rspq; t3_write_reg(adap, A_PL_CLI, 0); map = t3_read_reg(adap, A_SG_DATA_INTR); if (unlikely(!map)) /* shared interrupt, most likely */ return IRQ_NONE; spin_lock(&q0->lock); if (unlikely(map & F_ERRINTR)) t3_slow_intr_handler(adap); if (likely(map & 1)) napi_schedule(&qs0->napi); if (map & 2) napi_schedule(&adap->sge.qs[1].napi); spin_unlock(&q0->lock); return IRQ_HANDLED; } /** * t3_intr_handler - select the top-level interrupt handler * @adap: the adapter * @polling: whether using NAPI to service response queues * * Selects the top-level interrupt handler based on the type of interrupts * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the * response queues. */ irq_handler_t t3_intr_handler(struct adapter *adap, int polling) { if (adap->flags & USING_MSIX) return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix; if (adap->flags & USING_MSI) return polling ? t3_intr_msi_napi : t3_intr_msi; if (adap->params.rev > 0) return polling ? 
t3b_intr_napi : t3b_intr; return t3_intr; } #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \ F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ F_HIRCQPARITYERROR) #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR) #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \ F_RSPQDISABLED) /** * t3_sge_err_intr_handler - SGE async event interrupt handler * @adapter: the adapter * * Interrupt handler for SGE asynchronous (non-data) events. */ void t3_sge_err_intr_handler(struct adapter *adapter) { unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) & ~F_FLEMPTY; if (status & SGE_PARERR) CH_ALERT(adapter, "SGE parity error (0x%x)\n", status & SGE_PARERR); if (status & SGE_FRAMINGERR) CH_ALERT(adapter, "SGE framing error (0x%x)\n", status & SGE_FRAMINGERR); if (status & F_RSPQCREDITOVERFOW) CH_ALERT(adapter, "SGE response queue credit overflow\n"); if (status & F_RSPQDISABLED) { v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS); CH_ALERT(adapter, "packet delivered to disabled response queue " "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff); } if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR)) queue_work(cxgb3_wq, &adapter->db_drop_task); if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL)) queue_work(cxgb3_wq, &adapter->db_full_task); if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY)) queue_work(cxgb3_wq, &adapter->db_empty_task); t3_write_reg(adapter, A_SG_INT_CAUSE, status); if (status & SGE_FATALERR) t3_fatal_err(adapter); } /** * sge_timer_tx - perform periodic maintenance of an SGE qset * @data: the SGE queue set to maintain * * Runs periodically from a timer to perform maintenance of an SGE queue * set. It performs two tasks: * * Cleans up any completed Tx descriptors that may still be pending. * Normal descriptor cleanup happens when new packets are added to a Tx * queue so this timer is relatively infrequent and does any cleanup only * if the Tx queue has not seen any new packets in a while. We make a * best effort attempt to reclaim descriptors, in that we don't wait * around if we cannot get a queue's lock (which most likely is because * someone else is queueing new packets and so will also handle the clean * up). Since control queues use immediate data exclusively we don't * bother cleaning them up here. * */ static void sge_timer_tx(unsigned long data) { struct sge_qset *qs = (struct sge_qset *)data; struct port_info *pi = netdev_priv(qs->netdev); struct adapter *adap = pi->adapter; unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0}; unsigned long next_period; if (__netif_tx_trylock(qs->tx_q)) { tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH], TX_RECLAIM_TIMER_CHUNK); __netif_tx_unlock(qs->tx_q); } if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) { tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD], TX_RECLAIM_TIMER_CHUNK); spin_unlock(&qs->txq[TXQ_OFLD].lock); } next_period = TX_RECLAIM_PERIOD >> (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) / TX_RECLAIM_TIMER_CHUNK); mod_timer(&qs->tx_reclaim_timer, jiffies + next_period); } /* * sge_timer_rx - perform periodic maintenance of an SGE qset * @data: the SGE queue set to maintain * * a) Replenishes Rx queues that have run out due to memory shortage. * Normally new Rx buffers are added when existing ones are consumed but * when out of memory a queue can become empty. 
We try to add only a few * buffers here, the queue will be replenished fully as these new buffers * are used up if memory shortage has subsided. * * b) Return coalesced response queue credits in case a response queue is * starved. * */ static void sge_timer_rx(unsigned long data) { spinlock_t *lock; struct sge_qset *qs = (struct sge_qset *)data; struct port_info *pi = netdev_priv(qs->netdev); struct adapter *adap = pi->adapter; u32 status; lock = adap->params.rev > 0 ? &qs->rspq.lock : &adap->sge.qs[0].rspq.lock; if (!spin_trylock_irq(lock)) goto out; if (napi_is_scheduled(&qs->napi)) goto unlock; if (adap->params.rev < 4) { status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS); if (status & (1 << qs->rspq.cntxt_id)) { qs->rspq.starved++; if (qs->rspq.credits) { qs->rspq.credits--; refill_rspq(adap, &qs->rspq, 1); qs->rspq.restarted++; t3_write_reg(adap, A_SG_RSPQ_FL_STATUS, 1 << qs->rspq.cntxt_id); } } } if (qs->fl[0].credits < qs->fl[0].size) __refill_fl(adap, &qs->fl[0]); if (qs->fl[1].credits < qs->fl[1].size) __refill_fl(adap, &qs->fl[1]); unlock: spin_unlock_irq(lock); out: mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); } /** * t3_update_qset_coalesce - update coalescing settings for a queue set * @qs: the SGE queue set * @p: new queue set parameters * * Update the coalescing settings for an SGE queue set. Nothing is done * if the queue set is not initialized yet. */ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) { qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */ qs->rspq.polling = p->polling; qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll; } /** * t3_sge_alloc_qset - initialize an SGE queue set * @adapter: the adapter * @id: the queue set id * @nports: how many Ethernet ports will be using this queue set * @irq_vec_idx: the IRQ vector index for response queue interrupts * @p: configuration parameters for this queue set * @ntxq: number of Tx queues for the queue set * @netdev: net device associated with this queue set * @netdevq: net device TX queue associated with this queue set * * Allocate resources and initialize an SGE queue set. A queue set * comprises a response queue, two Rx free-buffer queues, and up to 3 * Tx queues. The Tx queues are assigned roles in the order Ethernet * queue, offload queue, and control queue. */ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, int irq_vec_idx, const struct qset_params *p, int ntxq, struct net_device *dev, struct netdev_queue *netdevq) { int i, avail, ret = -ENOMEM; struct sge_qset *q = &adapter->sge.qs[id]; init_qset_cntxt(q, id); setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q); setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q); q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size, sizeof(struct rx_desc), sizeof(struct rx_sw_desc), &q->fl[0].phys_addr, &q->fl[0].sdesc); if (!q->fl[0].desc) goto err; q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size, sizeof(struct rx_desc), sizeof(struct rx_sw_desc), &q->fl[1].phys_addr, &q->fl[1].sdesc); if (!q->fl[1].desc) goto err; q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size, sizeof(struct rsp_desc), 0, &q->rspq.phys_addr, NULL); if (!q->rspq.desc) goto err; for (i = 0; i < ntxq; ++i) { /* * The control queue always uses immediate data so does not * need to keep track of any sk_buffs. */ size_t sz = i == TXQ_CTRL ? 
0 : sizeof(struct tx_sw_desc); q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i], sizeof(struct tx_desc), sz, &q->txq[i].phys_addr, &q->txq[i].sdesc); if (!q->txq[i].desc) goto err; q->txq[i].gen = 1; q->txq[i].size = p->txq_size[i]; spin_lock_init(&q->txq[i].lock); skb_queue_head_init(&q->txq[i].sendq); } tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq, (unsigned long)q); tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq, (unsigned long)q); q->fl[0].gen = q->fl[1].gen = 1; q->fl[0].size = p->fl_size; q->fl[1].size = p->jumbo_size; q->rspq.gen = 1; q->rspq.size = p->rspq_size; spin_lock_init(&q->rspq.lock); skb_queue_head_init(&q->rspq.rx_queue); q->txq[TXQ_ETH].stop_thres = nports * flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); #if FL0_PG_CHUNK_SIZE > 0 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE; #else q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data); #endif #if FL1_PG_CHUNK_SIZE > 0 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE; #else q->fl[1].buf_size = is_offload(adapter) ? (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt); #endif q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0; q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; q->fl[0].order = FL0_PG_ORDER; q->fl[1].order = FL1_PG_ORDER; q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE; q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE; spin_lock_irq(&adapter->sge.reg_lock); /* FL threshold comparison uses < */ ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, q->rspq.phys_addr, q->rspq.size, q->fl[0].buf_size - SGE_PG_RSVD, 1, 0); if (ret) goto err_unlock; for (i = 0; i < SGE_RXQ_PER_SET; ++i) { ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, q->fl[i].phys_addr, q->fl[i].size, q->fl[i].buf_size - SGE_PG_RSVD, p->cong_thres, 1, 0); if (ret) goto err_unlock; } ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS, SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr, q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token, 1, 0); if (ret) goto err_unlock; if (ntxq > 1) { ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id, USE_GTS, SGE_CNTXT_OFLD, id, q->txq[TXQ_OFLD].phys_addr, q->txq[TXQ_OFLD].size, 0, 1, 0); if (ret) goto err_unlock; } if (ntxq > 2) { ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0, SGE_CNTXT_CTRL, id, q->txq[TXQ_CTRL].phys_addr, q->txq[TXQ_CTRL].size, q->txq[TXQ_CTRL].token, 1, 0); if (ret) goto err_unlock; } spin_unlock_irq(&adapter->sge.reg_lock); q->adap = adapter; q->netdev = dev; q->tx_q = netdevq; t3_update_qset_coalesce(q, p); avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL | __GFP_COMP); if (!avail) { CH_ALERT(adapter, "free list queue 0 initialization failed\n"); goto err; } if (avail < q->fl[0].size) CH_WARN(adapter, "free list queue 0 enabled with %d credits\n", avail); avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL | __GFP_COMP); if (avail < q->fl[1].size) CH_WARN(adapter, "free list queue 1 enabled with %d credits\n", avail); refill_rspq(adapter, &q->rspq, q->rspq.size - 1); t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | V_NEWTIMER(q->rspq.holdoff_tmr)); return 0; err_unlock: spin_unlock_irq(&adapter->sge.reg_lock); err: t3_free_qset(adapter, q); return ret; } /** * t3_start_sge_timers - start SGE timer call backs * @adap: the adapter * * Starts each SGE queue set's timer call back */ void t3_start_sge_timers(struct adapter *adap) { int i; for (i = 0; i < SGE_QSETS; ++i) { struct sge_qset *q = &adap->sge.qs[i]; if (q->tx_reclaim_timer.function) 
mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); if (q->rx_reclaim_timer.function) mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); } } /** * t3_stop_sge_timers - stop SGE timer call backs * @adap: the adapter * * Stops each SGE queue set's timer call back */ void t3_stop_sge_timers(struct adapter *adap) { int i; for (i = 0; i < SGE_QSETS; ++i) { struct sge_qset *q = &adap->sge.qs[i]; if (q->tx_reclaim_timer.function) del_timer_sync(&q->tx_reclaim_timer); if (q->rx_reclaim_timer.function) del_timer_sync(&q->rx_reclaim_timer); } } /** * t3_free_sge_resources - free SGE resources * @adap: the adapter * * Frees resources used by the SGE queue sets. */ void t3_free_sge_resources(struct adapter *adap) { int i; for (i = 0; i < SGE_QSETS; ++i) t3_free_qset(adap, &adap->sge.qs[i]); } /** * t3_sge_start - enable SGE * @adap: the adapter * * Enables the SGE for DMAs. This is the last step in starting packet * transfers. */ void t3_sge_start(struct adapter *adap) { t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE); } /** * t3_sge_stop - disable SGE operation * @adap: the adapter * * Disables the DMA engine. This can be called in emergencies (e.g., * from error interrupts) or from normal process context. In the latter * case it also disables any pending queue restart tasklets. Note that * if it is called in interrupt context it cannot disable the restart * tasklets as it cannot wait, however the tasklets will have no effect * since the doorbells are disabled and the driver will call this again * later from process context, at which time the tasklets will be stopped * if they are still running. */ void t3_sge_stop(struct adapter *adap) { t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0); if (!in_interrupt()) { int i; for (i = 0; i < SGE_QSETS; ++i) { struct sge_qset *qs = &adap->sge.qs[i]; tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk); tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk); } } } /** * t3_sge_init - initialize SGE * @adap: the adapter * @p: the SGE parameters * * Performs SGE initialization needed every time after a chip reset. * We do not initialize any of the queue sets here, instead the driver * top-level must request those individually. We also do not enable DMA * here, that should be done after the queues have been set up. */ void t3_sge_init(struct adapter *adap, struct sge_params *p) { unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12); ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL | F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN | V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS | V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING; #if SGE_NUM_GENBITS == 1 ctrl |= F_EGRGENCTRL; #endif if (adap->params.rev > 0) { if (!(adap->flags & (USING_MSIX | USING_MSI))) ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ; } t3_write_reg(adap, A_SG_CONTROL, ctrl); t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) | V_LORCQDRBTHRSH(512)); t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10); t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) | V_TIMEOUT(200 * core_ticks_per_usec(adap))); t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, adap->params.rev < T3_REV_C ?
1000 : 500); t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256); t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000); t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256); t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff)); t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024); } /** * t3_sge_prep - one-time SGE initialization * @adap: the associated adapter * @p: SGE parameters * * Performs one-time initialization of SGE SW state. Includes determining * defaults for the assorted SGE parameters, which admins can change until * they are used to initialize the SGE. */ void t3_sge_prep(struct adapter *adap, struct sge_params *p) { int i; p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); for (i = 0; i < SGE_QSETS; ++i) { struct qset_params *q = p->qset + i; q->polling = adap->params.rev > 0; q->coalesce_usecs = 5; q->rspq_size = 1024; q->fl_size = 1024; q->jumbo_size = 512; q->txq_size[TXQ_ETH] = 1024; q->txq_size[TXQ_OFLD] = 1024; q->txq_size[TXQ_CTRL] = 256; q->cong_thres = 0; } spin_lock_init(&adap->sge.reg_lock); }
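/* Illustrative summary (not part of the original driver): with the defaults chosen in t3_sge_prep() above, every queue set starts with a 1024-entry response queue, 1024 + 512 free-list entries, Tx rings of 1024/1024/256 descriptors, a 5 us coalescing timer, and NAPI polling enabled on every chip newer than rev 0. */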
gpl-2.0
EmmanuelU/wild_kernel_samsung_msm8660
drivers/mtd/maps/dbox2-flash.c
8181
2716
/* * D-Box 2 flash driver */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/errno.h> /* partition_info gives details on the logical partitions that the * single flash device is split into. If the size is zero we use up to the end of the * device. */ static struct mtd_partition partition_info[] = { { .name = "BR bootloader", .size = 128 * 1024, .offset = 0, .mask_flags = MTD_WRITEABLE }, { .name = "FLFS (U-Boot)", .size = 128 * 1024, .offset = MTDPART_OFS_APPEND, .mask_flags = 0 }, { .name = "Root (SquashFS)", .size = 7040 * 1024, .offset = MTDPART_OFS_APPEND, .mask_flags = 0 }, { .name = "var (JFFS2)", .size = 896 * 1024, .offset = MTDPART_OFS_APPEND, .mask_flags = 0 }, { .name = "Flash without bootloader", .size = MTDPART_SIZ_FULL, .offset = 128 * 1024, .mask_flags = 0 }, { .name = "Complete Flash", .size = MTDPART_SIZ_FULL, .offset = 0, .mask_flags = MTD_WRITEABLE } }; #define NUM_PARTITIONS ARRAY_SIZE(partition_info) #define WINDOW_ADDR 0x10000000 #define WINDOW_SIZE 0x800000 static struct mtd_info *mymtd; struct map_info dbox2_flash_map = { .name = "D-Box 2 flash memory", .size = WINDOW_SIZE, .bankwidth = 4, .phys = WINDOW_ADDR, }; static int __init init_dbox2_flash(void) { printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR); dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE); if (!dbox2_flash_map.virt) { printk("Failed to ioremap\n"); return -EIO; } simple_map_init(&dbox2_flash_map); // Probe for dual Intel 28F320 or dual AMD mymtd = do_map_probe("cfi_probe", &dbox2_flash_map); if (!mymtd) { // Probe for single Intel 28F640 dbox2_flash_map.bankwidth = 2; mymtd = do_map_probe("cfi_probe", &dbox2_flash_map); } if (mymtd) { mymtd->owner = THIS_MODULE; /* Create MTD devices for each partition. */ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS); return 0; } iounmap((void *)dbox2_flash_map.virt); return -ENXIO; } static void __exit cleanup_dbox2_flash(void) { if (mymtd) { mtd_device_unregister(mymtd); map_destroy(mymtd); } if (dbox2_flash_map.virt) { iounmap((void *)dbox2_flash_map.virt); dbox2_flash_map.virt = 0; } } module_init(init_dbox2_flash); module_exit(cleanup_dbox2_flash); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>, Bastian Blank <waldi@tuxbox.org>, Alexander Wild <wild@te-elektronik.com>"); MODULE_DESCRIPTION("MTD map driver for D-Box 2 board");
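/* Illustrative arithmetic (not part of the original driver): the fixed partitions above occupy 128 KiB (BR bootloader) + 128 KiB (FLFS) + 7040 KiB (Root) + 896 KiB (var) = 8192 KiB = 0x800000 bytes, which exactly matches WINDOW_SIZE, so the MTDPART_SIZ_FULL entries resolve to the remaining space as intended. */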
gpl-2.0
diaevd/android_kernel_samsung_sm-t325
drivers/scsi/megaraid/megaraid_mm.c
8437
27312
/* * * Linux MegaRAID device driver * * Copyright (c) 2003-2004 LSI Logic Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * FILE : megaraid_mm.c * Version : v2.20.2.7 (Jul 16 2006) * * Common management module */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/mutex.h> #include "megaraid_mm.h" // Entry points for char node driver static DEFINE_MUTEX(mraid_mm_mutex); static int mraid_mm_open(struct inode *, struct file *); static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long); // routines to convert to and from the old format static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *); static int kioc_to_mimd(uioc_t *, mimd_t __user *); // Helper functions static int handle_drvrcmd(void __user *, uint8_t, int *); static int lld_ioctl(mraid_mmadp_t *, uioc_t *); static void ioctl_done(uioc_t *); static void lld_timedout(unsigned long); static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *); static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *); static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *); static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *); static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int); static int mraid_mm_setup_dma_pools(mraid_mmadp_t *); static void mraid_mm_free_adp_resources(mraid_mmadp_t *); static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *); #ifdef CONFIG_COMPAT static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long); #endif MODULE_AUTHOR("LSI Logic Corporation"); MODULE_DESCRIPTION("LSI Logic Management Module"); MODULE_LICENSE("GPL"); MODULE_VERSION(LSI_COMMON_MOD_VERSION); static int dbglevel = CL_ANN; module_param_named(dlevel, dbglevel, int, 0); MODULE_PARM_DESC(dlevel, "Debug level (default=0)"); EXPORT_SYMBOL(mraid_mm_register_adp); EXPORT_SYMBOL(mraid_mm_unregister_adp); EXPORT_SYMBOL(mraid_mm_adapter_app_handle); static uint32_t drvr_ver = 0x02200207; static int adapters_count_g; static struct list_head adapters_list_g; static wait_queue_head_t wait_q; static const struct file_operations lsi_fops = { .open = mraid_mm_open, .unlocked_ioctl = mraid_mm_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = mraid_mm_compat_ioctl, #endif .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice megaraid_mm_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "megadev0", .fops = &lsi_fops, }; /** * mraid_mm_open - open routine for char node interface * @inode : unused * @filep : unused * * Allow ioctl operations by apps only if they have superuser privilege. */ static int mraid_mm_open(struct inode *inode, struct file *filep) { /* * Only allow superuser to access private ioctl interface */ if (!capable(CAP_SYS_ADMIN)) return (-EACCES); return 0; } /** * mraid_mm_ioctl - module entry-point for ioctls * @filep : file operations pointer (ignored) * @cmd : ioctl command * @arg : user ioctl packet */ static int mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { uioc_t *kioc; char signature[EXT_IOCTL_SIGN_SZ] = {0}; int rval; mraid_mmadp_t *adp; uint8_t old_ioctl; int drvrcmd_rval; void __user *argp = (void __user *)arg; /* * Make sure only USCSICMD are issued through this interface. * MIMD applications would still fire different commands.
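* * As a sketch of the check that follows (illustration only): _IOC_TYPE(cmd) extracts the 8-bit magic number from the ioctl command, so anything that was not encoded with MEGAIOC_MAGIC and is not the USCSICMD escape is rejected with -EINVAL before any user data is copied in.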
*/ if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) { return (-EINVAL); } /* * Look for signature to see if this is the new or old ioctl format. */ if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) { con_log(CL_ANN, (KERN_WARNING "megaraid cmm: copy from usr addr failed\n")); return (-EFAULT); } if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0) old_ioctl = 0; else old_ioctl = 1; /* * At present, we don't support the new ioctl packet */ if (!old_ioctl ) return (-EINVAL); /* * If it is a driver ioctl (as opposed to fw ioctls), then we can * handle the command locally. rval > 0 means it is not a drvr cmd */ rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval); if (rval < 0) return rval; else if (rval == 0) return drvrcmd_rval; rval = 0; if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) { return rval; } /* * Check if adapter can accept ioctl. We may have marked it offline * if any previous kioc had timedout on this controller. */ if (!adp->quiescent) { con_log(CL_ANN, (KERN_WARNING "megaraid cmm: controller cannot accept cmds due to " "earlier errors\n" )); return -EFAULT; } /* * The following call will block till a kioc is available */ kioc = mraid_mm_alloc_kioc(adp); /* * User sent the old mimd_t ioctl packet. Convert it to uioc_t. */ if ((rval = mimd_to_kioc(argp, adp, kioc))) { mraid_mm_dealloc_kioc(adp, kioc); return rval; } kioc->done = ioctl_done; /* * Issue the IOCTL to the low level driver. After the IOCTL completes * release the kioc if and only if it was _not_ timedout. If it was * timedout, that means that resources are still with low level driver. */ if ((rval = lld_ioctl(adp, kioc))) { if (!kioc->timedout) mraid_mm_dealloc_kioc(adp, kioc); return rval; } /* * Convert the kioc back to user space */ rval = kioc_to_mimd(kioc, argp); /* * Return the kioc to free pool */ mraid_mm_dealloc_kioc(adp, kioc); return rval; } static long mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { int err; /* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */ mutex_lock(&mraid_mm_mutex); err = mraid_mm_ioctl(filep, cmd, arg); mutex_unlock(&mraid_mm_mutex); return err; } /** * mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet * @umimd : User space mimd_t ioctl packet * @rval : returned success/error status * * The function return value is a pointer to the located @adapter. */ static mraid_mmadp_t * mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) { mraid_mmadp_t *adapter; mimd_t mimd; uint32_t adapno; int iterator; if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) { *rval = -EFAULT; return NULL; } adapno = GETADAP(mimd.ui.fcs.adapno); if (adapno >= adapters_count_g) { *rval = -ENODEV; return NULL; } adapter = NULL; iterator = 0; list_for_each_entry(adapter, &adapters_list_g, list) { if (iterator++ == adapno) break; } if (!adapter) { *rval = -ENODEV; return NULL; } return adapter; } /** * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it. 
* @arg : packet sent by the user app * @old_ioctl : mimd if 1; uioc otherwise * @rval : pointer for command's returned value (not function status) */ static int handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval) { mimd_t __user *umimd; mimd_t kmimd; uint8_t opcode; uint8_t subopcode; if (old_ioctl) goto old_packet; else goto new_packet; new_packet: return (-ENOTSUPP); old_packet: *rval = 0; umimd = arg; if (copy_from_user(&kmimd, umimd, sizeof(mimd_t))) return (-EFAULT); opcode = kmimd.ui.fcs.opcode; subopcode = kmimd.ui.fcs.subopcode; /* * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or * GET_NUMADP, then we can handle. Otherwise we should return 1 to * indicate that we cannot handle this. */ if (opcode != 0x82) return 1; switch (subopcode) { case MEGAIOC_QDRVRVER: if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t))) return (-EFAULT); return 0; case MEGAIOC_QNADAP: *rval = adapters_count_g; if (copy_to_user(kmimd.data, &adapters_count_g, sizeof(uint32_t))) return (-EFAULT); return 0; default: /* cannot handle */ return 1; } return 0; } /** * mimd_to_kioc - Converter from old to new ioctl format * @umimd : user space old MIMD IOCTL * @adp : adapter softstate * @kioc : kernel space new format IOCTL * * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The * new packet is in kernel space so that driver can perform operations on it * freely. */ static int mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc) { mbox64_t *mbox64; mbox_t *mbox; mraid_passthru_t *pthru32; uint32_t adapno; uint8_t opcode; uint8_t subopcode; mimd_t mimd; if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) return (-EFAULT); /* * Applications are not allowed to send extd pthru */ if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) || (mimd.mbox[0] == MBOXCMD_EXTPTHRU)) return (-EINVAL); opcode = mimd.ui.fcs.opcode; subopcode = mimd.ui.fcs.subopcode; adapno = GETADAP(mimd.ui.fcs.adapno); if (adapno >= adapters_count_g) return (-ENODEV); kioc->adapno = adapno; kioc->mb_type = MBOX_LEGACY; kioc->app_type = APPTYPE_MIMD; switch (opcode) { case 0x82: if (subopcode == MEGAIOC_QADAPINFO) { kioc->opcode = GET_ADAP_INFO; kioc->data_dir = UIOC_RD; kioc->xferlen = sizeof(mraid_hba_info_t); if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen)) return (-ENOMEM); } else { con_log(CL_ANN, (KERN_WARNING "megaraid cmm: Invalid subop\n")); return (-EINVAL); } break; case 0x81: kioc->opcode = MBOX_CMD; kioc->xferlen = mimd.ui.fcs.length; kioc->user_data_len = kioc->xferlen; kioc->user_data = mimd.ui.fcs.buffer; if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen)) return (-ENOMEM); if (mimd.outlen) kioc->data_dir = UIOC_RD; if (mimd.inlen) kioc->data_dir |= UIOC_WR; break; case 0x80: kioc->opcode = MBOX_CMD; kioc->xferlen = (mimd.outlen > mimd.inlen) ? 
mimd.outlen : mimd.inlen; kioc->user_data_len = kioc->xferlen; kioc->user_data = mimd.data; if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen)) return (-ENOMEM); if (mimd.outlen) kioc->data_dir = UIOC_RD; if (mimd.inlen) kioc->data_dir |= UIOC_WR; break; default: return (-EINVAL); } /* * If driver command, nothing else to do */ if (opcode == 0x82) return 0; /* * This is a mailbox cmd; copy the mailbox from mimd */ mbox64 = (mbox64_t *)((unsigned long)kioc->cmdbuf); mbox = &mbox64->mbox32; memcpy(mbox, mimd.mbox, 14); if (mbox->cmd != MBOXCMD_PASSTHRU) { // regular DCMD mbox->xferaddr = (uint32_t)kioc->buf_paddr; if (kioc->data_dir & UIOC_WR) { if (copy_from_user(kioc->buf_vaddr, kioc->user_data, kioc->xferlen)) { return (-EFAULT); } } return 0; } /* * This is a regular 32-bit pthru cmd; mbox points to pthru struct. * Just like in above case, the beginning for memblk is treated as * a mailbox. The passthru will begin at next 1K boundary. And the * data will start 1K after that. */ pthru32 = kioc->pthru32; kioc->user_pthru = &umimd->pthru; mbox->xferaddr = (uint32_t)kioc->pthru32_h; if (copy_from_user(pthru32, kioc->user_pthru, sizeof(mraid_passthru_t))) { return (-EFAULT); } pthru32->dataxferaddr = kioc->buf_paddr; if (kioc->data_dir & UIOC_WR) { if (copy_from_user(kioc->buf_vaddr, kioc->user_data, pthru32->dataxferlen)) { return (-EFAULT); } } return 0; } /** * mraid_mm_attach_buf - Attach a free dma buffer for required size * @adp : Adapter softstate * @kioc : kioc that the buffer needs to be attached to * @xferlen : required length for buffer * * First we search for a pool with smallest buffer that is >= @xferlen. If * that pool has no free buffer, we will try for the next bigger size. If none * is available, we will try to allocate the smallest buffer that is >= * @xferlen and attach it to the pool. */ static int mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen) { mm_dmapool_t *pool; int right_pool = -1; unsigned long flags; int i; kioc->pool_index = -1; kioc->buf_vaddr = NULL; kioc->buf_paddr = 0; kioc->free_buf = 0; /* * We need xferlen amount of memory. See if we can get it from our * dma pools. If we don't get exact size, we will try bigger buffer */ for (i = 0; i < MAX_DMA_POOLS; i++) { pool = &adp->dma_pool_list[i]; if (xferlen > pool->buf_size) continue; if (right_pool == -1) right_pool = i; spin_lock_irqsave(&pool->lock, flags); if (!pool->in_use) { pool->in_use = 1; kioc->pool_index = i; kioc->buf_vaddr = pool->vaddr; kioc->buf_paddr = pool->paddr; spin_unlock_irqrestore(&pool->lock, flags); return 0; } else { spin_unlock_irqrestore(&pool->lock, flags); continue; } } /* * If xferlen doesn't match any of our pools, return error */ if (right_pool == -1) return -EINVAL; /* * We did not get any buffer from the preallocated pool. Let us try * to allocate one new buffer. NOTE: This is a blocking call. */ pool = &adp->dma_pool_list[right_pool]; spin_lock_irqsave(&pool->lock, flags); kioc->pool_index = right_pool; kioc->free_buf = 1; kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL, &kioc->buf_paddr); spin_unlock_irqrestore(&pool->lock, flags); if (!kioc->buf_vaddr) return -ENOMEM; return 0; } /** * mraid_mm_alloc_kioc - Returns a uioc_t from free list * @adp : Adapter softstate for this module * * The kioc_semaphore is initialized with number of kioc nodes in the * free kioc pool. If the kioc pool is empty, this function blocks till * a kioc becomes free.
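* * The pattern is a classic counting-semaphore resource pool; a minimal sketch of the two halves (illustration only): the allocator does down(&adp->kioc_semaphore), which may sleep, and then unlinks a node from adp->kioc_pool under kioc_pool_lock; mraid_mm_dealloc_kioc() reverses this by relinking the node under the same lock and calling up(&adp->kioc_semaphore) to wake one blocked allocator.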
*/ static uioc_t * mraid_mm_alloc_kioc(mraid_mmadp_t *adp) { uioc_t *kioc; struct list_head* head; unsigned long flags; down(&adp->kioc_semaphore); spin_lock_irqsave(&adp->kioc_pool_lock, flags); head = &adp->kioc_pool; if (list_empty(head)) { up(&adp->kioc_semaphore); spin_unlock_irqrestore(&adp->kioc_pool_lock, flags); con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n")); return NULL; } kioc = list_entry(head->next, uioc_t, list); list_del_init(&kioc->list); spin_unlock_irqrestore(&adp->kioc_pool_lock, flags); memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t)); memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t)); kioc->buf_vaddr = NULL; kioc->buf_paddr = 0; kioc->pool_index =-1; kioc->free_buf = 0; kioc->user_data = NULL; kioc->user_data_len = 0; kioc->user_pthru = NULL; kioc->timedout = 0; return kioc; } /** * mraid_mm_dealloc_kioc - Return kioc to free pool * @adp : Adapter softstate * @kioc : uioc_t node to be returned to free pool */ static void mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc) { mm_dmapool_t *pool; unsigned long flags; if (kioc->pool_index != -1) { pool = &adp->dma_pool_list[kioc->pool_index]; /* This routine may be called in non-isr context also */ spin_lock_irqsave(&pool->lock, flags); /* * While attaching the dma buffer, if we didn't get the * required buffer from the pool, we would have allocated * it at the run time and set the free_buf flag. We must * free that buffer. Otherwise, just mark that the buffer is * not in use */ if (kioc->free_buf == 1) pci_pool_free(pool->handle, kioc->buf_vaddr, kioc->buf_paddr); else pool->in_use = 0; spin_unlock_irqrestore(&pool->lock, flags); } /* Return the kioc to the free pool */ spin_lock_irqsave(&adp->kioc_pool_lock, flags); list_add(&kioc->list, &adp->kioc_pool); spin_unlock_irqrestore(&adp->kioc_pool_lock, flags); /* increment the free kioc count */ up(&adp->kioc_semaphore); return; } /** * lld_ioctl - Routine to issue ioctl to low level drvr * @adp : The adapter handle * @kioc : The ioctl packet with kernel addresses */ static int lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc) { int rval; struct timer_list timer; struct timer_list *tp = NULL; kioc->status = -ENODATA; rval = adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE); if (rval) return rval; /* * Start the timer */ if (adp->timeout > 0) { tp = &timer; init_timer(tp); tp->function = lld_timedout; tp->data = (unsigned long)kioc; tp->expires = jiffies + adp->timeout * HZ; add_timer(tp); } /* * Wait till the low level driver completes the ioctl. After this * call, the ioctl either completed successfully or timedout. */ wait_event(wait_q, (kioc->status != -ENODATA)); if (tp) { del_timer_sync(tp); } /* * If the command had timedout, we mark the controller offline * before returning */ if (kioc->timedout) { adp->quiescent = 0; } return kioc->status; } /** * ioctl_done - callback from the low level driver * @kioc : completed ioctl packet */ static void ioctl_done(uioc_t *kioc) { uint32_t adapno; int iterator; mraid_mmadp_t* adapter; /* * When the kioc returns from driver, make sure it still doesn't * have ENODATA in status. Otherwise, driver will hang on wait_event * forever */ if (kioc->status == -ENODATA) { con_log(CL_ANN, (KERN_WARNING "megaraid cmm: lld didn't change status!\n")); kioc->status = -EINVAL; } /* * Check if this kioc was timedout before. If so, nobody is waiting * on this kioc. We don't have to wake up anybody. 
Instead, we just * have to free the kioc */ if (kioc->timedout) { iterator = 0; adapter = NULL; adapno = kioc->adapno; con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed " "ioctl that was timedout before\n")); list_for_each_entry(adapter, &adapters_list_g, list) { if (iterator++ == adapno) break; } kioc->timedout = 0; if (adapter) { mraid_mm_dealloc_kioc(adapter, kioc); } } else { wake_up(&wait_q); } } /** * lld_timedout - callback from the expired timer * @ptr : ioctl packet that timed out */ static void lld_timedout(unsigned long ptr) { uioc_t *kioc = (uioc_t *)ptr; kioc->status = -ETIME; kioc->timedout = 1; con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n")); wake_up(&wait_q); } /** * kioc_to_mimd - Converter from new back to old format * @kioc : Kernel space IOCTL packet (successfully issued) * @mimd : User space MIMD packet */ static int kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd) { mimd_t kmimd; uint8_t opcode; uint8_t subopcode; mbox64_t *mbox64; mraid_passthru_t __user *upthru32; mraid_passthru_t *kpthru32; mcontroller_t cinfo; mraid_hba_info_t *hinfo; if (copy_from_user(&kmimd, mimd, sizeof(mimd_t))) return (-EFAULT); opcode = kmimd.ui.fcs.opcode; subopcode = kmimd.ui.fcs.subopcode; if (opcode == 0x82) { switch (subopcode) { case MEGAIOC_QADAPINFO: hinfo = (mraid_hba_info_t *)(unsigned long) kioc->buf_vaddr; hinfo_to_cinfo(hinfo, &cinfo); if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo))) return (-EFAULT); return 0; default: return (-EINVAL); } return 0; } mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; if (kioc->user_pthru) { upthru32 = kioc->user_pthru; kpthru32 = kioc->pthru32; if (copy_to_user(&upthru32->scsistatus, &kpthru32->scsistatus, sizeof(uint8_t))) { return (-EFAULT); } } if (kioc->user_data) { if (copy_to_user(kioc->user_data, kioc->buf_vaddr, kioc->user_data_len)) { return (-EFAULT); } } if (copy_to_user(&mimd->mbox[17], &mbox64->mbox32.status, sizeof(uint8_t))) { return (-EFAULT); } return 0; } /** * hinfo_to_cinfo - Convert new format hba info into old format * @hinfo : New format, more comprehensive adapter info * @cinfo : Old format adapter info to support mimd_t apps */ static void hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo) { if (!hinfo || !cinfo) return; cinfo->base = hinfo->baseport; cinfo->irq = hinfo->irq; cinfo->numldrv = hinfo->num_ldrv; cinfo->pcibus = hinfo->pci_bus; cinfo->pcidev = hinfo->pci_slot; cinfo->pcifun = PCI_FUNC(hinfo->pci_dev_fn); cinfo->pciid = hinfo->pci_device_id; cinfo->pcivendor = hinfo->pci_vendor_id; cinfo->pcislot = hinfo->pci_slot; cinfo->uid = hinfo->unique_id; } /** * mraid_mm_register_adp - Registration routine for low level drivers * @lld_adp : Adapter object */ int mraid_mm_register_adp(mraid_mmadp_t *lld_adp) { mraid_mmadp_t *adapter; mbox64_t *mbox_list; uioc_t *kioc; uint32_t rval; int i; if (lld_adp->drvr_type != DRVRTYPE_MBOX) return (-EINVAL); adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL); if (!adapter) return -ENOMEM; adapter->unique_id = lld_adp->unique_id; adapter->drvr_type = lld_adp->drvr_type; adapter->drvr_data = lld_adp->drvr_data; adapter->pdev = lld_adp->pdev; adapter->issue_uioc = lld_adp->issue_uioc; adapter->timeout = lld_adp->timeout; adapter->max_kioc = lld_adp->max_kioc; adapter->quiescent = 1; /* * Allocate single blocks of memory for all required kiocs, * mailboxes and passthru structures.
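* * For illustration, with a hypothetical max_kioc of 4 this means one kmalloc of 4 * sizeof(uioc_t) for the kioc array, one kmalloc of 4 * sizeof(mbox64_t) for the mailboxes, and one pci_pool from which the four per-kioc passthru structures are carved out below.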
*/ adapter->kioc_list = kmalloc(sizeof(uioc_t) * lld_adp->max_kioc, GFP_KERNEL); adapter->mbox_list = kmalloc(sizeof(mbox64_t) * lld_adp->max_kioc, GFP_KERNEL); adapter->pthru_dma_pool = pci_pool_create("megaraid mm pthru pool", adapter->pdev, sizeof(mraid_passthru_t), 16, 0); if (!adapter->kioc_list || !adapter->mbox_list || !adapter->pthru_dma_pool) { con_log(CL_ANN, (KERN_WARNING "megaraid cmm: out of memory, %s %d\n", __func__, __LINE__)); rval = (-ENOMEM); goto memalloc_error; } /* * Slice kioc_list and make a kioc_pool with the individual kiocs */ INIT_LIST_HEAD(&adapter->kioc_pool); spin_lock_init(&adapter->kioc_pool_lock); sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc); mbox_list = (mbox64_t *)adapter->mbox_list; for (i = 0; i < lld_adp->max_kioc; i++) { kioc = adapter->kioc_list + i; kioc->cmdbuf = (uint64_t)(unsigned long)(mbox_list + i); kioc->pthru32 = pci_pool_alloc(adapter->pthru_dma_pool, GFP_KERNEL, &kioc->pthru32_h); if (!kioc->pthru32) { con_log(CL_ANN, (KERN_WARNING "megaraid cmm: out of memory, %s %d\n", __func__, __LINE__)); rval = (-ENOMEM); goto pthru_dma_pool_error; } list_add_tail(&kioc->list, &adapter->kioc_pool); } // Setup the dma pools for data buffers if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) { goto dma_pool_error; } list_add_tail(&adapter->list, &adapters_list_g); adapters_count_g++; return 0; dma_pool_error: /* Do nothing */ pthru_dma_pool_error: for (i = 0; i < lld_adp->max_kioc; i++) { kioc = adapter->kioc_list + i; if (kioc->pthru32) { pci_pool_free(adapter->pthru_dma_pool, kioc->pthru32, kioc->pthru32_h); } } memalloc_error: kfree(adapter->kioc_list); kfree(adapter->mbox_list); if (adapter->pthru_dma_pool) pci_pool_destroy(adapter->pthru_dma_pool); kfree(adapter); return rval; } /** * mraid_mm_adapter_app_handle - return the application handle for this adapter * @unique_id : adapter unique identifier * * For the given driver data, locate the adapter in our global list and * return the corresponding handle, which is also used by applications to * uniquely identify an adapter. * * Return adapter handle if found in the list. * Return 0 if adapter could not be located, should never happen though. */ uint32_t mraid_mm_adapter_app_handle(uint32_t unique_id) { mraid_mmadp_t *adapter; mraid_mmadp_t *tmp; int index = 0; list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) { if (adapter->unique_id == unique_id) { return MKADAP(index); } index++; } return 0; } /** * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter * @adp : Adapter softstate * * We maintain a pool of dma buffers per adapter. Each pool has one * buffer. E.g., we may have 5 dma pools - one each for 4k, 8k ... 64k buffers. * We have just one 4k buffer in 4k pool, one 8k buffer in 8k pool etc. We * don't want to waste too much memory by allocating more buffers per * pool.
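* * Worked example, assuming MRAID_MM_INIT_BUFF_SIZE is 4 KiB (the actual value lives in megaraid_mm.h): the doubling loop below then produces pool sizes of 4k, 8k, 16k, ... for the MAX_DMA_POOLS entries, and mraid_mm_attach_buf() always picks the smallest pool whose buf_size is >= xferlen.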
*/ static int mraid_mm_setup_dma_pools(mraid_mmadp_t *adp) { mm_dmapool_t *pool; int bufsize; int i; /* * Create MAX_DMA_POOLS number of pools */ bufsize = MRAID_MM_INIT_BUFF_SIZE; for (i = 0; i < MAX_DMA_POOLS; i++) { pool = &adp->dma_pool_list[i]; pool->buf_size = bufsize; spin_lock_init(&pool->lock); pool->handle = pci_pool_create("megaraid mm data buffer", adp->pdev, bufsize, 16, 0); if (!pool->handle) { goto dma_pool_setup_error; } pool->vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL, &pool->paddr); if (!pool->vaddr) goto dma_pool_setup_error; bufsize = bufsize * 2; } return 0; dma_pool_setup_error: mraid_mm_teardown_dma_pools(adp); return (-ENOMEM); } /** * mraid_mm_unregister_adp - Unregister routine for low level drivers * @unique_id : UID of the adapter * * Assumes no outstanding ioctls to llds. */ int mraid_mm_unregister_adp(uint32_t unique_id) { mraid_mmadp_t *adapter; mraid_mmadp_t *tmp; list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) { if (adapter->unique_id == unique_id) { adapters_count_g--; list_del_init(&adapter->list); mraid_mm_free_adp_resources(adapter); kfree(adapter); con_log(CL_ANN, ( "megaraid cmm: Unregistered one adapter:%#x\n", unique_id)); return 0; } } return (-ENODEV); } /** * mraid_mm_free_adp_resources - Free adapter softstate * @adp : Adapter softstate */ static void mraid_mm_free_adp_resources(mraid_mmadp_t *adp) { uioc_t *kioc; int i; mraid_mm_teardown_dma_pools(adp); for (i = 0; i < adp->max_kioc; i++) { kioc = adp->kioc_list + i; pci_pool_free(adp->pthru_dma_pool, kioc->pthru32, kioc->pthru32_h); } kfree(adp->kioc_list); kfree(adp->mbox_list); pci_pool_destroy(adp->pthru_dma_pool); return; } /** * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers * @adp : Adapter softstate */ static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp) { int i; mm_dmapool_t *pool; for (i = 0; i < MAX_DMA_POOLS; i++) { pool = &adp->dma_pool_list[i]; if (pool->handle) { if (pool->vaddr) pci_pool_free(pool->handle, pool->vaddr, pool->paddr); pci_pool_destroy(pool->handle); pool->handle = NULL; } } return; } /** * mraid_mm_init - Module entry point */ static int __init mraid_mm_init(void) { int err; // Announce the driver version con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n", LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION)); err = misc_register(&megaraid_mm_dev); if (err < 0) { con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n")); return err; } init_waitqueue_head(&wait_q); INIT_LIST_HEAD(&adapters_list_g); return 0; } #ifdef CONFIG_COMPAT /** * mraid_mm_compat_ioctl - 32bit to 64bit ioctl conversion routine * @filep : file operations pointer (ignored) * @cmd : ioctl command * @arg : user ioctl packet */ static long mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { int err; err = mraid_mm_ioctl(filep, cmd, arg); return err; } #endif /** * mraid_mm_exit - Module exit point */ static void __exit mraid_mm_exit(void) { con_log(CL_DLEVEL1, ("exiting common mod\n")); misc_deregister(&megaraid_mm_dev); } module_init(mraid_mm_init); module_exit(mraid_mm_exit); /* vi: set ts=8 sw=8 tw=78: */
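/*
 * Editor's note -- illustrative sketch, not part of the original driver.
 * It shows how a mailbox-based low level driver might register with this
 * common module, using only the mraid_mmadp_t fields that
 * mraid_mm_register_adp() above copies. The example_* names, the field
 * values, and the exact issue_uioc callback signature are assumptions made
 * for illustration.
 */
static int example_issue_uioc(unsigned long drvr_data, uioc_t *kioc,
		uint32_t action)
{
	/* a real LLD would translate kioc and fire it at the hardware */
	return 0;
}

static int example_register_with_cmm(struct pci_dev *pdev, uint32_t uid)
{
	mraid_mmadp_t adp;

	memset(&adp, 0, sizeof(adp));
	adp.unique_id	= uid;			/* one per adapter */
	adp.drvr_type	= DRVRTYPE_MBOX;	/* anything else is -EINVAL */
	adp.drvr_data	= (unsigned long)pdev;	/* echoed back to issue_uioc */
	adp.pdev	= pdev;			/* used for the dma pools */
	adp.issue_uioc	= example_issue_uioc;
	adp.timeout	= 300;			/* hypothetical value */
	adp.max_kioc	= 32;			/* kiocs preallocated above */

	return mraid_mm_register_adp(&adp);
}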
gpl-2.0
seadog76/asus-kernel-tf700t
drivers/net/irda/old_belkin-sir.c
12533
4838
/********************************************************************* * * Filename: old_belkin.c * Version: 1.1 * Description: Driver for the Belkin (old) SmartBeam dongle * Status: Experimental... * Author: Jean Tourrilhes <jt@hpl.hp.com> * Created at: 22/11/99 * Modified at: Fri Dec 17 09:13:32 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1999 Jean Tourrilhes, All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * ********************************************************************/ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <net/irda/irda.h> // #include <net/irda/irda_device.h> #include "sir-dev.h" /* * Belkin is selling a dongle called the SmartBeam. * In fact, there are two hardware versions of this dongle, of course with * the same name and looking exactly the same (grrr...). * I guess that I've got the old one, because inside I don't have * a jumper for IrDA/ASK... * * As far as I can make out from the info on their web site, the old dongle * supports only 9600 b/s, which makes our life much simpler as far as * the driver is concerned, but you might not like it very much ;-) * The new SmartBeam does 115 kb/s, and I've not tested it... * * Belkin claims that the correct driver for the old dongle (in Windows) * is the generic Parallax 9500a driver, but the Linux LiteLink driver * fails for me (probably because Linux-IrDA doesn't do rate fallback), * so I created this really dumb driver... * * In fact, this driver doesn't do much. The only thing it does is * prevent Linux-IrDA from using any other speed than 9600 b/s ;-) This * driver is called "old_belkin" so that when the new SmartBeam is supported * its driver can be called "belkin" instead of "new_belkin". * * Note : this driver was written without any info/help from Belkin, * so a lot of info here might be totally wrong. Blame me ;-) */ static int old_belkin_open(struct sir_dev *dev); static int old_belkin_close(struct sir_dev *dev); static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed); static int old_belkin_reset(struct sir_dev *dev); static struct dongle_driver old_belkin = { .owner = THIS_MODULE, .driver_name = "Old Belkin SmartBeam", .type = IRDA_OLD_BELKIN_DONGLE, .open = old_belkin_open, .close = old_belkin_close, .reset = old_belkin_reset, .set_speed = old_belkin_change_speed, }; static int __init old_belkin_sir_init(void) { return irda_register_dongle(&old_belkin); } static void __exit old_belkin_sir_cleanup(void) { irda_unregister_dongle(&old_belkin); } static int old_belkin_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; IRDA_DEBUG(2, "%s()\n", __func__); /* Power on dongle */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Not too fast, please... 
*/ qos->baud_rate.bits &= IR_9600; /* Needs at least 10 ms (totally wild guess, can probably do better) */ qos->min_turn_time.bits = 0x01; irda_qos_bits_to_value(qos); /* irda thread waits 50 msec for power settling */ return 0; } static int old_belkin_close(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__); /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); return 0; } /* * Function old_belkin_change_speed (task) * * With only one speed available, not much to do... */ static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed) { IRDA_DEBUG(2, "%s()\n", __func__); dev->speed = 9600; return (speed == dev->speed) ? 0 : -EINVAL; } /* * Function old_belkin_reset (task) * * Reset the Old-Belkin type dongle. * */ static int old_belkin_reset(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__); /* This dongle's speed "defaults" to 9600 bps ;-) */ dev->speed = 9600; return 0; } MODULE_AUTHOR("Jean Tourrilhes <jt@hpl.hp.com>"); MODULE_DESCRIPTION("Belkin (old) SmartBeam dongle driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("irda-dongle-7"); /* IRDA_OLD_BELKIN_DONGLE */ module_init(old_belkin_sir_init); module_exit(old_belkin_sir_cleanup);
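/*
 * Editor's note -- illustrative sketch, not part of the original driver.
 * The open routine above caps the link by masking qos->baud_rate.bits down
 * to IR_9600. A hypothetical dongle that also handled 19200 b/s would
 * simply widen the mask, assuming the usual IR_* rate bits from the IrDA
 * qos header; the function name is a placeholder.
 */
static int example_dual_speed_open(struct sir_dev *dev)
{
	struct qos_info *qos = &dev->qos;

	/* power on, exactly as old_belkin_open() does */
	sirdev_set_dtr_rts(dev, TRUE, TRUE);

	/* advertise two rates instead of one */
	qos->baud_rate.bits &= IR_9600 | IR_19200;
	irda_qos_bits_to_value(qos);
	return 0;
}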
gpl-2.0
TeamButter/android_kernel_samsung_mint2g
net/wireless/wext-priv.c
14837
7027
/* * This file implements the Wireless Extensions priv API. * * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> * * (As with all parts of the Linux kernel, this file is GPL) */ #include <linux/slab.h> #include <linux/wireless.h> #include <linux/netdevice.h> #include <net/iw_handler.h> #include <net/wext.h> int iw_handler_get_private(struct net_device * dev, struct iw_request_info * info, union iwreq_data * wrqu, char * extra) { /* Check if the driver has something to export */ if ((dev->wireless_handlers->num_private_args == 0) || (dev->wireless_handlers->private_args == NULL)) return -EOPNOTSUPP; /* Check if there is enough buffer space up there */ if (wrqu->data.length < dev->wireless_handlers->num_private_args) { /* User space can't know in advance how large the buffer * needs to be. Give it a hint, so that we can support * any size buffer we want somewhat efficiently... */ wrqu->data.length = dev->wireless_handlers->num_private_args; return -E2BIG; } /* Set the number of available ioctls. */ wrqu->data.length = dev->wireless_handlers->num_private_args; /* Copy structure to the user buffer. */ memcpy(extra, dev->wireless_handlers->private_args, sizeof(struct iw_priv_args) * wrqu->data.length); return 0; } /* Size (in bytes) of the various private data types */ static const char iw_priv_type_size[] = { 0, /* IW_PRIV_TYPE_NONE */ 1, /* IW_PRIV_TYPE_BYTE */ 1, /* IW_PRIV_TYPE_CHAR */ 0, /* Not defined */ sizeof(__u32), /* IW_PRIV_TYPE_INT */ sizeof(struct iw_freq), /* IW_PRIV_TYPE_FLOAT */ sizeof(struct sockaddr), /* IW_PRIV_TYPE_ADDR */ 0, /* Not defined */ }; static int get_priv_size(__u16 args) { int num = args & IW_PRIV_SIZE_MASK; int type = (args & IW_PRIV_TYPE_MASK) >> 12; return num * iw_priv_type_size[type]; } static int adjust_priv_size(__u16 args, struct iw_point *iwp) { int num = iwp->length; int max = args & IW_PRIV_SIZE_MASK; int type = (args & IW_PRIV_TYPE_MASK) >> 12; /* Make sure the driver doesn't goof up */ if (max < num) num = max; return num * iw_priv_type_size[type]; } /* * Wrapper to call a private Wireless Extension handler. * We do various checks and also take care of moving data between * user space and kernel space. * It's not as nice and slimline as the standard wrapper. The cause * is struct iw_priv_args, which was not really designed for the * job we are doing here. * * IMPORTANT : This function prevents setting and getting data in the same * IOCTL and enforces the SET/GET convention. Not doing so would be * far too hairy... * If you need to set and get data at the same time, please don't use * an iw_handler but process it in your ioctl handler (i.e. use the * old driver API). */ static int get_priv_descr_and_size(struct net_device *dev, unsigned int cmd, const struct iw_priv_args **descrp) { const struct iw_priv_args *descr; int i, extra_size; descr = NULL; for (i = 0; i < dev->wireless_handlers->num_private_args; i++) { if (cmd == dev->wireless_handlers->private_args[i].cmd) { descr = &dev->wireless_handlers->private_args[i]; break; } } extra_size = 0; if (descr) { if (IW_IS_SET(cmd)) { int offset = 0; /* For sub-ioctls */ /* Check for sub-ioctl handler */ if (descr->name[0] == '\0') /* Reserve one int for sub-ioctl index */ offset = sizeof(__u32); /* Size of set arguments */ extra_size = get_priv_size(descr->set_args); /* Does it fit in iwr? 
*/ if ((descr->set_args & IW_PRIV_SIZE_FIXED) && ((extra_size + offset) <= IFNAMSIZ)) extra_size = 0; } else { /* Size of get arguments */ extra_size = get_priv_size(descr->get_args); /* Does it fit in iwr? */ if ((descr->get_args & IW_PRIV_SIZE_FIXED) && (extra_size <= IFNAMSIZ)) extra_size = 0; } } *descrp = descr; return extra_size; } static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd, const struct iw_priv_args *descr, iw_handler handler, struct net_device *dev, struct iw_request_info *info, int extra_size) { char *extra; int err; /* Check what user space is giving us */ if (IW_IS_SET(cmd)) { if (!iwp->pointer && iwp->length != 0) return -EFAULT; if (iwp->length > (descr->set_args & IW_PRIV_SIZE_MASK)) return -E2BIG; } else if (!iwp->pointer) return -EFAULT; extra = kzalloc(extra_size, GFP_KERNEL); if (!extra) return -ENOMEM; /* If it is a SET, get all the extra data in here */ if (IW_IS_SET(cmd) && (iwp->length != 0)) { if (copy_from_user(extra, iwp->pointer, extra_size)) { err = -EFAULT; goto out; } } /* Call the handler */ err = handler(dev, info, (union iwreq_data *) iwp, extra); /* If we have something to return to the user */ if (!err && IW_IS_GET(cmd)) { /* Adjust for the actual length if it's variable, * avoid leaking kernel bits outside. */ if (!(descr->get_args & IW_PRIV_SIZE_FIXED)) extra_size = adjust_priv_size(descr->get_args, iwp); if (copy_to_user(iwp->pointer, extra, extra_size)) err = -EFAULT; } out: kfree(extra); return err; } int ioctl_private_call(struct net_device *dev, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, iw_handler handler) { int extra_size = 0, ret = -EINVAL; const struct iw_priv_args *descr; extra_size = get_priv_descr_and_size(dev, cmd, &descr); /* Check if we have a pointer to user space data or not. */ if (extra_size == 0) { /* No extra arguments. Trivial to handle */ ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u)); } else { ret = ioctl_private_iw_point(&iwr->u.data, cmd, descr, handler, dev, info, extra_size); } /* Call commit handler if needed and defined */ if (ret == -EIWCOMMIT) ret = call_commit_handler(dev); return ret; } #ifdef CONFIG_COMPAT int compat_private_call(struct net_device *dev, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, iw_handler handler) { const struct iw_priv_args *descr; int ret, extra_size; extra_size = get_priv_descr_and_size(dev, cmd, &descr); /* Check if we have a pointer to user space data or not. */ if (extra_size == 0) { /* No extra arguments. Trivial to handle */ ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u)); } else { struct compat_iw_point *iwp_compat; struct iw_point iwp; iwp_compat = (struct compat_iw_point *) &iwr->u.data; iwp.pointer = compat_ptr(iwp_compat->pointer); iwp.length = iwp_compat->length; iwp.flags = iwp_compat->flags; ret = ioctl_private_iw_point(&iwp, cmd, descr, handler, dev, info, extra_size); iwp_compat->pointer = ptr_to_compat(iwp.pointer); iwp_compat->length = iwp.length; iwp_compat->flags = iwp.flags; } /* Call commit handler if needed and defined */ if (ret == -EIWCOMMIT) ret = call_commit_handler(dev); return ret; } #endif
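/*
 * Editor's note -- illustrative sketch, not part of this file. It shows the
 * iw_priv_args table a driver might export, i.e. the encoding decoded by
 * get_priv_size()/adjust_priv_size() above: the IW_PRIV_SIZE_MASK bits
 * carry the argument count, bits 12-14 the type, and IW_PRIV_SIZE_FIXED
 * marks a fixed-size argument list. The command offsets and names are
 * hypothetical. Note that a fixed SET payload small enough to fit in
 * IFNAMSIZ bytes travels inline in struct iwreq, which is why
 * get_priv_descr_and_size() can return an extra_size of 0 for it.
 */
static const struct iw_priv_args example_priv_args[] = {
	{
		.cmd		= SIOCIWFIRSTPRIV,	/* even => SET */
		.set_args	= IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
		.get_args	= 0,
		.name		= "set_mode",
	},
	{
		.cmd		= SIOCIWFIRSTPRIV + 1,	/* odd => GET */
		.set_args	= 0,
		.get_args	= IW_PRIV_TYPE_CHAR | 16, /* up to 16 chars */
		.name		= "get_debug",
	},
};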
gpl-2.0
TeamPrimo/android_kernel_htc_primo
drivers/media/video/msm/gemini_8x60/msm_gemini_core.c
246
10093
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Code Aurora Forum, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, and instead of the terms immediately above, this * software may be relicensed by the recipient at their option under the * terms of the GNU General Public License version 2 ("GPL") and only * version 2. If the recipient chooses to relicense the software under * the GPL, then the recipient shall replace all of the text immediately * above and including this paragraph with the text immediately below * and between the words START OF ALTERNATE GPL TERMS and END OF * ALTERNATE GPL TERMS and such notices and license terms shall apply * INSTEAD OF the notices and licensing terms given above. * * START OF ALTERNATE GPL TERMS * * Copyright (c) 2010, Code Aurora Forum. All rights reserved. * * This software was originally licensed under the Code Aurora Forum * Inc. Dual BSD/GPL License version 1.1 and relicensed as permitted * under the terms thereof by a recipient under the General Public * License Version 2. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * END OF ALTERNATE GPL TERMS */ #include <linux/module.h> #include "msm_gemini_hw.h" #include "msm_gemini_core.h" #include "msm_gemini_platform.h" #include "msm_gemini_common.h" static struct msm_gemini_hw_pingpong fe_pingpong_buf; static struct msm_gemini_hw_pingpong we_pingpong_buf; static int we_pingpong_index; static int reset_done_ack; static spinlock_t reset_lock; static wait_queue_head_t reset_wait; int msm_gemini_core_reset(uint8_t op_mode, void *base, int size) { unsigned long flags; int rc = 0; int tm = 500; /*500ms*/ memset(&fe_pingpong_buf, 0, sizeof(fe_pingpong_buf)); fe_pingpong_buf.is_fe = 1; we_pingpong_index = 0; memset(&we_pingpong_buf, 0, sizeof(we_pingpong_buf)); spin_lock_irqsave(&reset_lock, flags); reset_done_ack = 0; msm_gemini_hw_reset(base, size); spin_unlock_irqrestore(&reset_lock, flags); rc = wait_event_interruptible_timeout( reset_wait, reset_done_ack, msecs_to_jiffies(tm)); if (!reset_done_ack) { GMN_PR_ERR("%s: reset ACK failed %d", __func__, rc); return -EBUSY; } GMN_DBG("%s: reset_done_ack rc %d", __func__, rc); spin_lock_irqsave(&reset_lock, flags); reset_done_ack = 0; spin_unlock_irqrestore(&reset_lock, flags); if (op_mode == MSM_GEMINI_MODE_REALTIME_ENCODE) { /* Nothing needed for fe buffer cfg, config we only */ msm_gemini_hw_we_buffer_cfg(1); } else { /* Nothing needed for fe buffer cfg, config we only */ msm_gemini_hw_we_buffer_cfg(0); } /* @todo wait for reset done irq */ return 0; } void msm_gemini_core_release(int release_buf) { int i = 0; for (i = 0; i < 2; i++) { if (we_pingpong_buf.buf_status[i] && release_buf) msm_gemini_platform_p2v(we_pingpong_buf.buf[i].file); we_pingpong_buf.buf_status[i] = 0; } } void msm_gemini_core_init(void) { init_waitqueue_head(&reset_wait); spin_lock_init(&reset_lock); } int msm_gemini_core_fe_start(void) { msm_gemini_hw_fe_start(); return 0; } /* fetch engine */ int msm_gemini_core_fe_buf_update(struct msm_gemini_core_buf *buf) { GMN_DBG("%s:%d] 0x%08x %d 0x%08x %d\n", __func__, __LINE__, (int) buf->y_buffer_addr, buf->y_len, (int) buf->cbcr_buffer_addr, buf->cbcr_len); return msm_gemini_hw_pingpong_update(&fe_pingpong_buf, buf); } void *msm_gemini_core_fe_pingpong_irq(int gemini_irq_status, void *context) { pr_info("[CAM] %s:%d]\n", __func__, __LINE__); return msm_gemini_hw_pingpong_irq(&fe_pingpong_buf); } /* write engine */ int msm_gemini_core_we_buf_update(struct msm_gemini_core_buf *buf) { int rc; GMN_DBG("%s:%d] 0x%08x 0x%08x %d\n", __func__, __LINE__, (int) buf->y_buffer_addr, (int) buf->cbcr_buffer_addr, buf->y_len); we_pingpong_buf.buf_status[we_pingpong_index] = 0; we_pingpong_index = (we_pingpong_index + 1)%2; rc = msm_gemini_hw_pingpong_update(&we_pingpong_buf, buf); return 0; } int msm_gemini_core_we_buf_reset(struct msm_gemini_hw_buf *buf) { int i = 0; for (i = 0; i < 2; i++) { if (we_pingpong_buf.buf[i].y_buffer_addr == buf->y_buffer_addr) we_pingpong_buf.buf_status[i] = 0; } return 0; } void *msm_gemini_core_we_pingpong_irq(int gemini_irq_status, void *context) { pr_info("[CAM] 
%s:%d]\n", __func__, __LINE__); return msm_gemini_hw_pingpong_irq(&we_pingpong_buf); } void *msm_gemini_core_framedone_irq(int gemini_irq_status, void *context) { struct msm_gemini_hw_buf *buf_p; pr_info("[CAM] %s:%d]\n", __func__, __LINE__); buf_p = msm_gemini_hw_pingpong_active_buffer(&we_pingpong_buf); if (buf_p) { buf_p->framedone_len = msm_gemini_hw_encode_output_size(); GMN_DBG("%s:%d] framedone_len %d\n", __func__, __LINE__, buf_p->framedone_len); } return buf_p; } void *msm_gemini_core_reset_ack_irq(int gemini_irq_status, void *context) { /* @todo return the status back to msm_gemini_core_reset */ pr_info("[CAM] %s:%d]\n", __func__, __LINE__); return NULL; } void *msm_gemini_core_err_irq(int gemini_irq_status, void *context) { GMN_PR_ERR("%s:%d]\n", __func__, gemini_irq_status); return NULL; } static int (*msm_gemini_irq_handler) (int, void *, void *); irqreturn_t msm_gemini_core_irq(int irq_num, void *context) { void *data = NULL; unsigned long flags; int gemini_irq_status; GMN_DBG("%s:%d] irq_num = %d\n", __func__, __LINE__, irq_num); gemini_irq_status = msm_gemini_hw_irq_get_status(); GMN_DBG("%s:%d] gemini_irq_status = %0x\n", __func__, __LINE__, gemini_irq_status); /*For reset and framedone IRQs, clear all bits*/ if (gemini_irq_status & 0x400) { msm_gemini_hw_irq_clear(HWIO_JPEG_IRQ_CLEAR_RMSK, JPEG_IRQ_CLEAR_ALL); spin_lock_irqsave(&reset_lock, flags); reset_done_ack = 1; spin_unlock_irqrestore(&reset_lock, flags); wake_up(&reset_wait); } else if (gemini_irq_status & 0x1) { msm_gemini_hw_irq_clear(HWIO_JPEG_IRQ_CLEAR_RMSK, JPEG_IRQ_CLEAR_ALL); } else { msm_gemini_hw_irq_clear(HWIO_JPEG_IRQ_CLEAR_RMSK, gemini_irq_status); } if (msm_gemini_hw_irq_is_frame_done(gemini_irq_status)) { data = msm_gemini_core_framedone_irq(gemini_irq_status, context); if (msm_gemini_irq_handler) msm_gemini_irq_handler( MSM_GEMINI_HW_MASK_COMP_FRAMEDONE, context, data); } if (msm_gemini_hw_irq_is_fe_pingpong(gemini_irq_status)) { data = msm_gemini_core_fe_pingpong_irq(gemini_irq_status, context); if (msm_gemini_irq_handler) msm_gemini_irq_handler(MSM_GEMINI_HW_MASK_COMP_FE, context, data); } if (msm_gemini_hw_irq_is_we_pingpong(gemini_irq_status) && !msm_gemini_hw_irq_is_frame_done(gemini_irq_status)) { data = msm_gemini_core_we_pingpong_irq(gemini_irq_status, context); if (msm_gemini_irq_handler) msm_gemini_irq_handler(MSM_GEMINI_HW_MASK_COMP_WE, context, data); } if (msm_gemini_hw_irq_is_reset_ack(gemini_irq_status)) { data = msm_gemini_core_reset_ack_irq(gemini_irq_status, context); if (msm_gemini_irq_handler) msm_gemini_irq_handler( MSM_GEMINI_HW_MASK_COMP_RESET_ACK, context, data); } /* Unexpected/unintended HW interrupt */ if (msm_gemini_hw_irq_is_err(gemini_irq_status)) { data = msm_gemini_core_err_irq(gemini_irq_status, context); if (msm_gemini_irq_handler) msm_gemini_irq_handler(MSM_GEMINI_HW_MASK_COMP_ERR, context, data); } return IRQ_HANDLED; } void msm_gemini_core_irq_install(int (*irq_handler) (int, void *, void *)) { msm_gemini_irq_handler = irq_handler; } void msm_gemini_core_irq_remove(void) { msm_gemini_irq_handler = NULL; }
gpl-2.0