repo_name
string
path
string
copies
string
size
string
content
string
license
string
OpenInkpot-archive/linux-2.6
kernel/trace/trace_sched_wakeup.c
314
14083
/* * trace task wakeup timings * * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> * * Based on code from the latency_tracer, that is: * * Copyright (C) 2004-2006 Ingo Molnar * Copyright (C) 2004 William Lee Irwin III */ #include <linux/module.h> #include <linux/fs.h> #include <linux/debugfs.h> #include <linux/kallsyms.h> #include <linux/uaccess.h> #include <linux/ftrace.h> #include <trace/events/sched.h> #include "trace.h" static struct trace_array *wakeup_trace; static int __read_mostly tracer_enabled; static struct task_struct *wakeup_task; static int wakeup_cpu; static int wakeup_current_cpu; static unsigned wakeup_prio = -1; static int wakeup_rt; static arch_spinlock_t wakeup_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static void wakeup_reset(struct trace_array *tr); static void __wakeup_reset(struct trace_array *tr); static int wakeup_graph_entry(struct ftrace_graph_ent *trace); static void wakeup_graph_return(struct ftrace_graph_ret *trace); static int save_lat_flag; #define TRACE_DISPLAY_GRAPH 1 static struct tracer_opt trace_opts[] = { #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* display latency trace as call graph */ { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) }, #endif { } /* Empty entry */ }; static struct tracer_flags tracer_flags = { .val = 0, .opts = trace_opts, }; #define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH) #ifdef CONFIG_FUNCTION_TRACER /* * Prologue for the wakeup function tracers. * * Returns 1 if it is OK to continue, and preemption * is disabled and data->disabled is incremented. * 0 if the trace is to be ignored, and preemption * is not disabled and data->disabled is * kept the same. * * Note, this function is also used outside this ifdef but * inside the #ifdef of the function graph tracer below. * This is OK, since the function graph tracer is * dependent on the function tracer. 
*/ static int func_prolog_preempt_disable(struct trace_array *tr, struct trace_array_cpu **data, int *pc) { long disabled; int cpu; if (likely(!wakeup_task)) return 0; *pc = preempt_count(); preempt_disable_notrace(); cpu = raw_smp_processor_id(); if (cpu != wakeup_current_cpu) goto out_enable; *data = tr->data[cpu]; disabled = atomic_inc_return(&(*data)->disabled); if (unlikely(disabled != 1)) goto out; return 1; out: atomic_dec(&(*data)->disabled); out_enable: preempt_enable_notrace(); return 0; } /* * wakeup uses its own tracer function to keep the overhead down: */ static void wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) { struct trace_array *tr = wakeup_trace; struct trace_array_cpu *data; unsigned long flags; int pc; if (!func_prolog_preempt_disable(tr, &data, &pc)) return; local_irq_save(flags); trace_function(tr, ip, parent_ip, flags, pc); local_irq_restore(flags); atomic_dec(&data->disabled); preempt_enable_notrace(); } static struct ftrace_ops trace_ops __read_mostly = { .func = wakeup_tracer_call, }; #endif /* CONFIG_FUNCTION_TRACER */ static int start_func_tracer(int graph) { int ret; if (!graph) ret = register_ftrace_function(&trace_ops); else ret = register_ftrace_graph(&wakeup_graph_return, &wakeup_graph_entry); if (!ret && tracing_is_enabled()) tracer_enabled = 1; else tracer_enabled = 0; return ret; } static void stop_func_tracer(int graph) { tracer_enabled = 0; if (!graph) unregister_ftrace_function(&trace_ops); else unregister_ftrace_graph(); } #ifdef CONFIG_FUNCTION_GRAPH_TRACER static int wakeup_set_flag(u32 old_flags, u32 bit, int set) { if (!(bit & TRACE_DISPLAY_GRAPH)) return -EINVAL; if (!(is_graph() ^ set)) return 0; stop_func_tracer(!set); wakeup_reset(wakeup_trace); tracing_max_latency = 0; return start_func_tracer(set); } static int wakeup_graph_entry(struct ftrace_graph_ent *trace) { struct trace_array *tr = wakeup_trace; struct trace_array_cpu *data; unsigned long flags; int pc, ret = 0; if 
(!func_prolog_preempt_disable(tr, &data, &pc)) return 0; local_save_flags(flags); ret = __trace_graph_entry(tr, trace, flags, pc); atomic_dec(&data->disabled); preempt_enable_notrace(); return ret; } static void wakeup_graph_return(struct ftrace_graph_ret *trace) { struct trace_array *tr = wakeup_trace; struct trace_array_cpu *data; unsigned long flags; int pc; if (!func_prolog_preempt_disable(tr, &data, &pc)) return; local_save_flags(flags); __trace_graph_return(tr, trace, flags, pc); atomic_dec(&data->disabled); preempt_enable_notrace(); return; } static void wakeup_trace_open(struct trace_iterator *iter) { if (is_graph()) graph_trace_open(iter); } static void wakeup_trace_close(struct trace_iterator *iter) { if (iter->private) graph_trace_close(iter); } #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC) static enum print_line_t wakeup_print_line(struct trace_iterator *iter) { /* * In graph mode call the graph tracer output function, * otherwise go with the TRACE_FN event handler */ if (is_graph()) return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS); return TRACE_TYPE_UNHANDLED; } static void wakeup_print_header(struct seq_file *s) { if (is_graph()) print_graph_headers_flags(s, GRAPH_TRACER_FLAGS); else trace_default_header(s); } static void __trace_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc) { if (is_graph()) trace_graph_function(tr, ip, parent_ip, flags, pc); else trace_function(tr, ip, parent_ip, flags, pc); } #else #define __trace_function trace_function static int wakeup_set_flag(u32 old_flags, u32 bit, int set) { return -EINVAL; } static int wakeup_graph_entry(struct ftrace_graph_ent *trace) { return -1; } static enum print_line_t wakeup_print_line(struct trace_iterator *iter) { return TRACE_TYPE_UNHANDLED; } static void wakeup_graph_return(struct ftrace_graph_ret *trace) { } static void wakeup_print_header(struct seq_file *s) { } static void wakeup_trace_open(struct trace_iterator *iter) 
{ } static void wakeup_trace_close(struct trace_iterator *iter) { } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ /* * Should this new latency be reported/recorded? */ static int report_latency(cycle_t delta) { if (tracing_thresh) { if (delta < tracing_thresh) return 0; } else { if (delta <= tracing_max_latency) return 0; } return 1; } static void probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu) { if (task != wakeup_task) return; wakeup_current_cpu = cpu; } static void notrace probe_wakeup_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next) { struct trace_array_cpu *data; cycle_t T0, T1, delta; unsigned long flags; long disabled; int cpu; int pc; tracing_record_cmdline(prev); if (unlikely(!tracer_enabled)) return; /* * When we start a new trace, we set wakeup_task to NULL * and then set tracer_enabled = 1. We want to make sure * that another CPU does not see the tracer_enabled = 1 * and the wakeup_task with an older task, that might * actually be the same as next. 
*/ smp_rmb(); if (next != wakeup_task) return; pc = preempt_count(); /* disable local data, not wakeup_cpu data */ cpu = raw_smp_processor_id(); disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled); if (likely(disabled != 1)) goto out; local_irq_save(flags); arch_spin_lock(&wakeup_lock); /* We could race with grabbing wakeup_lock */ if (unlikely(!tracer_enabled || next != wakeup_task)) goto out_unlock; /* The task we are waiting for is waking up */ data = wakeup_trace->data[wakeup_cpu]; __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); T0 = data->preempt_timestamp; T1 = ftrace_now(cpu); delta = T1-T0; if (!report_latency(delta)) goto out_unlock; if (likely(!is_tracing_stopped())) { tracing_max_latency = delta; update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu); } out_unlock: __wakeup_reset(wakeup_trace); arch_spin_unlock(&wakeup_lock); local_irq_restore(flags); out: atomic_dec(&wakeup_trace->data[cpu]->disabled); } static void __wakeup_reset(struct trace_array *tr) { wakeup_cpu = -1; wakeup_prio = -1; if (wakeup_task) put_task_struct(wakeup_task); wakeup_task = NULL; } static void wakeup_reset(struct trace_array *tr) { unsigned long flags; tracing_reset_online_cpus(tr); local_irq_save(flags); arch_spin_lock(&wakeup_lock); __wakeup_reset(tr); arch_spin_unlock(&wakeup_lock); local_irq_restore(flags); } static void probe_wakeup(void *ignore, struct task_struct *p, int success) { struct trace_array_cpu *data; int cpu = smp_processor_id(); unsigned long flags; long disabled; int pc; if (likely(!tracer_enabled)) return; tracing_record_cmdline(p); tracing_record_cmdline(current); if ((wakeup_rt && !rt_task(p)) || p->prio >= wakeup_prio || p->prio >= current->prio) return; pc = preempt_count(); disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled); if (unlikely(disabled != 1)) goto out; /* interrupts should be off from try_to_wake_up */ 
arch_spin_lock(&wakeup_lock); /* check for races. */ if (!tracer_enabled || p->prio >= wakeup_prio) goto out_locked; /* reset the trace */ __wakeup_reset(wakeup_trace); wakeup_cpu = task_cpu(p); wakeup_current_cpu = wakeup_cpu; wakeup_prio = p->prio; wakeup_task = p; get_task_struct(wakeup_task); local_save_flags(flags); data = wakeup_trace->data[wakeup_cpu]; data->preempt_timestamp = ftrace_now(cpu); tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc); /* * We must be careful in using CALLER_ADDR2. But since wake_up * is not called by an assembly function (where as schedule is) * it should be safe to use it here. */ __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); out_locked: arch_spin_unlock(&wakeup_lock); out: atomic_dec(&wakeup_trace->data[cpu]->disabled); } static void start_wakeup_tracer(struct trace_array *tr) { int ret; ret = register_trace_sched_wakeup(probe_wakeup, NULL); if (ret) { pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_wakeup\n"); return; } ret = register_trace_sched_wakeup_new(probe_wakeup, NULL); if (ret) { pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_wakeup_new\n"); goto fail_deprobe; } ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL); if (ret) { pr_info("sched trace: Couldn't activate tracepoint" " probe to kernel_sched_switch\n"); goto fail_deprobe_wake_new; } ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL); if (ret) { pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_migrate_task\n"); return; } wakeup_reset(tr); /* * Don't let the tracer_enabled = 1 show up before * the wakeup_task is reset. This may be overkill since * wakeup_reset does a spin_unlock after setting the * wakeup_task to NULL, but I want to be safe. * This is a slow path anyway. 
*/ smp_wmb(); if (start_func_tracer(is_graph())) printk(KERN_ERR "failed to start wakeup tracer\n"); return; fail_deprobe_wake_new: unregister_trace_sched_wakeup_new(probe_wakeup, NULL); fail_deprobe: unregister_trace_sched_wakeup(probe_wakeup, NULL); } static void stop_wakeup_tracer(struct trace_array *tr) { tracer_enabled = 0; stop_func_tracer(is_graph()); unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL); unregister_trace_sched_wakeup_new(probe_wakeup, NULL); unregister_trace_sched_wakeup(probe_wakeup, NULL); unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL); } static int __wakeup_tracer_init(struct trace_array *tr) { save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; trace_flags |= TRACE_ITER_LATENCY_FMT; tracing_max_latency = 0; wakeup_trace = tr; start_wakeup_tracer(tr); return 0; } static int wakeup_tracer_init(struct trace_array *tr) { wakeup_rt = 0; return __wakeup_tracer_init(tr); } static int wakeup_rt_tracer_init(struct trace_array *tr) { wakeup_rt = 1; return __wakeup_tracer_init(tr); } static void wakeup_tracer_reset(struct trace_array *tr) { stop_wakeup_tracer(tr); /* make sure we put back any tasks we are tracing */ wakeup_reset(tr); if (!save_lat_flag) trace_flags &= ~TRACE_ITER_LATENCY_FMT; } static void wakeup_tracer_start(struct trace_array *tr) { wakeup_reset(tr); tracer_enabled = 1; } static void wakeup_tracer_stop(struct trace_array *tr) { tracer_enabled = 0; } static struct tracer wakeup_tracer __read_mostly = { .name = "wakeup", .init = wakeup_tracer_init, .reset = wakeup_tracer_reset, .start = wakeup_tracer_start, .stop = wakeup_tracer_stop, .print_max = 1, .print_header = wakeup_print_header, .print_line = wakeup_print_line, .flags = &tracer_flags, .set_flag = wakeup_set_flag, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_wakeup, #endif .open = wakeup_trace_open, .close = wakeup_trace_close, .use_max_tr = 1, }; static struct tracer wakeup_rt_tracer __read_mostly = { .name = 
"wakeup_rt", .init = wakeup_rt_tracer_init, .reset = wakeup_tracer_reset, .start = wakeup_tracer_start, .stop = wakeup_tracer_stop, .wait_pipe = poll_wait_pipe, .print_max = 1, .print_header = wakeup_print_header, .print_line = wakeup_print_line, .flags = &tracer_flags, .set_flag = wakeup_set_flag, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_wakeup, #endif .open = wakeup_trace_open, .close = wakeup_trace_close, .use_max_tr = 1, }; __init static int init_wakeup_tracer(void) { int ret; ret = register_tracer(&wakeup_tracer); if (ret) return ret; ret = register_tracer(&wakeup_rt_tracer); if (ret) return ret; return 0; } device_initcall(init_wakeup_tracer);
gpl-2.0
emceethemouth/kernel_mainline
drivers/pci/hotplug/cpcihp_generic.c
314
6456
/* * cpcihp_generic.c * * Generic port I/O CompactPCI driver * * Copyright 2002 SOMA Networks, Inc. * Copyright 2001 Intel San Luis Obispo * Copyright 2000,2001 MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * * This generic CompactPCI hotplug driver should allow using the PCI hotplug * mechanism on any CompactPCI board that exposes the #ENUM signal as a bit * in a system register that can be read through standard port I/O. 
* * Send feedback to <scottm@somanetworks.com> */ #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/string.h> #include "cpci_hotplug.h" #define DRIVER_VERSION "0.1" #define DRIVER_AUTHOR "Scott Murray <scottm@somanetworks.com>" #define DRIVER_DESC "Generic port I/O CompactPCI Hot Plug Driver" #if !defined(MODULE) #define MY_NAME "cpcihp_generic" #else #define MY_NAME THIS_MODULE->name #endif #define dbg(format, arg...) \ do { \ if (debug) \ printk(KERN_DEBUG "%s: " format "\n", \ MY_NAME, ## arg); \ } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME, ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME, ## arg) #define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME, ## arg) /* local variables */ static bool debug; static char *bridge; static u8 bridge_busnr; static u8 bridge_slot; static struct pci_bus *bus; static u8 first_slot; static u8 last_slot; static u16 port; static unsigned int enum_bit; static u8 enum_mask; static struct cpci_hp_controller_ops generic_hpc_ops; static struct cpci_hp_controller generic_hpc; static int __init validate_parameters(void) { char *str; char *p; unsigned long tmp; if (!bridge) { info("not configured, disabling."); return -EINVAL; } str = bridge; if (!*str) return -EINVAL; tmp = simple_strtoul(str, &p, 16); if (p == str || tmp > 0xff) { err("Invalid hotplug bus bridge device bus number"); return -EINVAL; } bridge_busnr = (u8) tmp; dbg("bridge_busnr = 0x%02x", bridge_busnr); if (*p != ':') { err("Invalid hotplug bus bridge device"); return -EINVAL; } str = p + 1; tmp = simple_strtoul(str, &p, 16); if (p == str || tmp > 0x1f) { err("Invalid hotplug bus bridge device slot number"); return -EINVAL; } bridge_slot = (u8) tmp; dbg("bridge_slot = 0x%02x", bridge_slot); dbg("first_slot = 0x%02x", first_slot); dbg("last_slot = 0x%02x", last_slot); if (!(first_slot && last_slot)) { err("Need to 
specify first_slot and last_slot"); return -EINVAL; } if (last_slot < first_slot) { err("first_slot must be less than last_slot"); return -EINVAL; } dbg("port = 0x%04x", port); dbg("enum_bit = 0x%02x", enum_bit); if (enum_bit > 7) { err("Invalid #ENUM bit"); return -EINVAL; } enum_mask = 1 << enum_bit; return 0; } static int query_enum(void) { u8 value; value = inb_p(port); return ((value & enum_mask) == enum_mask); } static int __init cpcihp_generic_init(void) { int status; struct resource *r; struct pci_dev *dev; info(DRIVER_DESC " version: " DRIVER_VERSION); status = validate_parameters(); if (status) return status; r = request_region(port, 1, "#ENUM hotswap signal register"); if (!r) return -EBUSY; dev = pci_get_domain_bus_and_slot(0, bridge_busnr, PCI_DEVFN(bridge_slot, 0)); if (!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { err("Invalid bridge device %s", bridge); pci_dev_put(dev); return -EINVAL; } bus = dev->subordinate; pci_dev_put(dev); memset(&generic_hpc, 0, sizeof(struct cpci_hp_controller)); generic_hpc_ops.query_enum = query_enum; generic_hpc.ops = &generic_hpc_ops; status = cpci_hp_register_controller(&generic_hpc); if (status != 0) { err("Could not register cPCI hotplug controller"); return -ENODEV; } dbg("registered controller"); status = cpci_hp_register_bus(bus, first_slot, last_slot); if (status != 0) { err("Could not register cPCI hotplug bus"); goto init_bus_register_error; } dbg("registered bus"); status = cpci_hp_start(); if (status != 0) { err("Could not started cPCI hotplug system"); goto init_start_error; } dbg("started cpci hp system"); return 0; init_start_error: cpci_hp_unregister_bus(bus); init_bus_register_error: cpci_hp_unregister_controller(&generic_hpc); err("status = %d", status); return status; } static void __exit cpcihp_generic_exit(void) { cpci_hp_stop(); cpci_hp_unregister_bus(bus); cpci_hp_unregister_controller(&generic_hpc); release_region(port, 1); } module_init(cpcihp_generic_init); module_exit(cpcihp_generic_exit); 
MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); module_param(bridge, charp, 0); MODULE_PARM_DESC(bridge, "Hotswap bus bridge device, <bus>:<slot> (bus and slot are in hexadecimal)"); module_param(first_slot, byte, 0); MODULE_PARM_DESC(first_slot, "Hotswap bus first slot number"); module_param(last_slot, byte, 0); MODULE_PARM_DESC(last_slot, "Hotswap bus last slot number"); module_param(port, ushort, 0); MODULE_PARM_DESC(port, "#ENUM signal I/O port"); module_param(enum_bit, uint, 0); MODULE_PARM_DESC(enum_bit, "#ENUM signal bit (0-7)");
gpl-2.0
keiranFTW/semc-kernel-msm7x30
drivers/atm/idt77105.c
826
11632
/* drivers/atm/idt77105.c - IDT77105 (PHY) driver */ /* Written 1999 by Greg Banks, NEC Australia <gnb@linuxfan.com>. Based on suni.c */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/atmdev.h> #include <linux/sonet.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/capability.h> #include <linux/atm_idt77105.h> #include <linux/spinlock.h> #include <asm/system.h> #include <asm/param.h> #include <asm/uaccess.h> #include "idt77105.h" #undef GENERAL_DEBUG #ifdef GENERAL_DEBUG #define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) #else #define DPRINTK(format,args...) #endif struct idt77105_priv { struct idt77105_stats stats; /* link diagnostics */ struct atm_dev *dev; /* device back-pointer */ struct idt77105_priv *next; int loop_mode; unsigned char old_mcr; /* storage of MCR reg while signal lost */ }; static DEFINE_SPINLOCK(idt77105_priv_lock); #define PRIV(dev) ((struct idt77105_priv *) dev->phy_data) #define PUT(val,reg) dev->ops->phy_put(dev,val,IDT77105_##reg) #define GET(reg) dev->ops->phy_get(dev,IDT77105_##reg) static void idt77105_stats_timer_func(unsigned long); static void idt77105_restart_timer_func(unsigned long); static DEFINE_TIMER(stats_timer, idt77105_stats_timer_func, 0, 0); static DEFINE_TIMER(restart_timer, idt77105_restart_timer_func, 0, 0); static int start_timer = 1; static struct idt77105_priv *idt77105_all = NULL; /* * Retrieve the value of one of the IDT77105's counters. * `counter' is one of the IDT77105_CTRSEL_* constants. */ static u16 get_counter(struct atm_dev *dev, int counter) { u16 val; /* write the counter bit into PHY register 6 */ PUT(counter, CTRSEL); /* read the low 8 bits from register 4 */ val = GET(CTRLO); /* read the high 8 bits from register 5 */ val |= GET(CTRHI)<<8; return val; } /* * Timer function called every second to gather statistics * from the 77105. 
This is done because the h/w registers * will overflow if not read at least once per second. The * kernel's stats are much higher precision. Also, having * a separate copy of the stats allows implementation of * an ioctl which gathers the stats *without* zero'ing them. */ static void idt77105_stats_timer_func(unsigned long dummy) { struct idt77105_priv *walk; struct atm_dev *dev; struct idt77105_stats *stats; DPRINTK("IDT77105 gathering statistics\n"); for (walk = idt77105_all; walk; walk = walk->next) { dev = walk->dev; stats = &walk->stats; stats->symbol_errors += get_counter(dev, IDT77105_CTRSEL_SEC); stats->tx_cells += get_counter(dev, IDT77105_CTRSEL_TCC); stats->rx_cells += get_counter(dev, IDT77105_CTRSEL_RCC); stats->rx_hec_errors += get_counter(dev, IDT77105_CTRSEL_RHEC); } if (!start_timer) mod_timer(&stats_timer,jiffies+IDT77105_STATS_TIMER_PERIOD); } /* * A separate timer func which handles restarting PHY chips which * have had the cable re-inserted after being pulled out. This is * done by polling the Good Signal Bit in the Interrupt Status * register every 5 seconds. The other technique (checking Good * Signal Bit in the interrupt handler) cannot be used because PHY * interrupts need to be disabled when the cable is pulled out * to avoid lots of spurious cell error interrupts. 
*/ static void idt77105_restart_timer_func(unsigned long dummy) { struct idt77105_priv *walk; struct atm_dev *dev; unsigned char istat; DPRINTK("IDT77105 checking for cable re-insertion\n"); for (walk = idt77105_all; walk; walk = walk->next) { dev = walk->dev; if (dev->signal != ATM_PHY_SIG_LOST) continue; istat = GET(ISTAT); /* side effect: clears all interrupt status bits */ if (istat & IDT77105_ISTAT_GOODSIG) { /* Found signal again */ dev->signal = ATM_PHY_SIG_FOUND; printk(KERN_NOTICE "%s(itf %d): signal detected again\n", dev->type,dev->number); /* flush the receive FIFO */ PUT( GET(DIAG) | IDT77105_DIAG_RFLUSH, DIAG); /* re-enable interrupts */ PUT( walk->old_mcr ,MCR); } } if (!start_timer) mod_timer(&restart_timer,jiffies+IDT77105_RESTART_TIMER_PERIOD); } static int fetch_stats(struct atm_dev *dev,struct idt77105_stats __user *arg,int zero) { unsigned long flags; struct idt77105_stats stats; spin_lock_irqsave(&idt77105_priv_lock, flags); memcpy(&stats, &PRIV(dev)->stats, sizeof(struct idt77105_stats)); if (zero) memset(&PRIV(dev)->stats, 0, sizeof(struct idt77105_stats)); spin_unlock_irqrestore(&idt77105_priv_lock, flags); if (arg == NULL) return 0; return copy_to_user(arg, &PRIV(dev)->stats, sizeof(struct idt77105_stats)) ? -EFAULT : 0; } static int set_loopback(struct atm_dev *dev,int mode) { int diag; diag = GET(DIAG) & ~IDT77105_DIAG_LCMASK; switch (mode) { case ATM_LM_NONE: break; case ATM_LM_LOC_ATM: diag |= IDT77105_DIAG_LC_PHY_LOOPBACK; break; case ATM_LM_RMT_ATM: diag |= IDT77105_DIAG_LC_LINE_LOOPBACK; break; default: return -EINVAL; } PUT(diag,DIAG); printk(KERN_NOTICE "%s(%d) Loopback mode is: %s\n", dev->type, dev->number, (mode == ATM_LM_NONE ? "NONE" : (mode == ATM_LM_LOC_ATM ? "DIAG (local)" : (mode == IDT77105_DIAG_LC_LINE_LOOPBACK ? 
"LOOP (remote)" : "unknown"))) ); PRIV(dev)->loop_mode = mode; return 0; } static int idt77105_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) { printk(KERN_NOTICE "%s(%d) idt77105_ioctl() called\n",dev->type,dev->number); switch (cmd) { case IDT77105_GETSTATZ: if (!capable(CAP_NET_ADMIN)) return -EPERM; /* fall through */ case IDT77105_GETSTAT: return fetch_stats(dev, arg, cmd == IDT77105_GETSTATZ); case ATM_SETLOOP: return set_loopback(dev,(int)(unsigned long) arg); case ATM_GETLOOP: return put_user(PRIV(dev)->loop_mode,(int __user *)arg) ? -EFAULT : 0; case ATM_QUERYLOOP: return put_user(ATM_LM_LOC_ATM | ATM_LM_RMT_ATM, (int __user *) arg) ? -EFAULT : 0; default: return -ENOIOCTLCMD; } } static void idt77105_int(struct atm_dev *dev) { unsigned char istat; istat = GET(ISTAT); /* side effect: clears all interrupt status bits */ DPRINTK("IDT77105 generated an interrupt, istat=%02x\n", (unsigned)istat); if (istat & IDT77105_ISTAT_RSCC) { /* Rx Signal Condition Change - line went up or down */ if (istat & IDT77105_ISTAT_GOODSIG) { /* signal detected again */ /* This should not happen (restart timer does it) but JIC */ dev->signal = ATM_PHY_SIG_FOUND; } else { /* signal lost */ /* * Disable interrupts and stop all transmission and * reception - the restart timer will restore these. 
*/ PRIV(dev)->old_mcr = GET(MCR); PUT( (PRIV(dev)->old_mcr| IDT77105_MCR_DREC| IDT77105_MCR_DRIC| IDT77105_MCR_HALTTX ) & ~IDT77105_MCR_EIP, MCR); dev->signal = ATM_PHY_SIG_LOST; printk(KERN_NOTICE "%s(itf %d): signal lost\n", dev->type,dev->number); } } if (istat & IDT77105_ISTAT_RFO) { /* Rx FIFO Overrun -- perform a FIFO flush */ PUT( GET(DIAG) | IDT77105_DIAG_RFLUSH, DIAG); printk(KERN_NOTICE "%s(itf %d): receive FIFO overrun\n", dev->type,dev->number); } #ifdef GENERAL_DEBUG if (istat & (IDT77105_ISTAT_HECERR | IDT77105_ISTAT_SCR | IDT77105_ISTAT_RSE)) { /* normally don't care - just report in stats */ printk(KERN_NOTICE "%s(itf %d): received cell with error\n", dev->type,dev->number); } #endif } static int idt77105_start(struct atm_dev *dev) { unsigned long flags; if (!(dev->dev_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL))) return -ENOMEM; PRIV(dev)->dev = dev; spin_lock_irqsave(&idt77105_priv_lock, flags); PRIV(dev)->next = idt77105_all; idt77105_all = PRIV(dev); spin_unlock_irqrestore(&idt77105_priv_lock, flags); memset(&PRIV(dev)->stats,0,sizeof(struct idt77105_stats)); /* initialise dev->signal from Good Signal Bit */ dev->signal = GET(ISTAT) & IDT77105_ISTAT_GOODSIG ? ATM_PHY_SIG_FOUND : ATM_PHY_SIG_LOST; if (dev->signal == ATM_PHY_SIG_LOST) printk(KERN_WARNING "%s(itf %d): no signal\n",dev->type, dev->number); /* initialise loop mode from hardware */ switch ( GET(DIAG) & IDT77105_DIAG_LCMASK ) { case IDT77105_DIAG_LC_NORMAL: PRIV(dev)->loop_mode = ATM_LM_NONE; break; case IDT77105_DIAG_LC_PHY_LOOPBACK: PRIV(dev)->loop_mode = ATM_LM_LOC_ATM; break; case IDT77105_DIAG_LC_LINE_LOOPBACK: PRIV(dev)->loop_mode = ATM_LM_RMT_ATM; break; } /* enable interrupts, e.g. 
on loss of signal */ PRIV(dev)->old_mcr = GET(MCR); if (dev->signal == ATM_PHY_SIG_FOUND) { PRIV(dev)->old_mcr |= IDT77105_MCR_EIP; PUT(PRIV(dev)->old_mcr, MCR); } idt77105_stats_timer_func(0); /* clear 77105 counters */ (void) fetch_stats(dev,NULL,1); /* clear kernel counters */ spin_lock_irqsave(&idt77105_priv_lock, flags); if (start_timer) { start_timer = 0; init_timer(&stats_timer); stats_timer.expires = jiffies+IDT77105_STATS_TIMER_PERIOD; stats_timer.function = idt77105_stats_timer_func; add_timer(&stats_timer); init_timer(&restart_timer); restart_timer.expires = jiffies+IDT77105_RESTART_TIMER_PERIOD; restart_timer.function = idt77105_restart_timer_func; add_timer(&restart_timer); } spin_unlock_irqrestore(&idt77105_priv_lock, flags); return 0; } static int idt77105_stop(struct atm_dev *dev) { struct idt77105_priv *walk, *prev; DPRINTK("%s(itf %d): stopping IDT77105\n",dev->type,dev->number); /* disable interrupts */ PUT( GET(MCR) & ~IDT77105_MCR_EIP, MCR ); /* detach private struct from atm_dev & free */ for (prev = NULL, walk = idt77105_all ; walk != NULL; prev = walk, walk = walk->next) { if (walk->dev == dev) { if (prev != NULL) prev->next = walk->next; else idt77105_all = walk->next; dev->phy = NULL; dev->dev_data = NULL; kfree(walk); break; } } return 0; } static const struct atmphy_ops idt77105_ops = { .start = idt77105_start, .ioctl = idt77105_ioctl, .interrupt = idt77105_int, .stop = idt77105_stop, }; int idt77105_init(struct atm_dev *dev) { dev->phy = &idt77105_ops; return 0; } EXPORT_SYMBOL(idt77105_init); static void __exit idt77105_exit(void) { /* turn off timers */ del_timer(&stats_timer); del_timer(&restart_timer); } module_exit(idt77105_exit); MODULE_LICENSE("GPL");
gpl-2.0
Mazout360/lge-kernel-gb
net/ax25/ax25_ip.c
826
5405
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/uaccess.h> #include <asm/system.h> #include <linux/fcntl.h> #include <linux/termios.h> /* For TIOCINQ/OUTQ */ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/netfilter.h> #include <linux/sysctl.h> #include <net/ip.h> #include <net/arp.h> /* * IP over AX.25 encapsulation. */ /* * Shove an AX.25 UI header on an IP packet and handle ARP */ #ifdef CONFIG_INET int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { unsigned char *buff; /* they sometimes come back to us... 
*/ if (type == ETH_P_AX25) return 0; /* header is an AX.25 UI frame from us to them */ buff = skb_push(skb, AX25_HEADER_LEN); *buff++ = 0x00; /* KISS DATA */ if (daddr != NULL) memcpy(buff, daddr, dev->addr_len); /* Address specified */ buff[6] &= ~AX25_CBIT; buff[6] &= ~AX25_EBIT; buff[6] |= AX25_SSSID_SPARE; buff += AX25_ADDR_LEN; if (saddr != NULL) memcpy(buff, saddr, dev->addr_len); else memcpy(buff, dev->dev_addr, dev->addr_len); buff[6] &= ~AX25_CBIT; buff[6] |= AX25_EBIT; buff[6] |= AX25_SSSID_SPARE; buff += AX25_ADDR_LEN; *buff++ = AX25_UI; /* UI */ /* Append a suitable AX.25 PID */ switch (type) { case ETH_P_IP: *buff++ = AX25_P_IP; break; case ETH_P_ARP: *buff++ = AX25_P_ARP; break; default: printk(KERN_ERR "AX.25: ax25_hard_header - wrong protocol type 0x%2.2x\n", type); *buff++ = 0; break; } if (daddr != NULL) return AX25_HEADER_LEN; return -AX25_HEADER_LEN; /* Unfinished header */ } int ax25_rebuild_header(struct sk_buff *skb) { struct sk_buff *ourskb; unsigned char *bp = skb->data; ax25_route *route; struct net_device *dev = NULL; ax25_address *src, *dst; ax25_digi *digipeat = NULL; ax25_dev *ax25_dev; ax25_cb *ax25; char ip_mode = ' '; dst = (ax25_address *)(bp + 1); src = (ax25_address *)(bp + 8); if (arp_find(bp + 1, skb)) return 1; route = ax25_get_route(dst, NULL); if (route) { digipeat = route->digipeat; dev = route->dev; ip_mode = route->ip_mode; } if (dev == NULL) dev = skb->dev; if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) { goto put; } if (bp[16] == AX25_P_IP) { if (ip_mode == 'V' || (ip_mode == ' ' && ax25_dev->values[AX25_VALUES_IPDEFMODE])) { /* * We copy the buffer and release the original thereby * keeping it straight * * Note: we report 1 back so the caller will * not feed the frame direct to the physical device * We don't want that to happen. (It won't be upset * as we have pulled the frame from the queue by * freeing it). 
* * NB: TCP modifies buffers that are still * on a device queue, thus we use skb_copy() * instead of using skb_clone() unless this * gets fixed. */ ax25_address src_c; ax25_address dst_c; if ((ourskb = skb_copy(skb, GFP_ATOMIC)) == NULL) { kfree_skb(skb); goto put; } if (skb->sk != NULL) skb_set_owner_w(ourskb, skb->sk); kfree_skb(skb); /* dl9sau: bugfix * after kfree_skb(), dst and src which were pointer * to bp which is part of skb->data would not be valid * anymore hope that after skb_pull(ourskb, ..) our * dsc_c and src_c will not become invalid */ bp = ourskb->data; dst_c = *(ax25_address *)(bp + 1); src_c = *(ax25_address *)(bp + 8); skb_pull(ourskb, AX25_HEADER_LEN - 1); /* Keep PID */ skb_reset_network_header(ourskb); ax25=ax25_send_frame( ourskb, ax25_dev->values[AX25_VALUES_PACLEN], &src_c, &dst_c, digipeat, dev); if (ax25) { ax25_cb_put(ax25); } goto put; } } bp[7] &= ~AX25_CBIT; bp[7] &= ~AX25_EBIT; bp[7] |= AX25_SSSID_SPARE; bp[14] &= ~AX25_CBIT; bp[14] |= AX25_EBIT; bp[14] |= AX25_SSSID_SPARE; skb_pull(skb, AX25_KISS_HEADER_LEN); if (digipeat != NULL) { if ((ourskb = ax25_rt_build_path(skb, src, dst, route->digipeat)) == NULL) { kfree_skb(skb); goto put; } skb = ourskb; } ax25_queue_xmit(skb, dev); put: if (route) ax25_put_route(route); return 1; } #else /* INET */ int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { return -AX25_HEADER_LEN; } int ax25_rebuild_header(struct sk_buff *skb) { return 1; } #endif const struct header_ops ax25_header_ops = { .create = ax25_hard_header, .rebuild = ax25_rebuild_header, }; EXPORT_SYMBOL(ax25_hard_header); EXPORT_SYMBOL(ax25_rebuild_header); EXPORT_SYMBOL(ax25_header_ops);
gpl-2.0
Sikyou/s7_kernel
drivers/atm/idt77105.c
826
11632
/* drivers/atm/idt77105.c - IDT77105 (PHY) driver */ /* Written 1999 by Greg Banks, NEC Australia <gnb@linuxfan.com>. Based on suni.c */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/atmdev.h> #include <linux/sonet.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/capability.h> #include <linux/atm_idt77105.h> #include <linux/spinlock.h> #include <asm/system.h> #include <asm/param.h> #include <asm/uaccess.h> #include "idt77105.h" #undef GENERAL_DEBUG #ifdef GENERAL_DEBUG #define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) #else #define DPRINTK(format,args...) #endif struct idt77105_priv { struct idt77105_stats stats; /* link diagnostics */ struct atm_dev *dev; /* device back-pointer */ struct idt77105_priv *next; int loop_mode; unsigned char old_mcr; /* storage of MCR reg while signal lost */ }; static DEFINE_SPINLOCK(idt77105_priv_lock); #define PRIV(dev) ((struct idt77105_priv *) dev->phy_data) #define PUT(val,reg) dev->ops->phy_put(dev,val,IDT77105_##reg) #define GET(reg) dev->ops->phy_get(dev,IDT77105_##reg) static void idt77105_stats_timer_func(unsigned long); static void idt77105_restart_timer_func(unsigned long); static DEFINE_TIMER(stats_timer, idt77105_stats_timer_func, 0, 0); static DEFINE_TIMER(restart_timer, idt77105_restart_timer_func, 0, 0); static int start_timer = 1; static struct idt77105_priv *idt77105_all = NULL; /* * Retrieve the value of one of the IDT77105's counters. * `counter' is one of the IDT77105_CTRSEL_* constants. */ static u16 get_counter(struct atm_dev *dev, int counter) { u16 val; /* write the counter bit into PHY register 6 */ PUT(counter, CTRSEL); /* read the low 8 bits from register 4 */ val = GET(CTRLO); /* read the high 8 bits from register 5 */ val |= GET(CTRHI)<<8; return val; } /* * Timer function called every second to gather statistics * from the 77105. 
This is done because the h/w registers * will overflow if not read at least once per second. The * kernel's stats are much higher precision. Also, having * a separate copy of the stats allows implementation of * an ioctl which gathers the stats *without* zero'ing them. */ static void idt77105_stats_timer_func(unsigned long dummy) { struct idt77105_priv *walk; struct atm_dev *dev; struct idt77105_stats *stats; DPRINTK("IDT77105 gathering statistics\n"); for (walk = idt77105_all; walk; walk = walk->next) { dev = walk->dev; stats = &walk->stats; stats->symbol_errors += get_counter(dev, IDT77105_CTRSEL_SEC); stats->tx_cells += get_counter(dev, IDT77105_CTRSEL_TCC); stats->rx_cells += get_counter(dev, IDT77105_CTRSEL_RCC); stats->rx_hec_errors += get_counter(dev, IDT77105_CTRSEL_RHEC); } if (!start_timer) mod_timer(&stats_timer,jiffies+IDT77105_STATS_TIMER_PERIOD); } /* * A separate timer func which handles restarting PHY chips which * have had the cable re-inserted after being pulled out. This is * done by polling the Good Signal Bit in the Interrupt Status * register every 5 seconds. The other technique (checking Good * Signal Bit in the interrupt handler) cannot be used because PHY * interrupts need to be disabled when the cable is pulled out * to avoid lots of spurious cell error interrupts. 
*/ static void idt77105_restart_timer_func(unsigned long dummy) { struct idt77105_priv *walk; struct atm_dev *dev; unsigned char istat; DPRINTK("IDT77105 checking for cable re-insertion\n"); for (walk = idt77105_all; walk; walk = walk->next) { dev = walk->dev; if (dev->signal != ATM_PHY_SIG_LOST) continue; istat = GET(ISTAT); /* side effect: clears all interrupt status bits */ if (istat & IDT77105_ISTAT_GOODSIG) { /* Found signal again */ dev->signal = ATM_PHY_SIG_FOUND; printk(KERN_NOTICE "%s(itf %d): signal detected again\n", dev->type,dev->number); /* flush the receive FIFO */ PUT( GET(DIAG) | IDT77105_DIAG_RFLUSH, DIAG); /* re-enable interrupts */ PUT( walk->old_mcr ,MCR); } } if (!start_timer) mod_timer(&restart_timer,jiffies+IDT77105_RESTART_TIMER_PERIOD); } static int fetch_stats(struct atm_dev *dev,struct idt77105_stats __user *arg,int zero) { unsigned long flags; struct idt77105_stats stats; spin_lock_irqsave(&idt77105_priv_lock, flags); memcpy(&stats, &PRIV(dev)->stats, sizeof(struct idt77105_stats)); if (zero) memset(&PRIV(dev)->stats, 0, sizeof(struct idt77105_stats)); spin_unlock_irqrestore(&idt77105_priv_lock, flags); if (arg == NULL) return 0; return copy_to_user(arg, &PRIV(dev)->stats, sizeof(struct idt77105_stats)) ? -EFAULT : 0; } static int set_loopback(struct atm_dev *dev,int mode) { int diag; diag = GET(DIAG) & ~IDT77105_DIAG_LCMASK; switch (mode) { case ATM_LM_NONE: break; case ATM_LM_LOC_ATM: diag |= IDT77105_DIAG_LC_PHY_LOOPBACK; break; case ATM_LM_RMT_ATM: diag |= IDT77105_DIAG_LC_LINE_LOOPBACK; break; default: return -EINVAL; } PUT(diag,DIAG); printk(KERN_NOTICE "%s(%d) Loopback mode is: %s\n", dev->type, dev->number, (mode == ATM_LM_NONE ? "NONE" : (mode == ATM_LM_LOC_ATM ? "DIAG (local)" : (mode == IDT77105_DIAG_LC_LINE_LOOPBACK ? 
"LOOP (remote)" : "unknown"))) ); PRIV(dev)->loop_mode = mode; return 0; } static int idt77105_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) { printk(KERN_NOTICE "%s(%d) idt77105_ioctl() called\n",dev->type,dev->number); switch (cmd) { case IDT77105_GETSTATZ: if (!capable(CAP_NET_ADMIN)) return -EPERM; /* fall through */ case IDT77105_GETSTAT: return fetch_stats(dev, arg, cmd == IDT77105_GETSTATZ); case ATM_SETLOOP: return set_loopback(dev,(int)(unsigned long) arg); case ATM_GETLOOP: return put_user(PRIV(dev)->loop_mode,(int __user *)arg) ? -EFAULT : 0; case ATM_QUERYLOOP: return put_user(ATM_LM_LOC_ATM | ATM_LM_RMT_ATM, (int __user *) arg) ? -EFAULT : 0; default: return -ENOIOCTLCMD; } } static void idt77105_int(struct atm_dev *dev) { unsigned char istat; istat = GET(ISTAT); /* side effect: clears all interrupt status bits */ DPRINTK("IDT77105 generated an interrupt, istat=%02x\n", (unsigned)istat); if (istat & IDT77105_ISTAT_RSCC) { /* Rx Signal Condition Change - line went up or down */ if (istat & IDT77105_ISTAT_GOODSIG) { /* signal detected again */ /* This should not happen (restart timer does it) but JIC */ dev->signal = ATM_PHY_SIG_FOUND; } else { /* signal lost */ /* * Disable interrupts and stop all transmission and * reception - the restart timer will restore these. 
*/ PRIV(dev)->old_mcr = GET(MCR); PUT( (PRIV(dev)->old_mcr| IDT77105_MCR_DREC| IDT77105_MCR_DRIC| IDT77105_MCR_HALTTX ) & ~IDT77105_MCR_EIP, MCR); dev->signal = ATM_PHY_SIG_LOST; printk(KERN_NOTICE "%s(itf %d): signal lost\n", dev->type,dev->number); } } if (istat & IDT77105_ISTAT_RFO) { /* Rx FIFO Overrun -- perform a FIFO flush */ PUT( GET(DIAG) | IDT77105_DIAG_RFLUSH, DIAG); printk(KERN_NOTICE "%s(itf %d): receive FIFO overrun\n", dev->type,dev->number); } #ifdef GENERAL_DEBUG if (istat & (IDT77105_ISTAT_HECERR | IDT77105_ISTAT_SCR | IDT77105_ISTAT_RSE)) { /* normally don't care - just report in stats */ printk(KERN_NOTICE "%s(itf %d): received cell with error\n", dev->type,dev->number); } #endif } static int idt77105_start(struct atm_dev *dev) { unsigned long flags; if (!(dev->dev_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL))) return -ENOMEM; PRIV(dev)->dev = dev; spin_lock_irqsave(&idt77105_priv_lock, flags); PRIV(dev)->next = idt77105_all; idt77105_all = PRIV(dev); spin_unlock_irqrestore(&idt77105_priv_lock, flags); memset(&PRIV(dev)->stats,0,sizeof(struct idt77105_stats)); /* initialise dev->signal from Good Signal Bit */ dev->signal = GET(ISTAT) & IDT77105_ISTAT_GOODSIG ? ATM_PHY_SIG_FOUND : ATM_PHY_SIG_LOST; if (dev->signal == ATM_PHY_SIG_LOST) printk(KERN_WARNING "%s(itf %d): no signal\n",dev->type, dev->number); /* initialise loop mode from hardware */ switch ( GET(DIAG) & IDT77105_DIAG_LCMASK ) { case IDT77105_DIAG_LC_NORMAL: PRIV(dev)->loop_mode = ATM_LM_NONE; break; case IDT77105_DIAG_LC_PHY_LOOPBACK: PRIV(dev)->loop_mode = ATM_LM_LOC_ATM; break; case IDT77105_DIAG_LC_LINE_LOOPBACK: PRIV(dev)->loop_mode = ATM_LM_RMT_ATM; break; } /* enable interrupts, e.g. 
on loss of signal */ PRIV(dev)->old_mcr = GET(MCR); if (dev->signal == ATM_PHY_SIG_FOUND) { PRIV(dev)->old_mcr |= IDT77105_MCR_EIP; PUT(PRIV(dev)->old_mcr, MCR); } idt77105_stats_timer_func(0); /* clear 77105 counters */ (void) fetch_stats(dev,NULL,1); /* clear kernel counters */ spin_lock_irqsave(&idt77105_priv_lock, flags); if (start_timer) { start_timer = 0; init_timer(&stats_timer); stats_timer.expires = jiffies+IDT77105_STATS_TIMER_PERIOD; stats_timer.function = idt77105_stats_timer_func; add_timer(&stats_timer); init_timer(&restart_timer); restart_timer.expires = jiffies+IDT77105_RESTART_TIMER_PERIOD; restart_timer.function = idt77105_restart_timer_func; add_timer(&restart_timer); } spin_unlock_irqrestore(&idt77105_priv_lock, flags); return 0; } static int idt77105_stop(struct atm_dev *dev) { struct idt77105_priv *walk, *prev; DPRINTK("%s(itf %d): stopping IDT77105\n",dev->type,dev->number); /* disable interrupts */ PUT( GET(MCR) & ~IDT77105_MCR_EIP, MCR ); /* detach private struct from atm_dev & free */ for (prev = NULL, walk = idt77105_all ; walk != NULL; prev = walk, walk = walk->next) { if (walk->dev == dev) { if (prev != NULL) prev->next = walk->next; else idt77105_all = walk->next; dev->phy = NULL; dev->dev_data = NULL; kfree(walk); break; } } return 0; } static const struct atmphy_ops idt77105_ops = { .start = idt77105_start, .ioctl = idt77105_ioctl, .interrupt = idt77105_int, .stop = idt77105_stop, }; int idt77105_init(struct atm_dev *dev) { dev->phy = &idt77105_ops; return 0; } EXPORT_SYMBOL(idt77105_init); static void __exit idt77105_exit(void) { /* turn off timers */ del_timer(&stats_timer); del_timer(&restart_timer); } module_exit(idt77105_exit); MODULE_LICENSE("GPL");
gpl-2.0
Zenfone2-Dev/Flare-AEL-X
arch/powerpc/platforms/pseries/pci.c
2106
4212
/* * Copyright (C) 2001 Dave Engebretsen, IBM Corporation * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM * * pSeries specific routines for PCI. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/string.h> #include <asm/eeh.h> #include <asm/pci-bridge.h> #include <asm/prom.h> #include <asm/ppc-pci.h> #if 0 void pcibios_name_device(struct pci_dev *dev) { struct device_node *dn; /* * Add IBM loc code (slot) as a prefix to the device names for service */ dn = pci_device_to_OF_node(dev); if (dn) { const char *loc_code = of_get_property(dn, "ibm,loc-code", NULL); if (loc_code) { int loc_len = strlen(loc_code); if (loc_len < sizeof(dev->dev.name)) { memmove(dev->dev.name+loc_len+1, dev->dev.name, sizeof(dev->dev.name)-loc_len-1); memcpy(dev->dev.name, loc_code, loc_len); dev->dev.name[loc_len] = ' '; dev->dev.name[sizeof(dev->dev.name)-1] = '\0'; } } } } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device); #endif static void __init pSeries_request_regions(void) { if (!isa_io_base) return; request_region(0x20,0x20,"pic1"); request_region(0xa0,0x20,"pic2"); request_region(0x00,0x20,"dma1"); request_region(0x40,0x20,"timer"); request_region(0x80,0x10,"dma page reg"); 
request_region(0xc0,0x20,"dma2"); } void __init pSeries_final_fixup(void) { pSeries_request_regions(); eeh_addr_cache_build(); } /* * Assume the winbond 82c105 is the IDE controller on a * p610/p615/p630. We should probably be more careful in case * someone tries to plug in a similar adapter. */ static void fixup_winbond_82c105(struct pci_dev* dev) { int i; unsigned int reg; if (!machine_is(pseries)) return; printk("Using INTC for W82c105 IDE controller.\n"); pci_read_config_dword(dev, 0x40, &reg); /* Enable LEGIRQ to use INTC instead of ISA interrupts */ pci_write_config_dword(dev, 0x40, reg | (1<<11)); for (i = 0; i < DEVICE_COUNT_RESOURCE; ++i) { /* zap the 2nd function of the winbond chip */ if (dev->resource[i].flags & IORESOURCE_IO && dev->bus->number == 0 && dev->devfn == 0x81) dev->resource[i].flags &= ~IORESOURCE_IO; if (dev->resource[i].start == 0 && dev->resource[i].end) { dev->resource[i].flags = 0; dev->resource[i].end = 0; } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105, fixup_winbond_82c105); int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) { struct device_node *dn, *pdn; struct pci_bus *bus; const uint32_t *pcie_link_speed_stats; bus = bridge->bus; dn = pcibios_get_phb_of_node(bus); if (!dn) return 0; for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn, "ibm,pcie-link-speed-stats", NULL); if (pcie_link_speed_stats) break; } of_node_put(pdn); if (!pcie_link_speed_stats) { pr_err("no ibm,pcie-link-speed-stats property\n"); return 0; } switch (pcie_link_speed_stats[0]) { case 0x01: bus->max_bus_speed = PCIE_SPEED_2_5GT; break; case 0x02: bus->max_bus_speed = PCIE_SPEED_5_0GT; break; default: bus->max_bus_speed = PCI_SPEED_UNKNOWN; break; } switch (pcie_link_speed_stats[1]) { case 0x01: bus->cur_bus_speed = PCIE_SPEED_2_5GT; break; case 0x02: bus->cur_bus_speed = PCIE_SPEED_5_0GT; break; default: bus->cur_bus_speed = 
PCI_SPEED_UNKNOWN; break; } return 0; }
gpl-2.0
Bilibox/Linux-3.0.X
fs/omfs/dir.c
2362
10273
/* * OMFS (as used by RIO Karma) directory operations. * Copyright (C) 2005 Bob Copeland <me@bobcopeland.com> * Released under GPL v2. */ #include <linux/fs.h> #include <linux/ctype.h> #include <linux/buffer_head.h> #include "omfs.h" static int omfs_hash(const char *name, int namelen, int mod) { int i, hash = 0; for (i = 0; i < namelen; i++) hash ^= tolower(name[i]) << (i % 24); return hash % mod; } /* * Finds the bucket for a given name and reads the containing block; * *ofs is set to the offset of the first list entry. */ static struct buffer_head *omfs_get_bucket(struct inode *dir, const char *name, int namelen, int *ofs) { int nbuckets = (dir->i_size - OMFS_DIR_START)/8; int bucket = omfs_hash(name, namelen, nbuckets); *ofs = OMFS_DIR_START + bucket * 8; return omfs_bread(dir->i_sb, dir->i_ino); } static struct buffer_head *omfs_scan_list(struct inode *dir, u64 block, const char *name, int namelen, u64 *prev_block) { struct buffer_head *bh; struct omfs_inode *oi; int err = -ENOENT; *prev_block = ~0; while (block != ~0) { bh = omfs_bread(dir->i_sb, block); if (!bh) { err = -EIO; goto err; } oi = (struct omfs_inode *) bh->b_data; if (omfs_is_bad(OMFS_SB(dir->i_sb), &oi->i_head, block)) { brelse(bh); goto err; } if (strncmp(oi->i_name, name, namelen) == 0) return bh; *prev_block = block; block = be64_to_cpu(oi->i_sibling); brelse(bh); } err: return ERR_PTR(err); } static struct buffer_head *omfs_find_entry(struct inode *dir, const char *name, int namelen) { struct buffer_head *bh; int ofs; u64 block, dummy; bh = omfs_get_bucket(dir, name, namelen, &ofs); if (!bh) return ERR_PTR(-EIO); block = be64_to_cpu(*((__be64 *) &bh->b_data[ofs])); brelse(bh); return omfs_scan_list(dir, block, name, namelen, &dummy); } int omfs_make_empty(struct inode *inode, struct super_block *sb) { struct omfs_sb_info *sbi = OMFS_SB(sb); struct buffer_head *bh; struct omfs_inode *oi; bh = omfs_bread(sb, inode->i_ino); if (!bh) return -ENOMEM; memset(bh->b_data, 0, sizeof(struct 
omfs_inode)); if (inode->i_mode & S_IFDIR) { memset(&bh->b_data[OMFS_DIR_START], 0xff, sbi->s_sys_blocksize - OMFS_DIR_START); } else omfs_make_empty_table(bh, OMFS_EXTENT_START); oi = (struct omfs_inode *) bh->b_data; oi->i_head.h_self = cpu_to_be64(inode->i_ino); oi->i_sibling = ~cpu_to_be64(0ULL); mark_buffer_dirty(bh); brelse(bh); return 0; } static int omfs_add_link(struct dentry *dentry, struct inode *inode) { struct inode *dir = dentry->d_parent->d_inode; const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; struct omfs_inode *oi; struct buffer_head *bh; u64 block; __be64 *entry; int ofs; /* just prepend to head of queue in proper bucket */ bh = omfs_get_bucket(dir, name, namelen, &ofs); if (!bh) goto out; entry = (__be64 *) &bh->b_data[ofs]; block = be64_to_cpu(*entry); *entry = cpu_to_be64(inode->i_ino); mark_buffer_dirty(bh); brelse(bh); /* now set the sibling and parent pointers on the new inode */ bh = omfs_bread(dir->i_sb, inode->i_ino); if (!bh) goto out; oi = (struct omfs_inode *) bh->b_data; memcpy(oi->i_name, name, namelen); memset(oi->i_name + namelen, 0, OMFS_NAMELEN - namelen); oi->i_sibling = cpu_to_be64(block); oi->i_parent = cpu_to_be64(dir->i_ino); mark_buffer_dirty(bh); brelse(bh); dir->i_ctime = CURRENT_TIME_SEC; /* mark affected inodes dirty to rebuild checksums */ mark_inode_dirty(dir); mark_inode_dirty(inode); return 0; out: return -ENOMEM; } static int omfs_delete_entry(struct dentry *dentry) { struct inode *dir = dentry->d_parent->d_inode; struct inode *dirty; const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; struct omfs_inode *oi; struct buffer_head *bh, *bh2; __be64 *entry, next; u64 block, prev; int ofs; int err = -ENOMEM; /* delete the proper node in the bucket's linked list */ bh = omfs_get_bucket(dir, name, namelen, &ofs); if (!bh) goto out; entry = (__be64 *) &bh->b_data[ofs]; block = be64_to_cpu(*entry); bh2 = omfs_scan_list(dir, block, name, namelen, &prev); if (IS_ERR(bh2)) { err = 
PTR_ERR(bh2); goto out_free_bh; } oi = (struct omfs_inode *) bh2->b_data; next = oi->i_sibling; brelse(bh2); if (prev != ~0) { /* found in middle of list, get list ptr */ brelse(bh); bh = omfs_bread(dir->i_sb, prev); if (!bh) goto out; oi = (struct omfs_inode *) bh->b_data; entry = &oi->i_sibling; } *entry = next; mark_buffer_dirty(bh); if (prev != ~0) { dirty = omfs_iget(dir->i_sb, prev); if (!IS_ERR(dirty)) { mark_inode_dirty(dirty); iput(dirty); } } err = 0; out_free_bh: brelse(bh); out: return err; } static int omfs_dir_is_empty(struct inode *inode) { int nbuckets = (inode->i_size - OMFS_DIR_START) / 8; struct buffer_head *bh; u64 *ptr; int i; bh = omfs_bread(inode->i_sb, inode->i_ino); if (!bh) return 0; ptr = (u64 *) &bh->b_data[OMFS_DIR_START]; for (i = 0; i < nbuckets; i++, ptr++) if (*ptr != ~0) break; brelse(bh); return *ptr != ~0; } static int omfs_remove(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; int ret; if (S_ISDIR(inode->i_mode) && !omfs_dir_is_empty(inode)) return -ENOTEMPTY; ret = omfs_delete_entry(dentry); if (ret) return ret; clear_nlink(inode); mark_inode_dirty(inode); mark_inode_dirty(dir); return 0; } static int omfs_add_node(struct inode *dir, struct dentry *dentry, int mode) { int err; struct inode *inode = omfs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); err = omfs_make_empty(inode, dir->i_sb); if (err) goto out_free_inode; err = omfs_add_link(dentry, inode); if (err) goto out_free_inode; d_instantiate(dentry, inode); return 0; out_free_inode: iput(inode); return err; } static int omfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) { return omfs_add_node(dir, dentry, mode | S_IFDIR); } static int omfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) { return omfs_add_node(dir, dentry, mode | S_IFREG); } static struct dentry *omfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct buffer_head *bh; struct inode 
*inode = NULL; if (dentry->d_name.len > OMFS_NAMELEN) return ERR_PTR(-ENAMETOOLONG); bh = omfs_find_entry(dir, dentry->d_name.name, dentry->d_name.len); if (!IS_ERR(bh)) { struct omfs_inode *oi = (struct omfs_inode *)bh->b_data; ino_t ino = be64_to_cpu(oi->i_head.h_self); brelse(bh); inode = omfs_iget(dir->i_sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); } d_add(dentry, inode); return NULL; } /* sanity check block's self pointer */ int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header, u64 fsblock) { int is_bad; u64 ino = be64_to_cpu(header->h_self); is_bad = ((ino != fsblock) || (ino < sbi->s_root_ino) || (ino > sbi->s_num_blocks)); if (is_bad) printk(KERN_WARNING "omfs: bad hash chain detected\n"); return is_bad; } static int omfs_fill_chain(struct file *filp, void *dirent, filldir_t filldir, u64 fsblock, int hindex) { struct inode *dir = filp->f_dentry->d_inode; struct buffer_head *bh; struct omfs_inode *oi; u64 self; int res = 0; unsigned char d_type; /* follow chain in this bucket */ while (fsblock != ~0) { bh = omfs_bread(dir->i_sb, fsblock); if (!bh) goto out; oi = (struct omfs_inode *) bh->b_data; if (omfs_is_bad(OMFS_SB(dir->i_sb), &oi->i_head, fsblock)) { brelse(bh); goto out; } self = fsblock; fsblock = be64_to_cpu(oi->i_sibling); /* skip visited nodes */ if (hindex) { hindex--; brelse(bh); continue; } d_type = (oi->i_type == OMFS_DIR) ? 
DT_DIR : DT_REG; res = filldir(dirent, oi->i_name, strnlen(oi->i_name, OMFS_NAMELEN), filp->f_pos, self, d_type); brelse(bh); if (res < 0) break; filp->f_pos++; } out: return res; } static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct inode *new_inode = new_dentry->d_inode; struct inode *old_inode = old_dentry->d_inode; int err; if (new_inode) { /* overwriting existing file/dir */ err = omfs_remove(new_dir, new_dentry); if (err) goto out; } /* since omfs locates files by name, we need to unlink _before_ * adding the new link or we won't find the old one */ err = omfs_delete_entry(old_dentry); if (err) goto out; mark_inode_dirty(old_dir); err = omfs_add_link(new_dentry, old_inode); if (err) goto out; old_inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(old_inode); out: return err; } static int omfs_readdir(struct file *filp, void *dirent, filldir_t filldir) { struct inode *dir = filp->f_dentry->d_inode; struct buffer_head *bh; loff_t offset, res; unsigned int hchain, hindex; int nbuckets; u64 fsblock; int ret = -EINVAL; if (filp->f_pos >> 32) goto success; switch ((unsigned long) filp->f_pos) { case 0: if (filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR) < 0) goto success; filp->f_pos++; /* fall through */ case 1: if (filldir(dirent, "..", 2, 1, parent_ino(filp->f_dentry), DT_DIR) < 0) goto success; filp->f_pos = 1 << 20; /* fall through */ } nbuckets = (dir->i_size - OMFS_DIR_START) / 8; /* high 12 bits store bucket + 1 and low 20 bits store hash index */ hchain = (filp->f_pos >> 20) - 1; hindex = filp->f_pos & 0xfffff; bh = omfs_bread(dir->i_sb, dir->i_ino); if (!bh) goto out; offset = OMFS_DIR_START + hchain * 8; for (; hchain < nbuckets; hchain++, offset += 8) { fsblock = be64_to_cpu(*((__be64 *) &bh->b_data[offset])); res = omfs_fill_chain(filp, dirent, filldir, fsblock, hindex); hindex = 0; if (res < 0) break; filp->f_pos = (hchain+2) << 20; } brelse(bh); success: ret = 0; out: 
return ret; } const struct inode_operations omfs_dir_inops = { .lookup = omfs_lookup, .mkdir = omfs_mkdir, .rename = omfs_rename, .create = omfs_create, .unlink = omfs_remove, .rmdir = omfs_remove, }; const struct file_operations omfs_dir_operations = { .read = generic_read_dir, .readdir = omfs_readdir, .llseek = generic_file_llseek, };
gpl-2.0
captivo/linux-captivo
drivers/mtd/nand/r852.c
2362
25269
/* * Copyright © 2009 - Maxim Levitsky * driver for Ricoh xD readers * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/jiffies.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/delay.h> #include <linux/slab.h> #include <asm/byteorder.h> #include <linux/sched.h> #include "sm_common.h" #include "r852.h" static bool r852_enable_dma = 1; module_param(r852_enable_dma, bool, S_IRUGO); MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)"); static int debug; module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug level (0-2)"); /* read register */ static inline uint8_t r852_read_reg(struct r852_device *dev, int address) { uint8_t reg = readb(dev->mmio + address); return reg; } /* write register */ static inline void r852_write_reg(struct r852_device *dev, int address, uint8_t value) { writeb(value, dev->mmio + address); mmiowb(); } /* read dword sized register */ static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address) { uint32_t reg = le32_to_cpu(readl(dev->mmio + address)); return reg; } /* write dword sized register */ static inline void r852_write_reg_dword(struct r852_device *dev, int address, uint32_t value) { writel(cpu_to_le32(value), dev->mmio + address); mmiowb(); } /* returns pointer to our private structure */ static inline struct r852_device *r852_get_dev(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; return chip->priv; } /* check if controller supports dma */ static void r852_dma_test(struct r852_device *dev) { dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) & (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2); if (!dev->dma_usable) message("Non dma capable device detected, dma disabled"); if 
(!r852_enable_dma) { message("disabling dma on user request"); dev->dma_usable = 0; } } /* * Enable dma. Enables ether first or second stage of the DMA, * Expects dev->dma_dir and dev->dma_state be set */ static void r852_dma_enable(struct r852_device *dev) { uint8_t dma_reg, dma_irq_reg; /* Set up dma settings */ dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS); dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY); if (dev->dma_dir) dma_reg |= R852_DMA_READ; if (dev->dma_state == DMA_INTERNAL) { dma_reg |= R852_DMA_INTERNAL; /* Precaution to make sure HW doesn't write */ /* to random kernel memory */ r852_write_reg_dword(dev, R852_DMA_ADDR, cpu_to_le32(dev->phys_bounce_buffer)); } else { dma_reg |= R852_DMA_MEMORY; r852_write_reg_dword(dev, R852_DMA_ADDR, cpu_to_le32(dev->phys_dma_addr)); } /* Precaution: make sure write reached the device */ r852_read_reg_dword(dev, R852_DMA_ADDR); r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg); /* Set dma irq */ dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE); r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, dma_irq_reg | R852_DMA_IRQ_INTERNAL | R852_DMA_IRQ_ERROR | R852_DMA_IRQ_MEMORY); } /* * Disable dma, called from the interrupt handler, which specifies * success of the operation via 'error' argument */ static void r852_dma_done(struct r852_device *dev, int error) { WARN_ON(dev->dma_stage == 0); r852_write_reg_dword(dev, R852_DMA_IRQ_STA, r852_read_reg_dword(dev, R852_DMA_IRQ_STA)); r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0); r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0); /* Precaution to make sure HW doesn't write to random kernel memory */ r852_write_reg_dword(dev, R852_DMA_ADDR, cpu_to_le32(dev->phys_bounce_buffer)); r852_read_reg_dword(dev, R852_DMA_ADDR); dev->dma_error = error; dev->dma_stage = 0; if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer) pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN, dev->dma_dir ? 
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); } /* * Wait, till dma is done, which includes both phases of it */ static int r852_dma_wait(struct r852_device *dev) { long timeout = wait_for_completion_timeout(&dev->dma_done, msecs_to_jiffies(1000)); if (!timeout) { dbg("timeout waiting for DMA interrupt"); return -ETIMEDOUT; } return 0; } /* * Read/Write one page using dma. Only pages can be read (512 bytes) */ static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read) { int bounce = 0; unsigned long flags; int error; dev->dma_error = 0; /* Set dma direction */ dev->dma_dir = do_read; dev->dma_stage = 1; INIT_COMPLETION(dev->dma_done); dbg_verbose("doing dma %s ", do_read ? "read" : "write"); /* Set initial dma state: for reading first fill on board buffer, from device, for writes first fill the buffer from memory*/ dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY; /* if incoming buffer is not page aligned, we should do bounce */ if ((unsigned long)buf & (R852_DMA_LEN-1)) bounce = 1; if (!bounce) { dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf, R852_DMA_LEN, (do_read ? 
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE)); if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr)) bounce = 1; } if (bounce) { dbg_verbose("dma: using bounce buffer"); dev->phys_dma_addr = dev->phys_bounce_buffer; if (!do_read) memcpy(dev->bounce_buffer, buf, R852_DMA_LEN); } /* Enable DMA */ spin_lock_irqsave(&dev->irqlock, flags); r852_dma_enable(dev); spin_unlock_irqrestore(&dev->irqlock, flags); /* Wait till complete */ error = r852_dma_wait(dev); if (error) { r852_dma_done(dev, error); return; } if (do_read && bounce) memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN); } /* * Program data lines of the nand chip to send data to it */ void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { struct r852_device *dev = r852_get_dev(mtd); uint32_t reg; /* Don't allow any access to hardware if we suspect card removal */ if (dev->card_unstable) return; /* Special case for whole sector read */ if (len == R852_DMA_LEN && dev->dma_usable) { r852_do_dma(dev, (uint8_t *)buf, 0); return; } /* write DWORD chinks - faster */ while (len) { reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24; r852_write_reg_dword(dev, R852_DATALINE, reg); buf += 4; len -= 4; } /* write rest */ while (len) r852_write_reg(dev, R852_DATALINE, *buf++); } /* * Read data lines of the nand chip to retrieve data */ void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { struct r852_device *dev = r852_get_dev(mtd); uint32_t reg; if (dev->card_unstable) { /* since we can't signal error here, at least, return predictable buffer */ memset(buf, 0, len); return; } /* special case for whole sector read */ if (len == R852_DMA_LEN && dev->dma_usable) { r852_do_dma(dev, buf, 1); return; } /* read in dword sized chunks */ while (len >= 4) { reg = r852_read_reg_dword(dev, R852_DATALINE); *buf++ = reg & 0xFF; *buf++ = (reg >> 8) & 0xFF; *buf++ = (reg >> 16) & 0xFF; *buf++ = (reg >> 24) & 0xFF; len -= 4; } /* read the reset by bytes */ while (len--) *buf++ = r852_read_reg(dev, 
R852_DATALINE); } /* * Read one byte from nand chip */ static uint8_t r852_read_byte(struct mtd_info *mtd) { struct r852_device *dev = r852_get_dev(mtd); /* Same problem as in r852_read_buf.... */ if (dev->card_unstable) return 0; return r852_read_reg(dev, R852_DATALINE); } /* * Control several chip lines & send commands */ void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl) { struct r852_device *dev = r852_get_dev(mtd); if (dev->card_unstable) return; if (ctrl & NAND_CTRL_CHANGE) { dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND | R852_CTL_ON | R852_CTL_CARDENABLE); if (ctrl & NAND_ALE) dev->ctlreg |= R852_CTL_DATA; if (ctrl & NAND_CLE) dev->ctlreg |= R852_CTL_COMMAND; if (ctrl & NAND_NCE) dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON); else dev->ctlreg &= ~R852_CTL_WRITE; /* when write is stareted, enable write access */ if (dat == NAND_CMD_ERASE1) dev->ctlreg |= R852_CTL_WRITE; r852_write_reg(dev, R852_CTL, dev->ctlreg); } /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need to set write mode */ if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) { dev->ctlreg |= R852_CTL_WRITE; r852_write_reg(dev, R852_CTL, dev->ctlreg); } if (dat != NAND_CMD_NONE) r852_write_reg(dev, R852_DATALINE, dat); } /* * Wait till card is ready. * based on nand_wait, but returns errors on DMA error */ int r852_wait(struct mtd_info *mtd, struct nand_chip *chip) { struct r852_device *dev = chip->priv; unsigned long timeout; int status; timeout = jiffies + (chip->state == FL_ERASING ? msecs_to_jiffies(400) : msecs_to_jiffies(20)); while (time_before(jiffies, timeout)) if (chip->dev_ready(mtd)) break; chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); status = (int)chip->read_byte(mtd); /* Unfortunelly, no way to send detailed error status... 
*/ if (dev->dma_error) { status |= NAND_STATUS_FAIL; dev->dma_error = 0; } return status; } /* * Check if card is ready */ int r852_ready(struct mtd_info *mtd) { struct r852_device *dev = r852_get_dev(mtd); return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY); } /* * Set ECC engine mode */ void r852_ecc_hwctl(struct mtd_info *mtd, int mode) { struct r852_device *dev = r852_get_dev(mtd); if (dev->card_unstable) return; switch (mode) { case NAND_ECC_READ: case NAND_ECC_WRITE: /* enable ecc generation/check*/ dev->ctlreg |= R852_CTL_ECC_ENABLE; /* flush ecc buffer */ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); r852_read_reg_dword(dev, R852_DATALINE); r852_write_reg(dev, R852_CTL, dev->ctlreg); return; case NAND_ECC_READSYN: /* disable ecc generation */ dev->ctlreg &= ~R852_CTL_ECC_ENABLE; r852_write_reg(dev, R852_CTL, dev->ctlreg); } } /* * Calculate ECC, only used for writes */ int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat, uint8_t *ecc_code) { struct r852_device *dev = r852_get_dev(mtd); struct sm_oob *oob = (struct sm_oob *)ecc_code; uint32_t ecc1, ecc2; if (dev->card_unstable) return 0; dev->ctlreg &= ~R852_CTL_ECC_ENABLE; r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); ecc1 = r852_read_reg_dword(dev, R852_DATALINE); ecc2 = r852_read_reg_dword(dev, R852_DATALINE); oob->ecc1[0] = (ecc1) & 0xFF; oob->ecc1[1] = (ecc1 >> 8) & 0xFF; oob->ecc1[2] = (ecc1 >> 16) & 0xFF; oob->ecc2[0] = (ecc2) & 0xFF; oob->ecc2[1] = (ecc2 >> 8) & 0xFF; oob->ecc2[2] = (ecc2 >> 16) & 0xFF; r852_write_reg(dev, R852_CTL, dev->ctlreg); return 0; } /* * Correct the data using ECC, hw did almost everything for us */ int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc) { uint16_t ecc_reg; uint8_t ecc_status, err_byte; int i, error = 0; struct r852_device *dev = r852_get_dev(mtd); if (dev->card_unstable) return 0; if (dev->dma_error) { dev->dma_error = 0; return -1; } r852_write_reg(dev, 
R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); ecc_reg = r852_read_reg_dword(dev, R852_DATALINE); r852_write_reg(dev, R852_CTL, dev->ctlreg); for (i = 0 ; i <= 1 ; i++) { ecc_status = (ecc_reg >> 8) & 0xFF; /* ecc uncorrectable error */ if (ecc_status & R852_ECC_FAIL) { dbg("ecc: unrecoverable error, in half %d", i); error = -1; goto exit; } /* correctable error */ if (ecc_status & R852_ECC_CORRECTABLE) { err_byte = ecc_reg & 0xFF; dbg("ecc: recoverable error, " "in half %d, byte %d, bit %d", i, err_byte, ecc_status & R852_ECC_ERR_BIT_MSK); dat[err_byte] ^= 1 << (ecc_status & R852_ECC_ERR_BIT_MSK); error++; } dat += 256; ecc_reg >>= 16; } exit: return error; } /* * This is copy of nand_read_oob_std * nand_read_oob_syndrome assumes we can send column address - we can't */ static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) { chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); return 0; } /* * Start the nand engine */ void r852_engine_enable(struct r852_device *dev) { if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) { r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON); r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED); } else { r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED); r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON); } msleep(300); r852_write_reg(dev, R852_CTL, 0); } /* * Stop the nand engine */ void r852_engine_disable(struct r852_device *dev) { r852_write_reg_dword(dev, R852_HW, 0); r852_write_reg(dev, R852_CTL, R852_CTL_RESET); } /* * Test if card is present */ void r852_card_update_present(struct r852_device *dev) { unsigned long flags; uint8_t reg; spin_lock_irqsave(&dev->irqlock, flags); reg = r852_read_reg(dev, R852_CARD_STA); dev->card_detected = !!(reg & R852_CARD_STA_PRESENT); spin_unlock_irqrestore(&dev->irqlock, flags); } /* * Update card detection IRQ state according to current card state * which is read in r852_card_update_present */ void 
r852_update_card_detect(struct r852_device *dev) { int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE); dev->card_unstable = 0; card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT); card_detect_reg |= R852_CARD_IRQ_GENABLE; card_detect_reg |= dev->card_detected ? R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT; r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg); } ssize_t r852_media_type_show(struct device *sys_dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev); struct r852_device *dev = r852_get_dev(mtd); char *data = dev->sm ? "smartmedia" : "xd"; strcpy(buf, data); return strlen(data); } DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL); /* Detect properties of card in slot */ void r852_update_media_status(struct r852_device *dev) { uint8_t reg; unsigned long flags; int readonly; spin_lock_irqsave(&dev->irqlock, flags); if (!dev->card_detected) { message("card removed"); spin_unlock_irqrestore(&dev->irqlock, flags); return ; } readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO; reg = r852_read_reg(dev, R852_DMA_CAP); dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT); message("detected %s %s card in slot", dev->sm ? "SmartMedia" : "xD", readonly ? 
"readonly" : "writeable"); dev->readonly = readonly; spin_unlock_irqrestore(&dev->irqlock, flags); } /* * Register the nand device * Called when the card is detected */ int r852_register_nand_device(struct r852_device *dev) { dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL); if (!dev->mtd) goto error1; WARN_ON(dev->card_registred); dev->mtd->owner = THIS_MODULE; dev->mtd->priv = dev->chip; dev->mtd->dev.parent = &dev->pci_dev->dev; if (dev->readonly) dev->chip->options |= NAND_ROM; r852_engine_enable(dev); if (sm_register_device(dev->mtd, dev->sm)) goto error2; if (device_create_file(&dev->mtd->dev, &dev_attr_media_type)) message("can't create media type sysfs attribute"); dev->card_registred = 1; return 0; error2: kfree(dev->mtd); error1: /* Force card redetect */ dev->card_detected = 0; return -1; } /* * Unregister the card */ void r852_unregister_nand_device(struct r852_device *dev) { if (!dev->card_registred) return; device_remove_file(&dev->mtd->dev, &dev_attr_media_type); nand_release(dev->mtd); r852_engine_disable(dev); dev->card_registred = 0; kfree(dev->mtd); dev->mtd = NULL; } /* Card state updater */ void r852_card_detect_work(struct work_struct *work) { struct r852_device *dev = container_of(work, struct r852_device, card_detect_work.work); r852_card_update_present(dev); r852_update_card_detect(dev); dev->card_unstable = 0; /* False alarm */ if (dev->card_detected == dev->card_registred) goto exit; /* Read media properties */ r852_update_media_status(dev); /* Register the card */ if (dev->card_detected) r852_register_nand_device(dev); else r852_unregister_nand_device(dev); exit: r852_update_card_detect(dev); } /* Ack + disable IRQ generation */ static void r852_disable_irqs(struct r852_device *dev) { uint8_t reg; reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE); r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK); reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE); r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, reg & 
~R852_DMA_IRQ_MASK); r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK); r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK); } /* Interrupt handler */ static irqreturn_t r852_irq(int irq, void *data) { struct r852_device *dev = (struct r852_device *)data; uint8_t card_status, dma_status; unsigned long flags; irqreturn_t ret = IRQ_NONE; spin_lock_irqsave(&dev->irqlock, flags); /* handle card detection interrupts first */ card_status = r852_read_reg(dev, R852_CARD_IRQ_STA); r852_write_reg(dev, R852_CARD_IRQ_STA, card_status); if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) { ret = IRQ_HANDLED; dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT); /* we shouldn't receive any interrupts if we wait for card to settle */ WARN_ON(dev->card_unstable); /* disable irqs while card is unstable */ /* this will timeout DMA if active, but better that garbage */ r852_disable_irqs(dev); if (dev->card_unstable) goto out; /* let, card state to settle a bit, and then do the work */ dev->card_unstable = 1; queue_delayed_work(dev->card_workqueue, &dev->card_detect_work, msecs_to_jiffies(100)); goto out; } /* Handle dma interrupts */ dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA); r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status); if (dma_status & R852_DMA_IRQ_MASK) { ret = IRQ_HANDLED; if (dma_status & R852_DMA_IRQ_ERROR) { dbg("received dma error IRQ"); r852_dma_done(dev, -EIO); complete(&dev->dma_done); goto out; } /* received DMA interrupt out of nowhere? 
*/ WARN_ON_ONCE(dev->dma_stage == 0); if (dev->dma_stage == 0) goto out; /* done device access */ if (dev->dma_state == DMA_INTERNAL && (dma_status & R852_DMA_IRQ_INTERNAL)) { dev->dma_state = DMA_MEMORY; dev->dma_stage++; } /* done memory DMA */ if (dev->dma_state == DMA_MEMORY && (dma_status & R852_DMA_IRQ_MEMORY)) { dev->dma_state = DMA_INTERNAL; dev->dma_stage++; } /* Enable 2nd half of dma dance */ if (dev->dma_stage == 2) r852_dma_enable(dev); /* Operation done */ if (dev->dma_stage == 3) { r852_dma_done(dev, 0); complete(&dev->dma_done); } goto out; } /* Handle unknown interrupts */ if (dma_status) dbg("bad dma IRQ status = %x", dma_status); if (card_status & ~R852_CARD_STA_CD) dbg("strange card status = %x", card_status); out: spin_unlock_irqrestore(&dev->irqlock, flags); return ret; } int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { int error; struct nand_chip *chip; struct r852_device *dev; /* pci initialization */ error = pci_enable_device(pci_dev); if (error) goto error1; pci_set_master(pci_dev); error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); if (error) goto error2; error = pci_request_regions(pci_dev, DRV_NAME); if (error) goto error3; error = -ENOMEM; /* init nand chip, but register it only on card insert */ chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL); if (!chip) goto error4; /* commands */ chip->cmd_ctrl = r852_cmdctl; chip->waitfunc = r852_wait; chip->dev_ready = r852_ready; /* I/O */ chip->read_byte = r852_read_byte; chip->read_buf = r852_read_buf; chip->write_buf = r852_write_buf; /* ecc */ chip->ecc.mode = NAND_ECC_HW_SYNDROME; chip->ecc.size = R852_DMA_LEN; chip->ecc.bytes = SM_OOB_SIZE; chip->ecc.strength = 2; chip->ecc.hwctl = r852_ecc_hwctl; chip->ecc.calculate = r852_ecc_calculate; chip->ecc.correct = r852_ecc_correct; /* TODO: hack */ chip->ecc.read_oob = r852_read_oob; /* init our device structure */ dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL); if (!dev) goto error5; chip->priv = dev; 
dev->chip = chip; dev->pci_dev = pci_dev; pci_set_drvdata(pci_dev, dev); dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN, &dev->phys_bounce_buffer); if (!dev->bounce_buffer) goto error6; error = -ENODEV; dev->mmio = pci_ioremap_bar(pci_dev, 0); if (!dev->mmio) goto error7; error = -ENOMEM; dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL); if (!dev->tmp_buffer) goto error8; init_completion(&dev->dma_done); dev->card_workqueue = create_freezable_workqueue(DRV_NAME); if (!dev->card_workqueue) goto error9; INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work); /* shutdown everything - precation */ r852_engine_disable(dev); r852_disable_irqs(dev); r852_dma_test(dev); dev->irq = pci_dev->irq; spin_lock_init(&dev->irqlock); dev->card_detected = 0; r852_card_update_present(dev); /*register irq handler*/ error = -ENODEV; if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED, DRV_NAME, dev)) goto error10; /* kick initial present test */ queue_delayed_work(dev->card_workqueue, &dev->card_detect_work, 0); printk(KERN_NOTICE DRV_NAME ": driver loaded successfully\n"); return 0; error10: destroy_workqueue(dev->card_workqueue); error9: kfree(dev->tmp_buffer); error8: pci_iounmap(pci_dev, dev->mmio); error7: pci_free_consistent(pci_dev, R852_DMA_LEN, dev->bounce_buffer, dev->phys_bounce_buffer); error6: kfree(dev); error5: kfree(chip); error4: pci_release_regions(pci_dev); error3: error2: pci_disable_device(pci_dev); error1: return error; } void r852_remove(struct pci_dev *pci_dev) { struct r852_device *dev = pci_get_drvdata(pci_dev); /* Stop detect workqueue - we are going to unregister the device anyway*/ cancel_delayed_work_sync(&dev->card_detect_work); destroy_workqueue(dev->card_workqueue); /* Unregister the device, this might make more IO */ r852_unregister_nand_device(dev); /* Stop interrupts */ r852_disable_irqs(dev); synchronize_irq(dev->irq); free_irq(dev->irq, dev); /* Cleanup */ kfree(dev->tmp_buffer); pci_iounmap(pci_dev, dev->mmio); 
pci_free_consistent(pci_dev, R852_DMA_LEN, dev->bounce_buffer, dev->phys_bounce_buffer); kfree(dev->chip); kfree(dev); /* Shutdown the PCI device */ pci_release_regions(pci_dev); pci_disable_device(pci_dev); } void r852_shutdown(struct pci_dev *pci_dev) { struct r852_device *dev = pci_get_drvdata(pci_dev); cancel_delayed_work_sync(&dev->card_detect_work); r852_disable_irqs(dev); synchronize_irq(dev->irq); pci_disable_device(pci_dev); } #ifdef CONFIG_PM static int r852_suspend(struct device *device) { struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); if (dev->ctlreg & R852_CTL_CARDENABLE) return -EBUSY; /* First make sure the detect work is gone */ cancel_delayed_work_sync(&dev->card_detect_work); /* Turn off the interrupts and stop the device */ r852_disable_irqs(dev); r852_engine_disable(dev); /* If card was pulled off just during the suspend, which is very unlikely, we will remove it on resume, it too late now anyway... */ dev->card_unstable = 0; return 0; } static int r852_resume(struct device *device) { struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); r852_disable_irqs(dev); r852_card_update_present(dev); r852_engine_disable(dev); /* If card status changed, just do the work */ if (dev->card_detected != dev->card_registred) { dbg("card was %s during low power state", dev->card_detected ? 
"added" : "removed"); queue_delayed_work(dev->card_workqueue, &dev->card_detect_work, msecs_to_jiffies(1000)); return 0; } /* Otherwise, initialize the card */ if (dev->card_registred) { r852_engine_enable(dev); dev->chip->select_chip(dev->mtd, 0); dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1); dev->chip->select_chip(dev->mtd, -1); } /* Program card detection IRQ */ r852_update_card_detect(dev); return 0; } #else #define r852_suspend NULL #define r852_resume NULL #endif static const struct pci_device_id r852_pci_id_tbl[] = { { PCI_VDEVICE(RICOH, 0x0852), }, { }, }; MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl); static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume); static struct pci_driver r852_pci_driver = { .name = DRV_NAME, .id_table = r852_pci_id_tbl, .probe = r852_probe, .remove = r852_remove, .shutdown = r852_shutdown, .driver.pm = &r852_pm_ops, }; module_pci_driver(r852_pci_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
gpl-2.0
hzpeterchen/linux-usb
drivers/video/bfin_adv7393fb.c
2618
20278
/* * Frame buffer driver for ADV7393/2 video encoder * * Copyright 2006-2009 Analog Devices Inc. * Licensed under the GPL-2 or late. */ /* * TODO: Remove Globals * TODO: Code Cleanup */ #define pr_fmt(fmt) DRIVER_NAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <asm/blackfin.h> #include <asm/irq.h> #include <asm/dma.h> #include <linux/uaccess.h> #include <linux/gpio.h> #include <asm/portmux.h> #include <linux/dma-mapping.h> #include <linux/proc_fs.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include "bfin_adv7393fb.h" static int mode = VMODE; static int mem = VMEM; static int nocursor = 1; static const unsigned short ppi_pins[] = { P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2, P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3, P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7, P_PPI0_D8, P_PPI0_D9, P_PPI0_D10, P_PPI0_D11, P_PPI0_D12, P_PPI0_D13, P_PPI0_D14, P_PPI0_D15, 0 }; /* * card parameters */ static struct bfin_adv7393_fb_par { /* structure holding blackfin / adv7393 parameters when screen is blanked */ struct { u8 Mode; /* ntsc/pal/? 
*/ } vga_state; atomic_t ref_count; } bfin_par; /* --------------------------------------------------------------------- */ static struct fb_var_screeninfo bfin_adv7393_fb_defined = { .xres = 720, .yres = 480, .xres_virtual = 720, .yres_virtual = 480, .bits_per_pixel = 16, .activate = FB_ACTIVATE_TEST, .height = -1, .width = -1, .left_margin = 0, .right_margin = 0, .upper_margin = 0, .lower_margin = 0, .vmode = FB_VMODE_INTERLACED, .red = {11, 5, 0}, .green = {5, 6, 0}, .blue = {0, 5, 0}, .transp = {0, 0, 0}, }; static struct fb_fix_screeninfo bfin_adv7393_fb_fix = { .id = "BFIN ADV7393", .smem_len = 720 * 480 * 2, .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .xpanstep = 0, .ypanstep = 0, .line_length = 720 * 2, .accel = FB_ACCEL_NONE }; static struct fb_ops bfin_adv7393_fb_ops = { .owner = THIS_MODULE, .fb_open = bfin_adv7393_fb_open, .fb_release = bfin_adv7393_fb_release, .fb_check_var = bfin_adv7393_fb_check_var, .fb_pan_display = bfin_adv7393_fb_pan_display, .fb_blank = bfin_adv7393_fb_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_cursor = bfin_adv7393_fb_cursor, .fb_setcolreg = bfin_adv7393_fb_setcolreg, }; static int dma_desc_list(struct adv7393fb_device *fbdev, u16 arg) { if (arg == BUILD) { /* Build */ fbdev->vb1 = l1_data_sram_zalloc(sizeof(struct dmasg)); if (fbdev->vb1 == NULL) goto error; fbdev->av1 = l1_data_sram_zalloc(sizeof(struct dmasg)); if (fbdev->av1 == NULL) goto error; fbdev->vb2 = l1_data_sram_zalloc(sizeof(struct dmasg)); if (fbdev->vb2 == NULL) goto error; fbdev->av2 = l1_data_sram_zalloc(sizeof(struct dmasg)); if (fbdev->av2 == NULL) goto error; /* Build linked DMA descriptor list */ fbdev->vb1->next_desc_addr = fbdev->av1; fbdev->av1->next_desc_addr = fbdev->vb2; fbdev->vb2->next_desc_addr = fbdev->av2; fbdev->av2->next_desc_addr = fbdev->vb1; /* Save list head */ fbdev->descriptor_list_head = fbdev->av2; /* Vertical Blanking Field 1 */ fbdev->vb1->start_addr = 
VB_DUMMY_MEMORY_SOURCE; fbdev->vb1->cfg = DMA_CFG_VAL; fbdev->vb1->x_count = fbdev->modes[mode].xres + fbdev->modes[mode].boeft_blank; fbdev->vb1->x_modify = 0; fbdev->vb1->y_count = fbdev->modes[mode].vb1_lines; fbdev->vb1->y_modify = 0; /* Active Video Field 1 */ fbdev->av1->start_addr = (unsigned long)fbdev->fb_mem; fbdev->av1->cfg = DMA_CFG_VAL; fbdev->av1->x_count = fbdev->modes[mode].xres + fbdev->modes[mode].boeft_blank; fbdev->av1->x_modify = fbdev->modes[mode].bpp / 8; fbdev->av1->y_count = fbdev->modes[mode].a_lines; fbdev->av1->y_modify = (fbdev->modes[mode].xres - fbdev->modes[mode].boeft_blank + 1) * (fbdev->modes[mode].bpp / 8); /* Vertical Blanking Field 2 */ fbdev->vb2->start_addr = VB_DUMMY_MEMORY_SOURCE; fbdev->vb2->cfg = DMA_CFG_VAL; fbdev->vb2->x_count = fbdev->modes[mode].xres + fbdev->modes[mode].boeft_blank; fbdev->vb2->x_modify = 0; fbdev->vb2->y_count = fbdev->modes[mode].vb2_lines; fbdev->vb2->y_modify = 0; /* Active Video Field 2 */ fbdev->av2->start_addr = (unsigned long)fbdev->fb_mem + fbdev->line_len; fbdev->av2->cfg = DMA_CFG_VAL; fbdev->av2->x_count = fbdev->modes[mode].xres + fbdev->modes[mode].boeft_blank; fbdev->av2->x_modify = (fbdev->modes[mode].bpp / 8); fbdev->av2->y_count = fbdev->modes[mode].a_lines; fbdev->av2->y_modify = (fbdev->modes[mode].xres - fbdev->modes[mode].boeft_blank + 1) * (fbdev->modes[mode].bpp / 8); return 1; } error: l1_data_sram_free(fbdev->vb1); l1_data_sram_free(fbdev->av1); l1_data_sram_free(fbdev->vb2); l1_data_sram_free(fbdev->av2); return 0; } static int bfin_config_dma(struct adv7393fb_device *fbdev) { BUG_ON(!(fbdev->fb_mem)); set_dma_x_count(CH_PPI, fbdev->descriptor_list_head->x_count); set_dma_x_modify(CH_PPI, fbdev->descriptor_list_head->x_modify); set_dma_y_count(CH_PPI, fbdev->descriptor_list_head->y_count); set_dma_y_modify(CH_PPI, fbdev->descriptor_list_head->y_modify); set_dma_start_addr(CH_PPI, fbdev->descriptor_list_head->start_addr); set_dma_next_desc_addr(CH_PPI, 
fbdev->descriptor_list_head->next_desc_addr); set_dma_config(CH_PPI, fbdev->descriptor_list_head->cfg); return 1; } static void bfin_disable_dma(void) { bfin_write_DMA0_CONFIG(bfin_read_DMA0_CONFIG() & ~DMAEN); } static void bfin_config_ppi(struct adv7393fb_device *fbdev) { if (ANOMALY_05000183) { bfin_write_TIMER2_CONFIG(WDTH_CAP); bfin_write_TIMER_ENABLE(TIMEN2); } bfin_write_PPI_CONTROL(0x381E); bfin_write_PPI_FRAME(fbdev->modes[mode].tot_lines); bfin_write_PPI_COUNT(fbdev->modes[mode].xres + fbdev->modes[mode].boeft_blank - 1); bfin_write_PPI_DELAY(fbdev->modes[mode].aoeft_blank - 1); } static void bfin_enable_ppi(void) { bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() | PORT_EN); } static void bfin_disable_ppi(void) { bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() & ~PORT_EN); } static inline int adv7393_write(struct i2c_client *client, u8 reg, u8 value) { return i2c_smbus_write_byte_data(client, reg, value); } static inline int adv7393_read(struct i2c_client *client, u8 reg) { return i2c_smbus_read_byte_data(client, reg); } static int adv7393_write_block(struct i2c_client *client, const u8 *data, unsigned int len) { int ret = -1; u8 reg; while (len >= 2) { reg = *data++; ret = adv7393_write(client, reg, *data++); if (ret < 0) break; len -= 2; } return ret; } static int adv7393_mode(struct i2c_client *client, u16 mode) { switch (mode) { case POWER_ON: /* ADV7393 Sleep mode OFF */ adv7393_write(client, 0x00, 0x1E); break; case POWER_DOWN: /* ADV7393 Sleep mode ON */ adv7393_write(client, 0x00, 0x1F); break; case BLANK_OFF: /* Pixel Data Valid */ adv7393_write(client, 0x82, 0xCB); break; case BLANK_ON: /* Pixel Data Invalid */ adv7393_write(client, 0x82, 0x8B); break; default: return -EINVAL; break; } return 0; } static irqreturn_t ppi_irq_error(int irq, void *dev_id) { struct adv7393fb_device *fbdev = (struct adv7393fb_device *)dev_id; u16 status = bfin_read_PPI_STATUS(); pr_debug("%s: PPI Status = 0x%X\n", __func__, status); if (status) { bfin_disable_dma(); /* 
TODO: Check Sequence */ bfin_disable_ppi(); bfin_clear_PPI_STATUS(); bfin_config_dma(fbdev); bfin_enable_ppi(); } return IRQ_HANDLED; } static int proc_output(char *buf) { char *p = buf; p += sprintf(p, "Usage:\n" "echo 0x[REG][Value] > adv7393\n" "example: echo 0x1234 >adv7393\n" "writes 0x34 into Register 0x12\n"); return p - buf; } static ssize_t adv7393_read_proc(struct file *file, char __user *buf, size_t size, loff_t *ppos) { static const char message[] = "Usage:\n" "echo 0x[REG][Value] > adv7393\n" "example: echo 0x1234 >adv7393\n" "writes 0x34 into Register 0x12\n"; return simple_read_from_buffer(buf, size, ppos, message, sizeof(message)); } static ssize_t adv7393_write_proc(struct file *file, const char __user * buffer, size_t count, loff_t *ppos) { struct adv7393fb_device *fbdev = PDE_DATA(file_inode(file)); unsigned int val; int ret; ret = kstrtouint_from_user(buffer, count, 0, &val); if (ret) return -EFAULT; adv7393_write(fbdev->client, val >> 8, val & 0xff); return count; } static const struct file_operations fops = { .read = adv7393_read_proc, .write = adv7393_write_proc, .llseek = default_llseek, }; static int bfin_adv7393_fb_probe(struct i2c_client *client, const struct i2c_device_id *id) { int ret = 0; struct proc_dir_entry *entry; int num_modes = ARRAY_SIZE(known_modes); struct adv7393fb_device *fbdev = NULL; if (mem > 2) { dev_err(&client->dev, "mem out of allowed range [1;2]\n"); return -EINVAL; } if (mode > num_modes) { dev_err(&client->dev, "mode %d: not supported", mode); return -EFAULT; } fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); if (!fbdev) { dev_err(&client->dev, "failed to allocate device private record"); return -ENOMEM; } i2c_set_clientdata(client, fbdev); fbdev->modes = known_modes; fbdev->client = client; fbdev->fb_len = mem * fbdev->modes[mode].xres * fbdev->modes[mode].xres * (fbdev->modes[mode].bpp / 8); fbdev->line_len = fbdev->modes[mode].xres * (fbdev->modes[mode].bpp / 8); /* Workaround "PPI Does Not Start Properly In 
Specific Mode" */ if (ANOMALY_05000400) { ret = gpio_request_one(P_IDENT(P_PPI0_FS3), GPIOF_OUT_INIT_LOW, "PPI0_FS3") if (ret) { dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n"); ret = -EBUSY; goto free_fbdev; } } if (peripheral_request_list(ppi_pins, DRIVER_NAME)) { dev_err(&client->dev, "requesting PPI peripheral failed\n"); ret = -EFAULT; goto free_gpio; } fbdev->fb_mem = dma_alloc_coherent(NULL, fbdev->fb_len, &fbdev->dma_handle, GFP_KERNEL); if (NULL == fbdev->fb_mem) { dev_err(&client->dev, "couldn't allocate dma buffer (%d bytes)\n", (u32) fbdev->fb_len); ret = -ENOMEM; goto free_ppi_pins; } fbdev->info.screen_base = (void *)fbdev->fb_mem; bfin_adv7393_fb_fix.smem_start = (int)fbdev->fb_mem; bfin_adv7393_fb_fix.smem_len = fbdev->fb_len; bfin_adv7393_fb_fix.line_length = fbdev->line_len; if (mem > 1) bfin_adv7393_fb_fix.ypanstep = 1; bfin_adv7393_fb_defined.red.length = 5; bfin_adv7393_fb_defined.green.length = 6; bfin_adv7393_fb_defined.blue.length = 5; bfin_adv7393_fb_defined.xres = fbdev->modes[mode].xres; bfin_adv7393_fb_defined.yres = fbdev->modes[mode].yres; bfin_adv7393_fb_defined.xres_virtual = fbdev->modes[mode].xres; bfin_adv7393_fb_defined.yres_virtual = mem * fbdev->modes[mode].yres; bfin_adv7393_fb_defined.bits_per_pixel = fbdev->modes[mode].bpp; fbdev->info.fbops = &bfin_adv7393_fb_ops; fbdev->info.var = bfin_adv7393_fb_defined; fbdev->info.fix = bfin_adv7393_fb_fix; fbdev->info.par = &bfin_par; fbdev->info.flags = FBINFO_DEFAULT; fbdev->info.pseudo_palette = kzalloc(sizeof(u32) * 16, GFP_KERNEL); if (!fbdev->info.pseudo_palette) { dev_err(&client->dev, "failed to allocate pseudo_palette\n"); ret = -ENOMEM; goto free_fb_mem; } if (fb_alloc_cmap(&fbdev->info.cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0) < 0) { dev_err(&client->dev, "failed to allocate colormap (%d entries)\n", BFIN_LCD_NBR_PALETTE_ENTRIES); ret = -EFAULT; goto free_palette; } if (request_dma(CH_PPI, "BF5xx_PPI_DMA") < 0) { dev_err(&client->dev, "unable to request PPI DMA\n"); ret 
= -EFAULT; goto free_cmap; } if (request_irq(IRQ_PPI_ERROR, ppi_irq_error, 0, "PPI ERROR", fbdev) < 0) { dev_err(&client->dev, "unable to request PPI ERROR IRQ\n"); ret = -EFAULT; goto free_ch_ppi; } fbdev->open = 0; ret = adv7393_write_block(client, fbdev->modes[mode].adv7393_i2c_initd, fbdev->modes[mode].adv7393_i2c_initd_len); if (ret) { dev_err(&client->dev, "i2c attach: init error\n"); goto free_irq_ppi; } if (register_framebuffer(&fbdev->info) < 0) { dev_err(&client->dev, "unable to register framebuffer\n"); ret = -EFAULT; goto free_irq_ppi; } dev_info(&client->dev, "fb%d: %s frame buffer device\n", fbdev->info.node, fbdev->info.fix.id); dev_info(&client->dev, "fb memory address : 0x%p\n", fbdev->fb_mem); entry = proc_create_data("driver/adv7393", 0, NULL, &fops, fbdev); if (!entry) { dev_err(&client->dev, "unable to create /proc entry\n"); ret = -EFAULT; goto free_fb; } return 0; free_fb: unregister_framebuffer(&fbdev->info); free_irq_ppi: free_irq(IRQ_PPI_ERROR, fbdev); free_ch_ppi: free_dma(CH_PPI); free_cmap: fb_dealloc_cmap(&fbdev->info.cmap); free_palette: kfree(fbdev->info.pseudo_palette); free_fb_mem: dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem, fbdev->dma_handle); free_ppi_pins: peripheral_free_list(ppi_pins); free_gpio: if (ANOMALY_05000400) gpio_free(P_IDENT(P_PPI0_FS3)); free_fbdev: kfree(fbdev); return ret; } static int bfin_adv7393_fb_open(struct fb_info *info, int user) { struct adv7393fb_device *fbdev = to_adv7393fb_device(info); fbdev->info.screen_base = (void *)fbdev->fb_mem; if (!fbdev->info.screen_base) { dev_err(&fbdev->client->dev, "unable to map device\n"); return -ENOMEM; } fbdev->open = 1; dma_desc_list(fbdev, BUILD); adv7393_mode(fbdev->client, BLANK_OFF); bfin_config_ppi(fbdev); bfin_config_dma(fbdev); bfin_enable_ppi(); return 0; } static int bfin_adv7393_fb_release(struct fb_info *info, int user) { struct adv7393fb_device *fbdev = to_adv7393fb_device(info); adv7393_mode(fbdev->client, BLANK_ON); bfin_disable_dma(); 
bfin_disable_ppi(); dma_desc_list(fbdev, DESTRUCT); fbdev->open = 0; return 0; } static int bfin_adv7393_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { switch (var->bits_per_pixel) { case 16:/* DIRECTCOLOUR, 64k */ var->red.offset = info->var.red.offset; var->green.offset = info->var.green.offset; var->blue.offset = info->var.blue.offset; var->red.length = info->var.red.length; var->green.length = info->var.green.length; var->blue.length = info->var.blue.length; var->transp.offset = 0; var->transp.length = 0; var->transp.msb_right = 0; var->red.msb_right = 0; var->green.msb_right = 0; var->blue.msb_right = 0; break; default: pr_debug("%s: depth not supported: %u BPP\n", __func__, var->bits_per_pixel); return -EINVAL; } if (info->var.xres != var->xres || info->var.yres != var->yres || info->var.xres_virtual != var->xres_virtual || info->var.yres_virtual != var->yres_virtual) { pr_debug("%s: Resolution not supported: X%u x Y%u\n", __func__, var->xres, var->yres); return -EINVAL; } /* * Memory limit */ if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) { pr_debug("%s: Memory Limit requested yres_virtual = %u\n", __func__, var->yres_virtual); return -ENOMEM; } return 0; } static int bfin_adv7393_fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { int dy; u32 dmaaddr; struct adv7393fb_device *fbdev = to_adv7393fb_device(info); if (!var || !info) return -EINVAL; if (var->xoffset - info->var.xoffset) { /* No support for X panning for now! 
*/ return -EINVAL; } dy = var->yoffset - info->var.yoffset; if (dy) { pr_debug("%s: Panning screen of %d lines\n", __func__, dy); dmaaddr = fbdev->av1->start_addr; dmaaddr += (info->fix.line_length * dy); /* TODO: Wait for current frame to finished */ fbdev->av1->start_addr = (unsigned long)dmaaddr; fbdev->av2->start_addr = (unsigned long)dmaaddr + fbdev->line_len; } return 0; } /* 0 unblank, 1 blank, 2 no vsync, 3 no hsync, 4 off */ static int bfin_adv7393_fb_blank(int blank, struct fb_info *info) { struct adv7393fb_device *fbdev = to_adv7393fb_device(info); switch (blank) { case VESA_NO_BLANKING: /* Turn on panel */ adv7393_mode(fbdev->client, BLANK_OFF); break; case VESA_VSYNC_SUSPEND: case VESA_HSYNC_SUSPEND: case VESA_POWERDOWN: /* Turn off panel */ adv7393_mode(fbdev->client, BLANK_ON); break; default: return -EINVAL; break; } return 0; } int bfin_adv7393_fb_cursor(struct fb_info *info, struct fb_cursor *cursor) { if (nocursor) return 0; else return -EINVAL; /* just to force soft_cursor() call */ } static int bfin_adv7393_fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { if (regno >= BFIN_LCD_NBR_PALETTE_ENTRIES) return -EINVAL; if (info->var.grayscale) /* grayscale = 0.30*R + 0.59*G + 0.11*B */ red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8; if (info->fix.visual == FB_VISUAL_TRUECOLOR) { u32 value; /* Place color in the pseudopalette */ if (regno > 16) return -EINVAL; red >>= (16 - info->var.red.length); green >>= (16 - info->var.green.length); blue >>= (16 - info->var.blue.length); value = (red << info->var.red.offset) | (green << info->var.green.offset)| (blue << info->var.blue.offset); value &= 0xFFFF; ((u32 *) (info->pseudo_palette))[regno] = value; } return 0; } static int bfin_adv7393_fb_remove(struct i2c_client *client) { struct adv7393fb_device *fbdev = i2c_get_clientdata(client); adv7393_mode(client, POWER_DOWN); if (fbdev->fb_mem) dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem, 
fbdev->dma_handle); free_dma(CH_PPI); free_irq(IRQ_PPI_ERROR, fbdev); unregister_framebuffer(&fbdev->info); remove_proc_entry("driver/adv7393", NULL); fb_dealloc_cmap(&fbdev->info.cmap); kfree(fbdev->info.pseudo_palette); if (ANOMALY_05000400) gpio_free(P_IDENT(P_PPI0_FS3)); /* FS3 */ peripheral_free_list(ppi_pins); kfree(fbdev); return 0; } #ifdef CONFIG_PM static int bfin_adv7393_fb_suspend(struct device *dev) { struct adv7393fb_device *fbdev = dev_get_drvdata(dev); if (fbdev->open) { bfin_disable_dma(); bfin_disable_ppi(); dma_desc_list(fbdev, DESTRUCT); } adv7393_mode(fbdev->client, POWER_DOWN); return 0; } static int bfin_adv7393_fb_resume(struct device *dev) { struct adv7393fb_device *fbdev = dev_get_drvdata(dev); adv7393_mode(fbdev->client, POWER_ON); if (fbdev->open) { dma_desc_list(fbdev, BUILD); bfin_config_ppi(fbdev); bfin_config_dma(fbdev); bfin_enable_ppi(); } return 0; } static const struct dev_pm_ops bfin_adv7393_dev_pm_ops = { .suspend = bfin_adv7393_fb_suspend, .resume = bfin_adv7393_fb_resume, }; #endif static const struct i2c_device_id bfin_adv7393_id[] = { {DRIVER_NAME, 0}, {} }; MODULE_DEVICE_TABLE(i2c, bfin_adv7393_id); static struct i2c_driver bfin_adv7393_fb_driver = { .driver = { .name = DRIVER_NAME, #ifdef CONFIG_PM .pm = &bfin_adv7393_dev_pm_ops, #endif }, .probe = bfin_adv7393_fb_probe, .remove = bfin_adv7393_fb_remove, .id_table = bfin_adv7393_id, }; static int __init bfin_adv7393_fb_driver_init(void) { #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) request_module("i2c-bfin-twi"); #else request_module("i2c-gpio"); #endif return i2c_add_driver(&bfin_adv7393_fb_driver); } module_init(bfin_adv7393_fb_driver_init); static void __exit bfin_adv7393_fb_driver_cleanup(void) { i2c_del_driver(&bfin_adv7393_fb_driver); } module_exit(bfin_adv7393_fb_driver_cleanup); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("Frame buffer driver for ADV7393/2 Video 
Encoder"); module_param(mode, int, 0); MODULE_PARM_DESC(mode, "Video Mode (0=NTSC,1=PAL,2=NTSC 640x480,3=PAL 640x480,4=NTSC YCbCr input,5=PAL YCbCr input)"); module_param(mem, int, 0); MODULE_PARM_DESC(mem, "Size of frame buffer memory 1=Single 2=Double Size (allows y-panning / frame stacking)"); module_param(nocursor, int, 0644); MODULE_PARM_DESC(nocursor, "cursor enable/disable");
gpl-2.0
padovan/bluetooth-next
drivers/staging/vt6656/int.c
3386
6368
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * File: int.c * * Purpose: Handle USB interrupt endpoint * * Author: Jerry Chen * * Date: Apr. 2, 2004 * * Functions: * * Revision History: * 04-02-2004 Jerry Chen: Initial release * */ #include "int.h" #include "mib.h" #include "tmacro.h" #include "mac.h" #include "power.h" #include "bssdb.h" #include "usbpipe.h" /*--------------------- Static Definitions -------------------------*/ /* static int msglevel = MSG_LEVEL_DEBUG; */ static int msglevel = MSG_LEVEL_INFO; /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ /*--------------------- Static Functions --------------------------*/ /*--------------------- Export Variables --------------------------*/ /*--------------------- Export Functions --------------------------*/ /*+ * * Function: InterruptPollingThread * * Synopsis: Thread running at IRQL PASSIVE_LEVEL. 
* * Arguments: Device Extension * * Returns: * * Algorithm: Call USBD for input data; * * History: dd-mm-yyyy Author Comment * * * Notes: * * USB reads are by nature 'Blocking', and when in a read, the device looks * like it's in a 'stall' condition, so we deliberately time out every second * if we've gotten no data * -*/ void INTvWorkItem(void *Context) { PSDevice pDevice = (PSDevice) Context; int ntStatus; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Interrupt Polling Thread\n"); spin_lock_irq(&pDevice->lock); if (pDevice->fKillEventPollingThread != TRUE) ntStatus = PIPEnsInterruptRead(pDevice); spin_unlock_irq(&pDevice->lock); } int INTnsProcessData(PSDevice pDevice) { int status = STATUS_SUCCESS; PSINTData pINTData; PSMgmtObject pMgmt = &(pDevice->sMgmtObj); struct net_device_stats *pStats = &pDevice->stats; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsInterruptProcessData\n"); pINTData = (PSINTData) pDevice->intBuf.pDataBuf; if (pINTData->byTSR0 & TSR_VALID) { STAvUpdateTDStatCounter(&(pDevice->scStatistic), (BYTE) (pINTData->byPkt0 & 0x0F), (BYTE) (pINTData->byPkt0>>4), pINTData->byTSR0); BSSvUpdateNodeTxCounter(pDevice, &(pDevice->scStatistic), pINTData->byTSR0, pINTData->byPkt0); /*DBG_PRN_GRP01(("TSR0 %02x\n", pINTData->byTSR0));*/ } if (pINTData->byTSR1 & TSR_VALID) { STAvUpdateTDStatCounter(&(pDevice->scStatistic), (BYTE) (pINTData->byPkt1 & 0x0F), (BYTE) (pINTData->byPkt1>>4), pINTData->byTSR1); BSSvUpdateNodeTxCounter(pDevice, &(pDevice->scStatistic), pINTData->byTSR1, pINTData->byPkt1); /*DBG_PRN_GRP01(("TSR1 %02x\n", pINTData->byTSR1));*/ } if (pINTData->byTSR2 & TSR_VALID) { STAvUpdateTDStatCounter(&(pDevice->scStatistic), (BYTE) (pINTData->byPkt2 & 0x0F), (BYTE) (pINTData->byPkt2>>4), pINTData->byTSR2); BSSvUpdateNodeTxCounter(pDevice, &(pDevice->scStatistic), pINTData->byTSR2, pINTData->byPkt2); /*DBG_PRN_GRP01(("TSR2 %02x\n", pINTData->byTSR2));*/ } if (pINTData->byTSR3 & TSR_VALID) { STAvUpdateTDStatCounter(&(pDevice->scStatistic), (BYTE) 
(pINTData->byPkt3 & 0x0F), (BYTE) (pINTData->byPkt3>>4), pINTData->byTSR3); BSSvUpdateNodeTxCounter(pDevice, &(pDevice->scStatistic), pINTData->byTSR3, pINTData->byPkt3); /*DBG_PRN_GRP01(("TSR3 %02x\n", pINTData->byTSR3));*/ } if (pINTData->byISR0 != 0) { if (pINTData->byISR0 & ISR_BNTX) { if (pDevice->eOPMode == OP_MODE_AP) { if (pMgmt->byDTIMCount > 0) { pMgmt->byDTIMCount--; pMgmt->sNodeDBTable[0].bRxPSPoll = FALSE; } else if (pMgmt->byDTIMCount == 0) { /* check if mutltcast tx bufferring */ pMgmt->byDTIMCount = pMgmt->byDTIMPeriod-1; pMgmt->sNodeDBTable[0].bRxPSPoll = TRUE; if (pMgmt->sNodeDBTable[0].bPSEnable) bScheduleCommand((void *) pDevice, WLAN_CMD_RX_PSPOLL, NULL); } bScheduleCommand((void *) pDevice, WLAN_CMD_BECON_SEND, NULL); } /* if (pDevice->eOPMode == OP_MODE_AP) */ pDevice->bBeaconSent = TRUE; } else { pDevice->bBeaconSent = FALSE; } if (pINTData->byISR0 & ISR_TBTT) { if (pDevice->bEnablePSMode) bScheduleCommand((void *) pDevice, WLAN_CMD_TBTT_WAKEUP, NULL); if (pDevice->bChannelSwitch) { pDevice->byChannelSwitchCount--; if (pDevice->byChannelSwitchCount == 0) bScheduleCommand((void *) pDevice, WLAN_CMD_11H_CHSW, NULL); } } LODWORD(pDevice->qwCurrTSF) = pINTData->dwLoTSF; HIDWORD(pDevice->qwCurrTSF) = pINTData->dwHiTSF; /*DBG_PRN_GRP01(("ISR0 = %02x , LoTsf = %08x, HiTsf = %08x\n", pINTData->byISR0, pINTData->dwLoTSF, pINTData->dwHiTSF)); */ STAvUpdate802_11Counter(&pDevice->s802_11Counter, &pDevice->scStatistic, pINTData->byRTSSuccess, pINTData->byRTSFail, pINTData->byACKFail, pINTData->byFCSErr); STAvUpdateIsrStatCounter(&pDevice->scStatistic, pINTData->byISR0, pINTData->byISR1); } if (pINTData->byISR1 != 0) if (pINTData->byISR1 & ISR_GPIO3) bScheduleCommand((void *) pDevice, WLAN_CMD_RADIO, NULL); pDevice->intBuf.uDataLen = 0; pDevice->intBuf.bInUse = FALSE; pStats->tx_packets = pDevice->scStatistic.ullTsrOK; pStats->tx_bytes = pDevice->scStatistic.ullTxDirectedBytes + pDevice->scStatistic.ullTxMulticastBytes + 
pDevice->scStatistic.ullTxBroadcastBytes; pStats->tx_errors = pDevice->scStatistic.dwTsrErr; pStats->tx_dropped = pDevice->scStatistic.dwTsrErr; return status; }
gpl-2.0
thicklizard/Komodo1
drivers/staging/comedi/drivers/das16m1.c
3386
21632
/* comedi/drivers/das16m1.c CIO-DAS16/M1 driver Author: Frank Mori Hess, based on code from the das16 driver. Copyright (C) 2001 Frank Mori Hess <fmhess@users.sourceforge.net> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ************************************************************************ */ /* Driver: das16m1 Description: CIO-DAS16/M1 Author: Frank Mori Hess <fmhess@users.sourceforge.net> Devices: [Measurement Computing] CIO-DAS16/M1 (cio-das16/m1) Status: works This driver supports a single board - the CIO-DAS16/M1. As far as I know, there are no other boards that have the same register layout. Even the CIO-DAS16/M1/16 is significantly different. I was _barely_ able to reach the full 1 MHz capability of this board, using a hard real-time interrupt (set the TRIG_RT flag in your struct comedi_cmd and use rtlinux or RTAI). The board can't do dma, so the bottleneck is pulling the data across the ISA bus. I timed the interrupt handler, and it took my computer ~470 microseconds to pull 512 samples from the board. So at 1 Mhz sampling rate, expect your CPU to be spending almost all of its time in the interrupt handler. This board has some unusual restrictions for its channel/gain list. 
If the list has 2 or more channels in it, then two conditions must be satisfied: (1) - even/odd channels must appear at even/odd indices in the list (2) - the list must have an even number of entries. Options: [0] - base io address [1] - irq (optional, but you probably want it) irq can be omitted, although the cmd interface will not work without it. */ #include <linux/ioport.h> #include <linux/interrupt.h> #include "../comedidev.h" #include "8255.h" #include "8253.h" #include "comedi_fc.h" #define DAS16M1_SIZE 16 #define DAS16M1_SIZE2 8 #define DAS16M1_XTAL 100 /* 10 MHz master clock */ #define FIFO_SIZE 1024 /* 1024 sample fifo */ /* CIO-DAS16_M1.pdf "cio-das16/m1" 0 a/d bits 0-3, mux start 12 bit 1 a/d bits 4-11 unused 2 status control 3 di 4 bit do 4 bit 4 unused clear interrupt 5 interrupt, pacer 6 channel/gain queue address 7 channel/gain queue data 89ab 8254 cdef 8254 400 8255 404-407 8254 */ #define DAS16M1_AI 0 /* 16-bit wide register */ #define AI_CHAN(x) ((x) & 0xf) #define DAS16M1_CS 2 #define EXT_TRIG_BIT 0x1 #define OVRUN 0x20 #define IRQDATA 0x80 #define DAS16M1_DIO 3 #define DAS16M1_CLEAR_INTR 4 #define DAS16M1_INTR_CONTROL 5 #define EXT_PACER 0x2 #define INT_PACER 0x3 #define PACER_MASK 0x3 #define INTE 0x80 #define DAS16M1_QUEUE_ADDR 6 #define DAS16M1_QUEUE_DATA 7 #define Q_CHAN(x) ((x) & 0x7) #define Q_RANGE(x) (((x) & 0xf) << 4) #define UNIPOLAR 0x40 #define DAS16M1_8254_FIRST 0x8 #define DAS16M1_8254_FIRST_CNTRL 0xb #define TOTAL_CLEAR 0x30 #define DAS16M1_8254_SECOND 0xc #define DAS16M1_82C55 0x400 #define DAS16M1_8254_THIRD 0x404 static const struct comedi_lrange range_das16m1 = { 9, { BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), BIP_RANGE(0.625), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25), BIP_RANGE(10), } }; static int das16m1_do_wbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das16m1_di_rbits(struct comedi_device *dev, struct comedi_subdevice *s, 
struct comedi_insn *insn, unsigned int *data); static int das16m1_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das16m1_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int das16m1_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s); static int das16m1_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static int das16m1_poll(struct comedi_device *dev, struct comedi_subdevice *s); static irqreturn_t das16m1_interrupt(int irq, void *d); static void das16m1_handler(struct comedi_device *dev, unsigned int status); static unsigned int das16m1_set_pacer(struct comedi_device *dev, unsigned int ns, int round_flag); static int das16m1_irq_bits(unsigned int irq); struct das16m1_board { const char *name; unsigned int ai_speed; }; static const struct das16m1_board das16m1_boards[] = { { .name = "cio-das16/m1", /* CIO-DAS16_M1.pdf */ .ai_speed = 1000, /* 1MHz max speed */ }, }; static int das16m1_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int das16m1_detach(struct comedi_device *dev); static struct comedi_driver driver_das16m1 = { .driver_name = "das16m1", .module = THIS_MODULE, .attach = das16m1_attach, .detach = das16m1_detach, .board_name = &das16m1_boards[0].name, .num_names = ARRAY_SIZE(das16m1_boards), .offset = sizeof(das16m1_boards[0]), }; struct das16m1_private_struct { unsigned int control_state; volatile unsigned int adc_count; /* number of samples completed */ /* initial value in lower half of hardware conversion counter, * needed to keep track of whether new count has been loaded into * counter yet (loaded by first sample conversion) */ u16 initial_hw_count; short ai_buffer[FIFO_SIZE]; unsigned int do_bits; /* saves status of digital output bits */ unsigned int divisor1; /* divides master clock to obtain conversion speed */ unsigned int divisor2; /* divides master clock to obtain conversion 
speed */ }; #define devpriv ((struct das16m1_private_struct *)(dev->private)) #define thisboard ((const struct das16m1_board *)(dev->board_ptr)) static int __init driver_das16m1_init_module(void) { return comedi_driver_register(&driver_das16m1); } static void __exit driver_das16m1_cleanup_module(void) { comedi_driver_unregister(&driver_das16m1); } module_init(driver_das16m1_init_module); module_exit(driver_das16m1_cleanup_module); static inline short munge_sample(short data) { return (data >> 4) & 0xfff; } static int das16m1_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { unsigned int err = 0, tmp, i; /* make sure triggers are valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW | TRIG_EXT; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_FOLLOW; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_TIMER | TRIG_EXT; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: make sure trigger sources are unique and mutually compatible */ if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE) err++; if (cmd->start_src != TRIG_NOW && cmd->start_src != TRIG_EXT) err++; if (cmd->convert_src != TRIG_TIMER && cmd->convert_src != TRIG_EXT) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->scan_begin_src == TRIG_FOLLOW) { /* internal trigger */ if (cmd->scan_begin_arg != 0) { cmd->scan_begin_arg = 0; err++; } } if (cmd->convert_src == TRIG_TIMER) { if (cmd->convert_arg < thisboard->ai_speed) { cmd->convert_arg = thisboard->ai_speed; err++; } } if 
(cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } if (cmd->stop_src == TRIG_COUNT) { /* any count is allowed */ } else { /* TRIG_NONE */ if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } } if (err) return 3; /* step 4: fix up arguments */ if (cmd->convert_src == TRIG_TIMER) { tmp = cmd->convert_arg; /* calculate counter values that give desired timing */ i8253_cascade_ns_to_timer_2div(DAS16M1_XTAL, &(devpriv->divisor1), &(devpriv->divisor2), &(cmd->convert_arg), cmd->flags & TRIG_ROUND_MASK); if (tmp != cmd->convert_arg) err++; } if (err) return 4; /* check chanlist against board's peculiarities */ if (cmd->chanlist && cmd->chanlist_len > 1) { for (i = 0; i < cmd->chanlist_len; i++) { /* even/odd channels must go into even/odd queue addresses */ if ((i % 2) != (CR_CHAN(cmd->chanlist[i]) % 2)) { comedi_error(dev, "bad chanlist:\n" " even/odd channels must go have even/odd chanlist indices"); err++; } } if ((cmd->chanlist_len % 2) != 0) { comedi_error(dev, "chanlist must be of even length or length 1"); err++; } } if (err) return 5; return 0; } static int das16m1_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int byte, i; if (dev->irq == 0) { comedi_error(dev, "irq required to execute comedi_cmd"); return -1; } /* disable interrupts and internal pacer */ devpriv->control_state &= ~INTE & ~PACER_MASK; outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL); /* set software count */ devpriv->adc_count = 0; /* Initialize lower half of hardware counter, used to determine how * many samples are in fifo. 
Value doesn't actually load into counter * until counter's next clock (the next a/d conversion) */ i8254_load(dev->iobase + DAS16M1_8254_FIRST, 0, 1, 0, 2); /* remember current reading of counter so we know when counter has * actually been loaded */ devpriv->initial_hw_count = i8254_read(dev->iobase + DAS16M1_8254_FIRST, 0, 1); /* setup channel/gain queue */ for (i = 0; i < cmd->chanlist_len; i++) { outb(i, dev->iobase + DAS16M1_QUEUE_ADDR); byte = Q_CHAN(CR_CHAN(cmd->chanlist[i])) | Q_RANGE(CR_RANGE(cmd->chanlist[i])); outb(byte, dev->iobase + DAS16M1_QUEUE_DATA); } /* set counter mode and counts */ cmd->convert_arg = das16m1_set_pacer(dev, cmd->convert_arg, cmd->flags & TRIG_ROUND_MASK); /* set control & status register */ byte = 0; /* if we are using external start trigger (also board dislikes having * both start and conversion triggers external simultaneously) */ if (cmd->start_src == TRIG_EXT && cmd->convert_src != TRIG_EXT) { byte |= EXT_TRIG_BIT; } outb(byte, dev->iobase + DAS16M1_CS); /* clear interrupt bit */ outb(0, dev->iobase + DAS16M1_CLEAR_INTR); /* enable interrupts and internal pacer */ devpriv->control_state &= ~PACER_MASK; if (cmd->convert_src == TRIG_TIMER) { devpriv->control_state |= INT_PACER; } else { devpriv->control_state |= EXT_PACER; } devpriv->control_state |= INTE; outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL); return 0; } static int das16m1_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { devpriv->control_state &= ~INTE & ~PACER_MASK; outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL); return 0; } static int das16m1_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i, n; int byte; const int timeout = 1000; /* disable interrupts and internal pacer */ devpriv->control_state &= ~INTE & ~PACER_MASK; outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL); /* setup channel/gain queue */ outb(0, dev->iobase + 
DAS16M1_QUEUE_ADDR); byte = Q_CHAN(CR_CHAN(insn->chanspec)) | Q_RANGE(CR_RANGE(insn->chanspec)); outb(byte, dev->iobase + DAS16M1_QUEUE_DATA); for (n = 0; n < insn->n; n++) { /* clear IRQDATA bit */ outb(0, dev->iobase + DAS16M1_CLEAR_INTR); /* trigger conversion */ outb(0, dev->iobase); for (i = 0; i < timeout; i++) { if (inb(dev->iobase + DAS16M1_CS) & IRQDATA) break; } if (i == timeout) { comedi_error(dev, "timeout"); return -ETIME; } data[n] = munge_sample(inw(dev->iobase)); } return n; } static int das16m1_di_rbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int bits; bits = inb(dev->iobase + DAS16M1_DIO) & 0xf; data[1] = bits; data[0] = 0; return 2; } static int das16m1_do_wbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int wbits; /* only set bits that have been masked */ data[0] &= 0xf; wbits = devpriv->do_bits; /* zero bits that have been masked */ wbits &= ~data[0]; /* set masked bits */ wbits |= data[0] & data[1]; devpriv->do_bits = wbits; data[1] = wbits; outb(devpriv->do_bits, dev->iobase + DAS16M1_DIO); return 2; } static int das16m1_poll(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned long flags; unsigned int status; /* prevent race with interrupt handler */ spin_lock_irqsave(&dev->spinlock, flags); status = inb(dev->iobase + DAS16M1_CS); das16m1_handler(dev, status); spin_unlock_irqrestore(&dev->spinlock, flags); return s->async->buf_write_count - s->async->buf_read_count; } static irqreturn_t das16m1_interrupt(int irq, void *d) { int status; struct comedi_device *dev = d; if (dev->attached == 0) { comedi_error(dev, "premature interrupt"); return IRQ_HANDLED; } /* prevent race with comedi_poll() */ spin_lock(&dev->spinlock); status = inb(dev->iobase + DAS16M1_CS); if ((status & (IRQDATA | OVRUN)) == 0) { comedi_error(dev, "spurious interrupt"); spin_unlock(&dev->spinlock); return IRQ_NONE; } 
das16m1_handler(dev, status); /* clear interrupt */ outb(0, dev->iobase + DAS16M1_CLEAR_INTR); spin_unlock(&dev->spinlock); return IRQ_HANDLED; } static void munge_sample_array(short *array, unsigned int num_elements) { unsigned int i; for (i = 0; i < num_elements; i++) { array[i] = munge_sample(array[i]); } } static void das16m1_handler(struct comedi_device *dev, unsigned int status) { struct comedi_subdevice *s; struct comedi_async *async; struct comedi_cmd *cmd; u16 num_samples; u16 hw_counter; s = dev->read_subdev; async = s->async; async->events = 0; cmd = &async->cmd; /* figure out how many samples are in fifo */ hw_counter = i8254_read(dev->iobase + DAS16M1_8254_FIRST, 0, 1); /* make sure hardware counter reading is not bogus due to initial value * not having been loaded yet */ if (devpriv->adc_count == 0 && hw_counter == devpriv->initial_hw_count) { num_samples = 0; } else { /* The calculation of num_samples looks odd, but it uses the following facts. * 16 bit hardware counter is initialized with value of zero (which really * means 0x1000). The counter decrements by one on each conversion * (when the counter decrements from zero it goes to 0xffff). num_samples * is a 16 bit variable, so it will roll over in a similar fashion to the * hardware counter. Work it out, and this is what you get. 
*/ num_samples = -hw_counter - devpriv->adc_count; } /* check if we only need some of the points */ if (cmd->stop_src == TRIG_COUNT) { if (num_samples > cmd->stop_arg * cmd->chanlist_len) num_samples = cmd->stop_arg * cmd->chanlist_len; } /* make sure we dont try to get too many points if fifo has overrun */ if (num_samples > FIFO_SIZE) num_samples = FIFO_SIZE; insw(dev->iobase, devpriv->ai_buffer, num_samples); munge_sample_array(devpriv->ai_buffer, num_samples); cfc_write_array_to_buffer(s, devpriv->ai_buffer, num_samples * sizeof(short)); devpriv->adc_count += num_samples; if (cmd->stop_src == TRIG_COUNT) { if (devpriv->adc_count >= cmd->stop_arg * cmd->chanlist_len) { /* end of acquisition */ das16m1_cancel(dev, s); async->events |= COMEDI_CB_EOA; } } /* this probably won't catch overruns since the card doesn't generate * overrun interrupts, but we might as well try */ if (status & OVRUN) { das16m1_cancel(dev, s); async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_error(dev, "fifo overflow"); } comedi_event(dev, s); } /* This function takes a time in nanoseconds and sets the * * 2 pacer clocks to the closest frequency possible. It also * * returns the actual sampling period. 
*/ static unsigned int das16m1_set_pacer(struct comedi_device *dev, unsigned int ns, int rounding_flags) { i8253_cascade_ns_to_timer_2div(DAS16M1_XTAL, &(devpriv->divisor1), &(devpriv->divisor2), &ns, rounding_flags & TRIG_ROUND_MASK); /* Write the values of ctr1 and ctr2 into counters 1 and 2 */ i8254_load(dev->iobase + DAS16M1_8254_SECOND, 0, 1, devpriv->divisor1, 2); i8254_load(dev->iobase + DAS16M1_8254_SECOND, 0, 2, devpriv->divisor2, 2); return ns; } static int das16m1_irq_bits(unsigned int irq) { int ret; switch (irq) { case 10: ret = 0x0; break; case 11: ret = 0x1; break; case 12: ret = 0x2; break; case 15: ret = 0x3; break; case 2: ret = 0x4; break; case 3: ret = 0x5; break; case 5: ret = 0x6; break; case 7: ret = 0x7; break; default: return -1; break; } return ret << 4; } /* * Options list: * 0 I/O base * 1 IRQ */ static int das16m1_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int ret; unsigned int irq; unsigned long iobase; iobase = it->options[0]; printk("comedi%d: das16m1:", dev->minor); ret = alloc_private(dev, sizeof(struct das16m1_private_struct)); if (ret < 0) return ret; dev->board_name = thisboard->name; printk(" io 0x%lx-0x%lx 0x%lx-0x%lx", iobase, iobase + DAS16M1_SIZE, iobase + DAS16M1_82C55, iobase + DAS16M1_82C55 + DAS16M1_SIZE2); if (!request_region(iobase, DAS16M1_SIZE, driver_das16m1.driver_name)) { printk(" I/O port conflict\n"); return -EIO; } if (!request_region(iobase + DAS16M1_82C55, DAS16M1_SIZE2, driver_das16m1.driver_name)) { release_region(iobase, DAS16M1_SIZE); printk(" I/O port conflict\n"); return -EIO; } dev->iobase = iobase; /* now for the irq */ irq = it->options[1]; /* make sure it is valid */ if (das16m1_irq_bits(irq) >= 0) { ret = request_irq(irq, das16m1_interrupt, 0, driver_das16m1.driver_name, dev); if (ret < 0) { printk(", irq unavailable\n"); return ret; } dev->irq = irq; printk(", irq %u\n", irq); } else if (irq == 0) { printk(", no irq\n"); } else { printk(", invalid 
irq\n" " valid irqs are 2, 3, 5, 7, 10, 11, 12, or 15\n"); return -EINVAL; } ret = alloc_subdevices(dev, 4); if (ret < 0) return ret; s = dev->subdevices + 0; dev->read_subdev = s; /* ai */ s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_CMD_READ; s->n_chan = 8; s->subdev_flags = SDF_DIFF; s->len_chanlist = 256; s->maxdata = (1 << 12) - 1; s->range_table = &range_das16m1; s->insn_read = das16m1_ai_rinsn; s->do_cmdtest = das16m1_cmd_test; s->do_cmd = das16m1_cmd_exec; s->cancel = das16m1_cancel; s->poll = das16m1_poll; s = dev->subdevices + 1; /* di */ s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE; s->n_chan = 4; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = das16m1_di_rbits; s = dev->subdevices + 2; /* do */ s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = 4; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = das16m1_do_wbits; s = dev->subdevices + 3; /* 8255 */ subdev_8255_init(dev, s, NULL, dev->iobase + DAS16M1_82C55); /* disable upper half of hardware conversion counter so it doesn't mess with us */ outb(TOTAL_CLEAR, dev->iobase + DAS16M1_8254_FIRST_CNTRL); /* initialize digital output lines */ outb(devpriv->do_bits, dev->iobase + DAS16M1_DIO); /* set the interrupt level */ if (dev->irq) devpriv->control_state = das16m1_irq_bits(dev->irq); else devpriv->control_state = 0; outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL); return 0; } static int das16m1_detach(struct comedi_device *dev) { printk("comedi%d: das16m1: remove\n", dev->minor); /* das16m1_reset(dev); */ if (dev->subdevices) subdev_8255_cleanup(dev, dev->subdevices + 3); if (dev->irq) free_irq(dev->irq, dev); if (dev->iobase) { release_region(dev->iobase, DAS16M1_SIZE); release_region(dev->iobase + DAS16M1_82C55, DAS16M1_SIZE2); } return 0; } MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
TEAM-RAZOR-DEVICES/kernel_cyanogen_msm8916
arch/mips/txx9/generic/setup_tx3927.c
4154
3728
/* * TX3927 setup routines * Based on linux/arch/mips/txx9/jmr3927/setup.c * * Copyright 2001 MontaVista Software Inc. * Copyright (C) 2000-2001 Toshiba Corporation * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org) * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/param.h> #include <linux/io.h> #include <linux/mtd/physmap.h> #include <asm/mipsregs.h> #include <asm/txx9irq.h> #include <asm/txx9tmr.h> #include <asm/txx9pio.h> #include <asm/txx9/generic.h> #include <asm/txx9/tx3927.h> void __init tx3927_wdt_init(void) { txx9_wdt_init(TX3927_TMR_REG(2)); } void __init tx3927_setup(void) { int i; unsigned int conf; txx9_reg_res_init(TX3927_REV_PCODE(), TX3927_REG_BASE, TX3927_REG_SIZE); /* SDRAMC,ROMC are configured by PROM */ for (i = 0; i < 8; i++) { if (!(tx3927_romcptr->cr[i] & 0x8)) continue; /* disabled */ txx9_ce_res[i].start = (unsigned long)TX3927_ROMC_BA(i); txx9_ce_res[i].end = txx9_ce_res[i].start + TX3927_ROMC_SIZE(i) - 1; request_resource(&iomem_resource, &txx9_ce_res[i]); } /* clocks */ txx9_gbus_clock = txx9_cpu_clock / 2; /* change default value to udelay/mdelay take reasonable time */ loops_per_jiffy = txx9_cpu_clock / HZ / 2; /* CCFG */ /* enable Timeout BusError */ if (txx9_ccfg_toeon) tx3927_ccfgptr->ccfg |= TX3927_CCFG_TOE; /* clear BusErrorOnWrite flag */ tx3927_ccfgptr->ccfg &= ~TX3927_CCFG_BEOW; if (read_c0_conf() & TX39_CONF_WBON) /* Disable PCI snoop */ tx3927_ccfgptr->ccfg &= ~TX3927_CCFG_PSNP; else /* Enable PCI SNOOP - with write through only */ tx3927_ccfgptr->ccfg |= TX3927_CCFG_PSNP; /* do reset on watchdog */ tx3927_ccfgptr->ccfg |= TX3927_CCFG_WR; printk(KERN_INFO "TX3927 -- CRIR:%08lx CCFG:%08lx PCFG:%08lx\n", tx3927_ccfgptr->crir, tx3927_ccfgptr->ccfg, tx3927_ccfgptr->pcfg); /* TMR */ for (i = 0; i < TX3927_NR_TMR; 
i++) txx9_tmr_init(TX3927_TMR_REG(i)); /* DMA */ tx3927_dmaptr->mcr = 0; for (i = 0; i < ARRAY_SIZE(tx3927_dmaptr->ch); i++) { /* reset channel */ tx3927_dmaptr->ch[i].ccr = TX3927_DMA_CCR_CHRST; tx3927_dmaptr->ch[i].ccr = 0; } /* enable DMA */ #ifdef __BIG_ENDIAN tx3927_dmaptr->mcr = TX3927_DMA_MCR_MSTEN; #else tx3927_dmaptr->mcr = TX3927_DMA_MCR_MSTEN | TX3927_DMA_MCR_LE; #endif /* PIO */ __raw_writel(0, &tx3927_pioptr->maskcpu); __raw_writel(0, &tx3927_pioptr->maskext); txx9_gpio_init(TX3927_PIO_REG, 0, 16); conf = read_c0_conf(); if (conf & TX39_CONF_DCE) { if (!(conf & TX39_CONF_WBON)) pr_info("TX3927 D-Cache WriteThrough.\n"); else if (!(conf & TX39_CONF_CWFON)) pr_info("TX3927 D-Cache WriteBack.\n"); else pr_info("TX3927 D-Cache WriteBack (CWF) .\n"); } } void __init tx3927_time_init(unsigned int evt_tmrnr, unsigned int src_tmrnr) { txx9_clockevent_init(TX3927_TMR_REG(evt_tmrnr), TXX9_IRQ_BASE + TX3927_IR_TMR(evt_tmrnr), TXX9_IMCLK); txx9_clocksource_init(TX3927_TMR_REG(src_tmrnr), TXX9_IMCLK); } void __init tx3927_sio_init(unsigned int sclk, unsigned int cts_mask) { int i; for (i = 0; i < 2; i++) txx9_sio_init(TX3927_SIO_REG(i), TXX9_IRQ_BASE + TX3927_IR_SIO(i), i, sclk, (1 << i) & cts_mask); } void __init tx3927_mtd_init(int ch) { struct physmap_flash_data pdata = { .width = TX3927_ROMC_WIDTH(ch) / 8, }; unsigned long start = txx9_ce_res[ch].start; unsigned long size = txx9_ce_res[ch].end - start + 1; if (!(tx3927_romcptr->cr[ch] & 0x8)) return; /* disabled */ txx9_physmap_flash_init(ch, start, size, &pdata); }
gpl-2.0
InfinitiveOS-Devices/android_kernel_lge_hammerhead
arch/sh/boards/board-secureedge5410.c
4666
1764
/*
 * Copyright (C) 2002 David McCullough <davidm@snapgear.com>
 * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
 *
 * Based on files with the following comments:
 *
 * Copyright (C) 2000 Kazumoto Kojima
 *
 * Modified for 7751 Solution Engine by
 * Ian da Silva and Jeremy Siegel, 2001.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/machvec.h>
#include <mach/secureedge5410.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <cpu/timer.h>

/* Shadow of the board I/O port state; exported for board peripheral code. */
unsigned short secureedge5410_ioport;

/*
 * EraseConfig handling functions
 */

/*
 * IRQ handler for the front-panel "EraseConfig" switch.  Only logs the
 * event; any actual configuration erase is left to userspace.
 */
static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
{
	printk("SnapGear: erase switch interrupt!\n");

	return IRQ_HANDLED;
}

/*
 * Register the EraseConfig switch interrupt (external IRQ 0, event 0x240).
 * Always returns 0: the board boots fine without the switch IRQ, so a
 * registration failure is only logged.
 */
static int __init eraseconfig_init(void)
{
	unsigned int irq = evt2irq(0x240);

	printk("SnapGear: EraseConfig init\n");

	/* Setup "EraseConfig" switch on external IRQ 0 */
	if (request_irq(irq, eraseconfig_interrupt, 0, "Erase Config", NULL))
		/* Fixed message: it previously read "for Reset witch". */
		printk("SnapGear: failed to register IRQ%d for EraseConfig switch\n",
		       irq);
	else
		printk("SnapGear: registered EraseConfig switch on IRQ%d\n",
		       irq);
	return 0;
}
module_init(eraseconfig_init);

/*
 * Initialize IRQ setting
 *
 * IRL0 = erase switch
 * IRL1 = eth0
 * IRL2 = eth1
 * IRL3 = crypto
 */
static void __init init_snapgear_IRQ(void)
{
	printk("Setup SnapGear IRQ/IPR ...\n");
	/* enable individual interrupt mode for externals */
	plat_irq_setup_pins(IRQ_MODE_IRQ);
}

/*
 * The Machine Vector
 */
static struct sh_machine_vector mv_snapgear __initmv = {
	.mv_name	= "SnapGear SecureEdge5410",
	.mv_nr_irqs	= 72,
	.mv_init_irq	= init_snapgear_IRQ,
};
gpl-2.0
SunRain/kernel_evita
drivers/staging/tidspbridge/gen/uuidutil.c
4922
2593
/* * uuidutil.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * This file contains the implementation of UUID helper functions. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/types.h> /* ----------------------------------- Host OS */ #include <dspbridge/host_os.h> /* ----------------------------------- DSP/BIOS Bridge */ #include <dspbridge/dbdefs.h> /* ----------------------------------- This */ #include <dspbridge/uuidutil.h> /* * ======== uuid_uuid_to_string ======== * Purpose: * Converts a struct dsp_uuid to a string. * Note: snprintf format specifier is: * %[flags] [width] [.precision] [{h | l | I64 | L}]type */ void uuid_uuid_to_string(struct dsp_uuid *uuid_obj, char *sz_uuid, s32 size) { s32 i; /* return result from snprintf. */ i = snprintf(sz_uuid, size, "%.8X_%.4X_%.4X_%.2X%.2X_%.2X%.2X%.2X%.2X%.2X%.2X", uuid_obj->data1, uuid_obj->data2, uuid_obj->data3, uuid_obj->data4, uuid_obj->data5, uuid_obj->data6[0], uuid_obj->data6[1], uuid_obj->data6[2], uuid_obj->data6[3], uuid_obj->data6[4], uuid_obj->data6[5]); } static s32 uuid_hex_to_bin(char *buf, s32 len) { s32 i; s32 result = 0; int value; for (i = 0; i < len; i++) { value = hex_to_bin(*buf++); result *= 16; if (value > 0) result += value; } return result; } /* * ======== uuid_uuid_from_string ======== * Purpose: * Converts a string to a struct dsp_uuid. 
*/ void uuid_uuid_from_string(char *sz_uuid, struct dsp_uuid *uuid_obj) { s32 j; uuid_obj->data1 = uuid_hex_to_bin(sz_uuid, 8); sz_uuid += 8; /* Step over underscore */ sz_uuid++; uuid_obj->data2 = (u16) uuid_hex_to_bin(sz_uuid, 4); sz_uuid += 4; /* Step over underscore */ sz_uuid++; uuid_obj->data3 = (u16) uuid_hex_to_bin(sz_uuid, 4); sz_uuid += 4; /* Step over underscore */ sz_uuid++; uuid_obj->data4 = (u8) uuid_hex_to_bin(sz_uuid, 2); sz_uuid += 2; uuid_obj->data5 = (u8) uuid_hex_to_bin(sz_uuid, 2); sz_uuid += 2; /* Step over underscore */ sz_uuid++; for (j = 0; j < 6; j++) { uuid_obj->data6[j] = (u8) uuid_hex_to_bin(sz_uuid, 2); sz_uuid += 2; } }
gpl-2.0
Jackeagle/kernel_caf
drivers/macintosh/via-cuda.c
7226
16127
/*
 * Device driver for the via-cuda on Apple Powermacs.
 *
 * The VIA (versatile interface adapter) interfaces to the CUDA,
 * a 6805 microprocessor core which controls the ADB (Apple Desktop
 * Bus) which connects to the keyboard and mouse. The CUDA also
 * controls system power and the RTC (real time clock) chip.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */
#include <stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#ifdef CONFIG_PPC
#include <asm/prom.h>
#include <asm/machdep.h>
#else
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_via.h>
#endif
#include <asm/io.h>
#include <linux/init.h>

/* MMIO base of the VIA that fronts the CUDA; set by find_via_cuda(). */
static volatile unsigned char __iomem *via;
/* Protects cuda_state, the request queue, and all VIA register access. */
static DEFINE_SPINLOCK(cuda_lock);

/* VIA registers - spaced 0x200 bytes apart */
#define RS		0x200		/* skip between registers */
#define B		0		/* B-side data */
#define A		RS		/* A-side data */
#define DIRB		(2*RS)		/* B-side direction (1=output) */
#define DIRA		(3*RS)		/* A-side direction (1=output) */
#define T1CL		(4*RS)		/* Timer 1 ctr/latch (low 8 bits) */
#define T1CH		(5*RS)		/* Timer 1 counter (high 8 bits) */
#define T1LL		(6*RS)		/* Timer 1 latch (low 8 bits) */
#define T1LH		(7*RS)		/* Timer 1 latch (high 8 bits) */
#define T2CL		(8*RS)		/* Timer 2 ctr/latch (low 8 bits) */
#define T2CH		(9*RS)		/* Timer 2 counter (high 8 bits) */
#define SR		(10*RS)		/* Shift register */
#define ACR		(11*RS)		/* Auxiliary control register */
#define PCR		(12*RS)		/* Peripheral control register */
#define IFR		(13*RS)		/* Interrupt flag register */
#define IER		(14*RS)		/* Interrupt enable register */
#define ANH		(15*RS)		/* A-side data, no handshake */

/* Bits in B data register: all active low */
#define TREQ		0x08		/* Transfer request (input) */
#define TACK		0x10		/* Transfer acknowledge (output) */
#define TIP		0x20		/* Transfer in progress (output) */

/* Bits in ACR */
#define SR_CTRL		0x1c		/* Shift register control bits */
#define SR_EXT		0x0c		/* Shift on external clock */
#define SR_OUT		0x10		/* Shift out if 1 */

/* Bits in IFR and IER */
#define IER_SET		0x80		/* set bits in IER */
#define IER_CLR		0		/* clear bits in IER */
#define SR_INT		0x04		/* Shift register full/empty */

/*
 * Transfer state machine, driven byte-by-byte from cuda_interrupt().
 * Every SR_INT from the VIA advances the current transfer by one byte.
 */
static enum cuda_state {
    idle,
    sent_first_byte,	/* first byte of a request has been shifted out */
    sending,		/* shifting out remaining request bytes */
    reading,		/* shifting in a reply or unsolicited packet */
    read_done,		/* complete packet received, being dispatched */
    awaiting_reply	/* request fully sent; waiting for CUDA's reply */
} cuda_state;

static struct adb_request *current_req;	/* head of pending request queue */
static struct adb_request *last_req;	/* tail of pending request queue */
static unsigned char cuda_rbuf[16];	/* buffer for unsolicited packets */
static unsigned char *reply_ptr;	/* next byte position while reading */
static int reading_reply;	/* 1 = reply for current_req, 0 = unsolicited */
static int data_index;		/* next byte to send from current_req->data */
static int cuda_irq;
#ifdef CONFIG_PPC
static struct device_node *vias;
#endif
static int cuda_fully_inited;	/* set once via_cuda_start() has the IRQ */

#ifdef CONFIG_ADB
static int cuda_probe(void);
static int cuda_send_request(struct adb_request *req, int sync);
static int cuda_adb_autopoll(int devs);
static int cuda_reset_adb_bus(void);
#endif /* CONFIG_ADB */

static int cuda_init_via(void);
static void cuda_start(void);
static irqreturn_t cuda_interrupt(int irq, void *arg);
static void cuda_input(unsigned char *buf, int nb);
void cuda_poll(void);
static int cuda_write(struct adb_request *req);

int cuda_request(struct adb_request *req,
		 void (*done)(struct adb_request *), int nbytes, ...);

#ifdef CONFIG_ADB
struct adb_driver via_cuda_driver = {
	.name         = "CUDA",
	.probe        = cuda_probe,
	.send_request = cuda_send_request,
	.autopoll     = cuda_adb_autopoll,
	.poll         = cuda_poll,
	.reset_bus    = cuda_reset_adb_bus,
};
#endif /* CONFIG_ADB */

#ifdef CONFIG_MAC
/*
 * m68k Mac probe: the VIA base is already known (via1); just initialize
 * the handshake and enable autopolling.  Returns 1 if a CUDA is present
 * and initialized, 0 otherwise.
 */
int __init find_via_cuda(void)
{
    struct adb_request req;
    int err;

    if (macintosh_config->adb_type != MAC_ADB_CUDA)
	return 0;

    via = via1;
    cuda_state = idle;

    err = cuda_init_via();
    if (err) {
	printk(KERN_ERR "cuda_init_via() failed\n");
	via = NULL;
	return 0;
    }

    /* enable autopoll */
    cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, 1);
    while (!req.complete)
	cuda_poll();

    return 1;
}
#else
/*
 * PowerMac probe: locate the "via-cuda" device-tree node, map its
 * registers, sync with the CUDA and enable autopolling.  Returns 1 on
 * success, 0 if no CUDA is present or initialization fails.
 */
int __init find_via_cuda(void)
{
    struct adb_request req;
    phys_addr_t taddr;
    const u32 *reg;
    int err;

    if (vias != 0)
	return 1;
    vias = of_find_node_by_name(NULL, "via-cuda");
    if (vias == 0)
	return 0;

    reg = of_get_property(vias, "reg", NULL);
    if (reg == NULL) {
	    printk(KERN_ERR "via-cuda: No \"reg\" property !\n");
	    goto fail;
    }
    taddr = of_translate_address(vias, reg);
    if (taddr == 0) {
	    printk(KERN_ERR "via-cuda: Can't translate address !\n");
	    goto fail;
    }
    via = ioremap(taddr, 0x2000);
    if (via == NULL) {
	    printk(KERN_ERR "via-cuda: Can't map address !\n");
	    goto fail;
    }

    cuda_state = idle;
    sys_ctrler = SYS_CTRLER_CUDA;

    err = cuda_init_via();
    if (err) {
	printk(KERN_ERR "cuda_init_via() failed\n");
	via = NULL;
	return 0;
    }

    /* Clear and enable interrupts, but only on PPC. On 68K it's done  */
    /* for us by the main VIA driver in arch/m68k/mac/via.c        */
    out_8(&via[IFR], 0x7f);			/* clear interrupts by writing 1s */
    out_8(&via[IER], IER_SET|SR_INT);	/* enable interrupt from SR */

    /* enable autopoll */
    cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, 1);
    while (!req.complete)
	cuda_poll();

    return 1;

 fail:
    of_node_put(vias);
    vias = NULL;
    return 0;
}
#endif /* !defined CONFIG_MAC */

/*
 * Late init: hook up the CUDA interrupt.  Until this has run,
 * cuda_fully_inited stays 0 and the ADB entry points refuse requests
 * (cuda_request() itself still works, polled, for early boot use).
 */
static int __init via_cuda_start(void)
{
    if (via == NULL)
	return -ENODEV;

#ifdef CONFIG_MAC
    cuda_irq = IRQ_MAC_ADB;
#else
    cuda_irq = irq_of_parse_and_map(vias, 0);
    if (cuda_irq == NO_IRQ) {
	printk(KERN_ERR "via-cuda: can't map interrupts for %s\n",
	       vias->full_name);
	return -ENODEV;
    }
#endif

    /* The dev_id (cuda_interrupt) is only a unique cookie for free_irq. */
    if (request_irq(cuda_irq, cuda_interrupt, 0, "ADB", cuda_interrupt)) {
	printk(KERN_ERR "via-cuda: can't request irq %d\n", cuda_irq);
	return -EAGAIN;
    }

    printk("Macintosh CUDA driver v0.5 for Unified ADB.\n");

    cuda_fully_inited = 1;
    return 0;
}

device_initcall(via_cuda_start);

#ifdef CONFIG_ADB
/* ADB-layer probe: report whether this machine's controller is a CUDA. */
static int
cuda_probe(void)
{
#ifdef CONFIG_PPC
    if (sys_ctrler != SYS_CTRLER_CUDA)
	return -ENODEV;
#else
    if (macintosh_config->adb_type != MAC_ADB_CUDA)
	return -ENODEV;
#endif
    if (via == NULL)
	return -ENODEV;
    return 0;
}
#endif /* CONFIG_ADB */

/*
 * Busy-wait (up to 1000 * 100us = 100ms) for a hardware condition during
 * the initial handshake; returns -ENXIO from the enclosing function on
 * timeout.  Init-time only, so the busy wait is acceptable.
 */
#define WAIT_FOR(cond, what)					\
    do {							\
    	int x;							\
	for (x = 1000; !(cond); --x) {				\
	    if (x == 0) {					\
		printk("Timeout waiting for " what "\n");	\
		return -ENXIO;					\
	    }							\
	    udelay(100);					\
	}							\
    } while (0)

/*
 * Program the VIA port directions and shift register, then perform the
 * TACK/TREQ synchronization handshake with the CUDA.  Must complete
 * before any packet transfer is attempted.
 */
static int
cuda_init_via(void)
{
    out_8(&via[DIRB], (in_8(&via[DIRB]) | TACK | TIP) & ~TREQ);	/* TACK & TIP out */
    out_8(&via[B], in_8(&via[B]) | TACK | TIP);			/* negate them */
    out_8(&via[ACR] ,(in_8(&via[ACR]) & ~SR_CTRL) | SR_EXT);	/* SR data in */
    (void)in_8(&via[SR]);					/* clear any left-over data */

#ifdef CONFIG_PPC
    out_8(&via[IER], 0x7f);					/* disable interrupts from VIA */
    (void)in_8(&via[IER]);
#else
    out_8(&via[IER], SR_INT);					/* disable SR interrupt from VIA */
#endif

    /* delay 4ms and then clear any pending interrupt */
    mdelay(4);
    (void)in_8(&via[SR]);
    out_8(&via[IFR], SR_INT);

    /* sync with the CUDA - assert TACK without TIP */
    out_8(&via[B], in_8(&via[B]) & ~TACK);

    /* wait for the CUDA to assert TREQ in response */
    WAIT_FOR((in_8(&via[B]) & TREQ) == 0, "CUDA response to sync");

    /* wait for the interrupt and then clear it */
    WAIT_FOR(in_8(&via[IFR]) & SR_INT, "CUDA response to sync (2)");
    (void)in_8(&via[SR]);
    out_8(&via[IFR], SR_INT);

    /* finish the sync by negating TACK */
    out_8(&via[B], in_8(&via[B]) | TACK);

    /* wait for the CUDA to negate TREQ and the corresponding interrupt */
    WAIT_FOR(in_8(&via[B]) & TREQ, "CUDA response to sync (3)");
    WAIT_FOR(in_8(&via[IFR]) & SR_INT, "CUDA response to sync (4)");
    (void)in_8(&via[SR]);
    out_8(&via[IFR], SR_INT);
    out_8(&via[B], in_8(&via[B]) | TIP);	/* should be unnecessary */

    return 0;
}

#ifdef CONFIG_ADB
/* Send an ADB command; if @sync, poll until the request completes. */
static int
cuda_send_request(struct adb_request *req, int sync)
{
    int i;

    if ((via == NULL) || !cuda_fully_inited) {
	req->complete = 1;
	return -ENXIO;
    }
  
    req->reply_expected = 1;

    i = cuda_write(req);
    if (i)
	return i;

    if (sync) {
	while (!req->complete)
	    cuda_poll();
    }
    return 0;
}


/* Enable/disable autopolling (any non-zero @devs enables it). */
static int
cuda_adb_autopoll(int devs)
{
    struct adb_request req;

    if ((via == NULL) || !cuda_fully_inited)
	return -ENXIO;

    cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, (devs? 1: 0));
    while (!req.complete)
	cuda_poll();
    return 0;
}

/* Reset adb bus - how do we do this?? */
/* NOTE(review): sends a bare ADB_PACKET with no command byte; the
 * original author was unsure this actually resets the bus ("maybe?"). */
static int
cuda_reset_adb_bus(void)
{
    struct adb_request req;

    if ((via == NULL) || !cuda_fully_inited)
	return -ENXIO;

    cuda_request(&req, NULL, 2, ADB_PACKET, 0);		/* maybe? */
    while (!req.complete)
	cuda_poll();
    return 0;
}
#endif /* CONFIG_ADB */

/*
 * Construct and send a cuda request: gathers @nbytes int varargs into
 * req->data and queues it.  @done (may be NULL) is called on completion.
 * Returns 0 on success or -errno (req->complete is set on failure).
 */
int
cuda_request(struct adb_request *req, void (*done)(struct adb_request *),
	     int nbytes, ...)
{
    va_list list;
    int i;

    if (via == NULL) {
	req->complete = 1;
	return -ENXIO;
    }

    req->nbytes = nbytes;
    req->done = done;
    va_start(list, nbytes);
    for (i = 0; i < nbytes; ++i)
	req->data[i] = va_arg(list, int);
    va_end(list);
    req->reply_expected = 1;
    return cuda_write(req);
}

/*
 * Validate @req and append it to the transfer queue; kicks off the
 * hardware immediately if the state machine is idle.
 */
static int
cuda_write(struct adb_request *req)
{
    unsigned long flags;

    if (req->nbytes < 2 || req->data[0] > CUDA_PACKET) {
	req->complete = 1;
	return -EINVAL;
    }
    req->next = NULL;
    req->sent = 0;
    req->complete = 0;
    req->reply_len = 0;

    spin_lock_irqsave(&cuda_lock, flags);
    if (current_req != 0) {
	last_req->next = req;
	last_req = req;
    } else {
	current_req = req;
	last_req = req;
	if (cuda_state == idle)
	    cuda_start();
    }
    spin_unlock_irqrestore(&cuda_lock, flags);

    return 0;
}

/*
 * Begin transmitting the request at the head of the queue.  Caller must
 * hold cuda_lock and cuda_state must be idle.  If the CUDA is already
 * asserting TREQ (it wants to send us a byte), we back off and let the
 * incoming packet be handled first.
 */
static void
cuda_start(void)
{
    struct adb_request *req;

    /* assert cuda_state == idle */
    /* get the packet to send */
    req = current_req;
    if (req == 0)
	return;
    if ((in_8(&via[B]) & TREQ) == 0)
	return;			/* a byte is coming in from the CUDA */

    /* set the shift register to shift out and send a byte */
    out_8(&via[ACR], in_8(&via[ACR]) | SR_OUT);
    out_8(&via[SR], req->data[0]);
    out_8(&via[B], in_8(&via[B]) & ~TIP);
    cuda_state = sent_first_byte;
}

/*
 * Drive the state machine by hand (used before the IRQ is registered
 * and by synchronous waiters).
 */
void
cuda_poll(void)
{
    /* cuda_interrupt only takes a normal lock, we disable
     * interrupts here to avoid re-entering and thus deadlocking.
     */
    if (cuda_irq)
	disable_irq(cuda_irq);
    cuda_interrupt(0, NULL);
    if (cuda_irq)
	enable_irq(cuda_irq);
}

/*
 * Main state machine: each shift-register interrupt moves one byte in or
 * out and advances cuda_state.  Completion callbacks and unsolicited
 * packet dispatch are deferred until after cuda_lock is dropped.
 */
static irqreturn_t
cuda_interrupt(int irq, void *arg)
{
    int status;
    struct adb_request *req = NULL;
    unsigned char ibuf[16];
    int ibuf_len = 0;
    int complete = 0;
    
    spin_lock(&cuda_lock);

    /* On powermacs, this handler is registered for the VIA IRQ. But they use
     * just the shift register IRQ -- other VIA interrupt sources are disabled.
     * On m68k macs, the VIA IRQ sources are dispatched individually. Unless
     * we are polling, the shift register IRQ flag has already been cleared.
     */

#ifdef CONFIG_MAC
    if (!arg)
#endif
    {
        if ((in_8(&via[IFR]) & SR_INT) == 0) {
            spin_unlock(&cuda_lock);
            return IRQ_NONE;
        } else {
            out_8(&via[IFR], SR_INT);
        }
    }
    
    /* status encodes TIP/TREQ (inverted, i.e. 1 = asserted) and SR_OUT */
    status = (~in_8(&via[B]) & (TIP|TREQ)) | (in_8(&via[ACR]) & SR_OUT);
    /* printk("cuda_interrupt: state=%d status=%x\n", cuda_state, status); */
    switch (cuda_state) {
    case idle:
	/* CUDA has sent us the first byte of data - unsolicited */
	if (status != TREQ)
	    printk("cuda: state=idle, status=%x\n", status);
	(void)in_8(&via[SR]);
	out_8(&via[B], in_8(&via[B]) & ~TIP);
	cuda_state = reading;
	reply_ptr = cuda_rbuf;
	reading_reply = 0;
	break;

    case awaiting_reply:
	/* CUDA has sent us the first byte of data of a reply */
	if (status != TREQ)
	    printk("cuda: state=awaiting_reply, status=%x\n", status);
	(void)in_8(&via[SR]);
	out_8(&via[B], in_8(&via[B]) & ~TIP);
	cuda_state = reading;
	reply_ptr = current_req->reply;
	reading_reply = 1;
	break;

    case sent_first_byte:
	if (status == TREQ + TIP + SR_OUT) {
	    /* collision */
	    out_8(&via[ACR], in_8(&via[ACR]) & ~SR_OUT);
	    (void)in_8(&via[SR]);
	    out_8(&via[B], in_8(&via[B]) | TIP | TACK);
	    cuda_state = idle;
	} else {
	    /* assert status == TIP + SR_OUT */
	    if (status != TIP + SR_OUT)
		printk("cuda: state=sent_first_byte status=%x\n", status);
	    out_8(&via[SR], current_req->data[1]);
	    /* toggling TACK acknowledges each byte to the CUDA */
	    out_8(&via[B], in_8(&via[B]) ^ TACK);
	    data_index = 2;
	    cuda_state = sending;
	}
	break;

    case sending:
	req = current_req;
	if (data_index >= req->nbytes) {
	    /* all bytes sent: shift back to input mode, raise TACK+TIP */
	    out_8(&via[ACR], in_8(&via[ACR]) & ~SR_OUT);
	    (void)in_8(&via[SR]);
	    out_8(&via[B], in_8(&via[B]) | TACK | TIP);
	    req->sent = 1;
	    if (req->reply_expected) {
		cuda_state = awaiting_reply;
	    } else {
		current_req = req->next;
		complete = 1;
		/* not sure about this */
		cuda_state = idle;
		cuda_start();
	    }
	} else {
	    out_8(&via[SR], req->data[data_index++]);
	    out_8(&via[B], in_8(&via[B]) ^ TACK);
	}
	break;

    case reading:
	*reply_ptr++ = in_8(&via[SR]);
	if (status == TIP) {
	    /* that's all folks */
	    out_8(&via[B], in_8(&via[B]) | TACK | TIP);
	    cuda_state = read_done;
	} else {
	    /* assert status == TIP | TREQ */
	    if (status != TIP + TREQ)
		printk("cuda: state=reading status=%x\n", status);
	    out_8(&via[B], in_8(&via[B]) ^ TACK);
	}
	break;

    case read_done:
	(void)in_8(&via[SR]);
	if (reading_reply) {
	    req = current_req;
	    req->reply_len = reply_ptr - req->reply;
	    if (req->data[0] == ADB_PACKET) {
		/* Have to adjust the reply from ADB commands */
		if (req->reply_len <= 2 || (req->reply[1] & 2) != 0) {
		    /* the 0x2 bit indicates no response */
		    req->reply_len = 0;
		} else {
		    /* leave just the command and result bytes in the reply */
		    req->reply_len -= 2;
		    memmove(req->reply, req->reply + 2, req->reply_len);
		}
	    }
	    current_req = req->next;
	    complete = 1;
	} else {
	    /* This is tricky. We must break the spinlock to call
	     * cuda_input. However, doing so means we might get
	     * re-entered from another CPU getting an interrupt
	     * or calling cuda_poll(). I ended up using the stack
	     * (it's only for 16 bytes) and moving the actual
	     * call to cuda_input to outside of the lock.
	     */
	    ibuf_len = reply_ptr - cuda_rbuf;
	    memcpy(ibuf, cuda_rbuf, ibuf_len);
	}
	if (status == TREQ) {
	    /* CUDA already has another packet for us; start reading it */
	    out_8(&via[B], in_8(&via[B]) & ~TIP);
	    cuda_state = reading;
	    reply_ptr = cuda_rbuf;
	    reading_reply = 0;
	} else {
	    cuda_state = idle;
	    cuda_start();
	}
	break;

    default:
	printk("cuda_interrupt: unknown cuda_state %d?\n", cuda_state);
    }
    spin_unlock(&cuda_lock);

    /* Deferred work, done without holding cuda_lock (see comment above). */
    if (complete && req) {
    	void (*done)(struct adb_request *) = req->done;
    	mb();
    	req->complete = 1;
    	/* Here, we assume that if the request has a done member, the
    	 * struct request will survive to setting req->complete to 1
    	 */
    	if (done)
		(*done)(req);
    }
    if (ibuf_len)
	cuda_input(ibuf, ibuf_len);
    return IRQ_HANDLED;
}

/*
 * Dispatch an unsolicited packet from the CUDA: ADB packets go to the
 * ADB input layer (optionally intercepted by xmon for debug keys);
 * anything else is dumped to the log.
 */
static void
cuda_input(unsigned char *buf, int nb)
{
    int i;

    switch (buf[0]) {
    case ADB_PACKET:
#ifdef CONFIG_XMON
	if (nb == 5 && buf[2] == 0x2c) {
	    extern int xmon_wants_key, xmon_adb_keycode;
	    if (xmon_wants_key) {
		xmon_adb_keycode = buf[3];
		return;
	    }
	}
#endif /* CONFIG_XMON */
#ifdef CONFIG_ADB
	adb_input(buf+2, nb-2, buf[1] & 0x40);
#endif /* CONFIG_ADB */
	break;

    default:
	printk("data from cuda (%d bytes):", nb);
	for (i = 0; i < nb; ++i)
	    printk(" %.2x", buf[i]);
	printk("\n");
    }
}
gpl-2.0
Fuzion24/m7_vzw_kernel
arch/mips/pmc-sierra/msp71xx/msp_smtc.c
9530
2276
/*
 * MSP71xx Platform-specific hooks for SMP operation
 */
#include <linux/irq.h>
#include <linux/init.h>

#include <asm/mipsmtregs.h>
#include <asm/mipsregs.h>
#include <asm/smtc.h>
#include <asm/smtc_ipi.h>

/* VPE/SMP Prototype implements platform interfaces directly */

/*
 * Cause the specified action to be performed on a targeted "CPU"
 */

static void msp_smtc_send_ipi_single(int cpu, unsigned int action)
{
	/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
}

/* Send the IPI to every CPU set in @mask, one at a time. */
static void msp_smtc_send_ipi_mask(const struct cpumask *mask,
				   unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		msp_smtc_send_ipi_single(i, action);
}

/*
 * Post-config but pre-boot cleanup entry point
 *
 * Masks off most external interrupt lines on secondary VPEs so that
 * only the primary VPE services them, then hands off to the generic
 * SMTC secondary init.
 */
static void __cpuinit msp_smtc_init_secondary(void)
{
	int myvpe;

	/* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
	/* NOTE(review): "Malta" here looks copy-pasted from the Malta board
	 * port; on MSP71xx this masks IP2-IP5 equivalently - confirm. */
	myvpe = read_c0_tcbind() & TCBIND_CURVPE;
	if (myvpe > 0)
		change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
				 STATUSF_IP6 | STATUSF_IP7);
	smtc_init_secondary();
}

/*
 * Platform "CPU" startup hook: delegates entirely to the generic
 * SMTC secondary-boot path.
 */
static void __cpuinit msp_smtc_boot_secondary(int cpu,
					      struct task_struct *idle)
{
	smtc_boot_secondary(cpu, idle);
}

/*
 * SMP initialization finalization entry point
 */
static void __cpuinit msp_smtc_smp_finish(void)
{
	smtc_smp_finish();
}

/*
 * Hook for after all CPUs are online
 */

static void msp_smtc_cpus_done(void)
{
	/* Intentionally empty: nothing platform-specific to do here. */
}

/*
 * Platform SMP pre-initialization
 *
 * As noted above, we can assume a single CPU for now
 * but it may be multithreaded.
 */

static void __init msp_smtc_smp_setup(void)
{
	/*
	 * we won't get the definitive value until
	 * we've run smtc_prepare_cpus later, but
	 * config3 bit 2 (the MT ASE present bit) tells us whether
	 * building a thread map is worthwhile at all.
	 */
	if (read_c0_config3() & (1 << 2))
		smp_num_siblings = smtc_build_cpu_map(0);
}

/* Delegate CPU preparation to the generic SMTC layer. */
static void __init msp_smtc_prepare_cpus(unsigned int max_cpus)
{
	smtc_prepare_cpus(max_cpus);
}

/* Platform SMP operations table registered with the MIPS SMP core. */
struct plat_smp_ops msp_smtc_smp_ops = {
	.send_ipi_single	= msp_smtc_send_ipi_single,
	.send_ipi_mask		= msp_smtc_send_ipi_mask,
	.init_secondary		= msp_smtc_init_secondary,
	.smp_finish		= msp_smtc_smp_finish,
	.cpus_done		= msp_smtc_cpus_done,
	.boot_secondary		= msp_smtc_boot_secondary,
	.smp_setup		= msp_smtc_smp_setup,
	.prepare_cpus		= msp_smtc_prepare_cpus,
};
gpl-2.0
htc-msm8660/android_kernel_htc_msm8660
fs/ntfs/lcnalloc.c
14394
33171
/* * lcnalloc.c - Cluster (de)allocation code. Part of the Linux-NTFS project. * * Copyright (c) 2004-2005 Anton Altaparmakov * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program/include file is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program (in the main directory of the Linux-NTFS * distribution in the file COPYING); if not, write to the Free Software * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef NTFS_RW #include <linux/pagemap.h> #include "lcnalloc.h" #include "debug.h" #include "bitmap.h" #include "inode.h" #include "volume.h" #include "attrib.h" #include "malloc.h" #include "aops.h" #include "ntfs.h" /** * ntfs_cluster_free_from_rl_nolock - free clusters from runlist * @vol: mounted ntfs volume on which to free the clusters * @rl: runlist describing the clusters to free * * Free all the clusters described by the runlist @rl on the volume @vol. In * the case of an error being returned, at least some of the clusters were not * freed. * * Return 0 on success and -errno on error. * * Locking: - The volume lcn bitmap must be locked for writing on entry and is * left locked on return. 
*/ int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol, const runlist_element *rl) { struct inode *lcnbmp_vi = vol->lcnbmp_ino; int ret = 0; ntfs_debug("Entering."); if (!rl) return 0; for (; rl->length; rl++) { int err; if (rl->lcn < 0) continue; err = ntfs_bitmap_clear_run(lcnbmp_vi, rl->lcn, rl->length); if (unlikely(err && (!ret || ret == -ENOMEM) && ret != err)) ret = err; } ntfs_debug("Done."); return ret; } /** * ntfs_cluster_alloc - allocate clusters on an ntfs volume * @vol: mounted ntfs volume on which to allocate the clusters * @start_vcn: vcn to use for the first allocated cluster * @count: number of clusters to allocate * @start_lcn: starting lcn at which to allocate the clusters (or -1 if none) * @zone: zone from which to allocate the clusters * @is_extension: if 'true', this is an attribute extension * * Allocate @count clusters preferably starting at cluster @start_lcn or at the * current allocator position if @start_lcn is -1, on the mounted ntfs volume * @vol. @zone is either DATA_ZONE for allocation of normal clusters or * MFT_ZONE for allocation of clusters for the master file table, i.e. the * $MFT/$DATA attribute. * * @start_vcn specifies the vcn of the first allocated cluster. This makes * merging the resulting runlist with the old runlist easier. * * If @is_extension is 'true', the caller is allocating clusters to extend an * attribute and if it is 'false', the caller is allocating clusters to fill a * hole in an attribute. Practically the difference is that if @is_extension * is 'true' the returned runlist will be terminated with LCN_ENOENT and if * @is_extension is 'false' the runlist will be terminated with * LCN_RL_NOT_MAPPED. * * You need to check the return value with IS_ERR(). If this is false, the * function was successful and the return value is a runlist describing the * allocated cluster(s). If IS_ERR() is true, the function failed and * PTR_ERR() gives you the error code. 
* * Notes on the allocation algorithm * ================================= * * There are two data zones. First is the area between the end of the mft zone * and the end of the volume, and second is the area between the start of the * volume and the start of the mft zone. On unmodified/standard NTFS 1.x * volumes, the second data zone does not exist due to the mft zone being * expanded to cover the start of the volume in order to reserve space for the * mft bitmap attribute. * * This is not the prettiest function but the complexity stems from the need of * implementing the mft vs data zoned approach and from the fact that we have * access to the lcn bitmap in portions of up to 8192 bytes at a time, so we * need to cope with crossing over boundaries of two buffers. Further, the * fact that the allocator allows for caller supplied hints as to the location * of where allocation should begin and the fact that the allocator keeps track * of where in the data zones the next natural allocation should occur, * contribute to the complexity of the function. But it should all be * worthwhile, because this allocator should: 1) be a full implementation of * the MFT zone approach used by Windows NT, 2) cause reduction in * fragmentation, and 3) be speedy in allocations (the code is not optimized * for speed, but the algorithm is, so further speed improvements are probably * possible). * * FIXME: We should be monitoring cluster allocation and increment the MFT zone * size dynamically but this is something for the future. We will just cause * heavier fragmentation by not doing it and I am not even sure Windows would * grow the MFT zone dynamically, so it might even be correct not to do this. * The overhead in doing dynamic MFT zone expansion would be very large and * unlikely worth the effort. (AIA) * * TODO: I have added in double the required zone position pointer wrap around * logic which can be optimized to having only one of the two logic sets. 
* However, having the double logic will work fine, but if we have only one of * the sets and we get it wrong somewhere, then we get into trouble, so * removing the duplicate logic requires _very_ careful consideration of _all_ * possible code paths. So at least for now, I am leaving the double logic - * better safe than sorry... (AIA) * * Locking: - The volume lcn bitmap must be unlocked on entry and is unlocked * on return. * - This function takes the volume lcn bitmap lock for writing and * modifies the bitmap contents. */ runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn, const s64 count, const LCN start_lcn, const NTFS_CLUSTER_ALLOCATION_ZONES zone, const bool is_extension) { LCN zone_start, zone_end, bmp_pos, bmp_initial_pos, last_read_pos, lcn; LCN prev_lcn = 0, prev_run_len = 0, mft_zone_size; s64 clusters; loff_t i_size; struct inode *lcnbmp_vi; runlist_element *rl = NULL; struct address_space *mapping; struct page *page = NULL; u8 *buf, *byte; int err = 0, rlpos, rlsize, buf_size; u8 pass, done_zones, search_zone, need_writeback = 0, bit; ntfs_debug("Entering for start_vcn 0x%llx, count 0x%llx, start_lcn " "0x%llx, zone %s_ZONE.", (unsigned long long)start_vcn, (unsigned long long)count, (unsigned long long)start_lcn, zone == MFT_ZONE ? "MFT" : "DATA"); BUG_ON(!vol); lcnbmp_vi = vol->lcnbmp_ino; BUG_ON(!lcnbmp_vi); BUG_ON(start_vcn < 0); BUG_ON(count < 0); BUG_ON(start_lcn < -1); BUG_ON(zone < FIRST_ZONE); BUG_ON(zone > LAST_ZONE); /* Return NULL if @count is zero. */ if (!count) return NULL; /* Take the lcnbmp lock for writing. */ down_write(&vol->lcnbmp_lock); /* * If no specific @start_lcn was requested, use the current data zone * position, otherwise use the requested @start_lcn but make sure it * lies outside the mft zone. Also set done_zones to 0 (no zones done) * and pass depending on whether we are starting inside a zone (1) or * at the beginning of a zone (2). 
If requesting from the MFT_ZONE, * we either start at the current position within the mft zone or at * the specified position. If the latter is out of bounds then we start * at the beginning of the MFT_ZONE. */ done_zones = 0; pass = 1; /* * zone_start and zone_end are the current search range. search_zone * is 1 for mft zone, 2 for data zone 1 (end of mft zone till end of * volume) and 4 for data zone 2 (start of volume till start of mft * zone). */ zone_start = start_lcn; if (zone_start < 0) { if (zone == DATA_ZONE) zone_start = vol->data1_zone_pos; else zone_start = vol->mft_zone_pos; if (!zone_start) { /* * Zone starts at beginning of volume which means a * single pass is sufficient. */ pass = 2; } } else if (zone == DATA_ZONE && zone_start >= vol->mft_zone_start && zone_start < vol->mft_zone_end) { zone_start = vol->mft_zone_end; /* * Starting at beginning of data1_zone which means a single * pass in this zone is sufficient. */ pass = 2; } else if (zone == MFT_ZONE && (zone_start < vol->mft_zone_start || zone_start >= vol->mft_zone_end)) { zone_start = vol->mft_lcn; if (!vol->mft_zone_end) zone_start = 0; /* * Starting at beginning of volume which means a single pass * is sufficient. */ pass = 2; } if (zone == MFT_ZONE) { zone_end = vol->mft_zone_end; search_zone = 1; } else /* if (zone == DATA_ZONE) */ { /* Skip searching the mft zone. */ done_zones |= 1; if (zone_start >= vol->mft_zone_end) { zone_end = vol->nr_clusters; search_zone = 2; } else { zone_end = vol->mft_zone_start; search_zone = 4; } } /* * bmp_pos is the current bit position inside the bitmap. We use * bmp_initial_pos to determine whether or not to do a zone switch. */ bmp_pos = bmp_initial_pos = zone_start; /* Loop until all clusters are allocated, i.e. clusters == 0. 
*/ clusters = count; rlpos = rlsize = 0; mapping = lcnbmp_vi->i_mapping; i_size = i_size_read(lcnbmp_vi); while (1) { ntfs_debug("Start of outer while loop: done_zones 0x%x, " "search_zone %i, pass %i, zone_start 0x%llx, " "zone_end 0x%llx, bmp_initial_pos 0x%llx, " "bmp_pos 0x%llx, rlpos %i, rlsize %i.", done_zones, search_zone, pass, (unsigned long long)zone_start, (unsigned long long)zone_end, (unsigned long long)bmp_initial_pos, (unsigned long long)bmp_pos, rlpos, rlsize); /* Loop until we run out of free clusters. */ last_read_pos = bmp_pos >> 3; ntfs_debug("last_read_pos 0x%llx.", (unsigned long long)last_read_pos); if (last_read_pos > i_size) { ntfs_debug("End of attribute reached. " "Skipping to zone_pass_done."); goto zone_pass_done; } if (likely(page)) { if (need_writeback) { ntfs_debug("Marking page dirty."); flush_dcache_page(page); set_page_dirty(page); need_writeback = 0; } ntfs_unmap_page(page); } page = ntfs_map_page(mapping, last_read_pos >> PAGE_CACHE_SHIFT); if (IS_ERR(page)) { err = PTR_ERR(page); ntfs_error(vol->sb, "Failed to map page."); goto out; } buf_size = last_read_pos & ~PAGE_CACHE_MASK; buf = page_address(page) + buf_size; buf_size = PAGE_CACHE_SIZE - buf_size; if (unlikely(last_read_pos + buf_size > i_size)) buf_size = i_size - last_read_pos; buf_size <<= 3; lcn = bmp_pos & 7; bmp_pos &= ~(LCN)7; ntfs_debug("Before inner while loop: buf_size %i, lcn 0x%llx, " "bmp_pos 0x%llx, need_writeback %i.", buf_size, (unsigned long long)lcn, (unsigned long long)bmp_pos, need_writeback); while (lcn < buf_size && lcn + bmp_pos < zone_end) { byte = buf + (lcn >> 3); ntfs_debug("In inner while loop: buf_size %i, " "lcn 0x%llx, bmp_pos 0x%llx, " "need_writeback %i, byte ofs 0x%x, " "*byte 0x%x.", buf_size, (unsigned long long)lcn, (unsigned long long)bmp_pos, need_writeback, (unsigned int)(lcn >> 3), (unsigned int)*byte); /* Skip full bytes. 
*/ if (*byte == 0xff) { lcn = (lcn + 8) & ~(LCN)7; ntfs_debug("Continuing while loop 1."); continue; } bit = 1 << (lcn & 7); ntfs_debug("bit 0x%x.", bit); /* If the bit is already set, go onto the next one. */ if (*byte & bit) { lcn++; ntfs_debug("Continuing while loop 2."); continue; } /* * Allocate more memory if needed, including space for * the terminator element. * ntfs_malloc_nofs() operates on whole pages only. */ if ((rlpos + 2) * sizeof(*rl) > rlsize) { runlist_element *rl2; ntfs_debug("Reallocating memory."); if (!rl) ntfs_debug("First free bit is at LCN " "0x%llx.", (unsigned long long) (lcn + bmp_pos)); rl2 = ntfs_malloc_nofs(rlsize + (int)PAGE_SIZE); if (unlikely(!rl2)) { err = -ENOMEM; ntfs_error(vol->sb, "Failed to " "allocate memory."); goto out; } memcpy(rl2, rl, rlsize); ntfs_free(rl); rl = rl2; rlsize += PAGE_SIZE; ntfs_debug("Reallocated memory, rlsize 0x%x.", rlsize); } /* Allocate the bitmap bit. */ *byte |= bit; /* We need to write this bitmap page to disk. */ need_writeback = 1; ntfs_debug("*byte 0x%x, need_writeback is set.", (unsigned int)*byte); /* * Coalesce with previous run if adjacent LCNs. * Otherwise, append a new run. 
*/ ntfs_debug("Adding run (lcn 0x%llx, len 0x%llx), " "prev_lcn 0x%llx, lcn 0x%llx, " "bmp_pos 0x%llx, prev_run_len 0x%llx, " "rlpos %i.", (unsigned long long)(lcn + bmp_pos), 1ULL, (unsigned long long)prev_lcn, (unsigned long long)lcn, (unsigned long long)bmp_pos, (unsigned long long)prev_run_len, rlpos); if (prev_lcn == lcn + bmp_pos - prev_run_len && rlpos) { ntfs_debug("Coalescing to run (lcn 0x%llx, " "len 0x%llx).", (unsigned long long) rl[rlpos - 1].lcn, (unsigned long long) rl[rlpos - 1].length); rl[rlpos - 1].length = ++prev_run_len; ntfs_debug("Run now (lcn 0x%llx, len 0x%llx), " "prev_run_len 0x%llx.", (unsigned long long) rl[rlpos - 1].lcn, (unsigned long long) rl[rlpos - 1].length, (unsigned long long) prev_run_len); } else { if (likely(rlpos)) { ntfs_debug("Adding new run, (previous " "run lcn 0x%llx, " "len 0x%llx).", (unsigned long long) rl[rlpos - 1].lcn, (unsigned long long) rl[rlpos - 1].length); rl[rlpos].vcn = rl[rlpos - 1].vcn + prev_run_len; } else { ntfs_debug("Adding new run, is first " "run."); rl[rlpos].vcn = start_vcn; } rl[rlpos].lcn = prev_lcn = lcn + bmp_pos; rl[rlpos].length = prev_run_len = 1; rlpos++; } /* Done? */ if (!--clusters) { LCN tc; /* * Update the current zone position. Positions * of already scanned zones have been updated * during the respective zone switches. */ tc = lcn + bmp_pos + 1; ntfs_debug("Done. 
Updating current zone " "position, tc 0x%llx, " "search_zone %i.", (unsigned long long)tc, search_zone); switch (search_zone) { case 1: ntfs_debug("Before checks, " "vol->mft_zone_pos " "0x%llx.", (unsigned long long) vol->mft_zone_pos); if (tc >= vol->mft_zone_end) { vol->mft_zone_pos = vol->mft_lcn; if (!vol->mft_zone_end) vol->mft_zone_pos = 0; } else if ((bmp_initial_pos >= vol->mft_zone_pos || tc > vol->mft_zone_pos) && tc >= vol->mft_lcn) vol->mft_zone_pos = tc; ntfs_debug("After checks, " "vol->mft_zone_pos " "0x%llx.", (unsigned long long) vol->mft_zone_pos); break; case 2: ntfs_debug("Before checks, " "vol->data1_zone_pos " "0x%llx.", (unsigned long long) vol->data1_zone_pos); if (tc >= vol->nr_clusters) vol->data1_zone_pos = vol->mft_zone_end; else if ((bmp_initial_pos >= vol->data1_zone_pos || tc > vol->data1_zone_pos) && tc >= vol->mft_zone_end) vol->data1_zone_pos = tc; ntfs_debug("After checks, " "vol->data1_zone_pos " "0x%llx.", (unsigned long long) vol->data1_zone_pos); break; case 4: ntfs_debug("Before checks, " "vol->data2_zone_pos " "0x%llx.", (unsigned long long) vol->data2_zone_pos); if (tc >= vol->mft_zone_start) vol->data2_zone_pos = 0; else if (bmp_initial_pos >= vol->data2_zone_pos || tc > vol->data2_zone_pos) vol->data2_zone_pos = tc; ntfs_debug("After checks, " "vol->data2_zone_pos " "0x%llx.", (unsigned long long) vol->data2_zone_pos); break; default: BUG(); } ntfs_debug("Finished. Going to out."); goto out; } lcn++; } bmp_pos += buf_size; ntfs_debug("After inner while loop: buf_size 0x%x, lcn " "0x%llx, bmp_pos 0x%llx, need_writeback %i.", buf_size, (unsigned long long)lcn, (unsigned long long)bmp_pos, need_writeback); if (bmp_pos < zone_end) { ntfs_debug("Continuing outer while loop, " "bmp_pos 0x%llx, zone_end 0x%llx.", (unsigned long long)bmp_pos, (unsigned long long)zone_end); continue; } zone_pass_done: /* Finished with the current zone pass. 
*/ ntfs_debug("At zone_pass_done, pass %i.", pass); if (pass == 1) { /* * Now do pass 2, scanning the first part of the zone * we omitted in pass 1. */ pass = 2; zone_end = zone_start; switch (search_zone) { case 1: /* mft_zone */ zone_start = vol->mft_zone_start; break; case 2: /* data1_zone */ zone_start = vol->mft_zone_end; break; case 4: /* data2_zone */ zone_start = 0; break; default: BUG(); } /* Sanity check. */ if (zone_end < zone_start) zone_end = zone_start; bmp_pos = zone_start; ntfs_debug("Continuing outer while loop, pass 2, " "zone_start 0x%llx, zone_end 0x%llx, " "bmp_pos 0x%llx.", (unsigned long long)zone_start, (unsigned long long)zone_end, (unsigned long long)bmp_pos); continue; } /* pass == 2 */ done_zones_check: ntfs_debug("At done_zones_check, search_zone %i, done_zones " "before 0x%x, done_zones after 0x%x.", search_zone, done_zones, done_zones | search_zone); done_zones |= search_zone; if (done_zones < 7) { ntfs_debug("Switching zone."); /* Now switch to the next zone we haven't done yet. */ pass = 1; switch (search_zone) { case 1: ntfs_debug("Switching from mft zone to data1 " "zone."); /* Update mft zone position. */ if (rlpos) { LCN tc; ntfs_debug("Before checks, " "vol->mft_zone_pos " "0x%llx.", (unsigned long long) vol->mft_zone_pos); tc = rl[rlpos - 1].lcn + rl[rlpos - 1].length; if (tc >= vol->mft_zone_end) { vol->mft_zone_pos = vol->mft_lcn; if (!vol->mft_zone_end) vol->mft_zone_pos = 0; } else if ((bmp_initial_pos >= vol->mft_zone_pos || tc > vol->mft_zone_pos) && tc >= vol->mft_lcn) vol->mft_zone_pos = tc; ntfs_debug("After checks, " "vol->mft_zone_pos " "0x%llx.", (unsigned long long) vol->mft_zone_pos); } /* Switch from mft zone to data1 zone. 
*/ switch_to_data1_zone: search_zone = 2; zone_start = bmp_initial_pos = vol->data1_zone_pos; zone_end = vol->nr_clusters; if (zone_start == vol->mft_zone_end) pass = 2; if (zone_start >= zone_end) { vol->data1_zone_pos = zone_start = vol->mft_zone_end; pass = 2; } break; case 2: ntfs_debug("Switching from data1 zone to " "data2 zone."); /* Update data1 zone position. */ if (rlpos) { LCN tc; ntfs_debug("Before checks, " "vol->data1_zone_pos " "0x%llx.", (unsigned long long) vol->data1_zone_pos); tc = rl[rlpos - 1].lcn + rl[rlpos - 1].length; if (tc >= vol->nr_clusters) vol->data1_zone_pos = vol->mft_zone_end; else if ((bmp_initial_pos >= vol->data1_zone_pos || tc > vol->data1_zone_pos) && tc >= vol->mft_zone_end) vol->data1_zone_pos = tc; ntfs_debug("After checks, " "vol->data1_zone_pos " "0x%llx.", (unsigned long long) vol->data1_zone_pos); } /* Switch from data1 zone to data2 zone. */ search_zone = 4; zone_start = bmp_initial_pos = vol->data2_zone_pos; zone_end = vol->mft_zone_start; if (!zone_start) pass = 2; if (zone_start >= zone_end) { vol->data2_zone_pos = zone_start = bmp_initial_pos = 0; pass = 2; } break; case 4: ntfs_debug("Switching from data2 zone to " "data1 zone."); /* Update data2 zone position. */ if (rlpos) { LCN tc; ntfs_debug("Before checks, " "vol->data2_zone_pos " "0x%llx.", (unsigned long long) vol->data2_zone_pos); tc = rl[rlpos - 1].lcn + rl[rlpos - 1].length; if (tc >= vol->mft_zone_start) vol->data2_zone_pos = 0; else if (bmp_initial_pos >= vol->data2_zone_pos || tc > vol->data2_zone_pos) vol->data2_zone_pos = tc; ntfs_debug("After checks, " "vol->data2_zone_pos " "0x%llx.", (unsigned long long) vol->data2_zone_pos); } /* Switch from data2 zone to data1 zone. 
*/ goto switch_to_data1_zone; default: BUG(); } ntfs_debug("After zone switch, search_zone %i, " "pass %i, bmp_initial_pos 0x%llx, " "zone_start 0x%llx, zone_end 0x%llx.", search_zone, pass, (unsigned long long)bmp_initial_pos, (unsigned long long)zone_start, (unsigned long long)zone_end); bmp_pos = zone_start; if (zone_start == zone_end) { ntfs_debug("Empty zone, going to " "done_zones_check."); /* Empty zone. Don't bother searching it. */ goto done_zones_check; } ntfs_debug("Continuing outer while loop."); continue; } /* done_zones == 7 */ ntfs_debug("All zones are finished."); /* * All zones are finished! If DATA_ZONE, shrink mft zone. If * MFT_ZONE, we have really run out of space. */ mft_zone_size = vol->mft_zone_end - vol->mft_zone_start; ntfs_debug("vol->mft_zone_start 0x%llx, vol->mft_zone_end " "0x%llx, mft_zone_size 0x%llx.", (unsigned long long)vol->mft_zone_start, (unsigned long long)vol->mft_zone_end, (unsigned long long)mft_zone_size); if (zone == MFT_ZONE || mft_zone_size <= 0) { ntfs_debug("No free clusters left, going to out."); /* Really no more space left on device. */ err = -ENOSPC; goto out; } /* zone == DATA_ZONE && mft_zone_size > 0 */ ntfs_debug("Shrinking mft zone."); zone_end = vol->mft_zone_end; mft_zone_size >>= 1; if (mft_zone_size > 0) vol->mft_zone_end = vol->mft_zone_start + mft_zone_size; else /* mft zone and data2 zone no longer exist. 
*/ vol->data2_zone_pos = vol->mft_zone_start = vol->mft_zone_end = 0; if (vol->mft_zone_pos >= vol->mft_zone_end) { vol->mft_zone_pos = vol->mft_lcn; if (!vol->mft_zone_end) vol->mft_zone_pos = 0; } bmp_pos = zone_start = bmp_initial_pos = vol->data1_zone_pos = vol->mft_zone_end; search_zone = 2; pass = 2; done_zones &= ~2; ntfs_debug("After shrinking mft zone, mft_zone_size 0x%llx, " "vol->mft_zone_start 0x%llx, " "vol->mft_zone_end 0x%llx, " "vol->mft_zone_pos 0x%llx, search_zone 2, " "pass 2, dones_zones 0x%x, zone_start 0x%llx, " "zone_end 0x%llx, vol->data1_zone_pos 0x%llx, " "continuing outer while loop.", (unsigned long long)mft_zone_size, (unsigned long long)vol->mft_zone_start, (unsigned long long)vol->mft_zone_end, (unsigned long long)vol->mft_zone_pos, done_zones, (unsigned long long)zone_start, (unsigned long long)zone_end, (unsigned long long)vol->data1_zone_pos); } ntfs_debug("After outer while loop."); out: ntfs_debug("At out."); /* Add runlist terminator element. */ if (likely(rl)) { rl[rlpos].vcn = rl[rlpos - 1].vcn + rl[rlpos - 1].length; rl[rlpos].lcn = is_extension ? LCN_ENOENT : LCN_RL_NOT_MAPPED; rl[rlpos].length = 0; } if (likely(page && !IS_ERR(page))) { if (need_writeback) { ntfs_debug("Marking page dirty."); flush_dcache_page(page); set_page_dirty(page); need_writeback = 0; } ntfs_unmap_page(page); } if (likely(!err)) { up_write(&vol->lcnbmp_lock); ntfs_debug("Done."); return rl; } ntfs_error(vol->sb, "Failed to allocate clusters, aborting " "(error %i).", err); if (rl) { int err2; if (err == -ENOSPC) ntfs_debug("Not enough space to complete allocation, " "err -ENOSPC, first free lcn 0x%llx, " "could allocate up to 0x%llx " "clusters.", (unsigned long long)rl[0].lcn, (unsigned long long)(count - clusters)); /* Deallocate all allocated clusters. */ ntfs_debug("Attempting rollback..."); err2 = ntfs_cluster_free_from_rl_nolock(vol, rl); if (err2) { ntfs_error(vol->sb, "Failed to rollback (error %i). " "Leaving inconsistent metadata! 
" "Unmount and run chkdsk.", err2); NVolSetErrors(vol); } /* Free the runlist. */ ntfs_free(rl); } else if (err == -ENOSPC) ntfs_debug("No space left at all, err = -ENOSPC, first free " "lcn = 0x%llx.", (long long)vol->data1_zone_pos); up_write(&vol->lcnbmp_lock); return ERR_PTR(err); } /** * __ntfs_cluster_free - free clusters on an ntfs volume * @ni: ntfs inode whose runlist describes the clusters to free * @start_vcn: vcn in the runlist of @ni at which to start freeing clusters * @count: number of clusters to free or -1 for all clusters * @ctx: active attribute search context if present or NULL if not * @is_rollback: true if this is a rollback operation * * Free @count clusters starting at the cluster @start_vcn in the runlist * described by the vfs inode @ni. * * If @count is -1, all clusters from @start_vcn to the end of the runlist are * deallocated. Thus, to completely free all clusters in a runlist, use * @start_vcn = 0 and @count = -1. * * If @ctx is specified, it is an active search context of @ni and its base mft * record. This is needed when __ntfs_cluster_free() encounters unmapped * runlist fragments and allows their mapping. If you do not have the mft * record mapped, you can specify @ctx as NULL and __ntfs_cluster_free() will * perform the necessary mapping and unmapping. * * Note, __ntfs_cluster_free() saves the state of @ctx on entry and restores it * before returning. Thus, @ctx will be left pointing to the same attribute on * return as on entry. However, the actual pointers in @ctx may point to * different memory locations on return, so you must remember to reset any * cached pointers from the @ctx, i.e. after the call to __ntfs_cluster_free(), * you will probably want to do: * m = ctx->mrec; * a = ctx->attr; * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that * you cache ctx->mrec in a variable @m of type MFT_RECORD *. * * @is_rollback should always be 'false', it is for internal use to rollback * errors. 
You probably want to use ntfs_cluster_free() instead. * * Note, __ntfs_cluster_free() does not modify the runlist, so you have to * remove from the runlist or mark sparse the freed runs later. * * Return the number of deallocated clusters (not counting sparse ones) on * success and -errno on error. * * WARNING: If @ctx is supplied, regardless of whether success or failure is * returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx * is no longer valid, i.e. you need to either call * ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it. * In that case PTR_ERR(@ctx->mrec) will give you the error code for * why the mapping of the old inode failed. * * Locking: - The runlist described by @ni must be locked for writing on entry * and is locked on return. Note the runlist may be modified when * needed runlist fragments need to be mapped. * - The volume lcn bitmap must be unlocked on entry and is unlocked * on return. * - This function takes the volume lcn bitmap lock for writing and * modifies the bitmap contents. * - If @ctx is NULL, the base mft record of @ni must not be mapped on * entry and it will be left unmapped on return. * - If @ctx is not NULL, the base mft record must be mapped on entry * and it will be left mapped on return. */ s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, s64 count, ntfs_attr_search_ctx *ctx, const bool is_rollback) { s64 delta, to_free, total_freed, real_freed; ntfs_volume *vol; struct inode *lcnbmp_vi; runlist_element *rl; int err; BUG_ON(!ni); ntfs_debug("Entering for i_ino 0x%lx, start_vcn 0x%llx, count " "0x%llx.%s", ni->mft_no, (unsigned long long)start_vcn, (unsigned long long)count, is_rollback ? " (rollback)" : ""); vol = ni->vol; lcnbmp_vi = vol->lcnbmp_ino; BUG_ON(!lcnbmp_vi); BUG_ON(start_vcn < 0); BUG_ON(count < -1); /* * Lock the lcn bitmap for writing but only if not rolling back. 
We * must hold the lock all the way including through rollback otherwise * rollback is not possible because once we have cleared a bit and * dropped the lock, anyone could have set the bit again, thus * allocating the cluster for another use. */ if (likely(!is_rollback)) down_write(&vol->lcnbmp_lock); total_freed = real_freed = 0; rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, ctx); if (IS_ERR(rl)) { if (!is_rollback) ntfs_error(vol->sb, "Failed to find first runlist " "element (error %li), aborting.", PTR_ERR(rl)); err = PTR_ERR(rl); goto err_out; } if (unlikely(rl->lcn < LCN_HOLE)) { if (!is_rollback) ntfs_error(vol->sb, "First runlist element has " "invalid lcn, aborting."); err = -EIO; goto err_out; } /* Find the starting cluster inside the run that needs freeing. */ delta = start_vcn - rl->vcn; /* The number of clusters in this run that need freeing. */ to_free = rl->length - delta; if (count >= 0 && to_free > count) to_free = count; if (likely(rl->lcn >= 0)) { /* Do the actual freeing of the clusters in this run. */ err = ntfs_bitmap_set_bits_in_run(lcnbmp_vi, rl->lcn + delta, to_free, likely(!is_rollback) ? 0 : 1); if (unlikely(err)) { if (!is_rollback) ntfs_error(vol->sb, "Failed to clear first run " "(error %i), aborting.", err); goto err_out; } /* We have freed @to_free real clusters. */ real_freed = to_free; }; /* Go to the next run and adjust the number of clusters left to free. */ ++rl; if (count >= 0) count -= to_free; /* Keep track of the total "freed" clusters, including sparse ones. */ total_freed = to_free; /* * Loop over the remaining runs, using @count as a capping value, and * free them. */ for (; rl->length && count != 0; ++rl) { if (unlikely(rl->lcn < LCN_HOLE)) { VCN vcn; /* Attempt to map runlist. 
*/ vcn = rl->vcn; rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx); if (IS_ERR(rl)) { err = PTR_ERR(rl); if (!is_rollback) ntfs_error(vol->sb, "Failed to map " "runlist fragment or " "failed to find " "subsequent runlist " "element."); goto err_out; } if (unlikely(rl->lcn < LCN_HOLE)) { if (!is_rollback) ntfs_error(vol->sb, "Runlist element " "has invalid lcn " "(0x%llx).", (unsigned long long) rl->lcn); err = -EIO; goto err_out; } } /* The number of clusters in this run that need freeing. */ to_free = rl->length; if (count >= 0 && to_free > count) to_free = count; if (likely(rl->lcn >= 0)) { /* Do the actual freeing of the clusters in the run. */ err = ntfs_bitmap_set_bits_in_run(lcnbmp_vi, rl->lcn, to_free, likely(!is_rollback) ? 0 : 1); if (unlikely(err)) { if (!is_rollback) ntfs_error(vol->sb, "Failed to clear " "subsequent run."); goto err_out; } /* We have freed @to_free real clusters. */ real_freed += to_free; } /* Adjust the number of clusters left to free. */ if (count >= 0) count -= to_free; /* Update the total done clusters. */ total_freed += to_free; } if (likely(!is_rollback)) up_write(&vol->lcnbmp_lock); BUG_ON(count > 0); /* We are done. Return the number of actually freed clusters. */ ntfs_debug("Done."); return real_freed; err_out: if (is_rollback) return err; /* If no real clusters were freed, no need to rollback. */ if (!real_freed) { up_write(&vol->lcnbmp_lock); return err; } /* * Attempt to rollback and if that succeeds just return the error code. * If rollback fails, set the volume errors flag, emit an error * message, and return the error code. */ delta = __ntfs_cluster_free(ni, start_vcn, total_freed, ctx, true); if (delta < 0) { ntfs_error(vol->sb, "Failed to rollback (error %i). Leaving " "inconsistent metadata! Unmount and run " "chkdsk.", (int)delta); NVolSetErrors(vol); } up_write(&vol->lcnbmp_lock); ntfs_error(vol->sb, "Aborting (error %i).", err); return err; } #endif /* NTFS_RW */
gpl-2.0
xinglin/qemu-2.0.2
roms/ipxe/src/drivers/net/ath/ath9k/ath9k_hw.c
59
51142
/* * Copyright (c) 2008-2011 Atheros Communications Inc. * * Modified for iPXE by Scott K Logan <logans@cottsay.net> July 2011 * Original from Linux kernel 3.0.1 * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <ipxe/vsprintf.h> #include <ipxe/io.h> #include "hw.h" #include "hw-ops.h" #include "ar9003_mac.h" static int ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); /* Private hardware callbacks */ static void ath9k_hw_init_cal_settings(struct ath_hw *ah) { ath9k_hw_private_ops(ah)->init_cal_settings(ah); } static void ath9k_hw_init_mode_regs(struct ath_hw *ah) { ath9k_hw_private_ops(ah)->init_mode_regs(ah); } static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah, struct ath9k_channel *chan) { return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan); } static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah) { if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs) return; ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah); } static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah) { /* You will not have this callback if using the old ANI */ if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs) return; ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah); } /********************/ /* Helper Functions */ /********************/ static void ath9k_hw_set_clockrate(struct ath_hw *ah) { struct 
ath_common *common = ath9k_hw_common(ah); struct net80211_device *dev = common->dev; unsigned int clockrate; if (!ah->curchan) /* should really check for CCK instead */ clockrate = ATH9K_CLOCK_RATE_CCK; else if ((dev->channels + dev->channel)->band == NET80211_BAND_2GHZ) clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM; else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK) clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM; else clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM; common->clockrate = clockrate; } static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs) { struct ath_common *common = ath9k_hw_common(ah); return usecs * common->clockrate; } int ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout) { unsigned int i; for (i = 0; i < (timeout / AH_TIME_QUANTUM); i++) { if ((REG_READ(ah, reg) & mask) == val) return 1; udelay(AH_TIME_QUANTUM); } DBG("ath9k: " "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n", timeout, reg, REG_READ(ah, reg), mask, val); return 0; } void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array, int column, unsigned int *writecnt) { unsigned int r; ENABLE_REGWRITE_BUFFER(ah); for (r = 0; r < array->ia_rows; r++) { REG_WRITE(ah, INI_RA(array, r, 0), INI_RA(array, r, column)); DO_DELAY(*writecnt); } REGWRITE_BUFFER_FLUSH(ah); } u32 ath9k_hw_reverse_bits(u32 val, u32 n) { u32 retval; unsigned int i; for (i = 0, retval = 0; i < n; i++) { retval = (retval << 1) | (val & 1); val >>= 1; } return retval; } u16 ath9k_hw_computetxtime(struct ath_hw *ah, u8 phy, int kbps, u32 frameLen, u16 rateix, int shortPreamble) { u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime; if (kbps == 0) return 0; switch (phy) { case CHANNEL_CCK: phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS; if (shortPreamble) phyTime >>= 1; numBits = frameLen << 3; txTime = CCK_SIFS_TIME + phyTime + ((numBits * 1000) / kbps); break; case CHANNEL_OFDM: if (ah->curchan && IS_CHAN_QUARTER_RATE(ah->curchan)) { bitsPerSymbol = (kbps * 
OFDM_SYMBOL_TIME_QUARTER) / 1000; numBits = OFDM_PLCP_BITS + (frameLen << 3); numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); txTime = OFDM_SIFS_TIME_QUARTER + OFDM_PREAMBLE_TIME_QUARTER + (numSymbols * OFDM_SYMBOL_TIME_QUARTER); } else if (ah->curchan && IS_CHAN_HALF_RATE(ah->curchan)) { bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_HALF) / 1000; numBits = OFDM_PLCP_BITS + (frameLen << 3); numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); txTime = OFDM_SIFS_TIME_HALF + OFDM_PREAMBLE_TIME_HALF + (numSymbols * OFDM_SYMBOL_TIME_HALF); } else { bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000; numBits = OFDM_PLCP_BITS + (frameLen << 3); numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); txTime = OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME + (numSymbols * OFDM_SYMBOL_TIME); } break; default: DBG("ath9k: " "Unknown phy %d (rate ix %d)\n", phy, rateix); txTime = 0; break; } return txTime; } void ath9k_hw_get_channel_centers(struct ath_hw *ah __unused, struct ath9k_channel *chan, struct chan_centers *centers) { int8_t extoff; if (!IS_CHAN_HT40(chan)) { centers->ctl_center = centers->ext_center = centers->synth_center = chan->channel; return; } if ((chan->chanmode == CHANNEL_A_HT40PLUS) || (chan->chanmode == CHANNEL_G_HT40PLUS)) { centers->synth_center = chan->channel + HT40_CHANNEL_CENTER_SHIFT; extoff = 1; } else { centers->synth_center = chan->channel - HT40_CHANNEL_CENTER_SHIFT; extoff = -1; } centers->ctl_center = centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT); /* 25 MHz spacing is supported by hw but not on upper layers */ centers->ext_center = centers->synth_center + (extoff * HT40_CHANNEL_CENTER_SHIFT); } /******************/ /* Chip Revisions */ /******************/ static void ath9k_hw_read_revisions(struct ath_hw *ah) { u32 val; switch (ah->hw_version.devid) { case AR5416_AR9100_DEVID: ah->hw_version.macVersion = AR_SREV_VERSION_9100; break; case AR9300_DEVID_AR9340: ah->hw_version.macVersion = AR_SREV_VERSION_9340; val = REG_READ(ah, AR_SREV); 
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); return; } val = REG_READ(ah, AR_SREV) & AR_SREV_ID; if (val == 0xFF) { val = REG_READ(ah, AR_SREV); ah->hw_version.macVersion = (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S; ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); ah->is_pciexpress = (val & AR_SREV_TYPE2_HOST_MODE) ? 0 : 1; } else { if (!AR_SREV_9100(ah)) ah->hw_version.macVersion = MS(val, AR_SREV_VERSION); ah->hw_version.macRev = val & AR_SREV_REVISION; if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE) ah->is_pciexpress = 1; } } /************************************/ /* HW Attach, Detach, Init Routines */ /************************************/ static void ath9k_hw_disablepcie(struct ath_hw *ah) { if (!AR_SREV_5416(ah)) return; REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029); REG_WRITE(ah, AR_PCIE_SERDES, 0x57160824); REG_WRITE(ah, AR_PCIE_SERDES, 0x25980579); REG_WRITE(ah, AR_PCIE_SERDES, 0x00000000); REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007); REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); } /* This should work for all families including legacy */ static int ath9k_hw_chip_test(struct ath_hw *ah) { u32 regAddr[2] = { AR_STA_ID0 }; u32 regHold[2]; static const u32 patternData[4] = { 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999 }; int i, j, loop_max; if (!AR_SREV_9300_20_OR_LATER(ah)) { loop_max = 2; regAddr[1] = AR_PHY_BASE + (8 << 2); } else loop_max = 1; for (i = 0; i < loop_max; i++) { u32 addr = regAddr[i]; u32 wrData, rdData; regHold[i] = REG_READ(ah, addr); for (j = 0; j < 0x100; j++) { wrData = (j << 16) | j; REG_WRITE(ah, addr, wrData); rdData = REG_READ(ah, addr); if (rdData != wrData) { DBG("ath9k: " "address test failed addr: 0x%08x - wr:0x%08x != rd:0x%08x\n", addr, wrData, rdData); return 0; } } for (j = 0; j < 4; j++) { wrData = patternData[j]; REG_WRITE(ah, addr, 
wrData); rdData = REG_READ(ah, addr); if (wrData != rdData) { DBG("ath9k: " "address test failed addr: 0x%08x - wr:0x%08x != rd:0x%08x\n", addr, wrData, rdData); return 0; } } REG_WRITE(ah, regAddr[i], regHold[i]); } udelay(100); return 1; } static void ath9k_hw_init_config(struct ath_hw *ah) { int i; ah->config.dma_beacon_response_time = 2; ah->config.sw_beacon_response_time = 10; ah->config.additional_swba_backoff = 0; ah->config.ack_6mb = 0x0; ah->config.cwm_ignore_extcca = 0; ah->config.pcie_powersave_enable = 0; ah->config.pcie_clock_req = 0; ah->config.pcie_waen = 0; ah->config.analog_shiftreg = 1; ah->config.enable_ani = 1; for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { ah->config.spurchans[i][0] = AR_NO_SPUR; ah->config.spurchans[i][1] = AR_NO_SPUR; } /* PAPRD needs some more work to be enabled */ ah->config.paprd_disable = 1; ah->config.rx_intr_mitigation = 1; ah->config.pcieSerDesWrite = 1; } static void ath9k_hw_init_defaults(struct ath_hw *ah) { struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); regulatory->country_code = CTRY_DEFAULT; regulatory->power_limit = MAX_RATE_POWER; regulatory->tp_scale = ATH9K_TP_SCALE_MAX; ah->hw_version.magic = AR5416_MAGIC; ah->hw_version.subvendorid = 0; ah->atim_window = 0; ah->sta_id1_defaults = AR_STA_ID1_CRPT_MIC_ENABLE | AR_STA_ID1_MCAST_KSRCH; if (AR_SREV_9100(ah)) ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX; ah->enable_32kHz_clock = DONT_USE_32KHZ; ah->slottime = 20; ah->globaltxtimeout = (u32) -1; ah->power_mode = ATH9K_PM_UNDEFINED; } static int ath9k_hw_init_macaddr(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); u32 sum; int i; u16 eeval; static const u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW }; sum = 0; for (i = 0; i < 3; i++) { eeval = ah->eep_ops->get_eeprom(ah, EEP_MAC[i]); sum += eeval; common->macaddr[2 * i] = eeval >> 8; common->macaddr[2 * i + 1] = eeval & 0xff; } if (sum == 0 || sum == 0xffff * 3) return -EADDRNOTAVAIL; return 0; } static int 
ath9k_hw_post_init(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	int ecode;

	/* Non-USB parts get a register read/write sanity test first */
	if (common->bus_ops->ath_bus_type != ATH_USB) {
		if (!ath9k_hw_chip_test(ah))
			return -ENODEV;
	}

	/* Pre-AR9300 chips have an external RF section that must be claimed */
	if (!AR_SREV_9300_20_OR_LATER(ah)) {
		ecode = ar9002_hw_rf_claim(ah);
		if (ecode != 0)
			return ecode;
	}

	ecode = ath9k_hw_eeprom_init(ah);
	if (ecode != 0)
		return ecode;

	DBG("ath9k: "
		"Eeprom VER: %d, REV: %d\n",
		ah->eep_ops->get_eeprom_ver(ah),
		ah->eep_ops->get_eeprom_rev(ah));

	ecode = ath9k_hw_rf_alloc_ext_banks(ah);
	if (ecode) {
		DBG("ath9k: "
			"Failed allocating banks for external radio\n");
		/* Release any banks that were partially allocated */
		ath9k_hw_rf_free_ext_banks(ah);
		return ecode;
	}

	/* ANI (adaptive noise immunity) is not set up on AR9100/AR9340 */
	if (!AR_SREV_9100(ah) && !AR_SREV_9340(ah)) {
		ath9k_hw_ani_setup(ah);
		ath9k_hw_ani_init(ah);
	}

	return 0;
}

/* Select the AR9002- or AR9003-family ops table for this chip revision */
static void ath9k_hw_attach_ops(struct ath_hw *ah)
{
	if (AR_SREV_9300_20_OR_LATER(ah))
		ar9003_hw_attach_ops(ah);
	else
		ar9002_hw_attach_ops(ah);
}

/* Called for all hardware families */
static int __ath9k_hw_init(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	int r = 0;

	ath9k_hw_read_revisions(ah);

	/*
	 * Read back AR_WA into a permanent copy and set bits 14 and 17.
	 * We need to do this to avoid RMW of this register. We cannot
	 * read the reg when chip is asleep.
	 */
	ah->WARegVal = REG_READ(ah, AR_WA);
	ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
			 AR_WA_ASPM_TIMER_BASED_DISABLE);

	if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
		DBG("ath9k: Couldn't reset chip\n");
		return -EIO;
	}

	ath9k_hw_init_defaults(ah);
	ath9k_hw_init_config(ah);

	ath9k_hw_attach_ops(ah);

	if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
		DBG("ath9k: Couldn't wakeup chip\n");
		return -EIO;
	}

	/* Decide whether register accesses must be serialized (needed on
	 * some non-PCIe parts) when the config leaves it on AUTO. */
	if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
		if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
		    ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
		     !ah->is_pciexpress)) {
			ah->config.serialize_regmode =
				SER_REG_MODE_ON;
		} else {
			ah->config.serialize_regmode =
				SER_REG_MODE_OFF;
		}
	}

	DBG2("ath9k: serialize_regmode is %d\n",
		ah->config.serialize_regmode);

	/* AR9285/AR9271 have a smaller TX FIFO */
	if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
		ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1;
	else
		ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;

	/* Reject MAC revisions this driver does not know about */
	switch (ah->hw_version.macVersion) {
	case AR_SREV_VERSION_5416_PCI:
	case AR_SREV_VERSION_5416_PCIE:
	case AR_SREV_VERSION_9160:
	case AR_SREV_VERSION_9100:
	case AR_SREV_VERSION_9280:
	case AR_SREV_VERSION_9285:
	case AR_SREV_VERSION_9287:
	case AR_SREV_VERSION_9271:
	case AR_SREV_VERSION_9300:
	case AR_SREV_VERSION_9485:
	case AR_SREV_VERSION_9340:
		break;
	default:
		DBG("ath9k: "
			"Mac Chip Rev 0x%02x.%x is not supported by this driver\n",
			ah->hw_version.macVersion, ah->hw_version.macRev);
		return -EOPNOTSUPP;
	}

	if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah))
		ah->is_pciexpress = 0;

	ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
	ath9k_hw_init_cal_settings(ah);

	/* Trim the ANI feature mask down to what this chip supports */
	ah->ani_function = ATH9K_ANI_ALL;
	if (AR_SREV_9280_20_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
		ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
	if (!AR_SREV_9300_20_OR_LATER(ah))
		ah->ani_function &= ~ATH9K_ANI_MRC_CCK;

	ath9k_hw_init_mode_regs(ah);

	if (ah->is_pciexpress)
		ath9k_hw_configpcipowersave(ah, 0, 0);
	else
		ath9k_hw_disablepcie(ah);

	if (!AR_SREV_9300_20_OR_LATER(ah))
		ar9002_hw_cck_chan14_spread(ah);

	r = ath9k_hw_post_init(ah);
	if (r)
		return r;

	ath9k_hw_init_mode_gain_regs(ah);
	r = ath9k_hw_fill_cap_info(ah);
	if (r)
		return r;

	r = ath9k_hw_init_macaddr(ah);
	if (r) {
		DBG("ath9k: Failed to initialize MAC address\n");
		return r;
	}

	if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
		ah->tx_trig_level = (AR_FTRIG_256B >> AR_FTRIG_S);
	else
		ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);

	common->state = ATH_HW_INITIALIZED;

	return 0;
}

/*
 * Top-level hardware init entry point: validate the PCI device ID
 * (USB devices are accepted unconditionally), then run __ath9k_hw_init().
 * Returns 0 on success or a negative error code.
 */
int ath9k_hw_init(struct ath_hw *ah)
{
	int ret;
	struct ath_common *common = ath9k_hw_common(ah);

	/* These are all the AR5008/AR9001/AR9002 hardware family of chipsets */
	switch (ah->hw_version.devid) {
	case AR5416_DEVID_PCI:
	case AR5416_DEVID_PCIE:
	case AR5416_AR9100_DEVID:
	case AR9160_DEVID_PCI:
	case AR9280_DEVID_PCI:
	case AR9280_DEVID_PCIE:
	case AR9285_DEVID_PCIE:
	case AR9287_DEVID_PCI:
	case AR9287_DEVID_PCIE:
	case AR2427_DEVID_PCIE:
	case AR9300_DEVID_PCIE:
	case AR9300_DEVID_AR9485_PCIE:
	case AR9300_DEVID_AR9340:
		break;
	default:
		if (common->bus_ops->ath_bus_type == ATH_USB)
			break;
		DBG("ath9k: Hardware device ID 0x%04x not supported\n",
			ah->hw_version.devid);
		return -EOPNOTSUPP;
	}

	ret = __ath9k_hw_init(ah);
	if (ret) {
		DBG("ath9k: "
			"Unable to initialize hardware; initialization status: %d\n",
			ret);
		return ret;
	}

	return 0;
}

/*
 * Trigger a PLL SQSUM_DVC measurement and busy-wait until the hardware
 * flags completion, then return the measured value (bits >> 3).
 */
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
{
	REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
	udelay(100);
	REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);

	/* NOTE(review): no timeout here — spins until PLL4_MEAS_DONE */
	while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
		udelay(100);

	return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
}

/*
 * Program the RTC/baseband PLL for the given channel. AR9485 and AR9340
 * need chip-specific pre-programming of the DPLL/PLL2 blocks before the
 * common AR_RTC_PLL_CONTROL write; chan may be NULL (PLL defaults).
 */
static void ath9k_hw_init_pll(struct ath_hw *ah,
			      struct ath9k_channel *chan)
{
	u32 pll;

	if (AR_SREV_9485(ah)) {
		/* program BB PLL ki and kd value, ki=0x4, kd=0x40 */
		REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
			      AR_CH0_BB_DPLL2_PLL_PWD, 0x1);
		REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
			      AR_CH0_DPLL2_KD, 0x40);
		REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
			      AR_CH0_DPLL2_KI, 0x4);

		REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
			      AR_CH0_BB_DPLL1_REFDIV, 0x5);
		REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
			      AR_CH0_BB_DPLL1_NINI, 0x58);
		REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
			      AR_CH0_BB_DPLL1_NFRAC, 0x0);

		REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
			      AR_CH0_BB_DPLL2_OUTDIV, 0x1);
		REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
			      AR_CH0_BB_DPLL2_LOCAL_PLL, 0x1);
		REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
			      AR_CH0_BB_DPLL2_EN_NEGTRIG, 0x1);

		/* program BB PLL phase_shift to 0x6 */
		REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
			      AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x6);

		/* power the PLL back up after reprogramming */
		REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
			      AR_CH0_BB_DPLL2_PLL_PWD, 0x0);
		udelay(1000);
	} else if (AR_SREV_9340(ah)) {
		u32 regval, pll2_divint, pll2_divfrac, refdiv;

		REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
		udelay(1000);

		REG_SET_BIT(ah, AR_PHY_PLL_MODE, 0x1 << 16);
		udelay(100);

		/* Divider values depend on the reference clock rate */
		if (ah->is_clk_25mhz) {
			pll2_divint = 0x54;
			pll2_divfrac = 0x1eb85;
			refdiv = 3;
		} else {
			pll2_divint = 88;
			pll2_divfrac = 0;
			refdiv = 5;
		}

		regval = REG_READ(ah, AR_PHY_PLL_MODE);
		regval |= (0x1 << 16);
		REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
		udelay(100);

		REG_WRITE(ah, AR_PHY_PLL_CONTROL, (refdiv << 27) |
			  (pll2_divint << 18) | pll2_divfrac);
		udelay(100);

		regval = REG_READ(ah, AR_PHY_PLL_MODE);
		regval = (regval & 0x80071fff) | (0x1 << 30) |
			 (0x1 << 13) | (0x4 << 26) | (0x18 << 19);
		REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
		REG_WRITE(ah, AR_PHY_PLL_MODE,
			  REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff);
		udelay(1000);
	}

	pll = ath9k_hw_compute_pll_control(ah, chan);

	REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);

	if (AR_SREV_9485(ah) || AR_SREV_9340(ah))
		udelay(1000);

	/* Switch the core clock for ar9271 to 117Mhz */
	if (AR_SREV_9271(ah)) {
		udelay(500);
		REG_WRITE(ah, 0x50040, 0x304);
	}

	udelay(RTC_PLL_SETTLE_DELAY);

	REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);

	/* AR9340 sleep-clock programming, again per reference clock rate */
	if (AR_SREV_9340(ah)) {
		if (ah->is_clk_25mhz) {
			REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
			REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
			REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
		} else {
			REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
			REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
			REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
		}
		udelay(100);
	}
}
static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah) { u32 sync_default = AR_INTR_SYNC_DEFAULT; u32 imr_reg = AR_IMR_TXERR | AR_IMR_TXURN | AR_IMR_RXERR | AR_IMR_RXORN;; if (AR_SREV_9340(ah)) sync_default &= ~AR_INTR_SYNC_HOST1_FATAL; if (AR_SREV_9300_20_OR_LATER(ah)) { imr_reg |= AR_IMR_RXOK_HP; if (ah->config.rx_intr_mitigation) imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; else imr_reg |= AR_IMR_RXOK_LP; } else { if (ah->config.rx_intr_mitigation) imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; else imr_reg |= AR_IMR_RXOK; } if (ah->config.tx_intr_mitigation) imr_reg |= AR_IMR_TXINTM | AR_IMR_TXMINTR; else imr_reg |= AR_IMR_TXOK; ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_IMR, imr_reg); // ah->imrs2_reg |= AR_IMR_S2_GTT; REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg); if (!AR_SREV_9100(ah)) { REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF); REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default); REG_WRITE(ah, AR_INTR_SYNC_MASK, 0); } REGWRITE_BUFFER_FLUSH(ah); if (AR_SREV_9300_20_OR_LATER(ah)) { REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0); REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, 0); REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE, 0); REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK, 0); } } static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us) { u32 val = ath9k_hw_mac_to_clks(ah, us); val = min(val, (u32) 0xFFFF); REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val); } static void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us) { u32 val = ath9k_hw_mac_to_clks(ah, us); val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_ACK)); REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val); } static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us) { u32 val = ath9k_hw_mac_to_clks(ah, us); val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS)); REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, val); } static int ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu) { if (tu > 0xFFFF) { DBG("ath9k: " "bad global tx timeout %d\n", tu); ah->globaltxtimeout = (u32) -1; return 0; } else { REG_RMW_FIELD(ah, 
			      AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
		ah->globaltxtimeout = tu;
		return 1;
	}
}

/*
 * Apply the cached global MAC timing parameters: misc mode bits, slot
 * time, and the ACK/CTS timeouts derived from slot time + SIFS +
 * coverage class.
 */
void ath9k_hw_init_global_settings(struct ath_hw *ah)
{
	int acktimeout;
	int slottime;
	int sifstime;

	DBG2("ath9k: ah->misc_mode 0x%x\n",
		ah->misc_mode);

	if (ah->misc_mode != 0)
		REG_SET_BIT(ah, AR_PCU_MISC, ah->misc_mode);

	/* SIFS is 16us on 5 GHz, 10us on 2.4 GHz */
	if ((ah->dev->channels + ah->dev->channel)->band == NET80211_BAND_5GHZ)
		sifstime = 16;
	else
		sifstime = 10;

	/* As defined by IEEE 802.11-2007 17.3.8.6 */
	slottime = ah->slottime + 3 * ah->coverage_class;
	acktimeout = slottime + sifstime;

	/*
	 * Workaround for early ACK timeouts, add an offset to match the
	 * initval's 64us ack timeout value.
	 * This was initially only meant to work around an issue with delayed
	 * BA frames in some implementations, but it has been found to fix ACK
	 * timeout issues in other cases as well.
	 */
	if ((ah->dev->channels + ah->dev->channel)->band == NET80211_BAND_2GHZ)
		acktimeout += 64 - sifstime - ah->slottime;

	ath9k_hw_setslottime(ah, ah->slottime);
	ath9k_hw_set_ack_timeout(ah, acktimeout);
	ath9k_hw_set_cts_timeout(ah, acktimeout);
	if (ah->globaltxtimeout != (u32) -1)
		ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout);
}

/*
 * Tear down the hardware: put an initialized chip into full sleep, then
 * release the external RF bank allocations in all cases.
 */
void ath9k_hw_deinit(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	if (common->state < ATH_HW_INITIALIZED)
		goto free_hw;

	ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);

free_hw:
	ath9k_hw_rf_free_ext_banks(ah);
}

/*******/
/* INI */
/*******/

/* Return the regulatory CTL index for the channel's band + modulation */
u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
{
	u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);

	if (IS_CHAN_B(chan))
		ctl |= CTL_11B;
	else if (IS_CHAN_G(chan))
		ctl |= CTL_11G;
	else
		ctl |= CTL_11A;

	return ctl;
}

/****************************************/
/* Reset and Channel Switching Routines */
/****************************************/

/*
 * Configure MAC DMA: AHB prefetch, 128-byte DMA bursts for TX/RX,
 * TX trigger level, RX FIFO threshold and (AR9300+) RX queue depths,
 * plus the PCU TXBUF usable-size workaround.
 */
static inline void ath9k_hw_set_dma(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	ENABLE_REGWRITE_BUFFER(ah);

	/*
	 * set AHB_MODE not to do cacheline prefetches
	 */
	if (!AR_SREV_9300_20_OR_LATER(ah))
		REG_SET_BIT(ah, AR_AHB_MODE, AR_AHB_PREFETCH_RD_EN);

	/*
	 * let mac dma reads be in 128 byte chunks
	 */
	REG_RMW(ah, AR_TXCFG, AR_TXCFG_DMASZ_128B, AR_TXCFG_DMASZ_MASK);

	REGWRITE_BUFFER_FLUSH(ah);

	/*
	 * Restore TX Trigger Level to its pre-reset value.
	 * The initial value depends on whether aggregation is enabled, and is
	 * adjusted whenever underruns are detected.
	 */
	if (!AR_SREV_9300_20_OR_LATER(ah))
		REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level);

	ENABLE_REGWRITE_BUFFER(ah);

	/*
	 * let mac dma writes be in 128 byte chunks
	 */
	REG_RMW(ah, AR_RXCFG, AR_RXCFG_DMASZ_128B, AR_RXCFG_DMASZ_MASK);

	/*
	 * Setup receive FIFO threshold to hold off TX activities
	 */
	REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);

	if (AR_SREV_9300_20_OR_LATER(ah)) {
		REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_HP, 0x1);
		REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_LP, 0x1);

		ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
			ah->caps.rx_status_len);
	}

	/*
	 * reduce the number of usable entries in PCU TXBUF to avoid
	 * wrap around issues.
	 */
	if (AR_SREV_9285(ah)) {
		/* For AR9285 the number of Fifos are reduced to half.
		 * So set the usable tx buf size also to half to
		 * avoid data/delimiter underruns
		 */
		REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
			  AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE);
	} else if (!AR_SREV_9271(ah)) {
		REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
			  AR_PCU_TXBUF_CTRL_USABLE_SIZE);
	}

	REGWRITE_BUFFER_FLUSH(ah);

	if (AR_SREV_9300_20_OR_LATER(ah))
		ath9k_hw_reset_txstatus_ring(ah);
}

/* Clear STA/AP/adhoc mode bits and enable key-search mode in AR_STA_ID1 */
static void ath9k_hw_set_operating_mode(struct ath_hw *ah)
{
	u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC;
	u32 set = AR_STA_ID1_KSRCH_MODE;

	REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
	REG_RMW(ah, AR_STA_ID1, set, mask);
}

/*
 * Decompose a scaled delta-slope coefficient into the mantissa/exponent
 * form the baseband registers expect: find the highest set bit, derive
 * the exponent relative to COEF_SCALE_S, and round the mantissa.
 */
void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah __unused,
				   u32 coef_scaled,
				   u32 *coef_mantissa, u32 *coef_exponent)
{
	u32 coef_exp, coef_man;

	/* locate the index of the most significant set bit */
	for (coef_exp = 31; coef_exp > 0; coef_exp--)
		if ((coef_scaled >> coef_exp) & 0x1)
			break;

	coef_exp = 14 - (coef_exp - COEF_SCALE_S);

	/* round up by half of the final LSB before truncating */
	coef_man = coef_scaled + (1 << (COEF_SCALE_S - coef_exp - 1));

	*coef_mantissa = coef_man >> (COEF_SCALE_S - coef_exp);
	*coef_exponent = coef_exp - 16;
}

/*
 * Perform a warm or cold MAC reset via AR_RTC_RC. Returns 1 on success,
 * 0 if the RTC never leaves the reset state.
 */
static int ath9k_hw_set_reset(struct ath_hw *ah, int type)
{
	u32 rst_flags;
	u32 tmpReg;

	if (AR_SREV_9100(ah)) {
		REG_RMW_FIELD(ah, AR_RTC_DERIVED_CLK,
			      AR_RTC_DERIVED_CLK_PERIOD, 1);
		/* read back to flush the write */
		(void)REG_READ(ah, AR_RTC_DERIVED_CLK);
	}

	ENABLE_REGWRITE_BUFFER(ah);

	if (AR_SREV_9300_20_OR_LATER(ah)) {
		REG_WRITE(ah, AR_WA, ah->WARegVal);
		udelay(10);
	}

	REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
		  AR_RTC_FORCE_WAKE_ON_INT);

	if (AR_SREV_9100(ah)) {
		rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD |
			AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET;
	} else {
		tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE);
		if (tmpReg &
		    (AR_INTR_SYNC_LOCAL_TIMEOUT |
		     AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
			u32 val;
			REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);

			/* bus timeout seen: also reset the host interface */
			val = AR_RC_HOSTIF;
			if (!AR_SREV_9300_20_OR_LATER(ah))
				val |= AR_RC_AHB;
			REG_WRITE(ah, AR_RC, val);

		} else if (!AR_SREV_9300_20_OR_LATER(ah))
			REG_WRITE(ah, AR_RC, AR_RC_AHB);

		rst_flags = AR_RTC_RC_MAC_WARM;
		if (type == ATH9K_RESET_COLD)
			rst_flags |= AR_RTC_RC_MAC_COLD;
	}

	REG_WRITE(ah, AR_RTC_RC, rst_flags);

	REGWRITE_BUFFER_FLUSH(ah);

	udelay(50);

	REG_WRITE(ah, AR_RTC_RC, 0);
	if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
		DBG("ath9k: "
			"RTC stuck in MAC reset\n");
		return 0;
	}

	if (!AR_SREV_9100(ah))
		REG_WRITE(ah, AR_RC, 0);

	if (AR_SREV_9100(ah))
		udelay(50);

	return 1;
}

/*
 * Power-on reset sequence: toggle AR_RTC_RESET, wait for the RTC to
 * report ON, then finish with a warm reset. Returns 1 on success.
 */
static int ath9k_hw_set_reset_power_on(struct ath_hw *ah)
{
	ENABLE_REGWRITE_BUFFER(ah);

	if (AR_SREV_9300_20_OR_LATER(ah)) {
		REG_WRITE(ah, AR_WA, ah->WARegVal);
		udelay(10);
	}

	REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
		  AR_RTC_FORCE_WAKE_ON_INT);

	if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_RC, AR_RC_AHB);

	REG_WRITE(ah, AR_RTC_RESET, 0);

	REGWRITE_BUFFER_FLUSH(ah);

	if (!AR_SREV_9300_20_OR_LATER(ah))
		udelay(2);

	if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_RC, 0);

	REG_WRITE(ah, AR_RTC_RESET, 1);

	if (!ath9k_hw_wait(ah,
			   AR_RTC_STATUS,
			   AR_RTC_STATUS_M,
			   AR_RTC_STATUS_ON,
			   AH_WAIT_TIMEOUT)) {
		DBG("ath9k: "
			"RTC not waking up\n");
		return 0;
	}

	return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
}

/*
 * Dispatch a reset of the requested type (power-on / warm / cold) after
 * forcing the chip awake. Returns 1 on success, 0 on failure or an
 * unknown reset type.
 */
static int ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
{
	if (AR_SREV_9300_20_OR_LATER(ah)) {
		REG_WRITE(ah, AR_WA, ah->WARegVal);
		udelay(10);
	}

	REG_WRITE(ah, AR_RTC_FORCE_WAKE,
		  AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);

	switch (type) {
	case ATH9K_RESET_POWER_ON:
		return ath9k_hw_set_reset_power_on(ah);
	case ATH9K_RESET_WARM:
	case ATH9K_RESET_COLD:
		return ath9k_hw_set_reset(ah, type);
	default:
		return 0;
	}
}

/*
 * Full chip reset for a channel change: reset (power-on for AR9280 with
 * open-loop power control, warm otherwise), wake the chip, then
 * reinitialize the PLL and RF mode for the target channel.
 */
static int ath9k_hw_chip_reset(struct ath_hw *ah,
			       struct ath9k_channel *chan)
{
	if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) {
		if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON))
			return 0;
	} else if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
		return 0;

	if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
		return 0;

	ah->chip_fullsleep = 0;
	ath9k_hw_init_pll(ah, chan);
	ath9k_hw_set_rfmode(ah, chan);

	return 1;
}

static int
ath9k_hw_channel_change(struct ath_hw *ah, struct ath9k_channel *chan)
{
	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
	struct net80211_channel *channel = chan->chan;
	u32 qnum;
	int r;

	/* Refuse a fast channel change while any TX queue still has frames */
	for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
		if (ath9k_hw_numtxpending(ah, qnum)) {
			DBG("ath9k: "
				"Transmit frames pending on queue %d\n", qnum);
			return 0;
		}
	}

	if (!ath9k_hw_rfbus_req(ah)) {
		DBG("ath9k: Could not kill baseband RX\n");
		return 0;
	}

	ath9k_hw_set_channel_regs(ah, chan);

	r = ath9k_hw_rf_set_freq(ah, chan);
	if (r) {
		DBG("ath9k: Failed to set channel\n");
		return 0;
	}
	ath9k_hw_set_clockrate(ah);

	/* Reapply TX power limits for the new channel's regulatory CTL */
	ah->eep_ops->set_txpower(ah, chan,
			     ath9k_regd_get_ctl(regulatory, chan),
			     0, channel->maxpower * 2,
			     min((u32) MAX_RATE_POWER,
			     (u32) regulatory->power_limit), 0);

	ath9k_hw_rfbus_done(ah);

	if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
		ath9k_hw_set_delta_slope(ah, chan);

	ath9k_hw_spur_mitigate_freq(ah, chan);

	return 1;
}

/* Re-drive every GPIO selected in ah->gpio_mask with its cached value */
static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
{
	u32 gpio_mask = ah->gpio_mask;
	int i;

	for (i = 0; gpio_mask; i++, gpio_mask >>= 1) {
		if (!(gpio_mask & 1))
			continue;

		ath9k_hw_cfg_output(ah, i, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
		ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
	}
}

/*
 * Poll the observation bus to decide whether the MAC is still alive.
 * Returns 1 if alive (or on AR9285 1.2+ where the check is skipped),
 * 0 if the MAC appears hung after 50 samples.
 */
int ath9k_hw_check_alive(struct ath_hw *ah)
{
	int count = 50;
	u32 reg;

	if (AR_SREV_9285_12_OR_LATER(ah))
		return 1;

	do {
		reg = REG_READ(ah, AR_OBS_BUS_1);

		/* these observed states indicate a (possibly) stuck MAC */
		if ((reg & 0x7E7FFFEF) == 0x00702400)
			continue;

		switch (reg & 0x7E000B00) {
		case 0x1E000000:
		case 0x52000B00:
		case 0x18000B00:
			continue;
		default:
			return 1;
		}
	} while (count-- > 0);

	return 0;
}

/*
 * Main hardware reset path. Wakes the chip, optionally takes the fast
 * channel-change shortcut, otherwise performs a full chip reset and
 * reprograms INI values, MAC/BSSID registers, queues, interrupt masks,
 * DMA, calibration and byte-swap configuration for the new channel.
 * Returns 0 on success or a negative error code.
 */
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
		   struct ath9k_hw_cal_data *caldata, int bChannelChange)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u32 saveLedState;
	struct ath9k_channel *curchan = ah->curchan;
	u32 saveDefAntenna;
	u32 macStaId1;
	int i, r;

	ah->txchainmask = common->tx_chainmask;
	ah->rxchainmask = common->rx_chainmask;

	if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
		return -EIO;

	/* Capture the noise floor of the channel we are leaving */
	if (curchan && !ah->chip_fullsleep)
		ath9k_hw_getnf(ah, curchan);

	ah->caldata = caldata;
	if (caldata &&
	    (chan->channel != caldata->channel ||
	     (chan->channelFlags & ~CHANNEL_CW_INT) !=
	     (caldata->channelFlags & ~CHANNEL_CW_INT))) {
		/* Operating channel changed, reset channel calibration data */
		memset(caldata, 0, sizeof(*caldata));
		ath9k_init_nfcal_hist_buffer(ah, chan);
	}

	/* Fast channel change: same band/mode, chip awake, not AR9280 */
	if (bChannelChange &&
	    (ah->chip_fullsleep != 1) &&
	    (ah->curchan != NULL) &&
	    (chan->channel != ah->curchan->channel) &&
	    ((chan->channelFlags & CHANNEL_ALL) ==
	     (ah->curchan->channelFlags & CHANNEL_ALL)) &&
	    (!AR_SREV_9280(ah) || AR_DEVID_7010(ah))) {

		if (ath9k_hw_channel_change(ah, chan)) {
			ath9k_hw_loadnf(ah, ah->curchan);
			ath9k_hw_start_nfcal(ah, 1);
			if (AR_SREV_9271(ah))
				ar9002_hw_load_ani_reg(ah, chan);
			return 0;
		}
	}

	/* Save state that the full reset would otherwise clobber */
	saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
	if (saveDefAntenna == 0)
		saveDefAntenna = 1;

	macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;

	saveLedState = REG_READ(ah, AR_CFG_LED) &
		(AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
		 AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW);

	ath9k_hw_mark_phy_inactive(ah);

	ah->paprd_table_write_done = 0;

	/* Only required on the first reset */
	if (AR_SREV_9271(ah) && ah->htc_reset_init) {
		REG_WRITE(ah,
			  AR9271_RESET_POWER_DOWN_CONTROL,
			  AR9271_RADIO_RF_RST);
		udelay(50);
	}

	if (!ath9k_hw_chip_reset(ah, chan)) {
		DBG("ath9k: Chip reset failed\n");
		return -EINVAL;
	}

	/* Only required on the first reset */
	if (AR_SREV_9271(ah) && ah->htc_reset_init) {
		ah->htc_reset_init = 0;
		REG_WRITE(ah,
			  AR9271_RESET_POWER_DOWN_CONTROL,
			  AR9271_GATE_MAC_CTL);
		udelay(50);
	}

	if (AR_SREV_9280_20_OR_LATER(ah))
		REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);

	if (!AR_SREV_9300_20_OR_LATER(ah))
		ar9002_hw_enable_async_fifo(ah);

	r = ath9k_hw_process_ini(ah, chan);
	if (r)
		return r;

	/* Setup MFP options for CCMP */
	if (AR_SREV_9280_20_OR_LATER(ah)) {
		/* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
		 * frames when constructing CCMP AAD. */
		REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
			      0xc7ff);
		ah->sw_mgmt_crypto = 0;
	} else if (AR_SREV_9160_10_OR_LATER(ah)) {
		/* Disable hardware crypto for management frames */
		REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
			    AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
		REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
			    AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
		ah->sw_mgmt_crypto = 1;
	} else
		ah->sw_mgmt_crypto = 1;

	if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
		ath9k_hw_set_delta_slope(ah, chan);

	ath9k_hw_spur_mitigate_freq(ah, chan);
	ah->eep_ops->set_board_values(ah, chan);

	ENABLE_REGWRITE_BUFFER(ah);

	/* Restore MAC address / station ID and antenna/BSSID state */
	REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
	REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
		  | macStaId1
		  | AR_STA_ID1_RTS_USE_DEF
		  | (ah->config.
		     ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
		  | ah->sta_id1_defaults);
	ath_hw_setbssidmask(common);
	REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
	ath9k_hw_write_associd(ah);
	REG_WRITE(ah, AR_ISR, ~0);
	REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);

	REGWRITE_BUFFER_FLUSH(ah);

	ath9k_hw_set_operating_mode(ah);

	r = ath9k_hw_rf_set_freq(ah, chan);
	if (r)
		return r;

	ath9k_hw_set_clockrate(ah);

	ENABLE_REGWRITE_BUFFER(ah);

	/* Give each DCU its own QCU mask bit */
	for (i = 0; i < AR_NUM_DCU; i++)
		REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);

	REGWRITE_BUFFER_FLUSH(ah);

	ah->intr_txqs = 0;
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		ath9k_hw_resettxqueue(ah, i);

	ath9k_hw_init_interrupt_masks(ah);

	ath9k_hw_ani_cache_ini_regs(ah);

	if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
		ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);

	ath9k_hw_init_global_settings(ah);

	if (!AR_SREV_9300_20_OR_LATER(ah)) {
		ar9002_hw_update_async_fifo(ah);
		ar9002_hw_enable_wep_aggregation(ah);
	}

	REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);

	ath9k_hw_set_dma(ah);

	REG_WRITE(ah, AR_OBS, 8);

	/* Interrupt mitigation timer values (hardware clocks) */
	if (ah->config.rx_intr_mitigation) {
		REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
		REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
	}

	if (ah->config.tx_intr_mitigation) {
		REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300);
		REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750);
	}

	ath9k_hw_init_bb(ah, chan);

	if (!ath9k_hw_init_cal(ah, chan))
		return -EIO;

	ENABLE_REGWRITE_BUFFER(ah);

	ath9k_hw_restore_chainmask(ah);
	REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);

	REGWRITE_BUFFER_FLUSH(ah);

	/*
	 * For big endian systems turn on swapping for descriptors
	 */
	if (AR_SREV_9100(ah)) {
		u32 mask;
		mask = REG_READ(ah, AR_CFG);
		if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
			DBG2("ath9k: "
				"CFG Byte Swap Set 0x%x\n", mask);
		} else {
			mask =
				INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
			REG_WRITE(ah, AR_CFG, mask);
			DBG2("ath9k: "
				"Setting CFG 0x%x\n", REG_READ(ah, AR_CFG));
		}
	} else {
		if (common->bus_ops->ath_bus_type == ATH_USB) {
			/* Configure AR9271 target WLAN */
			if (AR_SREV_9271(ah))
				REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
			else
				REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
		}
#if __BYTE_ORDER == __BIG_ENDIAN
		else if (AR_SREV_9340(ah))
			REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
		else
			REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
#endif
	}

	if (AR_SREV_9300_20_OR_LATER(ah)) {
		ar9003_hw_disable_phy_restart(ah);
	}

	ath9k_hw_apply_gpio_override(ah);

	return 0;
}

/******************************/
/* Power Management (Chipset) */
/******************************/

/*
 * Notify Power Mgt is disabled in self-generated frames.
 * If requested, force chip to sleep.
 */
static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
{
	REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
	if (setChip) {
		/*
		 * Clear the RTC force wake bit to allow the
		 * mac to go to sleep.
		 */
		REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);

		if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
			REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);

		/* Shutdown chip. Active low */
		if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah))
			REG_CLR_BIT(ah, (AR_RTC_RESET), AR_RTC_RESET_EN);
	}

	/* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode.
	 */
	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}

/*
 * Force the chip out of sleep: set AR_WA workaround bits, perform a
 * power-on reset if the RTC reports shutdown, assert force-wake and
 * poll until the RTC reaches the ON state. Returns 1 on success,
 * 0 if the chip failed to wake within POWER_UP_TIME.
 */
static int ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
{
	u32 val;
	int i;

	/* Set Bits 14 and 17 of AR_WA before powering on the chip. */
	if (AR_SREV_9300_20_OR_LATER(ah)) {
		REG_WRITE(ah, AR_WA, ah->WARegVal);
		udelay(10);
	}

	if (setChip) {
		if ((REG_READ(ah, AR_RTC_STATUS) &
		     AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
			if (ath9k_hw_set_reset_reg(ah,
					   ATH9K_RESET_POWER_ON) != 1) {
				return 0;
			}
			if (!AR_SREV_9300_20_OR_LATER(ah))
				ath9k_hw_init_pll(ah, NULL);
		}
		if (AR_SREV_9100(ah))
			REG_SET_BIT(ah, AR_RTC_RESET,
				    AR_RTC_RESET_EN);

		REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
			    AR_RTC_FORCE_WAKE_EN);
		udelay(50);

		/* poll in 50us steps, re-asserting force-wake each time */
		for (i = POWER_UP_TIME / 50; i > 0; i--) {
			val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
			if (val == AR_RTC_STATUS_ON)
				break;
			udelay(50);
			REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
				    AR_RTC_FORCE_WAKE_EN);
		}
		if (i == 0) {
			DBG("ath9k: "
				"Failed to wakeup in %dus\n",
				POWER_UP_TIME / 20);
			return 0;
		}
	}

	REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);

	return 1;
}

/*
 * Transition between power modes (AWAKE / FULL-SLEEP). Returns 1 on
 * success (or if already in the requested mode), 0 on failure or an
 * unknown mode.
 */
int ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
{
	int status = 1, setChip = 1;
	static const char *modes[] = {
		"AWAKE",
		"FULL-SLEEP",
		"NETWORK SLEEP",
		"UNDEFINED"
	};

	if (ah->power_mode == mode)
		return status;

	DBG2("ath9k: %s -> %s\n",
		modes[ah->power_mode], modes[mode]);

	switch (mode) {
	case ATH9K_PM_AWAKE:
		status = ath9k_hw_set_power_awake(ah, setChip);
		break;
	case ATH9K_PM_FULL_SLEEP:
		ath9k_set_power_sleep(ah, setChip);
		ah->chip_fullsleep = 1;
		break;
	default:
		DBG("ath9k: Unknown power mode %d\n", mode);
		return 0;
	}
	ah->power_mode = mode;

	return status;
}

/*******************/
/* HW Capabilities */
/*******************/

/*
 * Populate ah->caps and the regulatory state from the EEPROM: regdomain
 * mapping, supported bands, chainmasks, GPIO count, rfkill settings and
 * per-family capability flags. Returns 0 on success, -EINVAL if the
 * EEPROM marks no band as supported.
 */
int ath9k_hw_fill_cap_info(struct ath_hw *ah)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
	struct ath_common *common = ath9k_hw_common(ah);

	u16 eeval;
	u8 ant_div_ctl1, tx_chainmask, rx_chainmask;

	eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
	regulatory->current_rd = eeval;

	eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_1);
	if (AR_SREV_9285_12_OR_LATER(ah))
		eeval |= AR9285_RDEXT_DEFAULT;
	regulatory->current_rd_ext = eeval;

	/* Remap certain regdomain codes for NEW_A subvendor boards */
	if (ah->hw_version.subvendorid == AR_SUBVENDOR_ID_NEW_A) {
		if (regulatory->current_rd == 0x64 ||
		    regulatory->current_rd == 0x65)
			regulatory->current_rd += 5;
		else if (regulatory->current_rd == 0x41)
			regulatory->current_rd = 0x43;
		DBG2("ath9k: "
			"regdomain mapped to 0x%x\n", regulatory->current_rd);
	}

	eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
	if ((eeval & (AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A)) == 0) {
		DBG("ath9k: "
			"no band has been marked as supported in EEPROM\n");
		return -EINVAL;
	}

	if (eeval & AR5416_OPFLAGS_11A)
		pCap->hw_caps |= ATH9K_HW_CAP_5GHZ;

	if (eeval & AR5416_OPFLAGS_11G)
		pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;

	pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
	/*
	 * For AR9271 we will temporarilly uses the rx chainmax as read from
	 * the EEPROM.
	 */
	if ((ah->hw_version.devid == AR5416_DEVID_PCI) &&
	    !(eeval & AR5416_OPFLAGS_11A) &&
	    !(AR_SREV_9271(ah)))
		/* CB71: GPIO 0 is pulled down to indicate 3 rx chains */
		pCap->rx_chainmask = ath9k_hw_gpio_get(ah, 0) ? 0x5 : 0x7;
	else if (AR_SREV_9100(ah))
		pCap->rx_chainmask = 0x7;
	else
		/* Use rx_chainmask from EEPROM. */
		pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);

	ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;

	/* enable key search for every frame in an aggregate */
	if (AR_SREV_9300_20_OR_LATER(ah))
		ah->misc_mode |= AR_PCU_ALWAYS_PERFORM_KEYSEARCH;

	common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;

	/* HT is not supported by this driver */
	pCap->hw_caps &= ~ATH9K_HW_CAP_HT;

	if (AR_SREV_9271(ah))
		pCap->num_gpio_pins = AR9271_NUM_GPIO;
	else if (AR_DEVID_7010(ah))
		pCap->num_gpio_pins = AR7010_NUM_GPIO;
	else if (AR_SREV_9285_12_OR_LATER(ah))
		pCap->num_gpio_pins = AR9285_NUM_GPIO;
	else if (AR_SREV_9280_20_OR_LATER(ah))
		pCap->num_gpio_pins = AR928X_NUM_GPIO;
	else
		pCap->num_gpio_pins = AR_NUM_GPIO;

	if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
		pCap->hw_caps |= ATH9K_HW_CAP_CST;
		pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
	} else {
		pCap->rts_aggr_limit = (8 * 1024);
	}

	ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
	if (ah->rfsilent & EEP_RFSILENT_ENABLED) {
		ah->rfkill_gpio =
			MS(ah->rfsilent, EEP_RFSILENT_GPIO_SEL);
		ah->rfkill_polarity =
			MS(ah->rfsilent, EEP_RFSILENT_POLARITY);

		pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
	}

	pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;

	if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
		pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
	else
		pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;

	if (AR_SREV_9300_20_OR_LATER(ah)) {
		pCap->hw_caps |= ATH9K_HW_CAP_FASTCLOCK;
		if (!AR_SREV_9485(ah))
			pCap->hw_caps |= ATH9K_HW_CAP_LDPC;

		pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
		pCap->rx_lp_qdepth = ATH9K_HW_RX_LP_QDEPTH;
		pCap->rx_status_len = sizeof(struct ar9003_rxs);
		pCap->tx_desc_len = sizeof(struct ar9003_txc);
		pCap->txs_len = sizeof(struct ar9003_txs);
		if (!ah->config.paprd_disable &&
		    ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
			pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
	} else {
		pCap->tx_desc_len = sizeof(struct ath_desc);
		if (AR_SREV_9280_20(ah) &&
		    ((ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) <=
		      AR5416_EEP_MINOR_VER_16) ||
		     ah->eep_ops->get_eeprom(ah, EEP_FSTCLK_5G)))
			pCap->hw_caps |= ATH9K_HW_CAP_FASTCLOCK;
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;

	if (AR_SREV_9300_20_OR_LATER(ah))
		ah->ent_mode = REG_READ(ah, AR_ENT_OTP);

	if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
		pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;

	if (AR_SREV_9285(ah))
		if (ah->eep_ops->get_eeprom(ah, EEP_MODAL_VER) >= 3) {
			ant_div_ctl1 =
				ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
			if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1))
				pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
		}

	if (AR_SREV_9300_20_OR_LATER(ah)) {
		if (ah->eep_ops->get_eeprom(ah, EEP_CHAIN_MASK_REDUCE))
			pCap->hw_caps |= ATH9K_HW_CAP_APM;
	}

	if (AR_SREV_9485(ah)) {
		ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
		/*
		 * enable the diversity-combining algorithm only when
		 * both enable_lna_div and enable_fast_div are set
		 *		Table for Diversity
		 * ant_div_alt_lnaconf		bit 0-1
		 * ant_div_main_lnaconf		bit 2-3
		 * ant_div_alt_gaintb		bit 4
		 * ant_div_main_gaintb		bit 5
		 * enable_ant_div_lnadiv	bit 6
		 * enable_ant_fast_div		bit 7
		 */
		if ((ant_div_ctl1 >> 0x6) == 0x3)
			pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
	}

	if (AR_SREV_9485_10(ah)) {
		pCap->pcie_lcr_extsync_en = 1;
		pCap->pcie_lcr_offset = 0x80;
	}

	/* Count the chains present in each mask */
	tx_chainmask = pCap->tx_chainmask;
	rx_chainmask = pCap->rx_chainmask;
	while (tx_chainmask || rx_chainmask) {
		if (tx_chainmask & BIT(0))
			pCap->max_txchains++;
		if (rx_chainmask & BIT(0))
			pCap->max_rxchains++;

		tx_chainmask >>= 1;
		rx_chainmask >>= 1;
	}

	return 0;
}

/****************************/
/* GPIO / RFKILL / Antennae */
/****************************/

/*
 * Route the given signal type to a GPIO pin via the output mux
 * registers (5 bits per pin, 6 pins per register). Pre-AR9280 MUX1
 * writes use a read-modify-write with a documented bit shuffle.
 */
static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
					 u32 gpio, u32 type)
{
	int addr;
	u32 gpio_shift, tmp;

	if (gpio > 11)
		addr = AR_GPIO_OUTPUT_MUX3;
	else if (gpio > 5)
		addr = AR_GPIO_OUTPUT_MUX2;
	else
		addr = AR_GPIO_OUTPUT_MUX1;

	gpio_shift = (gpio % 6) * 5;

	if (AR_SREV_9280_20_OR_LATER(ah)
	    || (addr != AR_GPIO_OUTPUT_MUX1)) {
		REG_RMW(ah, addr, (type << gpio_shift),
			(0x1f << gpio_shift));
	} else {
		tmp = REG_READ(ah, addr);
		tmp =
			((tmp & 0x1F0) << 1) | (tmp & ~0x1F0);
		tmp &= ~(0x1f << gpio_shift);
		tmp |= (type << gpio_shift);
		REG_WRITE(ah, addr, tmp);
	}
}

/* Configure a GPIO pin as an input (AR7010 uses a separate OE register) */
void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
{
	u32 gpio_shift;

	if (AR_DEVID_7010(ah)) {
		gpio_shift = gpio;
		REG_RMW(ah, AR7010_GPIO_OE,
			(AR7010_GPIO_OE_AS_INPUT << gpio_shift),
			(AR7010_GPIO_OE_MASK << gpio_shift));
		return;
	}

	gpio_shift = gpio << 1;
	REG_RMW(ah,
		AR_GPIO_OE_OUT,
		(AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
		(AR_GPIO_OE_OUT_DRV << gpio_shift));
}

/*
 * Read the current level of a GPIO pin. Returns 0xffffffff for an
 * out-of-range pin; note AR7010 input polarity is inverted relative to
 * the other families (== 0 vs != 0).
 */
u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
{
#define MS_REG_READ(x, y) \
	(MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & (AR_GPIO_BIT(y)))

	if (gpio >= ah->caps.num_gpio_pins)
		return 0xffffffff;

	if (AR_DEVID_7010(ah)) {
		u32 val;
		val = REG_READ(ah, AR7010_GPIO_IN);
		return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
	} else if (AR_SREV_9300_20_OR_LATER(ah))
		return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) &
			AR_GPIO_BIT(gpio)) != 0;
	else if (AR_SREV_9271(ah))
		return MS_REG_READ(AR9271, gpio) != 0;
	else if (AR_SREV_9287_11_OR_LATER(ah))
		return MS_REG_READ(AR9287, gpio) != 0;
	else if (AR_SREV_9285_12_OR_LATER(ah))
		return MS_REG_READ(AR9285, gpio) != 0;
	else if (AR_SREV_9280_20_OR_LATER(ah))
		return MS_REG_READ(AR928X, gpio) != 0;
	else
		return MS_REG_READ(AR, gpio) != 0;
}

/* Configure a GPIO pin as an output driving the given mux signal type */
void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
			 u32 ah_signal_type)
{
	u32 gpio_shift;

	if (AR_DEVID_7010(ah)) {
		gpio_shift = gpio;
		REG_RMW(ah, AR7010_GPIO_OE,
			(AR7010_GPIO_OE_AS_OUTPUT << gpio_shift),
			(AR7010_GPIO_OE_MASK << gpio_shift));
		return;
	}

	ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
	gpio_shift = 2 * gpio;
	REG_RMW(ah,
		AR_GPIO_OE_OUT,
		(AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
		(AR_GPIO_OE_OUT_DRV << gpio_shift));
}

/* Drive a GPIO output; AR7010 and AR9271 output polarity is inverted */
void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
{
	if (AR_DEVID_7010(ah)) {
		val = val ? 0 : 1;
		REG_RMW(ah, AR7010_GPIO_OUT, ((val&1) << gpio),
			AR_GPIO_BIT(gpio));
		return;
	}

	if (AR_SREV_9271(ah))
		val = ~val;

	REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
		AR_GPIO_BIT(gpio));
}

/* Return the current default antenna selection (3-bit field) */
u32 ath9k_hw_getdefantenna(struct ath_hw *ah)
{
	return REG_READ(ah, AR_DEF_ANTENNA) & 0x7;
}

/* Select the default antenna (only the low 3 bits are used) */
void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
{
	REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
}

/*********************/
/* General Operation */
/*********************/

/*
 * Return the current RX filter, folding the PHY error enables into the
 * ATH9K_RX_FILTER_PHYRADAR / _PHYERR bits.
 */
u32 ath9k_hw_getrxfilter(struct ath_hw *ah)
{
	u32 bits = REG_READ(ah, AR_RX_FILTER);
	u32 phybits = REG_READ(ah, AR_PHY_ERR);

	if (phybits & AR_PHY_ERR_RADAR)
		bits |= ATH9K_RX_FILTER_PHYRADAR;
	if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING))
		bits |= ATH9K_RX_FILTER_PHYERR;

	return bits;
}

/*
 * Program the RX filter; PHY radar/error bits are translated back into
 * AR_PHY_ERR enables, and zero-length-frame DMA is toggled to match.
 */
void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
{
	u32 phybits;

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_RX_FILTER, bits);

	phybits = 0;
	if (bits & ATH9K_RX_FILTER_PHYRADAR)
		phybits |= AR_PHY_ERR_RADAR;
	if (bits & ATH9K_RX_FILTER_PHYERR)
		phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING;
	REG_WRITE(ah, AR_PHY_ERR, phybits);

	if (phybits)
		REG_SET_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA);
	else
		REG_CLR_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA);

	REGWRITE_BUFFER_FLUSH(ah);
}

/* Warm-reset the PHY and reinit the PLL; returns 1 on success */
int ath9k_hw_phy_disable(struct ath_hw *ah)
{
	if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
		return 0;

	ath9k_hw_init_pll(ah, NULL);
	return 1;
}

/* Wake the chip, cold-reset it and reinit the PLL; returns 1 on success */
int ath9k_hw_disable(struct ath_hw *ah)
{
	if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
		return 0;

	if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD))
		return 0;

	ath9k_hw_init_pll(ah, NULL);
	return 1;
}

/*
 * Apply a new TX power limit (capped at MAX_RATE_POWER) for the current
 * channel and push it to the EEPROM-specific set_txpower op.
 */
void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, int test)
{
	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct net80211_channel *channel = chan->chan;

	regulatory->power_limit = min(limit, (u32) MAX_RATE_POWER);

	ah->eep_ops->set_txpower(ah, chan,
				 ath9k_regd_get_ctl(regulatory, chan),
				 0, channel->maxpower * 2,
				 min((u32) MAX_RATE_POWER,
				 (u32) regulatory->power_limit), test);
}

/* Public wrapper around ath9k_hw_set_operating_mode() */
void ath9k_hw_setopmode(struct ath_hw *ah)
{
	ath9k_hw_set_operating_mode(ah);
}

/* Program the two 32-bit multicast filter registers */
void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1)
{
	REG_WRITE(ah, AR_MCAST_FIL0, filter0);
	REG_WRITE(ah, AR_MCAST_FIL1, filter1);
}

/* Write the current BSSID and AID into the BSS_ID registers */
void ath9k_hw_write_associd(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(common->curbssid));
	REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(common->curbssid + 4) |
		  ((common->curaid & 0x3fff) << AR_BSS_ID1_AID_S));
}

/* Force 20 MHz (non-HT40) MAC mode by writing 0 to AR_2040_MODE */
void ath9k_hw_set11nmac2040(struct ath_hw *ah)
{
	u32 macmode;

	macmode = 0;

	REG_WRITE(ah, AR_2040_MODE, macmode);
}

/* MAC/baseband version -> marketing name lookup table */
static struct {
	u32 version;
	const char * name;
} ath_mac_bb_names[] = {
	/* Devices with external radios */
	{ AR_SREV_VERSION_5416_PCI,	"5416" },
	{ AR_SREV_VERSION_5416_PCIE,	"5418" },
	{ AR_SREV_VERSION_9100,		"9100" },
	{ AR_SREV_VERSION_9160,		"9160" },
	/* Single-chip solutions */
	{ AR_SREV_VERSION_9280,		"9280" },
	{ AR_SREV_VERSION_9285,		"9285" },
	{ AR_SREV_VERSION_9287,		"9287" },
	{ AR_SREV_VERSION_9271,		"9271" },
	{ AR_SREV_VERSION_9300,		"9300" },
	{ AR_SREV_VERSION_9485,		"9485" },
};

/* For devices with external radios */
static struct {
	u16 version;
	const char * name;
} ath_rf_names[] = {
	{ 0,				"5133" },
	{ AR_RAD5133_SREV_MAJOR,	"5133" },
	{ AR_RAD5122_SREV_MAJOR,	"5122" },
	{ AR_RAD2133_SREV_MAJOR,	"2133" },
	{ AR_RAD2122_SREV_MAJOR,	"2122" }
};

/*
 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
 */
static const char *ath9k_hw_mac_bb_name(u32 mac_bb_version)
{
	unsigned int i;

	for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) {
		if (ath_mac_bb_names[i].version == mac_bb_version) {
			return ath_mac_bb_names[i].name;
		}
	}

	return "????";
}

/*
 * Return the RF name. "????" is returned if the RF is unknown.
 * Used for devices with external radios.
 */
static const char *ath9k_hw_rf_name(u16 rf_version)
{
	unsigned int i;

	for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) {
		if (ath_rf_names[i].version == rf_version) {
			return ath_rf_names[i].name;
		}
	}

	return "????";
}

/*
 * Format a human-readable hardware name into hw_name (at most len
 * bytes, NUL-terminated).
 */
void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
{
	int used;

	/* chipsets >= AR9280 are single-chip */
	if (AR_SREV_9280_20_OR_LATER(ah)) {
		used = snprintf(hw_name, len,
			       "Atheros AR%s Rev:%x",
			       ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
			       ah->hw_version.macRev);
	} else {
		used = snprintf(hw_name, len,
			       "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
			       ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
			       ah->hw_version.macRev,
			       ath9k_hw_rf_name((ah->hw_version.analog5GhzRev
						& AR_RADIO_SREV_MAJOR)),
			       ah->hw_version.phyRev);
	}

	/* NOTE(review): snprintf returns the would-be length, so on
	 * truncation 'used' can be >= len and this write is then out of
	 * bounds — callers must pass a sufficiently large buffer; worth
	 * clamping 'used' to len - 1. */
	hw_name[used] = '\0';
}
gpl-2.0
rpcraig/tiamat-kernel-seandroid
drivers/platform/x86/topstar-laptop.c
315
5081
/* * ACPI driver for Topstar notebooks (hotkeys support only) * * Copyright (c) 2009 Herton Ronaldo Krzesinski <herton@mandriva.com.br> * * Implementation inspired by existing x86 platform drivers, in special * asus/eepc/fujitsu-laptop, thanks to their authors * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/acpi.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #define ACPI_TOPSTAR_CLASS "topstar" struct topstar_hkey { struct input_dev *inputdev; }; static const struct key_entry topstar_keymap[] = { { KE_KEY, 0x80, { KEY_BRIGHTNESSUP } }, { KE_KEY, 0x81, { KEY_BRIGHTNESSDOWN } }, { KE_KEY, 0x83, { KEY_VOLUMEUP } }, { KE_KEY, 0x84, { KEY_VOLUMEDOWN } }, { KE_KEY, 0x85, { KEY_MUTE } }, { KE_KEY, 0x86, { KEY_SWITCHVIDEOMODE } }, { KE_KEY, 0x87, { KEY_F13 } }, /* touchpad enable/disable key */ { KE_KEY, 0x88, { KEY_WLAN } }, { KE_KEY, 0x8a, { KEY_WWW } }, { KE_KEY, 0x8b, { KEY_MAIL } }, { KE_KEY, 0x8c, { KEY_MEDIA } }, /* Known non hotkey events don't handled or that we don't care yet */ { KE_IGNORE, 0x8e, }, { KE_IGNORE, 0x8f, }, { KE_IGNORE, 0x90, }, /* * 'G key' generate two event codes, convert to only * one event/key code for now, consider replacing by * a switch (3G switch - SW_3G?) */ { KE_KEY, 0x96, { KEY_F14 } }, { KE_KEY, 0x97, { KEY_F14 } }, { KE_END, 0 } }; static void acpi_topstar_notify(struct acpi_device *device, u32 event) { static bool dup_evnt[2]; bool *dup; struct topstar_hkey *hkey = acpi_driver_data(device); /* 0x83 and 0x84 key events comes duplicated... 
*/ if (event == 0x83 || event == 0x84) { dup = &dup_evnt[event - 0x83]; if (*dup) { *dup = false; return; } *dup = true; } if (!sparse_keymap_report_event(hkey->inputdev, event, 1, true)) pr_info("unknown event = 0x%02x\n", event); } static int acpi_topstar_fncx_switch(struct acpi_device *device, bool state) { acpi_status status; union acpi_object fncx_params[1] = { { .type = ACPI_TYPE_INTEGER } }; struct acpi_object_list fncx_arg_list = { 1, &fncx_params[0] }; fncx_params[0].integer.value = state ? 0x86 : 0x87; status = acpi_evaluate_object(device->handle, "FNCX", &fncx_arg_list, NULL); if (ACPI_FAILURE(status)) { pr_err("Unable to switch FNCX notifications\n"); return -ENODEV; } return 0; } static int acpi_topstar_init_hkey(struct topstar_hkey *hkey) { struct input_dev *input; int error; input = input_allocate_device(); if (!input) { pr_err("Unable to allocate input device\n"); return -ENOMEM; } input->name = "Topstar Laptop extra buttons"; input->phys = "topstar/input0"; input->id.bustype = BUS_HOST; error = sparse_keymap_setup(input, topstar_keymap, NULL); if (error) { pr_err("Unable to setup input device keymap\n"); goto err_free_dev; } error = input_register_device(input); if (error) { pr_err("Unable to register input device\n"); goto err_free_keymap; } hkey->inputdev = input; return 0; err_free_keymap: sparse_keymap_free(input); err_free_dev: input_free_device(input); return error; } static int acpi_topstar_add(struct acpi_device *device) { struct topstar_hkey *tps_hkey; tps_hkey = kzalloc(sizeof(struct topstar_hkey), GFP_KERNEL); if (!tps_hkey) return -ENOMEM; strcpy(acpi_device_name(device), "Topstar TPSACPI"); strcpy(acpi_device_class(device), ACPI_TOPSTAR_CLASS); if (acpi_topstar_fncx_switch(device, true)) goto add_err; if (acpi_topstar_init_hkey(tps_hkey)) goto add_err; device->driver_data = tps_hkey; return 0; add_err: kfree(tps_hkey); return -ENODEV; } static int acpi_topstar_remove(struct acpi_device *device, int type) { struct topstar_hkey *tps_hkey 
= acpi_driver_data(device); acpi_topstar_fncx_switch(device, false); sparse_keymap_free(tps_hkey->inputdev); input_unregister_device(tps_hkey->inputdev); kfree(tps_hkey); return 0; } static const struct acpi_device_id topstar_device_ids[] = { { "TPSACPI01", 0 }, { "", 0 }, }; MODULE_DEVICE_TABLE(acpi, topstar_device_ids); static struct acpi_driver acpi_topstar_driver = { .name = "Topstar laptop ACPI driver", .class = ACPI_TOPSTAR_CLASS, .ids = topstar_device_ids, .ops = { .add = acpi_topstar_add, .remove = acpi_topstar_remove, .notify = acpi_topstar_notify, }, }; static int __init topstar_laptop_init(void) { int ret; ret = acpi_bus_register_driver(&acpi_topstar_driver); if (ret < 0) return ret; printk(KERN_INFO "Topstar Laptop ACPI extras driver loaded\n"); return 0; } static void __exit topstar_laptop_exit(void) { acpi_bus_unregister_driver(&acpi_topstar_driver); } module_init(topstar_laptop_init); module_exit(topstar_laptop_exit); MODULE_AUTHOR("Herton Ronaldo Krzesinski"); MODULE_DESCRIPTION("Topstar Laptop ACPI Extras driver"); MODULE_LICENSE("GPL");
gpl-2.0
penhoi/linux-3.14.56
arch/mips/alchemy/board-mtx1.c
315
8081
/* * MTX-1 platform devices registration (Au1500) * * Copyright (C) 2007-2009, Florian Fainelli <florian@openwrt.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/leds.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <mtd/mtd-abi.h> #include <asm/bootinfo.h> #include <asm/reboot.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1xxx_eth.h> #include <prom.h> const char *get_system_type(void) { return "MTX-1"; } void __init prom_init(void) { unsigned char *memsize_str; unsigned long memsize; prom_argc = fw_arg0; prom_argv = (char **)fw_arg1; prom_envp = (char **)fw_arg2; prom_init_cmdline(); memsize_str = prom_getenv("memsize"); if (!memsize_str || kstrtoul(memsize_str, 0, &memsize)) memsize = 0x04000000; add_memory_region(0, memsize, BOOT_MEM_RAM); } void prom_putchar(unsigned char c) { alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); } static void mtx1_reset(char *c) { /* Jump to the reset vector */ __asm__ __volatile__("jr\t%0" : : "r"(0xbfc00000)); } static void mtx1_power_off(void) { while (1) asm volatile ( " .set mips32 \n" " wait \n" " .set mips0 \n"); } void __init 
board_setup(void) { #if IS_ENABLED(CONFIG_USB_OHCI_HCD) /* Enable USB power switch */ alchemy_gpio_direction_output(204, 0); #endif /* IS_ENABLED(CONFIG_USB_OHCI_HCD) */ /* Initialize sys_pinfunc */ au_writel(SYS_PF_NI2, SYS_PINFUNC); /* Initialize GPIO */ au_writel(~0, KSEG1ADDR(AU1000_SYS_PHYS_ADDR) + SYS_TRIOUTCLR); alchemy_gpio_direction_output(0, 0); /* Disable M66EN (PCI 66MHz) */ alchemy_gpio_direction_output(3, 1); /* Disable PCI CLKRUN# */ alchemy_gpio_direction_output(1, 1); /* Enable EXT_IO3 */ alchemy_gpio_direction_output(5, 0); /* Disable eth PHY TX_ER */ /* Enable LED and set it to green */ alchemy_gpio_direction_output(211, 1); /* green on */ alchemy_gpio_direction_output(212, 0); /* red off */ pm_power_off = mtx1_power_off; _machine_halt = mtx1_power_off; _machine_restart = mtx1_reset; printk(KERN_INFO "4G Systems MTX-1 Board\n"); } /******************************************************************************/ static struct gpio_keys_button mtx1_gpio_button[] = { { .gpio = 207, .code = BTN_0, .desc = "System button", } }; static struct gpio_keys_platform_data mtx1_buttons_data = { .buttons = mtx1_gpio_button, .nbuttons = ARRAY_SIZE(mtx1_gpio_button), }; static struct platform_device mtx1_button = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &mtx1_buttons_data, } }; static struct resource mtx1_wdt_res[] = { [0] = { .start = 215, .end = 215, .name = "mtx1-wdt-gpio", .flags = IORESOURCE_IRQ, } }; static struct platform_device mtx1_wdt = { .name = "mtx1-wdt", .id = 0, .num_resources = ARRAY_SIZE(mtx1_wdt_res), .resource = mtx1_wdt_res, }; static struct gpio_led default_leds[] = { { .name = "mtx1:green", .gpio = 211, }, { .name = "mtx1:red", .gpio = 212, }, }; static struct gpio_led_platform_data mtx1_led_data = { .num_leds = ARRAY_SIZE(default_leds), .leds = default_leds, }; static struct platform_device mtx1_gpio_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &mtx1_led_data, } }; static struct mtd_partition 
mtx1_mtd_partitions[] = { { .name = "filesystem", .size = 0x01C00000, .offset = 0, }, { .name = "yamon", .size = 0x00100000, .offset = MTDPART_OFS_APPEND, .mask_flags = MTD_WRITEABLE, }, { .name = "kernel", .size = 0x002c0000, .offset = MTDPART_OFS_APPEND, }, { .name = "yamon env", .size = 0x00040000, .offset = MTDPART_OFS_APPEND, }, }; static struct physmap_flash_data mtx1_flash_data = { .width = 4, .nr_parts = 4, .parts = mtx1_mtd_partitions, }; static struct resource mtx1_mtd_resource = { .start = 0x1e000000, .end = 0x1fffffff, .flags = IORESOURCE_MEM, }; static struct platform_device mtx1_mtd = { .name = "physmap-flash", .dev = { .platform_data = &mtx1_flash_data, }, .num_resources = 1, .resource = &mtx1_mtd_resource, }; static struct resource alchemy_pci_host_res[] = { [0] = { .start = AU1500_PCI_PHYS_ADDR, .end = AU1500_PCI_PHYS_ADDR + 0xfff, .flags = IORESOURCE_MEM, }, }; static int mtx1_pci_idsel(unsigned int devsel, int assert) { /* This function is only necessary to support a proprietary Cardbus * adapter on the mtx-1 "singleboard" variant. It triggers a custom * logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals. 
*/ udelay(1); if (assert && devsel != 0) /* Suppress signal to Cardbus */ alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */ else alchemy_gpio_set_value(1, 1); /* set EXT_IO3 ON */ udelay(1); return 1; } static const char mtx1_irqtab[][5] = { [0] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 00 - AdapterA-Slot0 (top) */ [1] = { -1, AU1500_PCI_INTB, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 01 - AdapterA-Slot1 (bottom) */ [2] = { -1, AU1500_PCI_INTC, AU1500_PCI_INTD, 0xff, 0xff }, /* IDSEL 02 - AdapterB-Slot0 (top) */ [3] = { -1, AU1500_PCI_INTD, AU1500_PCI_INTC, 0xff, 0xff }, /* IDSEL 03 - AdapterB-Slot1 (bottom) */ [4] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, /* IDSEL 04 - AdapterC-Slot0 (top) */ [5] = { -1, AU1500_PCI_INTB, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 05 - AdapterC-Slot1 (bottom) */ [6] = { -1, AU1500_PCI_INTC, AU1500_PCI_INTD, 0xff, 0xff }, /* IDSEL 06 - AdapterD-Slot0 (top) */ [7] = { -1, AU1500_PCI_INTD, AU1500_PCI_INTC, 0xff, 0xff }, /* IDSEL 07 - AdapterD-Slot1 (bottom) */ }; static int mtx1_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin) { return mtx1_irqtab[slot][pin]; } static struct alchemy_pci_platdata mtx1_pci_pd = { .board_map_irq = mtx1_map_pci_irq, .board_pci_idsel = mtx1_pci_idsel, .pci_cfg_set = PCI_CONFIG_AEN | PCI_CONFIG_R2H | PCI_CONFIG_R1H | PCI_CONFIG_CH | #if defined(__MIPSEB__) PCI_CONFIG_SIC_HWA_DAT | PCI_CONFIG_SM, #else 0, #endif }; static struct platform_device mtx1_pci_host = { .dev.platform_data = &mtx1_pci_pd, .name = "alchemy-pci", .id = 0, .num_resources = ARRAY_SIZE(alchemy_pci_host_res), .resource = alchemy_pci_host_res, }; static struct platform_device *mtx1_devs[] __initdata = { &mtx1_pci_host, &mtx1_gpio_leds, &mtx1_wdt, &mtx1_button, &mtx1_mtd, }; static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = { .phy_search_highest_addr = 1, .phy1_search_mac0 = 1, }; static int __init mtx1_register_devices(void) { int rc; irq_set_irq_type(AU1500_GPIO204_INT, 
IRQ_TYPE_LEVEL_HIGH); irq_set_irq_type(AU1500_GPIO201_INT, IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(AU1500_GPIO202_INT, IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(AU1500_GPIO203_INT, IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(AU1500_GPIO205_INT, IRQ_TYPE_LEVEL_LOW); au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata); rc = gpio_request(mtx1_gpio_button[0].gpio, mtx1_gpio_button[0].desc); if (rc < 0) { printk(KERN_INFO "mtx1: failed to request %d\n", mtx1_gpio_button[0].gpio); goto out; } gpio_direction_input(mtx1_gpio_button[0].gpio); out: return platform_add_devices(mtx1_devs, ARRAY_SIZE(mtx1_devs)); } arch_initcall(mtx1_register_devices);
gpl-2.0
thoemy/android_kernel_htc_endeavoru
sound/soc/blackfin/bfin-eval-adau1701.c
571
3662
/* * Machine driver for EVAL-ADAU1701MINIZ on Analog Devices bfin * evaluation boards. * * Copyright 2011 Analog Devices Inc. * Author: Lars-Peter Clausen <lars@metafoo.de> * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/pcm_params.h> #include "../codecs/adau1701.h" static const struct snd_soc_dapm_widget bfin_eval_adau1701_dapm_widgets[] = { SND_SOC_DAPM_SPK("Speaker", NULL), SND_SOC_DAPM_LINE("Line Out", NULL), SND_SOC_DAPM_LINE("Line In", NULL), }; static const struct snd_soc_dapm_route bfin_eval_adau1701_dapm_routes[] = { { "Speaker", NULL, "OUT0" }, { "Speaker", NULL, "OUT1" }, { "Line Out", NULL, "OUT2" }, { "Line Out", NULL, "OUT3" }, { "IN0", NULL, "Line In" }, { "IN1", NULL, "Line In" }, }; static int bfin_eval_adau1701_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; struct snd_soc_dai *codec_dai = rtd->codec_dai; int ret; ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM); if (ret) return ret; ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM); if (ret) return ret; ret = snd_soc_dai_set_sysclk(codec_dai, ADAU1701_CLK_SRC_OSC, 12288000, SND_SOC_CLOCK_IN); return ret; } static struct snd_soc_ops bfin_eval_adau1701_ops = { .hw_params = bfin_eval_adau1701_hw_params, }; static struct snd_soc_dai_link bfin_eval_adau1701_dai[] = { { .name = "adau1701", .stream_name = "adau1701", .cpu_dai_name = "bfin-i2s.0", .codec_dai_name = "adau1701", .platform_name = "bfin-i2s-pcm-audio", .codec_name = "adau1701.0-0034", .ops = &bfin_eval_adau1701_ops, }, { .name = "adau1701", .stream_name = "adau1701", .cpu_dai_name = "bfin-i2s.1", .codec_dai_name = "adau1701", .platform_name = "bfin-i2s-pcm-audio", 
.codec_name = "adau1701.0-0034", .ops = &bfin_eval_adau1701_ops, }, }; static struct snd_soc_card bfin_eval_adau1701 = { .name = "bfin-eval-adau1701", .dai_link = &bfin_eval_adau1701_dai[CONFIG_SND_BF5XX_SPORT_NUM], .num_links = 1, .dapm_widgets = bfin_eval_adau1701_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(bfin_eval_adau1701_dapm_widgets), .dapm_routes = bfin_eval_adau1701_dapm_routes, .num_dapm_routes = ARRAY_SIZE(bfin_eval_adau1701_dapm_routes), }; static int bfin_eval_adau1701_probe(struct platform_device *pdev) { struct snd_soc_card *card = &bfin_eval_adau1701; card->dev = &pdev->dev; return snd_soc_register_card(&bfin_eval_adau1701); } static int __devexit bfin_eval_adau1701_remove(struct platform_device *pdev) { struct snd_soc_card *card = platform_get_drvdata(pdev); snd_soc_unregister_card(card); return 0; } static struct platform_driver bfin_eval_adau1701_driver = { .driver = { .name = "bfin-eval-adau1701", .owner = THIS_MODULE, .pm = &snd_soc_pm_ops, }, .probe = bfin_eval_adau1701_probe, .remove = __devexit_p(bfin_eval_adau1701_remove), }; static int __init bfin_eval_adau1701_init(void) { return platform_driver_register(&bfin_eval_adau1701_driver); } module_init(bfin_eval_adau1701_init); static void __exit bfin_eval_adau1701_exit(void) { platform_driver_unregister(&bfin_eval_adau1701_driver); } module_exit(bfin_eval_adau1701_exit); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_DESCRIPTION("ALSA SoC bfin ADAU1701 driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:bfin-eval-adau1701");
gpl-2.0
LuckJava/KVMGT-kernel
fs/stat.c
1083
12240
/* * linux/fs/stat.c * * Copyright (C) 1991, 1992 Linus Torvalds */ #include <linux/export.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/file.h> #include <linux/highuid.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/pagemap.h> #include <asm/uaccess.h> #include <asm/unistd.h> void generic_fillattr(struct inode *inode, struct kstat *stat) { stat->dev = inode->i_sb->s_dev; stat->ino = inode->i_ino; stat->mode = inode->i_mode; stat->nlink = inode->i_nlink; stat->uid = inode->i_uid; stat->gid = inode->i_gid; stat->rdev = inode->i_rdev; stat->size = i_size_read(inode); stat->atime = inode->i_atime; stat->mtime = inode->i_mtime; stat->ctime = inode->i_ctime; stat->blksize = (1 << inode->i_blkbits); stat->blocks = inode->i_blocks; } EXPORT_SYMBOL(generic_fillattr); /** * vfs_getattr_nosec - getattr without security checks * @path: file to get attributes from * @stat: structure to return attributes in * * Get attributes without calling security_inode_getattr. * * Currently the only caller other than vfs_getattr is internal to the * filehandle lookup code, which uses only the inode number and returns * no attributes to any user. Any other code probably wants * vfs_getattr. 
*/ int vfs_getattr_nosec(struct path *path, struct kstat *stat) { struct inode *inode = path->dentry->d_inode; if (inode->i_op->getattr) return inode->i_op->getattr(path->mnt, path->dentry, stat); generic_fillattr(inode, stat); return 0; } EXPORT_SYMBOL(vfs_getattr_nosec); int vfs_getattr(struct path *path, struct kstat *stat) { int retval; retval = security_inode_getattr(path->mnt, path->dentry); if (retval) return retval; return vfs_getattr_nosec(path, stat); } EXPORT_SYMBOL(vfs_getattr); int vfs_fstat(unsigned int fd, struct kstat *stat) { struct fd f = fdget_raw(fd); int error = -EBADF; if (f.file) { error = vfs_getattr(&f.file->f_path, stat); fdput(f); } return error; } EXPORT_SYMBOL(vfs_fstat); int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat, int flag) { struct path path; int error = -EINVAL; unsigned int lookup_flags = 0; if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH)) != 0) goto out; if (!(flag & AT_SYMLINK_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; if (flag & AT_EMPTY_PATH) lookup_flags |= LOOKUP_EMPTY; retry: error = user_path_at(dfd, filename, lookup_flags, &path); if (error) goto out; error = vfs_getattr(&path, stat); path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out: return error; } EXPORT_SYMBOL(vfs_fstatat); int vfs_stat(const char __user *name, struct kstat *stat) { return vfs_fstatat(AT_FDCWD, name, stat, 0); } EXPORT_SYMBOL(vfs_stat); int vfs_lstat(const char __user *name, struct kstat *stat) { return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW); } EXPORT_SYMBOL(vfs_lstat); #ifdef __ARCH_WANT_OLD_STAT /* * For backward compatibility? Maybe this should be moved * into arch/i386 instead? */ static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf) { static int warncount = 5; struct __old_kernel_stat tmp; if (warncount > 0) { warncount--; printk(KERN_WARNING "VFS: Warning: %s using old stat() call. 
Recompile your binary.\n", current->comm); } else if (warncount < 0) { /* it's laughable, but... */ warncount = 0; } memset(&tmp, 0, sizeof(struct __old_kernel_stat)); tmp.st_dev = old_encode_dev(stat->dev); tmp.st_ino = stat->ino; if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino) return -EOVERFLOW; tmp.st_mode = stat->mode; tmp.st_nlink = stat->nlink; if (tmp.st_nlink != stat->nlink) return -EOVERFLOW; SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid)); SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid)); tmp.st_rdev = old_encode_dev(stat->rdev); #if BITS_PER_LONG == 32 if (stat->size > MAX_NON_LFS) return -EOVERFLOW; #endif tmp.st_size = stat->size; tmp.st_atime = stat->atime.tv_sec; tmp.st_mtime = stat->mtime.tv_sec; tmp.st_ctime = stat->ctime.tv_sec; return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0; } SYSCALL_DEFINE2(stat, const char __user *, filename, struct __old_kernel_stat __user *, statbuf) { struct kstat stat; int error; error = vfs_stat(filename, &stat); if (error) return error; return cp_old_stat(&stat, statbuf); } SYSCALL_DEFINE2(lstat, const char __user *, filename, struct __old_kernel_stat __user *, statbuf) { struct kstat stat; int error; error = vfs_lstat(filename, &stat); if (error) return error; return cp_old_stat(&stat, statbuf); } SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf) { struct kstat stat; int error = vfs_fstat(fd, &stat); if (!error) error = cp_old_stat(&stat, statbuf); return error; } #endif /* __ARCH_WANT_OLD_STAT */ #if BITS_PER_LONG == 32 # define choose_32_64(a,b) a #else # define choose_32_64(a,b) b #endif #define valid_dev(x) choose_32_64(old_valid_dev,new_valid_dev)(x) #define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x) #ifndef INIT_STRUCT_STAT_PADDING # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st)) #endif static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf) { struct stat 
tmp; if (!valid_dev(stat->dev) || !valid_dev(stat->rdev)) return -EOVERFLOW; #if BITS_PER_LONG == 32 if (stat->size > MAX_NON_LFS) return -EOVERFLOW; #endif INIT_STRUCT_STAT_PADDING(tmp); tmp.st_dev = encode_dev(stat->dev); tmp.st_ino = stat->ino; if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino) return -EOVERFLOW; tmp.st_mode = stat->mode; tmp.st_nlink = stat->nlink; if (tmp.st_nlink != stat->nlink) return -EOVERFLOW; SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid)); SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid)); tmp.st_rdev = encode_dev(stat->rdev); tmp.st_size = stat->size; tmp.st_atime = stat->atime.tv_sec; tmp.st_mtime = stat->mtime.tv_sec; tmp.st_ctime = stat->ctime.tv_sec; #ifdef STAT_HAVE_NSEC tmp.st_atime_nsec = stat->atime.tv_nsec; tmp.st_mtime_nsec = stat->mtime.tv_nsec; tmp.st_ctime_nsec = stat->ctime.tv_nsec; #endif tmp.st_blocks = stat->blocks; tmp.st_blksize = stat->blksize; return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? 
-EFAULT : 0; } SYSCALL_DEFINE2(newstat, const char __user *, filename, struct stat __user *, statbuf) { struct kstat stat; int error = vfs_stat(filename, &stat); if (error) return error; return cp_new_stat(&stat, statbuf); } SYSCALL_DEFINE2(newlstat, const char __user *, filename, struct stat __user *, statbuf) { struct kstat stat; int error; error = vfs_lstat(filename, &stat); if (error) return error; return cp_new_stat(&stat, statbuf); } #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT) SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename, struct stat __user *, statbuf, int, flag) { struct kstat stat; int error; error = vfs_fstatat(dfd, filename, &stat, flag); if (error) return error; return cp_new_stat(&stat, statbuf); } #endif SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf) { struct kstat stat; int error = vfs_fstat(fd, &stat); if (!error) error = cp_new_stat(&stat, statbuf); return error; } SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname, char __user *, buf, int, bufsiz) { struct path path; int error; int empty = 0; unsigned int lookup_flags = LOOKUP_EMPTY; if (bufsiz <= 0) return -EINVAL; retry: error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty); if (!error) { struct inode *inode = path.dentry->d_inode; error = empty ? 
-ENOENT : -EINVAL; if (inode->i_op->readlink) { error = security_inode_readlink(path.dentry); if (!error) { touch_atime(&path); error = inode->i_op->readlink(path.dentry, buf, bufsiz); } } path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } } return error; } SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf, int, bufsiz) { return sys_readlinkat(AT_FDCWD, path, buf, bufsiz); } /* ---------- LFS-64 ----------- */ #if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64) #ifndef INIT_STRUCT_STAT64_PADDING # define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st)) #endif static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf) { struct stat64 tmp; INIT_STRUCT_STAT64_PADDING(tmp); #ifdef CONFIG_MIPS /* mips has weird padding, so we don't get 64 bits there */ if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev)) return -EOVERFLOW; tmp.st_dev = new_encode_dev(stat->dev); tmp.st_rdev = new_encode_dev(stat->rdev); #else tmp.st_dev = huge_encode_dev(stat->dev); tmp.st_rdev = huge_encode_dev(stat->rdev); #endif tmp.st_ino = stat->ino; if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino) return -EOVERFLOW; #ifdef STAT64_HAS_BROKEN_ST_INO tmp.__st_ino = stat->ino; #endif tmp.st_mode = stat->mode; tmp.st_nlink = stat->nlink; tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid); tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid); tmp.st_atime = stat->atime.tv_sec; tmp.st_atime_nsec = stat->atime.tv_nsec; tmp.st_mtime = stat->mtime.tv_sec; tmp.st_mtime_nsec = stat->mtime.tv_nsec; tmp.st_ctime = stat->ctime.tv_sec; tmp.st_ctime_nsec = stat->ctime.tv_nsec; tmp.st_size = stat->size; tmp.st_blocks = stat->blocks; tmp.st_blksize = stat->blksize; return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? 
-EFAULT : 0; } SYSCALL_DEFINE2(stat64, const char __user *, filename, struct stat64 __user *, statbuf) { struct kstat stat; int error = vfs_stat(filename, &stat); if (!error) error = cp_new_stat64(&stat, statbuf); return error; } SYSCALL_DEFINE2(lstat64, const char __user *, filename, struct stat64 __user *, statbuf) { struct kstat stat; int error = vfs_lstat(filename, &stat); if (!error) error = cp_new_stat64(&stat, statbuf); return error; } SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf) { struct kstat stat; int error = vfs_fstat(fd, &stat); if (!error) error = cp_new_stat64(&stat, statbuf); return error; } SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename, struct stat64 __user *, statbuf, int, flag) { struct kstat stat; int error; error = vfs_fstatat(dfd, filename, &stat, flag); if (error) return error; return cp_new_stat64(&stat, statbuf); } #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */ /* Caller is here responsible for sufficient locking (ie. 
inode->i_lock) */ void __inode_add_bytes(struct inode *inode, loff_t bytes) { inode->i_blocks += bytes >> 9; bytes &= 511; inode->i_bytes += bytes; if (inode->i_bytes >= 512) { inode->i_blocks++; inode->i_bytes -= 512; } } void inode_add_bytes(struct inode *inode, loff_t bytes) { spin_lock(&inode->i_lock); __inode_add_bytes(inode, bytes); spin_unlock(&inode->i_lock); } EXPORT_SYMBOL(inode_add_bytes); void __inode_sub_bytes(struct inode *inode, loff_t bytes) { inode->i_blocks -= bytes >> 9; bytes &= 511; if (inode->i_bytes < bytes) { inode->i_blocks--; inode->i_bytes += 512; } inode->i_bytes -= bytes; } EXPORT_SYMBOL(__inode_sub_bytes); void inode_sub_bytes(struct inode *inode, loff_t bytes) { spin_lock(&inode->i_lock); __inode_sub_bytes(inode, bytes); spin_unlock(&inode->i_lock); } EXPORT_SYMBOL(inode_sub_bytes); loff_t inode_get_bytes(struct inode *inode) { loff_t ret; spin_lock(&inode->i_lock); ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes; spin_unlock(&inode->i_lock); return ret; } EXPORT_SYMBOL(inode_get_bytes); void inode_set_bytes(struct inode *inode, loff_t bytes) { /* Caller is here responsible for sufficient locking * (ie. inode->i_lock) */ inode->i_blocks = bytes >> 9; inode->i_bytes = bytes & 511; } EXPORT_SYMBOL(inode_set_bytes);
gpl-2.0
abanerj/linux-mac
tools/usb/usbip/src/usbip_detach.c
1339
2292
/* * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> * 2005-2007 Takahiro Hirofuchi * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <ctype.h> #include <limits.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <getopt.h> #include <unistd.h> #include "vhci_driver.h" #include "usbip_common.h" #include "usbip_network.h" #include "usbip.h" static const char usbip_detach_usage_string[] = "usbip detach <args>\n" " -p, --port=<port> " USBIP_VHCI_DRV_NAME " port the device is on\n"; void usbip_detach_usage(void) { printf("usage: %s", usbip_detach_usage_string); } static int detach_port(char *port) { int ret; uint8_t portnum; char path[PATH_MAX+1]; for (unsigned int i = 0; i < strlen(port); i++) if (!isdigit(port[i])) { err("invalid port %s", port); return -1; } /* check max port */ portnum = atoi(port); /* remove the port state file */ snprintf(path, PATH_MAX, VHCI_STATE_PATH"/port%d", portnum); remove(path); rmdir(VHCI_STATE_PATH); ret = usbip_vhci_driver_open(); if (ret < 0) { err("open vhci_driver"); return -1; } ret = usbip_vhci_detach_device(portnum); if (ret < 0) return -1; usbip_vhci_driver_close(); return ret; } int usbip_detach(int argc, char *argv[]) { static const struct option opts[] = { { "port", required_argument, NULL, 'p' }, { NULL, 0, NULL, 0 } }; int opt; int ret = -1; for (;;) { opt = getopt_long(argc, argv, "p:", opts, 
NULL); if (opt == -1) break; switch (opt) { case 'p': ret = detach_port(optarg); goto out; default: goto err_out; } } err_out: usbip_detach_usage(); out: return ret; }
gpl-2.0
Docker-J/Sail_STOCK
drivers/base/regmap/regcache-rbtree.c
1339
10892
/* * Register cache access API - rbtree caching support * * Copyright 2011 Wolfson Microelectronics plc * * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/slab.h> #include <linux/device.h> #include <linux/debugfs.h> #include <linux/rbtree.h> #include <linux/seq_file.h> #include "internal.h" static int regcache_rbtree_write(struct regmap *map, unsigned int reg, unsigned int value); static int regcache_rbtree_exit(struct regmap *map); struct regcache_rbtree_node { /* the actual rbtree node holding this block */ struct rb_node node; /* base register handled by this block */ unsigned int base_reg; /* block of adjacent registers */ void *block; /* number of registers available in the block */ unsigned int blklen; } __attribute__ ((packed)); struct regcache_rbtree_ctx { struct rb_root root; struct regcache_rbtree_node *cached_rbnode; }; static inline void regcache_rbtree_get_base_top_reg( struct regcache_rbtree_node *rbnode, unsigned int *base, unsigned int *top) { *base = rbnode->base_reg; *top = rbnode->base_reg + rbnode->blklen - 1; } static unsigned int regcache_rbtree_get_register( struct regcache_rbtree_node *rbnode, unsigned int idx, unsigned int word_size) { return regcache_get_val(rbnode->block, idx, word_size); } static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode, unsigned int idx, unsigned int val, unsigned int word_size) { regcache_set_val(rbnode->block, idx, val, word_size); } static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map, unsigned int reg) { struct regcache_rbtree_ctx *rbtree_ctx = map->cache; struct rb_node *node; struct regcache_rbtree_node *rbnode; unsigned int base_reg, top_reg; rbnode = rbtree_ctx->cached_rbnode; if (rbnode) { regcache_rbtree_get_base_top_reg(rbnode, &base_reg, 
&top_reg); if (reg >= base_reg && reg <= top_reg) return rbnode; } node = rbtree_ctx->root.rb_node; while (node) { rbnode = container_of(node, struct regcache_rbtree_node, node); regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg); if (reg >= base_reg && reg <= top_reg) { rbtree_ctx->cached_rbnode = rbnode; return rbnode; } else if (reg > top_reg) { node = node->rb_right; } else if (reg < base_reg) { node = node->rb_left; } } return NULL; } static int regcache_rbtree_insert(struct rb_root *root, struct regcache_rbtree_node *rbnode) { struct rb_node **new, *parent; struct regcache_rbtree_node *rbnode_tmp; unsigned int base_reg_tmp, top_reg_tmp; unsigned int base_reg; parent = NULL; new = &root->rb_node; while (*new) { rbnode_tmp = container_of(*new, struct regcache_rbtree_node, node); /* base and top registers of the current rbnode */ regcache_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp, &top_reg_tmp); /* base register of the rbnode to be added */ base_reg = rbnode->base_reg; parent = *new; /* if this register has already been inserted, just return */ if (base_reg >= base_reg_tmp && base_reg <= top_reg_tmp) return 0; else if (base_reg > top_reg_tmp) new = &((*new)->rb_right); else if (base_reg < base_reg_tmp) new = &((*new)->rb_left); } /* insert the node into the rbtree */ rb_link_node(&rbnode->node, parent, new); rb_insert_color(&rbnode->node, root); return 1; } #ifdef CONFIG_DEBUG_FS static int rbtree_show(struct seq_file *s, void *ignored) { struct regmap *map = s->private; struct regcache_rbtree_ctx *rbtree_ctx = map->cache; struct regcache_rbtree_node *n; struct rb_node *node; unsigned int base, top; int nodes = 0; int registers = 0; int average; mutex_lock(&map->lock); for (node = rb_first(&rbtree_ctx->root); node != NULL; node = rb_next(node)) { n = container_of(node, struct regcache_rbtree_node, node); regcache_rbtree_get_base_top_reg(n, &base, &top); seq_printf(s, "%x-%x (%d)\n", base, top, top - base + 1); nodes++; registers += top - base 
+ 1; } if (nodes) average = registers / nodes; else average = 0; seq_printf(s, "%d nodes, %d registers, average %d registers\n", nodes, registers, average); mutex_unlock(&map->lock); return 0; } static int rbtree_open(struct inode *inode, struct file *file) { return single_open(file, rbtree_show, inode->i_private); } static const struct file_operations rbtree_fops = { .open = rbtree_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void rbtree_debugfs_init(struct regmap *map) { debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops); } #else static void rbtree_debugfs_init(struct regmap *map) { } #endif static int regcache_rbtree_init(struct regmap *map) { struct regcache_rbtree_ctx *rbtree_ctx; int i; int ret; map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL); if (!map->cache) return -ENOMEM; rbtree_ctx = map->cache; rbtree_ctx->root = RB_ROOT; rbtree_ctx->cached_rbnode = NULL; for (i = 0; i < map->num_reg_defaults; i++) { ret = regcache_rbtree_write(map, map->reg_defaults[i].reg, map->reg_defaults[i].def); if (ret) goto err; } rbtree_debugfs_init(map); return 0; err: regcache_rbtree_exit(map); return ret; } static int regcache_rbtree_exit(struct regmap *map) { struct rb_node *next; struct regcache_rbtree_ctx *rbtree_ctx; struct regcache_rbtree_node *rbtree_node; /* if we've already been called then just return */ rbtree_ctx = map->cache; if (!rbtree_ctx) return 0; /* free up the rbtree */ next = rb_first(&rbtree_ctx->root); while (next) { rbtree_node = rb_entry(next, struct regcache_rbtree_node, node); next = rb_next(&rbtree_node->node); rb_erase(&rbtree_node->node, &rbtree_ctx->root); kfree(rbtree_node->block); kfree(rbtree_node); } /* release the resources */ kfree(map->cache); map->cache = NULL; return 0; } static int regcache_rbtree_read(struct regmap *map, unsigned int reg, unsigned int *value) { struct regcache_rbtree_node *rbnode; unsigned int reg_tmp; rbnode = regcache_rbtree_lookup(map, reg); if (rbnode) { 
reg_tmp = reg - rbnode->base_reg; *value = regcache_rbtree_get_register(rbnode, reg_tmp, map->cache_word_size); } else { return -ENOENT; } return 0; } static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode, unsigned int pos, unsigned int reg, unsigned int value, unsigned int word_size) { u8 *blk; blk = krealloc(rbnode->block, (rbnode->blklen + 1) * word_size, GFP_KERNEL); if (!blk) return -ENOMEM; /* insert the register value in the correct place in the rbnode block */ memmove(blk + (pos + 1) * word_size, blk + pos * word_size, (rbnode->blklen - pos) * word_size); /* update the rbnode block, its size and the base register */ rbnode->block = blk; rbnode->blklen++; if (!pos) rbnode->base_reg = reg; regcache_rbtree_set_register(rbnode, pos, value, word_size); return 0; } static int regcache_rbtree_write(struct regmap *map, unsigned int reg, unsigned int value) { struct regcache_rbtree_ctx *rbtree_ctx; struct regcache_rbtree_node *rbnode, *rbnode_tmp; struct rb_node *node; unsigned int val; unsigned int reg_tmp; unsigned int pos; int i; int ret; rbtree_ctx = map->cache; /* if we can't locate it in the cached rbnode we'll have * to traverse the rbtree looking for it. 
*/ rbnode = regcache_rbtree_lookup(map, reg); if (rbnode) { reg_tmp = reg - rbnode->base_reg; val = regcache_rbtree_get_register(rbnode, reg_tmp, map->cache_word_size); if (val == value) return 0; regcache_rbtree_set_register(rbnode, reg_tmp, value, map->cache_word_size); } else { /* look for an adjacent register to the one we are about to add */ for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, node); for (i = 0; i < rbnode_tmp->blklen; i++) { reg_tmp = rbnode_tmp->base_reg + i; if (abs(reg_tmp - reg) != 1) continue; /* decide where in the block to place our register */ if (reg_tmp + 1 == reg) pos = i + 1; else pos = i; ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos, reg, value, map->cache_word_size); if (ret) return ret; rbtree_ctx->cached_rbnode = rbnode_tmp; return 0; } } /* we did not manage to find a place to insert it in an existing * block so create a new rbnode with a single register in its block. * This block will get populated further if any other adjacent * registers get modified in the future. 
*/ rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL); if (!rbnode) return -ENOMEM; rbnode->blklen = 1; rbnode->base_reg = reg; rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size, GFP_KERNEL); if (!rbnode->block) { kfree(rbnode); return -ENOMEM; } regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size); regcache_rbtree_insert(&rbtree_ctx->root, rbnode); rbtree_ctx->cached_rbnode = rbnode; } return 0; } static int regcache_rbtree_sync(struct regmap *map, unsigned int min, unsigned int max) { struct regcache_rbtree_ctx *rbtree_ctx; struct rb_node *node; struct regcache_rbtree_node *rbnode; unsigned int regtmp; unsigned int val; int ret; int i, base, end; rbtree_ctx = map->cache; for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { rbnode = rb_entry(node, struct regcache_rbtree_node, node); if (rbnode->base_reg < min) continue; if (rbnode->base_reg > max) break; if (rbnode->base_reg + rbnode->blklen < min) continue; if (min > rbnode->base_reg) base = min - rbnode->base_reg; else base = 0; if (max < rbnode->base_reg + rbnode->blklen) end = max - rbnode->base_reg + 1; else end = rbnode->blklen; for (i = base; i < end; i++) { regtmp = rbnode->base_reg + i; val = regcache_rbtree_get_register(rbnode, i, map->cache_word_size); /* Is this the hardware default? If so skip. */ ret = regcache_lookup_reg(map, regtmp); if (ret >= 0 && val == map->reg_defaults[ret].def) continue; map->cache_bypass = 1; ret = _regmap_write(map, regtmp, val); map->cache_bypass = 0; if (ret) return ret; dev_dbg(map->dev, "Synced register %#x, value %#x\n", regtmp, val); } } return 0; } struct regcache_ops regcache_rbtree_ops = { .type = REGCACHE_RBTREE, .name = "rbtree", .init = regcache_rbtree_init, .exit = regcache_rbtree_exit, .read = regcache_rbtree_read, .write = regcache_rbtree_write, .sync = regcache_rbtree_sync };
gpl-2.0
WaRP7/linux-fslc
drivers/power/da9052-battery.c
1339
15678
/* * Batttery Driver for Dialog DA9052 PMICs * * Copyright(c) 2011 Dialog Semiconductor Ltd. * * Author: David Dajun Chen <dchen@diasemi.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/delay.h> #include <linux/freezer.h> #include <linux/fs.h> #include <linux/jiffies.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/uaccess.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/mfd/da9052/da9052.h> #include <linux/mfd/da9052/pdata.h> #include <linux/mfd/da9052/reg.h> /* STATIC CONFIGURATION */ #define DA9052_BAT_CUTOFF_VOLT 2800 #define DA9052_BAT_TSH 62000 #define DA9052_BAT_LOW_CAP 4 #define DA9052_AVG_SZ 4 #define DA9052_VC_TBL_SZ 68 #define DA9052_VC_TBL_REF_SZ 3 #define DA9052_ISET_USB_MASK 0x0F #define DA9052_CHG_USB_ILIM_MASK 0x40 #define DA9052_CHG_LIM_COLS 16 #define DA9052_MEAN(x, y) ((x + y) / 2) enum charger_type_enum { DA9052_NOCHARGER = 1, DA9052_CHARGER, }; static const u16 da9052_chg_current_lim[2][DA9052_CHG_LIM_COLS] = { {70, 80, 90, 100, 110, 120, 400, 450, 500, 550, 600, 650, 700, 900, 1100, 1300}, {80, 90, 100, 110, 120, 400, 450, 500, 550, 600, 800, 1000, 1200, 1400, 1600, 1800}, }; static const u16 vc_tbl_ref[3] = {10, 25, 40}; /* Lookup table for voltage vs capacity */ static u32 const vc_tbl[3][68][2] = { /* For temperature 10 degree Celsius */ { {4082, 100}, {4036, 98}, {4020, 96}, {4008, 95}, {3997, 93}, {3983, 91}, {3964, 90}, {3943, 88}, {3926, 87}, {3912, 85}, {3900, 84}, {3890, 82}, {3881, 80}, {3873, 79}, {3865, 77}, {3857, 76}, {3848, 74}, {3839, 73}, {3829, 71}, {3820, 70}, {3811, 68}, {3802, 67}, {3794, 65}, {3785, 64}, {3778, 62}, {3770, 61}, {3763, 59}, {3756, 58}, {3750, 56}, {3744, 55}, {3738, 53}, {3732, 52}, {3727, 50}, {3722, 49}, {3717, 47}, {3712, 
46}, {3708, 44}, {3703, 43}, {3700, 41}, {3696, 40}, {3693, 38}, {3691, 37}, {3688, 35}, {3686, 34}, {3683, 32}, {3681, 31}, {3678, 29}, {3675, 28}, {3672, 26}, {3669, 25}, {3665, 23}, {3661, 22}, {3656, 21}, {3651, 19}, {3645, 18}, {3639, 16}, {3631, 15}, {3622, 13}, {3611, 12}, {3600, 10}, {3587, 9}, {3572, 7}, {3548, 6}, {3503, 5}, {3420, 3}, {3268, 2}, {2992, 1}, {2746, 0} }, /* For temperature 25 degree Celsius */ { {4102, 100}, {4065, 98}, {4048, 96}, {4034, 95}, {4021, 93}, {4011, 92}, {4001, 90}, {3986, 88}, {3968, 87}, {3952, 85}, {3938, 84}, {3926, 82}, {3916, 81}, {3908, 79}, {3900, 77}, {3892, 76}, {3883, 74}, {3874, 73}, {3864, 71}, {3855, 70}, {3846, 68}, {3836, 67}, {3827, 65}, {3819, 64}, {3810, 62}, {3801, 61}, {3793, 59}, {3786, 58}, {3778, 56}, {3772, 55}, {3765, 53}, {3759, 52}, {3754, 50}, {3748, 49}, {3743, 47}, {3738, 46}, {3733, 44}, {3728, 43}, {3724, 41}, {3720, 40}, {3716, 38}, {3712, 37}, {3709, 35}, {3706, 34}, {3703, 33}, {3701, 31}, {3698, 30}, {3696, 28}, {3693, 27}, {3690, 25}, {3687, 24}, {3683, 22}, {3680, 21}, {3675, 19}, {3671, 18}, {3666, 17}, {3660, 15}, {3654, 14}, {3647, 12}, {3639, 11}, {3630, 9}, {3621, 8}, {3613, 6}, {3606, 5}, {3597, 4}, {3582, 2}, {3546, 1}, {2747, 0} }, /* For temperature 40 degree Celsius */ { {4114, 100}, {4081, 98}, {4065, 96}, {4050, 95}, {4036, 93}, {4024, 92}, {4013, 90}, {4002, 88}, {3990, 87}, {3976, 85}, {3962, 84}, {3950, 82}, {3939, 81}, {3930, 79}, {3921, 77}, {3912, 76}, {3902, 74}, {3893, 73}, {3883, 71}, {3874, 70}, {3865, 68}, {3856, 67}, {3847, 65}, {3838, 64}, {3829, 62}, {3820, 61}, {3812, 59}, {3803, 58}, {3795, 56}, {3787, 55}, {3780, 53}, {3773, 52}, {3767, 50}, {3761, 49}, {3756, 47}, {3751, 46}, {3746, 44}, {3741, 43}, {3736, 41}, {3732, 40}, {3728, 38}, {3724, 37}, {3720, 35}, {3716, 34}, {3713, 33}, {3710, 31}, {3707, 30}, {3704, 28}, {3701, 27}, {3698, 25}, {3695, 24}, {3691, 22}, {3686, 21}, {3681, 19}, {3676, 18}, {3671, 17}, {3666, 15}, {3661, 14}, {3655, 12}, {3648, 11}, 
{3640, 9}, {3632, 8}, {3622, 6}, {3616, 5}, {3611, 4}, {3604, 2}, {3594, 1}, {2747, 0} } }; struct da9052_battery { struct da9052 *da9052; struct power_supply *psy; struct notifier_block nb; int charger_type; int status; int health; }; static inline int volt_reg_to_mV(int value) { return ((value * 1000) / 512) + 2500; } static inline int ichg_reg_to_mA(int value) { return (value * 3900) / 1000; } static int da9052_read_chgend_current(struct da9052_battery *bat, int *current_mA) { int ret; if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING) return -EINVAL; ret = da9052_reg_read(bat->da9052, DA9052_ICHG_END_REG); if (ret < 0) return ret; *current_mA = ichg_reg_to_mA(ret & DA9052_ICHGEND_ICHGEND); return 0; } static int da9052_read_chg_current(struct da9052_battery *bat, int *current_mA) { int ret; if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING) return -EINVAL; ret = da9052_reg_read(bat->da9052, DA9052_ICHG_AV_REG); if (ret < 0) return ret; *current_mA = ichg_reg_to_mA(ret & DA9052_ICHGAV_ICHGAV); return 0; } static int da9052_bat_check_status(struct da9052_battery *bat, int *status) { u8 v[2] = {0, 0}; u8 bat_status; u8 chg_end; int ret; int chg_current; int chg_end_current; bool dcinsel; bool dcindet; bool vbussel; bool vbusdet; bool dc; bool vbus; ret = da9052_group_read(bat->da9052, DA9052_STATUS_A_REG, 2, v); if (ret < 0) return ret; bat_status = v[0]; chg_end = v[1]; dcinsel = bat_status & DA9052_STATUSA_DCINSEL; dcindet = bat_status & DA9052_STATUSA_DCINDET; vbussel = bat_status & DA9052_STATUSA_VBUSSEL; vbusdet = bat_status & DA9052_STATUSA_VBUSDET; dc = dcinsel && dcindet; vbus = vbussel && vbusdet; /* Preference to WALL(DCIN) charger unit */ if (dc || vbus) { bat->charger_type = DA9052_CHARGER; /* If charging end flag is set and Charging current is greater * than charging end limit then battery is charging */ if ((chg_end & DA9052_STATUSB_CHGEND) != 0) { ret = da9052_read_chg_current(bat, &chg_current); if (ret < 0) return ret; ret = 
da9052_read_chgend_current(bat, &chg_end_current); if (ret < 0) return ret; if (chg_current >= chg_end_current) bat->status = POWER_SUPPLY_STATUS_CHARGING; else bat->status = POWER_SUPPLY_STATUS_NOT_CHARGING; } else { /* If Charging end flag is cleared then battery is * charging */ bat->status = POWER_SUPPLY_STATUS_CHARGING; } } else if (dcindet || vbusdet) { bat->charger_type = DA9052_CHARGER; bat->status = POWER_SUPPLY_STATUS_NOT_CHARGING; } else { bat->charger_type = DA9052_NOCHARGER; bat->status = POWER_SUPPLY_STATUS_DISCHARGING; } if (status != NULL) *status = bat->status; return 0; } static int da9052_bat_read_volt(struct da9052_battery *bat, int *volt_mV) { int volt; volt = da9052_adc_manual_read(bat->da9052, DA9052_ADC_MAN_MUXSEL_VBAT); if (volt < 0) return volt; *volt_mV = volt_reg_to_mV(volt); return 0; } static int da9052_bat_check_presence(struct da9052_battery *bat, int *illegal) { int bat_temp; bat_temp = da9052_adc_read_temp(bat->da9052); if (bat_temp < 0) return bat_temp; if (bat_temp > DA9052_BAT_TSH) *illegal = 1; else *illegal = 0; return 0; } static int da9052_bat_interpolate(int vbat_lower, int vbat_upper, int level_lower, int level_upper, int bat_voltage) { int tmp; tmp = ((level_upper - level_lower) * 1000) / (vbat_upper - vbat_lower); tmp = level_lower + (((bat_voltage - vbat_lower) * tmp) / 1000); return tmp; } static unsigned char da9052_determine_vc_tbl_index(unsigned char adc_temp) { int i; if (adc_temp <= vc_tbl_ref[0]) return 0; if (adc_temp > vc_tbl_ref[DA9052_VC_TBL_REF_SZ - 1]) return DA9052_VC_TBL_REF_SZ - 1; for (i = 0; i < DA9052_VC_TBL_REF_SZ - 1; i++) { if ((adc_temp > vc_tbl_ref[i]) && (adc_temp <= DA9052_MEAN(vc_tbl_ref[i], vc_tbl_ref[i + 1]))) return i; if ((adc_temp > DA9052_MEAN(vc_tbl_ref[i], vc_tbl_ref[i + 1])) && (adc_temp <= vc_tbl_ref[i])) return i + 1; } /* * For some reason authors of the driver didn't presume that we can * end up here. It might be OK, but might be not, no one knows for * sure. 
Go check your battery, is it on fire? */ WARN_ON(1); return 0; } static int da9052_bat_read_capacity(struct da9052_battery *bat, int *capacity) { int adc_temp; int bat_voltage; int vbat_lower; int vbat_upper; int level_upper; int level_lower; int ret; int flag; int i = 0; int j; ret = da9052_bat_read_volt(bat, &bat_voltage); if (ret < 0) return ret; adc_temp = da9052_adc_read_temp(bat->da9052); if (adc_temp < 0) return adc_temp; i = da9052_determine_vc_tbl_index(adc_temp); if (bat_voltage >= vc_tbl[i][0][0]) { *capacity = 100; return 0; } if (bat_voltage <= vc_tbl[i][DA9052_VC_TBL_SZ - 1][0]) { *capacity = 0; return 0; } flag = 0; for (j = 0; j < (DA9052_VC_TBL_SZ-1); j++) { if ((bat_voltage <= vc_tbl[i][j][0]) && (bat_voltage >= vc_tbl[i][j + 1][0])) { vbat_upper = vc_tbl[i][j][0]; vbat_lower = vc_tbl[i][j + 1][0]; level_upper = vc_tbl[i][j][1]; level_lower = vc_tbl[i][j + 1][1]; flag = 1; break; } } if (!flag) return -EIO; *capacity = da9052_bat_interpolate(vbat_lower, vbat_upper, level_lower, level_upper, bat_voltage); return 0; } static int da9052_bat_check_health(struct da9052_battery *bat, int *health) { int ret; int bat_illegal; int capacity; ret = da9052_bat_check_presence(bat, &bat_illegal); if (ret < 0) return ret; if (bat_illegal) { bat->health = POWER_SUPPLY_HEALTH_UNKNOWN; return 0; } if (bat->health != POWER_SUPPLY_HEALTH_OVERHEAT) { ret = da9052_bat_read_capacity(bat, &capacity); if (ret < 0) return ret; if (capacity < DA9052_BAT_LOW_CAP) bat->health = POWER_SUPPLY_HEALTH_DEAD; else bat->health = POWER_SUPPLY_HEALTH_GOOD; } *health = bat->health; return 0; } static irqreturn_t da9052_bat_irq(int irq, void *data) { struct da9052_battery *bat = data; int virq; virq = regmap_irq_get_virq(bat->da9052->irq_data, irq); irq -= virq; if (irq == DA9052_IRQ_CHGEND) bat->status = POWER_SUPPLY_STATUS_FULL; else da9052_bat_check_status(bat, NULL); if (irq == DA9052_IRQ_CHGEND || irq == DA9052_IRQ_DCIN || irq == DA9052_IRQ_VBUS || irq == DA9052_IRQ_TBAT) { 
power_supply_changed(bat->psy); } return IRQ_HANDLED; } static int da9052_USB_current_notifier(struct notifier_block *nb, unsigned long events, void *data) { u8 row; u8 col; int *current_mA = data; int ret; struct da9052_battery *bat = container_of(nb, struct da9052_battery, nb); if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING) return -EPERM; ret = da9052_reg_read(bat->da9052, DA9052_CHGBUCK_REG); if (ret & DA9052_CHG_USB_ILIM_MASK) return -EPERM; if (bat->da9052->chip_id == DA9052) row = 0; else row = 1; if (*current_mA < da9052_chg_current_lim[row][0] || *current_mA > da9052_chg_current_lim[row][DA9052_CHG_LIM_COLS - 1]) return -EINVAL; for (col = 0; col <= DA9052_CHG_LIM_COLS - 1 ; col++) { if (*current_mA <= da9052_chg_current_lim[row][col]) break; } return da9052_reg_update(bat->da9052, DA9052_ISET_REG, DA9052_ISET_USB_MASK, col); } static int da9052_bat_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { int ret; int illegal; struct da9052_battery *bat = power_supply_get_drvdata(psy); ret = da9052_bat_check_presence(bat, &illegal); if (ret < 0) return ret; if (illegal && psp != POWER_SUPPLY_PROP_PRESENT) return -ENODEV; switch (psp) { case POWER_SUPPLY_PROP_STATUS: ret = da9052_bat_check_status(bat, &val->intval); break; case POWER_SUPPLY_PROP_ONLINE: val->intval = (bat->charger_type == DA9052_NOCHARGER) ? 
0 : 1; break; case POWER_SUPPLY_PROP_PRESENT: ret = da9052_bat_check_presence(bat, &val->intval); break; case POWER_SUPPLY_PROP_HEALTH: ret = da9052_bat_check_health(bat, &val->intval); break; case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: val->intval = DA9052_BAT_CUTOFF_VOLT * 1000; break; case POWER_SUPPLY_PROP_VOLTAGE_AVG: ret = da9052_bat_read_volt(bat, &val->intval); break; case POWER_SUPPLY_PROP_CURRENT_AVG: ret = da9052_read_chg_current(bat, &val->intval); break; case POWER_SUPPLY_PROP_CAPACITY: ret = da9052_bat_read_capacity(bat, &val->intval); break; case POWER_SUPPLY_PROP_TEMP: val->intval = da9052_adc_read_temp(bat->da9052); ret = val->intval; break; case POWER_SUPPLY_PROP_TECHNOLOGY: val->intval = POWER_SUPPLY_TECHNOLOGY_LION; break; default: return -EINVAL; } return ret; } static enum power_supply_property da9052_bat_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_AVG, POWER_SUPPLY_PROP_CURRENT_AVG, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TECHNOLOGY, }; static struct power_supply_desc psy_desc = { .name = "da9052-bat", .type = POWER_SUPPLY_TYPE_BATTERY, .properties = da9052_bat_props, .num_properties = ARRAY_SIZE(da9052_bat_props), .get_property = da9052_bat_get_property, }; static char *da9052_bat_irqs[] = { "BATT TEMP", "DCIN DET", "DCIN REM", "VBUS DET", "VBUS REM", "CHG END", }; static int da9052_bat_irq_bits[] = { DA9052_IRQ_TBAT, DA9052_IRQ_DCIN, DA9052_IRQ_DCINREM, DA9052_IRQ_VBUS, DA9052_IRQ_VBUSREM, DA9052_IRQ_CHGEND, }; static s32 da9052_bat_probe(struct platform_device *pdev) { struct da9052_pdata *pdata; struct da9052_battery *bat; struct power_supply_config psy_cfg = {}; int ret; int i; bat = devm_kzalloc(&pdev->dev, sizeof(struct da9052_battery), GFP_KERNEL); if (!bat) return -ENOMEM; psy_cfg.drv_data = bat; bat->da9052 = dev_get_drvdata(pdev->dev.parent); bat->charger_type = 
DA9052_NOCHARGER; bat->status = POWER_SUPPLY_STATUS_UNKNOWN; bat->health = POWER_SUPPLY_HEALTH_UNKNOWN; bat->nb.notifier_call = da9052_USB_current_notifier; pdata = bat->da9052->dev->platform_data; if (pdata != NULL && pdata->use_for_apm) psy_desc.use_for_apm = pdata->use_for_apm; else psy_desc.use_for_apm = 1; for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++) { ret = da9052_request_irq(bat->da9052, da9052_bat_irq_bits[i], da9052_bat_irqs[i], da9052_bat_irq, bat); if (ret != 0) { dev_err(bat->da9052->dev, "DA9052 failed to request %s IRQ: %d\n", da9052_bat_irqs[i], ret); goto err; } } bat->psy = power_supply_register(&pdev->dev, &psy_desc, &psy_cfg); if (IS_ERR(bat->psy)) { ret = PTR_ERR(bat->psy); goto err; } platform_set_drvdata(pdev, bat); return 0; err: while (--i >= 0) da9052_free_irq(bat->da9052, da9052_bat_irq_bits[i], bat); return ret; } static int da9052_bat_remove(struct platform_device *pdev) { int i; struct da9052_battery *bat = platform_get_drvdata(pdev); for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++) da9052_free_irq(bat->da9052, da9052_bat_irq_bits[i], bat); power_supply_unregister(bat->psy); return 0; } static struct platform_driver da9052_bat_driver = { .probe = da9052_bat_probe, .remove = da9052_bat_remove, .driver = { .name = "da9052-bat", }, }; module_platform_driver(da9052_bat_driver); MODULE_DESCRIPTION("DA9052 BAT Device Driver"); MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:da9052-bat");
gpl-2.0
omegamoon/rockchip-rk3188-generic
arch/x86/platform/efi/efi.c
1851
19042
/* * Common EFI (Extensible Firmware Interface) support functions * Based on Extensible Firmware Interface Specification version 1.0 * * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999-2002 Hewlett-Packard Co. * David Mosberger-Tang <davidm@hpl.hp.com> * Stephane Eranian <eranian@hpl.hp.com> * Copyright (C) 2005-2008 Intel Co. * Fenghua Yu <fenghua.yu@intel.com> * Bibo Mao <bibo.mao@intel.com> * Chandramouli Narayanan <mouli@linux.intel.com> * Huang Ying <ying.huang@intel.com> * * Copied from efi_32.c to eliminate the duplicated code between EFI * 32/64 support code. --ying 2007-10-26 * * All EFI Runtime Services are not implemented yet as EFI only * supports physical mode addressing on SoftSDV. This is to be fixed * in a future version. --drummond 1999-07-20 * * Implemented EFI runtime services and virtual mode calls. --davidm * * Goutham Rao: <goutham.rao@intel.com> * Skip non-WB memory and ignore empty memory ranges. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/efi.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/time.h> #include <linux/io.h> #include <linux/reboot.h> #include <linux/bcd.h> #include <asm/setup.h> #include <asm/efi.h> #include <asm/time.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/x86_init.h> #define EFI_DEBUG 1 #define PFX "EFI: " int efi_enabled; EXPORT_SYMBOL(efi_enabled); struct efi efi; EXPORT_SYMBOL(efi); struct efi_memory_map memmap; static struct efi efi_phys __initdata; static efi_system_table_t efi_systab __initdata; static int __init setup_noefi(char *arg) { efi_enabled = 0; return 0; } early_param("noefi", setup_noefi); int add_efi_memmap; EXPORT_SYMBOL(add_efi_memmap); static int __init setup_add_efi_memmap(char *arg) { add_efi_memmap = 1; return 0; } early_param("add_efi_memmap", setup_add_efi_memmap); static efi_status_t 
virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) { return efi_call_virt2(get_time, tm, tc); } static efi_status_t virt_efi_set_time(efi_time_t *tm) { return efi_call_virt1(set_time, tm); } static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) { return efi_call_virt3(get_wakeup_time, enabled, pending, tm); } static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) { return efi_call_virt2(set_wakeup_time, enabled, tm); } static efi_status_t virt_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor, u32 *attr, unsigned long *data_size, void *data) { return efi_call_virt5(get_variable, name, vendor, attr, data_size, data); } static efi_status_t virt_efi_get_next_variable(unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) { return efi_call_virt3(get_next_variable, name_size, name, vendor); } static efi_status_t virt_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor, unsigned long attr, unsigned long data_size, void *data) { return efi_call_virt5(set_variable, name, vendor, attr, data_size, data); } static efi_status_t virt_efi_get_next_high_mono_count(u32 *count) { return efi_call_virt1(get_next_high_mono_count, count); } static void virt_efi_reset_system(int reset_type, efi_status_t status, unsigned long data_size, efi_char16_t *data) { efi_call_virt4(reset_system, reset_type, status, data_size, data); } static efi_status_t __init phys_efi_set_virtual_address_map( unsigned long memory_map_size, unsigned long descriptor_size, u32 descriptor_version, efi_memory_desc_t *virtual_map) { efi_status_t status; efi_call_phys_prelog(); status = efi_call_phys4(efi_phys.set_virtual_address_map, memory_map_size, descriptor_size, descriptor_version, virtual_map); efi_call_phys_epilog(); return status; } static efi_status_t __init phys_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) { efi_status_t status; efi_call_phys_prelog(); status = efi_call_phys2(efi_phys.get_time, tm, tc); 
efi_call_phys_epilog(); return status; } int efi_set_rtc_mmss(unsigned long nowtime) { int real_seconds, real_minutes; efi_status_t status; efi_time_t eft; efi_time_cap_t cap; status = efi.get_time(&eft, &cap); if (status != EFI_SUCCESS) { printk(KERN_ERR "Oops: efitime: can't read time!\n"); return -1; } real_seconds = nowtime % 60; real_minutes = nowtime / 60; if (((abs(real_minutes - eft.minute) + 15)/30) & 1) real_minutes += 30; real_minutes %= 60; eft.minute = real_minutes; eft.second = real_seconds; status = efi.set_time(&eft); if (status != EFI_SUCCESS) { printk(KERN_ERR "Oops: efitime: can't write time!\n"); return -1; } return 0; } unsigned long efi_get_time(void) { efi_status_t status; efi_time_t eft; efi_time_cap_t cap; status = efi.get_time(&eft, &cap); if (status != EFI_SUCCESS) printk(KERN_ERR "Oops: efitime: can't read time!\n"); return mktime(eft.year, eft.month, eft.day, eft.hour, eft.minute, eft.second); } /* * Tell the kernel about the EFI memory map. This might include * more than the max 128 entries that can fit in the e820 legacy * (zeropage) memory map. 
*/ static void __init do_add_efi_memmap(void) { void *p; for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { efi_memory_desc_t *md = p; unsigned long long start = md->phys_addr; unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; int e820_type; switch (md->type) { case EFI_LOADER_CODE: case EFI_LOADER_DATA: case EFI_BOOT_SERVICES_CODE: case EFI_BOOT_SERVICES_DATA: case EFI_CONVENTIONAL_MEMORY: if (md->attribute & EFI_MEMORY_WB) e820_type = E820_RAM; else e820_type = E820_RESERVED; break; case EFI_ACPI_RECLAIM_MEMORY: e820_type = E820_ACPI; break; case EFI_ACPI_MEMORY_NVS: e820_type = E820_NVS; break; case EFI_UNUSABLE_MEMORY: e820_type = E820_UNUSABLE; break; default: /* * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE */ e820_type = E820_RESERVED; break; } e820_add_region(start, size, e820_type); } sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); } void __init efi_memblock_x86_reserve_range(void) { unsigned long pmap; #ifdef CONFIG_X86_32 pmap = boot_params.efi_info.efi_memmap; #else pmap = (boot_params.efi_info.efi_memmap | ((__u64)boot_params.efi_info.efi_memmap_hi<<32)); #endif memmap.phys_map = (void *)pmap; memmap.nr_map = boot_params.efi_info.efi_memmap_size / boot_params.efi_info.efi_memdesc_size; memmap.desc_version = boot_params.efi_info.efi_memdesc_version; memmap.desc_size = boot_params.efi_info.efi_memdesc_size; memblock_x86_reserve_range(pmap, pmap + memmap.nr_map * memmap.desc_size, "EFI memmap"); } #if EFI_DEBUG static void __init print_efi_memmap(void) { efi_memory_desc_t *md; void *p; int i; for (p = memmap.map, i = 0; p < memmap.map_end; p += memmap.desc_size, i++) { md = p; printk(KERN_INFO PFX "mem%02u: type=%u, attr=0x%llx, " "range=[0x%016llx-0x%016llx) (%lluMB)\n", i, md->type, md->attribute, md->phys_addr, md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT), (md->num_pages >> (20 - EFI_PAGE_SHIFT))); } } #endif /* 
EFI_DEBUG */ void __init efi_reserve_boot_services(void) { void *p; for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { efi_memory_desc_t *md = p; u64 start = md->phys_addr; u64 size = md->num_pages << EFI_PAGE_SHIFT; if (md->type != EFI_BOOT_SERVICES_CODE && md->type != EFI_BOOT_SERVICES_DATA) continue; /* Only reserve where possible: * - Not within any already allocated areas * - Not over any memory area (really needed, if above?) * - Not within any part of the kernel * - Not the bios reserved area */ if ((start+size >= virt_to_phys(_text) && start <= virt_to_phys(_end)) || !e820_all_mapped(start, start+size, E820_RAM) || memblock_x86_check_reserved_size(&start, &size, 1<<EFI_PAGE_SHIFT)) { /* Could not reserve, skip it */ md->num_pages = 0; memblock_dbg(PFX "Could not reserve boot range " "[0x%010llx-0x%010llx]\n", start, start+size-1); } else memblock_x86_reserve_range(start, start+size, "EFI Boot"); } } static void __init efi_free_boot_services(void) { void *p; for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { efi_memory_desc_t *md = p; unsigned long long start = md->phys_addr; unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; if (md->type != EFI_BOOT_SERVICES_CODE && md->type != EFI_BOOT_SERVICES_DATA) continue; /* Could not reserve boot area */ if (!size) continue; free_bootmem_late(start, size); } } void __init efi_init(void) { efi_config_table_t *config_tables; efi_runtime_services_t *runtime; efi_char16_t *c16; char vendor[100] = "unknown"; int i = 0; void *tmp; #ifdef CONFIG_X86_32 efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab; #else efi_phys.systab = (efi_system_table_t *) (boot_params.efi_info.efi_systab | ((__u64)boot_params.efi_info.efi_systab_hi<<32)); #endif efi.systab = early_ioremap((unsigned long)efi_phys.systab, sizeof(efi_system_table_t)); if (efi.systab == NULL) printk(KERN_ERR "Couldn't map the EFI system table!\n"); memcpy(&efi_systab, efi.systab, sizeof(efi_system_table_t)); 
early_iounmap(efi.systab, sizeof(efi_system_table_t)); efi.systab = &efi_systab; /* * Verify the EFI Table */ if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) printk(KERN_ERR "EFI system table signature incorrect!\n"); if ((efi.systab->hdr.revision >> 16) == 0) printk(KERN_ERR "Warning: EFI system table version " "%d.%02d, expected 1.00 or greater!\n", efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff); /* * Show what we know for posterity */ c16 = tmp = early_ioremap(efi.systab->fw_vendor, 2); if (c16) { for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i) vendor[i] = *c16++; vendor[i] = '\0'; } else printk(KERN_ERR PFX "Could not map the firmware vendor!\n"); early_iounmap(tmp, 2); printk(KERN_INFO "EFI v%u.%.02u by %s\n", efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor); /* * Let's see what config tables the firmware passed to us. */ config_tables = early_ioremap( efi.systab->tables, efi.systab->nr_tables * sizeof(efi_config_table_t)); if (config_tables == NULL) printk(KERN_ERR "Could not map EFI Configuration Table!\n"); printk(KERN_INFO); for (i = 0; i < efi.systab->nr_tables; i++) { if (!efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID)) { efi.mps = config_tables[i].table; printk(" MPS=0x%lx ", config_tables[i].table); } else if (!efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID)) { efi.acpi20 = config_tables[i].table; printk(" ACPI 2.0=0x%lx ", config_tables[i].table); } else if (!efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID)) { efi.acpi = config_tables[i].table; printk(" ACPI=0x%lx ", config_tables[i].table); } else if (!efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID)) { efi.smbios = config_tables[i].table; printk(" SMBIOS=0x%lx ", config_tables[i].table); #ifdef CONFIG_X86_UV } else if (!efi_guidcmp(config_tables[i].guid, UV_SYSTEM_TABLE_GUID)) { efi.uv_systab = config_tables[i].table; printk(" UVsystab=0x%lx ", config_tables[i].table); #endif } else if (!efi_guidcmp(config_tables[i].guid, 
HCDP_TABLE_GUID)) { efi.hcdp = config_tables[i].table; printk(" HCDP=0x%lx ", config_tables[i].table); } else if (!efi_guidcmp(config_tables[i].guid, UGA_IO_PROTOCOL_GUID)) { efi.uga = config_tables[i].table; printk(" UGA=0x%lx ", config_tables[i].table); } } printk("\n"); early_iounmap(config_tables, efi.systab->nr_tables * sizeof(efi_config_table_t)); /* * Check out the runtime services table. We need to map * the runtime services table so that we can grab the physical * address of several of the EFI runtime functions, needed to * set the firmware into virtual mode. */ runtime = early_ioremap((unsigned long)efi.systab->runtime, sizeof(efi_runtime_services_t)); if (runtime != NULL) { /* * We will only need *early* access to the following * two EFI runtime services before set_virtual_address_map * is invoked. */ efi_phys.get_time = (efi_get_time_t *)runtime->get_time; efi_phys.set_virtual_address_map = (efi_set_virtual_address_map_t *) runtime->set_virtual_address_map; /* * Make efi_get_time can be called before entering * virtual mode. 
*/ efi.get_time = phys_efi_get_time; } else printk(KERN_ERR "Could not map the EFI runtime service " "table!\n"); early_iounmap(runtime, sizeof(efi_runtime_services_t)); /* Map the EFI memory map */ memmap.map = early_ioremap((unsigned long)memmap.phys_map, memmap.nr_map * memmap.desc_size); if (memmap.map == NULL) printk(KERN_ERR "Could not map the EFI memory map!\n"); memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size); if (memmap.desc_size != sizeof(efi_memory_desc_t)) printk(KERN_WARNING "Kernel-defined memdesc doesn't match the one from EFI!\n"); if (add_efi_memmap) do_add_efi_memmap(); #ifdef CONFIG_X86_32 x86_platform.get_wallclock = efi_get_time; x86_platform.set_wallclock = efi_set_rtc_mmss; #endif #if EFI_DEBUG print_efi_memmap(); #endif } void __init efi_set_executable(efi_memory_desc_t *md, bool executable) { u64 addr, npages; addr = md->virt_addr; npages = md->num_pages; memrange_efi_to_native(&addr, &npages); if (executable) set_memory_x(addr, npages); else set_memory_nx(addr, npages); } static void __init runtime_code_page_mkexec(void) { efi_memory_desc_t *md; void *p; /* Make EFI runtime service code area executable */ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { md = p; if (md->type != EFI_RUNTIME_SERVICES_CODE) continue; efi_set_executable(md, true); } } /* * This function will switch the EFI runtime services to virtual mode. * Essentially, look through the EFI memmap and map every region that * has the runtime attribute bit set in its memory descriptor and update * that memory descriptor with the virtual address obtained from ioremap(). * This enables the runtime services to be called without having to * thunk back into physical mode for every invocation. 
*/ void __init efi_enter_virtual_mode(void) { efi_memory_desc_t *md, *prev_md = NULL; efi_status_t status; unsigned long size; u64 end, systab, addr, npages, end_pfn; void *p, *va, *new_memmap = NULL; int count = 0; efi.systab = NULL; /* Merge contiguous regions of the same type and attribute */ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { u64 prev_size; md = p; if (!prev_md) { prev_md = md; continue; } if (prev_md->type != md->type || prev_md->attribute != md->attribute) { prev_md = md; continue; } prev_size = prev_md->num_pages << EFI_PAGE_SHIFT; if (md->phys_addr == (prev_md->phys_addr + prev_size)) { prev_md->num_pages += md->num_pages; md->type = EFI_RESERVED_TYPE; md->attribute = 0; continue; } prev_md = md; } for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { md = p; if (!(md->attribute & EFI_MEMORY_RUNTIME) && md->type != EFI_BOOT_SERVICES_CODE && md->type != EFI_BOOT_SERVICES_DATA) continue; size = md->num_pages << EFI_PAGE_SHIFT; end = md->phys_addr + size; end_pfn = PFN_UP(end); if (end_pfn <= max_low_pfn_mapped || (end_pfn > (1UL << (32 - PAGE_SHIFT)) && end_pfn <= max_pfn_mapped)) va = __va(md->phys_addr); else va = efi_ioremap(md->phys_addr, size, md->type); md->virt_addr = (u64) (unsigned long) va; if (!va) { printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n", (unsigned long long)md->phys_addr); continue; } if (!(md->attribute & EFI_MEMORY_WB)) { addr = md->virt_addr; npages = md->num_pages; memrange_efi_to_native(&addr, &npages); set_memory_uc(addr, npages); } systab = (u64) (unsigned long) efi_phys.systab; if (md->phys_addr <= systab && systab < end) { systab += md->virt_addr - md->phys_addr; efi.systab = (efi_system_table_t *) (unsigned long) systab; } new_memmap = krealloc(new_memmap, (count + 1) * memmap.desc_size, GFP_KERNEL); memcpy(new_memmap + (count * memmap.desc_size), md, memmap.desc_size); count++; } BUG_ON(!efi.systab); status = phys_efi_set_virtual_address_map( memmap.desc_size * count, 
memmap.desc_size, memmap.desc_version, (efi_memory_desc_t *)__pa(new_memmap)); if (status != EFI_SUCCESS) { printk(KERN_ALERT "Unable to switch EFI into virtual mode " "(status=%lx)!\n", status); panic("EFI call to SetVirtualAddressMap() failed!"); } /* * Thankfully, it does seem that no runtime services other than * SetVirtualAddressMap() will touch boot services code, so we can * get rid of it all at this point */ efi_free_boot_services(); /* * Now that EFI is in virtual mode, update the function * pointers in the runtime service table to the new virtual addresses. * * Call EFI services through wrapper functions. */ efi.get_time = virt_efi_get_time; efi.set_time = virt_efi_set_time; efi.get_wakeup_time = virt_efi_get_wakeup_time; efi.set_wakeup_time = virt_efi_set_wakeup_time; efi.get_variable = virt_efi_get_variable; efi.get_next_variable = virt_efi_get_next_variable; efi.set_variable = virt_efi_set_variable; efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count; efi.reset_system = virt_efi_reset_system; efi.set_virtual_address_map = NULL; if (__supported_pte_mask & _PAGE_NX) runtime_code_page_mkexec(); early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); memmap.map = NULL; kfree(new_memmap); } /* * Convenience functions to obtain memory types and attributes */ u32 efi_mem_type(unsigned long phys_addr) { efi_memory_desc_t *md; void *p; for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { md = p; if ((md->phys_addr <= phys_addr) && (phys_addr < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)))) return md->type; } return 0; } u64 efi_mem_attributes(unsigned long phys_addr) { efi_memory_desc_t *md; void *p; for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { md = p; if ((md->phys_addr <= phys_addr) && (phys_addr < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)))) return md->attribute; } return 0; }
gpl-2.0
mfiels/lguest-suspend
arch/arm/mach-orion5x/edmini_v2-setup.c
2107
4987
/* * arch/arm/mach-orion5x/edmini_v2-setup.c * * LaCie Ethernet Disk mini V2 Setup * * Copyright (C) 2008 Christopher Moore <moore@free.fr> * Copyright (C) 2008 Albert Aribaud <albert.aribaud@free.fr> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ /* * TODO: add Orion USB device port init when kernel.org support is added. * TODO: add flash write support: see below. * TODO: add power-off support. * TODO: add I2C EEPROM support. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/pci.h> #include <linux/irq.h> #include <linux/mtd/physmap.h> #include <linux/mv643xx_eth.h> #include <linux/leds.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/i2c.h> #include <linux/ata_platform.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/pci.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" /***************************************************************************** * EDMINI_V2 Info ****************************************************************************/ /* * 512KB NOR flash Device bus boot chip select */ #define EDMINI_V2_NOR_BOOT_BASE 0xfff80000 #define EDMINI_V2_NOR_BOOT_SIZE SZ_512K /***************************************************************************** * 512KB NOR Flash on BOOT Device ****************************************************************************/ /* * Currently the MTD code does not recognize the MX29LV400CBCT as a bottom * -type device. This could cause risks of accidentally erasing critical * flash sectors. We thus define a single, write-protected partition covering * the whole flash. 
* TODO: once the flash part TOP/BOTTOM detection issue is sorted out in the MTD * code, break this into at least three partitions: 'u-boot code', 'u-boot * environment' and 'whatever is left'. */ static struct mtd_partition edmini_v2_partitions[] = { { .name = "Full512kb", .size = 0x00080000, .offset = 0x00000000, .mask_flags = MTD_WRITEABLE, }, }; static struct physmap_flash_data edmini_v2_nor_flash_data = { .width = 1, .parts = edmini_v2_partitions, .nr_parts = ARRAY_SIZE(edmini_v2_partitions), }; static struct resource edmini_v2_nor_flash_resource = { .flags = IORESOURCE_MEM, .start = EDMINI_V2_NOR_BOOT_BASE, .end = EDMINI_V2_NOR_BOOT_BASE + EDMINI_V2_NOR_BOOT_SIZE - 1, }; static struct platform_device edmini_v2_nor_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &edmini_v2_nor_flash_data, }, .num_resources = 1, .resource = &edmini_v2_nor_flash_resource, }; /***************************************************************************** * Ethernet ****************************************************************************/ static struct mv643xx_eth_platform_data edmini_v2_eth_data = { .phy_addr = 8, }; /***************************************************************************** * RTC 5C372a on I2C bus ****************************************************************************/ #define EDMINIV2_RTC_GPIO 3 static struct i2c_board_info __initdata edmini_v2_i2c_rtc = { I2C_BOARD_INFO("rs5c372a", 0x32), .irq = 0, }; /***************************************************************************** * General Setup ****************************************************************************/ static unsigned int edminiv2_mpp_modes[] __initdata = { MPP0_UNUSED, MPP1_UNUSED, MPP2_UNUSED, MPP3_GPIO, /* RTC interrupt */ MPP4_UNUSED, MPP5_UNUSED, MPP6_UNUSED, MPP7_UNUSED, MPP8_UNUSED, MPP9_UNUSED, MPP10_UNUSED, MPP11_UNUSED, MPP12_SATA_LED, /* SATA 0 presence */ MPP13_SATA_LED, /* SATA 1 presence */ MPP14_SATA_LED, /* SATA 0 active */ MPP15_SATA_LED, /* 
SATA 1 active */ /* 16: Power LED control (0 = On, 1 = Off) */ MPP16_GPIO, /* 17: Power LED control select (0 = CPLD, 1 = GPIO16) */ MPP17_GPIO, /* 18: Power button status (0 = Released, 1 = Pressed) */ MPP18_GPIO, MPP19_UNUSED, 0, }; void __init edmini_v2_init(void) { orion5x_mpp_conf(edminiv2_mpp_modes); /* * Configure peripherals. */ orion5x_ehci0_init(); orion5x_eth_init(&edmini_v2_eth_data); mvebu_mbus_add_window("devbus-boot", EDMINI_V2_NOR_BOOT_BASE, EDMINI_V2_NOR_BOOT_SIZE); platform_device_register(&edmini_v2_nor_flash); pr_notice("edmini_v2: USB device port, flash write and power-off " "are not yet supported.\n"); /* Get RTC IRQ and register the chip */ if (gpio_request(EDMINIV2_RTC_GPIO, "rtc") == 0) { if (gpio_direction_input(EDMINIV2_RTC_GPIO) == 0) edmini_v2_i2c_rtc.irq = gpio_to_irq(EDMINIV2_RTC_GPIO); else gpio_free(EDMINIV2_RTC_GPIO); } if (edmini_v2_i2c_rtc.irq == 0) pr_warning("edmini_v2: failed to get RTC IRQ\n"); i2c_register_board_info(0, &edmini_v2_i2c_rtc, 1); }
gpl-2.0
KylinUI/android_kernel_samsung_t1
drivers/gpu/drm/radeon/radeon_cs.c
2363
9949
/* * Copyright 2008 Jerome Glisse. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Jerome Glisse <glisse@freedesktop.org> */ #include "drmP.h" #include "radeon_drm.h" #include "radeon_reg.h" #include "radeon.h" void r100_cs_dump_packet(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt); int radeon_cs_parser_relocs(struct radeon_cs_parser *p) { struct drm_device *ddev = p->rdev->ddev; struct radeon_cs_chunk *chunk; unsigned i, j; bool duplicate; if (p->chunk_relocs_idx == -1) { return 0; } chunk = &p->chunks[p->chunk_relocs_idx]; /* FIXME: we assume that each relocs use 4 dwords */ p->nrelocs = chunk->length_dw / 4; p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL); if (p->relocs_ptr == NULL) { return -ENOMEM; } p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL); if (p->relocs == NULL) { return -ENOMEM; } for (i = 0; i < p->nrelocs; i++) { struct drm_radeon_cs_reloc *r; duplicate = false; r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4]; for (j = 0; j < p->nrelocs; j++) { if (r->handle == p->relocs[j].handle) { p->relocs_ptr[i] = &p->relocs[j]; duplicate = true; break; } } if (!duplicate) { p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp, r->handle); if (p->relocs[i].gobj == NULL) { DRM_ERROR("gem object lookup failed 0x%x\n", r->handle); return -ENOENT; } p->relocs_ptr[i] = &p->relocs[i]; p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj); p->relocs[i].lobj.bo = p->relocs[i].robj; p->relocs[i].lobj.wdomain = r->write_domain; p->relocs[i].lobj.rdomain = r->read_domains; p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo; p->relocs[i].handle = r->handle; p->relocs[i].flags = r->flags; radeon_bo_list_add_object(&p->relocs[i].lobj, &p->validated); } } return radeon_bo_list_validate(&p->validated); } int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) { struct drm_radeon_cs *cs = data; uint64_t *chunk_array_ptr; unsigned size, i; if (!cs->num_chunks) { return 0; } /* get chunks */ INIT_LIST_HEAD(&p->validated); p->idx = 0; p->chunk_ib_idx = -1; 
p->chunk_relocs_idx = -1; p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL); if (p->chunks_array == NULL) { return -ENOMEM; } chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks); if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr, sizeof(uint64_t)*cs->num_chunks)) { return -EFAULT; } p->nchunks = cs->num_chunks; p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL); if (p->chunks == NULL) { return -ENOMEM; } for (i = 0; i < p->nchunks; i++) { struct drm_radeon_cs_chunk __user **chunk_ptr = NULL; struct drm_radeon_cs_chunk user_chunk; uint32_t __user *cdata; chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i]; if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr, sizeof(struct drm_radeon_cs_chunk))) { return -EFAULT; } p->chunks[i].length_dw = user_chunk.length_dw; p->chunks[i].kdata = NULL; p->chunks[i].chunk_id = user_chunk.chunk_id; if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { p->chunk_relocs_idx = i; } if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { p->chunk_ib_idx = i; /* zero length IB isn't useful */ if (p->chunks[i].length_dw == 0) return -EINVAL; } p->chunks[i].length_dw = user_chunk.length_dw; p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data; cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) { size = p->chunks[i].length_dw * sizeof(uint32_t); p->chunks[i].kdata = kmalloc(size, GFP_KERNEL); if (p->chunks[i].kdata == NULL) { return -ENOMEM; } if (DRM_COPY_FROM_USER(p->chunks[i].kdata, p->chunks[i].user_ptr, size)) { return -EFAULT; } } else { p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) { kfree(p->chunks[i].kpage[0]); kfree(p->chunks[i].kpage[1]); return -ENOMEM; } p->chunks[i].kpage_idx[0] = -1; p->chunks[i].kpage_idx[1] = -1; p->chunks[i].last_copied_page = -1; 
p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE; } } if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) { DRM_ERROR("cs IB too big: %d\n", p->chunks[p->chunk_ib_idx].length_dw); return -EINVAL; } return 0; } /** * cs_parser_fini() - clean parser states * @parser: parser structure holding parsing context. * @error: error number * * If error is set than unvalidate buffer, otherwise just free memory * used by parsing context. **/ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) { unsigned i; if (!error && parser->ib) ttm_eu_fence_buffer_objects(&parser->validated, parser->ib->fence); else ttm_eu_backoff_reservation(&parser->validated); if (parser->relocs != NULL) { for (i = 0; i < parser->nrelocs; i++) { if (parser->relocs[i].gobj) drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); } } kfree(parser->track); kfree(parser->relocs); kfree(parser->relocs_ptr); for (i = 0; i < parser->nchunks; i++) { kfree(parser->chunks[i].kdata); kfree(parser->chunks[i].kpage[0]); kfree(parser->chunks[i].kpage[1]); } kfree(parser->chunks); kfree(parser->chunks_array); radeon_ib_free(parser->rdev, &parser->ib); } int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct radeon_device *rdev = dev->dev_private; struct radeon_cs_parser parser; struct radeon_cs_chunk *ib_chunk; int r; mutex_lock(&rdev->cs_mutex); /* initialize parser */ memset(&parser, 0, sizeof(struct radeon_cs_parser)); parser.filp = filp; parser.rdev = rdev; parser.dev = rdev->dev; parser.family = rdev->family; r = radeon_cs_parser_init(&parser, data); if (r) { DRM_ERROR("Failed to initialize parser !\n"); radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_ib_get(rdev, &parser.ib); if (r) { DRM_ERROR("Failed to get ib !\n"); radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_cs_parser_relocs(&parser); if (r) { if (r != -ERESTARTSYS) 
DRM_ERROR("Failed to parse relocation %d!\n", r); radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; } /* Copy the packet into the IB, the parser will read from the * input memory (cached) and write to the IB (which can be * uncached). */ ib_chunk = &parser.chunks[parser.chunk_ib_idx]; parser.ib->length_dw = ib_chunk->length_dw; r = radeon_cs_parse(&parser); if (r || parser.parser_error) { DRM_ERROR("Invalid command stream !\n"); radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_cs_finish_pages(&parser); if (r) { DRM_ERROR("Invalid command stream !\n"); radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_ib_schedule(rdev, parser.ib); if (r) { DRM_ERROR("Failed to schedule IB !\n"); } radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; } int radeon_cs_finish_pages(struct radeon_cs_parser *p) { struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; int i; int size = PAGE_SIZE; for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) { if (i == ibc->last_page_index) { size = (ibc->length_dw * 4) % PAGE_SIZE; if (size == 0) size = PAGE_SIZE; } if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)), ibc->user_ptr + (i * PAGE_SIZE), size)) return -EFAULT; } return 0; } int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx) { int new_page; struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; int i; int size = PAGE_SIZE; for (i = ibc->last_copied_page + 1; i < pg_idx; i++) { if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)), ibc->user_ptr + (i * PAGE_SIZE), PAGE_SIZE)) { p->parser_error = -EFAULT; return 0; } } new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 
0 : 1; if (pg_idx == ibc->last_page_index) { size = (ibc->length_dw * 4) % PAGE_SIZE; if (size == 0) size = PAGE_SIZE; } if (DRM_COPY_FROM_USER(ibc->kpage[new_page], ibc->user_ptr + (pg_idx * PAGE_SIZE), size)) { p->parser_error = -EFAULT; return 0; } /* copy to IB here */ memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size); ibc->last_copied_page = pg_idx; ibc->kpage_idx[new_page] = pg_idx; return new_page; }
gpl-2.0
dnkn/rk3188_tablet
fs/ext3/fsync.c
2619
2960
/* * linux/fs/ext3/fsync.c * * Copyright (C) 1993 Stephen Tweedie (sct@redhat.com) * from * Copyright (C) 1992 Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * from * linux/fs/minix/truncate.c Copyright (C) 1991, 1992 Linus Torvalds * * ext3fs fsync primitive * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 * * Removed unnecessary code duplication for little endian machines * and excessive __inline__s. * Andi Kleen, 1997 * * Major simplications and cleanup - we only need to do the metadata, because * we can depend on generic_block_fdatasync() to sync the data blocks. */ #include <linux/time.h> #include <linux/blkdev.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/writeback.h> #include <linux/jbd.h> #include <linux/ext3_fs.h> #include <linux/ext3_jbd.h> /* * akpm: A new design for ext3_sync_file(). * * This is only called from sys_fsync(), sys_fdatasync() and sys_msync(). * There cannot be a transaction open by this task. * Another task could have dirtied this inode. Its data can be in any * state in the journalling system. * * What we do is just kick off a commit and wait on it. This will snapshot the * inode to disk. */ int ext3_sync_file(struct file *file, int datasync) { struct inode *inode = file->f_mapping->host; struct ext3_inode_info *ei = EXT3_I(inode); journal_t *journal = EXT3_SB(inode->i_sb)->s_journal; int ret, needs_barrier = 0; tid_t commit_tid; if (inode->i_sb->s_flags & MS_RDONLY) return 0; J_ASSERT(ext3_journal_current_handle() == NULL); /* * data=writeback,ordered: * The caller's filemap_fdatawrite()/wait will sync the data. * Metadata is in the journal, we wait for a proper transaction * to commit here. * * data=journal: * filemap_fdatawrite won't do anything (the buffers are clean). * ext3_force_commit will write the file data into the journal and * will wait on that. 
* filemap_fdatawait() will encounter a ton of newly-dirtied pages * (they were dirtied by commit). But that's OK - the blocks are * safe in-journal, which is all fsync() needs to ensure. */ if (ext3_should_journal_data(inode)) return ext3_force_commit(inode->i_sb); if (datasync) commit_tid = atomic_read(&ei->i_datasync_tid); else commit_tid = atomic_read(&ei->i_sync_tid); if (test_opt(inode->i_sb, BARRIER) && !journal_trans_will_send_data_barrier(journal, commit_tid)) needs_barrier = 1; log_start_commit(journal, commit_tid); ret = log_wait_commit(journal, commit_tid); /* * In case we didn't commit a transaction, we have to flush * disk caches manually so that data really is on persistent * storage */ if (needs_barrier) blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); return ret; }
gpl-2.0
leehz/android_kernel_samsung_ms013g
arch/arm/plat-omap/counter_32k.c
3131
3406
/* * OMAP 32ksynctimer/counter_32k-related code * * Copyright (C) 2009 Texas Instruments * Copyright (C) 2010 Nokia Corporation * Tony Lindgren <tony@atomide.com> * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * NOTE: This timer is not the same timer as the old OMAP1 MPU timer. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/clocksource.h> #include <asm/sched_clock.h> #include <plat/hardware.h> #include <plat/common.h> #include <plat/board.h> #include <plat/clock.h> /* * 32KHz clocksource ... always available, on pretty most chips except * OMAP 730 and 1510. Other timers could be used as clocksources, with * higher resolution in free-running counter modes (e.g. 12 MHz xtal), * but systems won't necessarily want to spend resources that way. */ static void __iomem *timer_32k_base; #define OMAP16XX_TIMER_32K_SYNCHRONIZED 0xfffbc410 static u32 notrace omap_32k_read_sched_clock(void) { return timer_32k_base ? __raw_readl(timer_32k_base) : 0; } /** * read_persistent_clock - Return time from a persistent clock. * * Reads the time from a source which isn't disabled during PM, the * 32k sync timer. Convert the cycles elapsed since last read into * nsecs and adds to a monotonically increasing timespec. */ static struct timespec persistent_ts; static cycles_t cycles, last_cycles; static unsigned int persistent_mult, persistent_shift; void read_persistent_clock(struct timespec *ts) { unsigned long long nsecs; cycles_t delta; struct timespec *tsp = &persistent_ts; last_cycles = cycles; cycles = timer_32k_base ? 
__raw_readl(timer_32k_base) : 0; delta = cycles - last_cycles; nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift); timespec_add_ns(tsp, nsecs); *ts = *tsp; } int __init omap_init_clocksource_32k(void) { static char err[] __initdata = KERN_ERR "%s: can't register clocksource!\n"; if (cpu_is_omap16xx() || cpu_class_is_omap2()) { u32 pbase; unsigned long size = SZ_4K; void __iomem *base; struct clk *sync_32k_ick; if (cpu_is_omap16xx()) { pbase = OMAP16XX_TIMER_32K_SYNCHRONIZED; size = SZ_1K; } else if (cpu_is_omap2420()) pbase = OMAP2420_32KSYNCT_BASE + 0x10; else if (cpu_is_omap2430()) pbase = OMAP2430_32KSYNCT_BASE + 0x10; else if (cpu_is_omap34xx()) pbase = OMAP3430_32KSYNCT_BASE + 0x10; else if (cpu_is_omap44xx()) pbase = OMAP4430_32KSYNCT_BASE + 0x10; else return -ENODEV; /* For this to work we must have a static mapping in io.c for this area */ base = ioremap(pbase, size); if (!base) return -ENODEV; sync_32k_ick = clk_get(NULL, "omap_32ksync_ick"); if (!IS_ERR(sync_32k_ick)) clk_enable(sync_32k_ick); timer_32k_base = base; /* * 120000 rough estimate from the calculations in * __clocksource_updatefreq_scale. */ clocks_calc_mult_shift(&persistent_mult, &persistent_shift, 32768, NSEC_PER_SEC, 120000); if (clocksource_mmio_init(base, "32k_counter", 32768, 250, 32, clocksource_mmio_readl_up)) printk(err, "32k_counter"); setup_sched_clock(omap_32k_read_sched_clock, 32, 32768); } return 0; }
gpl-2.0
cristianomatos/android_kernel_samsung_smdk4412
drivers/staging/comedi/drivers/cb_pcimdda.c
3387
16257
/* comedi/drivers/cb_pcimdda.c Computer Boards PCIM-DDA06-16 Comedi driver Author: Calin Culianu <calin@ajvar.org> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: cb_pcimdda Description: Measurement Computing PCIM-DDA06-16 Devices: [Measurement Computing] PCIM-DDA06-16 (cb_pcimdda) Author: Calin Culianu <calin@ajvar.org> Updated: Mon, 14 Apr 2008 15:15:51 +0100 Status: works All features of the PCIM-DDA06-16 board are supported. This board has 6 16-bit AO channels, and the usual 8255 DIO setup. (24 channels, configurable in banks of 8 and 4, etc.). This board does not support commands. The board has a peculiar way of specifying AO gain/range settings -- You have 1 jumper bank on the card, which either makes all 6 AO channels either 5 Volt unipolar, 5V bipolar, 10 Volt unipolar or 10V bipolar. Since there is absolutely _no_ way to tell in software how this jumper is set (well, at least according to the rather thin spec. from Measurement Computing that comes with the board), the driver assumes the jumper is at its factory default setting of +/-5V. Also of note is the fact that this board features another jumper, whose state is also completely invisible to software. 
It toggles two possible AO output modes on the board: - Update Mode: Writing to an AO channel instantaneously updates the actual signal output by the DAC on the board (this is the factory default). - Simultaneous XFER Mode: Writing to an AO channel has no effect until you read from any one of the AO channels. This is useful for loading all 6 AO values, and then reading from any one of the AO channels on the device to instantly update all 6 AO values in unison. Useful for some control apps, I would assume? If your jumper is in this setting, then you need to issue your comedi_data_write()s to load all the values you want, then issue one comedi_data_read() on any channel on the AO subdevice to initiate the simultaneous XFER. Configuration Options: [0] PCI bus (optional) [1] PCI slot (optional) [2] analog output range jumper setting 0 == +/- 5 V 1 == +/- 10 V */ /* This is a driver for the Computer Boards PCIM-DDA06-16 Analog Output card. This board has a unique register layout and as such probably deserves its own driver file. It is theoretically possible to integrate this board into the cb_pcidda file, but since that isn't my code, I didn't want to significantly modify that file to support this board (I thought it impolite to do so). At any rate, if you feel ambitious, please feel free to take the code out of this file and combine it with a more unified driver file. I would like to thank Timothy Curry <Timothy.Curry@rdec.redstone.army.mil> for lending me a board so that I could write this driver. -Calin Culianu <calin@ajvar.org> */ #include "../comedidev.h" #include "comedi_pci.h" #include "8255.h" /* device ids of the cards we support -- currently only 1 card supported */ #define PCI_VENDOR_ID_COMPUTERBOARDS 0x1307 #define PCI_ID_PCIM_DDA06_16 0x0053 /* * This is straight from skel.c -- I did this in case this source file * will someday support more than 1 board... 
*/ struct board_struct { const char *name; unsigned short device_id; int ao_chans; int ao_bits; int dio_chans; int dio_method; int dio_offset; /* how many bytes into the BADR are the DIO ports */ int regs_badrindex; /* IO Region for the control, analog output, and DIO registers */ int reg_sz; /* number of bytes of registers in io region */ }; enum DIO_METHODS { DIO_NONE = 0, DIO_8255, DIO_INTERNAL /* unimplemented */ }; static const struct board_struct boards[] = { { .name = "cb_pcimdda06-16", .device_id = PCI_ID_PCIM_DDA06_16, .ao_chans = 6, .ao_bits = 16, .dio_chans = 24, .dio_method = DIO_8255, .dio_offset = 12, .regs_badrindex = 3, .reg_sz = 16, } }; /* * Useful for shorthand access to the particular board structure */ #define thisboard ((const struct board_struct *)dev->board_ptr) #define REG_SZ (thisboard->reg_sz) #define REGS_BADRINDEX (thisboard->regs_badrindex) /* This is used by modprobe to translate PCI IDs to drivers. Should * only be used for PCI and ISA-PnP devices */ /* Please add your PCI vendor ID to comedidev.h, and it will be forwarded * upstream. */ static DEFINE_PCI_DEVICE_TABLE(pci_table) = { { PCI_VENDOR_ID_COMPUTERBOARDS, PCI_ID_PCIM_DDA06_16, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { 0} }; MODULE_DEVICE_TABLE(pci, pci_table); /* this structure is for data unique to this hardware driver. If several hardware drivers keep similar information in this structure, feel free to suggest moving the variable to the struct comedi_device struct. */ struct board_private_struct { unsigned long registers; /* set by probe */ unsigned long dio_registers; char attached_to_8255; /* boolean */ char attached_successfully; /* boolean */ /* would be useful for a PCI device */ struct pci_dev *pci_dev; #define MAX_AO_READBACK_CHANNELS 6 /* Used for AO readback */ unsigned int ao_readback[MAX_AO_READBACK_CHANNELS]; }; /* * most drivers define the following macro to make it easy to * access the private structure. 
*/ #define devpriv ((struct board_private_struct *)dev->private) /* * The struct comedi_driver structure tells the Comedi core module * which functions to call to configure/deconfigure (attach/detach) * the board, and also about the kernel module that contains * the device code. */ static int attach(struct comedi_device *dev, struct comedi_devconfig *it); static int detach(struct comedi_device *dev); static struct comedi_driver cb_pcimdda_driver = { .driver_name = "cb_pcimdda", .module = THIS_MODULE, .attach = attach, .detach = detach, }; MODULE_AUTHOR("Calin A. Culianu <calin@rtlab.org>"); MODULE_DESCRIPTION("Comedi low-level driver for the Computerboards PCIM-DDA " "series. Currently only supports PCIM-DDA06-16 (which " "also happens to be the only board in this series. :) ) "); MODULE_LICENSE("GPL"); static int __devinit cb_pcimdda_driver_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, cb_pcimdda_driver.driver_name); } static void __devexit cb_pcimdda_driver_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver cb_pcimdda_driver_pci_driver = { .id_table = pci_table, .probe = &cb_pcimdda_driver_pci_probe, .remove = __devexit_p(&cb_pcimdda_driver_pci_remove) }; static int __init cb_pcimdda_driver_init_module(void) { int retval; retval = comedi_driver_register(&cb_pcimdda_driver); if (retval < 0) return retval; cb_pcimdda_driver_pci_driver.name = (char *)cb_pcimdda_driver.driver_name; return pci_register_driver(&cb_pcimdda_driver_pci_driver); } static void __exit cb_pcimdda_driver_cleanup_module(void) { pci_unregister_driver(&cb_pcimdda_driver_pci_driver); comedi_driver_unregister(&cb_pcimdda_driver); } module_init(cb_pcimdda_driver_init_module); module_exit(cb_pcimdda_driver_cleanup_module); static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ao_rinsn(struct comedi_device *dev, struct 
comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); /*--------------------------------------------------------------------------- HELPER FUNCTION DECLARATIONS -----------------------------------------------------------------------------*/ /* returns a maxdata value for a given n_bits */ static inline unsigned int figure_out_maxdata(int bits) { return ((unsigned int)1 << bits) - 1; } /* * Probes for a supported device. * * Prerequisite: private be allocated already inside dev * * If the device is found, it returns 0 and has the following side effects: * * o assigns a struct pci_dev * to dev->private->pci_dev * o assigns a struct board * to dev->board_ptr * o sets dev->private->registers * o sets dev->private->dio_registers * * Otherwise, returns a -errno on error */ static int probe(struct comedi_device *dev, const struct comedi_devconfig *it); /*--------------------------------------------------------------------------- FUNCTION DEFINITIONS -----------------------------------------------------------------------------*/ /* * Attach is called by the Comedi core to configure the driver * for a particular board. If you specified a board_name array * in the driver structure, dev->board_ptr contains that * address. */ static int attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int err; /* * Allocate the private structure area. alloc_private() is a * convenient macro defined in comedidev.h. * if this function fails (returns negative) then the private area is * kfree'd by comedi */ if (alloc_private(dev, sizeof(struct board_private_struct)) < 0) return -ENOMEM; /* * If you can probe the device to determine what device in a series * it is, this is the place to do it. Otherwise, dev->board_ptr * should already be initialized. */ err = probe(dev, it); if (err) return err; /* Output some info */ printk("comedi%d: %s: ", dev->minor, thisboard->name); /* * Initialize dev->board_name. 
Note that we can use the "thisboard" * macro now, since we just initialized it in the last line. */ dev->board_name = thisboard->name; /* * Allocate the subdevice structures. alloc_subdevice() is a * convenient macro defined in comedidev.h. */ if (alloc_subdevices(dev, 2) < 0) return -ENOMEM; s = dev->subdevices + 0; /* analog output subdevice */ s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = thisboard->ao_chans; s->maxdata = figure_out_maxdata(thisboard->ao_bits); /* this is hard-coded here */ if (it->options[2]) s->range_table = &range_bipolar10; else s->range_table = &range_bipolar5; s->insn_write = &ao_winsn; s->insn_read = &ao_rinsn; s = dev->subdevices + 1; /* digital i/o subdevice */ if (thisboard->dio_chans) { switch (thisboard->dio_method) { case DIO_8255: /* this is a straight 8255, so register us with the 8255 driver */ subdev_8255_init(dev, s, NULL, devpriv->dio_registers); devpriv->attached_to_8255 = 1; break; case DIO_INTERNAL: default: printk("DIO_INTERNAL not implemented yet!\n"); return -ENXIO; break; } } else { s->type = COMEDI_SUBD_UNUSED; } devpriv->attached_successfully = 1; printk("attached\n"); return 1; } /* * _detach is called to deconfigure a device. It should deallocate * resources. * This function is also called when _attach() fails, so it should be * careful not to release resources that were not necessarily * allocated by _attach(). dev->private and dev->subdevices are * deallocated automatically by the core. 
*/ static int detach(struct comedi_device *dev) { if (devpriv) { if (dev->subdevices && devpriv->attached_to_8255) { /* de-register us from the 8255 driver */ subdev_8255_cleanup(dev, dev->subdevices + 2); devpriv->attached_to_8255 = 0; } if (devpriv->pci_dev) { if (devpriv->registers) comedi_pci_disable(devpriv->pci_dev); pci_dev_put(devpriv->pci_dev); } if (devpriv->attached_successfully && thisboard) printk("comedi%d: %s: detached\n", dev->minor, thisboard->name); } return 0; } static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; int chan = CR_CHAN(insn->chanspec); unsigned long offset = devpriv->registers + chan * 2; /* Writing a list of values to an AO channel is probably not * very useful, but that's how the interface is defined. */ for (i = 0; i < insn->n; i++) { /* first, load the low byte */ outb((char)(data[i] & 0x00ff), offset); /* next, write the high byte -- only after this is written is the channel voltage updated in the DAC, unless we're in simultaneous xfer mode (jumper on card) then a rinsn is necessary to actually update the DAC -- see ao_rinsn() below... */ outb((char)(data[i] >> 8 & 0x00ff), offset + 1); /* for testing only.. the actual rinsn SHOULD do an inw! (see the stuff about simultaneous XFER mode on this board) */ devpriv->ao_readback[chan] = data[i]; } /* return the number of samples read/written */ return i; } /* AO subdevices should have a read insn as well as a write insn. Usually this means copying a value stored in devpriv->ao_readback. However, since this board has this jumper setting called "Simultaneous Xfer mode" (off by default), we will support it. Simultaneaous xfer mode is accomplished by loading ALL the values you want for AO in all the channels, then READing off one of the AO registers to initiate the instantaneous simultaneous update of all DAC outputs, which makes all AO channels update simultaneously. 
This is useful for some control applications, I would imagine. */ static int ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; int chan = CR_CHAN(insn->chanspec); for (i = 0; i < insn->n; i++) { inw(devpriv->registers + chan * 2); /* should I set data[i] to the result of the actual read on the register or the cached unsigned int in devpriv->ao_readback[]? */ data[i] = devpriv->ao_readback[chan]; } return i; } /*--------------------------------------------------------------------------- HELPER FUNCTION DEFINITIONS -----------------------------------------------------------------------------*/ /* * Probes for a supported device. * * Prerequisite: private be allocated already inside dev * * If the device is found, it returns 0 and has the following side effects: * * o assigns a struct pci_dev * to dev->private->pci_dev * o assigns a struct board * to dev->board_ptr * o sets dev->private->registers * o sets dev->private->dio_registers * * Otherwise, returns a -errno on error */ static int probe(struct comedi_device *dev, const struct comedi_devconfig *it) { struct pci_dev *pcidev = NULL; int index; unsigned long registers; for_each_pci_dev(pcidev) { /* is it not a computer boards card? */ if (pcidev->vendor != PCI_VENDOR_ID_COMPUTERBOARDS) continue; /* loop through cards supported by this driver */ for (index = 0; index < ARRAY_SIZE(boards); index++) { if (boards[index].device_id != pcidev->device) continue; /* was a particular bus/slot requested? */ if (it->options[0] || it->options[1]) { /* are we on the wrong bus/slot? */ if (pcidev->bus->number != it->options[0] || PCI_SLOT(pcidev->devfn) != it->options[1]) { continue; } } /* found ! 
*/ devpriv->pci_dev = pcidev; dev->board_ptr = boards + index; if (comedi_pci_enable(pcidev, thisboard->name)) { printk ("cb_pcimdda: Failed to enable PCI device and request regions\n"); return -EIO; } registers = pci_resource_start(devpriv->pci_dev, REGS_BADRINDEX); devpriv->registers = registers; devpriv->dio_registers = devpriv->registers + thisboard->dio_offset; return 0; } } printk("cb_pcimdda: No supported ComputerBoards/MeasurementComputing " "card found at the requested position\n"); return -ENODEV; }
gpl-2.0
PhenomX1998/FRACTALX-OP3
arch/powerpc/kernel/isa-bridge.c
4155
7332
/* * Routines for tracking a legacy ISA bridge * * Copyrigh 2007 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. * * Some bits and pieces moved over from pci_64.c * * Copyrigh 2003 Anton Blanchard <anton@au.ibm.com>, IBM Corp. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define DEBUG #include <linux/kernel.h> #include <linux/pci.h> #include <linux/string.h> #include <linux/export.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/notifier.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <asm/ppc-pci.h> unsigned long isa_io_base; /* NULL if no ISA bus */ EXPORT_SYMBOL(isa_io_base); /* Cached ISA bridge dev. */ static struct device_node *isa_bridge_devnode; struct pci_dev *isa_bridge_pcidev; EXPORT_SYMBOL_GPL(isa_bridge_pcidev); #define ISA_SPACE_MASK 0x1 #define ISA_SPACE_IO 0x1 static void pci_process_ISA_OF_ranges(struct device_node *isa_node, unsigned long phb_io_base_phys) { /* We should get some saner parsing here and remove these structs */ struct pci_address { u32 a_hi; u32 a_mid; u32 a_lo; }; struct isa_address { u32 a_hi; u32 a_lo; }; struct isa_range { struct isa_address isa_addr; struct pci_address pci_addr; unsigned int size; }; const struct isa_range *range; unsigned long pci_addr; unsigned int isa_addr; unsigned int size; int rlen = 0; range = of_get_property(isa_node, "ranges", &rlen); if (range == NULL || (rlen < sizeof(struct isa_range))) goto inval_range; /* From "ISA Binding to 1275" * The ranges property is laid out as an array of elements, * each of which comprises: * cells 0 - 1: an ISA address * cells 2 - 4: a PCI address * (size depending on dev->n_addr_cells) * cell 5: the size of the range */ if ((range->isa_addr.a_hi & 
ISA_SPACE_MASK) != ISA_SPACE_IO) { range++; rlen -= sizeof(struct isa_range); if (rlen < sizeof(struct isa_range)) goto inval_range; } if ((range->isa_addr.a_hi & ISA_SPACE_MASK) != ISA_SPACE_IO) goto inval_range; isa_addr = range->isa_addr.a_lo; pci_addr = (unsigned long) range->pci_addr.a_mid << 32 | range->pci_addr.a_lo; /* Assume these are both zero. Note: We could fix that and * do a proper parsing instead ... oh well, that will do for * now as nobody uses fancy mappings for ISA bridges */ if ((pci_addr != 0) || (isa_addr != 0)) { printk(KERN_ERR "unexpected isa to pci mapping: %s\n", __func__); return; } /* Align size and make sure it's cropped to 64K */ size = PAGE_ALIGN(range->size); if (size > 0x10000) size = 0x10000; __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE, size, _PAGE_NO_CACHE|_PAGE_GUARDED); return; inval_range: printk(KERN_ERR "no ISA IO ranges or unexpected isa range, " "mapping 64k\n"); __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE, 0x10000, _PAGE_NO_CACHE|_PAGE_GUARDED); } /** * isa_bridge_find_early - Find and map the ISA IO space early before * main PCI discovery. This is optionally called by * the arch code when adding PCI PHBs to get early * access to ISA IO ports */ void __init isa_bridge_find_early(struct pci_controller *hose) { struct device_node *np, *parent = NULL, *tmp; /* If we already have an ISA bridge, bail off */ if (isa_bridge_devnode != NULL) return; /* For each "isa" node in the system. Note : we do a search by * type and not by name. It might be better to do by name but that's * what the code used to do and I don't want to break too much at * once. 
We can look into changing that separately */ for_each_node_by_type(np, "isa") { /* Look for our hose being a parent */ for (parent = of_get_parent(np); parent;) { if (parent == hose->dn) { of_node_put(parent); break; } tmp = parent; parent = of_get_parent(parent); of_node_put(tmp); } if (parent != NULL) break; } if (np == NULL) return; isa_bridge_devnode = np; /* Now parse the "ranges" property and setup the ISA mapping */ pci_process_ISA_OF_ranges(np, hose->io_base_phys); /* Set the global ISA io base to indicate we have an ISA bridge */ isa_io_base = ISA_IO_BASE; pr_debug("ISA bridge (early) is %s\n", np->full_name); } /** * isa_bridge_find_late - Find and map the ISA IO space upon discovery of * a new ISA bridge */ static void isa_bridge_find_late(struct pci_dev *pdev, struct device_node *devnode) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); /* Store ISA device node and PCI device */ isa_bridge_devnode = of_node_get(devnode); isa_bridge_pcidev = pdev; /* Now parse the "ranges" property and setup the ISA mapping */ pci_process_ISA_OF_ranges(devnode, hose->io_base_phys); /* Set the global ISA io base to indicate we have an ISA bridge */ isa_io_base = ISA_IO_BASE; pr_debug("ISA bridge (late) is %s on %s\n", devnode->full_name, pci_name(pdev)); } /** * isa_bridge_remove - Remove/unmap an ISA bridge */ static void isa_bridge_remove(void) { pr_debug("ISA bridge removed !\n"); /* Clear the global ISA io base to indicate that we have no more * ISA bridge. Note that drivers don't quite handle that, though * we should probably do something about it. But do we ever really * have ISA bridges being removed on machines using legacy devices ? 
*/ isa_io_base = ISA_IO_BASE; /* Clear references to the bridge */ of_node_put(isa_bridge_devnode); isa_bridge_devnode = NULL; isa_bridge_pcidev = NULL; /* Unmap the ISA area */ __iounmap_at((void *)ISA_IO_BASE, 0x10000); } /** * isa_bridge_notify - Get notified of PCI devices addition/removal */ static int isa_bridge_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; struct pci_dev *pdev = to_pci_dev(dev); struct device_node *devnode = pci_device_to_OF_node(pdev); switch(action) { case BUS_NOTIFY_ADD_DEVICE: /* Check if we have an early ISA device, without PCI dev */ if (isa_bridge_devnode && isa_bridge_devnode == devnode && !isa_bridge_pcidev) { pr_debug("ISA bridge PCI attached: %s\n", pci_name(pdev)); isa_bridge_pcidev = pdev; } /* Check if we have no ISA device, and this happens to be one, * register it as such if it has an OF device */ if (!isa_bridge_devnode && devnode && devnode->type && !strcmp(devnode->type, "isa")) isa_bridge_find_late(pdev, devnode); return 0; case BUS_NOTIFY_DEL_DEVICE: /* Check if this our existing ISA device */ if (pdev == isa_bridge_pcidev || (devnode && devnode == isa_bridge_devnode)) isa_bridge_remove(); return 0; } return 0; } static struct notifier_block isa_bridge_notifier = { .notifier_call = isa_bridge_notify }; /** * isa_bridge_init - register to be notified of ISA bridge addition/removal * */ static int __init isa_bridge_init(void) { bus_register_notifier(&pci_bus_type, &isa_bridge_notifier); return 0; } arch_initcall(isa_bridge_init);
gpl-2.0
brindev/android_kernel_htc_msm8960
arch/cris/arch-v10/drivers/sync_serial.c
4667
44134
/* * Simple synchronous serial port driver for ETRAX 100LX. * * Synchronous serial ports are used for continuous streamed data like audio. * The default setting for this driver is compatible with the STA 013 MP3 * decoder. The driver can easily be tuned to fit other audio encoder/decoders * and SPI * * Copyright (c) 2001-2008 Axis Communications AB * * Author: Mikael Starvik, Johan Adolfsson * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/major.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/timer.h> #include <asm/irq.h> #include <asm/dma.h> #include <asm/io.h> #include <arch/svinto.h> #include <asm/uaccess.h> #include <asm/sync_serial.h> #include <arch/io_interface_mux.h> /* The receiver is a bit tricky because of the continuous stream of data.*/ /* */ /* Three DMA descriptors are linked together. Each DMA descriptor is */ /* responsible for port->bufchunk of a common buffer. */ /* */ /* +---------------------------------------------+ */ /* | +----------+ +----------+ +----------+ | */ /* +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+ */ /* +----------+ +----------+ +----------+ */ /* | | | */ /* v v v */ /* +-------------------------------------+ */ /* | BUFFER | */ /* +-------------------------------------+ */ /* |<- data_avail ->| */ /* readp writep */ /* */ /* If the application keeps up the pace readp will be right after writep.*/ /* If the application can't keep the pace we have to throw away data. */ /* The idea is that readp should be ready with the data pointed out by */ /* Descr[i] when the DMA has filled in Descr[i+1]. 
*/ /* Otherwise we will discard */ /* the rest of the data pointed out by Descr1 and set readp to the start */ /* of Descr2 */ #define SYNC_SERIAL_MAJOR 125 /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */ /* words can be handled */ #define IN_BUFFER_SIZE 12288 #define IN_DESCR_SIZE 256 #define NUM_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE) #define OUT_BUFFER_SIZE 4096 #define DEFAULT_FRAME_RATE 0 #define DEFAULT_WORD_RATE 7 /* NOTE: Enabling some debug will likely cause overrun or underrun, * especially if manual mode is use. */ #define DEBUG(x) #define DEBUGREAD(x) #define DEBUGWRITE(x) #define DEBUGPOLL(x) #define DEBUGRXINT(x) #define DEBUGTXINT(x) /* Define some macros to access ETRAX 100 registers */ #define SETF(var, reg, field, val) \ do { \ var = (var & ~IO_MASK_(reg##_, field##_)) | \ IO_FIELD_(reg##_, field##_, val); \ } while (0) #define SETS(var, reg, field, val) \ do { \ var = (var & ~IO_MASK_(reg##_, field##_)) | \ IO_STATE_(reg##_, field##_, _##val); \ } while (0) struct sync_port { /* Etrax registers and bits*/ const volatile unsigned *const status; volatile unsigned *const ctrl_data; volatile unsigned *const output_dma_first; volatile unsigned char *const output_dma_cmd; volatile unsigned char *const output_dma_clr_irq; volatile unsigned *const input_dma_first; volatile unsigned char *const input_dma_cmd; volatile unsigned *const input_dma_descr; /* 8*4 */ volatile unsigned char *const input_dma_clr_irq; volatile unsigned *const data_out; const volatile unsigned *const data_in; char data_avail_bit; /* In R_IRQ_MASK1_RD/SET/CLR */ char transmitter_ready_bit; /* In R_IRQ_MASK1_RD/SET/CLR */ char input_dma_descr_bit; /* In R_IRQ_MASK2_RD */ char output_dma_bit; /* In R_IRQ_MASK2_RD */ /* End of fields initialised in array */ char started; /* 1 if port has been started */ char port_nbr; /* Port 0 or 1 */ char busy; /* 1 if port is busy */ char enabled; /* 1 if port is enabled */ char use_dma; /* 1 if port uses dma */ char 
tr_running; char init_irqs; /* Register shadow */ unsigned int ctrl_data_shadow; /* Remaining bytes for current transfer */ volatile unsigned int out_count; /* Current position in out_buffer */ unsigned char *outp; /* 16*4 */ /* Next byte to be read by application */ volatile unsigned char *volatile readp; /* Next byte to be written by etrax */ volatile unsigned char *volatile writep; unsigned int in_buffer_size; unsigned int inbufchunk; struct etrax_dma_descr out_descr __attribute__ ((aligned(32))); struct etrax_dma_descr in_descr[NUM_IN_DESCR] __attribute__ ((aligned(32))); unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32))); unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32))); unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32))); struct etrax_dma_descr *next_rx_desc; struct etrax_dma_descr *prev_rx_desc; int full; wait_queue_head_t out_wait_q; wait_queue_head_t in_wait_q; }; static DEFINE_MUTEX(sync_serial_mutex); static int etrax_sync_serial_init(void); static void initialize_port(int portnbr); static inline int sync_data_avail(struct sync_port *port); static int sync_serial_open(struct inode *inode, struct file *file); static int sync_serial_release(struct inode *inode, struct file *file); static unsigned int sync_serial_poll(struct file *filp, poll_table *wait); static long sync_serial_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static ssize_t sync_serial_write(struct file *file, const char *buf, size_t count, loff_t *ppos); static ssize_t sync_serial_read(struct file *file, char *buf, size_t count, loff_t *ppos); #if ((defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \ defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \ (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \ defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))) #define SYNC_SER_DMA #endif static void send_word(struct sync_port *port); static void start_dma(struct sync_port *port, const char *data, int count); static void 
start_dma_in(struct sync_port *port); #ifdef SYNC_SER_DMA static irqreturn_t tr_interrupt(int irq, void *dev_id); static irqreturn_t rx_interrupt(int irq, void *dev_id); #endif #if ((defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \ !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \ (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \ !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))) #define SYNC_SER_MANUAL #endif #ifdef SYNC_SER_MANUAL static irqreturn_t manual_interrupt(int irq, void *dev_id); #endif /* The ports */ static struct sync_port ports[] = { { .status = R_SYNC_SERIAL1_STATUS, .ctrl_data = R_SYNC_SERIAL1_CTRL, .output_dma_first = R_DMA_CH8_FIRST, .output_dma_cmd = R_DMA_CH8_CMD, .output_dma_clr_irq = R_DMA_CH8_CLR_INTR, .input_dma_first = R_DMA_CH9_FIRST, .input_dma_cmd = R_DMA_CH9_CMD, .input_dma_descr = R_DMA_CH9_DESCR, .input_dma_clr_irq = R_DMA_CH9_CLR_INTR, .data_out = R_SYNC_SERIAL1_TR_DATA, .data_in = R_SYNC_SERIAL1_REC_DATA, .data_avail_bit = IO_BITNR(R_IRQ_MASK1_RD, ser1_data), .transmitter_ready_bit = IO_BITNR(R_IRQ_MASK1_RD, ser1_ready), .input_dma_descr_bit = IO_BITNR(R_IRQ_MASK2_RD, dma9_descr), .output_dma_bit = IO_BITNR(R_IRQ_MASK2_RD, dma8_eop), .init_irqs = 1, #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA) .use_dma = 1, #else .use_dma = 0, #endif }, { .status = R_SYNC_SERIAL3_STATUS, .ctrl_data = R_SYNC_SERIAL3_CTRL, .output_dma_first = R_DMA_CH4_FIRST, .output_dma_cmd = R_DMA_CH4_CMD, .output_dma_clr_irq = R_DMA_CH4_CLR_INTR, .input_dma_first = R_DMA_CH5_FIRST, .input_dma_cmd = R_DMA_CH5_CMD, .input_dma_descr = R_DMA_CH5_DESCR, .input_dma_clr_irq = R_DMA_CH5_CLR_INTR, .data_out = R_SYNC_SERIAL3_TR_DATA, .data_in = R_SYNC_SERIAL3_REC_DATA, .data_avail_bit = IO_BITNR(R_IRQ_MASK1_RD, ser3_data), .transmitter_ready_bit = IO_BITNR(R_IRQ_MASK1_RD, ser3_ready), .input_dma_descr_bit = IO_BITNR(R_IRQ_MASK2_RD, dma5_descr), .output_dma_bit = IO_BITNR(R_IRQ_MASK2_RD, dma4_eop), .init_irqs = 1, #if 
defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA) .use_dma = 1, #else .use_dma = 0, #endif } }; /* Register shadows */ static unsigned sync_serial_prescale_shadow; #define NUMBER_OF_PORTS 2 static const struct file_operations sync_serial_fops = { .owner = THIS_MODULE, .write = sync_serial_write, .read = sync_serial_read, .poll = sync_serial_poll, .unlocked_ioctl = sync_serial_ioctl, .open = sync_serial_open, .release = sync_serial_release, .llseek = noop_llseek, }; static int __init etrax_sync_serial_init(void) { ports[0].enabled = 0; ports[1].enabled = 0; #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) if (cris_request_io_interface(if_sync_serial_1, "sync_ser1")) { printk(KERN_CRIT "ETRAX100LX sync_serial: " "Could not allocate IO group for port %d\n", 0); return -EBUSY; } #endif #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) if (cris_request_io_interface(if_sync_serial_3, "sync_ser3")) { #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) cris_free_io_interface(if_sync_serial_1); #endif printk(KERN_CRIT "ETRAX100LX sync_serial: " "Could not allocate IO group for port %d\n", 1); return -EBUSY; } #endif if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial", &sync_serial_fops) < 0) { #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) cris_free_io_interface(if_sync_serial_3); #endif #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) cris_free_io_interface(if_sync_serial_1); #endif printk("unable to get major for synchronous serial port\n"); return -EBUSY; } /* Deselect synchronous serial ports while configuring. 
 */
	/* Default both channels to asynchronous mode; the per-port #ifdef
	 * blocks below switch enabled ports back to synchronous mode. */
	SETS(gen_config_ii_shadow, R_GEN_CONFIG_II, sermode1, async);
	SETS(gen_config_ii_shadow, R_GEN_CONFIG_II, sermode3, async);
	*R_GEN_CONFIG_II = gen_config_ii_shadow;

	/* Initialize Ports */
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
	ports[0].enabled = 1;
	SETS(port_pb_i2c_shadow, R_PORT_PB_I2C, syncser1, ss1extra);
	SETS(gen_config_ii_shadow, R_GEN_CONFIG_II, sermode1, sync);
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
	ports[0].use_dma = 1;
#else
	ports[0].use_dma = 0;
#endif
	initialize_port(0);
#endif

#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
	ports[1].enabled = 1;
	SETS(port_pb_i2c_shadow, R_PORT_PB_I2C, syncser3, ss3extra);
	SETS(gen_config_ii_shadow, R_GEN_CONFIG_II, sermode3, sync);
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
	ports[1].use_dma = 1;
#else
	ports[1].use_dma = 0;
#endif
	initialize_port(1);
#endif

	*R_PORT_PB_I2C = port_pb_i2c_shadow; /* Use PB4/PB7 */

	/* Set up timing */
	*R_SYNC_SERIAL_PRESCALE = sync_serial_prescale_shadow = (
		IO_STATE(R_SYNC_SERIAL_PRESCALE, clk_sel_u1, codec) |
		IO_STATE(R_SYNC_SERIAL_PRESCALE, word_stb_sel_u1, external) |
		IO_STATE(R_SYNC_SERIAL_PRESCALE, clk_sel_u3, codec) |
		IO_STATE(R_SYNC_SERIAL_PRESCALE, word_stb_sel_u3, external) |
		IO_STATE(R_SYNC_SERIAL_PRESCALE, prescaler, div4) |
		IO_FIELD(R_SYNC_SERIAL_PRESCALE, frame_rate,
			DEFAULT_FRAME_RATE) |
		IO_FIELD(R_SYNC_SERIAL_PRESCALE, word_rate,
			DEFAULT_WORD_RATE) |
		IO_STATE(R_SYNC_SERIAL_PRESCALE, warp_mode, normal));

	/* Select synchronous ports */
	*R_GEN_CONFIG_II = gen_config_ii_shadow;

	printk(KERN_INFO "ETRAX 100LX synchronous serial port driver\n");
	return 0;
}

/*
 * initialize_port - set software state and default hardware shadow for one port
 *
 * Resets ring-buffer bookkeeping (outp/readp/writep), links the RX
 * descriptor chain tail to d_eol, and builds the default control-register
 * shadow (115.2 kHz baud, master output, 8-bit words, clock halted,
 * rx/tx disabled).  The shadow is written to hardware at the end.
 */
static void __init initialize_port(int portnbr)
{
	struct sync_port *port = &ports[portnbr];

	DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));

	port->started = 0;
	port->port_nbr = portnbr;
	port->busy = 0;
	port->tr_running = 0;

	port->out_count = 0;
	port->outp = port->out_buffer;

	/* Empty RX ring: read and write pointers coincide at the base */
	port->readp = port->flip;
	port->writep = port->flip;
	port->in_buffer_size = IN_BUFFER_SIZE;
	port->inbufchunk = IN_DESCR_SIZE;
	port->next_rx_desc = &port->in_descr[0];
	port->prev_rx_desc = &port->in_descr[NUM_IN_DESCR-1];
	/* Mark the last descriptor end-of-list until start_dma_in() relinks */
	port->prev_rx_desc->ctrl = d_eol;

	init_waitqueue_head(&port->out_wait_q);
	init_waitqueue_head(&port->in_wait_q);

	/* Default hardware configuration; changed later via ioctl */
	port->ctrl_data_shadow =
		IO_STATE(R_SYNC_SERIAL1_CTRL, tr_baud, c115k2Hz) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, mode, master_output) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, error, ignore) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, rec_enable, disable) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, f_synctype, normal) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, f_syncsize, word) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, f_sync, on) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, clk_mode, normal) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, clk_halt, stopped) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, bitorder, msb) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, tr_enable, disable) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, wordsize, size8bit) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, buf_empty, lmt_8) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, buf_full, lmt_8) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, flow_ctrl, enabled) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, clk_polarity, neg) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, frame_polarity, normal)|
		IO_STATE(R_SYNC_SERIAL1_CTRL, status_polarity, inverted)|
		IO_STATE(R_SYNC_SERIAL1_CTRL, clk_driver, normal) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, frame_driver, normal) |
		IO_STATE(R_SYNC_SERIAL1_CTRL, status_driver, normal)|
		IO_STATE(R_SYNC_SERIAL1_CTRL, def_out0, high);

	if (port->use_dma)
		port->ctrl_data_shadow |= IO_STATE(R_SYNC_SERIAL1_CTRL,
			dma_enable, on);
	else
		port->ctrl_data_shadow |= IO_STATE(R_SYNC_SERIAL1_CTRL,
			dma_enable, off);
	*port->ctrl_data = port->ctrl_data_shadow;
}

/*
 * sync_data_avail - total bytes available in the circular RX buffer
 * (distance from readp to writep, accounting for wrap-around).
 */
static inline int sync_data_avail(struct sync_port *port)
{
	int avail;
	unsigned char *start;
	unsigned char *end;

	start = (unsigned char *)port->readp; /* cast away volatile */
	end = (unsigned char *)port->writep; /* cast away volatile */
	/* 0123456789  0123456789
	 *  -----      -    -----
	 *  ^rp  ^wp    ^wp ^rp
	 */
	if (end >= start)
		avail = end - start;
	else
		avail = port->in_buffer_size - (start - end);
	return avail;
}

/*
 * sync_data_avail_to_end - contiguous bytes readable from readp without
 * wrapping (used for single-memcpy reads).
 */
static inline int sync_data_avail_to_end(struct sync_port *port)
{
	int avail;
	unsigned char *start;
	unsigned char *end;

	start = (unsigned char *)port->readp; /* cast away volatile */
	end = (unsigned char *)port->writep; /* cast away volatile */
	/* 0123456789  0123456789
	 *  -----           -----
	 *  ^rp  ^wp    ^wp ^rp
	 */

	if (end >= start)
		avail = end - start;
	else
		avail = port->flip + port->in_buffer_size - start;
	return avail;
}

/*
 * sync_serial_open - open a sync serial device node
 *
 * On first open, requests the port's IRQs and (in DMA mode) its DMA
 * channels, then starts input DMA.  A port may be opened at most twice
 * (one reader + one writer).  If the configured mode is an input or
 * bidirectional mode the receiver is started immediately.
 * Returns 0, -EBUSY, or -ENODEV; serialized by sync_serial_mutex.
 */
static int sync_serial_open(struct inode *inode, struct file *file)
{
	int dev = MINOR(inode->i_rdev);
	struct sync_port *port;
	int mode;
	int err = -EBUSY;

	mutex_lock(&sync_serial_mutex);
	DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));

	if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled) {
		DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
		err = -ENODEV;
		goto out;
	}
	port = &ports[dev];
	/* Allow open this device twice (assuming one reader and one writer) */
	if (port->busy == 2) {
		DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
		goto out;
	}
	if (port->init_irqs) {
		if (port->use_dma) {
			if (port == &ports[0]) {
#ifdef SYNC_SER_DMA
				if (request_irq(24, tr_interrupt, 0,
						"synchronous serial 1 dma tr",
						&ports[0])) {
					printk(KERN_CRIT "Can't alloc "
						"sync serial port 1 IRQ");
					goto out;
				} else if (request_irq(25, rx_interrupt, 0,
						"synchronous serial 1 dma rx",
						&ports[0])) {
					/* NOTE(review): &port[0] == port ==
					 * &ports[0] in this branch, so the
					 * dev_id matches request_irq above. */
					free_irq(24, &port[0]);
					printk(KERN_CRIT "Can't alloc "
						"sync serial port 1 IRQ");
					goto out;
				} else if (cris_request_dma(8,
						"synchronous serial 1 dma tr",
						DMA_VERBOSE_ON_ERROR,
						dma_ser1)) {
					free_irq(24, &port[0]);
					free_irq(25, &port[0]);
					printk(KERN_CRIT "Can't alloc "
						"sync serial port 1 "
						"TX DMA channel");
					goto out;
				} else if (cris_request_dma(9,
						"synchronous serial 1 dma rec",
						DMA_VERBOSE_ON_ERROR,
						dma_ser1)) {
					cris_free_dma(8, NULL);
					free_irq(24, &port[0]);
					free_irq(25, &port[0]);
					printk(KERN_CRIT "Can't alloc "
						"sync serial port 1 "
						"RX DMA channel");
					goto out;
				}
#endif
				/* Quiesce channels and clear stale IRQs
				 * before unmasking them */
				RESET_DMA(8); WAIT_DMA(8);
				RESET_DMA(9); WAIT_DMA(9);
				*R_DMA_CH8_CLR_INTR =
					IO_STATE(R_DMA_CH8_CLR_INTR,
						clr_eop, do) |
					IO_STATE(R_DMA_CH8_CLR_INTR,
						clr_descr, do);
				*R_DMA_CH9_CLR_INTR =
					IO_STATE(R_DMA_CH9_CLR_INTR,
						clr_eop, do) |
					IO_STATE(R_DMA_CH9_CLR_INTR,
						clr_descr, do);
				*R_IRQ_MASK2_SET =
					IO_STATE(R_IRQ_MASK2_SET,
						dma8_eop, set) |
					IO_STATE(R_IRQ_MASK2_SET,
						dma9_descr, set);
			} else if (port == &ports[1]) {
#ifdef SYNC_SER_DMA
				if (request_irq(20, tr_interrupt, 0,
						"synchronous serial 3 dma tr",
						&ports[1])) {
					printk(KERN_CRIT "Can't alloc "
						"sync serial port 3 IRQ");
					goto out;
				} else if (request_irq(21, rx_interrupt, 0,
						"synchronous serial 3 dma rx",
						&ports[1])) {
					free_irq(20, &ports[1]);
					printk(KERN_CRIT "Can't alloc "
						"sync serial port 3 IRQ");
					goto out;
				} else if (cris_request_dma(4,
						"synchronous serial 3 dma tr",
						DMA_VERBOSE_ON_ERROR,
						dma_ser3)) {
					free_irq(21, &ports[1]);
					free_irq(20, &ports[1]);
					printk(KERN_CRIT "Can't alloc "
						"sync serial port 3 "
						"TX DMA channel");
					goto out;
				} else if (cris_request_dma(5,
						"synchronous serial 3 dma rec",
						DMA_VERBOSE_ON_ERROR,
						dma_ser3)) {
					cris_free_dma(4, NULL);
					free_irq(21, &ports[1]);
					free_irq(20, &ports[1]);
					printk(KERN_CRIT "Can't alloc "
						"sync serial port 3 "
						"RX DMA channel");
					goto out;
				}
#endif
				RESET_DMA(4); WAIT_DMA(4);
				RESET_DMA(5); WAIT_DMA(5);
				*R_DMA_CH4_CLR_INTR =
					IO_STATE(R_DMA_CH4_CLR_INTR,
						clr_eop, do) |
					IO_STATE(R_DMA_CH4_CLR_INTR,
						clr_descr, do);
				*R_DMA_CH5_CLR_INTR =
					IO_STATE(R_DMA_CH5_CLR_INTR,
						clr_eop, do) |
					IO_STATE(R_DMA_CH5_CLR_INTR,
						clr_descr, do);
				*R_IRQ_MASK2_SET =
					IO_STATE(R_IRQ_MASK2_SET,
						dma4_eop, set) |
					IO_STATE(R_IRQ_MASK2_SET,
						dma5_descr, set);
			}
			start_dma_in(port);
			port->init_irqs = 0;
		} else { /* !port->use_dma */
#ifdef SYNC_SER_MANUAL
			/* Both ports share IRQ 8 in manual mode, hence
			 * IRQF_SHARED with a per-port dev_id */
			if (port == &ports[0]) {
				if (request_irq(8, manual_interrupt,
						IRQF_SHARED | IRQF_DISABLED,
						"synchronous serial manual irq",
						&ports[0])) {
					printk(KERN_CRIT "Can't alloc "
						"sync serial manual irq");
					goto out;
				}
			} else if (port == &ports[1]) {
				if (request_irq(8, manual_interrupt,
						IRQF_SHARED | IRQF_DISABLED,
						"synchronous serial manual irq",
						&ports[1])) {
					printk(KERN_CRIT "Can't alloc "
						"sync serial manual irq");
					goto out;
				}
			}
			port->init_irqs = 0;
#else
			panic("sync_serial: Manual mode not supported.\n");
#endif /* SYNC_SER_MANUAL */
		}
	} /* port->init_irqs */
	port->busy++;

	/* Start port if we use it as input */
	mode = IO_EXTRACT(R_SYNC_SERIAL1_CTRL, mode, port->ctrl_data_shadow);
	if (mode == IO_STATE_VALUE(R_SYNC_SERIAL1_CTRL, mode, master_input) ||
	    mode == IO_STATE_VALUE(R_SYNC_SERIAL1_CTRL, mode, slave_input) ||
	    mode == IO_STATE_VALUE(R_SYNC_SERIAL1_CTRL, mode, master_bidir) ||
	    mode == IO_STATE_VALUE(R_SYNC_SERIAL1_CTRL, mode, slave_bidir)) {
		SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL, clk_halt,
			running);
		SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL, tr_enable,
			enable);
		SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL, rec_enable,
			enable);
		port->started = 1;
		*port->ctrl_data = port->ctrl_data_shadow;
		if (!port->use_dma)
			*R_IRQ_MASK1_SET = 1 << port->data_avail_bit;
		DEBUG(printk(KERN_DEBUG "sser%d rec started\n", dev));
	}
	err = 0;

out:
	mutex_unlock(&sync_serial_mutex);
	return err;
}

/*
 * sync_serial_release - drop one open reference; when the last reference
 * goes away, mask the port's data-available and transmitter-ready IRQs.
 * IRQs/DMA channels themselves stay allocated (init_irqs is not reset).
 */
static int sync_serial_release(struct inode *inode, struct file *file)
{
	int dev = MINOR(inode->i_rdev);
	struct sync_port *port;

	if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled) {
		DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];
	if (port->busy)
		port->busy--;
	if (!port->busy)
		*R_IRQ_MASK1_CLR = ((1 << port->data_avail_bit) |
				    (1 << port->transmitter_ready_bit));
	return 0;
}

/*
 * sync_serial_poll - poll/select support
 *
 * Writable while the output ring has room; readable once at least one
 * inbufchunk of data has accumulated (matches the DMA descriptor size).
 * NOTE(review): unlike open/read/write, the minor is not range-checked
 * here before indexing ports[] -- presumably relying on the fd having
 * passed open(); confirm before reuse.
 */
static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
{
	int dev = MINOR(file->f_dentry->d_inode->i_rdev);
	unsigned int mask = 0;
	struct sync_port *port;
	DEBUGPOLL(static unsigned int prev_mask = 0);

	port = &ports[dev];
	poll_wait(file, &port->out_wait_q, wait);
	poll_wait(file, &port->in_wait_q, wait);
	/* Some room to write */
	if (port->out_count < OUT_BUFFER_SIZE)
		mask |= POLLOUT | POLLWRNORM;
	/* At least an inbufchunk of data */
	if (sync_data_avail(port) >=
	    port->inbufchunk)
		mask |= POLLIN | POLLRDNORM;

	DEBUGPOLL(if (mask != prev_mask)
		printk(KERN_DEBUG "sync_serial_poll: mask 0x%08X %s %s\n",
			mask,
			mask & POLLOUT ? "POLLOUT" : "",
			mask & POLLIN ? "POLLIN" : "");
		prev_mask = mask;
	);
	return mask;
}

/*
 * sync_serial_ioctl_unlocked - apply a configuration change
 *
 * Sequence: with IRQs off, stop the port (reset its TX DMA channel,
 * drop any buffered output, switch the channel to async so it is
 * inert), then update the relevant shadow registers per cmd, and
 * finally re-write config, switch back to sync mode and restart input
 * DMA.  dev==0 is serial 1 (DMA 8/9), dev!=0 is serial 3 (DMA 4/5).
 *
 * NOTE(review): the early "return -1" and "return -EINVAL" paths exit
 * with the port still in async/disabled state and do not restore the
 * config written at the top -- confirm whether callers re-issue a
 * valid ioctl afterwards.  Also -1 is not a proper errno value.
 */
static int sync_serial_ioctl_unlocked(struct file *file,
		 unsigned int cmd, unsigned long arg)
{
	int return_val = 0;
	unsigned long flags;

	int dev = MINOR(file->f_dentry->d_inode->i_rdev);
	struct sync_port *port;

	if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled) {
		DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
		return -1;
	}
	port = &ports[dev];

	local_irq_save(flags);
	/* Disable port while changing config */
	if (dev) {
		if (port->use_dma) {
			RESET_DMA(4);
			WAIT_DMA(4);
			port->tr_running = 0;
			port->out_count = 0;
			port->outp = port->out_buffer;
			*R_DMA_CH4_CLR_INTR =
				IO_STATE(R_DMA_CH4_CLR_INTR, clr_eop, do) |
				IO_STATE(R_DMA_CH4_CLR_INTR, clr_descr, do);
		}
		SETS(gen_config_ii_shadow, R_GEN_CONFIG_II, sermode3, async);
	} else {
		if (port->use_dma) {
			RESET_DMA(8);
			WAIT_DMA(8);
			port->tr_running = 0;
			port->out_count = 0;
			port->outp = port->out_buffer;
			*R_DMA_CH8_CLR_INTR =
				IO_STATE(R_DMA_CH8_CLR_INTR, clr_eop, do) |
				IO_STATE(R_DMA_CH8_CLR_INTR, clr_descr, do);
		}
		SETS(gen_config_ii_shadow, R_GEN_CONFIG_II, sermode1, async);
	}
	*R_GEN_CONFIG_II = gen_config_ii_shadow;
	local_irq_restore(flags);

	switch (cmd) {
	case SSP_SPEED:
		/* CODEC: clock from prescaler; otherwise use the internal
		 * baud-rate generator */
		if (GET_SPEED(arg) == CODEC) {
			if (dev)
				SETS(sync_serial_prescale_shadow,
					R_SYNC_SERIAL_PRESCALE, clk_sel_u3,
					codec);
			else
				SETS(sync_serial_prescale_shadow,
					R_SYNC_SERIAL_PRESCALE, clk_sel_u1,
					codec);

			SETF(sync_serial_prescale_shadow,
				R_SYNC_SERIAL_PRESCALE, prescaler,
				GET_FREQ(arg));
			SETF(sync_serial_prescale_shadow,
				R_SYNC_SERIAL_PRESCALE, frame_rate,
				GET_FRAME_RATE(arg));
			SETF(sync_serial_prescale_shadow,
				R_SYNC_SERIAL_PRESCALE, word_rate,
				GET_WORD_RATE(arg));
		} else {
			SETF(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				tr_baud, GET_SPEED(arg));
			if (dev)
				SETS(sync_serial_prescale_shadow,
					R_SYNC_SERIAL_PRESCALE, clk_sel_u3,
					baudrate);
			else
				SETS(sync_serial_prescale_shadow,
					R_SYNC_SERIAL_PRESCALE, clk_sel_u1,
					baudrate);
		}
		break;
	case SSP_MODE:
		if (arg > 5)
			return -EINVAL;
		/* Output-only modes never need the data-available IRQ;
		 * in manual (non-DMA) input modes it drives reception */
		if (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT)
			*R_IRQ_MASK1_CLR = 1 << port->data_avail_bit;
		else if (!port->use_dma)
			*R_IRQ_MASK1_SET = 1 << port->data_avail_bit;
		SETF(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL, mode, arg);
		break;
	case SSP_FRAME_SYNC:
		/* arg is a bit-mask; each group below is if/else-if so the
		 * first matching flag in a group wins */
		if (arg & NORMAL_SYNC)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				f_synctype, normal);
		else if (arg & EARLY_SYNC)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				f_synctype, early);

		if (arg & BIT_SYNC)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				f_syncsize, bit);
		else if (arg & WORD_SYNC)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				f_syncsize, word);
		else if (arg & EXTENDED_SYNC)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				f_syncsize, extended);

		if (arg & SYNC_ON)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				f_sync, on);
		else if (arg & SYNC_OFF)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				f_sync, off);

		if (arg & WORD_SIZE_8)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				wordsize, size8bit);
		else if (arg & WORD_SIZE_12)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				wordsize, size12bit);
		else if (arg & WORD_SIZE_16)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				wordsize, size16bit);
		else if (arg & WORD_SIZE_24)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				wordsize, size24bit);
		else if (arg & WORD_SIZE_32)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				wordsize, size32bit);

		if (arg & BIT_ORDER_MSB)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				bitorder, msb);
		else if (arg & BIT_ORDER_LSB)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				bitorder, lsb);

		if (arg & FLOW_CONTROL_ENABLE)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				flow_ctrl, enabled);
		else if (arg & FLOW_CONTROL_DISABLE)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				flow_ctrl, disabled);

		if (arg & CLOCK_NOT_GATED)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				clk_mode, normal);
		else if (arg & CLOCK_GATED)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				clk_mode, gated);

		break;
	case SSP_IPOLARITY:
		/* NOTE!! negedge is considered NORMAL */
		if (arg & CLOCK_NORMAL)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				clk_polarity, neg);
		else if (arg & CLOCK_INVERT)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				clk_polarity, pos);

		if (arg & FRAME_NORMAL)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				frame_polarity, normal);
		else if (arg & FRAME_INVERT)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				frame_polarity, inverted);

		if (arg & STATUS_NORMAL)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				status_polarity, normal);
		else if (arg & STATUS_INVERT)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				status_polarity, inverted);
		break;
	case SSP_OPOLARITY:
		if (arg & CLOCK_NORMAL)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				clk_driver, normal);
		else if (arg & CLOCK_INVERT)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				clk_driver, inverted);

		if (arg & FRAME_NORMAL)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				frame_driver, normal);
		else if (arg & FRAME_INVERT)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				frame_driver, inverted);

		if (arg & STATUS_NORMAL)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				status_driver, normal);
		else if (arg & STATUS_INVERT)
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				status_driver, inverted);
		break;
	case SSP_SPI:
		/* Canned SPI setup: 8-bit MSB-first words, word sync on,
		 * no flow control; slave vs master decided by SPI_SLAVE */
		SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL, flow_ctrl,
			disabled);
		SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL, bitorder,
			msb);
		SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL, wordsize,
			size8bit);
		SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL, f_sync, on);
		SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL, f_syncsize,
			word);
		SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL, f_synctype,
			normal);
		if (arg & SPI_SLAVE) {
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				frame_polarity, inverted);
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				clk_polarity, neg);
			SETF(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				mode, SLAVE_INPUT);
		} else {
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				frame_driver, inverted);
			SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				clk_driver, inverted);
			SETF(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL,
				mode, MASTER_OUTPUT);
		}
		break;
	case SSP_INBUFCHUNK:
		/* Deliberately disabled (#if 0) -- changing the RX chunk
		 * size at runtime is not supported; the case is accepted
		 * as a no-op. */
#if 0
		if (arg > port->in_buffer_size/NUM_IN_DESCR)
			return -EINVAL;
		port->inbufchunk = arg;
		/* Make sure in_buffer_size is a multiple of inbufchunk */
		port->in_buffer_size =
			(port->in_buffer_size/port->inbufchunk) *
			port->inbufchunk;
		DEBUG(printk(KERN_DEBUG "inbufchunk %i in_buffer_size: %i\n",
			port->inbufchunk, port->in_buffer_size));
		if (port->use_dma) {
			if (port->port_nbr == 0) {
				RESET_DMA(9);
				WAIT_DMA(9);
			} else {
				RESET_DMA(5);
				WAIT_DMA(5);
			}
			start_dma_in(port);
		}
#endif
		break;
	default:
		return_val = -1;
	}
	/* Make sure we write the config without interruption */
	local_irq_save(flags);
	/* Set config and enable port */
	*port->ctrl_data = port->ctrl_data_shadow;
	nop(); nop(); nop(); nop();
	*R_SYNC_SERIAL_PRESCALE = sync_serial_prescale_shadow;
	nop(); nop(); nop(); nop();
	if (dev)
		SETS(gen_config_ii_shadow, R_GEN_CONFIG_II, sermode3, sync);
	else
		SETS(gen_config_ii_shadow, R_GEN_CONFIG_II, sermode1, sync);

	*R_GEN_CONFIG_II = gen_config_ii_shadow;
	/* Reset DMA. At readout from serial port the data could be shifted
	 * one byte if not resetting DMA.
	 */
	if (port->use_dma) {
		if (port->port_nbr == 0) {
			RESET_DMA(9);
			WAIT_DMA(9);
		} else {
			RESET_DMA(5);
			WAIT_DMA(5);
		}
		start_dma_in(port);
	}
	local_irq_restore(flags);
	return return_val;
}

/*
 * sync_serial_ioctl - unlocked_ioctl entry point; serializes all
 * configuration changes behind sync_serial_mutex.
 */
static long sync_serial_ioctl(struct file *file,
		unsigned int cmd, unsigned long arg)
{
	long ret;

	mutex_lock(&sync_serial_mutex);
	ret = sync_serial_ioctl_unlocked(file, cmd, arg);
	mutex_unlock(&sync_serial_mutex);

	return ret;
}

/*
 * sync_serial_write - copy user data into the circular output buffer and
 * kick the transmitter
 *
 * count is clamped to the free space (a short write, never blocking for
 * room).  The copy may be split in two when the free region wraps.  If
 * the transmitter is idle it is started either by writing the first word
 * (manual mode, then enabling the tx-ready IRQ) or by start_dma().
 * Blocking callers then sleep on out_wait_q until woken by the tx path.
 */
static ssize_t sync_serial_write(struct file *file, const char *buf,
	size_t count, loff_t *ppos)
{
	int dev = MINOR(file->f_dentry->d_inode->i_rdev);
	DECLARE_WAITQUEUE(wait, current);
	struct sync_port *port;
	unsigned long flags;
	unsigned long c, c1;
	unsigned long free_outp;
	unsigned long outp;
	unsigned long out_buffer;

	if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled) {
		DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu (%d/%d)\n",
		port->port_nbr, count, port->out_count, OUT_BUFFER_SIZE));
	/* Space to end of buffer */
	/*
	 * out_buffer <c1>012345<-   c    ->OUT_BUFFER_SIZE
	 *            outp^ +out_count
	 *                        ^free_outp
	 * out_buffer 45<-     c      ->0123OUT_BUFFER_SIZE
	 *             +out_count   outp^
	 *              free_outp
	 */

	/* Read variables that may be updated by interrupts */
	local_irq_save(flags);
	if (count > OUT_BUFFER_SIZE - port->out_count)
		count = OUT_BUFFER_SIZE - port->out_count;

	outp = (unsigned long)port->outp;
	free_outp = outp + port->out_count;
	local_irq_restore(flags);
	out_buffer = (unsigned long)port->out_buffer;

	/* Find out where and how much to write */
	if (free_outp >= out_buffer + OUT_BUFFER_SIZE)
		free_outp -= OUT_BUFFER_SIZE;
	if (free_outp >= outp)
		c = out_buffer + OUT_BUFFER_SIZE - free_outp;
	else
		c = outp - free_outp;
	if (c > count)
		c = count;

	DEBUGWRITE(printk(KERN_DEBUG "w op %08lX fop %08lX c %lu\n",
		outp, free_outp, c));
	if (copy_from_user((void *)free_outp, buf, c))
		return -EFAULT;

	/* Second part of a wrapped copy, starting at the buffer base */
	if (c != count) {
		buf += c;
		c1 = count - c;
		DEBUGWRITE(printk(KERN_DEBUG "w2 fi %lu c %lu c1 %lu\n",
			free_outp-out_buffer, c, c1));
		if (copy_from_user((void *)out_buffer, buf, c1))
			return -EFAULT;
	}
	local_irq_save(flags);
	port->out_count += count;
	local_irq_restore(flags);

	/* Make sure transmitter/receiver is running */
	if (!port->started) {
		SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL, clk_halt,
			running);
		SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL, tr_enable,
			enable);
		SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL, rec_enable,
			enable);
		port->started = 1;
	}
	*port->ctrl_data = port->ctrl_data_shadow;

	if (file->f_flags & O_NONBLOCK) {
		local_irq_save(flags);
		if (!port->tr_running) {
			if (!port->use_dma) {
				/* Start sender by writing data */
				send_word(port);
				/* and enable transmitter ready IRQ */
				*R_IRQ_MASK1_SET = 1 <<
					port->transmitter_ready_bit;
			} else
				start_dma(port,
					(unsigned char *volatile)port->outp,
					c);
		}
		local_irq_restore(flags);
		DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu NB\n",
			port->port_nbr, count));
		return count;
	}

	/* Sleep until all sent */
	add_wait_queue(&port->out_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	local_irq_save(flags);
	if (!port->tr_running) {
		if (!port->use_dma) {
			/* Start sender by writing data */
			send_word(port);
			/* and enable transmitter ready IRQ */
			*R_IRQ_MASK1_SET = 1 << port->transmitter_ready_bit;
		} else
			start_dma(port, port->outp, c);
	}
	local_irq_restore(flags);
	schedule();
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&port->out_wait_q, &wait);
	if (signal_pending(current))
		return -EINTR;

	DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
		port->port_nbr, count));
	return count;
}

/*
 * sync_serial_read - copy received bytes to userspace
 *
 * Starts the receiver if needed, then waits until data is available
 * (or returns -EAGAIN for O_NONBLOCK).  "Lazy read": at most the
 * contiguous run up to the wrap point is returned per call, so a
 * single copy_to_user suffices; the caller simply reads again.
 * NOTE(review): interruptible_sleep_on() is the old racy sleep
 * primitive (wakeup between the avail-check and the sleep can be
 * missed) -- modern code would use wait_event_interruptible().
 */
static ssize_t sync_serial_read(struct file *file, char *buf,
	size_t count, loff_t *ppos)
{
	int dev = MINOR(file->f_dentry->d_inode->i_rdev);
	int avail;
	struct sync_port *port;
	unsigned char *start;
	unsigned char *end;
	unsigned long flags;

	if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled) {
		DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	DEBUGREAD(printk(KERN_DEBUG "R%d c %d ri %lu wi %lu /%lu\n",
		dev, count,
		port->readp - port->flip,
		port->writep - port->flip, port->in_buffer_size));

	if (!port->started) {
		SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL, clk_halt,
			running);
		SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL, tr_enable,
			enable);
		SETS(port->ctrl_data_shadow, R_SYNC_SERIAL1_CTRL, rec_enable,
			enable);
		port->started = 1;
	}
	*port->ctrl_data = port->ctrl_data_shadow;

	/* Calculate number of available bytes */
	/* Save pointers to avoid that they are modified by interrupt */
	local_irq_save(flags);
	start = (unsigned char *)port->readp; /* cast away volatile */
	end = (unsigned char *)port->writep; /* cast away volatile */
	local_irq_restore(flags);
	while (start == end && !port->full) {
		/* No data */
		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		interruptible_sleep_on(&port->in_wait_q);
		if (signal_pending(current))
			return -EINTR;

		local_irq_save(flags);
		start = (unsigned char *)port->readp;
		/* cast away volatile */
		end = (unsigned char *)port->writep;
		/* cast away volatile */
		local_irq_restore(flags);
	}

	/* Lazy read, never return wrapped data. */
	if (port->full)
		avail = port->in_buffer_size;
	else if (end > start)
		avail = end - start;
	else
		avail = port->flip + port->in_buffer_size - start;

	count = count > avail ? avail : count;
	if (copy_to_user(buf, start, count))
		return -EFAULT;
	/* Disable interrupts while updating readp */
	local_irq_save(flags);
	port->readp += count;
	/* Wrap? */
	if (port->readp >= port->flip + port->in_buffer_size)
		port->readp = port->flip;
	port->full = 0;
	local_irq_restore(flags);
	DEBUGREAD(printk(KERN_DEBUG "r %d\n", count));
	return count;
}

/*
 * send_word - manual (non-DMA) transmit of one word
 *
 * Pops 1-4 bytes from the output ring according to the configured
 * wordsize, writes them to the data-out register, and wraps outp.
 * Called with the tx-ready IRQ as the pacing mechanism.
 */
static void send_word(struct sync_port *port)
{
	switch (IO_EXTRACT(R_SYNC_SERIAL1_CTRL, wordsize,
			port->ctrl_data_shadow)) {
	case IO_STATE_VALUE(R_SYNC_SERIAL1_CTRL, wordsize, size8bit):
		port->out_count--;
		*port->data_out = *port->outp++;
		if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
			port->outp = port->out_buffer;
		break;
	case IO_STATE_VALUE(R_SYNC_SERIAL1_CTRL, wordsize, size12bit):
	{
		/* 12-bit data is packed big-endian in two buffer bytes */
		int data = (*port->outp++) << 8;
		data |= *port->outp++;
		port->out_count -= 2;
		*port->data_out = data;
		if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
			port->outp = port->out_buffer;
		break;
	}
	case IO_STATE_VALUE(R_SYNC_SERIAL1_CTRL, wordsize, size16bit):
		port->out_count -= 2;
		*port->data_out = *(unsigned short *)port->outp;
		port->outp += 2;
		if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
			port->outp = port->out_buffer;
		break;
	case IO_STATE_VALUE(R_SYNC_SERIAL1_CTRL, wordsize, size24bit):
		port->out_count -= 3;
		*port->data_out = *(unsigned int *)port->outp;
		port->outp += 3;
		if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
			port->outp = port->out_buffer;
		break;
	case IO_STATE_VALUE(R_SYNC_SERIAL1_CTRL, wordsize, size32bit):
		port->out_count -= 4;
		*port->data_out = *(unsigned int *)port->outp;
		port->outp += 4;
		if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
			port->outp = port->out_buffer;
		break;
	}
}

/*
 * start_dma - start an output DMA transfer of count bytes at data
 *
 * Uses the single out_descr (one transfer in flight at a time);
 * tr_interrupt() re-arms with the next chunk when this one ends.
 */
static void start_dma(struct sync_port *port, const char *data, int count)
{
	port->tr_running = 1;
	port->out_descr.hw_len = 0;
	port->out_descr.next = 0;
	port->out_descr.ctrl = d_eol | d_eop; /* No d_wait to avoid glitches */
	port->out_descr.sw_len = count;
	port->out_descr.buf = virt_to_phys(data);
	port->out_descr.status = 0;

	*port->output_dma_first = virt_to_phys(&port->out_descr);
	*port->output_dma_cmd = IO_STATE(R_DMA_CH0_CMD, cmd, start);
	DEBUGTXINT(printk(KERN_DEBUG "dma %08lX c %d\n",
		(unsigned long)data, count));
}

static void
start_dma_in(struct sync_port *port)
{
	/*
	 * Build the circular RX descriptor chain: NUM_IN_DESCR descriptors
	 * of inbufchunk bytes each, every one raising a descriptor IRQ
	 * (d_int), with the last linked back to the first and marked d_eol.
	 * Then point the DMA channel at the chain and start it.
	 */
	int i;
	unsigned long buf;
	port->writep = port->flip;

	if (port->writep > port->flip + port->in_buffer_size) {
		panic("Offset too large in sync serial driver\n");
		return;
	}
	buf = virt_to_phys(port->in_buffer);
	for (i = 0; i < NUM_IN_DESCR; i++) {
		port->in_descr[i].sw_len = port->inbufchunk;
		port->in_descr[i].ctrl = d_int;
		port->in_descr[i].next = virt_to_phys(&port->in_descr[i+1]);
		port->in_descr[i].buf = buf;
		port->in_descr[i].hw_len = 0;
		port->in_descr[i].status = 0;
		port->in_descr[i].fifo_len = 0;
		buf += port->inbufchunk;
		prepare_rx_descriptor(&port->in_descr[i]);
	}
	/* Link the last descriptor to the first */
	port->in_descr[i-1].next = virt_to_phys(&port->in_descr[0]);
	port->in_descr[i-1].ctrl |= d_eol;
	port->next_rx_desc = &port->in_descr[0];
	port->prev_rx_desc = &port->in_descr[NUM_IN_DESCR - 1];
	*port->input_dma_first = virt_to_phys(port->next_rx_desc);
	*port->input_dma_cmd = IO_STATE(R_DMA_CH0_CMD, cmd, start);
}

#ifdef SYNC_SER_DMA

/*
 * tr_interrupt - TX DMA end-of-packet handler
 *
 * For each DMA port whose output IRQ is pending: clear the IRQ,
 * account the bytes sent (sw_len unless the transfer stopped early,
 * then hw_len), advance/wrap outp, and either start the next
 * contiguous chunk or mark the transmitter idle.  Writers sleeping
 * in sync_serial_write() are woken in both cases.
 */
static irqreturn_t tr_interrupt(int irq, void *dev_id)
{
	unsigned long ireg = *R_IRQ_MASK2_RD;
	struct etrax_dma_descr *descr;
	unsigned int sentl;
	int handled = 0;
	int i;

	for (i = 0; i < NUMBER_OF_PORTS; i++) {
		struct sync_port *port = &ports[i];
		if (!port->enabled || !port->use_dma)
			continue;

		/* IRQ active for the port? */
		if (!(ireg & (1 << port->output_dma_bit)))
			continue;

		handled = 1;

		/* Clear IRQ */
		*port->output_dma_clr_irq =
			IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do) |
			IO_STATE(R_DMA_CH0_CLR_INTR, clr_descr, do);

		descr = &port->out_descr;
		if (!(descr->status & d_stop))
			sentl = descr->sw_len;
		else
			/* Otherwise find amount of data sent here */
			sentl = descr->hw_len;

		port->out_count -= sentl;
		port->outp += sentl;
		if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
			port->outp = port->out_buffer;
		if (port->out_count) {
			/* Next chunk: contiguous run up to the wrap point */
			int c = port->out_buffer + OUT_BUFFER_SIZE -
				port->outp;
			if (c > port->out_count)
				c = port->out_count;
			DEBUGTXINT(printk(KERN_DEBUG
				"tx_int DMAWRITE %i %i\n", sentl, c));
			start_dma(port, port->outp, c);
		} else {
			DEBUGTXINT(printk(KERN_DEBUG
				"tx_int DMA stop %i\n", sentl));
			port->tr_running = 0;
		}
		/* wake up the waiting process */
		wake_up_interruptible(&port->out_wait_q);
	}
	return IRQ_RETVAL(handled);
} /* tr_interrupt */

/*
 * rx_interrupt - RX DMA descriptor handler
 *
 * Drains every descriptor the hardware has completed (the channel's
 * current descriptor register vs. our next_rx_desc), copying each
 * inbufchunk into the circular flip buffer (split copy when it wraps,
 * setting port->full on overrun), then recycles the descriptor by
 * moving the d_eol mark forward and restarting the channel.
 */
static irqreturn_t rx_interrupt(int irq, void *dev_id)
{
	unsigned long ireg = *R_IRQ_MASK2_RD;
	int i;
	int handled = 0;

	for (i = 0; i < NUMBER_OF_PORTS; i++) {
		struct sync_port *port = &ports[i];

		if (!port->enabled || !port->use_dma)
			continue;

		if (!(ireg & (1 << port->input_dma_descr_bit)))
			continue;

		/* Descriptor interrupt */
		handled = 1;
		while (*port->input_dma_descr !=
				virt_to_phys(port->next_rx_desc)) {
			if (port->writep + port->inbufchunk >
					port->flip + port->in_buffer_size) {
				/* Chunk straddles the end of the flip
				 * buffer: copy in two pieces */
				int first_size = port->flip +
					port->in_buffer_size - port->writep;
				memcpy(port->writep,
					phys_to_virt(port->next_rx_desc->buf),
					first_size);
				memcpy(port->flip,
					phys_to_virt(port->next_rx_desc->buf +
					first_size),
					port->inbufchunk - first_size);
				port->writep = port->flip +
					port->inbufchunk - first_size;
			} else {
				memcpy(port->writep,
					phys_to_virt(port->next_rx_desc->buf),
					port->inbufchunk);
				port->writep += port->inbufchunk;
				if (port->writep >= port->flip +
						port->in_buffer_size)
					port->writep = port->flip;
			}
			if (port->writep == port->readp)
				port->full = 1;

			prepare_rx_descriptor(port->next_rx_desc);
			/* Advance the end-of-list mark so the hardware can
			 * keep filling behind us */
			port->next_rx_desc->ctrl |= d_eol;
			port->prev_rx_desc->ctrl &= ~d_eol;
			port->prev_rx_desc = phys_to_virt((unsigned)
				port->next_rx_desc);
			port->next_rx_desc = phys_to_virt((unsigned)
				port->next_rx_desc->next);
			/* Wake up the waiting process */
			wake_up_interruptible(&port->in_wait_q);
			*port->input_dma_cmd = IO_STATE(R_DMA_CH1_CMD,
				cmd, restart);
			/* DMA has reached end of descriptor */
			*port->input_dma_clr_irq =
				IO_STATE(R_DMA_CH0_CLR_INTR, clr_descr, do);
		}
	}
	return IRQ_RETVAL(handled);
} /* rx_interrupt */
#endif /* SYNC_SER_DMA */

#ifdef SYNC_SER_MANUAL

/*
 * manual_interrupt - shared IRQ handler for non-DMA operation
 *
 * RX side: on data-available, reads one word (unpacked into 1-4 bytes
 * per the configured wordsize) into the flip buffer, discarding the
 * oldest byte on overrun, and wakes readers once a full inbufchunk has
 * accumulated.  TX side: on transmitter-ready, sends the next word or,
 * when the ring is empty, masks the IRQ and wakes writers.
 */
static irqreturn_t manual_interrupt(int irq, void *dev_id)
{
	int i;
	int handled = 0;

	for (i = 0; i < NUMBER_OF_PORTS; i++) {
		struct sync_port *port = &ports[i];

		if (!port->enabled || port->use_dma)
			continue;

		/* Data received? */
		if (*R_IRQ_MASK1_RD & (1 << port->data_avail_bit)) {
			handled = 1;
			/* Read data */
			switch (port->ctrl_data_shadow &
				IO_MASK(R_SYNC_SERIAL1_CTRL, wordsize)) {
			case IO_STATE(R_SYNC_SERIAL1_CTRL, wordsize,
					size8bit):
				*port->writep++ =
					*(volatile char *)port->data_in;
				break;
			case IO_STATE(R_SYNC_SERIAL1_CTRL, wordsize,
					size12bit):
			{
				int data = *(unsigned short *)port->data_in;
				*port->writep = (data & 0x0ff0) >> 4;
				*(port->writep + 1) = data & 0x0f;
				port->writep += 2;
				break;
			}
			case IO_STATE(R_SYNC_SERIAL1_CTRL, wordsize,
					size16bit):
				*(unsigned short *)port->writep =
					*(volatile unsigned short *)
						port->data_in;
				port->writep += 2;
				break;
			case IO_STATE(R_SYNC_SERIAL1_CTRL, wordsize,
					size24bit):
				*(unsigned int *)port->writep =
					*port->data_in;
				port->writep += 3;
				break;
			case IO_STATE(R_SYNC_SERIAL1_CTRL, wordsize,
					size32bit):
				*(unsigned int *)port->writep =
					*port->data_in;
				port->writep += 4;
				break;
			}

			/* Wrap? */
			if (port->writep >= port->flip +
					port->in_buffer_size)
				port->writep = port->flip;
			if (port->writep == port->readp) {
				/* Receive buffer overrun, discard oldest */
				port->readp++;
				/* Wrap? */
				if (port->readp >= port->flip +
						port->in_buffer_size)
					port->readp = port->flip;
			}
			if (sync_data_avail(port) >= port->inbufchunk) {
				/* Wake up application */
				wake_up_interruptible(&port->in_wait_q);
			}
		}

		/* Transmitter ready? */
		if (*R_IRQ_MASK1_RD &
				(1 << port->transmitter_ready_bit)) {
			if (port->out_count > 0) {
				/* More data to send */
				send_word(port);
			} else {
				/* Transmission finished */
				/* Turn off IRQ */
				*R_IRQ_MASK1_CLR = 1 <<
					port->transmitter_ready_bit;
				/* Wake up application */
				wake_up_interruptible(&port->out_wait_q);
			}
		}
	}
	return IRQ_RETVAL(handled);
}
#endif

module_init(etrax_sync_serial_init);
gpl-2.0
Marvell-Semi/PXA168_kernel
net/dccp/ccids/lib/loss_interval.c
4923
5653
/*
 * Copyright (c) 2007   The University of Aberdeen, Scotland, UK
 * Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
 * Copyright (c) 2005-7 Ian McDonald <ian.mcdonald@jandi.co.nz>
 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <net/sock.h>
#include "tfrc.h"

static struct kmem_cache  *tfrc_lh_slab  __read_mostly;
/* Loss Interval weights from [RFC 3448, 5.4], scaled by 10 */
static const int tfrc_lh_weights[NINTERVAL] = { 10, 10, 10, 10, 8, 6, 4, 2 };

/* implements LIFO semantics on the array */
static inline u8 LIH_INDEX(const u8 ctr)
{
	/* lh->counter grows monotonically; this maps it onto the fixed-size
	 * ring so the most recent interval sits at the highest index */
	return LIH_SIZE - 1 - (ctr % LIH_SIZE);
}

/* the `counter' index always points at the next entry to be populated */
static inline struct tfrc_loss_interval *tfrc_lh_peek(struct tfrc_loss_hist *lh)
{
	/* Most recent ("open") loss interval, or NULL when history empty */
	return lh->counter ? lh->ring[LIH_INDEX(lh->counter - 1)] : NULL;
}

/* given i with 0 <= i <= k, return I_i as per the rfc3448bis notation */
static inline u32 tfrc_lh_get_interval(struct tfrc_loss_hist *lh, const u8 i)
{
	BUG_ON(i >= lh->counter);
	return lh->ring[LIH_INDEX(lh->counter - i - 1)]->li_length;
}

/*
 * On-demand allocation and de-allocation of entries
 */
static struct tfrc_loss_interval *tfrc_lh_demand_next(struct tfrc_loss_hist *lh)
{
	/* Ring slots are allocated lazily and then reused; may return NULL
	 * when the (GFP_ATOMIC) allocation fails */
	if (lh->ring[LIH_INDEX(lh->counter)] == NULL)
		lh->ring[LIH_INDEX(lh->counter)] = kmem_cache_alloc(tfrc_lh_slab,
								    GFP_ATOMIC);
	return lh->ring[LIH_INDEX(lh->counter)];
}

/* tfrc_lh_cleanup - free all ring entries and reset the history */
void tfrc_lh_cleanup(struct tfrc_loss_hist *lh)
{
	if (!tfrc_lh_is_initialised(lh))
		return;

	for (lh->counter = 0; lh->counter < LIH_SIZE; lh->counter++)
		if (lh->ring[LIH_INDEX(lh->counter)] != NULL) {
			kmem_cache_free(tfrc_lh_slab,
					lh->ring[LIH_INDEX(lh->counter)]);
			lh->ring[LIH_INDEX(lh->counter)] = NULL;
		}
}

/*
 * tfrc_lh_calc_i_mean - weighted average loss interval [RFC 3448, 5.4]
 *
 * Computes both I_tot0 (including the open interval I_0) and I_tot1
 * (excluding it, with weights shifted by one) and takes the maximum,
 * so a long loss-free open interval can only improve the average.
 * No-op while fewer than two intervals exist (k <= 0).
 */
static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh)
{
	u32 i_i, i_tot0 = 0, i_tot1 = 0, w_tot = 0;
	int i, k = tfrc_lh_length(lh) - 1; /* k is as in rfc3448bis, 5.4 */

	if (k <= 0)
		return;

	for (i = 0; i <= k; i++) {
		i_i = tfrc_lh_get_interval(lh, i);

		if (i < k) {
			i_tot0 += i_i * tfrc_lh_weights[i];
			w_tot  += tfrc_lh_weights[i];
		}
		if (i > 0)
			i_tot1 += i_i * tfrc_lh_weights[i-1];
	}

	lh->i_mean = max(i_tot0, i_tot1) / w_tot;
}

/**
 * tfrc_lh_update_i_mean  -  Update the `open' loss interval I_0
 * For recomputing p: returns `true' if p > p_prev  <=>  1/p < 1/p_prev
 */
u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb)
{
	struct tfrc_loss_interval	*cur = tfrc_lh_peek(lh);
	u32 old_i_mean = lh->i_mean;
	s64 len;

	if (cur == NULL)			/* not initialised */
		return 0;

	/* Length of the open interval if skb were its last packet */
	len = dccp_delta_seqno(cur->li_seqno, DCCP_SKB_CB(skb)->dccpd_seq) + 1;

	if (len - (s64)cur->li_length <= 0)	/* duplicate or reordered */
		return 0;

	if (SUB16(dccp_hdr(skb)->dccph_ccval, cur->li_ccval) > 4)
		/*
		 * Implements RFC 4342, 10.2:
		 * If a packet S (skb) exists whose seqno comes `after' the one
		 * starting the current loss interval (cur) and if the modulo-16
		 * distance from C(cur) to C(S) is greater than 4, consider all
		 * subsequent packets as belonging to a new loss interval. This
		 * test is necessary since CCVal may wrap between intervals.
		 */
		cur->li_is_closed = 1;

	if (tfrc_lh_length(lh) == 1)		/* due to RFC 3448, 6.3.1 */
		return 0;

	cur->li_length = len;
	tfrc_lh_calc_i_mean(lh);

	/* i_mean dropped  <=>  loss rate p increased */
	return lh->i_mean < old_i_mean;
}

/* Determine if `new_loss' does begin a new loss interval [RFC 4342, 10.2] */
static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur,
				     struct tfrc_rx_hist_entry *new_loss)
{
	return	dccp_delta_seqno(cur->li_seqno, new_loss->tfrchrx_seqno) > 0 &&
		(cur->li_is_closed || SUB16(new_loss->tfrchrx_ccval,
					    cur->li_ccval) > 4);
}

/**
 * tfrc_lh_interval_add  -  Insert new record into the Loss Interval database
 * @lh:		   Loss Interval database
 * @rh:		   Receive history containing a fresh loss event
 * @calc_first_li: Caller-dependent routine to compute length of first interval
 * @sk:		   Used by @calc_first_li in caller-specific way (subtyping)
 *
 * Updates I_mean and returns 1 if a new interval has in fact been added to @lh.
 */
int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh,
			 u32 (*calc_first_li)(struct sock *), struct sock *sk)
{
	struct tfrc_loss_interval *cur = tfrc_lh_peek(lh), *new;

	/* Loss belongs to the still-open interval: nothing to add */
	if (cur != NULL && !tfrc_lh_is_new_loss(cur, tfrc_rx_hist_loss_prev(rh)))
		return 0;

	new = tfrc_lh_demand_next(lh);
	if (unlikely(new == NULL)) {
		DCCP_CRIT("Cannot allocate/add loss record.");
		return 0;
	}

	new->li_seqno	  = tfrc_rx_hist_loss_prev(rh)->tfrchrx_seqno;
	new->li_ccval	  = tfrc_rx_hist_loss_prev(rh)->tfrchrx_ccval;
	new->li_is_closed = 0;

	if (++lh->counter == 1)
		/* Very first interval: length comes from the caller's
		 * initial-loss-rate computation */
		lh->i_mean = new->li_length = (*calc_first_li)(sk);
	else {
		/* Close the previous interval and open the new one */
		cur->li_length = dccp_delta_seqno(cur->li_seqno, new->li_seqno);
		new->li_length = dccp_delta_seqno(new->li_seqno,
				  tfrc_rx_hist_last_rcv(rh)->tfrchrx_seqno) + 1;
		/* Keep the monotonic counter bounded while preserving its
		 * position within the ring (LIH_INDEX is mod LIH_SIZE) */
		if (lh->counter > (2*LIH_SIZE))
			lh->counter -= LIH_SIZE;

		tfrc_lh_calc_i_mean(lh);
	}
	return 1;
}

/* tfrc_li_init - create the slab cache backing loss-interval records */
int __init tfrc_li_init(void)
{
	tfrc_lh_slab = kmem_cache_create("tfrc_li_hist",
					 sizeof(struct tfrc_loss_interval), 0,
					 SLAB_HWCACHE_ALIGN, NULL);
	return tfrc_lh_slab == NULL ? -ENOBUFS : 0;
}

void tfrc_li_exit(void)
{
	if (tfrc_lh_slab != NULL) {
		kmem_cache_destroy(tfrc_lh_slab);
		tfrc_lh_slab = NULL;
	}
}
gpl-2.0
DogukanErgun/android_kernel_htc_shooteru_3.4
drivers/watchdog/softdog.c
7227
5343
/* * SoftDog: A Software Watchdog Device * * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>, * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide * warranty for any of this software. This material is provided * "AS-IS" and at no charge. * * (c) Copyright 1995 Alan Cox <alan@lxorguk.ukuu.org.uk> * * Software only watchdog driver. Unlike its big brother the WDT501P * driver this won't always recover a failed machine. * * 03/96: Angelo Haritsis <ah@doc.ic.ac.uk> : * Modularised. * Added soft_margin; use upon insmod to change the timer delay. * NB: uses same minor as wdt (WATCHDOG_MINOR); we could use separate * minors. * * 19980911 Alan Cox * Made SMP safe for 2.3.x * * 20011127 Joel Becker (jlbec@evilplan.org> * Added soft_noboot; Allows testing the softdog trigger without * requiring a recompile. * Added WDIOC_GETTIMEOUT and WDIOC_SETTIMOUT. * * 20020530 Joel Becker <joel.becker@oracle.com> * Added Matt Domsch's nowayout module option. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/timer.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/kernel.h> #define TIMER_MARGIN 60 /* Default is 60 seconds */ static unsigned int soft_margin = TIMER_MARGIN; /* in seconds */ module_param(soft_margin, uint, 0); MODULE_PARM_DESC(soft_margin, "Watchdog soft_margin in seconds. 
(0 < soft_margin < 65536, default=" __MODULE_STRING(TIMER_MARGIN) ")"); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); static int soft_noboot = 0; module_param(soft_noboot, int, 0); MODULE_PARM_DESC(soft_noboot, "Softdog action, set to 1 to ignore reboots, 0 to reboot (default=0)"); static int soft_panic; module_param(soft_panic, int, 0); MODULE_PARM_DESC(soft_panic, "Softdog action, set to 1 to panic, 0 to reboot (default=0)"); /* * Our timer */ static void watchdog_fire(unsigned long); static struct timer_list watchdog_ticktock = TIMER_INITIALIZER(watchdog_fire, 0, 0); /* * If the timer expires.. */ static void watchdog_fire(unsigned long data) { if (soft_noboot) pr_crit("Triggered - Reboot ignored\n"); else if (soft_panic) { pr_crit("Initiating panic\n"); panic("Software Watchdog Timer expired"); } else { pr_crit("Initiating system reboot\n"); emergency_restart(); pr_crit("Reboot didn't ?????\n"); } } /* * Softdog operations */ static int softdog_ping(struct watchdog_device *w) { mod_timer(&watchdog_ticktock, jiffies+(w->timeout*HZ)); return 0; } static int softdog_stop(struct watchdog_device *w) { del_timer(&watchdog_ticktock); return 0; } static int softdog_set_timeout(struct watchdog_device *w, unsigned int t) { w->timeout = t; return 0; } /* * Notifier for system down */ static int softdog_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) /* Turn the WDT off */ softdog_stop(NULL); return NOTIFY_DONE; } /* * Kernel Interfaces */ static struct notifier_block softdog_notifier = { .notifier_call = softdog_notify_sys, }; static struct watchdog_info softdog_info = { .identity = "Software Watchdog", .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, }; static struct watchdog_ops softdog_ops = { .owner = THIS_MODULE, .start = 
softdog_ping, .stop = softdog_stop, .ping = softdog_ping, .set_timeout = softdog_set_timeout, }; static struct watchdog_device softdog_dev = { .info = &softdog_info, .ops = &softdog_ops, .min_timeout = 1, .max_timeout = 0xFFFF }; static int __init watchdog_init(void) { int ret; /* Check that the soft_margin value is within it's range; if not reset to the default */ if (soft_margin < 1 || soft_margin > 65535) { pr_info("soft_margin must be 0 < soft_margin < 65536, using %d\n", TIMER_MARGIN); return -EINVAL; } softdog_dev.timeout = soft_margin; watchdog_set_nowayout(&softdog_dev, nowayout); ret = register_reboot_notifier(&softdog_notifier); if (ret) { pr_err("cannot register reboot notifier (err=%d)\n", ret); return ret; } ret = watchdog_register_device(&softdog_dev); if (ret) { unregister_reboot_notifier(&softdog_notifier); return ret; } pr_info("Software Watchdog Timer: 0.08 initialized. soft_noboot=%d soft_margin=%d sec soft_panic=%d (nowayout=%d)\n", soft_noboot, soft_margin, soft_panic, nowayout); return 0; } static void __exit watchdog_exit(void) { watchdog_unregister_device(&softdog_dev); unregister_reboot_notifier(&softdog_notifier); } module_init(watchdog_init); module_exit(watchdog_exit); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("Software Watchdog Device Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
aj700/nxzimg
arch/powerpc/platforms/83xx/mpc832x_rdb.c
8763
5852
/* * arch/powerpc/platforms/83xx/mpc832x_rdb.c * * Copyright (C) Freescale Semiconductor, Inc. 2007. All rights reserved. * * Description: * MPC832x RDB board specific routines. * This file is based on mpc832x_mds.c and mpc8313_rdb.c * Author: Michael Barkowski <michael.barkowski@freescale.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/spi/spi.h> #include <linux/spi/mmc_spi.h> #include <linux/mmc/host.h> #include <linux/of_platform.h> #include <linux/fsl_devices.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> #include <asm/qe.h> #include <asm/qe_ic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc83xx.h" #undef DEBUG #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) 
#endif #ifdef CONFIG_QUICC_ENGINE static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, struct spi_board_info *board_infos, unsigned int num_board_infos, void (*cs_control)(struct spi_device *dev, bool on)) { struct device_node *np; unsigned int i = 0; for_each_compatible_node(np, type, compatible) { int ret; unsigned int j; const void *prop; struct resource res[2]; struct platform_device *pdev; struct fsl_spi_platform_data pdata = { .cs_control = cs_control, }; memset(res, 0, sizeof(res)); pdata.sysclk = sysclk; prop = of_get_property(np, "reg", NULL); if (!prop) goto err; pdata.bus_num = *(u32 *)prop; prop = of_get_property(np, "cell-index", NULL); if (prop) i = *(u32 *)prop; prop = of_get_property(np, "mode", NULL); if (prop && !strcmp(prop, "cpu-qe")) pdata.flags = SPI_QE_CPU_MODE; for (j = 0; j < num_board_infos; j++) { if (board_infos[j].bus_num == pdata.bus_num) pdata.max_chipselect++; } if (!pdata.max_chipselect) continue; ret = of_address_to_resource(np, 0, &res[0]); if (ret) goto err; ret = of_irq_to_resource(np, 0, &res[1]); if (ret == NO_IRQ) goto err; pdev = platform_device_alloc("mpc83xx_spi", i); if (!pdev) goto err; ret = platform_device_add_data(pdev, &pdata, sizeof(pdata)); if (ret) goto unreg; ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); if (ret) goto unreg; ret = platform_device_add(pdev); if (ret) goto unreg; goto next; unreg: platform_device_del(pdev); err: pr_err("%s: registration failed\n", np->full_name); next: i++; } return i; } static int __init fsl_spi_init(struct spi_board_info *board_infos, unsigned int num_board_infos, void (*cs_control)(struct spi_device *spi, bool on)) { u32 sysclk = -1; int ret; /* SPI controller is either clocked from QE or SoC clock */ sysclk = get_brgfreq(); if (sysclk == -1) { sysclk = fsl_get_sys_freq(); if (sysclk == -1) return -ENODEV; } ret = of_fsl_spi_probe(NULL, "fsl,spi", sysclk, board_infos, num_board_infos, cs_control); if (!ret) of_fsl_spi_probe("spi", 
"fsl_spi", sysclk, board_infos, num_board_infos, cs_control); return spi_register_board_info(board_infos, num_board_infos); } static void mpc83xx_spi_cs_control(struct spi_device *spi, bool on) { pr_debug("%s %d %d\n", __func__, spi->chip_select, on); par_io_data_set(3, 13, on); } static struct mmc_spi_platform_data mpc832x_mmc_pdata = { .ocr_mask = MMC_VDD_33_34, }; static struct spi_board_info mpc832x_spi_boardinfo = { .bus_num = 0x4c0, .chip_select = 0, .max_speed_hz = 50000000, .modalias = "mmc_spi", .platform_data = &mpc832x_mmc_pdata, }; static int __init mpc832x_spi_init(void) { par_io_config_pin(3, 0, 3, 0, 1, 0); /* SPI1 MOSI, I/O */ par_io_config_pin(3, 1, 3, 0, 1, 0); /* SPI1 MISO, I/O */ par_io_config_pin(3, 2, 3, 0, 1, 0); /* SPI1 CLK, I/O */ par_io_config_pin(3, 3, 2, 0, 1, 0); /* SPI1 SEL, I */ par_io_config_pin(3, 13, 1, 0, 0, 0); /* !SD_CS, O */ par_io_config_pin(3, 14, 2, 0, 0, 0); /* SD_INSERT, I */ par_io_config_pin(3, 15, 2, 0, 0, 0); /* SD_PROTECT,I */ /* * Don't bother with legacy stuff when device tree contains * mmc-spi-slot node. 
*/ if (of_find_compatible_node(NULL, NULL, "mmc-spi-slot")) return 0; return fsl_spi_init(&mpc832x_spi_boardinfo, 1, mpc83xx_spi_cs_control); } machine_device_initcall(mpc832x_rdb, mpc832x_spi_init); #endif /* CONFIG_QUICC_ENGINE */ /* ************************************************************************ * * Setup the architecture * */ static void __init mpc832x_rdb_setup_arch(void) { #if defined(CONFIG_QUICC_ENGINE) struct device_node *np; #endif if (ppc_md.progress) ppc_md.progress("mpc832x_rdb_setup_arch()", 0); mpc83xx_setup_pci(); #ifdef CONFIG_QUICC_ENGINE qe_reset(); if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { par_io_init(np); of_node_put(np); for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;) par_io_of_config(np); } #endif /* CONFIG_QUICC_ENGINE */ } machine_device_initcall(mpc832x_rdb, mpc83xx_declare_of_platform_devices); /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init mpc832x_rdb_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "MPC832xRDB"); } define_machine(mpc832x_rdb) { .name = "MPC832x RDB", .probe = mpc832x_rdb_probe, .setup_arch = mpc832x_rdb_setup_arch, .init_IRQ = mpc83xx_ipic_and_qe_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, };
gpl-2.0
ysat0/linux-ysato
arch/powerpc/platforms/83xx/mpc832x_rdb.c
8763
5852
/* * arch/powerpc/platforms/83xx/mpc832x_rdb.c * * Copyright (C) Freescale Semiconductor, Inc. 2007. All rights reserved. * * Description: * MPC832x RDB board specific routines. * This file is based on mpc832x_mds.c and mpc8313_rdb.c * Author: Michael Barkowski <michael.barkowski@freescale.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/spi/spi.h> #include <linux/spi/mmc_spi.h> #include <linux/mmc/host.h> #include <linux/of_platform.h> #include <linux/fsl_devices.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> #include <asm/qe.h> #include <asm/qe_ic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc83xx.h" #undef DEBUG #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) 
#endif #ifdef CONFIG_QUICC_ENGINE static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, struct spi_board_info *board_infos, unsigned int num_board_infos, void (*cs_control)(struct spi_device *dev, bool on)) { struct device_node *np; unsigned int i = 0; for_each_compatible_node(np, type, compatible) { int ret; unsigned int j; const void *prop; struct resource res[2]; struct platform_device *pdev; struct fsl_spi_platform_data pdata = { .cs_control = cs_control, }; memset(res, 0, sizeof(res)); pdata.sysclk = sysclk; prop = of_get_property(np, "reg", NULL); if (!prop) goto err; pdata.bus_num = *(u32 *)prop; prop = of_get_property(np, "cell-index", NULL); if (prop) i = *(u32 *)prop; prop = of_get_property(np, "mode", NULL); if (prop && !strcmp(prop, "cpu-qe")) pdata.flags = SPI_QE_CPU_MODE; for (j = 0; j < num_board_infos; j++) { if (board_infos[j].bus_num == pdata.bus_num) pdata.max_chipselect++; } if (!pdata.max_chipselect) continue; ret = of_address_to_resource(np, 0, &res[0]); if (ret) goto err; ret = of_irq_to_resource(np, 0, &res[1]); if (ret == NO_IRQ) goto err; pdev = platform_device_alloc("mpc83xx_spi", i); if (!pdev) goto err; ret = platform_device_add_data(pdev, &pdata, sizeof(pdata)); if (ret) goto unreg; ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); if (ret) goto unreg; ret = platform_device_add(pdev); if (ret) goto unreg; goto next; unreg: platform_device_del(pdev); err: pr_err("%s: registration failed\n", np->full_name); next: i++; } return i; } static int __init fsl_spi_init(struct spi_board_info *board_infos, unsigned int num_board_infos, void (*cs_control)(struct spi_device *spi, bool on)) { u32 sysclk = -1; int ret; /* SPI controller is either clocked from QE or SoC clock */ sysclk = get_brgfreq(); if (sysclk == -1) { sysclk = fsl_get_sys_freq(); if (sysclk == -1) return -ENODEV; } ret = of_fsl_spi_probe(NULL, "fsl,spi", sysclk, board_infos, num_board_infos, cs_control); if (!ret) of_fsl_spi_probe("spi", 
"fsl_spi", sysclk, board_infos, num_board_infos, cs_control); return spi_register_board_info(board_infos, num_board_infos); } static void mpc83xx_spi_cs_control(struct spi_device *spi, bool on) { pr_debug("%s %d %d\n", __func__, spi->chip_select, on); par_io_data_set(3, 13, on); } static struct mmc_spi_platform_data mpc832x_mmc_pdata = { .ocr_mask = MMC_VDD_33_34, }; static struct spi_board_info mpc832x_spi_boardinfo = { .bus_num = 0x4c0, .chip_select = 0, .max_speed_hz = 50000000, .modalias = "mmc_spi", .platform_data = &mpc832x_mmc_pdata, }; static int __init mpc832x_spi_init(void) { par_io_config_pin(3, 0, 3, 0, 1, 0); /* SPI1 MOSI, I/O */ par_io_config_pin(3, 1, 3, 0, 1, 0); /* SPI1 MISO, I/O */ par_io_config_pin(3, 2, 3, 0, 1, 0); /* SPI1 CLK, I/O */ par_io_config_pin(3, 3, 2, 0, 1, 0); /* SPI1 SEL, I */ par_io_config_pin(3, 13, 1, 0, 0, 0); /* !SD_CS, O */ par_io_config_pin(3, 14, 2, 0, 0, 0); /* SD_INSERT, I */ par_io_config_pin(3, 15, 2, 0, 0, 0); /* SD_PROTECT,I */ /* * Don't bother with legacy stuff when device tree contains * mmc-spi-slot node. 
*/ if (of_find_compatible_node(NULL, NULL, "mmc-spi-slot")) return 0; return fsl_spi_init(&mpc832x_spi_boardinfo, 1, mpc83xx_spi_cs_control); } machine_device_initcall(mpc832x_rdb, mpc832x_spi_init); #endif /* CONFIG_QUICC_ENGINE */ /* ************************************************************************ * * Setup the architecture * */ static void __init mpc832x_rdb_setup_arch(void) { #if defined(CONFIG_QUICC_ENGINE) struct device_node *np; #endif if (ppc_md.progress) ppc_md.progress("mpc832x_rdb_setup_arch()", 0); mpc83xx_setup_pci(); #ifdef CONFIG_QUICC_ENGINE qe_reset(); if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { par_io_init(np); of_node_put(np); for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;) par_io_of_config(np); } #endif /* CONFIG_QUICC_ENGINE */ } machine_device_initcall(mpc832x_rdb, mpc83xx_declare_of_platform_devices); /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init mpc832x_rdb_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "MPC832xRDB"); } define_machine(mpc832x_rdb) { .name = "MPC832x RDB", .probe = mpc832x_rdb_probe, .setup_arch = mpc832x_rdb_setup_arch, .init_IRQ = mpc83xx_ipic_and_qe_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, };
gpl-2.0
ztemt/A465_5.1_kernel
arch/powerpc/platforms/83xx/mpc832x_rdb.c
8763
5852
/* * arch/powerpc/platforms/83xx/mpc832x_rdb.c * * Copyright (C) Freescale Semiconductor, Inc. 2007. All rights reserved. * * Description: * MPC832x RDB board specific routines. * This file is based on mpc832x_mds.c and mpc8313_rdb.c * Author: Michael Barkowski <michael.barkowski@freescale.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/spi/spi.h> #include <linux/spi/mmc_spi.h> #include <linux/mmc/host.h> #include <linux/of_platform.h> #include <linux/fsl_devices.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> #include <asm/qe.h> #include <asm/qe_ic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc83xx.h" #undef DEBUG #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) 
#endif #ifdef CONFIG_QUICC_ENGINE static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, struct spi_board_info *board_infos, unsigned int num_board_infos, void (*cs_control)(struct spi_device *dev, bool on)) { struct device_node *np; unsigned int i = 0; for_each_compatible_node(np, type, compatible) { int ret; unsigned int j; const void *prop; struct resource res[2]; struct platform_device *pdev; struct fsl_spi_platform_data pdata = { .cs_control = cs_control, }; memset(res, 0, sizeof(res)); pdata.sysclk = sysclk; prop = of_get_property(np, "reg", NULL); if (!prop) goto err; pdata.bus_num = *(u32 *)prop; prop = of_get_property(np, "cell-index", NULL); if (prop) i = *(u32 *)prop; prop = of_get_property(np, "mode", NULL); if (prop && !strcmp(prop, "cpu-qe")) pdata.flags = SPI_QE_CPU_MODE; for (j = 0; j < num_board_infos; j++) { if (board_infos[j].bus_num == pdata.bus_num) pdata.max_chipselect++; } if (!pdata.max_chipselect) continue; ret = of_address_to_resource(np, 0, &res[0]); if (ret) goto err; ret = of_irq_to_resource(np, 0, &res[1]); if (ret == NO_IRQ) goto err; pdev = platform_device_alloc("mpc83xx_spi", i); if (!pdev) goto err; ret = platform_device_add_data(pdev, &pdata, sizeof(pdata)); if (ret) goto unreg; ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); if (ret) goto unreg; ret = platform_device_add(pdev); if (ret) goto unreg; goto next; unreg: platform_device_del(pdev); err: pr_err("%s: registration failed\n", np->full_name); next: i++; } return i; } static int __init fsl_spi_init(struct spi_board_info *board_infos, unsigned int num_board_infos, void (*cs_control)(struct spi_device *spi, bool on)) { u32 sysclk = -1; int ret; /* SPI controller is either clocked from QE or SoC clock */ sysclk = get_brgfreq(); if (sysclk == -1) { sysclk = fsl_get_sys_freq(); if (sysclk == -1) return -ENODEV; } ret = of_fsl_spi_probe(NULL, "fsl,spi", sysclk, board_infos, num_board_infos, cs_control); if (!ret) of_fsl_spi_probe("spi", 
"fsl_spi", sysclk, board_infos, num_board_infos, cs_control); return spi_register_board_info(board_infos, num_board_infos); } static void mpc83xx_spi_cs_control(struct spi_device *spi, bool on) { pr_debug("%s %d %d\n", __func__, spi->chip_select, on); par_io_data_set(3, 13, on); } static struct mmc_spi_platform_data mpc832x_mmc_pdata = { .ocr_mask = MMC_VDD_33_34, }; static struct spi_board_info mpc832x_spi_boardinfo = { .bus_num = 0x4c0, .chip_select = 0, .max_speed_hz = 50000000, .modalias = "mmc_spi", .platform_data = &mpc832x_mmc_pdata, }; static int __init mpc832x_spi_init(void) { par_io_config_pin(3, 0, 3, 0, 1, 0); /* SPI1 MOSI, I/O */ par_io_config_pin(3, 1, 3, 0, 1, 0); /* SPI1 MISO, I/O */ par_io_config_pin(3, 2, 3, 0, 1, 0); /* SPI1 CLK, I/O */ par_io_config_pin(3, 3, 2, 0, 1, 0); /* SPI1 SEL, I */ par_io_config_pin(3, 13, 1, 0, 0, 0); /* !SD_CS, O */ par_io_config_pin(3, 14, 2, 0, 0, 0); /* SD_INSERT, I */ par_io_config_pin(3, 15, 2, 0, 0, 0); /* SD_PROTECT,I */ /* * Don't bother with legacy stuff when device tree contains * mmc-spi-slot node. 
*/ if (of_find_compatible_node(NULL, NULL, "mmc-spi-slot")) return 0; return fsl_spi_init(&mpc832x_spi_boardinfo, 1, mpc83xx_spi_cs_control); } machine_device_initcall(mpc832x_rdb, mpc832x_spi_init); #endif /* CONFIG_QUICC_ENGINE */ /* ************************************************************************ * * Setup the architecture * */ static void __init mpc832x_rdb_setup_arch(void) { #if defined(CONFIG_QUICC_ENGINE) struct device_node *np; #endif if (ppc_md.progress) ppc_md.progress("mpc832x_rdb_setup_arch()", 0); mpc83xx_setup_pci(); #ifdef CONFIG_QUICC_ENGINE qe_reset(); if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { par_io_init(np); of_node_put(np); for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;) par_io_of_config(np); } #endif /* CONFIG_QUICC_ENGINE */ } machine_device_initcall(mpc832x_rdb, mpc83xx_declare_of_platform_devices); /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init mpc832x_rdb_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "MPC832xRDB"); } define_machine(mpc832x_rdb) { .name = "MPC832x RDB", .probe = mpc832x_rdb_probe, .setup_arch = mpc832x_rdb_setup_arch, .init_IRQ = mpc83xx_ipic_and_qe_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, };
gpl-2.0
TeamRegular/android_kernel_tcl_msm8916
lib/libcrc32c.c
12859
2133
/* * CRC32C *@Article{castagnoli-crc, * author = { Guy Castagnoli and Stefan Braeuer and Martin Herrman}, * title = {{Optimization of Cyclic Redundancy-Check Codes with 24 * and 32 Parity Bits}}, * journal = IEEE Transactions on Communication, * year = {1993}, * volume = {41}, * number = {6}, * pages = {}, * month = {June}, *} * Used by the iSCSI driver, possibly others, and derived from the * the iscsi-crc.c module of the linux-iscsi driver at * http://linux-iscsi.sourceforge.net. * * Following the example of lib/crc32, this function is intended to be * flexible and useful for all users. Modules that currently have their * own crc32c, but hopefully may be able to use this one are: * net/sctp (please add all your doco to here if you change to * use this one!) * <endoflist> * * Copyright (c) 2004 Cisco Systems, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/hash.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> static struct crypto_shash *tfm; u32 crc32c(u32 crc, const void *address, unsigned int length) { struct { struct shash_desc shash; char ctx[crypto_shash_descsize(tfm)]; } desc; int err; desc.shash.tfm = tfm; desc.shash.flags = 0; *(u32 *)desc.ctx = crc; err = crypto_shash_update(&desc.shash, address, length); BUG_ON(err); return *(u32 *)desc.ctx; } EXPORT_SYMBOL(crc32c); static int __init libcrc32c_mod_init(void) { tfm = crypto_alloc_shash("crc32c", 0, 0); if (IS_ERR(tfm)) return PTR_ERR(tfm); return 0; } static void __exit libcrc32c_mod_fini(void) { crypto_free_shash(tfm); } module_init(libcrc32c_mod_init); module_exit(libcrc32c_mod_fini); MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>"); MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations"); MODULE_LICENSE("GPL");
gpl-2.0
charles1018/Nexus_5
lib/libcrc32c.c
12859
2133
/* * CRC32C *@Article{castagnoli-crc, * author = { Guy Castagnoli and Stefan Braeuer and Martin Herrman}, * title = {{Optimization of Cyclic Redundancy-Check Codes with 24 * and 32 Parity Bits}}, * journal = IEEE Transactions on Communication, * year = {1993}, * volume = {41}, * number = {6}, * pages = {}, * month = {June}, *} * Used by the iSCSI driver, possibly others, and derived from the * the iscsi-crc.c module of the linux-iscsi driver at * http://linux-iscsi.sourceforge.net. * * Following the example of lib/crc32, this function is intended to be * flexible and useful for all users. Modules that currently have their * own crc32c, but hopefully may be able to use this one are: * net/sctp (please add all your doco to here if you change to * use this one!) * <endoflist> * * Copyright (c) 2004 Cisco Systems, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/hash.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> static struct crypto_shash *tfm; u32 crc32c(u32 crc, const void *address, unsigned int length) { struct { struct shash_desc shash; char ctx[crypto_shash_descsize(tfm)]; } desc; int err; desc.shash.tfm = tfm; desc.shash.flags = 0; *(u32 *)desc.ctx = crc; err = crypto_shash_update(&desc.shash, address, length); BUG_ON(err); return *(u32 *)desc.ctx; } EXPORT_SYMBOL(crc32c); static int __init libcrc32c_mod_init(void) { tfm = crypto_alloc_shash("crc32c", 0, 0); if (IS_ERR(tfm)) return PTR_ERR(tfm); return 0; } static void __exit libcrc32c_mod_fini(void) { crypto_free_shash(tfm); } module_init(libcrc32c_mod_init); module_exit(libcrc32c_mod_fini); MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>"); MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations"); MODULE_LICENSE("GPL");
gpl-2.0
exynos4-sdk/kernel
arch/sparc/mm/init_64.c
60
57961
/* * arch/sparc64/mm/init.c * * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/initrd.h> #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/poison.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/kprobes.h> #include <linux/cache.h> #include <linux/sort.h> #include <linux/percpu.h> #include <linux/memblock.h> #include <linux/mmzone.h> #include <linux/gfp.h> #include <asm/head.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/oplib.h> #include <asm/iommu.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> #include <asm/dma.h> #include <asm/starfire.h> #include <asm/tlb.h> #include <asm/spitfire.h> #include <asm/sections.h> #include <asm/tsb.h> #include <asm/hypervisor.h> #include <asm/prom.h> #include <asm/mdesc.h> #include <asm/cpudata.h> #include <asm/irq.h> #include "init_64.h" unsigned long kern_linear_pte_xor[2] __read_mostly; /* A bitmap, one bit for every 256MB of physical memory. If the bit * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else * if set we should use a 256MB page (via kern_linear_pte_xor[1]). */ unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; #ifndef CONFIG_DEBUG_PAGEALLOC /* A special kernel TSB for 4MB and 256MB linear mappings. 
* Space is allocated for this right after the trap table * in arch/sparc64/kernel/head.S */ extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; #endif #define MAX_BANKS 32 static struct linux_prom64_registers pavail[MAX_BANKS] __devinitdata; static int pavail_ents __devinitdata; static int cmp_p64(const void *a, const void *b) { const struct linux_prom64_registers *x = a, *y = b; if (x->phys_addr > y->phys_addr) return 1; if (x->phys_addr < y->phys_addr) return -1; return 0; } static void __init read_obp_memory(const char *property, struct linux_prom64_registers *regs, int *num_ents) { phandle node = prom_finddevice("/memory"); int prop_size = prom_getproplen(node, property); int ents, ret, i; ents = prop_size / sizeof(struct linux_prom64_registers); if (ents > MAX_BANKS) { prom_printf("The machine has more %s property entries than " "this kernel can support (%d).\n", property, MAX_BANKS); prom_halt(); } ret = prom_getproperty(node, property, (char *) regs, prop_size); if (ret == -1) { prom_printf("Couldn't get %s property from /memory.\n"); prom_halt(); } /* Sanitize what we got from the firmware, by page aligning * everything. */ for (i = 0; i < ents; i++) { unsigned long base, size; base = regs[i].phys_addr; size = regs[i].reg_size; size &= PAGE_MASK; if (base & ~PAGE_MASK) { unsigned long new_base = PAGE_ALIGN(base); size -= new_base - base; if ((long) size < 0L) size = 0UL; base = new_base; } if (size == 0UL) { /* If it is empty, simply get rid of it. * This simplifies the logic of the other * functions that process these arrays. */ memmove(&regs[i], &regs[i + 1], (ents - i - 1) * sizeof(regs[0])); i--; ents--; continue; } regs[i].phys_addr = base; regs[i].reg_size = size; } *num_ents = ents; sort(regs, ents, sizeof(struct linux_prom64_registers), cmp_p64, NULL); } unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES / sizeof(unsigned long)]; EXPORT_SYMBOL(sparc64_valid_addr_bitmap); /* Kernel physical address base and size in bytes. 
 */

/* Physical base address and size of the locked kernel image mapping. */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

/* The shared zero page used for anonymous zero-fill mappings. */
struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

/* Number of 4MB locked TLB entries needed to map the kernel image. */
int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

/* Flush one page out of the local cpu's D-cache (or I-cache in the
 * non-aliasing configuration).  Must never be called on sun4v
 * (hypervisor), whose caches are coherent -- hence the BUG_ON.
 */
inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

/* D-cache dirty tracking state is packed into page->flags: one dirty
 * bit (PG_arch_1) plus the owning cpu number stored starting at
 * bit 32.
 */
#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

/* Atomically mark the page dcache-dirty and record this_cpu as the
 * owner in page->flags, retrying the casx compare-and-swap until it
 * succeeds.
 */
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "and %%g7, %1, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

/* Atomically clear the dirty bit, but only if 'cpu' still owns the
 * page's dirty state; if another cpu has re-dirtied it since, the
 * flags are left untouched (branch to label 2).
 */
static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "srlx %%g7, %4, %%g1\n\t"
			     "and %%g1, %3, %%g1\n\t"
			     "cmp %%g1, %0\n\t"
			     "bne,pn %%icc, 2f\n\t"
			     " andn %%g7, %1, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

/* Insert a (tag, pte) pair into a TSB entry.  cheetah_plus and sun4v
 * access the TSB via physical addresses, hence the __pa() conversion.
 */
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;

/* If the page at 'pfn' is marked dcache-dirty, flush it on the cpu
 * recorded as the owner (locally or via cross-call) and clear the
 * dirty state.
 */
static void flush_dcache(unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
		if (pg_flags & (1UL << PG_dcache_dirty)) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}
}

/* MMU update hook: on sun4u, keep the D-cache coherent for the page
 * backing the new translation; then preload the translation into the
 * proper TSB (base page size, or huge page when the pte carries the
 * huge size bits) under mm->context.lock.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;
	pte_t pte = *ptep;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	tsb = mm->context.tsb_block[tsb_index].tsb;
	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

/* Generic flush_dcache_page(): when the page belongs to a mapping with
 * no user mappings we defer the flush by just marking the page dirty
 * for this cpu; otherwise flush immediately.  A no-op on sun4v.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			/* Already dirty on this cpu: nothing to do. */
			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

/* Flush the I-cache for a kernel virtual address range.  Only needed
 * on spitfire; for addresses below PAGE_OFFSET the physical address
 * is obtained by walking the kernel page tables.
 */
void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
EXPORT_SYMBOL(flush_icache_range);

/* Print the MMU type (and, in debug builds, D-cache flush counters)
 * into the given seq_file.
 */
void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

/* OBP translation entries captured at boot, sorted and compacted by
 * read_obp_translations().
 */
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

/* TTE data of the first locked kernel image mapping. */
unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
/* True when vaddr lies inside the range reserved for OBP mappings. */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

/* sort() comparator: order translation entries by virtual address. */
static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'. */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		/* NOTE(review): 'n' is an int but %Zd expects size_t --
		 * harmless diagnostic, verify if ever changed.
		 */
		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	/* Compact the in-range entries to the front of the table and
	 * zero out the remainder.
	 */
	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}

	/* Force execute bit on.  */
	for (i = 0; i < prom_trans_ents; i++)
		prom_trans[i].data |= (tlb_type == hypervisor ?
				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}

/* Ask the sun4v hypervisor for a permanent (locked) mapping; any
 * failure here is fatal this early in boot.
 */
static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		/* NOTE(review): the literal 0 is an int passed for a
		 * %lx conversion -- confirm this is benign on sparc64
		 * varargs before relying on the printed value.
		 */
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

/* Lock the kernel image into the TLB one 4MB mapping at a time, via
 * the hypervisor on sun4v or OBP dtlb/itlb loads on sun4u.  Also
 * computes the privileged context register values for cheetah_plus.
 */
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}

static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

/* Called when entering/leaving PROM service mode; restores the kernel
 * address-space limit on exit and flushes register windows.
 */
void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}

/* Flush the D-cache for a virtual address range: via tag writes on
 * spitfire (capped at one full cache sweep of 512 lines), via ASI
 * invalidate stores on physical addresses for cheetah/cheetah_plus.
 * Nothing to do on sun4v.
 */
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
EXPORT_SYMBOL(__flush_dcache_range);

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	/* Preserve the page-size bits already stored in the context. */
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		/* Wrapped: search from the bottom (skipping ctx 0). */
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			/* Bitmap exhausted: bump the version and reset
			 * the allocation bitmap.
			 */
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

static int numa_enabled = 1;
static int numa_debug;

/* Parse the "numa=" early parameter: "off" disables NUMA setup,
 * "debug" enables numadbg() output.
 */
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

/* Locate the initrd handed over by the bootloader (if any), convert
 * the address it recorded to a true physical address, and reserve the
 * region in memblock.
 */
static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		memblock_reserve(initrd_start, sparc_ramdisk_size);

		/* Switch initrd_start/end over to virtual addresses. */
		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

/* Per-node physical address match rule: an address belongs to the
 * node when (addr & mask) == val.
 */
struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

#ifdef CONFIG_NEED_MULTIPLE_NODES

/* An "mblock" from the machine description: a real-address range and
 * the offset that converts real addresses to physical addresses.
 */
struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

/* Translate a real address to a physical address via the mblock that
 * contains it; addresses in no mblock are returned unchanged.
 */
static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

/* Return the NUMA node owning 'addr', or -1 if no node mask matches. */
static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	return -1;
}

/* Scan [start, end) page by page and return the end of the leading
 * run that lives on a single node; that node's id is stored in *nid.
 */
static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;
	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
#endif

/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.
We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	struct pglist_data *p;
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long paddr;

	/* Allocate the pglist_data on (preferably) the node itself. */
	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->node_id = nid;
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;
}

/* Non-NUMA setup: a single node mask matching every address, all cpus
 * on node 0.
 */
static void init_node_masks_nonnuma(void)
{
	int i;

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	cpumask_setall(&numa_cpumask_lookup_table[0]);
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

/* A memory-latency-group from the machine description: its MD node
 * handle, latency, and the address match/mask rule.
 */
struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

/* Return 0 when some device below 'pio' carries 'cfg_handle'. */
static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

/* From group 'grp', choose the lowest-latency pio-latency-group arc
 * and look for 'cfg_handle' beneath it.
 */
static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

/* Map a device-tree node to its NUMA node by matching the device's
 * cfg-handle against the machine description groups; -1 if unknown.
 */
int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

/* Tag every memblock memory region with the NUMA node(s) it spans. */
static void __init add_node_ranges(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long size = reg->size;
		unsigned long start, end;

		start = reg->base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = memblock_nid_range(start, end, &nid);

			numadbg("Setting memblock NUMA node nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			memblock_set_node(start, this_end - start, nid);
			start = this_end;
		}
	}
}

/* Read all memory-latency-group MD nodes into mlgroups[]. */
static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

/* Read all mblock MD nodes into mblocks[]. */
static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);
		m->offset = *val;

		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

/* Collect into *mask the cpus reachable from group 'grp' via BACK
 * arcs in the machine description.
 */
static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpumask_clear(mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < nr_cpu_ids)
			cpumask_set_cpu(*id, mask);
	}
}

/* Look up the mlgroup whose MD node handle equals 'node'. */
static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}

/* Pick the lowest-latency mlgroup attached to 'grp' and record its
 * address match/mask rule as node_masks[index].
 */
static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}

/* Process one MD "group" node: bind its cpus to NUMA node 'index',
 * then attach its memory latency group.
 */
static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu(cpu, &mask)
		numa_cpu_lookup_table[cpu] = index;
	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu(cpu, &mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}

/* Build the full NUMA topology from the sun4v machine description. */
static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	/* NOTE(review): add_node_ranges() and node data allocation run
	 * even when the group loop broke with err < 0 -- err is then
	 * overwritten with 0 below.  Looks intentional (partial setup
	 * still boots) but confirm before changing.
	 */
	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

/* JBUS (Jalapeno/Serrano) sun4u NUMA: the node id lives in physical
 * address bits 36 and up, one node per present cpu.
 */
static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}

/* sun4u NUMA is only supported on JBUS parts, identified by the
 * implementation field of the %ver register.
 */
static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}

/* Attempt NUMA setup; a negative return makes the caller fall back to
 * the single-node path.
 */
static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}

	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif

/* Single-node fallback: all memory and cpus on node 0. */
static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
	allocate_node_data(0);
	node_set_online(0);
}

/* Set up per-node data and sparsemem; returns the end pfn of DRAM. */
static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;

	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* Dump memblock with node info. */
	memblock_dump_all();

	/* XXX cpu notifier XXX */

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	return end_pfn;
}

/* OBP "reg" property: all physical memory banks. */
static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

#ifdef CONFIG_DEBUG_PAGEALLOC
/* Map physical range [pstart, pend) into the kernel linear region
 * with protections 'prot', building page tables from bootmem as
 * needed.  Returns the number of bytes allocated for page tables.
 */
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		/* Fill ptes up to the end of this pmd (or vend). */
		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */

/* Mark every fully-covered, aligned 256MB chunk of [start, end) in
 * kpte_linear_bitmap so the TLB miss handler can use 256MB mappings
 * for it.
 */
static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	const unsigned long shift_256MB = 28;
	const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
	const unsigned long size_256MB = (1UL << shift_256MB);

	while (start < end) {
		long remains;

		remains = end - start;
		if (remains < size_256MB)
			break;

		/* Align up to the next 256MB boundary first. */
		if (start & mask_256MB) {
			start = (start + size_256MB) & ~mask_256MB;
			continue;
		}

		while (remains >= size_256MB) {
			unsigned long index = start >> shift_256MB;

			__set_bit(index, kpte_linear_bitmap);

			start += size_256MB;
			remains -= size_256MB;
		}
	}
}

/* Mark the kpte bitmap for every physical bank in pall[]. */
static void __init init_kpte_bitmap(void)
{
	unsigned long i;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);
	}
}

/* With DEBUG_PAGEALLOC, build real page tables for the whole linear
 * mapping and nop out the fast kvmap linear-mapping patch site.
 * Otherwise a no-op (the linear area is TLB-mapped directly).
 */
static void __init kernel_physical_mapping_init(void)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long i, mem_alloced = 0UL;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
#endif
}

#ifdef CONFIG_DEBUG_PAGEALLOC
/* DEBUG_PAGEALLOC hook: map or unmap a run of pages in the linear
 * region, then flush the kernel TSB and (local-only) TLB for it.
 */
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

/* Find a contiguous available-memory chunk of at least 'size' bytes,
 * used for E-cache displacement flushing; ~0UL when none exists.
 */
unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}

/* Rewrite the TSB access instruction patch sites so TSB loads and
 * stores use physical addressing (self-modifying code, flushed per
 * instruction).
 */
static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush %0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush %0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}

/* Don't mark as init, we give this to the Hypervisor.  */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

/* Patch each recorded instruction pair to load 'pa' (pre-shifted by
 * KTSB_PHYS_SHIFT): high 22 bits into the first insn's immediate,
 * low 10 bits into the second's.
 */
static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
{
	pa >>= KTSB_PHYS_SHIFT;

	while (start < end) {
		unsigned int *ia = (unsigned int *)(unsigned long)*start;

		ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
		__asm__ __volatile__("flush %0" : : "r" (ia));

		ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
		__asm__ __volatile__("flush %0" : : "r" (ia + 1));

		start++;
	}
}

/* Patch the kernel TSB miss handlers to address the TSB(s) by
 * physical address.
 */
static void ktsb_phys_patch(void)
{
	extern unsigned int __swapper_tsb_phys_patch;
	extern unsigned int __swapper_tsb_phys_patch_end;
	unsigned long ktsb_pa;

	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
			    &__swapper_tsb_phys_patch_end, ktsb_pa);
#ifndef CONFIG_DEBUG_PAGEALLOC
	{
	extern unsigned int __swapper_4m_tsb_phys_patch;
	extern unsigned int __swapper_4m_tsb_phys_patch_end;
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
			    &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
	}
#endif
}

/* Fill in the hypervisor TSB descriptor(s) describing the kernel
 * TSB(s) for later registration with the sun4v hypervisor.
 */
static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings.  */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB mappings.  */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
				   HV_PGSZ_MASK_256MB);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}

/* Register the kernel TSB descriptors with the sun4v hypervisor for
 * context 0; failure is fatal.
 */
void __cpuinit sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}

/* paging_init() sets up the page tables */

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

/* Main MMU/memory bring-up: patch cpu-specific handlers, read the OBP
 * memory lists into memblock, lock the kernel image into the TLB, set
 * up bootmem/NUMA, and initialize the zone lists.
 */
void __init paging_init(void)
{
	unsigned long end_pfn, shift, phys_base;
	unsigned long real_end, i;
	int node;

	/* These build time checkes make sure that the dcache_dirty_cpu()
	 * page->flags usage will work.
	 *
	 * When a page gets marked as dcache-dirty, we store the
	 * cpu number starting at bit 32 in the page->flags.  Also,
	 * functions like clear_dcache_dirty_cpu use the cpu mask
	 * in 13-bit signed-immediate instruction fields.
	 */

	/*
	 * Page flags must not reach into upper 32 bits that are used
	 * for the cpu number
	 */
	BUILD_BUG_ON(NR_PAGEFLAGS > 32);

	/*
	 * The bit fields placed in the high range must not reach below
	 * the 32 bit boundary. Otherwise we cannot place the cpu field
	 * at the 32 bit boundary.
	 */
	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
		ilog2(roundup_pow_of_two(NR_CPUS)) > 32);

	BUILD_BUG_ON(NR_CPUS > 4096);

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Invalidate both kernel TSBs.  */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor) {
		tsb_phys_patch();
		ktsb_phys_patch();
	}

	if (tlb_type == hypervisor) {
		sun4v_patch_tlb_handlers();
		sun4v_ktsb_init();
	}

	/* Find available physical memory...
	 *
	 * Read it twice in order to work around a bug in openfirmware.
	 * The call to grab this table itself can cause openfirmware to
	 * allocate memory, which in turn can take away some space from
	 * the list of available memory.  Reading it twice makes sure
	 * we really do get the final value.
	 */
	read_obp_translations();
	read_obp_memory("reg", &pall[0], &pall_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++) {
		phys_base = min(phys_base, pavail[i].phys_addr);
		memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
	}

	memblock_reserve(kern_base, kern_size);

	find_ramdisk(phys_base);

	memblock_enforce_memory_limit(cmdline_memory_size);

	memblock_allow_resize();
	memblock_dump_all();

	/* Reserve context 0 (nucleus) in the context bitmap. */
	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	inherit_prom_mappings();

	init_kpte_bitmap();

	/* Ok, we can use our TLB miss and window trap handlers safely.  */
	setup_tba();

	__flush_tlb_all();

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	prom_build_devicetree();
	of_populate_present_mask();
#ifndef CONFIG_SMP
	of_fill_in_cpu_data();
#endif

	if (tlb_type == hypervisor) {
		sun4v_mdesc_init();
		mdesc_populate_present_mask(cpu_all_mask);
#ifndef CONFIG_SMP
		mdesc_fill_in_cpu_data(cpu_all_mask);
#endif
	}

	/* Setup bootmem... */
	last_valid_pfn = end_pfn = bootmem_init(phys_base);

	/* Once the OF device tree and MDESC have been setup, we know
	 * the list of possible cpus.  Therefore we can allocate the
	 * IRQ stacks.
	 */
	for_each_possible_cpu(i) {
		node = cpu_to_node(i);

		softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
							THREAD_SIZE,
							THREAD_SIZE, 0);
		hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
							THREAD_SIZE,
							THREAD_SIZE, 0);
	}

	kernel_physical_mapping_init();

	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		max_zone_pfns[ZONE_NORMAL] = end_pfn;

		free_area_init_nodes(max_zone_pfns);
	}

	printk("Booting Linux...\n");
}

/* Return 1 when 'paddr' lies in OBP-available memory, inside the
 * kernel image, or inside the initrd; 0 otherwise.
 */
int __devinit page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif
	return 0;
}

static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_rescan_ents __initdata;

/* Certain OBP calls, such as fetching "available" properties, can
 * claim physical memory.  So, along with initializing the valid
 * address bitmap, what we do here is refetch the physical available
 * memory list again, and make sure it provides at least as much
 * memory as 'pavail' does.
 */
static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
{
	int i;

	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

	for (i = 0; i < pavail_ents; i++) {
		unsigned long old_start, old_end;

		old_start = pavail[i].phys_addr;
		old_end = old_start + pavail[i].reg_size;
		while (old_start < old_end) {
			int n;

			for (n = 0; n < pavail_rescan_ents; n++) {
				unsigned long new_start, new_end;

				new_start = pavail_rescan[n].phys_addr;
				new_end = new_start +
					pavail_rescan[n].reg_size;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					/* Bitmap granularity is 4MB (>> 22). */
					set_bit(old_start >> 22, bitmap);
					goto do_next_page;
				}
			}

			prom_printf("mem_init: Lost memory in pavail\n");
			prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
				    pavail[i].phys_addr,
				    pavail[i].reg_size);
			/* NOTE(review): 'i' indexes pavail, so
			 * pavail_rescan[i] may not be the matching
			 * rescan entry -- diagnostic-only output right
			 * before prom_halt(), but verify if reworked.
			 */
			prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
				    pavail_rescan[i].phys_addr,
				    pavail_rescan[i].reg_size);
			prom_printf("mem_init: Cannot continue, aborting.\n");
			prom_halt();

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}

/* Activate the valid-address-bitmap check in the TLB miss handler by
 * copying the real instructions over the placeholder ones (second
 * word first, with a barrier, then the first word).
 */
static void __init patch_tlb_miss_handler_bitmap(void)
{
	extern unsigned int valid_addr_bitmap_insn[];
	extern unsigned int valid_addr_bitmap_patch[];

	valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
	mb();
	valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
	flushi(&valid_addr_bitmap_insn[0]);
}

/* Late memory init: build the valid address bitmap, release bootmem
 * pages to the buddy allocator, allocate the zero page, and print the
 * memory summary.
 */
void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;

	/* Mark the kernel image itself valid (4MB granularity). */
	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
	patch_tlb_miss_handler_bitmap();

	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	{
		int i;
		for_each_online_node(i) {
			if (NODE_DATA(i)->node_spanned_pages != 0) {
				totalram_pages +=
					free_all_bootmem_node(NODE_DATA(i));
			}
		}
		totalram_pages += free_low_memory_core_early(MAX_NUMNODES);
	}
#else
	totalram_pages = free_all_bootmem();
#endif

	/* We subtract one to account for the mem_map_zero page
	 * allocated below.
	 */
	totalram_pages -= 1;
	num_physpages = totalram_pages;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}

/* Poison and (unless memory was trimmed on the command line) free the
 * pages of the __init sections back to the page allocator.
 */
void free_initmem(void)
{
	unsigned long addr, initend;
	int do_free = 1;

	/* If the physical memory maps were trimmed by kernel command
	 * line options, don't even try freeing this initmem stuff up.
	 * The kernel image could have been in the trimmed out region
	 * and if so the freeing below will free invalid page structs.
	 */
	if (cmdline_memory_size)
		do_free = 0;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		/* Translate the KERNBASE alias to the linear-map alias
		 * so virt_to_page() below works.
		 */
		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);

		if (do_free) {
			p = virt_to_page(page);

			ClearPageReserved(p);
			init_page_count(p);
			__free_page(p);
			num_physpages++;
			totalram_pages++;
		}
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
/* Return the initrd's pages to the page allocator after it has been
 * unpacked.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n",
			(end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		init_page_count(p);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif

/* Composite pte bit groups for the sun4u (4U) and sun4v (4V) pte
 * layouts, used by the pgprot init code.
 */
#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)

/* Runtime-selected page protections and pte bits, filled in by
 * sun4u_pgprot_init()/sun4v_pgprot_init().
 */
pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
unsigned long vmemmap_table[VMEMMAP_SIZE];

int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long vstart = (unsigned long) start;
	unsigned long vend = (unsigned long) (start + nr);
	unsigned long phys_start = (vstart - VMEMMAP_BASE);
	unsigned long phys_end = (vend - VMEMMAP_BASE);
unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK; unsigned long end = VMEMMAP_ALIGN(phys_end); unsigned long pte_base; pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | _PAGE_W_4U); if (tlb_type == hypervisor) pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | _PAGE_W_4V); for (; addr < end; addr += VMEMMAP_CHUNK) { unsigned long *vmem_pp = vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT); void *block; if (!(*vmem_pp & _PAGE_VALID)) { block = vmemmap_alloc_block(1UL << 22, node); if (!block) return -ENOMEM; *vmem_pp = pte_base | __pa(block); printk(KERN_INFO "[%p-%p] page_structs=%lu " "node=%d entry=%lu/%lu\n", start, block, nr, node, addr >> VMEMMAP_CHUNK_SHIFT, VMEMMAP_SIZE); } } return 0; } #endif /* CONFIG_SPARSEMEM_VMEMMAP */ static void prot_init_common(unsigned long page_none, unsigned long page_shared, unsigned long page_copy, unsigned long page_readonly, unsigned long page_exec_bit) { PAGE_COPY = __pgprot(page_copy); PAGE_SHARED = __pgprot(page_shared); protection_map[0x0] = __pgprot(page_none); protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit); protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit); protection_map[0x4] = __pgprot(page_readonly); protection_map[0x5] = __pgprot(page_readonly); protection_map[0x6] = __pgprot(page_copy); protection_map[0x7] = __pgprot(page_copy); protection_map[0x8] = __pgprot(page_none); protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit); protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit); protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit); protection_map[0xc] = __pgprot(page_readonly); protection_map[0xd] = __pgprot(page_readonly); protection_map[0xe] = __pgprot(page_shared); protection_map[0xf] = __pgprot(page_shared); } static void __init sun4u_pgprot_init(void) { unsigned long page_none, page_shared, page_copy, page_readonly; unsigned long 
page_exec_bit; PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | _PAGE_CACHE_4U | _PAGE_P_4U | __ACCESS_BITS_4U | __DIRTY_BITS_4U | _PAGE_EXEC_4U); PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | _PAGE_CACHE_4U | _PAGE_P_4U | __ACCESS_BITS_4U | __DIRTY_BITS_4U | _PAGE_EXEC_4U | _PAGE_L_4U); _PAGE_IE = _PAGE_IE_4U; _PAGE_E = _PAGE_E_4U; _PAGE_CACHE = _PAGE_CACHE_4U; pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | __ACCESS_BITS_4U | _PAGE_E_4U); #ifdef CONFIG_DEBUG_PAGEALLOC kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^ 0xfffff80000000000UL; #else kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ 0xfffff80000000000UL; #endif kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | _PAGE_W_4U); /* XXX Should use 256MB on Panther. XXX */ kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; _PAGE_SZBITS = _PAGE_SZBITS_4U; _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | _PAGE_SZ64K_4U | _PAGE_SZ8K_4U | _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U); page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U; page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U); page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | __ACCESS_BITS_4U | _PAGE_EXEC_4U); page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | __ACCESS_BITS_4U | _PAGE_EXEC_4U); page_exec_bit = _PAGE_EXEC_4U; prot_init_common(page_none, page_shared, page_copy, page_readonly, page_exec_bit); } static void __init sun4v_pgprot_init(void) { unsigned long page_none, page_shared, page_copy, page_readonly; unsigned long page_exec_bit; PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | _PAGE_CACHE_4V | _PAGE_P_4V | __ACCESS_BITS_4V | __DIRTY_BITS_4V | _PAGE_EXEC_4V); PAGE_KERNEL_LOCKED = PAGE_KERNEL; _PAGE_IE = _PAGE_IE_4V; _PAGE_E = _PAGE_E_4V; _PAGE_CACHE = _PAGE_CACHE_4V; #ifdef CONFIG_DEBUG_PAGEALLOC kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^ 
0xfffff80000000000UL; #else kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ 0xfffff80000000000UL; #endif kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | _PAGE_W_4V); #ifdef CONFIG_DEBUG_PAGEALLOC kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^ 0xfffff80000000000UL; #else kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ 0xfffff80000000000UL; #endif kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | _PAGE_W_4V); pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | __ACCESS_BITS_4V | _PAGE_E_4V); _PAGE_SZBITS = _PAGE_SZBITS_4V; _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V | _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V | _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V; page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | __ACCESS_BITS_4V | _PAGE_EXEC_4V); page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | __ACCESS_BITS_4V | _PAGE_EXEC_4V); page_exec_bit = _PAGE_EXEC_4V; prot_init_common(page_none, page_shared, page_copy, page_readonly, page_exec_bit); } unsigned long pte_sz_bits(unsigned long sz) { if (tlb_type == hypervisor) { switch (sz) { case 8 * 1024: default: return _PAGE_SZ8K_4V; case 64 * 1024: return _PAGE_SZ64K_4V; case 512 * 1024: return _PAGE_SZ512K_4V; case 4 * 1024 * 1024: return _PAGE_SZ4MB_4V; } } else { switch (sz) { case 8 * 1024: default: return _PAGE_SZ8K_4U; case 64 * 1024: return _PAGE_SZ64K_4U; case 512 * 1024: return _PAGE_SZ512K_4U; case 4 * 1024 * 1024: return _PAGE_SZ4MB_4U; } } } pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) { pte_t pte; pte_val(pte) = page | pgprot_val(pgprot_noncached(prot)); pte_val(pte) |= (((unsigned long)space) << 32); pte_val(pte) |= pte_sz_bits(page_size); return pte; } 
static unsigned long kern_large_tte(unsigned long paddr) { unsigned long val; val = (_PAGE_VALID | _PAGE_SZ4MB_4U | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); if (tlb_type == hypervisor) val = (_PAGE_VALID | _PAGE_SZ4MB_4V | _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | _PAGE_EXEC_4V | _PAGE_W_4V); return val | paddr; } /* If not locked, zap it. */ void __flush_tlb_all(void) { unsigned long pstate; int i; __asm__ __volatile__("flushw\n\t" "rdpr %%pstate, %0\n\t" "wrpr %0, %1, %%pstate" : "=r" (pstate) : "i" (PSTATE_IE)); if (tlb_type == hypervisor) { sun4v_mmu_demap_all(); } else if (tlb_type == spitfire) { for (i = 0; i < 64; i++) { /* Spitfire Errata #32 workaround */ /* NOTE: Always runs on spitfire, so no * cheetah+ page size encodings. */ __asm__ __volatile__("stxa %0, [%1] %2\n\t" "flush %%g6" : /* No outputs */ : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) { __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); spitfire_put_dtlb_data(i, 0x0UL); } /* Spitfire Errata #32 workaround */ /* NOTE: Always runs on spitfire, so no * cheetah+ page size encodings. */ __asm__ __volatile__("stxa %0, [%1] %2\n\t" "flush %%g6" : /* No outputs */ : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) { __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); spitfire_put_itlb_data(i, 0x0UL); } } } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { cheetah_flush_dtlb_all(); cheetah_flush_itlb_all(); } __asm__ __volatile__("wrpr %0, 0, %%pstate" : : "r" (pstate)); }
gpl-2.0
brion/operations-debs-ffmpeg2theorawmf
ffmpeg/libavformat/bintext.c
60
11698
/* * Binary text demuxer * eXtended BINary text (XBIN) demuxer * Artworx Data Format demuxer * iCEDraw File demuxer * Copyright (c) 2010 Peter Ross <pross@xvid.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Binary text demuxer * eXtended BINary text (XBIN) demuxer * Artworx Data Format demuxer * iCEDraw File demuxer */ #include "libavutil/intreadwrite.h" #include "libavutil/opt.h" #include "libavutil/parseutils.h" #include "avformat.h" #include "internal.h" #include "sauce.h" #include "libavcodec/bintext.h" typedef struct { const AVClass *class; int chars_per_frame; /**< characters to send decoder per frame; set by private options as characters per second, and then converted to characters per frame at runtime */ int width, height; /**< video size (WxH pixels) (private option) */ AVRational framerate; /**< frames per second (private option) */ uint64_t fsize; /**< file size less metadata buffer */ } BinDemuxContext; static AVStream * init_stream(AVFormatContext *s) { BinDemuxContext *bin = s->priv_data; AVStream *st = avformat_new_stream(s, NULL); if (!st) return NULL; st->codec->codec_tag = 0; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; if (!bin->width) { st->codec->width = (80<<3); st->codec->height = (25<<4); } avpriv_set_pts_info(st, 60, bin->framerate.den, 
bin->framerate.num); /* simulate tty display speed */ bin->chars_per_frame = av_clip(av_q2d(st->time_base) * bin->chars_per_frame, 1, INT_MAX); return st; } #if CONFIG_BINTEXT_DEMUXER | CONFIG_ADF_DEMUXER | CONFIG_IDF_DEMUXER /** * Given filesize and width, calculate height (assume font_height of 16) */ static void calculate_height(AVCodecContext *avctx, uint64_t fsize) { avctx->height = (fsize / ((avctx->width>>3)*2)) << 4; } #endif #if CONFIG_BINTEXT_DEMUXER static const uint8_t next_magic[]={ 0x1A, 0x1B, '[', '0', ';', '3', '0', ';', '4', '0', 'm', 'N', 'E', 'X', 'T', 0x00 }; static int next_tag_read(AVFormatContext *avctx, uint64_t *fsize) { AVIOContext *pb = avctx->pb; char buf[36]; int len; uint64_t start_pos = avio_size(pb) - 256; avio_seek(pb, start_pos, SEEK_SET); if (avio_read(pb, buf, sizeof(next_magic)) != sizeof(next_magic)) return -1; if (memcmp(buf, next_magic, sizeof(next_magic))) return -1; if (avio_r8(pb) != 0x01) return -1; *fsize -= 256; #define GET_EFI2_META(name,size) \ len = avio_r8(pb); \ if (len < 1 || len > size) \ return -1; \ if (avio_read(pb, buf, size) == size && *buf) { \ buf[len] = 0; \ av_dict_set(&avctx->metadata, name, buf, 0); \ } GET_EFI2_META("filename", 12) GET_EFI2_META("author", 20) GET_EFI2_META("publisher", 20) GET_EFI2_META("title", 35) return 0; } static void predict_width(AVCodecContext *avctx, uint64_t fsize, int got_width) { /** attempt to guess width */ if (!got_width) avctx->width = fsize > 4000 ? 
(160<<3) : (80<<3); } static int bintext_read_header(AVFormatContext *s) { BinDemuxContext *bin = s->priv_data; AVIOContext *pb = s->pb; AVStream *st = init_stream(s); if (!st) return AVERROR(ENOMEM); st->codec->codec_id = AV_CODEC_ID_BINTEXT; if (ff_alloc_extradata(st->codec, 2)) return AVERROR(ENOMEM); st->codec->extradata[0] = 16; st->codec->extradata[1] = 0; if (pb->seekable) { int got_width = 0; bin->fsize = avio_size(pb); if (ff_sauce_read(s, &bin->fsize, &got_width, 0) < 0) next_tag_read(s, &bin->fsize); if (!bin->width) { predict_width(st->codec, bin->fsize, got_width); calculate_height(st->codec, bin->fsize); } avio_seek(pb, 0, SEEK_SET); } return 0; } #endif /* CONFIG_BINTEXT_DEMUXER */ #if CONFIG_XBIN_DEMUXER static int xbin_probe(AVProbeData *p) { const uint8_t *d = p->buf; if (AV_RL32(d) == MKTAG('X','B','I','N') && d[4] == 0x1A && AV_RL16(d+5) > 0 && AV_RL16(d+5) <= 160 && d[9] > 0 && d[9] <= 32) return AVPROBE_SCORE_MAX; return 0; } static int xbin_read_header(AVFormatContext *s) { BinDemuxContext *bin = s->priv_data; AVIOContext *pb = s->pb; char fontheight, flags; AVStream *st = init_stream(s); if (!st) return AVERROR(ENOMEM); avio_skip(pb, 5); st->codec->width = avio_rl16(pb)<<3; st->codec->height = avio_rl16(pb); fontheight = avio_r8(pb); st->codec->height *= fontheight; flags = avio_r8(pb); st->codec->extradata_size = 2; if ((flags & BINTEXT_PALETTE)) st->codec->extradata_size += 48; if ((flags & BINTEXT_FONT)) st->codec->extradata_size += fontheight * (flags & 0x10 ? 512 : 256); st->codec->codec_id = flags & 4 ? 
AV_CODEC_ID_XBIN : AV_CODEC_ID_BINTEXT; if (ff_alloc_extradata(st->codec, st->codec->extradata_size)) return AVERROR(ENOMEM); st->codec->extradata[0] = fontheight; st->codec->extradata[1] = flags; if (avio_read(pb, st->codec->extradata + 2, st->codec->extradata_size - 2) < 0) return AVERROR(EIO); if (pb->seekable) { bin->fsize = avio_size(pb) - 9 - st->codec->extradata_size; ff_sauce_read(s, &bin->fsize, NULL, 0); avio_seek(pb, 9 + st->codec->extradata_size, SEEK_SET); } return 0; } #endif /* CONFIG_XBIN_DEMUXER */ #if CONFIG_ADF_DEMUXER static int adf_read_header(AVFormatContext *s) { BinDemuxContext *bin = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; if (avio_r8(pb) != 1) return AVERROR_INVALIDDATA; st = init_stream(s); if (!st) return AVERROR(ENOMEM); st->codec->codec_id = AV_CODEC_ID_BINTEXT; if (ff_alloc_extradata(st->codec, 2 + 48 + 4096)) return AVERROR(ENOMEM); st->codec->extradata[0] = 16; st->codec->extradata[1] = BINTEXT_PALETTE|BINTEXT_FONT; if (avio_read(pb, st->codec->extradata + 2, 24) < 0) return AVERROR(EIO); avio_skip(pb, 144); if (avio_read(pb, st->codec->extradata + 2 + 24, 24) < 0) return AVERROR(EIO); if (avio_read(pb, st->codec->extradata + 2 + 48, 4096) < 0) return AVERROR(EIO); if (pb->seekable) { int got_width = 0; bin->fsize = avio_size(pb) - 1 - 192 - 4096; st->codec->width = 80<<3; ff_sauce_read(s, &bin->fsize, &got_width, 0); if (!bin->width) calculate_height(st->codec, bin->fsize); avio_seek(pb, 1 + 192 + 4096, SEEK_SET); } return 0; } #endif /* CONFIG_ADF_DEMUXER */ #if CONFIG_IDF_DEMUXER static const uint8_t idf_magic[] = { 0x04, 0x31, 0x2e, 0x34, 0x00, 0x00, 0x00, 0x00, 0x4f, 0x00, 0x15, 0x00 }; static int idf_probe(AVProbeData *p) { if (p->buf_size < sizeof(idf_magic)) return 0; if (!memcmp(p->buf, idf_magic, sizeof(idf_magic))) return AVPROBE_SCORE_MAX; return 0; } static int idf_read_header(AVFormatContext *s) { BinDemuxContext *bin = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; int got_width = 0; if 
(!pb->seekable) return AVERROR(EIO); st = init_stream(s); if (!st) return AVERROR(ENOMEM); st->codec->codec_id = AV_CODEC_ID_IDF; if (ff_alloc_extradata(st->codec, 2 + 48 + 4096)) return AVERROR(ENOMEM); st->codec->extradata[0] = 16; st->codec->extradata[1] = BINTEXT_PALETTE|BINTEXT_FONT; avio_seek(pb, avio_size(pb) - 4096 - 48, SEEK_SET); if (avio_read(pb, st->codec->extradata + 2 + 48, 4096) < 0) return AVERROR(EIO); if (avio_read(pb, st->codec->extradata + 2, 48) < 0) return AVERROR(EIO); bin->fsize = avio_size(pb) - 12 - 4096 - 48; ff_sauce_read(s, &bin->fsize, &got_width, 0); if (!bin->width) calculate_height(st->codec, bin->fsize); avio_seek(pb, 12, SEEK_SET); return 0; } #endif /* CONFIG_IDF_DEMUXER */ static int read_packet(AVFormatContext *s, AVPacket *pkt) { BinDemuxContext *bin = s->priv_data; if (bin->fsize > 0) { if (av_get_packet(s->pb, pkt, bin->fsize) < 0) return AVERROR(EIO); bin->fsize = -1; /* done */ } else if (!bin->fsize) { if (avio_feof(s->pb)) return AVERROR(EIO); if (av_get_packet(s->pb, pkt, bin->chars_per_frame) < 0) return AVERROR(EIO); } else { return AVERROR(EIO); } pkt->flags |= AV_PKT_FLAG_KEY; return 0; } #define OFFSET(x) offsetof(BinDemuxContext, x) static const AVOption options[] = { { "linespeed", "set simulated line speed (bytes per second)", OFFSET(chars_per_frame), AV_OPT_TYPE_INT, {.i64 = 6000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM}, { "video_size", "set video size, such as 640x480 or hd720.", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM }, { "framerate", "set framerate (frames per second)", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, AV_OPT_FLAG_DECODING_PARAM }, { NULL }, }; #define CLASS(name) \ (const AVClass[1]){{ \ .class_name = name, \ .item_name = av_default_item_name, \ .option = options, \ .version = LIBAVUTIL_VERSION_INT, \ }} #if CONFIG_BINTEXT_DEMUXER AVInputFormat ff_bintext_demuxer = { .name = "bin", .long_name = NULL_IF_CONFIG_SMALL("Binary 
text"), .priv_data_size = sizeof(BinDemuxContext), .read_header = bintext_read_header, .read_packet = read_packet, .extensions = "bin", .priv_class = CLASS("Binary text demuxer"), }; #endif #if CONFIG_XBIN_DEMUXER AVInputFormat ff_xbin_demuxer = { .name = "xbin", .long_name = NULL_IF_CONFIG_SMALL("eXtended BINary text (XBIN)"), .priv_data_size = sizeof(BinDemuxContext), .read_probe = xbin_probe, .read_header = xbin_read_header, .read_packet = read_packet, .priv_class = CLASS("eXtended BINary text (XBIN) demuxer"), }; #endif #if CONFIG_ADF_DEMUXER AVInputFormat ff_adf_demuxer = { .name = "adf", .long_name = NULL_IF_CONFIG_SMALL("Artworx Data Format"), .priv_data_size = sizeof(BinDemuxContext), .read_header = adf_read_header, .read_packet = read_packet, .extensions = "adf", .priv_class = CLASS("Artworx Data Format demuxer"), }; #endif #if CONFIG_IDF_DEMUXER AVInputFormat ff_idf_demuxer = { .name = "idf", .long_name = NULL_IF_CONFIG_SMALL("iCE Draw File"), .priv_data_size = sizeof(BinDemuxContext), .read_probe = idf_probe, .read_header = idf_read_header, .read_packet = read_packet, .extensions = "idf", .priv_class = CLASS("iCE Draw File demuxer"), }; #endif
gpl-2.0
percy-g2/Novathor_xperia_u8500
6.1.1.B.1.54/external/webkit/Source/WebCore/editing/CreateLinkCommand.cpp
60
2325
/* * Copyright (C) 2006 Apple Computer, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "config.h" #include "CreateLinkCommand.h" #include "htmlediting.h" #include "Text.h" #include "HTMLAnchorElement.h" namespace WebCore { CreateLinkCommand::CreateLinkCommand(Document* document, const String& url) : CompositeEditCommand(document) { m_url = url; } void CreateLinkCommand::doApply() { if (endingSelection().isNone()) return; RefPtr<HTMLAnchorElement> anchorElement = HTMLAnchorElement::create(document()); anchorElement->setHref(m_url); if (endingSelection().isRange()) applyStyledElement(anchorElement.get()); else { insertNodeAt(anchorElement.get(), endingSelection().start()); RefPtr<Text> textNode = Text::create(document(), m_url); appendNode(textNode.get(), anchorElement.get()); setEndingSelection(VisibleSelection(positionInParentBeforeNode(anchorElement.get()), positionInParentAfterNode(anchorElement.get()), DOWNSTREAM)); } } }
gpl-2.0
kerneldevs/RM-CAF-PECAN
fs/namespace.c
316
58541
/* * linux/fs/namespace.c * * (C) Copyright Al Viro 2000, 2001 * Released under GPL v2. * * Based on code from fs/super.c, copyright Linus Torvalds and others. * Heavily rewritten. */ #include <linux/syscalls.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/smp_lock.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/acct.h> #include <linux/capability.h> #include <linux/cpumask.h> #include <linux/module.h> #include <linux/sysfs.h> #include <linux/seq_file.h> #include <linux/mnt_namespace.h> #include <linux/namei.h> #include <linux/nsproxy.h> #include <linux/security.h> #include <linux/mount.h> #include <linux/ramfs.h> #include <linux/log2.h> #include <linux/idr.h> #include <linux/fs_struct.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include "pnode.h" #include "internal.h" #define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head)) #define HASH_SIZE (1UL << HASH_SHIFT) /* spinlock for vfsmount related operations, inplace of dcache_lock */ __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock); static int event; static DEFINE_IDA(mnt_id_ida); static DEFINE_IDA(mnt_group_ida); static int mnt_id_start = 0; static int mnt_group_start = 1; static struct list_head *mount_hashtable __read_mostly; static struct kmem_cache *mnt_cache __read_mostly; static struct rw_semaphore namespace_sem; /* /sys/fs */ struct kobject *fs_kobj; EXPORT_SYMBOL_GPL(fs_kobj); static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) { unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); tmp += ((unsigned long)dentry / L1_CACHE_BYTES); tmp = tmp + (tmp >> HASH_SHIFT); return tmp & (HASH_SIZE - 1); } #define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16) /* allocation is serialized by namespace_sem */ static int mnt_alloc_id(struct vfsmount *mnt) { int res; retry: ida_pre_get(&mnt_id_ida, GFP_KERNEL); spin_lock(&vfsmount_lock); res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id); if (!res) mnt_id_start = mnt->mnt_id + 
1; spin_unlock(&vfsmount_lock); if (res == -EAGAIN) goto retry; return res; } static void mnt_free_id(struct vfsmount *mnt) { int id = mnt->mnt_id; spin_lock(&vfsmount_lock); ida_remove(&mnt_id_ida, id); if (mnt_id_start > id) mnt_id_start = id; spin_unlock(&vfsmount_lock); } /* * Allocate a new peer group ID * * mnt_group_ida is protected by namespace_sem */ static int mnt_alloc_group_id(struct vfsmount *mnt) { int res; if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL)) return -ENOMEM; res = ida_get_new_above(&mnt_group_ida, mnt_group_start, &mnt->mnt_group_id); if (!res) mnt_group_start = mnt->mnt_group_id + 1; return res; } /* * Release a peer group ID */ void mnt_release_group_id(struct vfsmount *mnt) { int id = mnt->mnt_group_id; ida_remove(&mnt_group_ida, id); if (mnt_group_start > id) mnt_group_start = id; mnt->mnt_group_id = 0; } struct vfsmount *alloc_vfsmnt(const char *name) { struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL); if (mnt) { int err; err = mnt_alloc_id(mnt); if (err) goto out_free_cache; if (name) { mnt->mnt_devname = kstrdup(name, GFP_KERNEL); if (!mnt->mnt_devname) goto out_free_id; } atomic_set(&mnt->mnt_count, 1); INIT_LIST_HEAD(&mnt->mnt_hash); INIT_LIST_HEAD(&mnt->mnt_child); INIT_LIST_HEAD(&mnt->mnt_mounts); INIT_LIST_HEAD(&mnt->mnt_list); INIT_LIST_HEAD(&mnt->mnt_expire); INIT_LIST_HEAD(&mnt->mnt_share); INIT_LIST_HEAD(&mnt->mnt_slave_list); INIT_LIST_HEAD(&mnt->mnt_slave); #ifdef CONFIG_SMP mnt->mnt_writers = alloc_percpu(int); if (!mnt->mnt_writers) goto out_free_devname; #else mnt->mnt_writers = 0; #endif } return mnt; #ifdef CONFIG_SMP out_free_devname: kfree(mnt->mnt_devname); #endif out_free_id: mnt_free_id(mnt); out_free_cache: kmem_cache_free(mnt_cache, mnt); return NULL; } /* * Most r/o checks on a fs are for operations that take * discrete amounts of time, like a write() or unlink(). 
* We must keep track of when those operations start * (for permission checks) and when they end, so that * we can determine when writes are able to occur to * a filesystem. */ /* * __mnt_is_readonly: check whether a mount is read-only * @mnt: the mount to check for its write status * * This shouldn't be used directly ouside of the VFS. * It does not guarantee that the filesystem will stay * r/w, just that it is right *now*. This can not and * should not be used in place of IS_RDONLY(inode). * mnt_want/drop_write() will _keep_ the filesystem * r/w. */ int __mnt_is_readonly(struct vfsmount *mnt) { if (mnt->mnt_flags & MNT_READONLY) return 1; if (mnt->mnt_sb->s_flags & MS_RDONLY) return 1; return 0; } EXPORT_SYMBOL_GPL(__mnt_is_readonly); static inline void inc_mnt_writers(struct vfsmount *mnt) { #ifdef CONFIG_SMP (*per_cpu_ptr(mnt->mnt_writers, smp_processor_id()))++; #else mnt->mnt_writers++; #endif } static inline void dec_mnt_writers(struct vfsmount *mnt) { #ifdef CONFIG_SMP (*per_cpu_ptr(mnt->mnt_writers, smp_processor_id()))--; #else mnt->mnt_writers--; #endif } static unsigned int count_mnt_writers(struct vfsmount *mnt) { #ifdef CONFIG_SMP unsigned int count = 0; int cpu; for_each_possible_cpu(cpu) { count += *per_cpu_ptr(mnt->mnt_writers, cpu); } return count; #else return mnt->mnt_writers; #endif } /* * Most r/o checks on a fs are for operations that take * discrete amounts of time, like a write() or unlink(). * We must keep track of when those operations start * (for permission checks) and when they end, so that * we can determine when writes are able to occur to * a filesystem. */ /** * mnt_want_write - get write access to a mount * @mnt: the mount on which to take a write * * This tells the low-level filesystem that a write is * about to be performed to it, and makes sure that * writes are allowed before returning success. When * the write operation is finished, mnt_drop_write() * must be called. This is effectively a refcount. 
*/ int mnt_want_write(struct vfsmount *mnt) { int ret = 0; preempt_disable(); inc_mnt_writers(mnt); /* * The store to inc_mnt_writers must be visible before we pass * MNT_WRITE_HOLD loop below, so that the slowpath can see our * incremented count after it has set MNT_WRITE_HOLD. */ smp_mb(); while (mnt->mnt_flags & MNT_WRITE_HOLD) cpu_relax(); /* * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will * be set to match its requirements. So we must not load that until * MNT_WRITE_HOLD is cleared. */ smp_rmb(); if (__mnt_is_readonly(mnt)) { dec_mnt_writers(mnt); ret = -EROFS; goto out; } out: preempt_enable(); return ret; } EXPORT_SYMBOL_GPL(mnt_want_write); /** * mnt_clone_write - get write access to a mount * @mnt: the mount on which to take a write * * This is effectively like mnt_want_write, except * it must only be used to take an extra write reference * on a mountpoint that we already know has a write reference * on it. This allows some optimisation. * * After finished, mnt_drop_write must be called as usual to * drop the reference. 
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	/* Counter bump only; no MNT_WRITE_HOLD spin needed for a clone. */
	preempt_disable();
	inc_mnt_writers(mnt);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file who's mount on which to take a write
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int mnt_want_write_file(struct file *file)
{
	/* NOTE(review): mixes file->f_dentry with file->f_path below;
	 * both name the same dentry here, but f_path is the modern form. */
	struct inode *inode = file->f_dentry->d_inode;
	if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
		return mnt_want_write(file->f_path.mnt);
	else
		/* already open for write: the mount must be writable */
		return mnt_clone_write(file->f_path.mnt);
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	dec_mnt_writers(mnt);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

/*
 * Flip @mnt to read-only, but only if no writers currently hold it.
 * Returns 0 on success, -EBUSY if there are active writers.
 */
static int mnt_make_readonly(struct vfsmount *mnt)
{
	int ret = 0;

	spin_lock(&vfsmount_lock);
	/* Stall new writers in mnt_want_write() while we sum the counters. */
	mnt->mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (count_mnt_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt_flags &= ~MNT_WRITE_HOLD;
	spin_unlock(&vfsmount_lock);
	return ret;
}

/* Undo mnt_make_readonly(); always succeeds. */
static void __mnt_unmake_readonly(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_flags &= ~MNT_READONLY;
	spin_unlock(&vfsmount_lock);
}

/* Attach an already-set-up superblock to a vfsmount (pins sb->s_root). */
void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
{
	mnt->mnt_sb = sb;
	mnt->mnt_root = dget(sb->s_root);
}
EXPORT_SYMBOL(simple_set_mnt);

/* Free a vfsmount and everything it owns (name, id, per-cpu counters). */
void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	mnt_free_id(mnt);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_writers);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set return the first mount else return the last mount.
 */
struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
			      int dir)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct vfsmount *p, *found = NULL;

	for (;;) {
		/* walk forwards for "first", backwards for "last" */
		tmp = dir ? tmp->next : tmp->prev;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = p;
			break;
		}
	}
	return found;
}

/*
 * lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct path *path)
{
	struct vfsmount *child_mnt;
	spin_lock(&vfsmount_lock);
	if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1)))
		mntget(child_mnt);
	spin_unlock(&vfsmount_lock);
	return child_mnt;
}

/* Does this mount belong to the caller's mount namespace? */
static inline int check_mnt(struct vfsmount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/* Bump the namespace event counter and wake /proc/mounts pollers. */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/* As above, but only notify once per global event (caller holds the lock). */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * Unhook @mnt from its parent; the old parent/mountpoint are returned in
 * @old_path so the caller can drop the references outside the lock.
 */
static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;		/* self-parented == detached */
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	old_path->dentry->d_mounted--;
}

/* Record @mnt/@dentry as the mountpoint of @child_mnt (takes refs). */
void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
			struct vfsmount *child_mnt)
{
	child_mnt->mnt_parent = mntget(mnt);
	child_mnt->mnt_mountpoint = dget(dentry);
	dentry->d_mounted++;
}

/* Hook @mnt under @path: set mountpoint, then hash and child-list it. */
static void attach_mnt(struct vfsmount *mnt, struct path *path)
{
	mnt_set_mountpoint(path->mnt, path->dentry, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(path->mnt, path->dentry));
	list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
}

/*
 * the caller must hold vfsmount_lock
 */
static void commit_tree(struct vfsmount *mnt)
{
	struct vfsmount *parent = mnt->mnt_parent;
	struct vfsmount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);	/* must already be attached to a parent */

	/* splice the whole subtree onto the namespace list, tagging each */
	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;
	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
				hash(parent, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_mnt_namespace(n);
}

/* Depth-first successor of @p within the tree rooted at @root, or NULL. */
static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		/* no children: climb until we find an unvisited sibling */
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}

/* Descend to the last (deepest, last-child) mount of the tree at @p. */
static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct vfsmount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

/*
 * Duplicate a single mount of @old at @root, honouring the CL_* flags
 * (slave/private/shared/expire semantics).  Returns NULL on failure.
 */
static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		if (flag & (CL_SLAVE | CL_PRIVATE))
			mnt->mnt_group_id = 0; /* not a peer of original */
		else
			mnt->mnt_group_id = old->mnt_group_id;

		if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
			int err = mnt_alloc_group_id(mnt);
			if (err)
				goto out_free;
		}

		mnt->mnt_flags = old->mnt_flags;
		atomic_inc(&sb->s_active);	/* pin the superblock */
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;

		if (flag & CL_SLAVE) {
			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
			mnt->mnt_master = old;
			CLEAR_MNT_SHARED(mnt);
		} else if (!(flag & CL_PRIVATE)) {
			if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
				list_add(&mnt->mnt_share, &old->mnt_share);
			if (IS_MNT_SLAVE(old))
				list_add(&mnt->mnt_slave, &old->mnt_slave);
			mnt->mnt_master = old->mnt_master;
		}
		if (flag & CL_MAKE_SHARED)
			set_mnt_shared(mnt);

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		if (flag & CL_EXPIRE) {
			if (!list_empty(&old->mnt_expire))
				list_add(&mnt->mnt_expire, &old->mnt_expire);
		}
	}
	return mnt;

 out_free:
	free_vfsmnt(mnt);
	return NULL;
}

/* Final teardown once the last reference to @mnt is gone. */
static inline void __mntput(struct vfsmount *mnt)
{
	struct super_block *sb = mnt->mnt_sb;
	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	/*
	 * atomic_dec_and_lock() used to deal with ->mnt_count decrements
	 * provides barriers, so count_mnt_writers() below is safe. AV
	 */
	WARN_ON(count_mnt_writers(mnt));
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}

/*
 * Drop a reference without touching the expiry mark; frees the mount
 * when the count hits zero.  Pinned counts are folded back in first.
 */
void mntput_no_expire(struct vfsmount *mnt)
{
repeat:
	if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
		if (likely(!mnt->mnt_pinned)) {
			spin_unlock(&vfsmount_lock);
			__mntput(mnt);
			return;
		}
		/* absorb the pinned refs (+1 for our own retry below) */
		atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
		mnt->mnt_pinned = 0;
		spin_unlock(&vfsmount_lock);
		acct_auto_close_mnt(mnt);
		security_sb_umount_close(mnt);
		goto repeat;
	}
}
EXPORT_SYMBOL(mntput_no_expire);

/* Convert one reference into a "pinned" reference (used by acct). */
void mnt_pin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_pinned++;
	spin_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL(mnt_pin);

/* Reverse of mnt_pin(): turn a pinned reference back into a real one. */
void mnt_unpin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	if (mnt->mnt_pinned) {
		atomic_inc(&mnt->mnt_count);
		mnt->mnt_pinned--;
	}
	spin_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL(mnt_unpin);

/* Escape whitespace/backslash for /proc mount output. */
static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
{
	const char *options;

	rcu_read_lock();
	options = rcu_dereference(mnt->mnt_sb->s_options);

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL(generic_show_options);

/*
 * If filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure, that previous options are not overwritten if the
 * remount fails.
 *
 * Also note, that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	BUG_ON(sb->s_options);	/* must only be called once, at fill_super */
	rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
}
EXPORT_SYMBOL(save_mount_options);

/* Swap in a new options string; old readers drain under RCU before kfree. */
void replace_mount_options(struct super_block *sb, char *options)
{
	char *old = sb->s_options;
	rcu_assign_pointer(sb->s_options, options);
	if (old) {
		synchronize_rcu();
		kfree(old);
	}
}
EXPORT_SYMBOL(replace_mount_options);

#ifdef CONFIG_PROC_FS
/* iterator; namespace_sem is held across a whole seq_file pass */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);
	return seq_list_start(&p->ns->list, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	return seq_list_next(v, &p->ns->list, pos);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

/* flag-bit -> option-string mapping used by the show_* helpers below */
struct proc_fs_info {
	int flag;
	const char *str;
};

/* Emit per-superblock option strings, then the LSM's options. */
static int show_sb_opts(struct seq_file *m, struct super_block *sb)
{
	static const struct proc_fs_info fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ 0, NULL }
	};
	const struct proc_fs_info *fs_infop;

	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}

	return security_sb_show_options(m, sb);
}

/* Emit per-mountpoint option strings. */
static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
{
	static const struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ MNT_NOATIME, ",noatime" },
		{ MNT_NODIRATIME, ",nodiratime" },
		{ MNT_RELATIME, ",relatime" },
		{ MNT_STRICTATIME, ",strictatime" },
		{ 0, NULL }
	};
	const struct proc_fs_info *fs_infop;

	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
}

/* Emit "fstype" or "fstype.subtype". */
static void show_type(struct seq_file *m, struct super_block *sb)
{
	mangle(m, sb->s_type->name);
	if (sb->s_subtype && sb->s_subtype[0]) {
		seq_putc(m, '.');
		mangle(m, sb->s_subtype);
	}
}

/* One /proc/mounts line: "dev mountpoint type options 0 0". */
static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	int err = 0;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };

	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_putc(m, ' ');
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');
	show_type(m, mnt->mnt_sb);
	seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
	err = show_sb_opts(m, mnt->mnt_sb);
	if (err)
		goto out;
	show_mnt_opts(m, mnt);
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");	/* legacy dump/fsck fields */
out:
	return err;
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsmnt
};

/* One /proc/<pid>/mountinfo line (richer format than /proc/mounts). */
static int show_mountinfo(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	struct super_block *sb = mnt->mnt_sb;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	struct path root = p->root;
	int err = 0;

	seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id,
		   MAJOR(sb->s_dev), MINOR(sb->s_dev));
	seq_dentry(m, mnt->mnt_root, " \t\n\\");
	seq_putc(m, ' ');
	seq_path_root(m, &mnt_path, &root, " \t\n\\");
	if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
		/*
		 * Mountpoint is outside root, discard that one.  Ugly,
		 * but less so than trying to do that in iterator in a
		 * race-free way (due to renames).
		 */
		return SEQ_SKIP;
	}
	seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
	show_mnt_opts(m, mnt);

	/* Tagged fields ("foo:X" or "bar") */
	if (IS_MNT_SHARED(mnt))
		seq_printf(m, " shared:%i", mnt->mnt_group_id);
	if (IS_MNT_SLAVE(mnt)) {
		int master = mnt->mnt_master->mnt_group_id;
		int dom = get_dominating_id(mnt, &p->root);
		seq_printf(m, " master:%i", master);
		if (dom && dom != master)
			seq_printf(m, " propagate_from:%i", dom);
	}
	if (IS_MNT_UNBINDABLE(mnt))
		seq_puts(m, " unbindable");

	/* Filesystem specific data */
	seq_puts(m, " - ");
	show_type(m, sb);
	seq_putc(m, ' ');
	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
	err = show_sb_opts(m, sb);
	if (err)
		goto out;
	if (sb->s_op->show_options)
		err = sb->s_op->show_options(m, mnt);
	seq_putc(m, '\n');
out:
	return err;
}

const struct seq_operations mountinfo_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_mountinfo,
};

/* One /proc/<pid>/mountstats line, including fs-provided statistics. */
static int show_vfsstat(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	int err = 0;

	/* device */
	if (mnt->mnt_devname) {
		seq_puts(m, "device ");
		mangle(m, mnt->mnt_devname);
	} else
		seq_puts(m, "no device");

	/* mount point */
	seq_puts(m, " mounted on ");
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');

	/* file system type */
	seq_puts(m, "with fstype ");
	show_type(m, mnt->mnt_sb);

	/* optional statistics */
	if (mnt->mnt_sb->s_op->show_stats) {
		seq_putc(m, ' ');
		err = mnt->mnt_sb->s_op->show_stats(m, mnt);
	}

	seq_putc(m, '\n');
	return err;
}

const struct seq_operations mountstats_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsstat,
};
#endif  /* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
	int actual_refs = 0;
	int minimum_refs = 0;
	struct vfsmount *p;

	spin_lock(&vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += atomic_read(&p->mnt_count);
		/* each mount's baseline: one ref from parent, one from tree */
		minimum_refs += 2;
	}
	spin_unlock(&vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;	/* busy */

	return 1;		/* free to unmount */
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	spin_lock(&vfsmount_lock);
	if (propagate_mount_busy(mnt, 2))
		ret = 0;
	spin_unlock(&vfsmount_lock);
	return ret;
}

EXPORT_SYMBOL(may_umount);

/*
 * Drop the references held by a list of mounts previously severed by
 * umount_tree().  Must be called without vfsmount_lock held.
 */
void release_mounts(struct list_head *head)
{
	struct vfsmount *mnt;
	while (!list_empty(head)) {
		mnt = list_first_entry(head, struct vfsmount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt->mnt_parent != mnt) {
			struct dentry *dentry;
			struct vfsmount *m;
			spin_lock(&vfsmount_lock);
			dentry = mnt->mnt_mountpoint;
			m = mnt->mnt_parent;
			mnt->mnt_mountpoint = mnt->mnt_root;
			mnt->mnt_parent = mnt;
			m->mnt_ghosts--;
			spin_unlock(&vfsmount_lock);
			/* drop parent/mountpoint refs outside the lock */
			dput(dentry);
			mntput(m);
		}
		mntput(mnt);
	}
}

/*
 * Detach every mount in the tree at @mnt from the namespace and collect
 * them on @kill for release_mounts().  Caller holds vfsmount_lock.
 */
void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
	struct vfsmount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		list_move(&p->mnt_hash, kill);

	if (propagate)
		propagate_umount(kill);

	list_for_each_entry(p, kill, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		list_del_init(&p->mnt_child);
		if (p->mnt_parent != p) {
			/* parent keeps a "ghost" until release_mounts() */
			p->mnt_parent->mnt_ghosts++;
			p->mnt_mountpoint->d_mounted--;
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
}

static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);

/* Core of sys_umount(); validates flags and performs the detach. */
static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt_sb;
	int retval;
	LIST_HEAD(umount_list);

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		if (atomic_read(&mnt->mnt_count) != 2)
			return -EBUSY;

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee thats tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. Thats for the mount program to worry
	 * about for the moment.
	 */
	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	event++;

	if (!(flags & MNT_DETACH))
		shrink_submounts(mnt, &umount_list);

	retval = -EBUSY;
	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 1, &umount_list);
		retval = 0;
	}
	spin_unlock(&vfsmount_lock);
	if (retval)
		security_sb_umount_busy(mnt);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	return retval;
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	struct path path;
	int retval;

	retval = user_path(name, &path);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(path.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(path.mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(path.mnt);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return sys_umount(name, 0);
}

#endif

/* Permission check for bind/move: currently CAP_SYS_ADMIN only. */
static int mount_is_safe(struct path *path)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	/* dead code: sketch of a future unprivileged-mount policy */
	if (S_ISLNK(path->dentry->d_inode->i_mode))
		return -EPERM;
	if (path->dentry->d_inode->i_mode & S_ISVTX) {
		if (current_uid() != path->dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (inode_permission(path->dentry->d_inode, MAY_WRITE))
		return -EPERM;
	return 0;
#endif
}

/*
 * Clone the whole mount tree rooted at @mnt/@dentry (subject to the CL_*
 * flags); returns the new detached tree or NULL on allocation failure.
 */
struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
					int flag)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct path path;

	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
		return NULL;

	res = q = clone_mnt(mnt, dentry, flag);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			/* climb both trees in lockstep to s's parent level */
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			path.mnt = q;
			path.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root, flag);
			if (!q)
				goto Enomem;
			spin_lock(&vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &path);
			spin_unlock(&vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		/* tear down whatever we managed to clone so far */
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(res, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}
	return NULL;
}

/* Snapshot a mount tree for audit-style walkers (private, full copy). */
struct vfsmount *collect_mounts(struct path *path)
{
	struct vfsmount *tree;
	down_write(&namespace_sem);
	tree = copy_tree(path->mnt, path->dentry, CL_COPY_ALL | CL_PRIVATE);
	up_write(&namespace_sem);
	return tree;
}

/* Release a tree obtained from collect_mounts(). */
void drop_collected_mounts(struct vfsmount *mnt)
{
	LIST_HEAD(umount_list);
	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	umount_tree(mnt, 0, &umount_list);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
}

/* Undo invent_group_ids() for mounts in [@mnt, @end) of the walk. */
static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
{
	struct vfsmount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

/* Allocate peer-group ids for @mnt (and its tree if @recurse). */
static int invent_group_ids(struct vfsmount *mnt, bool recurse)
{
	struct vfsmount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}

/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *  		   store the parent mount and mountpoint dentry.
 *  		   (done when source_mnt is moved)
 *
 *  NOTE: in the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 * 	 tree of the destination mount and the cloned mount is added to
 * 	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 * 	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |         		MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)  the mount is moved to the destination. And is then propagated to
 * 	all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++)  the mount is moved to the destination and is then propagated to
 * 	all the mounts belonging to the destination mount's propagation tree.
 * 	the mount is marked as 'shared and slave'.
 * (*)	the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above is
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct vfsmount *source_mnt,
			struct path *path, struct path *parent_path)
{
	LIST_HEAD(tree_list);
	struct vfsmount *dest_mnt = path->mnt;
	struct dentry *dest_dentry = path->dentry;
	struct vfsmount *child, *p;
	int err;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
	}
	/* clone the tree onto every peer/slave of the destination */
	err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
	if (err)
		goto out_cleanup_ids;

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}

	spin_lock(&vfsmount_lock);
	if (parent_path) {
		/* move: unhook from the old place, hook in at the new one */
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, path);
		touch_mnt_namespace(parent_path->mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
		commit_tree(source_mnt);
	}

	/* commit the propagated copies collected by propagate_mnt() */
	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	spin_unlock(&vfsmount_lock);
	return 0;

 out_cleanup_ids:
	if (IS_MNT_SHARED(dest_mnt))
		cleanup_group_ids(source_mnt, NULL);
 out:
	return err;
}

/* Validate @path as a mountpoint for @mnt and attach the tree there. */
static int graft_tree(struct vfsmount *mnt, struct path *path)
{
	int err;
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	/* directory must go on a directory, non-dir on a non-dir */
	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	mutex_lock(&path->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(path->dentry->d_inode))
		goto out_unlock;

	err = security_sb_check_sb(mnt, path);
	if (err)
		goto out_unlock;

	err = -ENOENT;
	if (!d_unlinked(path->dentry))
		err = attach_recursive_mnt(mnt, path, NULL);
out_unlock:
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	if (!err)
		security_sb_post_addmount(mnt, path);
	return err;
}

/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int flag)
{
	struct vfsmount *m, *mnt = path->mnt;
	int recurse = flag & MS_REC;
	int type = flag & ~MS_REC;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* only the root of a mount may have its type changed */
	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	down_write(&namespace_sem);
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	spin_lock(&vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	spin_unlock(&vfsmount_lock);

 out_unlock:
	up_write(&namespace_sem);
	return err;
}

/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, char *old_name,
				int recurse)
{
	struct path old_path;
	struct vfsmount *mnt = NULL;
	int err = mount_is_safe(path);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	down_write(&namespace_sem);
	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old_path.mnt))
		goto out;

	/* both source and destination must be in our namespace */
	if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
		goto out;

	err = -ENOMEM;
	if (recurse)
		mnt = copy_tree(old_path.mnt, old_path.dentry, 0);
	else
		mnt = clone_mnt(old_path.mnt, old_path.dentry, 0);

	if (!mnt)
		goto out;

	err = graft_tree(mnt, path);
	if (err) {
		/* attach failed: dispose of the freshly cloned tree */
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(mnt, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}

out:
	up_write(&namespace_sem);
	path_put(&old_path);
	return err;
}

/* Apply MS_RDONLY from a remount request to the per-mount flags. */
static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;	/* already in the requested state */

	if (readonly_request)
		error = mnt_make_readonly(mnt);
	else
		__mnt_unmake_readonly(mnt);
	return error;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(path->mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		/* remount of a bind mount only touches per-mount flags */
		err = change_mount_flags(path->mnt, flags);
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err)
		path->mnt->mnt_flags = mnt_flags;
	up_write(&sb->s_umount);
	if (!err) {
		security_sb_post_remount(path->mnt, flags, data);

		spin_lock(&vfsmount_lock);
		touch_mnt_namespace(path->mnt->mnt_ns);
		spin_unlock(&vfsmount_lock);
	}
	return err;
}

/* Is any mount in the tree at @mnt marked MS_UNBINDABLE? */
static inline int tree_contains_unbindable(struct vfsmount *mnt)
{
	struct vfsmount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

/* MS_MOVE: relocate the mount at @old_name onto @path. */
static int do_move_mount(struct path *path, char *old_name)
{
	struct path old_path, parent_path;
	struct vfsmount *p;
	int err = 0;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	down_write(&namespace_sem);
	/* land on top of whatever is currently mounted at the target */
	while (d_mountpoint(path->dentry) &&
	       follow_down(path))
		;
	err = -EINVAL;
	if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
		goto out;

	err = -ENOENT;
	mutex_lock(&path->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(path->dentry->d_inode))
		goto out1;

	if (d_unlinked(path->dentry))
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	/* a detached (self-parented) mount cannot be moved */
	if (old_path.mnt == old_path.mnt->mnt_parent)
		goto out1;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(old_path.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (old_path.mnt->mnt_parent &&
	    IS_MNT_SHARED(old_path.mnt->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(path->mnt) &&
	    tree_contains_unbindable(old_path.mnt))
		goto out1;
	err = -ELOOP;
	/* refuse to move a mount underneath itself */
	for (p = path->mnt; p->mnt_parent != p; p = p->mnt_parent)
		if (p == old_path.mnt)
			goto out1;

	err = attach_recursive_mnt(old_path.mnt, path, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expire
	 * automatically */
	list_del_init(&old_path.mnt->mnt_expire);
out1:
	mutex_unlock(&path->dentry->d_inode->i_mutex);
out:
	up_write(&namespace_sem);
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;

	if (!type)
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	lock_kernel();
	mnt = do_kern_mount(type, flags, name, data);
	unlock_kernel();
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	return do_add_mount(mnt, path, mnt_flags, NULL);
}

/*
 * add a mount into a namespace's mount tree
 * - provide the option of adding the new mount to an expiration list
 */
int do_add_mount(struct vfsmount *newmnt, struct path *path,
		 int mnt_flags, struct list_head *fslist)
{
	int err;

	down_write(&namespace_sem);
	/* Something was mounted here while we slept */
	while (d_mountpoint(path->dentry) &&
	       follow_down(path))
		;
	err = -EINVAL;
	if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(path->mnt))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt_flags = mnt_flags;
	if ((err = graft_tree(newmnt, path)))
		goto unlock;

	if (fslist) /* add to the specified expiration list */
		list_add_tail(&newmnt->mnt_expire, fslist);

	up_write(&namespace_sem);
	return 0;

unlock:
	up_write(&namespace_sem);
	mntput(newmnt);	/* consume the caller's reference on failure */
	return err;
}

EXPORT_SYMBOL_GPL(do_add_mount);

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct vfsmount *mnt, *next;
	LIST_HEAD(graveyard);
	LIST_HEAD(umounts);

	if (list_empty(mounts))
		return;

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, 1, &umounts);
	}
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);

	release_mounts(&umounts);
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);

/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct vfsmount *parent, struct list_head *graveyard)
{
	struct vfsmount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 */
static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
{
	LIST_HEAD(graveyard);
	struct vfsmount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct vfsmount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, 1, umounts);
		}
	}
}

/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			/* fault: zero the tail and report bytes remaining */
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}

/*
 * Copy up to one page of mount options from userspace into a fresh page;
 * returns the page's address in *where (0 if @data was NULL).
 */
int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid.  Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE !
	 */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

/* Duplicate a user-supplied string (fs type / device name) into *where. */
int copy_mount_string(const void __user *data, char **where)
{
	char *tmp;

	if (!data) {
		*where = NULL;
		return 0;
	}

	tmp = strndup_user(data, PAGE_SIZE);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	*where = tmp;
	return 0;
}

/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
*/ long do_mount(char *dev_name, char *dir_name, char *type_page, unsigned long flags, void *data_page) { struct path path; int retval = 0; int mnt_flags = 0; /* Discard magic */ if ((flags & MS_MGC_MSK) == MS_MGC_VAL) flags &= ~MS_MGC_MSK; /* Basic sanity checks */ if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE)) return -EINVAL; if (data_page) ((char *)data_page)[PAGE_SIZE - 1] = 0; /* Default to relatime unless overriden */ if (!(flags & MS_NOATIME)) mnt_flags |= MNT_RELATIME; /* Separate the per-mountpoint flags */ if (flags & MS_NOSUID) mnt_flags |= MNT_NOSUID; if (flags & MS_NODEV) mnt_flags |= MNT_NODEV; if (flags & MS_NOEXEC) mnt_flags |= MNT_NOEXEC; if (flags & MS_NOATIME) mnt_flags |= MNT_NOATIME; if (flags & MS_NODIRATIME) mnt_flags |= MNT_NODIRATIME; if (flags & MS_STRICTATIME) mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); if (flags & MS_RDONLY) mnt_flags |= MNT_READONLY; flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | MS_STRICTATIME); /* ... 
and get the mountpoint */ retval = kern_path(dir_name, LOOKUP_FOLLOW, &path); if (retval) return retval; retval = security_sb_mount(dev_name, &path, type_page, flags, data_page); if (retval) goto dput_out; if (flags & MS_REMOUNT) retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, data_page); else if (flags & MS_BIND) retval = do_loopback(&path, dev_name, flags & MS_REC); else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) retval = do_change_type(&path, flags); else if (flags & MS_MOVE) retval = do_move_mount(&path, dev_name); else retval = do_new_mount(&path, type_page, flags, mnt_flags, dev_name, data_page); dput_out: path_put(&path); return retval; } static struct mnt_namespace *alloc_mnt_ns(void) { struct mnt_namespace *new_ns; new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL); if (!new_ns) return ERR_PTR(-ENOMEM); atomic_set(&new_ns->count, 1); new_ns->root = NULL; INIT_LIST_HEAD(&new_ns->list); init_waitqueue_head(&new_ns->poll); new_ns->event = 0; return new_ns; } /* * Allocate a new namespace structure and populate it with contents * copied from the namespace of the passed in task structure. */ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, struct fs_struct *fs) { struct mnt_namespace *new_ns; struct vfsmount *rootmnt = NULL, *pwdmnt = NULL; struct vfsmount *p, *q; new_ns = alloc_mnt_ns(); if (IS_ERR(new_ns)) return new_ns; down_write(&namespace_sem); /* First pass: copy the tree topology */ new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root, CL_COPY_ALL | CL_EXPIRE); if (!new_ns->root) { up_write(&namespace_sem); kfree(new_ns); return ERR_PTR(-ENOMEM); } spin_lock(&vfsmount_lock); list_add_tail(&new_ns->list, &new_ns->root->mnt_list); spin_unlock(&vfsmount_lock); /* * Second pass: switch the tsk->fs->* elements and mark new vfsmounts * as belonging to new namespace. We have already acquired a private * fs_struct, so tsk->fs->lock is not needed. 
*/ p = mnt_ns->root; q = new_ns->root; while (p) { q->mnt_ns = new_ns; if (fs) { if (p == fs->root.mnt) { rootmnt = p; fs->root.mnt = mntget(q); } if (p == fs->pwd.mnt) { pwdmnt = p; fs->pwd.mnt = mntget(q); } } p = next_mnt(p, mnt_ns->root); q = next_mnt(q, new_ns->root); } up_write(&namespace_sem); if (rootmnt) mntput(rootmnt); if (pwdmnt) mntput(pwdmnt); return new_ns; } struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, struct fs_struct *new_fs) { struct mnt_namespace *new_ns; BUG_ON(!ns); get_mnt_ns(ns); if (!(flags & CLONE_NEWNS)) return ns; new_ns = dup_mnt_ns(ns, new_fs); put_mnt_ns(ns); return new_ns; } /** * create_mnt_ns - creates a private namespace and adds a root filesystem * @mnt: pointer to the new root filesystem mountpoint */ struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt) { struct mnt_namespace *new_ns; new_ns = alloc_mnt_ns(); if (!IS_ERR(new_ns)) { mnt->mnt_ns = new_ns; new_ns->root = mnt; list_add(&new_ns->list, &new_ns->root->mnt_list); } return new_ns; } EXPORT_SYMBOL(create_mnt_ns); SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, char __user *, type, unsigned long, flags, void __user *, data) { int ret; char *kernel_type; char *kernel_dir; char *kernel_dev; unsigned long data_page; ret = copy_mount_string(type, &kernel_type); if (ret < 0) goto out_type; kernel_dir = getname(dir_name); if (IS_ERR(kernel_dir)) { ret = PTR_ERR(kernel_dir); goto out_dir; } ret = copy_mount_string(dev_name, &kernel_dev); if (ret < 0) goto out_dev; ret = copy_mount_options(data, &data_page); if (ret < 0) goto out_data; ret = do_mount(kernel_dev, kernel_dir, kernel_type, flags, (void *) data_page); free_page(data_page); out_data: kfree(kernel_dev); out_dev: putname(kernel_dir); out_dir: kfree(kernel_type); out_type: return ret; } /* * pivot_root Semantics: * Moves the root file system of the current process to the directory put_old, * makes new_root as the new root file system of the current process, 
and sets * root/cwd of all processes which had them on the current root to new_root. * * Restrictions: * The new_root and put_old must be directories, and must not be on the * same file system as the current process root. The put_old must be * underneath new_root, i.e. adding a non-zero number of /.. to the string * pointed to by put_old must yield the same directory as new_root. No other * file system may be mounted on put_old. After all, new_root is a mountpoint. * * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem. * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives * in this situation. * * Notes: * - we don't move root/cwd if they are not at the root (reason: if something * cared enough to change them, it's probably wrong to force them elsewhere) * - it's okay to pick a root that isn't the root of a file system, e.g. * /nfs/my_root where /nfs is the mount point. It must be a mountpoint, * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root * first. 
*/ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, const char __user *, put_old) { struct vfsmount *tmp; struct path new, old, parent_path, root_parent, root; int error; if (!capable(CAP_SYS_ADMIN)) return -EPERM; error = user_path_dir(new_root, &new); if (error) goto out0; error = -EINVAL; if (!check_mnt(new.mnt)) goto out1; error = user_path_dir(put_old, &old); if (error) goto out1; error = security_sb_pivotroot(&old, &new); if (error) { path_put(&old); goto out1; } read_lock(&current->fs->lock); root = current->fs->root; path_get(&current->fs->root); read_unlock(&current->fs->lock); down_write(&namespace_sem); mutex_lock(&old.dentry->d_inode->i_mutex); error = -EINVAL; if (IS_MNT_SHARED(old.mnt) || IS_MNT_SHARED(new.mnt->mnt_parent) || IS_MNT_SHARED(root.mnt->mnt_parent)) goto out2; if (!check_mnt(root.mnt)) goto out2; error = -ENOENT; if (IS_DEADDIR(new.dentry->d_inode)) goto out2; if (d_unlinked(new.dentry)) goto out2; if (d_unlinked(old.dentry)) goto out2; error = -EBUSY; if (new.mnt == root.mnt || old.mnt == root.mnt) goto out2; /* loop, on the same file system */ error = -EINVAL; if (root.mnt->mnt_root != root.dentry) goto out2; /* not a mountpoint */ if (root.mnt->mnt_parent == root.mnt) goto out2; /* not attached */ if (new.mnt->mnt_root != new.dentry) goto out2; /* not a mountpoint */ if (new.mnt->mnt_parent == new.mnt) goto out2; /* not attached */ /* make sure we can reach put_old from new_root */ tmp = old.mnt; spin_lock(&vfsmount_lock); if (tmp != new.mnt) { for (;;) { if (tmp->mnt_parent == tmp) goto out3; /* already mounted on put_old */ if (tmp->mnt_parent == new.mnt) break; tmp = tmp->mnt_parent; } if (!is_subdir(tmp->mnt_mountpoint, new.dentry)) goto out3; } else if (!is_subdir(old.dentry, new.dentry)) goto out3; detach_mnt(new.mnt, &parent_path); detach_mnt(root.mnt, &root_parent); /* mount old root on put_old */ attach_mnt(root.mnt, &old); /* mount new_root on / */ attach_mnt(new.mnt, &root_parent); 
touch_mnt_namespace(current->nsproxy->mnt_ns); spin_unlock(&vfsmount_lock); chroot_fs_refs(&root, &new); security_sb_post_pivotroot(&root, &new); error = 0; path_put(&root_parent); path_put(&parent_path); out2: mutex_unlock(&old.dentry->d_inode->i_mutex); up_write(&namespace_sem); path_put(&root); path_put(&old); out1: path_put(&new); out0: return error; out3: spin_unlock(&vfsmount_lock); goto out2; } static void __init init_mount_tree(void) { struct vfsmount *mnt; struct mnt_namespace *ns; struct path root; mnt = do_kern_mount("rootfs", 0, "rootfs", NULL); if (IS_ERR(mnt)) panic("Can't create rootfs"); ns = create_mnt_ns(mnt); if (IS_ERR(ns)) panic("Can't allocate initial namespace"); init_task.nsproxy->mnt_ns = ns; get_mnt_ns(ns); root.mnt = ns->root; root.dentry = ns->root->mnt_root; set_fs_pwd(current->fs, &root); set_fs_root(current->fs, &root); } void __init mnt_init(void) { unsigned u; int err; init_rwsem(&namespace_sem); mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); if (!mount_hashtable) panic("Failed to allocate mount hash table\n"); printk("Mount-cache hash table entries: %lu\n", HASH_SIZE); for (u = 0; u < HASH_SIZE; u++) INIT_LIST_HEAD(&mount_hashtable[u]); err = sysfs_init(); if (err) printk(KERN_WARNING "%s: sysfs_init error: %d\n", __func__, err); fs_kobj = kobject_create_and_add("fs", NULL); if (!fs_kobj) printk(KERN_WARNING "%s: kobj create error\n", __func__); init_rootfs(); init_mount_tree(); } void put_mnt_ns(struct mnt_namespace *ns) { struct vfsmount *root; LIST_HEAD(umount_list); if (!atomic_dec_and_lock(&ns->count, &vfsmount_lock)) return; root = ns->root; ns->root = NULL; spin_unlock(&vfsmount_lock); down_write(&namespace_sem); spin_lock(&vfsmount_lock); umount_tree(root, 0, &umount_list); spin_unlock(&vfsmount_lock); up_write(&namespace_sem); release_mounts(&umount_list); kfree(ns); } EXPORT_SYMBOL(put_mnt_ns);
gpl-2.0
moonman/linux-imx6-3.14
drivers/firmware/dcdbas.c
316
15968
/* * dcdbas.c: Dell Systems Management Base Driver * * The Dell Systems Management Base Driver provides a sysfs interface for * systems management software to perform System Management Interrupts (SMIs) * and Host Control Actions (power cycle or power off after OS shutdown) on * Dell systems. * * See Documentation/dcdbas.txt for more information. * * Copyright (C) 1995-2006 Dell Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License v2.0 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/gfp.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mc146818rtc.h> #include <linux/module.h> #include <linux/reboot.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> #include <linux/mutex.h> #include <asm/io.h> #include "dcdbas.h" #define DRIVER_NAME "dcdbas" #define DRIVER_VERSION "5.6.0-3.2" #define DRIVER_DESCRIPTION "Dell Systems Management Base Driver" static struct platform_device *dcdbas_pdev; static u8 *smi_data_buf; static dma_addr_t smi_data_buf_handle; static unsigned long smi_data_buf_size; static u32 smi_data_buf_phys_addr; static DEFINE_MUTEX(smi_data_lock); static unsigned int host_control_action; static unsigned int host_control_smi_type; static unsigned int host_control_on_shutdown; /** * smi_data_buf_free: free SMI data buffer */ static void smi_data_buf_free(void) { if (!smi_data_buf) return; dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n", __func__, smi_data_buf_phys_addr, smi_data_buf_size); dma_free_coherent(&dcdbas_pdev->dev, 
smi_data_buf_size, smi_data_buf, smi_data_buf_handle); smi_data_buf = NULL; smi_data_buf_handle = 0; smi_data_buf_phys_addr = 0; smi_data_buf_size = 0; } /** * smi_data_buf_realloc: grow SMI data buffer if needed */ static int smi_data_buf_realloc(unsigned long size) { void *buf; dma_addr_t handle; if (smi_data_buf_size >= size) return 0; if (size > MAX_SMI_DATA_BUF_SIZE) return -EINVAL; /* new buffer is needed */ buf = dma_alloc_coherent(&dcdbas_pdev->dev, size, &handle, GFP_KERNEL); if (!buf) { dev_dbg(&dcdbas_pdev->dev, "%s: failed to allocate memory size %lu\n", __func__, size); return -ENOMEM; } /* memory zeroed by dma_alloc_coherent */ if (smi_data_buf) memcpy(buf, smi_data_buf, smi_data_buf_size); /* free any existing buffer */ smi_data_buf_free(); /* set up new buffer for use */ smi_data_buf = buf; smi_data_buf_handle = handle; smi_data_buf_phys_addr = (u32) virt_to_phys(buf); smi_data_buf_size = size; dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n", __func__, smi_data_buf_phys_addr, smi_data_buf_size); return 0; } static ssize_t smi_data_buf_phys_addr_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%x\n", smi_data_buf_phys_addr); } static ssize_t smi_data_buf_size_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", smi_data_buf_size); } static ssize_t smi_data_buf_size_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long buf_size; ssize_t ret; buf_size = simple_strtoul(buf, NULL, 10); /* make sure SMI data buffer is at least buf_size */ mutex_lock(&smi_data_lock); ret = smi_data_buf_realloc(buf_size); mutex_unlock(&smi_data_lock); if (ret) return ret; return count; } static ssize_t smi_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { ssize_t ret; mutex_lock(&smi_data_lock); ret = memory_read_from_buffer(buf, count, &pos, smi_data_buf, 
smi_data_buf_size); mutex_unlock(&smi_data_lock); return ret; } static ssize_t smi_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { ssize_t ret; if ((pos + count) > MAX_SMI_DATA_BUF_SIZE) return -EINVAL; mutex_lock(&smi_data_lock); ret = smi_data_buf_realloc(pos + count); if (ret) goto out; memcpy(smi_data_buf + pos, buf, count); ret = count; out: mutex_unlock(&smi_data_lock); return ret; } static ssize_t host_control_action_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%u\n", host_control_action); } static ssize_t host_control_action_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { ssize_t ret; /* make sure buffer is available for host control command */ mutex_lock(&smi_data_lock); ret = smi_data_buf_realloc(sizeof(struct apm_cmd)); mutex_unlock(&smi_data_lock); if (ret) return ret; host_control_action = simple_strtoul(buf, NULL, 10); return count; } static ssize_t host_control_smi_type_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%u\n", host_control_smi_type); } static ssize_t host_control_smi_type_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { host_control_smi_type = simple_strtoul(buf, NULL, 10); return count; } static ssize_t host_control_on_shutdown_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%u\n", host_control_on_shutdown); } static ssize_t host_control_on_shutdown_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { host_control_on_shutdown = simple_strtoul(buf, NULL, 10); return count; } /** * dcdbas_smi_request: generate SMI request * * Called with smi_data_lock. 
*/ int dcdbas_smi_request(struct smi_cmd *smi_cmd) { cpumask_var_t old_mask; int ret = 0; if (smi_cmd->magic != SMI_CMD_MAGIC) { dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n", __func__); return -EBADR; } /* SMI requires CPU 0 */ if (!alloc_cpumask_var(&old_mask, GFP_KERNEL)) return -ENOMEM; cpumask_copy(old_mask, &current->cpus_allowed); set_cpus_allowed_ptr(current, cpumask_of(0)); if (smp_processor_id() != 0) { dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", __func__); ret = -EBUSY; goto out; } /* generate SMI */ /* inb to force posted write through and make SMI happen now */ asm volatile ( "outb %b0,%w1\n" "inb %w1" : /* no output args */ : "a" (smi_cmd->command_code), "d" (smi_cmd->command_address), "b" (smi_cmd->ebx), "c" (smi_cmd->ecx) : "memory" ); out: set_cpus_allowed_ptr(current, old_mask); free_cpumask_var(old_mask); return ret; } /** * smi_request_store: * * The valid values are: * 0: zero SMI data buffer * 1: generate calling interface SMI * 2: generate raw SMI * * User application writes smi_cmd to smi_data before telling driver * to generate SMI. 
*/ static ssize_t smi_request_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct smi_cmd *smi_cmd; unsigned long val = simple_strtoul(buf, NULL, 10); ssize_t ret; mutex_lock(&smi_data_lock); if (smi_data_buf_size < sizeof(struct smi_cmd)) { ret = -ENODEV; goto out; } smi_cmd = (struct smi_cmd *)smi_data_buf; switch (val) { case 2: /* Raw SMI */ ret = dcdbas_smi_request(smi_cmd); if (!ret) ret = count; break; case 1: /* Calling Interface SMI */ smi_cmd->ebx = (u32) virt_to_phys(smi_cmd->command_buffer); ret = dcdbas_smi_request(smi_cmd); if (!ret) ret = count; break; case 0: memset(smi_data_buf, 0, smi_data_buf_size); ret = count; break; default: ret = -EINVAL; break; } out: mutex_unlock(&smi_data_lock); return ret; } EXPORT_SYMBOL(dcdbas_smi_request); /** * host_control_smi: generate host control SMI * * Caller must set up the host control command in smi_data_buf. */ static int host_control_smi(void) { struct apm_cmd *apm_cmd; u8 *data; unsigned long flags; u32 num_ticks; s8 cmd_status; u8 index; apm_cmd = (struct apm_cmd *)smi_data_buf; apm_cmd->status = ESM_STATUS_CMD_UNSUCCESSFUL; switch (host_control_smi_type) { case HC_SMITYPE_TYPE1: spin_lock_irqsave(&rtc_lock, flags); /* write SMI data buffer physical address */ data = (u8 *)&smi_data_buf_phys_addr; for (index = PE1300_CMOS_CMD_STRUCT_PTR; index < (PE1300_CMOS_CMD_STRUCT_PTR + 4); index++, data++) { outb(index, (CMOS_BASE_PORT + CMOS_PAGE2_INDEX_PORT_PIIX4)); outb(*data, (CMOS_BASE_PORT + CMOS_PAGE2_DATA_PORT_PIIX4)); } /* first set status to -1 as called by spec */ cmd_status = ESM_STATUS_CMD_UNSUCCESSFUL; outb((u8) cmd_status, PCAT_APM_STATUS_PORT); /* generate SMM call */ outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT); spin_unlock_irqrestore(&rtc_lock, flags); /* wait a few to see if it executed */ num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING; while ((cmd_status = inb(PCAT_APM_STATUS_PORT)) == ESM_STATUS_CMD_UNSUCCESSFUL) { num_ticks--; if (num_ticks == 
EXPIRED_TIMER) return -ETIME; } break; case HC_SMITYPE_TYPE2: case HC_SMITYPE_TYPE3: spin_lock_irqsave(&rtc_lock, flags); /* write SMI data buffer physical address */ data = (u8 *)&smi_data_buf_phys_addr; for (index = PE1400_CMOS_CMD_STRUCT_PTR; index < (PE1400_CMOS_CMD_STRUCT_PTR + 4); index++, data++) { outb(index, (CMOS_BASE_PORT + CMOS_PAGE1_INDEX_PORT)); outb(*data, (CMOS_BASE_PORT + CMOS_PAGE1_DATA_PORT)); } /* generate SMM call */ if (host_control_smi_type == HC_SMITYPE_TYPE3) outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT); else outb(ESM_APM_CMD, PE1400_APM_CONTROL_PORT); /* restore RTC index pointer since it was written to above */ CMOS_READ(RTC_REG_C); spin_unlock_irqrestore(&rtc_lock, flags); /* read control port back to serialize write */ cmd_status = inb(PE1400_APM_CONTROL_PORT); /* wait a few to see if it executed */ num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING; while (apm_cmd->status == ESM_STATUS_CMD_UNSUCCESSFUL) { num_ticks--; if (num_ticks == EXPIRED_TIMER) return -ETIME; } break; default: dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n", __func__, host_control_smi_type); return -ENOSYS; } return 0; } /** * dcdbas_host_control: initiate host control * * This function is called by the driver after the system has * finished shutting down if the user application specified a * host control action to perform on shutdown. It is safe to * use smi_data_buf at this point because the system has finished * shutting down and no userspace apps are running. 
*/ static void dcdbas_host_control(void) { struct apm_cmd *apm_cmd; u8 action; if (host_control_action == HC_ACTION_NONE) return; action = host_control_action; host_control_action = HC_ACTION_NONE; if (!smi_data_buf) { dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __func__); return; } if (smi_data_buf_size < sizeof(struct apm_cmd)) { dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n", __func__); return; } apm_cmd = (struct apm_cmd *)smi_data_buf; /* power off takes precedence */ if (action & HC_ACTION_HOST_CONTROL_POWEROFF) { apm_cmd->command = ESM_APM_POWER_CYCLE; apm_cmd->reserved = 0; *((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 0; host_control_smi(); } else if (action & HC_ACTION_HOST_CONTROL_POWERCYCLE) { apm_cmd->command = ESM_APM_POWER_CYCLE; apm_cmd->reserved = 0; *((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 20; host_control_smi(); } } /** * dcdbas_reboot_notify: handle reboot notification for host control */ static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused) { switch (code) { case SYS_DOWN: case SYS_HALT: case SYS_POWER_OFF: if (host_control_on_shutdown) { /* firmware is going to perform host control action */ printk(KERN_WARNING "Please wait for shutdown " "action to complete...\n"); dcdbas_host_control(); } break; } return NOTIFY_DONE; } static struct notifier_block dcdbas_reboot_nb = { .notifier_call = dcdbas_reboot_notify, .next = NULL, .priority = INT_MIN }; static DCDBAS_BIN_ATTR_RW(smi_data); static struct bin_attribute *dcdbas_bin_attrs[] = { &bin_attr_smi_data, NULL }; static DCDBAS_DEV_ATTR_RW(smi_data_buf_size); static DCDBAS_DEV_ATTR_RO(smi_data_buf_phys_addr); static DCDBAS_DEV_ATTR_WO(smi_request); static DCDBAS_DEV_ATTR_RW(host_control_action); static DCDBAS_DEV_ATTR_RW(host_control_smi_type); static DCDBAS_DEV_ATTR_RW(host_control_on_shutdown); static struct attribute *dcdbas_dev_attrs[] = { &dev_attr_smi_data_buf_size.attr, &dev_attr_smi_data_buf_phys_addr.attr, 
&dev_attr_smi_request.attr, &dev_attr_host_control_action.attr, &dev_attr_host_control_smi_type.attr, &dev_attr_host_control_on_shutdown.attr, NULL }; static struct attribute_group dcdbas_attr_group = { .attrs = dcdbas_dev_attrs, .bin_attrs = dcdbas_bin_attrs, }; static int dcdbas_probe(struct platform_device *dev) { int error; host_control_action = HC_ACTION_NONE; host_control_smi_type = HC_SMITYPE_NONE; dcdbas_pdev = dev; /* * BIOS SMI calls require buffer addresses be in 32-bit address space. * This is done by setting the DMA mask below. */ error = dma_set_coherent_mask(&dcdbas_pdev->dev, DMA_BIT_MASK(32)); if (error) return error; error = sysfs_create_group(&dev->dev.kobj, &dcdbas_attr_group); if (error) return error; register_reboot_notifier(&dcdbas_reboot_nb); dev_info(&dev->dev, "%s (version %s)\n", DRIVER_DESCRIPTION, DRIVER_VERSION); return 0; } static int dcdbas_remove(struct platform_device *dev) { unregister_reboot_notifier(&dcdbas_reboot_nb); sysfs_remove_group(&dev->dev.kobj, &dcdbas_attr_group); return 0; } static struct platform_driver dcdbas_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, .probe = dcdbas_probe, .remove = dcdbas_remove, }; static const struct platform_device_info dcdbas_dev_info __initdata = { .name = DRIVER_NAME, .id = -1, .dma_mask = DMA_BIT_MASK(32), }; static struct platform_device *dcdbas_pdev_reg; /** * dcdbas_init: initialize driver */ static int __init dcdbas_init(void) { int error; error = platform_driver_register(&dcdbas_driver); if (error) return error; dcdbas_pdev_reg = platform_device_register_full(&dcdbas_dev_info); if (IS_ERR(dcdbas_pdev_reg)) { error = PTR_ERR(dcdbas_pdev_reg); goto err_unregister_driver; } return 0; err_unregister_driver: platform_driver_unregister(&dcdbas_driver); return error; } /** * dcdbas_exit: perform driver cleanup */ static void __exit dcdbas_exit(void) { /* * make sure functions that use dcdbas_pdev are called * before platform_device_unregister */ 
unregister_reboot_notifier(&dcdbas_reboot_nb); /* * We have to free the buffer here instead of dcdbas_remove * because only in module exit function we can be sure that * all sysfs attributes belonging to this module have been * released. */ if (dcdbas_pdev) smi_data_buf_free(); platform_device_unregister(dcdbas_pdev_reg); platform_driver_unregister(&dcdbas_driver); } module_init(dcdbas_init); module_exit(dcdbas_exit); MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")"); MODULE_VERSION(DRIVER_VERSION); MODULE_AUTHOR("Dell Inc."); MODULE_LICENSE("GPL"); /* Any System or BIOS claiming to be by Dell */ MODULE_ALIAS("dmi:*:[bs]vnD[Ee][Ll][Ll]*:*");
gpl-2.0
wiktorek140/android_kernel_sony_msm8x60
arch/arm/mach-msm/pil-q6v5-mss.c
316
7102
/* * Copyright (c) 2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/ioport.h> #include <linux/elf.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/of.h> #include <linux/regulator/consumer.h> #include <mach/clk.h> #include "peripheral-loader.h" #include "pil-q6v5.h" /* Q6 Register Offsets */ #define QDSP6SS_RST_EVB 0x010 /* AXI Halting Registers */ #define MSS_Q6_HALT_BASE 0x180 #define MSS_MODEM_HALT_BASE 0x200 #define MSS_NC_HALT_BASE 0x280 /* RMB Status Register Values */ #define STATUS_PBL_SUCCESS 0x1 #define STATUS_XPU_UNLOCKED 0x1 #define STATUS_XPU_UNLOCKED_SCRIBBLED 0x2 /* PBL/MBA interface registers */ #define RMB_MBA_IMAGE 0x00 #define RMB_PBL_STATUS 0x04 #define RMB_MBA_STATUS 0x0C #define PBL_MBA_WAIT_TIMEOUT_US 100000 #define PROXY_TIMEOUT_MS 10000 #define POLL_INTERVAL_US 50 static int pil_mss_power_up(struct device *dev) { int ret; struct q6v5_data *drv = dev_get_drvdata(dev); ret = regulator_enable(drv->vreg); if (ret) dev_err(dev, "Failed to enable regulator.\n"); return ret; } static int pil_mss_power_down(struct device *dev) { struct q6v5_data *drv = dev_get_drvdata(dev); return regulator_disable(drv->vreg); } static int wait_for_mba_ready(struct device *dev) { struct q6v5_data *drv = dev_get_drvdata(dev); int ret; u32 status; /* Wait for PBL completion. 
*/ ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status, status != 0, POLL_INTERVAL_US, PBL_MBA_WAIT_TIMEOUT_US); if (ret) { dev_err(dev, "PBL boot timed out\n"); return ret; } if (status != STATUS_PBL_SUCCESS) { dev_err(dev, "PBL returned unexpected status %d\n", status); return -EINVAL; } /* Wait for MBA completion. */ ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status, status != 0, POLL_INTERVAL_US, PBL_MBA_WAIT_TIMEOUT_US); if (ret) { dev_err(dev, "MBA boot timed out\n"); return ret; } if (status != STATUS_XPU_UNLOCKED && status != STATUS_XPU_UNLOCKED_SCRIBBLED) { dev_err(dev, "MBA returned unexpected status %d\n", status); return -EINVAL; } return 0; } static int pil_mss_shutdown(struct pil_desc *pil) { struct q6v5_data *drv = dev_get_drvdata(pil->dev); pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_Q6_HALT_BASE); pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_MODEM_HALT_BASE); pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_NC_HALT_BASE); /* * If the shutdown function is called before the reset function, clocks * and power will not be enabled yet. Enable them here so that register * writes performed during the shutdown succeed. */ if (drv->is_booted == false) { pil_mss_power_up(pil->dev); pil_q6v5_enable_clks(pil); } pil_q6v5_shutdown(pil); pil_q6v5_disable_clks(pil); pil_mss_power_down(pil->dev); writel_relaxed(1, drv->restart_reg); drv->is_booted = false; return 0; } static int pil_mss_reset(struct pil_desc *pil) { struct q6v5_data *drv = dev_get_drvdata(pil->dev); int ret; writel_relaxed(0, drv->restart_reg); mb(); /* * Bring subsystem out of reset and enable required * regulators and clocks. 
*/ ret = pil_mss_power_up(pil->dev); if (ret) goto err_power; ret = pil_q6v5_enable_clks(pil); if (ret) goto err_clks; /* Program Image Address */ if (drv->self_auth) writel_relaxed(drv->start_addr, drv->rmb_base + RMB_MBA_IMAGE); else writel_relaxed((drv->start_addr >> 4) & 0x0FFFFFF0, drv->reg_base + QDSP6SS_RST_EVB); ret = pil_q6v5_reset(pil); if (ret) goto err_q6v5_reset; /* Wait for MBA to start. Check for PBL and MBA errors while waiting. */ if (drv->self_auth) { ret = wait_for_mba_ready(pil->dev); if (ret) goto err_auth; } drv->is_booted = true; return 0; err_auth: pil_q6v5_shutdown(pil); err_q6v5_reset: pil_q6v5_disable_clks(pil); err_clks: pil_mss_power_down(pil->dev); err_power: return ret; } static struct pil_reset_ops pil_mss_ops = { .init_image = pil_q6v5_init_image, .proxy_vote = pil_q6v5_make_proxy_votes, .proxy_unvote = pil_q6v5_remove_proxy_votes, .auth_and_reset = pil_mss_reset, .shutdown = pil_mss_shutdown, }; static int __devinit pil_mss_driver_probe(struct platform_device *pdev) { struct q6v5_data *drv; struct pil_desc *desc; struct resource *res; int ret; desc = pil_q6v5_init(pdev); if (IS_ERR(desc)) return PTR_ERR(desc); drv = platform_get_drvdata(pdev); if (drv == NULL) return -ENODEV; desc->ops = &pil_mss_ops; desc->owner = THIS_MODULE; desc->proxy_timeout = PROXY_TIMEOUT_MS; of_property_read_u32(pdev->dev.of_node, "qcom,pil-self-auth", &drv->self_auth); if (drv->self_auth) { res = platform_get_resource(pdev, IORESOURCE_MEM, 2); drv->rmb_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!drv->rmb_base) return -ENOMEM; } res = platform_get_resource(pdev, IORESOURCE_MEM, 3); drv->restart_reg = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!drv->restart_reg) return -ENOMEM; drv->vreg = devm_regulator_get(&pdev->dev, "vdd_mss"); if (IS_ERR(drv->vreg)) return PTR_ERR(drv->vreg); ret = regulator_set_voltage(drv->vreg, 1150000, 1150000); if (ret) dev_err(&pdev->dev, "Failed to set regulator's voltage.\n"); ret = 
regulator_set_optimum_mode(drv->vreg, 100000); if (ret < 0) { dev_err(&pdev->dev, "Failed to set regulator's mode.\n"); return ret; } drv->mem_clk = devm_clk_get(&pdev->dev, "mem_clk"); if (IS_ERR(drv->mem_clk)) return PTR_ERR(drv->mem_clk); drv->pil = msm_pil_register(desc); if (IS_ERR(drv->pil)) return PTR_ERR(drv->pil); return 0; } static int __devexit pil_mss_driver_exit(struct platform_device *pdev) { struct q6v5_data *drv = platform_get_drvdata(pdev); msm_pil_unregister(drv->pil); return 0; } static struct of_device_id mss_match_table[] = { { .compatible = "qcom,pil-q6v5-mss" }, {} }; static struct platform_driver pil_mss_driver = { .probe = pil_mss_driver_probe, .remove = __devexit_p(pil_mss_driver_exit), .driver = { .name = "pil-q6v5-mss", .of_match_table = mss_match_table, .owner = THIS_MODULE, }, }; static int __init pil_mss_init(void) { return platform_driver_register(&pil_mss_driver); } module_init(pil_mss_init); static void __exit pil_mss_exit(void) { platform_driver_unregister(&pil_mss_driver); } module_exit(pil_mss_exit); MODULE_DESCRIPTION("Support for booting modem subsystems with QDSP6v5 Hexagon processors"); MODULE_LICENSE("GPL v2");
gpl-2.0
pantoniou/linux-beagle-track-mainline
arch/x86/lib/cmdline.c
316
2500
/* * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. * * Misc librarized functions for cmdline poking. */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/ctype.h> #include <asm/setup.h> static inline int myisspace(u8 c) { return c <= ' '; /* Close enough approximation */ } /** * Find a boolean option (like quiet,noapic,nosmp....) * * @cmdline: the cmdline string * @option: option string to look for * * Returns the position of that @option (starts counting with 1) * or 0 on not found. @option will only be found if it is found * as an entire word in @cmdline. For instance, if @option="car" * then a cmdline which contains "cart" will not match. */ static int __cmdline_find_option_bool(const char *cmdline, int max_cmdline_size, const char *option) { char c; int pos = 0, wstart = 0; const char *opptr = NULL; enum { st_wordstart = 0, /* Start of word/after whitespace */ st_wordcmp, /* Comparing this word */ st_wordskip, /* Miscompare, skip */ } state = st_wordstart; if (!cmdline) return -1; /* No command line */ /* * This 'pos' check ensures we do not overrun * a non-NULL-terminated 'cmdline' */ while (pos < max_cmdline_size) { c = *(char *)cmdline++; pos++; switch (state) { case st_wordstart: if (!c) return 0; else if (myisspace(c)) break; state = st_wordcmp; opptr = option; wstart = pos; /* fall through */ case st_wordcmp: if (!*opptr) { /* * We matched all the way to the end of the * option we were looking for. If the * command-line has a space _or_ ends, then * we matched! */ if (!c || myisspace(c)) return wstart; /* * We hit the end of the option, but _not_ * the end of a word on the cmdline. Not * a match. */ } else if (!c) { /* * Hit the NULL terminator on the end of * cmdline. */ return 0; } else if (c == *opptr++) { /* * We are currently matching, so continue * to the next character on the cmdline. 
*/ break; } state = st_wordskip; /* fall through */ case st_wordskip: if (!c) return 0; else if (myisspace(c)) state = st_wordstart; break; } } return 0; /* Buffer overrun */ } int cmdline_find_option_bool(const char *cmdline, const char *option) { return __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option); }
gpl-2.0
jabez1314/linux
drivers/video/fbdev/via/via-gpio.c
572
7295
/*
 * Support for viafb GPIO ports.
 *
 * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
 * Distributable under version 2 of the GNU General Public License.
 */

#include <linux/spinlock.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/via-core.h>
#include <linux/via-gpio.h>
#include <linux/export.h>

/*
 * The ports we know about.  Note that the port-25 gpios are not
 * mentioned in the datasheet.
 */

struct viafb_gpio {
	char *vg_name;		/* Data sheet name */
	u16 vg_io_port;		/* I/O port group (VIASR here) */
	u8 vg_port_index;	/* register index within that port */
	int vg_mask_shift;	/* bit position of this GPIO in the register */
};

static struct viafb_gpio viafb_all_gpios[] = {
	{
		.vg_name = "VGPIO0",	/* Guess - not in datasheet */
		.vg_io_port = VIASR,
		.vg_port_index = 0x25,
		.vg_mask_shift = 1
	},
	{
		.vg_name = "VGPIO1",
		.vg_io_port = VIASR,
		.vg_port_index = 0x25,
		.vg_mask_shift = 0
	},
	{
		.vg_name = "VGPIO2",	/* aka DISPCLKI0 */
		.vg_io_port = VIASR,
		.vg_port_index = 0x2c,
		.vg_mask_shift = 1
	},
	{
		.vg_name = "VGPIO3",	/* aka DISPCLKO0 */
		.vg_io_port = VIASR,
		.vg_port_index = 0x2c,
		.vg_mask_shift = 0
	},
	{
		.vg_name = "VGPIO4",	/* DISPCLKI1 */
		.vg_io_port = VIASR,
		.vg_port_index = 0x3d,
		.vg_mask_shift = 1
	},
	{
		.vg_name = "VGPIO5",	/* DISPCLKO1 */
		.vg_io_port = VIASR,
		.vg_port_index = 0x3d,
		.vg_mask_shift = 0
	},
};

#define VIAFB_NUM_GPIOS ARRAY_SIZE(viafb_all_gpios)

/*
 * This structure controls the active GPIOs, which may be a subset
 * of those which are known.
 */
struct viafb_gpio_cfg {
	struct gpio_chip gpio_chip;	/* gpiolib registration handle */
	struct viafb_dev *vdev;		/* owning viafb device (holds reg_lock) */
	struct viafb_gpio *active_gpios[VIAFB_NUM_GPIOS];
	const char *gpio_names[VIAFB_NUM_GPIOS];
};

/*
 * GPIO access functions
 */

/*
 * Drive GPIO @nr to @value.  Read-modify-write of the port register
 * under the device register lock; also forces the output-enable bit
 * (0x40 << shift) on, so this doubles as "make it an output".
 */
static void via_gpio_set(struct gpio_chip *chip, unsigned int nr,
			 int value)
{
	struct viafb_gpio_cfg *cfg = gpiochip_get_data(chip);
	u8 reg;
	struct viafb_gpio *gpio;
	unsigned long flags;

	spin_lock_irqsave(&cfg->vdev->reg_lock, flags);
	gpio = cfg->active_gpios[nr];
	reg = via_read_reg(VIASR, gpio->vg_port_index);
	reg |= 0x40 << gpio->vg_mask_shift;	/* output enable */
	/* 0x10 << shift is the data-out bit for this GPIO */
	if (value)
		reg |= 0x10 << gpio->vg_mask_shift;
	else
		reg &= ~(0x10 << gpio->vg_mask_shift);
	via_write_reg(VIASR, gpio->vg_port_index, reg);
	spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
}

/* gpiolib direction_output hook: setting a value implies output mode */
static int via_gpio_dir_out(struct gpio_chip *chip, unsigned int nr,
			    int value)
{
	via_gpio_set(chip, nr, value);
	return 0;
}

/*
 * Set the input direction.  I'm not sure this is right; we should
 * be able to do input without disabling output.
 */
static int via_gpio_dir_input(struct gpio_chip *chip, unsigned int nr)
{
	struct viafb_gpio_cfg *cfg = gpiochip_get_data(chip);
	struct viafb_gpio *gpio;
	unsigned long flags;

	spin_lock_irqsave(&cfg->vdev->reg_lock, flags);
	gpio = cfg->active_gpios[nr];
	/* Clear the output-enable bit; the GPIO is then input-only */
	via_write_reg_mask(VIASR, gpio->vg_port_index, 0,
			   0x40 << gpio->vg_mask_shift);
	spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
	return 0;
}

/*
 * Read the current level of GPIO @nr.  0x04 << shift is read back as
 * the input-data bit here — presumably the data-in position; the
 * datasheet gap noted above applies. Returns 0 or 1.
 */
static int via_gpio_get(struct gpio_chip *chip, unsigned int nr)
{
	struct viafb_gpio_cfg *cfg = gpiochip_get_data(chip);
	u8 reg;
	struct viafb_gpio *gpio;
	unsigned long flags;

	spin_lock_irqsave(&cfg->vdev->reg_lock, flags);
	gpio = cfg->active_gpios[nr];
	reg = via_read_reg(VIASR, gpio->vg_port_index);
	spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
	return !!(reg & (0x04 << gpio->vg_mask_shift));
}

/* Static gpiolib chip template; ngpio/names/base are filled in at probe */
static struct viafb_gpio_cfg viafb_gpio_config = {
	.gpio_chip = {
		.label = "VIAFB onboard GPIO",
		.owner = THIS_MODULE,
		.direction_output = via_gpio_dir_out,
		.set = via_gpio_set,
		.direction_input = via_gpio_dir_input,
		.get = via_gpio_get,
		.base = -1,
		.ngpio = 0,
		.can_sleep = 0
	}
};

/*
 * Manage the software enable bit.
 */
static void viafb_gpio_enable(struct viafb_gpio *gpio)
{
	via_write_reg_mask(VIASR, gpio->vg_port_index, 0x02, 0x02);
}

static void viafb_gpio_disable(struct viafb_gpio *gpio)
{
	via_write_reg_mask(VIASR, gpio->vg_port_index, 0, 0x02);
}

#ifdef CONFIG_PM

/* Nothing to save: register state is reconstructed on resume */
static int viafb_gpio_suspend(void *private)
{
	return 0;
}

/*
 * Re-enable the ports after resume.  Step is 2 because one enable
 * bit covers a pair of GPIOs (see probe below).
 */
static int viafb_gpio_resume(void *private)
{
	int i;

	for (i = 0; i < viafb_gpio_config.gpio_chip.ngpio; i += 2)
		viafb_gpio_enable(viafb_gpio_config.active_gpios[i]);
	return 0;
}

static struct viafb_pm_hooks viafb_gpio_pm_hooks = {
	.suspend = viafb_gpio_suspend,
	.resume = viafb_gpio_resume
};
#endif /* CONFIG_PM */

/*
 * Look up a specific gpio and return the number it was assigned.
 * Returns the global GPIO number (chip base + index) for the named
 * port, or -1 if no active GPIO has that datasheet name.
 */
int viafb_gpio_lookup(const char *name)
{
	int i;

	for (i = 0; i < viafb_gpio_config.gpio_chip.ngpio; i++)
		if (!strcmp(name, viafb_gpio_config.active_gpios[i]->vg_name))
			return viafb_gpio_config.gpio_chip.base + i;
	return -1;
}
EXPORT_SYMBOL_GPL(viafb_gpio_lookup);

/*
 * Platform device stuff.
 */

/*
 * Probe: collect every port configured for GPIO mode, enable the
 * hardware, and register the resulting chip with gpiolib.  Returns 0
 * when nothing is configured (not an error) or the gpiochip_add_data()
 * result otherwise.
 */
static int viafb_gpio_probe(struct platform_device *platdev)
{
	struct viafb_dev *vdev = platdev->dev.platform_data;
	struct via_port_cfg *port_cfg = vdev->port_cfg;
	int i, ngpio = 0, ret;
	struct viafb_gpio *gpio;
	unsigned long flags;

	/*
	 * Set up entries for all GPIOs which have been configured to
	 * operate as such (as opposed to as i2c ports).
	 */
	for (i = 0; i < VIAFB_NUM_PORTS; i++) {
		if (port_cfg[i].mode != VIA_MODE_GPIO)
			continue;
		for (gpio = viafb_all_gpios;
		     gpio < viafb_all_gpios + VIAFB_NUM_GPIOS; gpio++)
			if (gpio->vg_port_index == port_cfg[i].ioport_index) {
				viafb_gpio_config.active_gpios[ngpio] = gpio;
				viafb_gpio_config.gpio_names[ngpio] =
					gpio->vg_name;
				ngpio++;
			}
	}
	viafb_gpio_config.gpio_chip.ngpio = ngpio;
	viafb_gpio_config.gpio_chip.names = viafb_gpio_config.gpio_names;
	viafb_gpio_config.vdev = vdev;
	if (ngpio == 0) {
		printk(KERN_INFO "viafb: no GPIOs configured\n");
		return 0;
	}
	/*
	 * Enable the ports.  They come in pairs, with a single
	 * enable bit for both.
	 */
	spin_lock_irqsave(&viafb_gpio_config.vdev->reg_lock, flags);
	for (i = 0; i < ngpio; i += 2)
		viafb_gpio_enable(viafb_gpio_config.active_gpios[i]);
	spin_unlock_irqrestore(&viafb_gpio_config.vdev->reg_lock, flags);
	/*
	 * Get registered.
	 */
	viafb_gpio_config.gpio_chip.base = -1;	/* Dynamic */
	ret = gpiochip_add_data(&viafb_gpio_config.gpio_chip,
				&viafb_gpio_config);
	if (ret) {
		printk(KERN_ERR "viafb: failed to add gpios (%d)\n", ret);
		/* ngpio = 0 marks the chip as unregistered for remove() */
		viafb_gpio_config.gpio_chip.ngpio = 0;
	}
#ifdef CONFIG_PM
	viafb_pm_register(&viafb_gpio_pm_hooks);
#endif
	return ret;
}

/*
 * Remove: unregister from gpiolib (if registration succeeded) and
 * disable the hardware enable bit for each pair of ports.
 */
static int viafb_gpio_remove(struct platform_device *platdev)
{
	unsigned long flags;
	int i;

#ifdef CONFIG_PM
	viafb_pm_unregister(&viafb_gpio_pm_hooks);
#endif

	/*
	 * Get unregistered.
	 */
	if (viafb_gpio_config.gpio_chip.ngpio > 0) {
		gpiochip_remove(&viafb_gpio_config.gpio_chip);
	}
	/*
	 * Disable the ports.
	 */
	spin_lock_irqsave(&viafb_gpio_config.vdev->reg_lock, flags);
	for (i = 0; i < viafb_gpio_config.gpio_chip.ngpio; i += 2)
		viafb_gpio_disable(viafb_gpio_config.active_gpios[i]);
	viafb_gpio_config.gpio_chip.ngpio = 0;
	spin_unlock_irqrestore(&viafb_gpio_config.vdev->reg_lock, flags);
	return 0;
}

static struct platform_driver via_gpio_driver = {
	.driver = {
		.name = "viafb-gpio",
	},
	.probe = viafb_gpio_probe,
	.remove = viafb_gpio_remove,
};

int viafb_gpio_init(void)
{
	return platform_driver_register(&via_gpio_driver);
}

void viafb_gpio_exit(void)
{
	platform_driver_unregister(&via_gpio_driver);
}
gpl-2.0
daedae1112/kernel
arch/avr32/mm/fault.c
1340
5975
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * Based on linux/arch/sh/mm/fault.c:
 * Copyright (C) 1999 Niibe Yutaka
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>

#include <asm/mmu_context.h>
#include <asm/sysreg.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>

#ifdef CONFIG_KPROBES
/*
 * Give kprobes a chance to claim a kernel-mode fault (e.g. a fault on
 * a probed instruction).  Returns 1 if kprobes handled it and the
 * normal fault path must be skipped.
 */
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (!user_mode(regs)) {
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	return 0;
}
#endif

/* When non-zero, log user-space segfaults/bus errors (rate limited) */
int exception_trace = 1;

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 *
 * ecr is the Exception Cause Register. Possible values are:
 *   6:  Protection fault (instruction access)
 *   15: Protection fault (read access)
 *   16: Protection fault (write access)
 *   20: Page not found (instruction access)
 *   24: Page not found (read access)
 *   28: Page not found (write access)
 */
asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	const struct exception_table_entry *fixup;
	unsigned long address;
	unsigned long page;
	int writeaccess;
	long signr;
	int code;
	int fault;

	if (notify_page_fault(regs, ecr))
		return;

	/* Faulting virtual address comes from the TLB Exception Address reg */
	address = sysreg_read(TLBEAR);

	tsk = current;
	mm = tsk->mm;
	signr = SIGSEGV;
	code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user context, we must
	 * not take the fault...
	 */
	if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
		goto no_context;

	local_irq_enable();

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	/* Address below the vma: only OK if the stack can grow down to it */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so we
	 * can handle it...
	 */
good_area:
	code = SEGV_ACCERR;
	writeaccess = 0;

	/* Check the access type implied by ecr against the vma permissions */
	switch (ecr) {
	case ECR_PROTECTION_X:
	case ECR_TLB_MISS_X:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case ECR_PROTECTION_R:
	case ECR_TLB_MISS_R:
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
		break;
	case ECR_PROTECTION_W:
	case ECR_TLB_MISS_W:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		writeaccess = 1;
		break;
	default:
		panic("Unhandled case %lu in do_page_fault!", ecr);
	}

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address,
			writeaccess ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory
	 * map. Fix it, but check if it's kernel or user first...
	 */
bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		if (exception_trace && printk_ratelimit())
			printk("%s%s[%d]: segfault at %08lx pc %08lx "
			       "sp %08lx ecr %lu\n",
			       is_global_init(tsk) ? KERN_EMERG : KERN_INFO,
			       tsk->comm, tsk->pid, address, regs->pc,
			       regs->sp, ecr);
		_exception(SIGSEGV, regs, code, address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have
	 * to terminate things with extreme prejudice.
	 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT
		       "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);

	/* Walk and dump the pgd/pte entries covering the faulting address */
	page = sysreg_read(PTBR);
	printk(KERN_ALERT "ptbr = %08lx", page);
	if (address >= TASK_SIZE)
		page = (unsigned long)swapper_pg_dir;
	if (page) {
		page = ((unsigned long *)page)[address >> 22];
		printk(" pgd = %08lx", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
			printk(" pte = %08lx", page);
		}
	}
	printk("\n");
	die("Kernel access of bad area", regs, signr);
	return;

	/*
	 * We ran out of memory, or some other thing happened to us
	 * that made us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	/* init must never be OOM-killed here: yield and retry the fault */
	if (is_global_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: Killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	signr = SIGBUS;
	code = BUS_ADRERR;
	if (!user_mode(regs))
		goto no_context;

	if (exception_trace)
		printk("%s%s[%d]: bus error at %08lx pc %08lx "
		       "sp %08lx ecr %lu\n",
		       is_global_init(tsk) ? KERN_EMERG : KERN_INFO,
		       tsk->comm, tsk->pid, address, regs->pc,
		       regs->sp, ecr);

	_exception(SIGBUS, regs, BUS_ADRERR, address);
}

/*
 * Unrecoverable bus error: report the physical address and access
 * direction, dump the DTLB, and die.
 */
asmlinkage void do_bus_error(unsigned long addr, int write_access,
			     struct pt_regs *regs)
{
	printk(KERN_ALERT
	       "Bus error at physical address 0x%08lx (%s access)\n",
	       addr, write_access ? "write" : "read");
	printk(KERN_INFO "DTLB dump:\n");
	dump_dtlb();
	die("Bus Error", regs, SIGKILL);
}
gpl-2.0
bhuman/KernelV4
arch/arm/mm/discontig.c
1596
1271
/* * linux/arch/arm/mm/discontig.c * * Discontiguous memory support. * * Initial code: Copyright (C) 1999-2000 Nicolas Pitre * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/mmzone.h> #include <linux/bootmem.h> #if MAX_NUMNODES != 4 && MAX_NUMNODES != 16 # error Fix Me Please #endif /* * Our node_data structure for discontiguous memory. */ pg_data_t discontig_node_data[MAX_NUMNODES] = { { .bdata = &bootmem_node_data[0] }, { .bdata = &bootmem_node_data[1] }, { .bdata = &bootmem_node_data[2] }, { .bdata = &bootmem_node_data[3] }, #if MAX_NUMNODES == 16 { .bdata = &bootmem_node_data[4] }, { .bdata = &bootmem_node_data[5] }, { .bdata = &bootmem_node_data[6] }, { .bdata = &bootmem_node_data[7] }, { .bdata = &bootmem_node_data[8] }, { .bdata = &bootmem_node_data[9] }, { .bdata = &bootmem_node_data[10] }, { .bdata = &bootmem_node_data[11] }, { .bdata = &bootmem_node_data[12] }, { .bdata = &bootmem_node_data[13] }, { .bdata = &bootmem_node_data[14] }, { .bdata = &bootmem_node_data[15] }, #endif }; EXPORT_SYMBOL(discontig_node_data);
gpl-2.0
1nv4d3r5/linux
drivers/spi/spi-bfin5xx.c
2108
41407
/* * Blackfin On-Chip SPI Driver * * Copyright 2004-2010 Analog Devices Inc. * * Enter bugs at http://blackfin.uclinux.org/ * * Licensed under the GPL-2 or later. */ #include <linux/init.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/spi/spi.h> #include <linux/workqueue.h> #include <asm/dma.h> #include <asm/portmux.h> #include <asm/bfin5xx_spi.h> #include <asm/cacheflush.h> #define DRV_NAME "bfin-spi" #define DRV_AUTHOR "Bryan Wu, Luke Yang" #define DRV_DESC "Blackfin on-chip SPI Controller Driver" #define DRV_VERSION "1.0" MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION(DRV_DESC); MODULE_LICENSE("GPL"); #define START_STATE ((void *)0) #define RUNNING_STATE ((void *)1) #define DONE_STATE ((void *)2) #define ERROR_STATE ((void *)-1) struct bfin_spi_master_data; struct bfin_spi_transfer_ops { void (*write) (struct bfin_spi_master_data *); void (*read) (struct bfin_spi_master_data *); void (*duplex) (struct bfin_spi_master_data *); }; struct bfin_spi_master_data { /* Driver model hookup */ struct platform_device *pdev; /* SPI framework hookup */ struct spi_master *master; /* Regs base of SPI controller */ struct bfin_spi_regs __iomem *regs; /* Pin request list */ u16 *pin_req; /* BFIN hookup */ struct bfin5xx_spi_master *master_info; /* Driver message queue */ struct workqueue_struct *workqueue; struct work_struct pump_messages; spinlock_t lock; struct list_head queue; int busy; bool running; /* Message Transfer pump */ struct tasklet_struct pump_transfers; /* Current message transfer state info */ struct spi_message *cur_msg; struct spi_transfer *cur_transfer; struct bfin_spi_slave_data *cur_chip; size_t len_in_bytes; size_t len; void *tx; void *tx_end; void *rx; void *rx_end; /* DMA stuffs */ int dma_channel; 
int dma_mapped; int dma_requested; dma_addr_t rx_dma; dma_addr_t tx_dma; int irq_requested; int spi_irq; size_t rx_map_len; size_t tx_map_len; u8 n_bytes; u16 ctrl_reg; u16 flag_reg; int cs_change; const struct bfin_spi_transfer_ops *ops; }; struct bfin_spi_slave_data { u16 ctl_reg; u16 baud; u16 flag; u8 chip_select_num; u8 enable_dma; u16 cs_chg_udelay; /* Some devices require > 255usec delay */ u32 cs_gpio; u16 idle_tx_val; u8 pio_interrupt; /* use spi data irq */ const struct bfin_spi_transfer_ops *ops; }; static void bfin_spi_enable(struct bfin_spi_master_data *drv_data) { bfin_write_or(&drv_data->regs->ctl, BIT_CTL_ENABLE); } static void bfin_spi_disable(struct bfin_spi_master_data *drv_data) { bfin_write_and(&drv_data->regs->ctl, ~BIT_CTL_ENABLE); } /* Caculate the SPI_BAUD register value based on input HZ */ static u16 hz_to_spi_baud(u32 speed_hz) { u_long sclk = get_sclk(); u16 spi_baud = (sclk / (2 * speed_hz)); if ((sclk % (2 * speed_hz)) > 0) spi_baud++; if (spi_baud < MIN_SPI_BAUD_VAL) spi_baud = MIN_SPI_BAUD_VAL; return spi_baud; } static int bfin_spi_flush(struct bfin_spi_master_data *drv_data) { unsigned long limit = loops_per_jiffy << 1; /* wait for stop and clear stat */ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF) && --limit) cpu_relax(); bfin_write(&drv_data->regs->stat, BIT_STAT_CLR); return limit; } /* Chip select operation functions for cs_change flag */ static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) { if (likely(chip->chip_select_num < MAX_CTRL_CS)) bfin_write_and(&drv_data->regs->flg, ~chip->flag); else gpio_set_value(chip->cs_gpio, 0); } static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) { if (likely(chip->chip_select_num < MAX_CTRL_CS)) bfin_write_or(&drv_data->regs->flg, chip->flag); else gpio_set_value(chip->cs_gpio, 1); /* Move delay here for consistency */ if (chip->cs_chg_udelay) udelay(chip->cs_chg_udelay); } 
/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */ static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) { if (chip->chip_select_num < MAX_CTRL_CS) bfin_write_or(&drv_data->regs->flg, chip->flag >> 8); } static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) { if (chip->chip_select_num < MAX_CTRL_CS) bfin_write_and(&drv_data->regs->flg, ~(chip->flag >> 8)); } /* stop controller and re-config current chip*/ static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data) { struct bfin_spi_slave_data *chip = drv_data->cur_chip; /* Clear status and disable clock */ bfin_write(&drv_data->regs->stat, BIT_STAT_CLR); bfin_spi_disable(drv_data); dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n"); SSYNC(); /* Load the registers */ bfin_write(&drv_data->regs->ctl, chip->ctl_reg); bfin_write(&drv_data->regs->baud, chip->baud); bfin_spi_enable(drv_data); bfin_spi_cs_active(drv_data, chip); } /* used to kick off transfer in rx mode and read unwanted RX data */ static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data) { (void) bfin_read(&drv_data->regs->rdbr); } static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data) { /* clear RXS (we check for RXS inside the loop) */ bfin_spi_dummy_read(drv_data); while (drv_data->tx < drv_data->tx_end) { bfin_write(&drv_data->regs->tdbr, (*(u8 *) (drv_data->tx++))); /* wait until transfer finished. 
checking SPIF or TXS may not guarantee transfer completion */ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) cpu_relax(); /* discard RX data and clear RXS */ bfin_spi_dummy_read(drv_data); } } static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data) { u16 tx_val = drv_data->cur_chip->idle_tx_val; /* discard old RX data and clear RXS */ bfin_spi_dummy_read(drv_data); while (drv_data->rx < drv_data->rx_end) { bfin_write(&drv_data->regs->tdbr, tx_val); while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) cpu_relax(); *(u8 *) (drv_data->rx++) = bfin_read(&drv_data->regs->rdbr); } } static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data) { /* discard old RX data and clear RXS */ bfin_spi_dummy_read(drv_data); while (drv_data->rx < drv_data->rx_end) { bfin_write(&drv_data->regs->tdbr, (*(u8 *) (drv_data->tx++))); while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) cpu_relax(); *(u8 *) (drv_data->rx++) = bfin_read(&drv_data->regs->rdbr); } } static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = { .write = bfin_spi_u8_writer, .read = bfin_spi_u8_reader, .duplex = bfin_spi_u8_duplex, }; static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data) { /* clear RXS (we check for RXS inside the loop) */ bfin_spi_dummy_read(drv_data); while (drv_data->tx < drv_data->tx_end) { bfin_write(&drv_data->regs->tdbr, (*(u16 *) (drv_data->tx))); drv_data->tx += 2; /* wait until transfer finished. 
checking SPIF or TXS may not guarantee transfer completion */ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) cpu_relax(); /* discard RX data and clear RXS */ bfin_spi_dummy_read(drv_data); } } static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data) { u16 tx_val = drv_data->cur_chip->idle_tx_val; /* discard old RX data and clear RXS */ bfin_spi_dummy_read(drv_data); while (drv_data->rx < drv_data->rx_end) { bfin_write(&drv_data->regs->tdbr, tx_val); while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) cpu_relax(); *(u16 *) (drv_data->rx) = bfin_read(&drv_data->regs->rdbr); drv_data->rx += 2; } } static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data) { /* discard old RX data and clear RXS */ bfin_spi_dummy_read(drv_data); while (drv_data->rx < drv_data->rx_end) { bfin_write(&drv_data->regs->tdbr, (*(u16 *) (drv_data->tx))); drv_data->tx += 2; while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) cpu_relax(); *(u16 *) (drv_data->rx) = bfin_read(&drv_data->regs->rdbr); drv_data->rx += 2; } } static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = { .write = bfin_spi_u16_writer, .read = bfin_spi_u16_reader, .duplex = bfin_spi_u16_duplex, }; /* test if there is more transfer to be done */ static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data) { struct spi_message *msg = drv_data->cur_msg; struct spi_transfer *trans = drv_data->cur_transfer; /* Move to next transfer */ if (trans->transfer_list.next != &msg->transfers) { drv_data->cur_transfer = list_entry(trans->transfer_list.next, struct spi_transfer, transfer_list); return RUNNING_STATE; } else return DONE_STATE; } /* * caller already set message->status; * dma and pio irqs are blocked give finished message back */ static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data) { struct bfin_spi_slave_data *chip = drv_data->cur_chip; struct spi_transfer *last_transfer; unsigned long flags; struct spi_message *msg; 
spin_lock_irqsave(&drv_data->lock, flags); msg = drv_data->cur_msg; drv_data->cur_msg = NULL; drv_data->cur_transfer = NULL; drv_data->cur_chip = NULL; queue_work(drv_data->workqueue, &drv_data->pump_messages); spin_unlock_irqrestore(&drv_data->lock, flags); last_transfer = list_entry(msg->transfers.prev, struct spi_transfer, transfer_list); msg->state = NULL; if (!drv_data->cs_change) bfin_spi_cs_deactive(drv_data, chip); /* Not stop spi in autobuffer mode */ if (drv_data->tx_dma != 0xFFFF) bfin_spi_disable(drv_data); if (msg->complete) msg->complete(msg->context); } /* spi data irq handler */ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) { struct bfin_spi_master_data *drv_data = dev_id; struct bfin_spi_slave_data *chip = drv_data->cur_chip; struct spi_message *msg = drv_data->cur_msg; int n_bytes = drv_data->n_bytes; int loop = 0; /* wait until transfer finished. */ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) cpu_relax(); if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) || (drv_data->rx && drv_data->rx >= (drv_data->rx_end - n_bytes))) { /* last read */ if (drv_data->rx) { dev_dbg(&drv_data->pdev->dev, "last read\n"); if (!(n_bytes % 2)) { u16 *buf = (u16 *)drv_data->rx; for (loop = 0; loop < n_bytes / 2; loop++) *buf++ = bfin_read(&drv_data->regs->rdbr); } else { u8 *buf = (u8 *)drv_data->rx; for (loop = 0; loop < n_bytes; loop++) *buf++ = bfin_read(&drv_data->regs->rdbr); } drv_data->rx += n_bytes; } msg->actual_length += drv_data->len_in_bytes; if (drv_data->cs_change) bfin_spi_cs_deactive(drv_data, chip); /* Move to next transfer */ msg->state = bfin_spi_next_transfer(drv_data); disable_irq_nosync(drv_data->spi_irq); /* Schedule transfer tasklet */ tasklet_schedule(&drv_data->pump_transfers); return IRQ_HANDLED; } if (drv_data->rx && drv_data->tx) { /* duplex */ dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n"); if (!(n_bytes % 2)) { u16 *buf = (u16 *)drv_data->rx; u16 *buf2 = (u16 *)drv_data->tx; for (loop = 0; 
loop < n_bytes / 2; loop++) { *buf++ = bfin_read(&drv_data->regs->rdbr); bfin_write(&drv_data->regs->tdbr, *buf2++); } } else { u8 *buf = (u8 *)drv_data->rx; u8 *buf2 = (u8 *)drv_data->tx; for (loop = 0; loop < n_bytes; loop++) { *buf++ = bfin_read(&drv_data->regs->rdbr); bfin_write(&drv_data->regs->tdbr, *buf2++); } } } else if (drv_data->rx) { /* read */ dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n"); if (!(n_bytes % 2)) { u16 *buf = (u16 *)drv_data->rx; for (loop = 0; loop < n_bytes / 2; loop++) { *buf++ = bfin_read(&drv_data->regs->rdbr); bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val); } } else { u8 *buf = (u8 *)drv_data->rx; for (loop = 0; loop < n_bytes; loop++) { *buf++ = bfin_read(&drv_data->regs->rdbr); bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val); } } } else if (drv_data->tx) { /* write */ dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n"); if (!(n_bytes % 2)) { u16 *buf = (u16 *)drv_data->tx; for (loop = 0; loop < n_bytes / 2; loop++) { bfin_read(&drv_data->regs->rdbr); bfin_write(&drv_data->regs->tdbr, *buf++); } } else { u8 *buf = (u8 *)drv_data->tx; for (loop = 0; loop < n_bytes; loop++) { bfin_read(&drv_data->regs->rdbr); bfin_write(&drv_data->regs->tdbr, *buf++); } } } if (drv_data->tx) drv_data->tx += n_bytes; if (drv_data->rx) drv_data->rx += n_bytes; return IRQ_HANDLED; } static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) { struct bfin_spi_master_data *drv_data = dev_id; struct bfin_spi_slave_data *chip = drv_data->cur_chip; struct spi_message *msg = drv_data->cur_msg; unsigned long timeout; unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel); u16 spistat = bfin_read(&drv_data->regs->stat); dev_dbg(&drv_data->pdev->dev, "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", dmastat, spistat); if (drv_data->rx != NULL) { u16 cr = bfin_read(&drv_data->regs->ctl); /* discard old RX data and clear RXS */ bfin_spi_dummy_read(drv_data); bfin_write(&drv_data->regs->ctl, cr & ~BIT_CTL_ENABLE); /* Disable 
SPI */ bfin_write(&drv_data->regs->ctl, cr & ~BIT_CTL_TIMOD); /* Restore State */ bfin_write(&drv_data->regs->stat, BIT_STAT_CLR); /* Clear Status */ } clear_dma_irqstat(drv_data->dma_channel); /* * wait for the last transaction shifted out. HRM states: * at this point there may still be data in the SPI DMA FIFO waiting * to be transmitted ... software needs to poll TXS in the SPI_STAT * register until it goes low for 2 successive reads */ if (drv_data->tx != NULL) { while ((bfin_read(&drv_data->regs->stat) & BIT_STAT_TXS) || (bfin_read(&drv_data->regs->stat) & BIT_STAT_TXS)) cpu_relax(); } dev_dbg(&drv_data->pdev->dev, "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", dmastat, bfin_read(&drv_data->regs->stat)); timeout = jiffies + HZ; while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF)) if (!time_before(jiffies, timeout)) { dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF"); break; } else cpu_relax(); if ((dmastat & DMA_ERR) && (spistat & BIT_STAT_RBSY)) { msg->state = ERROR_STATE; dev_err(&drv_data->pdev->dev, "dma receive: fifo/buffer overflow\n"); } else { msg->actual_length += drv_data->len_in_bytes; if (drv_data->cs_change) bfin_spi_cs_deactive(drv_data, chip); /* Move to next transfer */ msg->state = bfin_spi_next_transfer(drv_data); } /* Schedule transfer tasklet */ tasklet_schedule(&drv_data->pump_transfers); /* free the irq handler before next transfer */ dev_dbg(&drv_data->pdev->dev, "disable dma channel irq%d\n", drv_data->dma_channel); dma_disable_irq_nosync(drv_data->dma_channel); return IRQ_HANDLED; } static void bfin_spi_pump_transfers(unsigned long data) { struct bfin_spi_master_data *drv_data = (struct bfin_spi_master_data *)data; struct spi_message *message = NULL; struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; struct bfin_spi_slave_data *chip = NULL; unsigned int bits_per_word; u16 cr, cr_width, dma_width, dma_config; u32 tranf_success = 1; u8 full_duplex = 0; /* Get current state information */ 
message = drv_data->cur_msg; transfer = drv_data->cur_transfer; chip = drv_data->cur_chip; /* * if msg is error or done, report it back using complete() callback */ /* Handle for abort */ if (message->state == ERROR_STATE) { dev_dbg(&drv_data->pdev->dev, "transfer: we've hit an error\n"); message->status = -EIO; bfin_spi_giveback(drv_data); return; } /* Handle end of message */ if (message->state == DONE_STATE) { dev_dbg(&drv_data->pdev->dev, "transfer: all done!\n"); message->status = 0; bfin_spi_flush(drv_data); bfin_spi_giveback(drv_data); return; } /* Delay if requested at end of transfer */ if (message->state == RUNNING_STATE) { dev_dbg(&drv_data->pdev->dev, "transfer: still running ...\n"); previous = list_entry(transfer->transfer_list.prev, struct spi_transfer, transfer_list); if (previous->delay_usecs) udelay(previous->delay_usecs); } /* Flush any existing transfers that may be sitting in the hardware */ if (bfin_spi_flush(drv_data) == 0) { dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); message->status = -EIO; bfin_spi_giveback(drv_data); return; } if (transfer->len == 0) { /* Move to next transfer of this msg */ message->state = bfin_spi_next_transfer(drv_data); /* Schedule next transfer tasklet */ tasklet_schedule(&drv_data->pump_transfers); return; } if (transfer->tx_buf != NULL) { drv_data->tx = (void *)transfer->tx_buf; drv_data->tx_end = drv_data->tx + transfer->len; dev_dbg(&drv_data->pdev->dev, "tx_buf is %p, tx_end is %p\n", transfer->tx_buf, drv_data->tx_end); } else { drv_data->tx = NULL; } if (transfer->rx_buf != NULL) { full_duplex = transfer->tx_buf != NULL; drv_data->rx = transfer->rx_buf; drv_data->rx_end = drv_data->rx + transfer->len; dev_dbg(&drv_data->pdev->dev, "rx_buf is %p, rx_end is %p\n", transfer->rx_buf, drv_data->rx_end); } else { drv_data->rx = NULL; } drv_data->rx_dma = transfer->rx_dma; drv_data->tx_dma = transfer->tx_dma; drv_data->len_in_bytes = transfer->len; drv_data->cs_change = transfer->cs_change; /* 
Bits per word setup */ bits_per_word = transfer->bits_per_word; if (bits_per_word % 16 == 0) { drv_data->n_bytes = bits_per_word/8; drv_data->len = (transfer->len) >> 1; cr_width = BIT_CTL_WORDSIZE; drv_data->ops = &bfin_bfin_spi_transfer_ops_u16; } else if (bits_per_word % 8 == 0) { drv_data->n_bytes = bits_per_word/8; drv_data->len = transfer->len; cr_width = 0; drv_data->ops = &bfin_bfin_spi_transfer_ops_u8; } else { dev_err(&drv_data->pdev->dev, "transfer: unsupported bits_per_word\n"); message->status = -EINVAL; bfin_spi_giveback(drv_data); return; } cr = bfin_read(&drv_data->regs->ctl) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); cr |= cr_width; bfin_write(&drv_data->regs->ctl, cr); dev_dbg(&drv_data->pdev->dev, "transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n", drv_data->ops, chip->ops, &bfin_bfin_spi_transfer_ops_u8); message->state = RUNNING_STATE; dma_config = 0; /* Speed setup (surely valid because already checked) */ if (transfer->speed_hz) bfin_write(&drv_data->regs->baud, hz_to_spi_baud(transfer->speed_hz)); else bfin_write(&drv_data->regs->baud, chip->baud); bfin_write(&drv_data->regs->stat, BIT_STAT_CLR); bfin_spi_cs_active(drv_data, chip); dev_dbg(&drv_data->pdev->dev, "now pumping a transfer: width is %d, len is %d\n", cr_width, transfer->len); /* * Try to map dma buffer and do a dma transfer. If successful use, * different way to r/w according to the enable_dma settings and if * we are not doing a full duplex transfer (since the hardware does * not support full duplex DMA transfers). 
*/ if (!full_duplex && drv_data->cur_chip->enable_dma && drv_data->len > 6) { unsigned long dma_start_addr, flags; disable_dma(drv_data->dma_channel); clear_dma_irqstat(drv_data->dma_channel); /* config dma channel */ dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n"); set_dma_x_count(drv_data->dma_channel, drv_data->len); if (cr_width == BIT_CTL_WORDSIZE) { set_dma_x_modify(drv_data->dma_channel, 2); dma_width = WDSIZE_16; } else { set_dma_x_modify(drv_data->dma_channel, 1); dma_width = WDSIZE_8; } /* poll for SPI completion before start */ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF)) cpu_relax(); /* dirty hack for autobuffer DMA mode */ if (drv_data->tx_dma == 0xFFFF) { dev_dbg(&drv_data->pdev->dev, "doing autobuffer DMA out.\n"); /* no irq in autobuffer mode */ dma_config = (DMAFLOW_AUTO | RESTART | dma_width | DI_EN); set_dma_config(drv_data->dma_channel, dma_config); set_dma_start_addr(drv_data->dma_channel, (unsigned long)drv_data->tx); enable_dma(drv_data->dma_channel); /* start SPI transfer */ bfin_write(&drv_data->regs->ctl, cr | BIT_CTL_TIMOD_DMA_TX); /* just return here, there can only be one transfer * in this mode */ message->status = 0; bfin_spi_giveback(drv_data); return; } /* In dma mode, rx or tx must be NULL in one transfer */ dma_config = (RESTART | dma_width | DI_EN); if (drv_data->rx != NULL) { /* set transfer mode, and enable SPI */ dev_dbg(&drv_data->pdev->dev, "doing DMA in to %p (size %zx)\n", drv_data->rx, drv_data->len_in_bytes); /* invalidate caches, if needed */ if (bfin_addr_dcacheable((unsigned long) drv_data->rx)) invalidate_dcache_range((unsigned long) drv_data->rx, (unsigned long) (drv_data->rx + drv_data->len_in_bytes)); dma_config |= WNR; dma_start_addr = (unsigned long)drv_data->rx; cr |= BIT_CTL_TIMOD_DMA_RX | BIT_CTL_SENDOPT; } else if (drv_data->tx != NULL) { dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n"); /* flush caches, if needed */ if (bfin_addr_dcacheable((unsigned long) drv_data->tx)) 
flush_dcache_range((unsigned long) drv_data->tx, (unsigned long) (drv_data->tx + drv_data->len_in_bytes)); dma_start_addr = (unsigned long)drv_data->tx; cr |= BIT_CTL_TIMOD_DMA_TX; } else BUG(); /* oh man, here there be monsters ... and i dont mean the * fluffy cute ones from pixar, i mean the kind that'll eat * your data, kick your dog, and love it all. do *not* try * and change these lines unless you (1) heavily test DMA * with SPI flashes on a loaded system (e.g. ping floods), * (2) know just how broken the DMA engine interaction with * the SPI peripheral is, and (3) have someone else to blame * when you screw it all up anyways. */ set_dma_start_addr(drv_data->dma_channel, dma_start_addr); set_dma_config(drv_data->dma_channel, dma_config); local_irq_save(flags); SSYNC(); bfin_write(&drv_data->regs->ctl, cr); enable_dma(drv_data->dma_channel); dma_enable_irq(drv_data->dma_channel); local_irq_restore(flags); return; } /* * We always use SPI_WRITE mode (transfer starts with TDBR write). * SPI_READ mode (transfer starts with RDBR read) seems to have * problems with setting up the output value in TDBR prior to the * start of the transfer. 
*/ bfin_write(&drv_data->regs->ctl, cr | BIT_CTL_TXMOD); if (chip->pio_interrupt) { /* SPI irq should have been disabled by now */ /* discard old RX data and clear RXS */ bfin_spi_dummy_read(drv_data); /* start transfer */ if (drv_data->tx == NULL) bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val); else { int loop; if (bits_per_word % 16 == 0) { u16 *buf = (u16 *)drv_data->tx; for (loop = 0; loop < bits_per_word / 16; loop++) { bfin_write(&drv_data->regs->tdbr, *buf++); } } else if (bits_per_word % 8 == 0) { u8 *buf = (u8 *)drv_data->tx; for (loop = 0; loop < bits_per_word / 8; loop++) bfin_write(&drv_data->regs->tdbr, *buf++); } drv_data->tx += drv_data->n_bytes; } /* once TDBR is empty, interrupt is triggered */ enable_irq(drv_data->spi_irq); return; } /* IO mode */ dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); if (full_duplex) { /* full duplex mode */ BUG_ON((drv_data->tx_end - drv_data->tx) != (drv_data->rx_end - drv_data->rx)); dev_dbg(&drv_data->pdev->dev, "IO duplex: cr is 0x%x\n", cr); drv_data->ops->duplex(drv_data); if (drv_data->tx != drv_data->tx_end) tranf_success = 0; } else if (drv_data->tx != NULL) { /* write only half duplex */ dev_dbg(&drv_data->pdev->dev, "IO write: cr is 0x%x\n", cr); drv_data->ops->write(drv_data); if (drv_data->tx != drv_data->tx_end) tranf_success = 0; } else if (drv_data->rx != NULL) { /* read only half duplex */ dev_dbg(&drv_data->pdev->dev, "IO read: cr is 0x%x\n", cr); drv_data->ops->read(drv_data); if (drv_data->rx != drv_data->rx_end) tranf_success = 0; } if (!tranf_success) { dev_dbg(&drv_data->pdev->dev, "IO write error!\n"); message->state = ERROR_STATE; } else { /* Update total byte transferred */ message->actual_length += drv_data->len_in_bytes; /* Move to next transfer of this msg */ message->state = bfin_spi_next_transfer(drv_data); if (drv_data->cs_change && message->state != DONE_STATE) { bfin_spi_flush(drv_data); bfin_spi_cs_deactive(drv_data, chip); } } /* Schedule next transfer tasklet */ 
tasklet_schedule(&drv_data->pump_transfers); } /* pop a msg from queue and kick off real transfer */ static void bfin_spi_pump_messages(struct work_struct *work) { struct bfin_spi_master_data *drv_data; unsigned long flags; drv_data = container_of(work, struct bfin_spi_master_data, pump_messages); /* Lock queue and check for queue work */ spin_lock_irqsave(&drv_data->lock, flags); if (list_empty(&drv_data->queue) || !drv_data->running) { /* pumper kicked off but no work to do */ drv_data->busy = 0; spin_unlock_irqrestore(&drv_data->lock, flags); return; } /* Make sure we are not already running a message */ if (drv_data->cur_msg) { spin_unlock_irqrestore(&drv_data->lock, flags); return; } /* Extract head of queue */ drv_data->cur_msg = list_entry(drv_data->queue.next, struct spi_message, queue); /* Setup the SSP using the per chip configuration */ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); bfin_spi_restore_state(drv_data); list_del_init(&drv_data->cur_msg->queue); /* Initial message state */ drv_data->cur_msg->state = START_STATE; drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, struct spi_transfer, transfer_list); dev_dbg(&drv_data->pdev->dev, "got a message to pump, " "state is set to: baud %d, flag 0x%x, ctl 0x%x\n", drv_data->cur_chip->baud, drv_data->cur_chip->flag, drv_data->cur_chip->ctl_reg); dev_dbg(&drv_data->pdev->dev, "the first transfer len is %d\n", drv_data->cur_transfer->len); /* Mark as busy and launch transfers */ tasklet_schedule(&drv_data->pump_transfers); drv_data->busy = 1; spin_unlock_irqrestore(&drv_data->lock, flags); } /* * got a msg to transfer, queue it in drv_data->queue. 
* And kick off message pumper */ static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) { struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); unsigned long flags; spin_lock_irqsave(&drv_data->lock, flags); if (!drv_data->running) { spin_unlock_irqrestore(&drv_data->lock, flags); return -ESHUTDOWN; } msg->actual_length = 0; msg->status = -EINPROGRESS; msg->state = START_STATE; dev_dbg(&spi->dev, "adding an msg in transfer() \n"); list_add_tail(&msg->queue, &drv_data->queue); if (drv_data->running && !drv_data->busy) queue_work(drv_data->workqueue, &drv_data->pump_messages); spin_unlock_irqrestore(&drv_data->lock, flags); return 0; } #define MAX_SPI_SSEL 7 static const u16 ssel[][MAX_SPI_SSEL] = { {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3, P_SPI0_SSEL4, P_SPI0_SSEL5, P_SPI0_SSEL6, P_SPI0_SSEL7}, {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3, P_SPI1_SSEL4, P_SPI1_SSEL5, P_SPI1_SSEL6, P_SPI1_SSEL7}, {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3, P_SPI2_SSEL4, P_SPI2_SSEL5, P_SPI2_SSEL6, P_SPI2_SSEL7}, }; /* setup for devices (may be called multiple times -- not just first setup) */ static int bfin_spi_setup(struct spi_device *spi) { struct bfin5xx_spi_chip *chip_info; struct bfin_spi_slave_data *chip = NULL; struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); u16 bfin_ctl_reg; int ret = -EINVAL; /* Only alloc (or use chip_info) on first setup */ chip_info = NULL; chip = spi_get_ctldata(spi); if (chip == NULL) { chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (!chip) { dev_err(&spi->dev, "cannot allocate chip data\n"); ret = -ENOMEM; goto error; } chip->enable_dma = 0; chip_info = spi->controller_data; } /* Let people set non-standard bits directly */ bfin_ctl_reg = BIT_CTL_OPENDRAIN | BIT_CTL_EMISO | BIT_CTL_PSSE | BIT_CTL_GM | BIT_CTL_SZ; /* chip_info isn't always needed */ if (chip_info) { /* Make sure people stop trying to set fields via ctl_reg * when they should actually be using common SPI framework. 
* Currently we let through: WOM EMISO PSSE GM SZ. * Not sure if a user actually needs/uses any of these, * but let's assume (for now) they do. */ if (chip_info->ctl_reg & ~bfin_ctl_reg) { dev_err(&spi->dev, "do not set bits in ctl_reg " "that the SPI framework manages\n"); goto error; } chip->enable_dma = chip_info->enable_dma != 0 && drv_data->master_info->enable_dma; chip->ctl_reg = chip_info->ctl_reg; chip->cs_chg_udelay = chip_info->cs_chg_udelay; chip->idle_tx_val = chip_info->idle_tx_val; chip->pio_interrupt = chip_info->pio_interrupt; } else { /* force a default base state */ chip->ctl_reg &= bfin_ctl_reg; } if (spi->bits_per_word % 8) { dev_err(&spi->dev, "%d bits_per_word is not supported\n", spi->bits_per_word); goto error; } /* translate common spi framework into our register */ if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) { dev_err(&spi->dev, "unsupported spi modes detected\n"); goto error; } if (spi->mode & SPI_CPOL) chip->ctl_reg |= BIT_CTL_CPOL; if (spi->mode & SPI_CPHA) chip->ctl_reg |= BIT_CTL_CPHA; if (spi->mode & SPI_LSB_FIRST) chip->ctl_reg |= BIT_CTL_LSBF; /* we dont support running in slave mode (yet?) 
*/ chip->ctl_reg |= BIT_CTL_MASTER; /* * Notice: for blackfin, the speed_hz is the value of register * SPI_BAUD, not the real baudrate */ chip->baud = hz_to_spi_baud(spi->max_speed_hz); chip->chip_select_num = spi->chip_select; if (chip->chip_select_num < MAX_CTRL_CS) { if (!(spi->mode & SPI_CPHA)) dev_warn(&spi->dev, "Warning: SPI CPHA not set:" " Slave Select not under software control!\n" " See Documentation/blackfin/bfin-spi-notes.txt"); chip->flag = (1 << spi->chip_select) << 8; } else chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS; if (chip->enable_dma && chip->pio_interrupt) { dev_err(&spi->dev, "enable_dma is set, " "do not set pio_interrupt\n"); goto error; } /* * if any one SPI chip is registered and wants DMA, request the * DMA channel for it */ if (chip->enable_dma && !drv_data->dma_requested) { /* register dma irq handler */ ret = request_dma(drv_data->dma_channel, "BFIN_SPI_DMA"); if (ret) { dev_err(&spi->dev, "Unable to request BlackFin SPI DMA channel\n"); goto error; } drv_data->dma_requested = 1; ret = set_dma_callback(drv_data->dma_channel, bfin_spi_dma_irq_handler, drv_data); if (ret) { dev_err(&spi->dev, "Unable to set dma callback\n"); goto error; } dma_disable_irq(drv_data->dma_channel); } if (chip->pio_interrupt && !drv_data->irq_requested) { ret = request_irq(drv_data->spi_irq, bfin_spi_pio_irq_handler, 0, "BFIN_SPI", drv_data); if (ret) { dev_err(&spi->dev, "Unable to register spi IRQ\n"); goto error; } drv_data->irq_requested = 1; /* we use write mode, spi irq has to be disabled here */ disable_irq(drv_data->spi_irq); } if (chip->chip_select_num >= MAX_CTRL_CS) { /* Only request on first setup */ if (spi_get_ctldata(spi) == NULL) { ret = gpio_request(chip->cs_gpio, spi->modalias); if (ret) { dev_err(&spi->dev, "gpio_request() error\n"); goto pin_error; } gpio_direction_output(chip->cs_gpio, 1); } } dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n", spi->modalias, spi->bits_per_word, chip->enable_dma); 
dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n",
	chip->ctl_reg, chip->flag);
	/* chip is fully configured; publish it as this spi_device's ctldata */
	spi_set_ctldata(spi, chip);

	dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num);
	/* Hardware-controlled chip selects need the SSELx pin muxed in;
	 * GPIO-based selects (>= MAX_CTRL_CS) were requested earlier. */
	if (chip->chip_select_num < MAX_CTRL_CS) {
		/* NOTE(review): ssel[] is indexed by bus_num — assumes
		 * bus_num < 3 as set up by the platform; verify against
		 * board code. */
		ret = peripheral_request(ssel[spi->master->bus_num]
		                         [chip->chip_select_num-1], spi->modalias);
		if (ret) {
			dev_err(&spi->dev, "peripheral_request() error\n");
			goto pin_error;
		}
	}

	/* enable the CS line, then park it inactive until a transfer starts */
	bfin_spi_cs_enable(drv_data, chip);
	bfin_spi_cs_deactive(drv_data, chip);

	return 0;

	/* error unwind: release whichever CS resource was acquired above */
 pin_error:
	if (chip->chip_select_num >= MAX_CTRL_CS)
		gpio_free(chip->cs_gpio);
	else
		peripheral_free(ssel[spi->master->bus_num]
			[chip->chip_select_num - 1]);
 error:
	if (chip) {
		if (drv_data->dma_requested)
			free_dma(drv_data->dma_channel);
		drv_data->dma_requested = 0;

		kfree(chip);
		/* prevent free 'chip' twice */
		spi_set_ctldata(spi, NULL);
	}

	return ret;
}

/*
 * callback for spi framework.
 * clean driver specific data
 */
static void bfin_spi_cleanup(struct spi_device *spi)
{
	struct bfin_spi_slave_data *chip = spi_get_ctldata(spi);
	struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master);

	if (!chip)
		return;

	/* mirror of the setup() acquisition: peripheral pin vs GPIO CS */
	if (chip->chip_select_num < MAX_CTRL_CS) {
		peripheral_free(ssel[spi->master->bus_num]
					[chip->chip_select_num-1]);
		bfin_spi_cs_disable(drv_data, chip);
	} else
		gpio_free(chip->cs_gpio);

	kfree(chip);
	/* prevent free 'chip' twice */
	spi_set_ctldata(spi, NULL);
}

/*
 * Initialise the message queue, the transfer tasklet and the
 * single-threaded workqueue that pumps messages to the hardware.
 * Returns 0 on success, -EBUSY if the workqueue cannot be created.
 */
static int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data)
{
	INIT_LIST_HEAD(&drv_data->queue);
	spin_lock_init(&drv_data->lock);

	drv_data->running = false;
	drv_data->busy = 0;

	/* init transfer tasklet */
	tasklet_init(&drv_data->pump_transfers,
		     bfin_spi_pump_transfers, (unsigned long)drv_data);

	/* init messages workqueue */
	INIT_WORK(&drv_data->pump_messages, bfin_spi_pump_messages);
	drv_data->workqueue = create_singlethread_workqueue(
				dev_name(drv_data->master->dev.parent));
	if (drv_data->workqueue == NULL)
		return -EBUSY;

	return 0;
}

/*
 * Mark the queue as running and kick the message pumper.
 * Fails with -EBUSY if the queue is already running or mid-message.
 */
static int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data)
{
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	if (drv_data->running || drv_data->busy) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -EBUSY;
	}

	/* reset per-message cursors under the lock before restarting */
	drv_data->running = true;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	queue_work(drv_data->workqueue, &drv_data->pump_messages);

	return 0;
}

/*
 * Stop accepting new messages and wait (polling, up to ~5s) for the
 * queue to drain.  Returns -EBUSY if work is still pending on timeout.
 */
static int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
{
	unsigned long flags;
	unsigned limit = 500;
	int status = 0;

	spin_lock_irqsave(&drv_data->lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the drv_data->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead
	 */
	drv_data->running = false;
	while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
		/* drop the lock so the pumper can make progress, then re-check */
		spin_unlock_irqrestore(&drv_data->lock, flags);
		msleep(10);
		spin_lock_irqsave(&drv_data->lock, flags);
	}

	if (!list_empty(&drv_data->queue) || drv_data->busy)
		status = -EBUSY;

	spin_unlock_irqrestore(&drv_data->lock, flags);

	return status;
}

/* Drain the queue, then tear down the workqueue.  Propagates -EBUSY
 * from bfin_spi_stop_queue() if the queue would not drain. */
static int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data)
{
	int status;

	status = bfin_spi_stop_queue(drv_data);
	if (status != 0)
		return status;

	destroy_workqueue(drv_data->workqueue);

	return 0;
}

static int bfin_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bfin5xx_spi_master *platform_info;
	struct spi_master *master;
	struct bfin_spi_master_data *drv_data;
	struct resource *res;
	int status = 0;

	platform_info = dev->platform_data;

	/* Allocate master with space for drv_data */
	master = spi_alloc_master(dev, sizeof(*drv_data));
	if (!master) {
		dev_err(&pdev->dev, "can not alloc spi_master\n");
		return -ENOMEM;
	}

	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;
drv_data->pin_req = platform_info->pin_req;

	/* the spi->mode bits supported by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;

	master->bus_num = pdev->id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = bfin_spi_cleanup;
	master->setup = bfin_spi_setup;
	master->transfer = bfin_spi_transfer;

	/* Find and map our resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(dev, "Cannot get IORESOURCE_MEM\n");
		status = -ENOENT;
		goto out_error_get_res;
	}

	drv_data->regs = ioremap(res->start, resource_size(res));
	if (drv_data->regs == NULL) {
		dev_err(dev, "Cannot map IO\n");
		status = -ENXIO;
		goto out_error_ioremap;
	}

	/* DMA channel number is passed as an IORESOURCE_DMA resource */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (res == NULL) {
		dev_err(dev, "No DMA channel specified\n");
		status = -ENOENT;
		goto out_error_free_io;
	}
	drv_data->dma_channel = res->start;

	/* PIO-interrupt mode IRQ; only actually requested later in setup()
	 * when a chip asks for pio_interrupt */
	drv_data->spi_irq = platform_get_irq(pdev, 0);
	if (drv_data->spi_irq < 0) {
		dev_err(dev, "No spi pio irq specified\n");
		status = -ENOENT;
		goto out_error_free_io;
	}

	/* Initial and start queue */
	status = bfin_spi_init_queue(drv_data);
	if (status != 0) {
		dev_err(dev, "problem initializing queue\n");
		goto out_error_queue_alloc;
	}

	status = bfin_spi_start_queue(drv_data);
	if (status != 0) {
		dev_err(dev, "problem starting queue\n");
		goto out_error_queue_alloc;
	}

	status = peripheral_request_list(drv_data->pin_req, DRV_NAME);
	if (status != 0) {
		dev_err(&pdev->dev, ": Requesting Peripherals failed\n");
		goto out_error_queue_alloc;
	}

	/* Reset SPI registers. If these registers were used by the boot loader,
	 * the sky may fall on your head if you enable the dma controller.
	 */
	bfin_write(&drv_data->regs->ctl, BIT_CTL_CPHA | BIT_CTL_MASTER);
	bfin_write(&drv_data->regs->flg, 0xFF00);

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(dev, "problem registering spi master\n");
		goto out_error_queue_alloc;
	}

	dev_info(dev, "%s, Version %s, regs@%p, dma channel@%d\n",
		DRV_DESC, DRV_VERSION, drv_data->regs,
		drv_data->dma_channel);
	return status;

	/* unwind in reverse order of acquisition (goto-cleanup pattern) */
out_error_queue_alloc:
	bfin_spi_destroy_queue(drv_data);
out_error_free_io:
	iounmap(drv_data->regs);
out_error_ioremap:
out_error_get_res:
	spi_master_put(master);

	return status;
}

/* stop hardware and remove the driver */
static int bfin_spi_remove(struct platform_device *pdev)
{
	struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
	int status = 0;

	if (!drv_data)
		return 0;

	/* Remove the queue */
	status = bfin_spi_destroy_queue(drv_data);
	if (status != 0)
		return status;

	/* Disable the SSP at the peripheral and SOC level */
	bfin_spi_disable(drv_data);

	/* Release DMA */
	if (drv_data->master_info->enable_dma) {
		if (dma_channel_active(drv_data->dma_channel))
			free_dma(drv_data->dma_channel);
	}

	/* PIO-interrupt IRQ is requested lazily in setup(); free it only
	 * if it was actually taken */
	if (drv_data->irq_requested) {
		free_irq(drv_data->spi_irq, drv_data);
		drv_data->irq_requested = 0;
	}

	/* Disconnect from the SPI framework */
	spi_unregister_master(drv_data->master);

	peripheral_free_list(drv_data->pin_req);

	/* Prevent double remove */
	platform_set_drvdata(pdev, NULL);

	return 0;
}

#ifdef CONFIG_PM
/* Drain the message queue, save CTL/FLG, then put the controller into
 * a known reset state for suspend. */
static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
	int status = 0;

	status = bfin_spi_stop_queue(drv_data);
	if (status != 0)
		return status;

	/* save register state so resume() can restore it */
	drv_data->ctrl_reg = bfin_read(&drv_data->regs->ctl);
	drv_data->flag_reg = bfin_read(&drv_data->regs->flg);

	/*
	 * reset SPI_CTL and SPI_FLG registers
	 */
	bfin_write(&drv_data->regs->ctl, BIT_CTL_CPHA | BIT_CTL_MASTER);
	bfin_write(&drv_data->regs->flg, 0xFF00);

	return 0;
}

static
int bfin_spi_resume(struct platform_device *pdev) { struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); int status = 0; bfin_write(&drv_data->regs->ctl, drv_data->ctrl_reg); bfin_write(&drv_data->regs->flg, drv_data->flag_reg); /* Start the queue running */ status = bfin_spi_start_queue(drv_data); if (status != 0) { dev_err(&pdev->dev, "problem starting queue (%d)\n", status); return status; } return 0; } #else #define bfin_spi_suspend NULL #define bfin_spi_resume NULL #endif /* CONFIG_PM */ MODULE_ALIAS("platform:bfin-spi"); static struct platform_driver bfin_spi_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .suspend = bfin_spi_suspend, .resume = bfin_spi_resume, .remove = bfin_spi_remove, }; static int __init bfin_spi_init(void) { return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe); } subsys_initcall(bfin_spi_init); static void __exit bfin_spi_exit(void) { platform_driver_unregister(&bfin_spi_driver); } module_exit(bfin_spi_exit);
gpl-2.0
eoghan2t9/Wildfire_S_3.0_Kernel
drivers/hid/usbhid/hid-core.c
2364
41118
/* * USB HID support for Linux * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2007-2008 Oliver Neukum * Copyright (c) 2006-2010 Jiri Kosina */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/mm.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <asm/unaligned.h> #include <asm/byteorder.h> #include <linux/input.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <linux/usb.h> #include <linux/hid.h> #include <linux/hiddev.h> #include <linux/hid-debug.h> #include <linux/hidraw.h> #include "usbhid.h" /* * Version Information */ #define DRIVER_DESC "USB HID core driver" #define DRIVER_LICENSE "GPL" /* * Module parameters. */ static unsigned int hid_mousepoll_interval; module_param_named(mousepoll, hid_mousepoll_interval, uint, 0644); MODULE_PARM_DESC(mousepoll, "Polling interval of mice"); static unsigned int ignoreled; module_param_named(ignoreled, ignoreled, uint, 0644); MODULE_PARM_DESC(ignoreled, "Autosuspend with active leds"); /* Quirks specified at module load time */ static char *quirks_param[MAX_USBHID_BOOT_QUIRKS] = { [ 0 ... (MAX_USBHID_BOOT_QUIRKS - 1) ] = NULL }; module_param_array_named(quirks, quirks_param, charp, NULL, 0444); MODULE_PARM_DESC(quirks, "Add/modify USB HID quirks by specifying " " quirks=vendorID:productID:quirks" " where vendorID, productID, and quirks are all in" " 0x-prefixed hex"); /* * Input submission and I/O error handler. 
*/ static DEFINE_MUTEX(hid_open_mut); static void hid_io_error(struct hid_device *hid); static int hid_submit_out(struct hid_device *hid); static int hid_submit_ctrl(struct hid_device *hid); static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid); /* Start up the input URB */ static int hid_start_in(struct hid_device *hid) { unsigned long flags; int rc = 0; struct usbhid_device *usbhid = hid->driver_data; spin_lock_irqsave(&usbhid->lock, flags); if (hid->open > 0 && !test_bit(HID_DISCONNECTED, &usbhid->iofl) && !test_bit(HID_REPORTED_IDLE, &usbhid->iofl) && !test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) { rc = usb_submit_urb(usbhid->urbin, GFP_ATOMIC); if (rc != 0) clear_bit(HID_IN_RUNNING, &usbhid->iofl); } spin_unlock_irqrestore(&usbhid->lock, flags); return rc; } /* I/O retry timer routine */ static void hid_retry_timeout(unsigned long _hid) { struct hid_device *hid = (struct hid_device *) _hid; struct usbhid_device *usbhid = hid->driver_data; dev_dbg(&usbhid->intf->dev, "retrying intr urb\n"); if (hid_start_in(hid)) hid_io_error(hid); } /* Workqueue routine to reset the device or clear a halt */ static void hid_reset(struct work_struct *work) { struct usbhid_device *usbhid = container_of(work, struct usbhid_device, reset_work); struct hid_device *hid = usbhid->hid; int rc = 0; if (test_bit(HID_CLEAR_HALT, &usbhid->iofl)) { dev_dbg(&usbhid->intf->dev, "clear halt\n"); rc = usb_clear_halt(hid_to_usb_dev(hid), usbhid->urbin->pipe); clear_bit(HID_CLEAR_HALT, &usbhid->iofl); hid_start_in(hid); } else if (test_bit(HID_RESET_PENDING, &usbhid->iofl)) { dev_dbg(&usbhid->intf->dev, "resetting device\n"); rc = usb_lock_device_for_reset(hid_to_usb_dev(hid), usbhid->intf); if (rc == 0) { rc = usb_reset_device(hid_to_usb_dev(hid)); usb_unlock_device(hid_to_usb_dev(hid)); } clear_bit(HID_RESET_PENDING, &usbhid->iofl); } switch (rc) { case 0: if (!test_bit(HID_IN_RUNNING, &usbhid->iofl)) hid_io_error(hid); break; default: hid_err(hid, "can't reset device, 
%s-%s/input%d, status %d\n", hid_to_usb_dev(hid)->bus->bus_name, hid_to_usb_dev(hid)->devpath, usbhid->ifnum, rc); /* FALLTHROUGH */ case -EHOSTUNREACH: case -ENODEV: case -EINTR: break; } } /* Main I/O error handler */ static void hid_io_error(struct hid_device *hid) { unsigned long flags; struct usbhid_device *usbhid = hid->driver_data; spin_lock_irqsave(&usbhid->lock, flags); /* Stop when disconnected */ if (test_bit(HID_DISCONNECTED, &usbhid->iofl)) goto done; /* If it has been a while since the last error, we'll assume * this a brand new error and reset the retry timeout. */ if (time_after(jiffies, usbhid->stop_retry + HZ/2)) usbhid->retry_delay = 0; /* When an error occurs, retry at increasing intervals */ if (usbhid->retry_delay == 0) { usbhid->retry_delay = 13; /* Then 26, 52, 104, 104, ... */ usbhid->stop_retry = jiffies + msecs_to_jiffies(1000); } else if (usbhid->retry_delay < 100) usbhid->retry_delay *= 2; if (time_after(jiffies, usbhid->stop_retry)) { /* Retries failed, so do a port reset */ if (!test_and_set_bit(HID_RESET_PENDING, &usbhid->iofl)) { schedule_work(&usbhid->reset_work); goto done; } } mod_timer(&usbhid->io_retry, jiffies + msecs_to_jiffies(usbhid->retry_delay)); done: spin_unlock_irqrestore(&usbhid->lock, flags); } static void usbhid_mark_busy(struct usbhid_device *usbhid) { struct usb_interface *intf = usbhid->intf; usb_mark_last_busy(interface_to_usbdev(intf)); } static int usbhid_restart_out_queue(struct usbhid_device *usbhid) { struct hid_device *hid = usb_get_intfdata(usbhid->intf); int kicked; if (!hid) return 0; if ((kicked = (usbhid->outhead != usbhid->outtail))) { dbg("Kicking head %d tail %d", usbhid->outhead, usbhid->outtail); if (hid_submit_out(hid)) { clear_bit(HID_OUT_RUNNING, &usbhid->iofl); wake_up(&usbhid->wait); } } return kicked; } static int usbhid_restart_ctrl_queue(struct usbhid_device *usbhid) { struct hid_device *hid = usb_get_intfdata(usbhid->intf); int kicked; WARN_ON(hid == NULL); if (!hid) return 0; if 
/* NOTE(review): tail of a ctrl-queue restart helper — presumably
 * usbhid_restart_ctrl_queue(); its head lies outside this chunk.
 * Resubmits the control queue if head != tail and reports whether
 * anything was kicked. */
((kicked = (usbhid->ctrlhead != usbhid->ctrltail))) {
	dbg("Kicking head %d tail %d", usbhid->ctrlhead, usbhid->ctrltail);

	if (hid_submit_ctrl(hid)) {
		/* submission failed: stop the queue and wake waiters */
		clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
		wake_up(&usbhid->wait);
	}
}
return kicked;
}

/*
 * Input interrupt completion handler.
 */

static void hid_irq_in(struct urb *urb)
{
	struct hid_device *hid = urb->context;
	struct usbhid_device *usbhid = hid->driver_data;
	int status;

	switch (urb->status) {
	case 0:			/* success */
		usbhid_mark_busy(usbhid);
		usbhid->retry_delay = 0;
		hid_input_report(urb->context, HID_INPUT_REPORT,
				 urb->transfer_buffer,
				 urb->actual_length, 1);
		/*
		 * autosuspend refused while keys are pressed
		 * because most keyboards don't wake up when
		 * a key is released
		 */
		if (hid_check_keys_pressed(hid))
			set_bit(HID_KEYS_PRESSED, &usbhid->iofl);
		else
			clear_bit(HID_KEYS_PRESSED, &usbhid->iofl);
		break;
	case -EPIPE:		/* stall: defer a clear-halt to the reset worker */
		usbhid_mark_busy(usbhid);
		clear_bit(HID_IN_RUNNING, &usbhid->iofl);
		set_bit(HID_CLEAR_HALT, &usbhid->iofl);
		schedule_work(&usbhid->reset_work);
		return;
	case -ECONNRESET:	/* unlink */
	case -ENOENT:
	case -ESHUTDOWN:	/* unplug: do not resubmit */
		clear_bit(HID_IN_RUNNING, &usbhid->iofl);
		return;
	case -EILSEQ:		/* protocol error or unplug */
	case -EPROTO:		/* protocol error or unplug */
	case -ETIME:		/* protocol error or unplug */
	case -ETIMEDOUT:	/* Should never happen, but... */
		usbhid_mark_busy(usbhid);
		clear_bit(HID_IN_RUNNING, &usbhid->iofl);
		/* let the error handler decide whether to retry later */
		hid_io_error(hid);
		return;
	default:		/* error */
		hid_warn(urb->dev, "input irq status %d received\n",
			 urb->status);
	}

	/* success or unknown error: resubmit the interrupt-in URB */
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		clear_bit(HID_IN_RUNNING, &usbhid->iofl);
		if (status != -EPERM) {
			hid_err(hid, "can't resubmit intr, %s-%s/input%d, status %d\n",
				hid_to_usb_dev(hid)->bus->bus_name,
				hid_to_usb_dev(hid)->devpath,
				usbhid->ifnum, status);
			hid_io_error(hid);
		}
	}
}

/*
 * Send the output report at the out-FIFO tail over the interrupt-out pipe.
 * Takes an async PM reference; returns 0 on success (or when the device is
 * marked idle, leaving the report for resume()), -1 on failure.
 * NOTE(review): callers appear to hold usbhid->lock — confirm.
 */
static int hid_submit_out(struct hid_device *hid)
{
	struct hid_report *report;
	char *raw_report;
	struct usbhid_device *usbhid = hid->driver_data;
	int r;

	report = usbhid->out[usbhid->outtail].report;
	raw_report = usbhid->out[usbhid->outtail].raw_report;

	r = usb_autopm_get_interface_async(usbhid->intf);
	if (r < 0)
		return -1;

	/*
	 * if the device hasn't been woken, we leave the output
	 * to resume()
	 */
	if (!test_bit(HID_REPORTED_IDLE, &usbhid->iofl)) {
		/* length in bytes: size is in bits; +1 byte if a report ID is used */
		usbhid->urbout->transfer_buffer_length =
			((report->size - 1) >> 3) + 1 + (report->id > 0);
		usbhid->urbout->dev = hid_to_usb_dev(hid);
		memcpy(usbhid->outbuf, raw_report,
		       usbhid->urbout->transfer_buffer_length);
		kfree(raw_report);

		dbg_hid("submitting out urb\n");

		if (usb_submit_urb(usbhid->urbout, GFP_ATOMIC)) {
			hid_err(hid, "usb_submit_urb(out) failed\n");
			usb_autopm_put_interface_async(usbhid->intf);
			return -1;
		}
		usbhid->last_out = jiffies;
	}

	return 0;
}

/*
 * Send (OUT) or request (IN) the report at the ctrl-FIFO tail over the
 * default control pipe. Same PM and idle handling as hid_submit_out().
 */
static int hid_submit_ctrl(struct hid_device *hid)
{
	struct hid_report *report;
	unsigned char dir;
	char *raw_report;
	int len, r;
	struct usbhid_device *usbhid = hid->driver_data;

	report = usbhid->ctrl[usbhid->ctrltail].report;
	raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
	dir = usbhid->ctrl[usbhid->ctrltail].dir;

	r = usb_autopm_get_interface_async(usbhid->intf);
	if (r < 0)
		return -1;
	if (!test_bit(HID_REPORTED_IDLE, &usbhid->iofl)) {
		len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
		if (dir == USB_DIR_OUT) {
			usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
			usbhid->urbctrl->transfer_buffer_length = len;
			memcpy(usbhid->ctrlbuf, raw_report, len);
			kfree(raw_report);
		} else {
			int maxpacket, padlen;

			usbhid->urbctrl->pipe = usb_rcvctrlpipe(hid_to_usb_dev(hid), 0);
			maxpacket = usb_maxpacket(hid_to_usb_dev(hid),
						  usbhid->urbctrl->pipe, 0);
			if (maxpacket > 0) {
				/* pad the IN buffer up to a whole number of
				 * packets, capped at the allocated size */
				padlen = DIV_ROUND_UP(len, maxpacket);
				padlen *= maxpacket;
				if (padlen > usbhid->bufsize)
					padlen = usbhid->bufsize;
			} else
				padlen = 0;
			usbhid->urbctrl->transfer_buffer_length = padlen;
		}
		usbhid->urbctrl->dev = hid_to_usb_dev(hid);

		/* build the class-specific SET_REPORT/GET_REPORT setup packet */
		usbhid->cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | dir;
		usbhid->cr->bRequest = (dir == USB_DIR_OUT) ? HID_REQ_SET_REPORT :
							      HID_REQ_GET_REPORT;
		usbhid->cr->wValue = cpu_to_le16(((report->type + 1) << 8) |
						 report->id);
		usbhid->cr->wIndex = cpu_to_le16(usbhid->ifnum);
		usbhid->cr->wLength = cpu_to_le16(len);

		dbg_hid("submitting ctrl urb: %s wValue=0x%04x wIndex=0x%04x wLength=%u\n",
			usbhid->cr->bRequest == HID_REQ_SET_REPORT ? "Set_Report" :
								     "Get_Report",
			usbhid->cr->wValue, usbhid->cr->wIndex, usbhid->cr->wLength);

		if (usb_submit_urb(usbhid->urbctrl, GFP_ATOMIC)) {
			usb_autopm_put_interface_async(usbhid->intf);
			hid_err(hid, "usb_submit_urb(ctrl) failed\n");
			return -1;
		}
		usbhid->last_ctrl = jiffies;
	}

	return 0;
}

/*
 * Output interrupt completion handler.
*/ static void hid_irq_out(struct urb *urb) { struct hid_device *hid = urb->context; struct usbhid_device *usbhid = hid->driver_data; unsigned long flags; int unplug = 0; switch (urb->status) { case 0: /* success */ break; case -ESHUTDOWN: /* unplug */ unplug = 1; case -EILSEQ: /* protocol error or unplug */ case -EPROTO: /* protocol error or unplug */ case -ECONNRESET: /* unlink */ case -ENOENT: break; default: /* error */ hid_warn(urb->dev, "output irq status %d received\n", urb->status); } spin_lock_irqsave(&usbhid->lock, flags); if (unplug) usbhid->outtail = usbhid->outhead; else usbhid->outtail = (usbhid->outtail + 1) & (HID_OUTPUT_FIFO_SIZE - 1); if (usbhid->outhead != usbhid->outtail) { if (hid_submit_out(hid)) { clear_bit(HID_OUT_RUNNING, &usbhid->iofl); wake_up(&usbhid->wait); } spin_unlock_irqrestore(&usbhid->lock, flags); return; } clear_bit(HID_OUT_RUNNING, &usbhid->iofl); spin_unlock_irqrestore(&usbhid->lock, flags); usb_autopm_put_interface_async(usbhid->intf); wake_up(&usbhid->wait); } /* * Control pipe completion handler. 
 */
static void hid_ctrl(struct urb *urb)
{
	struct hid_device *hid = urb->context;
	struct usbhid_device *usbhid = hid->driver_data;
	int unplug = 0, status = urb->status;

	spin_lock(&usbhid->lock);

	switch (status) {
	case 0:			/* success */
		/* for GET_REPORT requests, feed the received data upward */
		if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_IN)
			hid_input_report(urb->context,
					 usbhid->ctrl[usbhid->ctrltail].report->type,
					 urb->transfer_buffer, urb->actual_length, 0);
		break;
	case -ESHUTDOWN:	/* unplug */
		unplug = 1;
		/* fallthrough */
	case -EILSEQ:		/* protocol error or unplug */
	case -EPROTO:		/* protocol error or unplug */
	case -ECONNRESET:	/* unlink */
	case -ENOENT:
	case -EPIPE:		/* report not available */
		break;
	default:		/* error */
		hid_warn(urb->dev, "ctrl urb status %d received\n", status);
	}

	/* on unplug flush the whole ctrl FIFO, otherwise consume one entry */
	if (unplug)
		usbhid->ctrltail = usbhid->ctrlhead;
	else
		usbhid->ctrltail = (usbhid->ctrltail + 1) &
				   (HID_CONTROL_FIFO_SIZE - 1);

	if (usbhid->ctrlhead != usbhid->ctrltail) {
		/* more requests queued: submit the next one */
		if (hid_submit_ctrl(hid)) {
			clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
			wake_up(&usbhid->wait);
		}
		spin_unlock(&usbhid->lock);
		usb_autopm_put_interface_async(usbhid->intf);
		return;
	}

	/* queue drained: stop and release the PM reference */
	clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
	spin_unlock(&usbhid->lock);
	usb_autopm_put_interface_async(usbhid->intf);
	wake_up(&usbhid->wait);
}

/*
 * Queue a report for transmission (OUT) or retrieval (IN).
 * Output reports go to the interrupt-out FIFO when an out endpoint exists;
 * everything else goes to the control FIFO. Caller holds usbhid->lock
 * (see usbhid_submit_report() below and the "under spinlock" notes).
 */
static void __usbhid_submit_report(struct hid_device *hid,
				   struct hid_report *report,
				   unsigned char dir)
{
	int head;
	struct usbhid_device *usbhid = hid->driver_data;
	int len = ((report->size - 1) >> 3) + 1 + (report->id > 0);

	if ((hid->quirks & HID_QUIRK_NOGET) && dir == USB_DIR_IN)
		return;

	if (usbhid->urbout && dir == USB_DIR_OUT &&
	    report->type == HID_OUTPUT_REPORT) {
		if ((head = (usbhid->outhead + 1) & (HID_OUTPUT_FIFO_SIZE - 1)) ==
		    usbhid->outtail) {
			hid_warn(hid, "output queue full\n");
			return;
		}

		usbhid->out[usbhid->outhead].raw_report = kmalloc(len, GFP_ATOMIC);
		if (!usbhid->out[usbhid->outhead].raw_report) {
			hid_warn(hid, "output queueing failed\n");
			return;
		}
		hid_output_report(report, usbhid->out[usbhid->outhead].raw_report);
		usbhid->out[usbhid->outhead].report = report;
		usbhid->outhead = head;

		/* start the queue if it was idle */
		if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) {
			if (hid_submit_out(hid))
				clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
		} else {
			/*
			 * the queue is known to run
			 * but an earlier request may be stuck
			 * we may need to time out
			 * no race because this is called under
			 * spinlock
			 */
			if (time_after(jiffies, usbhid->last_out + HZ * 5))
				usb_unlink_urb(usbhid->urbout);
		}
		return;
	}

	if ((head = (usbhid->ctrlhead + 1) & (HID_CONTROL_FIFO_SIZE - 1)) ==
	    usbhid->ctrltail) {
		hid_warn(hid, "control queue full\n");
		return;
	}

	if (dir == USB_DIR_OUT) {
		usbhid->ctrl[usbhid->ctrlhead].raw_report = kmalloc(len, GFP_ATOMIC);
		if (!usbhid->ctrl[usbhid->ctrlhead].raw_report) {
			hid_warn(hid, "control queueing failed\n");
			return;
		}
		hid_output_report(report, usbhid->ctrl[usbhid->ctrlhead].raw_report);
	}

	usbhid->ctrl[usbhid->ctrlhead].report = report;
	usbhid->ctrl[usbhid->ctrlhead].dir = dir;
	usbhid->ctrlhead = head;

	/* start the ctrl queue if it was idle */
	if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) {
		if (hid_submit_ctrl(hid))
			clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
	} else {
		/*
		 * the queue is known to run
		 * but an earlier request may be stuck
		 * we may need to time out
		 * no race because this is called under
		 * spinlock
		 */
		if (time_after(jiffies, usbhid->last_ctrl + HZ * 5))
			usb_unlink_urb(usbhid->urbctrl);
	}
}

/* Locked wrapper around __usbhid_submit_report(). */
void usbhid_submit_report(struct hid_device *hid, struct hid_report *report,
			  unsigned char dir)
{
	struct usbhid_device *usbhid = hid->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&usbhid->lock, flags);
	__usbhid_submit_report(hid, report, dir);
	spin_unlock_irqrestore(&usbhid->lock, flags);
}
EXPORT_SYMBOL_GPL(usbhid_submit_report);

/*
 * input core event hook: forwards EV_FF to the force-feedback layer and
 * turns EV_LED events into output reports; rejects everything else.
 */
static int usb_hidinput_input_event(struct input_dev *dev, unsigned int type,
				    unsigned int code, int value)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct usbhid_device *usbhid = hid->driver_data;
	struct hid_field *field;
	unsigned long flags;
	int offset;

	if (type == EV_FF)
		return input_ff_event(dev,
				      type, code, value);

	if (type != EV_LED)
		return -1;

	if ((offset = hidinput_find_field(hid, type, code, &field)) == -1) {
		hid_warn(dev, "event field not found\n");
		return -1;
	}

	hid_set_field(field, offset, value);
	/* track how many LEDs are lit; used by the suspend logic */
	if (value) {
		spin_lock_irqsave(&usbhid->lock, flags);
		usbhid->ledcount++;
		spin_unlock_irqrestore(&usbhid->lock, flags);
	} else {
		spin_lock_irqsave(&usbhid->lock, flags);
		usbhid->ledcount--;
		spin_unlock_irqrestore(&usbhid->lock, flags);
	}
	usbhid_submit_report(hid, field->report, USB_DIR_OUT);

	return 0;
}

/*
 * Wait (up to 10 s) for both the control and the interrupt-out queues
 * to drain. Returns 0 on success, -1 on timeout.
 */
int usbhid_wait_io(struct hid_device *hid)
{
	struct usbhid_device *usbhid = hid->driver_data;

	if (!wait_event_timeout(usbhid->wait,
				(!test_bit(HID_CTRL_RUNNING, &usbhid->iofl) &&
				 !test_bit(HID_OUT_RUNNING, &usbhid->iofl)),
				10*HZ)) {
		dbg_hid("timeout waiting for ctrl or out queue to clear\n");
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(usbhid_wait_io);

/* Issue the HID class SET_IDLE request on the default control pipe. */
static int hid_set_idle(struct usb_device *dev, int ifnum, int report, int idle)
{
	return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
		HID_REQ_SET_IDLE, USB_TYPE_CLASS | USB_RECIP_INTERFACE,
		(idle << 8) | report,
		ifnum, NULL, 0, USB_CTRL_SET_TIMEOUT);
}

/*
 * Fetch a class descriptor, retrying up to 4 times on short reads
 * (some devices need several attempts).
 */
static int hid_get_class_descriptor(struct usb_device *dev, int ifnum,
		unsigned char type, void *buf, int size)
{
	int result, retries = 4;

	memset(buf, 0, size);

	do {
		result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
				USB_REQ_GET_DESCRIPTOR,
				USB_RECIP_INTERFACE | USB_DIR_IN,
				(type << 8), ifnum, buf, size,
				USB_CTRL_GET_TIMEOUT);
		retries--;
	} while (result < size && retries);
	return result;
}

/*
 * First open starts input polling and enables remote wakeup.
 * hid->open is a reference count protected by hid_open_mut.
 */
int usbhid_open(struct hid_device *hid)
{
	struct usbhid_device *usbhid = hid->driver_data;
	int res;

	mutex_lock(&hid_open_mut);
	if (!hid->open++) {
		res = usb_autopm_get_interface(usbhid->intf);
		/* the device must be awake to reliably request remote wakeup */
		if (res < 0) {
			hid->open--;
			mutex_unlock(&hid_open_mut);
			return -EIO;
		}
		usbhid->intf->needs_remote_wakeup = 1;
		if (hid_start_in(hid))
			hid_io_error(hid);
		usb_autopm_put_interface(usbhid->intf);
	}
	mutex_unlock(&hid_open_mut);
	return 0;
}

/* Last close stops input polling and disables remote wakeup. */
void usbhid_close(struct hid_device *hid)
{
	struct usbhid_device *usbhid = hid->driver_data;

	mutex_lock(&hid_open_mut);

	/* protecting hid->open to make sure we don't restart
	 * data acquistion due to a resumption we no longer
	 * care about
	 */
	spin_lock_irq(&usbhid->lock);
	if (!--hid->open) {
		spin_unlock_irq(&usbhid->lock);
		hid_cancel_delayed_stuff(usbhid);
		usb_kill_urb(usbhid->urbin);
		usbhid->intf->needs_remote_wakeup = 0;
	} else {
		spin_unlock_irq(&usbhid->lock);
	}

	mutex_unlock(&hid_open_mut);
}

/*
 * Initialize all reports
 */

void usbhid_init_reports(struct hid_device *hid)
{
	struct hid_report *report;
	struct usbhid_device *usbhid = hid->driver_data;
	int err, ret;

	/* request every input and feature report once */
	list_for_each_entry(report,
			&hid->report_enum[HID_INPUT_REPORT].report_list, list)
		usbhid_submit_report(hid, report, USB_DIR_IN);

	list_for_each_entry(report,
			&hid->report_enum[HID_FEATURE_REPORT].report_list, list)
		usbhid_submit_report(hid, report, USB_DIR_IN);

	/* wait for the queues to drain, killing stuck URBs on timeout */
	err = 0;
	ret = usbhid_wait_io(hid);
	while (ret) {
		err |= ret;
		if (test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
			usb_kill_urb(usbhid->urbctrl);
		if (test_bit(HID_OUT_RUNNING, &usbhid->iofl))
			usb_kill_urb(usbhid->urbout);
		ret = usbhid_wait_io(hid);
	}

	if (err)
		hid_warn(hid, "timeout initializing reports\n");
}

/*
 * Reset LEDs which BIOS might have left on. For now, just NumLock (0x01).
 */
/*
 * Scan all output reports for a usage matching (page, hid_code).
 * On success stores the field in *pfield and returns the usage index;
 * returns -1 when not found.
 */
static int hid_find_field_early(struct hid_device *hid, unsigned int page,
    unsigned int hid_code, struct hid_field **pfield)
{
	struct hid_report *report;
	struct hid_field *field;
	struct hid_usage *usage;
	int i, j;

	list_for_each_entry(report,
			&hid->report_enum[HID_OUTPUT_REPORT].report_list, list) {
		for (i = 0; i < report->maxfield; i++) {
			field = report->field[i];
			for (j = 0; j < field->maxusage; j++) {
				usage = &field->usage[j];
				if ((usage->hid & HID_USAGE_PAGE) == page &&
				    (usage->hid & 0xFFFF) == hid_code) {
					*pfield = field;
					return j;
				}
			}
		}
	}
	return -1;
}

/* Turn the NumLock LED (usage 0x01 on the LED page) off, if present. */
void usbhid_set_leds(struct hid_device *hid)
{
	struct hid_field *field;
	int offset;

	if ((offset = hid_find_field_early(hid, HID_UP_LED, 0x01, &field)) != -1) {
		hid_set_field(field, offset, 0);
		usbhid_submit_report(hid, field->report, USB_DIR_OUT);
	}
}
EXPORT_SYMBOL_GPL(usbhid_set_leds);

/*
 * Traverse the supplied list of reports and find the longest
 */
static void hid_find_max_report(struct hid_device *hid, unsigned int type,
				unsigned int *max)
{
	struct hid_report *report;
	unsigned int size;

	list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
		/* bits -> bytes, +1 byte when reports are numbered */
		size = ((report->size - 1) >> 3) + 1 +
			hid->report_enum[type].numbered;
		if (*max < size)
			*max = size;
	}
}

/*
 * Allocate the DMA-coherent transfer buffers and the setup packet.
 * Returns 0 on success, -1 if any allocation failed (caller frees via
 * hid_free_buffers(), which tolerates partial allocation).
 */
static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid)
{
	struct usbhid_device *usbhid = hid->driver_data;

	usbhid->inbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL,
			&usbhid->inbuf_dma);
	usbhid->outbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL,
			&usbhid->outbuf_dma);
	usbhid->cr = kmalloc(sizeof(*usbhid->cr), GFP_KERNEL);
	usbhid->ctrlbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL,
			&usbhid->ctrlbuf_dma);
	if (!usbhid->inbuf || !usbhid->outbuf || !usbhid->cr || !usbhid->ctrlbuf)
		return -1;

	return 0;
}

/*
 * Synchronous GET_REPORT over the control pipe.
 * Returns the number of bytes read (report ID included) or a negative
 * error from usb_control_msg().
 */
static int usbhid_get_raw_report(struct hid_device *hid,
		unsigned char report_number, __u8 *buf, size_t count,
		unsigned char report_type)
{
	struct usbhid_device *usbhid = hid->driver_data;
	struct usb_device *dev = hid_to_usb_dev(hid);
	struct usb_interface *intf = usbhid->intf;
	struct usb_host_interface *interface = intf->cur_altsetting;
	int skipped_report_id = 0;
	int ret;

	/* Byte 0 is the report number. Report data starts at byte 1.*/
	buf[0] = report_number;
	if (report_number == 0x0) {
		/* Offset the return buffer by 1, so that the report ID
		   will remain in byte 0. */
		buf++;
		count--;
		skipped_report_id = 1;
	}
	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
		HID_REQ_GET_REPORT,
		USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
		((report_type + 1) << 8) | report_number,
		interface->desc.bInterfaceNumber, buf, count,
		USB_CTRL_SET_TIMEOUT);

	/* count also the report id */
	if (ret > 0 && skipped_report_id)
		ret++;

	return ret;
}

/*
 * Synchronous raw output. Non-feature reports go over the interrupt-out
 * pipe when one exists; otherwise a SET_REPORT control transfer is used.
 * Returns bytes transferred (report ID included) or a negative error.
 */
static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf,
		size_t count, unsigned char report_type)
{
	struct usbhid_device *usbhid = hid->driver_data;
	struct usb_device *dev = hid_to_usb_dev(hid);
	struct usb_interface *intf = usbhid->intf;
	struct usb_host_interface *interface = intf->cur_altsetting;
	int ret;

	if (usbhid->urbout && report_type != HID_FEATURE_REPORT) {
		int actual_length;
		int skipped_report_id = 0;

		if (buf[0] == 0x0) {
			/* Don't send the Report ID */
			buf++;
			count--;
			skipped_report_id = 1;
		}
		ret = usb_interrupt_msg(dev, usbhid->urbout->pipe,
			buf, count, &actual_length,
			USB_CTRL_SET_TIMEOUT);
		/* return the number of bytes transferred */
		if (ret == 0) {
			ret = actual_length;
			/* count also the report id */
			if (skipped_report_id)
				ret++;
		}
	} else {
		int skipped_report_id = 0;
		int report_id = buf[0];
		if (buf[0] == 0x0) {
			/* Don't send the Report ID */
			buf++;
			count--;
			skipped_report_id = 1;
		}
		ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			HID_REQ_SET_REPORT,
			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			((report_type + 1) << 8) | report_id,
			interface->desc.bInterfaceNumber, buf, count,
			USB_CTRL_SET_TIMEOUT);
		/* count also the report id, if this was a numbered report.
		 */
		if (ret > 0 && skipped_report_id)
			ret++;
	}

	return ret;
}

/* Restart both transmit queues after resume/reset. */
static void usbhid_restart_queues(struct usbhid_device *usbhid)
{
	if (usbhid->urbout)
		usbhid_restart_out_queue(usbhid);
	usbhid_restart_ctrl_queue(usbhid);
}

/* Release everything allocated by hid_alloc_buffers(). */
static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
{
	struct usbhid_device *usbhid = hid->driver_data;

	usb_free_coherent(dev, usbhid->bufsize, usbhid->inbuf, usbhid->inbuf_dma);
	usb_free_coherent(dev, usbhid->bufsize, usbhid->outbuf, usbhid->outbuf_dma);
	kfree(usbhid->cr);
	usb_free_coherent(dev, usbhid->bufsize, usbhid->ctrlbuf, usbhid->ctrlbuf_dma);
}

/*
 * ll_driver->parse: apply quirks, locate the HID class descriptor,
 * fetch the report descriptor and hand it to hid_parse_report().
 */
static int usbhid_parse(struct hid_device *hid)
{
	struct usb_interface *intf = to_usb_interface(hid->dev.parent);
	struct usb_host_interface *interface = intf->cur_altsetting;
	struct usb_device *dev = interface_to_usbdev (intf);
	struct hid_descriptor *hdesc;
	u32 quirks = 0;
	unsigned int rsize = 0;
	char *rdesc;
	int ret, n;

	quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
			le16_to_cpu(dev->descriptor.idProduct));

	if (quirks & HID_QUIRK_IGNORE)
		return -ENODEV;

	/* Many keyboards and mice don't like to be polled for reports,
	 * so we will always set the HID_QUIRK_NOGET flag for them. */
	if (interface->desc.bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT) {
		if (interface->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_KEYBOARD ||
			interface->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE)
				quirks |= HID_QUIRK_NOGET;
	}

	/* the HID descriptor may hang off the interface or the endpoint */
	if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) &&
	    (!interface->desc.bNumEndpoints ||
	     usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID, &hdesc))) {
		dbg_hid("class descriptor not present\n");
		return -ENODEV;
	}

	hid->version = le16_to_cpu(hdesc->bcdHID);
	hid->country = hdesc->bCountryCode;

	for (n = 0; n < hdesc->bNumDescriptors; n++)
		if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
			rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);

	if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
		dbg_hid("weird size of report descriptor (%u)\n", rsize);
		return -EINVAL;
	}

	if (!(rdesc = kmalloc(rsize, GFP_KERNEL))) {
		dbg_hid("couldn't allocate rdesc memory\n");
		return -ENOMEM;
	}

	hid_set_idle(dev, interface->desc.bInterfaceNumber, 0, 0);

	ret = hid_get_class_descriptor(dev, interface->desc.bInterfaceNumber,
			HID_DT_REPORT, rdesc, rsize);
	if (ret < 0) {
		dbg_hid("reading report descriptor failed\n");
		kfree(rdesc);
		goto err;
	}

	ret = hid_parse_report(hid, rdesc, rsize);
	kfree(rdesc);
	if (ret) {
		dbg_hid("parsing report descriptor failed\n");
		goto err;
	}

	hid->quirks |= quirks;

	return 0;
err:
	return ret;
}

/*
 * ll_driver->start: size and allocate the transfer buffers, set up the
 * interrupt-in/-out and control URBs, then prime the device.
 */
static int usbhid_start(struct hid_device *hid)
{
	struct usb_interface *intf = to_usb_interface(hid->dev.parent);
	struct usb_host_interface *interface = intf->cur_altsetting;
	struct usb_device *dev = interface_to_usbdev(intf);
	struct usbhid_device *usbhid = hid->driver_data;
	unsigned int n, insize = 0;
	int ret;

	clear_bit(HID_DISCONNECTED, &usbhid->iofl);

	/* size buffers to the largest report of any type, clamped */
	usbhid->bufsize = HID_MIN_BUFFER_SIZE;
	hid_find_max_report(hid, HID_INPUT_REPORT, &usbhid->bufsize);
	hid_find_max_report(hid, HID_OUTPUT_REPORT, &usbhid->bufsize);
	hid_find_max_report(hid, HID_FEATURE_REPORT, &usbhid->bufsize);

	if (usbhid->bufsize > HID_MAX_BUFFER_SIZE)
		usbhid->bufsize = HID_MAX_BUFFER_SIZE;

	hid_find_max_report(hid, HID_INPUT_REPORT, &insize);

	if (insize > HID_MAX_BUFFER_SIZE)
		insize = HID_MAX_BUFFER_SIZE;

	if (hid_alloc_buffers(dev, hid)) {
		ret = -ENOMEM;
		goto fail;
	}

	for (n = 0; n < interface->desc.bNumEndpoints; n++) {
		struct usb_endpoint_descriptor *endpoint;
		int pipe;
		int interval;

		endpoint = &interface->endpoint[n].desc;
		if (!usb_endpoint_xfer_int(endpoint))
			continue;

		interval = endpoint->bInterval;

		/* Some vendors give fullspeed interval on highspeed devides */
		if (hid->quirks & HID_QUIRK_FULLSPEED_INTERVAL &&
		    dev->speed == USB_SPEED_HIGH) {
			interval = fls(endpoint->bInterval*8);
			printk(KERN_INFO "%s: Fixing fullspeed to highspeed interval: %d -> %d\n",
			       hid->name, endpoint->bInterval, interval);
		}

		/* Change the polling interval of mice. */
		if (hid->collection->usage == HID_GD_MOUSE &&
		    hid_mousepoll_interval > 0)
			interval = hid_mousepoll_interval;

		ret = -ENOMEM;
		if (usb_endpoint_dir_in(endpoint)) {
			if (usbhid->urbin)
				continue;
			if (!(usbhid->urbin = usb_alloc_urb(0, GFP_KERNEL)))
				goto fail;
			pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
			usb_fill_int_urb(usbhid->urbin, dev, pipe,
					 usbhid->inbuf, insize,
					 hid_irq_in, hid, interval);
			usbhid->urbin->transfer_dma = usbhid->inbuf_dma;
			usbhid->urbin->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		} else {
			if (usbhid->urbout)
				continue;
			if (!(usbhid->urbout = usb_alloc_urb(0, GFP_KERNEL)))
				goto fail;
			pipe = usb_sndintpipe(dev, endpoint->bEndpointAddress);
			usb_fill_int_urb(usbhid->urbout, dev, pipe,
					 usbhid->outbuf, 0,
					 hid_irq_out, hid, interval);
			usbhid->urbout->transfer_dma = usbhid->outbuf_dma;
			usbhid->urbout->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		}
	}

	usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL);
	if (!usbhid->urbctrl) {
		ret = -ENOMEM;
		goto fail;
	}

	usb_fill_control_urb(usbhid->urbctrl, dev, 0, (void *) usbhid->cr,
			     usbhid->ctrlbuf, 1, hid_ctrl, hid);
	usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma;
	usbhid->urbctrl->transfer_flags |=
			URB_NO_TRANSFER_DMA_MAP;

	if (!(hid->quirks & HID_QUIRK_NO_INIT_REPORTS))
		usbhid_init_reports(hid);

	set_bit(HID_STARTED, &usbhid->iofl);

	/* Some keyboards don't work until their LEDs have been set.
	 * Since BIOSes do set the LEDs, it must be safe for any device
	 * that supports the keyboard boot protocol.
	 * In addition, enable remote wakeup by default for all keyboard
	 * devices supporting the boot protocol.
	 */
	if (interface->desc.bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT &&
			interface->desc.bInterfaceProtocol ==
				USB_INTERFACE_PROTOCOL_KEYBOARD) {
		usbhid_set_leds(hid);
		device_set_wakeup_enable(&dev->dev, 1);
	}

	return 0;

fail:
	/* unwind: release any URBs and buffers allocated so far */
	usb_free_urb(usbhid->urbin);
	usb_free_urb(usbhid->urbout);
	usb_free_urb(usbhid->urbctrl);
	usbhid->urbin = NULL;
	usbhid->urbout = NULL;
	usbhid->urbctrl = NULL;
	hid_free_buffers(dev, hid);
	return ret;
}

/* ll_driver->stop: kill all I/O and free the URBs and buffers. */
static void usbhid_stop(struct hid_device *hid)
{
	struct usbhid_device *usbhid = hid->driver_data;

	if (WARN_ON(!usbhid))
		return;

	clear_bit(HID_STARTED, &usbhid->iofl);
	spin_lock_irq(&usbhid->lock);	/* Sync with error handler */
	set_bit(HID_DISCONNECTED, &usbhid->iofl);
	spin_unlock_irq(&usbhid->lock);
	usb_kill_urb(usbhid->urbin);
	usb_kill_urb(usbhid->urbout);
	usb_kill_urb(usbhid->urbctrl);

	hid_cancel_delayed_stuff(usbhid);

	hid->claimed = 0;

	usb_free_urb(usbhid->urbin);
	usb_free_urb(usbhid->urbctrl);
	usb_free_urb(usbhid->urbout);
	usbhid->urbin = NULL; /* don't mess up next start */
	usbhid->urbctrl = NULL;
	usbhid->urbout = NULL;

	hid_free_buffers(hid_to_usb_dev(hid), hid);
}

/* ll_driver->power: map PM hints onto autopm get/put. */
static int usbhid_power(struct hid_device *hid, int lvl)
{
	int r = 0;

	switch (lvl) {
	case PM_HINT_FULLON:
		r = usbhid_get_power(hid);
		break;
	case PM_HINT_NORMAL:
		usbhid_put_power(hid);
		break;
	}
	return r;
}

static struct hid_ll_driver usb_hid_driver = {
	.parse = usbhid_parse,
	.start = usbhid_start,
	.stop = usbhid_stop,
	.open = usbhid_open,
	.close = usbhid_close,
	.power = usbhid_power,
	.hidinput_input_event = usb_hidinput_input_event,
};

/*
 * USB probe: verify an interrupt-in endpoint exists, allocate and fill
 * the hid_device and usbhid_device, then register with the HID core.
 */
static int usbhid_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct usb_host_interface *interface = intf->cur_altsetting;
	struct usb_device *dev = interface_to_usbdev(intf);
	struct usbhid_device *usbhid;
	struct hid_device *hid;
	unsigned int n, has_in = 0;
	size_t len;
	int ret;

	dbg_hid("HID probe called for ifnum %d\n",
			intf->altsetting->desc.bInterfaceNumber);

	for (n = 0; n < interface->desc.bNumEndpoints; n++)
		if (usb_endpoint_is_int_in(&interface->endpoint[n].desc))
			has_in++;
	if (!has_in) {
		hid_err(intf, "couldn't find an input interrupt endpoint\n");
		return -ENODEV;
	}

	hid = hid_allocate_device();
	if (IS_ERR(hid))
		return PTR_ERR(hid);

	usb_set_intfdata(intf, hid);
	hid->ll_driver = &usb_hid_driver;
	hid->hid_get_raw_report = usbhid_get_raw_report;
	hid->hid_output_raw_report = usbhid_output_raw_report;
	hid->ff_init = hid_pidff_init;
#ifdef CONFIG_USB_HIDDEV
	hid->hiddev_connect = hiddev_connect;
	hid->hiddev_disconnect = hiddev_disconnect;
	hid->hiddev_hid_event = hiddev_hid_event;
	hid->hiddev_report_event = hiddev_report_event;
#endif
	hid->dev.parent = &intf->dev;
	hid->bus = BUS_USB;
	hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
	hid->product = le16_to_cpu(dev->descriptor.idProduct);
	hid->name[0] = 0;
	hid->quirks = usbhid_lookup_quirk(hid->vendor, hid->product);
	if (intf->cur_altsetting->desc.bInterfaceProtocol ==
			USB_INTERFACE_PROTOCOL_MOUSE)
		hid->type = HID_TYPE_USBMOUSE;

	/* build "Manufacturer Product" or a "HID vvvv:pppp" fallback name */
	if (dev->manufacturer)
		strlcpy(hid->name, dev->manufacturer, sizeof(hid->name));

	if (dev->product) {
		if (dev->manufacturer)
			strlcat(hid->name, " ", sizeof(hid->name));
		strlcat(hid->name, dev->product, sizeof(hid->name));
	}

	if (!strlen(hid->name))
		snprintf(hid->name, sizeof(hid->name), "HID %04x:%04x",
			 le16_to_cpu(dev->descriptor.idVendor),
			 le16_to_cpu(dev->descriptor.idProduct));

	usb_make_path(dev, hid->phys, sizeof(hid->phys));
	strlcat(hid->phys, "/input", sizeof(hid->phys));
	len = strlen(hid->phys);
	if (len < sizeof(hid->phys) - 1)
		snprintf(hid->phys + len, sizeof(hid->phys) - len,
			 "%d", intf->altsetting[0].desc.bInterfaceNumber);

	if (usb_string(dev, dev->descriptor.iSerialNumber, hid->uniq, 64) <= 0)
		hid->uniq[0] = 0;

	usbhid = kzalloc(sizeof(*usbhid), GFP_KERNEL);
	if (usbhid == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	hid->driver_data = usbhid;
	usbhid->hid = hid;
	usbhid->intf = intf;
	usbhid->ifnum = interface->desc.bInterfaceNumber;

	init_waitqueue_head(&usbhid->wait);
	INIT_WORK(&usbhid->reset_work, hid_reset);
	setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
	spin_lock_init(&usbhid->lock);

	ret = hid_add_device(hid);
	if (ret) {
		if (ret != -ENODEV)
			hid_err(intf, "can't add hid device: %d\n", ret);
		goto err_free;
	}

	return 0;
err_free:
	kfree(usbhid);
err:
	hid_destroy_device(hid);
	return ret;
}

static void usbhid_disconnect(struct usb_interface *intf)
{
	struct hid_device *hid = usb_get_intfdata(intf);
	struct usbhid_device *usbhid;

	if (WARN_ON(!hid))
		return;

	usbhid = hid->driver_data;
	hid_destroy_device(hid);
	kfree(usbhid);
}

/* Synchronously cancel the retry timer and the reset work item. */
static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid)
{
	del_timer_sync(&usbhid->io_retry);
	cancel_work_sync(&usbhid->reset_work);
}

/* Stop all in-flight I/O (non-blocking timer removal, blocking URB kill). */
static void hid_cease_io(struct usbhid_device *usbhid)
{
	del_timer(&usbhid->io_retry);
	usb_kill_urb(usbhid->urbin);
	usb_kill_urb(usbhid->urbctrl);
	usb_kill_urb(usbhid->urbout);
}

/* Treat USB reset pretty much the same as suspend/resume */
static int hid_pre_reset(struct usb_interface *intf)
{
	struct hid_device *hid = usb_get_intfdata(intf);
	struct usbhid_device *usbhid = hid->driver_data;

	spin_lock_irq(&usbhid->lock);
	set_bit(HID_RESET_PENDING, &usbhid->iofl);
	spin_unlock_irq(&usbhid->lock);
	hid_cease_io(usbhid);

	return 0;
}

/* Same routine used for post_reset and reset_resume */
static int hid_post_reset(struct usb_interface *intf)
{
	struct usb_device *dev = interface_to_usbdev (intf);
	struct hid_device *hid = usb_get_intfdata(intf);
	struct usbhid_device *usbhid = hid->driver_data;
	int status;

	spin_lock_irq(&usbhid->lock);
	clear_bit(HID_RESET_PENDING, &usbhid->iofl);
	spin_unlock_irq(&usbhid->lock);
	hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
	/* restart input polling and flush the queued output */
	status = hid_start_in(hid);
	if (status < 0)
		hid_io_error(hid);
	usbhid_restart_queues(usbhid);

	return 0;
}

int usbhid_get_power(struct hid_device *hid)
{
	struct usbhid_device *usbhid = hid->driver_data;

	return usb_autopm_get_interface(usbhid->intf);
}

void usbhid_put_power(struct hid_device *hid)
{
	struct usbhid_device *usbhid = hid->driver_data;

	usb_autopm_put_interface(usbhid->intf);
}

#ifdef CONFIG_PM
/*
 * Suspend handler. Autosuspend (PM_EVENT_AUTO) is refused while any
 * queue is running, a reset/halt is pending, keys are pressed, or LEDs
 * are lit (unless ignoreled); a forced suspend always proceeds.
 */
static int hid_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct hid_device *hid = usb_get_intfdata(intf);
	struct usbhid_device *usbhid = hid->driver_data;
	int status;

	if (message.event & PM_EVENT_AUTO) {
		spin_lock_irq(&usbhid->lock);	/* Sync with error handler */
		if (!test_bit(HID_RESET_PENDING, &usbhid->iofl)
		    && !test_bit(HID_CLEAR_HALT, &usbhid->iofl)
		    && !test_bit(HID_OUT_RUNNING, &usbhid->iofl)
		    && !test_bit(HID_CTRL_RUNNING, &usbhid->iofl)
		    && !test_bit(HID_KEYS_PRESSED, &usbhid->iofl)
		    && (!usbhid->ledcount || ignoreled))
		{
			set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
			spin_unlock_irq(&usbhid->lock);
			if (hid->driver && hid->driver->suspend) {
				status = hid->driver->suspend(hid, message);
				if (status < 0)
					return status;
			}
		} else {
			usbhid_mark_busy(usbhid);
			spin_unlock_irq(&usbhid->lock);
			return -EBUSY;
		}

	} else {
		/* forced suspend: notify the driver, then wait for I/O */
		if (hid->driver && hid->driver->suspend) {
			status = hid->driver->suspend(hid, message);
			if (status < 0)
				return status;
		}
		spin_lock_irq(&usbhid->lock);
		set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
		spin_unlock_irq(&usbhid->lock);
		if (usbhid_wait_io(hid) < 0)
			return -EIO;
	}

	if (!ignoreled && (message.event & PM_EVENT_AUTO)) {
		spin_lock_irq(&usbhid->lock);
		if (test_bit(HID_LED_ON, &usbhid->iofl)) {
			spin_unlock_irq(&usbhid->lock);
			usbhid_mark_busy(usbhid);
			return -EBUSY;
		}
		spin_unlock_irq(&usbhid->lock);
	}

	hid_cancel_delayed_stuff(usbhid);
	hid_cease_io(usbhid);

	if ((message.event & PM_EVENT_AUTO) &&
			test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
		/* lost race against keypresses */
		status = hid_start_in(hid);
		if (status < 0)
			hid_io_error(hid);
		usbhid_mark_busy(usbhid);
		return -EBUSY;
	}
	dev_dbg(&intf->dev, "suspend\n");
	return 0;
}

static int hid_resume(struct usb_interface *intf)
{
	struct hid_device *hid = usb_get_intfdata (intf);
	struct usbhid_device *usbhid = hid->driver_data;
	int status;

	if (!test_bit(HID_STARTED, &usbhid->iofl))
		return 0;

	clear_bit(HID_REPORTED_IDLE, &usbhid->iofl);
	usbhid_mark_busy(usbhid);

	/* a halt-clear or reset deferred during suspend runs now */
	if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
	    test_bit(HID_RESET_PENDING, &usbhid->iofl))
		schedule_work(&usbhid->reset_work);
	usbhid->retry_delay = 0;
	status = hid_start_in(hid);
	if (status < 0)
		hid_io_error(hid);
	usbhid_restart_queues(usbhid);

	if (status >= 0 && hid->driver && hid->driver->resume) {
		int ret = hid->driver->resume(hid);
		if (ret < 0)
			status = ret;
	}
	dev_dbg(&intf->dev, "resume status %d\n", status);
	return 0;
}

static int hid_reset_resume(struct usb_interface *intf)
{
	struct hid_device *hid = usb_get_intfdata(intf);
	struct usbhid_device *usbhid = hid->driver_data;
	int status;

	clear_bit(HID_REPORTED_IDLE, &usbhid->iofl);
	status = hid_post_reset(intf);
	if (status >= 0 && hid->driver && hid->driver->reset_resume) {
		int ret = hid->driver->reset_resume(hid);
		if (ret < 0)
			status = ret;
	}
	return status;
}

#endif /* CONFIG_PM */

/* match every interface of class HID */
static const struct usb_device_id hid_usb_ids[] = {
	{ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
		.bInterfaceClass = USB_INTERFACE_CLASS_HID },
	{ }						/* Terminating entry */
};

MODULE_DEVICE_TABLE (usb, hid_usb_ids);

static struct usb_driver hid_driver = {
	.name =		"usbhid",
	.probe =	usbhid_probe,
	.disconnect =	usbhid_disconnect,
#ifdef CONFIG_PM
	.suspend =	hid_suspend,
	.resume =	hid_resume,
	.reset_resume =	hid_reset_resume,
#endif
	.pre_reset =	hid_pre_reset,
	.post_reset =	hid_post_reset,
	.id_table =	hid_usb_ids,
	.supports_autosuspend = 1,
};

static const struct hid_device_id hid_usb_table[] = {
	{ HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
	{ }
};

struct usb_interface *usbhid_find_interface(int minor)
{
	return usb_find_interface(&hid_driver, minor);
}

/* catch-all HID driver for devices no specific driver claims */
static struct hid_driver hid_usb_driver = {
	.name = "generic-usb",
	.id_table = hid_usb_table,
};

static int __init hid_init(void)
{
	int retval = -ENOMEM;

	retval = hid_register_driver(&hid_usb_driver);
	if (retval)
		goto hid_register_fail;
	retval = usbhid_quirks_init(quirks_param);
	if (retval)
		goto usbhid_quirks_init_fail;

	retval = usb_register(&hid_driver);
	if (retval)
		goto usb_register_fail;
	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC "\n");

	return 0;
usb_register_fail:
	usbhid_quirks_exit();
usbhid_quirks_init_fail:
	hid_unregister_driver(&hid_usb_driver);
hid_register_fail:
	return retval;
}

static void __exit hid_exit(void)
{
	usb_deregister(&hid_driver);
	usbhid_quirks_exit();
	hid_unregister_driver(&hid_usb_driver);
}

module_init(hid_init);
module_exit(hid_exit);

MODULE_AUTHOR("Andreas Gal");
MODULE_AUTHOR("Vojtech Pavlik");
MODULE_AUTHOR("Jiri Kosina");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE(DRIVER_LICENSE);
gpl-2.0
Silentlys/android_kernel_xiaomi_redmi2
drivers/mfd/si476x-cmd.c
2620
47218
/*
 * drivers/mfd/si476x-cmd.c -- Subroutines implementing command
 * protocol of si476x series of chips
 *
 * Copyright (C) 2012 Innovative Converged Devices(ICD)
 * Copyright (C) 2013 Andrey Smirnov
 *
 * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/i2c.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/videodev2.h>

#include <linux/mfd/si476x-core.h>

#include <asm/unaligned.h>

/* byte extraction from 16-bit command arguments */
#define msb(x)	((u8)((u16) x >> 8))
#define lsb(x)	((u8)((u16) x & 0x00FF))

/*
 * Command opcodes with their argument (NARGS) and response (NRESP)
 * byte counts, per chip revision (A10/A20/A30) where they differ.
 */
#define CMD_POWER_UP				0x01
#define CMD_POWER_UP_A10_NRESP			1
#define CMD_POWER_UP_A10_NARGS			5

#define CMD_POWER_UP_A20_NRESP			1
#define CMD_POWER_UP_A20_NARGS			5

#define POWER_UP_DELAY_MS			110

#define CMD_POWER_DOWN				0x11
#define CMD_POWER_DOWN_A10_NRESP		1

#define CMD_POWER_DOWN_A20_NRESP		1
#define CMD_POWER_DOWN_A20_NARGS		1

#define CMD_FUNC_INFO				0x12
#define CMD_FUNC_INFO_NRESP			7

#define CMD_SET_PROPERTY			0x13
#define CMD_SET_PROPERTY_NARGS			5
#define CMD_SET_PROPERTY_NRESP			1

#define CMD_GET_PROPERTY			0x14
#define CMD_GET_PROPERTY_NARGS			3
#define CMD_GET_PROPERTY_NRESP			4

#define CMD_AGC_STATUS				0x17
#define CMD_AGC_STATUS_NRESP_A10		2
#define CMD_AGC_STATUS_NRESP_A20		6

#define PIN_CFG_BYTE(x)				(0x7F & (x))
#define CMD_DIG_AUDIO_PIN_CFG			0x18
#define CMD_DIG_AUDIO_PIN_CFG_NARGS		4
#define CMD_DIG_AUDIO_PIN_CFG_NRESP		5

#define CMD_ZIF_PIN_CFG				0x19
#define CMD_ZIF_PIN_CFG_NARGS			4
#define CMD_ZIF_PIN_CFG_NRESP			5

#define CMD_IC_LINK_GPO_CTL_PIN_CFG		0x1A
#define CMD_IC_LINK_GPO_CTL_PIN_CFG_NARGS	4
#define CMD_IC_LINK_GPO_CTL_PIN_CFG_NRESP	5

#define CMD_ANA_AUDIO_PIN_CFG			0x1B
#define CMD_ANA_AUDIO_PIN_CFG_NARGS		1
#define CMD_ANA_AUDIO_PIN_CFG_NRESP		2

#define CMD_INTB_PIN_CFG			0x1C
#define CMD_INTB_PIN_CFG_NARGS			2
#define CMD_INTB_PIN_CFG_A10_NRESP		6
#define CMD_INTB_PIN_CFG_A20_NRESP		3

#define CMD_FM_TUNE_FREQ			0x30
#define CMD_FM_TUNE_FREQ_A10_NARGS		5
#define CMD_FM_TUNE_FREQ_A20_NARGS		3
#define CMD_FM_TUNE_FREQ_NRESP			1

#define CMD_FM_RSQ_STATUS			0x32
#define CMD_FM_RSQ_STATUS_A10_NARGS		1
#define CMD_FM_RSQ_STATUS_A10_NRESP		17
#define CMD_FM_RSQ_STATUS_A30_NARGS		1
#define CMD_FM_RSQ_STATUS_A30_NRESP		23

#define CMD_FM_SEEK_START			0x31
#define CMD_FM_SEEK_START_NARGS			1
#define CMD_FM_SEEK_START_NRESP			1

#define CMD_FM_RDS_STATUS			0x36
#define CMD_FM_RDS_STATUS_NARGS			1
#define CMD_FM_RDS_STATUS_NRESP			16

#define CMD_FM_RDS_BLOCKCOUNT			0x37
#define CMD_FM_RDS_BLOCKCOUNT_NARGS		1
#define CMD_FM_RDS_BLOCKCOUNT_NRESP		8

#define CMD_FM_PHASE_DIVERSITY			0x38
#define CMD_FM_PHASE_DIVERSITY_NARGS		1
#define CMD_FM_PHASE_DIVERSITY_NRESP		1

#define CMD_FM_PHASE_DIV_STATUS			0x39
#define CMD_FM_PHASE_DIV_STATUS_NRESP		2

#define CMD_AM_TUNE_FREQ			0x40
#define CMD_AM_TUNE_FREQ_NARGS			3
#define CMD_AM_TUNE_FREQ_NRESP			1

#define CMD_AM_RSQ_STATUS			0x42
#define CMD_AM_RSQ_STATUS_NARGS			1
#define CMD_AM_RSQ_STATUS_NRESP			13

#define CMD_AM_SEEK_START			0x41
#define CMD_AM_SEEK_START_NARGS			1
#define CMD_AM_SEEK_START_NRESP			1

#define CMD_AM_ACF_STATUS			0x45
#define CMD_AM_ACF_STATUS_NRESP			6
#define CMD_AM_ACF_STATUS_NARGS			1

#define CMD_FM_ACF_STATUS			0x35
#define CMD_FM_ACF_STATUS_NRESP			8
#define CMD_FM_ACF_STATUS_NARGS			1

#define CMD_MAX_ARGS_COUNT			(10)

/* bit layout of the ACF status response bytes */
enum si476x_acf_status_report_bits {
	SI476X_ACF_BLEND_INT	= (1 << 4),
	SI476X_ACF_HIBLEND_INT	= (1 << 3),
	SI476X_ACF_HICUT_INT	= (1 << 2),
	SI476X_ACF_CHBW_INT	= (1 << 1),
	SI476X_ACF_SOFTMUTE_INT	= (1 << 0),

	SI476X_ACF_SMUTE	= (1 << 0),
	SI476X_ACF_SMATTN	= 0x1f,
	SI476X_ACF_PILOT	= (1 << 7),
	SI476X_ACF_STBLEND	= ~SI476X_ACF_PILOT,
};

/* bit layout of the AGC status response byte */
enum si476x_agc_status_report_bits {
	SI476X_AGC_MXHI		= (1 << 5),
	SI476X_AGC_MXLO		= (1 << 4),
	SI476X_AGC_LNAHI	= (1 << 3),
	SI476X_AGC_LNALO	= (1 << 2),
};

/* error codes returned by the chip in the second status byte */
enum si476x_errors {
	SI476X_ERR_BAD_COMMAND		= 0x10,
	SI476X_ERR_BAD_ARG1		= 0x11,
	SI476X_ERR_BAD_ARG2		= 0x12,
	SI476X_ERR_BAD_ARG3		= 0x13,
	SI476X_ERR_BAD_ARG4		= 0x14,
	SI476X_ERR_BUSY			= 0x18,
	SI476X_ERR_BAD_INTERNAL_MEMORY	= 0x20,
	SI476X_ERR_BAD_PATCH		= 0x30,
	SI476X_ERR_BAD_BOOT_MODE	= 0x31,
	SI476X_ERR_BAD_PROPERTY		= 0x40,
};

/*
 * Read the chip's error status over I2C, log a human-readable cause and
 * translate it to a negative errno.
 * NOTE(review): this function is truncated at the end of the visible
 * chunk; the text below ends mid-expression exactly as in the source.
 */
static int si476x_core_parse_and_nag_about_error(struct si476x_core *core)
{
	int err;
	char *cause;
	u8 buffer[2];

	if (core->revision != SI476X_REVISION_A10) {
		err = si476x_core_i2c_xfer(core, SI476X_I2C_RECV,
					   buffer, sizeof(buffer));
		if (err == sizeof(buffer)) {
			switch (buffer[1]) {
			case SI476X_ERR_BAD_COMMAND:
				cause = "Bad command";
				err = -EINVAL;
				break;
			case SI476X_ERR_BAD_ARG1:
				cause = "Bad argument #1";
				err = -EINVAL;
				break;
			case SI476X_ERR_BAD_ARG2:
				cause = "Bad argument #2";
				err = -EINVAL;
				break;
			case SI476X_ERR_BAD_ARG3:
				cause = "Bad argument #3";
				err = -EINVAL;
				break;
			case SI476X_ERR_BAD_ARG4:
				cause = "Bad argument #4";
				err = -EINVAL;
				break;
			case SI476X_ERR_BUSY:
				cause = "Chip is busy";
				err = -EBUSY;
				break;
			case SI476X_ERR_BAD_INTERNAL_MEMORY:
				cause = "Bad internal memory";
				err = -EIO;
				break;
			case SI476X_ERR_BAD_PATCH:
				cause = "Bad patch";
				err = -EINVAL;
				break;
			case SI476X_ERR_BAD_BOOT_MODE:
				cause = "Bad boot mode";
				err = -EINVAL;
				break;
			case SI476X_ERR_BAD_PROPERTY:
				cause = "Bad property";
				err = -EINVAL;
				break;
			default:
				cause = "Unknown";
				err = -EIO;
			}
			dev_err(&core->client->dev,
				"[Chip error status]: %s\n", cause);
		} else {
			dev_err(&core->client->dev,
				"Failed to fetch error code\n");
			err = (err >= 0) ?
-EIO : err; } } else { err = -EIO; } return err; } /** * si476x_core_send_command() - sends a command to si476x and waits its * response * @core: si476x_device structure for the device we are * communicating with * @command: command id * @args: command arguments we are sending * @argn: actual size of @args * @response: buffer to place the expected response from the device * @respn: actual size of @response * @usecs: amount of time to wait before reading the response (in * usecs) * * Function returns 0 on succsess and negative error code on * failure */ static int si476x_core_send_command(struct si476x_core *core, const u8 command, const u8 args[], const int argn, u8 resp[], const int respn, const int usecs) { struct i2c_client *client = core->client; int err; u8 data[CMD_MAX_ARGS_COUNT + 1]; if (argn > CMD_MAX_ARGS_COUNT) { err = -ENOMEM; goto exit; } if (!client->adapter) { err = -ENODEV; goto exit; } /* First send the command and its arguments */ data[0] = command; memcpy(&data[1], args, argn); dev_dbg(&client->dev, "Command:\n %*ph\n", argn + 1, data); err = si476x_core_i2c_xfer(core, SI476X_I2C_SEND, (char *) data, argn + 1); if (err != argn + 1) { dev_err(&core->client->dev, "Error while sending command 0x%02x\n", command); err = (err >= 0) ? -EIO : err; goto exit; } /* Set CTS to zero only after the command is send to avoid * possible racing conditions when working in polling mode */ atomic_set(&core->cts, 0); /* if (unlikely(command == CMD_POWER_DOWN) */ if (!wait_event_timeout(core->command, atomic_read(&core->cts), usecs_to_jiffies(usecs) + 1)) dev_warn(&core->client->dev, "(%s) [CMD 0x%02x] Answer timeout.\n", __func__, command); /* When working in polling mode, for some reason the tuner will report CTS bit as being set in the first status byte read, but all the consequtive ones will return zeros until the tuner is actually completed the POWER_UP command. 
To workaround that we wait for second CTS to be reported */ if (unlikely(!core->client->irq && command == CMD_POWER_UP)) { if (!wait_event_timeout(core->command, atomic_read(&core->cts), usecs_to_jiffies(usecs) + 1)) dev_warn(&core->client->dev, "(%s) Power up took too much time.\n", __func__); } /* Then get the response */ err = si476x_core_i2c_xfer(core, SI476X_I2C_RECV, resp, respn); if (err != respn) { dev_err(&core->client->dev, "Error while reading response for command 0x%02x\n", command); err = (err >= 0) ? -EIO : err; goto exit; } dev_dbg(&client->dev, "Response:\n %*ph\n", respn, resp); err = 0; if (resp[0] & SI476X_ERR) { dev_err(&core->client->dev, "[CMD 0x%02x] Chip set error flag\n", command); err = si476x_core_parse_and_nag_about_error(core); goto exit; } if (!(resp[0] & SI476X_CTS)) err = -EBUSY; exit: return err; } static int si476x_cmd_clear_stc(struct si476x_core *core) { int err; struct si476x_rsq_status_args args = { .primary = false, .rsqack = false, .attune = false, .cancel = false, .stcack = true, }; switch (core->power_up_parameters.func) { case SI476X_FUNC_FM_RECEIVER: err = si476x_core_cmd_fm_rsq_status(core, &args, NULL); break; case SI476X_FUNC_AM_RECEIVER: err = si476x_core_cmd_am_rsq_status(core, &args, NULL); break; default: err = -EINVAL; } return err; } static int si476x_cmd_tune_seek_freq(struct si476x_core *core, uint8_t cmd, const uint8_t args[], size_t argn, uint8_t *resp, size_t respn) { int err; atomic_set(&core->stc, 0); err = si476x_core_send_command(core, cmd, args, argn, resp, respn, SI476X_TIMEOUT_TUNE); if (!err) { wait_event_killable(core->tuning, atomic_read(&core->stc)); si476x_cmd_clear_stc(core); } return err; } /** * si476x_cmd_func_info() - send 'FUNC_INFO' command to the device * @core: device to send the command to * @info: struct si476x_func_info to fill all the information * returned by the command * * The command requests the firmware and patch version for currently * loaded firmware (dependent on the 
function of the device FM/AM/WB) * * Function returns 0 on succsess and negative error code on * failure */ int si476x_core_cmd_func_info(struct si476x_core *core, struct si476x_func_info *info) { int err; u8 resp[CMD_FUNC_INFO_NRESP]; err = si476x_core_send_command(core, CMD_FUNC_INFO, NULL, 0, resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); info->firmware.major = resp[1]; info->firmware.minor[0] = resp[2]; info->firmware.minor[1] = resp[3]; info->patch_id = ((u16) resp[4] << 8) | resp[5]; info->func = resp[6]; return err; } EXPORT_SYMBOL_GPL(si476x_core_cmd_func_info); /** * si476x_cmd_set_property() - send 'SET_PROPERTY' command to the device * @core: device to send the command to * @property: property address * @value: property value * * Function returns 0 on succsess and negative error code on * failure */ int si476x_core_cmd_set_property(struct si476x_core *core, u16 property, u16 value) { u8 resp[CMD_SET_PROPERTY_NRESP]; const u8 args[CMD_SET_PROPERTY_NARGS] = { 0x00, msb(property), lsb(property), msb(value), lsb(value), }; return si476x_core_send_command(core, CMD_SET_PROPERTY, args, ARRAY_SIZE(args), resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); } EXPORT_SYMBOL_GPL(si476x_core_cmd_set_property); /** * si476x_cmd_get_property() - send 'GET_PROPERTY' command to the device * @core: device to send the command to * @property: property address * * Function return the value of property as u16 on success or a * negative error on failure */ int si476x_core_cmd_get_property(struct si476x_core *core, u16 property) { int err; u8 resp[CMD_GET_PROPERTY_NRESP]; const u8 args[CMD_GET_PROPERTY_NARGS] = { 0x00, msb(property), lsb(property), }; err = si476x_core_send_command(core, CMD_GET_PROPERTY, args, ARRAY_SIZE(args), resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); if (err < 0) return err; else return get_unaligned_be16(resp + 2); } EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property); /** * si476x_cmd_dig_audio_pin_cfg() - send 'DIG_AUDIO_PIN_CFG' command to * the 
device * @core: device to send the command to * @dclk: DCLK pin function configuration: * #SI476X_DCLK_NOOP - do not modify the behaviour * #SI476X_DCLK_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * #SI476X_DCLK_DAUDIO - set the pin to be a part of digital * audio interface * @dfs: DFS pin function configuration: * #SI476X_DFS_NOOP - do not modify the behaviour * #SI476X_DFS_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_DFS_DAUDIO - set the pin to be a part of digital * audio interface * @dout - DOUT pin function configuration: * SI476X_DOUT_NOOP - do not modify the behaviour * SI476X_DOUT_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_DOUT_I2S_OUTPUT - set this pin to be digital out on I2S * port 1 * SI476X_DOUT_I2S_INPUT - set this pin to be digital in on I2S * port 1 * @xout - XOUT pin function configuration: * SI476X_XOUT_NOOP - do not modify the behaviour * SI476X_XOUT_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_XOUT_I2S_INPUT - set this pin to be digital in on I2S * port 1 * SI476X_XOUT_MODE_SELECT - set this pin to be the input that * selects the mode of the I2S audio * combiner (analog or HD) * [SI4761/63/65/67 Only] * * Function returns 0 on success and negative error code on failure */ int si476x_core_cmd_dig_audio_pin_cfg(struct si476x_core *core, enum si476x_dclk_config dclk, enum si476x_dfs_config dfs, enum si476x_dout_config dout, enum si476x_xout_config xout) { u8 resp[CMD_DIG_AUDIO_PIN_CFG_NRESP]; const u8 args[CMD_DIG_AUDIO_PIN_CFG_NARGS] = { PIN_CFG_BYTE(dclk), PIN_CFG_BYTE(dfs), PIN_CFG_BYTE(dout), PIN_CFG_BYTE(xout), }; return si476x_core_send_command(core, CMD_DIG_AUDIO_PIN_CFG, args, ARRAY_SIZE(args), resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); } EXPORT_SYMBOL_GPL(si476x_core_cmd_dig_audio_pin_cfg); /** * si476x_cmd_zif_pin_cfg - send 'ZIF_PIN_CFG_COMMAND' * @core - device to send the command to * @iqclk - IQCL pin 
function configuration: * SI476X_IQCLK_NOOP - do not modify the behaviour * SI476X_IQCLK_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_IQCLK_IQ - set pin to be a part of I/Q interace * in master mode * @iqfs - IQFS pin function configuration: * SI476X_IQFS_NOOP - do not modify the behaviour * SI476X_IQFS_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_IQFS_IQ - set pin to be a part of I/Q interace * in master mode * @iout - IOUT pin function configuration: * SI476X_IOUT_NOOP - do not modify the behaviour * SI476X_IOUT_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_IOUT_OUTPUT - set pin to be I out * @qout - QOUT pin function configuration: * SI476X_QOUT_NOOP - do not modify the behaviour * SI476X_QOUT_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_QOUT_OUTPUT - set pin to be Q out * * Function returns 0 on success and negative error code on failure */ int si476x_core_cmd_zif_pin_cfg(struct si476x_core *core, enum si476x_iqclk_config iqclk, enum si476x_iqfs_config iqfs, enum si476x_iout_config iout, enum si476x_qout_config qout) { u8 resp[CMD_ZIF_PIN_CFG_NRESP]; const u8 args[CMD_ZIF_PIN_CFG_NARGS] = { PIN_CFG_BYTE(iqclk), PIN_CFG_BYTE(iqfs), PIN_CFG_BYTE(iout), PIN_CFG_BYTE(qout), }; return si476x_core_send_command(core, CMD_ZIF_PIN_CFG, args, ARRAY_SIZE(args), resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); } EXPORT_SYMBOL_GPL(si476x_core_cmd_zif_pin_cfg); /** * si476x_cmd_ic_link_gpo_ctl_pin_cfg - send * 'IC_LINK_GPIO_CTL_PIN_CFG' comand to the device * @core - device to send the command to * @icin - ICIN pin function configuration: * SI476X_ICIN_NOOP - do not modify the behaviour * SI476X_ICIN_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_ICIN_GPO1_HIGH - set pin to be an output, drive it high * SI476X_ICIN_GPO1_LOW - set pin to be an output, drive it low * SI476X_ICIN_IC_LINK - set the pin to be a part 
of Inter-Chip link * @icip - ICIP pin function configuration: * SI476X_ICIP_NOOP - do not modify the behaviour * SI476X_ICIP_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_ICIP_GPO1_HIGH - set pin to be an output, drive it high * SI476X_ICIP_GPO1_LOW - set pin to be an output, drive it low * SI476X_ICIP_IC_LINK - set the pin to be a part of Inter-Chip link * @icon - ICON pin function configuration: * SI476X_ICON_NOOP - do not modify the behaviour * SI476X_ICON_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_ICON_I2S - set the pin to be a part of audio * interface in slave mode (DCLK) * SI476X_ICON_IC_LINK - set the pin to be a part of Inter-Chip link * @icop - ICOP pin function configuration: * SI476X_ICOP_NOOP - do not modify the behaviour * SI476X_ICOP_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_ICOP_I2S - set the pin to be a part of audio * interface in slave mode (DOUT) * [Si4761/63/65/67 Only] * SI476X_ICOP_IC_LINK - set the pin to be a part of Inter-Chip link * * Function returns 0 on success and negative error code on failure */ int si476x_core_cmd_ic_link_gpo_ctl_pin_cfg(struct si476x_core *core, enum si476x_icin_config icin, enum si476x_icip_config icip, enum si476x_icon_config icon, enum si476x_icop_config icop) { u8 resp[CMD_IC_LINK_GPO_CTL_PIN_CFG_NRESP]; const u8 args[CMD_IC_LINK_GPO_CTL_PIN_CFG_NARGS] = { PIN_CFG_BYTE(icin), PIN_CFG_BYTE(icip), PIN_CFG_BYTE(icon), PIN_CFG_BYTE(icop), }; return si476x_core_send_command(core, CMD_IC_LINK_GPO_CTL_PIN_CFG, args, ARRAY_SIZE(args), resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); } EXPORT_SYMBOL_GPL(si476x_core_cmd_ic_link_gpo_ctl_pin_cfg); /** * si476x_cmd_ana_audio_pin_cfg - send 'ANA_AUDIO_PIN_CFG' to the * device * @core - device to send the command to * @lrout - LROUT pin function configuration: * SI476X_LROUT_NOOP - do not modify the behaviour * SI476X_LROUT_TRISTATE - put the pin in tristate condition, * 
enable 1MOhm pulldown * SI476X_LROUT_AUDIO - set pin to be audio output * SI476X_LROUT_MPX - set pin to be MPX output * * Function returns 0 on success and negative error code on failure */ int si476x_core_cmd_ana_audio_pin_cfg(struct si476x_core *core, enum si476x_lrout_config lrout) { u8 resp[CMD_ANA_AUDIO_PIN_CFG_NRESP]; const u8 args[CMD_ANA_AUDIO_PIN_CFG_NARGS] = { PIN_CFG_BYTE(lrout), }; return si476x_core_send_command(core, CMD_ANA_AUDIO_PIN_CFG, args, ARRAY_SIZE(args), resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); } EXPORT_SYMBOL_GPL(si476x_core_cmd_ana_audio_pin_cfg); /** * si476x_cmd_intb_pin_cfg - send 'INTB_PIN_CFG' command to the device * @core - device to send the command to * @intb - INTB pin function configuration: * SI476X_INTB_NOOP - do not modify the behaviour * SI476X_INTB_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_INTB_DAUDIO - set pin to be a part of digital * audio interface in slave mode * SI476X_INTB_IRQ - set pin to be an interrupt request line * @a1 - A1 pin function configuration: * SI476X_A1_NOOP - do not modify the behaviour * SI476X_A1_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_A1_IRQ - set pin to be an interrupt request line * * Function returns 0 on success and negative error code on failure */ static int si476x_core_cmd_intb_pin_cfg_a10(struct si476x_core *core, enum si476x_intb_config intb, enum si476x_a1_config a1) { u8 resp[CMD_INTB_PIN_CFG_A10_NRESP]; const u8 args[CMD_INTB_PIN_CFG_NARGS] = { PIN_CFG_BYTE(intb), PIN_CFG_BYTE(a1), }; return si476x_core_send_command(core, CMD_INTB_PIN_CFG, args, ARRAY_SIZE(args), resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); } static int si476x_core_cmd_intb_pin_cfg_a20(struct si476x_core *core, enum si476x_intb_config intb, enum si476x_a1_config a1) { u8 resp[CMD_INTB_PIN_CFG_A20_NRESP]; const u8 args[CMD_INTB_PIN_CFG_NARGS] = { PIN_CFG_BYTE(intb), PIN_CFG_BYTE(a1), }; return si476x_core_send_command(core, 
					CMD_INTB_PIN_CFG,
					args, ARRAY_SIZE(args),
					resp, ARRAY_SIZE(resp),
					SI476X_DEFAULT_TIMEOUT);
}

/**
 * si476x_cmd_am_rsq_status - send 'AM_RSQ_STATUS' command to the
 * device
 * @core - device to send the command to
 * @rsqack - if set command clears RSQINT, SNRINT, SNRLINT, RSSIHINT,
 *           RSSSILINT, BLENDINT, MULTHINT and MULTLINT
 * @attune - when set the values in the status report are the values
 *           that were calculated at tune
 * @cancel - abort ongoing seek/tune operation
 * @stcack - clear the STCINT bit in status register
 * @report - all signal quality information returned by the command
 *           (if NULL then the output of the command is ignored)
 *
 * Function returns 0 on success and negative error code on failure
 */
int si476x_core_cmd_am_rsq_status(struct si476x_core *core,
				  struct si476x_rsq_status_args *rsqargs,
				  struct si476x_rsq_status_report *report)
{
	int err;
	u8 resp[CMD_AM_RSQ_STATUS_NRESP];
	const u8 args[CMD_AM_RSQ_STATUS_NARGS] = {
		rsqargs->rsqack << 3 | rsqargs->attune << 2 |
		rsqargs->cancel << 1 | rsqargs->stcack,
	};

	err = si476x_core_send_command(core, CMD_AM_RSQ_STATUS,
				       args, ARRAY_SIZE(args),
				       resp, ARRAY_SIZE(resp),
				       SI476X_DEFAULT_TIMEOUT);
	/*
	 * Besides getting received signal quality information this
	 * command can be used to just acknowledge different interrupt
	 * flags in those cases it is useless to copy and parse
	 * received data so user can pass NULL, and thus avoid
	 * unnecessary copying.
*/ if (!report) return err; report->snrhint = 0x08 & resp[1]; report->snrlint = 0x04 & resp[1]; report->rssihint = 0x02 & resp[1]; report->rssilint = 0x01 & resp[1]; report->bltf = 0x80 & resp[2]; report->snr_ready = 0x20 & resp[2]; report->rssiready = 0x08 & resp[2]; report->afcrl = 0x02 & resp[2]; report->valid = 0x01 & resp[2]; report->readfreq = get_unaligned_be16(resp + 3); report->freqoff = resp[5]; report->rssi = resp[6]; report->snr = resp[7]; report->lassi = resp[9]; report->hassi = resp[10]; report->mult = resp[11]; report->dev = resp[12]; return err; } EXPORT_SYMBOL_GPL(si476x_core_cmd_am_rsq_status); int si476x_core_cmd_fm_acf_status(struct si476x_core *core, struct si476x_acf_status_report *report) { int err; u8 resp[CMD_FM_ACF_STATUS_NRESP]; const u8 args[CMD_FM_ACF_STATUS_NARGS] = { 0x0, }; if (!report) return -EINVAL; err = si476x_core_send_command(core, CMD_FM_ACF_STATUS, args, ARRAY_SIZE(args), resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); if (err < 0) return err; report->blend_int = resp[1] & SI476X_ACF_BLEND_INT; report->hblend_int = resp[1] & SI476X_ACF_HIBLEND_INT; report->hicut_int = resp[1] & SI476X_ACF_HICUT_INT; report->chbw_int = resp[1] & SI476X_ACF_CHBW_INT; report->softmute_int = resp[1] & SI476X_ACF_SOFTMUTE_INT; report->smute = resp[2] & SI476X_ACF_SMUTE; report->smattn = resp[3] & SI476X_ACF_SMATTN; report->chbw = resp[4]; report->hicut = resp[5]; report->hiblend = resp[6]; report->pilot = resp[7] & SI476X_ACF_PILOT; report->stblend = resp[7] & SI476X_ACF_STBLEND; return err; } EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_acf_status); int si476x_core_cmd_am_acf_status(struct si476x_core *core, struct si476x_acf_status_report *report) { int err; u8 resp[CMD_AM_ACF_STATUS_NRESP]; const u8 args[CMD_AM_ACF_STATUS_NARGS] = { 0x0, }; if (!report) return -EINVAL; err = si476x_core_send_command(core, CMD_AM_ACF_STATUS, args, ARRAY_SIZE(args), resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); if (err < 0) return err; report->blend_int = resp[1] & 
SI476X_ACF_BLEND_INT; report->hblend_int = resp[1] & SI476X_ACF_HIBLEND_INT; report->hicut_int = resp[1] & SI476X_ACF_HICUT_INT; report->chbw_int = resp[1] & SI476X_ACF_CHBW_INT; report->softmute_int = resp[1] & SI476X_ACF_SOFTMUTE_INT; report->smute = resp[2] & SI476X_ACF_SMUTE; report->smattn = resp[3] & SI476X_ACF_SMATTN; report->chbw = resp[4]; report->hicut = resp[5]; return err; } EXPORT_SYMBOL_GPL(si476x_core_cmd_am_acf_status); /** * si476x_cmd_fm_seek_start - send 'FM_SEEK_START' command to the * device * @core - device to send the command to * @seekup - if set the direction of the search is 'up' * @wrap - if set seek wraps when hitting band limit * * This function begins search for a valid station. The station is * considered valid when 'FM_VALID_SNR_THRESHOLD' and * 'FM_VALID_RSSI_THRESHOLD' and 'FM_VALID_MAX_TUNE_ERROR' criteria * are met. } * * Function returns 0 on success and negative error code on failure */ int si476x_core_cmd_fm_seek_start(struct si476x_core *core, bool seekup, bool wrap) { u8 resp[CMD_FM_SEEK_START_NRESP]; const u8 args[CMD_FM_SEEK_START_NARGS] = { seekup << 3 | wrap << 2, }; return si476x_cmd_tune_seek_freq(core, CMD_FM_SEEK_START, args, sizeof(args), resp, sizeof(resp)); } EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_seek_start); /** * si476x_cmd_fm_rds_status - send 'FM_RDS_STATUS' command to the * device * @core - device to send the command to * @status_only - if set the data is not removed from RDSFIFO, * RDSFIFOUSED is not decremented and data in all the * rest RDS data contains the last valid info received * @mtfifo if set the command clears RDS receive FIFO * @intack if set the command clards the RDSINT bit. 
 *
 * Function returns 0 on success and negative error code on failure
 */
int si476x_core_cmd_fm_rds_status(struct si476x_core *core,
				  bool status_only,
				  bool mtfifo,
				  bool intack,
				  struct si476x_rds_status_report *report)
{
	int err;
	u8 resp[CMD_FM_RDS_STATUS_NRESP];
	const u8 args[CMD_FM_RDS_STATUS_NARGS] = {
		status_only << 2 | mtfifo << 1 | intack,
	};

	err = si476x_core_send_command(core, CMD_FM_RDS_STATUS,
				       args, ARRAY_SIZE(args),
				       resp, ARRAY_SIZE(resp),
				       SI476X_DEFAULT_TIMEOUT);
	/*
	 * Besides getting RDS status information this command can be
	 * used to just acknowledge different interrupt flags in those
	 * cases it is useless to copy and parse received data so user
	 * can pass NULL, and thus avoid unnecessary copying.
	 */
	if (err < 0 || report == NULL)
		return err;

	report->rdstpptyint	= 0x10 & resp[1];
	report->rdspiint	= 0x08 & resp[1];
	report->rdssyncint	= 0x02 & resp[1];
	report->rdsfifoint	= 0x01 & resp[1];

	report->tpptyvalid	= 0x10 & resp[2];
	report->pivalid		= 0x08 & resp[2];
	report->rdssync		= 0x02 & resp[2];
	report->rdsfifolost	= 0x01 & resp[2];

	report->tp		= 0x20 & resp[3];
	report->pty		= 0x1f & resp[3];

	report->pi		= get_unaligned_be16(resp + 4);
	report->rdsfifoused	= resp[6];

	/* Byte 7 packs the per-block error levels, two bits per block */
	report->ble[V4L2_RDS_BLOCK_A]	= 0xc0 & resp[7];
	report->ble[V4L2_RDS_BLOCK_B]	= 0x30 & resp[7];
	report->ble[V4L2_RDS_BLOCK_C]	= 0x0c & resp[7];
	report->ble[V4L2_RDS_BLOCK_D]	= 0x03 & resp[7];

	/* Bytes 8..15 carry the four 16-bit RDS blocks, big-endian */
	report->rds[V4L2_RDS_BLOCK_A].block = V4L2_RDS_BLOCK_A;
	report->rds[V4L2_RDS_BLOCK_A].msb = resp[8];
	report->rds[V4L2_RDS_BLOCK_A].lsb = resp[9];

	report->rds[V4L2_RDS_BLOCK_B].block = V4L2_RDS_BLOCK_B;
	report->rds[V4L2_RDS_BLOCK_B].msb = resp[10];
	report->rds[V4L2_RDS_BLOCK_B].lsb = resp[11];

	report->rds[V4L2_RDS_BLOCK_C].block = V4L2_RDS_BLOCK_C;
	report->rds[V4L2_RDS_BLOCK_C].msb = resp[12];
	report->rds[V4L2_RDS_BLOCK_C].lsb = resp[13];

	report->rds[V4L2_RDS_BLOCK_D].block = V4L2_RDS_BLOCK_D;
	report->rds[V4L2_RDS_BLOCK_D].msb = resp[14];
	report->rds[V4L2_RDS_BLOCK_D].lsb = resp[15];

	return err;
}
EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_rds_status);

int si476x_core_cmd_fm_rds_blockcount(struct si476x_core *core,
				bool clear,
				struct si476x_rds_blockcount_report *report)
{
	int err;
	u8 resp[CMD_FM_RDS_BLOCKCOUNT_NRESP];
	const u8 args[CMD_FM_RDS_BLOCKCOUNT_NARGS] = {
		clear,
	};

	if (!report)
		return -EINVAL;

	err = si476x_core_send_command(core, CMD_FM_RDS_BLOCKCOUNT,
				       args, ARRAY_SIZE(args),
				       resp, ARRAY_SIZE(resp),
				       SI476X_DEFAULT_TIMEOUT);
	if (!err) {
		report->expected	= get_unaligned_be16(resp + 2);
		report->received	= get_unaligned_be16(resp + 4);
		report->uncorrectable	= get_unaligned_be16(resp + 6);
	}

	return err;
}
EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_rds_blockcount);

int si476x_core_cmd_fm_phase_diversity(struct si476x_core *core,
				       enum si476x_phase_diversity_mode mode)
{
	u8 resp[CMD_FM_PHASE_DIVERSITY_NRESP];
	const u8 args[CMD_FM_PHASE_DIVERSITY_NARGS] = {
		/* Only the low three bits select the diversity mode */
		mode & 0x07,
	};

	return si476x_core_send_command(core, CMD_FM_PHASE_DIVERSITY,
					args, ARRAY_SIZE(args),
					resp, ARRAY_SIZE(resp),
					SI476X_DEFAULT_TIMEOUT);
}
EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_phase_diversity);
/**
 * si476x_core_cmd_fm_phase_div_status() - get the phase diversity
 * status
 *
 * @core: si476x device
 *
 * NOTE caller must hold core lock
 *
 * Function returns the value of the status bit in case of success and
 * negative error code in case of failure.
 */
int si476x_core_cmd_fm_phase_div_status(struct si476x_core *core)
{
	int err;
	u8 resp[CMD_FM_PHASE_DIV_STATUS_NRESP];

	err = si476x_core_send_command(core, CMD_FM_PHASE_DIV_STATUS,
				       NULL, 0,
				       resp, ARRAY_SIZE(resp),
				       SI476X_DEFAULT_TIMEOUT);

	return (err < 0) ? err : resp[1];
}
EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_phase_div_status);

/**
 * si476x_cmd_am_seek_start - send 'AM_SEEK_START' command to the
 * device
 * @core - device to send the command to
 * @seekup - if set the direction of the search is 'up'
 * @wrap - if set seek wraps when hitting band limit
 *
 * This function begins search for a valid station.
The station is * considered valid when 'FM_VALID_SNR_THRESHOLD' and * 'FM_VALID_RSSI_THRESHOLD' and 'FM_VALID_MAX_TUNE_ERROR' criteria * are met. * * Function returns 0 on success and negative error code on failure */ int si476x_core_cmd_am_seek_start(struct si476x_core *core, bool seekup, bool wrap) { u8 resp[CMD_AM_SEEK_START_NRESP]; const u8 args[CMD_AM_SEEK_START_NARGS] = { seekup << 3 | wrap << 2, }; return si476x_cmd_tune_seek_freq(core, CMD_AM_SEEK_START, args, sizeof(args), resp, sizeof(resp)); } EXPORT_SYMBOL_GPL(si476x_core_cmd_am_seek_start); static int si476x_core_cmd_power_up_a10(struct si476x_core *core, struct si476x_power_up_args *puargs) { u8 resp[CMD_POWER_UP_A10_NRESP]; const bool intsel = (core->pinmux.a1 == SI476X_A1_IRQ); const bool ctsen = (core->client->irq != 0); const u8 args[CMD_POWER_UP_A10_NARGS] = { 0xF7, /* Reserved, always 0xF7 */ 0x3F & puargs->xcload, /* First two bits are reserved to be * zeros */ ctsen << 7 | intsel << 6 | 0x07, /* Last five bits * are reserved to * be written as 0x7 */ puargs->func << 4 | puargs->freq, 0x11, /* Reserved, always 0x11 */ }; return si476x_core_send_command(core, CMD_POWER_UP, args, ARRAY_SIZE(args), resp, ARRAY_SIZE(resp), SI476X_TIMEOUT_POWER_UP); } static int si476x_core_cmd_power_up_a20(struct si476x_core *core, struct si476x_power_up_args *puargs) { u8 resp[CMD_POWER_UP_A20_NRESP]; const bool intsel = (core->pinmux.a1 == SI476X_A1_IRQ); const bool ctsen = (core->client->irq != 0); const u8 args[CMD_POWER_UP_A20_NARGS] = { puargs->ibias6x << 7 | puargs->xstart, 0x3F & puargs->xcload, /* First two bits are reserved to be * zeros */ ctsen << 7 | intsel << 6 | puargs->fastboot << 5 | puargs->xbiashc << 3 | puargs->xbias, puargs->func << 4 | puargs->freq, 0x10 | puargs->xmode, }; return si476x_core_send_command(core, CMD_POWER_UP, args, ARRAY_SIZE(args), resp, ARRAY_SIZE(resp), SI476X_TIMEOUT_POWER_UP); } static int si476x_core_cmd_power_down_a10(struct si476x_core *core, struct 
si476x_power_down_args *pdargs) { u8 resp[CMD_POWER_DOWN_A10_NRESP]; return si476x_core_send_command(core, CMD_POWER_DOWN, NULL, 0, resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); } static int si476x_core_cmd_power_down_a20(struct si476x_core *core, struct si476x_power_down_args *pdargs) { u8 resp[CMD_POWER_DOWN_A20_NRESP]; const u8 args[CMD_POWER_DOWN_A20_NARGS] = { pdargs->xosc, }; return si476x_core_send_command(core, CMD_POWER_DOWN, args, ARRAY_SIZE(args), resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); } static int si476x_core_cmd_am_tune_freq_a10(struct si476x_core *core, struct si476x_tune_freq_args *tuneargs) { const int am_freq = tuneargs->freq; u8 resp[CMD_AM_TUNE_FREQ_NRESP]; const u8 args[CMD_AM_TUNE_FREQ_NARGS] = { (tuneargs->hd << 6), msb(am_freq), lsb(am_freq), }; return si476x_cmd_tune_seek_freq(core, CMD_AM_TUNE_FREQ, args, sizeof(args), resp, sizeof(resp)); } static int si476x_core_cmd_am_tune_freq_a20(struct si476x_core *core, struct si476x_tune_freq_args *tuneargs) { const int am_freq = tuneargs->freq; u8 resp[CMD_AM_TUNE_FREQ_NRESP]; const u8 args[CMD_AM_TUNE_FREQ_NARGS] = { (tuneargs->zifsr << 6) | (tuneargs->injside & 0x03), msb(am_freq), lsb(am_freq), }; return si476x_cmd_tune_seek_freq(core, CMD_AM_TUNE_FREQ, args, sizeof(args), resp, sizeof(resp)); } static int si476x_core_cmd_fm_rsq_status_a10(struct si476x_core *core, struct si476x_rsq_status_args *rsqargs, struct si476x_rsq_status_report *report) { int err; u8 resp[CMD_FM_RSQ_STATUS_A10_NRESP]; const u8 args[CMD_FM_RSQ_STATUS_A10_NARGS] = { rsqargs->rsqack << 3 | rsqargs->attune << 2 | rsqargs->cancel << 1 | rsqargs->stcack, }; err = si476x_core_send_command(core, CMD_FM_RSQ_STATUS, args, ARRAY_SIZE(args), resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); /* * Besides getting received signal quality information this * command can be used to just acknowledge different interrupt * flags in those cases it is useless to copy and parse * received data so user can pass NULL, and thus avoid * 
unnecessary copying. */ if (err < 0 || report == NULL) return err; report->multhint = 0x80 & resp[1]; report->multlint = 0x40 & resp[1]; report->snrhint = 0x08 & resp[1]; report->snrlint = 0x04 & resp[1]; report->rssihint = 0x02 & resp[1]; report->rssilint = 0x01 & resp[1]; report->bltf = 0x80 & resp[2]; report->snr_ready = 0x20 & resp[2]; report->rssiready = 0x08 & resp[2]; report->afcrl = 0x02 & resp[2]; report->valid = 0x01 & resp[2]; report->readfreq = get_unaligned_be16(resp + 3); report->freqoff = resp[5]; report->rssi = resp[6]; report->snr = resp[7]; report->lassi = resp[9]; report->hassi = resp[10]; report->mult = resp[11]; report->dev = resp[12]; report->readantcap = get_unaligned_be16(resp + 13); report->assi = resp[15]; report->usn = resp[16]; return err; } static int si476x_core_cmd_fm_rsq_status_a20(struct si476x_core *core, struct si476x_rsq_status_args *rsqargs, struct si476x_rsq_status_report *report) { int err; u8 resp[CMD_FM_RSQ_STATUS_A10_NRESP]; const u8 args[CMD_FM_RSQ_STATUS_A30_NARGS] = { rsqargs->primary << 4 | rsqargs->rsqack << 3 | rsqargs->attune << 2 | rsqargs->cancel << 1 | rsqargs->stcack, }; err = si476x_core_send_command(core, CMD_FM_RSQ_STATUS, args, ARRAY_SIZE(args), resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); /* * Besides getting received signal quality information this * command can be used to just acknowledge different interrupt * flags in those cases it is useless to copy and parse * received data so user can pass NULL, and thus avoid * unnecessary copying. 
*/ if (err < 0 || report == NULL) return err; report->multhint = 0x80 & resp[1]; report->multlint = 0x40 & resp[1]; report->snrhint = 0x08 & resp[1]; report->snrlint = 0x04 & resp[1]; report->rssihint = 0x02 & resp[1]; report->rssilint = 0x01 & resp[1]; report->bltf = 0x80 & resp[2]; report->snr_ready = 0x20 & resp[2]; report->rssiready = 0x08 & resp[2]; report->afcrl = 0x02 & resp[2]; report->valid = 0x01 & resp[2]; report->readfreq = get_unaligned_be16(resp + 3); report->freqoff = resp[5]; report->rssi = resp[6]; report->snr = resp[7]; report->lassi = resp[9]; report->hassi = resp[10]; report->mult = resp[11]; report->dev = resp[12]; report->readantcap = get_unaligned_be16(resp + 13); report->assi = resp[15]; report->usn = resp[16]; return err; } static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core, struct si476x_rsq_status_args *rsqargs, struct si476x_rsq_status_report *report) { int err; u8 resp[CMD_FM_RSQ_STATUS_A30_NRESP]; const u8 args[CMD_FM_RSQ_STATUS_A30_NARGS] = { rsqargs->primary << 4 | rsqargs->rsqack << 3 | rsqargs->attune << 2 | rsqargs->cancel << 1 | rsqargs->stcack, }; err = si476x_core_send_command(core, CMD_FM_RSQ_STATUS, args, ARRAY_SIZE(args), resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); /* * Besides getting received signal quality information this * command can be used to just acknowledge different interrupt * flags in those cases it is useless to copy and parse * received data so user can pass NULL, and thus avoid * unnecessary copying. 
*/ if (err < 0 || report == NULL) return err; report->multhint = 0x80 & resp[1]; report->multlint = 0x40 & resp[1]; report->snrhint = 0x08 & resp[1]; report->snrlint = 0x04 & resp[1]; report->rssihint = 0x02 & resp[1]; report->rssilint = 0x01 & resp[1]; report->bltf = 0x80 & resp[2]; report->snr_ready = 0x20 & resp[2]; report->rssiready = 0x08 & resp[2]; report->injside = 0x04 & resp[2]; report->afcrl = 0x02 & resp[2]; report->valid = 0x01 & resp[2]; report->readfreq = get_unaligned_be16(resp + 3); report->freqoff = resp[5]; report->rssi = resp[6]; report->snr = resp[7]; report->issi = resp[8]; report->lassi = resp[9]; report->hassi = resp[10]; report->mult = resp[11]; report->dev = resp[12]; report->readantcap = get_unaligned_be16(resp + 13); report->assi = resp[15]; report->usn = resp[16]; report->pilotdev = resp[17]; report->rdsdev = resp[18]; report->assidev = resp[19]; report->strongdev = resp[20]; report->rdspi = get_unaligned_be16(resp + 21); return err; } static int si476x_core_cmd_fm_tune_freq_a10(struct si476x_core *core, struct si476x_tune_freq_args *tuneargs) { u8 resp[CMD_FM_TUNE_FREQ_NRESP]; const u8 args[CMD_FM_TUNE_FREQ_A10_NARGS] = { (tuneargs->hd << 6) | (tuneargs->tunemode << 4) | (tuneargs->smoothmetrics << 2), msb(tuneargs->freq), lsb(tuneargs->freq), msb(tuneargs->antcap), lsb(tuneargs->antcap) }; return si476x_cmd_tune_seek_freq(core, CMD_FM_TUNE_FREQ, args, sizeof(args), resp, sizeof(resp)); } static int si476x_core_cmd_fm_tune_freq_a20(struct si476x_core *core, struct si476x_tune_freq_args *tuneargs) { u8 resp[CMD_FM_TUNE_FREQ_NRESP]; const u8 args[CMD_FM_TUNE_FREQ_A20_NARGS] = { (tuneargs->hd << 6) | (tuneargs->tunemode << 4) | (tuneargs->smoothmetrics << 2) | (tuneargs->injside), msb(tuneargs->freq), lsb(tuneargs->freq), }; return si476x_cmd_tune_seek_freq(core, CMD_FM_TUNE_FREQ, args, sizeof(args), resp, sizeof(resp)); } static int si476x_core_cmd_agc_status_a20(struct si476x_core *core, struct si476x_agc_status_report *report) { int 
err; u8 resp[CMD_AGC_STATUS_NRESP_A20]; if (!report) return -EINVAL; err = si476x_core_send_command(core, CMD_AGC_STATUS, NULL, 0, resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); if (err < 0) return err; report->mxhi = resp[1] & SI476X_AGC_MXHI; report->mxlo = resp[1] & SI476X_AGC_MXLO; report->lnahi = resp[1] & SI476X_AGC_LNAHI; report->lnalo = resp[1] & SI476X_AGC_LNALO; report->fmagc1 = resp[2]; report->fmagc2 = resp[3]; report->pgagain = resp[4]; report->fmwblang = resp[5]; return err; } static int si476x_core_cmd_agc_status_a10(struct si476x_core *core, struct si476x_agc_status_report *report) { int err; u8 resp[CMD_AGC_STATUS_NRESP_A10]; if (!report) return -EINVAL; err = si476x_core_send_command(core, CMD_AGC_STATUS, NULL, 0, resp, ARRAY_SIZE(resp), SI476X_DEFAULT_TIMEOUT); if (err < 0) return err; report->mxhi = resp[1] & SI476X_AGC_MXHI; report->mxlo = resp[1] & SI476X_AGC_MXLO; report->lnahi = resp[1] & SI476X_AGC_LNAHI; report->lnalo = resp[1] & SI476X_AGC_LNALO; return err; } typedef int (*tune_freq_func_t) (struct si476x_core *core, struct si476x_tune_freq_args *tuneargs); static struct { int (*power_up) (struct si476x_core *, struct si476x_power_up_args *); int (*power_down) (struct si476x_core *, struct si476x_power_down_args *); tune_freq_func_t fm_tune_freq; tune_freq_func_t am_tune_freq; int (*fm_rsq_status)(struct si476x_core *, struct si476x_rsq_status_args *, struct si476x_rsq_status_report *); int (*agc_status)(struct si476x_core *, struct si476x_agc_status_report *); int (*intb_pin_cfg)(struct si476x_core *core, enum si476x_intb_config intb, enum si476x_a1_config a1); } si476x_cmds_vtable[] = { [SI476X_REVISION_A10] = { .power_up = si476x_core_cmd_power_up_a10, .power_down = si476x_core_cmd_power_down_a10, .fm_tune_freq = si476x_core_cmd_fm_tune_freq_a10, .am_tune_freq = si476x_core_cmd_am_tune_freq_a10, .fm_rsq_status = si476x_core_cmd_fm_rsq_status_a10, .agc_status = si476x_core_cmd_agc_status_a10, .intb_pin_cfg = 
si476x_core_cmd_intb_pin_cfg_a10, }, [SI476X_REVISION_A20] = { .power_up = si476x_core_cmd_power_up_a20, .power_down = si476x_core_cmd_power_down_a20, .fm_tune_freq = si476x_core_cmd_fm_tune_freq_a20, .am_tune_freq = si476x_core_cmd_am_tune_freq_a20, .fm_rsq_status = si476x_core_cmd_fm_rsq_status_a20, .agc_status = si476x_core_cmd_agc_status_a20, .intb_pin_cfg = si476x_core_cmd_intb_pin_cfg_a20, }, [SI476X_REVISION_A30] = { .power_up = si476x_core_cmd_power_up_a20, .power_down = si476x_core_cmd_power_down_a20, .fm_tune_freq = si476x_core_cmd_fm_tune_freq_a20, .am_tune_freq = si476x_core_cmd_am_tune_freq_a20, .fm_rsq_status = si476x_core_cmd_fm_rsq_status_a30, .agc_status = si476x_core_cmd_agc_status_a20, .intb_pin_cfg = si476x_core_cmd_intb_pin_cfg_a20, }, }; int si476x_core_cmd_power_up(struct si476x_core *core, struct si476x_power_up_args *args) { BUG_ON(core->revision > SI476X_REVISION_A30 || core->revision == -1); return si476x_cmds_vtable[core->revision].power_up(core, args); } EXPORT_SYMBOL_GPL(si476x_core_cmd_power_up); int si476x_core_cmd_power_down(struct si476x_core *core, struct si476x_power_down_args *args) { BUG_ON(core->revision > SI476X_REVISION_A30 || core->revision == -1); return si476x_cmds_vtable[core->revision].power_down(core, args); } EXPORT_SYMBOL_GPL(si476x_core_cmd_power_down); int si476x_core_cmd_fm_tune_freq(struct si476x_core *core, struct si476x_tune_freq_args *args) { BUG_ON(core->revision > SI476X_REVISION_A30 || core->revision == -1); return si476x_cmds_vtable[core->revision].fm_tune_freq(core, args); } EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_tune_freq); int si476x_core_cmd_am_tune_freq(struct si476x_core *core, struct si476x_tune_freq_args *args) { BUG_ON(core->revision > SI476X_REVISION_A30 || core->revision == -1); return si476x_cmds_vtable[core->revision].am_tune_freq(core, args); } EXPORT_SYMBOL_GPL(si476x_core_cmd_am_tune_freq); int si476x_core_cmd_fm_rsq_status(struct si476x_core *core, struct si476x_rsq_status_args *args, struct 
si476x_rsq_status_report *report) { BUG_ON(core->revision > SI476X_REVISION_A30 || core->revision == -1); return si476x_cmds_vtable[core->revision].fm_rsq_status(core, args, report); } EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_rsq_status); int si476x_core_cmd_agc_status(struct si476x_core *core, struct si476x_agc_status_report *report) { BUG_ON(core->revision > SI476X_REVISION_A30 || core->revision == -1); return si476x_cmds_vtable[core->revision].agc_status(core, report); } EXPORT_SYMBOL_GPL(si476x_core_cmd_agc_status); int si476x_core_cmd_intb_pin_cfg(struct si476x_core *core, enum si476x_intb_config intb, enum si476x_a1_config a1) { BUG_ON(core->revision > SI476X_REVISION_A30 || core->revision == -1); return si476x_cmds_vtable[core->revision].intb_pin_cfg(core, intb, a1); } EXPORT_SYMBOL_GPL(si476x_core_cmd_intb_pin_cfg); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>"); MODULE_DESCRIPTION("API for command exchange for si476x");
gpl-2.0
bestmjh47/android_kernel_samsung_aries
drivers/usb/host/fhci-tds.c
7996
16906
/* * Freescale QUICC Engine USB Host Controller Driver * * Copyright (c) Freescale Semicondutor, Inc. 2006. * Shlomi Gridish <gridish@freescale.com> * Jerry Huang <Chang-Ming.Huang@freescale.com> * Copyright (c) Logic Product Development, Inc. 2007 * Peter Barada <peterb@logicpd.com> * Copyright (c) MontaVista Software, Inc. 2008. * Anton Vorontsov <avorontsov@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/io.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include "fhci.h" #define DUMMY_BD_BUFFER 0xdeadbeef #define DUMMY2_BD_BUFFER 0xbaadf00d /* Transaction Descriptors bits */ #define TD_R 0x8000 /* ready bit */ #define TD_W 0x2000 /* wrap bit */ #define TD_I 0x1000 /* interrupt on completion */ #define TD_L 0x0800 /* last */ #define TD_TC 0x0400 /* transmit CRC */ #define TD_CNF 0x0200 /* CNF - Must be always 1 */ #define TD_LSP 0x0100 /* Low-speed transaction */ #define TD_PID 0x00c0 /* packet id */ #define TD_RXER 0x0020 /* Rx error or not */ #define TD_NAK 0x0010 /* No ack. 
*/ #define TD_STAL 0x0008 /* Stall received */ #define TD_TO 0x0004 /* time out */ #define TD_UN 0x0002 /* underrun */ #define TD_NO 0x0010 /* Rx Non Octet Aligned Packet */ #define TD_AB 0x0008 /* Frame Aborted */ #define TD_CR 0x0004 /* CRC Error */ #define TD_OV 0x0002 /* Overrun */ #define TD_BOV 0x0001 /* Buffer Overrun */ #define TD_ERRORS (TD_NAK | TD_STAL | TD_TO | TD_UN | \ TD_NO | TD_AB | TD_CR | TD_OV | TD_BOV) #define TD_PID_DATA0 0x0080 /* Data 0 toggle */ #define TD_PID_DATA1 0x00c0 /* Data 1 toggle */ #define TD_PID_TOGGLE 0x00c0 /* Data 0/1 toggle mask */ #define TD_TOK_SETUP 0x0000 #define TD_TOK_OUT 0x4000 #define TD_TOK_IN 0x8000 #define TD_ISO 0x1000 #define TD_ENDP 0x0780 #define TD_ADDR 0x007f #define TD_ENDP_SHIFT 7 struct usb_td { __be16 status; __be16 length; __be32 buf_ptr; __be16 extra; __be16 reserved; }; static struct usb_td __iomem *next_bd(struct usb_td __iomem *base, struct usb_td __iomem *td, u16 status) { if (status & TD_W) return base; else return ++td; } void fhci_push_dummy_bd(struct endpoint *ep) { if (ep->already_pushed_dummy_bd == false) { u16 td_status = in_be16(&ep->empty_td->status); out_be32(&ep->empty_td->buf_ptr, DUMMY_BD_BUFFER); /* get the next TD in the ring */ ep->empty_td = next_bd(ep->td_base, ep->empty_td, td_status); ep->already_pushed_dummy_bd = true; } } /* destroy an USB endpoint */ void fhci_ep0_free(struct fhci_usb *usb) { struct endpoint *ep; int size; ep = usb->ep0; if (ep) { if (ep->td_base) cpm_muram_free(cpm_muram_offset(ep->td_base)); if (kfifo_initialized(&ep->conf_frame_Q)) { size = cq_howmany(&ep->conf_frame_Q); for (; size; size--) { struct packet *pkt = cq_get(&ep->conf_frame_Q); kfree(pkt); } cq_delete(&ep->conf_frame_Q); } if (kfifo_initialized(&ep->empty_frame_Q)) { size = cq_howmany(&ep->empty_frame_Q); for (; size; size--) { struct packet *pkt = cq_get(&ep->empty_frame_Q); kfree(pkt); } cq_delete(&ep->empty_frame_Q); } if (kfifo_initialized(&ep->dummy_packets_Q)) { size = 
cq_howmany(&ep->dummy_packets_Q); for (; size; size--) { u8 *buff = cq_get(&ep->dummy_packets_Q); kfree(buff); } cq_delete(&ep->dummy_packets_Q); } kfree(ep); usb->ep0 = NULL; } } /* * create the endpoint structure * * arguments: * usb A pointer to the data structure of the USB * data_mem The data memory partition(BUS) * ring_len TD ring length */ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem, u32 ring_len) { struct endpoint *ep; struct usb_td __iomem *td; unsigned long ep_offset; char *err_for = "enpoint PRAM"; int ep_mem_size; u32 i; /* we need at least 3 TDs in the ring */ if (!(ring_len > 2)) { fhci_err(usb->fhci, "illegal TD ring length parameters\n"); return -EINVAL; } ep = kzalloc(sizeof(*ep), GFP_KERNEL); if (!ep) return -ENOMEM; ep_mem_size = ring_len * sizeof(*td) + sizeof(struct fhci_ep_pram); ep_offset = cpm_muram_alloc(ep_mem_size, 32); if (IS_ERR_VALUE(ep_offset)) goto err; ep->td_base = cpm_muram_addr(ep_offset); /* zero all queue pointers */ if (cq_new(&ep->conf_frame_Q, ring_len + 2) || cq_new(&ep->empty_frame_Q, ring_len + 2) || cq_new(&ep->dummy_packets_Q, ring_len + 2)) { err_for = "frame_queues"; goto err; } for (i = 0; i < (ring_len + 1); i++) { struct packet *pkt; u8 *buff; pkt = kmalloc(sizeof(*pkt), GFP_KERNEL); if (!pkt) { err_for = "frame"; goto err; } buff = kmalloc(1028 * sizeof(*buff), GFP_KERNEL); if (!buff) { kfree(pkt); err_for = "buffer"; goto err; } cq_put(&ep->empty_frame_Q, pkt); cq_put(&ep->dummy_packets_Q, buff); } /* we put the endpoint parameter RAM right behind the TD ring */ ep->ep_pram_ptr = (void __iomem *)ep->td_base + sizeof(*td) * ring_len; ep->conf_td = ep->td_base; ep->empty_td = ep->td_base; ep->already_pushed_dummy_bd = false; /* initialize tds */ td = ep->td_base; for (i = 0; i < ring_len; i++) { out_be32(&td->buf_ptr, 0); out_be16(&td->status, 0); out_be16(&td->length, 0); out_be16(&td->extra, 0); td++; } td--; out_be16(&td->status, TD_W); /* for last TD set Wrap bit */ 
out_be16(&td->length, 0); /* endpoint structure has been created */ usb->ep0 = ep; return 0; err: fhci_ep0_free(usb); kfree(ep); fhci_err(usb->fhci, "no memory for the %s\n", err_for); return -ENOMEM; } /* * initialize the endpoint register according to the given parameters * * artuments: * usb A pointer to the data strucutre of the USB * ep A pointer to the endpoint structre * data_mem The data memory partition(BUS) */ void fhci_init_ep_registers(struct fhci_usb *usb, struct endpoint *ep, enum fhci_mem_alloc data_mem) { u8 rt; /* set the endpoint registers according to the endpoint */ out_be16(&usb->fhci->regs->usb_ep[0], USB_TRANS_CTR | USB_EP_MF | USB_EP_RTE); out_be16(&usb->fhci->pram->ep_ptr[0], cpm_muram_offset(ep->ep_pram_ptr)); rt = (BUS_MODE_BO_BE | BUS_MODE_GBL); #ifdef MULTI_DATA_BUS if (data_mem == MEM_SECONDARY) rt |= BUS_MODE_DTB; #endif out_8(&ep->ep_pram_ptr->rx_func_code, rt); out_8(&ep->ep_pram_ptr->tx_func_code, rt); out_be16(&ep->ep_pram_ptr->rx_buff_len, 1028); out_be16(&ep->ep_pram_ptr->rx_base, 0); out_be16(&ep->ep_pram_ptr->tx_base, cpm_muram_offset(ep->td_base)); out_be16(&ep->ep_pram_ptr->rx_bd_ptr, 0); out_be16(&ep->ep_pram_ptr->tx_bd_ptr, cpm_muram_offset(ep->td_base)); out_be32(&ep->ep_pram_ptr->tx_state, 0); } /* * Collect the submitted frames and inform the application about them * It is also preparing the TDs for new frames. If the Tx interrupts * are disabled, the application should call that routine to get * confirmation about the submitted frames. Otherwise, the routine is * called from the interrupt service routine during the Tx interrupt. * In that case the application is informed by calling the application * specific 'fhci_transaction_confirm' routine */ static void fhci_td_transaction_confirm(struct fhci_usb *usb) { struct endpoint *ep = usb->ep0; struct packet *pkt; struct usb_td __iomem *td; u16 extra_data; u16 td_status; u16 td_length; u32 buf; /* * collect transmitted BDs from the chip. 
The routine clears all BDs * with R bit = 0 and the pointer to data buffer is not NULL, that is * BDs which point to the transmitted data buffer */ while (1) { td = ep->conf_td; td_status = in_be16(&td->status); td_length = in_be16(&td->length); buf = in_be32(&td->buf_ptr); extra_data = in_be16(&td->extra); /* check if the TD is empty */ if (!(!(td_status & TD_R) && ((td_status & ~TD_W) || buf))) break; /* check if it is a dummy buffer */ else if ((buf == DUMMY_BD_BUFFER) && !(td_status & ~TD_W)) break; /* mark TD as empty */ clrbits16(&td->status, ~TD_W); out_be16(&td->length, 0); out_be32(&td->buf_ptr, 0); out_be16(&td->extra, 0); /* advance the TD pointer */ ep->conf_td = next_bd(ep->td_base, ep->conf_td, td_status); /* check if it is a dummy buffer(type2) */ if ((buf == DUMMY2_BD_BUFFER) && !(td_status & ~TD_W)) continue; pkt = cq_get(&ep->conf_frame_Q); if (!pkt) fhci_err(usb->fhci, "no frame to confirm\n"); if (td_status & TD_ERRORS) { if (td_status & TD_RXER) { if (td_status & TD_CR) pkt->status = USB_TD_RX_ER_CRC; else if (td_status & TD_AB) pkt->status = USB_TD_RX_ER_BITSTUFF; else if (td_status & TD_OV) pkt->status = USB_TD_RX_ER_OVERUN; else if (td_status & TD_BOV) pkt->status = USB_TD_RX_DATA_OVERUN; else if (td_status & TD_NO) pkt->status = USB_TD_RX_ER_NONOCT; else fhci_err(usb->fhci, "illegal error " "occurred\n"); } else if (td_status & TD_NAK) pkt->status = USB_TD_TX_ER_NAK; else if (td_status & TD_TO) pkt->status = USB_TD_TX_ER_TIMEOUT; else if (td_status & TD_UN) pkt->status = USB_TD_TX_ER_UNDERUN; else if (td_status & TD_STAL) pkt->status = USB_TD_TX_ER_STALL; else fhci_err(usb->fhci, "illegal error occurred\n"); } else if ((extra_data & TD_TOK_IN) && pkt->len > td_length - CRC_SIZE) { pkt->status = USB_TD_RX_DATA_UNDERUN; } if (extra_data & TD_TOK_IN) pkt->len = td_length - CRC_SIZE; else if (pkt->info & PKT_ZLP) pkt->len = 0; else pkt->len = td_length; fhci_transaction_confirm(usb, pkt); } } /* * Submitting a data frame to a specified endpoint 
of a USB device * The frame is put in the driver's transmit queue for this endpoint * * Arguments: * usb A pointer to the USB structure * pkt A pointer to the user frame structure * trans_type Transaction tyep - IN,OUT or SETUP * dest_addr Device address - 0~127 * dest_ep Endpoint number of the device - 0~16 * trans_mode Pipe type - ISO,Interrupt,bulk or control * dest_speed USB speed - Low speed or FULL speed * data_toggle Data sequence toggle - 0 or 1 */ u32 fhci_host_transaction(struct fhci_usb *usb, struct packet *pkt, enum fhci_ta_type trans_type, u8 dest_addr, u8 dest_ep, enum fhci_tf_mode trans_mode, enum fhci_speed dest_speed, u8 data_toggle) { struct endpoint *ep = usb->ep0; struct usb_td __iomem *td; u16 extra_data; u16 td_status; fhci_usb_disable_interrupt(usb); /* start from the next BD that should be filled */ td = ep->empty_td; td_status = in_be16(&td->status); if (td_status & TD_R && in_be16(&td->length)) { /* if the TD is not free */ fhci_usb_enable_interrupt(usb); return -1; } /* get the next TD in the ring */ ep->empty_td = next_bd(ep->td_base, ep->empty_td, td_status); fhci_usb_enable_interrupt(usb); pkt->priv_data = td; out_be32(&td->buf_ptr, virt_to_phys(pkt->data)); /* sets up transaction parameters - addr,endp,dir,and type */ extra_data = (dest_ep << TD_ENDP_SHIFT) | dest_addr; switch (trans_type) { case FHCI_TA_IN: extra_data |= TD_TOK_IN; break; case FHCI_TA_OUT: extra_data |= TD_TOK_OUT; break; case FHCI_TA_SETUP: extra_data |= TD_TOK_SETUP; break; } if (trans_mode == FHCI_TF_ISO) extra_data |= TD_ISO; out_be16(&td->extra, extra_data); /* sets up the buffer descriptor */ td_status = ((td_status & TD_W) | TD_R | TD_L | TD_I | TD_CNF); if (!(pkt->info & PKT_NO_CRC)) td_status |= TD_TC; switch (trans_type) { case FHCI_TA_IN: if (data_toggle) pkt->info |= PKT_PID_DATA1; else pkt->info |= PKT_PID_DATA0; break; default: if (data_toggle) { td_status |= TD_PID_DATA1; pkt->info |= PKT_PID_DATA1; } else { td_status |= TD_PID_DATA0; pkt->info |= 
PKT_PID_DATA0; } break; } if ((dest_speed == FHCI_LOW_SPEED) && (usb->port_status == FHCI_PORT_FULL)) td_status |= TD_LSP; out_be16(&td->status, td_status); /* set up buffer length */ if (trans_type == FHCI_TA_IN) out_be16(&td->length, pkt->len + CRC_SIZE); else out_be16(&td->length, pkt->len); /* put the frame to the confirmation queue */ cq_put(&ep->conf_frame_Q, pkt); if (cq_howmany(&ep->conf_frame_Q) == 1) out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO); return 0; } /* Reset the Tx BD ring */ void fhci_flush_bds(struct fhci_usb *usb) { u16 extra_data; u16 td_status; u32 buf; struct usb_td __iomem *td; struct endpoint *ep = usb->ep0; td = ep->td_base; while (1) { td_status = in_be16(&td->status); buf = in_be32(&td->buf_ptr); extra_data = in_be16(&td->extra); /* if the TD is not empty - we'll confirm it as Timeout */ if (td_status & TD_R) out_be16(&td->status, (td_status & ~TD_R) | TD_TO); /* if this TD is dummy - let's skip this TD */ else if (in_be32(&td->buf_ptr) == DUMMY_BD_BUFFER) out_be32(&td->buf_ptr, DUMMY2_BD_BUFFER); /* if this is the last TD - break */ if (td_status & TD_W) break; td++; } fhci_td_transaction_confirm(usb); td = ep->td_base; do { out_be16(&td->status, 0); out_be16(&td->length, 0); out_be32(&td->buf_ptr, 0); out_be16(&td->extra, 0); td++; } while (!(in_be16(&td->status) & TD_W)); out_be16(&td->status, TD_W); /* for last TD set Wrap bit */ out_be16(&td->length, 0); out_be32(&td->buf_ptr, 0); out_be16(&td->extra, 0); out_be16(&ep->ep_pram_ptr->tx_bd_ptr, in_be16(&ep->ep_pram_ptr->tx_base)); out_be32(&ep->ep_pram_ptr->tx_state, 0); out_be16(&ep->ep_pram_ptr->tx_cnt, 0); ep->empty_td = ep->td_base; ep->conf_td = ep->td_base; } /* * Flush all transmitted packets from TDs in the actual frame. 
* This routine is called when something wrong with the controller and * we want to get rid of the actual frame and start again next frame */ void fhci_flush_actual_frame(struct fhci_usb *usb) { u8 mode; u16 tb_ptr; u16 extra_data; u16 td_status; u32 buf_ptr; struct usb_td __iomem *td; struct endpoint *ep = usb->ep0; /* disable the USB controller */ mode = in_8(&usb->fhci->regs->usb_mod); out_8(&usb->fhci->regs->usb_mod, mode & ~USB_MODE_EN); tb_ptr = in_be16(&ep->ep_pram_ptr->tx_bd_ptr); td = cpm_muram_addr(tb_ptr); td_status = in_be16(&td->status); buf_ptr = in_be32(&td->buf_ptr); extra_data = in_be16(&td->extra); do { if (td_status & TD_R) { out_be16(&td->status, (td_status & ~TD_R) | TD_TO); } else { out_be32(&td->buf_ptr, 0); ep->already_pushed_dummy_bd = false; break; } /* advance the TD pointer */ td = next_bd(ep->td_base, td, td_status); td_status = in_be16(&td->status); buf_ptr = in_be32(&td->buf_ptr); extra_data = in_be16(&td->extra); } while ((td_status & TD_R) || buf_ptr); fhci_td_transaction_confirm(usb); out_be16(&ep->ep_pram_ptr->tx_bd_ptr, in_be16(&ep->ep_pram_ptr->tx_base)); out_be32(&ep->ep_pram_ptr->tx_state, 0); out_be16(&ep->ep_pram_ptr->tx_cnt, 0); ep->empty_td = ep->td_base; ep->conf_td = ep->td_base; usb->actual_frame->frame_status = FRAME_TIMER_END_TRANSMISSION; /* reset the event register */ out_be16(&usb->fhci->regs->usb_event, 0xffff); /* enable the USB controller */ out_8(&usb->fhci->regs->usb_mod, mode | USB_MODE_EN); } /* handles Tx confirm and Tx error interrupt */ void fhci_tx_conf_interrupt(struct fhci_usb *usb) { fhci_td_transaction_confirm(usb); /* * Schedule another transaction to this frame only if we have * already confirmed all transaction in the frame. 
*/ if (((fhci_get_sof_timer_count(usb) < usb->max_frame_usage) || (usb->actual_frame->frame_status & FRAME_END_TRANSMISSION)) && (list_empty(&usb->actual_frame->tds_list))) fhci_schedule_transactions(usb); } void fhci_host_transmit_actual_frame(struct fhci_usb *usb) { u16 tb_ptr; u16 td_status; struct usb_td __iomem *td; struct endpoint *ep = usb->ep0; tb_ptr = in_be16(&ep->ep_pram_ptr->tx_bd_ptr); td = cpm_muram_addr(tb_ptr); if (in_be32(&td->buf_ptr) == DUMMY_BD_BUFFER) { struct usb_td __iomem *old_td = td; ep->already_pushed_dummy_bd = false; td_status = in_be16(&td->status); /* gets the next TD in the ring */ td = next_bd(ep->td_base, td, td_status); tb_ptr = cpm_muram_offset(td); out_be16(&ep->ep_pram_ptr->tx_bd_ptr, tb_ptr); /* start transmit only if we have something in the TDs */ if (in_be16(&td->status) & TD_R) out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO); if (in_be32(&ep->conf_td->buf_ptr) == DUMMY_BD_BUFFER) { out_be32(&old_td->buf_ptr, 0); ep->conf_td = next_bd(ep->td_base, ep->conf_td, td_status); } else { out_be32(&old_td->buf_ptr, DUMMY2_BD_BUFFER); } } }
gpl-2.0
motley-git/Kernel-Nexus7
arch/s390/lib/uaccess_mvcos.c
8508
6062
/* * arch/s390/lib/uaccess_mvcos.c * * Optimized user space space access functions based on mvcos. * * Copyright (C) IBM Corp. 2006 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), * Gerald Schaefer (gerald.schaefer@de.ibm.com) */ #include <linux/errno.h> #include <linux/mm.h> #include <asm/uaccess.h> #include <asm/futex.h> #include "uaccess.h" #ifndef __s390x__ #define AHI "ahi" #define ALR "alr" #define CLR "clr" #define LHI "lhi" #define SLR "slr" #else #define AHI "aghi" #define ALR "algr" #define CLR "clgr" #define LHI "lghi" #define SLR "slgr" #endif static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x) { register unsigned long reg0 asm("0") = 0x81UL; unsigned long tmp1, tmp2; tmp1 = -4096UL; asm volatile( "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n" "9: jz 7f\n" "1:"ALR" %0,%3\n" " "SLR" %1,%3\n" " "SLR" %2,%3\n" " j 0b\n" "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ " "SLR" %4,%1\n" " "CLR" %0,%4\n" /* copy crosses next page boundary? 
*/ " jnh 4f\n" "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n" "10:"SLR" %0,%4\n" " "ALR" %2,%4\n" "4:"LHI" %4,-1\n" " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */ " bras %3,6f\n" /* memset loop */ " xc 0(1,%2),0(%2)\n" "5: xc 0(256,%2),0(%2)\n" " la %2,256(%2)\n" "6:"AHI" %4,-256\n" " jnm 5b\n" " ex %4,0(%3)\n" " j 8f\n" "7:"SLR" %0,%0\n" "8: \n" EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b) : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) : "d" (reg0) : "cc", "memory"); return size; } static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x) { if (size <= 256) return copy_from_user_std(size, ptr, x); return copy_from_user_mvcos(size, ptr, x); } static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) { register unsigned long reg0 asm("0") = 0x810000UL; unsigned long tmp1, tmp2; tmp1 = -4096UL; asm volatile( "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" "6: jz 4f\n" "1:"ALR" %0,%3\n" " "SLR" %1,%3\n" " "SLR" %2,%3\n" " j 0b\n" "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ " "SLR" %4,%1\n" " "CLR" %0,%4\n" /* copy crosses next page boundary? */ " jnh 5f\n" "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n" "7:"SLR" %0,%4\n" " j 5f\n" "4:"SLR" %0,%0\n" "5: \n" EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) : "d" (reg0) : "cc", "memory"); return size; } static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr, const void *x) { if (size <= 256) return copy_to_user_std(size, ptr, x); return copy_to_user_mvcos(size, ptr, x); } static size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from) { register unsigned long reg0 asm("0") = 0x810081UL; unsigned long tmp1, tmp2; tmp1 = -4096UL; /* FIXME: copy with reduced length. 
*/ asm volatile( "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" " jz 2f\n" "1:"ALR" %0,%3\n" " "SLR" %1,%3\n" " "SLR" %2,%3\n" " j 0b\n" "2:"SLR" %0,%0\n" "3: \n" EX_TABLE(0b,3b) : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2) : "d" (reg0) : "cc", "memory"); return size; } static size_t clear_user_mvcos(size_t size, void __user *to) { register unsigned long reg0 asm("0") = 0x810000UL; unsigned long tmp1, tmp2; tmp1 = -4096UL; asm volatile( "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n" " jz 4f\n" "1:"ALR" %0,%2\n" " "SLR" %1,%2\n" " j 0b\n" "2: la %3,4095(%1)\n"/* %4 = to + 4095 */ " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */ " "SLR" %3,%1\n" " "CLR" %0,%3\n" /* copy crosses next page boundary? */ " jnh 5f\n" "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n" " "SLR" %0,%3\n" " j 5f\n" "4:"SLR" %0,%0\n" "5: \n" EX_TABLE(0b,2b) EX_TABLE(3b,5b) : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2) : "a" (empty_zero_page), "d" (reg0) : "cc", "memory"); return size; } static size_t strnlen_user_mvcos(size_t count, const char __user *src) { char buf[256]; int rc; size_t done, len, len_str; done = 0; do { len = min(count - done, (size_t) 256); rc = uaccess.copy_from_user(len, src + done, buf); if (unlikely(rc == len)) return 0; len -= rc; len_str = strnlen(buf, len); done += len_str; } while ((len_str == len) && (done < count)); return done + 1; } static size_t strncpy_from_user_mvcos(size_t count, const char __user *src, char *dst) { int rc; size_t done, len, len_str; done = 0; do { len = min(count - done, (size_t) 4096); rc = uaccess.copy_from_user(len, src + done, dst); if (unlikely(rc == len)) return -EFAULT; len -= rc; len_str = strnlen(dst, len); done += len_str; } while ((len_str == len) && (done < count)); return done; } struct uaccess_ops uaccess_mvcos = { .copy_from_user = copy_from_user_mvcos_check, .copy_from_user_small = copy_from_user_std, .copy_to_user = copy_to_user_mvcos_check, .copy_to_user_small = copy_to_user_std, .copy_in_user = 
copy_in_user_mvcos, .clear_user = clear_user_mvcos, .strnlen_user = strnlen_user_std, .strncpy_from_user = strncpy_from_user_std, .futex_atomic_op = futex_atomic_op_std, .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std, }; struct uaccess_ops uaccess_mvcos_switch = { .copy_from_user = copy_from_user_mvcos, .copy_from_user_small = copy_from_user_mvcos, .copy_to_user = copy_to_user_mvcos, .copy_to_user_small = copy_to_user_mvcos, .copy_in_user = copy_in_user_mvcos, .clear_user = clear_user_mvcos, .strnlen_user = strnlen_user_mvcos, .strncpy_from_user = strncpy_from_user_mvcos, .futex_atomic_op = futex_atomic_op_pt, .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt, };
gpl-2.0
TeamBliss-Devices/android_kernel_samsung_hlte
arch/alpha/kernel/sys_alcor.c
9020
7714
/* * linux/arch/alpha/kernel/sys_alcor.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * * Code supporting the ALCOR and XLT (XL-300/366/433). */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/reboot.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/mmu_context.h> #include <asm/irq.h> #include <asm/pgtable.h> #include <asm/core_cia.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" /* Note mask bit is true for ENABLED irqs. */ static unsigned long cached_irq_mask; static inline void alcor_update_irq_hw(unsigned long mask) { *(vuip)GRU_INT_MASK = mask; mb(); } static inline void alcor_enable_irq(struct irq_data *d) { alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16)); } static void alcor_disable_irq(struct irq_data *d) { alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16))); } static void alcor_mask_and_ack_irq(struct irq_data *d) { alcor_disable_irq(d); /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ *(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb(); *(vuip)GRU_INT_CLEAR = 0; mb(); } static void alcor_isa_mask_and_ack_irq(struct irq_data *d) { i8259a_mask_and_ack_irq(d); /* On ALCOR/XLT, need to dismiss interrupt via GRU. 
*/ *(vuip)GRU_INT_CLEAR = 0x80000000; mb(); *(vuip)GRU_INT_CLEAR = 0; mb(); } static struct irq_chip alcor_irq_type = { .name = "ALCOR", .irq_unmask = alcor_enable_irq, .irq_mask = alcor_disable_irq, .irq_mask_ack = alcor_mask_and_ack_irq, }; static void alcor_device_interrupt(unsigned long vector) { unsigned long pld; unsigned int i; /* Read the interrupt summary register of the GRU */ pld = (*(vuip)GRU_INT_REQ) & GRU_INT_REQ_BITS; /* * Now for every possible bit set, work through them and call * the appropriate interrupt handler. */ while (pld) { i = ffz(~pld); pld &= pld - 1; /* clear least bit set */ if (i == 31) { isa_device_interrupt(vector); } else { handle_irq(16 + i); } } } static void __init alcor_init_irq(void) { long i; if (alpha_using_srm) alpha_mv.device_interrupt = srm_device_interrupt; *(vuip)GRU_INT_MASK = 0; mb(); /* all disabled */ *(vuip)GRU_INT_EDGE = 0; mb(); /* all are level */ *(vuip)GRU_INT_HILO = 0x80000000U; mb(); /* ISA only HI */ *(vuip)GRU_INT_CLEAR = 0; mb(); /* all clear */ for (i = 16; i < 48; ++i) { /* On Alcor, at least, lines 20..30 are not connected and can generate spurious interrupts if we turn them on while IRQ probing. */ if (i >= 16+20 && i <= 16+30) continue; irq_set_chip_and_handler(i, &alcor_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq; init_i8259a_irqs(); common_init_isa_dma(); setup_irq(16+31, &isa_cascade_irqaction); } /* * PCI Fixup configuration. 
* * Summary @ GRU_INT_REQ: * Bit Meaning * 0 Interrupt Line A from slot 2 * 1 Interrupt Line B from slot 2 * 2 Interrupt Line C from slot 2 * 3 Interrupt Line D from slot 2 * 4 Interrupt Line A from slot 1 * 5 Interrupt line B from slot 1 * 6 Interrupt Line C from slot 1 * 7 Interrupt Line D from slot 1 * 8 Interrupt Line A from slot 0 * 9 Interrupt Line B from slot 0 *10 Interrupt Line C from slot 0 *11 Interrupt Line D from slot 0 *12 Interrupt Line A from slot 4 *13 Interrupt Line B from slot 4 *14 Interrupt Line C from slot 4 *15 Interrupt Line D from slot 4 *16 Interrupt Line D from slot 3 *17 Interrupt Line D from slot 3 *18 Interrupt Line D from slot 3 *19 Interrupt Line D from slot 3 *20-30 Reserved *31 EISA interrupt * * The device to slot mapping looks like: * * Slot Device * 6 built-in TULIP (XLT only) * 7 PCI on board slot 0 * 8 PCI on board slot 3 * 9 PCI on board slot 4 * 10 PCEB (PCI-EISA bridge) * 11 PCI on board slot 2 * 12 PCI on board slot 1 * * * This two layered interrupt approach means that we allocate IRQ 16 and * above for PCI interrupts. The IRQ relates to which bit the interrupt * comes in on. This makes interrupt processing much easier. 
*/ static int __init alcor_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[7][5] __initdata = { /*INT INTA INTB INTC INTD */ /* note: IDSEL 17 is XLT only */ {16+13, 16+13, 16+13, 16+13, 16+13}, /* IdSel 17, TULIP */ { 16+8, 16+8, 16+9, 16+10, 16+11}, /* IdSel 18, slot 0 */ {16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 19, slot 3 */ {16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 20, slot 4 */ { -1, -1, -1, -1, -1}, /* IdSel 21, PCEB */ { 16+0, 16+0, 16+1, 16+2, 16+3}, /* IdSel 22, slot 2 */ { 16+4, 16+4, 16+5, 16+6, 16+7}, /* IdSel 23, slot 1 */ }; const long min_idsel = 6, max_idsel = 12, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } static void alcor_kill_arch(int mode) { cia_kill_arch(mode); #ifndef ALPHA_RESTORE_SRM_SETUP switch(mode) { case LINUX_REBOOT_CMD_RESTART: /* Who said DEC engineer's have no sense of humor? ;-) */ if (alpha_using_srm) { *(vuip) GRU_RESET = 0x0000dead; mb(); } break; case LINUX_REBOOT_CMD_HALT: break; case LINUX_REBOOT_CMD_POWER_OFF: break; } halt(); #endif } static void __init alcor_init_pci(void) { struct pci_dev *dev; cia_init_pci(); /* * Now we can look to see if we are really running on an XLT-type * motherboard, by looking for a 21040 TULIP in slot 6, which is * built into XLT and BRET/MAVERICK, but not available on ALCOR. 
*/ dev = pci_get_device(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP, NULL); if (dev && dev->devfn == PCI_DEVFN(6,0)) { alpha_mv.sys.cia.gru_int_req_bits = XLT_GRU_INT_REQ_BITS; printk(KERN_INFO "%s: Detected AS500 or XLT motherboard.\n", __func__); } pci_dev_put(dev); } /* * The System Vectors */ struct alpha_machine_vector alcor_mv __initmv = { .vector_name = "Alcor", DO_EV5_MMU, DO_DEFAULT_RTC, DO_CIA_IO, .machine_check = cia_machine_check, .max_isa_dma_address = ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS, .min_io_address = EISA_DEFAULT_IO_BASE, .min_mem_address = CIA_DEFAULT_MEM_BASE, .nr_irqs = 48, .device_interrupt = alcor_device_interrupt, .init_arch = cia_init_arch, .init_irq = alcor_init_irq, .init_rtc = common_init_rtc, .init_pci = alcor_init_pci, .kill_arch = alcor_kill_arch, .pci_map_irq = alcor_map_irq, .pci_swizzle = common_swizzle, .sys = { .cia = { .gru_int_req_bits = ALCOR_GRU_INT_REQ_BITS }} }; ALIAS_MV(alcor) struct alpha_machine_vector xlt_mv __initmv = { .vector_name = "XLT", DO_EV5_MMU, DO_DEFAULT_RTC, DO_CIA_IO, .machine_check = cia_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = EISA_DEFAULT_IO_BASE, .min_mem_address = CIA_DEFAULT_MEM_BASE, .nr_irqs = 48, .device_interrupt = alcor_device_interrupt, .init_arch = cia_init_arch, .init_irq = alcor_init_irq, .init_rtc = common_init_rtc, .init_pci = alcor_init_pci, .kill_arch = alcor_kill_arch, .pci_map_irq = alcor_map_irq, .pci_swizzle = common_swizzle, .sys = { .cia = { .gru_int_req_bits = XLT_GRU_INT_REQ_BITS }} }; /* No alpha_mv alias for XLT, since we compile it in unconditionally with ALCOR; setup_arch knows how to cope. */
gpl-2.0
Kali-/android_kernel_sony_msm8974
drivers/input/keyboard/hilkbd.c
9788
8804
/* * linux/drivers/hil/hilkbd.c * * Copyright (C) 1998 Philip Blundell <philb@gnu.org> * Copyright (C) 1999 Matthew Wilcox <willy@bofh.ai> * Copyright (C) 1999-2007 Helge Deller <deller@gmx.de> * * Very basic HP Human Interface Loop (HIL) driver. * This driver handles the keyboard on HP300 (m68k) and on some * HP700 (parisc) series machines. * * * This file is subject to the terms and conditions of the GNU General Public * License version 2. See the file COPYING in the main directory of this * archive for more details. */ #include <linux/pci_ids.h> #include <linux/ioport.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/input.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/hil.h> #include <linux/io.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <asm/irq.h> #ifdef CONFIG_HP300 #include <asm/hwtest.h> #endif MODULE_AUTHOR("Philip Blundell, Matthew Wilcox, Helge Deller"); MODULE_DESCRIPTION("HIL keyboard driver (basic functionality)"); MODULE_LICENSE("GPL v2"); #if defined(CONFIG_PARISC) #include <asm/io.h> #include <asm/hardware.h> #include <asm/parisc-device.h> static unsigned long hil_base; /* HPA for the HIL device */ static unsigned int hil_irq; #define HILBASE hil_base /* HPPA (parisc) port address */ #define HIL_DATA 0x800 #define HIL_CMD 0x801 #define HIL_IRQ hil_irq #define hil_readb(p) gsc_readb(p) #define hil_writeb(v,p) gsc_writeb((v),(p)) #elif defined(CONFIG_HP300) #define HILBASE 0xf0428000UL /* HP300 (m68k) port address */ #define HIL_DATA 0x1 #define HIL_CMD 0x3 #define HIL_IRQ 2 #define hil_readb(p) readb(p) #define hil_writeb(v,p) writeb((v),(p)) #else #error "HIL is not supported on this platform" #endif /* HIL helper functions */ #define hil_busy() (hil_readb(HILBASE + HIL_CMD) & HIL_BUSY) #define hil_data_available() (hil_readb(HILBASE + HIL_CMD) & HIL_DATA_RDY) #define hil_status() (hil_readb(HILBASE + HIL_CMD)) #define hil_command(x) do { hil_writeb((x), HILBASE + HIL_CMD); } while 
(0) #define hil_read_data() (hil_readb(HILBASE + HIL_DATA)) #define hil_write_data(x) do { hil_writeb((x), HILBASE + HIL_DATA); } while (0) /* HIL constants */ #define HIL_BUSY 0x02 #define HIL_DATA_RDY 0x01 #define HIL_SETARD 0xA0 /* set auto-repeat delay */ #define HIL_SETARR 0xA2 /* set auto-repeat rate */ #define HIL_SETTONE 0xA3 /* set tone generator */ #define HIL_CNMT 0xB2 /* clear nmi */ #define HIL_INTON 0x5C /* Turn on interrupts. */ #define HIL_INTOFF 0x5D /* Turn off interrupts. */ #define HIL_READKBDSADR 0xF9 #define HIL_WRITEKBDSADR 0xE9 static unsigned int hphilkeyb_keycode[HIL_KEYCODES_SET1_TBLSIZE] __read_mostly = { HIL_KEYCODES_SET1 }; /* HIL structure */ static struct { struct input_dev *dev; unsigned int curdev; unsigned char s; unsigned char c; int valid; unsigned char data[16]; unsigned int ptr; spinlock_t lock; void *dev_id; /* native bus device */ } hil_dev; static void poll_finished(void) { int down; int key; unsigned char scode; switch (hil_dev.data[0]) { case 0x40: down = (hil_dev.data[1] & 1) == 0; scode = hil_dev.data[1] >> 1; key = hphilkeyb_keycode[scode]; input_report_key(hil_dev.dev, key, down); break; } hil_dev.curdev = 0; } static inline void handle_status(unsigned char s, unsigned char c) { if (c & 0x8) { /* End of block */ if (c & 0x10) poll_finished(); } else { if (c & 0x10) { if (hil_dev.curdev) poll_finished(); /* just in case */ hil_dev.curdev = c & 7; hil_dev.ptr = 0; } } } static inline void handle_data(unsigned char s, unsigned char c) { if (hil_dev.curdev) { hil_dev.data[hil_dev.ptr++] = c; hil_dev.ptr &= 15; } } /* handle HIL interrupts */ static irqreturn_t hil_interrupt(int irq, void *handle) { unsigned char s, c; s = hil_status(); c = hil_read_data(); switch (s >> 4) { case 0x5: handle_status(s, c); break; case 0x6: handle_data(s, c); break; case 0x4: hil_dev.s = s; hil_dev.c = c; mb(); hil_dev.valid = 1; break; } return IRQ_HANDLED; } /* send a command to the HIL */ static void hil_do(unsigned char cmd, unsigned 
char *data, unsigned int len) { unsigned long flags; spin_lock_irqsave(&hil_dev.lock, flags); while (hil_busy()) /* wait */; hil_command(cmd); while (len--) { while (hil_busy()) /* wait */; hil_write_data(*(data++)); } spin_unlock_irqrestore(&hil_dev.lock, flags); } /* initialize HIL */ static int __devinit hil_keyb_init(void) { unsigned char c; unsigned int i, kbid; wait_queue_head_t hil_wait; int err; if (hil_dev.dev) return -ENODEV; /* already initialized */ init_waitqueue_head(&hil_wait); spin_lock_init(&hil_dev.lock); hil_dev.dev = input_allocate_device(); if (!hil_dev.dev) return -ENOMEM; err = request_irq(HIL_IRQ, hil_interrupt, 0, "hil", hil_dev.dev_id); if (err) { printk(KERN_ERR "HIL: Can't get IRQ\n"); goto err1; } /* Turn on interrupts */ hil_do(HIL_INTON, NULL, 0); /* Look for keyboards */ hil_dev.valid = 0; /* clear any pending data */ hil_do(HIL_READKBDSADR, NULL, 0); wait_event_interruptible_timeout(hil_wait, hil_dev.valid, 3 * HZ); if (!hil_dev.valid) printk(KERN_WARNING "HIL: timed out, assuming no keyboard present\n"); c = hil_dev.c; hil_dev.valid = 0; if (c == 0) { kbid = -1; printk(KERN_WARNING "HIL: no keyboard present\n"); } else { kbid = ffz(~c); printk(KERN_INFO "HIL: keyboard found at id %d\n", kbid); } /* set it to raw mode */ c = 0; hil_do(HIL_WRITEKBDSADR, &c, 1); for (i = 0; i < HIL_KEYCODES_SET1_TBLSIZE; i++) if (hphilkeyb_keycode[i] != KEY_RESERVED) __set_bit(hphilkeyb_keycode[i], hil_dev.dev->keybit); hil_dev.dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); hil_dev.dev->ledbit[0] = BIT_MASK(LED_NUML) | BIT_MASK(LED_CAPSL) | BIT_MASK(LED_SCROLLL); hil_dev.dev->keycodemax = HIL_KEYCODES_SET1_TBLSIZE; hil_dev.dev->keycodesize= sizeof(hphilkeyb_keycode[0]); hil_dev.dev->keycode = hphilkeyb_keycode; hil_dev.dev->name = "HIL keyboard"; hil_dev.dev->phys = "hpkbd/input0"; hil_dev.dev->id.bustype = BUS_HIL; hil_dev.dev->id.vendor = PCI_VENDOR_ID_HP; hil_dev.dev->id.product = 0x0001; hil_dev.dev->id.version = 0x0010; err = 
input_register_device(hil_dev.dev); if (err) { printk(KERN_ERR "HIL: Can't register device\n"); goto err2; } printk(KERN_INFO "input: %s, ID %d at 0x%08lx (irq %d) found and attached\n", hil_dev.dev->name, kbid, HILBASE, HIL_IRQ); return 0; err2: hil_do(HIL_INTOFF, NULL, 0); free_irq(HIL_IRQ, hil_dev.dev_id); err1: input_free_device(hil_dev.dev); hil_dev.dev = NULL; return err; } static void __devexit hil_keyb_exit(void) { if (HIL_IRQ) free_irq(HIL_IRQ, hil_dev.dev_id); /* Turn off interrupts */ hil_do(HIL_INTOFF, NULL, 0); input_unregister_device(hil_dev.dev); hil_dev.dev = NULL; } #if defined(CONFIG_PARISC) static int __devinit hil_probe_chip(struct parisc_device *dev) { /* Only allow one HIL keyboard */ if (hil_dev.dev) return -ENODEV; if (!dev->irq) { printk(KERN_WARNING "HIL: IRQ not found for HIL bus at 0x%p\n", (void *)dev->hpa.start); return -ENODEV; } hil_base = dev->hpa.start; hil_irq = dev->irq; hil_dev.dev_id = dev; printk(KERN_INFO "Found HIL bus at 0x%08lx, IRQ %d\n", hil_base, hil_irq); return hil_keyb_init(); } static int __devexit hil_remove_chip(struct parisc_device *dev) { hil_keyb_exit(); return 0; } static struct parisc_device_id hil_tbl[] = { { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00073 }, { 0, } }; #if 0 /* Disabled to avoid conflicts with the HP SDC HIL drivers */ MODULE_DEVICE_TABLE(parisc, hil_tbl); #endif static struct parisc_driver hil_driver = { .name = "hil", .id_table = hil_tbl, .probe = hil_probe_chip, .remove = __devexit_p(hil_remove_chip), }; static int __init hil_init(void) { return register_parisc_driver(&hil_driver); } static void __exit hil_exit(void) { unregister_parisc_driver(&hil_driver); } #else /* !CONFIG_PARISC */ static int __init hil_init(void) { int error; /* Only allow one HIL keyboard */ if (hil_dev.dev) return -EBUSY; if (!MACH_IS_HP300) return -ENODEV; if (!hwreg_present((void *)(HILBASE + HIL_DATA))) { printk(KERN_ERR "HIL: hardware register was not found\n"); return -ENODEV; } if 
(!request_region(HILBASE + HIL_DATA, 2, "hil")) { printk(KERN_ERR "HIL: IOPORT region already used\n"); return -EIO; } error = hil_keyb_init(); if (error) { release_region(HILBASE + HIL_DATA, 2); return error; } return 0; } static void __exit hil_exit(void) { hil_keyb_exit(); release_region(HILBASE + HIL_DATA, 2); } #endif /* CONFIG_PARISC */ module_init(hil_init); module_exit(hil_exit);
gpl-2.0
TenchiMasaki/android_kernel_asus_moorefield
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
10300
9519
/* * Copyright (C) 1999 - 2010 Intel Corporation. * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. * * This code was derived from the Intel e1000e Linux driver. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. */ #include "pch_gbe.h" #include "pch_gbe_phy.h" #define PHY_MAX_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ /* PHY 1000 MII Register/Bit Definitions */ /* PHY Registers defined by IEEE */ #define PHY_CONTROL 0x00 /* Control Register */ #define PHY_STATUS 0x01 /* Status Regiser */ #define PHY_ID1 0x02 /* Phy Id Register (word 1) */ #define PHY_ID2 0x03 /* Phy Id Register (word 2) */ #define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ #define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ #define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Register */ #define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ #define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ #define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Register */ #define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Register */ #define PHY_EXT_STATUS 0x0F /* Extended Status Register */ #define PHY_PHYSP_CONTROL 0x10 /* PHY Specific Control Register */ #define PHY_EXT_PHYSP_CONTROL 0x14 /* Extended PHY Specific Control Register */ #define PHY_LED_CONTROL 0x18 /* LED Control Register */ #define PHY_EXT_PHYSP_STATUS 0x1B /* Extended PHY Specific Status Register */ /* PHY Control Register */ #define 
MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ #define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ #define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ #define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ #define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ #define MII_CR_POWER_DOWN 0x0800 /* Power down */ #define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ #define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ #define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ #define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ #define MII_CR_SPEED_1000 0x0040 #define MII_CR_SPEED_100 0x2000 #define MII_CR_SPEED_10 0x0000 /* PHY Status Register */ #define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ #define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ #define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ #define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ #define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ #define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ #define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ #define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ #define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ #define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ #define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ #define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ #define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ #define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ #define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ /* Phy Id Register (word 2) */ #define PHY_REVISION_MASK 0x000F /* PHY Specific Control Register */ #define PHYSP_CTRL_ASSERT_CRS_TX 0x0800 /* Default value of PHY register */ #define PHY_CONTROL_DEFAULT 0x1140 /* Control Register */ #define PHY_AUTONEG_ADV_DEFAULT 0x01e0 /* Autoneg Advertisement */ #define PHY_NEXT_PAGE_TX_DEFAULT 0x2001 /* Next Page TX */ #define PHY_1000T_CTRL_DEFAULT 0x0300 /* 1000Base-T Control Register */ #define PHY_PHYSP_CONTROL_DEFAULT 0x01EE /* PHY Specific Control Register */ /** * pch_gbe_phy_get_id - Retrieve the PHY ID and revision * @hw: Pointer to the HW structure * Returns * 0: Successful. * Negative value: Failed. */ s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw) { struct pch_gbe_phy_info *phy = &hw->phy; s32 ret; u16 phy_id1; u16 phy_id2; ret = pch_gbe_phy_read_reg_miic(hw, PHY_ID1, &phy_id1); if (ret) return ret; ret = pch_gbe_phy_read_reg_miic(hw, PHY_ID2, &phy_id2); if (ret) return ret; /* * PHY_ID1: [bit15-0:ID(21-6)] * PHY_ID2: [bit15-10:ID(5-0)][bit9-4:Model][bit3-0:revision] */ phy->id = (u32)phy_id1; phy->id = ((phy->id << 6) | ((phy_id2 & 0xFC00) >> 10)); phy->revision = (u32) (phy_id2 & 0x000F); pr_debug("phy->id : 0x%08x phy->revision : 0x%08x\n", phy->id, phy->revision); return 0; } /** * pch_gbe_phy_read_reg_miic - Read MII control register * @hw: Pointer to the HW structure * @offset: Register offset to be read * @data: Pointer to the read data * Returns * 0: Successful. * -EINVAL: Invalid argument. 
*/ s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data) { struct pch_gbe_phy_info *phy = &hw->phy; if (offset > PHY_MAX_REG_ADDRESS) { pr_err("PHY Address %d is out of range\n", offset); return -EINVAL; } *data = pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_READ, offset, (u16)0); return 0; } /** * pch_gbe_phy_write_reg_miic - Write MII control register * @hw: Pointer to the HW structure * @offset: Register offset to be read * @data: data to write to register at offset * Returns * 0: Successful. * -EINVAL: Invalid argument. */ s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data) { struct pch_gbe_phy_info *phy = &hw->phy; if (offset > PHY_MAX_REG_ADDRESS) { pr_err("PHY Address %d is out of range\n", offset); return -EINVAL; } pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_WRITE, offset, data); return 0; } /** * pch_gbe_phy_sw_reset - PHY software reset * @hw: Pointer to the HW structure */ void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw) { u16 phy_ctrl; pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &phy_ctrl); phy_ctrl |= MII_CR_RESET; pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, phy_ctrl); udelay(1); } /** * pch_gbe_phy_hw_reset - PHY hardware reset * @hw: Pointer to the HW structure */ void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw) { pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, PHY_CONTROL_DEFAULT); pch_gbe_phy_write_reg_miic(hw, PHY_AUTONEG_ADV, PHY_AUTONEG_ADV_DEFAULT); pch_gbe_phy_write_reg_miic(hw, PHY_NEXT_PAGE_TX, PHY_NEXT_PAGE_TX_DEFAULT); pch_gbe_phy_write_reg_miic(hw, PHY_1000T_CTRL, PHY_1000T_CTRL_DEFAULT); pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, PHY_PHYSP_CONTROL_DEFAULT); } /** * pch_gbe_phy_power_up - restore link in case the phy was powered down * @hw: Pointer to the HW structure */ void pch_gbe_phy_power_up(struct pch_gbe_hw *hw) { u16 mii_reg; mii_reg = 0; /* Just clear the power down bit to wake the phy back up */ /* according to the manual, the phy will retain its * settings 
across a power-down/up cycle */ pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &mii_reg); mii_reg &= ~MII_CR_POWER_DOWN; pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, mii_reg); } /** * pch_gbe_phy_power_down - Power down PHY * @hw: Pointer to the HW structure */ void pch_gbe_phy_power_down(struct pch_gbe_hw *hw) { u16 mii_reg; mii_reg = 0; /* Power down the PHY so no link is implied when interface is down * * The PHY cannot be powered down if any of the following is TRUE * * (a) WoL is enabled * (b) AMT is active */ pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &mii_reg); mii_reg |= MII_CR_POWER_DOWN; pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, mii_reg); mdelay(1); } /** * pch_gbe_phy_set_rgmii - RGMII interface setting * @hw: Pointer to the HW structure */ inline void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw) { pch_gbe_phy_sw_reset(hw); } /** * pch_gbe_phy_init_setting - PHY initial setting * @hw: Pointer to the HW structure */ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw) { struct pch_gbe_adapter *adapter; struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; int ret; u16 mii_reg; adapter = container_of(hw, struct pch_gbe_adapter, hw); ret = mii_ethtool_gset(&adapter->mii, &cmd); if (ret) pr_err("Error: mii_ethtool_gset\n"); ethtool_cmd_speed_set(&cmd, hw->mac.link_speed); cmd.duplex = hw->mac.link_duplex; cmd.advertising = hw->phy.autoneg_advertised; cmd.autoneg = hw->mac.autoneg; pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET); ret = mii_ethtool_sset(&adapter->mii, &cmd); if (ret) pr_err("Error: mii_ethtool_sset\n"); pch_gbe_phy_sw_reset(hw); pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg); mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX; pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg); }
gpl-2.0
svn2github/j
none/tests/mips64/cvm_atomic.c
61
12837
#include <stdio.h> #define N 256 unsigned long long reg_val_double[N]; void init_reg_val_double() { unsigned long c = 19650218UL; int i; reg_val_double[0]= c & 0xffffffffUL; for (i = 1; i < N; i++) { reg_val_double[i] = (1812433253UL * (reg_val_double[i - 1] ^ (reg_val_double[i - 1] >> 30)) + i); } } /* Make a copy of original array to prevent the unexpected changes by Atomic Add Instructions */ unsigned long long reg_val_double_copy[N]; void copy_reg_val_double() { int i; for (i = 0; i < N; i++) { reg_val_double_copy[i] = reg_val_double[i]; } } /* TEST1_32/64 macro is used in load atomic increment/decrement/set/clear instructions. After executing each instruction we must check both memory location and register value. 1: Move arguments (offset and base address) to registers 2: Add offset and base address to make absolute address 3: Execute instruction 4: Move result from register ($t3) 5: Load memory data ('lw' for 32bit instruction and 'ld' for 64bit addresses) */ #define TEST1_32(instruction, offset,mem) \ { \ unsigned long out = 0; \ unsigned long res_mem = 0; \ __asm__ volatile( \ "move $t0, %2" "\n\t" \ "move $t1, %3" "\n\t" \ "daddu $t0, $t1, $t0" "\n\t" \ instruction " $t3, ($t0)" "\n\t" \ "move %0, $t3" "\n\t" \ "lw %1, 0($t0)" "\n\t" \ : "=&r" (out), "=&r"(res_mem) \ : "r" (mem) , "r" (offset) \ : "$12", "$13", "cc", "memory" \ ); \ printf("%s :: offset: 0x%x, out: 0x%lx, result:0x%lx\n", \ instruction, offset, out, res_mem); \ } #define TEST1_64(instruction, offset,mem) \ { \ unsigned long out = 0; \ unsigned long res_mem = 0; \ __asm__ volatile( \ "move $t0, %2" "\n\t" \ "move $t1, %3" "\n\t" \ "daddu $t0, $t1, $t0" "\n\t" \ instruction " $t3, ($t0)" "\n\t" \ "move %0, $t3" "\n\t" \ "ld %1, 0($t0)" "\n\t" \ : "=&r" (out), "=&r"(res_mem) \ : "r" (mem) , "r" (offset) \ : "$12", "$13", "cc", "memory" \ ); \ printf("%s :: offset: 0x%x, out: 0x%lx, result: 0x%lx\n", \ instruction, offset, out, res_mem); \ } /* Test 2 macro is used for pop/dpop/baddu 
instructions. After executing each instructions the macro performs following operations: 1: Move arguments to registers 2: Execute instruction 3: Move result to register ($t3) */ #define TEST2(instruction, RSVal, RTVal) \ { \ unsigned long out; \ __asm__ volatile( \ "move $t1, %1" "\n\t" \ "move $t2, %2" "\n\t" \ instruction "\n\t" \ "move %0, $t3" "\n\t" \ : "=&r" (out) \ : "r" (RSVal), "r" (RTVal) \ : "$12", "$13", "cc", "memory" \ ); \ printf("%s :: rd 0x%lx, rs 0x%llx, rt 0x%llx\n", \ instruction, out, (long long) RSVal, (long long) RTVal); \ } /* TEST3 macro is used for store atomic add and store atomic add doubleword instructions. Following operations are performed by the test macro: 1: Move arguments to the register 2: Add offset and base address to make absolute address 3: Execute instruction 4: Load memory data */ #define TEST3(instruction, offset, mem, value) \ { \ unsigned long out = 0; \ unsigned long outPre = 0; \ __asm__ volatile( \ "move $t0, %2" "\n\t" \ "move $t1, %3" "\n\t" \ "daddu $t0, $t1, $t0" "\n\t" \ "ld %1, 0($t0)" "\n\t" \ "move $t2, %4" "\n\t" \ instruction " $t2, ($t0)" "\n\t" \ "ld %0, 0($t0)" "\n\t" \ : "=&r" (out), "=&r" (outPre) \ : "r" (mem) , "r" (offset), "r" (value) \ : "$12", "$13", "$14", "cc", "memory" \ ); \ printf("%s :: value: 0x%llx, memPre: 0x%lx, mem: 0x%lx\n", \ instruction, value, outPre, out); \ } /* TEST4_32/64 is used for load atomic add/swap instructions. Following operations are performed by macro after execution of each instruction: 1: Move arguments to register. 2: Add offset and base address to make absolute address. 3: Execute instruction. 4: Move result to register. 5: Load memory data ('lw' for 32bit instruction and 'ld' for 64bit). 
*/ #define TEST4_32(instruction, offset, mem) \ { \ unsigned long out = 0; \ unsigned long res_mem = 0; \ __asm__ volatile( \ "move $t0, %2" "\n\t" \ "move $t1, %3" "\n\t" \ "daddu $t0, $t0, $t1" "\n\t" \ instruction " $t3, ($t0), $t1" "\n\t" \ "move %0, $t3" "\n\t" \ "lw %1, 0($t0)" "\n\t" \ : "=&r" (out), "=&r"(res_mem) \ : "r" (mem) , "r" (offset) \ : "$12", "$13", "cc", "memory" \ ); \ printf("%s :: offset: 0x%x, out: 0x%lx, result:0x%lx\n", \ instruction, offset, out, res_mem); \ } #define TEST4_64(instruction, offset, mem) \ { \ unsigned long out = 0; \ unsigned long res_mem = 0; \ __asm__ volatile( \ "move $t0, %2" "\n\t" \ "move $t1, %3" "\n\t" \ "daddu $t0, $t0, $t1" "\n\t" \ instruction " $t3, ($t0), $t1" "\n\t" \ "move %0, $t3" "\n\t" \ "ld %1, 0($t0)" "\n\t" \ : "=&r" (out), "=&r"(res_mem) \ : "r" (mem) , "r" (offset) \ : "$12", "$13", "cc", "memory" \ ); \ printf("%s :: offset: 0x%x, out: 0x%lx, result: 0x%lx\n", \ instruction, offset, out, res_mem); \ } typedef enum { BADDU, POP, DPOP, SAA, SAAD, LAA, LAAD, LAW, LAWD, LAI, LAID, LAD, LADD, LAS, LASD, LAC, LACD } cvm_op; int main() { #if (_MIPS_ARCH_OCTEON2) init_reg_val_double(); int i,j; cvm_op op; for (op = BADDU; op <= LACD; op++) { switch(op){ /* Unsigned Byte Add - BADDU rd, rs, rt; Cavium OCTEON */ case BADDU: { for(i = 4; i < N; i += 4) for(j = 4; j < N; j += 4) TEST2("baddu $t3, $t1, $t2", reg_val_double[i], reg_val_double[j]); break; } case POP: { /* Count Ones in a Word - POP */ for(j = 4; j < N; j += 4) TEST2("pop $t3, $t1", reg_val_double[j], 0); break; } case DPOP: { /* Count Ones in a Doubleword - DPOP */ for(j = 8; j < N; j += 8) TEST2("dpop $t3, $t1", reg_val_double[j], 0); break; } case SAA: { /* Atomic Add Word - saa rt, (base). */ copy_reg_val_double(); for(j = 4; j < N; j += 4) TEST3("saa", j, reg_val_double_copy, reg_val_double[j]); break; } case SAAD: { /* Atomic Add Double - saad rt, (base). 
*/ copy_reg_val_double(); for(j = 8; j < N; j += 8) TEST3("saad", j, reg_val_double_copy, reg_val_double[j]); break; } case LAA: { /* Load Atomic Add Word - laa rd, (base), rt. */ copy_reg_val_double(); for(j = 4; j < N; j += 4) TEST4_32("laa", j, reg_val_double_copy); break; } case LAAD: { /* Load Atomic Add Double - laad rd, (base), rt */ copy_reg_val_double(); for(j = 8; j < N; j += 8) TEST4_64("laad ", j, reg_val_double_copy); break; } case LAW: { /* Load Atomic Swap Word - law rd, (base), rt */ copy_reg_val_double(); for(j = 4; j < N; j += 4) TEST4_32("law", j, reg_val_double_copy); break; } case LAWD: { /* Load Atomic Swap Double - lawd rd, (base), rt */ copy_reg_val_double(); for(j = 8; j < N; j += 8) TEST4_64("lawd", j, reg_val_double_copy); break; } case LAI: { /* Load Atomic Increment Word - lai rd, (base) */ copy_reg_val_double(); for(i = 4; i < N; i += 4) TEST1_32("lai", i, reg_val_double_copy); break; } case LAID: { /* Load Atomic Increment Double - laid rd, (base) */ copy_reg_val_double(); for(i = 8; i < N; i += 8) TEST1_64("laid ", i, reg_val_double_copy); break; } case LAD: { /* Load Atomic Decrement Word - lad rd, (base) */ copy_reg_val_double(); for(i = 4; i < N; i += 4) TEST1_32("lad", i, reg_val_double_copy); break; } case LADD: { /* Load Atomic Decrement Double - ladd rd, (base) */ copy_reg_val_double(); for(i = 8; i < N; i += 8) TEST1_64("ladd",i, reg_val_double_copy); break; } case LAS:{ /* Load Atomic Set Word - las rd, (base) */ copy_reg_val_double(); for(i = 4; i < N; i += 4) TEST1_32("las",i, reg_val_double_copy); break; } case LASD:{ /* Load Atomic Set Word - lasd rd, (base) */ copy_reg_val_double(); for(i = 8; i < N; i += 8) TEST1_64("lasd",i, reg_val_double_copy); break; } case LAC: { /* Load Atomic Clear Word - lac rd, (base) */ copy_reg_val_double(); for(i = 4; i < N; i += 4) TEST1_32("lac",i, reg_val_double_copy); break; } case LACD: { /* Load Atomic Clear Double - lacd rd, (base) */ copy_reg_val_double(); for(i = 8; i < N; i += 8) 
TEST1_64("lacd",i, reg_val_double_copy); break; } default: printf("Nothing to be executed \n"); } } #endif return 0; }
gpl-2.0
deadman96385/android_kernel_leeco_msm8996
drivers/gud/MobiCoreDriver/main.c
61
16228
/* * Copyright (c) 2013-2015 TRUSTONIC LIMITED * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <asm/pgtable.h> #include <linux/highmem.h> #include <linux/slab.h> #include <linux/kthread.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/ioctl.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/mutex.h> #include <linux/cdev.h> #include <linux/stat.h> #include <linux/debugfs.h> #include "public/mc_linux.h" #include "main.h" #include "fastcall.h" #include "arm.h" #include "mmu.h" #include "scheduler.h" #include "pm.h" #include "debug.h" #include "logging.h" #include "admin.h" #include "mcp.h" #include "session.h" #include "client.h" #include "api.h" #include "build_tag.h" /* Define a MobiCore device structure for use with dev_debug() etc */ static struct device_driver driver = { .name = "Trustonic" }; static struct device device = { .driver = &driver }; struct mc_device_ctx g_ctx = { .mcd = &device }; /* device admin */ static dev_t mc_dev_admin; /* device user */ static dev_t mc_dev_user; /* Need to discover a chrdev region for the driver */ static struct cdev mc_user_cdev; /* Device class for the driver assigned major */ static struct class *mc_device_class; /* * Get client object from file pointer */ static inline struct tbase_client *get_client(struct file *file) { return (struct tbase_client *)file->private_data; } /* * Callback for system mmap() */ static int mc_fd_user_mmap(struct file *file, struct vm_area_struct *vmarea) { struct tbase_client *client = get_client(file); uint32_t len = (uint32_t)(vmarea->vm_end 
- vmarea->vm_start); /* Alloc contiguous buffer for this client */ return api_malloc_cbuf(client, len, NULL, vmarea); } /* * Check r/w access to referenced memory */ static inline int ioctl_check_pointer(unsigned int cmd, int __user *uarg) { int err = 0; if (_IOC_DIR(cmd) & _IOC_READ) err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd)); else if (_IOC_DIR(cmd) & _IOC_WRITE) err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd)); if (err) return -EFAULT; return 0; } /* * Callback for system ioctl() * Implement most of ClientLib API functions * @file pointer to file * @cmd command * @arg arguments * * Returns 0 for OK and an errno in case of error */ static long mc_fd_user_ioctl(struct file *file, unsigned int id, unsigned long arg) { struct tbase_client *client = get_client(file); int __user *uarg = (int __user *)arg; int ret = -EINVAL; MCDRV_DBG("%u from %s", _IOC_NR(id), current->comm); if (WARN(!client, "No client data available")) return -EPROTO; if (ioctl_check_pointer(id, uarg)) return -EFAULT; switch (id) { case MC_IO_FREEZE: /* Freeze the client */ ret = api_freeze_device(client); break; case MC_IO_OPEN_SESSION: { struct mc_ioctl_open_sess sess; if (copy_from_user(&sess, uarg, sizeof(sess))) { ret = -EFAULT; break; } ret = api_open_session(client, &sess.sid, &sess.uuid, sess.tci, sess.tcilen, sess.is_gp_uuid, &sess.identity); if (ret) break; if (copy_to_user(uarg, &sess, sizeof(sess))) { ret = -EFAULT; api_close_session(client, sess.sid); break; } break; } case MC_IO_OPEN_TRUSTLET: { struct mc_ioctl_open_trustlet ta_desc; if (copy_from_user(&ta_desc, uarg, sizeof(ta_desc))) { ret = -EFAULT; break; } /* Call internal api */ ret = api_open_trustlet(client, &ta_desc.sid, ta_desc.spid, ta_desc.buffer, ta_desc.tlen, ta_desc.tci, ta_desc.tcilen); if (ret) break; if (copy_to_user(uarg, &ta_desc, sizeof(ta_desc))) { ret = -EFAULT; api_close_session(client, ta_desc.sid); break; } break; } case MC_IO_CLOSE_SESSION: { uint32_t sid = (uint32_t)arg; ret = 
api_close_session(client, sid); break; } case MC_IO_NOTIFY: { uint32_t sid = (uint32_t)arg; ret = api_notify(client, sid); break; } case MC_IO_WAIT: { struct mc_ioctl_wait wait; if (copy_from_user(&wait, uarg, sizeof(wait))) { ret = -EFAULT; break; } ret = api_wait_notification(client, wait.sid, wait.timeout); break; } case MC_IO_MAP: { struct mc_ioctl_map map; if (copy_from_user(&map, uarg, sizeof(map))) { ret = -EFAULT; break; } ret = api_map_wsms(client, map.sid, map.bufs); if (ret) break; /* Fill in return struct */ if (copy_to_user(uarg, &map, sizeof(map))) { ret = -EFAULT; api_unmap_wsms(client, map.sid, map.bufs); break; } break; } case MC_IO_UNMAP: { struct mc_ioctl_map map; if (copy_from_user(&map, uarg, sizeof(map))) { ret = -EFAULT; break; } ret = api_unmap_wsms(client, map.sid, map.bufs); break; } case MC_IO_ERR: { struct mc_ioctl_geterr *uerr = (struct mc_ioctl_geterr *)uarg; uint32_t sid; int32_t exit_code; if (get_user(sid, &uerr->sid)) { ret = -EFAULT; break; } ret = api_get_session_exitcode(client, sid, &exit_code); if (ret) break; /* Fill in return struct */ if (put_user(exit_code, &uerr->value)) { ret = -EFAULT; break; } break; } case MC_IO_VERSION: { struct mc_version_info version_info; ret = mcp_get_version(&version_info); if (ret) break; if (copy_to_user(uarg, &version_info, sizeof(version_info))) ret = -EFAULT; break; } case MC_IO_DR_VERSION: { uint32_t version = MC_VERSION(MCDRVMODULEAPI_VERSION_MAJOR, MCDRVMODULEAPI_VERSION_MINOR); ret = put_user(version, uarg); break; } default: MCDRV_ERROR("unsupported cmd=0x%x", id); ret = -ENOIOCTLCMD; } return ret; } /* * Callback for system open() * A set of internal client data are created and initialized. * * @inode * @file * Returns 0 if OK or -ENOMEM if no allocation was possible. 
*/ static int mc_fd_user_open(struct inode *inode, struct file *file) { struct tbase_client *client; MCDRV_DBG("from %s", current->comm); /* Create client */ client = api_open_device(false); if (!client) return -ENOMEM; /* Store client in user file */ file->private_data = client; return 0; } /* * Callback for system close() * The client object is freed. * @inode * @file * Returns 0 */ static int mc_fd_user_release(struct inode *inode, struct file *file) { struct tbase_client *client = get_client(file); MCDRV_DBG("from %s", current->comm); if (WARN(!client, "No client data available")) return -EPROTO; /* Detach client from user file */ file->private_data = NULL; /* Destroy client, including remaining sessions */ api_close_device(client); return 0; } static const struct file_operations mc_user_fops = { .owner = THIS_MODULE, .open = mc_fd_user_open, .release = mc_fd_user_release, .unlocked_ioctl = mc_fd_user_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = mc_fd_user_ioctl, #endif .mmap = mc_fd_user_mmap, }; int kasnprintf(struct kasnprintf_buf *buf, const char *fmt, ...) 
{ va_list args; int max_size = buf->size - buf->off; int i; va_start(args, fmt); i = vsnprintf(buf->buf + buf->off, max_size, fmt, args); if (i >= max_size) { int new_size = PAGE_ALIGN(buf->size + i + 1); char *new_buf = krealloc(buf->buf, new_size, buf->gfp); if (!new_buf) { i = -ENOMEM; } else { buf->buf = new_buf; buf->size = new_size; max_size = buf->size - buf->off; i = vsnprintf(buf->buf + buf->off, max_size, fmt, args); } } if (i > 0) buf->off += i; va_end(args); return i; } static ssize_t debug_info_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { /* Add/update buffer */ if (!file->private_data || !*ppos) { struct kasnprintf_buf *buf, *old_buf; int ret; buf = kzalloc(GFP_KERNEL, sizeof(*buf)); if (!buf) return -ENOMEM; buf->gfp = GFP_KERNEL; ret = api_info(buf); if (ret < 0) { kfree(buf); return ret; } old_buf = file->private_data; file->private_data = buf; kfree(old_buf); } if (file->private_data) { struct kasnprintf_buf *buf = file->private_data; return simple_read_from_buffer(user_buf, count, ppos, buf->buf, buf->off); } return 0; } static int debug_info_release(struct inode *inode, struct file *file) { kfree(file->private_data); return 0; } static const struct file_operations mc_debug_info_ops = { .read = debug_info_read, .llseek = default_llseek, .release = debug_info_release, }; static inline int device_admin_init(int (*tee_start_cb)(void)) { int ret = 0; cdev_init(&mc_user_cdev, &mc_user_fops); mc_device_class = class_create(THIS_MODULE, "trustonic_tee"); if (IS_ERR(mc_device_class)) { MCDRV_ERROR("failed to create device class"); return PTR_ERR(mc_device_class); } /* Create the ADMIN node */ ret = mc_admin_init(mc_device_class, &mc_dev_admin, tee_start_cb); if (ret < 0) { MCDRV_ERROR("failed to init mobicore device"); class_destroy(mc_device_class); return ret; } return 0; } static inline int device_user_init(void) { int ret = 0; struct device *dev; mc_dev_user = MKDEV(MAJOR(mc_dev_admin), 1); /* Create the user node */ 
ret = cdev_add(&mc_user_cdev, mc_dev_user, 1); if (ret) { MCDRV_ERROR("user device register failed"); goto err_cdev_add; } mc_user_cdev.owner = THIS_MODULE; dev = device_create(mc_device_class, NULL, mc_dev_user, NULL, MC_USER_DEVNODE); if (IS_ERR(dev)) { ret = PTR_ERR(dev); goto err_device_create; } /* Create debugfs info entry */ debugfs_create_file("info", 0400, g_ctx.debug_dir, NULL, &mc_debug_info_ops); return 0; err_device_create: cdev_del(&mc_user_cdev); err_cdev_add: mc_admin_exit(mc_device_class); class_destroy(mc_device_class); MCDRV_DBG("failed with %d", ret); return ret; } static void devices_exit(void) { device_destroy(mc_device_class, mc_dev_user); cdev_del(&mc_user_cdev); mc_admin_exit(mc_device_class); class_destroy(mc_device_class); } static inline int mobicore_start(void) { int ret; struct mc_version_info version_info; ret = mcp_start(); if (ret) { MCDRV_ERROR("TEE start failed"); goto err_mcp; } ret = mc_logging_start(); if (ret) { MCDRV_ERROR("Log start failed"); goto err_log; } ret = mc_scheduler_start(); if (ret) { MCDRV_ERROR("Scheduler start failed"); goto err_sched; } ret = mc_pm_start(); if (ret) { MCDRV_ERROR("Power Management start failed"); goto err_pm; } ret = mcp_get_version(&version_info); if (ret) goto err_mcp_cmd; MCDRV_DBG("\n" " product_id = %s\n" " version_so = 0x%x\n" " version_mci = 0x%x\n" " version_mclf = 0x%x\n" " version_container = 0x%x\n" " version_mc_config = 0x%x\n" " version_tl_api = 0x%x\n" " version_dr_api = 0x%x\n" " version_cmp = 0x%x\n", version_info.product_id, version_info.version_mci, version_info.version_so, version_info.version_mclf, version_info.version_container, version_info.version_mc_config, version_info.version_tl_api, version_info.version_dr_api, version_info.version_cmp); if (MC_VERSION_MAJOR(version_info.version_mci) > 1) { pr_err("MCI version %d.%d is too recent for this driver", MC_VERSION_MAJOR(version_info.version_mci), MC_VERSION_MINOR(version_info.version_mci)); goto err_version; } if 
((MC_VERSION_MAJOR(version_info.version_mci) == 0) && (MC_VERSION_MINOR(version_info.version_mci) < 6)) { pr_err("MCI version %d.%d is too old for this driver", MC_VERSION_MAJOR(version_info.version_mci), MC_VERSION_MINOR(version_info.version_mci)); goto err_version; } dev_info(g_ctx.mcd, "MobiCore MCI version is %d.%d\n", MC_VERSION_MAJOR(version_info.version_mci), MC_VERSION_MINOR(version_info.version_mci)); /* Determine which features are supported */ switch (version_info.version_mci) { case MC_VERSION(1, 2): /* 310 */ g_ctx.f_client_login = true; /* Fall through */ case MC_VERSION(1, 1): g_ctx.f_multimap = true; /* Fall through */ case MC_VERSION(1, 0): /* 302 */ g_ctx.f_mem_ext = true; g_ctx.f_ta_auth = true; /* Fall through */ case MC_VERSION(0, 7): g_ctx.f_timeout = true; /* Fall through */ case MC_VERSION(0, 6): /* 301 */ break; } ret = device_user_init(); if (ret) goto err_create_dev_user; return 0; err_create_dev_user: err_version: err_mcp_cmd: mc_pm_stop(); err_pm: mc_scheduler_stop(); err_sched: mc_logging_stop(); err_log: mcp_stop(); err_mcp: return ret; } static inline void mobicore_stop(void) { mc_pm_stop(); mc_scheduler_stop(); mc_logging_stop(); mcp_stop(); } /* * This function is called by the kernel during startup or by a insmod command. * This device is installed and registered as cdev, then interrupt and * queue handling is set up */ static int mobicore_init(void) { int err = 0; dev_set_name(g_ctx.mcd, "TEE"); /* Do not remove or change the following trace. * The string "MobiCore" is used to detect if <t-base is in of the image */ dev_info(g_ctx.mcd, "MobiCore mcDrvModuleApi version is %d.%d\n", MCDRVMODULEAPI_VERSION_MAJOR, MCDRVMODULEAPI_VERSION_MINOR); #ifdef MOBICORE_COMPONENT_BUILD_TAG dev_info(g_ctx.mcd, "MobiCore %s\n", MOBICORE_COMPONENT_BUILD_TAG); #endif /* Hardware does not support ARM TrustZone -> Cannot continue! 
*/ if (!has_security_extensions()) { MCDRV_ERROR("Hardware doesn't support ARM TrustZone!"); return -ENODEV; } /* Running in secure mode -> Cannot load the driver! */ if (is_secure_mode()) { MCDRV_ERROR("Running in secure MODE!"); return -ENODEV; } /* Init common API layer */ api_init(); /* Init plenty of nice features */ err = mc_fastcall_init(); if (err) { MCDRV_ERROR("Fastcall support init failed!"); goto fail_fastcall_init; } err = mcp_init(); if (err) { MCDRV_ERROR("MCP init failed!"); goto fail_mcp_init; } err = mc_logging_init(); if (err) { MCDRV_ERROR("Log init failed!"); goto fail_log_init; } /* The scheduler is the first to create a debugfs entry */ g_ctx.debug_dir = debugfs_create_dir("trustonic_tee", NULL); err = mc_scheduler_init(); if (err) { MCDRV_ERROR("Scheduler init failed!"); goto fail_mc_device_sched_init; } /* * Create admin dev so that daemon can already communicate with * the driver */ err = device_admin_init(mobicore_start); if (err) goto fail_creat_dev_admin; return 0; fail_creat_dev_admin: mc_scheduler_exit(); fail_mc_device_sched_init: debugfs_remove(g_ctx.debug_dir); mc_logging_exit(); fail_log_init: mcp_exit(); fail_mcp_init: mc_fastcall_exit(); fail_fastcall_init: return err; } /* * This function removes this device driver from the Linux device manager . 
*/ static void mobicore_exit(void) { MCDRV_DBG("enter"); devices_exit(); mobicore_stop(); mc_scheduler_exit(); mc_logging_exit(); mcp_exit(); mc_fastcall_exit(); debugfs_remove_recursive(g_ctx.debug_dir); MCDRV_DBG("exit"); } /* Linux Driver Module Macros */ #ifdef MC_DEVICE_PROPNAME static int mobicore_probe(struct platform_device *pdev) { g_ctx.mcd->of_node = pdev->dev.of_node; mobicore_init(); return 0; } static const struct of_device_id of_match_table[] = { { .compatible = MC_DEVICE_PROPNAME }, { } }; static struct platform_driver mc_plat_driver = { .probe = mobicore_probe, .driver = { .name = "mcd", .owner = THIS_MODULE, .of_match_table = of_match_table, } }; static int mobicore_register(void) { return platform_driver_register(&mc_plat_driver); } static void mobicore_unregister(void) { platform_driver_unregister(&mc_plat_driver); mobicore_exit(); } module_init(mobicore_register); module_exit(mobicore_unregister); #else /* MC_DEVICE_PROPNAME */ module_init(mobicore_init); module_exit(mobicore_exit); #endif /* !MC_DEVICE_PROPNAME */ MODULE_AUTHOR("Trustonic Limited"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MobiCore driver");
gpl-2.0
portante/qemu
hw/exynos4210_pwm.c
61
12201
/* * Samsung exynos4210 Pulse Width Modulation Timer * * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd. * All rights reserved. * * Evgeny Voevodin <e.voevodin@samsung.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, see <http://www.gnu.org/licenses/>. */ #include "sysbus.h" #include "qemu-timer.h" #include "qemu-common.h" #include "ptimer.h" #include "exynos4210.h" //#define DEBUG_PWM #ifdef DEBUG_PWM #define DPRINTF(fmt, ...) \ do { fprintf(stdout, "PWM: [%24s:%5d] " fmt, __func__, __LINE__, \ ## __VA_ARGS__); } while (0) #else #define DPRINTF(fmt, ...) do {} while (0) #endif #define EXYNOS4210_PWM_TIMERS_NUM 5 #define EXYNOS4210_PWM_REG_MEM_SIZE 0x50 #define TCFG0 0x0000 #define TCFG1 0x0004 #define TCON 0x0008 #define TCNTB0 0x000C #define TCMPB0 0x0010 #define TCNTO0 0x0014 #define TCNTB1 0x0018 #define TCMPB1 0x001C #define TCNTO1 0x0020 #define TCNTB2 0x0024 #define TCMPB2 0x0028 #define TCNTO2 0x002C #define TCNTB3 0x0030 #define TCMPB3 0x0034 #define TCNTO3 0x0038 #define TCNTB4 0x003C #define TCNTO4 0x0040 #define TINT_CSTAT 0x0044 #define TCNTB(x) (0xC * (x)) #define TCMPB(x) (0xC * (x) + 1) #define TCNTO(x) (0xC * (x) + 2) #define GET_PRESCALER(reg, x) (((reg) & (0xFF << (8 * (x)))) >> 8 * (x)) #define GET_DIVIDER(reg, x) (1 << (((reg) & (0xF << (4 * (x)))) >> (4 * (x)))) /* * Attention! Timer4 doesn't have OUTPUT_INVERTER, * so Auto Reload bit is not accessible by macros! 
*/ #define TCON_TIMER_BASE(x) (((x) ? 1 : 0) * 4 + 4 * (x)) #define TCON_TIMER_START(x) (1 << (TCON_TIMER_BASE(x) + 0)) #define TCON_TIMER_MANUAL_UPD(x) (1 << (TCON_TIMER_BASE(x) + 1)) #define TCON_TIMER_OUTPUT_INV(x) (1 << (TCON_TIMER_BASE(x) + 2)) #define TCON_TIMER_AUTO_RELOAD(x) (1 << (TCON_TIMER_BASE(x) + 3)) #define TCON_TIMER4_AUTO_RELOAD (1 << 22) #define TINT_CSTAT_STATUS(x) (1 << (5 + (x))) #define TINT_CSTAT_ENABLE(x) (1 << (x)) /* timer struct */ typedef struct { uint32_t id; /* timer id */ qemu_irq irq; /* local timer irq */ uint32_t freq; /* timer frequency */ /* use ptimer.c to represent count down timer */ ptimer_state *ptimer; /* timer */ /* registers */ uint32_t reg_tcntb; /* counter register buffer */ uint32_t reg_tcmpb; /* compare register buffer */ struct Exynos4210PWMState *parent; } Exynos4210PWM; typedef struct Exynos4210PWMState { SysBusDevice busdev; MemoryRegion iomem; uint32_t reg_tcfg[2]; uint32_t reg_tcon; uint32_t reg_tint_cstat; Exynos4210PWM timer[EXYNOS4210_PWM_TIMERS_NUM]; } Exynos4210PWMState; /*** VMState ***/ static const VMStateDescription vmstate_exynos4210_pwm = { .name = "exynos4210.pwm.pwm", .version_id = 1, .minimum_version_id = 1, .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_UINT32(id, Exynos4210PWM), VMSTATE_UINT32(freq, Exynos4210PWM), VMSTATE_PTIMER(ptimer, Exynos4210PWM), VMSTATE_UINT32(reg_tcntb, Exynos4210PWM), VMSTATE_UINT32(reg_tcmpb, Exynos4210PWM), VMSTATE_END_OF_LIST() } }; static const VMStateDescription vmstate_exynos4210_pwm_state = { .name = "exynos4210.pwm", .version_id = 1, .minimum_version_id = 1, .minimum_version_id_old = 1, .fields = (VMStateField[]) { VMSTATE_UINT32_ARRAY(reg_tcfg, Exynos4210PWMState, 2), VMSTATE_UINT32(reg_tcon, Exynos4210PWMState), VMSTATE_UINT32(reg_tint_cstat, Exynos4210PWMState), VMSTATE_STRUCT_ARRAY(timer, Exynos4210PWMState, EXYNOS4210_PWM_TIMERS_NUM, 0, vmstate_exynos4210_pwm, Exynos4210PWM), VMSTATE_END_OF_LIST() } }; /* * PWM update frequency */ static 
void exynos4210_pwm_update_freq(Exynos4210PWMState *s, uint32_t id) { uint32_t freq; freq = s->timer[id].freq; if (id > 1) { s->timer[id].freq = 24000000 / ((GET_PRESCALER(s->reg_tcfg[0], 1) + 1) * (GET_DIVIDER(s->reg_tcfg[1], id))); } else { s->timer[id].freq = 24000000 / ((GET_PRESCALER(s->reg_tcfg[0], 0) + 1) * (GET_DIVIDER(s->reg_tcfg[1], id))); } if (freq != s->timer[id].freq) { ptimer_set_freq(s->timer[id].ptimer, s->timer[id].freq); DPRINTF("freq=%dHz\n", s->timer[id].freq); } } /* * Counter tick handler */ static void exynos4210_pwm_tick(void *opaque) { Exynos4210PWM *s = (Exynos4210PWM *)opaque; Exynos4210PWMState *p = (Exynos4210PWMState *)s->parent; uint32_t id = s->id; bool cmp; DPRINTF("timer %d tick\n", id); /* set irq status */ p->reg_tint_cstat |= TINT_CSTAT_STATUS(id); /* raise IRQ */ if (p->reg_tint_cstat & TINT_CSTAT_ENABLE(id)) { DPRINTF("timer %d IRQ\n", id); qemu_irq_raise(p->timer[id].irq); } /* reload timer */ if (id != 4) { cmp = p->reg_tcon & TCON_TIMER_AUTO_RELOAD(id); } else { cmp = p->reg_tcon & TCON_TIMER4_AUTO_RELOAD; } if (cmp) { DPRINTF("auto reload timer %d count to %x\n", id, p->timer[id].reg_tcntb); ptimer_set_count(p->timer[id].ptimer, p->timer[id].reg_tcntb); ptimer_run(p->timer[id].ptimer, 1); } else { /* stop timer, set status to STOP, see Basic Timer Operation */ p->reg_tcon = ~TCON_TIMER_START(id); ptimer_stop(p->timer[id].ptimer); } } /* * PWM Read */ static uint64_t exynos4210_pwm_read(void *opaque, target_phys_addr_t offset, unsigned size) { Exynos4210PWMState *s = (Exynos4210PWMState *)opaque; uint32_t value = 0; int index; switch (offset) { case TCFG0: case TCFG1: index = (offset - TCFG0) >> 2; value = s->reg_tcfg[index]; break; case TCON: value = s->reg_tcon; break; case TCNTB0: case TCNTB1: case TCNTB2: case TCNTB3: case TCNTB4: index = (offset - TCNTB0) / 0xC; value = s->timer[index].reg_tcntb; break; case TCMPB0: case TCMPB1: case TCMPB2: case TCMPB3: index = (offset - TCMPB0) / 0xC; value = 
s->timer[index].reg_tcmpb; break; case TCNTO0: case TCNTO1: case TCNTO2: case TCNTO3: case TCNTO4: index = (offset == TCNTO4) ? 4 : (offset - TCNTO0) / 0xC; value = ptimer_get_count(s->timer[index].ptimer); break; case TINT_CSTAT: value = s->reg_tint_cstat; break; default: fprintf(stderr, "[exynos4210.pwm: bad read offset " TARGET_FMT_plx "]\n", offset); break; } return value; } /* * PWM Write */ static void exynos4210_pwm_write(void *opaque, target_phys_addr_t offset, uint64_t value, unsigned size) { Exynos4210PWMState *s = (Exynos4210PWMState *)opaque; int index; uint32_t new_val; int i; switch (offset) { case TCFG0: case TCFG1: index = (offset - TCFG0) >> 2; s->reg_tcfg[index] = value; /* update timers frequencies */ for (i = 0; i < EXYNOS4210_PWM_TIMERS_NUM; i++) { exynos4210_pwm_update_freq(s, s->timer[i].id); } break; case TCON: for (i = 0; i < EXYNOS4210_PWM_TIMERS_NUM; i++) { if ((value & TCON_TIMER_MANUAL_UPD(i)) > (s->reg_tcon & TCON_TIMER_MANUAL_UPD(i))) { /* * TCNTB and TCMPB are loaded into TCNT and TCMP. * Update timers. 
*/ /* this will start timer to run, this ok, because * during processing start bit timer will be stopped * if needed */ ptimer_set_count(s->timer[i].ptimer, s->timer[i].reg_tcntb); DPRINTF("set timer %d count to %x\n", i, s->timer[i].reg_tcntb); } if ((value & TCON_TIMER_START(i)) > (s->reg_tcon & TCON_TIMER_START(i))) { /* changed to start */ ptimer_run(s->timer[i].ptimer, 1); DPRINTF("run timer %d\n", i); } if ((value & TCON_TIMER_START(i)) < (s->reg_tcon & TCON_TIMER_START(i))) { /* changed to stop */ ptimer_stop(s->timer[i].ptimer); DPRINTF("stop timer %d\n", i); } } s->reg_tcon = value; break; case TCNTB0: case TCNTB1: case TCNTB2: case TCNTB3: case TCNTB4: index = (offset - TCNTB0) / 0xC; s->timer[index].reg_tcntb = value; break; case TCMPB0: case TCMPB1: case TCMPB2: case TCMPB3: index = (offset - TCMPB0) / 0xC; s->timer[index].reg_tcmpb = value; break; case TINT_CSTAT: new_val = (s->reg_tint_cstat & 0x3E0) + (0x1F & value); new_val &= ~(0x3E0 & value); for (i = 0; i < EXYNOS4210_PWM_TIMERS_NUM; i++) { if ((new_val & TINT_CSTAT_STATUS(i)) < (s->reg_tint_cstat & TINT_CSTAT_STATUS(i))) { qemu_irq_lower(s->timer[i].irq); } } s->reg_tint_cstat = new_val; break; default: fprintf(stderr, "[exynos4210.pwm: bad write offset " TARGET_FMT_plx "]\n", offset); break; } } /* * Set default values to timer fields and registers */ static void exynos4210_pwm_reset(DeviceState *d) { Exynos4210PWMState *s = (Exynos4210PWMState *)d; int i; s->reg_tcfg[0] = 0x0101; s->reg_tcfg[1] = 0x0; s->reg_tcon = 0; s->reg_tint_cstat = 0; for (i = 0; i < EXYNOS4210_PWM_TIMERS_NUM; i++) { s->timer[i].reg_tcmpb = 0; s->timer[i].reg_tcntb = 0; exynos4210_pwm_update_freq(s, s->timer[i].id); ptimer_stop(s->timer[i].ptimer); } } static const MemoryRegionOps exynos4210_pwm_ops = { .read = exynos4210_pwm_read, .write = exynos4210_pwm_write, .endianness = DEVICE_NATIVE_ENDIAN, }; /* * PWM timer initialization */ static int exynos4210_pwm_init(SysBusDevice *dev) { Exynos4210PWMState *s = 
FROM_SYSBUS(Exynos4210PWMState, dev); int i; QEMUBH *bh; for (i = 0; i < EXYNOS4210_PWM_TIMERS_NUM; i++) { bh = qemu_bh_new(exynos4210_pwm_tick, &s->timer[i]); sysbus_init_irq(dev, &s->timer[i].irq); s->timer[i].ptimer = ptimer_init(bh); s->timer[i].id = i; s->timer[i].parent = s; } memory_region_init_io(&s->iomem, &exynos4210_pwm_ops, s, "exynos4210-pwm", EXYNOS4210_PWM_REG_MEM_SIZE); sysbus_init_mmio(dev, &s->iomem); return 0; } static void exynos4210_pwm_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); k->init = exynos4210_pwm_init; dc->reset = exynos4210_pwm_reset; dc->vmsd = &vmstate_exynos4210_pwm_state; } static TypeInfo exynos4210_pwm_info = { .name = "exynos4210.pwm", .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(Exynos4210PWMState), .class_init = exynos4210_pwm_class_init, }; static void exynos4210_pwm_register_types(void) { type_register_static(&exynos4210_pwm_info); } type_init(exynos4210_pwm_register_types)
gpl-2.0
jstotero/optimus_chic_kernel
arch/ia64/kernel/efi.c
573
37423
/* * Extensible Firmware Interface * * Based on Extensible Firmware Interface Specification version 0.9 * April 30, 1999 * * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999-2003 Hewlett-Packard Co. * David Mosberger-Tang <davidm@hpl.hp.com> * Stephane Eranian <eranian@hpl.hp.com> * (c) Copyright 2006 Hewlett-Packard Development Company, L.P. * Bjorn Helgaas <bjorn.helgaas@hp.com> * * All EFI Runtime Services are not implemented yet as EFI only * supports physical mode addressing on SoftSDV. This is to be fixed * in a future version. --drummond 1999-07-20 * * Implemented EFI runtime services and virtual mode calls. --davidm * * Goutham Rao: <goutham.rao@intel.com> * Skip non-WB memory and ignore empty memory ranges. */ #include <linux/module.h> #include <linux/bootmem.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/time.h> #include <linux/efi.h> #include <linux/kexec.h> #include <linux/mm.h> #include <asm/io.h> #include <asm/kregs.h> #include <asm/meminit.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/mca.h> #include <asm/tlbflush.h> #define EFI_DEBUG 0 extern efi_status_t efi_call_phys (void *, ...); struct efi efi; EXPORT_SYMBOL(efi); static efi_runtime_services_t *runtime; static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL; #define efi_call_virt(f, args...) 
(*(f))(args) #define STUB_GET_TIME(prefix, adjust_arg) \ static efi_status_t \ prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \ { \ struct ia64_fpreg fr[6]; \ efi_time_cap_t *atc = NULL; \ efi_status_t ret; \ \ if (tc) \ atc = adjust_arg(tc); \ ia64_save_scratch_fpregs(fr); \ ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), \ adjust_arg(tm), atc); \ ia64_load_scratch_fpregs(fr); \ return ret; \ } #define STUB_SET_TIME(prefix, adjust_arg) \ static efi_status_t \ prefix##_set_time (efi_time_t *tm) \ { \ struct ia64_fpreg fr[6]; \ efi_status_t ret; \ \ ia64_save_scratch_fpregs(fr); \ ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), \ adjust_arg(tm)); \ ia64_load_scratch_fpregs(fr); \ return ret; \ } #define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \ static efi_status_t \ prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, \ efi_time_t *tm) \ { \ struct ia64_fpreg fr[6]; \ efi_status_t ret; \ \ ia64_save_scratch_fpregs(fr); \ ret = efi_call_##prefix( \ (efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \ adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \ ia64_load_scratch_fpregs(fr); \ return ret; \ } #define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \ static efi_status_t \ prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \ { \ struct ia64_fpreg fr[6]; \ efi_time_t *atm = NULL; \ efi_status_t ret; \ \ if (tm) \ atm = adjust_arg(tm); \ ia64_save_scratch_fpregs(fr); \ ret = efi_call_##prefix( \ (efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \ enabled, atm); \ ia64_load_scratch_fpregs(fr); \ return ret; \ } #define STUB_GET_VARIABLE(prefix, adjust_arg) \ static efi_status_t \ prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \ unsigned long *data_size, void *data) \ { \ struct ia64_fpreg fr[6]; \ u32 *aattr = NULL; \ efi_status_t ret; \ \ if (attr) \ aattr = adjust_arg(attr); \ ia64_save_scratch_fpregs(fr); \ ret = efi_call_##prefix( \ 
(efi_get_variable_t *) __va(runtime->get_variable), \ adjust_arg(name), adjust_arg(vendor), aattr, \ adjust_arg(data_size), adjust_arg(data)); \ ia64_load_scratch_fpregs(fr); \ return ret; \ } #define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \ static efi_status_t \ prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \ efi_guid_t *vendor) \ { \ struct ia64_fpreg fr[6]; \ efi_status_t ret; \ \ ia64_save_scratch_fpregs(fr); \ ret = efi_call_##prefix( \ (efi_get_next_variable_t *) __va(runtime->get_next_variable), \ adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \ ia64_load_scratch_fpregs(fr); \ return ret; \ } #define STUB_SET_VARIABLE(prefix, adjust_arg) \ static efi_status_t \ prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, \ unsigned long attr, unsigned long data_size, \ void *data) \ { \ struct ia64_fpreg fr[6]; \ efi_status_t ret; \ \ ia64_save_scratch_fpregs(fr); \ ret = efi_call_##prefix( \ (efi_set_variable_t *) __va(runtime->set_variable), \ adjust_arg(name), adjust_arg(vendor), attr, data_size, \ adjust_arg(data)); \ ia64_load_scratch_fpregs(fr); \ return ret; \ } #define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \ static efi_status_t \ prefix##_get_next_high_mono_count (u32 *count) \ { \ struct ia64_fpreg fr[6]; \ efi_status_t ret; \ \ ia64_save_scratch_fpregs(fr); \ ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \ __va(runtime->get_next_high_mono_count), \ adjust_arg(count)); \ ia64_load_scratch_fpregs(fr); \ return ret; \ } #define STUB_RESET_SYSTEM(prefix, adjust_arg) \ static void \ prefix##_reset_system (int reset_type, efi_status_t status, \ unsigned long data_size, efi_char16_t *data) \ { \ struct ia64_fpreg fr[6]; \ efi_char16_t *adata = NULL; \ \ if (data) \ adata = adjust_arg(data); \ \ ia64_save_scratch_fpregs(fr); \ efi_call_##prefix( \ (efi_reset_system_t *) __va(runtime->reset_system), \ reset_type, status, data_size, adata); \ /* should not return, but just in case... 
*/ \ ia64_load_scratch_fpregs(fr); \ } #define phys_ptr(arg) ((__typeof__(arg)) ia64_tpa(arg)) STUB_GET_TIME(phys, phys_ptr) STUB_SET_TIME(phys, phys_ptr) STUB_GET_WAKEUP_TIME(phys, phys_ptr) STUB_SET_WAKEUP_TIME(phys, phys_ptr) STUB_GET_VARIABLE(phys, phys_ptr) STUB_GET_NEXT_VARIABLE(phys, phys_ptr) STUB_SET_VARIABLE(phys, phys_ptr) STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr) STUB_RESET_SYSTEM(phys, phys_ptr) #define id(arg) arg STUB_GET_TIME(virt, id) STUB_SET_TIME(virt, id) STUB_GET_WAKEUP_TIME(virt, id) STUB_SET_WAKEUP_TIME(virt, id) STUB_GET_VARIABLE(virt, id) STUB_GET_NEXT_VARIABLE(virt, id) STUB_SET_VARIABLE(virt, id) STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id) STUB_RESET_SYSTEM(virt, id) void efi_gettimeofday (struct timespec *ts) { efi_time_t tm; if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS) { memset(ts, 0, sizeof(*ts)); return; } ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second); ts->tv_nsec = tm.nanosecond; } static int is_memory_available (efi_memory_desc_t *md) { if (!(md->attribute & EFI_MEMORY_WB)) return 0; switch (md->type) { case EFI_LOADER_CODE: case EFI_LOADER_DATA: case EFI_BOOT_SERVICES_CODE: case EFI_BOOT_SERVICES_DATA: case EFI_CONVENTIONAL_MEMORY: return 1; } return 0; } typedef struct kern_memdesc { u64 attribute; u64 start; u64 num_pages; } kern_memdesc_t; static kern_memdesc_t *kern_memmap; #define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT) static inline u64 kmd_end(kern_memdesc_t *kmd) { return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT)); } static inline u64 efi_md_end(efi_memory_desc_t *md) { return (md->phys_addr + efi_md_size(md)); } static inline int efi_wb(efi_memory_desc_t *md) { return (md->attribute & EFI_MEMORY_WB); } static inline int efi_uc(efi_memory_desc_t *md) { return (md->attribute & EFI_MEMORY_UC); } static void walk (efi_freemem_callback_t callback, void *arg, u64 attr) { kern_memdesc_t *k; u64 start, end, voff; voff = (attr == EFI_MEMORY_WB) ? 
PAGE_OFFSET : __IA64_UNCACHED_OFFSET; for (k = kern_memmap; k->start != ~0UL; k++) { if (k->attribute != attr) continue; start = PAGE_ALIGN(k->start); end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK; if (start < end) if ((*callback)(start + voff, end + voff, arg) < 0) return; } } /* * Walk the EFI memory map and call CALLBACK once for each EFI memory * descriptor that has memory that is available for OS use. */ void efi_memmap_walk (efi_freemem_callback_t callback, void *arg) { walk(callback, arg, EFI_MEMORY_WB); } /* * Walk the EFI memory map and call CALLBACK once for each EFI memory * descriptor that has memory that is available for uncached allocator. */ void efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg) { walk(callback, arg, EFI_MEMORY_UC); } /* * Look for the PAL_CODE region reported by EFI and map it using an * ITR to enable safe PAL calls in virtual mode. See IA-64 Processor * Abstraction Layer chapter 11 in ADAG */ void * efi_get_pal_addr (void) { void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; u64 efi_desc_size; int pal_code_count = 0; u64 vaddr, mask; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; if (md->type != EFI_PAL_CODE) continue; if (++pal_code_count > 1) { printk(KERN_ERR "Too many EFI Pal Code memory ranges, " "dropped @ %llx\n", md->phys_addr); continue; } /* * The only ITLB entry in region 7 that is used is the one * installed by __start(). That entry covers a 64MB range. */ mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1); vaddr = PAGE_OFFSET + md->phys_addr; /* * We must check that the PAL mapping won't overlap with the * kernel mapping. * * PAL code is guaranteed to be aligned on a power of 2 between * 4k and 256KB and that only one ITR is needed to map it. 
This * implies that the PAL code is always aligned on its size, * i.e., the closest matching page size supported by the TLB. * Therefore PAL code is guaranteed never to cross a 64MB unless * it is bigger than 64MB (very unlikely!). So for now the * following test is enough to determine whether or not we need * a dedicated ITR for the PAL code. */ if ((vaddr & mask) == (KERNEL_START & mask)) { printk(KERN_INFO "%s: no need to install ITR for PAL code\n", __func__); continue; } if (efi_md_size(md) > IA64_GRANULE_SIZE) panic("Whoa! PAL code size bigger than a granule!"); #if EFI_DEBUG mask = ~((1 << IA64_GRANULE_SHIFT) - 1); printk(KERN_INFO "CPU %d: mapping PAL code " "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n", smp_processor_id(), md->phys_addr, md->phys_addr + efi_md_size(md), vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE); #endif return __va(md->phys_addr); } printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n", __func__); return NULL; } static u8 __init palo_checksum(u8 *buffer, u32 length) { u8 sum = 0; u8 *end = buffer + length; while (buffer < end) sum = (u8) (sum + *(buffer++)); return sum; } /* * Parse and handle PALO table which is published at: * http://www.dig64.org/home/DIG64_PALO_R1_0.pdf */ static void __init handle_palo(unsigned long palo_phys) { struct palo_table *palo = __va(palo_phys); u8 checksum; if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) { printk(KERN_INFO "PALO signature incorrect.\n"); return; } checksum = palo_checksum((u8 *)palo, palo->length); if (checksum) { printk(KERN_INFO "PALO checksum incorrect.\n"); return; } setup_ptcg_sem(palo->max_tlb_purges, NPTCG_FROM_PALO); } void efi_map_pal_code (void) { void *pal_vaddr = efi_get_pal_addr (); u64 psr; if (!pal_vaddr) return; /* * Cannot write to CRx with PSR.ic=1 */ psr = ia64_clear_ic(); ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr), pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)), IA64_GRANULE_SHIFT); 
paravirt_dv_serialize_data(); ia64_set_psr(psr); /* restore psr */ } void __init efi_init (void) { void *efi_map_start, *efi_map_end; efi_config_table_t *config_tables; efi_char16_t *c16; u64 efi_desc_size; char *cp, vendor[100] = "unknown"; int i; unsigned long palo_phys; /* * It's too early to be able to use the standard kernel command line * support... */ for (cp = boot_command_line; *cp; ) { if (memcmp(cp, "mem=", 4) == 0) { mem_limit = memparse(cp + 4, &cp); } else if (memcmp(cp, "max_addr=", 9) == 0) { max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp)); } else if (memcmp(cp, "min_addr=", 9) == 0) { min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp)); } else { while (*cp != ' ' && *cp) ++cp; while (*cp == ' ') ++cp; } } if (min_addr != 0UL) printk(KERN_INFO "Ignoring memory below %lluMB\n", min_addr >> 20); if (max_addr != ~0UL) printk(KERN_INFO "Ignoring memory above %lluMB\n", max_addr >> 20); efi.systab = __va(ia64_boot_param->efi_systab); /* * Verify the EFI Table */ if (efi.systab == NULL) panic("Whoa! Can't find EFI system table.\n"); if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) panic("Whoa! 
EFI system table signature incorrect\n"); if ((efi.systab->hdr.revision >> 16) == 0) printk(KERN_WARNING "Warning: EFI system table version " "%d.%02d, expected 1.00 or greater\n", efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff); config_tables = __va(efi.systab->tables); /* Show what we know for posterity */ c16 = __va(efi.systab->fw_vendor); if (c16) { for (i = 0;i < (int) sizeof(vendor) - 1 && *c16; ++i) vendor[i] = *c16++; vendor[i] = '\0'; } printk(KERN_INFO "EFI v%u.%.02u by %s:", efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor); efi.mps = EFI_INVALID_TABLE_ADDR; efi.acpi = EFI_INVALID_TABLE_ADDR; efi.acpi20 = EFI_INVALID_TABLE_ADDR; efi.smbios = EFI_INVALID_TABLE_ADDR; efi.sal_systab = EFI_INVALID_TABLE_ADDR; efi.boot_info = EFI_INVALID_TABLE_ADDR; efi.hcdp = EFI_INVALID_TABLE_ADDR; efi.uga = EFI_INVALID_TABLE_ADDR; palo_phys = EFI_INVALID_TABLE_ADDR; for (i = 0; i < (int) efi.systab->nr_tables; i++) { if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) { efi.mps = config_tables[i].table; printk(" MPS=0x%lx", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) { efi.acpi20 = config_tables[i].table; printk(" ACPI 2.0=0x%lx", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) { efi.acpi = config_tables[i].table; printk(" ACPI=0x%lx", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) { efi.smbios = config_tables[i].table; printk(" SMBIOS=0x%lx", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) { efi.sal_systab = config_tables[i].table; printk(" SALsystab=0x%lx", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) { efi.hcdp = config_tables[i].table; printk(" HCDP=0x%lx", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID) == 
0) { palo_phys = config_tables[i].table; printk(" PALO=0x%lx", config_tables[i].table); } } printk("\n"); if (palo_phys != EFI_INVALID_TABLE_ADDR) handle_palo(palo_phys); runtime = __va(efi.systab->runtime); efi.get_time = phys_get_time; efi.set_time = phys_set_time; efi.get_wakeup_time = phys_get_wakeup_time; efi.set_wakeup_time = phys_set_wakeup_time; efi.get_variable = phys_get_variable; efi.get_next_variable = phys_get_next_variable; efi.set_variable = phys_set_variable; efi.get_next_high_mono_count = phys_get_next_high_mono_count; efi.reset_system = phys_reset_system; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; #if EFI_DEBUG /* print EFI memory map: */ { efi_memory_desc_t *md; void *p; for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) { const char *unit; unsigned long size; md = p; size = md->num_pages << EFI_PAGE_SHIFT; if ((size >> 40) > 0) { size >>= 40; unit = "TB"; } else if ((size >> 30) > 0) { size >>= 30; unit = "GB"; } else if ((size >> 20) > 0) { size >>= 20; unit = "MB"; } else { size >>= 10; unit = "KB"; } printk("mem%02d: type=%2u, attr=0x%016lx, " "range=[0x%016lx-0x%016lx) (%4lu%s)\n", i, md->type, md->attribute, md->phys_addr, md->phys_addr + efi_md_size(md), size, unit); } } #endif efi_map_pal_code(); efi_enter_virtual_mode(); } void efi_enter_virtual_mode (void) { void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; efi_status_t status; u64 efi_desc_size; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; if (md->attribute & EFI_MEMORY_RUNTIME) { /* * Some descriptors have multiple bits set, so the * order of the tests is relevant. 
*/ if (md->attribute & EFI_MEMORY_WB) { md->virt_addr = (u64) __va(md->phys_addr); } else if (md->attribute & EFI_MEMORY_UC) { md->virt_addr = (u64) ioremap(md->phys_addr, 0); } else if (md->attribute & EFI_MEMORY_WC) { #if 0 md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WC | _PAGE_PL_0 | _PAGE_AR_RW)); #else printk(KERN_INFO "EFI_MEMORY_WC mapping\n"); md->virt_addr = (u64) ioremap(md->phys_addr, 0); #endif } else if (md->attribute & EFI_MEMORY_WT) { #if 0 md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WT | _PAGE_PL_0 | _PAGE_AR_RW)); #else printk(KERN_INFO "EFI_MEMORY_WT mapping\n"); md->virt_addr = (u64) ioremap(md->phys_addr, 0); #endif } } } status = efi_call_phys(__va(runtime->set_virtual_address_map), ia64_boot_param->efi_memmap_size, efi_desc_size, ia64_boot_param->efi_memdesc_version, ia64_boot_param->efi_memmap); if (status != EFI_SUCCESS) { printk(KERN_WARNING "warning: unable to switch EFI into " "virtual mode (status=%lu)\n", status); return; } /* * Now that EFI is in virtual mode, we call the EFI functions more * efficiently: */ efi.get_time = virt_get_time; efi.set_time = virt_set_time; efi.get_wakeup_time = virt_get_wakeup_time; efi.set_wakeup_time = virt_set_wakeup_time; efi.get_variable = virt_get_variable; efi.get_next_variable = virt_get_next_variable; efi.set_variable = virt_set_variable; efi.get_next_high_mono_count = virt_get_next_high_mono_count; efi.reset_system = virt_reset_system; } /* * Walk the EFI memory map looking for the I/O port range. There can only be * one entry of this type, other I/O port ranges should be described via ACPI. 
*/ u64 efi_get_iobase (void) { void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; u64 efi_desc_size; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) { if (md->attribute & EFI_MEMORY_UC) return md->phys_addr; } } return 0; } static struct kern_memdesc * kern_memory_descriptor (unsigned long phys_addr) { struct kern_memdesc *md; for (md = kern_memmap; md->start != ~0UL; md++) { if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT)) return md; } return NULL; } static efi_memory_desc_t * efi_memory_descriptor (unsigned long phys_addr) { void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; u64 efi_desc_size; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; if (phys_addr - md->phys_addr < efi_md_size(md)) return md; } return NULL; } static int efi_memmap_intersects (unsigned long phys_addr, unsigned long size) { void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; u64 efi_desc_size; unsigned long end; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; end = phys_addr + size; for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; if (md->phys_addr < end && efi_md_end(md) > phys_addr) return 1; } return 0; } u32 efi_mem_type (unsigned long phys_addr) { efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); if (md) return md->type; return 0; } u64 efi_mem_attributes (unsigned long phys_addr) { efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); if (md) return md->attribute; return 
0; } EXPORT_SYMBOL(efi_mem_attributes); u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size) { unsigned long end = phys_addr + size; efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); u64 attr; if (!md) return 0; /* * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells * the kernel that firmware needs this region mapped. */ attr = md->attribute & ~EFI_MEMORY_RUNTIME; do { unsigned long md_end = efi_md_end(md); if (end <= md_end) return attr; md = efi_memory_descriptor(md_end); if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr) return 0; } while (md); return 0; /* never reached */ } u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size) { unsigned long end = phys_addr + size; struct kern_memdesc *md; u64 attr; /* * This is a hack for ioremap calls before we set up kern_memmap. * Maybe we should do efi_memmap_init() earlier instead. */ if (!kern_memmap) { attr = efi_mem_attribute(phys_addr, size); if (attr & EFI_MEMORY_WB) return EFI_MEMORY_WB; return 0; } md = kern_memory_descriptor(phys_addr); if (!md) return 0; attr = md->attribute; do { unsigned long md_end = kmd_end(md); if (end <= md_end) return attr; md = kern_memory_descriptor(md_end); if (!md || md->attribute != attr) return 0; } while (md); return 0; /* never reached */ } EXPORT_SYMBOL(kern_mem_attribute); int valid_phys_addr_range (unsigned long phys_addr, unsigned long size) { u64 attr; /* * /dev/mem reads and writes use copy_to_user(), which implicitly * uses a granule-sized kernel identity mapping. It's really * only safe to do this for regions in kern_memmap. For more * details, see Documentation/ia64/aliasing.txt. 
*/ attr = kern_mem_attribute(phys_addr, size); if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC) return 1; return 0; } int valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size) { unsigned long phys_addr = pfn << PAGE_SHIFT; u64 attr; attr = efi_mem_attribute(phys_addr, size); /* * /dev/mem mmap uses normal user pages, so we don't need the entire * granule, but the entire region we're mapping must support the same * attribute. */ if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC) return 1; /* * Intel firmware doesn't tell us about all the MMIO regions, so * in general we have to allow mmap requests. But if EFI *does* * tell us about anything inside this region, we should deny it. * The user can always map a smaller region to avoid the overlap. */ if (efi_memmap_intersects(phys_addr, size)) return 0; return 1; } pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) { unsigned long phys_addr = pfn << PAGE_SHIFT; u64 attr; /* * For /dev/mem mmap, we use user mappings, but if the region is * in kern_memmap (and hence may be covered by a kernel mapping), * we must use the same attribute as the kernel mapping. */ attr = kern_mem_attribute(phys_addr, size); if (attr & EFI_MEMORY_WB) return pgprot_cacheable(vma_prot); else if (attr & EFI_MEMORY_UC) return pgprot_noncached(vma_prot); /* * Some chipsets don't support UC access to memory. If * WB is supported, we prefer that. 
*/ if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB) return pgprot_cacheable(vma_prot); return pgprot_noncached(vma_prot); } int __init efi_uart_console_only(void) { efi_status_t status; char *s, name[] = "ConOut"; efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID; efi_char16_t *utf16, name_utf16[32]; unsigned char data[1024]; unsigned long size = sizeof(data); struct efi_generic_dev_path *hdr, *end_addr; int uart = 0; /* Convert to UTF-16 */ utf16 = name_utf16; s = name; while (*s) *utf16++ = *s++ & 0x7f; *utf16 = 0; status = efi.get_variable(name_utf16, &guid, NULL, &size, data); if (status != EFI_SUCCESS) { printk(KERN_ERR "No EFI %s variable?\n", name); return 0; } hdr = (struct efi_generic_dev_path *) data; end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size); while (hdr < end_addr) { if (hdr->type == EFI_DEV_MSG && hdr->sub_type == EFI_DEV_MSG_UART) uart = 1; else if (hdr->type == EFI_DEV_END_PATH || hdr->type == EFI_DEV_END_PATH2) { if (!uart) return 0; if (hdr->sub_type == EFI_DEV_END_ENTIRE) return 1; uart = 0; } hdr = (struct efi_generic_dev_path *)((u8 *) hdr + hdr->length); } printk(KERN_ERR "Malformed %s value\n", name); return 0; } /* * Look for the first granule aligned memory descriptor memory * that is big enough to hold EFI memory map. Make sure this * descriptor is atleast granule sized so it does not get trimmed */ struct kern_memdesc * find_memmap_space (void) { u64 contig_low=0, contig_high=0; u64 as = 0, ae; void *efi_map_start, *efi_map_end, *p, *q; efi_memory_desc_t *md, *pmd = NULL, *check_md; u64 space_needed, efi_desc_size; unsigned long total_mem = 0; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; /* * Worst case: we need 3 kernel descriptors for each efi descriptor * (if every entry has a WB part in the middle, and UC head and tail), * plus one for the end marker. 
*/ space_needed = sizeof(kern_memdesc_t) * (3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1); for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) { md = p; if (!efi_wb(md)) { continue; } if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) { contig_low = GRANULEROUNDUP(md->phys_addr); contig_high = efi_md_end(md); for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) { check_md = q; if (!efi_wb(check_md)) break; if (contig_high != check_md->phys_addr) break; contig_high = efi_md_end(check_md); } contig_high = GRANULEROUNDDOWN(contig_high); } if (!is_memory_available(md) || md->type == EFI_LOADER_DATA) continue; /* Round ends inward to granule boundaries */ as = max(contig_low, md->phys_addr); ae = min(contig_high, efi_md_end(md)); /* keep within max_addr= and min_addr= command line arg */ as = max(as, min_addr); ae = min(ae, max_addr); if (ae <= as) continue; /* avoid going over mem= command line arg */ if (total_mem + (ae - as) > mem_limit) ae -= total_mem + (ae - as) - mem_limit; if (ae <= as) continue; if (ae - as > space_needed) break; } if (p >= efi_map_end) panic("Can't allocate space for kernel memory descriptors"); return __va(as); } /* * Walk the EFI memory map and gather all memory available for kernel * to use. We can allocate partial granules only if the unavailable * parts exist, and are WB. 
*/ unsigned long efi_memmap_init(u64 *s, u64 *e) { struct kern_memdesc *k, *prev = NULL; u64 contig_low=0, contig_high=0; u64 as, ae, lim; void *efi_map_start, *efi_map_end, *p, *q; efi_memory_desc_t *md, *pmd = NULL, *check_md; u64 efi_desc_size; unsigned long total_mem = 0; k = kern_memmap = find_memmap_space(); efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) { md = p; if (!efi_wb(md)) { if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY || md->type == EFI_BOOT_SERVICES_DATA)) { k->attribute = EFI_MEMORY_UC; k->start = md->phys_addr; k->num_pages = md->num_pages; k++; } continue; } if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) { contig_low = GRANULEROUNDUP(md->phys_addr); contig_high = efi_md_end(md); for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) { check_md = q; if (!efi_wb(check_md)) break; if (contig_high != check_md->phys_addr) break; contig_high = efi_md_end(check_md); } contig_high = GRANULEROUNDDOWN(contig_high); } if (!is_memory_available(md)) continue; #ifdef CONFIG_CRASH_DUMP /* saved_max_pfn should ignore max_addr= command line arg */ if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT)) saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT); #endif /* * Round ends inward to granule boundaries * Give trimmings to uncached allocator */ if (md->phys_addr < contig_low) { lim = min(efi_md_end(md), contig_low); if (efi_uc(md)) { if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC && kmd_end(k-1) == md->phys_addr) { (k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT; } else { k->attribute = EFI_MEMORY_UC; k->start = md->phys_addr; k->num_pages = (lim - md->phys_addr) >> EFI_PAGE_SHIFT; k++; } } as = contig_low; } else as = md->phys_addr; if (efi_md_end(md) > contig_high) { lim = max(md->phys_addr, contig_high); if 
(efi_uc(md)) { if (lim == md->phys_addr && k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC && kmd_end(k-1) == md->phys_addr) { (k-1)->num_pages += md->num_pages; } else { k->attribute = EFI_MEMORY_UC; k->start = lim; k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT; k++; } } ae = contig_high; } else ae = efi_md_end(md); /* keep within max_addr= and min_addr= command line arg */ as = max(as, min_addr); ae = min(ae, max_addr); if (ae <= as) continue; /* avoid going over mem= command line arg */ if (total_mem + (ae - as) > mem_limit) ae -= total_mem + (ae - as) - mem_limit; if (ae <= as) continue; if (prev && kmd_end(prev) == md->phys_addr) { prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT; total_mem += ae - as; continue; } k->attribute = EFI_MEMORY_WB; k->start = as; k->num_pages = (ae - as) >> EFI_PAGE_SHIFT; total_mem += ae - as; prev = k++; } k->start = ~0L; /* end-marker */ /* reserve the memory we are using for kern_memmap */ *s = (u64)kern_memmap; *e = (u64)++k; return total_mem; } void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource) { struct resource *res; void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; u64 efi_desc_size; char *name; unsigned long flags; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; res = NULL; for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; if (md->num_pages == 0) /* should not happen */ continue; flags = IORESOURCE_MEM | IORESOURCE_BUSY; switch (md->type) { case EFI_MEMORY_MAPPED_IO: case EFI_MEMORY_MAPPED_IO_PORT_SPACE: continue; case EFI_LOADER_CODE: case EFI_LOADER_DATA: case EFI_BOOT_SERVICES_DATA: case EFI_BOOT_SERVICES_CODE: case EFI_CONVENTIONAL_MEMORY: if (md->attribute & EFI_MEMORY_WP) { name = "System ROM"; flags |= IORESOURCE_READONLY; } else if (md->attribute == EFI_MEMORY_UC) name = 
"Uncached RAM"; else name = "System RAM"; break; case EFI_ACPI_MEMORY_NVS: name = "ACPI Non-volatile Storage"; break; case EFI_UNUSABLE_MEMORY: name = "reserved"; flags |= IORESOURCE_DISABLED; break; case EFI_RESERVED_TYPE: case EFI_RUNTIME_SERVICES_CODE: case EFI_RUNTIME_SERVICES_DATA: case EFI_ACPI_RECLAIM_MEMORY: default: name = "reserved"; break; } if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) { printk(KERN_ERR "failed to allocate resource for iomem\n"); return; } res->name = name; res->start = md->phys_addr; res->end = md->phys_addr + efi_md_size(md) - 1; res->flags = flags; if (insert_resource(&iomem_resource, res) < 0) kfree(res); else { /* * We don't know which region contains * kernel data so we try it repeatedly and * let the resource manager test it. */ insert_resource(res, code_resource); insert_resource(res, data_resource); insert_resource(res, bss_resource); #ifdef CONFIG_KEXEC insert_resource(res, &efi_memmap_res); insert_resource(res, &boot_param_res); if (crashk_res.end > crashk_res.start) insert_resource(res, &crashk_res); #endif } } } #ifdef CONFIG_KEXEC /* find a block of memory aligned to 64M exclude reserved regions rsvd_regions are sorted */ unsigned long __init kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n) { int i; u64 start, end; u64 alignment = 1UL << _PAGE_SIZE_64M; void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; u64 efi_desc_size; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; if (!efi_wb(md)) continue; start = ALIGN(md->phys_addr, alignment); end = efi_md_end(md); for (i = 0; i < n; i++) { if (__pa(r[i].start) >= start && __pa(r[i].end) < end) { if (__pa(r[i].start) > start + size) return start; start = ALIGN(__pa(r[i].end), alignment); if (i < n-1 && __pa(r[i+1].start) < start + size) 
continue; else break; } } if (end > start + size) return start; } printk(KERN_WARNING "Cannot reserve 0x%lx byte of memory for crashdump\n", size); return ~0UL; } #endif #ifdef CONFIG_CRASH_DUMP /* locate the size find a the descriptor at a certain address */ unsigned long __init vmcore_find_descriptor_size (unsigned long address) { void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; u64 efi_desc_size; unsigned long ret = 0; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; if (efi_wb(md) && md->type == EFI_LOADER_DATA && md->phys_addr == address) { ret = efi_md_size(md); break; } } if (ret == 0) printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n"); return ret; } #endif
gpl-2.0
dvlemplgk/source
target/linux/mcs814x/files-3.18/drivers/net/phy/mcs814x.c
573
1677
/* * Driver for Moschip MCS814x internal PHY * * Copyright (c) 2012 Florian Fainelli <florian@openwrt.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/phy.h> MODULE_DESCRIPTION("Moschip MCS814x PHY driver"); MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); MODULE_LICENSE("GPL"); /* Nothing special about this PHY but its OUI (O) */ static struct phy_driver mcs8140_driver = { .phy_id = 0, .name = "Moschip MCS8140", .phy_id_mask = 0x02, .features = PHY_BASIC_FEATURES, .config_aneg = &genphy_config_aneg, .read_status = &genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, .driver = { .owner = THIS_MODULE,}, }; static int __init mcs814x_phy_init(void) { return phy_driver_register(&mcs8140_driver); } static void __exit mcs814x_phy_exit(void) { phy_driver_unregister(&mcs8140_driver); } module_init(mcs814x_phy_init); module_exit(mcs814x_phy_exit); static struct mdio_device_id __maybe_unused mcs814x_phy_tbl[] = { { 0x0, 0x0ffffff0 }, { } }; MODULE_DEVICE_TABLE(mdio, mcs814x_phy_tbl);
gpl-2.0
deepsrd/android_kernel_nx507j
drivers/input/touchscreen/synaptics_i2c_rmi4.c
829
99299
/* * Synaptics RMI4 touchscreen driver * * Copyright (C) 2012 Synaptics Incorporated * * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com> * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com> * Copyright (c) 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/input/synaptics_dsx.h> #include <linux/of_gpio.h> #include "synaptics_i2c_rmi4.h" #include <linux/input/mt.h> #define DRIVER_NAME "synaptics_rmi4_i2c" #define INPUT_PHYS_NAME "synaptics_rmi4_i2c/input0" #define DEBUGFS_DIR_NAME "ts_debug" #define RESET_DELAY 100 #define TYPE_B_PROTOCOL #define NO_0D_WHILE_2D /* #define REPORT_2D_Z */ #define REPORT_2D_W #define RPT_TYPE (1 << 0) #define RPT_X_LSB (1 << 1) #define RPT_X_MSB (1 << 2) #define RPT_Y_LSB (1 << 3) #define RPT_Y_MSB (1 << 4) #define RPT_Z (1 << 5) #define RPT_WX (1 << 6) #define RPT_WY (1 << 7) #define RPT_DEFAULT (RPT_TYPE | RPT_X_LSB | RPT_X_MSB | RPT_Y_LSB | RPT_Y_MSB) #define EXP_FN_DET_INTERVAL 1000 /* ms */ #define POLLING_PERIOD 1 /* ms */ #define SYN_I2C_RETRY_TIMES 10 #define MAX_ABS_MT_TOUCH_MAJOR 15 #define F01_STD_QUERY_LEN 21 #define F01_BUID_ID_OFFSET 18 #define F11_STD_QUERY_LEN 9 #define F11_STD_CTRL_LEN 10 #define F11_STD_DATA_LEN 12 #define NORMAL_OPERATION 0 #define 
SENSOR_SLEEP 1 #define NO_SLEEP_OFF 0 #define NO_SLEEP_ON 1 enum device_status { STATUS_NO_ERROR = 0x00, STATUS_RESET_OCCURED = 0x01, STATUS_INVALID_CONFIG = 0x02, STATUS_DEVICE_FAILURE = 0x03, STATUS_CONFIG_CRC_FAILURE = 0x04, STATUS_FIRMWARE_CRC_FAILURE = 0x05, STATUS_CRC_IN_PROGRESS = 0x06, STATUS_UNCONFIGURED = 0x80 }; #define DEVICE_CONFIGURED 0x1 #define RMI4_VTG_MIN_UV 2700000 #define RMI4_VTG_MAX_UV 3300000 #define RMI4_ACTIVE_LOAD_UA 15000 #define RMI4_LPM_LOAD_UA 10 #define RMI4_I2C_VTG_MIN_UV 1800000 #define RMI4_I2C_VTG_MAX_UV 1800000 #define RMI4_I2C_LOAD_UA 10000 #define RMI4_I2C_LPM_LOAD_UA 10 #define RMI4_GPIO_SLEEP_LOW_US 10000 #define F12_FINGERS_TO_SUPPORT 10 #define MAX_F11_TOUCH_WIDTH 15 #define RMI4_COORDS_ARR_SIZE 4 static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data, unsigned short addr, unsigned char *data, unsigned short length); static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data, unsigned short addr, unsigned char *data, unsigned short length); static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data); static void synaptics_rmi4_sensor_wake(struct synaptics_rmi4_data *rmi4_data); static int synaptics_rmi4_check_configuration(struct synaptics_rmi4_data *rmi4_data); static int synaptics_rmi4_suspend(struct device *dev); static int synaptics_rmi4_resume(struct device *dev); #ifdef CONFIG_PM static ssize_t synaptics_rmi4_full_pm_cycle_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_full_pm_cycle_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); #if defined(CONFIG_FB) static int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data); #elif defined(CONFIG_HAS_EARLYSUSPEND) static void synaptics_rmi4_early_suspend(struct early_suspend *h); static void synaptics_rmi4_late_resume(struct early_suspend *h); #endif #endif static ssize_t 
synaptics_rmi4_f01_reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); static ssize_t synaptics_rmi4_flipx_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_flipx_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); static ssize_t synaptics_rmi4_flipy_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_flipy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); struct synaptics_rmi4_f01_device_status { union { struct { unsigned char status_code:4; unsigned char reserved:2; unsigned char flash_prog:1; unsigned char unconfigured:1; } __packed; unsigned char data[1]; }; }; struct synaptics_rmi4_f01_device_control_0 { union { struct { unsigned char sleep_mode:2; unsigned char nosleep:1; unsigned char reserved:2; unsigned char charger_input:1; unsigned char report_rate:1; unsigned char configured:1; } __packed; unsigned char data[1]; }; }; struct synaptics_rmi4_f12_query_5 { union { struct { unsigned char size_of_query6; struct { unsigned char ctrl0_is_present:1; unsigned char ctrl1_is_present:1; unsigned char ctrl2_is_present:1; unsigned char ctrl3_is_present:1; unsigned char ctrl4_is_present:1; unsigned char ctrl5_is_present:1; unsigned char ctrl6_is_present:1; unsigned char ctrl7_is_present:1; } 
__packed; struct { unsigned char ctrl8_is_present:1; unsigned char ctrl9_is_present:1; unsigned char ctrl10_is_present:1; unsigned char ctrl11_is_present:1; unsigned char ctrl12_is_present:1; unsigned char ctrl13_is_present:1; unsigned char ctrl14_is_present:1; unsigned char ctrl15_is_present:1; } __packed; struct { unsigned char ctrl16_is_present:1; unsigned char ctrl17_is_present:1; unsigned char ctrl18_is_present:1; unsigned char ctrl19_is_present:1; unsigned char ctrl20_is_present:1; unsigned char ctrl21_is_present:1; unsigned char ctrl22_is_present:1; unsigned char ctrl23_is_present:1; } __packed; struct { unsigned char ctrl24_is_present:1; unsigned char ctrl25_is_present:1; unsigned char ctrl26_is_present:1; unsigned char ctrl27_is_present:1; unsigned char ctrl28_is_present:1; unsigned char ctrl29_is_present:1; unsigned char ctrl30_is_present:1; unsigned char ctrl31_is_present:1; } __packed; }; unsigned char data[5]; }; }; struct synaptics_rmi4_f12_query_8 { union { struct { unsigned char size_of_query9; struct { unsigned char data0_is_present:1; unsigned char data1_is_present:1; unsigned char data2_is_present:1; unsigned char data3_is_present:1; unsigned char data4_is_present:1; unsigned char data5_is_present:1; unsigned char data6_is_present:1; unsigned char data7_is_present:1; } __packed; struct { unsigned char data8_is_present:1; unsigned char data9_is_present:1; unsigned char data10_is_present:1; unsigned char data11_is_present:1; unsigned char data12_is_present:1; unsigned char data13_is_present:1; unsigned char data14_is_present:1; unsigned char data15_is_present:1; } __packed; }; unsigned char data[3]; }; }; struct synaptics_rmi4_f12_ctrl_8 { union { struct { unsigned char max_x_coord_lsb; unsigned char max_x_coord_msb; unsigned char max_y_coord_lsb; unsigned char max_y_coord_msb; unsigned char rx_pitch_lsb; unsigned char rx_pitch_msb; unsigned char tx_pitch_lsb; unsigned char tx_pitch_msb; unsigned char low_rx_clip; unsigned char high_rx_clip; 
unsigned char low_tx_clip; unsigned char high_tx_clip; unsigned char num_of_rx; unsigned char num_of_tx; }; unsigned char data[14]; }; }; struct synaptics_rmi4_f12_ctrl_23 { union { struct { unsigned char obj_type_enable; unsigned char max_reported_objects; }; unsigned char data[2]; }; }; struct synaptics_rmi4_f12_finger_data { unsigned char object_type_and_status; unsigned char x_lsb; unsigned char x_msb; unsigned char y_lsb; unsigned char y_msb; #ifdef REPORT_2D_Z unsigned char z; #endif #ifdef REPORT_2D_W unsigned char wx; unsigned char wy; #endif }; struct synaptics_rmi4_f1a_query { union { struct { unsigned char max_button_count:3; unsigned char reserved:5; unsigned char has_general_control:1; unsigned char has_interrupt_enable:1; unsigned char has_multibutton_select:1; unsigned char has_tx_rx_map:1; unsigned char has_perbutton_threshold:1; unsigned char has_release_threshold:1; unsigned char has_strongestbtn_hysteresis:1; unsigned char has_filter_strength:1; } __packed; unsigned char data[2]; }; }; struct synaptics_rmi4_f1a_control_0 { union { struct { unsigned char multibutton_report:2; unsigned char filter_mode:2; unsigned char reserved:4; } __packed; unsigned char data[1]; }; }; struct synaptics_rmi4_f1a_control_3_4 { unsigned char transmitterbutton; unsigned char receiverbutton; }; struct synaptics_rmi4_f1a_control { struct synaptics_rmi4_f1a_control_0 general_control; unsigned char *button_int_enable; unsigned char *multi_button; struct synaptics_rmi4_f1a_control_3_4 *electrode_map; unsigned char *button_threshold; unsigned char button_release_threshold; unsigned char strongest_button_hysteresis; unsigned char filter_strength; }; struct synaptics_rmi4_f1a_handle { int button_bitmask_size; unsigned char button_count; unsigned char valid_button_count; unsigned char *button_data_buffer; unsigned char *button_map; struct synaptics_rmi4_f1a_query button_query; struct synaptics_rmi4_f1a_control button_control; }; struct synaptics_rmi4_f12_extra_data { unsigned 
char data1_offset;	/* offset of the data1 (finger data) register */
	unsigned char data15_offset;	/* offset of data15 (object attention), if present */
	unsigned char data15_size;
	unsigned char data15_data[(F12_FINGERS_TO_SUPPORT + 7) / 8];
};

/*
 * Descriptor for an expansion Function module (e.g. rmi_dev) that hooks
 * into this driver: init/remove callbacks plus an optional attention
 * handler invoked from the interrupt path.
 */
struct synaptics_rmi4_exp_fn {
	enum exp_fn fn_type;
	bool inserted;
	int (*func_init)(struct synaptics_rmi4_data *rmi4_data);
	void (*func_remove)(struct synaptics_rmi4_data *rmi4_data);
	void (*func_attn)(struct synaptics_rmi4_data *rmi4_data,
			unsigned char intr_mask);
	struct list_head link;
};

/* sysfs attributes exposed on the input device's parent */
static struct device_attribute attrs[] = {
#ifdef CONFIG_PM
	__ATTR(full_pm_cycle, (S_IRUGO | S_IWUSR | S_IWGRP),
			synaptics_rmi4_full_pm_cycle_show,
			synaptics_rmi4_full_pm_cycle_store),
#endif
	__ATTR(reset, S_IWUSR | S_IWGRP,
			NULL,
			synaptics_rmi4_f01_reset_store),
	__ATTR(productinfo, S_IRUGO,
			synaptics_rmi4_f01_productinfo_show,
			synaptics_rmi4_store_error),
	__ATTR(buildid, S_IRUGO,
			synaptics_rmi4_f01_buildid_show,
			synaptics_rmi4_store_error),
	__ATTR(flashprog, S_IRUGO,
			synaptics_rmi4_f01_flashprog_show,
			synaptics_rmi4_store_error),
	__ATTR(0dbutton, (S_IRUGO | S_IWUSR | S_IWGRP),
			synaptics_rmi4_0dbutton_show,
			synaptics_rmi4_0dbutton_store),
	__ATTR(flipx, (S_IRUGO | S_IWUSR | S_IWGRP),
			synaptics_rmi4_flipx_show,
			synaptics_rmi4_flipx_store),
	__ATTR(flipy, (S_IRUGO | S_IWUSR | S_IWGRP),
			synaptics_rmi4_flipy_show,
			synaptics_rmi4_flipy_store),
};

/* Registry of expansion Function modules (guarded by exp_fn_list_mutex) */
static bool exp_fn_inited;
static struct mutex exp_fn_list_mutex;
static struct list_head exp_fn_list;

/* debugfs hook: writing non-zero suspends the device, zero resumes it */
static int synaptics_rmi4_debug_suspend_set(void *_data, u64 val)
{
	struct synaptics_rmi4_data *rmi4_data = _data;

	if (val)
		synaptics_rmi4_suspend(&rmi4_data->input_dev->dev);
	else
		synaptics_rmi4_resume(&rmi4_data->input_dev->dev);

	return 0;
}

/* debugfs hook: report the current suspended state */
static int synaptics_rmi4_debug_suspend_get(void *_data, u64 *val)
{
	struct synaptics_rmi4_data *rmi4_data = _data;

	*val = rmi4_data->suspended;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_suspend_fops, synaptics_rmi4_debug_suspend_get,
			synaptics_rmi4_debug_suspend_set, "%lld\n");

#ifdef CONFIG_PM
/* sysfs: show whether full power-down is used across suspend cycles */
static ssize_t synaptics_rmi4_full_pm_cycle_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			rmi4_data->full_pm_cycle);
}

/* sysfs: any value > 0 enables full power-down, 0 disables it */
static ssize_t synaptics_rmi4_full_pm_cycle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned int input;
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);

	if (sscanf(buf, "%u", &input) != 1)
		return -EINVAL;

	rmi4_data->full_pm_cycle = input > 0 ? 1 : 0;

	return count;
}

#ifdef CONFIG_FB
/* Tie suspend/resume to framebuffer blank/unblank notifications */
static void configure_sleep(struct synaptics_rmi4_data *rmi4_data)
{
	int retval = 0;

	rmi4_data->fb_notif.notifier_call = fb_notifier_callback;

	retval = fb_register_client(&rmi4_data->fb_notif);
	if (retval)
		dev_err(&rmi4_data->i2c_client->dev,
			"Unable to register fb_notifier: %d\n",
			retval);
	return;
}
#elif defined CONFIG_HAS_EARLYSUSPEND
/* Tie suspend/resume to the early-suspend framework */
static void configure_sleep(struct synaptics_rmi4_data *rmi4_data)
{
	rmi4_data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
	rmi4_data->early_suspend.suspend = synaptics_rmi4_early_suspend;
	rmi4_data->early_suspend.resume = synaptics_rmi4_late_resume;
	register_early_suspend(&rmi4_data->early_suspend);

	return;
}
#else
/* No display-driven sleep mechanism available */
static void configure_sleep(struct synaptics_rmi4_data *rmi4_data)
{
	return;
}
#endif
#else
/* !CONFIG_PM: sleep configuration is a no-op */
static void configure_sleep(struct synaptics_rmi4_data *rmi4_data)
{
	return;
}
#endif

/* sysfs: writing exactly "1" issues a device reset */
static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int retval;
	unsigned int reset;
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);

	if (sscanf(buf, "%u", &reset) != 1)
		return -EINVAL;

	if (reset != 1)
		return -EINVAL;

	retval = synaptics_rmi4_reset_device(rmi4_data);
	if (retval < 0) {
		dev_err(dev,
			"%s: Failed to issue reset command, error = %d\n",
			__func__, retval);
		return retval;
	}

	return count;
}

/* sysfs: show the two F01 product info bytes */
static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct
synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "0x%02x 0x%02x\n",
			(rmi4_data->rmi4_mod_info.product_info[0]),
			(rmi4_data->rmi4_mod_info.product_info[1]));
}

/* sysfs: show the firmware build ID (24-bit little-endian value) */
static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned int build_id;
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
	struct synaptics_rmi4_device_info *rmi;

	rmi = &(rmi4_data->rmi4_mod_info);

	/* Assemble the 3-byte build ID, low byte first */
	build_id = (unsigned int)rmi->build_id[0] +
			(unsigned int)rmi->build_id[1] * 0x100 +
			(unsigned int)rmi->build_id[2] * 0x10000;

	return snprintf(buf, PAGE_SIZE, "%u\n", build_id);
}

/* sysfs: show whether the sensor is in flash programming (bootloader) mode */
static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int retval;
	struct synaptics_rmi4_f01_device_status device_status;
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			rmi4_data->f01_data_base_addr,
			device_status.data,
			sizeof(device_status.data));
	if (retval < 0) {
		dev_err(dev,
			"%s: Failed to read device status, error = %d\n",
			__func__, retval);
		return retval;
	}

	return snprintf(buf, PAGE_SIZE, "%u\n",
			device_status.flash_prog);
}

/* sysfs: show whether 0D (capacitive) buttons are enabled */
static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			rmi4_data->button_0d_enabled);
}

/*
 * sysfs: enable/disable 0D buttons by toggling the F1A bit in the F01
 * interrupt enable register(s); continues on the following lines.
 */
static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int retval;
	unsigned int input;
	unsigned char ii;
	unsigned char intr_enable;
	struct synaptics_rmi4_fn *fhandler;
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
	struct synaptics_rmi4_device_info *rmi;

	rmi = &(rmi4_data->rmi4_mod_info);

	if (sscanf(buf, "%u", &input) != 1)
		return -EINVAL;

	/* Normalize to boolean */
	input = input > 0 ?
1 : 0;

	if (rmi4_data->button_0d_enabled == input)
		return count;

	mutex_lock(&rmi->support_fn_list_mutex);
	if (!list_empty(&rmi->support_fn_list)) {
		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) {
				ii = fhandler->intr_reg_num;

				/* Read-modify-write the F01 interrupt enable reg */
				retval = synaptics_rmi4_i2c_read(rmi4_data,
						rmi4_data->f01_ctrl_base_addr +
						1 + ii,
						&intr_enable,
						sizeof(intr_enable));
				if (retval < 0)
					goto exit;

				if (input == 1)
					intr_enable |= fhandler->intr_mask;
				else
					intr_enable &= ~fhandler->intr_mask;

				retval = synaptics_rmi4_i2c_write(rmi4_data,
						rmi4_data->f01_ctrl_base_addr +
						1 + ii,
						&intr_enable,
						sizeof(intr_enable));
				if (retval < 0)
					goto exit;
			}
		}
	}
	mutex_unlock(&rmi->support_fn_list_mutex);
	rmi4_data->button_0d_enabled = input;

	return count;

exit:
	mutex_unlock(&rmi->support_fn_list_mutex);
	return retval;
}

/* sysfs: show X axis inversion setting */
static ssize_t synaptics_rmi4_flipx_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			rmi4_data->flip_x);
}

/* sysfs: any value > 0 enables X axis inversion */
static ssize_t synaptics_rmi4_flipx_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned int input;
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);

	if (sscanf(buf, "%u", &input) != 1)
		return -EINVAL;

	rmi4_data->flip_x = input > 0 ? 1 : 0;

	return count;
}

/* sysfs: show Y axis inversion setting */
static ssize_t synaptics_rmi4_flipy_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			rmi4_data->flip_y);
}

/* sysfs: any value > 0 enables Y axis inversion */
static ssize_t synaptics_rmi4_flipy_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned int input;
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);

	if (sscanf(buf, "%u", &input) != 1)
		return -EINVAL;

	rmi4_data->flip_y = input > 0 ?
1 : 0;

	return count;
}

/**
 * synaptics_rmi4_set_page()
 *
 * Called by synaptics_rmi4_i2c_read() and synaptics_rmi4_i2c_write().
 *
 * This function writes to the page select register to switch to the
 * assigned page. The page is cached in rmi4_data->current_page so the
 * register is only written when the page actually changes.
 */
static int synaptics_rmi4_set_page(struct synaptics_rmi4_data *rmi4_data,
		unsigned int address)
{
	int retval = 0;
	unsigned char retry;
	unsigned char buf[PAGE_SELECT_LEN];
	unsigned char page;
	struct i2c_client *i2c = rmi4_data->i2c_client;

	/* Upper address byte selects the register page */
	page = ((address >> 8) & MASK_8BIT);
	if (page != rmi4_data->current_page) {
		/*
		 * NOTE(review): relies on MASK_8BIT (0xFF) doubling as the
		 * RMI4 page select register address - confirm against the
		 * register map if this is ever changed.
		 */
		buf[0] = MASK_8BIT;
		buf[1] = page;
		for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
			retval = i2c_master_send(i2c, buf, PAGE_SELECT_LEN);
			if (retval != PAGE_SELECT_LEN) {
				dev_err(&i2c->dev,
					"%s: I2C retry %d\n",
					__func__, retry + 1);
				msleep(20);
			} else {
				rmi4_data->current_page = page;
				break;
			}
		}
	} else
		return PAGE_SELECT_LEN;

	/* Success is reported as the number of bytes written */
	return (retval == PAGE_SELECT_LEN) ? retval : -EIO;
}

/**
 * synaptics_rmi4_i2c_read()
 *
 * Called by various functions in this driver, and also exported to
 * other expansion Function modules such as rmi_dev.
 *
 * This function reads data of an arbitrary length from the sensor,
 * starting from an assigned register address of the sensor, via I2C
 * with a retry mechanism.
*/
static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
		unsigned short addr, unsigned char *data, unsigned short length)
{
	int retval;
	unsigned char retry;
	unsigned char buf;
	/* Write the low address byte, then repeated-start read 'length' bytes */
	struct i2c_msg msg[] = {
		{
			.addr = rmi4_data->i2c_client->addr,
			.flags = 0,
			.len = 1,
			.buf = &buf,
		},
		{
			.addr = rmi4_data->i2c_client->addr,
			.flags = I2C_M_RD,
			.len = length,
			.buf = data,
		},
	};

	/* High address byte is handled by the page select below */
	buf = addr & MASK_8BIT;

	mutex_lock(&(rmi4_data->rmi4_io_ctrl_mutex));

	retval = synaptics_rmi4_set_page(rmi4_data, addr);
	if (retval != PAGE_SELECT_LEN)
		goto exit;

	for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
		if (i2c_transfer(rmi4_data->i2c_client->adapter, msg, 2) == 2) {
			retval = length;
			break;
		}
		dev_err(&rmi4_data->i2c_client->dev,
			"%s: I2C retry %d\n",
			__func__, retry + 1);
		msleep(20);
	}

	if (retry == SYN_I2C_RETRY_TIMES) {
		dev_err(&rmi4_data->i2c_client->dev,
			"%s: I2C read over retry limit\n",
			__func__);
		retval = -EIO;
	}

exit:
	mutex_unlock(&(rmi4_data->rmi4_io_ctrl_mutex));

	return retval;
}

/**
 * synaptics_rmi4_i2c_write()
 *
 * Called by various functions in this driver, and also exported to
 * other expansion Function modules such as rmi_dev.
 *
 * This function writes data of an arbitrary length to the sensor,
 * starting from an assigned register address of the sensor, via I2C with
 * a retry mechanism.
*/ static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data, unsigned short addr, unsigned char *data, unsigned short length) { int retval; unsigned char retry; unsigned char buf[length + 1]; struct i2c_msg msg[] = { { .addr = rmi4_data->i2c_client->addr, .flags = 0, .len = length + 1, .buf = buf, } }; mutex_lock(&(rmi4_data->rmi4_io_ctrl_mutex)); retval = synaptics_rmi4_set_page(rmi4_data, addr); if (retval != PAGE_SELECT_LEN) goto exit; buf[0] = addr & MASK_8BIT; memcpy(&buf[1], &data[0], length); for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) { if (i2c_transfer(rmi4_data->i2c_client->adapter, msg, 1) == 1) { retval = length; break; } dev_err(&rmi4_data->i2c_client->dev, "%s: I2C retry %d\n", __func__, retry + 1); msleep(20); } if (retry == SYN_I2C_RETRY_TIMES) { dev_err(&rmi4_data->i2c_client->dev, "%s: I2C write over retry limit\n", __func__); retval = -EIO; } exit: mutex_unlock(&(rmi4_data->rmi4_io_ctrl_mutex)); return retval; } /** * synaptics_rmi4_release_all() * * Called by synaptics_rmi4_suspend() * * Release all touch data during the touch device switch to suspend state. */ static void synaptics_rmi4_release_all(struct synaptics_rmi4_data *rmi4_data) { int finger; int max_num_fingers = rmi4_data->num_of_fingers; for (finger = 0; finger < max_num_fingers; finger++) { input_mt_slot(rmi4_data->input_dev, finger); input_mt_report_slot_state(rmi4_data->input_dev, MT_TOOL_FINGER, 0); } input_report_key(rmi4_data->input_dev, BTN_TOUCH, 0); input_report_key(rmi4_data->input_dev, BTN_TOOL_FINGER, 0); input_sync(rmi4_data->input_dev); } /** * synaptics_rmi4_f11_abs_report() * * Called by synaptics_rmi4_report_touch() when valid Function $11 * finger data has been detected. * * This function reads the Function $11 data registers, determines the * status of each finger supported by the Function, processes any * necessary coordinate manipulation, reports the finger data to * the input subsystem, and returns the number of fingers detected. 
*/
static int synaptics_rmi4_f11_abs_report(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	int retval;
	unsigned char touch_count = 0; /* number of touch points */
	unsigned char reg_index;
	unsigned char finger;
	unsigned char fingers_supported;
	unsigned char num_of_finger_status_regs;
	unsigned char finger_shift;
	unsigned char finger_status;
	unsigned char data_reg_blk_size;
	unsigned char finger_status_reg[3];
	/*
	 * Zero-initialize: when data_reg_blk_size is 3 (abs_data_size != 0
	 * in synaptics_rmi4_f11_init()) the read below fills only
	 * data[0..2], yet data[3]/data[4] are still consumed for wx/wy/z -
	 * without the initializer those would be uninitialized stack reads.
	 */
	unsigned char data[F11_STD_DATA_LEN] = {0};
	unsigned short data_addr;
	unsigned short data_offset;
	int x;
	int y;
	int wx;
	int wy;
	int z;

	/*
	 * The number of finger status registers is determined by the
	 * maximum number of fingers supported - 2 bits per finger. So
	 * the number of finger status registers to read is:
	 * register_count = ceil(max_num_of_fingers / 4)
	 */
	fingers_supported = fhandler->num_of_data_points;
	num_of_finger_status_regs = (fingers_supported + 3) / 4;
	data_addr = fhandler->full_addr.data_base;
	data_reg_blk_size = fhandler->size_of_data_register_block;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			data_addr,
			finger_status_reg,
			num_of_finger_status_regs);
	if (retval < 0)
		return 0;

	for (finger = 0; finger < fingers_supported; finger++) {
		reg_index = finger / 4;
		finger_shift = (finger % 4) * 2;
		finger_status = (finger_status_reg[reg_index] >> finger_shift)
				& MASK_2BIT;

		/*
		 * Each 2-bit finger status field represents the following:
		 * 00 = finger not present
		 * 01 = finger present and data accurate
		 * 10 = finger present but data may be inaccurate
		 * 11 = reserved
		 */
#ifdef TYPE_B_PROTOCOL
		input_mt_slot(rmi4_data->input_dev, finger);
		input_mt_report_slot_state(rmi4_data->input_dev,
				MT_TOOL_FINGER, finger_status != 0);
#endif

		if (finger_status) {
			data_offset = data_addr +
					num_of_finger_status_regs +
					(finger * data_reg_blk_size);
			retval = synaptics_rmi4_i2c_read(rmi4_data,
					data_offset,
					data,
					data_reg_blk_size);
			if (retval < 0)
				return 0;

			/* 12-bit coordinates: 8 MSBs plus packed 4 LSBs */
			x = (data[0] << 4) | (data[2] & MASK_4BIT);
			y = (data[1] << 4) | ((data[2] >> 4) & MASK_4BIT);
			wx =
(data[3] & MASK_4BIT);
			wy = (data[3] >> 4) & MASK_4BIT;
			z = data[4];

			/* Apply configured axis inversion */
			if (rmi4_data->flip_x)
				x = rmi4_data->sensor_max_x - x;
			if (rmi4_data->flip_y)
				y = rmi4_data->sensor_max_y - y;

			dev_dbg(&rmi4_data->i2c_client->dev,
				"%s: Finger %d:\n"
				"status = 0x%02x\n"
				"x = %d\n"
				"y = %d\n"
				"wx = %d\n"
				"wy = %d\n",
				__func__, finger,
				finger_status,
				x, y, wx, wy);

			input_report_abs(rmi4_data->input_dev,
					ABS_MT_POSITION_X, x);
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_POSITION_Y, y);
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_PRESSURE, z);
#ifdef REPORT_2D_W
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_TOUCH_MAJOR, max(wx, wy));
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_TOUCH_MINOR, min(wx, wy));
#endif
#ifndef TYPE_B_PROTOCOL
			input_mt_sync(rmi4_data->input_dev);
#endif
			touch_count++;
		}
	}

	input_report_key(rmi4_data->input_dev,
			BTN_TOUCH, touch_count > 0);
	input_report_key(rmi4_data->input_dev,
			BTN_TOOL_FINGER, touch_count > 0);

#ifndef TYPE_B_PROTOCOL
	/* Type A: an empty sync frame signals "all fingers up" */
	if (!touch_count)
		input_mt_sync(rmi4_data->input_dev);
#else
	input_mt_report_pointer_emulation(rmi4_data->input_dev, false);
#endif

	input_sync(rmi4_data->input_dev);

	return touch_count;
}

/**
 * synaptics_rmi4_f12_abs_report()
 *
 * Called by synaptics_rmi4_report_touch() when valid Function $12
 * finger data has been detected.
 *
 * This function reads the Function $12 data registers, determines the
 * status of each finger supported by the Function, processes any
 * necessary coordinate manipulation, reports the finger data to
 * the input subsystem, and returns the number of fingers detected.
*/
static int synaptics_rmi4_f12_abs_report(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	int retval;
	unsigned char touch_count = 0; /* number of touch points */
	unsigned char finger;
	unsigned char fingers_to_process;
	unsigned char finger_status;
	unsigned char size_of_2d_data;
	unsigned short data_addr;
	int x;
	int y;
	/*
	 * wx/wy are only assigned when REPORT_2D_W is defined, but they are
	 * unconditionally passed to dev_dbg() below; initialize them so no
	 * build configuration logs uninitialized stack values.
	 */
	int wx = 0;
	int wy = 0;
	struct synaptics_rmi4_f12_extra_data *extra_data;
	struct synaptics_rmi4_f12_finger_data *data;
	struct synaptics_rmi4_f12_finger_data *finger_data;

	fingers_to_process = fhandler->num_of_data_points;
	data_addr = fhandler->full_addr.data_base;
	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
	size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);

	/* One bulk read fetches the data blocks for all fingers */
	retval = synaptics_rmi4_i2c_read(rmi4_data,
			data_addr + extra_data->data1_offset,
			(unsigned char *)fhandler->data,
			fingers_to_process * size_of_2d_data);
	if (retval < 0)
		return 0;

	data = (struct synaptics_rmi4_f12_finger_data *)fhandler->data;

	for (finger = 0; finger < fingers_to_process; finger++) {
		finger_data = data + finger;
		finger_status = finger_data->object_type_and_status & MASK_2BIT;

		/*
		 * Each 2-bit finger status field represents the following:
		 * 00 = finger not present
		 * 01 = finger present and data accurate
		 * 10 = finger present but data may be inaccurate
		 * 11 = reserved
		 */
#ifdef TYPE_B_PROTOCOL
		input_mt_slot(rmi4_data->input_dev, finger);
		input_mt_report_slot_state(rmi4_data->input_dev,
				MT_TOOL_FINGER, finger_status != 0);
#endif

		if (finger_status) {
			x = (finger_data->x_msb << 8) | (finger_data->x_lsb);
			y = (finger_data->y_msb << 8) | (finger_data->y_lsb);
#ifdef REPORT_2D_W
			wx = finger_data->wx;
			wy = finger_data->wy;
#endif

			if (rmi4_data->flip_x)
				x = rmi4_data->sensor_max_x - x;
			if (rmi4_data->flip_y)
				y = rmi4_data->sensor_max_y - y;

			dev_dbg(&rmi4_data->i2c_client->dev,
				"%s: Finger %d:\n"
				"status = 0x%02x\n"
				"x = %d\n"
				"y = %d\n"
				"wx = %d\n"
				"wy = %d\n",
				__func__, finger,
				finger_status,
				x, y, wx, wy);
input_report_key(rmi4_data->input_dev,
					BTN_TOUCH, 1);
			input_report_key(rmi4_data->input_dev,
					BTN_TOOL_FINGER, 1);
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_POSITION_X, x);
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_POSITION_Y, y);
#ifdef REPORT_2D_W
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_TOUCH_MAJOR, max(wx, wy));
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_TOUCH_MINOR, min(wx, wy));
#endif
#ifndef TYPE_B_PROTOCOL
			input_mt_sync(rmi4_data->input_dev);
#endif
			touch_count++;
		}
	}

	input_report_key(rmi4_data->input_dev,
			BTN_TOUCH, touch_count > 0);
	input_report_key(rmi4_data->input_dev,
			BTN_TOOL_FINGER, touch_count > 0);

#ifndef TYPE_B_PROTOCOL
	/* Type A: an empty sync frame signals "all fingers up" */
	if (!touch_count)
		input_mt_sync(rmi4_data->input_dev);
#endif
	input_mt_report_pointer_emulation(rmi4_data->input_dev, false);

	input_sync(rmi4_data->input_dev);

	return touch_count;
}

/*
 * Read the F1A button bitmask and report key press/release events for
 * the 0D capacitive buttons. With NO_0D_WHILE_2D, button events that
 * occur while fingers are on the 2D surface are suppressed.
 */
static void synaptics_rmi4_f1a_report(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	int retval;
	unsigned char button;
	unsigned char index;
	unsigned char shift;
	unsigned char status;
	unsigned char *data;
	unsigned short data_addr = fhandler->full_addr.data_base;
	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
	/*
	 * NOTE(review): function-local static state implies a single
	 * device instance - confirm if multiple sensors are supported.
	 */
	static unsigned char do_once = 1;
	static bool current_status[MAX_NUMBER_OF_BUTTONS];
#ifdef NO_0D_WHILE_2D
	static bool before_2d_status[MAX_NUMBER_OF_BUTTONS];
	static bool while_2d_status[MAX_NUMBER_OF_BUTTONS];
#endif

	if (do_once) {
		memset(current_status, 0, sizeof(current_status));
#ifdef NO_0D_WHILE_2D
		memset(before_2d_status, 0, sizeof(before_2d_status));
		memset(while_2d_status, 0, sizeof(while_2d_status));
#endif
		do_once = 0;
	}

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			data_addr,
			f1a->button_data_buffer,
			f1a->button_bitmask_size);
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
			"%s: Failed to read button data registers\n",
			__func__);
		return;
	}

	data = f1a->button_data_buffer;

	/* One bit per button, packed 8 per byte */
	for (button = 0; button < f1a->valid_button_count; button++) {
		index = button / 8;
		shift = button % 8;
status = ((data[index] >> shift) & MASK_1BIT);

		/* Only report transitions */
		if (current_status[button] == status)
			continue;
		else
			current_status[button] = status;

		dev_dbg(&rmi4_data->i2c_client->dev,
			"%s: Button %d (code %d) ->%d\n",
			__func__, button,
			f1a->button_map[button],
			status);
#ifdef NO_0D_WHILE_2D
		if (rmi4_data->fingers_on_2d == false) {
			if (status == 1) {
				before_2d_status[button] = 1;
			} else {
				/* Swallow the release of a press made while 2D was active */
				if (while_2d_status[button] == 1) {
					while_2d_status[button] = 0;
					continue;
				} else {
					before_2d_status[button] = 0;
				}
			}
			input_report_key(rmi4_data->input_dev,
					f1a->button_map[button],
					status);
		} else {
			if (before_2d_status[button] == 1) {
				before_2d_status[button] = 0;
				input_report_key(rmi4_data->input_dev,
						f1a->button_map[button],
						status);
			} else {
				/* Remember presses made during 2D activity */
				if (status == 1)
					while_2d_status[button] = 1;
				else
					while_2d_status[button] = 0;
			}
		}
#else
		input_report_key(rmi4_data->input_dev,
				f1a->button_map[button],
				status);
#endif
	}

	input_sync(rmi4_data->input_dev);

	return;
}

/**
 * synaptics_rmi4_report_touch()
 *
 * Called by synaptics_rmi4_sensor_report().
 *
 * This function calls the appropriate finger data reporting function
 * based on the function handler it receives and returns the number of
 * fingers detected.
*/ static void synaptics_rmi4_report_touch(struct synaptics_rmi4_data *rmi4_data, struct synaptics_rmi4_fn *fhandler, unsigned char *touch_count) { unsigned char touch_count_2d; dev_dbg(&rmi4_data->i2c_client->dev, "%s: Function %02x reporting\n", __func__, fhandler->fn_number); switch (fhandler->fn_number) { case SYNAPTICS_RMI4_F11: touch_count_2d = synaptics_rmi4_f11_abs_report(rmi4_data, fhandler); *touch_count += touch_count_2d; if (touch_count_2d) rmi4_data->fingers_on_2d = true; else rmi4_data->fingers_on_2d = false; break; case SYNAPTICS_RMI4_F12: touch_count_2d = synaptics_rmi4_f12_abs_report(rmi4_data, fhandler); if (touch_count_2d) rmi4_data->fingers_on_2d = true; else rmi4_data->fingers_on_2d = false; break; case SYNAPTICS_RMI4_F1A: synaptics_rmi4_f1a_report(rmi4_data, fhandler); break; default: break; } return; } /** * synaptics_rmi4_sensor_report() * * Called by synaptics_rmi4_irq(). * * This function determines the interrupt source(s) from the sensor * and calls synaptics_rmi4_report_touch() with the appropriate * function handler for each function with valid data inputs. */ static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data) { int retval; unsigned char touch_count = 0; unsigned char intr[MAX_INTR_REGISTERS]; struct synaptics_rmi4_fn *fhandler; struct synaptics_rmi4_exp_fn *exp_fhandler; struct synaptics_rmi4_device_info *rmi; rmi = &(rmi4_data->rmi4_mod_info); /* * Get interrupt status information from F01 Data1 register to * determine the source(s) that are flagging the interrupt. */ retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_data_base_addr + 1, intr, rmi4_data->num_of_intr_regs); if (retval < 0) return retval; /* * Traverse the function handler list and service the source(s) * of the interrupt accordingly. 
*/
	mutex_lock(&rmi->support_fn_list_mutex);
	if (!list_empty(&rmi->support_fn_list)) {
		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
			if (fhandler->num_of_data_sources) {
				if (fhandler->intr_mask &
						intr[fhandler->intr_reg_num]) {
					synaptics_rmi4_report_touch(rmi4_data,
							fhandler, &touch_count);
				}
			}
		}
	}
	mutex_unlock(&rmi->support_fn_list_mutex);

	/* Let registered expansion modules service the attention as well */
	mutex_lock(&exp_fn_list_mutex);
	if (!list_empty(&exp_fn_list)) {
		list_for_each_entry(exp_fhandler, &exp_fn_list, link) {
			if (exp_fhandler->inserted &&
					(exp_fhandler->func_attn != NULL))
				exp_fhandler->func_attn(rmi4_data, intr[0]);
		}
	}
	mutex_unlock(&exp_fn_list_mutex);

	return touch_count;
}

/**
 * synaptics_rmi4_irq()
 *
 * Called by the kernel when an interrupt occurs (when the sensor
 * asserts the attention irq).
 *
 * This function is the ISR thread and handles the acquisition
 * and the reporting of finger data when the presence of fingers
 * is detected.
 */
static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
{
	struct synaptics_rmi4_data *rmi4_data = data;

	synaptics_rmi4_sensor_report(rmi4_data);

	return IRQ_HANDLED;
}

#ifdef CONFIG_OF
/* Parse a 4-element DT coordinate property into the platform data */
static int synaptics_rmi4_get_dt_coords(struct device *dev, char *name,
				struct synaptics_rmi4_platform_data *pdata)
{
	u32 coords[RMI4_COORDS_ARR_SIZE];
	struct property *prop;
	struct device_node *np = dev->of_node;
	int coords_size, rc;

	prop = of_find_property(np, name, NULL);
	if (!prop)
		return -EINVAL;
	if (!prop->value)
		return -ENODATA;

	coords_size = prop->length / sizeof(u32);
	if (coords_size != RMI4_COORDS_ARR_SIZE) {
		dev_err(dev, "invalid %s\n", name);
		return -EINVAL;
	}

	rc = of_property_read_u32_array(np, name, coords, coords_size);
	if (rc && (rc != -EINVAL)) {
		dev_err(dev, "Unable to read %s\n", name);
		return rc;
	}

	/* Coordinates are min-x, min-y, max-x, max-y */
	if (strcmp(name, "synaptics,panel-coords") == 0) {
		pdata->panel_minx = coords[0];
		pdata->panel_miny = coords[1];
		pdata->panel_maxx = coords[2];
		pdata->panel_maxy = coords[3];
	} else if (strcmp(name, "synaptics,display-coords") == 0) {
		pdata->disp_minx = coords[0];
pdata->disp_miny = coords[1];
		pdata->disp_maxx = coords[2];
		pdata->disp_maxy = coords[3];
	} else {
		dev_err(dev, "unsupported property %s\n", name);
		return -EINVAL;
	}

	return 0;
}

/* Populate the platform data from device tree properties */
static int synaptics_rmi4_parse_dt(struct device *dev,
				struct synaptics_rmi4_platform_data *rmi4_pdata)
{
	struct device_node *np = dev->of_node;
	struct property *prop;
	u32 temp_val, num_buttons;
	u32 button_map[MAX_NUMBER_OF_BUTTONS];
	int rc, i;

	/* Boolean flags */
	rmi4_pdata->i2c_pull_up = of_property_read_bool(np,
			"synaptics,i2c-pull-up");
	rmi4_pdata->power_down_enable = of_property_read_bool(np,
			"synaptics,power-down");
	rmi4_pdata->disable_gpios = of_property_read_bool(np,
			"synaptics,disable-gpios");
	rmi4_pdata->x_flip = of_property_read_bool(np, "synaptics,x-flip");
	rmi4_pdata->y_flip = of_property_read_bool(np, "synaptics,y-flip");
	rmi4_pdata->do_lockdown = of_property_read_bool(np,
			"synaptics,do-lockdown");

	/* Coordinate ranges are optional (-EINVAL means absent) */
	rc = synaptics_rmi4_get_dt_coords(dev, "synaptics,display-coords",
			rmi4_pdata);
	if (rc && (rc != -EINVAL))
		return rc;

	rc = synaptics_rmi4_get_dt_coords(dev, "synaptics,panel-coords",
			rmi4_pdata);
	if (rc && (rc != -EINVAL))
		return rc;

	rmi4_pdata->reset_delay = RESET_DELAY;
	rc = of_property_read_u32(np, "synaptics,reset-delay", &temp_val);
	if (!rc)
		rmi4_pdata->reset_delay = temp_val;
	else if (rc != -EINVAL) {
		dev_err(dev, "Unable to read reset delay\n");
		return rc;
	}

	rc = of_property_read_string(np, "synaptics,fw-image-name",
		&rmi4_pdata->fw_image_name);
	if (rc && (rc != -EINVAL)) {
		dev_err(dev, "Unable to read fw image name\n");
		return rc;
	}

	/* reset, irq gpio info */
	rmi4_pdata->reset_gpio = of_get_named_gpio_flags(np,
			"synaptics,reset-gpio", 0, &rmi4_pdata->reset_flags);
	rmi4_pdata->irq_gpio = of_get_named_gpio_flags(np,
			"synaptics,irq-gpio", 0, &rmi4_pdata->irq_flags);

	/* Optional capacitive button key-code map */
	prop = of_find_property(np, "synaptics,button-map", NULL);
	if (prop) {
		num_buttons = prop->length / sizeof(temp_val);

		rmi4_pdata->capacitance_button_map = devm_kzalloc(dev,
			sizeof(*rmi4_pdata->capacitance_button_map),
			GFP_KERNEL);
		if
(!rmi4_pdata->capacitance_button_map)
			return -ENOMEM;

		rmi4_pdata->capacitance_button_map->map = devm_kzalloc(dev,
			sizeof(*rmi4_pdata->capacitance_button_map->map) *
			MAX_NUMBER_OF_BUTTONS, GFP_KERNEL);
		if (!rmi4_pdata->capacitance_button_map->map)
			return -ENOMEM;

		if (num_buttons <= MAX_NUMBER_OF_BUTTONS) {
			rc = of_property_read_u32_array(np,
				"synaptics,button-map", button_map,
				num_buttons);
			if (rc) {
				dev_err(dev, "Unable to read key codes\n");
				return rc;
			}
			for (i = 0; i < num_buttons; i++)
				rmi4_pdata->capacitance_button_map->map[i] =
					button_map[i];
			rmi4_pdata->capacitance_button_map->nbuttons =
				num_buttons;
		} else {
			return -EINVAL;
		}
	}
	return 0;
}
#else
/* No device tree support: leave the platform data untouched */
static inline int synaptics_rmi4_parse_dt(struct device *dev,
				struct synaptics_rmi4_platform_data *rmi4_pdata)
{
	return 0;
}
#endif

/**
 * synaptics_rmi4_irq_enable()
 *
 * Called by synaptics_rmi4_probe() and the power management functions
 * in this driver and also exported to other expansion Function modules
 * such as rmi_dev.
 *
 * This function handles the enabling and disabling of the attention
 * irq including the setting up of the ISR thread.
 */
static int synaptics_rmi4_irq_enable(struct synaptics_rmi4_data *rmi4_data,
		bool enable)
{
	int retval = 0;
	unsigned char *intr_status;

	if (enable) {
		if (rmi4_data->irq_enabled)
			return retval;

		intr_status = kzalloc(rmi4_data->num_of_intr_regs, GFP_KERNEL);
		if (!intr_status) {
			dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to alloc memory\n",
				__func__);
			return -ENOMEM;
		}
		/* Clear interrupts first */
		retval = synaptics_rmi4_i2c_read(rmi4_data,
				rmi4_data->f01_data_base_addr + 1,
				intr_status,
				rmi4_data->num_of_intr_regs);
		kfree(intr_status);
		if (retval < 0)
			return retval;

		enable_irq(rmi4_data->irq);

		rmi4_data->irq_enabled = true;
	} else {
		if (rmi4_data->irq_enabled) {
			disable_irq(rmi4_data->irq);
			rmi4_data->irq_enabled = false;
		}
	}

	return retval;
}

/**
 * synaptics_rmi4_f11_init()
 *
 * Called by synaptics_rmi4_query_device().
*
 * This function parses information from the Function 11 registers
 * and determines the number of fingers supported, x and y data ranges,
 * offset to the associated interrupt status register, interrupt bit
 * mask, and gathers finger data acquisition capabilities from the query
 * registers.
 */
static int synaptics_rmi4_f11_init(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		struct synaptics_rmi4_fn_desc *fd,
		unsigned int intr_count)
{
	int retval;
	unsigned char ii;
	unsigned char intr_offset;
	unsigned char abs_data_size;
	unsigned char abs_data_blk_size;
	unsigned char query[F11_STD_QUERY_LEN];
	unsigned char control[F11_STD_CTRL_LEN];

	fhandler->fn_number = fd->fn_number;
	fhandler->num_of_data_sources = fd->intr_src_count;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			fhandler->full_addr.query_base,
			query,
			sizeof(query));
	if (retval < 0)
		return retval;

	/* Maximum number of fingers supported (encoded: 5 means 10 fingers) */
	if ((query[1] & MASK_3BIT) <= 4)
		fhandler->num_of_data_points = (query[1] & MASK_3BIT) + 1;
	else if ((query[1] & MASK_3BIT) == 5)
		fhandler->num_of_data_points = 10;

	rmi4_data->num_of_fingers = fhandler->num_of_data_points;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			fhandler->full_addr.ctrl_base,
			control,
			sizeof(control));
	if (retval < 0)
		return retval;

	/* Maximum x and y (12-bit values split across two registers) */
	rmi4_data->sensor_max_x = ((control[6] & MASK_8BIT) << 0) |
			((control[7] & MASK_4BIT) << 8);
	rmi4_data->sensor_max_y = ((control[8] & MASK_8BIT) << 0) |
			((control[9] & MASK_4BIT) << 8);
	dev_dbg(&rmi4_data->i2c_client->dev,
		"%s: Function %02x max x = %d max y = %d\n",
		__func__, fhandler->fn_number,
		rmi4_data->sensor_max_x,
		rmi4_data->sensor_max_y);

	rmi4_data->max_touch_width = MAX_F11_TOUCH_WIDTH;

	/* Index of the interrupt status register holding this function's bits */
	fhandler->intr_reg_num = (intr_count + 7) / 8;
	if (fhandler->intr_reg_num != 0)
		fhandler->intr_reg_num -= 1;

	/* Set an enable bit for each data source */
	intr_offset = intr_count % 8;
	fhandler->intr_mask = 0;
	for (ii = intr_offset;
			ii < ((fd->intr_src_count & MASK_3BIT) + intr_offset);
			ii++)
fhandler->intr_mask |= 1 << ii; abs_data_size = query[5] & MASK_2BIT; abs_data_blk_size = 3 + (2 * (abs_data_size == 0 ? 1 : 0)); fhandler->size_of_data_register_block = abs_data_blk_size; return retval; } static int synaptics_rmi4_f12_set_enables(struct synaptics_rmi4_data *rmi4_data, unsigned short ctrl28) { int retval; static unsigned short ctrl_28_address; if (ctrl28) ctrl_28_address = ctrl28; retval = synaptics_rmi4_i2c_write(rmi4_data, ctrl_28_address, &rmi4_data->report_enable, sizeof(rmi4_data->report_enable)); if (retval < 0) return retval; return retval; } /** * synaptics_rmi4_f12_init() * * Called by synaptics_rmi4_query_device(). * * This funtion parses information from the Function 12 registers and * determines the number of fingers supported, offset to the data1 * register, x and y data ranges, offset to the associated interrupt * status register, interrupt bit mask, and allocates memory resources * for finger data acquisition. */ static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data, struct synaptics_rmi4_fn *fhandler, struct synaptics_rmi4_fn_desc *fd, unsigned int intr_count) { int retval; unsigned char ii; unsigned char intr_offset; unsigned char size_of_2d_data; unsigned char size_of_query8; unsigned char ctrl_8_offset; unsigned char ctrl_23_offset; unsigned char ctrl_28_offset; unsigned char num_of_fingers; struct synaptics_rmi4_f12_extra_data *extra_data; struct synaptics_rmi4_f12_query_5 query_5; struct synaptics_rmi4_f12_query_8 query_8; struct synaptics_rmi4_f12_ctrl_8 ctrl_8; struct synaptics_rmi4_f12_ctrl_23 ctrl_23; fhandler->fn_number = fd->fn_number; fhandler->num_of_data_sources = fd->intr_src_count; fhandler->extra = kmalloc(sizeof(*extra_data), GFP_KERNEL); extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra; size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data); retval = synaptics_rmi4_i2c_read(rmi4_data, fhandler->full_addr.query_base + 5, query_5.data, sizeof(query_5.data)); if (retval 
< 0) return retval; ctrl_8_offset = query_5.ctrl0_is_present + query_5.ctrl1_is_present + query_5.ctrl2_is_present + query_5.ctrl3_is_present + query_5.ctrl4_is_present + query_5.ctrl5_is_present + query_5.ctrl6_is_present + query_5.ctrl7_is_present; ctrl_23_offset = ctrl_8_offset + query_5.ctrl8_is_present + query_5.ctrl9_is_present + query_5.ctrl10_is_present + query_5.ctrl11_is_present + query_5.ctrl12_is_present + query_5.ctrl13_is_present + query_5.ctrl14_is_present + query_5.ctrl15_is_present + query_5.ctrl16_is_present + query_5.ctrl17_is_present + query_5.ctrl18_is_present + query_5.ctrl19_is_present + query_5.ctrl20_is_present + query_5.ctrl21_is_present + query_5.ctrl22_is_present; ctrl_28_offset = ctrl_23_offset + query_5.ctrl23_is_present + query_5.ctrl24_is_present + query_5.ctrl25_is_present + query_5.ctrl26_is_present + query_5.ctrl27_is_present; retval = synaptics_rmi4_i2c_read(rmi4_data, fhandler->full_addr.ctrl_base + ctrl_23_offset, ctrl_23.data, sizeof(ctrl_23.data)); if (retval < 0) return retval; /* Maximum number of fingers supported */ fhandler->num_of_data_points = min(ctrl_23.max_reported_objects, (unsigned char)F12_FINGERS_TO_SUPPORT); num_of_fingers = fhandler->num_of_data_points; rmi4_data->num_of_fingers = num_of_fingers; retval = synaptics_rmi4_i2c_read(rmi4_data, fhandler->full_addr.query_base + 7, &size_of_query8, sizeof(size_of_query8)); if (retval < 0) return retval; retval = synaptics_rmi4_i2c_read(rmi4_data, fhandler->full_addr.query_base + 8, query_8.data, size_of_query8); if (retval < 0) return retval; /* Determine the presence of the Data0 register */ extra_data->data1_offset = query_8.data0_is_present; if ((size_of_query8 >= 3) && (query_8.data15_is_present)) { extra_data->data15_offset = query_8.data0_is_present + query_8.data1_is_present + query_8.data2_is_present + query_8.data3_is_present + query_8.data4_is_present + query_8.data5_is_present + query_8.data6_is_present + query_8.data7_is_present + query_8.data8_is_present 
+ query_8.data9_is_present + query_8.data10_is_present + query_8.data11_is_present + query_8.data12_is_present + query_8.data13_is_present + query_8.data14_is_present; extra_data->data15_size = (num_of_fingers + 7) / 8; } else { extra_data->data15_size = 0; } rmi4_data->report_enable = RPT_DEFAULT; #ifdef REPORT_2D_Z rmi4_data->report_enable |= RPT_Z; #endif #ifdef REPORT_2D_W rmi4_data->report_enable |= (RPT_WX | RPT_WY); #endif retval = synaptics_rmi4_f12_set_enables(rmi4_data, fhandler->full_addr.ctrl_base + ctrl_28_offset); if (retval < 0) return retval; retval = synaptics_rmi4_i2c_read(rmi4_data, fhandler->full_addr.ctrl_base + ctrl_8_offset, ctrl_8.data, sizeof(ctrl_8.data)); if (retval < 0) return retval; /* Maximum x and y */ rmi4_data->sensor_max_x = ((unsigned short)ctrl_8.max_x_coord_lsb << 0) | ((unsigned short)ctrl_8.max_x_coord_msb << 8); rmi4_data->sensor_max_y = ((unsigned short)ctrl_8.max_y_coord_lsb << 0) | ((unsigned short)ctrl_8.max_y_coord_msb << 8); dev_dbg(&rmi4_data->i2c_client->dev, "%s: Function %02x max x = %d max y = %d\n", __func__, fhandler->fn_number, rmi4_data->sensor_max_x, rmi4_data->sensor_max_y); rmi4_data->num_of_rx = ctrl_8.num_of_rx; rmi4_data->num_of_tx = ctrl_8.num_of_tx; rmi4_data->max_touch_width = max(rmi4_data->num_of_rx, rmi4_data->num_of_tx); fhandler->intr_reg_num = (intr_count + 7) / 8; if (fhandler->intr_reg_num != 0) fhandler->intr_reg_num -= 1; /* Set an enable bit for each data source */ intr_offset = intr_count % 8; fhandler->intr_mask = 0; for (ii = intr_offset; ii < ((fd->intr_src_count & MASK_3BIT) + intr_offset); ii++) fhandler->intr_mask |= 1 << ii; /* Allocate memory for finger data storage space */ fhandler->data_size = num_of_fingers * size_of_2d_data; fhandler->data = kmalloc(fhandler->data_size, GFP_KERNEL); return retval; } static int synaptics_rmi4_f1a_alloc_mem(struct synaptics_rmi4_data *rmi4_data, struct synaptics_rmi4_fn *fhandler) { int retval; struct synaptics_rmi4_f1a_handle *f1a; f1a = 
kzalloc(sizeof(*f1a), GFP_KERNEL);
	if (!f1a) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to alloc mem for function handle\n",
				__func__);
		return -ENOMEM;
	}

	fhandler->data = (void *)f1a;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			fhandler->full_addr.query_base,
			f1a->button_query.data,
			sizeof(f1a->button_query.data));
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to read query registers\n",
				__func__);
		return retval;
	}

	/* max_button_count is zero-based, hence the + 1 */
	f1a->button_count = f1a->button_query.max_button_count + 1;
	/* one status bit per button, rounded up to whole bytes */
	f1a->button_bitmask_size = (f1a->button_count + 7) / 8;
	f1a->button_data_buffer = kcalloc(f1a->button_bitmask_size,
			sizeof(*(f1a->button_data_buffer)),
			GFP_KERNEL);
	if (!f1a->button_data_buffer) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to alloc mem for data buffer\n",
				__func__);
		return -ENOMEM;
	}

	f1a->button_map = kcalloc(f1a->button_count,
			sizeof(*(f1a->button_map)),
			GFP_KERNEL);
	if (!f1a->button_map) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to alloc mem for button map\n",
				__func__);
		return -ENOMEM;
	}

	/*
	 * NOTE(review): error paths rely on the caller invoking
	 * synaptics_rmi4_f1a_kfree() (see f1a_init's error_exit) to release
	 * the partially-built handle.
	 */
	return 0;
}

/*
 * Copy the board-file 0D button map into the F1A handle; returns 0 when
 * no map is configured (feature unused) and -ENODEV when a map struct
 * exists but its array pointer is missing.
 */
static int synaptics_rmi4_capacitance_button_map(
		struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	unsigned char ii;
	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
	const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;

	if (!pdata->capacitance_button_map) {
		dev_info(&rmi4_data->i2c_client->dev,
				"%s: capacitance_button_map not in use\n",
				__func__);
		return 0;
	} else if (!pdata->capacitance_button_map->map) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Button map is missing in board file\n",
				__func__);
		return -ENODEV;
	} else {
		/* Use the smaller of the two counts when they disagree */
		if (pdata->capacitance_button_map->nbuttons !=
			f1a->button_count) {
			f1a->valid_button_count = min(f1a->button_count,
				pdata->capacitance_button_map->nbuttons);
		} else {
			f1a->valid_button_count = f1a->button_count;
		}

		for (ii = 0; ii < f1a->valid_button_count; ii++)
			f1a->button_map[ii] =
					pdata->capacitance_button_map->map[ii];
	}

	return 0;
}

/* Release all F1A (0D button) resources and detach them from fhandler */
static void
synaptics_rmi4_f1a_kfree(struct synaptics_rmi4_fn *fhandler)
{
	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;

	if (f1a) {
		kfree(f1a->button_data_buffer);
		kfree(f1a->button_map);
		kfree(f1a);
		fhandler->data = NULL;
	}

	return;
}

/*
 * Set up the Function $1A (capacitive buttons) handler: record the
 * interrupt sources, build the interrupt mask, allocate button buffers
 * and load the board button map.  Cleans up after itself on failure.
 */
static int synaptics_rmi4_f1a_init(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		struct synaptics_rmi4_fn_desc *fd,
		unsigned int intr_count)
{
	int retval;
	unsigned char ii;
	unsigned short intr_offset;

	fhandler->fn_number = fd->fn_number;
	fhandler->num_of_data_sources = fd->intr_src_count;

	fhandler->intr_reg_num = (intr_count + 7) / 8;
	if (fhandler->intr_reg_num != 0)
		fhandler->intr_reg_num -= 1;

	/* Set an enable bit for each data source */
	intr_offset = intr_count % 8;
	fhandler->intr_mask = 0;
	for (ii = intr_offset;
			ii < ((fd->intr_src_count & MASK_3BIT) + intr_offset);
			ii++)
		fhandler->intr_mask |= 1 << ii;

	retval = synaptics_rmi4_f1a_alloc_mem(rmi4_data, fhandler);
	if (retval < 0)
		goto error_exit;

	retval = synaptics_rmi4_capacitance_button_map(rmi4_data, fhandler);
	if (retval < 0)
		goto error_exit;

	rmi4_data->button_0d_enabled = 1;

	return 0;

error_exit:
	synaptics_rmi4_f1a_kfree(fhandler);

	return retval;
}

/*
 * Allocate a function handler and record its register base addresses,
 * folding the RMI page number into bits 15:8 of each address.
 */
static int synaptics_rmi4_alloc_fh(struct synaptics_rmi4_fn **fhandler,
		struct synaptics_rmi4_fn_desc *rmi_fd, int page_number)
{
	*fhandler = kzalloc(sizeof(**fhandler), GFP_KERNEL);
	if (!(*fhandler))
		return -ENOMEM;

	(*fhandler)->full_addr.data_base =
			(rmi_fd->data_base_addr |
			(page_number << 8));
	(*fhandler)->full_addr.ctrl_base =
			(rmi_fd->ctrl_base_addr |
			(page_number << 8));
	(*fhandler)->full_addr.cmd_base =
			(rmi_fd->cmd_base_addr |
			(page_number << 8));
	(*fhandler)->full_addr.query_base =
			(rmi_fd->query_base_addr |
			(page_number << 8));
	(*fhandler)->fn_number = rmi_fd->fn_number;

	return 0;
}

/**
 * synaptics_rmi4_query_device_info()
 *
 * Called by synaptics_rmi4_query_device().
 *
 * Reads the Function $01 query registers and fills in the device info
 * structure (manufacturer, product, date code, serial, build id).
 */
static int synaptics_rmi4_query_device_info(
		struct synaptics_rmi4_data *rmi4_data)
{
	int retval;
	unsigned char f01_query[F01_STD_QUERY_LEN];
	struct synaptics_rmi4_device_info *rmi = &(rmi4_data->rmi4_mod_info);

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			rmi4_data->f01_query_base_addr,
			f01_query,
			sizeof(f01_query));
	if (retval < 0)
		return retval;

	/* RMI Version 4.0 currently supported */
	rmi->version_major = 4;
	rmi->version_minor = 0;

	rmi->manufacturer_id = f01_query[0];
	rmi->product_props = f01_query[1];
	rmi->product_info[0] = f01_query[2] & MASK_7BIT;
	rmi->product_info[1] = f01_query[3] & MASK_7BIT;
	rmi->date_code[0] = f01_query[4] & MASK_5BIT;
	rmi->date_code[1] = f01_query[5] & MASK_4BIT;
	rmi->date_code[2] = f01_query[6] & MASK_5BIT;
	rmi->tester_id = ((f01_query[7] & MASK_7BIT) << 8) |
			(f01_query[8] & MASK_7BIT);
	rmi->serial_number = ((f01_query[9] & MASK_7BIT) << 8) |
			(f01_query[10] & MASK_7BIT);
	/*
	 * NOTE(review): copies exactly 10 bytes with no terminator — assumes
	 * product_id_string is at least 11 bytes and pre-zeroed; verify.
	 */
	memcpy(rmi->product_id_string, &f01_query[11], 10);

	/* Non-fatal: only warn when the part does not identify as Synaptics */
	if (rmi->manufacturer_id != 1) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Non-Synaptics device found, manufacturer ID = %d\n",
				__func__, rmi->manufacturer_id);
	}

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			rmi4_data->f01_query_base_addr + F01_BUID_ID_OFFSET,
			rmi->build_id,
			sizeof(rmi->build_id));
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to read firmware build id (code %d)\n",
				__func__, retval);
		return retval;
	}
	return 0;
}

/*
 * This function checks whether the fhandler already exists in the
 * support_fn_list or not.
 * If it exists then return 1 as found or return 0 as not found.
 *
 * Called by synaptics_rmi4_query_device().
 */
static int synaptics_rmi4_check_fn_list(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	int found = 0;
	struct synaptics_rmi4_fn *new_fhandler;
	struct synaptics_rmi4_device_info *rmi;

	rmi = &(rmi4_data->rmi4_mod_info);

	/* List is walked under the support_fn_list mutex */
	mutex_lock(&rmi->support_fn_list_mutex);
	if (!list_empty(&rmi->support_fn_list))
		list_for_each_entry(new_fhandler, &rmi->support_fn_list, link)
			if (new_fhandler->fn_number == fhandler->fn_number)
				found = 1;
	mutex_unlock(&rmi->support_fn_list_mutex);

	return found;
}

/**
 * synaptics_rmi4_query_device()
 *
 * Called by synaptics_rmi4_probe().
 *
 * This funtion scans the page description table, records the offsets
 * to the register types of Function $01, sets up the function handlers
 * for Function $11 and Function $12, determines the number of interrupt
 * sources from the sensor, adds valid Functions with data inputs to the
 * Function linked list, parses information from the query registers of
 * Function $01, and enables the interrupt sources from the valid Functions
 * with data inputs.
 */
static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
{
	int retval, found;
	unsigned char ii;
	unsigned char page_number;
	unsigned char intr_count = 0;
	unsigned char data_sources = 0;
	unsigned short pdt_entry_addr;
	unsigned short intr_addr;
	struct synaptics_rmi4_f01_device_status status;
	struct synaptics_rmi4_fn_desc rmi_fd;
	struct synaptics_rmi4_fn *fhandler;
	struct synaptics_rmi4_device_info *rmi;

	rmi = &(rmi4_data->rmi4_mod_info);

	/* Scan the page description tables of the pages to service */
	for (page_number = 0; page_number < PAGES_TO_SERVICE; page_number++) {
		/* PDT entries are laid out downward from PDT_START */
		for (pdt_entry_addr = PDT_START; pdt_entry_addr > PDT_END;
				pdt_entry_addr -= PDT_ENTRY_SIZE) {
			/*
			 * Folds the page number into bits 15:8 of the loop
			 * variable itself; the subsequent subtraction keeps
			 * those page bits intact.
			 */
			pdt_entry_addr |= (page_number << 8);

			retval = synaptics_rmi4_i2c_read(rmi4_data,
					pdt_entry_addr,
					(unsigned char *)&rmi_fd,
					sizeof(rmi_fd));
			if (retval < 0)
				return retval;

			fhandler = NULL;
			found = 0;

			if (rmi_fd.fn_number == 0) {
				dev_dbg(&rmi4_data->i2c_client->dev,
						"%s: Reached end of PDT\n",
						__func__);
				break;
			}

			dev_dbg(&rmi4_data->i2c_client->dev,
					"%s: F%02x found (page %d)\n",
					__func__, rmi_fd.fn_number,
					page_number);

			switch (rmi_fd.fn_number) {
			case SYNAPTICS_RMI4_F01:
				rmi4_data->f01_query_base_addr =
						rmi_fd.query_base_addr;
				rmi4_data->f01_ctrl_base_addr =
						rmi_fd.ctrl_base_addr;
				rmi4_data->f01_data_base_addr =
						rmi_fd.data_base_addr;
				rmi4_data->f01_cmd_base_addr =
						rmi_fd.cmd_base_addr;

				retval =
				synaptics_rmi4_query_device_info(rmi4_data);
				if (retval < 0)
					return retval;

				retval = synaptics_rmi4_i2c_read(rmi4_data,
						rmi4_data->f01_data_base_addr,
						status.data,
						sizeof(status.data));
				if (retval < 0)
					return retval;

				/*
				 * Poll until the firmware CRC check finishes.
				 * NOTE(review): no timeout — a stuck device
				 * would hang probe here; consider bounding.
				 */
				while (status.status_code ==
						STATUS_CRC_IN_PROGRESS) {
					msleep(1);
					retval = synaptics_rmi4_i2c_read(
							rmi4_data,
						rmi4_data->f01_data_base_addr,
							status.data,
							sizeof(status.data));
					if (retval < 0)
						return retval;
				}

				if (status.flash_prog == 1) {
					pr_notice("%s: In flash prog mode, status = 0x%02x\n",
							__func__,
							status.status_code);
					goto flash_prog_mode;
				}
				break;

			case SYNAPTICS_RMI4_F11:
				if (rmi_fd.intr_src_count == 0)
					break;

				retval = synaptics_rmi4_alloc_fh(&fhandler,
						&rmi_fd, page_number);
				if (retval < 0) {
					dev_err(&rmi4_data->i2c_client->dev,
							"%s: Failed to alloc for F%d\n",
							__func__,
							rmi_fd.fn_number);
					return retval;
				}

				retval = synaptics_rmi4_f11_init(rmi4_data,
						fhandler, &rmi_fd, intr_count);
				if (retval < 0)
					return retval;
				break;

			case SYNAPTICS_RMI4_F12:
				if (rmi_fd.intr_src_count == 0)
					break;

				retval = synaptics_rmi4_alloc_fh(&fhandler,
						&rmi_fd, page_number);
				if (retval < 0) {
					dev_err(&rmi4_data->i2c_client->dev,
							"%s: Failed to alloc for F%d\n",
							__func__,
							rmi_fd.fn_number);
					return retval;
				}

				retval = synaptics_rmi4_f12_init(rmi4_data,
						fhandler, &rmi_fd, intr_count);
				if (retval < 0)
					return retval;
				break;

			case SYNAPTICS_RMI4_F1A:
				if (rmi_fd.intr_src_count == 0)
					break;

				retval = synaptics_rmi4_alloc_fh(&fhandler,
						&rmi_fd, page_number);
				if (retval < 0) {
					dev_err(&rmi4_data->i2c_client->dev,
							"%s: Failed to alloc for F%d\n",
							__func__,
							rmi_fd.fn_number);
					return retval;
				}

				retval = synaptics_rmi4_f1a_init(rmi4_data,
						fhandler, &rmi_fd, intr_count);
				if (retval < 0)
					return retval;
				break;
			}

			/* Accumulate the interrupt count */
			intr_count += (rmi_fd.intr_src_count & MASK_3BIT);

			if (fhandler && rmi_fd.intr_src_count) {
				/*
				 * Want to check whether the fhandler already
				 * exists in the support_fn_list or not.
				 * If not found then add it to the list,
				 * otherwise free the memory allocated to it.
				 */
				found = synaptics_rmi4_check_fn_list(rmi4_data,
						fhandler);

				if (!found) {
					mutex_lock(
					&rmi->support_fn_list_mutex);
					list_add_tail(&fhandler->link,
						&rmi->support_fn_list);
					mutex_unlock(
						&rmi->support_fn_list_mutex);
				} else {
					if (fhandler->fn_number ==
							SYNAPTICS_RMI4_F1A) {
						synaptics_rmi4_f1a_kfree(
								fhandler);
					} else {
						kfree(fhandler->data);
						kfree(fhandler->extra);
					}
					kfree(fhandler);
				}
			}
		}
	}

flash_prog_mode:
	rmi4_data->num_of_intr_regs = (intr_count + 7) / 8;
	dev_dbg(&rmi4_data->i2c_client->dev,
			"%s: Number of interrupt registers = %d\n",
			__func__, rmi4_data->num_of_intr_regs);

	memset(rmi4_data->intr_mask, 0x00, sizeof(rmi4_data->intr_mask));

	/*
	 * Map out the interrupt bit masks for the interrupt sources
	 * from the registered function handlers.
	 */
	mutex_lock(&rmi->support_fn_list_mutex);
	if (!list_empty(&rmi->support_fn_list)) {
		list_for_each_entry(fhandler, &rmi->support_fn_list, link)
			data_sources += fhandler->num_of_data_sources;
	}
	mutex_unlock(&rmi->support_fn_list_mutex);
	if (data_sources) {
		mutex_lock(&rmi->support_fn_list_mutex);
		if (!list_empty(&rmi->support_fn_list)) {
			list_for_each_entry(fhandler,
						&rmi->support_fn_list, link) {
				if (fhandler->num_of_data_sources) {
					rmi4_data->intr_mask[fhandler->intr_reg_num] |=
							fhandler->intr_mask;
				}
			}
		}
		mutex_unlock(&rmi->support_fn_list_mutex);
	}

	/* Enable the interrupt sources */
	for (ii = 0; ii < rmi4_data->num_of_intr_regs; ii++) {
		if (rmi4_data->intr_mask[ii] != 0x00) {
			dev_dbg(&rmi4_data->i2c_client->dev,
					"%s: Interrupt enable mask %d = 0x%02x\n",
					__func__, ii, rmi4_data->intr_mask[ii]);
			intr_addr = rmi4_data->f01_ctrl_base_addr + 1 + ii;
			retval = synaptics_rmi4_i2c_write(rmi4_data,
					intr_addr,
					&(rmi4_data->intr_mask[ii]),
					sizeof(rmi4_data->intr_mask[ii]));
			if (retval < 0)
				return retval;
		}
	}

	return 0;
}

/*
 * Locate Function $01 in the page description table and write the reset
 * command (0x01) to its command register, then wait out the board's
 * configured reset delay.
 */
static int synaptics_rmi4_reset_command(struct synaptics_rmi4_data *rmi4_data)
{
	int retval;
	int page_number;
	unsigned char command = 0x01;
	unsigned short pdt_entry_addr;
	struct synaptics_rmi4_fn_desc rmi_fd;
	bool
done = false;

	/* Scan the page description tables of the pages to service */
	for (page_number = 0; page_number < PAGES_TO_SERVICE; page_number++) {
		for (pdt_entry_addr = PDT_START; pdt_entry_addr > PDT_END;
				pdt_entry_addr -= PDT_ENTRY_SIZE) {
			retval = synaptics_rmi4_i2c_read(rmi4_data,
					pdt_entry_addr,
					(unsigned char *)&rmi_fd,
					sizeof(rmi_fd));
			if (retval < 0)
				return retval;

			/* fn_number 0 marks the end of the table */
			if (rmi_fd.fn_number == 0)
				break;

			switch (rmi_fd.fn_number) {
			case SYNAPTICS_RMI4_F01:
				rmi4_data->f01_cmd_base_addr =
						rmi_fd.cmd_base_addr;
				done = true;
				break;
			}
		}
		if (done) {
			dev_info(&rmi4_data->i2c_client->dev,
					"%s: Find F01 in page description table 0x%x\n",
					__func__, rmi4_data->f01_cmd_base_addr);
			break;
		}
	}

	if (!done) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Cannot find F01 in page description table\n",
				__func__);
		return -EINVAL;
	}

	retval = synaptics_rmi4_i2c_write(rmi4_data,
			rmi4_data->f01_cmd_base_addr,
			&command,
			sizeof(command));
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to issue reset command, error = %d\n",
				__func__, retval);
		return retval;
	}

	/* Give the firmware time to come back up after the reset */
	msleep(rmi4_data->board->reset_delay);

	return retval;
};

/*
 * Issue a device reset, drop every registered function handler, and
 * rebuild the handler list by re-querying the device.
 */
static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data)
{
	int retval;
	struct synaptics_rmi4_fn *fhandler;
	struct synaptics_rmi4_fn *next_fhandler;
	struct synaptics_rmi4_device_info *rmi;

	rmi = &(rmi4_data->rmi4_mod_info);

	retval = synaptics_rmi4_reset_command(rmi4_data);
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to send command reset\n",
				__func__);
		return retval;
	}

	/*
	 * NOTE(review): the list is torn down without taking
	 * support_fn_list_mutex — verify no concurrent readers here.
	 */
	if (!list_empty(&rmi->support_fn_list)) {
		list_for_each_entry_safe(fhandler, next_fhandler,
				&rmi->support_fn_list, link) {
			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
				synaptics_rmi4_f1a_kfree(fhandler);
			else {
				kfree(fhandler->data);
				kfree(fhandler->extra);
			}
			kfree(fhandler);
		}
	}

	INIT_LIST_HEAD(&rmi->support_fn_list);

	retval = synaptics_rmi4_query_device(rmi4_data);
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to query device\n",
				__func__);
		return retval;
	}

	return 0;
}

/**
 * synaptics_rmi4_detection_work()
 *
 * Called by the kernel at the scheduled time.
 *
 * This function is a self-rearming work thread that checks for the
 * insertion and removal of other expansion Function modules such as
 * rmi_dev and calls their initialization and removal callback functions
 * accordingly.
 */
static void synaptics_rmi4_detection_work(struct work_struct *work)
{
	struct synaptics_rmi4_exp_fn *exp_fhandler, *next_list_entry;
	struct synaptics_rmi4_data *rmi4_data =
			container_of(work, struct synaptics_rmi4_data,
			det_work.work);

	mutex_lock(&exp_fn_list_mutex);
	if (!list_empty(&exp_fn_list)) {
		list_for_each_entry_safe(exp_fhandler,
				next_list_entry,
				&exp_fn_list,
				link) {
			/* Pending insertion: run the module's init hook */
			if ((exp_fhandler->func_init != NULL) &&
					(exp_fhandler->inserted == false)) {
				if (exp_fhandler->func_init(rmi4_data) < 0) {
					list_del(&exp_fhandler->link);
					kfree(exp_fhandler);
				} else {
					exp_fhandler->inserted = true;
				}
			/* Pending removal: init hook cleared by unregister */
			} else if ((exp_fhandler->func_init == NULL) &&
					(exp_fhandler->inserted == true)) {
				exp_fhandler->func_remove(rmi4_data);
				list_del(&exp_fhandler->link);
				kfree(exp_fhandler);
			}
		}
	}
	mutex_unlock(&exp_fn_list_mutex);

	return;
}

/**
 * synaptics_rmi4_new_function()
 *
 * Called by other expansion Function modules in their module init and
 * module exit functions.
 *
 * This function is used by other expansion Function modules such as
 * rmi_dev to register themselves with the driver by providing their
 * initialization and removal callback function pointers so that they
 * can be inserted or removed dynamically at module init and exit times,
 * respectively.
 */
void synaptics_rmi4_new_function(enum exp_fn fn_type, bool insert,
		int (*func_init)(struct synaptics_rmi4_data *rmi4_data),
		void (*func_remove)(struct synaptics_rmi4_data *rmi4_data),
		void (*func_attn)(struct synaptics_rmi4_data *rmi4_data,
				unsigned char intr_mask))
{
	struct synaptics_rmi4_exp_fn *exp_fhandler;

	/* Lazy one-time init; also done in probe for the same globals */
	if (!exp_fn_inited) {
		mutex_init(&exp_fn_list_mutex);
		INIT_LIST_HEAD(&exp_fn_list);
		exp_fn_inited = 1;
	}

	mutex_lock(&exp_fn_list_mutex);
	if (insert) {
		exp_fhandler = kzalloc(sizeof(*exp_fhandler), GFP_KERNEL);
		if (!exp_fhandler) {
			pr_err("%s: Failed to alloc mem for expansion function\n",
					__func__);
			goto exit;
		}
		exp_fhandler->fn_type = fn_type;
		exp_fhandler->func_init = func_init;
		exp_fhandler->func_attn = func_attn;
		exp_fhandler->func_remove = func_remove;
		/* actual insertion happens later in the detection work */
		exp_fhandler->inserted = false;
		list_add_tail(&exp_fhandler->link, &exp_fn_list);
	} else {
		/*
		 * Removal only clears the hooks here; the detection work
		 * performs func_remove and frees the entry.
		 */
		if (!list_empty(&exp_fn_list)) {
			list_for_each_entry(exp_fhandler, &exp_fn_list, link) {
				if (exp_fhandler->func_init == func_init) {
					exp_fhandler->inserted = false;
					exp_fhandler->func_init = NULL;
					exp_fhandler->func_attn = NULL;
					goto exit;
				}
			}
		}
	}

exit:
	mutex_unlock(&exp_fn_list_mutex);

	return;
}
EXPORT_SYMBOL(synaptics_rmi4_new_function);

/* Set the regulator load only when the regulator supports it */
static int reg_set_optimum_mode_check(struct regulator *reg, int load_uA)
{
	return (regulator_count_voltages(reg) > 0) ?
		regulator_set_optimum_mode(reg, load_uA) : 0;
}

/*
 * Acquire (on == true) or release (on == false) the vdd and optional
 * vcc_i2c regulators and program their voltage ranges.
 */
static int synaptics_rmi4_regulator_configure(struct synaptics_rmi4_data
						*rmi4_data, bool on)
{
	int retval;

	if (on == false)
		goto hw_shutdown;

	rmi4_data->vdd = regulator_get(&rmi4_data->i2c_client->dev,
					"vdd");
	if (IS_ERR(rmi4_data->vdd)) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to get vdd regulator\n",
				__func__);
		return PTR_ERR(rmi4_data->vdd);
	}

	if (regulator_count_voltages(rmi4_data->vdd) > 0) {
		retval = regulator_set_voltage(rmi4_data->vdd,
			RMI4_VTG_MIN_UV, RMI4_VTG_MAX_UV);
		if (retval) {
			dev_err(&rmi4_data->i2c_client->dev,
				"regulator set_vtg failed retval =%d\n",
				retval);
			goto err_set_vtg_vdd;
		}
	}

	/* The I2C pull-up rail is only present on some boards */
	if (rmi4_data->board->i2c_pull_up) {
		rmi4_data->vcc_i2c = regulator_get(&rmi4_data->i2c_client->dev,
						"vcc_i2c");
		if (IS_ERR(rmi4_data->vcc_i2c)) {
			dev_err(&rmi4_data->i2c_client->dev,
					"%s: Failed to get i2c regulator\n",
					__func__);
			retval = PTR_ERR(rmi4_data->vcc_i2c);
			goto err_get_vtg_i2c;
		}

		if (regulator_count_voltages(rmi4_data->vcc_i2c) > 0) {
			retval = regulator_set_voltage(rmi4_data->vcc_i2c,
				RMI4_I2C_VTG_MIN_UV, RMI4_I2C_VTG_MAX_UV);
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"reg set i2c vtg failed retval =%d\n",
					retval);
				goto err_set_vtg_i2c;
			}
		}
	}
	return 0;

err_set_vtg_i2c:
	if (rmi4_data->board->i2c_pull_up)
		regulator_put(rmi4_data->vcc_i2c);
err_get_vtg_i2c:
	if (regulator_count_voltages(rmi4_data->vdd) > 0)
		regulator_set_voltage(rmi4_data->vdd, 0, RMI4_VTG_MAX_UV);
err_set_vtg_vdd:
	regulator_put(rmi4_data->vdd);
	return retval;

hw_shutdown:
	if (regulator_count_voltages(rmi4_data->vdd) > 0)
		regulator_set_voltage(rmi4_data->vdd, 0, RMI4_VTG_MAX_UV);
	regulator_put(rmi4_data->vdd);
	if (rmi4_data->board->i2c_pull_up) {
		if (regulator_count_voltages(rmi4_data->vcc_i2c) > 0)
			regulator_set_voltage(rmi4_data->vcc_i2c, 0,
					RMI4_I2C_VTG_MAX_UV);
		regulator_put(rmi4_data->vcc_i2c);
	}
	return 0;
};

/*
 * Enable (on == true) or disable (on == false) the previously acquired
 * regulators, programming the expected active load currents.
 */
static int synaptics_rmi4_power_on(struct synaptics_rmi4_data *rmi4_data,
					bool on)
{
	int retval;

	if (on ==
false)
		goto power_off;

	retval = reg_set_optimum_mode_check(rmi4_data->vdd,
		RMI4_ACTIVE_LOAD_UA);
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
			"Regulator vdd set_opt failed rc=%d\n",
			retval);
		return retval;
	}

	retval = regulator_enable(rmi4_data->vdd);
	if (retval) {
		dev_err(&rmi4_data->i2c_client->dev,
			"Regulator vdd enable failed rc=%d\n",
			retval);
		goto error_reg_en_vdd;
	}

	if (rmi4_data->board->i2c_pull_up) {
		retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
			RMI4_I2C_LOAD_UA);
		if (retval < 0) {
			dev_err(&rmi4_data->i2c_client->dev,
				"Regulator vcc_i2c set_opt failed rc=%d\n",
				retval);
			goto error_reg_opt_i2c;
		}

		retval = regulator_enable(rmi4_data->vcc_i2c);
		if (retval) {
			dev_err(&rmi4_data->i2c_client->dev,
				"Regulator vcc_i2c enable failed rc=%d\n",
				retval);
			goto error_reg_en_vcc_i2c;
		}
	}
	return 0;

error_reg_en_vcc_i2c:
	if (rmi4_data->board->i2c_pull_up)
		reg_set_optimum_mode_check(rmi4_data->vcc_i2c, 0);
error_reg_opt_i2c:
	regulator_disable(rmi4_data->vdd);
error_reg_en_vdd:
	reg_set_optimum_mode_check(rmi4_data->vdd, 0);
	return retval;

power_off:
	reg_set_optimum_mode_check(rmi4_data->vdd, 0);
	regulator_disable(rmi4_data->vdd);
	if (rmi4_data->board->i2c_pull_up) {
		reg_set_optimum_mode_check(rmi4_data->vcc_i2c, 0);
		regulator_disable(rmi4_data->vcc_i2c);
	}
	return 0;
}

/*
 * Request and configure the interrupt and reset GPIOs (on == true), or
 * release them (on == false).  When no reset GPIO is provided, a
 * soft reset command is issued over I2C instead.
 */
static int synaptics_rmi4_gpio_configure(struct synaptics_rmi4_data *rmi4_data,
					bool on)
{
	int retval = 0;

	if (on) {
		if (gpio_is_valid(rmi4_data->board->irq_gpio)) {
			/* configure touchscreen irq gpio */
			retval = gpio_request(rmi4_data->board->irq_gpio,
				"rmi4_irq_gpio");
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"unable to request gpio [%d]\n",
					rmi4_data->board->irq_gpio);
				goto err_irq_gpio_req;
			}
			retval = gpio_direction_input(rmi4_data->board->
					irq_gpio);
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"unable to set direction for gpio "
					"[%d]\n", rmi4_data->board->irq_gpio);
				goto err_irq_gpio_dir;
			}
		} else {
			dev_err(&rmi4_data->i2c_client->dev,
				"irq gpio not provided\n");
			goto err_irq_gpio_req;
		}

		if (gpio_is_valid(rmi4_data->board->reset_gpio)) {
			/* configure touchscreen reset out gpio */
			retval = gpio_request(rmi4_data->board->reset_gpio,
					"rmi4_reset_gpio");
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"unable to request gpio [%d]\n",
					rmi4_data->board->reset_gpio);
				goto err_irq_gpio_dir;
			}

			retval = gpio_direction_output(rmi4_data->board->
					reset_gpio, 1);
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"unable to set direction for gpio "
					"[%d]\n",
					rmi4_data->board->reset_gpio);
				goto err_reset_gpio_dir;
			}

			/* Drive reset high and wait for the chip to boot */
			gpio_set_value(rmi4_data->board->reset_gpio, 1);
			msleep(rmi4_data->board->reset_delay);
		} else
			synaptics_rmi4_reset_command(rmi4_data);

		return 0;
	} else {
		if (rmi4_data->board->disable_gpios) {
			if (gpio_is_valid(rmi4_data->board->irq_gpio))
				gpio_free(rmi4_data->board->irq_gpio);
			if (gpio_is_valid(rmi4_data->board->reset_gpio)) {
				/*
				 * This is intended to save leakage current
				 * only. Even if the call(gpio_direction_input)
				 * fails, only leakage current will be more but
				 * functionality will not be affected.
				 */
				retval = gpio_direction_input(rmi4_data->
							board->reset_gpio);
				if (retval) {
					dev_err(&rmi4_data->i2c_client->dev,
					"unable to set direction for gpio "
					"[%d]\n", rmi4_data->board->irq_gpio);
				}
				gpio_free(rmi4_data->board->reset_gpio);
			}
		}

		return 0;
	}

err_reset_gpio_dir:
	if (gpio_is_valid(rmi4_data->board->reset_gpio))
		gpio_free(rmi4_data->board->reset_gpio);
err_irq_gpio_dir:
	if (gpio_is_valid(rmi4_data->board->irq_gpio))
		gpio_free(rmi4_data->board->irq_gpio);
err_irq_gpio_req:
	return retval;
}

/**
 * synaptics_rmi4_probe()
 *
 * Called by the kernel when an association with an I2C device of the
 * same name is made (after doing i2c_add_driver).
* * This funtion allocates and initializes the resources for the driver * as an input driver, turns on the power to the sensor, queries the * sensor for its supported Functions and characteristics, registers * the driver to the input subsystem, sets up the interrupt, handles * the registration of the early_suspend and late_resume functions, * and creates a work queue for detection of other expansion Function * modules. */ static int __devinit synaptics_rmi4_probe(struct i2c_client *client, const struct i2c_device_id *dev_id) { int retval = 0; unsigned char ii; unsigned char attr_count; struct synaptics_rmi4_f1a_handle *f1a; struct synaptics_rmi4_fn *fhandler; struct synaptics_rmi4_fn *next_fhandler; struct synaptics_rmi4_data *rmi4_data; struct synaptics_rmi4_device_info *rmi; struct synaptics_rmi4_platform_data *platform_data = client->dev.platform_data; struct dentry *temp; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&client->dev, "%s: SMBus byte data not supported\n", __func__); return -EIO; } if (client->dev.of_node) { platform_data = devm_kzalloc(&client->dev, sizeof(*platform_data), GFP_KERNEL); if (!platform_data) { dev_err(&client->dev, "Failed to allocate memory\n"); return -ENOMEM; } retval = synaptics_rmi4_parse_dt(&client->dev, platform_data); if (retval) return retval; } else { platform_data = client->dev.platform_data; } if (!platform_data) { dev_err(&client->dev, "%s: No platform data found\n", __func__); return -EINVAL; } rmi4_data = kzalloc(sizeof(*rmi4_data) * 2, GFP_KERNEL); if (!rmi4_data) { dev_err(&client->dev, "%s: Failed to alloc mem for rmi4_data\n", __func__); return -ENOMEM; } rmi = &(rmi4_data->rmi4_mod_info); rmi4_data->input_dev = input_allocate_device(); if (rmi4_data->input_dev == NULL) { dev_err(&client->dev, "%s: Failed to allocate input device\n", __func__); retval = -ENOMEM; goto err_input_device; } rmi4_data->i2c_client = client; rmi4_data->current_page = MASK_8BIT; rmi4_data->board = 
platform_data; rmi4_data->touch_stopped = false; rmi4_data->sensor_sleep = false; rmi4_data->irq_enabled = false; rmi4_data->fw_updating = false; rmi4_data->suspended = false; rmi4_data->i2c_read = synaptics_rmi4_i2c_read; rmi4_data->i2c_write = synaptics_rmi4_i2c_write; rmi4_data->irq_enable = synaptics_rmi4_irq_enable; rmi4_data->reset_device = synaptics_rmi4_reset_device; rmi4_data->flip_x = rmi4_data->board->x_flip; rmi4_data->flip_y = rmi4_data->board->y_flip; if (rmi4_data->board->fw_image_name) snprintf(rmi4_data->fw_image_name, NAME_BUFFER_SIZE, "%s", rmi4_data->board->fw_image_name); rmi4_data->input_dev->name = DRIVER_NAME; rmi4_data->input_dev->phys = INPUT_PHYS_NAME; rmi4_data->input_dev->id.bustype = BUS_I2C; rmi4_data->input_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT; rmi4_data->input_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION; rmi4_data->input_dev->dev.parent = &client->dev; input_set_drvdata(rmi4_data->input_dev, rmi4_data); set_bit(EV_SYN, rmi4_data->input_dev->evbit); set_bit(EV_KEY, rmi4_data->input_dev->evbit); set_bit(EV_ABS, rmi4_data->input_dev->evbit); set_bit(BTN_TOUCH, rmi4_data->input_dev->keybit); set_bit(BTN_TOOL_FINGER, rmi4_data->input_dev->keybit); #ifdef INPUT_PROP_DIRECT set_bit(INPUT_PROP_DIRECT, rmi4_data->input_dev->propbit); #endif retval = synaptics_rmi4_regulator_configure(rmi4_data, true); if (retval < 0) { dev_err(&client->dev, "Failed to configure regulators\n"); goto err_reg_configure; } retval = synaptics_rmi4_power_on(rmi4_data, true); if (retval < 0) { dev_err(&client->dev, "Failed to power on\n"); goto err_power_device; } retval = synaptics_rmi4_gpio_configure(rmi4_data, true); if (retval < 0) { dev_err(&client->dev, "Failed to configure gpios\n"); goto err_gpio_config; } init_waitqueue_head(&rmi4_data->wait); mutex_init(&(rmi4_data->rmi4_io_ctrl_mutex)); INIT_LIST_HEAD(&rmi->support_fn_list); mutex_init(&rmi->support_fn_list_mutex); retval = synaptics_rmi4_query_device(rmi4_data); if (retval < 0) { 
dev_err(&client->dev, "%s: Failed to query device\n", __func__); goto err_free_gpios; } if (rmi4_data->board->disp_maxx) rmi4_data->disp_maxx = rmi4_data->board->disp_maxx; else rmi4_data->disp_maxx = rmi4_data->sensor_max_x; if (rmi4_data->board->disp_maxy) rmi4_data->disp_maxy = rmi4_data->board->disp_maxy; else rmi4_data->disp_maxy = rmi4_data->sensor_max_y; if (rmi4_data->board->disp_minx) rmi4_data->disp_minx = rmi4_data->board->disp_minx; else rmi4_data->disp_minx = 0; if (rmi4_data->board->disp_miny) rmi4_data->disp_miny = rmi4_data->board->disp_miny; else rmi4_data->disp_miny = 0; input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_X, rmi4_data->disp_minx, rmi4_data->disp_maxx, 0, 0); input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_Y, rmi4_data->disp_miny, rmi4_data->disp_maxy, 0, 0); input_set_abs_params(rmi4_data->input_dev, ABS_PRESSURE, 0, 255, 0, 0); #ifdef REPORT_2D_W input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0, rmi4_data->max_touch_width, 0, 0); input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MINOR, 0, rmi4_data->max_touch_width, 0, 0); #endif #ifdef TYPE_B_PROTOCOL input_mt_init_slots(rmi4_data->input_dev, rmi4_data->num_of_fingers); #endif i2c_set_clientdata(client, rmi4_data); f1a = NULL; mutex_lock(&rmi->support_fn_list_mutex); if (!list_empty(&rmi->support_fn_list)) { list_for_each_entry(fhandler, &rmi->support_fn_list, link) { if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) f1a = fhandler->data; } } mutex_unlock(&rmi->support_fn_list_mutex); if (f1a) { for (ii = 0; ii < f1a->valid_button_count; ii++) { set_bit(f1a->button_map[ii], rmi4_data->input_dev->keybit); input_set_capability(rmi4_data->input_dev, EV_KEY, f1a->button_map[ii]); } } retval = input_register_device(rmi4_data->input_dev); if (retval) { dev_err(&client->dev, "%s: Failed to register input device\n", __func__); goto err_register_input; } configure_sleep(rmi4_data); if (!exp_fn_inited) { mutex_init(&exp_fn_list_mutex); 
INIT_LIST_HEAD(&exp_fn_list); exp_fn_inited = 1; } rmi4_data->det_workqueue = create_singlethread_workqueue("rmi_det_workqueue"); INIT_DELAYED_WORK(&rmi4_data->det_work, synaptics_rmi4_detection_work); queue_delayed_work(rmi4_data->det_workqueue, &rmi4_data->det_work, msecs_to_jiffies(EXP_FN_DET_INTERVAL)); rmi4_data->irq = gpio_to_irq(platform_data->irq_gpio); retval = request_threaded_irq(rmi4_data->irq, NULL, synaptics_rmi4_irq, platform_data->irq_flags, DRIVER_NAME, rmi4_data); rmi4_data->irq_enabled = true; if (retval < 0) { dev_err(&client->dev, "%s: Failed to create irq thread\n", __func__); goto err_enable_irq; } rmi4_data->dir = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL); if (rmi4_data->dir == NULL || IS_ERR(rmi4_data->dir)) { dev_err(&client->dev, "%s: Failed to create debugfs directory, rc = %ld\n", __func__, PTR_ERR(rmi4_data->dir)); retval = PTR_ERR(rmi4_data->dir); goto err_create_debugfs_dir; } temp = debugfs_create_file("suspend", S_IRUSR | S_IWUSR, rmi4_data->dir, rmi4_data, &debug_suspend_fops); if (temp == NULL || IS_ERR(temp)) { dev_err(&client->dev, "%s: Failed to create suspend debugfs file, rc = %ld\n", __func__, PTR_ERR(temp)); retval = PTR_ERR(temp); goto err_create_debugfs_file; } for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) { retval = sysfs_create_file(&client->dev.kobj, &attrs[attr_count].attr); if (retval < 0) { dev_err(&client->dev, "%s: Failed to create sysfs attributes\n", __func__); goto err_sysfs; } } synaptics_rmi4_sensor_wake(rmi4_data); retval = synaptics_rmi4_irq_enable(rmi4_data, true); if (retval < 0) { dev_err(&client->dev, "%s: Failed to enable attention interrupt\n", __func__); goto err_sysfs; } retval = synaptics_rmi4_check_configuration(rmi4_data); if (retval < 0) { dev_err(&client->dev, "Failed to check configuration\n"); return retval; } return retval; err_sysfs: for (attr_count--; attr_count >= 0; attr_count--) { sysfs_remove_file(&rmi4_data->input_dev->dev.kobj, &attrs[attr_count].attr); } 
err_create_debugfs_file: debugfs_remove_recursive(rmi4_data->dir); err_create_debugfs_dir: free_irq(rmi4_data->irq, rmi4_data); err_enable_irq: cancel_delayed_work_sync(&rmi4_data->det_work); flush_workqueue(rmi4_data->det_workqueue); destroy_workqueue(rmi4_data->det_workqueue); input_unregister_device(rmi4_data->input_dev); err_register_input: mutex_lock(&rmi->support_fn_list_mutex); if (!list_empty(&rmi->support_fn_list)) { list_for_each_entry_safe(fhandler, next_fhandler, &rmi->support_fn_list, link) { if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) synaptics_rmi4_f1a_kfree(fhandler); else { kfree(fhandler->data); kfree(fhandler->extra); } kfree(fhandler); } } mutex_unlock(&rmi->support_fn_list_mutex); err_free_gpios: if (gpio_is_valid(rmi4_data->board->reset_gpio)) gpio_free(rmi4_data->board->reset_gpio); if (gpio_is_valid(rmi4_data->board->irq_gpio)) gpio_free(rmi4_data->board->irq_gpio); err_gpio_config: synaptics_rmi4_power_on(rmi4_data, false); err_power_device: synaptics_rmi4_regulator_configure(rmi4_data, false); err_reg_configure: input_free_device(rmi4_data->input_dev); rmi4_data->input_dev = NULL; err_input_device: kfree(rmi4_data); return retval; } /** * synaptics_rmi4_remove() * * Called by the kernel when the association with an I2C device of the * same name is broken (when the driver is unloaded). * * This funtion terminates the work queue, stops sensor data acquisition, * frees the interrupt, unregisters the driver from the input subsystem, * turns off the power to the sensor, and frees other allocated resources. 
*/ static int __devexit synaptics_rmi4_remove(struct i2c_client *client) { unsigned char attr_count; struct synaptics_rmi4_fn *fhandler; struct synaptics_rmi4_fn *next_fhandler; struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client); struct synaptics_rmi4_device_info *rmi; rmi = &(rmi4_data->rmi4_mod_info); debugfs_remove_recursive(rmi4_data->dir); cancel_delayed_work_sync(&rmi4_data->det_work); flush_workqueue(rmi4_data->det_workqueue); destroy_workqueue(rmi4_data->det_workqueue); rmi4_data->touch_stopped = true; wake_up(&rmi4_data->wait); free_irq(rmi4_data->irq, rmi4_data); for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) { sysfs_remove_file(&rmi4_data->input_dev->dev.kobj, &attrs[attr_count].attr); } input_unregister_device(rmi4_data->input_dev); mutex_lock(&rmi->support_fn_list_mutex); if (!list_empty(&rmi->support_fn_list)) { list_for_each_entry_safe(fhandler, next_fhandler, &rmi->support_fn_list, link) { if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) synaptics_rmi4_f1a_kfree(fhandler); else { kfree(fhandler->data); kfree(fhandler->extra); } kfree(fhandler); } } mutex_unlock(&rmi->support_fn_list_mutex); if (gpio_is_valid(rmi4_data->board->reset_gpio)) gpio_free(rmi4_data->board->reset_gpio); if (gpio_is_valid(rmi4_data->board->irq_gpio)) gpio_free(rmi4_data->board->irq_gpio); synaptics_rmi4_power_on(rmi4_data, false); synaptics_rmi4_regulator_configure(rmi4_data, false); kfree(rmi4_data); return 0; } #ifdef CONFIG_PM /** * synaptics_rmi4_sensor_sleep() * * Called by synaptics_rmi4_early_suspend() and synaptics_rmi4_suspend(). * * This function stops finger data acquisition and puts the sensor to sleep. 
 */
static void synaptics_rmi4_sensor_sleep(struct synaptics_rmi4_data *rmi4_data)
{
	int retval;
	struct synaptics_rmi4_f01_device_control_0 device_ctrl;

	/* Read-modify-write of the F01 device control register:
	 * read the current value first so only the sleep bits change.
	 */
	retval = synaptics_rmi4_i2c_read(rmi4_data,
			rmi4_data->f01_ctrl_base_addr,
			device_ctrl.data,
			sizeof(device_ctrl.data));
	if (retval < 0) {
		dev_err(&(rmi4_data->input_dev->dev),
			"%s: Failed to enter sleep mode\n",
			__func__);
		/* Leave the bookkeeping flag showing the sensor awake */
		rmi4_data->sensor_sleep = false;
		return;
	}

	device_ctrl.sleep_mode = SENSOR_SLEEP;
	device_ctrl.nosleep = NO_SLEEP_OFF;

	retval = synaptics_rmi4_i2c_write(rmi4_data,
			rmi4_data->f01_ctrl_base_addr,
			device_ctrl.data,
			sizeof(device_ctrl.data));
	if (retval < 0) {
		dev_err(&(rmi4_data->input_dev->dev),
			"%s: Failed to enter sleep mode\n",
			__func__);
		rmi4_data->sensor_sleep = false;
		return;
	} else {
		/* Write succeeded: sensor is now asleep */
		rmi4_data->sensor_sleep = true;
	}

	return;
}

/**
 * synaptics_rmi4_sensor_wake()
 *
 * Called by synaptics_rmi4_resume() and synaptics_rmi4_late_resume().
 *
 * This function wakes the sensor from sleep.
 */
static void synaptics_rmi4_sensor_wake(struct synaptics_rmi4_data *rmi4_data)
{
	int retval;
	struct synaptics_rmi4_f01_device_control_0 device_ctrl;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			rmi4_data->f01_ctrl_base_addr,
			device_ctrl.data,
			sizeof(device_ctrl.data));
	if (retval < 0) {
		dev_err(&(rmi4_data->input_dev->dev),
			"%s: Failed to wake from sleep mode\n",
			__func__);
		/* Read failed: keep reporting the sensor as asleep */
		rmi4_data->sensor_sleep = true;
		return;
	}

	/* Already in normal operation: just sync the flag and return */
	if (device_ctrl.nosleep == NO_SLEEP_OFF &&
		device_ctrl.sleep_mode == NORMAL_OPERATION) {
		rmi4_data->sensor_sleep = false;
		return;
	}

	device_ctrl.sleep_mode = NORMAL_OPERATION;
	device_ctrl.nosleep = NO_SLEEP_OFF;

	retval = synaptics_rmi4_i2c_write(rmi4_data,
			rmi4_data->f01_ctrl_base_addr,
			device_ctrl.data,
			sizeof(device_ctrl.data));
	if (retval < 0) {
		dev_err(&(rmi4_data->input_dev->dev),
			"%s: Failed to wake from sleep mode\n",
			__func__);
		rmi4_data->sensor_sleep = true;
		return;
	} else {
		rmi4_data->sensor_sleep = false;
	}

	return;
}

#if defined(CONFIG_FB)
static int fb_notifier_callback(struct notifier_block
*self, unsigned long event, void *data)
{
	struct fb_event *evdata = data;
	int *blank;
	struct synaptics_rmi4_data *rmi4_data =
		container_of(self, struct synaptics_rmi4_data, fb_notif);

	/* Mirror framebuffer blank/unblank events onto the touch
	 * controller's suspend/resume handlers.
	 */
	if (evdata && evdata->data && event == FB_EVENT_BLANK &&
		rmi4_data && rmi4_data->i2c_client) {
		blank = evdata->data;
		if (*blank == FB_BLANK_UNBLANK)
			synaptics_rmi4_resume(&(rmi4_data->input_dev->dev));
		else if (*blank == FB_BLANK_POWERDOWN)
			synaptics_rmi4_suspend(&(rmi4_data->input_dev->dev));
	}

	return 0;
}
#elif defined(CONFIG_HAS_EARLYSUSPEND)
/**
 * synaptics_rmi4_early_suspend()
 *
 * Called by the kernel during the early suspend phase when the system
 * enters suspend.
 *
 * This function calls synaptics_rmi4_sensor_sleep() to stop finger
 * data acquisition and put the sensor to sleep.
 */
static void synaptics_rmi4_early_suspend(struct early_suspend *h)
{
	struct synaptics_rmi4_data *rmi4_data =
		container_of(h, struct synaptics_rmi4_data, early_suspend);

	/* Latch stay_awake so late_resume can skip work if we never
	 * actually slept.
	 */
	if (rmi4_data->stay_awake)
		rmi4_data->staying_awake = true;
	else
		rmi4_data->staying_awake = false;

	rmi4_data->touch_stopped = true;
	wake_up(&rmi4_data->wait);
	synaptics_rmi4_irq_enable(rmi4_data, false);
	synaptics_rmi4_sensor_sleep(rmi4_data);

	/* Optionally run the full suspend path right away */
	if (rmi4_data->full_pm_cycle)
		synaptics_rmi4_suspend(&(rmi4_data->input_dev->dev));

	return;
}

/**
 * synaptics_rmi4_late_resume()
 *
 * Called by the kernel during the late resume phase when the system
 * wakes up from suspend.
 *
 * This function goes through the sensor wake process if the system wakes
 * up from early suspend (without going into suspend).
 */
static void synaptics_rmi4_late_resume(struct early_suspend *h)
{
	struct synaptics_rmi4_data *rmi4_data =
		container_of(h, struct synaptics_rmi4_data, early_suspend);

	/* Nothing was put to sleep in early_suspend */
	if (rmi4_data->staying_awake)
		return;

	if (rmi4_data->full_pm_cycle)
		synaptics_rmi4_resume(&(rmi4_data->input_dev->dev));

	if (rmi4_data->sensor_sleep == true) {
		synaptics_rmi4_sensor_wake(rmi4_data);
		rmi4_data->touch_stopped = false;
		synaptics_rmi4_irq_enable(rmi4_data, true);
	}

	return;
}
#endif

/* Switch the vdd / vcc_i2c supplies between low-power mode (on == true)
 * and active/high-power mode (on == false).  When the board sets
 * power_down_enable, the regulators are fully disabled/re-enabled instead
 * of merely having their optimum load changed.  On failure, the previous
 * load settings are restored via the fail_* labels.
 */
static int synaptics_rmi4_regulator_lpm(struct synaptics_rmi4_data *rmi4_data,
				bool on)
{
	int retval;
	int load_ua;

	if (on == false)
		goto regulator_hpm;

	if (rmi4_data->board->i2c_pull_up) {
		load_ua = rmi4_data->board->power_down_enable ?
			0 : RMI4_I2C_LPM_LOAD_UA;
		retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
			load_ua);
		if (retval < 0) {
			dev_err(&rmi4_data->i2c_client->dev,
				"Regulator vcc_i2c set_opt failed " \
				"rc=%d\n", retval);
			goto fail_regulator_lpm;
		}

		if (rmi4_data->board->power_down_enable) {
			retval = regulator_disable(rmi4_data->vcc_i2c);
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"Regulator vcc_i2c disable failed " \
					"rc=%d\n", retval);
				goto fail_regulator_lpm;
			}
		}
	}

	load_ua = rmi4_data->board->power_down_enable ?
		0 : RMI4_LPM_LOAD_UA;
	retval = reg_set_optimum_mode_check(rmi4_data->vdd, load_ua);
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
			"Regulator vdd_ana set_opt failed rc=%d\n",
			retval);
		goto fail_regulator_lpm;
	}

	if (rmi4_data->board->power_down_enable) {
		retval = regulator_disable(rmi4_data->vdd);
		if (retval) {
			dev_err(&rmi4_data->i2c_client->dev,
				"Regulator vdd disable failed rc=%d\n",
				retval);
			goto fail_regulator_lpm;
		}
	}

	return 0;

regulator_hpm:
	/* Active mode: vdd first, then the I2C pull-up supply */
	retval = reg_set_optimum_mode_check(rmi4_data->vdd,
				RMI4_ACTIVE_LOAD_UA);
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
			"Regulator vcc_ana set_opt failed rc=%d\n",
			retval);
		goto fail_regulator_hpm;
	}

	if (rmi4_data->board->power_down_enable) {
		retval = regulator_enable(rmi4_data->vdd);
		if (retval) {
			dev_err(&rmi4_data->i2c_client->dev,
				"Regulator vdd enable failed rc=%d\n",
				retval);
			goto fail_regulator_hpm;
		}
	}

	if (rmi4_data->board->i2c_pull_up) {
		retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
					RMI4_I2C_LOAD_UA);
		if (retval < 0) {
			dev_err(&rmi4_data->i2c_client->dev,
				"Regulator vcc_i2c set_opt failed rc=%d\n",
				retval);
			goto fail_regulator_hpm;
		}

		if (rmi4_data->board->power_down_enable) {
			retval = regulator_enable(rmi4_data->vcc_i2c);
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"Regulator vcc_i2c enable failed " \
					"rc=%d\n", retval);
				goto fail_regulator_hpm;
			}
		}
	}

	return 0;

fail_regulator_lpm:
	/* Roll back to active loads so the device stays usable */
	reg_set_optimum_mode_check(rmi4_data->vdd, RMI4_ACTIVE_LOAD_UA);
	if (rmi4_data->board->i2c_pull_up)
		reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
						RMI4_I2C_LOAD_UA);

	return retval;

fail_regulator_hpm:
	/* Roll back to low-power loads */
	load_ua = rmi4_data->board->power_down_enable ?
		0 : RMI4_LPM_LOAD_UA;
	reg_set_optimum_mode_check(rmi4_data->vdd, load_ua);

	if (rmi4_data->board->i2c_pull_up) {
		load_ua = rmi4_data->board->power_down_enable ?
			0 : RMI4_I2C_LPM_LOAD_UA;
		reg_set_optimum_mode_check(rmi4_data->vcc_i2c, load_ua);
	}

	return retval;
}

/* Re-apply the device configuration if the controller reports itself
 * unconfigured (e.g. after an unexpected reset): re-query the function
 * list and set the DEVICE_CONFIGURED bit in F01 control.
 */
static int synaptics_rmi4_check_configuration(struct synaptics_rmi4_data
						*rmi4_data)
{
	int retval;
	struct synaptics_rmi4_f01_device_control_0 device_control;
	struct synaptics_rmi4_f01_device_status device_status;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
				rmi4_data->f01_data_base_addr,
				device_status.data,
				sizeof(device_status.data));
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
			"Failed to read device status, rc=%d\n", retval);
		return retval;
	}

	if (device_status.unconfigured) {
		retval = synaptics_rmi4_query_device(rmi4_data);
		if (retval < 0) {
			dev_err(&rmi4_data->i2c_client->dev,
				"Failed to query device, rc=%d\n", retval);
			return retval;
		}

		retval = synaptics_rmi4_i2c_read(rmi4_data,
				rmi4_data->f01_ctrl_base_addr,
				device_control.data,
				sizeof(device_control.data));
		if (retval < 0)
			return retval;

		device_control.configured = DEVICE_CONFIGURED;

		retval = synaptics_rmi4_i2c_write(rmi4_data,
				rmi4_data->f01_ctrl_base_addr,
				device_control.data,
				sizeof(device_control.data));
		if (retval < 0)
			return retval;
	}

	return 0;
}

/**
 * synaptics_rmi4_suspend()
 *
 * Called by the kernel during the suspend phase when the system
 * enters suspend.
 *
 * This function stops finger data acquisition and puts the sensor to
 * sleep (if not already done so during the early suspend phase),
 * disables the interrupt, and turns off the power to the sensor.
 */
static int synaptics_rmi4_suspend(struct device *dev)
{
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
	int retval;

	/* Board wants the touch alive across suspend: just note it */
	if (rmi4_data->stay_awake) {
		rmi4_data->staying_awake = true;
		return 0;
	} else
		rmi4_data->staying_awake = false;

	if (rmi4_data->suspended) {
		dev_info(dev, "Already in suspend state\n");
		return 0;
	}

	if (!rmi4_data->fw_updating) {
		/* Early-suspend may already have put the sensor to sleep */
		if (!rmi4_data->sensor_sleep) {
			rmi4_data->touch_stopped = true;
			wake_up(&rmi4_data->wait);
			synaptics_rmi4_irq_enable(rmi4_data, false);
			synaptics_rmi4_sensor_sleep(rmi4_data);
		}

		/* Release any fingers still reported as down */
		synaptics_rmi4_release_all(rmi4_data);

		retval = synaptics_rmi4_regulator_lpm(rmi4_data, true);
		if (retval < 0) {
			dev_err(dev, "failed to enter low power mode\n");
			return retval;
		}
	} else {
		/* A firmware flash is in progress: stay awake */
		dev_err(dev,
			"Firmware updating, cannot go into suspend mode\n");
		return 0;
	}

	if (rmi4_data->board->disable_gpios) {
		retval = synaptics_rmi4_gpio_configure(rmi4_data, false);
		if (retval < 0) {
			dev_err(dev, "failed to put gpios in suspend state\n");
			return retval;
		}
	}
	rmi4_data->suspended = true;

	return 0;
}

/**
 * synaptics_rmi4_resume()
 *
 * Called by the kernel during the resume phase when the system
 * wakes up from suspend.
 *
 * This function turns on the power to the sensor, wakes the sensor
 * from sleep, enables the interrupt, and starts finger data
 * acquisition.
*/ static int synaptics_rmi4_resume(struct device *dev) { struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); int retval; if (rmi4_data->staying_awake) return 0; if (!rmi4_data->suspended) { dev_info(dev, "Already in awake state\n"); return 0; } retval = synaptics_rmi4_regulator_lpm(rmi4_data, false); if (retval < 0) { dev_err(dev, "Failed to enter active power mode\n"); return retval; } if (rmi4_data->board->disable_gpios) { retval = synaptics_rmi4_gpio_configure(rmi4_data, true); if (retval < 0) { dev_err(dev, "Failed to put gpios in active state\n"); return retval; } } synaptics_rmi4_sensor_wake(rmi4_data); rmi4_data->touch_stopped = false; synaptics_rmi4_irq_enable(rmi4_data, true); retval = synaptics_rmi4_check_configuration(rmi4_data); if (retval < 0) { dev_err(dev, "Failed to check configuration\n"); return retval; } rmi4_data->suspended = false; return 0; } #if (!defined(CONFIG_FB) && !defined(CONFIG_HAS_EARLYSUSPEND)) static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = { .suspend = synaptics_rmi4_suspend, .resume = synaptics_rmi4_resume, }; #else static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = { }; #endif #else static void synaptics_rmi4_sensor_wake(struct synaptics_rmi4_data *rmi4_data) { return; }; static void synaptics_rmi4_sensor_sleep(struct synaptics_rmi4_data *rmi4_data) { return; }; static int synaptics_rmi4_check_configuration(struct synaptics_rmi4_data *rmi4_data) { return 0; }; static int synaptics_rmi4_suspend(struct device *dev); { return 0; } static int synaptics_rmi4_resume(struct device *dev); { return 0; } #endif static const struct i2c_device_id synaptics_rmi4_id_table[] = { {DRIVER_NAME, 0}, {}, }; MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table); #ifdef CONFIG_OF static struct of_device_id rmi4_match_table[] = { { .compatible = "synaptics,rmi4",}, { }, }; #else #define rmi4_match_table NULL #endif static struct i2c_driver synaptics_rmi4_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, 
		.of_match_table = rmi4_match_table,
#ifdef CONFIG_PM
		.pm = &synaptics_rmi4_dev_pm_ops,
#endif
	},
	.probe = synaptics_rmi4_probe,
	.remove = __devexit_p(synaptics_rmi4_remove),
	.id_table = synaptics_rmi4_id_table,
};

/**
 * synaptics_rmi4_init()
 *
 * Called by the kernel during do_initcalls (if built-in)
 * or when the driver is loaded (if a module).
 *
 * This function registers the driver to the I2C subsystem.
 *
 */
static int __init synaptics_rmi4_init(void)
{
	return i2c_add_driver(&synaptics_rmi4_driver);
}

/**
 * synaptics_rmi4_exit()
 *
 * Called by the kernel when the driver is unloaded.
 *
 * This funtion unregisters the driver from the I2C subsystem.
 *
 */
static void __exit synaptics_rmi4_exit(void)
{
	i2c_del_driver(&synaptics_rmi4_driver);
}

module_init(synaptics_rmi4_init);
module_exit(synaptics_rmi4_exit);

MODULE_AUTHOR("Synaptics, Inc.");
MODULE_DESCRIPTION("Synaptics RMI4 I2C Touch Driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
mikeNG/android_kernel_oppo_msm8974
drivers/input/misc/stk3x1x.c
1597
72713
/* * stk3x1x.c - Linux kernel modules for sensortek stk301x, stk321x and stk331x * proximity/ambient light sensor * * Copyright (c) 2013, The Linux Foundation. All Rights Reserved. * Copyright (C) 2012 Lex Hsieh / sensortek <lex_hsieh@sitronix.com.tw> or * <lex_hsieh@sensortek.com.tw> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Linux Foundation chooses to take subject only to the GPLv2 license * terms, and distributes only under these terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/mutex.h> #include <linux/kdev_t.h> #include <linux/fs.h> #include <linux/input.h> #include <linux/sensors.h> #include <linux/workqueue.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/kthread.h> #include <linux/errno.h> #include <linux/wakelock.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/fs.h> #include <linux/uaccess.h> #include <linux/regulator/consumer.h> #ifdef CONFIG_OF #include <linux/of_gpio.h> #endif #ifdef CONFIG_HAS_EARLYSUSPEND #include <linux/earlysuspend.h> #endif #include "linux/stk3x1x.h" #define DRIVER_VERSION "3.4.4ts" /* Driver Settings */ #define CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD #ifdef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD #define STK_ALS_CHANGE_THD 20 /* The threshold to trigger ALS interrupt, unit: lux */ #endif /* #ifdef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD */ #define STK_INT_PS_MODE 1 /* 1, 2, or 3 */ #define STK_POLL_PS #define STK_POLL_ALS /* ALS interrupt is valid only when STK_PS_INT_MODE = 1 or 4*/ /* Define Register Map */ #define STK_STATE_REG 0x00 #define STK_PSCTRL_REG 0x01 #define STK_ALSCTRL_REG 0x02 #define STK_LEDCTRL_REG 0x03 #define STK_INT_REG 0x04 #define STK_WAIT_REG 0x05 #define STK_THDH1_PS_REG 0x06 #define STK_THDH2_PS_REG 0x07 #define STK_THDL1_PS_REG 0x08 #define STK_THDL2_PS_REG 0x09 #define STK_THDH1_ALS_REG 0x0A #define STK_THDH2_ALS_REG 0x0B #define STK_THDL1_ALS_REG 0x0C #define STK_THDL2_ALS_REG 0x0D #define STK_FLAG_REG 0x10 #define STK_DATA1_PS_REG 0x11 #define STK_DATA2_PS_REG 0x12 #define STK_DATA1_ALS_REG 0x13 #define STK_DATA2_ALS_REG 0x14 #define STK_DATA1_OFFSET_REG 0x15 #define STK_DATA2_OFFSET_REG 0x16 #define STK_DATA1_IR_REG 0x17 #define STK_DATA2_IR_REG 0x18 #define STK_PDT_ID_REG 0x3E #define STK_RSRVD_REG 0x3F #define STK_SW_RESET_REG 0x80 /* Define state reg */ #define STK_STATE_EN_IRS_SHIFT 7 #define 
STK_STATE_EN_AK_SHIFT 6 #define STK_STATE_EN_ASO_SHIFT 5 #define STK_STATE_EN_IRO_SHIFT 4 #define STK_STATE_EN_WAIT_SHIFT 2 #define STK_STATE_EN_ALS_SHIFT 1 #define STK_STATE_EN_PS_SHIFT 0 #define STK_STATE_EN_IRS_MASK 0x80 #define STK_STATE_EN_AK_MASK 0x40 #define STK_STATE_EN_ASO_MASK 0x20 #define STK_STATE_EN_IRO_MASK 0x10 #define STK_STATE_EN_WAIT_MASK 0x04 #define STK_STATE_EN_ALS_MASK 0x02 #define STK_STATE_EN_PS_MASK 0x01 /* Define PS ctrl reg */ #define STK_PS_PRS_SHIFT 6 #define STK_PS_GAIN_SHIFT 4 #define STK_PS_IT_SHIFT 0 #define STK_PS_PRS_MASK 0xC0 #define STK_PS_GAIN_MASK 0x30 #define STK_PS_IT_MASK 0x0F /* Define ALS ctrl reg */ #define STK_ALS_PRS_SHIFT 6 #define STK_ALS_GAIN_SHIFT 4 #define STK_ALS_IT_SHIFT 0 #define STK_ALS_PRS_MASK 0xC0 #define STK_ALS_GAIN_MASK 0x30 #define STK_ALS_IT_MASK 0x0F /* Define LED ctrl reg */ #define STK_LED_IRDR_SHIFT 6 #define STK_LED_DT_SHIFT 0 #define STK_LED_IRDR_MASK 0xC0 #define STK_LED_DT_MASK 0x3F /* Define interrupt reg */ #define STK_INT_CTRL_SHIFT 7 #define STK_INT_OUI_SHIFT 4 #define STK_INT_ALS_SHIFT 3 #define STK_INT_PS_SHIFT 0 #define STK_INT_CTRL_MASK 0x80 #define STK_INT_OUI_MASK 0x10 #define STK_INT_ALS_MASK 0x08 #define STK_INT_PS_MASK 0x07 #define STK_INT_ALS 0x08 /* Define flag reg */ #define STK_FLG_ALSDR_SHIFT 7 #define STK_FLG_PSDR_SHIFT 6 #define STK_FLG_ALSINT_SHIFT 5 #define STK_FLG_PSINT_SHIFT 4 #define STK_FLG_OUI_SHIFT 2 #define STK_FLG_IR_RDY_SHIFT 1 #define STK_FLG_NF_SHIFT 0 #define STK_FLG_ALSDR_MASK 0x80 #define STK_FLG_PSDR_MASK 0x40 #define STK_FLG_ALSINT_MASK 0x20 #define STK_FLG_PSINT_MASK 0x10 #define STK_FLG_OUI_MASK 0x04 #define STK_FLG_IR_RDY_MASK 0x02 #define STK_FLG_NF_MASK 0x01 /* misc define */ #define MIN_ALS_POLL_DELAY_NS 110000000 #define DEVICE_NAME "stk_ps" #define ALS_NAME "stk3x1x-ls" #define PS_NAME "proximity" /* POWER SUPPLY VOLTAGE RANGE */ #define STK3X1X_VDD_MIN_UV 2000000 #define STK3X1X_VDD_MAX_UV 3300000 #define STK3X1X_VIO_MIN_UV 1750000 #define 
STK3X1X_VIO_MAX_UV 1950000 #define STK_FIR_LEN 16 #define MAX_FIR_LEN 32 static struct sensors_classdev sensors_light_cdev = { .name = "stk3x1x-light", .vendor = "Sensortek", .version = 1, .handle = SENSORS_LIGHT_HANDLE, .type = SENSOR_TYPE_LIGHT, .max_range = "6500", .resolution = "0.0625", .sensor_power = "0.09", .min_delay = (MIN_ALS_POLL_DELAY_NS / 1000), /* us */ .fifo_reserved_event_count = 0, .fifo_max_event_count = 0, .enabled = 0, .delay_msec = 200, .sensors_enable = NULL, .sensors_poll_delay = NULL, }; static struct sensors_classdev sensors_proximity_cdev = { .name = "stk3x1x-proximity", .vendor = "Sensortek", .version = 1, .handle = SENSORS_PROXIMITY_HANDLE, .type = SENSOR_TYPE_PROXIMITY, .max_range = "5.0", .resolution = "5.0", .sensor_power = "0.1", .min_delay = 0, .fifo_reserved_event_count = 0, .fifo_max_event_count = 0, .enabled = 0, .delay_msec = 200, .sensors_enable = NULL, .sensors_poll_delay = NULL, }; struct data_filter { u16 raw[MAX_FIR_LEN]; int sum; int number; int idx; }; struct stk3x1x_data { struct i2c_client *client; struct stk3x1x_platform_data *pdata; struct sensors_classdev als_cdev; struct sensors_classdev ps_cdev; #if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS)) int32_t irq; struct work_struct stk_work; struct workqueue_struct *stk_wq; #endif int int_pin; uint8_t wait_reg; #ifdef CONFIG_HAS_EARLYSUSPEND struct early_suspend stk_early_suspend; #endif uint16_t ps_thd_h; uint16_t ps_thd_l; struct mutex io_lock; struct input_dev *ps_input_dev; int32_t ps_distance_last; bool ps_enabled; struct wake_lock ps_wakelock; struct work_struct stk_ps_work; struct workqueue_struct *stk_ps_wq; #ifdef STK_POLL_PS struct wake_lock ps_nosuspend_wl; #endif struct input_dev *als_input_dev; int32_t als_lux_last; uint32_t als_transmittance; bool als_enabled; struct hrtimer als_timer; struct hrtimer ps_timer; ktime_t als_poll_delay; ktime_t ps_poll_delay; #ifdef STK_POLL_ALS struct work_struct stk_als_work; struct workqueue_struct *stk_als_wq; #endif 
struct regulator *vdd; struct regulator *vio; bool power_enabled; bool use_fir; struct data_filter fir; atomic_t firlength; }; #if( !defined(CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD)) static uint32_t lux_threshold_table[] = { 3, 10, 40, 65, 145, 300, 550, 930, 1250, 1700, }; #define LUX_THD_TABLE_SIZE (sizeof(lux_threshold_table)/sizeof(uint32_t)+1) static uint16_t code_threshold_table[LUX_THD_TABLE_SIZE+1]; #endif static int32_t stk3x1x_enable_ps(struct stk3x1x_data *ps_data, uint8_t enable); static int32_t stk3x1x_enable_als(struct stk3x1x_data *ps_data, uint8_t enable); static int32_t stk3x1x_set_ps_thd_l(struct stk3x1x_data *ps_data, uint16_t thd_l); static int32_t stk3x1x_set_ps_thd_h(struct stk3x1x_data *ps_data, uint16_t thd_h); static int32_t stk3x1x_set_als_thd_l(struct stk3x1x_data *ps_data, uint16_t thd_l); static int32_t stk3x1x_set_als_thd_h(struct stk3x1x_data *ps_data, uint16_t thd_h); static int stk3x1x_device_ctl(struct stk3x1x_data *ps_data, bool enable); //static int32_t stk3x1x_set_ps_aoffset(struct stk3x1x_data *ps_data, uint16_t offset); inline uint32_t stk_alscode2lux(struct stk3x1x_data *ps_data, uint32_t alscode) { alscode += ((alscode<<7)+(alscode<<3)+(alscode>>1)); alscode<<=3; alscode/=ps_data->als_transmittance; return alscode; } inline uint32_t stk_lux2alscode(struct stk3x1x_data *ps_data, uint32_t lux) { lux*=ps_data->als_transmittance; lux/=1100; if (unlikely(lux>=(1<<16))) lux = (1<<16) -1; return lux; } #ifndef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD static void stk_init_code_threshold_table(struct stk3x1x_data *ps_data) { uint32_t i,j; uint32_t alscode; code_threshold_table[0] = 0; #ifdef STK_DEBUG_PRINTF printk(KERN_INFO "alscode[0]=%d\n",0); #endif for (i=1,j=0;i<LUX_THD_TABLE_SIZE;i++,j++) { alscode = stk_lux2alscode(ps_data, lux_threshold_table[j]); dev_dbg(&ps_data->client->dev, "alscode[%d]=%d\n", i, alscode); code_threshold_table[i] = (uint16_t)(alscode); } code_threshold_table[i] = 0xffff; dev_dbg(&ps_data->client->dev, 
"alscode[%d]=%d\n", i, alscode); } static uint32_t stk_get_lux_interval_index(uint16_t alscode) { uint32_t i; for (i=1;i<=LUX_THD_TABLE_SIZE;i++) { if ((alscode>=code_threshold_table[i-1])&&(alscode<code_threshold_table[i])) { return i; } } return LUX_THD_TABLE_SIZE; } #else inline void stk_als_set_new_thd(struct stk3x1x_data *ps_data, uint16_t alscode) { int32_t high_thd,low_thd; high_thd = alscode + stk_lux2alscode(ps_data, STK_ALS_CHANGE_THD); low_thd = alscode - stk_lux2alscode(ps_data, STK_ALS_CHANGE_THD); if (high_thd >= (1<<16)) high_thd = (1<<16) -1; if (low_thd <0) low_thd = 0; stk3x1x_set_als_thd_h(ps_data, (uint16_t)high_thd); stk3x1x_set_als_thd_l(ps_data, (uint16_t)low_thd); } #endif // CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD static int32_t stk3x1x_init_all_reg(struct stk3x1x_data *ps_data, struct stk3x1x_platform_data *plat_data) { int32_t ret; uint8_t w_reg; w_reg = plat_data->state_reg; ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_reg); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } ps_data->ps_thd_h = plat_data->ps_thd_h; ps_data->ps_thd_l = plat_data->ps_thd_l; w_reg = plat_data->psctrl_reg; ret = i2c_smbus_write_byte_data(ps_data->client, STK_PSCTRL_REG, w_reg); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } w_reg = plat_data->alsctrl_reg; ret = i2c_smbus_write_byte_data(ps_data->client, STK_ALSCTRL_REG, w_reg); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } w_reg = plat_data->ledctrl_reg; ret = i2c_smbus_write_byte_data(ps_data->client, STK_LEDCTRL_REG, w_reg); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } ps_data->wait_reg = plat_data->wait_reg; if(ps_data->wait_reg < 2) { printk(KERN_WARNING "%s: wait_reg should be larger than 2, force to write 2\n", __func__); ps_data->wait_reg = 2; } else if (ps_data->wait_reg > 0xFF) { printk(KERN_WARNING "%s: wait_reg should be less than 0xFF, force to 
write 0xFF\n", __func__); ps_data->wait_reg = 0xFF; } w_reg = plat_data->wait_reg; ret = i2c_smbus_write_byte_data(ps_data->client, STK_WAIT_REG, w_reg); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } stk3x1x_set_ps_thd_h(ps_data, ps_data->ps_thd_h); stk3x1x_set_ps_thd_l(ps_data, ps_data->ps_thd_l); w_reg = 0; #ifndef STK_POLL_PS w_reg |= STK_INT_PS_MODE; #else w_reg |= 0x01; #endif #if (!defined(STK_POLL_ALS) && (STK_INT_PS_MODE != 0x02) && (STK_INT_PS_MODE != 0x03)) w_reg |= STK_INT_ALS; #endif ret = i2c_smbus_write_byte_data(ps_data->client, STK_INT_REG, w_reg); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } ret = i2c_smbus_write_byte_data(ps_data->client, 0x87, 0x60); if (ret < 0) { dev_err(&ps_data->client->dev, "%s: write i2c error\n", __func__); return ret; } return 0; } static int32_t stk3x1x_check_pid(struct stk3x1x_data *ps_data) { int32_t err1, err2; err1 = i2c_smbus_read_byte_data(ps_data->client,STK_PDT_ID_REG); if (err1 < 0) { printk(KERN_ERR "%s: read i2c error, err=%d\n", __func__, err1); return err1; } err2 = i2c_smbus_read_byte_data(ps_data->client,STK_RSRVD_REG); if (err2 < 0) { printk(KERN_ERR "%s: read i2c error, err=%d\n", __func__, err2); return -1; } if(err2 == 0xC0) printk(KERN_INFO "%s: RID=0xC0!!!!!!!!!!!!!\n", __func__); return 0; } static int32_t stk3x1x_software_reset(struct stk3x1x_data *ps_data) { int32_t r; uint8_t w_reg; w_reg = 0x7F; r = i2c_smbus_write_byte_data(ps_data->client,STK_WAIT_REG,w_reg); if (r<0) { printk(KERN_ERR "%s: software reset: write i2c error, ret=%d\n", __func__, r); return r; } r = i2c_smbus_read_byte_data(ps_data->client,STK_WAIT_REG); if (w_reg != r) { printk(KERN_ERR "%s: software reset: read-back value is not the same\n", __func__); return -1; } r = i2c_smbus_write_byte_data(ps_data->client,STK_SW_RESET_REG,0); if (r<0) { printk(KERN_ERR "%s: software reset: read error after reset\n", __func__); return r; } msleep(1); return 0; } static 
int32_t stk3x1x_set_als_thd_l(struct stk3x1x_data *ps_data, uint16_t thd_l) { uint8_t temp; uint8_t* pSrc = (uint8_t*)&thd_l; temp = *pSrc; *pSrc = *(pSrc+1); *(pSrc+1) = temp; return i2c_smbus_write_word_data(ps_data->client,STK_THDL1_ALS_REG,thd_l); } static int32_t stk3x1x_set_als_thd_h(struct stk3x1x_data *ps_data, uint16_t thd_h) { uint8_t temp; uint8_t* pSrc = (uint8_t*)&thd_h; temp = *pSrc; *pSrc = *(pSrc+1); *(pSrc+1) = temp; return i2c_smbus_write_word_data(ps_data->client,STK_THDH1_ALS_REG,thd_h); } static int32_t stk3x1x_set_ps_thd_l(struct stk3x1x_data *ps_data, uint16_t thd_l) { uint8_t temp; uint8_t* pSrc = (uint8_t*)&thd_l; temp = *pSrc; *pSrc = *(pSrc+1); *(pSrc+1) = temp; ps_data->ps_thd_l = thd_l; return i2c_smbus_write_word_data(ps_data->client,STK_THDL1_PS_REG,thd_l); } static int32_t stk3x1x_set_ps_thd_h(struct stk3x1x_data *ps_data, uint16_t thd_h) { uint8_t temp; uint8_t* pSrc = (uint8_t*)&thd_h; temp = *pSrc; *pSrc = *(pSrc+1); *(pSrc+1) = temp; ps_data->ps_thd_h = thd_h; return i2c_smbus_write_word_data(ps_data->client,STK_THDH1_PS_REG,thd_h); } /* static int32_t stk3x1x_set_ps_foffset(struct stk3x1x_data *ps_data, uint16_t offset) { uint8_t temp; uint8_t* pSrc = (uint8_t*)&offset; temp = *pSrc; *pSrc = *(pSrc+1); *(pSrc+1) = temp; return i2c_smbus_write_word_data(ps_data->client,STK_DATA1_OFFSET_REG,offset); } static int32_t stk3x1x_set_ps_aoffset(struct stk3x1x_data *ps_data, uint16_t offset) { uint8_t temp; uint8_t* pSrc = (uint8_t*)&offset; int ret; uint8_t w_state_reg; uint8_t re_en; ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } re_en = (ret & STK_STATE_EN_AK_MASK) ? 
1: 0; if(re_en) { w_state_reg = (uint8_t)(ret & (~STK_STATE_EN_AK_MASK)); ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } msleep(1); } temp = *pSrc; *pSrc = *(pSrc+1); *(pSrc+1) = temp; ret = i2c_smbus_write_word_data(ps_data->client,0x0E,offset); if(!re_en) return ret; w_state_reg |= STK_STATE_EN_AK_MASK; ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } return 0; } */ static inline uint32_t stk3x1x_get_ps_reading(struct stk3x1x_data *ps_data) { int32_t word_data, tmp_word_data; tmp_word_data = i2c_smbus_read_word_data(ps_data->client,STK_DATA1_PS_REG); if(tmp_word_data < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, tmp_word_data); return tmp_word_data; } word_data = ((tmp_word_data & 0xFF00) >> 8) | ((tmp_word_data & 0x00FF) << 8) ; return word_data; } static int32_t stk3x1x_set_flag(struct stk3x1x_data *ps_data, uint8_t org_flag_reg, uint8_t clr) { uint8_t w_flag; w_flag = org_flag_reg | (STK_FLG_ALSINT_MASK | STK_FLG_PSINT_MASK | STK_FLG_OUI_MASK | STK_FLG_IR_RDY_MASK); w_flag &= (~clr); //printk(KERN_INFO "%s: org_flag_reg=0x%x, w_flag = 0x%x\n", __func__, org_flag_reg, w_flag); return i2c_smbus_write_byte_data(ps_data->client,STK_FLAG_REG, w_flag); } static int32_t stk3x1x_get_flag(struct stk3x1x_data *ps_data) { return i2c_smbus_read_byte_data(ps_data->client,STK_FLAG_REG); } static int32_t stk3x1x_enable_ps(struct stk3x1x_data *ps_data, uint8_t enable) { int32_t ret; uint8_t w_state_reg; uint8_t curr_ps_enable; curr_ps_enable = ps_data->ps_enabled?1:0; if(curr_ps_enable == enable) return 0; if (enable) { ret = stk3x1x_device_ctl(ps_data, enable); if (ret) return ret; } ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG); if (ret < 0) { printk(KERN_ERR "%s: write i2c error, ret=%d\n", __func__, ret); return ret; } 
w_state_reg = ret; w_state_reg &= ~(STK_STATE_EN_PS_MASK | STK_STATE_EN_WAIT_MASK | 0x60); if(enable) { w_state_reg |= STK_STATE_EN_PS_MASK; if(!(ps_data->als_enabled)) w_state_reg |= STK_STATE_EN_WAIT_MASK; } ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg); if (ret < 0) { printk(KERN_ERR "%s: write i2c error, ret=%d\n", __func__, ret); return ret; } if(enable) { #ifdef STK_POLL_PS hrtimer_start(&ps_data->ps_timer, ps_data->ps_poll_delay, HRTIMER_MODE_REL); ps_data->ps_distance_last = -1; #endif ps_data->ps_enabled = true; #ifndef STK_POLL_PS #ifndef STK_POLL_ALS if(!(ps_data->als_enabled)) #endif /* #ifndef STK_POLL_ALS */ enable_irq(ps_data->irq); msleep(1); ret = stk3x1x_get_flag(ps_data); if (ret < 0) { printk(KERN_ERR "%s: read i2c error, ret=%d\n", __func__, ret); return ret; } near_far_state = ret & STK_FLG_NF_MASK; ps_data->ps_distance_last = near_far_state; input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, near_far_state); input_sync(ps_data->ps_input_dev); wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ); reading = stk3x1x_get_ps_reading(ps_data); dev_dbg(&ps_data->client->dev, "%s: ps input event=%d, ps code = %d\n", __func__, near_far_state, reading); #endif /* #ifndef STK_POLL_PS */ } else { #ifdef STK_POLL_PS hrtimer_cancel(&ps_data->ps_timer); #else #ifndef STK_POLL_ALS if(!(ps_data->als_enabled)) #endif disable_irq(ps_data->irq); #endif ps_data->ps_enabled = false; } if (!enable) { ret = stk3x1x_device_ctl(ps_data, enable); if (ret) return ret; } return ret; } static int32_t stk3x1x_enable_als(struct stk3x1x_data *ps_data, uint8_t enable) { int32_t ret; uint8_t w_state_reg; uint8_t curr_als_enable = (ps_data->als_enabled)?1:0; if(curr_als_enable == enable) return 0; if (enable) { ret = stk3x1x_device_ctl(ps_data, enable); if (ret) return ret; } #ifndef STK_POLL_ALS if (enable) { stk3x1x_set_als_thd_h(ps_data, 0x0000); stk3x1x_set_als_thd_l(ps_data, 0xFFFF); } #endif ret = i2c_smbus_read_byte_data(ps_data->client, 
STK_STATE_REG); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } w_state_reg = (uint8_t)(ret & (~(STK_STATE_EN_ALS_MASK | STK_STATE_EN_WAIT_MASK))); if(enable) w_state_reg |= STK_STATE_EN_ALS_MASK; else if (ps_data->ps_enabled) w_state_reg |= STK_STATE_EN_WAIT_MASK; ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } if (enable) { ps_data->als_enabled = true; #ifdef STK_POLL_ALS hrtimer_start(&ps_data->als_timer, ps_data->als_poll_delay, HRTIMER_MODE_REL); #else #ifndef STK_POLL_PS if(!(ps_data->ps_enabled)) #endif enable_irq(ps_data->irq); #endif } else { ps_data->als_enabled = false; #ifdef STK_POLL_ALS hrtimer_cancel(&ps_data->als_timer); #else #ifndef STK_POLL_PS if(!(ps_data->ps_enabled)) #endif disable_irq(ps_data->irq); #endif } if (!enable) { ret = stk3x1x_device_ctl(ps_data, enable); if (ret) return ret; } return ret; } static inline int32_t stk3x1x_filter_reading(struct stk3x1x_data *ps_data, int32_t word_data) { int index; int firlen = atomic_read(&ps_data->firlength); if (ps_data->fir.number < firlen) { ps_data->fir.raw[ps_data->fir.number] = word_data; ps_data->fir.sum += word_data; ps_data->fir.number++; ps_data->fir.idx++; } else { index = ps_data->fir.idx % firlen; ps_data->fir.sum -= ps_data->fir.raw[index]; ps_data->fir.raw[index] = word_data; ps_data->fir.sum += word_data; ps_data->fir.idx++; word_data = ps_data->fir.sum/firlen; } return word_data; } static inline int32_t stk3x1x_get_als_reading(struct stk3x1x_data *ps_data) { int32_t word_data, tmp_word_data; tmp_word_data = i2c_smbus_read_word_data(ps_data->client, STK_DATA1_ALS_REG); if(tmp_word_data < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, tmp_word_data); return tmp_word_data; } word_data = ((tmp_word_data & 0xFF00) >> 8) | ((tmp_word_data & 0x00FF) << 8) ; if (ps_data->use_fir) word_data = stk3x1x_filter_reading(ps_data, word_data); return 
word_data; } static int32_t stk3x1x_get_ir_reading(struct stk3x1x_data *ps_data) { int32_t word_data, tmp_word_data; int32_t ret; uint8_t w_reg, retry = 0; if(ps_data->ps_enabled) { stk3x1x_enable_ps(ps_data, 0); ps_data->ps_enabled = true; } ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } w_reg = (uint8_t)(ret & (~STK_STATE_EN_IRS_MASK)); w_reg |= STK_STATE_EN_IRS_MASK; ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_reg); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } msleep(100); do { msleep(50); ret = stk3x1x_get_flag(ps_data); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } retry++; }while(retry < 5 && ((ret&STK_FLG_IR_RDY_MASK) == 0)); if(retry == 5) { printk(KERN_ERR "%s: ir data is not ready for 300ms\n", __func__); return -EINVAL; } ret = stk3x1x_get_flag(ps_data); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } ret = stk3x1x_set_flag(ps_data, ret, STK_FLG_IR_RDY_MASK); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } tmp_word_data = i2c_smbus_read_word_data(ps_data->client, STK_DATA1_IR_REG); if(tmp_word_data < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, tmp_word_data); return tmp_word_data; } word_data = ((tmp_word_data & 0xFF00) >> 8) | ((tmp_word_data & 0x00FF) << 8) ; if(ps_data->ps_enabled) stk3x1x_enable_ps(ps_data, 1); return word_data; } static ssize_t stk_als_code_show(struct device *dev, struct device_attribute *attr, char *buf) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); int32_t reading; reading = stk3x1x_get_als_reading(ps_data); return scnprintf(buf, PAGE_SIZE, "%d\n", reading); } static ssize_t stk_als_enable_set(struct sensors_classdev *sensors_cdev, unsigned int enabled) { struct stk3x1x_data *als_data = container_of(sensors_cdev, struct stk3x1x_data, als_cdev); int err; 
mutex_lock(&als_data->io_lock); err = stk3x1x_enable_als(als_data, enabled); mutex_unlock(&als_data->io_lock); if (err < 0) return err; return 0; } static ssize_t stk_als_enable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); int32_t enable, ret; mutex_lock(&ps_data->io_lock); enable = (ps_data->als_enabled)?1:0; mutex_unlock(&ps_data->io_lock); ret = i2c_smbus_read_byte_data(ps_data->client,STK_STATE_REG); ret = (ret & STK_STATE_EN_ALS_MASK)?1:0; if(enable != ret) printk(KERN_ERR "%s: driver and sensor mismatch! driver_enable=0x%x, sensor_enable=%x\n", __func__, enable, ret); return scnprintf(buf, PAGE_SIZE, "%d\n", ret); } static ssize_t stk_als_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); uint8_t en; if (sysfs_streq(buf, "1")) en = 1; else if (sysfs_streq(buf, "0")) en = 0; else { printk(KERN_ERR "%s, invalid value %d\n", __func__, *buf); return -EINVAL; } dev_dbg(dev, "%s: Enable ALS : %d\n", __func__, en); mutex_lock(&ps_data->io_lock); stk3x1x_enable_als(ps_data, en); mutex_unlock(&ps_data->io_lock); return size; } static ssize_t stk_als_lux_show(struct device *dev, struct device_attribute *attr, char *buf) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); int32_t als_reading; uint32_t als_lux; als_reading = stk3x1x_get_als_reading(ps_data); mutex_lock(&ps_data->io_lock); als_lux = stk_alscode2lux(ps_data, als_reading); mutex_unlock(&ps_data->io_lock); return scnprintf(buf, PAGE_SIZE, "%d lux\n", als_lux); } static ssize_t stk_als_lux_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); unsigned long value = 0; int ret; ret = kstrtoul(buf, 16, &value); if(ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret); return ret; } mutex_lock(&ps_data->io_lock); 
ps_data->als_lux_last = value; input_report_abs(ps_data->als_input_dev, ABS_MISC, value); input_sync(ps_data->als_input_dev); mutex_unlock(&ps_data->io_lock); dev_dbg(dev, "%s: als input event %ld lux\n", __func__, value); return size; } static ssize_t stk_als_transmittance_show(struct device *dev, struct device_attribute *attr, char *buf) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); int32_t transmittance; mutex_lock(&ps_data->io_lock); transmittance = ps_data->als_transmittance; mutex_unlock(&ps_data->io_lock); return scnprintf(buf, PAGE_SIZE, "%d\n", transmittance); } static ssize_t stk_als_transmittance_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); unsigned long value = 0; int ret; ret = kstrtoul(buf, 10, &value); if(ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret); return ret; } mutex_lock(&ps_data->io_lock); ps_data->als_transmittance = value; mutex_unlock(&ps_data->io_lock); return size; } static ssize_t stk_als_delay_show(struct device *dev, struct device_attribute *attr, char *buf) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%u\n", (u32)ktime_to_ms(ps_data->als_poll_delay)); } static inline void stk_als_delay_store_fir(struct stk3x1x_data *ps_data) { ps_data->fir.number = 0; ps_data->fir.idx = 0; ps_data->fir.sum = 0; } static ssize_t stk_als_poll_delay_set(struct sensors_classdev *sensors_cdev, unsigned int delay_msec) { struct stk3x1x_data *als_data = container_of(sensors_cdev, struct stk3x1x_data, als_cdev); uint64_t value = 0; value = delay_msec * 1000000; if (value < MIN_ALS_POLL_DELAY_NS) value = MIN_ALS_POLL_DELAY_NS; mutex_lock(&als_data->io_lock); if (value != ktime_to_ns(als_data->als_poll_delay)) als_data->als_poll_delay = ns_to_ktime(value); if (als_data->use_fir) stk_als_delay_store_fir(als_data); mutex_unlock(&als_data->io_lock); return 0; } static ssize_t 
stk_als_delay_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { uint64_t value = 0; int ret; struct stk3x1x_data *als_data = dev_get_drvdata(dev); ret = kstrtoull(buf, 10, &value); if(ret < 0) { dev_err(dev, "%s:kstrtoull failed, ret=0x%x\n", __func__, ret); return ret; } #ifdef STK_DEBUG_PRINTF dev_dbg(dev, "%s: set als poll delay=%lld\n", __func__, value); #endif ret = stk_als_poll_delay_set(&als_data->als_cdev, value); if (ret < 0) return ret; return size; } static ssize_t stk_als_ir_code_show(struct device *dev, struct device_attribute *attr, char *buf) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); int32_t reading; reading = stk3x1x_get_ir_reading(ps_data); return scnprintf(buf, PAGE_SIZE, "%d\n", reading); } static ssize_t stk_als_firlen_show(struct device *dev, struct device_attribute *attr, char *buf) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); int len = atomic_read(&ps_data->firlength); dev_dbg(dev, "%s: len = %2d, idx = %2d\n", __func__, len, ps_data->fir.idx); dev_dbg(dev, "%s: sum = %5d, ave = %5d\n", __func__, ps_data->fir.sum, ps_data->fir.sum/len); return scnprintf(buf, PAGE_SIZE, "%d\n", len); } static ssize_t stk_als_firlen_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { uint64_t value = 0; int ret; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); ret = kstrtoull(buf, 10, &value); if (ret < 0) { dev_err(dev, "%s:strict_strtoull failed, ret=0x%x\n", __func__, ret); return ret; } if (value > MAX_FIR_LEN) { dev_err(dev, "%s: firlen exceed maximum filter length\n", __func__); } else if (value < 1) { atomic_set(&ps_data->firlength, 1); memset(&ps_data->fir, 0x00, sizeof(ps_data->fir)); } else { atomic_set(&ps_data->firlength, value); memset(&ps_data->fir, 0x00, sizeof(ps_data->fir)); } return size; } static ssize_t stk_als_fir_enable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct stk3x1x_data *ps_data = 
dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%d\n", ps_data->use_fir); } static ssize_t stk_als_fir_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { uint64_t value = 0; int ret; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); ret = kstrtoull(buf, 10, &value); if (ret < 0) { dev_err(dev, "%s:strict_strtoull failed, ret=0x%x\n", __func__, ret); return ret; } if (value) { ps_data->use_fir = true; memset(&ps_data->fir, 0x00, sizeof(ps_data->fir)); } else { ps_data->use_fir = false; } return size; } static ssize_t stk_ps_code_show(struct device *dev, struct device_attribute *attr, char *buf) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); uint32_t reading; reading = stk3x1x_get_ps_reading(ps_data); return scnprintf(buf, PAGE_SIZE, "%d\n", reading); } static ssize_t stk_ps_enable_set(struct sensors_classdev *sensors_cdev, unsigned int enabled) { struct stk3x1x_data *ps_data = container_of(sensors_cdev, struct stk3x1x_data, ps_cdev); int err; mutex_lock(&ps_data->io_lock); err = stk3x1x_enable_ps(ps_data, enabled); mutex_unlock(&ps_data->io_lock); if (err < 0) return err; return 0; } static ssize_t stk_ps_enable_show(struct device *dev, struct device_attribute *attr, char *buf) { int32_t enable, ret; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); mutex_lock(&ps_data->io_lock); enable = (ps_data->ps_enabled)?1:0; mutex_unlock(&ps_data->io_lock); ret = i2c_smbus_read_byte_data(ps_data->client,STK_STATE_REG); ret = (ret & STK_STATE_EN_PS_MASK)?1:0; if(enable != ret) printk(KERN_ERR "%s: driver and sensor mismatch! 
driver_enable=0x%x, sensor_enable=%x\n", __func__, enable, ret); return scnprintf(buf, PAGE_SIZE, "%d\n", ret); } static ssize_t stk_ps_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); uint8_t en; if (sysfs_streq(buf, "1")) en = 1; else if (sysfs_streq(buf, "0")) en = 0; else { printk(KERN_ERR "%s, invalid value %d\n", __func__, *buf); return -EINVAL; } dev_dbg(dev, "%s: Enable PS : %d\n", __func__, en); mutex_lock(&ps_data->io_lock); stk3x1x_enable_ps(ps_data, en); mutex_unlock(&ps_data->io_lock); return size; } static ssize_t stk_ps_enable_aso_show(struct device *dev, struct device_attribute *attr, char *buf) { int32_t ret; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); ret = i2c_smbus_read_byte_data(ps_data->client,STK_STATE_REG); ret = (ret & STK_STATE_EN_ASO_MASK)?1:0; return scnprintf(buf, PAGE_SIZE, "%d\n", ret); } static ssize_t stk_ps_enable_aso_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); uint8_t en; int32_t ret; uint8_t w_state_reg; if (sysfs_streq(buf, "1")) en = 1; else if (sysfs_streq(buf, "0")) en = 0; else { printk(KERN_ERR "%s, invalid value %d\n", __func__, *buf); return -EINVAL; } dev_dbg(dev, "%s: Enable PS ASO : %d\n", __func__, en); ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } w_state_reg = (uint8_t)(ret & (~STK_STATE_EN_ASO_MASK)); if(en) w_state_reg |= STK_STATE_EN_ASO_MASK; ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg); if (ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } return size; } static ssize_t stk_ps_offset_show(struct device *dev, struct device_attribute *attr, char *buf) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); int32_t word_data, tmp_word_data; tmp_word_data = 
i2c_smbus_read_word_data(ps_data->client, STK_DATA1_OFFSET_REG); if(tmp_word_data < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, tmp_word_data); return tmp_word_data; } word_data = ((tmp_word_data & 0xFF00) >> 8) | ((tmp_word_data & 0x00FF) << 8) ; return scnprintf(buf, PAGE_SIZE, "%d\n", word_data); } static ssize_t stk_ps_offset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); unsigned long value = 0; int ret; uint16_t offset; ret = kstrtoul(buf, 10, &value); if(ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret); return ret; } if(value > 65535) { printk(KERN_ERR "%s: invalid value, offset=%ld\n", __func__, value); return -EINVAL; } offset = (uint16_t) ((value&0x00FF) << 8) | ((value&0xFF00) >>8); ret = i2c_smbus_write_word_data(ps_data->client,STK_DATA1_OFFSET_REG,offset); if(ret < 0) { printk(KERN_ERR "%s: write i2c error\n", __func__); return ret; } return size; } static ssize_t stk_ps_distance_show(struct device *dev, struct device_attribute *attr, char *buf) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); int32_t dist=1, ret; mutex_lock(&ps_data->io_lock); ret = stk3x1x_get_flag(ps_data); if(ret < 0) { printk(KERN_ERR "%s: stk3x1x_get_flag failed, ret=0x%x\n", __func__, ret); return ret; } dist = (ret & STK_FLG_NF_MASK)?1:0; ps_data->ps_distance_last = dist; input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, dist); input_sync(ps_data->ps_input_dev); mutex_unlock(&ps_data->io_lock); wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ); dev_dbg(dev, "%s: ps input event %d cm\n", __func__, dist); return scnprintf(buf, PAGE_SIZE, "%d\n", dist); } static ssize_t stk_ps_distance_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); unsigned long value = 0; int ret; ret = kstrtoul(buf, 10, &value); if(ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, 
ret=0x%x\n", __func__, ret); return ret; } mutex_lock(&ps_data->io_lock); ps_data->ps_distance_last = value; input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, value); input_sync(ps_data->ps_input_dev); mutex_unlock(&ps_data->io_lock); wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ); dev_dbg(dev, "%s: ps input event %ld cm\n", __func__, value); return size; } static ssize_t stk_ps_code_thd_l_show(struct device *dev, struct device_attribute *attr, char *buf) { int32_t ps_thd_l1_reg, ps_thd_l2_reg; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); mutex_lock(&ps_data->io_lock); ps_thd_l1_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDL1_PS_REG); if(ps_thd_l1_reg < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, ps_thd_l1_reg); return -EINVAL; } ps_thd_l2_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDL2_PS_REG); if(ps_thd_l2_reg < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, ps_thd_l2_reg); return -EINVAL; } mutex_unlock(&ps_data->io_lock); ps_thd_l1_reg = ps_thd_l1_reg<<8 | ps_thd_l2_reg; return scnprintf(buf, PAGE_SIZE, "%d\n", ps_thd_l1_reg); } static ssize_t stk_ps_code_thd_l_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); unsigned long value = 0; int ret; ret = kstrtoul(buf, 10, &value); if(ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret); return ret; } mutex_lock(&ps_data->io_lock); stk3x1x_set_ps_thd_l(ps_data, value); mutex_unlock(&ps_data->io_lock); return size; } static ssize_t stk_ps_code_thd_h_show(struct device *dev, struct device_attribute *attr, char *buf) { int32_t ps_thd_h1_reg, ps_thd_h2_reg; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); mutex_lock(&ps_data->io_lock); ps_thd_h1_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDH1_PS_REG); if(ps_thd_h1_reg < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, ps_thd_h1_reg); return -EINVAL; } ps_thd_h2_reg = 
i2c_smbus_read_byte_data(ps_data->client,STK_THDH2_PS_REG); if(ps_thd_h2_reg < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, ps_thd_h2_reg); return -EINVAL; } mutex_unlock(&ps_data->io_lock); ps_thd_h1_reg = ps_thd_h1_reg<<8 | ps_thd_h2_reg; return scnprintf(buf, PAGE_SIZE, "%d\n", ps_thd_h1_reg); } static ssize_t stk_ps_code_thd_h_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); unsigned long value = 0; int ret; ret = kstrtoul(buf, 10, &value); if(ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret); return ret; } mutex_lock(&ps_data->io_lock); stk3x1x_set_ps_thd_h(ps_data, value); mutex_unlock(&ps_data->io_lock); return size; } #if 0 static ssize_t stk_als_lux_thd_l_show(struct device *dev, struct device_attribute *attr, char *buf) { int32_t als_thd_l0_reg,als_thd_l1_reg; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); uint32_t als_lux; mutex_lock(&ps_data->io_lock); als_thd_l0_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDL1_ALS_REG); als_thd_l1_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDL2_ALS_REG); if(als_thd_l0_reg < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, als_thd_l0_reg); return -EINVAL; } if(als_thd_l1_reg < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, als_thd_l1_reg); return -EINVAL; } als_thd_l0_reg|=(als_thd_l1_reg<<8); als_lux = stk_alscode2lux(ps_data, als_thd_l0_reg); mutex_unlock(&ps_data->io_lock); return scnprintf(buf, PAGE_SIZE, "%d\n", als_lux); } static ssize_t stk_als_lux_thd_l_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); unsigned long value = 0; int ret; ret = kstrtoul(buf, 10, &value); if(ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret); return ret; } mutex_lock(&ps_data->io_lock); value = stk_lux2alscode(ps_data, value); stk3x1x_set_als_thd_l(ps_data, 
value); mutex_unlock(&ps_data->io_lock); return size; } static ssize_t stk_als_lux_thd_h_show(struct device *dev, struct device_attribute *attr, char *buf) { int32_t als_thd_h0_reg,als_thd_h1_reg; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); uint32_t als_lux; mutex_lock(&ps_data->io_lock); als_thd_h0_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDH1_ALS_REG); als_thd_h1_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDH2_ALS_REG); if(als_thd_h0_reg < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, als_thd_h0_reg); return -EINVAL; } if(als_thd_h1_reg < 0) { printk(KERN_ERR "%s fail, err=0x%x", __func__, als_thd_h1_reg); return -EINVAL; } als_thd_h0_reg|=(als_thd_h1_reg<<8); als_lux = stk_alscode2lux(ps_data, als_thd_h0_reg); mutex_unlock(&ps_data->io_lock); return scnprintf(buf, PAGE_SIZE, "%d\n", als_lux); } static ssize_t stk_als_lux_thd_h_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct stk3x1x_data *ps_data = dev_get_drvdata(dev); unsigned long value = 0; int ret; ret = strict_strtoul(buf, 10, &value); if(ret < 0) { printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret); return ret; } mutex_lock(&ps_data->io_lock); value = stk_lux2alscode(ps_data, value); stk3x1x_set_als_thd_h(ps_data, value); mutex_unlock(&ps_data->io_lock); return size; } #endif static ssize_t stk_all_reg_show(struct device *dev, struct device_attribute *attr, char *buf) { int32_t ps_reg[27]; uint8_t cnt; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); mutex_lock(&ps_data->io_lock); for(cnt=0;cnt<25;cnt++) { ps_reg[cnt] = i2c_smbus_read_byte_data(ps_data->client, (cnt)); if(ps_reg[cnt] < 0) { mutex_unlock(&ps_data->io_lock); printk(KERN_ERR "stk_all_reg_show:i2c_smbus_read_byte_data fail, ret=%d", ps_reg[cnt]); return -EINVAL; } else { dev_dbg(dev, "reg[0x%2X]=0x%2X\n", cnt, ps_reg[cnt]); } } ps_reg[cnt] = i2c_smbus_read_byte_data(ps_data->client, STK_PDT_ID_REG); if(ps_reg[cnt] < 0) { 
mutex_unlock(&ps_data->io_lock); printk( KERN_ERR "all_reg_show:i2c_smbus_read_byte_data fail, ret=%d", ps_reg[cnt]); return -EINVAL; } dev_dbg(dev, "reg[0x%x]=0x%2X\n", STK_PDT_ID_REG, ps_reg[cnt]); cnt++; ps_reg[cnt] = i2c_smbus_read_byte_data(ps_data->client, STK_RSRVD_REG); if(ps_reg[cnt] < 0) { mutex_unlock(&ps_data->io_lock); printk( KERN_ERR "all_reg_show:i2c_smbus_read_byte_data fail, ret=%d", ps_reg[cnt]); return -EINVAL; } dev_dbg(dev, "reg[0x%x]=0x%2X\n", STK_RSRVD_REG, ps_reg[cnt]); mutex_unlock(&ps_data->io_lock); return scnprintf(buf, PAGE_SIZE, "%2X %2X %2X %2X %2X,%2X %2X %2X %2X %2X,%2X %2X %2X %2X %2X,%2X %2X %2X %2X %2X,%2X %2X %2X %2X %2X,%2X %2X\n", ps_reg[0], ps_reg[1], ps_reg[2], ps_reg[3], ps_reg[4], ps_reg[5], ps_reg[6], ps_reg[7], ps_reg[8], ps_reg[9], ps_reg[10], ps_reg[11], ps_reg[12], ps_reg[13], ps_reg[14], ps_reg[15], ps_reg[16], ps_reg[17], ps_reg[18], ps_reg[19], ps_reg[20], ps_reg[21], ps_reg[22], ps_reg[23], ps_reg[24], ps_reg[25], ps_reg[26]); } static ssize_t stk_recv_show(struct device *dev, struct device_attribute *attr, char *buf) { return 0; } static ssize_t stk_recv_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { unsigned long value = 0; int ret; int32_t recv_data; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); ret = kstrtoul(buf, 16, &value); if (ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret); return ret; } recv_data = i2c_smbus_read_byte_data(ps_data->client,value); printk("%s: reg 0x%x=0x%x\n", __func__, (int)value, recv_data); return size; } static ssize_t stk_send_show(struct device *dev, struct device_attribute *attr, char *buf) { return 0; } static ssize_t stk_send_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int addr, cmd; u8 addr_u8, cmd_u8; int32_t ret, i; char *token[10]; struct stk3x1x_data *ps_data = dev_get_drvdata(dev); for (i = 0; i < 2; i++) token[i] = strsep((char **)&buf, " "); ret = 
kstrtoul(token[0], 16, (unsigned long *)&(addr)); if (ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret); return ret; } ret = kstrtoul(token[1], 16, (unsigned long *)&(cmd)); if (ret < 0) { printk(KERN_ERR "%s:kstrtoul failed, ret=0x%x\n", __func__, ret); return ret; } dev_dbg(dev, "%s: write reg 0x%x=0x%x\n", __func__, addr, cmd); addr_u8 = (u8) addr; cmd_u8 = (u8) cmd; //mutex_lock(&ps_data->io_lock); ret = i2c_smbus_write_byte_data(ps_data->client,addr_u8,cmd_u8); //mutex_unlock(&ps_data->io_lock); if (0 != ret) { printk(KERN_ERR "%s: i2c_smbus_write_byte_data fail\n", __func__); return ret; } return size; } static struct device_attribute als_enable_attribute = __ATTR(enable,0664,stk_als_enable_show,stk_als_enable_store); static struct device_attribute als_lux_attribute = __ATTR(lux,0664,stk_als_lux_show,stk_als_lux_store); static struct device_attribute als_code_attribute = __ATTR(code, 0444, stk_als_code_show, NULL); static struct device_attribute als_transmittance_attribute = __ATTR(transmittance,0664,stk_als_transmittance_show,stk_als_transmittance_store); static struct device_attribute als_poll_delay_attribute = __ATTR(poll_delay, 0664, stk_als_delay_show, stk_als_delay_store); static struct device_attribute als_ir_code_attribute = __ATTR(ircode,0444,stk_als_ir_code_show,NULL); static struct device_attribute als_firlen_attribute = __ATTR(firlen, 0664, stk_als_firlen_show, stk_als_firlen_store); static struct device_attribute als_fir_enable_attribute = __ATTR(fir_enable, 0664, stk_als_fir_enable_show, stk_als_fir_enable_store); static struct attribute *stk_als_attrs [] = { &als_enable_attribute.attr, &als_lux_attribute.attr, &als_code_attribute.attr, &als_transmittance_attribute.attr, &als_poll_delay_attribute.attr, &als_ir_code_attribute.attr, &als_firlen_attribute.attr, &als_fir_enable_attribute.attr, NULL }; static struct attribute_group stk_als_attribute_group = { .attrs = stk_als_attrs, }; static struct device_attribute 
ps_enable_attribute = __ATTR(enable,0664,stk_ps_enable_show,stk_ps_enable_store); static struct device_attribute ps_enable_aso_attribute = __ATTR(enableaso,0664,stk_ps_enable_aso_show,stk_ps_enable_aso_store); static struct device_attribute ps_distance_attribute = __ATTR(distance,0664,stk_ps_distance_show, stk_ps_distance_store); static struct device_attribute ps_offset_attribute = __ATTR(offset,0664,stk_ps_offset_show, stk_ps_offset_store); static struct device_attribute ps_code_attribute = __ATTR(code, 0444, stk_ps_code_show, NULL); static struct device_attribute ps_code_thd_l_attribute = __ATTR(codethdl,0664,stk_ps_code_thd_l_show,stk_ps_code_thd_l_store); static struct device_attribute ps_code_thd_h_attribute = __ATTR(codethdh,0664,stk_ps_code_thd_h_show,stk_ps_code_thd_h_store); static struct device_attribute recv_attribute = __ATTR(recv,0664,stk_recv_show,stk_recv_store); static struct device_attribute send_attribute = __ATTR(send,0664,stk_send_show, stk_send_store); static struct device_attribute all_reg_attribute = __ATTR(allreg, 0444, stk_all_reg_show, NULL); static struct attribute *stk_ps_attrs [] = { &ps_enable_attribute.attr, &ps_enable_aso_attribute.attr, &ps_distance_attribute.attr, &ps_offset_attribute.attr, &ps_code_attribute.attr, &ps_code_thd_l_attribute.attr, &ps_code_thd_h_attribute.attr, &recv_attribute.attr, &send_attribute.attr, &all_reg_attribute.attr, NULL }; static struct attribute_group stk_ps_attribute_group = { .attrs = stk_ps_attrs, }; #ifdef STK_POLL_ALS static enum hrtimer_restart stk_als_timer_func(struct hrtimer *timer) { struct stk3x1x_data *ps_data = container_of(timer, struct stk3x1x_data, als_timer); queue_work(ps_data->stk_als_wq, &ps_data->stk_als_work); hrtimer_forward_now(&ps_data->als_timer, ps_data->als_poll_delay); return HRTIMER_RESTART; } static void stk_als_work_func(struct work_struct *work) { struct stk3x1x_data *ps_data = container_of(work, struct stk3x1x_data, stk_als_work); int32_t reading; 
mutex_lock(&ps_data->io_lock); reading = stk3x1x_get_als_reading(ps_data); if(reading < 0) return; ps_data->als_lux_last = stk_alscode2lux(ps_data, reading); input_report_abs(ps_data->als_input_dev, ABS_MISC, ps_data->als_lux_last); input_sync(ps_data->als_input_dev); mutex_unlock(&ps_data->io_lock); } #endif static enum hrtimer_restart stk_ps_timer_func(struct hrtimer *timer) { struct stk3x1x_data *ps_data = container_of(timer, struct stk3x1x_data, ps_timer); queue_work(ps_data->stk_ps_wq, &ps_data->stk_ps_work); #ifdef STK_POLL_PS hrtimer_forward_now(&ps_data->ps_timer, ps_data->ps_poll_delay); return HRTIMER_RESTART; #else hrtimer_cancel(&ps_data->ps_timer); return HRTIMER_NORESTART; #endif } static void stk_ps_work_func(struct work_struct *work) { struct stk3x1x_data *ps_data = container_of(work, struct stk3x1x_data, stk_ps_work); uint32_t reading; int32_t near_far_state; uint8_t org_flag_reg; int32_t ret; uint8_t disable_flag = 0; mutex_lock(&ps_data->io_lock); org_flag_reg = stk3x1x_get_flag(ps_data); if(org_flag_reg < 0) { printk(KERN_ERR "%s: get_status_reg fail, ret=%d", __func__, org_flag_reg); goto err_i2c_rw; } near_far_state = (org_flag_reg & STK_FLG_NF_MASK)?1:0; reading = stk3x1x_get_ps_reading(ps_data); if(ps_data->ps_distance_last != near_far_state) { ps_data->ps_distance_last = near_far_state; input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, near_far_state); input_sync(ps_data->ps_input_dev); wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ); #ifdef STK_DEBUG_PRINTF printk(KERN_INFO "%s: ps input event %d cm, ps code = %d\n",__func__, near_far_state, reading); #endif } ret = stk3x1x_set_flag(ps_data, org_flag_reg, disable_flag); if(ret < 0) { printk(KERN_ERR "%s:stk3x1x_set_flag fail, ret=%d\n", __func__, ret); goto err_i2c_rw; } mutex_unlock(&ps_data->io_lock); return; err_i2c_rw: mutex_unlock(&ps_data->io_lock); msleep(30); return; } #if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS)) static void stk_work_func(struct work_struct *work) { 
uint32_t reading; #if ((STK_INT_PS_MODE != 0x03) && (STK_INT_PS_MODE != 0x02)) int32_t ret; uint8_t disable_flag = 0; uint8_t org_flag_reg; #endif /* #if ((STK_INT_PS_MODE != 0x03) && (STK_INT_PS_MODE != 0x02)) */ #ifndef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD uint32_t nLuxIndex; #endif struct stk3x1x_data *ps_data = container_of(work, struct stk3x1x_data, stk_work); int32_t near_far_state; mutex_lock(&ps_data->io_lock); #if (STK_INT_PS_MODE == 0x03) near_far_state = gpio_get_value(ps_data->int_pin); #elif (STK_INT_PS_MODE == 0x02) near_far_state = !(gpio_get_value(ps_data->int_pin)); #endif #if ((STK_INT_PS_MODE == 0x03) || (STK_INT_PS_MODE == 0x02)) ps_data->ps_distance_last = near_far_state; input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, near_far_state); input_sync(ps_data->ps_input_dev); wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ); reading = stk3x1x_get_ps_reading(ps_data); #ifdef STK_DEBUG_PRINTF printk(KERN_INFO "%s: ps input event %d cm, ps code = %d\n",__func__, near_far_state, reading); #endif #else /* mode 0x01 or 0x04 */ org_flag_reg = stk3x1x_get_flag(ps_data); if(org_flag_reg < 0) { printk(KERN_ERR "%s: get_status_reg fail, org_flag_reg=%d", __func__, org_flag_reg); goto err_i2c_rw; } if (org_flag_reg & STK_FLG_ALSINT_MASK) { disable_flag |= STK_FLG_ALSINT_MASK; reading = stk3x1x_get_als_reading(ps_data); if(reading < 0) { printk(KERN_ERR "%s: stk3x1x_get_als_reading fail, ret=%d", __func__, reading); goto err_i2c_rw; } #ifndef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD nLuxIndex = stk_get_lux_interval_index(reading); stk3x1x_set_als_thd_h(ps_data, code_threshold_table[nLuxIndex]); stk3x1x_set_als_thd_l(ps_data, code_threshold_table[nLuxIndex-1]); #else stk_als_set_new_thd(ps_data, reading); #endif //CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD ps_data->als_lux_last = stk_alscode2lux(ps_data, reading); input_report_abs(ps_data->als_input_dev, ABS_MISC, ps_data->als_lux_last); input_sync(ps_data->als_input_dev); #ifdef STK_DEBUG_PRINTF printk(KERN_INFO 
"%s: als input event %d lux\n",__func__, ps_data->als_lux_last); #endif } if (org_flag_reg & STK_FLG_PSINT_MASK) { disable_flag |= STK_FLG_PSINT_MASK; near_far_state = (org_flag_reg & STK_FLG_NF_MASK)?1:0; ps_data->ps_distance_last = near_far_state; input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, near_far_state); input_sync(ps_data->ps_input_dev); wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ); reading = stk3x1x_get_ps_reading(ps_data); #ifdef STK_DEBUG_PRINTF printk(KERN_INFO "%s: ps input event=%d, ps code = %d\n",__func__, near_far_state, reading); #endif } ret = stk3x1x_set_flag(ps_data, org_flag_reg, disable_flag); if(ret < 0) { printk(KERN_ERR "%s:reset_int_flag fail, ret=%d\n", __func__, ret); goto err_i2c_rw; } #endif msleep(1); enable_irq(ps_data->irq); mutex_unlock(&ps_data->io_lock); return; err_i2c_rw: mutex_unlock(&ps_data->io_lock); msleep(30); enable_irq(ps_data->irq); return; } #endif #if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS)) static irqreturn_t stk_oss_irq_handler(int irq, void *data) { struct stk3x1x_data *pData = data; disable_irq_nosync(irq); queue_work(pData->stk_wq,&pData->stk_work); return IRQ_HANDLED; } #endif /* #if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS)) */ static inline void stk3x1x_init_fir(struct stk3x1x_data *ps_data) { memset(&ps_data->fir, 0x00, sizeof(ps_data->fir)); atomic_set(&ps_data->firlength, STK_FIR_LEN); } static int32_t stk3x1x_init_all_setting(struct i2c_client *client, struct stk3x1x_platform_data *plat_data) { int32_t ret; struct stk3x1x_data *ps_data = i2c_get_clientdata(client); ret = stk3x1x_software_reset(ps_data); if(ret < 0) return ret; stk3x1x_check_pid(ps_data); if(ret < 0) return ret; ret = stk3x1x_init_all_reg(ps_data, plat_data); if(ret < 0) return ret; #ifndef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD stk_init_code_threshold_table(ps_data); #endif if (plat_data->use_fir) stk3x1x_init_fir(ps_data); return 0; } #if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS)) static int 
stk3x1x_setup_irq(struct i2c_client *client) { int irq, err = -EIO; struct stk3x1x_data *ps_data = i2c_get_clientdata(client); irq = gpio_to_irq(ps_data->int_pin); #ifdef STK_DEBUG_PRINTF printk(KERN_INFO "%s: int pin #=%d, irq=%d\n",__func__, ps_data->int_pin, irq); #endif if (irq <= 0) { printk(KERN_ERR "irq number is not specified, irq # = %d, int pin=%d\n",irq, ps_data->int_pin); return irq; } ps_data->irq = irq; err = gpio_request(ps_data->int_pin,"stk-int"); if(err < 0) { printk(KERN_ERR "%s: gpio_request, err=%d", __func__, err); return err; } err = gpio_direction_input(ps_data->int_pin); if(err < 0) { printk(KERN_ERR "%s: gpio_direction_input, err=%d", __func__, err); return err; } #if ((STK_INT_PS_MODE == 0x03) || (STK_INT_PS_MODE == 0x02)) err = request_any_context_irq(irq, stk_oss_irq_handler, IRQF_TRIGGER_FALLING|IRQF_TRIGGER_RISING, DEVICE_NAME, ps_data); #else err = request_any_context_irq(irq, stk_oss_irq_handler, IRQF_TRIGGER_LOW, DEVICE_NAME, ps_data); #endif if (err < 0) { printk(KERN_WARNING "%s: request_any_context_irq(%d) failed for (%d)\n", __func__, irq, err); goto err_request_any_context_irq; } disable_irq(irq); return 0; err_request_any_context_irq: gpio_free(ps_data->int_pin); return err; } #endif #ifdef CONFIG_HAS_EARLYSUSPEND static void stk3x1x_early_suspend(struct early_suspend *h) { struct stk3x1x_data *ps_data = container_of(h, struct stk3x1x_data, stk_early_suspend); #ifndef STK_POLL_PS int err; #endif mutex_lock(&ps_data->io_lock); if(ps_data->als_enabled) { stk3x1x_enable_als(ps_data, 0); ps_data->als_enabled = true; } if(ps_data->ps_enabled) { #ifdef STK_POLL_PS wake_lock(&ps_data->ps_nosuspend_wl); #else err = enable_irq_wake(ps_data->irq); if (err) printk(KERN_WARNING "%s: set_irq_wake(%d) failed, err=(%d)\n", __func__, ps_data->irq, err); #endif } mutex_unlock(&ps_data->io_lock); return; } static void stk3x1x_late_resume(struct early_suspend *h) { struct stk3x1x_data *ps_data = container_of(h, struct stk3x1x_data, 
stk_early_suspend); #ifndef STK_POLL_PS int err; #endif mutex_lock(&ps_data->io_lock); if(ps_data->als_enabled) stk3x1x_enable_als(ps_data, 1); if(ps_data->ps_enabled) { #ifdef STK_POLL_PS wake_lock(&ps_data->ps_nosuspend_wl); #else err = disable_irq_wake(ps_data->irq); if (err) printk(KERN_WARNING "%s: disable_irq_wake(%d) failed, err=(%d)\n", __func__, ps_data->irq, err); #endif } mutex_unlock(&ps_data->io_lock); return; } #endif //#ifdef CONFIG_HAS_EARLYSUSPEND static int stk3x1x_power_ctl(struct stk3x1x_data *data, bool on) { int ret = 0; if (!on && data->power_enabled) { ret = regulator_disable(data->vdd); if (ret) { dev_err(&data->client->dev, "Regulator vdd disable failed ret=%d\n", ret); return ret; } ret = regulator_disable(data->vio); if (ret) { dev_err(&data->client->dev, "Regulator vio disable failed ret=%d\n", ret); regulator_enable(data->vdd); return ret; } data->power_enabled = on; dev_dbg(&data->client->dev, "stk3x1x_power_ctl on=%d\n", on); } else if (on && !data->power_enabled) { ret = regulator_enable(data->vdd); if (ret) { dev_err(&data->client->dev, "Regulator vdd enable failed ret=%d\n", ret); return ret; } ret = regulator_enable(data->vio); if (ret) { dev_err(&data->client->dev, "Regulator vio enable failed ret=%d\n", ret); regulator_disable(data->vdd); return ret; } data->power_enabled = on; dev_dbg(&data->client->dev, "stk3x1x_power_ctl on=%d\n", on); } else { dev_warn(&data->client->dev, "Power on=%d. 
enabled=%d\n", on, data->power_enabled); } return ret; } static int stk3x1x_power_init(struct stk3x1x_data *data, bool on) { int ret; if (!on) { if (regulator_count_voltages(data->vdd) > 0) regulator_set_voltage(data->vdd, 0, STK3X1X_VDD_MAX_UV); regulator_put(data->vdd); if (regulator_count_voltages(data->vio) > 0) regulator_set_voltage(data->vio, 0, STK3X1X_VIO_MAX_UV); regulator_put(data->vio); } else { data->vdd = regulator_get(&data->client->dev, "vdd"); if (IS_ERR(data->vdd)) { ret = PTR_ERR(data->vdd); dev_err(&data->client->dev, "Regulator get failed vdd ret=%d\n", ret); return ret; } if (regulator_count_voltages(data->vdd) > 0) { ret = regulator_set_voltage(data->vdd, STK3X1X_VDD_MIN_UV, STK3X1X_VDD_MAX_UV); if (ret) { dev_err(&data->client->dev, "Regulator set failed vdd ret=%d\n", ret); goto reg_vdd_put; } } data->vio = regulator_get(&data->client->dev, "vio"); if (IS_ERR(data->vio)) { ret = PTR_ERR(data->vio); dev_err(&data->client->dev, "Regulator get failed vio ret=%d\n", ret); goto reg_vdd_set; } if (regulator_count_voltages(data->vio) > 0) { ret = regulator_set_voltage(data->vio, STK3X1X_VIO_MIN_UV, STK3X1X_VIO_MAX_UV); if (ret) { dev_err(&data->client->dev, "Regulator set failed vio ret=%d\n", ret); goto reg_vio_put; } } } return 0; reg_vio_put: regulator_put(data->vio); reg_vdd_set: if (regulator_count_voltages(data->vdd) > 0) regulator_set_voltage(data->vdd, 0, STK3X1X_VDD_MAX_UV); reg_vdd_put: regulator_put(data->vdd); return ret; } static int stk3x1x_device_ctl(struct stk3x1x_data *ps_data, bool enable) { int ret; struct device *dev = &ps_data->client->dev; if (enable && !ps_data->power_enabled) { ret = stk3x1x_power_ctl(ps_data, true); if (ret) { dev_err(dev, "Failed to enable device power\n"); goto err_exit; } ret = stk3x1x_init_all_setting(ps_data->client, ps_data->pdata); if (ret < 0) { stk3x1x_power_ctl(ps_data, false); dev_err(dev, "Failed to re-init device setting\n"); goto err_exit; } } else if (!enable && ps_data->power_enabled) { if 
(!ps_data->als_enabled && !ps_data->ps_enabled) { ret = stk3x1x_power_ctl(ps_data, false); if (ret) { dev_err(dev, "Failed to disable device power\n"); goto err_exit; } } else { dev_dbg(dev, "device control: als_enabled=%d, ps_enabled=%d\n", ps_data->als_enabled, ps_data->ps_enabled); } } else { dev_dbg(dev, "device control: enable=%d, power_enabled=%d\n", enable, ps_data->power_enabled); } return 0; err_exit: return ret; } #ifdef CONFIG_OF static int stk3x1x_parse_dt(struct device *dev, struct stk3x1x_platform_data *pdata) { int rc; struct device_node *np = dev->of_node; u32 temp_val; pdata->int_pin = of_get_named_gpio_flags(np, "stk,irq-gpio", 0, &pdata->int_flags); if (pdata->int_pin < 0) { dev_err(dev, "Unable to read irq-gpio\n"); return pdata->int_pin; } rc = of_property_read_u32(np, "stk,transmittance", &temp_val); if (!rc) pdata->transmittance = temp_val; else { dev_err(dev, "Unable to read transmittance\n"); return rc; } rc = of_property_read_u32(np, "stk,state-reg", &temp_val); if (!rc) pdata->state_reg = temp_val; else { dev_err(dev, "Unable to read state-reg\n"); return rc; } rc = of_property_read_u32(np, "stk,psctrl-reg", &temp_val); if (!rc) pdata->psctrl_reg = (u8)temp_val; else { dev_err(dev, "Unable to read psctrl-reg\n"); return rc; } rc = of_property_read_u32(np, "stk,alsctrl-reg", &temp_val); if (!rc) pdata->alsctrl_reg = (u8)temp_val; else { dev_err(dev, "Unable to read alsctrl-reg\n"); return rc; } rc = of_property_read_u32(np, "stk,ledctrl-reg", &temp_val); if (!rc) pdata->ledctrl_reg = (u8)temp_val; else { dev_err(dev, "Unable to read ledctrl-reg\n"); return rc; } rc = of_property_read_u32(np, "stk,wait-reg", &temp_val); if (!rc) pdata->wait_reg = (u8)temp_val; else { dev_err(dev, "Unable to read wait-reg\n"); return rc; } rc = of_property_read_u32(np, "stk,ps-thdh", &temp_val); if (!rc) pdata->ps_thd_h = (u16)temp_val; else { dev_err(dev, "Unable to read ps-thdh\n"); return rc; } rc = of_property_read_u32(np, "stk,ps-thdl", &temp_val); if 
(!rc) pdata->ps_thd_l = (u16)temp_val; else { dev_err(dev, "Unable to read ps-thdl\n"); return rc; } pdata->use_fir = of_property_read_bool(np, "stk,use-fir"); return 0; } #else static int stk3x1x_parse_dt(struct device *dev, struct stk3x1x_platform_data *pdata) { return -ENODEV; } #endif /* !CONFIG_OF */ static int stk3x1x_probe(struct i2c_client *client, const struct i2c_device_id *id) { int err = -ENODEV; struct stk3x1x_data *ps_data; struct stk3x1x_platform_data *plat_data; printk(KERN_INFO "%s: driver version = %s\n", __func__, DRIVER_VERSION); if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { printk(KERN_ERR "%s: No Support for I2C_FUNC_SMBUS_BYTE_DATA\n", __func__); return -ENODEV; } if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) { printk(KERN_ERR "%s: No Support for I2C_FUNC_SMBUS_WORD_DATA\n", __func__); return -ENODEV; } ps_data = kzalloc(sizeof(struct stk3x1x_data),GFP_KERNEL); if(!ps_data) { printk(KERN_ERR "%s: failed to allocate stk3x1x_data\n", __func__); return -ENOMEM; } ps_data->client = client; i2c_set_clientdata(client,ps_data); mutex_init(&ps_data->io_lock); wake_lock_init(&ps_data->ps_wakelock,WAKE_LOCK_SUSPEND, "stk_input_wakelock"); #ifdef STK_POLL_PS wake_lock_init(&ps_data->ps_nosuspend_wl,WAKE_LOCK_SUSPEND, "stk_nosuspend_wakelock"); #endif if (client->dev.of_node) { plat_data = devm_kzalloc(&client->dev, sizeof(struct stk3x1x_platform_data), GFP_KERNEL); if (!plat_data) { dev_err(&client->dev, "Failed to allocate memory\n"); return -ENOMEM; } err = stk3x1x_parse_dt(&client->dev, plat_data); dev_err(&client->dev, "%s: stk3x1x_parse_dt ret=%d\n", __func__, err); if (err) return err; } else plat_data = client->dev.platform_data; if (!plat_data) { dev_err(&client->dev, "%s: no stk3x1x platform data!\n", __func__); goto err_als_input_allocate; } ps_data->als_transmittance = plat_data->transmittance; ps_data->int_pin = plat_data->int_pin; ps_data->use_fir = plat_data->use_fir; ps_data->pdata = 
plat_data; if (ps_data->als_transmittance == 0) { dev_err(&client->dev, "%s: Please set als_transmittance\n", __func__); goto err_als_input_allocate; } ps_data->als_input_dev = input_allocate_device(); if (ps_data->als_input_dev==NULL) { printk(KERN_ERR "%s: could not allocate als device\n", __func__); err = -ENOMEM; goto err_als_input_allocate; } ps_data->ps_input_dev = input_allocate_device(); if (ps_data->ps_input_dev==NULL) { printk(KERN_ERR "%s: could not allocate ps device\n", __func__); err = -ENOMEM; goto err_ps_input_allocate; } ps_data->als_input_dev->name = ALS_NAME; ps_data->ps_input_dev->name = PS_NAME; set_bit(EV_ABS, ps_data->als_input_dev->evbit); set_bit(EV_ABS, ps_data->ps_input_dev->evbit); input_set_abs_params(ps_data->als_input_dev, ABS_MISC, 0, stk_alscode2lux(ps_data, (1<<16)-1), 0, 0); input_set_abs_params(ps_data->ps_input_dev, ABS_DISTANCE, 0,1, 0, 0); err = input_register_device(ps_data->als_input_dev); if (err<0) { printk(KERN_ERR "%s: can not register als input device\n", __func__); goto err_als_input_register; } err = input_register_device(ps_data->ps_input_dev); if (err<0) { printk(KERN_ERR "%s: can not register ps input device\n", __func__); goto err_ps_input_register; } err = sysfs_create_group(&ps_data->als_input_dev->dev.kobj, &stk_als_attribute_group); if (err < 0) { printk(KERN_ERR "%s:could not create sysfs group for als\n", __func__); goto err_als_sysfs_create_group; } err = sysfs_create_group(&ps_data->ps_input_dev->dev.kobj, &stk_ps_attribute_group); if (err < 0) { printk(KERN_ERR "%s:could not create sysfs group for ps\n", __func__); goto err_ps_sysfs_create_group; } input_set_drvdata(ps_data->als_input_dev, ps_data); input_set_drvdata(ps_data->ps_input_dev, ps_data); #ifdef STK_POLL_ALS ps_data->stk_als_wq = create_singlethread_workqueue("stk_als_wq"); INIT_WORK(&ps_data->stk_als_work, stk_als_work_func); hrtimer_init(&ps_data->als_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ps_data->als_poll_delay = ns_to_ktime(110 * 
NSEC_PER_MSEC); ps_data->als_timer.function = stk_als_timer_func; #endif ps_data->stk_ps_wq = create_singlethread_workqueue("stk_ps_wq"); INIT_WORK(&ps_data->stk_ps_work, stk_ps_work_func); hrtimer_init(&ps_data->ps_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ps_data->ps_poll_delay = ns_to_ktime(110 * NSEC_PER_MSEC); ps_data->ps_timer.function = stk_ps_timer_func; #if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS)) ps_data->stk_wq = create_singlethread_workqueue("stk_wq"); INIT_WORK(&ps_data->stk_work, stk_work_func); err = stk3x1x_setup_irq(client); if(err < 0) goto err_stk3x1x_setup_irq; #endif err = stk3x1x_power_init(ps_data, true); if (err) goto err_power_init; err = stk3x1x_power_ctl(ps_data, true); if (err) goto err_power_on; ps_data->als_enabled = false; ps_data->ps_enabled = false; #ifdef CONFIG_HAS_EARLYSUSPEND ps_data->stk_early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; ps_data->stk_early_suspend.suspend = stk3x1x_early_suspend; ps_data->stk_early_suspend.resume = stk3x1x_late_resume; register_early_suspend(&ps_data->stk_early_suspend); #endif /* make sure everything is ok before registering the class device */ ps_data->als_cdev = sensors_light_cdev; ps_data->als_cdev.sensors_enable = stk_als_enable_set; ps_data->als_cdev.sensors_poll_delay = stk_als_poll_delay_set; err = sensors_classdev_register(&client->dev, &ps_data->als_cdev); if (err) goto err_power_on; ps_data->ps_cdev = sensors_proximity_cdev; ps_data->ps_cdev.sensors_enable = stk_ps_enable_set; err = sensors_classdev_register(&client->dev, &ps_data->ps_cdev); if (err) goto err_class_sysfs; /* enable device power only when it is enabled */ err = stk3x1x_power_ctl(ps_data, false); if (err) goto err_init_all_setting; dev_dbg(&client->dev, "%s: probe successfully", __func__); return 0; err_init_all_setting: stk3x1x_power_ctl(ps_data, false); sensors_classdev_unregister(&ps_data->ps_cdev); err_class_sysfs: sensors_classdev_unregister(&ps_data->als_cdev); err_power_on: 
stk3x1x_power_init(ps_data, false); err_power_init: #ifndef STK_POLL_PS free_irq(ps_data->irq, ps_data); gpio_free(plat_data->int_pin); #endif #if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS)) err_stk3x1x_setup_irq: #endif #ifdef STK_POLL_ALS hrtimer_try_to_cancel(&ps_data->als_timer); destroy_workqueue(ps_data->stk_als_wq); #endif destroy_workqueue(ps_data->stk_ps_wq); #if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS)) destroy_workqueue(ps_data->stk_wq); #endif sysfs_remove_group(&ps_data->ps_input_dev->dev.kobj, &stk_ps_attribute_group); err_ps_sysfs_create_group: sysfs_remove_group(&ps_data->als_input_dev->dev.kobj, &stk_als_attribute_group); err_als_sysfs_create_group: input_unregister_device(ps_data->ps_input_dev); err_ps_input_register: input_unregister_device(ps_data->als_input_dev); err_als_input_register: input_free_device(ps_data->ps_input_dev); err_ps_input_allocate: input_free_device(ps_data->als_input_dev); err_als_input_allocate: #ifdef STK_POLL_PS wake_lock_destroy(&ps_data->ps_nosuspend_wl); #endif wake_lock_destroy(&ps_data->ps_wakelock); mutex_destroy(&ps_data->io_lock); kfree(ps_data); return err; } static int stk3x1x_remove(struct i2c_client *client) { struct stk3x1x_data *ps_data = i2c_get_clientdata(client); #ifndef STK_POLL_PS free_irq(ps_data->irq, ps_data); gpio_free(ps_data->int_pin); #endif #ifdef STK_POLL_ALS hrtimer_try_to_cancel(&ps_data->als_timer); destroy_workqueue(ps_data->stk_als_wq); #endif destroy_workqueue(ps_data->stk_ps_wq); #if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS)) destroy_workqueue(ps_data->stk_wq); #endif sysfs_remove_group(&ps_data->ps_input_dev->dev.kobj, &stk_ps_attribute_group); sysfs_remove_group(&ps_data->als_input_dev->dev.kobj, &stk_als_attribute_group); input_unregister_device(ps_data->ps_input_dev); input_unregister_device(ps_data->als_input_dev); input_free_device(ps_data->ps_input_dev); input_free_device(ps_data->als_input_dev); #ifdef STK_POLL_PS wake_lock_destroy(&ps_data->ps_nosuspend_wl); 
#endif wake_lock_destroy(&ps_data->ps_wakelock); mutex_destroy(&ps_data->io_lock); kfree(ps_data); return 0; } static const struct i2c_device_id stk_ps_id[] = { { "stk_ps", 0}, {} }; MODULE_DEVICE_TABLE(i2c, stk_ps_id); static struct of_device_id stk_match_table[] = { { .compatible = "stk,stk3x1x", }, { }, }; static struct i2c_driver stk_ps_driver = { .driver = { .name = DEVICE_NAME, .owner = THIS_MODULE, .of_match_table = stk_match_table, }, .probe = stk3x1x_probe, .remove = stk3x1x_remove, .id_table = stk_ps_id, }; static int __init stk3x1x_init(void) { int ret; ret = i2c_add_driver(&stk_ps_driver); if (ret) return ret; return 0; } static void __exit stk3x1x_exit(void) { i2c_del_driver(&stk_ps_driver); } module_init(stk3x1x_init); module_exit(stk3x1x_exit); MODULE_AUTHOR("Lex Hsieh <lex_hsieh@sitronix.com.tw>"); MODULE_DESCRIPTION("Sensortek stk3x1x Proximity Sensor driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRIVER_VERSION);
gpl-2.0
rdeva31/kernel-msm-3.10
drivers/spi/spi-au1550.c
2109
26666
/*
 * au1550 psc spi controller driver
 * may work also with au1200, au1210, au1250
 * will not work on au1000, au1100 and au1500 (no full spi controller there)
 *
 * Copyright (c) 2006 ATRON electronic GmbH
 * Author: Jan Nikitenko <jan.nikitenko@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_psc.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1550_spi.h>

/* module parameter: non-zero selects DBDMA mode for <=8-bit words, 0 forces PIO */
static unsigned usedma = 1;
module_param(usedma, uint, 0644);

/*
#define AU1550_SPI_DEBUG_LOOPBACK
*/

/* one descriptor per direction is enough: each transfer is queued whole */
#define AU1550_SPI_DBDMA_DESCRIPTORS 1
/* minimum size of the preallocated rx bounce buffer used when the caller
 * supplies no rx buffer (rx dma is still required, see comment further down) */
#define AU1550_SPI_DMA_RXTMP_MINSIZE 2048U

/* per-controller state; embedded in the spi_master's devdata */
struct au1550_spi {
	struct spi_bitbang bitbang;

	volatile psc_spi_t __iomem *regs;	/* PSC register block */
	int irq;
	unsigned freq_max;	/* highest spi clock reachable with valid BRG/DIV */
	unsigned freq_min;	/* lowest spi clock reachable with valid BRG/DIV */

	unsigned len;		/* byte length of the current transfer */
	unsigned tx_count;	/* bytes pushed to tx so far */
	unsigned rx_count;	/* bytes pulled from rx so far */
	const u8 *tx;		/* current tx position (NULL = send zeroes) */
	u8 *rx;			/* current rx position (NULL = discard) */

	/* per-word-size PIO fifo accessors, selected by bits_handlers_set() */
	void (*rx_word)(struct au1550_spi *hw);
	void (*tx_word)(struct au1550_spi *hw);
	/* whole-transfer handler: dma or pio variant */
	int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
	irqreturn_t (*irq_callback)(struct au1550_spi *hw);

	struct completion master_done;	/* signalled by irq_callback at end of transfer */

	unsigned usedma;	/* effective dma enable (param + resources present) */
	u32 dma_tx_id;
	u32 dma_rx_id;
	u32 dma_tx_ch;
	u32 dma_rx_ch;

	u8 *dma_rx_tmpbuf;		/* rx bounce buffer, kept mapped */
	unsigned dma_rx_tmpbuf_size;
	u32 dma_rx_tmpbuf_addr;		/* dma address of the bounce buffer */

	struct spi_master *master;
	struct device *dev;
	struct au1550_spi_info *pdata;
	struct resource *ioarea;
};

/* we use an 8-bit memory device for dma transfers to/from spi fifo */
static dbdev_tab_t au1550_spi_mem_dbdev =
{
	.dev_id			= DBDMA_MEM_CHAN,
	.dev_flags		= DEV_FLAGS_ANYUSE|DEV_FLAGS_SYNC,
	.dev_tsize		= 0,
	.dev_devwidth		= 8,
	.dev_physaddr		= 0x00000000,
	.dev_intlevel		= 0,
	.dev_intpolarity	= 0
};

static int ddma_memid;	/* id to above mem dma device */

static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw);


/*
 * compute BRG and DIV bits to setup spi clock based on main input clock rate
 * that was specified in platform data structure
 * according to au1550 datasheet:
 *    psc_tempclk = psc_mainclk / (2 << DIV)
 *    spiclk = psc_tempclk / (2 * (BRG + 1))
 *    BRG valid range is 4..63
 *    DIV valid range is 0..3
 */
static u32 au1550_spi_baudcfg(struct au1550_spi *hw, unsigned speed_hz)
{
	u32 mainclk_hz = hw->pdata->mainclk_hz;
	u32 div, brg;

	for (div = 0; div < 4; div++) {
		brg = mainclk_hz / speed_hz / (4 << div);
		/* now we have BRG+1 in brg, so count with that */
		if (brg < (4 + 1)) {
			brg = (4 + 1);	/* speed_hz too big */
			break;		/* set lowest brg (div is == 0) */
		}
		if (brg <= (63 + 1))
			break;		/* we have valid brg and div */
	}
	if (div == 4) {
		div = 3;		/* speed_hz too small */
		brg = (63 + 1);		/* set highest brg and div */
	}
	brg--;
	return PSC_SPICFG_SET_BAUD(brg) | PSC_SPICFG_SET_DIV(div);
}

/* mask all spi event interrupts, then acknowledge any pending events */
static inline void au1550_spi_mask_ack_all(struct au1550_spi *hw)
{
	hw->regs->psc_spimsk =
		  PSC_SPIMSK_MM | PSC_SPIMSK_RR | PSC_SPIMSK_RO
		| PSC_SPIMSK_RU | PSC_SPIMSK_TR | PSC_SPIMSK_TO
		| PSC_SPIMSK_TU | PSC_SPIMSK_SD | PSC_SPIMSK_MD;
	au_sync();

	hw->regs->psc_spievent =
		  PSC_SPIEVNT_MM | PSC_SPIEVNT_RR | PSC_SPIEVNT_RO
		| PSC_SPIEVNT_RU | PSC_SPIEVNT_TR | PSC_SPIEVNT_TO
		| PSC_SPIEVNT_TU | PSC_SPIEVNT_SD | PSC_SPIEVNT_MD;
	au_sync();
}

/* clear both fifos and spin until the controller reports the clear is done */
static void au1550_spi_reset_fifos(struct au1550_spi *hw)
{
	u32 pcr;

	hw->regs->psc_spipcr = PSC_SPIPCR_RC | PSC_SPIPCR_TC;
	au_sync();
	do {
		pcr = hw->regs->psc_spipcr;
		au_sync();
	} while (pcr != 0);
}

/*
 * dma transfers are used for the most common spi word size of 8-bits
 * we cannot easily change already set up dma channels' width, so if we wanted
 * dma support for more than 8-bit words (up to 24 bits), we would need to
 * setup dma channels from scratch on each spi transfer, based on bits_per_word
 * instead we have pre set up 8 bit dma channels supporting spi 4 to 8 bits
 * transfers, and 9 to 24 bits spi transfers will be done in pio irq based mode
 * callbacks to handle dma or pio are set up in au1550_spi_bits_handlers_set()
 */
static void au1550_spi_chipsel(struct spi_device *spi, int value)
{
	struct au1550_spi *hw = spi_master_get_devdata(spi->master);
	unsigned cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
	u32 cfg, stat;

	switch (value) {
	case BITBANG_CS_INACTIVE:
		/* board code controls the actual chipselect line, if provided */
		if (hw->pdata->deactivate_cs)
			hw->pdata->deactivate_cs(hw->pdata, spi->chip_select,
					cspol);
		break;

	case BITBANG_CS_ACTIVE:
		au1550_spi_bits_handlers_set(hw, spi->bits_per_word);

		cfg = hw->regs->psc_spicfg;
		au_sync();
		/* device must be disabled while cfg bits are rewritten */
		hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE;
		au_sync();

		if (spi->mode & SPI_CPOL)
			cfg |= PSC_SPICFG_BI;
		else
			cfg &= ~PSC_SPICFG_BI;
		if (spi->mode & SPI_CPHA)
			cfg &= ~PSC_SPICFG_CDE;
		else
			cfg |= PSC_SPICFG_CDE;

		if (spi->mode & SPI_LSB_FIRST)
			cfg |= PSC_SPICFG_MLF;
		else
			cfg &= ~PSC_SPICFG_MLF;

		/* dma only supports word sizes up to 8 bits, see comment above */
		if (hw->usedma && spi->bits_per_word <= 8)
			cfg &= ~PSC_SPICFG_DD_DISABLE;
		else
			cfg |= PSC_SPICFG_DD_DISABLE;
		cfg = PSC_SPICFG_CLR_LEN(cfg);
		cfg |= PSC_SPICFG_SET_LEN(spi->bits_per_word);

		cfg = PSC_SPICFG_CLR_BAUD(cfg);
		cfg &= ~PSC_SPICFG_SET_DIV(3);
		cfg |= au1550_spi_baudcfg(hw, spi->max_speed_hz);

		/* re-enable and wait for the device-ready status bit */
		hw->regs->psc_spicfg = cfg | PSC_SPICFG_DE_ENABLE;
		au_sync();
		do {
			stat = hw->regs->psc_spistat;
			au_sync();
		} while ((stat & PSC_SPISTAT_DR) == 0);

		if (hw->pdata->activate_cs)
			hw->pdata->activate_cs(hw->pdata, spi->chip_select,
					cspol);
		break;
	}
}

/*
 * per-transfer setup: validate bits_per_word/speed, reprogram word length,
 * dma enable and baud bits, then clear fifos and pending events.
 * Called by spi_bitbang before each transfer (t may carry overrides).
 */
static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct au1550_spi *hw = spi_master_get_devdata(spi->master);
	unsigned bpw, hz;
	u32 cfg, stat;

	bpw = spi->bits_per_word;
	hz = spi->max_speed_hz;
	if (t) {
		if (t->bits_per_word)
			bpw = t->bits_per_word;
		if (t->speed_hz)
			hz = t->speed_hz;
	}

	if (bpw < 4 || bpw > 24) {
		dev_err(&spi->dev, "setupxfer: invalid bits_per_word=%d\n",
			bpw);
		return -EINVAL;
	}
	if (hz > spi->max_speed_hz || hz > hw->freq_max || hz < hw->freq_min) {
		dev_err(&spi->dev, "setupxfer: clock rate=%d out of range\n",
			hz);
		return -EINVAL;
	}

	au1550_spi_bits_handlers_set(hw, spi->bits_per_word);

	cfg = hw->regs->psc_spicfg;
	au_sync();
	/* disable device while reconfiguring */
	hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE;
	au_sync();

	if (hw->usedma && bpw <= 8)
		cfg &= ~PSC_SPICFG_DD_DISABLE;
	else
		cfg |= PSC_SPICFG_DD_DISABLE;
	cfg = PSC_SPICFG_CLR_LEN(cfg);
	cfg |= PSC_SPICFG_SET_LEN(bpw);

	cfg = PSC_SPICFG_CLR_BAUD(cfg);
	cfg &= ~PSC_SPICFG_SET_DIV(3);
	cfg |= au1550_spi_baudcfg(hw, hz);

	hw->regs->psc_spicfg = cfg;
	au_sync();

	/* if DE_ENABLE was set in the read-back cfg, wait for device-ready */
	if (cfg & PSC_SPICFG_DE_ENABLE) {
		do {
			stat = hw->regs->psc_spistat;
			au_sync();
		} while ((stat & PSC_SPISTAT_DR) == 0);
	}

	au1550_spi_reset_fifos(hw);
	au1550_spi_mask_ack_all(hw);

	return 0;
}

/*
 * one-time per-device validation; only checks ranges and fills in a default
 * max_speed_hz. Does NOT touch hardware — see the NOTE below on why.
 */
static int au1550_spi_setup(struct spi_device *spi)
{
	struct au1550_spi *hw = spi_master_get_devdata(spi->master);

	if (spi->bits_per_word < 4 || spi->bits_per_word > 24) {
		dev_err(&spi->dev, "setup: invalid bits_per_word=%d\n",
			spi->bits_per_word);
		return -EINVAL;
	}

	if (spi->max_speed_hz == 0)
		spi->max_speed_hz = hw->freq_max;
	if (spi->max_speed_hz > hw->freq_max
			|| spi->max_speed_hz < hw->freq_min)
		return -EINVAL;
	/*
	 * NOTE: cannot change speed and other hw settings immediately,
	 *       otherwise sharing of spi bus is not possible,
	 *       so do not call setupxfer(spi, NULL) here
	 */
	return 0;
}

/*
 * for dma spi transfers, we have to setup rx channel, otherwise there is
 * no reliable way how to recognize that spi transfer is done
 * dma complete callbacks are called before real spi transfer is finished
 * and if only tx dma channel is set up (and rx fifo overflow event masked)
 * spi master done event irq is not generated unless rx fifo is empty (emptied)
 * so we need rx tmp buffer to use for rx dma if user does not provide one
 */
static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size)
{
	hw->dma_rx_tmpbuf = kmalloc(size, GFP_KERNEL);
	if (!hw->dma_rx_tmpbuf)
		return -ENOMEM;
	hw->dma_rx_tmpbuf_size = size;
	hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf,
			size, DMA_FROM_DEVICE);
	if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) {
		kfree(hw->dma_rx_tmpbuf);
		/* NOTE(review): pointer assigned 0 instead of NULL (style) */
		hw->dma_rx_tmpbuf = 0;
		hw->dma_rx_tmpbuf_size = 0;
		return -EFAULT;
	}
	return 0;
}

/* unmap and free the rx bounce buffer; safe only if it was allocated */
static void au1550_spi_dma_rxtmp_free(struct au1550_spi *hw)
{
	dma_unmap_single(hw->dev, hw->dma_rx_tmpbuf_addr,
			hw->dma_rx_tmpbuf_size, DMA_FROM_DEVICE);
	kfree(hw->dma_rx_tmpbuf);
	hw->dma_rx_tmpbuf = 0;
	hw->dma_rx_tmpbuf_size = 0;
}

/*
 * dma transfer handler for <=8-bit words: maps buffers as needed, queues one
 * descriptor per direction, starts the transfer and sleeps on master_done
 * (completed from au1550_spi_dma_irq_callback). Returns min(rx, tx) byte count.
 */
static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
{
	struct au1550_spi *hw = spi_master_get_devdata(spi->master);
	dma_addr_t dma_tx_addr;
	dma_addr_t dma_rx_addr;
	u32 res;

	hw->len = t->len;
	hw->tx_count = 0;
	hw->rx_count = 0;

	hw->tx = t->tx_buf;
	hw->rx = t->rx_buf;
	dma_tx_addr = t->tx_dma;
	dma_rx_addr = t->rx_dma;

	/*
	 * check if buffers are already dma mapped, map them otherwise:
	 * - first map the TX buffer, so cache data gets written to memory
	 * - then map the RX buffer, so that cache entries (with
	 *   soon-to-be-stale data) get removed
	 * use rx buffer in place of tx if tx buffer was not provided
	 * use temp rx buffer (preallocated or realloc to fit) for rx dma
	 */
	if (t->tx_buf) {
		if (t->tx_dma == 0) { /* if DMA_ADDR_INVALID, map it */
			dma_tx_addr = dma_map_single(hw->dev,
					(void *)t->tx_buf,
					t->len, DMA_TO_DEVICE);
			/* NOTE(review): mapping failure is only logged; the
			 * transfer still proceeds with a bad dma address —
			 * should abort here. TODO confirm and fix. */
			if (dma_mapping_error(hw->dev, dma_tx_addr))
				dev_err(hw->dev, "tx dma map error\n");
		}
	}

	if (t->rx_buf) {
		if (t->rx_dma == 0) { /* if DMA_ADDR_INVALID, map it */
			dma_rx_addr = dma_map_single(hw->dev,
					(void *)t->rx_buf,
					t->len, DMA_FROM_DEVICE);
			/* NOTE(review): same unhandled mapping failure here */
			if (dma_mapping_error(hw->dev, dma_rx_addr))
				dev_err(hw->dev, "rx dma map error\n");
		}
	} else {
		/* no caller rx buffer: grow the bounce buffer if needed */
		if (t->len > hw->dma_rx_tmpbuf_size) {
			int ret;

			au1550_spi_dma_rxtmp_free(hw);
			ret = au1550_spi_dma_rxtmp_alloc(hw, max(t->len,
					AU1550_SPI_DMA_RXTMP_MINSIZE));
			if (ret < 0)
				return ret;
		}
		hw->rx = hw->dma_rx_tmpbuf;
		dma_rx_addr = hw->dma_rx_tmpbuf_addr;
		dma_sync_single_for_device(hw->dev, dma_rx_addr,
			t->len, DMA_FROM_DEVICE);
	}

	if (!t->tx_buf) {
		/* transmit out of the rx buffer (contents don't matter) */
		dma_sync_single_for_device(hw->dev, dma_rx_addr,
				t->len, DMA_BIDIRECTIONAL);
		hw->tx = hw->rx;
	}

	/* put buffers on the ring */
	res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, virt_to_phys(hw->rx),
				    t->len, DDMA_FLAGS_IE);
	if (!res)
		dev_err(hw->dev, "rx dma put dest error\n");

	res = au1xxx_dbdma_put_source(hw->dma_tx_ch, virt_to_phys(hw->tx),
				      t->len, DDMA_FLAGS_IE);
	if (!res)
		dev_err(hw->dev, "tx dma put source error\n");

	au1xxx_dbdma_start(hw->dma_rx_ch);
	au1xxx_dbdma_start(hw->dma_tx_ch);

	/* by default enable nearly all events interrupt */
	hw->regs->psc_spimsk = PSC_SPIMSK_SD;
	au_sync();

	/* start the transfer */
	hw->regs->psc_spipcr = PSC_SPIPCR_MS;
	au_sync();

	wait_for_completion(&hw->master_done);

	au1xxx_dbdma_stop(hw->dma_tx_ch);
	au1xxx_dbdma_stop(hw->dma_rx_ch);

	if (!t->rx_buf) {
		/* using the temporal preallocated and premapped buffer */
		dma_sync_single_for_cpu(hw->dev, dma_rx_addr, t->len,
			DMA_FROM_DEVICE);
	}
	/* unmap buffers if mapped above */
	if (t->rx_buf && t->rx_dma == 0 )
		dma_unmap_single(hw->dev, dma_rx_addr, t->len,
			DMA_FROM_DEVICE);
	if (t->tx_buf && t->tx_dma == 0 )
		dma_unmap_single(hw->dev, dma_tx_addr, t->len,
			DMA_TO_DEVICE);

	return hw->rx_count < hw->tx_count ? hw->rx_count : hw->tx_count;
}

/*
 * irq handler for dma-mode transfers: on any error event, stop dma, read back
 * residues for the byte counts, reset channels and fifos; on master-done,
 * record a full transfer. Either way, completes master_done.
 */
static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw)
{
	u32 stat, evnt;

	stat = hw->regs->psc_spistat;
	evnt = hw->regs->psc_spievent;
	au_sync();
	if ((stat & PSC_SPISTAT_DI) == 0) {
		dev_err(hw->dev, "Unexpected IRQ!\n");
		return IRQ_NONE;
	}

	if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO
				| PSC_SPIEVNT_RU | PSC_SPIEVNT_TO
				| PSC_SPIEVNT_TU | PSC_SPIEVNT_SD))
			!= 0) {
		/*
		 * due to an spi error we consider transfer as done,
		 * so mask all events until before next transfer start
		 * and stop the possibly running dma immediately
		 */
		au1550_spi_mask_ack_all(hw);
		au1xxx_dbdma_stop(hw->dma_rx_ch);
		au1xxx_dbdma_stop(hw->dma_tx_ch);

		/* get number of transferred bytes */
		hw->rx_count = hw->len - au1xxx_get_dma_residue(hw->dma_rx_ch);
		hw->tx_count = hw->len - au1xxx_get_dma_residue(hw->dma_tx_ch);

		au1xxx_dbdma_reset(hw->dma_rx_ch);
		au1xxx_dbdma_reset(hw->dma_tx_ch);
		au1550_spi_reset_fifos(hw);

		/* NOTE(review): equality test — this only matches when RO is
		 * the sole pending event bit; `evnt & PSC_SPIEVNT_RO` was
		 * presumably intended. TODO confirm. */
		if (evnt == PSC_SPIEVNT_RO)
			dev_err(hw->dev,
				"dma transfer: receive FIFO overflow!\n");
		else
			dev_err(hw->dev,
				"dma transfer: unexpected SPI error "
				"(event=0x%x stat=0x%x)!\n", evnt, stat);

		complete(&hw->master_done);
		return IRQ_HANDLED;
	}

	if ((evnt & PSC_SPIEVNT_MD) != 0) {
		/* transfer completed successfully */
		au1550_spi_mask_ack_all(hw);
		hw->rx_count = hw->len;
		hw->tx_count = hw->len;
		complete(&hw->master_done);
	}
	return IRQ_HANDLED;
}


/* routines to handle different word sizes in pio mode */
/* read one word from the rx fifo; advances hw->rx (if set) and rx_count */
#define AU1550_SPI_RX_WORD(size, mask)					\
static void au1550_spi_rx_word_##size(struct au1550_spi *hw)		\
{									\
	u32 fifoword = hw->regs->psc_spitxrx & (u32)(mask);		\
	au_sync();							\
	if (hw->rx) {							\
		*(u##size *)hw->rx = (u##size)fifoword;			\
		hw->rx += (size) / 8;					\
	}								\
	hw->rx_count += (size) / 8;					\
}

/* write one word to the tx fifo; tags the last word with PSC_SPITXRX_LC */
#define AU1550_SPI_TX_WORD(size, mask)					\
static void au1550_spi_tx_word_##size(struct au1550_spi *hw)		\
{									\
	u32 fifoword = 0;						\
	if (hw->tx) {							\
		fifoword = *(u##size *)hw->tx & (u32)(mask);		\
		hw->tx += (size) / 8;					\
	}								\
	hw->tx_count += (size) / 8;					\
	if (hw->tx_count >= hw->len)					\
		fifoword |= PSC_SPITXRX_LC;				\
	hw->regs->psc_spitxrx = fifoword;				\
	au_sync();							\
}

/* the 32-bit variants mask to 0xffffff: the controller tops out at 24 bpw */
AU1550_SPI_RX_WORD(8,0xff)
AU1550_SPI_RX_WORD(16,0xffff)
AU1550_SPI_RX_WORD(32,0xffffff)
AU1550_SPI_TX_WORD(8,0xff)
AU1550_SPI_TX_WORD(16,0xffff)
AU1550_SPI_TX_WORD(32,0xffffff)

/*
 * pio (interrupt-driven) transfer handler: prefill the tx fifo, enable event
 * interrupts, kick the transfer and sleep on master_done (completed from
 * au1550_spi_pio_irq_callback). Returns min(rx, tx) byte count.
 */
static int au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t)
{
	u32 stat, mask;
	struct au1550_spi *hw = spi_master_get_devdata(spi->master);

	hw->tx = t->tx_buf;
	hw->rx = t->rx_buf;
	hw->len = t->len;
	hw->tx_count = 0;
	hw->rx_count = 0;

	/* by default enable nearly all events after filling tx fifo */
	mask = PSC_SPIMSK_SD;

	/* fill the transmit FIFO */
	while (hw->tx_count < hw->len) {

		hw->tx_word(hw);

		if (hw->tx_count >= hw->len) {
			/* mask tx fifo request interrupt as we are done */
			mask |= PSC_SPIMSK_TR;
		}

		stat = hw->regs->psc_spistat;
		au_sync();
		if (stat & PSC_SPISTAT_TF)
			break;
	}

	/* enable event interrupts */
	hw->regs->psc_spimsk = mask;
	au_sync();

	/* start the transfer */
	hw->regs->psc_spipcr = PSC_SPIPCR_MS;
	au_sync();

	wait_for_completion(&hw->master_done);

	return hw->rx_count < hw->tx_count ? hw->rx_count : hw->tx_count;
}

/*
 * irq handler for pio-mode transfers: drains the rx fifo / refills the tx
 * fifo, restarts on tx underflow, and completes master_done when all bytes
 * have been received (or on a fatal event).
 */
static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
{
	int busy;
	u32 stat, evnt;

	stat = hw->regs->psc_spistat;
	evnt = hw->regs->psc_spievent;
	au_sync();
	if ((stat & PSC_SPISTAT_DI) == 0) {
		dev_err(hw->dev, "Unexpected IRQ!\n");
		return IRQ_NONE;
	}

	if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO
				| PSC_SPIEVNT_RU | PSC_SPIEVNT_TO
				| PSC_SPIEVNT_SD))
			!= 0) {
		/*
		 * due to an error we consider transfer as done,
		 * so mask all events until before next transfer start
		 */
		au1550_spi_mask_ack_all(hw);
		au1550_spi_reset_fifos(hw);
		dev_err(hw->dev,
			"pio transfer: unexpected SPI error "
			"(event=0x%x stat=0x%x)!\n", evnt, stat);
		complete(&hw->master_done);
		return IRQ_HANDLED;
	}

	/*
	 * while there is something to read from rx fifo
	 * or there is a space to write to tx fifo:
	 */
	do {
		busy = 0;
		stat = hw->regs->psc_spistat;
		au_sync();

		/*
		 * Take care to not let the Rx FIFO overflow.
		 *
		 * We only write a byte if we have read one at least. Initially,
		 * the write fifo is full, so we should read from the read fifo
		 * first.
		 * In case we miss a word from the read fifo, we should get a
		 * RO event and should back out.
		 */
		if (!(stat & PSC_SPISTAT_RE) && hw->rx_count < hw->len) {
			hw->rx_word(hw);
			busy = 1;

			if (!(stat & PSC_SPISTAT_TF) && hw->tx_count < hw->len)
				hw->tx_word(hw);
		}
	} while (busy);

	/* ack the rx-ready/tx-request events we just serviced */
	hw->regs->psc_spievent = PSC_SPIEVNT_RR | PSC_SPIEVNT_TR;
	au_sync();

	/*
	 * Restart the SPI transmission in case of a transmit underflow.
	 * This seems to work despite the notes in the Au1550 data book
	 * of Figure 8-4 with flowchart for SPI master operation:
	 *
	 * """Note 1: An XFR Error Interrupt occurs, unless masked,
	 * for any of the following events: Tx FIFO Underflow,
	 * Rx FIFO Overflow, or Multiple-master Error
	 *    Note 2: In case of a Tx Underflow Error, all zeroes are
	 * transmitted."""
	 *
	 * By simply restarting the spi transfer on Tx Underflow Error,
	 * we assume that spi transfer was paused instead of zeroes
	 * transmittion mentioned in the Note 2 of Au1550 data book.
	 */
	if (evnt & PSC_SPIEVNT_TU) {
		hw->regs->psc_spievent = PSC_SPIEVNT_TU | PSC_SPIEVNT_MD;
		au_sync();
		hw->regs->psc_spipcr = PSC_SPIPCR_MS;
		au_sync();
	}

	if (hw->rx_count >= hw->len) {
		/* transfer completed successfully */
		au1550_spi_mask_ack_all(hw);
		complete(&hw->master_done);
	}
	return IRQ_HANDLED;
}

/* spi_bitbang txrx_bufs hook: dispatch to the dma or pio implementation */
static int au1550_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct au1550_spi *hw = spi_master_get_devdata(spi->master);
	return hw->txrx_bufs(spi, t);
}

/* top-level irq handler: dispatch to the dma or pio callback */
static irqreturn_t au1550_spi_irq(int irq, void *dev)
{
	struct au1550_spi *hw = dev;
	return hw->irq_callback(hw);
}

/* select dma vs. pio handlers and the pio word accessors for a word size */
static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw)
{
	if (bpw <= 8) {
		if (hw->usedma) {
			hw->txrx_bufs = &au1550_spi_dma_txrxb;
			hw->irq_callback = &au1550_spi_dma_irq_callback;
		} else {
			hw->rx_word = &au1550_spi_rx_word_8;
			hw->tx_word = &au1550_spi_tx_word_8;
			hw->txrx_bufs = &au1550_spi_pio_txrxb;
			hw->irq_callback = &au1550_spi_pio_irq_callback;
		}
	} else if (bpw <= 16) {
		hw->rx_word = &au1550_spi_rx_word_16;
		hw->tx_word = &au1550_spi_tx_word_16;
		hw->txrx_bufs = &au1550_spi_pio_txrxb;
		hw->irq_callback = &au1550_spi_pio_irq_callback;
	} else {
		hw->rx_word = &au1550_spi_rx_word_32;
		hw->tx_word = &au1550_spi_tx_word_32;
		hw->txrx_bufs = &au1550_spi_pio_txrxb;
		hw->irq_callback = &au1550_spi_pio_irq_callback;
	}
}

/* bring the PSC out of reset and configure it for SPI master operation */
static void au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
{
	u32 stat, cfg;

	/* set up the PSC for SPI mode */
	hw->regs->psc_ctrl = PSC_CTRL_DISABLE;
	au_sync();
	hw->regs->psc_sel = PSC_SEL_PS_SPIMODE;
	au_sync();

	hw->regs->psc_spicfg = 0;
	au_sync();

	hw->regs->psc_ctrl = PSC_CTRL_ENABLE;
	au_sync();
	do {
		stat = hw->regs->psc_spistat;
		au_sync();
	} while ((stat & PSC_SPISTAT_SR) == 0);

	cfg = hw->usedma ? 0 : PSC_SPICFG_DD_DISABLE;
	cfg |= PSC_SPICFG_SET_LEN(8);
	cfg |= PSC_SPICFG_RT_FIFO8 | PSC_SPICFG_TT_FIFO8;
	/* use minimal allowed brg and div values as initial setting: */
	cfg |= PSC_SPICFG_SET_BAUD(4) | PSC_SPICFG_SET_DIV(0);

#ifdef AU1550_SPI_DEBUG_LOOPBACK
	cfg |= PSC_SPICFG_LB;
#endif

	hw->regs->psc_spicfg = cfg;
	au_sync();

	au1550_spi_mask_ack_all(hw);

	hw->regs->psc_spicfg |= PSC_SPICFG_DE_ENABLE;
	au_sync();
	do {
		stat = hw->regs->psc_spistat;
		au_sync();
	} while ((stat & PSC_SPISTAT_DR) == 0);

	au1550_spi_reset_fifos(hw);
}

/*
 * probe: allocate the spi_master, claim irq/dma/mmio resources, set up
 * dbdma channels and the rx bounce buffer (dma mode), compute the valid
 * spi clock range, init the hardware and register with spi_bitbang.
 */
static int au1550_spi_probe(struct platform_device *pdev)
{
	struct au1550_spi *hw;
	struct spi_master *master;
	struct resource *r;
	int err = 0;

	master = spi_alloc_master(&pdev->dev, sizeof(struct au1550_spi));
	if (master == NULL) {
		dev_err(&pdev->dev, "No memory for spi_master\n");
		err = -ENOMEM;
		goto err_nomem;
	}

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;

	hw = spi_master_get_devdata(master);

	hw->master = spi_master_get(master);
	hw->pdata = pdev->dev.platform_data;
	hw->dev = &pdev->dev;

	if (hw->pdata == NULL) {
		dev_err(&pdev->dev, "No platform data supplied\n");
		err = -ENOENT;
		goto err_no_pdata;
	}

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!r) {
		dev_err(&pdev->dev, "no IRQ\n");
		err = -ENODEV;
		goto err_no_iores;
	}
	hw->irq = r->start;

	hw->usedma = 0;
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (r) {
		hw->dma_tx_id = r->start;
		r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
		if (r) {
			hw->dma_rx_id = r->start;
			/* dma only if both channels, the mem device and a
			 * dma mask are available */
			if (usedma && ddma_memid) {
				if (pdev->dev.dma_mask == NULL)
					dev_warn(&pdev->dev, "no dma mask\n");
				else
					hw->usedma = 1;
			}
		}
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no mmio resource\n");
		err = -ENODEV;
		goto err_no_iores;
	}

	hw->ioarea = request_mem_region(r->start, sizeof(psc_spi_t),
					pdev->name);
	if (!hw->ioarea) {
		dev_err(&pdev->dev, "Cannot reserve iomem region\n");
		err = -ENXIO;
		goto err_no_iores;
	}

	hw->regs = (psc_spi_t __iomem *)ioremap(r->start, sizeof(psc_spi_t));
	if (!hw->regs) {
		dev_err(&pdev->dev, "cannot ioremap\n");
		err = -ENXIO;
		goto err_ioremap;
	}

	platform_set_drvdata(pdev, hw);

	init_completion(&hw->master_done);

	hw->bitbang.master = hw->master;
	hw->bitbang.setup_transfer = au1550_spi_setupxfer;
	hw->bitbang.chipselect = au1550_spi_chipsel;
	hw->bitbang.master->setup = au1550_spi_setup;
	hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs;

	if (hw->usedma) {
		/* tx: memory -> spi fifo, rx: spi fifo -> memory */
		hw->dma_tx_ch = au1xxx_dbdma_chan_alloc(ddma_memid,
			hw->dma_tx_id, NULL, (void *)hw);
		if (hw->dma_tx_ch == 0) {
			dev_err(&pdev->dev,
				"Cannot allocate tx dma channel\n");
			err = -ENXIO;
			goto err_no_txdma;
		}
		au1xxx_dbdma_set_devwidth(hw->dma_tx_ch, 8);
		if (au1xxx_dbdma_ring_alloc(hw->dma_tx_ch,
			AU1550_SPI_DBDMA_DESCRIPTORS) == 0) {
			dev_err(&pdev->dev,
				"Cannot allocate tx dma descriptors\n");
			err = -ENXIO;
			goto err_no_txdma_descr;
		}


		hw->dma_rx_ch = au1xxx_dbdma_chan_alloc(hw->dma_rx_id,
			ddma_memid, NULL, (void *)hw);
		if (hw->dma_rx_ch == 0) {
			dev_err(&pdev->dev,
				"Cannot allocate rx dma channel\n");
			err = -ENXIO;
			goto err_no_rxdma;
		}
		au1xxx_dbdma_set_devwidth(hw->dma_rx_ch, 8);
		if (au1xxx_dbdma_ring_alloc(hw->dma_rx_ch,
			AU1550_SPI_DBDMA_DESCRIPTORS) == 0) {
			dev_err(&pdev->dev,
				"Cannot allocate rx dma descriptors\n");
			err = -ENXIO;
			goto err_no_rxdma_descr;
		}

		err = au1550_spi_dma_rxtmp_alloc(hw,
			AU1550_SPI_DMA_RXTMP_MINSIZE);
		if (err < 0) {
			dev_err(&pdev->dev,
				"Cannot allocate initial rx dma tmp buffer\n");
			goto err_dma_rxtmp_alloc;
		}
	}

	au1550_spi_bits_handlers_set(hw, 8);

	err = request_irq(hw->irq, au1550_spi_irq, 0, pdev->name, hw);
	if (err) {
		dev_err(&pdev->dev, "Cannot claim IRQ\n");
		goto err_no_irq;
	}

	master->bus_num = pdev->id;
	master->num_chipselect = hw->pdata->num_chipselect;

	/*
	 * precompute valid range for spi freq - from au1550 datasheet:
	 *    psc_tempclk = psc_mainclk / (2 << DIV)
	 *    spiclk = psc_tempclk / (2 * (BRG + 1))
	 *    BRG valid range is 4..63
	 *    DIV valid range is 0..3
	 * round the min and max frequencies to values that would still
	 * produce valid brg and div
	 */
	{
		int min_div = (2 << 0) * (2 * (4 + 1));
		int max_div = (2 << 3) * (2 * (63 + 1));
		hw->freq_max = hw->pdata->mainclk_hz / min_div;
		hw->freq_min = hw->pdata->mainclk_hz / (max_div + 1) + 1;
	}

	au1550_spi_setup_psc_as_spi(hw);

	err = spi_bitbang_start(&hw->bitbang);
	if (err) {
		dev_err(&pdev->dev, "Failed to register SPI master\n");
		goto err_register;
	}

	dev_info(&pdev->dev,
		"spi master registered: bus_num=%d num_chipselect=%d\n",
		master->bus_num, master->num_chipselect);

	return 0;

err_register:
	free_irq(hw->irq, hw);

err_no_irq:
	/* NOTE(review): called even when !hw->usedma, in which case the
	 * bounce buffer was never allocated/mapped — harmless for kfree(NULL)
	 * but dma_unmap_single() of a zero handle looks dubious; verify. */
	au1550_spi_dma_rxtmp_free(hw);

err_dma_rxtmp_alloc:
err_no_rxdma_descr:
	if (hw->usedma)
		au1xxx_dbdma_chan_free(hw->dma_rx_ch);

err_no_rxdma:
err_no_txdma_descr:
	if (hw->usedma)
		au1xxx_dbdma_chan_free(hw->dma_tx_ch);

err_no_txdma:
	iounmap((void __iomem *)hw->regs);

err_ioremap:
	release_resource(hw->ioarea);
	kfree(hw->ioarea);

err_no_iores:
err_no_pdata:
	spi_master_put(hw->master);

err_nomem:
	return err;
}

/* remove: unwind everything probe set up, in reverse order */
static int au1550_spi_remove(struct platform_device *pdev)
{
	struct au1550_spi *hw = platform_get_drvdata(pdev);

	dev_info(&pdev->dev, "spi master remove: bus_num=%d\n",
		hw->master->bus_num);

	spi_bitbang_stop(&hw->bitbang);
	free_irq(hw->irq, hw);
	iounmap((void __iomem *)hw->regs);
	release_resource(hw->ioarea);
	kfree(hw->ioarea);

	if (hw->usedma) {
		au1550_spi_dma_rxtmp_free(hw);
		au1xxx_dbdma_chan_free(hw->dma_rx_ch);
		au1xxx_dbdma_chan_free(hw->dma_tx_ch);
	}

	platform_set_drvdata(pdev, NULL);

	spi_master_put(hw->master);
	return 0;
}

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:au1550-spi");

/* .probe is passed to platform_driver_probe() instead of being set here */
static struct platform_driver au1550_spi_drv = {
	.remove = au1550_spi_remove,
	.driver = {
		.name = "au1550-spi",
		.owner = THIS_MODULE,
	},
};

static int __init au1550_spi_init(void)
{
	/*
	 * create memory device with 8 bits dev_devwidth
	 * needed for proper byte ordering to spi fifo
	 */
	if (usedma) {
		ddma_memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev);
		if (!ddma_memid)
			/* NOTE(review): adjacent literals concatenate to
			 * "...memorydbdma device" — missing space; string is
			 * runtime output so left untouched in this doc pass */
			printk(KERN_ERR "au1550-spi: cannot add memory"
					"dbdma device\n");
	}
	return platform_driver_probe(&au1550_spi_drv, au1550_spi_probe);
}
module_init(au1550_spi_init);

static void __exit au1550_spi_exit(void)
{
	if (usedma && ddma_memid)
		au1xxx_ddma_del_device(ddma_memid);
	platform_driver_unregister(&au1550_spi_drv);
}
module_exit(au1550_spi_exit);

MODULE_DESCRIPTION("Au1550 PSC SPI Driver");
MODULE_AUTHOR("Jan Nikitenko <jan.nikitenko@gmail.com>");
MODULE_LICENSE("GPL");
gpl-2.0
Split-Screen/android_kernel_asus_fugu
drivers/spi/spi-au1550.c
2109
26666
/* * au1550 psc spi controller driver * may work also with au1200, au1210, au1250 * will not work on au1000, au1100 and au1500 (no full spi controller there) * * Copyright (c) 2006 ATRON electronic GmbH * Author: Jan Nikitenko <jan.nikitenko@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/resource.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/dma-mapping.h> #include <linux/completion.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1xxx_psc.h> #include <asm/mach-au1x00/au1xxx_dbdma.h> #include <asm/mach-au1x00/au1550_spi.h> static unsigned usedma = 1; module_param(usedma, uint, 0644); /* #define AU1550_SPI_DEBUG_LOOPBACK */ #define AU1550_SPI_DBDMA_DESCRIPTORS 1 #define AU1550_SPI_DMA_RXTMP_MINSIZE 2048U struct au1550_spi { struct spi_bitbang bitbang; volatile psc_spi_t __iomem *regs; int irq; unsigned freq_max; unsigned freq_min; unsigned len; unsigned tx_count; unsigned rx_count; const u8 *tx; u8 *rx; void (*rx_word)(struct au1550_spi *hw); void (*tx_word)(struct au1550_spi *hw); int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t); irqreturn_t 
(*irq_callback)(struct au1550_spi *hw); struct completion master_done; unsigned usedma; u32 dma_tx_id; u32 dma_rx_id; u32 dma_tx_ch; u32 dma_rx_ch; u8 *dma_rx_tmpbuf; unsigned dma_rx_tmpbuf_size; u32 dma_rx_tmpbuf_addr; struct spi_master *master; struct device *dev; struct au1550_spi_info *pdata; struct resource *ioarea; }; /* we use an 8-bit memory device for dma transfers to/from spi fifo */ static dbdev_tab_t au1550_spi_mem_dbdev = { .dev_id = DBDMA_MEM_CHAN, .dev_flags = DEV_FLAGS_ANYUSE|DEV_FLAGS_SYNC, .dev_tsize = 0, .dev_devwidth = 8, .dev_physaddr = 0x00000000, .dev_intlevel = 0, .dev_intpolarity = 0 }; static int ddma_memid; /* id to above mem dma device */ static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw); /* * compute BRG and DIV bits to setup spi clock based on main input clock rate * that was specified in platform data structure * according to au1550 datasheet: * psc_tempclk = psc_mainclk / (2 << DIV) * spiclk = psc_tempclk / (2 * (BRG + 1)) * BRG valid range is 4..63 * DIV valid range is 0..3 */ static u32 au1550_spi_baudcfg(struct au1550_spi *hw, unsigned speed_hz) { u32 mainclk_hz = hw->pdata->mainclk_hz; u32 div, brg; for (div = 0; div < 4; div++) { brg = mainclk_hz / speed_hz / (4 << div); /* now we have BRG+1 in brg, so count with that */ if (brg < (4 + 1)) { brg = (4 + 1); /* speed_hz too big */ break; /* set lowest brg (div is == 0) */ } if (brg <= (63 + 1)) break; /* we have valid brg and div */ } if (div == 4) { div = 3; /* speed_hz too small */ brg = (63 + 1); /* set highest brg and div */ } brg--; return PSC_SPICFG_SET_BAUD(brg) | PSC_SPICFG_SET_DIV(div); } static inline void au1550_spi_mask_ack_all(struct au1550_spi *hw) { hw->regs->psc_spimsk = PSC_SPIMSK_MM | PSC_SPIMSK_RR | PSC_SPIMSK_RO | PSC_SPIMSK_RU | PSC_SPIMSK_TR | PSC_SPIMSK_TO | PSC_SPIMSK_TU | PSC_SPIMSK_SD | PSC_SPIMSK_MD; au_sync(); hw->regs->psc_spievent = PSC_SPIEVNT_MM | PSC_SPIEVNT_RR | PSC_SPIEVNT_RO | PSC_SPIEVNT_RU | PSC_SPIEVNT_TR | 
PSC_SPIEVNT_TO | PSC_SPIEVNT_TU | PSC_SPIEVNT_SD | PSC_SPIEVNT_MD; au_sync(); } static void au1550_spi_reset_fifos(struct au1550_spi *hw) { u32 pcr; hw->regs->psc_spipcr = PSC_SPIPCR_RC | PSC_SPIPCR_TC; au_sync(); do { pcr = hw->regs->psc_spipcr; au_sync(); } while (pcr != 0); } /* * dma transfers are used for the most common spi word size of 8-bits * we cannot easily change already set up dma channels' width, so if we wanted * dma support for more than 8-bit words (up to 24 bits), we would need to * setup dma channels from scratch on each spi transfer, based on bits_per_word * instead we have pre set up 8 bit dma channels supporting spi 4 to 8 bits * transfers, and 9 to 24 bits spi transfers will be done in pio irq based mode * callbacks to handle dma or pio are set up in au1550_spi_bits_handlers_set() */ static void au1550_spi_chipsel(struct spi_device *spi, int value) { struct au1550_spi *hw = spi_master_get_devdata(spi->master); unsigned cspol = spi->mode & SPI_CS_HIGH ? 1 : 0; u32 cfg, stat; switch (value) { case BITBANG_CS_INACTIVE: if (hw->pdata->deactivate_cs) hw->pdata->deactivate_cs(hw->pdata, spi->chip_select, cspol); break; case BITBANG_CS_ACTIVE: au1550_spi_bits_handlers_set(hw, spi->bits_per_word); cfg = hw->regs->psc_spicfg; au_sync(); hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE; au_sync(); if (spi->mode & SPI_CPOL) cfg |= PSC_SPICFG_BI; else cfg &= ~PSC_SPICFG_BI; if (spi->mode & SPI_CPHA) cfg &= ~PSC_SPICFG_CDE; else cfg |= PSC_SPICFG_CDE; if (spi->mode & SPI_LSB_FIRST) cfg |= PSC_SPICFG_MLF; else cfg &= ~PSC_SPICFG_MLF; if (hw->usedma && spi->bits_per_word <= 8) cfg &= ~PSC_SPICFG_DD_DISABLE; else cfg |= PSC_SPICFG_DD_DISABLE; cfg = PSC_SPICFG_CLR_LEN(cfg); cfg |= PSC_SPICFG_SET_LEN(spi->bits_per_word); cfg = PSC_SPICFG_CLR_BAUD(cfg); cfg &= ~PSC_SPICFG_SET_DIV(3); cfg |= au1550_spi_baudcfg(hw, spi->max_speed_hz); hw->regs->psc_spicfg = cfg | PSC_SPICFG_DE_ENABLE; au_sync(); do { stat = hw->regs->psc_spistat; au_sync(); } while ((stat & 
PSC_SPISTAT_DR) == 0); if (hw->pdata->activate_cs) hw->pdata->activate_cs(hw->pdata, spi->chip_select, cspol); break; } } static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t) { struct au1550_spi *hw = spi_master_get_devdata(spi->master); unsigned bpw, hz; u32 cfg, stat; bpw = spi->bits_per_word; hz = spi->max_speed_hz; if (t) { if (t->bits_per_word) bpw = t->bits_per_word; if (t->speed_hz) hz = t->speed_hz; } if (bpw < 4 || bpw > 24) { dev_err(&spi->dev, "setupxfer: invalid bits_per_word=%d\n", bpw); return -EINVAL; } if (hz > spi->max_speed_hz || hz > hw->freq_max || hz < hw->freq_min) { dev_err(&spi->dev, "setupxfer: clock rate=%d out of range\n", hz); return -EINVAL; } au1550_spi_bits_handlers_set(hw, spi->bits_per_word); cfg = hw->regs->psc_spicfg; au_sync(); hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE; au_sync(); if (hw->usedma && bpw <= 8) cfg &= ~PSC_SPICFG_DD_DISABLE; else cfg |= PSC_SPICFG_DD_DISABLE; cfg = PSC_SPICFG_CLR_LEN(cfg); cfg |= PSC_SPICFG_SET_LEN(bpw); cfg = PSC_SPICFG_CLR_BAUD(cfg); cfg &= ~PSC_SPICFG_SET_DIV(3); cfg |= au1550_spi_baudcfg(hw, hz); hw->regs->psc_spicfg = cfg; au_sync(); if (cfg & PSC_SPICFG_DE_ENABLE) { do { stat = hw->regs->psc_spistat; au_sync(); } while ((stat & PSC_SPISTAT_DR) == 0); } au1550_spi_reset_fifos(hw); au1550_spi_mask_ack_all(hw); return 0; } static int au1550_spi_setup(struct spi_device *spi) { struct au1550_spi *hw = spi_master_get_devdata(spi->master); if (spi->bits_per_word < 4 || spi->bits_per_word > 24) { dev_err(&spi->dev, "setup: invalid bits_per_word=%d\n", spi->bits_per_word); return -EINVAL; } if (spi->max_speed_hz == 0) spi->max_speed_hz = hw->freq_max; if (spi->max_speed_hz > hw->freq_max || spi->max_speed_hz < hw->freq_min) return -EINVAL; /* * NOTE: cannot change speed and other hw settings immediately, * otherwise sharing of spi bus is not possible, * so do not call setupxfer(spi, NULL) here */ return 0; } /* * for dma spi transfers, we have to setup rx channel, 
otherwise there is * no reliable way how to recognize that spi transfer is done * dma complete callbacks are called before real spi transfer is finished * and if only tx dma channel is set up (and rx fifo overflow event masked) * spi master done event irq is not generated unless rx fifo is empty (emptied) * so we need rx tmp buffer to use for rx dma if user does not provide one */ static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size) { hw->dma_rx_tmpbuf = kmalloc(size, GFP_KERNEL); if (!hw->dma_rx_tmpbuf) return -ENOMEM; hw->dma_rx_tmpbuf_size = size; hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf, size, DMA_FROM_DEVICE); if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) { kfree(hw->dma_rx_tmpbuf); hw->dma_rx_tmpbuf = 0; hw->dma_rx_tmpbuf_size = 0; return -EFAULT; } return 0; } static void au1550_spi_dma_rxtmp_free(struct au1550_spi *hw) { dma_unmap_single(hw->dev, hw->dma_rx_tmpbuf_addr, hw->dma_rx_tmpbuf_size, DMA_FROM_DEVICE); kfree(hw->dma_rx_tmpbuf); hw->dma_rx_tmpbuf = 0; hw->dma_rx_tmpbuf_size = 0; } static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) { struct au1550_spi *hw = spi_master_get_devdata(spi->master); dma_addr_t dma_tx_addr; dma_addr_t dma_rx_addr; u32 res; hw->len = t->len; hw->tx_count = 0; hw->rx_count = 0; hw->tx = t->tx_buf; hw->rx = t->rx_buf; dma_tx_addr = t->tx_dma; dma_rx_addr = t->rx_dma; /* * check if buffers are already dma mapped, map them otherwise: * - first map the TX buffer, so cache data gets written to memory * - then map the RX buffer, so that cache entries (with * soon-to-be-stale data) get removed * use rx buffer in place of tx if tx buffer was not provided * use temp rx buffer (preallocated or realloc to fit) for rx dma */ if (t->tx_buf) { if (t->tx_dma == 0) { /* if DMA_ADDR_INVALID, map it */ dma_tx_addr = dma_map_single(hw->dev, (void *)t->tx_buf, t->len, DMA_TO_DEVICE); if (dma_mapping_error(hw->dev, dma_tx_addr)) dev_err(hw->dev, "tx dma map 
error\n"); } } if (t->rx_buf) { if (t->rx_dma == 0) { /* if DMA_ADDR_INVALID, map it */ dma_rx_addr = dma_map_single(hw->dev, (void *)t->rx_buf, t->len, DMA_FROM_DEVICE); if (dma_mapping_error(hw->dev, dma_rx_addr)) dev_err(hw->dev, "rx dma map error\n"); } } else { if (t->len > hw->dma_rx_tmpbuf_size) { int ret; au1550_spi_dma_rxtmp_free(hw); ret = au1550_spi_dma_rxtmp_alloc(hw, max(t->len, AU1550_SPI_DMA_RXTMP_MINSIZE)); if (ret < 0) return ret; } hw->rx = hw->dma_rx_tmpbuf; dma_rx_addr = hw->dma_rx_tmpbuf_addr; dma_sync_single_for_device(hw->dev, dma_rx_addr, t->len, DMA_FROM_DEVICE); } if (!t->tx_buf) { dma_sync_single_for_device(hw->dev, dma_rx_addr, t->len, DMA_BIDIRECTIONAL); hw->tx = hw->rx; } /* put buffers on the ring */ res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, virt_to_phys(hw->rx), t->len, DDMA_FLAGS_IE); if (!res) dev_err(hw->dev, "rx dma put dest error\n"); res = au1xxx_dbdma_put_source(hw->dma_tx_ch, virt_to_phys(hw->tx), t->len, DDMA_FLAGS_IE); if (!res) dev_err(hw->dev, "tx dma put source error\n"); au1xxx_dbdma_start(hw->dma_rx_ch); au1xxx_dbdma_start(hw->dma_tx_ch); /* by default enable nearly all events interrupt */ hw->regs->psc_spimsk = PSC_SPIMSK_SD; au_sync(); /* start the transfer */ hw->regs->psc_spipcr = PSC_SPIPCR_MS; au_sync(); wait_for_completion(&hw->master_done); au1xxx_dbdma_stop(hw->dma_tx_ch); au1xxx_dbdma_stop(hw->dma_rx_ch); if (!t->rx_buf) { /* using the temporal preallocated and premapped buffer */ dma_sync_single_for_cpu(hw->dev, dma_rx_addr, t->len, DMA_FROM_DEVICE); } /* unmap buffers if mapped above */ if (t->rx_buf && t->rx_dma == 0 ) dma_unmap_single(hw->dev, dma_rx_addr, t->len, DMA_FROM_DEVICE); if (t->tx_buf && t->tx_dma == 0 ) dma_unmap_single(hw->dev, dma_tx_addr, t->len, DMA_TO_DEVICE); return hw->rx_count < hw->tx_count ? 
hw->rx_count : hw->tx_count; } static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw) { u32 stat, evnt; stat = hw->regs->psc_spistat; evnt = hw->regs->psc_spievent; au_sync(); if ((stat & PSC_SPISTAT_DI) == 0) { dev_err(hw->dev, "Unexpected IRQ!\n"); return IRQ_NONE; } if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO | PSC_SPIEVNT_RU | PSC_SPIEVNT_TO | PSC_SPIEVNT_TU | PSC_SPIEVNT_SD)) != 0) { /* * due to an spi error we consider transfer as done, * so mask all events until before next transfer start * and stop the possibly running dma immediately */ au1550_spi_mask_ack_all(hw); au1xxx_dbdma_stop(hw->dma_rx_ch); au1xxx_dbdma_stop(hw->dma_tx_ch); /* get number of transferred bytes */ hw->rx_count = hw->len - au1xxx_get_dma_residue(hw->dma_rx_ch); hw->tx_count = hw->len - au1xxx_get_dma_residue(hw->dma_tx_ch); au1xxx_dbdma_reset(hw->dma_rx_ch); au1xxx_dbdma_reset(hw->dma_tx_ch); au1550_spi_reset_fifos(hw); if (evnt == PSC_SPIEVNT_RO) dev_err(hw->dev, "dma transfer: receive FIFO overflow!\n"); else dev_err(hw->dev, "dma transfer: unexpected SPI error " "(event=0x%x stat=0x%x)!\n", evnt, stat); complete(&hw->master_done); return IRQ_HANDLED; } if ((evnt & PSC_SPIEVNT_MD) != 0) { /* transfer completed successfully */ au1550_spi_mask_ack_all(hw); hw->rx_count = hw->len; hw->tx_count = hw->len; complete(&hw->master_done); } return IRQ_HANDLED; } /* routines to handle different word sizes in pio mode */ #define AU1550_SPI_RX_WORD(size, mask) \ static void au1550_spi_rx_word_##size(struct au1550_spi *hw) \ { \ u32 fifoword = hw->regs->psc_spitxrx & (u32)(mask); \ au_sync(); \ if (hw->rx) { \ *(u##size *)hw->rx = (u##size)fifoword; \ hw->rx += (size) / 8; \ } \ hw->rx_count += (size) / 8; \ } #define AU1550_SPI_TX_WORD(size, mask) \ static void au1550_spi_tx_word_##size(struct au1550_spi *hw) \ { \ u32 fifoword = 0; \ if (hw->tx) { \ fifoword = *(u##size *)hw->tx & (u32)(mask); \ hw->tx += (size) / 8; \ } \ hw->tx_count += (size) / 8; \ if (hw->tx_count >= 
hw->len) \ fifoword |= PSC_SPITXRX_LC; \ hw->regs->psc_spitxrx = fifoword; \ au_sync(); \ } AU1550_SPI_RX_WORD(8,0xff) AU1550_SPI_RX_WORD(16,0xffff) AU1550_SPI_RX_WORD(32,0xffffff) AU1550_SPI_TX_WORD(8,0xff) AU1550_SPI_TX_WORD(16,0xffff) AU1550_SPI_TX_WORD(32,0xffffff) static int au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t) { u32 stat, mask; struct au1550_spi *hw = spi_master_get_devdata(spi->master); hw->tx = t->tx_buf; hw->rx = t->rx_buf; hw->len = t->len; hw->tx_count = 0; hw->rx_count = 0; /* by default enable nearly all events after filling tx fifo */ mask = PSC_SPIMSK_SD; /* fill the transmit FIFO */ while (hw->tx_count < hw->len) { hw->tx_word(hw); if (hw->tx_count >= hw->len) { /* mask tx fifo request interrupt as we are done */ mask |= PSC_SPIMSK_TR; } stat = hw->regs->psc_spistat; au_sync(); if (stat & PSC_SPISTAT_TF) break; } /* enable event interrupts */ hw->regs->psc_spimsk = mask; au_sync(); /* start the transfer */ hw->regs->psc_spipcr = PSC_SPIPCR_MS; au_sync(); wait_for_completion(&hw->master_done); return hw->rx_count < hw->tx_count ? 
hw->rx_count : hw->tx_count; } static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw) { int busy; u32 stat, evnt; stat = hw->regs->psc_spistat; evnt = hw->regs->psc_spievent; au_sync(); if ((stat & PSC_SPISTAT_DI) == 0) { dev_err(hw->dev, "Unexpected IRQ!\n"); return IRQ_NONE; } if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO | PSC_SPIEVNT_RU | PSC_SPIEVNT_TO | PSC_SPIEVNT_SD)) != 0) { /* * due to an error we consider transfer as done, * so mask all events until before next transfer start */ au1550_spi_mask_ack_all(hw); au1550_spi_reset_fifos(hw); dev_err(hw->dev, "pio transfer: unexpected SPI error " "(event=0x%x stat=0x%x)!\n", evnt, stat); complete(&hw->master_done); return IRQ_HANDLED; } /* * while there is something to read from rx fifo * or there is a space to write to tx fifo: */ do { busy = 0; stat = hw->regs->psc_spistat; au_sync(); /* * Take care to not let the Rx FIFO overflow. * * We only write a byte if we have read one at least. Initially, * the write fifo is full, so we should read from the read fifo * first. * In case we miss a word from the read fifo, we should get a * RO event and should back out. */ if (!(stat & PSC_SPISTAT_RE) && hw->rx_count < hw->len) { hw->rx_word(hw); busy = 1; if (!(stat & PSC_SPISTAT_TF) && hw->tx_count < hw->len) hw->tx_word(hw); } } while (busy); hw->regs->psc_spievent = PSC_SPIEVNT_RR | PSC_SPIEVNT_TR; au_sync(); /* * Restart the SPI transmission in case of a transmit underflow. * This seems to work despite the notes in the Au1550 data book * of Figure 8-4 with flowchart for SPI master operation: * * """Note 1: An XFR Error Interrupt occurs, unless masked, * for any of the following events: Tx FIFO Underflow, * Rx FIFO Overflow, or Multiple-master Error * Note 2: In case of a Tx Underflow Error, all zeroes are * transmitted.""" * * By simply restarting the spi transfer on Tx Underflow Error, * we assume that spi transfer was paused instead of zeroes * transmittion mentioned in the Note 2 of Au1550 data book. 
*/ if (evnt & PSC_SPIEVNT_TU) { hw->regs->psc_spievent = PSC_SPIEVNT_TU | PSC_SPIEVNT_MD; au_sync(); hw->regs->psc_spipcr = PSC_SPIPCR_MS; au_sync(); } if (hw->rx_count >= hw->len) { /* transfer completed successfully */ au1550_spi_mask_ack_all(hw); complete(&hw->master_done); } return IRQ_HANDLED; } static int au1550_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) { struct au1550_spi *hw = spi_master_get_devdata(spi->master); return hw->txrx_bufs(spi, t); } static irqreturn_t au1550_spi_irq(int irq, void *dev) { struct au1550_spi *hw = dev; return hw->irq_callback(hw); } static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw) { if (bpw <= 8) { if (hw->usedma) { hw->txrx_bufs = &au1550_spi_dma_txrxb; hw->irq_callback = &au1550_spi_dma_irq_callback; } else { hw->rx_word = &au1550_spi_rx_word_8; hw->tx_word = &au1550_spi_tx_word_8; hw->txrx_bufs = &au1550_spi_pio_txrxb; hw->irq_callback = &au1550_spi_pio_irq_callback; } } else if (bpw <= 16) { hw->rx_word = &au1550_spi_rx_word_16; hw->tx_word = &au1550_spi_tx_word_16; hw->txrx_bufs = &au1550_spi_pio_txrxb; hw->irq_callback = &au1550_spi_pio_irq_callback; } else { hw->rx_word = &au1550_spi_rx_word_32; hw->tx_word = &au1550_spi_tx_word_32; hw->txrx_bufs = &au1550_spi_pio_txrxb; hw->irq_callback = &au1550_spi_pio_irq_callback; } } static void au1550_spi_setup_psc_as_spi(struct au1550_spi *hw) { u32 stat, cfg; /* set up the PSC for SPI mode */ hw->regs->psc_ctrl = PSC_CTRL_DISABLE; au_sync(); hw->regs->psc_sel = PSC_SEL_PS_SPIMODE; au_sync(); hw->regs->psc_spicfg = 0; au_sync(); hw->regs->psc_ctrl = PSC_CTRL_ENABLE; au_sync(); do { stat = hw->regs->psc_spistat; au_sync(); } while ((stat & PSC_SPISTAT_SR) == 0); cfg = hw->usedma ? 
0 : PSC_SPICFG_DD_DISABLE; cfg |= PSC_SPICFG_SET_LEN(8); cfg |= PSC_SPICFG_RT_FIFO8 | PSC_SPICFG_TT_FIFO8; /* use minimal allowed brg and div values as initial setting: */ cfg |= PSC_SPICFG_SET_BAUD(4) | PSC_SPICFG_SET_DIV(0); #ifdef AU1550_SPI_DEBUG_LOOPBACK cfg |= PSC_SPICFG_LB; #endif hw->regs->psc_spicfg = cfg; au_sync(); au1550_spi_mask_ack_all(hw); hw->regs->psc_spicfg |= PSC_SPICFG_DE_ENABLE; au_sync(); do { stat = hw->regs->psc_spistat; au_sync(); } while ((stat & PSC_SPISTAT_DR) == 0); au1550_spi_reset_fifos(hw); } static int au1550_spi_probe(struct platform_device *pdev) { struct au1550_spi *hw; struct spi_master *master; struct resource *r; int err = 0; master = spi_alloc_master(&pdev->dev, sizeof(struct au1550_spi)); if (master == NULL) { dev_err(&pdev->dev, "No memory for spi_master\n"); err = -ENOMEM; goto err_nomem; } /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; hw = spi_master_get_devdata(master); hw->master = spi_master_get(master); hw->pdata = pdev->dev.platform_data; hw->dev = &pdev->dev; if (hw->pdata == NULL) { dev_err(&pdev->dev, "No platform data supplied\n"); err = -ENOENT; goto err_no_pdata; } r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!r) { dev_err(&pdev->dev, "no IRQ\n"); err = -ENODEV; goto err_no_iores; } hw->irq = r->start; hw->usedma = 0; r = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (r) { hw->dma_tx_id = r->start; r = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (r) { hw->dma_rx_id = r->start; if (usedma && ddma_memid) { if (pdev->dev.dma_mask == NULL) dev_warn(&pdev->dev, "no dma mask\n"); else hw->usedma = 1; } } } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { dev_err(&pdev->dev, "no mmio resource\n"); err = -ENODEV; goto err_no_iores; } hw->ioarea = request_mem_region(r->start, sizeof(psc_spi_t), pdev->name); if (!hw->ioarea) { dev_err(&pdev->dev, "Cannot reserve iomem region\n"); err = -ENXIO; goto 
err_no_iores; } hw->regs = (psc_spi_t __iomem *)ioremap(r->start, sizeof(psc_spi_t)); if (!hw->regs) { dev_err(&pdev->dev, "cannot ioremap\n"); err = -ENXIO; goto err_ioremap; } platform_set_drvdata(pdev, hw); init_completion(&hw->master_done); hw->bitbang.master = hw->master; hw->bitbang.setup_transfer = au1550_spi_setupxfer; hw->bitbang.chipselect = au1550_spi_chipsel; hw->bitbang.master->setup = au1550_spi_setup; hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs; if (hw->usedma) { hw->dma_tx_ch = au1xxx_dbdma_chan_alloc(ddma_memid, hw->dma_tx_id, NULL, (void *)hw); if (hw->dma_tx_ch == 0) { dev_err(&pdev->dev, "Cannot allocate tx dma channel\n"); err = -ENXIO; goto err_no_txdma; } au1xxx_dbdma_set_devwidth(hw->dma_tx_ch, 8); if (au1xxx_dbdma_ring_alloc(hw->dma_tx_ch, AU1550_SPI_DBDMA_DESCRIPTORS) == 0) { dev_err(&pdev->dev, "Cannot allocate tx dma descriptors\n"); err = -ENXIO; goto err_no_txdma_descr; } hw->dma_rx_ch = au1xxx_dbdma_chan_alloc(hw->dma_rx_id, ddma_memid, NULL, (void *)hw); if (hw->dma_rx_ch == 0) { dev_err(&pdev->dev, "Cannot allocate rx dma channel\n"); err = -ENXIO; goto err_no_rxdma; } au1xxx_dbdma_set_devwidth(hw->dma_rx_ch, 8); if (au1xxx_dbdma_ring_alloc(hw->dma_rx_ch, AU1550_SPI_DBDMA_DESCRIPTORS) == 0) { dev_err(&pdev->dev, "Cannot allocate rx dma descriptors\n"); err = -ENXIO; goto err_no_rxdma_descr; } err = au1550_spi_dma_rxtmp_alloc(hw, AU1550_SPI_DMA_RXTMP_MINSIZE); if (err < 0) { dev_err(&pdev->dev, "Cannot allocate initial rx dma tmp buffer\n"); goto err_dma_rxtmp_alloc; } } au1550_spi_bits_handlers_set(hw, 8); err = request_irq(hw->irq, au1550_spi_irq, 0, pdev->name, hw); if (err) { dev_err(&pdev->dev, "Cannot claim IRQ\n"); goto err_no_irq; } master->bus_num = pdev->id; master->num_chipselect = hw->pdata->num_chipselect; /* * precompute valid range for spi freq - from au1550 datasheet: * psc_tempclk = psc_mainclk / (2 << DIV) * spiclk = psc_tempclk / (2 * (BRG + 1)) * BRG valid range is 4..63 * DIV valid range is 0..3 * round the min 
and max frequencies to values that would still * produce valid brg and div */ { int min_div = (2 << 0) * (2 * (4 + 1)); int max_div = (2 << 3) * (2 * (63 + 1)); hw->freq_max = hw->pdata->mainclk_hz / min_div; hw->freq_min = hw->pdata->mainclk_hz / (max_div + 1) + 1; } au1550_spi_setup_psc_as_spi(hw); err = spi_bitbang_start(&hw->bitbang); if (err) { dev_err(&pdev->dev, "Failed to register SPI master\n"); goto err_register; } dev_info(&pdev->dev, "spi master registered: bus_num=%d num_chipselect=%d\n", master->bus_num, master->num_chipselect); return 0; err_register: free_irq(hw->irq, hw); err_no_irq: au1550_spi_dma_rxtmp_free(hw); err_dma_rxtmp_alloc: err_no_rxdma_descr: if (hw->usedma) au1xxx_dbdma_chan_free(hw->dma_rx_ch); err_no_rxdma: err_no_txdma_descr: if (hw->usedma) au1xxx_dbdma_chan_free(hw->dma_tx_ch); err_no_txdma: iounmap((void __iomem *)hw->regs); err_ioremap: release_resource(hw->ioarea); kfree(hw->ioarea); err_no_iores: err_no_pdata: spi_master_put(hw->master); err_nomem: return err; } static int au1550_spi_remove(struct platform_device *pdev) { struct au1550_spi *hw = platform_get_drvdata(pdev); dev_info(&pdev->dev, "spi master remove: bus_num=%d\n", hw->master->bus_num); spi_bitbang_stop(&hw->bitbang); free_irq(hw->irq, hw); iounmap((void __iomem *)hw->regs); release_resource(hw->ioarea); kfree(hw->ioarea); if (hw->usedma) { au1550_spi_dma_rxtmp_free(hw); au1xxx_dbdma_chan_free(hw->dma_rx_ch); au1xxx_dbdma_chan_free(hw->dma_tx_ch); } platform_set_drvdata(pdev, NULL); spi_master_put(hw->master); return 0; } /* work with hotplug and coldplug */ MODULE_ALIAS("platform:au1550-spi"); static struct platform_driver au1550_spi_drv = { .remove = au1550_spi_remove, .driver = { .name = "au1550-spi", .owner = THIS_MODULE, }, }; static int __init au1550_spi_init(void) { /* * create memory device with 8 bits dev_devwidth * needed for proper byte ordering to spi fifo */ if (usedma) { ddma_memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev); if (!ddma_memid) 
printk(KERN_ERR "au1550-spi: cannot add memory" "dbdma device\n"); } return platform_driver_probe(&au1550_spi_drv, au1550_spi_probe); } module_init(au1550_spi_init); static void __exit au1550_spi_exit(void) { if (usedma && ddma_memid) au1xxx_ddma_del_device(ddma_memid); platform_driver_unregister(&au1550_spi_drv); } module_exit(au1550_spi_exit); MODULE_DESCRIPTION("Au1550 PSC SPI Driver"); MODULE_AUTHOR("Jan Nikitenko <jan.nikitenko@gmail.com>"); MODULE_LICENSE("GPL");
gpl-2.0
jdheiner/SGH-T769_Kernel_ICS
drivers/staging/ath6kl/os/linux/cfg80211.c
2365
51526
//------------------------------------------------------------------------------ // Copyright (c) 2004-2010 Atheros Communications Inc. // All rights reserved. // // // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // // // // Author(s): ="Atheros" //------------------------------------------------------------------------------ #include <linux/wireless.h> #include <linux/ieee80211.h> #include <net/cfg80211.h> #include "ar6000_drv.h" extern A_WAITQUEUE_HEAD arEvent; extern unsigned int wmitimeout; extern int reconnect_flag; #define RATETAB_ENT(_rate, _rateid, _flags) { \ .bitrate = (_rate), \ .flags = (_flags), \ .hw_value = (_rateid), \ } #define CHAN2G(_channel, _freq, _flags) { \ .band = IEEE80211_BAND_2GHZ, \ .hw_value = (_channel), \ .center_freq = (_freq), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } #define CHAN5G(_channel, _flags) { \ .band = IEEE80211_BAND_5GHZ, \ .hw_value = (_channel), \ .center_freq = 5000 + (5 * (_channel)), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } static struct ieee80211_rate ar6k_rates[] = { RATETAB_ENT(10, 0x1, 0), RATETAB_ENT(20, 0x2, 0), RATETAB_ENT(55, 0x4, 0), RATETAB_ENT(110, 0x8, 0), RATETAB_ENT(60, 0x10, 0), RATETAB_ENT(90, 0x20, 0), RATETAB_ENT(120, 0x40, 0), RATETAB_ENT(180, 0x80, 0), RATETAB_ENT(240, 
0x100, 0), RATETAB_ENT(360, 0x200, 0), RATETAB_ENT(480, 0x400, 0), RATETAB_ENT(540, 0x800, 0), }; #define ar6k_a_rates (ar6k_rates + 4) #define ar6k_a_rates_size 8 #define ar6k_g_rates (ar6k_rates + 0) #define ar6k_g_rates_size 12 static struct ieee80211_channel ar6k_2ghz_channels[] = { CHAN2G(1, 2412, 0), CHAN2G(2, 2417, 0), CHAN2G(3, 2422, 0), CHAN2G(4, 2427, 0), CHAN2G(5, 2432, 0), CHAN2G(6, 2437, 0), CHAN2G(7, 2442, 0), CHAN2G(8, 2447, 0), CHAN2G(9, 2452, 0), CHAN2G(10, 2457, 0), CHAN2G(11, 2462, 0), CHAN2G(12, 2467, 0), CHAN2G(13, 2472, 0), CHAN2G(14, 2484, 0), }; static struct ieee80211_channel ar6k_5ghz_a_channels[] = { CHAN5G(34, 0), CHAN5G(36, 0), CHAN5G(38, 0), CHAN5G(40, 0), CHAN5G(42, 0), CHAN5G(44, 0), CHAN5G(46, 0), CHAN5G(48, 0), CHAN5G(52, 0), CHAN5G(56, 0), CHAN5G(60, 0), CHAN5G(64, 0), CHAN5G(100, 0), CHAN5G(104, 0), CHAN5G(108, 0), CHAN5G(112, 0), CHAN5G(116, 0), CHAN5G(120, 0), CHAN5G(124, 0), CHAN5G(128, 0), CHAN5G(132, 0), CHAN5G(136, 0), CHAN5G(140, 0), CHAN5G(149, 0), CHAN5G(153, 0), CHAN5G(157, 0), CHAN5G(161, 0), CHAN5G(165, 0), CHAN5G(184, 0), CHAN5G(188, 0), CHAN5G(192, 0), CHAN5G(196, 0), CHAN5G(200, 0), CHAN5G(204, 0), CHAN5G(208, 0), CHAN5G(212, 0), CHAN5G(216, 0), }; static struct ieee80211_supported_band ar6k_band_2ghz = { .n_channels = ARRAY_SIZE(ar6k_2ghz_channels), .channels = ar6k_2ghz_channels, .n_bitrates = ar6k_g_rates_size, .bitrates = ar6k_g_rates, }; static struct ieee80211_supported_band ar6k_band_5ghz = { .n_channels = ARRAY_SIZE(ar6k_5ghz_a_channels), .channels = ar6k_5ghz_a_channels, .n_bitrates = ar6k_a_rates_size, .bitrates = ar6k_a_rates, }; static int ar6k_set_wpa_version(struct ar6_softc *ar, enum nl80211_wpa_versions wpa_version) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: %u\n", __func__, wpa_version)); if (!wpa_version) { ar->arAuthMode = NONE_AUTH; } else if (wpa_version & NL80211_WPA_VERSION_1) { ar->arAuthMode = WPA_AUTH; } else if (wpa_version & NL80211_WPA_VERSION_2) { ar->arAuthMode = WPA2_AUTH; } else { 
AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: %u not spported\n", __func__, wpa_version)); return -ENOTSUPP; } return 0; } static int ar6k_set_auth_type(struct ar6_softc *ar, enum nl80211_auth_type auth_type) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: 0x%x\n", __func__, auth_type)); switch (auth_type) { case NL80211_AUTHTYPE_OPEN_SYSTEM: ar->arDot11AuthMode = OPEN_AUTH; break; case NL80211_AUTHTYPE_SHARED_KEY: ar->arDot11AuthMode = SHARED_AUTH; break; case NL80211_AUTHTYPE_NETWORK_EAP: ar->arDot11AuthMode = LEAP_AUTH; break; case NL80211_AUTHTYPE_AUTOMATIC: ar->arDot11AuthMode = OPEN_AUTH; ar->arAutoAuthStage = AUTH_OPEN_IN_PROGRESS; break; default: ar->arDot11AuthMode = OPEN_AUTH; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: 0x%x not spported\n", __func__, auth_type)); return -ENOTSUPP; } return 0; } static int ar6k_set_cipher(struct ar6_softc *ar, u32 cipher, bool ucast) { u8 *ar_cipher = ucast ? &ar->arPairwiseCrypto : &ar->arGroupCrypto; u8 *ar_cipher_len = ucast ? &ar->arPairwiseCryptoLen : &ar->arGroupCryptoLen; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: cipher 0x%x, ucast %u\n", __func__, cipher, ucast)); switch (cipher) { case 0: case IW_AUTH_CIPHER_NONE: *ar_cipher = NONE_CRYPT; *ar_cipher_len = 0; break; case WLAN_CIPHER_SUITE_WEP40: *ar_cipher = WEP_CRYPT; *ar_cipher_len = 5; break; case WLAN_CIPHER_SUITE_WEP104: *ar_cipher = WEP_CRYPT; *ar_cipher_len = 13; break; case WLAN_CIPHER_SUITE_TKIP: *ar_cipher = TKIP_CRYPT; *ar_cipher_len = 0; break; case WLAN_CIPHER_SUITE_CCMP: *ar_cipher = AES_CRYPT; *ar_cipher_len = 0; break; default: AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: cipher 0x%x not supported\n", __func__, cipher)); return -ENOTSUPP; } return 0; } static void ar6k_set_key_mgmt(struct ar6_softc *ar, u32 key_mgmt) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: 0x%x\n", __func__, key_mgmt)); if (WLAN_AKM_SUITE_PSK == key_mgmt) { if (WPA_AUTH == ar->arAuthMode) { ar->arAuthMode = WPA_PSK_AUTH; } else if (WPA2_AUTH == ar->arAuthMode) { ar->arAuthMode = WPA2_PSK_AUTH; } } else if 
(WLAN_AKM_SUITE_8021X != key_mgmt) { ar->arAuthMode = NONE_AUTH; } } static int ar6k_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme) { struct ar6_softc *ar = ar6k_priv(dev); int status; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__)); ar->smeState = SME_CONNECTING; if(ar->arWmiReady == false) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready yet\n", __func__)); return -EIO; } if(ar->arWlanState == WLAN_DISABLED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__)); return -EIO; } if(ar->bIsDestroyProgress) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: destroy in progress\n", __func__)); return -EBUSY; } if(!sme->ssid_len || IEEE80211_MAX_SSID_LEN < sme->ssid_len) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: ssid invalid\n", __func__)); return -EINVAL; } if(ar->arSkipScan == true && ((sme->channel && sme->channel->center_freq == 0) || (sme->bssid && !sme->bssid[0] && !sme->bssid[1] && !sme->bssid[2] && !sme->bssid[3] && !sme->bssid[4] && !sme->bssid[5]))) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s:SkipScan: channel or bssid invalid\n", __func__)); return -EINVAL; } if(down_interruptible(&ar->arSem)) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: busy, couldn't get access\n", __func__)); return -ERESTARTSYS; } if(ar->bIsDestroyProgress) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: busy, destroy in progress\n", __func__)); up(&ar->arSem); return -EBUSY; } if(ar->arTxPending[wmi_get_control_ep(ar->arWmi)]) { /* * sleep until the command queue drains */ wait_event_interruptible_timeout(arEvent, ar->arTxPending[wmi_get_control_ep(ar->arWmi)] == 0, wmitimeout * HZ); if (signal_pending(current)) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: cmd queue drain timeout\n", __func__)); up(&ar->arSem); return -EINTR; } } if(ar->arConnected == true && ar->arSsidLen == sme->ssid_len && !memcmp(ar->arSsid, sme->ssid, ar->arSsidLen)) { reconnect_flag = true; status = wmi_reconnect_cmd(ar->arWmi, ar->arReqBssid, ar->arChannelHint); up(&ar->arSem); 
if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_reconnect_cmd failed\n", __func__)); return -EIO; } return 0; } else if(ar->arSsidLen == sme->ssid_len && !memcmp(ar->arSsid, sme->ssid, ar->arSsidLen)) { ar6000_disconnect(ar); } A_MEMZERO(ar->arSsid, sizeof(ar->arSsid)); ar->arSsidLen = sme->ssid_len; memcpy(ar->arSsid, sme->ssid, sme->ssid_len); if(sme->channel){ ar->arChannelHint = sme->channel->center_freq; } A_MEMZERO(ar->arReqBssid, sizeof(ar->arReqBssid)); if(sme->bssid){ if(memcmp(&sme->bssid, bcast_mac, AR6000_ETH_ADDR_LEN)) { memcpy(ar->arReqBssid, sme->bssid, sizeof(ar->arReqBssid)); } } ar6k_set_wpa_version(ar, sme->crypto.wpa_versions); ar6k_set_auth_type(ar, sme->auth_type); if(sme->crypto.n_ciphers_pairwise) { ar6k_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true); } else { ar6k_set_cipher(ar, IW_AUTH_CIPHER_NONE, true); } ar6k_set_cipher(ar, sme->crypto.cipher_group, false); if(sme->crypto.n_akm_suites) { ar6k_set_key_mgmt(ar, sme->crypto.akm_suites[0]); } if((sme->key_len) && (NONE_AUTH == ar->arAuthMode) && (WEP_CRYPT == ar->arPairwiseCrypto)) { struct ar_key *key = NULL; if(sme->key_idx < WMI_MIN_KEY_INDEX || sme->key_idx > WMI_MAX_KEY_INDEX) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: key index %d out of bounds\n", __func__, sme->key_idx)); up(&ar->arSem); return -ENOENT; } key = &ar->keys[sme->key_idx]; key->key_len = sme->key_len; memcpy(key->key, sme->key, key->key_len); key->cipher = ar->arPairwiseCrypto; ar->arDefTxKeyIndex = sme->key_idx; wmi_addKey_cmd(ar->arWmi, sme->key_idx, ar->arPairwiseCrypto, GROUP_USAGE | TX_USAGE, key->key_len, NULL, key->key, KEY_OP_INIT_VAL, NULL, NO_SYNC_WMIFLAG); } if (!ar->arUserBssFilter) { if (wmi_bssfilter_cmd(ar->arWmi, ALL_BSS_FILTER, 0) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Couldn't set bss filtering\n", __func__)); up(&ar->arSem); return -EIO; } } ar->arNetworkType = ar->arNextMode; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: Connect called with authmode %d dot11 auth %d"\ " PW crypto %d PW crypto 
Len %d GRP crypto %d"\ " GRP crypto Len %d channel hint %u\n", __func__, ar->arAuthMode, ar->arDot11AuthMode, ar->arPairwiseCrypto, ar->arPairwiseCryptoLen, ar->arGroupCrypto, ar->arGroupCryptoLen, ar->arChannelHint)); reconnect_flag = 0; status = wmi_connect_cmd(ar->arWmi, ar->arNetworkType, ar->arDot11AuthMode, ar->arAuthMode, ar->arPairwiseCrypto, ar->arPairwiseCryptoLen, ar->arGroupCrypto,ar->arGroupCryptoLen, ar->arSsidLen, ar->arSsid, ar->arReqBssid, ar->arChannelHint, ar->arConnectCtrlFlags); up(&ar->arSem); if (A_EINVAL == status) { A_MEMZERO(ar->arSsid, sizeof(ar->arSsid)); ar->arSsidLen = 0; AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Invalid request\n", __func__)); return -ENOENT; } else if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_connect_cmd failed\n", __func__)); return -EIO; } if ((!(ar->arConnectCtrlFlags & CONNECT_DO_WPA_OFFLOAD)) && ((WPA_PSK_AUTH == ar->arAuthMode) || (WPA2_PSK_AUTH == ar->arAuthMode))) { A_TIMEOUT_MS(&ar->disconnect_timer, A_DISCONNECT_TIMER_INTERVAL, 0); } ar->arConnectCtrlFlags &= ~CONNECT_DO_WPA_OFFLOAD; ar->arConnectPending = true; return 0; } void ar6k_cfg80211_connect_event(struct ar6_softc *ar, u16 channel, u8 *bssid, u16 listenInterval, u16 beaconInterval,NETWORK_TYPE networkType, u8 beaconIeLen, u8 assocReqLen, u8 assocRespLen, u8 *assocInfo) { u16 size = 0; u16 capability = 0; struct cfg80211_bss *bss = NULL; struct ieee80211_mgmt *mgmt = NULL; struct ieee80211_channel *ibss_channel = NULL; s32 signal = 50 * 100; u8 ie_buf_len = 0; unsigned char ie_buf[256]; unsigned char *ptr_ie_buf = ie_buf; unsigned char *ieeemgmtbuf = NULL; u8 source_mac[ATH_MAC_LEN]; u8 assocReqIeOffset = sizeof(u16) + /* capinfo*/ sizeof(u16); /* listen interval */ u8 assocRespIeOffset = sizeof(u16) + /* capinfo*/ sizeof(u16) + /* status Code */ sizeof(u16); /* associd */ u8 *assocReqIe = assocInfo + beaconIeLen + assocReqIeOffset; u8 *assocRespIe = assocInfo + beaconIeLen + assocReqLen + assocRespIeOffset; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, 
("%s: \n", __func__)); assocReqLen -= assocReqIeOffset; assocRespLen -= assocRespIeOffset; ar->arAutoAuthStage = AUTH_IDLE; if((ADHOC_NETWORK & networkType)) { if(NL80211_IFTYPE_ADHOC != ar->wdev->iftype) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: ath6k not in ibss mode\n", __func__)); return; } } if((INFRA_NETWORK & networkType)) { if(NL80211_IFTYPE_STATION != ar->wdev->iftype) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: ath6k not in station mode\n", __func__)); return; } } /* Before informing the join/connect event, make sure that * bss entry is present in scan list, if it not present * construct and insert into scan list, otherwise that * event will be dropped on the way by cfg80211, due to * this keys will not be plumbed in case of WEP and * application will not be aware of join/connect status. */ bss = cfg80211_get_bss(ar->wdev->wiphy, NULL, bssid, ar->wdev->ssid, ar->wdev->ssid_len, ((ADHOC_NETWORK & networkType) ? WLAN_CAPABILITY_IBSS : WLAN_CAPABILITY_ESS), ((ADHOC_NETWORK & networkType) ? WLAN_CAPABILITY_IBSS : WLAN_CAPABILITY_ESS)); /* * Earlier we were updating the cfg about bss by making a beacon frame * only if the entry for bss is not there. This can have some issue if * ROAM event is generated and a heavy traffic is ongoing. The ROAM * event is handled through a work queue and by the time it really gets * handled, BSS would have been aged out. So it is better to update the * cfg about BSS irrespective of its entry being present right now or * not. */ if (ADHOC_NETWORK & networkType) { /* construct 802.11 mgmt beacon */ if(ptr_ie_buf) { *ptr_ie_buf++ = WLAN_EID_SSID; *ptr_ie_buf++ = ar->arSsidLen; memcpy(ptr_ie_buf, ar->arSsid, ar->arSsidLen); ptr_ie_buf +=ar->arSsidLen; *ptr_ie_buf++ = WLAN_EID_IBSS_PARAMS; *ptr_ie_buf++ = 2; /* length */ *ptr_ie_buf++ = 0; /* ATIM window */ *ptr_ie_buf++ = 0; /* ATIM window */ /* TODO: update ibss params and include supported rates, * DS param set, extened support rates, wmm. 
*/ ie_buf_len = ptr_ie_buf - ie_buf; } capability |= IEEE80211_CAPINFO_IBSS; if(WEP_CRYPT == ar->arPairwiseCrypto) { capability |= IEEE80211_CAPINFO_PRIVACY; } memcpy(source_mac, ar->arNetDev->dev_addr, ATH_MAC_LEN); ptr_ie_buf = ie_buf; } else { capability = *(u16 *)(&assocInfo[beaconIeLen]); memcpy(source_mac, bssid, ATH_MAC_LEN); ptr_ie_buf = assocReqIe; ie_buf_len = assocReqLen; } size = offsetof(struct ieee80211_mgmt, u) + sizeof(mgmt->u.beacon) + ie_buf_len; ieeemgmtbuf = A_MALLOC_NOWAIT(size); if(!ieeemgmtbuf) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: ieeeMgmtbuf alloc error\n", __func__)); cfg80211_put_bss(bss); return; } A_MEMZERO(ieeemgmtbuf, size); mgmt = (struct ieee80211_mgmt *)ieeemgmtbuf; mgmt->frame_control = (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); memcpy(mgmt->da, bcast_mac, ATH_MAC_LEN); memcpy(mgmt->sa, source_mac, ATH_MAC_LEN); memcpy(mgmt->bssid, bssid, ATH_MAC_LEN); mgmt->u.beacon.beacon_int = beaconInterval; mgmt->u.beacon.capab_info = capability; memcpy(mgmt->u.beacon.variable, ptr_ie_buf, ie_buf_len); ibss_channel = ieee80211_get_channel(ar->wdev->wiphy, (int)channel); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: inform bss with bssid %pM channel %d beaconInterval %d " "capability 0x%x\n", __func__, mgmt->bssid, ibss_channel->hw_value, beaconInterval, capability)); bss = cfg80211_inform_bss_frame(ar->wdev->wiphy, ibss_channel, mgmt, le16_to_cpu(size), signal, GFP_KERNEL); kfree(ieeemgmtbuf); cfg80211_put_bss(bss); if((ADHOC_NETWORK & networkType)) { cfg80211_ibss_joined(ar->arNetDev, bssid, GFP_KERNEL); return; } if (false == ar->arConnected) { /* inform connect result to cfg80211 */ ar->smeState = SME_DISCONNECTED; cfg80211_connect_result(ar->arNetDev, bssid, assocReqIe, assocReqLen, assocRespIe, assocRespLen, WLAN_STATUS_SUCCESS, GFP_KERNEL); } else { /* inform roam event to cfg80211 */ cfg80211_roamed(ar->arNetDev, ibss_channel, bssid, assocReqIe, assocReqLen, assocRespIe, assocRespLen, GFP_KERNEL); } } static int 
ar6k_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_code) { struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: reason=%u\n", __func__, reason_code)); if(ar->arWmiReady == false) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__)); return -EIO; } if(ar->arWlanState == WLAN_DISABLED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__)); return -EIO; } if(ar->bIsDestroyProgress) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: busy, destroy in progress\n", __func__)); return -EBUSY; } if(down_interruptible(&ar->arSem)) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: busy, couldn't get access\n", __func__)); return -ERESTARTSYS; } reconnect_flag = 0; ar6000_disconnect(ar); A_MEMZERO(ar->arSsid, sizeof(ar->arSsid)); ar->arSsidLen = 0; if (ar->arSkipScan == false) { A_MEMZERO(ar->arReqBssid, sizeof(ar->arReqBssid)); } up(&ar->arSem); return 0; } void ar6k_cfg80211_disconnect_event(struct ar6_softc *ar, u8 reason, u8 *bssid, u8 assocRespLen, u8 *assocInfo, u16 protocolReasonStatus) { u16 status; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: reason=%u\n", __func__, reason)); if (ar->scan_request) { cfg80211_scan_done(ar->scan_request, true); ar->scan_request = NULL; } if((ADHOC_NETWORK & ar->arNetworkType)) { if(NL80211_IFTYPE_ADHOC != ar->wdev->iftype) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: ath6k not in ibss mode\n", __func__)); return; } A_MEMZERO(bssid, ETH_ALEN); cfg80211_ibss_joined(ar->arNetDev, bssid, GFP_KERNEL); return; } if((INFRA_NETWORK & ar->arNetworkType)) { if(NL80211_IFTYPE_STATION != ar->wdev->iftype) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: ath6k not in station mode\n", __func__)); return; } } if(true == ar->arConnectPending) { if(NO_NETWORK_AVAIL == reason) { /* connect cmd failed */ wmi_disconnect_cmd(ar->arWmi); } else if (reason == DISCONNECT_CMD) { if (ar->arAutoAuthStage) { /* * If the current auth algorithm is open try shared * and make autoAuthStage idle. 
We do not make it * leap for now being. */ if (ar->arDot11AuthMode == OPEN_AUTH) { struct ar_key *key = NULL; key = &ar->keys[ar->arDefTxKeyIndex]; if (down_interruptible(&ar->arSem)) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: busy, couldn't get access\n", __func__)); return; } ar->arDot11AuthMode = SHARED_AUTH; ar->arAutoAuthStage = AUTH_IDLE; wmi_addKey_cmd(ar->arWmi, ar->arDefTxKeyIndex, ar->arPairwiseCrypto, GROUP_USAGE | TX_USAGE, key->key_len, NULL, key->key, KEY_OP_INIT_VAL, NULL, NO_SYNC_WMIFLAG); status = wmi_connect_cmd(ar->arWmi, ar->arNetworkType, ar->arDot11AuthMode, ar->arAuthMode, ar->arPairwiseCrypto, ar->arPairwiseCryptoLen, ar->arGroupCrypto, ar->arGroupCryptoLen, ar->arSsidLen, ar->arSsid, ar->arReqBssid, ar->arChannelHint, ar->arConnectCtrlFlags); up(&ar->arSem); } else if (ar->arDot11AuthMode == SHARED_AUTH) { /* should not reach here */ } } else { ar->arConnectPending = false; if (ar->smeState == SME_CONNECTING) { cfg80211_connect_result(ar->arNetDev, bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); } else { cfg80211_disconnected(ar->arNetDev, reason, NULL, 0, GFP_KERNEL); } ar->smeState = SME_DISCONNECTED; } } } else { if (reason != DISCONNECT_CMD) wmi_disconnect_cmd(ar->arWmi); } } void ar6k_cfg80211_scan_node(void *arg, bss_t *ni) { struct wiphy *wiphy = (struct wiphy *)arg; u16 size; unsigned char *ieeemgmtbuf = NULL; struct ieee80211_mgmt *mgmt; struct ieee80211_channel *channel; struct ieee80211_supported_band *band; struct ieee80211_common_ie *cie; s32 signal; int freq; cie = &ni->ni_cie; #define CHAN_IS_11A(x) (!((x >= 2412) && (x <= 2484))) if(CHAN_IS_11A(cie->ie_chan)) { /* 11a */ band = wiphy->bands[IEEE80211_BAND_5GHZ]; } else if((cie->ie_erp) || (cie->ie_xrates)) { /* 11g */ band = wiphy->bands[IEEE80211_BAND_2GHZ]; } else { /* 11b */ band = wiphy->bands[IEEE80211_BAND_2GHZ]; } size = ni->ni_framelen + offsetof(struct ieee80211_mgmt, u); ieeemgmtbuf = A_MALLOC_NOWAIT(size); if(!ieeemgmtbuf) { 
AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: ieeeMgmtbuf alloc error\n", __func__)); return; } /* Note: TODO: Update target to include 802.11 mac header while sending bss info. Target removes 802.11 mac header while sending the bss info to host, cfg80211 needs it, for time being just filling the da, sa and bssid fields alone. */ mgmt = (struct ieee80211_mgmt *)ieeemgmtbuf; memcpy(mgmt->da, bcast_mac, ATH_MAC_LEN); memcpy(mgmt->sa, ni->ni_macaddr, ATH_MAC_LEN); memcpy(mgmt->bssid, ni->ni_macaddr, ATH_MAC_LEN); memcpy(ieeemgmtbuf + offsetof(struct ieee80211_mgmt, u), ni->ni_buf, ni->ni_framelen); freq = cie->ie_chan; channel = ieee80211_get_channel(wiphy, freq); signal = ni->ni_snr * 100; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: bssid %pM channel %d freq %d size %d\n", __func__, mgmt->bssid, channel->hw_value, freq, size)); cfg80211_inform_bss_frame(wiphy, channel, mgmt, le16_to_cpu(size), signal, GFP_KERNEL); kfree (ieeemgmtbuf); } static int ar6k_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_scan_request *request) { struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(ndev); int ret = 0; u32 forceFgScan = 0; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__)); if(ar->arWmiReady == false) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__)); return -EIO; } if(ar->arWlanState == WLAN_DISABLED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__)); return -EIO; } if (!ar->arUserBssFilter) { if (wmi_bssfilter_cmd(ar->arWmi, (ar->arConnected ? 
ALL_BUT_BSS_FILTER : ALL_BSS_FILTER), 0) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Couldn't set bss filtering\n", __func__)); return -EIO; } } if(request->n_ssids && request->ssids[0].ssid_len) { u8 i; if(request->n_ssids > (MAX_PROBED_SSID_INDEX - 1)) { request->n_ssids = MAX_PROBED_SSID_INDEX - 1; } for (i = 0; i < request->n_ssids; i++) { wmi_probedSsid_cmd(ar->arWmi, i+1, SPECIFIC_SSID_FLAG, request->ssids[i].ssid_len, request->ssids[i].ssid); } } if(ar->arConnected) { forceFgScan = 1; } if(wmi_startscan_cmd(ar->arWmi, WMI_LONG_SCAN, forceFgScan, false, \ 0, 0, 0, NULL) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_startscan_cmd failed\n", __func__)); ret = -EIO; } ar->scan_request = request; return ret; } void ar6k_cfg80211_scanComplete_event(struct ar6_softc *ar, int status) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: status %d\n", __func__, status)); if (!ar->scan_request) return; if ((status == A_ECANCELED) || (status == A_EBUSY)) { cfg80211_scan_done(ar->scan_request, true); goto out; } /* Translate data to cfg80211 mgmt format */ wmi_iterate_nodes(ar->arWmi, ar6k_cfg80211_scan_node, ar->wdev->wiphy); cfg80211_scan_done(ar->scan_request, false); if(ar->scan_request->n_ssids && ar->scan_request->ssids[0].ssid_len) { u8 i; for (i = 0; i < ar->scan_request->n_ssids; i++) { wmi_probedSsid_cmd(ar->arWmi, i+1, DISABLE_SSID_FLAG, 0, NULL); } } out: ar->scan_request = NULL; } static int ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params) { struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(ndev); struct ar_key *key = NULL; u8 key_usage; u8 key_type; int status = 0; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s:\n", __func__)); if(ar->arWmiReady == false) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__)); return -EIO; } if(ar->arWlanState == WLAN_DISABLED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__)); return -EIO; } if(key_index < 
WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: key index %d out of bounds\n", __func__, key_index)); return -ENOENT; } key = &ar->keys[key_index]; A_MEMZERO(key, sizeof(struct ar_key)); if(!mac_addr || is_broadcast_ether_addr(mac_addr)) { key_usage = GROUP_USAGE; } else { key_usage = PAIRWISE_USAGE; } if(params) { if(params->key_len > WLAN_MAX_KEY_LEN || params->seq_len > IW_ENCODE_SEQ_MAX_SIZE) return -EINVAL; key->key_len = params->key_len; memcpy(key->key, params->key, key->key_len); key->seq_len = params->seq_len; memcpy(key->seq, params->seq, key->seq_len); key->cipher = params->cipher; } switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: key_type = WEP_CRYPT; break; case WLAN_CIPHER_SUITE_TKIP: key_type = TKIP_CRYPT; break; case WLAN_CIPHER_SUITE_CCMP: key_type = AES_CRYPT; break; default: return -ENOTSUPP; } if (((WPA_PSK_AUTH == ar->arAuthMode) || (WPA2_PSK_AUTH == ar->arAuthMode)) && (GROUP_USAGE & key_usage)) { A_UNTIMEOUT(&ar->disconnect_timer); } AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: index %d, key_len %d, key_type 0x%x,"\ " key_usage 0x%x, seq_len %d\n", __func__, key_index, key->key_len, key_type, key_usage, key->seq_len)); ar->arDefTxKeyIndex = key_index; status = wmi_addKey_cmd(ar->arWmi, ar->arDefTxKeyIndex, key_type, key_usage, key->key_len, key->seq, key->key, KEY_OP_INIT_VAL, (u8 *)mac_addr, SYNC_BOTH_WMIFLAG); if (status) { return -EIO; } return 0; } static int ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool pairwise, const u8 *mac_addr) { struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(ndev); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: index %d\n", __func__, key_index)); if(ar->arWmiReady == false) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__)); return -EIO; } if(ar->arWlanState == WLAN_DISABLED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__)); return -EIO; } if(key_index < 
WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: key index %d out of bounds\n", __func__, key_index)); return -ENOENT; } if(!ar->keys[key_index].key_len) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: index %d is empty\n", __func__, key_index)); return 0; } ar->keys[key_index].key_len = 0; return wmi_deleteKey_cmd(ar->arWmi, key_index); } static int ar6k_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie, void (*callback)(void *cookie, struct key_params*)) { struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(ndev); struct ar_key *key = NULL; struct key_params params; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: index %d\n", __func__, key_index)); if(ar->arWmiReady == false) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__)); return -EIO; } if(ar->arWlanState == WLAN_DISABLED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__)); return -EIO; } if(key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: key index %d out of bounds\n", __func__, key_index)); return -ENOENT; } key = &ar->keys[key_index]; A_MEMZERO(&params, sizeof(params)); params.cipher = key->cipher; params.key_len = key->key_len; params.seq_len = key->seq_len; params.seq = key->seq; params.key = key->key; callback(cookie, &params); return key->key_len ? 
0 : -ENOENT; } static int ar6k_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool unicast, bool multicast) { struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(ndev); struct ar_key *key = NULL; int status = 0; u8 key_usage; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: index %d\n", __func__, key_index)); if(ar->arWmiReady == false) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__)); return -EIO; } if(ar->arWlanState == WLAN_DISABLED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__)); return -EIO; } if(key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: key index %d out of bounds\n", __func__, key_index)); return -ENOENT; } if(!ar->keys[key_index].key_len) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: invalid key index %d\n", __func__, key_index)); return -EINVAL; } ar->arDefTxKeyIndex = key_index; key = &ar->keys[ar->arDefTxKeyIndex]; key_usage = GROUP_USAGE; if (WEP_CRYPT == ar->arPairwiseCrypto) { key_usage |= TX_USAGE; } status = wmi_addKey_cmd(ar->arWmi, ar->arDefTxKeyIndex, ar->arPairwiseCrypto, key_usage, key->key_len, key->seq, key->key, KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG); if (status) { return -EIO; } return 0; } static int ar6k_cfg80211_set_default_mgmt_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index) { struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(ndev); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: index %d\n", __func__, key_index)); if(ar->arWmiReady == false) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__)); return -EIO; } if(ar->arWlanState == WLAN_DISABLED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__)); return -EIO; } AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: not supported\n", __func__)); return -ENOTSUPP; } void ar6k_cfg80211_tkip_micerr_event(struct ar6_softc *ar, u8 keyid, bool ismcast) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast)); 
cfg80211_michael_mic_failure(ar->arNetDev, ar->arBssid, (ismcast ? NL80211_KEYTYPE_GROUP : NL80211_KEYTYPE_PAIRWISE), keyid, NULL, GFP_KERNEL); } static int ar6k_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) { struct ar6_softc *ar = (struct ar6_softc *)wiphy_priv(wiphy); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: changed 0x%x\n", __func__, changed)); if(ar->arWmiReady == false) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__)); return -EIO; } if(ar->arWlanState == WLAN_DISABLED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__)); return -EIO; } if (changed & WIPHY_PARAM_RTS_THRESHOLD) { if (wmi_set_rts_cmd(ar->arWmi,wiphy->rts_threshold) != 0){ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_set_rts_cmd failed\n", __func__)); return -EIO; } } return 0; } static int ar6k_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, const struct cfg80211_bitrate_mask *mask) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Setting rates: Not supported\n")); return -EIO; } /* The type nl80211_tx_power_setting replaces the following data type from 2.6.36 onwards */ static int ar6k_cfg80211_set_txpower(struct wiphy *wiphy, enum nl80211_tx_power_setting type, int dbm) { struct ar6_softc *ar = (struct ar6_softc *)wiphy_priv(wiphy); u8 ar_dbm; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: type 0x%x, dbm %d\n", __func__, type, dbm)); if(ar->arWmiReady == false) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__)); return -EIO; } if(ar->arWlanState == WLAN_DISABLED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__)); return -EIO; } ar->arTxPwrSet = false; switch(type) { case NL80211_TX_POWER_AUTOMATIC: return 0; case NL80211_TX_POWER_LIMITED: ar->arTxPwr = ar_dbm = dbm; ar->arTxPwrSet = true; break; default: AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: type 0x%x not supported\n", __func__, type)); return -EOPNOTSUPP; } wmi_set_txPwr_cmd(ar->arWmi, ar_dbm); return 0; } static int ar6k_cfg80211_get_txpower(struct 
wiphy *wiphy, int *dbm) { struct ar6_softc *ar = (struct ar6_softc *)wiphy_priv(wiphy); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__)); if(ar->arWmiReady == false) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__)); return -EIO; } if(ar->arWlanState == WLAN_DISABLED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__)); return -EIO; } if((ar->arConnected == true)) { ar->arTxPwr = 0; if(wmi_get_txPwr_cmd(ar->arWmi) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_get_txPwr_cmd failed\n", __func__)); return -EIO; } wait_event_interruptible_timeout(arEvent, ar->arTxPwr != 0, 5 * HZ); if(signal_pending(current)) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Target did not respond\n", __func__)); return -EINTR; } } *dbm = ar->arTxPwr; return 0; } static int ar6k_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, bool pmgmt, int timeout) { struct ar6_softc *ar = ar6k_priv(dev); WMI_POWER_MODE_CMD pwrMode; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: pmgmt %d, timeout %d\n", __func__, pmgmt, timeout)); if(ar->arWmiReady == false) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__)); return -EIO; } if(ar->arWlanState == WLAN_DISABLED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__)); return -EIO; } if(pmgmt) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: Max Perf\n", __func__)); pwrMode.powerMode = REC_POWER; } else { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: Rec Power\n", __func__)); pwrMode.powerMode = MAX_PERF_POWER; } if(wmi_powermode_cmd(ar->arWmi, pwrMode.powerMode) != 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wmi_powermode_cmd failed\n", __func__)); return -EIO; } return 0; } static struct net_device * ar6k_cfg80211_add_virtual_intf(struct wiphy *wiphy, char *name, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: not supported\n", __func__)); /* Multiple virtual interface is not supported. 
* The default interface supports STA and IBSS type */ return ERR_PTR(-EOPNOTSUPP); } static int ar6k_cfg80211_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev) { AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: not supported\n", __func__)); /* Multiple virtual interface is not supported. * The default interface supports STA and IBSS type */ return -EOPNOTSUPP; } static int ar6k_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { struct ar6_softc *ar = ar6k_priv(ndev); struct wireless_dev *wdev = ar->wdev; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: type %u\n", __func__, type)); if(ar->arWmiReady == false) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__)); return -EIO; } if(ar->arWlanState == WLAN_DISABLED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__)); return -EIO; } switch (type) { case NL80211_IFTYPE_STATION: ar->arNextMode = INFRA_NETWORK; break; case NL80211_IFTYPE_ADHOC: ar->arNextMode = ADHOC_NETWORK; break; default: AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: type %u\n", __func__, type)); return -EOPNOTSUPP; } wdev->iftype = type; return 0; } static int ar6k_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ibss_params *ibss_param) { struct ar6_softc *ar = ar6k_priv(dev); int status; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__)); if(ar->arWmiReady == false) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__)); return -EIO; } if(ar->arWlanState == WLAN_DISABLED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__)); return -EIO; } if(!ibss_param->ssid_len || IEEE80211_MAX_SSID_LEN < ibss_param->ssid_len) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: ssid invalid\n", __func__)); return -EINVAL; } ar->arSsidLen = ibss_param->ssid_len; memcpy(ar->arSsid, ibss_param->ssid, ar->arSsidLen); if(ibss_param->channel) { ar->arChannelHint = ibss_param->channel->center_freq; } 
if(ibss_param->channel_fixed) { /* TODO: channel_fixed: The channel should be fixed, do not search for * IBSSs to join on other channels. Target firmware does not support this * feature, needs to be updated.*/ } A_MEMZERO(ar->arReqBssid, sizeof(ar->arReqBssid)); if(ibss_param->bssid) { if(memcmp(&ibss_param->bssid, bcast_mac, AR6000_ETH_ADDR_LEN)) { memcpy(ar->arReqBssid, ibss_param->bssid, sizeof(ar->arReqBssid)); } } ar6k_set_wpa_version(ar, 0); ar6k_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM); if(ibss_param->privacy) { ar6k_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true); ar6k_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false); } else { ar6k_set_cipher(ar, IW_AUTH_CIPHER_NONE, true); ar6k_set_cipher(ar, IW_AUTH_CIPHER_NONE, false); } ar->arNetworkType = ar->arNextMode; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: Connect called with authmode %d dot11 auth %d"\ " PW crypto %d PW crypto Len %d GRP crypto %d"\ " GRP crypto Len %d channel hint %u\n", __func__, ar->arAuthMode, ar->arDot11AuthMode, ar->arPairwiseCrypto, ar->arPairwiseCryptoLen, ar->arGroupCrypto, ar->arGroupCryptoLen, ar->arChannelHint)); status = wmi_connect_cmd(ar->arWmi, ar->arNetworkType, ar->arDot11AuthMode, ar->arAuthMode, ar->arPairwiseCrypto, ar->arPairwiseCryptoLen, ar->arGroupCrypto,ar->arGroupCryptoLen, ar->arSsidLen, ar->arSsid, ar->arReqBssid, ar->arChannelHint, ar->arConnectCtrlFlags); ar->arConnectPending = true; return 0; } static int ar6k_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) { struct ar6_softc *ar = (struct ar6_softc *)ar6k_priv(dev); AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__)); if(ar->arWmiReady == false) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wmi not ready\n", __func__)); return -EIO; } if(ar->arWlanState == WLAN_DISABLED) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Wlan disabled\n", __func__)); return -EIO; } ar6000_disconnect(ar); A_MEMZERO(ar->arSsid, sizeof(ar->arSsid)); ar->arSsidLen = 0; return 0; } static const u32 cipher_suites[] = { 
WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, }; bool is_rate_legacy(s32 rate) { static const s32 legacy[] = { 1000, 2000, 5500, 11000, 6000, 9000, 12000, 18000, 24000, 36000, 48000, 54000 }; u8 i; for (i = 0; i < ARRAY_SIZE(legacy); i++) { if (rate == legacy[i]) return true; } return false; } bool is_rate_ht20(s32 rate, u8 *mcs, bool *sgi) { static const s32 ht20[] = { 6500, 13000, 19500, 26000, 39000, 52000, 58500, 65000, 72200 }; u8 i; for (i = 0; i < ARRAY_SIZE(ht20); i++) { if (rate == ht20[i]) { if (i == ARRAY_SIZE(ht20) - 1) /* last rate uses sgi */ *sgi = true; else *sgi = false; *mcs = i; return true; } } return false; } bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi) { static const s32 ht40[] = { 13500, 27000, 40500, 54000, 81000, 108000, 121500, 135000, 150000 }; u8 i; for (i = 0; i < ARRAY_SIZE(ht40); i++) { if (rate == ht40[i]) { if (i == ARRAY_SIZE(ht40) - 1) /* last rate uses sgi */ *sgi = true; else *sgi = false; *mcs = i; return true; } } return false; } static int ar6k_get_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac, struct station_info *sinfo) { struct ar6_softc *ar = ar6k_priv(dev); long left; bool sgi; s32 rate; int ret; u8 mcs; if (memcmp(mac, ar->arBssid, ETH_ALEN) != 0) return -ENOENT; if (down_interruptible(&ar->arSem)) return -EBUSY; ar->statsUpdatePending = true; ret = wmi_get_stats_cmd(ar->arWmi); if (ret != 0) { up(&ar->arSem); return -EIO; } left = wait_event_interruptible_timeout(arEvent, ar->statsUpdatePending == false, wmitimeout * HZ); up(&ar->arSem); if (left == 0) return -ETIMEDOUT; else if (left < 0) return left; if (ar->arTargetStats.rx_bytes) { sinfo->rx_bytes = ar->arTargetStats.rx_bytes; sinfo->filled |= STATION_INFO_RX_BYTES; sinfo->rx_packets = ar->arTargetStats.rx_packets; sinfo->filled |= STATION_INFO_RX_PACKETS; } if (ar->arTargetStats.tx_bytes) { sinfo->tx_bytes = ar->arTargetStats.tx_bytes; sinfo->filled |= STATION_INFO_TX_BYTES; sinfo->tx_packets = 
ar->arTargetStats.tx_packets; sinfo->filled |= STATION_INFO_TX_PACKETS; } sinfo->signal = ar->arTargetStats.cs_rssi; sinfo->filled |= STATION_INFO_SIGNAL; rate = ar->arTargetStats.tx_unicast_rate; if (is_rate_legacy(rate)) { sinfo->txrate.legacy = rate / 100; } else if (is_rate_ht20(rate, &mcs, &sgi)) { if (sgi) { sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; sinfo->txrate.mcs = mcs - 1; } else { sinfo->txrate.mcs = mcs; } sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS; } else if (is_rate_ht40(rate, &mcs, &sgi)) { if (sgi) { sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; sinfo->txrate.mcs = mcs - 1; } else { sinfo->txrate.mcs = mcs; } sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS; } else { WARN(1, "invalid rate: %d", rate); return 0; } sinfo->filled |= STATION_INFO_TX_BITRATE; return 0; } static struct cfg80211_ops ar6k_cfg80211_ops = { .change_virtual_intf = ar6k_cfg80211_change_iface, .add_virtual_intf = ar6k_cfg80211_add_virtual_intf, .del_virtual_intf = ar6k_cfg80211_del_virtual_intf, .scan = ar6k_cfg80211_scan, .connect = ar6k_cfg80211_connect, .disconnect = ar6k_cfg80211_disconnect, .add_key = ar6k_cfg80211_add_key, .get_key = ar6k_cfg80211_get_key, .del_key = ar6k_cfg80211_del_key, .set_default_key = ar6k_cfg80211_set_default_key, .set_default_mgmt_key = ar6k_cfg80211_set_default_mgmt_key, .set_wiphy_params = ar6k_cfg80211_set_wiphy_params, .set_bitrate_mask = ar6k_cfg80211_set_bitrate_mask, .set_tx_power = ar6k_cfg80211_set_txpower, .get_tx_power = ar6k_cfg80211_get_txpower, .set_power_mgmt = ar6k_cfg80211_set_power_mgmt, .join_ibss = ar6k_cfg80211_join_ibss, .leave_ibss = ar6k_cfg80211_leave_ibss, .get_station = ar6k_get_station, }; struct wireless_dev * ar6k_cfg80211_init(struct device *dev) { int ret = 0; struct wireless_dev *wdev; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__)); wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); if(!wdev) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Couldn't 
allocate wireless device\n", __func__)); return ERR_PTR(-ENOMEM); } /* create a new wiphy for use with cfg80211 */ wdev->wiphy = wiphy_new(&ar6k_cfg80211_ops, sizeof(struct ar6_softc)); if(!wdev->wiphy) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Couldn't allocate wiphy device\n", __func__)); kfree(wdev); return ERR_PTR(-ENOMEM); } /* set device pointer for wiphy */ set_wiphy_dev(wdev->wiphy, dev); wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); /* max num of ssids that can be probed during scanning */ wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX; wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar6k_band_2ghz; wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ar6k_band_5ghz; wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wdev->wiphy->cipher_suites = cipher_suites; wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); ret = wiphy_register(wdev->wiphy); if(ret < 0) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Couldn't register wiphy device\n", __func__)); wiphy_free(wdev->wiphy); return ERR_PTR(ret); } return wdev; } void ar6k_cfg80211_deinit(struct ar6_softc *ar) { struct wireless_dev *wdev = ar->wdev; AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: \n", __func__)); if(ar->scan_request) { cfg80211_scan_done(ar->scan_request, true); ar->scan_request = NULL; } if(!wdev) return; wiphy_unregister(wdev->wiphy); wiphy_free(wdev->wiphy); kfree(wdev); }
gpl-2.0
webore/lenovo
net/atm/lec.c
2365
64855
/* * lec.c: Lan Emulation driver * * Marko Kiiskila <mkiiskila@yahoo.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/capability.h> /* We are ethernet device */ #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <net/sock.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <asm/byteorder.h> #include <linux/uaccess.h> #include <net/arp.h> #include <net/dst.h> #include <linux/proc_fs.h> #include <linux/spinlock.h> #include <linux/seq_file.h> /* TokenRing if needed */ #ifdef CONFIG_TR #include <linux/trdevice.h> #endif /* And atm device */ #include <linux/atmdev.h> #include <linux/atmlec.h> /* Proxy LEC knows about bridging */ #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) #include "../bridge/br_private.h" static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 }; #endif /* Modular too */ #include <linux/module.h> #include <linux/init.h> #include "lec.h" #include "lec_arpc.h" #include "resources.h" #define DUMP_PACKETS 0 /* * 0 = None, * 1 = 30 first bytes * 2 = Whole packet */ #define LEC_UNRES_QUE_LEN 8 /* * number of tx packets to queue for a * single destination while waiting for SVC */ static int lec_open(struct net_device *dev); static netdev_tx_t lec_start_xmit(struct sk_buff *skb, struct net_device *dev); static int lec_close(struct net_device *dev); static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, const unsigned char *mac_addr); static int lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove); /* LANE2 functions */ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_address, const u8 *tlvs, u32 sizeoftlvs); static int lane2_resolve(struct net_device *dev, const u8 *dst_mac, int force, u8 **tlvs, u32 *sizeoftlvs); static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst, const u8 *tlvs, u32 sizeoftlvs); 
static int lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr, unsigned long permanent); static void lec_arp_check_empties(struct lec_priv *priv, struct atm_vcc *vcc, struct sk_buff *skb); static void lec_arp_destroy(struct lec_priv *priv); static void lec_arp_init(struct lec_priv *priv); static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, const unsigned char *mac_to_find, int is_rdesc, struct lec_arp_table **ret_entry); static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr, const unsigned char *atm_addr, unsigned long remoteflag, unsigned int targetless_le_arp); static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id); static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc); static void lec_set_flush_tran_id(struct lec_priv *priv, const unsigned char *atm_addr, unsigned long tran_id); static void lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data, struct atm_vcc *vcc, void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb)); static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc); /* must be done under lec_arp_lock */ static inline void lec_arp_hold(struct lec_arp_table *entry) { atomic_inc(&entry->usage); } static inline void lec_arp_put(struct lec_arp_table *entry) { if (atomic_dec_and_test(&entry->usage)) kfree(entry); } static struct lane2_ops lane2_ops = { lane2_resolve, /* resolve, spec 3.1.3 */ lane2_associate_req, /* associate_req, spec 3.1.4 */ NULL /* associate indicator, spec 3.1.5 */ }; static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; /* Device structures */ static struct net_device *dev_lec[MAX_LEC_ITF]; #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev) { char *buff; struct lec_priv *priv; /* * Check if this is a BPDU. 
If so, ask zeppelin to send * LE_TOPOLOGY_REQUEST with the same value of Topology Change bit * as the Config BPDU has */ buff = skb->data + skb->dev->hard_header_len; if (*buff++ == 0x42 && *buff++ == 0x42 && *buff++ == 0x03) { struct sock *sk; struct sk_buff *skb2; struct atmlec_msg *mesg; skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC); if (skb2 == NULL) return; skb2->len = sizeof(struct atmlec_msg); mesg = (struct atmlec_msg *)skb2->data; mesg->type = l_topology_change; buff += 4; mesg->content.normal.flag = *buff & 0x01; /* 0x01 is topology change */ priv = netdev_priv(dev); atm_force_charge(priv->lecd, skb2->truesize); sk = sk_atm(priv->lecd); skb_queue_tail(&sk->sk_receive_queue, skb2); sk->sk_data_ready(sk, skb2->len); } } #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ /* * Modelled after tr_type_trans * All multicast and ARE or STE frames go to BUS. * Non source routed frames go by destination address. * Last hop source routed frames go by destination address. * Not last hop source routed frames go by _next_ route descriptor. * Returns pointer to destination MAC address or fills in rdesc * and returns NULL. 
*/ #ifdef CONFIG_TR static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc) { struct trh_hdr *trh; unsigned int riflen, num_rdsc; trh = (struct trh_hdr *)packet; if (trh->daddr[0] & (uint8_t) 0x80) return bus_mac; /* multicast */ if (trh->saddr[0] & TR_RII) { riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8; if ((ntohs(trh->rcf) >> 13) != 0) return bus_mac; /* ARE or STE */ } else return trh->daddr; /* not source routed */ if (riflen < 6) return trh->daddr; /* last hop, source routed */ /* riflen is 6 or more, packet has more than one route descriptor */ num_rdsc = (riflen / 2) - 1; memset(rdesc, 0, ETH_ALEN); /* offset 4 comes from LAN destination field in LE control frames */ if (trh->rcf & htons((uint16_t) TR_RCF_DIR_BIT)) memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(__be16)); else { memcpy(&rdesc[4], &trh->rseg[1], sizeof(__be16)); rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0)); } return NULL; } #endif /* CONFIG_TR */ /* * Open/initialize the netdevice. This is called (in the current kernel) * sometime after booting when the 'ifconfig' program is run. * * This routine should set everything up anew at each open, even * registers that "should" only need to be set once at boot, so that * there is non-reboot way to recover if something goes wrong. 
*/ static int lec_open(struct net_device *dev) { netif_start_queue(dev); return 0; } static void lec_send(struct atm_vcc *vcc, struct sk_buff *skb) { struct net_device *dev = skb->dev; ATM_SKB(skb)->vcc = vcc; ATM_SKB(skb)->atm_options = vcc->atm_options; atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); if (vcc->send(vcc, skb) < 0) { dev->stats.tx_dropped++; return; } dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; } static void lec_tx_timeout(struct net_device *dev) { pr_info("%s\n", dev->name); dev->trans_start = jiffies; netif_wake_queue(dev); } static netdev_tx_t lec_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sk_buff *skb2; struct lec_priv *priv = netdev_priv(dev); struct lecdatahdr_8023 *lec_h; struct atm_vcc *vcc; struct lec_arp_table *entry; unsigned char *dst; int min_frame_size; #ifdef CONFIG_TR unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */ #endif int is_rdesc; pr_debug("called\n"); if (!priv->lecd) { pr_info("%s:No lecd attached\n", dev->name); dev->stats.tx_errors++; netif_stop_queue(dev); kfree_skb(skb); return NETDEV_TX_OK; } pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n", (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb), (long)skb_end_pointer(skb)); #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0) lec_handle_bridge(skb, dev); #endif /* Make sure we have room for lec_id */ if (skb_headroom(skb) < 2) { pr_debug("reallocating skb\n"); skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN); kfree_skb(skb); if (skb2 == NULL) return NETDEV_TX_OK; skb = skb2; } skb_push(skb, 2); /* Put le header to place, works for TokenRing too */ lec_h = (struct lecdatahdr_8023 *)skb->data; lec_h->le_header = htons(priv->lecid); #ifdef CONFIG_TR /* * Ugly. Use this to realign Token Ring packets for * e.g. PCA-200E driver. 
*/ if (priv->is_trdev) { skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN); kfree_skb(skb); if (skb2 == NULL) return NETDEV_TX_OK; skb = skb2; } #endif #if DUMP_PACKETS >= 2 #define MAX_DUMP_SKB 99 #elif DUMP_PACKETS >= 1 #define MAX_DUMP_SKB 30 #endif #if DUMP_PACKETS >= 1 printk(KERN_DEBUG "%s: send datalen:%ld lecid:%4.4x\n", dev->name, skb->len, priv->lecid); print_hex_dump(KERN_DEBUG, "", DUMP_OFFSET, 16, 1, skb->data, min(skb->len, MAX_DUMP_SKB), true); #endif /* DUMP_PACKETS >= 1 */ /* Minimum ethernet-frame size */ #ifdef CONFIG_TR if (priv->is_trdev) min_frame_size = LEC_MINIMUM_8025_SIZE; else #endif min_frame_size = LEC_MINIMUM_8023_SIZE; if (skb->len < min_frame_size) { if ((skb->len + skb_tailroom(skb)) < min_frame_size) { skb2 = skb_copy_expand(skb, 0, min_frame_size - skb->truesize, GFP_ATOMIC); dev_kfree_skb(skb); if (skb2 == NULL) { dev->stats.tx_dropped++; return NETDEV_TX_OK; } skb = skb2; } skb_put(skb, min_frame_size - skb->len); } /* Send to right vcc */ is_rdesc = 0; dst = lec_h->h_dest; #ifdef CONFIG_TR if (priv->is_trdev) { dst = get_tr_dst(skb->data + 2, rdesc); if (dst == NULL) { dst = rdesc; is_rdesc = 1; } } #endif entry = NULL; vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry); pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n", dev->name, vcc, vcc ? 
vcc->flags : 0, entry); if (!vcc || !test_bit(ATM_VF_READY, &vcc->flags)) { if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) { pr_debug("%s:queuing packet, MAC address %pM\n", dev->name, lec_h->h_dest); skb_queue_tail(&entry->tx_wait, skb); } else { pr_debug("%s:tx queue full or no arp entry, dropping, MAC address: %pM\n", dev->name, lec_h->h_dest); dev->stats.tx_dropped++; dev_kfree_skb(skb); } goto out; } #if DUMP_PACKETS > 0 printk(KERN_DEBUG "%s:sending to vpi:%d vci:%d\n", dev->name, vcc->vpi, vcc->vci); #endif /* DUMP_PACKETS > 0 */ while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) { pr_debug("emptying tx queue, MAC address %pM\n", lec_h->h_dest); lec_send(vcc, skb2); } lec_send(vcc, skb); if (!atm_may_send(vcc, 0)) { struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); vpriv->xoff = 1; netif_stop_queue(dev); /* * vcc->pop() might have occurred in between, making * the vcc usuable again. Since xmit is serialized, * this is the only situation we have to re-test. */ if (atm_may_send(vcc, 0)) netif_wake_queue(dev); } out: if (entry) lec_arp_put(entry); dev->trans_start = jiffies; return NETDEV_TX_OK; } /* The inverse routine to net_open(). 
*/
static int lec_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

/*
 * Handle a control message sent down by the LANE daemon ("zeppelin")
 * on the control VCC.  Consumes @skb; returns 0 on success or -EINVAL
 * for an unknown message type.
 */
static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct net_device *dev = (struct net_device *)vcc->proto_data;
	struct lec_priv *priv = netdev_priv(dev);
	struct atmlec_msg *mesg;
	struct lec_arp_table *entry;
	int i;
	char *tmp;		/* FIXME */

	/* uncharge the daemon's write-buffer accounting for this skb */
	atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
	mesg = (struct atmlec_msg *)skb->data;
	tmp = skb->data;
	tmp += sizeof(struct atmlec_msg);	/* TLVs follow the message */
	pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type);
	switch (mesg->type) {
	case l_set_mac_addr:
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = mesg->content.normal.mac_addr[i];
		break;
	case l_del_mac_addr:
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = 0;
		break;
	case l_addr_delete:
		lec_addr_delete(priv, mesg->content.normal.atm_addr,
				mesg->content.normal.flag);
		break;
	case l_topology_change:
		priv->topology_change = mesg->content.normal.flag;
		break;
	case l_flush_complete:
		lec_flush_complete(priv, mesg->content.normal.flag);
		break;
	case l_narp_req:	/* LANE2: see 7.1.35 in the lane2 spec */
		spin_lock_irqsave(&priv->lec_arp_lock, flags);
		entry = lec_arp_find(priv, mesg->content.normal.mac_addr);
		lec_arp_remove(priv, entry);
		spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
		if (mesg->content.normal.no_source_le_narp)
			break;
		/* FALL THROUGH */
	case l_arp_update:
		lec_arp_update(priv, mesg->content.normal.mac_addr,
			       mesg->content.normal.atm_addr,
			       mesg->content.normal.flag,
			       mesg->content.normal.targetless_le_arp);
		pr_debug("in l_arp_update\n");
		if (mesg->sizeoftlvs != 0) {	/* LANE2 3.1.5 */
			pr_debug("LANE2 3.1.5, got tlvs, size %d\n",
				 mesg->sizeoftlvs);
			lane2_associate_ind(dev, mesg->content.normal.mac_addr,
					    tmp, mesg->sizeoftlvs);
		}
		break;
	case l_config:
		/*
		 * Per-ELAN operating parameters from the daemon; timer
		 * values arrive in seconds and are stored in jiffies.
		 */
		priv->maximum_unknown_frame_count =
		    mesg->content.config.maximum_unknown_frame_count;
		priv->max_unknown_frame_time =
		    (mesg->content.config.max_unknown_frame_time * HZ);
		priv->max_retry_count = mesg->content.config.max_retry_count;
		priv->aging_time = (mesg->content.config.aging_time * HZ);
		priv->forward_delay_time =
		    (mesg->content.config.forward_delay_time * HZ);
		priv->arp_response_time =
		    (mesg->content.config.arp_response_time * HZ);
		priv->flush_timeout = (mesg->content.config.flush_timeout * HZ);
		priv->path_switching_delay =
		    (mesg->content.config.path_switching_delay * HZ);
		priv->lane_version = mesg->content.config.lane_version;
		/* LANE2: version > 1 enables the lane2 operations */
		priv->lane2_ops = NULL;
		if (priv->lane_version > 1)
			priv->lane2_ops = &lane2_ops;
		if (dev_set_mtu(dev, mesg->content.config.mtu))
			pr_info("%s: change_mtu to %d failed\n",
				dev->name, mesg->content.config.mtu);
		priv->is_proxy = mesg->content.config.is_proxy;
		break;
	case l_flush_tran_id:
		lec_set_flush_tran_id(priv, mesg->content.normal.atm_addr,
				      mesg->content.normal.flag);
		break;
	case l_set_lecid:
		priv->lecid =
		    (unsigned short)(0xffff & mesg->content.normal.flag);
		break;
	case l_should_bridge:
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
	{
		pr_debug("%s: bridge zeppelin asks about %pM\n",
			 dev->name, mesg->content.proxy.mac_addr);

		if (br_fdb_test_addr_hook == NULL)
			break;

		if (br_fdb_test_addr_hook(dev, mesg->content.proxy.mac_addr)) {
			/* hit from bridge table, send LE_ARP_RESPONSE */
			struct sk_buff *skb2;
			struct sock *sk;

			pr_debug("%s: entry found, responding to zeppelin\n",
				 dev->name);
			skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
			if (skb2 == NULL)
				break;
			skb2->len = sizeof(struct atmlec_msg);
			skb_copy_to_linear_data(skb2, mesg, sizeof(*mesg));
			/* queue the answer on the daemon's control socket */
			atm_force_charge(priv->lecd, skb2->truesize);
			sk = sk_atm(priv->lecd);
			skb_queue_tail(&sk->sk_receive_queue, skb2);
			sk->sk_data_ready(sk, skb2->len);
		}
	}
#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
		break;
	default:
		pr_info("%s: Unknown message type %d\n", dev->name, mesg->type);
		dev_kfree_skb(skb);
		return -EINVAL;
	}
	dev_kfree_skb(skb);
	return 0;
}

/* Tear down the control connection to the LANE daemon. */
static void lec_atm_close(struct atm_vcc *vcc)
{
	struct sk_buff *skb;
	struct
net_device *dev = (struct net_device *)vcc->proto_data;
	struct lec_priv *priv = netdev_priv(dev);

	priv->lecd = NULL;
	/* Do something needful? */

	netif_stop_queue(dev);
	lec_arp_destroy(priv);

	/* drain anything the daemon never read */
	if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
		pr_info("%s closing with messages pending\n", dev->name);
	while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) {
		atm_return(vcc, skb->truesize);
		dev_kfree_skb(skb);
	}

	pr_info("%s: Shut down!\n", dev->name);
	module_put(THIS_MODULE);
}

static struct atmdev_ops lecdev_ops = {
	.close = lec_atm_close,
	.send = lec_atm_send
};

/* Pseudo ATM device the daemon's control VCC is attached to. */
static struct atm_dev lecatm_dev = {
	.ops = &lecdev_ops,
	.type = "lec",
	.number = 999,		/* dummy device number */
	.lock = __SPIN_LOCK_UNLOCKED(lecatm_dev.lock)
};

/*
 * LANE2: new argument struct sk_buff *data contains
 * the LE_ARP based TLVs introduced in the LANE2 spec
 *
 * Queue a control message of @type up to the LANE daemon.
 * Returns 0 on success, -1 if no daemon is attached or allocation fails.
 */
static int
send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
	     const unsigned char *mac_addr, const unsigned char *atm_addr,
	     struct sk_buff *data)
{
	struct sock *sk;
	struct sk_buff *skb;
	struct atmlec_msg *mesg;

	if (!priv || !priv->lecd)
		return -1;
	skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
	if (!skb)
		return -1;
	skb->len = sizeof(struct atmlec_msg);
	mesg = (struct atmlec_msg *)skb->data;
	memset(mesg, 0, sizeof(struct atmlec_msg));
	mesg->type = type;
	if (data != NULL)
		mesg->sizeoftlvs = data->len;
	if (mac_addr)
		memcpy(&mesg->content.normal.mac_addr, mac_addr, ETH_ALEN);
	else
		mesg->content.normal.targetless_le_arp = 1;
	if (atm_addr)
		memcpy(&mesg->content.normal.atm_addr, atm_addr, ATM_ESA_LEN);

	atm_force_charge(priv->lecd, skb->truesize);
	sk = sk_atm(priv->lecd);
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	if (data != NULL) {
		/* TLVs travel as a second skb right behind the message */
		pr_debug("about to send %d bytes of data\n", data->len);
		atm_force_charge(priv->lecd, data->truesize);
		skb_queue_tail(&sk->sk_receive_queue, data);
		sk->sk_data_ready(sk, skb->len);
	}

	return 0;
}

/* shamelessly stolen from drivers/net/net_init.c */
static int lec_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 18190))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static void lec_set_multicast_list(struct net_device *dev)
{
	/*
	 * by default, all multicast frames arrive over the bus.
	 * eventually support selective multicast service
	 */
}

static const struct net_device_ops lec_netdev_ops = {
	.ndo_open		= lec_open,
	.ndo_stop		= lec_close,
	.ndo_start_xmit		= lec_start_xmit,
	.ndo_change_mtu		= lec_change_mtu,
	.ndo_tx_timeout		= lec_tx_timeout,
	.ndo_set_multicast_list	= lec_set_multicast_list,
};

/* Leading bytes that mark a received frame as a LANE control frame */
static const unsigned char lec_ctrl_magic[] = {
	0xff,
	0x00,
	0x01,
	0x01
};

#define LEC_DATA_DIRECT_8023 2
#define LEC_DATA_DIRECT_8025 3

/* Does the VCC's BLLI SNAP code mark it as a data-direct VCC? */
static int lec_is_data_direct(struct atm_vcc *vcc)
{
	return ((vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8023) ||
		(vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8025));
}

/*
 * Receive path: called by the ATM layer for each skb arriving on a
 * LANE VCC.  Control frames are queued to the daemon's socket; data
 * frames go into the network stack.  A NULL skb signals VCC close.
 */
static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct net_device *dev = (struct net_device *)vcc->proto_data;
	struct lec_priv *priv = netdev_priv(dev);

#if DUMP_PACKETS > 0
	printk(KERN_DEBUG "%s: vcc vpi:%d vci:%d\n",
	       dev->name, vcc->vpi, vcc->vci);
#endif
	if (!skb) {
		pr_debug("%s: null skb\n", dev->name);
		lec_vcc_close(priv, vcc);
		return;
	}
#if DUMP_PACKETS >= 2
#define MAX_SKB_DUMP 99
#elif DUMP_PACKETS >= 1
#define MAX_SKB_DUMP 30
#endif
#if DUMP_PACKETS > 0
	printk(KERN_DEBUG "%s: rcv datalen:%ld lecid:%4.4x\n",
	       dev->name, skb->len, priv->lecid);
	print_hex_dump(KERN_DEBUG, "", DUMP_OFFSET, 16, 1,
		       skb->data, min(MAX_SKB_DUMP, skb->len), true);
#endif /* DUMP_PACKETS > 0 */
	if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) {
		/* Control frame, to daemon */
		struct sock *sk = sk_atm(vcc);

		pr_debug("%s: To daemon\n", dev->name);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
	} else {		/* Data frame, queue to protocol handlers */
		struct lec_arp_table *entry;
		unsigned char *src, *dst;

		atm_return(vcc, skb->truesize);
		if
(*(__be16 *) skb->data == htons(priv->lecid) ||
		    !priv->lecd || !(dev->flags & IFF_UP)) {
			/*
			 * Probably looping back, or if lecd is missing,
			 * lecd has gone down
			 */
			pr_debug("Ignoring frame...\n");
			dev_kfree_skb(skb);
			return;
		}
#ifdef CONFIG_TR
		if (priv->is_trdev)
			dst = ((struct lecdatahdr_8025 *)skb->data)->h_dest;
		else
#endif
			dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest;

		/*
		 * If this is a Data Direct VCC, and the VCC does not match
		 * the LE_ARP cache entry, delete the LE_ARP cache entry.
		 */
		spin_lock_irqsave(&priv->lec_arp_lock, flags);
		if (lec_is_data_direct(vcc)) {
#ifdef CONFIG_TR
			if (priv->is_trdev)
				src =
				    ((struct lecdatahdr_8025 *)skb->data)->
				    h_source;
			else
#endif
				src =
				    ((struct lecdatahdr_8023 *)skb->data)->
				    h_source;
			entry = lec_arp_find(priv, src);
			if (entry && entry->vcc != vcc) {
				lec_arp_remove(priv, entry);
				lec_arp_put(entry);
			}
		}
		spin_unlock_irqrestore(&priv->lec_arp_lock, flags);

		if (!(dst[0] & 0x01) &&	/* Never filter Multi/Broadcast */
		    !priv->is_proxy &&	/* Proxy wants all the packets */
		    memcmp(dst, dev->dev_addr, dev->addr_len)) {
			/* unicast for someone else and we're not a proxy */
			dev_kfree_skb(skb);
			return;
		}
		if (!hlist_empty(&priv->lec_arp_empty_ones))
			lec_arp_check_empties(priv, vcc, skb);
		skb_pull(skb, 2);	/* skip lec_id */
#ifdef CONFIG_TR
		if (priv->is_trdev)
			skb->protocol = tr_type_trans(skb, dev);
		else
#endif
			skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
		netif_rx(skb);
	}
}

/*
 * pop hook interposed on data VCCs: run the original pop, then wake
 * the netdev queue if we had flow-controlled it (xoff) and the VCC
 * can accept data again.
 */
static void lec_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
	struct net_device *dev = skb->dev;

	if (vpriv == NULL) {
		pr_info("vpriv = NULL!?!?!?\n");
		return;
	}

	vpriv->old_pop(vcc, skb);

	if (vpriv->xoff && atm_may_send(vcc, 0)) {
		vpriv->xoff = 0;
		if (netif_running(dev) && netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}
}

/*
 * ATMLEC_DATA ioctl: attach a data VCC to a LEC interface, interposing
 * our push/pop handlers around the original ones.
 */
static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
{
	struct lec_vcc_priv *vpriv;
	int bytes_left;
	struct atmlec_ioc ioc_data;

	/* Lecd must be up in this case */
	bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
	if (bytes_left != 0)
		pr_info("copy from user failed for %d bytes\n", bytes_left);
	if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
	    !dev_lec[ioc_data.dev_num])
		return -EINVAL;
	vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
	if (!vpriv)
		return -ENOMEM;
	vpriv->xoff = 0;
	vpriv->old_pop = vcc->pop;	/* keep the original pop handler */
	vcc->user_back = vpriv;
	vcc->pop = lec_pop;
	lec_vcc_added(netdev_priv(dev_lec[ioc_data.dev_num]),
		      &ioc_data, vcc, vcc->push);
	vcc->proto_data = dev_lec[ioc_data.dev_num];
	vcc->push = lec_push;
	return 0;
}

/* ATMLEC_MCAST ioctl: attach the multicast (BUS) VCC to interface @arg. */
static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
{
	if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
		return -EINVAL;
	vcc->proto_data = dev_lec[arg];
	return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
}

/* Initialize device. */
static int lecd_attach(struct atm_vcc *vcc, int arg)
{
	int i;
	struct lec_priv *priv;

	if (arg < 0)
		i = 0;
	else
		i = arg;
#ifdef CONFIG_TR
	if (arg >= MAX_LEC_ITF)
		return -EINVAL;
#else				/* Reserve the top NUM_TR_DEVS for TR */
	if (arg >= (MAX_LEC_ITF - NUM_TR_DEVS))
		return -EINVAL;
#endif
	if (!dev_lec[i]) {
		/* interface does not exist yet: allocate and register it */
		int is_trdev, size;

		is_trdev = 0;
		if (i >= (MAX_LEC_ITF - NUM_TR_DEVS))
			is_trdev = 1;

		size = sizeof(struct lec_priv);
#ifdef CONFIG_TR
		if (is_trdev)
			dev_lec[i] = alloc_trdev(size);
		else
#endif
			dev_lec[i] = alloc_etherdev(size);
		if (!dev_lec[i])
			return -ENOMEM;
		dev_lec[i]->netdev_ops = &lec_netdev_ops;
		snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i);
		if (register_netdev(dev_lec[i])) {
			free_netdev(dev_lec[i]);
			return -EINVAL;
		}

		priv = netdev_priv(dev_lec[i]);
		priv->is_trdev = is_trdev;
	} else {
		priv = netdev_priv(dev_lec[i]);
		if (priv->lecd)
			return -EADDRINUSE;	/* a daemon already owns it */
	}
	lec_arp_init(priv);
	priv->itfnum = i;	/* LANE2 addition */
	priv->lecd = vcc;
	vcc->dev = &lecatm_dev;
	vcc_insert_socket(sk_atm(vcc));

	vcc->proto_data = dev_lec[i];
	set_bit(ATM_VF_META, &vcc->flags);
	set_bit(ATM_VF_READY, &vcc->flags);

	/* Set default
values to these variables */
	priv->maximum_unknown_frame_count = 1;
	priv->max_unknown_frame_time = (1 * HZ);
	priv->vcc_timeout_period = (1200 * HZ);
	priv->max_retry_count = 1;
	priv->aging_time = (300 * HZ);
	priv->forward_delay_time = (15 * HZ);
	priv->topology_change = 0;
	priv->arp_response_time = (1 * HZ);
	priv->flush_timeout = (4 * HZ);
	priv->path_switching_delay = (6 * HZ);

	if (dev_lec[i]->flags & IFF_UP)
		netif_start_queue(dev_lec[i]);
	__module_get(THIS_MODULE);
	return i;
}

#ifdef CONFIG_PROC_FS
/* Fixed-width status name for /proc output; out of range -> "<Undefined>". */
static const char *lec_arp_get_status_string(unsigned char status)
{
	static const char *const lec_arp_status_string[] = {
		"ESI_UNKNOWN ",
		"ESI_ARP_PENDING ",
		"ESI_VC_PENDING ",
		"<Undefined> ",
		"ESI_FLUSH_PENDING ",
		"ESI_FORWARD_DIRECT"
	};

	if (status > ESI_FORWARD_DIRECT)
		status = 3;	/* ESI_UNDEFINED */
	return lec_arp_status_string[status];
}

/* Emit one LE_ARP table entry as a /proc line fragment. */
static void lec_info(struct seq_file *seq, struct lec_arp_table *entry)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		seq_printf(seq, "%2.2x", entry->mac_addr[i] & 0xff);
	seq_printf(seq, " ");
	for (i = 0; i < ATM_ESA_LEN; i++)
		seq_printf(seq, "%2.2x", entry->atm_addr[i] & 0xff);
	seq_printf(seq, " %s %4.4x", lec_arp_get_status_string(entry->status),
		   entry->flags & 0xffff);
	if (entry->vcc)
		seq_printf(seq, "%3d %3d ", entry->vcc->vpi, entry->vcc->vci);
	else
		seq_printf(seq, " ");
	if (entry->recv_vcc) {
		seq_printf(seq, " %3d %3d", entry->recv_vcc->vpi,
			   entry->recv_vcc->vci);
	}
	seq_putc(seq, '\n');
}

/* Iterator state for the /proc/net/atm/lec seq_file walk. */
struct lec_state {
	unsigned long flags;		/* saved irq flags while locked */
	struct lec_priv *locked;	/* priv whose lec_arp_lock we hold */
	struct hlist_node *node;	/* current position within a chain */
	struct net_device *dev;		/* current lec device */
	int itf;			/* current interface index */
	int arp_table;			/* current hash bucket */
	int misc_table;			/* current misc list index */
};

/*
 * Step through one hlist, counting *l down; returns @state when the
 * requested position is reached.  hlist_for_each_entry_from() advances
 * @e itself, so state->node ends up at the current element.
 */
static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl,
			  loff_t *l)
{
	struct hlist_node *e = state->node;
	struct lec_arp_table *tmp;

	if (!e)
		e = tbl->first;
	if (e == SEQ_START_TOKEN) {
		e = tbl->first;
		--*l;
	}

	hlist_for_each_entry_from(tmp, e, next) {
		if (--*l < 0)
			break;
	}
	state->node = e;

	return (*l < 0) ? state : NULL;
}

/* Walk all hash buckets of the main LE_ARP table. */
static void *lec_arp_walk(struct lec_state *state, loff_t *l,
			  struct lec_priv *priv)
{
	void *v = NULL;
	int p;

	for (p = state->arp_table; p < LEC_ARP_TABLE_SIZE; p++) {
		v = lec_tbl_walk(state, &priv->lec_arp_tables[p], l);
		if (v)
			break;
	}
	state->arp_table = p;
	return v;
}

/* Walk the three auxiliary lists (empty-ones, no-forward, mcast-fwds). */
static void *lec_misc_walk(struct lec_state *state, loff_t *l,
			   struct lec_priv *priv)
{
	struct hlist_head *lec_misc_tables[] = {
		&priv->lec_arp_empty_ones,
		&priv->lec_no_forward,
		&priv->mcast_fwds
	};
	void *v = NULL;
	int q;

	for (q = state->misc_table; q < ARRAY_SIZE(lec_misc_tables); q++) {
		v = lec_tbl_walk(state, lec_misc_tables[q], l);
		if (v)
			break;
	}
	state->misc_table = q;
	return v;
}

/*
 * Walk one priv's tables under its lec_arp_lock; the lock stays held
 * across seq_file callbacks until this priv is exhausted.
 */
static void *lec_priv_walk(struct lec_state *state, loff_t *l,
			   struct lec_priv *priv)
{
	if (!state->locked) {
		state->locked = priv;
		spin_lock_irqsave(&priv->lec_arp_lock, state->flags);
	}
	if (!lec_arp_walk(state, l, priv) && !lec_misc_walk(state, l, priv)) {
		spin_unlock_irqrestore(&priv->lec_arp_lock, state->flags);
		state->locked = NULL;
		/* Partial state reset for the next time we get called */
		state->arp_table = state->misc_table = 0;
	}
	return state->locked;
}

static void *lec_itf_walk(struct lec_state *state, loff_t *l)
{
	struct net_device *dev;
	void *v;

	dev = state->dev ? state->dev : dev_lec[state->itf];
	v = (dev && netdev_priv(dev)) ?
		lec_priv_walk(state, l, netdev_priv(dev)) : NULL;
	if (!v && dev) {
		dev_put(dev);
		/* Partial state reset for the next time we get called */
		dev = NULL;
	}
	state->dev = dev;
	return v;
}

static void *lec_get_idx(struct lec_state *state, loff_t l)
{
	void *v = NULL;

	for (; state->itf < MAX_LEC_ITF; state->itf++) {
		v = lec_itf_walk(state, &l);
		if (v)
			break;
	}
	return v;
}

static void *lec_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct lec_state *state = seq->private;

	state->itf = 0;
	state->dev = NULL;
	state->locked = NULL;
	state->arp_table = 0;
	state->misc_table = 0;
	state->node = SEQ_START_TOKEN;

	return *pos ? lec_get_idx(state, *pos) : SEQ_START_TOKEN;
}

static void lec_seq_stop(struct seq_file *seq, void *v)
{
	struct lec_state *state = seq->private;

	if (state->dev) {
		/* still mid-walk: release lock and device reference */
		spin_unlock_irqrestore(&state->locked->lec_arp_lock,
				       state->flags);
		dev_put(state->dev);
	}
}

static void *lec_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct lec_state *state = seq->private;

	v = lec_get_idx(state, 1);
	/*
	 * NOTE(review): PTR_ERR() is used here merely to test v for
	 * non-NULL, so *pos advances only when another entry was found.
	 */
	*pos += !!PTR_ERR(v);
	return v;
}

static int lec_seq_show(struct seq_file *seq, void *v)
{
	static const char lec_banner[] =
	    "Itf MAC ATM destination"
	    " Status Flags "
	    "VPI/VCI Recv VPI/VCI\n";

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, lec_banner);
	else {
		struct lec_state *state = seq->private;
		struct net_device *dev = state->dev;
		struct lec_arp_table *entry = hlist_entry(state->node,
							  struct lec_arp_table,
							  next);

		seq_printf(seq, "%s ", dev->name);
		lec_info(seq, entry);
	}
	return 0;
}

static const struct seq_operations lec_seq_ops = {
	.start = lec_seq_start,
	.next = lec_seq_next,
	.stop = lec_seq_stop,
	.show = lec_seq_show,
};

static int lec_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &lec_seq_ops, sizeof(struct lec_state));
}

static const struct file_operations lec_seq_fops = {
	.owner = THIS_MODULE,
	.open = lec_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
#endif

/* ioctl demux for the three ATMLEC socket ioctls; CAP_NET_ADMIN only. */
static int lane_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct atm_vcc *vcc = ATM_SD(sock);
	int err = 0;

	switch (cmd) {
	case ATMLEC_CTRL:
	case ATMLEC_MCAST:
	case ATMLEC_DATA:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	switch (cmd) {
	case ATMLEC_CTRL:
		err = lecd_attach(vcc, (int)arg);
		if (err >= 0)
			sock->state = SS_CONNECTED;
		break;
	case ATMLEC_MCAST:
		err = lec_mcast_attach(vcc, (int)arg);
		break;
	case ATMLEC_DATA:
		err = lec_vcc_attach(vcc, (void __user *)arg);
		break;
	}

	return err;
}

static struct atm_ioctl lane_ioctl_ops = {
	.owner = THIS_MODULE,
	.ioctl = lane_ioctl,
};

static int __init
lane_module_init(void)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *p;

	p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops);
	if (!p) {
		pr_err("Unable to initialize /proc/net/atm/lec\n");
		return -ENOMEM;
	}
#endif

	register_atm_ioctl(&lane_ioctl_ops);
	pr_info("lec.c: initialized\n");
	return 0;
}

static void __exit lane_module_cleanup(void)
{
	int i;

	remove_proc_entry("lec", atm_proc_root);

	deregister_atm_ioctl(&lane_ioctl_ops);

	/* tear down every LEC interface that was created */
	for (i = 0; i < MAX_LEC_ITF; i++) {
		if (dev_lec[i] != NULL) {
			unregister_netdev(dev_lec[i]);
			free_netdev(dev_lec[i]);
			dev_lec[i] = NULL;
		}
	}
}

module_init(lane_module_init);
module_exit(lane_module_cleanup);

/*
 * LANE2: 3.1.3, LE_RESOLVE.request
 * Non force allocates memory and fills in *tlvs, fills in *sizeoftlvs.
 * If sizeoftlvs == NULL the default TLVs associated with this
 * lec will be used.
 * If dst_mac == NULL, targetless LE_ARP will be sent
 */
static int lane2_resolve(struct net_device *dev, const u8 *dst_mac, int force,
			 u8 **tlvs, u32 *sizeoftlvs)
{
	unsigned long flags;
	struct lec_priv *priv = netdev_priv(dev);
	struct lec_arp_table *table;
	struct sk_buff *skb;
	int retval;

	if (force == 0) {
		/* non-forced: answer from the LE_ARP cache only */
		spin_lock_irqsave(&priv->lec_arp_lock, flags);
		table = lec_arp_find(priv, dst_mac);
		spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
		if (table == NULL)
			return -1;

		*tlvs = kmemdup(table->tlvs, table->sizeoftlvs, GFP_ATOMIC);
		if (*tlvs == NULL)
			return -1;

		*sizeoftlvs = table->sizeoftlvs;

		return 0;
	}

	if (sizeoftlvs == NULL)
		retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, NULL);
	else {
		/* ship the caller's TLVs to the daemon with the request */
		skb = alloc_skb(*sizeoftlvs, GFP_ATOMIC);
		if (skb == NULL)
			return -1;
		skb->len = *sizeoftlvs;
		skb_copy_to_linear_data(skb, *tlvs, *sizeoftlvs);
		retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, skb);
	}
	return retval;
}

/*
 * LANE2: 3.1.4, LE_ASSOCIATE.request
 * Associate the *tlvs with the *lan_dst address.
 * Will overwrite any previous association
 * Returns 1 for success, 0 for failure (out of memory)
 *
 */
static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
			       const u8 *tlvs, u32 sizeoftlvs)
{
	int retval;
	struct sk_buff *skb;
	struct lec_priv *priv = netdev_priv(dev);

	if (compare_ether_addr(lan_dst, dev->dev_addr))
		return 0;	/* not our mac address */

	kfree(priv->tlvs);	/* NULL if there was no previous association */

	priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
	if (priv->tlvs == NULL)
		return 0;
	priv->sizeoftlvs = sizeoftlvs;

	skb = alloc_skb(sizeoftlvs, GFP_ATOMIC);
	if (skb == NULL)
		return 0;
	skb->len = sizeoftlvs;
	skb_copy_to_linear_data(skb, tlvs, sizeoftlvs);
	retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb);
	if (retval != 0)
		pr_info("lec.c: lane2_associate_req() failed\n");
	/*
	 * If the previous association has changed we must
	 * somehow notify other LANE entities about the change
	 */
	return 1;
}

/*
 * LANE2: 3.1.5, LE_ASSOCIATE.indication
 *
 */
static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
				const u8 *tlvs, u32 sizeoftlvs)
{
#if 0
	int i = 0;
#endif
	struct lec_priv *priv = netdev_priv(dev);
#if 0				/*
				 * Why have the TLVs in LE_ARP entries
				 * since we do not use them?
When you
				 * uncomment this code, make sure the
				 * TLVs get freed when entry is killed
				 */
	struct lec_arp_table *entry = lec_arp_find(priv, mac_addr);

	if (entry == NULL)
		return;		/* should not happen */

	kfree(entry->tlvs);

	entry->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
	if (entry->tlvs == NULL)
		return;
	entry->sizeoftlvs = sizeoftlvs;
#endif
#if 0
	pr_info("\n");
	pr_info("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs);
	while (i < sizeoftlvs)
		pr_cont("%02x ", tlvs[i++]);

	pr_cont("\n");
#endif

	/* tell MPOA about the TLVs we saw */
	if (priv->lane2_ops && priv->lane2_ops->associate_indicator) {
		priv->lane2_ops->associate_indicator(dev, mac_addr,
						     tlvs, sizeoftlvs);
	}
}

/*
 * Here starts what used to be lec_arpc.c
 *
 * lec_arpc.c was added here when making
 * lane client modular. October 1997
 */

#include <linux/types.h>
#include <linux/timer.h>
#include <linux/param.h>
#include <asm/atomic.h>
#include <linux/inetdevice.h>
#include <net/route.h>

#if 0
#define pr_debug(format, args...)
/*
  #define pr_debug printk
*/
#endif
#define DEBUG_ARP_TABLE 0

#define LEC_ARP_REFRESH_INTERVAL (3*HZ)

static void lec_arp_check_expire(struct work_struct *work);
static void lec_arp_expire_arp(unsigned long data);

/*
 * Arp table funcs
 */

/* hash on the low byte of the MAC address */
#define HASH(ch) (ch & (LEC_ARP_TABLE_SIZE - 1))

/*
 * Initialization of arp-cache
 */
static void lec_arp_init(struct lec_priv *priv)
{
	unsigned short i;

	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
	INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
	INIT_HLIST_HEAD(&priv->lec_no_forward);
	INIT_HLIST_HEAD(&priv->mcast_fwds);
	spin_lock_init(&priv->lec_arp_lock);
	INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
	schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
}

/*
 * Detach the data and receive VCCs from an entry: restore the original
 * push/pop handlers and release the VCCs asynchronously.
 */
static void lec_arp_clear_vccs(struct lec_arp_table *entry)
{
	if (entry->vcc) {
		struct atm_vcc *vcc = entry->vcc;
		struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
		struct net_device *dev = (struct net_device *)vcc->proto_data;

		vcc->pop = vpriv->old_pop;
		if (vpriv->xoff)
			netif_wake_queue(dev);
		kfree(vpriv);
		vcc->user_back = NULL;
		vcc->push = entry->old_push;
		vcc_release_async(vcc, -EPIPE);
		entry->vcc = NULL;
	}
	if (entry->recv_vcc) {
		entry->recv_vcc->push = entry->old_recv_push;
		vcc_release_async(entry->recv_vcc, -EPIPE);
		entry->recv_vcc = NULL;
	}
}

/*
 * Insert entry to lec_arp_table
 * LANE2: Add to the end of the list to satisfy 8.1.13
 */
static inline void
lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry)
{
	struct hlist_head *tmp;

	tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])];
	hlist_add_head(&entry->next, tmp);

	pr_debug("Added entry:%pM\n", entry->mac_addr);
}

/*
 * Remove entry from lec_arp_table
 */
static int
lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
{
	struct hlist_node *node;
	struct lec_arp_table *entry;
	int i, remove_vcc = 1;

	if (!to_remove)
		return -1;

	hlist_del(&to_remove->next);
	del_timer(&to_remove->timer);

	/*
	 * If this is the only MAC connected to this VCC,
	 * also tear down the VCC
	 */
	if (to_remove->status >= ESI_FLUSH_PENDING) {
		/*
		 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT
		 */
		for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
			hlist_for_each_entry(entry, node,
					     &priv->lec_arp_tables[i], next) {
				if (memcmp(to_remove->atm_addr,
					   entry->atm_addr, ATM_ESA_LEN) == 0) {
					remove_vcc = 0;
					break;
				}
			}
		}
		if (remove_vcc)
			lec_arp_clear_vccs(to_remove);
	}
	skb_queue_purge(&to_remove->tx_wait);	/* FIXME: good place for this?
*/

	pr_debug("Removed entry:%pM\n", to_remove->mac_addr);
	return 0;
}

#if DEBUG_ARP_TABLE
static const char *get_status_string(unsigned char st)
{
	switch (st) {
	case ESI_UNKNOWN:
		return "ESI_UNKNOWN";
	case ESI_ARP_PENDING:
		return "ESI_ARP_PENDING";
	case ESI_VC_PENDING:
		return "ESI_VC_PENDING";
	case ESI_FLUSH_PENDING:
		return "ESI_FLUSH_PENDING";
	case ESI_FORWARD_DIRECT:
		return "ESI_FORWARD_DIRECT";
	}
	return "<UNKNOWN>";
}

/* Debug-only: print every table/list entry of @priv to the kernel log. */
static void dump_arp_table(struct lec_priv *priv)
{
	struct hlist_node *node;
	struct lec_arp_table *rulla;
	char buf[256];
	int i, j, offset;

	pr_info("Dump %p:\n", priv);
	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
		hlist_for_each_entry(rulla, node,
				     &priv->lec_arp_tables[i], next) {
			offset = 0;
			offset += sprintf(buf, "%d: %p\n", i, rulla);
			offset += sprintf(buf + offset, "Mac: %pM",
					  rulla->mac_addr);
			offset += sprintf(buf + offset, " Atm:");
			for (j = 0; j < ATM_ESA_LEN; j++) {
				offset += sprintf(buf + offset,
						  "%2.2x ",
						  rulla->atm_addr[j] & 0xff);
			}
			offset += sprintf(buf + offset,
					  "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
					  rulla->vcc ? rulla->vcc->vpi : 0,
					  rulla->vcc ? rulla->vcc->vci : 0,
					  rulla->recv_vcc ? rulla->recv_vcc->
					  vpi : 0,
					  rulla->recv_vcc ? rulla->recv_vcc->
					  vci : 0, rulla->last_used,
					  rulla->timestamp, rulla->no_tries);

			offset += sprintf(buf + offset,
					  "Flags:%x, Packets_flooded:%x, Status: %s ",
					  rulla->flags, rulla->packets_flooded,
					  get_status_string(rulla->status));
			pr_info("%s\n", buf);
		}
	}

	if (!hlist_empty(&priv->lec_no_forward))
		pr_info("No forward\n");
	hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) {
		offset = 0;
		offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
		offset += sprintf(buf + offset, " Atm:");
		for (j = 0; j < ATM_ESA_LEN; j++) {
			offset += sprintf(buf + offset, "%2.2x ",
					  rulla->atm_addr[j] & 0xff);
		}
		offset += sprintf(buf + offset,
				  "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
				  rulla->vcc ? rulla->vcc->vpi : 0,
				  rulla->vcc ? rulla->vcc->vci : 0,
				  rulla->recv_vcc ? rulla->recv_vcc->vpi : 0,
				  rulla->recv_vcc ? rulla->recv_vcc->vci : 0,
				  rulla->last_used,
				  rulla->timestamp, rulla->no_tries);
		offset += sprintf(buf + offset,
				  "Flags:%x, Packets_flooded:%x, Status: %s ",
				  rulla->flags, rulla->packets_flooded,
				  get_status_string(rulla->status));
		pr_info("%s\n", buf);
	}

	if (!hlist_empty(&priv->lec_arp_empty_ones))
		pr_info("Empty ones\n");
	hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) {
		offset = 0;
		offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
		offset += sprintf(buf + offset, " Atm:");
		for (j = 0; j < ATM_ESA_LEN; j++) {
			offset += sprintf(buf + offset, "%2.2x ",
					  rulla->atm_addr[j] & 0xff);
		}
		offset += sprintf(buf + offset,
				  "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
				  rulla->vcc ? rulla->vcc->vpi : 0,
				  rulla->vcc ? rulla->vcc->vci : 0,
				  rulla->recv_vcc ? rulla->recv_vcc->vpi : 0,
				  rulla->recv_vcc ? rulla->recv_vcc->vci : 0,
				  rulla->last_used,
				  rulla->timestamp, rulla->no_tries);
		offset += sprintf(buf + offset,
				  "Flags:%x, Packets_flooded:%x, Status: %s ",
				  rulla->flags, rulla->packets_flooded,
				  get_status_string(rulla->status));
		pr_info("%s", buf);
	}

	if (!hlist_empty(&priv->mcast_fwds))
		pr_info("Multicast Forward VCCs\n");
	hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) {
		offset = 0;
		offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
		offset += sprintf(buf + offset, " Atm:");
		for (j = 0; j < ATM_ESA_LEN; j++) {
			offset += sprintf(buf + offset, "%2.2x ",
					  rulla->atm_addr[j] & 0xff);
		}
		offset += sprintf(buf + offset,
				  "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ",
				  rulla->vcc ? rulla->vcc->vpi : 0,
				  rulla->vcc ? rulla->vcc->vci : 0,
				  rulla->recv_vcc ? rulla->recv_vcc->vpi : 0,
				  rulla->recv_vcc ? rulla->recv_vcc->vci : 0,
				  rulla->last_used,
				  rulla->timestamp, rulla->no_tries);
		offset += sprintf(buf + offset,
				  "Flags:%x, Packets_flooded:%x, Status: %s ",
				  rulla->flags, rulla->packets_flooded,
				  get_status_string(rulla->status));
		pr_info("%s\n", buf);
	}
}
#else
#define dump_arp_table(priv) do { } while (0)
#endif

/*
 * Destruction of arp-cache
 */
static void lec_arp_destroy(struct lec_priv *priv)
{
	unsigned long flags;
	struct hlist_node *node, *next;
	struct lec_arp_table *entry;
	int i;

	/* stop the refresh worker before emptying the tables */
	cancel_delayed_work_sync(&priv->lec_arp_work);

	/*
	 * Remove all entries
	 */

	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
		hlist_for_each_entry_safe(entry, node, next,
					  &priv->lec_arp_tables[i], next) {
			lec_arp_remove(priv, entry);
			lec_arp_put(entry);
		}
		INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
	}

	hlist_for_each_entry_safe(entry, node, next,
				  &priv->lec_arp_empty_ones, next) {
		del_timer_sync(&entry->timer);
		lec_arp_clear_vccs(entry);
		hlist_del(&entry->next);
		lec_arp_put(entry);
	}
	INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);

	hlist_for_each_entry_safe(entry, node, next,
				  &priv->lec_no_forward, next) {
		del_timer_sync(&entry->timer);
		lec_arp_clear_vccs(entry);
		hlist_del(&entry->next);
		lec_arp_put(entry);
	}
	INIT_HLIST_HEAD(&priv->lec_no_forward);

	hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) {
		/* No timer, LANEv2 7.1.20 and 2.3.5.3 */
		lec_arp_clear_vccs(entry);
		hlist_del(&entry->next);
		lec_arp_put(entry);
	}
	INIT_HLIST_HEAD(&priv->mcast_fwds);
	priv->mcast_vcc = NULL;
	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
}

/*
 * Find entry by mac_address
 */
static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
					  const unsigned char *mac_addr)
{
	struct hlist_node *node;
	struct hlist_head *head;
	struct lec_arp_table *entry;

	pr_debug("%pM\n", mac_addr);

	head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
	hlist_for_each_entry(entry, node, head, next) {
		if (!compare_ether_addr(mac_addr, entry->mac_addr))
			return entry;
	}
return NULL;
}

/* Allocate and minimally initialize a new LE_ARP entry (refcount = 1). */
static struct lec_arp_table *make_entry(struct lec_priv *priv,
					const unsigned char *mac_addr)
{
	struct lec_arp_table *to_return;

	to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
	if (!to_return) {
		pr_info("LEC: Arp entry kmalloc failed\n");
		return NULL;
	}
	memcpy(to_return->mac_addr, mac_addr, ETH_ALEN);
	INIT_HLIST_NODE(&to_return->next);
	setup_timer(&to_return->timer, lec_arp_expire_arp,
		    (unsigned long)to_return);
	to_return->last_used = jiffies;
	to_return->priv = priv;
	skb_queue_head_init(&to_return->tx_wait);
	atomic_set(&to_return->usage, 1);
	return to_return;
}

/* Arp sent timer expired */
static void lec_arp_expire_arp(unsigned long data)
{
	struct lec_arp_table *entry;

	entry = (struct lec_arp_table *)data;

	pr_debug("\n");
	if (entry->status == ESI_ARP_PENDING) {
		/* re-send the LE_ARP request until the retry limit hits */
		if (entry->no_tries <= entry->priv->max_retry_count) {
			if (entry->is_rdesc)
				send_to_lecd(entry->priv, l_rdesc_arp_xmt,
					     entry->mac_addr, NULL, NULL);
			else
				send_to_lecd(entry->priv, l_arp_xmt,
					     entry->mac_addr, NULL, NULL);
			entry->no_tries++;
		}
		mod_timer(&entry->timer, jiffies + (1 * HZ));
	}
}

/* Unknown/unused vcc expire, remove associated entry */
static void lec_arp_expire_vcc(unsigned long data)
{
	unsigned long flags;
	struct lec_arp_table *to_remove = (struct lec_arp_table *)data;
	struct lec_priv *priv = (struct lec_priv *)to_remove->priv;

	del_timer(&to_remove->timer);

	/*
	 * NOTE(review): the conditions test to_remove->vcc but the
	 * expressions dereference to_remove->recv_vcc -- looks like a
	 * check/use mismatch that could NULL-deref with debug printing
	 * enabled; confirm against upstream before changing.
	 */
	pr_debug("%p %p: vpi:%d vci:%d\n",
		 to_remove, priv,
		 to_remove->vcc ? to_remove->recv_vcc->vpi : 0,
		 to_remove->vcc ? to_remove->recv_vcc->vci : 0);

	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	hlist_del(&to_remove->next);
	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);

	lec_arp_clear_vccs(to_remove);
	lec_arp_put(to_remove);
}

/*
 * Age one entry; returns true when the caller (the expire worker) must
 * drop the lock and flush the entry's tx_wait queue (flush period over).
 * On "true" a reference is taken via lec_arp_hold().
 */
static bool __lec_arp_check_expire(struct lec_arp_table *entry,
				   unsigned long now,
				   struct lec_priv *priv)
{
	unsigned long time_to_check;

	if ((entry->flags) & LEC_REMOTE_FLAG && priv->topology_change)
		time_to_check = priv->forward_delay_time;
	else
		time_to_check = priv->aging_time;

	pr_debug("About to expire: %lx - %lx > %lx\n",
		 now, entry->last_used, time_to_check);
	if (time_after(now, entry->last_used + time_to_check) &&
	    !(entry->flags & LEC_PERMANENT_FLAG) &&
	    !(entry->mac_addr[0] & 0x01)) {	/* LANE2: 7.1.20 */
		/* Remove entry */
		pr_debug("Entry timed out\n");
		lec_arp_remove(priv, entry);
		lec_arp_put(entry);
	} else {
		/* Something else */
		if ((entry->status == ESI_VC_PENDING ||
		     entry->status == ESI_ARP_PENDING) &&
		    time_after_eq(now, entry->timestamp +
				  priv->max_unknown_frame_time)) {
			/* reset the flood-rate window, retry VC setup */
			entry->timestamp = jiffies;
			entry->packets_flooded = 0;
			if (entry->status == ESI_VC_PENDING)
				send_to_lecd(priv, l_svc_setup,
					     entry->mac_addr,
					     entry->atm_addr,
					     NULL);
		}
		if (entry->status == ESI_FLUSH_PENDING &&
		    time_after_eq(now, entry->timestamp +
				  priv->path_switching_delay)) {
			lec_arp_hold(entry);
			return true;
		}
	}

	return false;
}

/*
 * Expire entries.
 * 1. Re-set timer
 * 2. For each entry, delete entries that have aged past the age limit.
 * 3. For each entry, depending on the status of the entry, perform
 *    the following maintenance.
 *    a. If status is ESI_VC_PENDING or ESI_ARP_PENDING then if the
 *       tick_count is above the max_unknown_frame_time, clear
 *       the tick_count to zero and clear the packets_flooded counter
 *       to zero. This supports the packet rate limit per address
 *       while flooding unknowns.
 *    b. If the status is ESI_FLUSH_PENDING and the tick_count is greater
 *       than or equal to the path_switching_delay, change the status
 *       to ESI_FORWARD_DIRECT.
This causes the flush period to end * regardless of the progress of the flush protocol. */ static void lec_arp_check_expire(struct work_struct *work) { unsigned long flags; struct lec_priv *priv = container_of(work, struct lec_priv, lec_arp_work.work); struct hlist_node *node, *next; struct lec_arp_table *entry; unsigned long now; int i; pr_debug("%p\n", priv); now = jiffies; restart: spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { if (__lec_arp_check_expire(entry, now, priv)) { struct sk_buff *skb; struct atm_vcc *vcc = entry->vcc; spin_unlock_irqrestore(&priv->lec_arp_lock, flags); while ((skb = skb_dequeue(&entry->tx_wait))) lec_send(vcc, skb); entry->last_used = jiffies; entry->status = ESI_FORWARD_DIRECT; lec_arp_put(entry); goto restart; } } } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL); } /* * Try to find vcc where mac_address is attached. * */ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, const unsigned char *mac_to_find, int is_rdesc, struct lec_arp_table **ret_entry) { unsigned long flags; struct lec_arp_table *entry; struct atm_vcc *found; if (mac_to_find[0] & 0x01) { switch (priv->lane_version) { case 1: return priv->mcast_vcc; case 2: /* LANE2 wants arp for multicast addresses */ if (!compare_ether_addr(mac_to_find, bus_mac)) return priv->mcast_vcc; break; default: break; } } spin_lock_irqsave(&priv->lec_arp_lock, flags); entry = lec_arp_find(priv, mac_to_find); if (entry) { if (entry->status == ESI_FORWARD_DIRECT) { /* Connection Ok */ entry->last_used = jiffies; lec_arp_hold(entry); *ret_entry = entry; found = entry->vcc; goto out; } /* * If the LE_ARP cache entry is still pending, reset count to 0 * so another LE_ARP request can be made for this frame. 
*/ if (entry->status == ESI_ARP_PENDING) entry->no_tries = 0; /* * Data direct VC not yet set up, check to see if the unknown * frame count is greater than the limit. If the limit has * not been reached, allow the caller to send packet to * BUS. */ if (entry->status != ESI_FLUSH_PENDING && entry->packets_flooded < priv->maximum_unknown_frame_count) { entry->packets_flooded++; pr_debug("Flooding..\n"); found = priv->mcast_vcc; goto out; } /* * We got here because entry->status == ESI_FLUSH_PENDING * or BUS flood limit was reached for an entry which is * in ESI_ARP_PENDING or ESI_VC_PENDING state. */ lec_arp_hold(entry); *ret_entry = entry; pr_debug("entry->status %d entry->vcc %p\n", entry->status, entry->vcc); found = NULL; } else { /* No matching entry was found */ entry = make_entry(priv, mac_to_find); pr_debug("Making entry\n"); if (!entry) { found = priv->mcast_vcc; goto out; } lec_arp_add(priv, entry); /* We want arp-request(s) to be sent */ entry->packets_flooded = 1; entry->status = ESI_ARP_PENDING; entry->no_tries = 1; entry->last_used = entry->timestamp = jiffies; entry->is_rdesc = is_rdesc; if (entry->is_rdesc) send_to_lecd(priv, l_rdesc_arp_xmt, mac_to_find, NULL, NULL); else send_to_lecd(priv, l_arp_xmt, mac_to_find, NULL, NULL); entry->timer.expires = jiffies + (1 * HZ); entry->timer.function = lec_arp_expire_arp; add_timer(&entry->timer); found = priv->mcast_vcc; } out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); return found; } static int lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr, unsigned long permanent) { unsigned long flags; struct hlist_node *node, *next; struct lec_arp_table *entry; int i; pr_debug("\n"); spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) && (permanent || !(entry->flags & LEC_PERMANENT_FLAG))) { lec_arp_remove(priv, entry); 
lec_arp_put(entry); } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); return 0; } } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); return -1; } /* * Notifies: Response to arp_request (atm_addr != NULL) */ static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr, const unsigned char *atm_addr, unsigned long remoteflag, unsigned int targetless_le_arp) { unsigned long flags; struct hlist_node *node, *next; struct lec_arp_table *entry, *tmp; int i; pr_debug("%smac:%pM\n", (targetless_le_arp) ? "targetless " : "", mac_addr); spin_lock_irqsave(&priv->lec_arp_lock, flags); entry = lec_arp_find(priv, mac_addr); if (entry == NULL && targetless_le_arp) goto out; /* * LANE2: ignore targetless LE_ARPs for which * we have no entry in the cache. 7.1.30 */ if (!hlist_empty(&priv->lec_arp_empty_ones)) { hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) { hlist_del(&entry->next); del_timer(&entry->timer); tmp = lec_arp_find(priv, mac_addr); if (tmp) { del_timer(&tmp->timer); tmp->status = ESI_FORWARD_DIRECT; memcpy(tmp->atm_addr, atm_addr, ATM_ESA_LEN); tmp->vcc = entry->vcc; tmp->old_push = entry->old_push; tmp->last_used = jiffies; del_timer(&entry->timer); lec_arp_put(entry); entry = tmp; } else { entry->status = ESI_FORWARD_DIRECT; memcpy(entry->mac_addr, mac_addr, ETH_ALEN); entry->last_used = jiffies; lec_arp_add(priv, entry); } if (remoteflag) entry->flags |= LEC_REMOTE_FLAG; else entry->flags &= ~LEC_REMOTE_FLAG; pr_debug("After update\n"); dump_arp_table(priv); goto out; } } } entry = lec_arp_find(priv, mac_addr); if (!entry) { entry = make_entry(priv, mac_addr); if (!entry) goto out; entry->status = ESI_UNKNOWN; lec_arp_add(priv, entry); /* Temporary, changes before end of function */ } memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN); del_timer(&entry->timer); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry(tmp, node, 
&priv->lec_arp_tables[i], next) { if (entry != tmp && !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) { /* Vcc to this host exists */ if (tmp->status > ESI_VC_PENDING) { /* * ESI_FLUSH_PENDING, * ESI_FORWARD_DIRECT */ entry->vcc = tmp->vcc; entry->old_push = tmp->old_push; } entry->status = tmp->status; break; } } } if (remoteflag) entry->flags |= LEC_REMOTE_FLAG; else entry->flags &= ~LEC_REMOTE_FLAG; if (entry->status == ESI_ARP_PENDING || entry->status == ESI_UNKNOWN) { entry->status = ESI_VC_PENDING; send_to_lecd(priv, l_svc_setup, entry->mac_addr, atm_addr, NULL); } pr_debug("After update2\n"); dump_arp_table(priv); out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); } /* * Notifies: Vcc setup ready */ static void lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data, struct atm_vcc *vcc, void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb)) { unsigned long flags; struct hlist_node *node; struct lec_arp_table *entry; int i, found_entry = 0; spin_lock_irqsave(&priv->lec_arp_lock, flags); /* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */ if (ioc_data->receive == 2) { pr_debug("LEC_ARP: Attaching mcast forward\n"); #if 0 entry = lec_arp_find(priv, bus_mac); if (!entry) { pr_info("LEC_ARP: Multicast entry not found!\n"); goto out; } memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); entry->recv_vcc = vcc; entry->old_recv_push = old_push; #endif entry = make_entry(priv, bus_mac); if (entry == NULL) goto out; del_timer(&entry->timer); memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); entry->recv_vcc = vcc; entry->old_recv_push = old_push; hlist_add_head(&entry->next, &priv->mcast_fwds); goto out; } else if (ioc_data->receive == 1) { /* * Vcc which we don't want to make default vcc, * attach it anyway. 
*/ pr_debug("LEC_ARP:Attaching data direct, not default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", ioc_data->atm_addr[0], ioc_data->atm_addr[1], ioc_data->atm_addr[2], ioc_data->atm_addr[3], ioc_data->atm_addr[4], ioc_data->atm_addr[5], ioc_data->atm_addr[6], ioc_data->atm_addr[7], ioc_data->atm_addr[8], ioc_data->atm_addr[9], ioc_data->atm_addr[10], ioc_data->atm_addr[11], ioc_data->atm_addr[12], ioc_data->atm_addr[13], ioc_data->atm_addr[14], ioc_data->atm_addr[15], ioc_data->atm_addr[16], ioc_data->atm_addr[17], ioc_data->atm_addr[18], ioc_data->atm_addr[19]); entry = make_entry(priv, bus_mac); if (entry == NULL) goto out; memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); memset(entry->mac_addr, 0, ETH_ALEN); entry->recv_vcc = vcc; entry->old_recv_push = old_push; entry->status = ESI_UNKNOWN; entry->timer.expires = jiffies + priv->vcc_timeout_period; entry->timer.function = lec_arp_expire_vcc; hlist_add_head(&entry->next, &priv->lec_no_forward); add_timer(&entry->timer); dump_arp_table(priv); goto out; } pr_debug("LEC_ARP:Attaching data direct, default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", ioc_data->atm_addr[0], ioc_data->atm_addr[1], ioc_data->atm_addr[2], ioc_data->atm_addr[3], ioc_data->atm_addr[4], ioc_data->atm_addr[5], ioc_data->atm_addr[6], ioc_data->atm_addr[7], ioc_data->atm_addr[8], ioc_data->atm_addr[9], ioc_data->atm_addr[10], ioc_data->atm_addr[11], ioc_data->atm_addr[12], ioc_data->atm_addr[13], ioc_data->atm_addr[14], ioc_data->atm_addr[15], ioc_data->atm_addr[16], ioc_data->atm_addr[17], ioc_data->atm_addr[18], ioc_data->atm_addr[19]); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { if (memcmp (ioc_data->atm_addr, entry->atm_addr, ATM_ESA_LEN) == 0) { pr_debug("LEC_ARP: Attaching data direct\n"); pr_debug("Currently -> Vcc: %d, Rvcc:%d\n", 
entry->vcc ? entry->vcc->vci : 0, entry->recv_vcc ? entry->recv_vcc-> vci : 0); found_entry = 1; del_timer(&entry->timer); entry->vcc = vcc; entry->old_push = old_push; if (entry->status == ESI_VC_PENDING) { if (priv->maximum_unknown_frame_count == 0) entry->status = ESI_FORWARD_DIRECT; else { entry->timestamp = jiffies; entry->status = ESI_FLUSH_PENDING; #if 0 send_to_lecd(priv, l_flush_xmt, NULL, entry->atm_addr, NULL); #endif } } else { /* * They were forming a connection * to us, and we to them. Our * ATM address is numerically lower * than theirs, so we make connection * we formed into default VCC (8.1.11). * Connection they made gets torn * down. This might confuse some * clients. Can be changed if * someone reports trouble... */ ; } } } } if (found_entry) { pr_debug("After vcc was added\n"); dump_arp_table(priv); goto out; } /* * Not found, snatch address from first data packet that arrives * from this vcc */ entry = make_entry(priv, bus_mac); if (!entry) goto out; entry->vcc = vcc; entry->old_push = old_push; memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); memset(entry->mac_addr, 0, ETH_ALEN); entry->status = ESI_UNKNOWN; hlist_add_head(&entry->next, &priv->lec_arp_empty_ones); entry->timer.expires = jiffies + priv->vcc_timeout_period; entry->timer.function = lec_arp_expire_vcc; add_timer(&entry->timer); pr_debug("After vcc was added\n"); dump_arp_table(priv); out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); } static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id) { unsigned long flags; struct hlist_node *node; struct lec_arp_table *entry; int i; pr_debug("%lx\n", tran_id); restart: spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { if (entry->flush_tran_id == tran_id && entry->status == ESI_FLUSH_PENDING) { struct sk_buff *skb; struct atm_vcc *vcc = entry->vcc; lec_arp_hold(entry); 
spin_unlock_irqrestore(&priv->lec_arp_lock, flags); while ((skb = skb_dequeue(&entry->tx_wait))) lec_send(vcc, skb); entry->last_used = jiffies; entry->status = ESI_FORWARD_DIRECT; lec_arp_put(entry); pr_debug("LEC_ARP: Flushed\n"); goto restart; } } } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); dump_arp_table(priv); } static void lec_set_flush_tran_id(struct lec_priv *priv, const unsigned char *atm_addr, unsigned long tran_id) { unsigned long flags; struct hlist_node *node; struct lec_arp_table *entry; int i; spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) { entry->flush_tran_id = tran_id; pr_debug("Set flush transaction id to %lx for %p\n", tran_id, entry); } } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); } static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc) { unsigned long flags; unsigned char mac_addr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; struct lec_arp_table *to_add; struct lec_vcc_priv *vpriv; int err = 0; vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL); if (!vpriv) return -ENOMEM; vpriv->xoff = 0; vpriv->old_pop = vcc->pop; vcc->user_back = vpriv; vcc->pop = lec_pop; spin_lock_irqsave(&priv->lec_arp_lock, flags); to_add = make_entry(priv, mac_addr); if (!to_add) { vcc->pop = vpriv->old_pop; kfree(vpriv); err = -ENOMEM; goto out; } memcpy(to_add->atm_addr, vcc->remote.sas_addr.prv, ATM_ESA_LEN); to_add->status = ESI_FORWARD_DIRECT; to_add->flags |= LEC_PERMANENT_FLAG; to_add->vcc = vcc; to_add->old_push = vcc->push; vcc->push = lec_push; priv->mcast_vcc = vcc; lec_arp_add(priv, to_add); out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); return err; } static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc) { unsigned long flags; struct hlist_node *node, *next; struct lec_arp_table *entry; int i; pr_debug("LEC_ARP: lec_vcc_close vpi:%d 
vci:%d\n", vcc->vpi, vcc->vci); dump_arp_table(priv); spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { if (vcc == entry->vcc) { lec_arp_remove(priv, entry); lec_arp_put(entry); if (priv->mcast_vcc == vcc) priv->mcast_vcc = NULL; } } } hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { if (entry->vcc == vcc) { lec_arp_clear_vccs(entry); del_timer(&entry->timer); hlist_del(&entry->next); lec_arp_put(entry); } } hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) { if (entry->recv_vcc == vcc) { lec_arp_clear_vccs(entry); del_timer(&entry->timer); hlist_del(&entry->next); lec_arp_put(entry); } } hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) { if (entry->recv_vcc == vcc) { lec_arp_clear_vccs(entry); /* No timer, LANEv2 7.1.20 and 2.3.5.3 */ hlist_del(&entry->next); lec_arp_put(entry); } } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); dump_arp_table(priv); } static void lec_arp_check_empties(struct lec_priv *priv, struct atm_vcc *vcc, struct sk_buff *skb) { unsigned long flags; struct hlist_node *node, *next; struct lec_arp_table *entry, *tmp; struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data; unsigned char *src; #ifdef CONFIG_TR struct lecdatahdr_8025 *tr_hdr = (struct lecdatahdr_8025 *)skb->data; if (priv->is_trdev) src = tr_hdr->h_source; else #endif src = hdr->h_source; spin_lock_irqsave(&priv->lec_arp_lock, flags); hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { if (vcc == entry->vcc) { del_timer(&entry->timer); memcpy(entry->mac_addr, src, ETH_ALEN); entry->status = ESI_FORWARD_DIRECT; entry->last_used = jiffies; /* We might have got an entry */ tmp = lec_arp_find(priv, src); if (tmp) { lec_arp_remove(priv, tmp); lec_arp_put(tmp); } hlist_del(&entry->next); lec_arp_add(priv, entry); goto out; } } pr_debug("LEC_ARP: 
Arp_check_empties: entry not found!\n"); out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); } MODULE_LICENSE("GPL");
gpl-2.0
ch33kybutt/kernel_skipjack_tuna
net/atm/lec.c
2365
64855
/*
 * lec.c: Lan Emulation driver
 *
 * Marko Kiiskila <mkiiskila@yahoo.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/capability.h>

/* We are ethernet device */
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/sock.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <net/arp.h>
#include <net/dst.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>

/* TokenRing if needed */
#ifdef CONFIG_TR
#include <linux/trdevice.h>
#endif

/* And atm device */
#include <linux/atmdev.h>
#include <linux/atmlec.h>

/* Proxy LEC knows about bridging */
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
#include "../bridge/br_private.h"

static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
#endif

/* Modular too */
#include <linux/module.h>
#include <linux/init.h>

#include "lec.h"
#include "lec_arpc.h"
#include "resources.h"

#define DUMP_PACKETS 0		/*
				 * 0 = None,
				 * 1 = 30 first bytes
				 * 2 = Whole packet
				 */

#define LEC_UNRES_QUE_LEN 8	/*
				 * number of tx packets to queue for a
				 * single destination while waiting for SVC
				 */

static int lec_open(struct net_device *dev);
static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
				  struct net_device *dev);
static int lec_close(struct net_device *dev);
static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
					  const unsigned char *mac_addr);
static int lec_arp_remove(struct lec_priv *priv,
			  struct lec_arp_table *to_remove);
/* LANE2 functions */
static void lane2_associate_ind(struct net_device *dev, const u8 *mac_address,
				const u8 *tlvs, u32 sizeoftlvs);
static int lane2_resolve(struct net_device *dev, const u8 *dst_mac, int force,
			 u8 **tlvs, u32 *sizeoftlvs);
static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
			       const u8 *tlvs, u32 sizeoftlvs);

static int lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
			   unsigned long permanent);
static void lec_arp_check_empties(struct lec_priv *priv,
				  struct atm_vcc *vcc, struct sk_buff *skb);
static void lec_arp_destroy(struct lec_priv *priv);
static void lec_arp_init(struct lec_priv *priv);
static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
				       const unsigned char *mac_to_find,
				       int is_rdesc,
				       struct lec_arp_table **ret_entry);
static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
			   const unsigned char *atm_addr,
			   unsigned long remoteflag,
			   unsigned int targetless_le_arp);
static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id);
static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc);
static void lec_set_flush_tran_id(struct lec_priv *priv,
				  const unsigned char *atm_addr,
				  unsigned long tran_id);
static void lec_vcc_added(struct lec_priv *priv,
			  const struct atmlec_ioc *ioc_data,
			  struct atm_vcc *vcc,
			  void (*old_push)(struct atm_vcc *vcc,
					   struct sk_buff *skb));
static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc);

/* must be done under lec_arp_lock */
static inline void lec_arp_hold(struct lec_arp_table *entry)
{
	atomic_inc(&entry->usage);
}

/* Drop a reference; frees the entry when the count reaches zero. */
static inline void lec_arp_put(struct lec_arp_table *entry)
{
	if (atomic_dec_and_test(&entry->usage))
		kfree(entry);
}

static struct lane2_ops lane2_ops = {
	lane2_resolve,		/* resolve,             spec 3.1.3 */
	lane2_associate_req,	/* associate_req,       spec 3.1.4 */
	NULL			/* associate indicator, spec 3.1.5 */
};

static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/* Device structures */
static struct net_device *dev_lec[MAX_LEC_ITF];

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
{
	char *buff;
	struct lec_priv *priv;

	/*
	 * Check if this is a BPDU. If so, ask zeppelin to send
	 * LE_TOPOLOGY_REQUEST with the same value of Topology Change bit
	 * as the Config BPDU has
	 */
	buff = skb->data + skb->dev->hard_header_len;
	if (*buff++ == 0x42 && *buff++ == 0x42 && *buff++ == 0x03) {
		struct sock *sk;
		struct sk_buff *skb2;
		struct atmlec_msg *mesg;

		skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
		if (skb2 == NULL)
			return;
		skb2->len = sizeof(struct atmlec_msg);
		mesg = (struct atmlec_msg *)skb2->data;
		mesg->type = l_topology_change;
		buff += 4;
		mesg->content.normal.flag = *buff & 0x01;
					/* 0x01 is topology change */

		priv = netdev_priv(dev);
		atm_force_charge(priv->lecd, skb2->truesize);
		sk = sk_atm(priv->lecd);
		skb_queue_tail(&sk->sk_receive_queue, skb2);
		sk->sk_data_ready(sk, skb2->len);
	}
}
#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */

/*
 * Modelled after tr_type_trans
 * All multicast and ARE or STE frames go to BUS.
 * Non source routed frames go by destination address.
 * Last hop source routed frames go by destination address.
 * Not last hop source routed frames go by _next_ route descriptor.
 * Returns pointer to destination MAC address or fills in rdesc
 * and returns NULL.
 */
#ifdef CONFIG_TR
static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc)
{
	struct trh_hdr *trh;
	unsigned int riflen, num_rdsc;

	trh = (struct trh_hdr *)packet;
	if (trh->daddr[0] & (uint8_t) 0x80)
		return bus_mac;	/* multicast */

	if (trh->saddr[0] & TR_RII) {
		riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;
		if ((ntohs(trh->rcf) >> 13) != 0)
			return bus_mac;	/* ARE or STE */
	} else
		return trh->daddr;	/* not source routed */

	if (riflen < 6)
		return trh->daddr;	/* last hop, source routed */

	/* riflen is 6 or more, packet has more than one route descriptor */
	num_rdsc = (riflen / 2) - 1;
	memset(rdesc, 0, ETH_ALEN);
	/* offset 4 comes from LAN destination field in LE control frames */
	if (trh->rcf & htons((uint16_t) TR_RCF_DIR_BIT))
		memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(__be16));
	else {
		memcpy(&rdesc[4], &trh->rseg[1], sizeof(__be16));
		rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0));
	}

	return NULL;
}
#endif /* CONFIG_TR */

/*
 * Open/initialize the netdevice. This is called (in the current kernel)
 * sometime after booting when the 'ifconfig' program is run.
 *
 * This routine should set everything up anew at each open, even
 * registers that "should" only need to be set once at boot, so that
 * there is non-reboot way to recover if something goes wrong.
 */
static int lec_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

/* Hand one frame to the ATM layer on @vcc, updating tx statistics. */
static void lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	ATM_SKB(skb)->vcc = vcc;
	ATM_SKB(skb)->atm_options = vcc->atm_options;

	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
	if (vcc->send(vcc, skb) < 0) {
		dev->stats.tx_dropped++;
		return;
	}

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
}

static void lec_tx_timeout(struct net_device *dev)
{
	pr_info("%s\n", dev->name);
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

/*
 * Main transmit path: prepend the LEC id header, pad to the minimum
 * frame size, resolve the destination MAC to a vcc (queueing on the
 * cache entry while resolution is pending) and send.
 */
static netdev_tx_t lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *skb2;
	struct lec_priv *priv = netdev_priv(dev);
	struct lecdatahdr_8023 *lec_h;
	struct atm_vcc *vcc;
	struct lec_arp_table *entry;
	unsigned char *dst;
	int min_frame_size;
#ifdef CONFIG_TR
	unsigned char rdesc[ETH_ALEN];	/* Token Ring route descriptor */
#endif
	int is_rdesc;

	pr_debug("called\n");
	if (!priv->lecd) {
		pr_info("%s:No lecd attached\n", dev->name);
		dev->stats.tx_errors++;
		netif_stop_queue(dev);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n",
		 (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb),
		 (long)skb_end_pointer(skb));
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
	if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0)
		lec_handle_bridge(skb, dev);
#endif

	/* Make sure we have room for lec_id */
	if (skb_headroom(skb) < 2) {
		pr_debug("reallocating skb\n");
		skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
		kfree_skb(skb);
		if (skb2 == NULL)
			return NETDEV_TX_OK;
		skb = skb2;
	}
	skb_push(skb, 2);

	/* Put le header to place, works for TokenRing too */
	lec_h = (struct lecdatahdr_8023 *)skb->data;
	lec_h->le_header = htons(priv->lecid);

#ifdef CONFIG_TR
	/*
	 * Ugly. Use this to realign Token Ring packets for
	 * e.g. PCA-200E driver.
	 */
	if (priv->is_trdev) {
		skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
		kfree_skb(skb);
		if (skb2 == NULL)
			return NETDEV_TX_OK;
		skb = skb2;
	}
#endif

#if DUMP_PACKETS >= 2
#define MAX_DUMP_SKB 99
#elif DUMP_PACKETS >= 1
#define MAX_DUMP_SKB 30
#endif
#if DUMP_PACKETS >= 1
	printk(KERN_DEBUG "%s: send datalen:%ld lecid:%4.4x\n",
	       dev->name, skb->len, priv->lecid);
	print_hex_dump(KERN_DEBUG, "", DUMP_OFFSET, 16, 1,
		       skb->data, min(skb->len, MAX_DUMP_SKB), true);
#endif /* DUMP_PACKETS >= 1 */

	/* Minimum ethernet-frame size */
#ifdef CONFIG_TR
	if (priv->is_trdev)
		min_frame_size = LEC_MINIMUM_8025_SIZE;
	else
#endif
		min_frame_size = LEC_MINIMUM_8023_SIZE;
	if (skb->len < min_frame_size) {
		if ((skb->len + skb_tailroom(skb)) < min_frame_size) {
			skb2 = skb_copy_expand(skb, 0,
					       min_frame_size - skb->truesize,
					       GFP_ATOMIC);
			dev_kfree_skb(skb);
			if (skb2 == NULL) {
				dev->stats.tx_dropped++;
				return NETDEV_TX_OK;
			}
			skb = skb2;
		}
		skb_put(skb, min_frame_size - skb->len);
	}

	/* Send to right vcc */
	is_rdesc = 0;
	dst = lec_h->h_dest;
#ifdef CONFIG_TR
	if (priv->is_trdev) {
		dst = get_tr_dst(skb->data + 2, rdesc);
		if (dst == NULL) {
			dst = rdesc;
			is_rdesc = 1;
		}
	}
#endif
	entry = NULL;
	vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry);
	pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n",
		 dev->name, vcc, vcc ? vcc->flags : 0, entry);
	if (!vcc || !test_bit(ATM_VF_READY, &vcc->flags)) {
		/* Resolution still pending: queue on the entry if room */
		if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) {
			pr_debug("%s:queuing packet, MAC address %pM\n",
				 dev->name, lec_h->h_dest);
			skb_queue_tail(&entry->tx_wait, skb);
		} else {
			pr_debug("%s:tx queue full or no arp entry, dropping, MAC address: %pM\n",
				 dev->name, lec_h->h_dest);
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
		}
		goto out;
	}
#if DUMP_PACKETS > 0
	printk(KERN_DEBUG "%s:sending to vpi:%d vci:%d\n",
	       dev->name, vcc->vpi, vcc->vci);
#endif /* DUMP_PACKETS > 0 */

	/* Flush frames queued while this destination was being resolved */
	while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) {
		pr_debug("emptying tx queue, MAC address %pM\n",
			 lec_h->h_dest);
		lec_send(vcc, skb2);
	}

	lec_send(vcc, skb);

	if (!atm_may_send(vcc, 0)) {
		struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);

		vpriv->xoff = 1;
		netif_stop_queue(dev);

		/*
		 * vcc->pop() might have occurred in between, making
		 * the vcc usuable again. Since xmit is serialized,
		 * this is the only situation we have to re-test.
		 */

		if (atm_may_send(vcc, 0))
			netif_wake_queue(dev);
	}

out:
	if (entry)
		lec_arp_put(entry);
	dev->trans_start = jiffies;
	return NETDEV_TX_OK;
}

/* The inverse routine to net_open(). */
static int lec_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

/*
 * Process one control message from the LANE daemon ("zeppelin").
 * Dispatches on mesg->type and consumes @skb. Returns 0, or -EINVAL
 * for an unknown message type.
 */
static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct net_device *dev = (struct net_device *)vcc->proto_data;
	struct lec_priv *priv = netdev_priv(dev);
	struct atmlec_msg *mesg;
	struct lec_arp_table *entry;
	int i;
	char *tmp;		/* FIXME */

	atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
	mesg = (struct atmlec_msg *)skb->data;
	tmp = skb->data;
	tmp += sizeof(struct atmlec_msg);
	pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type);
	switch (mesg->type) {
	case l_set_mac_addr:
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = mesg->content.normal.mac_addr[i];
		break;
	case l_del_mac_addr:
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = 0;
		break;
	case l_addr_delete:
		lec_addr_delete(priv, mesg->content.normal.atm_addr,
				mesg->content.normal.flag);
		break;
	case l_topology_change:
		priv->topology_change = mesg->content.normal.flag;
		break;
	case l_flush_complete:
		lec_flush_complete(priv, mesg->content.normal.flag);
		break;
	case l_narp_req:	/* LANE2: see 7.1.35 in the lane2 spec */
		spin_lock_irqsave(&priv->lec_arp_lock, flags);
		entry = lec_arp_find(priv, mesg->content.normal.mac_addr);
		lec_arp_remove(priv, entry);
		spin_unlock_irqrestore(&priv->lec_arp_lock, flags);

		if (mesg->content.normal.no_source_le_narp)
			break;
		/* FALL THROUGH */
	case l_arp_update:
		lec_arp_update(priv, mesg->content.normal.mac_addr,
			       mesg->content.normal.atm_addr,
			       mesg->content.normal.flag,
			       mesg->content.normal.targetless_le_arp);
		pr_debug("in l_arp_update\n");
		if (mesg->sizeoftlvs != 0) {	/* LANE2 3.1.5 */
			pr_debug("LANE2 3.1.5, got tlvs, size %d\n",
				 mesg->sizeoftlvs);
			lane2_associate_ind(dev, mesg->content.normal.mac_addr,
					    tmp, mesg->sizeoftlvs);
		}
		break;
	case l_config:
		priv->maximum_unknown_frame_count =
			mesg->content.config.maximum_unknown_frame_count;
		priv->max_unknown_frame_time =
			(mesg->content.config.max_unknown_frame_time * HZ);
		priv->max_retry_count = mesg->content.config.max_retry_count;
		priv->aging_time = (mesg->content.config.aging_time * HZ);
		priv->forward_delay_time =
			(mesg->content.config.forward_delay_time * HZ);
		priv->arp_response_time =
			(mesg->content.config.arp_response_time * HZ);
		priv->flush_timeout = (mesg->content.config.flush_timeout * HZ);
		priv->path_switching_delay =
			(mesg->content.config.path_switching_delay * HZ);
		priv->lane_version = mesg->content.config.lane_version;
					/* LANE2 */
		priv->lane2_ops = NULL;
		if (priv->lane_version > 1)
			priv->lane2_ops = &lane2_ops;
		if (dev_set_mtu(dev, mesg->content.config.mtu))
			pr_info("%s: change_mtu to %d failed\n",
				dev->name, mesg->content.config.mtu);
		priv->is_proxy = mesg->content.config.is_proxy;
		break;
	case l_flush_tran_id:
		lec_set_flush_tran_id(priv, mesg->content.normal.atm_addr,
				      mesg->content.normal.flag);
		break;
	case l_set_lecid:
		priv->lecid =
			(unsigned short)(0xffff & mesg->content.normal.flag);
		break;
	case l_should_bridge:
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
	{
		pr_debug("%s: bridge zeppelin asks about %pM\n",
			 dev->name, mesg->content.proxy.mac_addr);

		if (br_fdb_test_addr_hook == NULL)
			break;

		if (br_fdb_test_addr_hook(dev, mesg->content.proxy.mac_addr)) {
			/* hit from bridge table, send LE_ARP_RESPONSE */
			struct sk_buff *skb2;
			struct sock *sk;

			pr_debug("%s: entry found, responding to zeppelin\n",
				 dev->name);
			skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
			if (skb2 == NULL)
				break;
			skb2->len = sizeof(struct atmlec_msg);
			skb_copy_to_linear_data(skb2, mesg, sizeof(*mesg));
			atm_force_charge(priv->lecd, skb2->truesize);
			sk = sk_atm(priv->lecd);
			skb_queue_tail(&sk->sk_receive_queue, skb2);
			sk->sk_data_ready(sk, skb2->len);
		}
	}
#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
		break;
	default:
		pr_info("%s: Unknown message type %d\n", dev->name, mesg->type);
		dev_kfree_skb(skb);
		return -EINVAL;
	}
	dev_kfree_skb(skb);
	return 0;
}

/* Daemon control vcc closed: tear down the interface and ARP state. */
static void lec_atm_close(struct atm_vcc *vcc)
{
	struct sk_buff *skb;
	struct net_device *dev = (struct net_device *)vcc->proto_data;
	struct lec_priv *priv = netdev_priv(dev);

	priv->lecd = NULL;
	/* Do something needful? */

	netif_stop_queue(dev);
	lec_arp_destroy(priv);

	if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
		pr_info("%s closing with messages pending\n", dev->name);
	while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) {
		atm_return(vcc, skb->truesize);
		dev_kfree_skb(skb);
	}

	pr_info("%s: Shut down!\n", dev->name);
	module_put(THIS_MODULE);
}

static struct atmdev_ops lecdev_ops = {
	.close = lec_atm_close,
	.send = lec_atm_send
};

static struct atm_dev lecatm_dev = {
	.ops = &lecdev_ops,
	.type = "lec",
	.number = 999,		/* dummy device number */
	.lock = __SPIN_LOCK_UNLOCKED(lecatm_dev.lock)
};

/*
 * LANE2: new argument struct sk_buff *data contains
 * the LE_ARP based TLVs introduced in the LANE2 spec
 */
static int
send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
	     const unsigned char *mac_addr, const unsigned char *atm_addr,
	     struct sk_buff *data)
{
	struct sock *sk;
	struct sk_buff *skb;
	struct atmlec_msg *mesg;

	if (!priv || !priv->lecd)
		return -1;
	skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
	if (!skb)
		return -1;
	skb->len = sizeof(struct atmlec_msg);
	mesg = (struct atmlec_msg *)skb->data;
	memset(mesg, 0, sizeof(struct atmlec_msg));
	mesg->type = type;
	if (data != NULL)
		mesg->sizeoftlvs = data->len;
	if (mac_addr)
		memcpy(&mesg->content.normal.mac_addr, mac_addr, ETH_ALEN);
	else
		mesg->content.normal.targetless_le_arp = 1;
	if (atm_addr)
		memcpy(&mesg->content.normal.atm_addr, atm_addr, ATM_ESA_LEN);

	atm_force_charge(priv->lecd, skb->truesize);
	sk = sk_atm(priv->lecd);
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	if (data != NULL) {
		pr_debug("about to send %d bytes of data\n", data->len);
		atm_force_charge(priv->lecd, data->truesize);
		skb_queue_tail(&sk->sk_receive_queue, data);
		sk->sk_data_ready(sk, skb->len);
	}

	return 0;
}

/* shamelessly stolen from drivers/net/net_init.c */
static int lec_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 18190))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static void lec_set_multicast_list(struct net_device *dev)
{
	/*
	 * by default, all multicast frames arrive over the bus.
	 * eventually support selective multicast service
	 */
}

static const struct net_device_ops lec_netdev_ops = {
	.ndo_open		= lec_open,
	.ndo_stop		= lec_close,
	.ndo_start_xmit		= lec_start_xmit,
	.ndo_change_mtu		= lec_change_mtu,
	.ndo_tx_timeout		= lec_tx_timeout,
	.ndo_set_multicast_list	= lec_set_multicast_list,
};

/* Leading bytes identifying a LANE control frame on the wire */
static const unsigned char lec_ctrl_magic[] = {
	0xff,
	0x00,
	0x01,
	0x01
};

#define LEC_DATA_DIRECT_8023  2
#define LEC_DATA_DIRECT_8025  3

/* True when the vcc's BLLI SNAP byte marks a data-direct VCC. */
static int lec_is_data_direct(struct atm_vcc *vcc)
{
	return ((vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8023) ||
		(vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8025));
}

/*
 * Receive path: control frames are queued to the daemon socket; data
 * frames are filtered, used to reconcile the LE_ARP cache, and passed
 * up the network stack. A NULL @skb means the vcc is being closed.
 */
static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct net_device *dev = (struct net_device *)vcc->proto_data;
	struct lec_priv *priv = netdev_priv(dev);

#if DUMP_PACKETS > 0
	printk(KERN_DEBUG "%s: vcc vpi:%d vci:%d\n",
	       dev->name, vcc->vpi, vcc->vci);
#endif
	if (!skb) {
		pr_debug("%s: null skb\n", dev->name);
		lec_vcc_close(priv, vcc);
		return;
	}
#if DUMP_PACKETS >= 2
#define MAX_SKB_DUMP 99
#elif DUMP_PACKETS >= 1
#define MAX_SKB_DUMP 30
#endif
#if DUMP_PACKETS > 0
	printk(KERN_DEBUG "%s: rcv datalen:%ld lecid:%4.4x\n",
	       dev->name, skb->len, priv->lecid);
	print_hex_dump(KERN_DEBUG, "", DUMP_OFFSET, 16, 1,
		       skb->data, min(MAX_SKB_DUMP, skb->len), true);
#endif /* DUMP_PACKETS > 0 */
	if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) {
		/* Control frame, to daemon */
		struct sock *sk = sk_atm(vcc);

		pr_debug("%s: To daemon\n", dev->name);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
	} else {		/* Data frame, queue to protocol handlers */
		struct lec_arp_table *entry;
		unsigned char *src, *dst;

		atm_return(vcc, skb->truesize);
		if (*(__be16 *) skb->data == htons(priv->lecid) ||
		    !priv->lecd || !(dev->flags & IFF_UP)) {
			/*
			 * Probably looping back, or if lecd is missing,
			 * lecd has gone down
			 */
			pr_debug("Ignoring frame...\n");
			dev_kfree_skb(skb);
			return;
		}
#ifdef CONFIG_TR
		if (priv->is_trdev)
			dst = ((struct lecdatahdr_8025 *)skb->data)->h_dest;
		else
#endif
			dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest;

		/*
		 * If this is a Data Direct VCC, and the VCC does not match
		 * the LE_ARP cache entry, delete the LE_ARP cache entry.
		 */
		spin_lock_irqsave(&priv->lec_arp_lock, flags);
		if (lec_is_data_direct(vcc)) {
#ifdef CONFIG_TR
			if (priv->is_trdev)
				src = ((struct lecdatahdr_8025 *)
				       skb->data)->h_source;
			else
#endif
				src = ((struct lecdatahdr_8023 *)
				       skb->data)->h_source;
			entry = lec_arp_find(priv, src);
			if (entry && entry->vcc != vcc) {
				lec_arp_remove(priv, entry);
				lec_arp_put(entry);
			}
		}
		spin_unlock_irqrestore(&priv->lec_arp_lock, flags);

		if (!(dst[0] & 0x01) &&	/* Never filter Multi/Broadcast */
		    !priv->is_proxy &&	/* Proxy wants all the packets */
		    memcmp(dst, dev->dev_addr, dev->addr_len)) {
			dev_kfree_skb(skb);
			return;
		}
		if (!hlist_empty(&priv->lec_arp_empty_ones))
			lec_arp_check_empties(priv, vcc, skb);
		skb_pull(skb, 2);	/* skip lec_id */
#ifdef CONFIG_TR
		if (priv->is_trdev)
			skb->protocol = tr_type_trans(skb, dev);
		else
#endif
			skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
		netif_rx(skb);
	}
}

/*
 * Wrapped pop handler: chain to the original, then lift flow control
 * (set by lec_start_xmit) once the vcc can accept more data.
 */
static void lec_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
	struct net_device *dev = skb->dev;

	if (vpriv == NULL) {
		pr_info("vpriv = NULL!?!?!?\n");
		return;
	}

	vpriv->old_pop(vcc, skb);

	if (vpriv->xoff && atm_may_send(vcc, 0)) {
		vpriv->xoff = 0;
		if (netif_running(dev) && netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}
}

static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
{
	struct lec_vcc_priv *vpriv;
	int bytes_left;
	struct atmlec_ioc ioc_data;
/* Lecd must be up in this case */ bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc)); if (bytes_left != 0) pr_info("copy from user failed for %d bytes\n", bytes_left); if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF || !dev_lec[ioc_data.dev_num]) return -EINVAL; vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL); if (!vpriv) return -ENOMEM; vpriv->xoff = 0; vpriv->old_pop = vcc->pop; vcc->user_back = vpriv; vcc->pop = lec_pop; lec_vcc_added(netdev_priv(dev_lec[ioc_data.dev_num]), &ioc_data, vcc, vcc->push); vcc->proto_data = dev_lec[ioc_data.dev_num]; vcc->push = lec_push; return 0; } static int lec_mcast_attach(struct atm_vcc *vcc, int arg) { if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg]) return -EINVAL; vcc->proto_data = dev_lec[arg]; return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc); } /* Initialize device. */ static int lecd_attach(struct atm_vcc *vcc, int arg) { int i; struct lec_priv *priv; if (arg < 0) i = 0; else i = arg; #ifdef CONFIG_TR if (arg >= MAX_LEC_ITF) return -EINVAL; #else /* Reserve the top NUM_TR_DEVS for TR */ if (arg >= (MAX_LEC_ITF - NUM_TR_DEVS)) return -EINVAL; #endif if (!dev_lec[i]) { int is_trdev, size; is_trdev = 0; if (i >= (MAX_LEC_ITF - NUM_TR_DEVS)) is_trdev = 1; size = sizeof(struct lec_priv); #ifdef CONFIG_TR if (is_trdev) dev_lec[i] = alloc_trdev(size); else #endif dev_lec[i] = alloc_etherdev(size); if (!dev_lec[i]) return -ENOMEM; dev_lec[i]->netdev_ops = &lec_netdev_ops; snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i); if (register_netdev(dev_lec[i])) { free_netdev(dev_lec[i]); return -EINVAL; } priv = netdev_priv(dev_lec[i]); priv->is_trdev = is_trdev; } else { priv = netdev_priv(dev_lec[i]); if (priv->lecd) return -EADDRINUSE; } lec_arp_init(priv); priv->itfnum = i; /* LANE2 addition */ priv->lecd = vcc; vcc->dev = &lecatm_dev; vcc_insert_socket(sk_atm(vcc)); vcc->proto_data = dev_lec[i]; set_bit(ATM_VF_META, &vcc->flags); set_bit(ATM_VF_READY, &vcc->flags); /* Set default 
values to these variables */ priv->maximum_unknown_frame_count = 1; priv->max_unknown_frame_time = (1 * HZ); priv->vcc_timeout_period = (1200 * HZ); priv->max_retry_count = 1; priv->aging_time = (300 * HZ); priv->forward_delay_time = (15 * HZ); priv->topology_change = 0; priv->arp_response_time = (1 * HZ); priv->flush_timeout = (4 * HZ); priv->path_switching_delay = (6 * HZ); if (dev_lec[i]->flags & IFF_UP) netif_start_queue(dev_lec[i]); __module_get(THIS_MODULE); return i; } #ifdef CONFIG_PROC_FS static const char *lec_arp_get_status_string(unsigned char status) { static const char *const lec_arp_status_string[] = { "ESI_UNKNOWN ", "ESI_ARP_PENDING ", "ESI_VC_PENDING ", "<Undefined> ", "ESI_FLUSH_PENDING ", "ESI_FORWARD_DIRECT" }; if (status > ESI_FORWARD_DIRECT) status = 3; /* ESI_UNDEFINED */ return lec_arp_status_string[status]; } static void lec_info(struct seq_file *seq, struct lec_arp_table *entry) { int i; for (i = 0; i < ETH_ALEN; i++) seq_printf(seq, "%2.2x", entry->mac_addr[i] & 0xff); seq_printf(seq, " "); for (i = 0; i < ATM_ESA_LEN; i++) seq_printf(seq, "%2.2x", entry->atm_addr[i] & 0xff); seq_printf(seq, " %s %4.4x", lec_arp_get_status_string(entry->status), entry->flags & 0xffff); if (entry->vcc) seq_printf(seq, "%3d %3d ", entry->vcc->vpi, entry->vcc->vci); else seq_printf(seq, " "); if (entry->recv_vcc) { seq_printf(seq, " %3d %3d", entry->recv_vcc->vpi, entry->recv_vcc->vci); } seq_putc(seq, '\n'); } struct lec_state { unsigned long flags; struct lec_priv *locked; struct hlist_node *node; struct net_device *dev; int itf; int arp_table; int misc_table; }; static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl, loff_t *l) { struct hlist_node *e = state->node; struct lec_arp_table *tmp; if (!e) e = tbl->first; if (e == SEQ_START_TOKEN) { e = tbl->first; --*l; } hlist_for_each_entry_from(tmp, e, next) { if (--*l < 0) break; } state->node = e; return (*l < 0) ? 
state : NULL; } static void *lec_arp_walk(struct lec_state *state, loff_t *l, struct lec_priv *priv) { void *v = NULL; int p; for (p = state->arp_table; p < LEC_ARP_TABLE_SIZE; p++) { v = lec_tbl_walk(state, &priv->lec_arp_tables[p], l); if (v) break; } state->arp_table = p; return v; } static void *lec_misc_walk(struct lec_state *state, loff_t *l, struct lec_priv *priv) { struct hlist_head *lec_misc_tables[] = { &priv->lec_arp_empty_ones, &priv->lec_no_forward, &priv->mcast_fwds }; void *v = NULL; int q; for (q = state->misc_table; q < ARRAY_SIZE(lec_misc_tables); q++) { v = lec_tbl_walk(state, lec_misc_tables[q], l); if (v) break; } state->misc_table = q; return v; } static void *lec_priv_walk(struct lec_state *state, loff_t *l, struct lec_priv *priv) { if (!state->locked) { state->locked = priv; spin_lock_irqsave(&priv->lec_arp_lock, state->flags); } if (!lec_arp_walk(state, l, priv) && !lec_misc_walk(state, l, priv)) { spin_unlock_irqrestore(&priv->lec_arp_lock, state->flags); state->locked = NULL; /* Partial state reset for the next time we get called */ state->arp_table = state->misc_table = 0; } return state->locked; } static void *lec_itf_walk(struct lec_state *state, loff_t *l) { struct net_device *dev; void *v; dev = state->dev ? state->dev : dev_lec[state->itf]; v = (dev && netdev_priv(dev)) ? lec_priv_walk(state, l, netdev_priv(dev)) : NULL; if (!v && dev) { dev_put(dev); /* Partial state reset for the next time we get called */ dev = NULL; } state->dev = dev; return v; } static void *lec_get_idx(struct lec_state *state, loff_t l) { void *v = NULL; for (; state->itf < MAX_LEC_ITF; state->itf++) { v = lec_itf_walk(state, &l); if (v) break; } return v; } static void *lec_seq_start(struct seq_file *seq, loff_t *pos) { struct lec_state *state = seq->private; state->itf = 0; state->dev = NULL; state->locked = NULL; state->arp_table = 0; state->misc_table = 0; state->node = SEQ_START_TOKEN; return *pos ? 
lec_get_idx(state, *pos) : SEQ_START_TOKEN; } static void lec_seq_stop(struct seq_file *seq, void *v) { struct lec_state *state = seq->private; if (state->dev) { spin_unlock_irqrestore(&state->locked->lec_arp_lock, state->flags); dev_put(state->dev); } } static void *lec_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct lec_state *state = seq->private; v = lec_get_idx(state, 1); *pos += !!PTR_ERR(v); return v; } static int lec_seq_show(struct seq_file *seq, void *v) { static const char lec_banner[] = "Itf MAC ATM destination" " Status Flags " "VPI/VCI Recv VPI/VCI\n"; if (v == SEQ_START_TOKEN) seq_puts(seq, lec_banner); else { struct lec_state *state = seq->private; struct net_device *dev = state->dev; struct lec_arp_table *entry = hlist_entry(state->node, struct lec_arp_table, next); seq_printf(seq, "%s ", dev->name); lec_info(seq, entry); } return 0; } static const struct seq_operations lec_seq_ops = { .start = lec_seq_start, .next = lec_seq_next, .stop = lec_seq_stop, .show = lec_seq_show, }; static int lec_seq_open(struct inode *inode, struct file *file) { return seq_open_private(file, &lec_seq_ops, sizeof(struct lec_state)); } static const struct file_operations lec_seq_fops = { .owner = THIS_MODULE, .open = lec_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; #endif static int lane_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct atm_vcc *vcc = ATM_SD(sock); int err = 0; switch (cmd) { case ATMLEC_CTRL: case ATMLEC_MCAST: case ATMLEC_DATA: if (!capable(CAP_NET_ADMIN)) return -EPERM; break; default: return -ENOIOCTLCMD; } switch (cmd) { case ATMLEC_CTRL: err = lecd_attach(vcc, (int)arg); if (err >= 0) sock->state = SS_CONNECTED; break; case ATMLEC_MCAST: err = lec_mcast_attach(vcc, (int)arg); break; case ATMLEC_DATA: err = lec_vcc_attach(vcc, (void __user *)arg); break; } return err; } static struct atm_ioctl lane_ioctl_ops = { .owner = THIS_MODULE, .ioctl = lane_ioctl, }; static int __init 
lane_module_init(void)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *p;

	/* /proc/net/atm/lec lists the LE_ARP caches of all interfaces */
	p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops);
	if (!p) {
		pr_err("Unable to initialize /proc/net/atm/lec\n");
		return -ENOMEM;
	}
#endif

	/* hook ATMLEC_* ioctls so the LANE daemon can reach us */
	register_atm_ioctl(&lane_ioctl_ops);
	pr_info("lec.c: initialized\n");
	return 0;
}

static void __exit lane_module_cleanup(void)
{
	int i;

	remove_proc_entry("lec", atm_proc_root);

	deregister_atm_ioctl(&lane_ioctl_ops);

	/* tear down every LEC net_device created by lecd_attach() */
	for (i = 0; i < MAX_LEC_ITF; i++) {
		if (dev_lec[i] != NULL) {
			unregister_netdev(dev_lec[i]);
			free_netdev(dev_lec[i]);
			dev_lec[i] = NULL;
		}
	}
}

module_init(lane_module_init);
module_exit(lane_module_cleanup);

/*
 * LANE2: 3.1.3, LE_RESOLVE.request
 * Non force allocates memory and fills in *tlvs, fills in *sizeoftlvs.
 * If sizeoftlvs == NULL the default TLVs associated with this
 * lec will be used.
 * If dst_mac == NULL, targetless LE_ARP will be sent
 *
 * Returns 0 on success, -1 on lookup/allocation failure (callers treat
 * any non-zero value as failure).  In the force==0 path the caller owns
 * the kmemdup()'d *tlvs buffer and must kfree() it.
 */
static int lane2_resolve(struct net_device *dev, const u8 *dst_mac, int force,
			 u8 **tlvs, u32 *sizeoftlvs)
{
	unsigned long flags;
	struct lec_priv *priv = netdev_priv(dev);
	struct lec_arp_table *table;
	struct sk_buff *skb;
	int retval;

	if (force == 0) {
		/* answer from the local LE_ARP cache only */
		spin_lock_irqsave(&priv->lec_arp_lock, flags);
		table = lec_arp_find(priv, dst_mac);
		spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
		if (table == NULL)
			return -1;

		/*
		 * NOTE(review): table->tlvs is copied after the lock is
		 * dropped — presumably safe because entries are not freed
		 * concurrently here; confirm against lec_arp_remove().
		 */
		*tlvs = kmemdup(table->tlvs, table->sizeoftlvs, GFP_ATOMIC);
		if (*tlvs == NULL)
			return -1;

		*sizeoftlvs = table->sizeoftlvs;

		return 0;
	}

	/* force: ask the LANE daemon, optionally attaching caller's TLVs */
	if (sizeoftlvs == NULL)
		retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, NULL);
	else {
		skb = alloc_skb(*sizeoftlvs, GFP_ATOMIC);
		if (skb == NULL)
			return -1;
		skb->len = *sizeoftlvs;
		skb_copy_to_linear_data(skb, *tlvs, *sizeoftlvs);
		/* skb ownership passes to send_to_lecd() */
		retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, skb);
	}
	return retval;
}

/*
 * LANE2: 3.1.4, LE_ASSOCIATE.request
 * Associate the *tlvs with the *lan_dst address.
* Will overwrite any previous association * Returns 1 for success, 0 for failure (out of memory) * */ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst, const u8 *tlvs, u32 sizeoftlvs) { int retval; struct sk_buff *skb; struct lec_priv *priv = netdev_priv(dev); if (compare_ether_addr(lan_dst, dev->dev_addr)) return 0; /* not our mac address */ kfree(priv->tlvs); /* NULL if there was no previous association */ priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL); if (priv->tlvs == NULL) return 0; priv->sizeoftlvs = sizeoftlvs; skb = alloc_skb(sizeoftlvs, GFP_ATOMIC); if (skb == NULL) return 0; skb->len = sizeoftlvs; skb_copy_to_linear_data(skb, tlvs, sizeoftlvs); retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb); if (retval != 0) pr_info("lec.c: lane2_associate_req() failed\n"); /* * If the previous association has changed we must * somehow notify other LANE entities about the change */ return 1; } /* * LANE2: 3.1.5, LE_ASSOCIATE.indication * */ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr, const u8 *tlvs, u32 sizeoftlvs) { #if 0 int i = 0; #endif struct lec_priv *priv = netdev_priv(dev); #if 0 /* * Why have the TLVs in LE_ARP entries * since we do not use them? 
When you * uncomment this code, make sure the * TLVs get freed when entry is killed */ struct lec_arp_table *entry = lec_arp_find(priv, mac_addr); if (entry == NULL) return; /* should not happen */ kfree(entry->tlvs); entry->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL); if (entry->tlvs == NULL) return; entry->sizeoftlvs = sizeoftlvs; #endif #if 0 pr_info("\n"); pr_info("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs); while (i < sizeoftlvs) pr_cont("%02x ", tlvs[i++]); pr_cont("\n"); #endif /* tell MPOA about the TLVs we saw */ if (priv->lane2_ops && priv->lane2_ops->associate_indicator) { priv->lane2_ops->associate_indicator(dev, mac_addr, tlvs, sizeoftlvs); } } /* * Here starts what used to lec_arpc.c * * lec_arpc.c was added here when making * lane client modular. October 1997 */ #include <linux/types.h> #include <linux/timer.h> #include <linux/param.h> #include <asm/atomic.h> #include <linux/inetdevice.h> #include <net/route.h> #if 0 #define pr_debug(format, args...) /* #define pr_debug printk */ #endif #define DEBUG_ARP_TABLE 0 #define LEC_ARP_REFRESH_INTERVAL (3*HZ) static void lec_arp_check_expire(struct work_struct *work); static void lec_arp_expire_arp(unsigned long data); /* * Arp table funcs */ #define HASH(ch) (ch & (LEC_ARP_TABLE_SIZE - 1)) /* * Initialization of arp-cache */ static void lec_arp_init(struct lec_priv *priv) { unsigned short i; for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) INIT_HLIST_HEAD(&priv->lec_arp_tables[i]); INIT_HLIST_HEAD(&priv->lec_arp_empty_ones); INIT_HLIST_HEAD(&priv->lec_no_forward); INIT_HLIST_HEAD(&priv->mcast_fwds); spin_lock_init(&priv->lec_arp_lock); INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire); schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL); } static void lec_arp_clear_vccs(struct lec_arp_table *entry) { if (entry->vcc) { struct atm_vcc *vcc = entry->vcc; struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); struct net_device *dev = (struct net_device *)vcc->proto_data; vcc->pop = 
vpriv->old_pop;	/* restore the pop we interposed in lec_vcc_attach() */
		if (vpriv->xoff)
			netif_wake_queue(dev);	/* don't leave the queue stopped */
		kfree(vpriv);
		vcc->user_back = NULL;
		vcc->push = entry->old_push;
		vcc_release_async(vcc, -EPIPE);
		entry->vcc = NULL;
	}
	if (entry->recv_vcc) {
		entry->recv_vcc->push = entry->old_recv_push;
		vcc_release_async(entry->recv_vcc, -EPIPE);
		entry->recv_vcc = NULL;
	}
}

/*
 * Insert entry to lec_arp_table
 * LANE2: Add to the end of the list to satisfy 8.1.13
 */
static inline void
lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry)
{
	struct hlist_head *tmp;

	/* bucket is chosen by the LAST byte of the MAC address */
	tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])];
	hlist_add_head(&entry->next, tmp);

	pr_debug("Added entry:%pM\n", entry->mac_addr);
}

/*
 * Remove entry from lec_arp_table
 *
 * Unhashes the entry, stops its timer and, if no other cache entry
 * still points at the same ATM address, tears down its VCC(s) too.
 * Caller holds priv->lec_arp_lock and still owns a reference.
 */
static int
lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
{
	struct hlist_node *node;
	struct lec_arp_table *entry;
	int i, remove_vcc = 1;

	if (!to_remove)
		return -1;

	hlist_del(&to_remove->next);
	del_timer(&to_remove->timer);

	/*
	 * If this is the only MAC connected to this VCC,
	 * also tear down the VCC
	 */
	if (to_remove->status >= ESI_FLUSH_PENDING) {
		/*
		 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT
		 */
		for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
			hlist_for_each_entry(entry, node,
					     &priv->lec_arp_tables[i], next) {
				/*
				 * to_remove is already unhashed, so any hit
				 * here is a *different* entry sharing the
				 * same ATM address — keep the VCC alive.
				 */
				if (memcmp(to_remove->atm_addr,
					   entry->atm_addr, ATM_ESA_LEN) == 0) {
					remove_vcc = 0;
					break;
				}
			}
		}
	}
	if (remove_vcc)
		lec_arp_clear_vccs(to_remove);
	skb_queue_purge(&to_remove->tx_wait);	/* FIXME: good place for this?
*/ pr_debug("Removed entry:%pM\n", to_remove->mac_addr); return 0; } #if DEBUG_ARP_TABLE static const char *get_status_string(unsigned char st) { switch (st) { case ESI_UNKNOWN: return "ESI_UNKNOWN"; case ESI_ARP_PENDING: return "ESI_ARP_PENDING"; case ESI_VC_PENDING: return "ESI_VC_PENDING"; case ESI_FLUSH_PENDING: return "ESI_FLUSH_PENDING"; case ESI_FORWARD_DIRECT: return "ESI_FORWARD_DIRECT"; } return "<UNKNOWN>"; } static void dump_arp_table(struct lec_priv *priv) { struct hlist_node *node; struct lec_arp_table *rulla; char buf[256]; int i, j, offset; pr_info("Dump %p:\n", priv); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry(rulla, node, &priv->lec_arp_tables[i], next) { offset = 0; offset += sprintf(buf, "%d: %p\n", i, rulla); offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr); offset += sprintf(buf + offset, " Atm:"); for (j = 0; j < ATM_ESA_LEN; j++) { offset += sprintf(buf + offset, "%2.2x ", rulla->atm_addr[j] & 0xff); } offset += sprintf(buf + offset, "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ", rulla->vcc ? rulla->vcc->vpi : 0, rulla->vcc ? rulla->vcc->vci : 0, rulla->recv_vcc ? rulla->recv_vcc-> vpi : 0, rulla->recv_vcc ? rulla->recv_vcc-> vci : 0, rulla->last_used, rulla->timestamp, rulla->no_tries); offset += sprintf(buf + offset, "Flags:%x, Packets_flooded:%x, Status: %s ", rulla->flags, rulla->packets_flooded, get_status_string(rulla->status)); pr_info("%s\n", buf); } } if (!hlist_empty(&priv->lec_no_forward)) pr_info("No forward\n"); hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) { offset = 0; offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr); offset += sprintf(buf + offset, " Atm:"); for (j = 0; j < ATM_ESA_LEN; j++) { offset += sprintf(buf + offset, "%2.2x ", rulla->atm_addr[j] & 0xff); } offset += sprintf(buf + offset, "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ", rulla->vcc ? 
rulla->vcc->vpi : 0, rulla->vcc ? rulla->vcc->vci : 0, rulla->recv_vcc ? rulla->recv_vcc->vpi : 0, rulla->recv_vcc ? rulla->recv_vcc->vci : 0, rulla->last_used, rulla->timestamp, rulla->no_tries); offset += sprintf(buf + offset, "Flags:%x, Packets_flooded:%x, Status: %s ", rulla->flags, rulla->packets_flooded, get_status_string(rulla->status)); pr_info("%s\n", buf); } if (!hlist_empty(&priv->lec_arp_empty_ones)) pr_info("Empty ones\n"); hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) { offset = 0; offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr); offset += sprintf(buf + offset, " Atm:"); for (j = 0; j < ATM_ESA_LEN; j++) { offset += sprintf(buf + offset, "%2.2x ", rulla->atm_addr[j] & 0xff); } offset += sprintf(buf + offset, "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ", rulla->vcc ? rulla->vcc->vpi : 0, rulla->vcc ? rulla->vcc->vci : 0, rulla->recv_vcc ? rulla->recv_vcc->vpi : 0, rulla->recv_vcc ? rulla->recv_vcc->vci : 0, rulla->last_used, rulla->timestamp, rulla->no_tries); offset += sprintf(buf + offset, "Flags:%x, Packets_flooded:%x, Status: %s ", rulla->flags, rulla->packets_flooded, get_status_string(rulla->status)); pr_info("%s", buf); } if (!hlist_empty(&priv->mcast_fwds)) pr_info("Multicast Forward VCCs\n"); hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) { offset = 0; offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr); offset += sprintf(buf + offset, " Atm:"); for (j = 0; j < ATM_ESA_LEN; j++) { offset += sprintf(buf + offset, "%2.2x ", rulla->atm_addr[j] & 0xff); } offset += sprintf(buf + offset, "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ", rulla->vcc ? rulla->vcc->vpi : 0, rulla->vcc ? rulla->vcc->vci : 0, rulla->recv_vcc ? rulla->recv_vcc->vpi : 0, rulla->recv_vcc ? 
rulla->recv_vcc->vci : 0, rulla->last_used, rulla->timestamp, rulla->no_tries); offset += sprintf(buf + offset, "Flags:%x, Packets_flooded:%x, Status: %s ", rulla->flags, rulla->packets_flooded, get_status_string(rulla->status)); pr_info("%s\n", buf); } } #else #define dump_arp_table(priv) do { } while (0) #endif /* * Destruction of arp-cache */ static void lec_arp_destroy(struct lec_priv *priv) { unsigned long flags; struct hlist_node *node, *next; struct lec_arp_table *entry; int i; cancel_delayed_work_sync(&priv->lec_arp_work); /* * Remove all entries */ spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { lec_arp_remove(priv, entry); lec_arp_put(entry); } INIT_HLIST_HEAD(&priv->lec_arp_tables[i]); } hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { del_timer_sync(&entry->timer); lec_arp_clear_vccs(entry); hlist_del(&entry->next); lec_arp_put(entry); } INIT_HLIST_HEAD(&priv->lec_arp_empty_ones); hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) { del_timer_sync(&entry->timer); lec_arp_clear_vccs(entry); hlist_del(&entry->next); lec_arp_put(entry); } INIT_HLIST_HEAD(&priv->lec_no_forward); hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) { /* No timer, LANEv2 7.1.20 and 2.3.5.3 */ lec_arp_clear_vccs(entry); hlist_del(&entry->next); lec_arp_put(entry); } INIT_HLIST_HEAD(&priv->mcast_fwds); priv->mcast_vcc = NULL; spin_unlock_irqrestore(&priv->lec_arp_lock, flags); } /* * Find entry by mac_address */ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, const unsigned char *mac_addr) { struct hlist_node *node; struct hlist_head *head; struct lec_arp_table *entry; pr_debug("%pM\n", mac_addr); head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])]; hlist_for_each_entry(entry, node, head, next) { if (!compare_ether_addr(mac_addr, entry->mac_addr)) return entry; } 
return NULL; } static struct lec_arp_table *make_entry(struct lec_priv *priv, const unsigned char *mac_addr) { struct lec_arp_table *to_return; to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC); if (!to_return) { pr_info("LEC: Arp entry kmalloc failed\n"); return NULL; } memcpy(to_return->mac_addr, mac_addr, ETH_ALEN); INIT_HLIST_NODE(&to_return->next); setup_timer(&to_return->timer, lec_arp_expire_arp, (unsigned long)to_return); to_return->last_used = jiffies; to_return->priv = priv; skb_queue_head_init(&to_return->tx_wait); atomic_set(&to_return->usage, 1); return to_return; } /* Arp sent timer expired */ static void lec_arp_expire_arp(unsigned long data) { struct lec_arp_table *entry; entry = (struct lec_arp_table *)data; pr_debug("\n"); if (entry->status == ESI_ARP_PENDING) { if (entry->no_tries <= entry->priv->max_retry_count) { if (entry->is_rdesc) send_to_lecd(entry->priv, l_rdesc_arp_xmt, entry->mac_addr, NULL, NULL); else send_to_lecd(entry->priv, l_arp_xmt, entry->mac_addr, NULL, NULL); entry->no_tries++; } mod_timer(&entry->timer, jiffies + (1 * HZ)); } } /* Unknown/unused vcc expire, remove associated entry */ static void lec_arp_expire_vcc(unsigned long data) { unsigned long flags; struct lec_arp_table *to_remove = (struct lec_arp_table *)data; struct lec_priv *priv = (struct lec_priv *)to_remove->priv; del_timer(&to_remove->timer); pr_debug("%p %p: vpi:%d vci:%d\n", to_remove, priv, to_remove->vcc ? to_remove->recv_vcc->vpi : 0, to_remove->vcc ? 
to_remove->recv_vcc->vci : 0); spin_lock_irqsave(&priv->lec_arp_lock, flags); hlist_del(&to_remove->next); spin_unlock_irqrestore(&priv->lec_arp_lock, flags); lec_arp_clear_vccs(to_remove); lec_arp_put(to_remove); } static bool __lec_arp_check_expire(struct lec_arp_table *entry, unsigned long now, struct lec_priv *priv) { unsigned long time_to_check; if ((entry->flags) & LEC_REMOTE_FLAG && priv->topology_change) time_to_check = priv->forward_delay_time; else time_to_check = priv->aging_time; pr_debug("About to expire: %lx - %lx > %lx\n", now, entry->last_used, time_to_check); if (time_after(now, entry->last_used + time_to_check) && !(entry->flags & LEC_PERMANENT_FLAG) && !(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */ /* Remove entry */ pr_debug("Entry timed out\n"); lec_arp_remove(priv, entry); lec_arp_put(entry); } else { /* Something else */ if ((entry->status == ESI_VC_PENDING || entry->status == ESI_ARP_PENDING) && time_after_eq(now, entry->timestamp + priv->max_unknown_frame_time)) { entry->timestamp = jiffies; entry->packets_flooded = 0; if (entry->status == ESI_VC_PENDING) send_to_lecd(priv, l_svc_setup, entry->mac_addr, entry->atm_addr, NULL); } if (entry->status == ESI_FLUSH_PENDING && time_after_eq(now, entry->timestamp + priv->path_switching_delay)) { lec_arp_hold(entry); return true; } } return false; } /* * Expire entries. * 1. Re-set timer * 2. For each entry, delete entries that have aged past the age limit. * 3. For each entry, depending on the status of the entry, perform * the following maintenance. * a. If status is ESI_VC_PENDING or ESI_ARP_PENDING then if the * tick_count is above the max_unknown_frame_time, clear * the tick_count to zero and clear the packets_flooded counter * to zero. This supports the packet rate limit per address * while flooding unknowns. * b. If the status is ESI_FLUSH_PENDING and the tick_count is greater * than or equal to the path_switching_delay, change the status * to ESI_FORWARD_DIRECT. 
This causes the flush period to end
 *       regardless of the progress of the flush protocol.
 */
static void lec_arp_check_expire(struct work_struct *work)
{
	unsigned long flags;
	struct lec_priv *priv =
		container_of(work, struct lec_priv, lec_arp_work.work);
	struct hlist_node *node, *next;
	struct lec_arp_table *entry;
	unsigned long now;
	int i;

	pr_debug("%p\n", priv);
	now = jiffies;
restart:
	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
		hlist_for_each_entry_safe(entry, node, next,
					  &priv->lec_arp_tables[i], next) {
			if (__lec_arp_check_expire(entry, now, priv)) {
				struct sk_buff *skb;
				struct atm_vcc *vcc = entry->vcc;

				/*
				 * Flush period over: drop the lock (lec_send
				 * may sleep/queue), drain the frames parked
				 * while flushing, then promote the entry.
				 * The hash chains may have changed while
				 * unlocked, so rescan from the top.
				 */
				spin_unlock_irqrestore(&priv->lec_arp_lock,
						       flags);
				while ((skb = skb_dequeue(&entry->tx_wait)))
					lec_send(vcc, skb);
				entry->last_used = jiffies;
				entry->status = ESI_FORWARD_DIRECT;
				lec_arp_put(entry);	/* ref from __lec_arp_check_expire */

				goto restart;
			}
		}
	}
	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);

	/* self-rearming: run again after LEC_ARP_REFRESH_INTERVAL */
	schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
}

/*
 * Try to find vcc where mac_address is attached.
 *
 * Returns the VCC to transmit on (possibly the BUS multicast VCC for
 * pending/unknown destinations) or NULL; on some paths *ret_entry is
 * set to a held cache entry the caller must release.
 */
static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
				       const unsigned char *mac_to_find,
				       int is_rdesc,
				       struct lec_arp_table **ret_entry)
{
	unsigned long flags;
	struct lec_arp_table *entry;
	struct atm_vcc *found;

	/* group/broadcast MACs go to the BUS (LANE1 always, LANE2 for bus_mac) */
	if (mac_to_find[0] & 0x01) {
		switch (priv->lane_version) {
		case 1:
			return priv->mcast_vcc;
		case 2:	/* LANE2 wants arp for multicast addresses */
			if (!compare_ether_addr(mac_to_find, bus_mac))
				return priv->mcast_vcc;
			break;
		default:
			break;
		}
	}

	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	entry = lec_arp_find(priv, mac_to_find);

	if (entry) {
		if (entry->status == ESI_FORWARD_DIRECT) {
			/* Connection Ok */
			entry->last_used = jiffies;
			lec_arp_hold(entry);
			*ret_entry = entry;
			found = entry->vcc;
			goto out;
		}
		/*
		 * If the LE_ARP cache entry is still pending, reset count to 0
		 * so another LE_ARP request can be made for this frame.
*/ if (entry->status == ESI_ARP_PENDING) entry->no_tries = 0; /* * Data direct VC not yet set up, check to see if the unknown * frame count is greater than the limit. If the limit has * not been reached, allow the caller to send packet to * BUS. */ if (entry->status != ESI_FLUSH_PENDING && entry->packets_flooded < priv->maximum_unknown_frame_count) { entry->packets_flooded++; pr_debug("Flooding..\n"); found = priv->mcast_vcc; goto out; } /* * We got here because entry->status == ESI_FLUSH_PENDING * or BUS flood limit was reached for an entry which is * in ESI_ARP_PENDING or ESI_VC_PENDING state. */ lec_arp_hold(entry); *ret_entry = entry; pr_debug("entry->status %d entry->vcc %p\n", entry->status, entry->vcc); found = NULL; } else { /* No matching entry was found */ entry = make_entry(priv, mac_to_find); pr_debug("Making entry\n"); if (!entry) { found = priv->mcast_vcc; goto out; } lec_arp_add(priv, entry); /* We want arp-request(s) to be sent */ entry->packets_flooded = 1; entry->status = ESI_ARP_PENDING; entry->no_tries = 1; entry->last_used = entry->timestamp = jiffies; entry->is_rdesc = is_rdesc; if (entry->is_rdesc) send_to_lecd(priv, l_rdesc_arp_xmt, mac_to_find, NULL, NULL); else send_to_lecd(priv, l_arp_xmt, mac_to_find, NULL, NULL); entry->timer.expires = jiffies + (1 * HZ); entry->timer.function = lec_arp_expire_arp; add_timer(&entry->timer); found = priv->mcast_vcc; } out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); return found; } static int lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr, unsigned long permanent) { unsigned long flags; struct hlist_node *node, *next; struct lec_arp_table *entry; int i; pr_debug("\n"); spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) && (permanent || !(entry->flags & LEC_PERMANENT_FLAG))) { lec_arp_remove(priv, entry); 
lec_arp_put(entry); } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); return 0; } } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); return -1; } /* * Notifies: Response to arp_request (atm_addr != NULL) */ static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr, const unsigned char *atm_addr, unsigned long remoteflag, unsigned int targetless_le_arp) { unsigned long flags; struct hlist_node *node, *next; struct lec_arp_table *entry, *tmp; int i; pr_debug("%smac:%pM\n", (targetless_le_arp) ? "targetless " : "", mac_addr); spin_lock_irqsave(&priv->lec_arp_lock, flags); entry = lec_arp_find(priv, mac_addr); if (entry == NULL && targetless_le_arp) goto out; /* * LANE2: ignore targetless LE_ARPs for which * we have no entry in the cache. 7.1.30 */ if (!hlist_empty(&priv->lec_arp_empty_ones)) { hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) { hlist_del(&entry->next); del_timer(&entry->timer); tmp = lec_arp_find(priv, mac_addr); if (tmp) { del_timer(&tmp->timer); tmp->status = ESI_FORWARD_DIRECT; memcpy(tmp->atm_addr, atm_addr, ATM_ESA_LEN); tmp->vcc = entry->vcc; tmp->old_push = entry->old_push; tmp->last_used = jiffies; del_timer(&entry->timer); lec_arp_put(entry); entry = tmp; } else { entry->status = ESI_FORWARD_DIRECT; memcpy(entry->mac_addr, mac_addr, ETH_ALEN); entry->last_used = jiffies; lec_arp_add(priv, entry); } if (remoteflag) entry->flags |= LEC_REMOTE_FLAG; else entry->flags &= ~LEC_REMOTE_FLAG; pr_debug("After update\n"); dump_arp_table(priv); goto out; } } } entry = lec_arp_find(priv, mac_addr); if (!entry) { entry = make_entry(priv, mac_addr); if (!entry) goto out; entry->status = ESI_UNKNOWN; lec_arp_add(priv, entry); /* Temporary, changes before end of function */ } memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN); del_timer(&entry->timer); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry(tmp, node, 
&priv->lec_arp_tables[i], next) { if (entry != tmp && !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) { /* Vcc to this host exists */ if (tmp->status > ESI_VC_PENDING) { /* * ESI_FLUSH_PENDING, * ESI_FORWARD_DIRECT */ entry->vcc = tmp->vcc; entry->old_push = tmp->old_push; } entry->status = tmp->status; break; } } } if (remoteflag) entry->flags |= LEC_REMOTE_FLAG; else entry->flags &= ~LEC_REMOTE_FLAG; if (entry->status == ESI_ARP_PENDING || entry->status == ESI_UNKNOWN) { entry->status = ESI_VC_PENDING; send_to_lecd(priv, l_svc_setup, entry->mac_addr, atm_addr, NULL); } pr_debug("After update2\n"); dump_arp_table(priv); out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); } /* * Notifies: Vcc setup ready */ static void lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data, struct atm_vcc *vcc, void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb)) { unsigned long flags; struct hlist_node *node; struct lec_arp_table *entry; int i, found_entry = 0; spin_lock_irqsave(&priv->lec_arp_lock, flags); /* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */ if (ioc_data->receive == 2) { pr_debug("LEC_ARP: Attaching mcast forward\n"); #if 0 entry = lec_arp_find(priv, bus_mac); if (!entry) { pr_info("LEC_ARP: Multicast entry not found!\n"); goto out; } memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); entry->recv_vcc = vcc; entry->old_recv_push = old_push; #endif entry = make_entry(priv, bus_mac); if (entry == NULL) goto out; del_timer(&entry->timer); memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); entry->recv_vcc = vcc; entry->old_recv_push = old_push; hlist_add_head(&entry->next, &priv->mcast_fwds); goto out; } else if (ioc_data->receive == 1) { /* * Vcc which we don't want to make default vcc, * attach it anyway. 
*/ pr_debug("LEC_ARP:Attaching data direct, not default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", ioc_data->atm_addr[0], ioc_data->atm_addr[1], ioc_data->atm_addr[2], ioc_data->atm_addr[3], ioc_data->atm_addr[4], ioc_data->atm_addr[5], ioc_data->atm_addr[6], ioc_data->atm_addr[7], ioc_data->atm_addr[8], ioc_data->atm_addr[9], ioc_data->atm_addr[10], ioc_data->atm_addr[11], ioc_data->atm_addr[12], ioc_data->atm_addr[13], ioc_data->atm_addr[14], ioc_data->atm_addr[15], ioc_data->atm_addr[16], ioc_data->atm_addr[17], ioc_data->atm_addr[18], ioc_data->atm_addr[19]); entry = make_entry(priv, bus_mac); if (entry == NULL) goto out; memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); memset(entry->mac_addr, 0, ETH_ALEN); entry->recv_vcc = vcc; entry->old_recv_push = old_push; entry->status = ESI_UNKNOWN; entry->timer.expires = jiffies + priv->vcc_timeout_period; entry->timer.function = lec_arp_expire_vcc; hlist_add_head(&entry->next, &priv->lec_no_forward); add_timer(&entry->timer); dump_arp_table(priv); goto out; } pr_debug("LEC_ARP:Attaching data direct, default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", ioc_data->atm_addr[0], ioc_data->atm_addr[1], ioc_data->atm_addr[2], ioc_data->atm_addr[3], ioc_data->atm_addr[4], ioc_data->atm_addr[5], ioc_data->atm_addr[6], ioc_data->atm_addr[7], ioc_data->atm_addr[8], ioc_data->atm_addr[9], ioc_data->atm_addr[10], ioc_data->atm_addr[11], ioc_data->atm_addr[12], ioc_data->atm_addr[13], ioc_data->atm_addr[14], ioc_data->atm_addr[15], ioc_data->atm_addr[16], ioc_data->atm_addr[17], ioc_data->atm_addr[18], ioc_data->atm_addr[19]); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { if (memcmp (ioc_data->atm_addr, entry->atm_addr, ATM_ESA_LEN) == 0) { pr_debug("LEC_ARP: Attaching data direct\n"); pr_debug("Currently -> Vcc: %d, Rvcc:%d\n", 
entry->vcc ? entry->vcc->vci : 0, entry->recv_vcc ? entry->recv_vcc-> vci : 0); found_entry = 1; del_timer(&entry->timer); entry->vcc = vcc; entry->old_push = old_push; if (entry->status == ESI_VC_PENDING) { if (priv->maximum_unknown_frame_count == 0) entry->status = ESI_FORWARD_DIRECT; else { entry->timestamp = jiffies; entry->status = ESI_FLUSH_PENDING; #if 0 send_to_lecd(priv, l_flush_xmt, NULL, entry->atm_addr, NULL); #endif } } else { /* * They were forming a connection * to us, and we to them. Our * ATM address is numerically lower * than theirs, so we make connection * we formed into default VCC (8.1.11). * Connection they made gets torn * down. This might confuse some * clients. Can be changed if * someone reports trouble... */ ; } } } } if (found_entry) { pr_debug("After vcc was added\n"); dump_arp_table(priv); goto out; } /* * Not found, snatch address from first data packet that arrives * from this vcc */ entry = make_entry(priv, bus_mac); if (!entry) goto out; entry->vcc = vcc; entry->old_push = old_push; memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); memset(entry->mac_addr, 0, ETH_ALEN); entry->status = ESI_UNKNOWN; hlist_add_head(&entry->next, &priv->lec_arp_empty_ones); entry->timer.expires = jiffies + priv->vcc_timeout_period; entry->timer.function = lec_arp_expire_vcc; add_timer(&entry->timer); pr_debug("After vcc was added\n"); dump_arp_table(priv); out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); } static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id) { unsigned long flags; struct hlist_node *node; struct lec_arp_table *entry; int i; pr_debug("%lx\n", tran_id); restart: spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { if (entry->flush_tran_id == tran_id && entry->status == ESI_FLUSH_PENDING) { struct sk_buff *skb; struct atm_vcc *vcc = entry->vcc; lec_arp_hold(entry); 
spin_unlock_irqrestore(&priv->lec_arp_lock, flags); while ((skb = skb_dequeue(&entry->tx_wait))) lec_send(vcc, skb); entry->last_used = jiffies; entry->status = ESI_FORWARD_DIRECT; lec_arp_put(entry); pr_debug("LEC_ARP: Flushed\n"); goto restart; } } } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); dump_arp_table(priv); } static void lec_set_flush_tran_id(struct lec_priv *priv, const unsigned char *atm_addr, unsigned long tran_id) { unsigned long flags; struct hlist_node *node; struct lec_arp_table *entry; int i; spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) { entry->flush_tran_id = tran_id; pr_debug("Set flush transaction id to %lx for %p\n", tran_id, entry); } } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); } static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc) { unsigned long flags; unsigned char mac_addr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; struct lec_arp_table *to_add; struct lec_vcc_priv *vpriv; int err = 0; vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL); if (!vpriv) return -ENOMEM; vpriv->xoff = 0; vpriv->old_pop = vcc->pop; vcc->user_back = vpriv; vcc->pop = lec_pop; spin_lock_irqsave(&priv->lec_arp_lock, flags); to_add = make_entry(priv, mac_addr); if (!to_add) { vcc->pop = vpriv->old_pop; kfree(vpriv); err = -ENOMEM; goto out; } memcpy(to_add->atm_addr, vcc->remote.sas_addr.prv, ATM_ESA_LEN); to_add->status = ESI_FORWARD_DIRECT; to_add->flags |= LEC_PERMANENT_FLAG; to_add->vcc = vcc; to_add->old_push = vcc->push; vcc->push = lec_push; priv->mcast_vcc = vcc; lec_arp_add(priv, to_add); out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); return err; } static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc) { unsigned long flags; struct hlist_node *node, *next; struct lec_arp_table *entry; int i; pr_debug("LEC_ARP: lec_vcc_close vpi:%d 
vci:%d\n", vcc->vpi, vcc->vci); dump_arp_table(priv); spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { if (vcc == entry->vcc) { lec_arp_remove(priv, entry); lec_arp_put(entry); if (priv->mcast_vcc == vcc) priv->mcast_vcc = NULL; } } } hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { if (entry->vcc == vcc) { lec_arp_clear_vccs(entry); del_timer(&entry->timer); hlist_del(&entry->next); lec_arp_put(entry); } } hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) { if (entry->recv_vcc == vcc) { lec_arp_clear_vccs(entry); del_timer(&entry->timer); hlist_del(&entry->next); lec_arp_put(entry); } } hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) { if (entry->recv_vcc == vcc) { lec_arp_clear_vccs(entry); /* No timer, LANEv2 7.1.20 and 2.3.5.3 */ hlist_del(&entry->next); lec_arp_put(entry); } } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); dump_arp_table(priv); } static void lec_arp_check_empties(struct lec_priv *priv, struct atm_vcc *vcc, struct sk_buff *skb) { unsigned long flags; struct hlist_node *node, *next; struct lec_arp_table *entry, *tmp; struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data; unsigned char *src; #ifdef CONFIG_TR struct lecdatahdr_8025 *tr_hdr = (struct lecdatahdr_8025 *)skb->data; if (priv->is_trdev) src = tr_hdr->h_source; else #endif src = hdr->h_source; spin_lock_irqsave(&priv->lec_arp_lock, flags); hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { if (vcc == entry->vcc) { del_timer(&entry->timer); memcpy(entry->mac_addr, src, ETH_ALEN); entry->status = ESI_FORWARD_DIRECT; entry->last_used = jiffies; /* We might have got an entry */ tmp = lec_arp_find(priv, src); if (tmp) { lec_arp_remove(priv, tmp); lec_arp_put(tmp); } hlist_del(&entry->next); lec_arp_add(priv, entry); goto out; } } pr_debug("LEC_ARP: 
Arp_check_empties: entry not found!\n"); out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); } MODULE_LICENSE("GPL");
gpl-2.0
javilonas/Thoth-GT-I9300-Sammy
net/atm/proc.c
2365
11748
/* net/atm/proc.c - ATM /proc interface * * Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA * * seq_file api usage by romieu@fr.zoreil.com * * Evaluating the efficiency of the whole thing if left as an exercise to * the reader. */ #include <linux/module.h> /* for EXPORT_SYMBOL */ #include <linux/string.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/stat.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/errno.h> #include <linux/atm.h> #include <linux/atmdev.h> #include <linux/netdevice.h> #include <linux/atmclip.h> #include <linux/init.h> /* for __init */ #include <linux/slab.h> #include <net/net_namespace.h> #include <net/atmclip.h> #include <linux/uaccess.h> #include <linux/param.h> /* for HZ */ #include <asm/atomic.h> #include "resources.h" #include "common.h" /* atm_proc_init prototype */ #include "signaling.h" /* to get sigd - ugly too */ static ssize_t proc_dev_atm_read(struct file *file, char __user *buf, size_t count, loff_t *pos); static const struct file_operations proc_atm_dev_ops = { .owner = THIS_MODULE, .read = proc_dev_atm_read, .llseek = noop_llseek, }; static void add_stats(struct seq_file *seq, const char *aal, const struct k_atm_aal_stats *stats) { seq_printf(seq, "%s ( %d %d %d %d %d )", aal, atomic_read(&stats->tx), atomic_read(&stats->tx_err), atomic_read(&stats->rx), atomic_read(&stats->rx_err), atomic_read(&stats->rx_drop)); } static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev) { int i; seq_printf(seq, "%3d %-8s", dev->number, dev->type); for (i = 0; i < ESI_LEN; i++) seq_printf(seq, "%02x", dev->esi[i]); seq_puts(seq, " "); add_stats(seq, "0", &dev->stats.aal0); seq_puts(seq, " "); add_stats(seq, "5", &dev->stats.aal5); seq_printf(seq, "\t[%d]", atomic_read(&dev->refcnt)); seq_putc(seq, '\n'); } struct vcc_state { int bucket; struct sock *sk; int family; }; static inline int compare_family(struct sock *sk, int family) { return !family || 
(sk->sk_family == family); } static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l) { struct sock *sk = *sock; if (sk == SEQ_START_TOKEN) { for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) { struct hlist_head *head = &vcc_hash[*bucket]; sk = hlist_empty(head) ? NULL : __sk_head(head); if (sk) break; } l--; } try_again: for (; sk; sk = sk_next(sk)) { l -= compare_family(sk, family); if (l < 0) goto out; } if (!sk && ++*bucket < VCC_HTABLE_SIZE) { sk = sk_head(&vcc_hash[*bucket]); goto try_again; } sk = SEQ_START_TOKEN; out: *sock = sk; return (l < 0); } static inline void *vcc_walk(struct vcc_state *state, loff_t l) { return __vcc_walk(&state->sk, state->family, &state->bucket, l) ? state : NULL; } static int __vcc_seq_open(struct inode *inode, struct file *file, int family, const struct seq_operations *ops) { struct vcc_state *state; state = __seq_open_private(file, ops, sizeof(*state)); if (state == NULL) return -ENOMEM; state->family = family; return 0; } static void *vcc_seq_start(struct seq_file *seq, loff_t *pos) __acquires(vcc_sklist_lock) { struct vcc_state *state = seq->private; loff_t left = *pos; read_lock(&vcc_sklist_lock); state->sk = SEQ_START_TOKEN; return left ? 
vcc_walk(state, left) : SEQ_START_TOKEN; } static void vcc_seq_stop(struct seq_file *seq, void *v) __releases(vcc_sklist_lock) { read_unlock(&vcc_sklist_lock); } static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct vcc_state *state = seq->private; v = vcc_walk(state, 1); *pos += !!PTR_ERR(v); return v; } static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc) { static const char *const class_name[] = { "off", "UBR", "CBR", "VBR", "ABR"}; static const char *const aal_name[] = { "---", "1", "2", "3/4", /* 0- 3 */ "???", "5", "???", "???", /* 4- 7 */ "???", "???", "???", "???", /* 8-11 */ "???", "0", "???", "???"}; /* 12-15 */ seq_printf(seq, "%3d %3d %5d %-3s %7d %-5s %7d %-6s", vcc->dev->number, vcc->vpi, vcc->vci, vcc->qos.aal >= ARRAY_SIZE(aal_name) ? "err" : aal_name[vcc->qos.aal], vcc->qos.rxtp.min_pcr, class_name[vcc->qos.rxtp.traffic_class], vcc->qos.txtp.min_pcr, class_name[vcc->qos.txtp.traffic_class]); if (test_bit(ATM_VF_IS_CLIP, &vcc->flags)) { struct clip_vcc *clip_vcc = CLIP_VCC(vcc); struct net_device *dev; dev = clip_vcc->entry ? clip_vcc->entry->neigh->dev : NULL; seq_printf(seq, "CLIP, Itf:%s, Encap:", dev ? dev->name : "none?"); seq_printf(seq, "%s", clip_vcc->encap ? 
"LLC/SNAP" : "None"); } seq_putc(seq, '\n'); } static const char *vcc_state(struct atm_vcc *vcc) { static const char *const map[] = { ATM_VS2TXT_MAP }; return map[ATM_VF2VS(vcc->flags)]; } static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc) { struct sock *sk = sk_atm(vcc); seq_printf(seq, "%pK ", vcc); if (!vcc->dev) seq_printf(seq, "Unassigned "); else seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi, vcc->vci); switch (sk->sk_family) { case AF_ATMPVC: seq_printf(seq, "PVC"); break; case AF_ATMSVC: seq_printf(seq, "SVC"); break; default: seq_printf(seq, "%3d", sk->sk_family); } seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n", vcc->flags, sk->sk_err, sk_wmem_alloc_get(sk), sk->sk_sndbuf, sk_rmem_alloc_get(sk), sk->sk_rcvbuf, atomic_read(&sk->sk_refcnt)); } static void svc_info(struct seq_file *seq, struct atm_vcc *vcc) { if (!vcc->dev) seq_printf(seq, sizeof(void *) == 4 ? "N/A@%pK%10s" : "N/A@%pK%2s", vcc, ""); else seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi, vcc->vci); seq_printf(seq, "%-10s ", vcc_state(vcc)); seq_printf(seq, "%s%s", vcc->remote.sas_addr.pub, *vcc->remote.sas_addr.pub && *vcc->remote.sas_addr.prv ? "+" : ""); if (*vcc->remote.sas_addr.prv) { int i; for (i = 0; i < ATM_ESA_LEN; i++) seq_printf(seq, "%02x", vcc->remote.sas_addr.prv[i]); } seq_putc(seq, '\n'); } static int atm_dev_seq_show(struct seq_file *seq, void *v) { static char atm_dev_banner[] = "Itf Type ESI/\"MAC\"addr " "AAL(TX,err,RX,err,drop) ... 
[refcnt]\n"; if (v == &atm_devs) seq_puts(seq, atm_dev_banner); else { struct atm_dev *dev = list_entry(v, struct atm_dev, dev_list); atm_dev_info(seq, dev); } return 0; } static const struct seq_operations atm_dev_seq_ops = { .start = atm_dev_seq_start, .next = atm_dev_seq_next, .stop = atm_dev_seq_stop, .show = atm_dev_seq_show, }; static int atm_dev_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &atm_dev_seq_ops); } static const struct file_operations devices_seq_fops = { .open = atm_dev_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int pvc_seq_show(struct seq_file *seq, void *v) { static char atm_pvc_banner[] = "Itf VPI VCI AAL RX(PCR,Class) TX(PCR,Class)\n"; if (v == SEQ_START_TOKEN) seq_puts(seq, atm_pvc_banner); else { struct vcc_state *state = seq->private; struct atm_vcc *vcc = atm_sk(state->sk); pvc_info(seq, vcc); } return 0; } static const struct seq_operations pvc_seq_ops = { .start = vcc_seq_start, .next = vcc_seq_next, .stop = vcc_seq_stop, .show = pvc_seq_show, }; static int pvc_seq_open(struct inode *inode, struct file *file) { return __vcc_seq_open(inode, file, PF_ATMPVC, &pvc_seq_ops); } static const struct file_operations pvc_seq_fops = { .open = pvc_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static int vcc_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_printf(seq, sizeof(void *) == 4 ? 
"%-8s%s" : "%-16s%s", "Address ", "Itf VPI VCI Fam Flags Reply " "Send buffer Recv buffer [refcnt]\n"); } else { struct vcc_state *state = seq->private; struct atm_vcc *vcc = atm_sk(state->sk); vcc_info(seq, vcc); } return 0; } static const struct seq_operations vcc_seq_ops = { .start = vcc_seq_start, .next = vcc_seq_next, .stop = vcc_seq_stop, .show = vcc_seq_show, }; static int vcc_seq_open(struct inode *inode, struct file *file) { return __vcc_seq_open(inode, file, 0, &vcc_seq_ops); } static const struct file_operations vcc_seq_fops = { .open = vcc_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static int svc_seq_show(struct seq_file *seq, void *v) { static const char atm_svc_banner[] = "Itf VPI VCI State Remote\n"; if (v == SEQ_START_TOKEN) seq_puts(seq, atm_svc_banner); else { struct vcc_state *state = seq->private; struct atm_vcc *vcc = atm_sk(state->sk); svc_info(seq, vcc); } return 0; } static const struct seq_operations svc_seq_ops = { .start = vcc_seq_start, .next = vcc_seq_next, .stop = vcc_seq_stop, .show = svc_seq_show, }; static int svc_seq_open(struct inode *inode, struct file *file) { return __vcc_seq_open(inode, file, PF_ATMSVC, &svc_seq_ops); } static const struct file_operations svc_seq_fops = { .open = svc_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static ssize_t proc_dev_atm_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct atm_dev *dev; unsigned long page; int length; if (count == 0) return 0; page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; dev = PDE(file->f_path.dentry->d_inode)->data; if (!dev->ops->proc_read) length = -EINVAL; else { length = dev->ops->proc_read(dev, pos, (char *)page); if (length > count) length = -EINVAL; } if (length >= 0) { if (copy_to_user(buf, (char *)page, length)) length = -EFAULT; (*pos)++; } free_page(page); return length; } struct proc_dir_entry *atm_proc_root; EXPORT_SYMBOL(atm_proc_root); int 
atm_proc_dev_register(struct atm_dev *dev) { int error; /* No proc info */ if (!dev->ops->proc_read) return 0; error = -ENOMEM; dev->proc_name = kasprintf(GFP_KERNEL, "%s:%d", dev->type, dev->number); if (!dev->proc_name) goto err_out; dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root, &proc_atm_dev_ops, dev); if (!dev->proc_entry) goto err_free_name; return 0; err_free_name: kfree(dev->proc_name); err_out: return error; } void atm_proc_dev_deregister(struct atm_dev *dev) { if (!dev->ops->proc_read) return; remove_proc_entry(dev->proc_name, atm_proc_root); kfree(dev->proc_name); } static struct atm_proc_entry { char *name; const struct file_operations *proc_fops; struct proc_dir_entry *dirent; } atm_proc_ents[] = { { .name = "devices", .proc_fops = &devices_seq_fops }, { .name = "pvc", .proc_fops = &pvc_seq_fops }, { .name = "svc", .proc_fops = &svc_seq_fops }, { .name = "vc", .proc_fops = &vcc_seq_fops }, { .name = NULL, .proc_fops = NULL } }; static void atm_proc_dirs_remove(void) { static struct atm_proc_entry *e; for (e = atm_proc_ents; e->name; e++) { if (e->dirent) remove_proc_entry(e->name, atm_proc_root); } proc_net_remove(&init_net, "atm"); } int __init atm_proc_init(void) { static struct atm_proc_entry *e; int ret; atm_proc_root = proc_net_mkdir(&init_net, "atm", init_net.proc_net); if (!atm_proc_root) goto err_out; for (e = atm_proc_ents; e->name; e++) { struct proc_dir_entry *dirent; dirent = proc_create(e->name, S_IRUGO, atm_proc_root, e->proc_fops); if (!dirent) goto err_out_remove; e->dirent = dirent; } ret = 0; out: return ret; err_out_remove: atm_proc_dirs_remove(); err_out: ret = -ENOMEM; goto out; } void atm_proc_exit(void) { atm_proc_dirs_remove(); }
gpl-2.0
keepcalm444/android_kernel_samsung_smdk4412
lib/gen_crc32table.c
3901
2303
#include <stdio.h> #include "crc32defs.h" #include <inttypes.h> #define ENTRIES_PER_LINE 4 #define LE_TABLE_SIZE (1 << CRC_LE_BITS) #define BE_TABLE_SIZE (1 << CRC_BE_BITS) static uint32_t crc32table_le[4][LE_TABLE_SIZE]; static uint32_t crc32table_be[4][BE_TABLE_SIZE]; /** * crc32init_le() - allocate and initialize LE table data * * crc is the crc of the byte i; other entries are filled in based on the * fact that crctable[i^j] = crctable[i] ^ crctable[j]. * */ static void crc32init_le(void) { unsigned i, j; uint32_t crc = 1; crc32table_le[0][0] = 0; for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) { crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); for (j = 0; j < LE_TABLE_SIZE; j += 2 * i) crc32table_le[0][i + j] = crc ^ crc32table_le[0][j]; } for (i = 0; i < LE_TABLE_SIZE; i++) { crc = crc32table_le[0][i]; for (j = 1; j < 4; j++) { crc = crc32table_le[0][crc & 0xff] ^ (crc >> 8); crc32table_le[j][i] = crc; } } } /** * crc32init_be() - allocate and initialize BE table data */ static void crc32init_be(void) { unsigned i, j; uint32_t crc = 0x80000000; crc32table_be[0][0] = 0; for (i = 1; i < BE_TABLE_SIZE; i <<= 1) { crc = (crc << 1) ^ ((crc & 0x80000000) ? 
CRCPOLY_BE : 0); for (j = 0; j < i; j++) crc32table_be[0][i + j] = crc ^ crc32table_be[0][j]; } for (i = 0; i < BE_TABLE_SIZE; i++) { crc = crc32table_be[0][i]; for (j = 1; j < 4; j++) { crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8); crc32table_be[j][i] = crc; } } } static void output_table(uint32_t table[4][256], int len, char *trans) { int i, j; for (j = 0 ; j < 4; j++) { printf("{"); for (i = 0; i < len - 1; i++) { if (i % ENTRIES_PER_LINE == 0) printf("\n"); printf("%s(0x%8.8xL), ", trans, table[j][i]); } printf("%s(0x%8.8xL)},\n", trans, table[j][len - 1]); } } int main(int argc, char** argv) { printf("/* this file is generated - do not edit */\n\n"); if (CRC_LE_BITS > 1) { crc32init_le(); printf("static const u32 crc32table_le[4][256] = {"); output_table(crc32table_le, LE_TABLE_SIZE, "tole"); printf("};\n"); } if (CRC_BE_BITS > 1) { crc32init_be(); printf("static const u32 crc32table_be[4][256] = {"); output_table(crc32table_be, BE_TABLE_SIZE, "tobe"); printf("};\n"); } return 0; }
gpl-2.0
andr00ib/victor-oficial-kernel
tools/perf/builtin-kvm.c
4157
3669
#include "builtin.h" #include "perf.h" #include "util/util.h" #include "util/cache.h" #include "util/symbol.h" #include "util/thread.h" #include "util/header.h" #include "util/session.h" #include "util/parse-options.h" #include "util/trace-event.h" #include "util/debug.h" #include <sys/prctl.h> #include <semaphore.h> #include <pthread.h> #include <math.h> static const char *file_name; static char name_buffer[256]; bool perf_host = 1; bool perf_guest; static const char * const kvm_usage[] = { "perf kvm [<options>] {top|record|report|diff|buildid-list}", NULL }; static const struct option kvm_options[] = { OPT_STRING('i', "input", &file_name, "file", "Input file name"), OPT_STRING('o', "output", &file_name, "file", "Output file name"), OPT_BOOLEAN(0, "guest", &perf_guest, "Collect guest os data"), OPT_BOOLEAN(0, "host", &perf_host, "Collect guest os data"), OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory", "guest mount directory under which every guest os" " instance has a subdir"), OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name, "file", "file saving guest os vmlinux"), OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms, "file", "file saving guest os /proc/kallsyms"), OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules, "file", "file saving guest os /proc/modules"), OPT_END() }; static int __cmd_record(int argc, const char **argv) { int rec_argc, i = 0, j; const char **rec_argv; rec_argc = argc + 2; rec_argv = calloc(rec_argc + 1, sizeof(char *)); rec_argv[i++] = strdup("record"); rec_argv[i++] = strdup("-o"); rec_argv[i++] = strdup(file_name); for (j = 1; j < argc; j++, i++) rec_argv[i] = argv[j]; BUG_ON(i != rec_argc); return cmd_record(i, rec_argv, NULL); } static int __cmd_report(int argc, const char **argv) { int rec_argc, i = 0, j; const char **rec_argv; rec_argc = argc + 2; rec_argv = calloc(rec_argc + 1, sizeof(char *)); rec_argv[i++] = strdup("report"); rec_argv[i++] = strdup("-i"); 
rec_argv[i++] = strdup(file_name); for (j = 1; j < argc; j++, i++) rec_argv[i] = argv[j]; BUG_ON(i != rec_argc); return cmd_report(i, rec_argv, NULL); } static int __cmd_buildid_list(int argc, const char **argv) { int rec_argc, i = 0, j; const char **rec_argv; rec_argc = argc + 2; rec_argv = calloc(rec_argc + 1, sizeof(char *)); rec_argv[i++] = strdup("buildid-list"); rec_argv[i++] = strdup("-i"); rec_argv[i++] = strdup(file_name); for (j = 1; j < argc; j++, i++) rec_argv[i] = argv[j]; BUG_ON(i != rec_argc); return cmd_buildid_list(i, rec_argv, NULL); } int cmd_kvm(int argc, const char **argv, const char *prefix __used) { perf_host = perf_guest = 0; argc = parse_options(argc, argv, kvm_options, kvm_usage, PARSE_OPT_STOP_AT_NON_OPTION); if (!argc) usage_with_options(kvm_usage, kvm_options); if (!perf_host) perf_guest = 1; if (!file_name) { if (perf_host && !perf_guest) sprintf(name_buffer, "perf.data.host"); else if (!perf_host && perf_guest) sprintf(name_buffer, "perf.data.guest"); else sprintf(name_buffer, "perf.data.kvm"); file_name = name_buffer; } if (!strncmp(argv[0], "rec", 3)) return __cmd_record(argc, argv); else if (!strncmp(argv[0], "rep", 3)) return __cmd_report(argc, argv); else if (!strncmp(argv[0], "diff", 4)) return cmd_diff(argc, argv, NULL); else if (!strncmp(argv[0], "top", 3)) return cmd_top(argc, argv, NULL); else if (!strncmp(argv[0], "buildid-list", 12)) return __cmd_buildid_list(argc, argv); else usage_with_options(kvm_usage, kvm_options); return 0; }
gpl-2.0
GuneetAtwal/kernel_mt6589
arch/powerpc/kvm/book3s.c
4413
12463
/* * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. * * Authors: * Alexander Graf <agraf@suse.de> * Kevin Wolf <mail@kevin-wolf.de> * * Description: * This file is derived from arch/powerpc/kvm/44x.c, * by Hollis Blanchard <hollisb@us.ibm.com>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. */ #include <linux/kvm_host.h> #include <linux/err.h> #include <linux/export.h> #include <linux/slab.h> #include <asm/reg.h> #include <asm/cputable.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/mmu_context.h> #include <asm/page.h> #include <linux/gfp.h> #include <linux/sched.h> #include <linux/vmalloc.h> #include <linux/highmem.h> #include "trace.h" #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU /* #define EXIT_DEBUG */ struct kvm_stats_debugfs_item debugfs_entries[] = { { "exits", VCPU_STAT(sum_exits) }, { "mmio", VCPU_STAT(mmio_exits) }, { "sig", VCPU_STAT(signal_exits) }, { "sysc", VCPU_STAT(syscall_exits) }, { "inst_emu", VCPU_STAT(emulated_inst_exits) }, { "dec", VCPU_STAT(dec_exits) }, { "ext_intr", VCPU_STAT(ext_intr_exits) }, { "queue_intr", VCPU_STAT(queue_intr) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "pf_storage", VCPU_STAT(pf_storage) }, { "sp_storage", VCPU_STAT(sp_storage) }, { "pf_instruc", VCPU_STAT(pf_instruc) }, { "sp_instruc", VCPU_STAT(sp_instruc) }, { "ld", VCPU_STAT(ld) }, { "ld_slow", VCPU_STAT(ld_slow) }, { "st", VCPU_STAT(st) }, { "st_slow", VCPU_STAT(st_slow) }, { NULL } }; void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) { } void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) { } void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) { vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu); vcpu->arch.shared->srr1 
= vcpu->arch.shared->msr | flags; kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec); vcpu->arch.mmu.reset_msr(vcpu); } static int kvmppc_book3s_vec2irqprio(unsigned int vec) { unsigned int prio; switch (vec) { case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break; case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break; case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE; break; case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT; break; case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break; case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break; case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break; case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break; case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break; case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break; case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break; case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER; break; case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL; break; case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break; case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break; case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break; default: prio = BOOK3S_IRQPRIO_MAX; break; } return prio; } static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) { unsigned long old_pending = vcpu->arch.pending_exceptions; clear_bit(kvmppc_book3s_vec2irqprio(vec), &vcpu->arch.pending_exceptions); kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions, old_pending); } void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) { vcpu->stat.queue_intr++; set_bit(kvmppc_book3s_vec2irqprio(vec), &vcpu->arch.pending_exceptions); #ifdef EXIT_DEBUG printk(KERN_INFO "Queueing interrupt %x\n", vec); #endif } void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags) { /* might as well deliver this straight away */ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags); } void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) { kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER); } int 
kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) { return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); } void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) { kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER); } void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL; if (irq->irq == KVM_INTERRUPT_SET_LEVEL) vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL; kvmppc_book3s_queue_irqprio(vcpu, vec); } void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL); } int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) { int deliver = 1; int vec = 0; bool crit = kvmppc_critical_section(vcpu); switch (priority) { case BOOK3S_IRQPRIO_DECREMENTER: deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit; vec = BOOK3S_INTERRUPT_DECREMENTER; break; case BOOK3S_IRQPRIO_EXTERNAL: case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit; vec = BOOK3S_INTERRUPT_EXTERNAL; break; case BOOK3S_IRQPRIO_SYSTEM_RESET: vec = BOOK3S_INTERRUPT_SYSTEM_RESET; break; case BOOK3S_IRQPRIO_MACHINE_CHECK: vec = BOOK3S_INTERRUPT_MACHINE_CHECK; break; case BOOK3S_IRQPRIO_DATA_STORAGE: vec = BOOK3S_INTERRUPT_DATA_STORAGE; break; case BOOK3S_IRQPRIO_INST_STORAGE: vec = BOOK3S_INTERRUPT_INST_STORAGE; break; case BOOK3S_IRQPRIO_DATA_SEGMENT: vec = BOOK3S_INTERRUPT_DATA_SEGMENT; break; case BOOK3S_IRQPRIO_INST_SEGMENT: vec = BOOK3S_INTERRUPT_INST_SEGMENT; break; case BOOK3S_IRQPRIO_ALIGNMENT: vec = BOOK3S_INTERRUPT_ALIGNMENT; break; case BOOK3S_IRQPRIO_PROGRAM: vec = BOOK3S_INTERRUPT_PROGRAM; break; case BOOK3S_IRQPRIO_VSX: vec = BOOK3S_INTERRUPT_VSX; break; case BOOK3S_IRQPRIO_ALTIVEC: vec = BOOK3S_INTERRUPT_ALTIVEC; break; case BOOK3S_IRQPRIO_FP_UNAVAIL: vec = BOOK3S_INTERRUPT_FP_UNAVAIL; 
break; case BOOK3S_IRQPRIO_SYSCALL: vec = BOOK3S_INTERRUPT_SYSCALL; break; case BOOK3S_IRQPRIO_DEBUG: vec = BOOK3S_INTERRUPT_TRACE; break; case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR: vec = BOOK3S_INTERRUPT_PERFMON; break; default: deliver = 0; printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority); break; } #if 0 printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver); #endif if (deliver) kvmppc_inject_interrupt(vcpu, vec, 0); return deliver; } /* * This function determines if an irqprio should be cleared once issued. */ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority) { switch (priority) { case BOOK3S_IRQPRIO_DECREMENTER: /* DEC interrupts get cleared by mtdec */ return false; case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: /* External interrupts get cleared by userspace */ return false; } return true; } void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) { unsigned long *pending = &vcpu->arch.pending_exceptions; unsigned long old_pending = vcpu->arch.pending_exceptions; unsigned int priority; #ifdef EXIT_DEBUG if (vcpu->arch.pending_exceptions) printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions); #endif priority = __ffs(*pending); while (priority < BOOK3S_IRQPRIO_MAX) { if (kvmppc_book3s_irqprio_deliver(vcpu, priority) && clear_irqprio(vcpu, priority)) { clear_bit(priority, &vcpu->arch.pending_exceptions); break; } priority = find_next_bit(pending, BITS_PER_BYTE * sizeof(*pending), priority + 1); } /* Tell the guest about our interrupt status */ kvmppc_update_int_pending(vcpu, *pending, old_pending); } pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) { ulong mp_pa = vcpu->arch.magic_page_pa; /* Magic page override */ if (unlikely(mp_pa) && unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) == ((mp_pa & PAGE_MASK) & KVM_PAM))) { ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; pfn_t pfn; pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT; get_page(pfn_to_page(pfn)); return pfn; } return 
gfn_to_pfn(vcpu->kvm, gfn); } static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, struct kvmppc_pte *pte) { int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR)); int r; if (relocated) { r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data); } else { pte->eaddr = eaddr; pte->raddr = eaddr & KVM_PAM; pte->vpage = VSID_REAL | eaddr >> 12; pte->may_read = true; pte->may_write = true; pte->may_execute = true; r = 0; } return r; } static hva_t kvmppc_bad_hva(void) { return PAGE_OFFSET; } static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte, bool read) { hva_t hpage; if (read && !pte->may_read) goto err; if (!read && !pte->may_write) goto err; hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT); if (kvm_is_error_hva(hpage)) goto err; return hpage | (pte->raddr & ~PAGE_MASK); err: return kvmppc_bad_hva(); } int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data) { struct kvmppc_pte pte; vcpu->stat.st++; if (kvmppc_xlate(vcpu, *eaddr, data, &pte)) return -ENOENT; *eaddr = pte.raddr; if (!pte.may_write) return -EPERM; if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) return EMULATE_DO_MMIO; return EMULATE_DONE; } int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data) { struct kvmppc_pte pte; hva_t hva = *eaddr; vcpu->stat.ld++; if (kvmppc_xlate(vcpu, *eaddr, data, &pte)) goto nopte; *eaddr = pte.raddr; hva = kvmppc_pte_to_hva(vcpu, &pte, true); if (kvm_is_error_hva(hva)) goto mmio; if (copy_from_user(ptr, (void __user *)hva, size)) { printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva); goto mmio; } return EMULATE_DONE; nopte: return -ENOENT; mmio: return EMULATE_DO_MMIO; } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { return 0; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; regs->pc = kvmppc_get_pc(vcpu); regs->cr = kvmppc_get_cr(vcpu); regs->ctr = kvmppc_get_ctr(vcpu); regs->lr = kvmppc_get_lr(vcpu); regs->xer = 
kvmppc_get_xer(vcpu); regs->msr = vcpu->arch.shared->msr; regs->srr0 = vcpu->arch.shared->srr0; regs->srr1 = vcpu->arch.shared->srr1; regs->pid = vcpu->arch.pid; regs->sprg0 = vcpu->arch.shared->sprg0; regs->sprg1 = vcpu->arch.shared->sprg1; regs->sprg2 = vcpu->arch.shared->sprg2; regs->sprg3 = vcpu->arch.shared->sprg3; regs->sprg4 = vcpu->arch.shared->sprg4; regs->sprg5 = vcpu->arch.shared->sprg5; regs->sprg6 = vcpu->arch.shared->sprg6; regs->sprg7 = vcpu->arch.shared->sprg7; for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) regs->gpr[i] = kvmppc_get_gpr(vcpu, i); return 0; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; kvmppc_set_pc(vcpu, regs->pc); kvmppc_set_cr(vcpu, regs->cr); kvmppc_set_ctr(vcpu, regs->ctr); kvmppc_set_lr(vcpu, regs->lr); kvmppc_set_xer(vcpu, regs->xer); kvmppc_set_msr(vcpu, regs->msr); vcpu->arch.shared->srr0 = regs->srr0; vcpu->arch.shared->srr1 = regs->srr1; vcpu->arch.shared->sprg0 = regs->sprg0; vcpu->arch.shared->sprg1 = regs->sprg1; vcpu->arch.shared->sprg2 = regs->sprg2; vcpu->arch.shared->sprg3 = regs->sprg3; vcpu->arch.shared->sprg4 = regs->sprg4; vcpu->arch.shared->sprg5 = regs->sprg5; vcpu->arch.shared->sprg6 = regs->sprg6; vcpu->arch.shared->sprg7 = regs->sprg7; for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) kvmppc_set_gpr(vcpu, i, regs->gpr[i]); return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -ENOTSUPP; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -ENOTSUPP; } int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { return 0; } void kvmppc_decrementer_func(unsigned long data) { struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; kvmppc_core_queue_dec(vcpu); kvm_vcpu_kick(vcpu); }
gpl-2.0
googyanas/Googy-Max4-Kernel
net/ipv4/netfilter/nf_nat_proto_udplite.c
4669
2731
/* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * (C) 2008 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/init.h> #include <linux/ip.h> #include <linux/udp.h> #include <linux/netfilter.h> #include <linux/module.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_protocol.h> static u_int16_t udplite_port_rover; static void udplite_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_ipv4_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &udplite_port_rover); } static bool udplite_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); struct udphdr *hdr; unsigned int hdroff = iphdroff + iph->ihl*4; __be32 oldip, newip; __be16 *portptr, newport; if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) return false; iph = (struct iphdr *)(skb->data + iphdroff); hdr = (struct udphdr *)(skb->data + hdroff); if (maniptype == NF_NAT_MANIP_SRC) { /* Get rid of src ip and src pt */ oldip = iph->saddr; newip = tuple->src.u3.ip; newport = tuple->src.u.udp.port; portptr = &hdr->source; } else { /* Get rid of dst ip and dst pt */ oldip = iph->daddr; newip = tuple->dst.u3.ip; newport = tuple->dst.u.udp.port; portptr = &hdr->dest; } inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1); inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0); if (!hdr->check) hdr->check = CSUM_MANGLED_0; *portptr = newport; return true; } static const struct nf_nat_protocol nf_nat_protocol_udplite = { .protonum = IPPROTO_UDPLITE, .manip_pkt = udplite_manip_pkt, .in_range = 
nf_nat_proto_in_range, .unique_tuple = udplite_unique_tuple, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .nlattr_to_range = nf_nat_proto_nlattr_to_range, #endif }; static int __init nf_nat_proto_udplite_init(void) { return nf_nat_protocol_register(&nf_nat_protocol_udplite); } static void __exit nf_nat_proto_udplite_fini(void) { nf_nat_protocol_unregister(&nf_nat_protocol_udplite); } module_init(nf_nat_proto_udplite_init); module_exit(nf_nat_proto_udplite_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("UDP-Lite NAT protocol helper"); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
gpl-2.0
libcg/android_kernel_samsung_exynos4
net/ipv4/netfilter/nf_nat_proto_unknown.c
4669
1471
/* The "unknown" protocol. This is what is used for protocols we * don't understand. It's returned by ip_ct_find_proto(). */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/init.h> #include <linux/netfilter.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_rule.h> #include <net/netfilter/nf_nat_protocol.h> static bool unknown_in_range(const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type manip_type, const union nf_conntrack_man_proto *min, const union nf_conntrack_man_proto *max) { return true; } static void unknown_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_ipv4_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { /* Sorry: we can't help you; if it's not unique, we can't frob anything. */ return; } static bool unknown_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { return true; } const struct nf_nat_protocol nf_nat_unknown_protocol = { .manip_pkt = unknown_manip_pkt, .in_range = unknown_in_range, .unique_tuple = unknown_unique_tuple, };
gpl-2.0
hb72k/android_kernel_huawei_msm8916
sound/soc/blackfin/bf5xx-sport.c
7485
28031
/* * File: bf5xx_sport.c * Based on: * Author: Roy Huang <roy.huang@analog.com> * * Created: Tue Sep 21 10:52:42 CEST 2004 * Description: * Blackfin SPORT Driver * * Copyright 2004-2007 Analog Devices Inc. * * Bugs: Enter bugs at http://blackfin.uclinux.org/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/gpio.h> #include <linux/bug.h> #include <linux/module.h> #include <asm/portmux.h> #include <asm/dma.h> #include <asm/blackfin.h> #include <asm/cacheflush.h> #include "bf5xx-sport.h" /* delay between frame sync pulse and first data bit in multichannel mode */ #define FRAME_DELAY (1<<12) /* note: multichannel is in units of 8 channels, * tdm_count is # channels NOT / 8 ! */ int sport_set_multichannel(struct sport_device *sport, int tdm_count, u32 mask, int packed) { pr_debug("%s tdm_count=%d mask:0x%08x packed=%d\n", __func__, tdm_count, mask, packed); if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN)) return -EBUSY; if (tdm_count & 0x7) return -EINVAL; if (tdm_count > 32) return -EINVAL; /* Only support less than 32 channels now */ if (tdm_count) { sport->regs->mcmc1 = ((tdm_count>>3)-1) << 12; sport->regs->mcmc2 = FRAME_DELAY | MCMEN | \ (packed ? 
(MCDTXPE|MCDRXPE) : 0); sport->regs->mtcs0 = mask; sport->regs->mrcs0 = mask; sport->regs->mtcs1 = 0; sport->regs->mrcs1 = 0; sport->regs->mtcs2 = 0; sport->regs->mrcs2 = 0; sport->regs->mtcs3 = 0; sport->regs->mrcs3 = 0; } else { sport->regs->mcmc1 = 0; sport->regs->mcmc2 = 0; sport->regs->mtcs0 = 0; sport->regs->mrcs0 = 0; } sport->regs->mtcs1 = 0; sport->regs->mtcs2 = 0; sport->regs->mtcs3 = 0; sport->regs->mrcs1 = 0; sport->regs->mrcs2 = 0; sport->regs->mrcs3 = 0; SSYNC(); return 0; } EXPORT_SYMBOL(sport_set_multichannel); int sport_config_rx(struct sport_device *sport, unsigned int rcr1, unsigned int rcr2, unsigned int clkdiv, unsigned int fsdiv) { if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN)) return -EBUSY; sport->regs->rcr1 = rcr1; sport->regs->rcr2 = rcr2; sport->regs->rclkdiv = clkdiv; sport->regs->rfsdiv = fsdiv; SSYNC(); return 0; } EXPORT_SYMBOL(sport_config_rx); int sport_config_tx(struct sport_device *sport, unsigned int tcr1, unsigned int tcr2, unsigned int clkdiv, unsigned int fsdiv) { if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN)) return -EBUSY; sport->regs->tcr1 = tcr1; sport->regs->tcr2 = tcr2; sport->regs->tclkdiv = clkdiv; sport->regs->tfsdiv = fsdiv; SSYNC(); return 0; } EXPORT_SYMBOL(sport_config_tx); static void setup_desc(struct dmasg *desc, void *buf, int fragcount, size_t fragsize, unsigned int cfg, unsigned int x_count, unsigned int ycount, size_t wdsize) { int i; for (i = 0; i < fragcount; ++i) { desc[i].next_desc_addr = &(desc[i + 1]); desc[i].start_addr = (unsigned long)buf + i*fragsize; desc[i].cfg = cfg; desc[i].x_count = x_count; desc[i].x_modify = wdsize; desc[i].y_count = ycount; desc[i].y_modify = wdsize; } /* make circular */ desc[fragcount-1].next_desc_addr = desc; pr_debug("setup desc: desc0=%p, next0=%p, desc1=%p," "next1=%p\nx_count=%x,y_count=%x,addr=0x%lx,cfs=0x%x\n", desc, desc[0].next_desc_addr, desc+1, desc[1].next_desc_addr, desc[0].x_count, desc[0].y_count, desc[0].start_addr, 
desc[0].cfg); } static int sport_start(struct sport_device *sport) { enable_dma(sport->dma_rx_chan); enable_dma(sport->dma_tx_chan); sport->regs->rcr1 |= RSPEN; sport->regs->tcr1 |= TSPEN; SSYNC(); return 0; } static int sport_stop(struct sport_device *sport) { sport->regs->tcr1 &= ~TSPEN; sport->regs->rcr1 &= ~RSPEN; SSYNC(); disable_dma(sport->dma_rx_chan); disable_dma(sport->dma_tx_chan); return 0; } static inline int sport_hook_rx_dummy(struct sport_device *sport) { struct dmasg *desc, temp_desc; unsigned long flags; BUG_ON(sport->dummy_rx_desc == NULL); BUG_ON(sport->curr_rx_desc == sport->dummy_rx_desc); /* Maybe the dummy buffer descriptor ring is damaged */ sport->dummy_rx_desc->next_desc_addr = sport->dummy_rx_desc + 1; local_irq_save(flags); desc = get_dma_next_desc_ptr(sport->dma_rx_chan); /* Copy the descriptor which will be damaged to backup */ temp_desc = *desc; desc->x_count = sport->dummy_count / 2; desc->y_count = 0; desc->next_desc_addr = sport->dummy_rx_desc; local_irq_restore(flags); /* Waiting for dummy buffer descriptor is already hooked*/ while ((get_dma_curr_desc_ptr(sport->dma_rx_chan) - sizeof(struct dmasg)) != sport->dummy_rx_desc) continue; sport->curr_rx_desc = sport->dummy_rx_desc; /* Restore the damaged descriptor */ *desc = temp_desc; return 0; } static inline int sport_rx_dma_start(struct sport_device *sport, int dummy) { if (dummy) { sport->dummy_rx_desc->next_desc_addr = sport->dummy_rx_desc; sport->curr_rx_desc = sport->dummy_rx_desc; } else sport->curr_rx_desc = sport->dma_rx_desc; set_dma_next_desc_addr(sport->dma_rx_chan, sport->curr_rx_desc); set_dma_x_count(sport->dma_rx_chan, 0); set_dma_x_modify(sport->dma_rx_chan, 0); set_dma_config(sport->dma_rx_chan, (DMAFLOW_LARGE | NDSIZE_9 | \ WDSIZE_32 | WNR)); set_dma_curr_addr(sport->dma_rx_chan, sport->curr_rx_desc->start_addr); SSYNC(); return 0; } static inline int sport_tx_dma_start(struct sport_device *sport, int dummy) { if (dummy) { sport->dummy_tx_desc->next_desc_addr = 
sport->dummy_tx_desc; sport->curr_tx_desc = sport->dummy_tx_desc; } else sport->curr_tx_desc = sport->dma_tx_desc; set_dma_next_desc_addr(sport->dma_tx_chan, sport->curr_tx_desc); set_dma_x_count(sport->dma_tx_chan, 0); set_dma_x_modify(sport->dma_tx_chan, 0); set_dma_config(sport->dma_tx_chan, (DMAFLOW_LARGE | NDSIZE_9 | WDSIZE_32)); set_dma_curr_addr(sport->dma_tx_chan, sport->curr_tx_desc->start_addr); SSYNC(); return 0; } int sport_rx_start(struct sport_device *sport) { unsigned long flags; pr_debug("%s enter\n", __func__); if (sport->rx_run) return -EBUSY; if (sport->tx_run) { /* tx is running, rx is not running */ BUG_ON(sport->dma_rx_desc == NULL); BUG_ON(sport->curr_rx_desc != sport->dummy_rx_desc); local_irq_save(flags); while ((get_dma_curr_desc_ptr(sport->dma_rx_chan) - sizeof(struct dmasg)) != sport->dummy_rx_desc) continue; sport->dummy_rx_desc->next_desc_addr = sport->dma_rx_desc; local_irq_restore(flags); sport->curr_rx_desc = sport->dma_rx_desc; } else { sport_tx_dma_start(sport, 1); sport_rx_dma_start(sport, 0); sport_start(sport); } sport->rx_run = 1; return 0; } EXPORT_SYMBOL(sport_rx_start); int sport_rx_stop(struct sport_device *sport) { pr_debug("%s enter\n", __func__); if (!sport->rx_run) return 0; if (sport->tx_run) { /* TX dma is still running, hook the dummy buffer */ sport_hook_rx_dummy(sport); } else { /* Both rx and tx dma will be stopped */ sport_stop(sport); sport->curr_rx_desc = NULL; sport->curr_tx_desc = NULL; } sport->rx_run = 0; return 0; } EXPORT_SYMBOL(sport_rx_stop); static inline int sport_hook_tx_dummy(struct sport_device *sport) { struct dmasg *desc, temp_desc; unsigned long flags; BUG_ON(sport->dummy_tx_desc == NULL); BUG_ON(sport->curr_tx_desc == sport->dummy_tx_desc); sport->dummy_tx_desc->next_desc_addr = sport->dummy_tx_desc + 1; /* Shorten the time on last normal descriptor */ local_irq_save(flags); desc = get_dma_next_desc_ptr(sport->dma_tx_chan); /* Store the descriptor which will be damaged */ temp_desc = *desc; 
desc->x_count = sport->dummy_count / 2; desc->y_count = 0; desc->next_desc_addr = sport->dummy_tx_desc; local_irq_restore(flags); /* Waiting for dummy buffer descriptor is already hooked*/ while ((get_dma_curr_desc_ptr(sport->dma_tx_chan) - \ sizeof(struct dmasg)) != sport->dummy_tx_desc) continue; sport->curr_tx_desc = sport->dummy_tx_desc; /* Restore the damaged descriptor */ *desc = temp_desc; return 0; } int sport_tx_start(struct sport_device *sport) { unsigned long flags; pr_debug("%s: tx_run:%d, rx_run:%d\n", __func__, sport->tx_run, sport->rx_run); if (sport->tx_run) return -EBUSY; if (sport->rx_run) { BUG_ON(sport->dma_tx_desc == NULL); BUG_ON(sport->curr_tx_desc != sport->dummy_tx_desc); /* Hook the normal buffer descriptor */ local_irq_save(flags); while ((get_dma_curr_desc_ptr(sport->dma_tx_chan) - sizeof(struct dmasg)) != sport->dummy_tx_desc) continue; sport->dummy_tx_desc->next_desc_addr = sport->dma_tx_desc; local_irq_restore(flags); sport->curr_tx_desc = sport->dma_tx_desc; } else { sport_tx_dma_start(sport, 0); /* Let rx dma run the dummy buffer */ sport_rx_dma_start(sport, 1); sport_start(sport); } sport->tx_run = 1; return 0; } EXPORT_SYMBOL(sport_tx_start); int sport_tx_stop(struct sport_device *sport) { if (!sport->tx_run) return 0; if (sport->rx_run) { /* RX is still running, hook the dummy buffer */ sport_hook_tx_dummy(sport); } else { /* Both rx and tx dma stopped */ sport_stop(sport); sport->curr_rx_desc = NULL; sport->curr_tx_desc = NULL; } sport->tx_run = 0; return 0; } EXPORT_SYMBOL(sport_tx_stop); static inline int compute_wdsize(size_t wdsize) { switch (wdsize) { case 1: return WDSIZE_8; case 2: return WDSIZE_16; case 4: default: return WDSIZE_32; } } int sport_config_rx_dma(struct sport_device *sport, void *buf, int fragcount, size_t fragsize) { unsigned int x_count; unsigned int y_count; unsigned int cfg; dma_addr_t addr; pr_debug("%s buf:%p, frag:%d, fragsize:0x%lx\n", __func__, \ buf, fragcount, fragsize); x_count = fragsize / 
sport->wdsize; y_count = 0; /* for fragments larger than 64k words we use 2d dma, * denote fragecount as two numbers' mutliply and both of them * are less than 64k.*/ if (x_count >= 0x10000) { int i, count = x_count; for (i = 16; i > 0; i--) { x_count = 1 << i; if ((count & (x_count - 1)) == 0) { y_count = count >> i; if (y_count < 0x10000) break; } } if (i == 0) return -EINVAL; } pr_debug("%s(x_count:0x%x, y_count:0x%x)\n", __func__, x_count, y_count); if (sport->dma_rx_desc) dma_free_coherent(NULL, sport->rx_desc_bytes, sport->dma_rx_desc, 0); /* Allocate a new descritor ring as current one. */ sport->dma_rx_desc = dma_alloc_coherent(NULL, \ fragcount * sizeof(struct dmasg), &addr, 0); sport->rx_desc_bytes = fragcount * sizeof(struct dmasg); if (!sport->dma_rx_desc) { pr_err("Failed to allocate memory for rx desc\n"); return -ENOMEM; } sport->rx_buf = buf; sport->rx_fragsize = fragsize; sport->rx_frags = fragcount; cfg = 0x7000 | DI_EN | compute_wdsize(sport->wdsize) | WNR | \ (DESC_ELEMENT_COUNT << 8); /* large descriptor mode */ if (y_count != 0) cfg |= DMA2D; setup_desc(sport->dma_rx_desc, buf, fragcount, fragsize, cfg|DMAEN, x_count, y_count, sport->wdsize); return 0; } EXPORT_SYMBOL(sport_config_rx_dma); int sport_config_tx_dma(struct sport_device *sport, void *buf, \ int fragcount, size_t fragsize) { unsigned int x_count; unsigned int y_count; unsigned int cfg; dma_addr_t addr; pr_debug("%s buf:%p, fragcount:%d, fragsize:0x%lx\n", __func__, buf, fragcount, fragsize); x_count = fragsize/sport->wdsize; y_count = 0; /* for fragments larger than 64k words we use 2d dma, * denote fragecount as two numbers' mutliply and both of them * are less than 64k.*/ if (x_count >= 0x10000) { int i, count = x_count; for (i = 16; i > 0; i--) { x_count = 1 << i; if ((count & (x_count - 1)) == 0) { y_count = count >> i; if (y_count < 0x10000) break; } } if (i == 0) return -EINVAL; } pr_debug("%s x_count:0x%x, y_count:0x%x\n", __func__, x_count, y_count); if (sport->dma_tx_desc) 
{ dma_free_coherent(NULL, sport->tx_desc_bytes, \ sport->dma_tx_desc, 0); } sport->dma_tx_desc = dma_alloc_coherent(NULL, \ fragcount * sizeof(struct dmasg), &addr, 0); sport->tx_desc_bytes = fragcount * sizeof(struct dmasg); if (!sport->dma_tx_desc) { pr_err("Failed to allocate memory for tx desc\n"); return -ENOMEM; } sport->tx_buf = buf; sport->tx_fragsize = fragsize; sport->tx_frags = fragcount; cfg = 0x7000 | DI_EN | compute_wdsize(sport->wdsize) | \ (DESC_ELEMENT_COUNT << 8); /* large descriptor mode */ if (y_count != 0) cfg |= DMA2D; setup_desc(sport->dma_tx_desc, buf, fragcount, fragsize, cfg|DMAEN, x_count, y_count, sport->wdsize); return 0; } EXPORT_SYMBOL(sport_config_tx_dma); /* setup dummy dma descriptor ring, which don't generate interrupts, * the x_modify is set to 0 */ static int sport_config_rx_dummy(struct sport_device *sport) { struct dmasg *desc; unsigned config; pr_debug("%s entered\n", __func__); if (L1_DATA_A_LENGTH) desc = l1_data_sram_zalloc(2 * sizeof(*desc)); else { dma_addr_t addr; desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0); memset(desc, 0, 2 * sizeof(*desc)); } if (desc == NULL) { pr_err("Failed to allocate memory for dummy rx desc\n"); return -ENOMEM; } sport->dummy_rx_desc = desc; desc->start_addr = (unsigned long)sport->dummy_buf; config = DMAFLOW_LARGE | NDSIZE_9 | compute_wdsize(sport->wdsize) | WNR | DMAEN; desc->cfg = config; desc->x_count = sport->dummy_count/sport->wdsize; desc->x_modify = sport->wdsize; desc->y_count = 0; desc->y_modify = 0; memcpy(desc+1, desc, sizeof(*desc)); desc->next_desc_addr = desc + 1; desc[1].next_desc_addr = desc; return 0; } static int sport_config_tx_dummy(struct sport_device *sport) { struct dmasg *desc; unsigned int config; pr_debug("%s entered\n", __func__); if (L1_DATA_A_LENGTH) desc = l1_data_sram_zalloc(2 * sizeof(*desc)); else { dma_addr_t addr; desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0); memset(desc, 0, 2 * sizeof(*desc)); } if (!desc) { pr_err("Failed to 
allocate memory for dummy tx desc\n"); return -ENOMEM; } sport->dummy_tx_desc = desc; desc->start_addr = (unsigned long)sport->dummy_buf + \ sport->dummy_count; config = DMAFLOW_LARGE | NDSIZE_9 | compute_wdsize(sport->wdsize) | DMAEN; desc->cfg = config; desc->x_count = sport->dummy_count/sport->wdsize; desc->x_modify = sport->wdsize; desc->y_count = 0; desc->y_modify = 0; memcpy(desc+1, desc, sizeof(*desc)); desc->next_desc_addr = desc + 1; desc[1].next_desc_addr = desc; return 0; } unsigned long sport_curr_offset_rx(struct sport_device *sport) { unsigned long curr = get_dma_curr_addr(sport->dma_rx_chan); return (unsigned char *)curr - sport->rx_buf; } EXPORT_SYMBOL(sport_curr_offset_rx); unsigned long sport_curr_offset_tx(struct sport_device *sport) { unsigned long curr = get_dma_curr_addr(sport->dma_tx_chan); return (unsigned char *)curr - sport->tx_buf; } EXPORT_SYMBOL(sport_curr_offset_tx); void sport_incfrag(struct sport_device *sport, int *frag, int tx) { ++(*frag); if (tx == 1 && *frag == sport->tx_frags) *frag = 0; if (tx == 0 && *frag == sport->rx_frags) *frag = 0; } EXPORT_SYMBOL(sport_incfrag); void sport_decfrag(struct sport_device *sport, int *frag, int tx) { --(*frag); if (tx == 1 && *frag == 0) *frag = sport->tx_frags; if (tx == 0 && *frag == 0) *frag = sport->rx_frags; } EXPORT_SYMBOL(sport_decfrag); static int sport_check_status(struct sport_device *sport, unsigned int *sport_stat, unsigned int *rx_stat, unsigned int *tx_stat) { int status = 0; if (sport_stat) { SSYNC(); status = sport->regs->stat; if (status & (TOVF|TUVF|ROVF|RUVF)) sport->regs->stat = (status & (TOVF|TUVF|ROVF|RUVF)); SSYNC(); *sport_stat = status; } if (rx_stat) { SSYNC(); status = get_dma_curr_irqstat(sport->dma_rx_chan); if (status & (DMA_DONE|DMA_ERR)) clear_dma_irqstat(sport->dma_rx_chan); SSYNC(); *rx_stat = status; } if (tx_stat) { SSYNC(); status = get_dma_curr_irqstat(sport->dma_tx_chan); if (status & (DMA_DONE|DMA_ERR)) clear_dma_irqstat(sport->dma_tx_chan); SSYNC(); 
*tx_stat = status; } return 0; } int sport_dump_stat(struct sport_device *sport, char *buf, size_t len) { int ret; ret = snprintf(buf, len, "sts: 0x%04x\n" "rx dma %d sts: 0x%04x tx dma %d sts: 0x%04x\n", sport->regs->stat, sport->dma_rx_chan, get_dma_curr_irqstat(sport->dma_rx_chan), sport->dma_tx_chan, get_dma_curr_irqstat(sport->dma_tx_chan)); buf += ret; len -= ret; ret += snprintf(buf, len, "curr_rx_desc:0x%p, curr_tx_desc:0x%p\n" "dma_rx_desc:0x%p, dma_tx_desc:0x%p\n" "dummy_rx_desc:0x%p, dummy_tx_desc:0x%p\n", sport->curr_rx_desc, sport->curr_tx_desc, sport->dma_rx_desc, sport->dma_tx_desc, sport->dummy_rx_desc, sport->dummy_tx_desc); return ret; } static irqreturn_t rx_handler(int irq, void *dev_id) { unsigned int rx_stat; struct sport_device *sport = dev_id; pr_debug("%s enter\n", __func__); sport_check_status(sport, NULL, &rx_stat, NULL); if (!(rx_stat & DMA_DONE)) pr_err("rx dma is already stopped\n"); if (sport->rx_callback) { sport->rx_callback(sport->rx_data); return IRQ_HANDLED; } return IRQ_NONE; } static irqreturn_t tx_handler(int irq, void *dev_id) { unsigned int tx_stat; struct sport_device *sport = dev_id; pr_debug("%s enter\n", __func__); sport_check_status(sport, NULL, NULL, &tx_stat); if (!(tx_stat & DMA_DONE)) { pr_err("tx dma is already stopped\n"); return IRQ_HANDLED; } if (sport->tx_callback) { sport->tx_callback(sport->tx_data); return IRQ_HANDLED; } return IRQ_NONE; } static irqreturn_t err_handler(int irq, void *dev_id) { unsigned int status = 0; struct sport_device *sport = dev_id; pr_debug("%s\n", __func__); if (sport_check_status(sport, &status, NULL, NULL)) { pr_err("error checking status ??"); return IRQ_NONE; } if (status & (TOVF|TUVF|ROVF|RUVF)) { pr_info("sport status error:%s%s%s%s\n", status & TOVF ? " TOVF" : "", status & TUVF ? " TUVF" : "", status & ROVF ? " ROVF" : "", status & RUVF ? 
" RUVF" : ""); if (status & TOVF || status & TUVF) { disable_dma(sport->dma_tx_chan); if (sport->tx_run) sport_tx_dma_start(sport, 0); else sport_tx_dma_start(sport, 1); enable_dma(sport->dma_tx_chan); } else { disable_dma(sport->dma_rx_chan); if (sport->rx_run) sport_rx_dma_start(sport, 0); else sport_rx_dma_start(sport, 1); enable_dma(sport->dma_rx_chan); } } status = sport->regs->stat; if (status & (TOVF|TUVF|ROVF|RUVF)) sport->regs->stat = (status & (TOVF|TUVF|ROVF|RUVF)); SSYNC(); if (sport->err_callback) sport->err_callback(sport->err_data); return IRQ_HANDLED; } int sport_set_rx_callback(struct sport_device *sport, void (*rx_callback)(void *), void *rx_data) { BUG_ON(rx_callback == NULL); sport->rx_callback = rx_callback; sport->rx_data = rx_data; return 0; } EXPORT_SYMBOL(sport_set_rx_callback); int sport_set_tx_callback(struct sport_device *sport, void (*tx_callback)(void *), void *tx_data) { BUG_ON(tx_callback == NULL); sport->tx_callback = tx_callback; sport->tx_data = tx_data; return 0; } EXPORT_SYMBOL(sport_set_tx_callback); int sport_set_err_callback(struct sport_device *sport, void (*err_callback)(void *), void *err_data) { BUG_ON(err_callback == NULL); sport->err_callback = err_callback; sport->err_data = err_data; return 0; } EXPORT_SYMBOL(sport_set_err_callback); static int sport_config_pdev(struct platform_device *pdev, struct sport_param *param) { /* Extract settings from platform data */ struct device *dev = &pdev->dev; struct bfin_snd_platform_data *pdata = dev->platform_data; struct resource *res; param->num = pdev->id; if (!pdata) { dev_err(dev, "no platform_data\n"); return -ENODEV; } param->pin_req = pdata->pin_req; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "no MEM resource\n"); return -ENODEV; } param->regs = (struct sport_register *)res->start; /* first RX, then TX */ res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!res) { dev_err(dev, "no rx DMA resource\n"); return -ENODEV; } 
param->dma_rx_chan = res->start; res = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (!res) { dev_err(dev, "no tx DMA resource\n"); return -ENODEV; } param->dma_tx_chan = res->start; res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(dev, "no irq resource\n"); return -ENODEV; } param->err_irq = res->start; return 0; } struct sport_device *sport_init(struct platform_device *pdev, unsigned int wdsize, unsigned int dummy_count, size_t priv_size) { struct device *dev = &pdev->dev; struct sport_param param; struct sport_device *sport; int ret; dev_dbg(dev, "%s enter\n", __func__); param.wdsize = wdsize; param.dummy_count = dummy_count; BUG_ON(param.wdsize == 0 || param.dummy_count == 0); ret = sport_config_pdev(pdev, &param); if (ret) return NULL; if (peripheral_request_list(param.pin_req, "soc-audio")) { dev_err(dev, "requesting Peripherals failed\n"); return NULL; } sport = kzalloc(sizeof(*sport), GFP_KERNEL); if (!sport) { dev_err(dev, "failed to allocate for sport device\n"); goto __init_err0; } sport->num = param.num; sport->dma_rx_chan = param.dma_rx_chan; sport->dma_tx_chan = param.dma_tx_chan; sport->err_irq = param.err_irq; sport->regs = param.regs; sport->pin_req = param.pin_req; if (request_dma(sport->dma_rx_chan, "SPORT RX Data") == -EBUSY) { dev_err(dev, "failed to request RX dma %d\n", sport->dma_rx_chan); goto __init_err1; } if (set_dma_callback(sport->dma_rx_chan, rx_handler, sport) != 0) { dev_err(dev, "failed to request RX irq %d\n", sport->dma_rx_chan); goto __init_err2; } if (request_dma(sport->dma_tx_chan, "SPORT TX Data") == -EBUSY) { dev_err(dev, "failed to request TX dma %d\n", sport->dma_tx_chan); goto __init_err2; } if (set_dma_callback(sport->dma_tx_chan, tx_handler, sport) != 0) { dev_err(dev, "failed to request TX irq %d\n", sport->dma_tx_chan); goto __init_err3; } if (request_irq(sport->err_irq, err_handler, IRQF_SHARED, "SPORT err", sport) < 0) { dev_err(dev, "failed to request err irq %d\n", sport->err_irq); 
goto __init_err3; } dev_info(dev, "dma rx:%d tx:%d, err irq:%d, regs:%p\n", sport->dma_rx_chan, sport->dma_tx_chan, sport->err_irq, sport->regs); sport->wdsize = param.wdsize; sport->dummy_count = param.dummy_count; sport->private_data = kzalloc(priv_size, GFP_KERNEL); if (!sport->private_data) { dev_err(dev, "could not alloc priv data %zu bytes\n", priv_size); goto __init_err4; } if (L1_DATA_A_LENGTH) sport->dummy_buf = l1_data_sram_zalloc(param.dummy_count * 2); else sport->dummy_buf = kzalloc(param.dummy_count * 2, GFP_KERNEL); if (sport->dummy_buf == NULL) { dev_err(dev, "failed to allocate dummy buffer\n"); goto __error1; } ret = sport_config_rx_dummy(sport); if (ret) { dev_err(dev, "failed to config rx dummy ring\n"); goto __error2; } ret = sport_config_tx_dummy(sport); if (ret) { dev_err(dev, "failed to config tx dummy ring\n"); goto __error3; } platform_set_drvdata(pdev, sport); return sport; __error3: if (L1_DATA_A_LENGTH) l1_data_sram_free(sport->dummy_rx_desc); else dma_free_coherent(NULL, 2*sizeof(struct dmasg), sport->dummy_rx_desc, 0); __error2: if (L1_DATA_A_LENGTH) l1_data_sram_free(sport->dummy_buf); else kfree(sport->dummy_buf); __error1: kfree(sport->private_data); __init_err4: free_irq(sport->err_irq, sport); __init_err3: free_dma(sport->dma_tx_chan); __init_err2: free_dma(sport->dma_rx_chan); __init_err1: kfree(sport); __init_err0: peripheral_free_list(param.pin_req); return NULL; } EXPORT_SYMBOL(sport_init); void sport_done(struct sport_device *sport) { if (sport == NULL) return; sport_stop(sport); if (sport->dma_rx_desc) dma_free_coherent(NULL, sport->rx_desc_bytes, sport->dma_rx_desc, 0); if (sport->dma_tx_desc) dma_free_coherent(NULL, sport->tx_desc_bytes, sport->dma_tx_desc, 0); #if L1_DATA_A_LENGTH != 0 l1_data_sram_free(sport->dummy_rx_desc); l1_data_sram_free(sport->dummy_tx_desc); l1_data_sram_free(sport->dummy_buf); #else dma_free_coherent(NULL, 2*sizeof(struct dmasg), sport->dummy_rx_desc, 0); dma_free_coherent(NULL, 2*sizeof(struct 
dmasg), sport->dummy_tx_desc, 0); kfree(sport->dummy_buf); #endif free_dma(sport->dma_rx_chan); free_dma(sport->dma_tx_chan); free_irq(sport->err_irq, sport); kfree(sport->private_data); peripheral_free_list(sport->pin_req); kfree(sport); } EXPORT_SYMBOL(sport_done); /* * It is only used to send several bytes when dma is not enabled * sport controller is configured but not enabled. * Multichannel cannot works with pio mode */ /* Used by ac97 to write and read codec register */ int sport_send_and_recv(struct sport_device *sport, u8 *out_data, \ u8 *in_data, int len) { unsigned short dma_config; unsigned short status; unsigned long flags; unsigned long wait = 0; pr_debug("%s enter, out_data:%p, in_data:%p len:%d\n", \ __func__, out_data, in_data, len); pr_debug("tcr1:0x%04x, tcr2:0x%04x, tclkdiv:0x%04x, tfsdiv:0x%04x\n" "mcmc1:0x%04x, mcmc2:0x%04x\n", sport->regs->tcr1, sport->regs->tcr2, sport->regs->tclkdiv, sport->regs->tfsdiv, sport->regs->mcmc1, sport->regs->mcmc2); flush_dcache_range((unsigned)out_data, (unsigned)(out_data + len)); /* Enable tx dma */ dma_config = (RESTART | WDSIZE_16 | DI_EN); set_dma_start_addr(sport->dma_tx_chan, (unsigned long)out_data); set_dma_x_count(sport->dma_tx_chan, len/2); set_dma_x_modify(sport->dma_tx_chan, 2); set_dma_config(sport->dma_tx_chan, dma_config); enable_dma(sport->dma_tx_chan); if (in_data != NULL) { invalidate_dcache_range((unsigned)in_data, \ (unsigned)(in_data + len)); /* Enable rx dma */ dma_config = (RESTART | WDSIZE_16 | WNR | DI_EN); set_dma_start_addr(sport->dma_rx_chan, (unsigned long)in_data); set_dma_x_count(sport->dma_rx_chan, len/2); set_dma_x_modify(sport->dma_rx_chan, 2); set_dma_config(sport->dma_rx_chan, dma_config); enable_dma(sport->dma_rx_chan); } local_irq_save(flags); sport->regs->tcr1 |= TSPEN; sport->regs->rcr1 |= RSPEN; SSYNC(); status = get_dma_curr_irqstat(sport->dma_tx_chan); while (status & DMA_RUN) { udelay(1); status = get_dma_curr_irqstat(sport->dma_tx_chan); pr_debug("DMA 
status:0x%04x\n", status); if (wait++ > 100) goto __over; } status = sport->regs->stat; wait = 0; while (!(status & TXHRE)) { pr_debug("sport status:0x%04x\n", status); udelay(1); status = *(unsigned short *)&sport->regs->stat; if (wait++ > 1000) goto __over; } /* Wait for the last byte sent out */ udelay(20); pr_debug("sport status:0x%04x\n", status); __over: sport->regs->tcr1 &= ~TSPEN; sport->regs->rcr1 &= ~RSPEN; SSYNC(); disable_dma(sport->dma_tx_chan); /* Clear the status */ clear_dma_irqstat(sport->dma_tx_chan); if (in_data != NULL) { disable_dma(sport->dma_rx_chan); clear_dma_irqstat(sport->dma_rx_chan); } SSYNC(); local_irq_restore(flags); return 0; } EXPORT_SYMBOL(sport_send_and_recv); MODULE_AUTHOR("Roy Huang"); MODULE_DESCRIPTION("SPORT driver for ADI Blackfin"); MODULE_LICENSE("GPL");
gpl-2.0
grogg/platform_device_lge_hammerhead-kernel_kernel
net/rose/rose_in.c
9277
7641
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 *
 * Most of this code is based on the SDL diagrams published in the 7th ARRL
 * Computer Networking Conference papers. The diagrams have mistakes in them,
 * but are mostly correct. Before you modify the code could you read the SDL
 * diagrams as the code is not obvious and probably very easy to break.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/rose.h>

/*
 * State machine for state 1, Awaiting Call Accepted State.
 * The handling of the timer(s) is in file rose_timer.c.
 * Handling of state 0 and connection release is in af_rose.c.
 */
static int rose_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
	struct rose_sock *rose = rose_sk(sk);

	switch (frametype) {
	case ROSE_CALL_ACCEPTED:
		/* Call established: reset all sequence-number state and move
		 * the socket to the connected state.
		 */
		rose_stop_timer(sk);
		rose_start_idletimer(sk);
		rose->condition = 0x00;
		rose->vs        = 0;
		rose->va        = 0;
		rose->vr        = 0;
		rose->vl        = 0;
		rose->state     = ROSE_STATE_3;
		sk->sk_state	= TCP_ESTABLISHED;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		break;

	case ROSE_CLEAR_REQUEST:
		rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
		/* NOTE(review): data[3]/data[4] are read as the clear
		 * cause/diagnostic bytes with no length check here; assumes
		 * the decoder has already validated the frame length.
		 */
		rose_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
		rose->neighbour->use--;
		break;

	default:
		break;
	}

	return 0;
}

/*
 * State machine for state 2, Awaiting Clear Confirmation State.
 * The handling of the timer(s) is in file rose_timer.c
 * Handling of state 0 and connection release is in af_rose.c.
 */
static int rose_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
	struct rose_sock *rose = rose_sk(sk);

	switch (frametype) {
	case ROSE_CLEAR_REQUEST:
		rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
		rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
		rose->neighbour->use--;
		break;

	case ROSE_CLEAR_CONFIRMATION:
		/* -1/-1: no cause/diagnostic available for a confirmation. */
		rose_disconnect(sk, 0, -1, -1);
		rose->neighbour->use--;
		break;

	default:
		break;
	}

	return 0;
}

/*
 * State machine for state 3, Connected State.
 * The handling of the timer(s) is in file rose_timer.c
 * Handling of state 0 and connection release is in af_rose.c.
 */
static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
{
	struct rose_sock *rose = rose_sk(sk);
	int queued = 0;

	switch (frametype) {
	case ROSE_RESET_REQUEST:
		/* Peer reset: acknowledge and restart the sequence numbers,
		 * then requeue unacknowledged frames for retransmission.
		 */
		rose_stop_timer(sk);
		rose_start_idletimer(sk);
		rose_write_internal(sk, ROSE_RESET_CONFIRMATION);
		rose->condition = 0x00;
		rose->vs        = 0;
		rose->vr        = 0;
		rose->va        = 0;
		rose->vl        = 0;
		rose_requeue_frames(sk);
		break;

	case ROSE_CLEAR_REQUEST:
		rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
		rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
		rose->neighbour->use--;
		break;

	case ROSE_RR:
	case ROSE_RNR:
		if (!rose_validate_nr(sk, nr)) {
			/* Invalid N(R): request a reset and wait for the
			 * confirmation in state 4.
			 */
			rose_write_internal(sk, ROSE_RESET_REQUEST);
			rose->condition = 0x00;
			rose->vs        = 0;
			rose->vr        = 0;
			rose->va        = 0;
			rose->vl        = 0;
			rose->state     = ROSE_STATE_4;
			rose_start_t2timer(sk);
			rose_stop_idletimer(sk);
		} else {
			rose_frames_acked(sk, nr);
			if (frametype == ROSE_RNR) {
				rose->condition |= ROSE_COND_PEER_RX_BUSY;
			} else {
				rose->condition &= ~ROSE_COND_PEER_RX_BUSY;
			}
		}
		break;

	case ROSE_DATA:	/* XXX */
		rose->condition &= ~ROSE_COND_PEER_RX_BUSY;
		if (!rose_validate_nr(sk, nr)) {
			rose_write_internal(sk, ROSE_RESET_REQUEST);
			rose->condition = 0x00;
			rose->vs        = 0;
			rose->vr        = 0;
			rose->va        = 0;
			rose->vl        = 0;
			rose->state     = ROSE_STATE_4;
			rose_start_t2timer(sk);
			rose_stop_idletimer(sk);
			break;
		}
		rose_frames_acked(sk, nr);
		if (ns == rose->vr) {
			/* In-sequence frame: deliver it to the socket. */
			rose_start_idletimer(sk);
			if (sock_queue_rcv_skb(sk, skb) == 0) {
				rose->vr = (rose->vr + 1) % ROSE_MODULUS;
				queued = 1;
			} else {
				/* Should never happen ! */
				rose_write_internal(sk, ROSE_RESET_REQUEST);
				rose->condition = 0x00;
				rose->vs        = 0;
				rose->vr        = 0;
				rose->va        = 0;
				rose->vl        = 0;
				rose->state     = ROSE_STATE_4;
				rose_start_t2timer(sk);
				rose_stop_idletimer(sk);
				break;
			}
			if (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))
				rose->condition |= ROSE_COND_OWN_RX_BUSY;
		}
		/*
		 * If the window is full, ack the frame, else start the
		 * acknowledge hold back timer.
		 */
		if (((rose->vl + sysctl_rose_window_size) % ROSE_MODULUS) == rose->vr) {
			rose->condition &= ~ROSE_COND_ACK_PENDING;
			rose_stop_timer(sk);
			rose_enquiry_response(sk);
		} else {
			rose->condition |= ROSE_COND_ACK_PENDING;
			rose_start_hbtimer(sk);
		}
		break;

	default:
		printk(KERN_WARNING "ROSE: unknown %02X in state 3\n", frametype);
		break;
	}

	return queued;
}

/*
 * State machine for state 4, Awaiting Reset Confirmation State.
 * The handling of the timer(s) is in file rose_timer.c
 * Handling of state 0 and connection release is in af_rose.c.
 */
static int rose_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
	struct rose_sock *rose = rose_sk(sk);

	switch (frametype) {
	case ROSE_RESET_REQUEST:
		rose_write_internal(sk, ROSE_RESET_CONFIRMATION);
		/* fall through: a peer reset request is handled exactly like
		 * receiving the confirmation we were waiting for. */
	case ROSE_RESET_CONFIRMATION:
		rose_stop_timer(sk);
		rose_start_idletimer(sk);
		rose->condition = 0x00;
		rose->va        = 0;
		rose->vr        = 0;
		rose->vs        = 0;
		rose->vl        = 0;
		rose->state     = ROSE_STATE_3;
		rose_requeue_frames(sk);
		break;

	case ROSE_CLEAR_REQUEST:
		rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
		rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
		rose->neighbour->use--;
		break;

	default:
		break;
	}

	return 0;
}

/*
 * State machine for state 5, Awaiting Call Acceptance State.
 * The handling of the timer(s) is in file rose_timer.c
 * Handling of state 0 and connection release is in af_rose.c.
 */
static int rose_state5_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
	if (frametype == ROSE_CLEAR_REQUEST) {
		rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
		rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
		rose_sk(sk)->neighbour->use--;
	}

	return 0;
}

/* Higher level upcall for a LAPB frame.
 * Decodes the frame and dispatches it to the per-state handler; returns
 * non-zero when the skb was queued to the socket (caller must not free it).
 */
int rose_process_rx_frame(struct sock *sk, struct sk_buff *skb)
{
	struct rose_sock *rose = rose_sk(sk);
	int queued = 0, frametype, ns, nr, q, d, m;

	if (rose->state == ROSE_STATE_0)
		return 0;

	frametype = rose_decode(skb, &ns, &nr, &q, &d, &m);

	switch (rose->state) {
	case ROSE_STATE_1:
		queued = rose_state1_machine(sk, skb, frametype);
		break;
	case ROSE_STATE_2:
		queued = rose_state2_machine(sk, skb, frametype);
		break;
	case ROSE_STATE_3:
		queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
		break;
	case ROSE_STATE_4:
		queued = rose_state4_machine(sk, skb, frametype);
		break;
	case ROSE_STATE_5:
		queued = rose_state5_machine(sk, skb, frametype);
		break;
	}

	/* Kick the transmit side in case acknowledgements opened the window. */
	rose_kick(sk);

	return queued;
}
gpl-2.0
J-Team/android_kernel_samsung_u8500
arch/x86/mm/kmemcheck/kmemcheck.c
10813
14463
/**
 * kmemcheck - a heavyweight memory checker for the linux kernel
 * Copyright (C) 2007, 2008  Vegard Nossum <vegardno@ifi.uio.no>
 * (With a lot of help from Ingo Molnar and Pekka Enberg.)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2) as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/cacheflush.h>
#include <asm/kmemcheck.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "error.h"
#include "opcode.h"
#include "pte.h"
#include "selftest.h"
#include "shadow.h"

/* Default mode chosen at build time: 0 = off, 1 = on, 2 = one-shot
 * (disable itself after the first report). */
#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT
#  define KMEMCHECK_ENABLED 0
#endif

#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT
#  define KMEMCHECK_ENABLED 1
#endif

#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT
#  define KMEMCHECK_ENABLED 2
#endif

int kmemcheck_enabled = KMEMCHECK_ENABLED;

int __init kmemcheck_init(void)
{
#ifdef CONFIG_SMP
	/*
	 * Limit SMP to use a single CPU. We rely on the fact that this code
	 * runs before SMP is set up.
	 */
	if (setup_max_cpus > 1) {
		printk(KERN_INFO
			"kmemcheck: Limiting number of CPUs to 1.\n");
		setup_max_cpus = 1;
	}
#endif

	if (!kmemcheck_selftest()) {
		printk(KERN_INFO "kmemcheck: self-tests failed; disabling\n");
		kmemcheck_enabled = 0;
		return -EINVAL;
	}

	printk(KERN_INFO "kmemcheck: Initialized\n");
	return 0;
}

early_initcall(kmemcheck_init);

/*
 * We need to parse the kmemcheck= option before any memory is allocated.
 */
static int __init param_kmemcheck(char *str)
{
	if (!str)
		return -EINVAL;

	sscanf(str, "%d", &kmemcheck_enabled);
	return 0;
}

early_param("kmemcheck", param_kmemcheck);

/* Re-enable the PTE for @address so the faulting access can proceed.
 * Returns 1 if the page was tracked by kmemcheck, 0 otherwise. */
int kmemcheck_show_addr(unsigned long address)
{
	pte_t *pte;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return 0;

	set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
	__flush_tlb_one(address);
	return 1;
}

/* Clear the present bit again so the next access to @address faults.
 * Returns 1 if the page was tracked by kmemcheck, 0 otherwise. */
int kmemcheck_hide_addr(unsigned long address)
{
	pte_t *pte;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return 0;

	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
	__flush_tlb_one(address);
	return 1;
}

/* Per-CPU bookkeeping for one in-flight faulting instruction. */
struct kmemcheck_context {
	bool busy;	/* guards against recursive faults */
	int balance;	/* +1 while pages are shown, 0 when hidden */

	/*
	 * There can be at most two memory operands to an instruction, but
	 * each address can cross a page boundary -- so we may need up to
	 * four addresses that must be hidden/revealed for each fault.
	 */
	unsigned long addr[4];
	unsigned long n_addrs;
	unsigned long flags;	/* saved regs->flags to restore after single-step */

	/* Data size of the instruction that caused a fault. */
	unsigned int size;
};

static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context);

bool kmemcheck_active(struct pt_regs *regs)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);

	return data->balance > 0;
}

/* Save an address that needs to be shown/hidden */
static void kmemcheck_save_addr(unsigned long addr)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);

	BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr));
	data->addr[data->n_addrs++] = addr;
}

/* Show all saved addresses; returns how many were actually tracked. */
static unsigned int kmemcheck_show_all(void)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
	unsigned int i;
	unsigned int n;

	n = 0;
	for (i = 0; i < data->n_addrs; ++i)
		n += kmemcheck_show_addr(data->addr[i]);

	return n;
}

/* Hide all saved addresses; returns how many were actually tracked. */
static unsigned int kmemcheck_hide_all(void)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
	unsigned int i;
	unsigned int n;

	n = 0;
	for (i = 0; i < data->n_addrs; ++i)
		n += kmemcheck_hide_addr(data->addr[i]);

	return n;
}

/*
 * Called from the #PF handler.
 */
void kmemcheck_show(struct pt_regs *regs)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);

	BUG_ON(!irqs_disabled());

	if (unlikely(data->balance != 0)) {
		kmemcheck_show_all();
		kmemcheck_error_save_bug(regs);
		data->balance = 0;
		return;
	}

	/*
	 * None of the addresses actually belonged to kmemcheck. Note that
	 * this is not an error.
	 */
	if (kmemcheck_show_all() == 0)
		return;

	++data->balance;

	/*
	 * The IF needs to be cleared as well, so that the faulting
	 * instruction can run "uninterrupted". Otherwise, we might take
	 * an interrupt and start executing that before we've had a chance
	 * to hide the page again.
	 *
	 * NOTE: In the rare case of multiple faults, we must not override
	 * the original flags:
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		data->flags = regs->flags;

	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
}

/*
 * Called from the #DB handler.
 */
void kmemcheck_hide(struct pt_regs *regs)
{
	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
	int n;

	BUG_ON(!irqs_disabled());

	if (unlikely(data->balance != 1)) {
		kmemcheck_show_all();
		kmemcheck_error_save_bug(regs);
		data->n_addrs = 0;
		data->balance = 0;

		if (!(data->flags & X86_EFLAGS_TF))
			regs->flags &= ~X86_EFLAGS_TF;
		if (data->flags & X86_EFLAGS_IF)
			regs->flags |= X86_EFLAGS_IF;
		return;
	}

	/* If kmemcheck was runtime-disabled, leave the pages visible. */
	if (kmemcheck_enabled)
		n = kmemcheck_hide_all();
	else
		n = kmemcheck_show_all();

	if (n == 0)
		return;

	--data->balance;

	data->n_addrs = 0;

	/* Restore the flags we saved before single-stepping. */
	if (!(data->flags & X86_EFLAGS_TF))
		regs->flags &= ~X86_EFLAGS_TF;
	if (data->flags & X86_EFLAGS_IF)
		regs->flags |= X86_EFLAGS_IF;
}

/* Make @n pages starting at @p permanently visible (untracked). */
void kmemcheck_show_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i) {
		unsigned long address;
		pte_t *pte;
		unsigned int level;

		address = (unsigned long) page_address(&p[i]);
		pte = lookup_address(address, &level);
		BUG_ON(!pte);
		BUG_ON(level != PG_LEVEL_4K);

		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN));
		__flush_tlb_one(address);
	}
}

bool kmemcheck_page_is_tracked(struct page *p)
{
	/* This will also check the "hidden" flag of the PTE. */
	return kmemcheck_pte_lookup((unsigned long) page_address(p));
}

/* Make @n pages starting at @p non-present so accesses fault into kmemcheck. */
void kmemcheck_hide_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i) {
		unsigned long address;
		pte_t *pte;
		unsigned int level;

		address = (unsigned long) page_address(&p[i]);
		pte = lookup_address(address, &level);
		BUG_ON(!pte);
		BUG_ON(level != PG_LEVEL_4K);

		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
		set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN));
		__flush_tlb_one(address);
	}
}

/* Access may NOT cross page boundary */
static void kmemcheck_read_strict(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	void *shadow;
	enum kmemcheck_shadow status;

	shadow = kmemcheck_shadow_lookup(addr);
	if (!shadow)
		return;

	kmemcheck_save_addr(addr);
	status = kmemcheck_shadow_test(shadow, size);
	if (status == KMEMCHECK_SHADOW_INITIALIZED)
		return;

	if (kmemcheck_enabled)
		kmemcheck_error_save(status, addr, size, regs);

	/* One-shot mode: disable after the first report. */
	if (kmemcheck_enabled == 2)
		kmemcheck_enabled = 0;

	/* Don't warn about it again. */
	kmemcheck_shadow_set(shadow, size);
}

bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
	enum kmemcheck_shadow status;
	void *shadow;

	shadow = kmemcheck_shadow_lookup(addr);
	if (!shadow)
		return true;

	status = kmemcheck_shadow_test_all(shadow, size);

	return status == KMEMCHECK_SHADOW_INITIALIZED;
}

/* Access may cross page boundary */
static void kmemcheck_read(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	unsigned long page = addr & PAGE_MASK;
	unsigned long next_addr = addr + size - 1;
	unsigned long next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		kmemcheck_read_strict(regs, addr, size);
		return;
	}

	/*
	 * What we do is basically to split the access across the
	 * two pages and handle each part separately. Yes, this means
	 * that we may now see reads that are 3 + 5 bytes, for
	 * example (and if both are uninitialized, there will be two
	 * reports), but it makes the code a lot simpler.
	 */
	kmemcheck_read_strict(regs, addr, next_page - addr);
	kmemcheck_read_strict(regs, next_page, next_addr - next_page);
}

/* Mark a write target initialized; may NOT cross a page boundary. */
static void kmemcheck_write_strict(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	void *shadow;

	shadow = kmemcheck_shadow_lookup(addr);
	if (!shadow)
		return;

	kmemcheck_save_addr(addr);
	kmemcheck_shadow_set(shadow, size);
}

/* Mark a write target initialized; may cross a page boundary. */
static void kmemcheck_write(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	unsigned long page = addr & PAGE_MASK;
	unsigned long next_addr = addr + size - 1;
	unsigned long next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		kmemcheck_write_strict(regs, addr, size);
		return;
	}

	/* See comment in kmemcheck_read(). */
	kmemcheck_write_strict(regs, addr, next_page - addr);
	kmemcheck_write_strict(regs, next_page, next_addr - next_page);
}

/*
 * Copying is hard. We have two addresses, each of which may be split across
 * a page (and each page will have different shadow addresses).
 */
static void kmemcheck_copy(struct pt_regs *regs,
	unsigned long src_addr, unsigned long dst_addr, unsigned int size)
{
	uint8_t shadow[8];
	enum kmemcheck_shadow status;

	unsigned long page;
	unsigned long next_addr;
	unsigned long next_page;

	uint8_t *x;
	unsigned int i;
	unsigned int n;

	BUG_ON(size > sizeof(shadow));

	/* First phase: gather the source's shadow bytes into a local copy,
	 * treating untracked memory as fully initialized. */
	page = src_addr & PAGE_MASK;
	next_addr = src_addr + size - 1;
	next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		/* Same page */
		x = kmemcheck_shadow_lookup(src_addr);
		if (x) {
			kmemcheck_save_addr(src_addr);
			for (i = 0; i < size; ++i)
				shadow[i] = x[i];
		} else {
			for (i = 0; i < size; ++i)
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
		}
	} else {
		n = next_page - src_addr;
		BUG_ON(n > sizeof(shadow));

		/* First page */
		x = kmemcheck_shadow_lookup(src_addr);
		if (x) {
			kmemcheck_save_addr(src_addr);
			for (i = 0; i < n; ++i)
				shadow[i] = x[i];
		} else {
			/* Not tracked */
			for (i = 0; i < n; ++i)
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
		}

		/* Second page */
		x = kmemcheck_shadow_lookup(next_page);
		if (x) {
			kmemcheck_save_addr(next_page);
			for (i = n; i < size; ++i)
				shadow[i] = x[i - n];
		} else {
			/* Not tracked */
			for (i = n; i < size; ++i)
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
		}
	}

	/* Second phase: propagate the gathered shadow to the destination
	 * (the copy makes the destination exactly as initialized as the
	 * source was). */
	page = dst_addr & PAGE_MASK;
	next_addr = dst_addr + size - 1;
	next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		/* Same page */
		x = kmemcheck_shadow_lookup(dst_addr);
		if (x) {
			kmemcheck_save_addr(dst_addr);
			for (i = 0; i < size; ++i) {
				x[i] = shadow[i];
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
			}
		}
	} else {
		n = next_page - dst_addr;
		BUG_ON(n > sizeof(shadow));

		/* First page */
		x = kmemcheck_shadow_lookup(dst_addr);
		if (x) {
			kmemcheck_save_addr(dst_addr);
			for (i = 0; i < n; ++i) {
				x[i] = shadow[i];
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
			}
		}

		/* Second page */
		x = kmemcheck_shadow_lookup(next_page);
		if (x) {
			kmemcheck_save_addr(next_page);
			for (i = n; i < size; ++i) {
				x[i - n] = shadow[i];
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
			}
		}
	}

	/* Report an uninitialized read of the source, if any. */
	status = kmemcheck_shadow_test(shadow, size);
	if (status == KMEMCHECK_SHADOW_INITIALIZED)
		return;

	if (kmemcheck_enabled)
		kmemcheck_error_save(status, src_addr, size, regs);

	if (kmemcheck_enabled == 2)
		kmemcheck_enabled = 0;
}

enum kmemcheck_method {
	KMEMCHECK_READ,
	KMEMCHECK_WRITE,
};

/* Decode the faulting instruction and check/track its memory operands;
 * falls back to the page-fault address/type when the opcode is ordinary. */
static void kmemcheck_access(struct pt_regs *regs,
	unsigned long fallback_address, enum kmemcheck_method fallback_method)
{
	const uint8_t *insn;
	const uint8_t *insn_primary;
	unsigned int size;

	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);

	/* Recursive fault -- ouch. */
	if (data->busy) {
		kmemcheck_show_addr(fallback_address);
		kmemcheck_error_save_bug(regs);
		return;
	}

	data->busy = true;

	insn = (const uint8_t *) regs->ip;
	insn_primary = kmemcheck_opcode_get_primary(insn);

	kmemcheck_opcode_decode(insn, &size);

	switch (insn_primary[0]) {
#ifdef CONFIG_KMEMCHECK_BITOPS_OK
		/* AND, OR, XOR */
		/*
		 * Unfortunately, these instructions have to be excluded from
		 * our regular checking since they access only some (and not
		 * all) bits. This clears out "bogus" bitfield-access warnings.
		 */
	case 0x80:
	case 0x81:
	case 0x82:
	case 0x83:
		switch ((insn_primary[1] >> 3) & 7) {
			/* OR */
		case 1:
			/* AND */
		case 4:
			/* XOR */
		case 6:
			kmemcheck_write(regs, fallback_address, size);
			goto out;

			/* ADD */
		case 0:
			/* ADC */
		case 2:
			/* SBB */
		case 3:
			/* SUB */
		case 5:
			/* CMP */
		case 7:
			break;
		}
		break;
#endif

		/* MOVS, MOVSB, MOVSW, MOVSD */
	case 0xa4:
	case 0xa5:
		/*
		 * These instructions are special because they take two
		 * addresses, but we only get one page fault.
		 */
		kmemcheck_copy(regs, regs->si, regs->di, size);
		goto out;

		/* CMPS, CMPSB, CMPSW, CMPSD */
	case 0xa6:
	case 0xa7:
		kmemcheck_read(regs, regs->si, size);
		kmemcheck_read(regs, regs->di, size);
		goto out;
	}

	/*
	 * If the opcode isn't special in any way, we use the data from the
	 * page fault handler to determine the address and type of memory
	 * access.
	 */
	switch (fallback_method) {
	case KMEMCHECK_READ:
		kmemcheck_read(regs, fallback_address, size);
		goto out;
	case KMEMCHECK_WRITE:
		kmemcheck_write(regs, fallback_address, size);
		goto out;
	}

out:
	data->busy = false;
}

/* Page-fault hook: returns true if kmemcheck handled the fault. */
bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
	unsigned long error_code)
{
	pte_t *pte;

	/*
	 * XXX: Is it safe to assume that memory accesses from virtual 86
	 * mode or non-kernel code segments will _never_ access kernel
	 * memory (e.g. tracked pages)? For now, we need this to avoid
	 * invoking kmemcheck for PnP BIOS calls.
	 */
	if (regs->flags & X86_VM_MASK)
		return false;
	if (regs->cs != __KERNEL_CS)
		return false;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return false;

	WARN_ON_ONCE(in_nmi());

	/* Bit 1 of the x86 #PF error code distinguishes write from read. */
	if (error_code & 2)
		kmemcheck_access(regs, address, KMEMCHECK_WRITE);
	else
		kmemcheck_access(regs, address, KMEMCHECK_READ);

	kmemcheck_show(regs);
	return true;
}

/* Debug-trap hook: re-hide the pages after the single-stepped instruction. */
bool kmemcheck_trap(struct pt_regs *regs)
{
	if (!kmemcheck_active(regs))
		return false;

	/* We're done. */
	kmemcheck_hide(regs);
	return true;
}
gpl-2.0
utopykzebulon/android_kernel_msm7x30-3.0
net/sctp/command.c
12349
2384
/* SCTP kernel implementation Copyright (C) 1999-2001 * Cisco, Motorola, and IBM * Copyright 2001 La Monte H.P. Yarroll * * This file is part of the SCTP kernel implementation * * These functions manipulate sctp command sequences. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <lksctp-developers@lists.sourceforge.net> * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * * Any bugs reported given to us we will try to fix... any fixes shared will * be incorporated into the next SCTP release. */ #include <linux/types.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> /* Initialize a block of memory as a command sequence. */ int sctp_init_cmd_seq(sctp_cmd_seq_t *seq) { memset(seq, 0, sizeof(sctp_cmd_seq_t)); return 1; /* We always succeed. */ } /* Add a command to a sctp_cmd_seq_t. * Return 0 if the command sequence is full. 
*/ void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj) { BUG_ON(seq->next_free_slot >= SCTP_MAX_NUM_COMMANDS); seq->cmds[seq->next_free_slot].verb = verb; seq->cmds[seq->next_free_slot++].obj = obj; } /* Return the next command structure in a sctp_cmd_seq. * Returns NULL at the end of the sequence. */ sctp_cmd_t *sctp_next_cmd(sctp_cmd_seq_t *seq) { sctp_cmd_t *retval = NULL; if (seq->next_cmd < seq->next_free_slot) retval = &seq->cmds[seq->next_cmd++]; return retval; }
gpl-2.0
thicklizard/Komodo
arch/mips/cobalt/lcd.c
13885
1549
/* * Registration of Cobalt LCD platform device. * * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/errno.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> static struct resource cobalt_lcd_resource __initdata = { .start = 0x1f000000, .end = 0x1f00001f, .flags = IORESOURCE_MEM, }; static __init int cobalt_lcd_add(void) { struct platform_device *pdev; int retval; pdev = platform_device_alloc("cobalt-lcd", -1); if (!pdev) return -ENOMEM; retval = platform_device_add_resources(pdev, &cobalt_lcd_resource, 1); if (retval) goto err_free_device; retval = platform_device_add(pdev); if (retval) goto err_free_device; return 0; err_free_device: platform_device_put(pdev); return retval; } device_initcall(cobalt_lcd_add);
gpl-2.0
arter97/android_kernel_google_msm
arch/mips/cobalt/lcd.c
13885
1549
/* * Registration of Cobalt LCD platform device. * * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/errno.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> static struct resource cobalt_lcd_resource __initdata = { .start = 0x1f000000, .end = 0x1f00001f, .flags = IORESOURCE_MEM, }; static __init int cobalt_lcd_add(void) { struct platform_device *pdev; int retval; pdev = platform_device_alloc("cobalt-lcd", -1); if (!pdev) return -ENOMEM; retval = platform_device_add_resources(pdev, &cobalt_lcd_resource, 1); if (retval) goto err_free_device; retval = platform_device_add(pdev); if (retval) goto err_free_device; return 0; err_free_device: platform_device_put(pdev); return retval; } device_initcall(cobalt_lcd_add);
gpl-2.0
kgugala/linux
lib/ts_fsm.c
13885
10866
/* * lib/ts_fsm.c A naive finite state machine text search approach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Thomas Graf <tgraf@suug.ch> * * ========================================================================== * * A finite state machine consists of n states (struct ts_fsm_token) * representing the pattern as a finite automation. The data is read * sequentially on an octet basis. Every state token specifies the number * of recurrences and the type of value accepted which can be either a * specific character or ctype based set of characters. The available * type of recurrences include 1, (0|1), [0 n], and [1 n]. * * The algorithm differs between strict/non-strict mode specifying * whether the pattern has to start at the first octet. Strict mode * is enabled by default and can be disabled by inserting * TS_FSM_HEAD_IGNORE as the first token in the chain. * * The runtime performance of the algorithm should be around O(n), * however while in strict mode the average runtime can be better. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/textsearch.h> #include <linux/textsearch_fsm.h> struct ts_fsm { unsigned int ntokens; struct ts_fsm_token tokens[0]; }; /* other values derived from ctype.h */ #define _A 0x100 /* ascii */ #define _W 0x200 /* wildcard */ /* Map to _ctype flags and some magic numbers */ static const u16 token_map[TS_FSM_TYPE_MAX+1] = { [TS_FSM_SPECIFIC] = 0, [TS_FSM_WILDCARD] = _W, [TS_FSM_CNTRL] = _C, [TS_FSM_LOWER] = _L, [TS_FSM_UPPER] = _U, [TS_FSM_PUNCT] = _P, [TS_FSM_SPACE] = _S, [TS_FSM_DIGIT] = _D, [TS_FSM_XDIGIT] = _D | _X, [TS_FSM_ALPHA] = _U | _L, [TS_FSM_ALNUM] = _U | _L | _D, [TS_FSM_PRINT] = _P | _U | _L | _D | _SP, [TS_FSM_GRAPH] = _P | _U | _L | _D, [TS_FSM_ASCII] = _A, }; static const u16 token_lookup_tbl[256] = { _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 0- 3 */ _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 4- 7 */ _W|_A|_C, _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C|_S, /* 8- 11 */ _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C, _W|_A|_C, /* 12- 15 */ _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 16- 19 */ _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 20- 23 */ _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 24- 27 */ _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 28- 31 */ _W|_A|_S|_SP, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 32- 35 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 36- 39 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 40- 43 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 44- 47 */ _W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 48- 51 */ _W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 52- 55 */ _W|_A|_D, _W|_A|_D, _W|_A|_P, _W|_A|_P, /* 56- 59 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 60- 63 */ _W|_A|_P, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, /* 64- 67 */ _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U, /* 68- 71 */ _W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 72- 75 */ _W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 76- 79 */ _W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 80- 83 */ _W|_A|_U, 
_W|_A|_U, _W|_A|_U, _W|_A|_U, /* 84- 87 */ _W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_P, /* 88- 91 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 92- 95 */ _W|_A|_P, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, /* 96- 99 */ _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L, /* 100-103 */ _W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 104-107 */ _W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 108-111 */ _W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 112-115 */ _W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 116-119 */ _W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_P, /* 120-123 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_C, /* 124-127 */ _W, _W, _W, _W, /* 128-131 */ _W, _W, _W, _W, /* 132-135 */ _W, _W, _W, _W, /* 136-139 */ _W, _W, _W, _W, /* 140-143 */ _W, _W, _W, _W, /* 144-147 */ _W, _W, _W, _W, /* 148-151 */ _W, _W, _W, _W, /* 152-155 */ _W, _W, _W, _W, /* 156-159 */ _W|_S|_SP, _W|_P, _W|_P, _W|_P, /* 160-163 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 164-167 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 168-171 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 172-175 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 176-179 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 180-183 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 184-187 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 188-191 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 192-195 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 196-199 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 200-203 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 204-207 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 208-211 */ _W|_U, _W|_U, _W|_U, _W|_P, /* 212-215 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 216-219 */ _W|_U, _W|_U, _W|_U, _W|_L, /* 220-223 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 224-227 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 228-231 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 232-235 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 236-239 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 240-243 */ _W|_L, _W|_L, _W|_L, _W|_P, /* 244-247 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 248-251 */ _W|_L, _W|_L, _W|_L, _W|_L}; /* 252-255 */ static inline int match_token(struct ts_fsm_token *t, u8 d) { if (t->type) return (token_lookup_tbl[d] & t->type) != 0; else return t->value == d; } 
/*
 * Run the token "program" in @conf against the data stream supplied by
 * state->get_next_block, starting at state->offset.  Returns the offset
 * of the first match (updating state->offset past it) or UINT_MAX when
 * no match is found.
 *
 * The helper macros below deliberately mutate the surrounding locals
 * (consumed, block_idx, block_len, data) — the control flow depends on
 * this, so do not reorder statements around them.
 */
static unsigned int fsm_find(struct ts_config *conf, struct ts_state *state)
{
	struct ts_fsm *fsm = ts_config_priv(conf);
	struct ts_fsm_token *cur = NULL, *next;
	unsigned int match_start, block_idx = 0, tok_idx;
	unsigned block_len = 0, strict, consumed = state->offset;
	const u8 *data;

/* Advance to the next data block; evaluates to its length (0 == EOF). */
#define GET_NEXT_BLOCK()		\
({	consumed += block_idx;		\
	block_idx = 0;			\
	block_len = conf->get_next_block(consumed, &data, conf, state); })

/* In strict mode a mismatch fails the search; otherwise shift one byte
 * forward and restart the token program from the top. */
#define TOKEN_MISMATCH()		\
	do {				\
		if (strict)		\
			goto no_match;	\
		block_idx++;		\
		goto startover;		\
	} while(0)

#define end_of_data() unlikely(block_idx >= block_len && !GET_NEXT_BLOCK())

	if (end_of_data())
		goto no_match;

	/* A leading HEAD_IGNORE token anchors the match at the head. */
	strict = fsm->tokens[0].recur != TS_FSM_HEAD_IGNORE;

startover:
	match_start = consumed + block_idx;

	for (tok_idx = 0; tok_idx < fsm->ntokens; tok_idx++) {
		cur = &fsm->tokens[tok_idx];

		if (likely(tok_idx < (fsm->ntokens - 1)))
			next = &fsm->tokens[tok_idx + 1];
		else
			next = NULL;

		switch (cur->recur) {
		case TS_FSM_SINGLE:
			if (end_of_data())
				goto no_match;

			if (!match_token(cur, data[block_idx]))
				TOKEN_MISMATCH();
			break;

		case TS_FSM_PERHAPS:
			if (end_of_data() ||
			    !match_token(cur, data[block_idx]))
				continue;
			break;

		case TS_FSM_MULTI:
			if (end_of_data())
				goto no_match;

			if (!match_token(cur, data[block_idx]))
				TOKEN_MISMATCH();

			block_idx++;
			/* fall through */

		case TS_FSM_ANY:
			if (next == NULL)
				goto found_match;

			if (end_of_data())
				continue;

			/* Consume bytes matching @cur until @next matches. */
			while (!match_token(next, data[block_idx])) {
				if (!match_token(cur, data[block_idx]))
					TOKEN_MISMATCH();
				block_idx++;
				if (end_of_data())
					goto no_match;
			}
			continue;

		/*
		 * Optimization: Prefer small local loop over jumping
		 * back and forth until garbage at head is munched.
		 */
		case TS_FSM_HEAD_IGNORE:
			if (end_of_data())
				continue;

			while (!match_token(next, data[block_idx])) {
				/*
				 * Special case, don't start over upon
				 * a mismatch, give the user the
				 * chance to specify the type of data
				 * allowed to be ignored.
				 */
				if (!match_token(cur, data[block_idx]))
					goto no_match;
				block_idx++;
				if (end_of_data())
					goto no_match;
			}

			match_start = consumed + block_idx;
			continue;
		}

		block_idx++;
	}

	if (end_of_data())
		goto found_match;

no_match:
	return UINT_MAX;

found_match:
	state->offset = consumed + block_idx;
	return match_start;
}

/*
 * Validate and copy a caller-supplied token array into a new ts_config.
 * @pattern must be an array of ts_fsm_token; a HEAD_IGNORE token is only
 * legal as the first token and must not be the only one.  Case-insensitive
 * search is not supported.  Returns ERR_PTR(-EINVAL) on bad input.
 */
static struct ts_config *fsm_init(const void *pattern, unsigned int len,
				  gfp_t gfp_mask, int flags)
{
	int i, err = -EINVAL;
	struct ts_config *conf;
	struct ts_fsm *fsm;
	struct ts_fsm_token *tokens = (struct ts_fsm_token *) pattern;
	unsigned int ntokens = len / sizeof(*tokens);
	size_t priv_size = sizeof(*fsm) + len;

	if (len % sizeof(struct ts_fsm_token) || ntokens < 1)
		goto errout;

	if (flags & TS_IGNORECASE)
		goto errout;

	for (i = 0; i < ntokens; i++) {
		struct ts_fsm_token *t = &tokens[i];

		if (t->type > TS_FSM_TYPE_MAX || t->recur > TS_FSM_RECUR_MAX)
			goto errout;

		if (t->recur == TS_FSM_HEAD_IGNORE &&
		    (i != 0 || i == (ntokens - 1)))
			goto errout;
	}

	conf = alloc_ts_config(priv_size, gfp_mask);
	if (IS_ERR(conf))
		return conf;

	conf->flags = flags;
	fsm = ts_config_priv(conf);
	fsm->ntokens = ntokens;
	memcpy(fsm->tokens, pattern, len);

	/* Translate public token types into internal lookup-table masks
	 * (token_map is defined above this chunk). */
	for (i = 0; i < fsm->ntokens; i++) {
		struct ts_fsm_token *t = &fsm->tokens[i];
		t->type = token_map[t->type];
	}

	return conf;

errout:
	return ERR_PTR(err);
}

/* Return the stored token array (the "pattern") of a config. */
static void *fsm_get_pattern(struct ts_config *conf)
{
	struct ts_fsm *fsm = ts_config_priv(conf);
	return fsm->tokens;
}

/* Return the byte length of the stored token array. */
static unsigned int fsm_get_pattern_len(struct ts_config *conf)
{
	struct ts_fsm *fsm = ts_config_priv(conf);
	return fsm->ntokens * sizeof(struct ts_fsm_token);
}

static struct ts_ops fsm_ops = {
	.name		  = "fsm",
	.find		  = fsm_find,
	.init		  = fsm_init,
	.get_pattern	  = fsm_get_pattern,
	.get_pattern_len  = fsm_get_pattern_len,
	.owner		  = THIS_MODULE,
	.list		  = LIST_HEAD_INIT(fsm_ops.list)
};

static int __init init_fsm(void)
{
	return textsearch_register(&fsm_ops);
}

static void __exit exit_fsm(void)
{
	textsearch_unregister(&fsm_ops);
}

MODULE_LICENSE("GPL");

module_init(init_fsm);
module_exit(exit_fsm);
gpl-2.0
FEDEVEL/tmp-imx6-tiny-rex-linux
mm/vmalloc.c
62
69486
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/llist.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

/* Per-cpu queue of vfree() requests deferred to process context. */
struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

/* Workqueue handler: drain the deferred-free list and unmap each entry.
 * The llist_node is embedded at the start of the freed area, so the node
 * pointer itself is the address to free (inner 'p' shadows the outer one
 * intentionally). */
static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);
	while (llnode) {
		void *p = llnode;
		llnode = llist_next(llnode);
		__vunmap(p, 1);
	}
}

/*** Page table manipulation functions ***/

/* Clear every pte in [addr, end) under @pmd. */
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

/* Top-level unmap of a kernel virtual range: walk pgd -> pud -> pmd -> pte. */
static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 *
 * Returns the number of pages mapped on success, -errno on failure.
 * No TLB/cache flushing is done here.
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

/* As above, plus the architecture's cache flush for the new mapping. */
static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 * Returns NULL if any level of the page table is empty or the pte is
 * not present.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

/* vmap_area->flags values */
#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

static unsigned long vmap_area_pcpu_hole;

/* Look up the vmap_area containing @addr in the rb-tree, or NULL.
 * Caller must hold vmap_area_lock. */
static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/* Insert @va into both the rb-tree and the address-sorted RCU list.
 * BUG()s on overlap with an existing area.  Caller holds vmap_area_lock. */
static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp_va;

		parent = *p;
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp_va->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;
	struct vmap_area *first;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(!is_power_of_2(align));

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

retry:
	spin_lock(&vmap_area_lock);
	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the vmap_area cached in free_vmap_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_vmap_cache.
	 * Note that __free_vmap_area may update free_vmap_cache
	 * without updating cached_hole_size or cached_align.
	 */
	if (!free_vmap_cache ||
			size < cached_hole_size ||
			vstart < cached_vstart ||
			align < cached_align) {
nocache:
		cached_hole_size = 0;
		free_vmap_cache = NULL;
	}
	/* record if we encounter less permissive parameters */
	cached_vstart = vstart;
	cached_align = align;

	/* find starting point for our search */
	if (free_vmap_cache) {
		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
		addr = ALIGN(first->va_end, align);
		if (addr < vstart)
			goto nocache;
		if (addr + size < addr)
			goto overflow;

	} else {
		addr = ALIGN(vstart, align);
		if (addr + size < addr)
			goto overflow;

		/* Find the lowest existing area ending at or above addr. */
		n = vmap_area_root.rb_node;
		first = NULL;

		while (n) {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, walk areas until a suitable hole is found */
	while (addr + size > first->va_start && addr + size <= vend) {
		if (addr + cached_hole_size < first->va_start)
			cached_hole_size = first->va_start - addr;
		addr = ALIGN(first->va_end, align);
		if (addr + size < addr)
			goto overflow;

		if (list_is_last(&first->list, &vmap_area_list))
			goto found;

		first = list_entry(first->list.next,
				struct vmap_area, list);
	}

found:
	if (addr + size > vend)
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	free_vmap_cache = &va->rb_node;
	spin_unlock(&vmap_area_lock);

	BUG_ON(va->va_start & (align-1));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;

overflow:
	/* One retry after forcing a purge of lazily-freed areas. */
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}
	if (printk_ratelimit())
		printk(KERN_WARNING
			"vmap allocation for size %lu failed: "
			"use vmalloc=<size> to increase size.\n", size);
	kfree(va);
	return ERR_PTR(-EBUSY);
}

/* Remove @va from the rb-tree and list and free it via RCU.
 * Caller must hold vmap_area_lock. */
static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));

	if (free_vmap_cache) {
		if (va->va_end < cached_vstart) {
			free_vmap_cache = NULL;
		} else {
			struct vmap_area *cache;
			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
			if (va->va_start <= cache->va_start) {
				free_vmap_cache = rb_prev(&va->rb_node);
				/*
				 * We don't try to update cached_hole_size or
				 * cached_align, but it won't go very wrong.
				 */
			}
		}
	}
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	kfree_rcu(va, rcu_head);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
*
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}

/* Count of pages currently queued for lazy purge. */
static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	if (sync)
		purge_fragmented_blocks_allcpus();

	/* Collect all VM_LAZY_FREE areas onto a private list, widening
	 * the [*start, *end) flush range as we go. */
	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr)
		atomic_sub(nr, &vmap_lazy_nr);

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	unmap_vmap_area(va);
	free_vmap_area_noflush(va);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

/* Locked wrapper around __find_vmap_area(). */
static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

/* Look up the area starting at @addr and free+unmap it; BUGs if absent. */
static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE	(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;	/* page counts within the block */
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

/* Convert a kernel virtual address to its vmap-block radix-tree index. */
static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

/* Allocate a fresh vmap block on the current node, register it in the
 * radix tree and enqueue it on this cpu's free list. */
static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

/* Unregister @vb from the radix tree and lazily free its vmap area. */
static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

/* Reclaim blocks on @cpu's free list that are fully consumed (free+dirty
 * covers the whole block) but not already all-dirty.  The condition is
 * re-checked under vb->lock before the block is claimed. */
static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

/* Carve @size bytes (order-aligned page count) out of a per-cpu vmap
 * block, creating a new block if none has room.  Returns the kva or an
 * ERR_PTR from new_vmap_block(). */
static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		if (vb->free < 1UL << order)
			goto next;

		/* Allocation is bump-pointer within the block: the next
		 * free page index is derived from the free count. */
		i = VMAP_BBMAP_BITS - vb->free;
		addr = vb->va->va_start + (i << PAGE_SHIFT);
		BUG_ON(addr_to_vb_idx(addr) !=
				addr_to_vb_idx(vb->va->va_start));
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}
		spin_unlock(&vb->lock);
		break;
next:
		spin_unlock(&vb->lock);
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

/* Return a vb_alloc()ed range: unmap its ptes, mark the pages dirty in
 * the owning block, and free the block once it is entirely dirty. */
static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	spin_lock(&vb->lock);
	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads.
What this means is that any page you * have now, may, in a former life, have been mapped into kernel virtual * address by the vmap layer and so there might be some CPUs with TLB entries * still referencing that page (additional to the regular 1:1 kernel mapping). * * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can * be sure that none of the pages we have control over will have any aliases * from the vmap layer. */ void vm_unmap_aliases(void) { unsigned long start = ULONG_MAX, end = 0; int cpu; int flush = 0; if (unlikely(!vmap_initialized)) return; for_each_possible_cpu(cpu) { struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); struct vmap_block *vb; rcu_read_lock(); list_for_each_entry_rcu(vb, &vbq->free, free_list) { int i, j; spin_lock(&vb->lock); i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS); if (i < VMAP_BBMAP_BITS) { unsigned long s, e; j = find_last_bit(vb->dirty_map, VMAP_BBMAP_BITS); j = j + 1; /* need exclusive index */ s = vb->va->va_start + (i << PAGE_SHIFT); e = vb->va->va_start + (j << PAGE_SHIFT); flush = 1; if (s < start) start = s; if (e > end) end = e; } spin_unlock(&vb->lock); } rcu_read_unlock(); } __purge_vmap_area_lazy(&start, &end, 1, flush); } EXPORT_SYMBOL_GPL(vm_unmap_aliases); /** * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram * @mem: the pointer returned by vm_map_ram * @count: the count passed to that vm_map_ram call (cannot unmap partial) */ void vm_unmap_ram(const void *mem, unsigned int count) { unsigned long size = count << PAGE_SHIFT; unsigned long addr = (unsigned long)mem; BUG_ON(!addr); BUG_ON(addr < VMALLOC_START); BUG_ON(addr > VMALLOC_END); BUG_ON(addr & (PAGE_SIZE-1)); debug_check_no_locks_freed(mem, size); vmap_debug_free_range(addr, addr+size); if (likely(count <= VMAP_MAX_ALLOC)) vb_free(mem, size); else free_unmap_vmap_area_addr(addr); } EXPORT_SYMBOL(vm_unmap_ram); /** * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) * 
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		/* small request: sub-allocate from a per-cpu vmap block */
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		/* large request: grab a dedicated vmap area */
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		/* mapping failed: undo the address-space reservation */
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);

/* boot-time list of fixed vm areas, consumed by vmalloc_init() */
static struct vm_struct *vmlist __initdata;

/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	/* keep vmlist sorted by address and reject overlapping areas */
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
* * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. */ void __init vm_area_register_early(struct vm_struct *vm, size_t align) { static size_t vm_init_off __initdata; unsigned long addr; addr = ALIGN(VMALLOC_START + vm_init_off, align); vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; vm->addr = (void *)addr; vm_area_add_early(vm); } void __init vmalloc_init(void) { struct vmap_area *va; struct vm_struct *tmp; int i; for_each_possible_cpu(i) { struct vmap_block_queue *vbq; struct vfree_deferred *p; vbq = &per_cpu(vmap_block_queue, i); spin_lock_init(&vbq->lock); INIT_LIST_HEAD(&vbq->free); p = &per_cpu(vfree_deferred, i); init_llist_head(&p->list); INIT_WORK(&p->wq, free_work); } /* Import existing vmlist entries. */ for (tmp = vmlist; tmp; tmp = tmp->next) { va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT); va->flags = VM_VM_AREA; va->va_start = (unsigned long)tmp->addr; va->va_end = va->va_start + tmp->size; va->vm = tmp; __insert_vmap_area(va); } vmap_area_pcpu_hole = VMALLOC_END; vmap_initialized = true; } /** * map_kernel_range_noflush - map kernel VM area with the specified pages * @addr: start of the VM area to map * @size: size of the VM area to map * @prot: page protection flags to use * @pages: pages to map * * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size * specify should have been allocated using get_vm_area() and its * friends. * * NOTE: * This function does NOT do any cache flushing. The caller is * responsible for calling flush_cache_vmap() on to-be-mapped areas * before calling this function. * * RETURNS: * The number of pages mapped on success, -errno on failure. */ int map_kernel_range_noflush(unsigned long addr, unsigned long size, pgprot_t prot, struct page **pages) { return vmap_page_range_noflush(addr, addr + size, prot, pages); } /** * unmap_kernel_range_noflush - unmap kernel VM area * @addr: start of the VM area to unmap * @size: size of the VM area to unmap * * Unmap PFN_UP(@size) pages at @addr. 
 The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-mapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}

/*
 * Map the given page array into @area.  On partial success (err > 0 pages
 * mapped) *pages is advanced past the consumed entries and 0 is returned,
 * letting callers map an area in several calls.
 */
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + get_vm_area_size(area);
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*
 * Fill in a vm_struct from its vmap_area and link the two together.
 * vmap_area_lock keeps the va <-> vm association consistent for readers.
 */
static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	spin_lock(&vmap_area_lock);
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
	va->flags |= VM_VM_AREA;
	spin_unlock(&vmap_area_lock);
}

static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
	/*
	 * Before removing VM_UNINITIALIZED,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
*/ smp_wmb(); vm->flags &= ~VM_UNINITIALIZED; } static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long align, unsigned long flags, unsigned long start, unsigned long end, int node, gfp_t gfp_mask, const void *caller) { struct vmap_area *va; struct vm_struct *area; BUG_ON(in_interrupt()); if (flags & VM_IOREMAP) align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER); size = PAGE_ALIGN(size); if (unlikely(!size)) return NULL; area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); if (unlikely(!area)) return NULL; /* * We always allocate a guard page. */ size += PAGE_SIZE; va = alloc_vmap_area(size, align, start, end, node, gfp_mask); if (IS_ERR(va)) { kfree(area); return NULL; } setup_vmalloc_vm(area, va, flags, caller); return area; } struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, unsigned long start, unsigned long end) { return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0)); } EXPORT_SYMBOL_GPL(__get_vm_area); struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, unsigned long start, unsigned long end, const void *caller) { return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, GFP_KERNEL, caller); } /** * get_vm_area - reserve a contiguous kernel virtual area * @size: size of the area * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC * * Search an area of @size in the kernel virtual mapping area, * and reserved it for out purposes. Returns the area descriptor * on success or %NULL on failure. 
*/ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) { return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0)); } struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, const void *caller) { return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, NUMA_NO_NODE, GFP_KERNEL, caller); } /** * find_vm_area - find a continuous kernel virtual area * @addr: base address * * Search for the kernel VM area starting at @addr, and return it. * It is up to the caller to do all required locking to keep the returned * pointer valid. */ struct vm_struct *find_vm_area(const void *addr) { struct vmap_area *va; va = find_vmap_area((unsigned long)addr); if (va && va->flags & VM_VM_AREA) return va->vm; return NULL; } /** * remove_vm_area - find and remove a continuous kernel virtual area * @addr: base address * * Search for the kernel VM area starting at @addr, and remove it. * This function returns the found VM area, but using it is NOT safe * on SMP machines, except for its size or flags. 
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->vm;

		/* detach the vm_struct before tearing down the mapping */
		spin_lock(&vmap_area_lock);
		va->vm = NULL;
		va->flags &= ~VM_VM_AREA;
		spin_unlock(&vmap_area_lock);

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);
		/* subtract the guard page added by __get_vm_area_node() */
		vm->size -= PAGE_SIZE;

		return vm;
	}
	return NULL;
}

/*
 * Common teardown for vfree()/vunmap(): unmap @addr and release the
 * vm_struct; when @deallocate_pages is set, also free the backing pages
 * (and the page array itself).
 */
static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
			addr))
		return;

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		/* the page array may itself be vmalloc'ed (VM_VPAGES) */
		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in NMI context (strictly speaking, only if we don't
 * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 * conventions for vfree() arch-dependent would be a really bad idea)
 *
 * NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
 */
void vfree(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	if (unlikely(in_interrupt())) {
		/*
		 * __vunmap() may sleep; from interrupt context queue the
		 * address on the per-cpu deferred list and free it later
		 * from a workqueue.  The llist_node is stored in the
		 * to-be-freed memory itself.
		 */
		struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
		if (llist_add((struct llist_node *)addr, &p->list))
			schedule_work(&p->wq);
	} else
		__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	if (addr)
		__vunmap(addr, 0);	/* 0: caller owns the pages */
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	/* sanity bound: more pages than RAM cannot be a valid request */
	if (count > totalram_pages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);

static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller);

/*
 * Allocate and map the backing pages for @area one page at a time.
 * The struct page array is kmalloc'ed when it fits a page, otherwise
 * vmalloc'ed (recursively, but the recursion is strictly bounded).
 */
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node)
{
	const int order = 0;
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;

	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
				PAGE_KERNEL, node, area->caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;
		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;

		if (node == NUMA_NO_NODE)
			page = alloc_page(tmp_mask);
		else
			page = alloc_pages_node(node, tmp_mask, order);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	warn_alloc_failed(gfp_mask, order,
			  "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
			  (area->nr_pages*PAGE_SIZE), area->size);
	vfree(area->addr);
	return NULL;
}

/**
 * __vmalloc_node_range - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @start: vm
 area range start
 * @end: vm area range end
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or NUMA_NO_NODE
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, const void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;	/* pre-alignment size, for kmemleak */

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
		goto fail;

	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
				  start, end, node, gfp_mask, caller);
	if (!area)
		goto fail;

	/* on failure this already warned and freed area; just bail */
	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
	if (!addr)
		return NULL;

	/*
	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
	 * flag. It means that vm_struct is not fully initialized.
	 * Now, it is fully initialized, so remove this flag here.
	 */
	clear_vm_uninitialized_flag(area);

	/*
	 * A ref_count = 2 is needed because vm_struct allocated in
	 * __get_vm_area_node() contains a reference to the virtual address of
	 * the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, real_size, 2, gfp_mask);

	return addr;

fail:
	warn_alloc_failed(gfp_mask, 0,
			  "vmalloc: allocation failure: %lu bytes\n",
			  real_size);
	return NULL;
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or NUMA_NO_NODE
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller)
{
	/* plain vmalloc range; everything else is passed straight through */
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/* convenience wrapper: default alignment/protection, caller-attributed */
static inline void *__vmalloc_node_flags(unsigned long size,
					int node, gfp_t flags)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
					node, __builtin_return_address(0));
}

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				    GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);

/**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size: allocation size
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	/* SHMLBA alignment so the area can be mmapped without cache aliasing */
	ret = __vmalloc_node(size, SHMLBA,
			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, NUMA_NO_NODE,
			     __builtin_return_address(0));
	if (ret) {
		/* mark the area as allowed for remap_vmalloc_range() */
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc_node() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node_flags(size, node,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
*/ void *vmalloc_exec(unsigned long size) { return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, NUMA_NO_NODE, __builtin_return_address(0)); } #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) #define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL #else #define GFP_VMALLOC32 GFP_KERNEL #endif /** * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) * @size: allocation size * * Allocate enough 32bit PA addressable pages to cover @size from the * page level allocator and map them into contiguous kernel virtual space. */ void *vmalloc_32(unsigned long size) { return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc_32); /** * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory * @size: allocation size * * The resulting memory area is 32bit addressable and zeroed so it can be * mapped to userspace without leaking data. */ void *vmalloc_32_user(unsigned long size) { struct vm_struct *area; void *ret; ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, NUMA_NO_NODE, __builtin_return_address(0)); if (ret) { area = find_vm_area(ret); area->flags |= VM_USERMAP; } return ret; } EXPORT_SYMBOL(vmalloc_32_user); /* * small helper routine , copy contents to buf from addr. * If the page is not present, fill zero. */ static int aligned_vread(char *buf, char *addr, unsigned long count) { struct page *p; int copied = 0; while (count) { unsigned long offset, length; offset = (unsigned long)addr & ~PAGE_MASK; length = PAGE_SIZE - offset; if (length > count) length = count; p = vmalloc_to_page(addr); /* * To do safe access to this _mapped_ area, we need * lock. But adding lock here means that we need to add * overhead of vmalloc()/vfree() calles for this _debug_ * interface, rarely used. 
 Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map);
		} else
			/* unmapped page: read as zeroes */
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/*
 * Counterpart of aligned_vread(): copy @count bytes from @buf into the
 * vmalloc area at @addr, page by page.  Unmapped pages are skipped
 * silently (no copy into holes).
 */
static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map);
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/**
 * vread() - read vmalloc area in a safe way.
 * @buf: buffer for reading data
 * @addr: vm address.
 * @count: number of bytes to be read.
 *
 * Returns # of bytes which addr and buf should be increased.
 * (same number to @count). Returns 0 if [addr...addr+count) doesn't
 * includes any intersect with alive vmalloc area.
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * copy data from that area to a given buffer. If the given memory range
 * of [addr...addr+count) includes some valid address, data is copied to
 * proper area of @buf. If there are memory holes, they'll be zero-filled.
 * IOREMAP area is treated as memory hole and no copy is done.
 *
 * If [addr...addr+count) doesn't includes any intersects with alive
 * vm_struct area, returns 0.
 @buf should be kernel's buffer.
 *
 * Note: In usual ops, vread() is never necessary because the caller
 * should know vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access vmalloc area without
 * any information, as /dev/kmem.
 *
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		/* gap before this area: emit zero bytes */
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}

/**
 * vwrite() - write vmalloc area in a safe way.
 * @buf: buffer for source data
 * @addr: vm address.
 * @count: number of bytes to be read.
 *
 * Returns # of bytes which addr and buf should be increased.
 * (same number to @count).
 * If [addr...addr+count) doesn't includes any intersect with valid
 * vmalloc area, returns 0.
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * copy data from a buffer to the given addr. If specified range of
 * [addr...addr+count) includes some valid address, data is copied from
 * proper area of @buf. If there are memory holes, no copy to hole.
 * IOREMAP area is treated as memory hole and no copy is done.
 *
 * If [addr...addr+count) doesn't includes any intersects with alive
 * vm_struct area, returns 0. @buf should be kernel's buffer.
 *
 * Note: In usual ops, vwrite() is never necessary because the caller
 * should know vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access vmalloc area without
 * any information, as /dev/kmem.
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		/* gap before this area: skip (writes never fill holes) */
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);
	/* report success only if at least one real area was written */
	if (!copied)
		return 0;
	return buflen;
}

/**
 * remap_vmalloc_range_partial - map vmalloc pages to userspace
 * @vma: vma to cover
 * @uaddr: target user address to start at
 * @kaddr: virtual address of vmalloc kernel memory
 * @size: size of map area
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that @kaddr is a valid vmalloc'ed area,
 * and that it is big enough to cover the range starting at
 * @uaddr in @vma. Will return failure if that criteria isn't
 * met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long size)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
		return -EINVAL;

	area = find_vm_area(kaddr);
	if (!area)
		return -EINVAL;

	/* only areas explicitly allocated for userspace may be mapped */
	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (kaddr + size > area->addr + area->size)
		return -EINVAL;

	/* insert the backing pages into the vma one page at a time */
	do {
		struct page *page = vmalloc_to_page(kaddr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range_partial);

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criteria isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   addr + (pgoff << PAGE_SHIFT),
					   vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

/*
 * apply_to_page_range() callback for alloc_vm_area(): record each pte
 * into the caller-supplied array (when one was given) and advance the
 * cursor.  Returning 0 keeps the walk going.
 */
static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	pte_t ***p = data;

	if (p) {
		*(*p) = pte;
		(*p)++;
	}
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 * @ptes: returns the PTEs for the address space
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range.  No actual mappings
 * are created.
 *
 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
 * allocated for the VM area are returned.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				size, f, ptes ? &ptes : NULL)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

/* release an area obtained from alloc_vm_area() */
void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);

#ifdef CONFIG_SMP
/* rb_node -> vmap_area, tolerating a NULL node */
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
}

/**
 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
 * @end: target address
 * @pnext: out arg for the next vmap_area
 * @pprev: out arg for the previous vmap_area
 *
 * Returns: %true if either or both of next and prev are found,
 *	    %false if no vmap_area exists
 *
 * Find vmap_areas end addresses of which enclose @end.  ie. if not
 * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
 */
static bool pvm_find_next_prev(unsigned long end,
			       struct vmap_area **pnext,
			       struct vmap_area **pprev)
{
	struct rb_node *n = vmap_area_root.rb_node;
	struct vmap_area *va = NULL;

	/* binary-search the rbtree for the area whose va_end brackets @end */
	while (n) {
		va = rb_entry(n, struct vmap_area, rb_node);
		if (end < va->va_end)
			n = n->rb_left;
		else if (end > va->va_end)
			n = n->rb_right;
		else
			break;
	}

	if (!va)
		return false;

	/* the search landed on one side; derive the other via tree order */
	if (va->va_end > end) {
		*pnext = va;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	} else {
		*pprev = va;
		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
	}
	return true;
}

/**
 * pvm_determine_end - find the highest aligned address between two vmap_areas
 * @pnext: in/out arg for the next vmap_area
 * @pprev: in/out arg for the previous vmap_area
 * @align: alignment
 *
 * Returns: determined end address
 *
 * Find the highest aligned address between *@pnext and *@pprev below
 * VMALLOC_END.  *@pnext and *@pprev are adjusted so that the aligned
 * down address is between the end addresses of the two vmap_areas.
 *
 * Please note that the address returned by this function may fall
 * inside *@pnext vmap_area.  The caller is responsible for checking
 * that.
 */
static unsigned long pvm_determine_end(struct vmap_area **pnext,
				       struct vmap_area **pprev,
				       unsigned long align)
{
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	/* candidate: just below *pnext (aligned down), capped at VMALLOC_END */
	if (*pnext)
		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
	else
		addr = vmalloc_end;

	/* walk prev pointers left until addr sits between the two areas */
	while (*pprev && (*pprev)->va_end > addr) {
		*pnext = *pprev;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	}

	return addr;
}

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
 * be scattered pretty far, distance between two areas easily going up
 * to gigabytes.  To avoid interacting with regular vmallocs, these
 * areas are allocated from top.
 *
 * Despite its complicated look, this allocator is rather simple.  It
 * does everything top-down and scans areas from the end looking for
 * matching slot.  While scanning, if any of the areas overlaps with
 * existing vmap_area, the base address is pulled down to fit the
 * area.  Scanning is repeated till all the areas fit and then all
 * necessary data structures are inserted and the result is returned.
*/ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, const size_t *sizes, int nr_vms, size_t align) { const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); struct vmap_area **vas, *prev, *next; struct vm_struct **vms; int area, area2, last_area, term_area; unsigned long base, start, end, last_end; bool purged = false; /* verify parameters and allocate data structures */ BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align)); for (last_area = 0, area = 0; area < nr_vms; area++) { start = offsets[area]; end = start + sizes[area]; /* is everything aligned properly? */ BUG_ON(!IS_ALIGNED(offsets[area], align)); BUG_ON(!IS_ALIGNED(sizes[area], align)); /* detect the area with the highest address */ if (start > offsets[last_area]) last_area = area; for (area2 = 0; area2 < nr_vms; area2++) { unsigned long start2 = offsets[area2]; unsigned long end2 = start2 + sizes[area2]; if (area2 == area) continue; BUG_ON(start2 >= start && start2 < end); BUG_ON(end2 <= end && end2 > start); } } last_end = offsets[last_area] + sizes[last_area]; if (vmalloc_end - vmalloc_start < last_end) { WARN_ON(true); return NULL; } vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); if (!vas || !vms) goto err_free2; for (area = 0; area < nr_vms; area++) { vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL); vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); if (!vas[area] || !vms[area]) goto err_free; } retry: spin_lock(&vmap_area_lock); /* start scanning - we scan from the top, begin with the last area */ area = term_area = last_area; start = offsets[area]; end = start + sizes[area]; if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) { base = vmalloc_end - last_end; goto found; } base = pvm_determine_end(&next, &prev, align) - end; while (true) { BUG_ON(next && next->va_end <= base + end); BUG_ON(prev && prev->va_end > base + end); /* * 
base might have underflowed, add last_end before * comparing. */ if (base + last_end < vmalloc_start + last_end) { spin_unlock(&vmap_area_lock); if (!purged) { purge_vmap_area_lazy(); purged = true; goto retry; } goto err_free; } /* * If next overlaps, move base downwards so that it's * right below next and then recheck. */ if (next && next->va_start < base + end) { base = pvm_determine_end(&next, &prev, align) - end; term_area = area; continue; } /* * If prev overlaps, shift down next and prev and move * base so that it's right below new next and then * recheck. */ if (prev && prev->va_end > base + start) { next = prev; prev = node_to_va(rb_prev(&next->rb_node)); base = pvm_determine_end(&next, &prev, align) - end; term_area = area; continue; } /* * This area fits, move on to the previous one. If * the previous one is the terminal one, we're done. */ area = (area + nr_vms - 1) % nr_vms; if (area == term_area) break; start = offsets[area]; end = start + sizes[area]; pvm_find_next_prev(base + end, &next, &prev); } found: /* we've found a fitting base, insert all va's */ for (area = 0; area < nr_vms; area++) { struct vmap_area *va = vas[area]; va->va_start = base + offsets[area]; va->va_end = va->va_start + sizes[area]; __insert_vmap_area(va); } vmap_area_pcpu_hole = base + offsets[last_area]; spin_unlock(&vmap_area_lock); /* insert all vm's */ for (area = 0; area < nr_vms; area++) setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, pcpu_get_vm_areas); kfree(vas); return vms; err_free: for (area = 0; area < nr_vms; area++) { kfree(vas[area]); kfree(vms[area]); } err_free2: kfree(vas); kfree(vms); return NULL; } /** * pcpu_free_vm_areas - free vmalloc areas for percpu allocator * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() * @nr_vms: the number of allocated areas * * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 
*/ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) { int i; for (i = 0; i < nr_vms; i++) free_vm_area(vms[i]); kfree(vms); } #endif /* CONFIG_SMP */ #ifdef CONFIG_PROC_FS static void *s_start(struct seq_file *m, loff_t *pos) __acquires(&vmap_area_lock) { loff_t n = *pos; struct vmap_area *va; spin_lock(&vmap_area_lock); va = list_entry((&vmap_area_list)->next, typeof(*va), list); while (n > 0 && &va->list != &vmap_area_list) { n--; va = list_entry(va->list.next, typeof(*va), list); } if (!n && &va->list != &vmap_area_list) return va; return NULL; } static void *s_next(struct seq_file *m, void *p, loff_t *pos) { struct vmap_area *va = p, *next; ++*pos; next = list_entry(va->list.next, typeof(*va), list); if (&next->list != &vmap_area_list) return next; return NULL; } static void s_stop(struct seq_file *m, void *p) __releases(&vmap_area_lock) { spin_unlock(&vmap_area_lock); } static void show_numa_info(struct seq_file *m, struct vm_struct *v) { if (IS_ENABLED(CONFIG_NUMA)) { unsigned int nr, *counters = m->private; if (!counters) return; /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ smp_rmb(); if (v->flags & VM_UNINITIALIZED) return; memset(counters, 0, nr_node_ids * sizeof(unsigned int)); for (nr = 0; nr < v->nr_pages; nr++) counters[page_to_nid(v->pages[nr])]++; for_each_node_state(nr, N_HIGH_MEMORY) if (counters[nr]) seq_printf(m, " N%u=%u", nr, counters[nr]); } } static int s_show(struct seq_file *m, void *p) { struct vmap_area *va = p; struct vm_struct *v; /* * s_show can encounter race with remove_vm_area, !VM_VM_AREA on * behalf of vmap area is being tear down or vm_map_ram allocation. 
*/ if (!(va->flags & VM_VM_AREA)) return 0; v = va->vm; seq_printf(m, "0x%pK-0x%pK %7ld", v->addr, v->addr + v->size, v->size); if (v->caller) seq_printf(m, " %pS", v->caller); if (v->nr_pages) seq_printf(m, " pages=%d", v->nr_pages); if (v->phys_addr) seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr); if (v->flags & VM_IOREMAP) seq_printf(m, " ioremap"); if (v->flags & VM_ALLOC) seq_printf(m, " vmalloc"); if (v->flags & VM_MAP) seq_printf(m, " vmap"); if (v->flags & VM_USERMAP) seq_printf(m, " user"); if (v->flags & VM_VPAGES) seq_printf(m, " vpages"); show_numa_info(m, v); seq_putc(m, '\n'); return 0; } static const struct seq_operations vmalloc_op = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; static int vmalloc_open(struct inode *inode, struct file *file) { unsigned int *ptr = NULL; int ret; if (IS_ENABLED(CONFIG_NUMA)) { ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL); if (ptr == NULL) return -ENOMEM; } ret = seq_open(file, &vmalloc_op); if (!ret) { struct seq_file *m = file->private_data; m->private = ptr; } else kfree(ptr); return ret; } static const struct file_operations proc_vmalloc_operations = { .open = vmalloc_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static int __init proc_vmalloc_init(void) { proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations); return 0; } module_init(proc_vmalloc_init); void get_vmalloc_info(struct vmalloc_info *vmi) { struct vmap_area *va; unsigned long free_area_size; unsigned long prev_end; vmi->used = 0; vmi->largest_chunk = 0; prev_end = VMALLOC_START; rcu_read_lock(); if (list_empty(&vmap_area_list)) { vmi->largest_chunk = VMALLOC_TOTAL; goto out; } list_for_each_entry_rcu(va, &vmap_area_list, list) { unsigned long addr = va->va_start; /* * Some archs keep another range for modules in vmalloc space */ if (addr < VMALLOC_START) continue; if (addr >= VMALLOC_END) break; if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING)) 
continue; vmi->used += (va->va_end - va->va_start); free_area_size = addr - prev_end; if (vmi->largest_chunk < free_area_size) vmi->largest_chunk = free_area_size; prev_end = va->va_end; } if (VMALLOC_END - prev_end > vmi->largest_chunk) vmi->largest_chunk = VMALLOC_END - prev_end; out: rcu_read_unlock(); } #endif
gpl-2.0
drod2169/Linux-3.11.x
drivers/md/dm-cache-target.c
62
65975
/* * Copyright (C) 2012 Red Hat. All rights reserved. * * This file is released under the GPL. */ #include "dm.h" #include "dm-bio-prison.h" #include "dm-bio-record.h" #include "dm-cache-metadata.h" #include <linux/dm-io.h> #include <linux/dm-kcopyd.h> #include <linux/init.h> #include <linux/mempool.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> #define DM_MSG_PREFIX "cache" DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle, "A percentage of time allocated for copying to and/or from cache"); /*----------------------------------------------------------------*/ /* * Glossary: * * oblock: index of an origin block * cblock: index of a cache block * promotion: movement of a block from origin to cache * demotion: movement of a block from cache to origin * migration: movement of a block between the origin and cache device, * either direction */ /*----------------------------------------------------------------*/ static size_t bitset_size_in_bytes(unsigned nr_entries) { return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG); } static unsigned long *alloc_bitset(unsigned nr_entries) { size_t s = bitset_size_in_bytes(nr_entries); return vzalloc(s); } static void clear_bitset(void *bitset, unsigned nr_entries) { size_t s = bitset_size_in_bytes(nr_entries); memset(bitset, 0, s); } static void free_bitset(unsigned long *bits) { vfree(bits); } /*----------------------------------------------------------------*/ #define PRISON_CELLS 1024 #define MIGRATION_POOL_SIZE 128 #define COMMIT_PERIOD HZ #define MIGRATION_COUNT_WINDOW 10 /* * The block size of the device holding cache data must be >= 32KB */ #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT) /* * FIXME: the cache is read/write for the time being. 
*/ enum cache_mode { CM_WRITE, /* metadata may be changed */ CM_READ_ONLY, /* metadata may not be changed */ }; struct cache_features { enum cache_mode mode; bool write_through:1; }; struct cache_stats { atomic_t read_hit; atomic_t read_miss; atomic_t write_hit; atomic_t write_miss; atomic_t demotion; atomic_t promotion; atomic_t copies_avoided; atomic_t cache_cell_clash; atomic_t commit_count; atomic_t discard_count; }; struct cache { struct dm_target *ti; struct dm_target_callbacks callbacks; /* * Metadata is written to this device. */ struct dm_dev *metadata_dev; /* * The slower of the two data devices. Typically a spindle. */ struct dm_dev *origin_dev; /* * The faster of the two data devices. Typically an SSD. */ struct dm_dev *cache_dev; /* * Cache features such as write-through. */ struct cache_features features; /* * Size of the origin device in _complete_ blocks and native sectors. */ dm_oblock_t origin_blocks; sector_t origin_sectors; /* * Size of the cache device in blocks. */ dm_cblock_t cache_size; /* * Fields for converting from sectors to blocks. */ uint32_t sectors_per_block; int sectors_per_block_shift; struct dm_cache_metadata *cmd; spinlock_t lock; struct bio_list deferred_bios; struct bio_list deferred_flush_bios; struct bio_list deferred_writethrough_bios; struct list_head quiesced_migrations; struct list_head completed_migrations; struct list_head need_commit_migrations; sector_t migration_threshold; atomic_t nr_migrations; wait_queue_head_t migration_wait; /* * cache_size entries, dirty if set */ dm_cblock_t nr_dirty; unsigned long *dirty_bitset; /* * origin_blocks entries, discarded if set. 
*/ uint32_t discard_block_size; /* a power of 2 times sectors per block */ dm_dblock_t discard_nr_blocks; unsigned long *discard_bitset; struct dm_kcopyd_client *copier; struct workqueue_struct *wq; struct work_struct worker; struct delayed_work waker; unsigned long last_commit_jiffies; struct dm_bio_prison *prison; struct dm_deferred_set *all_io_ds; mempool_t *migration_pool; struct dm_cache_migration *next_migration; struct dm_cache_policy *policy; unsigned policy_nr_args; bool need_tick_bio:1; bool sized:1; bool quiescing:1; bool commit_requested:1; bool loaded_mappings:1; bool loaded_discards:1; struct cache_stats stats; /* * Rather than reconstructing the table line for the status we just * save it and regurgitate. */ unsigned nr_ctr_args; const char **ctr_args; }; struct per_bio_data { bool tick:1; unsigned req_nr:2; struct dm_deferred_entry *all_io_entry; /* * writethrough fields. These MUST remain at the end of this * structure and the 'cache' member must be the first as it * is used to determine the offset of the writethrough fields. */ struct cache *cache; dm_cblock_t cblock; bio_end_io_t *saved_bi_end_io; struct dm_bio_details bio_details; }; struct dm_cache_migration { struct list_head list; struct cache *cache; unsigned long start_jiffies; dm_oblock_t old_oblock; dm_oblock_t new_oblock; dm_cblock_t cblock; bool err:1; bool writeback:1; bool demote:1; bool promote:1; struct dm_bio_prison_cell *old_ocell; struct dm_bio_prison_cell *new_ocell; }; /* * Processing a bio in the worker thread may require these memory * allocations. We prealloc to avoid deadlocks (the same worker thread * frees them back to the mempool). 
*/ struct prealloc { struct dm_cache_migration *mg; struct dm_bio_prison_cell *cell1; struct dm_bio_prison_cell *cell2; }; static void wake_worker(struct cache *cache) { queue_work(cache->wq, &cache->worker); } /*----------------------------------------------------------------*/ static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache) { /* FIXME: change to use a local slab. */ return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT); } static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell) { dm_bio_prison_free_cell(cache->prison, cell); } static int prealloc_data_structs(struct cache *cache, struct prealloc *p) { if (!p->mg) { p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); if (!p->mg) return -ENOMEM; } if (!p->cell1) { p->cell1 = alloc_prison_cell(cache); if (!p->cell1) return -ENOMEM; } if (!p->cell2) { p->cell2 = alloc_prison_cell(cache); if (!p->cell2) return -ENOMEM; } return 0; } static void prealloc_free_structs(struct cache *cache, struct prealloc *p) { if (p->cell2) free_prison_cell(cache, p->cell2); if (p->cell1) free_prison_cell(cache, p->cell1); if (p->mg) mempool_free(p->mg, cache->migration_pool); } static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p) { struct dm_cache_migration *mg = p->mg; BUG_ON(!mg); p->mg = NULL; return mg; } /* * You must have a cell within the prealloc struct to return. If not this * function will BUG() rather than returning NULL. */ static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p) { struct dm_bio_prison_cell *r = NULL; if (p->cell1) { r = p->cell1; p->cell1 = NULL; } else if (p->cell2) { r = p->cell2; p->cell2 = NULL; } else BUG(); return r; } /* * You can't have more than two cells in a prealloc struct. BUG() will be * called if you try and overfill. 
*/ static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell) { if (!p->cell2) p->cell2 = cell; else if (!p->cell1) p->cell1 = cell; else BUG(); } /*----------------------------------------------------------------*/ static void build_key(dm_oblock_t oblock, struct dm_cell_key *key) { key->virtual = 0; key->dev = 0; key->block = from_oblock(oblock); } /* * The caller hands in a preallocated cell, and a free function for it. * The cell will be freed if there's an error, or if it wasn't used because * a cell with that key already exists. */ typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell); static int bio_detain(struct cache *cache, dm_oblock_t oblock, struct bio *bio, struct dm_bio_prison_cell *cell_prealloc, cell_free_fn free_fn, void *free_context, struct dm_bio_prison_cell **cell_result) { int r; struct dm_cell_key key; build_key(oblock, &key); r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result); if (r) free_fn(free_context, cell_prealloc); return r; } static int get_cell(struct cache *cache, dm_oblock_t oblock, struct prealloc *structs, struct dm_bio_prison_cell **cell_result) { int r; struct dm_cell_key key; struct dm_bio_prison_cell *cell_prealloc; cell_prealloc = prealloc_get_cell(structs); build_key(oblock, &key); r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result); if (r) prealloc_put_cell(structs, cell_prealloc); return r; } /*----------------------------------------------------------------*/ static bool is_dirty(struct cache *cache, dm_cblock_t b) { return test_bit(from_cblock(b), cache->dirty_bitset); } static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) { if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) { cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1); policy_set_dirty(cache->policy, oblock); } } static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) { if 
(test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) { policy_clear_dirty(cache->policy, oblock); cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1); if (!from_cblock(cache->nr_dirty)) dm_table_event(cache->ti->table); } } /*----------------------------------------------------------------*/ static bool block_size_is_power_of_two(struct cache *cache) { return cache->sectors_per_block_shift >= 0; } /* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */ #if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 __always_inline #endif static dm_block_t block_div(dm_block_t b, uint32_t n) { do_div(b, n); return b; } static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) { uint32_t discard_blocks = cache->discard_block_size; dm_block_t b = from_oblock(oblock); if (!block_size_is_power_of_two(cache)) discard_blocks = discard_blocks / cache->sectors_per_block; else discard_blocks >>= cache->sectors_per_block_shift; b = block_div(b, discard_blocks); return to_dblock(b); } static void set_discard(struct cache *cache, dm_dblock_t b) { unsigned long flags; atomic_inc(&cache->stats.discard_count); spin_lock_irqsave(&cache->lock, flags); set_bit(from_dblock(b), cache->discard_bitset); spin_unlock_irqrestore(&cache->lock, flags); } static void clear_discard(struct cache *cache, dm_dblock_t b) { unsigned long flags; spin_lock_irqsave(&cache->lock, flags); clear_bit(from_dblock(b), cache->discard_bitset); spin_unlock_irqrestore(&cache->lock, flags); } static bool is_discarded(struct cache *cache, dm_dblock_t b) { int r; unsigned long flags; spin_lock_irqsave(&cache->lock, flags); r = test_bit(from_dblock(b), cache->discard_bitset); spin_unlock_irqrestore(&cache->lock, flags); return r; } static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b) { int r; unsigned long flags; spin_lock_irqsave(&cache->lock, flags); r = test_bit(from_dblock(oblock_to_dblock(cache, b)), cache->discard_bitset); 
spin_unlock_irqrestore(&cache->lock, flags); return r; } /*----------------------------------------------------------------*/ static void load_stats(struct cache *cache) { struct dm_cache_statistics stats; dm_cache_metadata_get_stats(cache->cmd, &stats); atomic_set(&cache->stats.read_hit, stats.read_hits); atomic_set(&cache->stats.read_miss, stats.read_misses); atomic_set(&cache->stats.write_hit, stats.write_hits); atomic_set(&cache->stats.write_miss, stats.write_misses); } static void save_stats(struct cache *cache) { struct dm_cache_statistics stats; stats.read_hits = atomic_read(&cache->stats.read_hit); stats.read_misses = atomic_read(&cache->stats.read_miss); stats.write_hits = atomic_read(&cache->stats.write_hit); stats.write_misses = atomic_read(&cache->stats.write_miss); dm_cache_metadata_set_stats(cache->cmd, &stats); } /*---------------------------------------------------------------- * Per bio data *--------------------------------------------------------------*/ /* * If using writeback, leave out struct per_bio_data's writethrough fields. */ #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache)) #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data)) static size_t get_per_bio_data_size(struct cache *cache) { return cache->features.write_through ? 
PB_DATA_SIZE_WT : PB_DATA_SIZE_WB; } static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size) { struct per_bio_data *pb = dm_per_bio_data(bio, data_size); BUG_ON(!pb); return pb; } static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size) { struct per_bio_data *pb = get_per_bio_data(bio, data_size); pb->tick = false; pb->req_nr = dm_bio_get_target_bio_nr(bio); pb->all_io_entry = NULL; return pb; } /*---------------------------------------------------------------- * Remapping *--------------------------------------------------------------*/ static void remap_to_origin(struct cache *cache, struct bio *bio) { bio->bi_bdev = cache->origin_dev->bdev; } static void remap_to_cache(struct cache *cache, struct bio *bio, dm_cblock_t cblock) { sector_t bi_sector = bio->bi_sector; bio->bi_bdev = cache->cache_dev->bdev; if (!block_size_is_power_of_two(cache)) bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) + sector_div(bi_sector, cache->sectors_per_block); else bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) | (bi_sector & (cache->sectors_per_block - 1)); } static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) { unsigned long flags; size_t pb_data_size = get_per_bio_data_size(cache); struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); spin_lock_irqsave(&cache->lock, flags); if (cache->need_tick_bio && !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) { pb->tick = true; cache->need_tick_bio = false; } spin_unlock_irqrestore(&cache->lock, flags); } static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, dm_oblock_t oblock) { check_if_tick_bio_needed(cache, bio); remap_to_origin(cache, bio); if (bio_data_dir(bio) == WRITE) clear_discard(cache, oblock_to_dblock(cache, oblock)); } static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, dm_oblock_t oblock, dm_cblock_t cblock) { remap_to_cache(cache, bio, cblock); if 
(bio_data_dir(bio) == WRITE) { set_dirty(cache, oblock, cblock); clear_discard(cache, oblock_to_dblock(cache, oblock)); } } static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) { sector_t block_nr = bio->bi_sector; if (!block_size_is_power_of_two(cache)) (void) sector_div(block_nr, cache->sectors_per_block); else block_nr >>= cache->sectors_per_block_shift; return to_oblock(block_nr); } static int bio_triggers_commit(struct cache *cache, struct bio *bio) { return bio->bi_rw & (REQ_FLUSH | REQ_FUA); } static void issue(struct cache *cache, struct bio *bio) { unsigned long flags; if (!bio_triggers_commit(cache, bio)) { generic_make_request(bio); return; } /* * Batch together any bios that trigger commits and then issue a * single commit for them in do_worker(). */ spin_lock_irqsave(&cache->lock, flags); cache->commit_requested = true; bio_list_add(&cache->deferred_flush_bios, bio); spin_unlock_irqrestore(&cache->lock, flags); } static void defer_writethrough_bio(struct cache *cache, struct bio *bio) { unsigned long flags; spin_lock_irqsave(&cache->lock, flags); bio_list_add(&cache->deferred_writethrough_bios, bio); spin_unlock_irqrestore(&cache->lock, flags); wake_worker(cache); } static void writethrough_endio(struct bio *bio, int err) { struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); bio->bi_end_io = pb->saved_bi_end_io; if (err) { bio_endio(bio, err); return; } dm_bio_restore(&pb->bio_details, bio); remap_to_cache(pb->cache, bio, pb->cblock); /* * We can't issue this bio directly, since we're in interrupt * context. So it gets put on a bio list for processing by the * worker thread. */ defer_writethrough_bio(pb->cache, bio); } /* * When running in writethrough mode we need to send writes to clean blocks * to both the cache and origin devices. In future we'd like to clone the * bio and send them in parallel, but for now we're doing them in * series as this is easier. 
*/ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, dm_oblock_t oblock, dm_cblock_t cblock) { struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); pb->cache = cache; pb->cblock = cblock; pb->saved_bi_end_io = bio->bi_end_io; dm_bio_record(&pb->bio_details, bio); bio->bi_end_io = writethrough_endio; remap_to_origin_clear_discard(pb->cache, bio, oblock); } /*---------------------------------------------------------------- * Migration processing * * Migration covers moving data from the origin device to the cache, or * vice versa. *--------------------------------------------------------------*/ static void free_migration(struct dm_cache_migration *mg) { mempool_free(mg, mg->cache->migration_pool); } static void inc_nr_migrations(struct cache *cache) { atomic_inc(&cache->nr_migrations); } static void dec_nr_migrations(struct cache *cache) { atomic_dec(&cache->nr_migrations); /* * Wake the worker in case we're suspending the target. */ wake_up(&cache->migration_wait); } static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, bool holder) { (holder ? 
dm_cell_release : dm_cell_release_no_holder) (cache->prison, cell, &cache->deferred_bios); free_prison_cell(cache, cell); } static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, bool holder) { unsigned long flags; spin_lock_irqsave(&cache->lock, flags); __cell_defer(cache, cell, holder); spin_unlock_irqrestore(&cache->lock, flags); wake_worker(cache); } static void cleanup_migration(struct dm_cache_migration *mg) { dec_nr_migrations(mg->cache); free_migration(mg); } static void migration_failure(struct dm_cache_migration *mg) { struct cache *cache = mg->cache; if (mg->writeback) { DMWARN_LIMIT("writeback failed; couldn't copy block"); set_dirty(cache, mg->old_oblock, mg->cblock); cell_defer(cache, mg->old_ocell, false); } else if (mg->demote) { DMWARN_LIMIT("demotion failed; couldn't copy block"); policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock); cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1); if (mg->promote) cell_defer(cache, mg->new_ocell, 1); } else { DMWARN_LIMIT("promotion failed; couldn't copy block"); policy_remove_mapping(cache->policy, mg->new_oblock); cell_defer(cache, mg->new_ocell, 1); } cleanup_migration(mg); } static void migration_success_pre_commit(struct dm_cache_migration *mg) { unsigned long flags; struct cache *cache = mg->cache; if (mg->writeback) { cell_defer(cache, mg->old_ocell, false); clear_dirty(cache, mg->old_oblock, mg->cblock); cleanup_migration(mg); return; } else if (mg->demote) { if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) { DMWARN_LIMIT("demotion failed; couldn't update on disk metadata"); policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock); if (mg->promote) cell_defer(cache, mg->new_ocell, true); cleanup_migration(mg); return; } } else { if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) { DMWARN_LIMIT("promotion failed; couldn't update on disk metadata"); policy_remove_mapping(cache->policy, mg->new_oblock); cleanup_migration(mg); return; 
} } spin_lock_irqsave(&cache->lock, flags); list_add_tail(&mg->list, &cache->need_commit_migrations); cache->commit_requested = true; spin_unlock_irqrestore(&cache->lock, flags); } static void migration_success_post_commit(struct dm_cache_migration *mg) { unsigned long flags; struct cache *cache = mg->cache; if (mg->writeback) { DMWARN("writeback unexpectedly triggered commit"); return; } else if (mg->demote) { cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1); if (mg->promote) { mg->demote = false; spin_lock_irqsave(&cache->lock, flags); list_add_tail(&mg->list, &cache->quiesced_migrations); spin_unlock_irqrestore(&cache->lock, flags); } else cleanup_migration(mg); } else { cell_defer(cache, mg->new_ocell, true); clear_dirty(cache, mg->new_oblock, mg->cblock); cleanup_migration(mg); } } static void copy_complete(int read_err, unsigned long write_err, void *context) { unsigned long flags; struct dm_cache_migration *mg = (struct dm_cache_migration *) context; struct cache *cache = mg->cache; if (read_err || write_err) mg->err = true; spin_lock_irqsave(&cache->lock, flags); list_add_tail(&mg->list, &cache->completed_migrations); spin_unlock_irqrestore(&cache->lock, flags); wake_worker(cache); } static void issue_copy_real(struct dm_cache_migration *mg) { int r; struct dm_io_region o_region, c_region; struct cache *cache = mg->cache; o_region.bdev = cache->origin_dev->bdev; o_region.count = cache->sectors_per_block; c_region.bdev = cache->cache_dev->bdev; c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block; c_region.count = cache->sectors_per_block; if (mg->writeback || mg->demote) { /* demote */ o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block; r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg); } else { /* promote */ o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block; r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg); } if (r < 0) 
migration_failure(mg); } static void avoid_copy(struct dm_cache_migration *mg) { atomic_inc(&mg->cache->stats.copies_avoided); migration_success_pre_commit(mg); } static void issue_copy(struct dm_cache_migration *mg) { bool avoid; struct cache *cache = mg->cache; if (mg->writeback || mg->demote) avoid = !is_dirty(cache, mg->cblock) || is_discarded_oblock(cache, mg->old_oblock); else avoid = is_discarded_oblock(cache, mg->new_oblock); avoid ? avoid_copy(mg) : issue_copy_real(mg); } static void complete_migration(struct dm_cache_migration *mg) { if (mg->err) migration_failure(mg); else migration_success_pre_commit(mg); } static void process_migrations(struct cache *cache, struct list_head *head, void (*fn)(struct dm_cache_migration *)) { unsigned long flags; struct list_head list; struct dm_cache_migration *mg, *tmp; INIT_LIST_HEAD(&list); spin_lock_irqsave(&cache->lock, flags); list_splice_init(head, &list); spin_unlock_irqrestore(&cache->lock, flags); list_for_each_entry_safe(mg, tmp, &list, list) fn(mg); } static void __queue_quiesced_migration(struct dm_cache_migration *mg) { list_add_tail(&mg->list, &mg->cache->quiesced_migrations); } static void queue_quiesced_migration(struct dm_cache_migration *mg) { unsigned long flags; struct cache *cache = mg->cache; spin_lock_irqsave(&cache->lock, flags); __queue_quiesced_migration(mg); spin_unlock_irqrestore(&cache->lock, flags); wake_worker(cache); } static void queue_quiesced_migrations(struct cache *cache, struct list_head *work) { unsigned long flags; struct dm_cache_migration *mg, *tmp; spin_lock_irqsave(&cache->lock, flags); list_for_each_entry_safe(mg, tmp, work, list) __queue_quiesced_migration(mg); spin_unlock_irqrestore(&cache->lock, flags); wake_worker(cache); } static void check_for_quiesced_migrations(struct cache *cache, struct per_bio_data *pb) { struct list_head work; if (!pb->all_io_entry) return; INIT_LIST_HEAD(&work); if (pb->all_io_entry) dm_deferred_entry_dec(pb->all_io_entry, &work); if 
(!list_empty(&work))
        queue_quiesced_migrations(cache, &work);
}

/*
 * Add the migration to the all_io deferred set.  If there is no in-flight
 * io it has to wait for, the migration is quiesced immediately.
 */
static void quiesce_migration(struct dm_cache_migration *mg)
{
    if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
        queue_quiesced_migration(mg);
}

/*
 * Set up and kick off a migration that copies an origin block into a
 * cache block (no prior demotion required).
 */
static void promote(struct cache *cache, struct prealloc *structs,
            dm_oblock_t oblock, dm_cblock_t cblock,
            struct dm_bio_prison_cell *cell)
{
    struct dm_cache_migration *mg = prealloc_get_migration(structs);

    mg->err = false;
    mg->writeback = false;
    mg->demote = false;
    mg->promote = true;
    mg->cache = cache;
    mg->new_oblock = oblock;
    mg->cblock = cblock;
    mg->old_ocell = NULL;
    mg->new_ocell = cell;
    mg->start_jiffies = jiffies;

    inc_nr_migrations(cache);
    quiesce_migration(mg);
}

/*
 * Set up and kick off a migration that writes a dirty cache block back
 * to its origin block.
 */
static void writeback(struct cache *cache, struct prealloc *structs,
              dm_oblock_t oblock, dm_cblock_t cblock,
              struct dm_bio_prison_cell *cell)
{
    struct dm_cache_migration *mg = prealloc_get_migration(structs);

    mg->err = false;
    mg->writeback = true;
    mg->demote = false;
    mg->promote = false;
    mg->cache = cache;
    mg->old_oblock = oblock;
    mg->cblock = cblock;
    mg->old_ocell = cell;
    mg->new_ocell = NULL;
    mg->start_jiffies = jiffies;

    inc_nr_migrations(cache);
    quiesce_migration(mg);
}

/*
 * Set up and kick off a migration that evicts old_oblock from cblock and
 * then promotes new_oblock into it.
 */
static void demote_then_promote(struct cache *cache, struct prealloc *structs,
                dm_oblock_t old_oblock, dm_oblock_t new_oblock,
                dm_cblock_t cblock,
                struct dm_bio_prison_cell *old_ocell,
                struct dm_bio_prison_cell *new_ocell)
{
    struct dm_cache_migration *mg = prealloc_get_migration(structs);

    mg->err = false;
    mg->writeback = false;
    mg->demote = true;
    mg->promote = true;
    mg->cache = cache;
    mg->old_oblock = old_oblock;
    mg->new_oblock = new_oblock;
    mg->cblock = cblock;
    mg->old_ocell = old_ocell;
    mg->new_ocell = new_ocell;
    mg->start_jiffies = jiffies;

    inc_nr_migrations(cache);
    quiesce_migration(mg);
}

/*----------------------------------------------------------------
 * bio processing
 *--------------------------------------------------------------*/

/* Park a bio on the deferred list and poke the worker thread. */
static void defer_bio(struct cache *cache, struct bio *bio)
{
    unsigned long flags;

    spin_lock_irqsave(&cache->lock, flags);
    bio_list_add(&cache->deferred_bios, bio);
    spin_unlock_irqrestore(&cache->lock, flags);

    wake_worker(cache);
}

/*
 * Empty flushes are remapped to the origin or the cache device depending
 * on which duplicate of the flush this is (req_nr), then issued.
 */
static void process_flush_bio(struct cache *cache, struct bio *bio)
{
    size_t pb_data_size = get_per_bio_data_size(cache);
    struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

    BUG_ON(bio->bi_size);
    if (!pb->req_nr)
        remap_to_origin(cache, bio);
    else
        remap_to_cache(cache, bio, 0);

    issue(cache, bio);
}

/*
 * People generally discard large parts of a device, eg, the whole device
 * when formatting.  Splitting these large discards up into cache block
 * sized ios and then quiescing (always neccessary for discard) takes too
 * long.
 *
 * We keep it simple, and allow any size of discard to come in, and just
 * mark off blocks on the discard bitset.  No passdown occurs!
 *
 * To implement passdown we need to change the bio_prison such that a cell
 * can have a key that spans many blocks.
 */
static void process_discard_bio(struct cache *cache, struct bio *bio)
{
    /* Round start up and end down, so only whole discard blocks are marked. */
    dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
                          cache->discard_block_size);
    dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
    dm_block_t b;

    end_block = block_div(end_block, cache->discard_block_size);

    for (b = start_block; b < end_block; b++)
        set_discard(cache, to_dblock(b));

    bio_endio(bio, 0);
}

/* Is there spare copy bandwidth for another migration? */
static bool spare_migration_bandwidth(struct cache *cache)
{
    sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
        cache->sectors_per_block;
    return current_volume < cache->migration_threshold;
}

/* Writes to clean blocks in writethrough mode must hit origin and cache. */
static bool is_writethrough_io(struct cache *cache, struct bio *bio,
                   dm_cblock_t cblock)
{
    return bio_data_dir(bio) == WRITE &&
        cache->features.write_through && !is_dirty(cache, cblock);
}

static void inc_hit_counter(struct cache *cache, struct bio *bio)
{
    atomic_inc(bio_data_dir(bio) == READ ?
           &cache->stats.read_hit : &cache->stats.write_hit);
}

static void inc_miss_counter(struct cache *cache, struct bio *bio)
{
    atomic_inc(bio_data_dir(bio) == READ ?
           &cache->stats.read_miss : &cache->stats.write_miss);
}

/*
 * Decide what to do with one data bio: consult the policy and either
 * remap it (hit/miss) or start a promotion/demotion migration.
 */
static void process_bio(struct cache *cache, struct prealloc *structs,
            struct bio *bio)
{
    int r;
    bool release_cell = true;
    dm_oblock_t block = get_bio_block(cache, bio);
    struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
    struct policy_result lookup_result;
    size_t pb_data_size = get_per_bio_data_size(cache);
    struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
    bool discarded_block = is_discarded_oblock(cache, block);
    bool can_migrate = discarded_block || spare_migration_bandwidth(cache);

    /*
     * Check to see if that block is currently migrating.
     */
    cell_prealloc = prealloc_get_cell(structs);
    r = bio_detain(cache, block, bio, cell_prealloc,
               (cell_free_fn) prealloc_put_cell,
               structs, &new_ocell);
    if (r > 0)
        return;

    r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
               bio, &lookup_result);

    if (r == -EWOULDBLOCK)
        /* migration has been denied */
        lookup_result.op = POLICY_MISS;

    switch (lookup_result.op) {
    case POLICY_HIT:
        inc_hit_counter(cache, bio);
        pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

        if (is_writethrough_io(cache, bio, lookup_result.cblock))
            remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
        else
            remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);

        issue(cache, bio);
        break;

    case POLICY_MISS:
        inc_miss_counter(cache, bio);
        pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
        remap_to_origin_clear_discard(cache, bio, block);
        issue(cache, bio);
        break;

    case POLICY_NEW:
        atomic_inc(&cache->stats.promotion);
        promote(cache, structs, block, lookup_result.cblock, new_ocell);
        release_cell = false;
        break;

    case POLICY_REPLACE:
        cell_prealloc = prealloc_get_cell(structs);
        r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
                   (cell_free_fn) prealloc_put_cell,
                   structs, &old_ocell);
        if (r > 0) {
            /*
             * We have to be careful to avoid lock inversion of
             * the cells.  So we back off, and wait for the
             * old_ocell to become free.
             */
            policy_force_mapping(cache->policy, block,
                         lookup_result.old_oblock);
            atomic_inc(&cache->stats.cache_cell_clash);
            break;
        }
        atomic_inc(&cache->stats.demotion);
        atomic_inc(&cache->stats.promotion);

        demote_then_promote(cache, structs, lookup_result.old_oblock,
                    block, lookup_result.cblock,
                    old_ocell, new_ocell);
        release_cell = false;
        break;

    default:
        DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
                (unsigned) lookup_result.op);
        bio_io_error(bio);
    }

    if (release_cell)
        cell_defer(cache, new_ocell, false);
}

/*
 * Has COMMIT_PERIOD elapsed since the last commit?  The first clause
 * copes with jiffies wrap-around.
 */
static int need_commit_due_to_time(struct cache *cache)
{
    return jiffies < cache->last_commit_jiffies ||
           jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
}

/* Commit metadata if something changed and a commit is requested or due. */
static int commit_if_needed(struct cache *cache)
{
    if (dm_cache_changed_this_transaction(cache->cmd) &&
        (cache->commit_requested || need_commit_due_to_time(cache))) {
        atomic_inc(&cache->stats.commit_count);
        cache->last_commit_jiffies = jiffies;
        cache->commit_requested = false;
        return dm_cache_commit(cache->cmd, false);
    }

    return 0;
}

/* Drain the deferred bio list, dispatching each bio by type. */
static void process_deferred_bios(struct cache *cache)
{
    unsigned long flags;
    struct bio_list bios;
    struct bio *bio;
    struct prealloc structs;

    memset(&structs, 0, sizeof(structs));
    bio_list_init(&bios);

    spin_lock_irqsave(&cache->lock, flags);
    bio_list_merge(&bios, &cache->deferred_bios);
    bio_list_init(&cache->deferred_bios);
    spin_unlock_irqrestore(&cache->lock, flags);

    while (!bio_list_empty(&bios)) {
        /*
         * If we've got no free migration structs, and processing
         * this bio might require one, we pause until there are some
         * prepared mappings to process.
         */
        if (prealloc_data_structs(cache, &structs)) {
            /* put the unprocessed bios back for next time */
            spin_lock_irqsave(&cache->lock, flags);
            bio_list_merge(&cache->deferred_bios, &bios);
            spin_unlock_irqrestore(&cache->lock, flags);
            break;
        }

        bio = bio_list_pop(&bios);

        if (bio->bi_rw & REQ_FLUSH)
            process_flush_bio(cache, bio);
        else if (bio->bi_rw & REQ_DISCARD)
            process_discard_bio(cache, bio);
        else
            process_bio(cache, &structs, bio);
    }

    prealloc_free_structs(cache, &structs);
}

/*
 * Drain the bios that were waiting for a metadata commit; submit them if
 * the commit succeeded (submit_bios), otherwise error them.
 */
static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
{
    unsigned long flags;
    struct bio_list bios;
    struct bio *bio;

    bio_list_init(&bios);

    spin_lock_irqsave(&cache->lock, flags);
    bio_list_merge(&bios, &cache->deferred_flush_bios);
    bio_list_init(&cache->deferred_flush_bios);
    spin_unlock_irqrestore(&cache->lock, flags);

    while ((bio = bio_list_pop(&bios)))
        submit_bios ? generic_make_request(bio) : bio_io_error(bio);
}

/* Resubmit bios parked on the writethrough deferred list. */
static void process_deferred_writethrough_bios(struct cache *cache)
{
    unsigned long flags;
    struct bio_list bios;
    struct bio *bio;

    bio_list_init(&bios);

    spin_lock_irqsave(&cache->lock, flags);
    bio_list_merge(&bios, &cache->deferred_writethrough_bios);
    bio_list_init(&cache->deferred_writethrough_bios);
    spin_unlock_irqrestore(&cache->lock, flags);

    while ((bio = bio_list_pop(&bios)))
        generic_make_request(bio);
}

/*
 * Ask the policy for dirty blocks and start writeback migrations while
 * there is spare migration bandwidth.
 */
static void writeback_some_dirty_blocks(struct cache *cache)
{
    int r = 0;
    dm_oblock_t oblock;
    dm_cblock_t cblock;
    struct prealloc structs;
    struct dm_bio_prison_cell *old_ocell;

    memset(&structs, 0, sizeof(structs));

    while (spare_migration_bandwidth(cache)) {
        if (prealloc_data_structs(cache, &structs))
            break;

        r = policy_writeback_work(cache->policy, &oblock, &cblock);
        if (r)
            break;

        r = get_cell(cache, oblock, &structs, &old_ocell);
        if (r) {
            /* block is busy; leave it dirty and retry later */
            policy_set_dirty(cache->policy, oblock);
            break;
        }

        writeback(cache, &structs, oblock, cblock, old_ocell);
    }

    prealloc_free_structs(cache, &structs);
}

/*----------------------------------------------------------------
 * Main worker loop
 *--------------------------------------------------------------*/
static void start_quiescing(struct cache *cache)
{
    unsigned long flags;

    spin_lock_irqsave(&cache->lock, flags);
    cache->quiescing = 1;
    spin_unlock_irqrestore(&cache->lock, flags);
}

static void stop_quiescing(struct cache *cache)
{
    unsigned long flags;

    spin_lock_irqsave(&cache->lock, flags);
    cache->quiescing = 0;
    spin_unlock_irqrestore(&cache->lock, flags);
}

static bool is_quiescing(struct cache *cache)
{
    int r;
    unsigned long flags;

    spin_lock_irqsave(&cache->lock, flags);
    r = cache->quiescing;
    spin_unlock_irqrestore(&cache->lock, flags);

    return r;
}

/* Block until every in-flight migration has completed. */
static void wait_for_migrations(struct cache *cache)
{
    wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
}

static void stop_worker(struct cache *cache)
{
    cancel_delayed_work(&cache->waker);
    flush_workqueue(cache->wq);
}

/* Hand any still-deferred bios back to the block layer for requeueing. */
static void requeue_deferred_io(struct cache *cache)
{
    struct bio *bio;
    struct bio_list bios;

    bio_list_init(&bios);
    bio_list_merge(&bios, &cache->deferred_bios);
    bio_list_init(&cache->deferred_bios);

    while ((bio = bio_list_pop(&bios)))
        bio_endio(bio, DM_ENDIO_REQUEUE);
}

/*
 * Is there anything for do_worker to do?  While quiescing, only
 * migration work counts; new bios are left deferred.
 */
static int more_work(struct cache *cache)
{
    if (is_quiescing(cache))
        return !list_empty(&cache->quiesced_migrations) ||
            !list_empty(&cache->completed_migrations) ||
            !list_empty(&cache->need_commit_migrations);
    else
        return !bio_list_empty(&cache->deferred_bios) ||
            !bio_list_empty(&cache->deferred_flush_bios) ||
            !bio_list_empty(&cache->deferred_writethrough_bios) ||
            !list_empty(&cache->quiesced_migrations) ||
            !list_empty(&cache->completed_migrations) ||
            !list_empty(&cache->need_commit_migrations);
}

/* Single worker thread: processes bios and drives migrations forward. */
static void do_worker(struct work_struct *ws)
{
    struct cache *cache = container_of(ws, struct cache, worker);

    do {
        if (!is_quiescing(cache))
            process_deferred_bios(cache);

        process_migrations(cache, &cache->quiesced_migrations, issue_copy);
        process_migrations(cache, &cache->completed_migrations, complete_migration);

        writeback_some_dirty_blocks(cache);
        process_deferred_writethrough_bios(cache);

        if (commit_if_needed(cache)) {
            process_deferred_flush_bios(cache, false);

            /*
             * FIXME: rollback metadata or just go into a
             * failure mode and error everything
             */
        } else {
            process_deferred_flush_bios(cache, true);
            process_migrations(cache, &cache->need_commit_migrations,
                       migration_success_post_commit);
        }
    } while (more_work(cache));
}

/*
 * We want to commit periodically so that not too much
 * unwritten metadata builds up.
 */
static void do_waker(struct work_struct *ws)
{
    struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);

    policy_tick(cache->policy);
    wake_worker(cache);
    queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
}

/*----------------------------------------------------------------*/

static int is_congested(struct dm_dev *dev, int bdi_bits)
{
    struct request_queue *q = bdev_get_queue(dev->bdev);
    return bdi_congested(&q->backing_dev_info, bdi_bits);
}

/* Congested if either the origin or the cache device is congested. */
static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
    struct cache *cache = container_of(cb, struct cache, callbacks);

    return is_congested(cache->origin_dev, bdi_bits) ||
        is_congested(cache->cache_dev, bdi_bits);
}

/*----------------------------------------------------------------
 * Target methods
 *--------------------------------------------------------------*/

/*
 * This function gets called on the error paths of the constructor, so we
 * have to cope with a partially initialised struct.
 */
static void destroy(struct cache *cache)
{
    unsigned i;

    if (cache->next_migration)
        mempool_free(cache->next_migration, cache->migration_pool);

    if (cache->migration_pool)
        mempool_destroy(cache->migration_pool);

    if (cache->all_io_ds)
        dm_deferred_set_destroy(cache->all_io_ds);

    if (cache->prison)
        dm_bio_prison_destroy(cache->prison);

    if (cache->wq)
        destroy_workqueue(cache->wq);

    if (cache->dirty_bitset)
        free_bitset(cache->dirty_bitset);

    if (cache->discard_bitset)
        free_bitset(cache->discard_bitset);

    if (cache->copier)
        dm_kcopyd_client_destroy(cache->copier);

    if (cache->cmd)
        dm_cache_metadata_close(cache->cmd);

    if (cache->metadata_dev)
        dm_put_device(cache->ti, cache->metadata_dev);

    if (cache->origin_dev)
        dm_put_device(cache->ti, cache->origin_dev);

    if (cache->cache_dev)
        dm_put_device(cache->ti, cache->cache_dev);

    if (cache->policy)
        dm_cache_policy_destroy(cache->policy);

    for (i = 0; i < cache->nr_ctr_args ; i++)
        kfree(cache->ctr_args[i]);
    kfree(cache->ctr_args);

    kfree(cache);
}

static void cache_dtr(struct dm_target *ti)
{
    struct cache *cache = ti->private;

    destroy(cache);
}

static sector_t get_dev_size(struct dm_dev *dev)
{
    return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

/*----------------------------------------------------------------*/

/*
 * Construct a cache device mapping.
 *
 *  cache <metadata dev> <cache dev> <origin dev> <block size>
 *        <#feature args> [<feature arg>]*
 *        <policy> <#policy args> [<policy arg>]*
 *
 * metadata dev    : fast device holding the persistent metadata
 * cache dev       : fast device holding cached data blocks
 * origin dev      : slow device holding original data blocks
 * block size      : cache unit size in sectors
 *
 * #feature args   : number of feature arguments passed
 * feature args    : writethrough.  (The default is writeback.)
 *
 * policy          : the replacement policy to use
 * #policy args    : an even number of policy arguments corresponding
 *                   to key/value pairs passed to the policy
 * policy args     : key/value pairs passed to the policy
 *                   E.g.
'sequential_threshold 1024' * See cache-policies.txt for details. * * Optional feature arguments are: * writethrough : write through caching that prohibits cache block * content from being different from origin block content. * Without this argument, the default behaviour is to write * back cache block contents later for performance reasons, * so they may differ from the corresponding origin blocks. */ struct cache_args { struct dm_target *ti; struct dm_dev *metadata_dev; struct dm_dev *cache_dev; sector_t cache_sectors; struct dm_dev *origin_dev; sector_t origin_sectors; uint32_t block_size; const char *policy_name; int policy_argc; const char **policy_argv; struct cache_features features; }; static void destroy_cache_args(struct cache_args *ca) { if (ca->metadata_dev) dm_put_device(ca->ti, ca->metadata_dev); if (ca->cache_dev) dm_put_device(ca->ti, ca->cache_dev); if (ca->origin_dev) dm_put_device(ca->ti, ca->origin_dev); kfree(ca); } static bool at_least_one_arg(struct dm_arg_set *as, char **error) { if (!as->argc) { *error = "Insufficient args"; return false; } return true; } static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as, char **error) { int r; sector_t metadata_dev_size; char b[BDEVNAME_SIZE]; if (!at_least_one_arg(as, error)) return -EINVAL; r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, &ca->metadata_dev); if (r) { *error = "Error opening metadata device"; return r; } metadata_dev_size = get_dev_size(ca->metadata_dev); if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING) DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.", bdevname(ca->metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS); return 0; } static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as, char **error) { int r; if (!at_least_one_arg(as, error)) return -EINVAL; r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, &ca->cache_dev); if (r) { *error = "Error opening 
cache device"; return r; } ca->cache_sectors = get_dev_size(ca->cache_dev); return 0; } static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as, char **error) { int r; if (!at_least_one_arg(as, error)) return -EINVAL; r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, &ca->origin_dev); if (r) { *error = "Error opening origin device"; return r; } ca->origin_sectors = get_dev_size(ca->origin_dev); if (ca->ti->len > ca->origin_sectors) { *error = "Device size larger than cached device"; return -EINVAL; } return 0; } static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as, char **error) { unsigned long tmp; if (!at_least_one_arg(as, error)) return -EINVAL; if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp || tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS || tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) { *error = "Invalid data block size"; return -EINVAL; } if (tmp > ca->cache_sectors) { *error = "Data block size is larger than the cache device"; return -EINVAL; } ca->block_size = tmp; return 0; } static void init_features(struct cache_features *cf) { cf->mode = CM_WRITE; cf->write_through = false; } static int parse_features(struct cache_args *ca, struct dm_arg_set *as, char **error) { static struct dm_arg _args[] = { {0, 1, "Invalid number of cache feature arguments"}, }; int r; unsigned argc; const char *arg; struct cache_features *cf = &ca->features; init_features(cf); r = dm_read_arg_group(_args, as, &argc, error); if (r) return -EINVAL; while (argc--) { arg = dm_shift_arg(as); if (!strcasecmp(arg, "writeback")) cf->write_through = false; else if (!strcasecmp(arg, "writethrough")) cf->write_through = true; else { *error = "Unrecognised cache feature requested"; return -EINVAL; } } return 0; } static int parse_policy(struct cache_args *ca, struct dm_arg_set *as, char **error) { static struct dm_arg _args[] = { {0, 1024, "Invalid number of policy arguments"}, }; int r; if (!at_least_one_arg(as, error)) return -EINVAL; 
    ca->policy_name = dm_shift_arg(as);

    r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
    if (r)
        return -EINVAL;

    ca->policy_argv = (const char **)as->argv;
    dm_consume_args(as, ca->policy_argc);

    return 0;
}

/* Parse the full ctr argument list in fixed order. */
static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
                char **error)
{
    int r;
    struct dm_arg_set as;

    as.argc = argc;
    as.argv = argv;

    r = parse_metadata_dev(ca, &as, error);
    if (r)
        return r;

    r = parse_cache_dev(ca, &as, error);
    if (r)
        return r;

    r = parse_origin_dev(ca, &as, error);
    if (r)
        return r;

    r = parse_block_size(ca, &as, error);
    if (r)
        return r;

    r = parse_features(ca, &as, error);
    if (r)
        return r;

    r = parse_policy(ca, &as, error);
    if (r)
        return r;

    return 0;
}

/*----------------------------------------------------------------*/

static struct kmem_cache *migration_cache;

/* Returned when a config key is not handled by the core target. */
#define NOT_CORE_OPTION 1

static int process_config_option(struct cache *cache, const char *key, const char *value)
{
    unsigned long tmp;

    if (!strcasecmp(key, "migration_threshold")) {
        if (kstrtoul(value, 10, &tmp))
            return -EINVAL;

        cache->migration_threshold = tmp;
        return 0;
    }

    return NOT_CORE_OPTION;
}

/* Try the core options first, then fall through to the policy. */
static int set_config_value(struct cache *cache, const char *key, const char *value)
{
    int r = process_config_option(cache, key, value);

    if (r == NOT_CORE_OPTION)
        r = policy_set_config_value(cache->policy, key, value);

    if (r)
        DMWARN("bad config value for %s: %s", key, value);

    return r;
}

/* Apply a list of <key> <value> pairs; stops at the first failure. */
static int set_config_values(struct cache *cache, int argc, const char **argv)
{
    int r = 0;

    if (argc & 1) {
        DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
        return -EINVAL;
    }

    while (argc) {
        r = set_config_value(cache, argv[0], argv[1]);
        if (r)
            break;

        argc -= 2;
        argv += 2;
    }

    return r;
}

static int create_cache_policy(struct cache *cache, struct cache_args *ca,
                   char **error)
{
    cache->policy = dm_cache_policy_create(ca->policy_name,
                           cache->cache_size,
                           cache->origin_sectors,
                           cache->sectors_per_block);
    if (!cache->policy) {
        *error = "Error creating cache's policy";
        return -ENOMEM;
    }

    return 0;
}

/*
 * We want the discard block size to be a power of two, at least the size
 * of the cache block size, and have no more than 2^14 discard blocks
 * across the origin.
 */
#define MAX_DISCARD_BLOCKS (1 << 14)

static bool too_many_discard_blocks(sector_t discard_block_size,
                    sector_t origin_size)
{
    (void) sector_div(origin_size, discard_block_size);

    return origin_size > MAX_DISCARD_BLOCKS;
}

/* Keep doubling until the origin needs no more than MAX_DISCARD_BLOCKS. */
static sector_t calculate_discard_block_size(sector_t cache_block_size,
                         sector_t origin_size)
{
    sector_t discard_block_size;

    discard_block_size = roundup_pow_of_two(cache_block_size);

    if (origin_size)
        while (too_many_discard_blocks(discard_block_size, origin_size))
            discard_block_size *= 2;

    return discard_block_size;
}

#define DEFAULT_MIGRATION_THRESHOLD 2048

/*
 * Build a struct cache from the parsed args.  On any failure, destroy()
 * is called, which tolerates a partially initialised struct.  Ownership
 * of the three devices is transferred from ca to the cache.
 */
static int cache_create(struct cache_args *ca, struct cache **result)
{
    int r = 0;
    char **error = &ca->ti->error;
    struct cache *cache;
    struct dm_target *ti = ca->ti;
    dm_block_t origin_blocks;
    struct dm_cache_metadata *cmd;
    bool may_format = ca->features.mode == CM_WRITE;

    cache = kzalloc(sizeof(*cache), GFP_KERNEL);
    if (!cache)
        return -ENOMEM;

    cache->ti = ca->ti;
    ti->private = cache;
    ti->num_flush_bios = 2;
    ti->flush_supported = true;

    ti->num_discard_bios = 1;
    ti->discards_supported = true;
    ti->discard_zeroes_data_unsupported = true;

    cache->features = ca->features;
    ti->per_bio_data_size = get_per_bio_data_size(cache);

    cache->callbacks.congested_fn = cache_is_congested;
    dm_table_add_target_callbacks(ti->table, &cache->callbacks);

    /* devices now owned by the cache; stop ca from releasing them */
    cache->metadata_dev = ca->metadata_dev;
    cache->origin_dev = ca->origin_dev;
    cache->cache_dev = ca->cache_dev;

    ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;

    /* FIXME: factor out this whole section */
    origin_blocks = cache->origin_sectors = ca->origin_sectors;
    origin_blocks = block_div(origin_blocks, ca->block_size);
    cache->origin_blocks = to_oblock(origin_blocks);

    cache->sectors_per_block = ca->block_size;
    if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
        r = -EINVAL;
        goto bad;
    }

    if (ca->block_size & (ca->block_size - 1)) {
        /* non power-of-two block size: must use division, not shift */
        dm_block_t cache_size = ca->cache_sectors;

        cache->sectors_per_block_shift = -1;
        cache_size = block_div(cache_size, ca->block_size);
        cache->cache_size = to_cblock(cache_size);
    } else {
        cache->sectors_per_block_shift = __ffs(ca->block_size);
        cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
    }

    r = create_cache_policy(cache, ca, error);
    if (r)
        goto bad;
    cache->policy_nr_args = ca->policy_argc;
    cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;

    r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
    if (r) {
        *error = "Error setting cache policy's config values";
        goto bad;
    }

    cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
                     ca->block_size, may_format,
                     dm_cache_policy_get_hint_size(cache->policy));
    if (IS_ERR(cmd)) {
        *error = "Error creating metadata object";
        r = PTR_ERR(cmd);
        goto bad;
    }
    cache->cmd = cmd;

    spin_lock_init(&cache->lock);
    bio_list_init(&cache->deferred_bios);
    bio_list_init(&cache->deferred_flush_bios);
    bio_list_init(&cache->deferred_writethrough_bios);
    INIT_LIST_HEAD(&cache->quiesced_migrations);
    INIT_LIST_HEAD(&cache->completed_migrations);
    INIT_LIST_HEAD(&cache->need_commit_migrations);
    atomic_set(&cache->nr_migrations, 0);
    init_waitqueue_head(&cache->migration_wait);

    r = -ENOMEM;
    cache->nr_dirty = 0;
    cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
    if (!cache->dirty_bitset) {
        *error = "could not allocate dirty bitset";
        goto bad;
    }
    clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));

    cache->discard_block_size =
        calculate_discard_block_size(cache->sectors_per_block,
                         cache->origin_sectors);
    cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
    cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
    if (!cache->discard_bitset) {
        *error = "could not allocate discard bitset";
        goto bad;
    }
    clear_bitset(cache->discard_bitset,
             from_dblock(cache->discard_nr_blocks));

    cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
    if (IS_ERR(cache->copier)) {
        *error = "could not create kcopyd client";
        r = PTR_ERR(cache->copier);
        goto bad;
    }

    cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
    if (!cache->wq) {
        *error = "could not create workqueue for metadata object";
        goto bad;
    }
    INIT_WORK(&cache->worker, do_worker);
    INIT_DELAYED_WORK(&cache->waker, do_waker);
    cache->last_commit_jiffies = jiffies;

    cache->prison = dm_bio_prison_create(PRISON_CELLS);
    if (!cache->prison) {
        *error = "could not create bio prison";
        goto bad;
    }

    cache->all_io_ds = dm_deferred_set_create();
    if (!cache->all_io_ds) {
        *error = "could not create all_io deferred set";
        goto bad;
    }

    cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
                             migration_cache);
    if (!cache->migration_pool) {
        *error = "Error creating cache's migration mempool";
        goto bad;
    }

    cache->next_migration = NULL;

    cache->need_tick_bio = true;
    cache->sized = false;
    cache->quiescing = false;
    cache->commit_requested = false;
    cache->loaded_mappings = false;
    cache->loaded_discards = false;

    load_stats(cache);

    atomic_set(&cache->stats.demotion, 0);
    atomic_set(&cache->stats.promotion, 0);
    atomic_set(&cache->stats.copies_avoided, 0);
    atomic_set(&cache->stats.cache_cell_clash, 0);
    atomic_set(&cache->stats.commit_count, 0);
    atomic_set(&cache->stats.discard_count, 0);

    *result = cache;
    return 0;

bad:
    destroy(cache);
    return r;
}

/* Deep-copy the ctr argv so it can be reproduced by cache_status. */
static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
{
    unsigned i;
    const char **copy;

    copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
    if (!copy)
        return -ENOMEM;
    for (i = 0; i < argc; i++) {
        copy[i] = kstrdup(argv[i], GFP_KERNEL);
        if (!copy[i]) {
            /* undo the strings duplicated so far */
            while (i--)
                kfree(copy[i]);
            kfree(copy);
            return -ENOMEM;
        }
    }

    cache->nr_ctr_args = argc;
    cache->ctr_args = copy;

    return 0;
}

static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
    int r = -EINVAL;
    struct cache_args *ca;
    struct cache
*cache = NULL; ca = kzalloc(sizeof(*ca), GFP_KERNEL); if (!ca) { ti->error = "Error allocating memory for cache"; return -ENOMEM; } ca->ti = ti; r = parse_cache_args(ca, argc, argv, &ti->error); if (r) goto out; r = cache_create(ca, &cache); if (r) goto out; r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); if (r) { destroy(cache); goto out; } ti->private = cache; out: destroy_cache_args(ca); return r; } static int cache_map(struct dm_target *ti, struct bio *bio) { struct cache *cache = ti->private; int r; dm_oblock_t block = get_bio_block(cache, bio); size_t pb_data_size = get_per_bio_data_size(cache); bool can_migrate = false; bool discarded_block; struct dm_bio_prison_cell *cell; struct policy_result lookup_result; struct per_bio_data *pb; if (from_oblock(block) > from_oblock(cache->origin_blocks)) { /* * This can only occur if the io goes to a partial block at * the end of the origin device. We don't cache these. * Just remap to the origin and carry on. */ remap_to_origin_clear_discard(cache, bio, block); return DM_MAPIO_REMAPPED; } pb = init_per_bio_data(bio, pb_data_size); if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { defer_bio(cache, bio); return DM_MAPIO_SUBMITTED; } /* * Check to see if that block is currently migrating. 
     */
    cell = alloc_prison_cell(cache);
    if (!cell) {
        defer_bio(cache, bio);
        return DM_MAPIO_SUBMITTED;
    }

    r = bio_detain(cache, block, bio, cell,
               (cell_free_fn) free_prison_cell,
               cache, &cell);
    if (r) {
        if (r < 0)
            defer_bio(cache, bio);

        return DM_MAPIO_SUBMITTED;
    }

    discarded_block = is_discarded_oblock(cache, block);

    r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
               bio, &lookup_result);
    if (r == -EWOULDBLOCK) {
        /* policy wants a migration; release holders and defer */
        cell_defer(cache, cell, true);
        return DM_MAPIO_SUBMITTED;

    } else if (r) {
        DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
        bio_io_error(bio);
        return DM_MAPIO_SUBMITTED;
    }

    switch (lookup_result.op) {
    case POLICY_HIT:
        inc_hit_counter(cache, bio);
        pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

        if (is_writethrough_io(cache, bio, lookup_result.cblock))
            remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
        else
            remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);

        cell_defer(cache, cell, false);
        break;

    case POLICY_MISS:
        inc_miss_counter(cache, bio);
        pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

        if (pb->req_nr != 0) {
            /*
             * This is a duplicate writethrough io that is no
             * longer needed because the block has been demoted.
             */
            bio_endio(bio, 0);
            cell_defer(cache, cell, false);
            return DM_MAPIO_SUBMITTED;
        } else {
            remap_to_origin_clear_discard(cache, bio, block);
            cell_defer(cache, cell, false);
        }
        break;

    default:
        DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
                (unsigned) lookup_result.op);
        bio_io_error(bio);
        return DM_MAPIO_SUBMITTED;
    }

    return DM_MAPIO_REMAPPED;
}

/* end_io hook: feed policy ticks and release bio prison cells. */
static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
{
    struct cache *cache = ti->private;
    unsigned long flags;
    size_t pb_data_size = get_per_bio_data_size(cache);
    struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

    if (pb->tick) {
        policy_tick(cache->policy);

        spin_lock_irqsave(&cache->lock, flags);
        cache->need_tick_bio = true;
        spin_unlock_irqrestore(&cache->lock, flags);
    }

    check_for_quiesced_migrations(cache, pb);

    return 0;
}

/* Mirror the in-core dirty bitset into the metadata device. */
static int write_dirty_bitset(struct cache *cache)
{
    unsigned i, r;

    for (i = 0; i < from_cblock(cache->cache_size); i++) {
        r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
                       is_dirty(cache, to_cblock(i)));
        if (r)
            return r;
    }

    return 0;
}

/* Mirror the in-core discard bitset into the metadata device. */
static int write_discard_bitset(struct cache *cache)
{
    unsigned i, r;

    r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
                       cache->discard_nr_blocks);
    if (r) {
        DMERR("could not resize on-disk discard bitset");
        return r;
    }

    for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
        r = dm_cache_set_discard(cache->cmd, to_dblock(i),
                     is_discarded(cache, to_dblock(i)));
        if (r)
            return r;
    }

    return 0;
}

/* policy_walk_mappings callback: persist one policy hint. */
static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
             uint32_t hint)
{
    struct cache *cache = context;
    return dm_cache_save_hint(cache->cmd, cblock, hint);
}

/* Persist all the policy's hints via the metadata device. */
static int write_hints(struct cache *cache)
{
    int r;

    r = dm_cache_begin_hints(cache->cmd, cache->policy);
    if (r) {
        DMERR("dm_cache_begin_hints failed");
        return r;
    }

    r = policy_walk_mappings(cache->policy, save_hint, cache);
    if (r)
        DMERR("policy_walk_mappings failed");

    return r;
}

/*
 * returns true on success
 */
static bool
     */
    if (from_cblock(cache->cache_size) != actual_cache_size ||
        !cache->sized) {
        cache->cache_size = to_cblock(actual_cache_size);

        r = dm_cache_resize(cache->cmd, cache->cache_size);
        if (r) {
            DMERR("could not resize cache metadata");
            return r;
        }

        cache->sized = true;
    }

    if (!cache->loaded_mappings) {
        r = dm_cache_load_mappings(cache->cmd, cache->policy,
                       load_mapping, cache);
        if (r) {
            DMERR("could not load cache mappings");
            return r;
        }

        cache->loaded_mappings = true;
    }

    if (!cache->loaded_discards) {
        r = dm_cache_load_discards(cache->cmd, load_discard, cache);
        if (r) {
            DMERR("could not load origin discards");
            return r;
        }

        cache->loaded_discards = true;
    }

    return r;
}

static void cache_resume(struct dm_target *ti)
{
    struct cache *cache = ti->private;

    cache->need_tick_bio = true;
    /* run the waker immediately to kick the worker and restart ticks */
    do_waker(&cache->waker.work);
}

/*
 * Status format:
 *
 * <#used metadata blocks>/<#total metadata blocks>
 * <#read hits> <#read misses> <#write hits> <#write misses>
 * <#demotions> <#promotions> <#blocks in cache> <#dirty>
 * <#features> <features>*
 * <#core args> <core args>
 * <#policy args> <policy args>*
 */
static void cache_status(struct dm_target *ti, status_type_t type,
             unsigned status_flags, char *result, unsigned maxlen)
{
    int r = 0;
    unsigned i;
    ssize_t sz = 0;
    dm_block_t nr_free_blocks_metadata = 0;
    dm_block_t nr_blocks_metadata = 0;
    char buf[BDEVNAME_SIZE];
    struct cache *cache = ti->private;
    dm_cblock_t residency;

    switch (type) {
    case STATUSTYPE_INFO:
        /* Commit to ensure statistics aren't out-of-date */
        if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
            r = dm_cache_commit(cache->cmd, false);
            if (r)
                DMERR("could not commit metadata for accurate status");
        }

        r = dm_cache_get_free_metadata_block_count(cache->cmd,
                               &nr_free_blocks_metadata);
        if (r) {
            DMERR("could not get metadata free block count");
            goto err;
        }

        r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
        if (r) {
            DMERR("could not get metadata device size");
            goto err;
        }

        residency = policy_residency(cache->policy);

        DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ",
               (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
               (unsigned long long)nr_blocks_metadata,
               (unsigned) atomic_read(&cache->stats.read_hit),
               (unsigned) atomic_read(&cache->stats.read_miss),
               (unsigned) atomic_read(&cache->stats.write_hit),
               (unsigned) atomic_read(&cache->stats.write_miss),
               (unsigned) atomic_read(&cache->stats.demotion),
               (unsigned) atomic_read(&cache->stats.promotion),
               (unsigned long long) from_cblock(residency),
               cache->nr_dirty);

        if (cache->features.write_through)
            DMEMIT("1 writethrough ");
        else
            DMEMIT("0 ");

        DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
        if (sz < maxlen) {
            r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
            if (r)
                DMERR("policy_emit_config_values returned %d", r);
        }

        break;

    case STATUSTYPE_TABLE:
        /* reproduce the table line: devices, then the saved ctr args */
        format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
        DMEMIT("%s ", buf);
        format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
        DMEMIT("%s ", buf);
        format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
        DMEMIT("%s", buf);

        for (i = 0; i < cache->nr_ctr_args - 1; i++)
            DMEMIT(" %s", cache->ctr_args[i]);
        if (cache->nr_ctr_args)
            DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
    }

    return;

err:
    DMEMIT("Error");
}

/*
 * Supports <key> <value>.
 *
 * The key migration_threshold is supported by the cache target core.
 */
static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
    struct cache *cache = ti->private;

    if (argc != 2)
        return -EINVAL;

    return set_config_value(cache, argv[0], argv[1]);
}

static int cache_iterate_devices(struct dm_target *ti,
                 iterate_devices_callout_fn fn, void *data)
{
    int r = 0;
    struct cache *cache = ti->private;

    r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
    if (!r)
        r = fn(ti, cache->origin_dev, 0, ti->len, data);

    return r;
}

/*
 * We assume I/O is going to the origin (which is the volume
 * more likely to have restrictions e.g. by being striped).
 * (Looking up the exact location of the data would be expensive
 * and could always be out of date by the time the bio is submitted.)
 */
static int cache_bvec_merge(struct dm_target *ti,
                struct bvec_merge_data *bvm,
                struct bio_vec *biovec, int max_size)
{
    struct cache *cache = ti->private;
    struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);

    if (!q->merge_bvec_fn)
        return max_size;

    bvm->bi_bdev = cache->origin_dev->bdev;
    return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
    /*
     * FIXME: these limits may be incompatible with the cache device
     */
    limits->max_discard_sectors = cache->discard_block_size * 1024;
    limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
}

static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
    struct cache *cache = ti->private;

    blk_limits_io_min(limits, 0);
    blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
    set_discard_limits(cache, limits);
}

/*----------------------------------------------------------------*/

static struct target_type cache_target = {
    .name = "cache",
    .version = {1, 1, 1},
    .module = THIS_MODULE,
    .ctr = cache_ctr,
    .dtr = cache_dtr,
    .map = cache_map,
    .end_io = cache_end_io,
    .postsuspend = cache_postsuspend,
    .preresume = cache_preresume,
    .resume = cache_resume,
    .status = cache_status,
    .message = cache_message,
    .iterate_devices = cache_iterate_devices,
    .merge = cache_bvec_merge,
    .io_hints = cache_io_hints,
};

static int __init dm_cache_init(void)
{
    int r;

    r = dm_register_target(&cache_target);
    if (r) {
        DMERR("cache target registration failed: %d", r);
        return r;
    }

    migration_cache = KMEM_CACHE(dm_cache_migration, 0);
    if (!migration_cache) {
        dm_unregister_target(&cache_target);
        return -ENOMEM;
    }

    return 0;
}

static void __exit dm_cache_exit(void)
{
    dm_unregister_target(&cache_target);
    kmem_cache_destroy(migration_cache);
}

module_init(dm_cache_init);
module_exit(dm_cache_exit); MODULE_DESCRIPTION(DM_NAME " cache target"); MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>"); MODULE_LICENSE("GPL");
gpl-2.0
ronenil/net-next
drivers/iio/industrialio-event.c
62
13698
/* Industrial I/O event handling * * Copyright (c) 2008 Jonathan Cameron * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * Based on elements of hwmon and input subsystems. */ #include <linux/anon_inodes.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/kfifo.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/wait.h> #include <linux/iio/iio.h> #include "iio_core.h" #include <linux/iio/sysfs.h> #include <linux/iio/events.h> /** * struct iio_event_interface - chrdev interface for an event line * @wait: wait queue to allow blocking reads of events * @det_events: list of detected events * @dev_attr_list: list of event interface sysfs attribute * @flags: file operations related flags including busy flag. * @group: event interface sysfs attribute group */ struct iio_event_interface { wait_queue_head_t wait; DECLARE_KFIFO(det_events, struct iio_event_data, 16); struct list_head dev_attr_list; unsigned long flags; struct attribute_group group; struct mutex read_lock; }; /** * iio_push_event() - try to add event to the list for userspace reading * @indio_dev: IIO device structure * @ev_code: What event * @timestamp: When the event occurred * * Note: The caller must make sure that this function is not running * concurrently for the same indio_dev more than once. **/ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp) { struct iio_event_interface *ev_int = indio_dev->event_interface; struct iio_event_data ev; int copied; /* Does anyone care? 
*/ if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { ev.id = ev_code; ev.timestamp = timestamp; copied = kfifo_put(&ev_int->det_events, ev); if (copied != 0) wake_up_poll(&ev_int->wait, POLLIN); } return 0; } EXPORT_SYMBOL(iio_push_event); /** * iio_event_poll() - poll the event queue to find out if it has data */ static unsigned int iio_event_poll(struct file *filep, struct poll_table_struct *wait) { struct iio_dev *indio_dev = filep->private_data; struct iio_event_interface *ev_int = indio_dev->event_interface; unsigned int events = 0; if (!indio_dev->info) return -ENODEV; poll_wait(filep, &ev_int->wait, wait); if (!kfifo_is_empty(&ev_int->det_events)) events = POLLIN | POLLRDNORM; return events; } static ssize_t iio_event_chrdev_read(struct file *filep, char __user *buf, size_t count, loff_t *f_ps) { struct iio_dev *indio_dev = filep->private_data; struct iio_event_interface *ev_int = indio_dev->event_interface; unsigned int copied; int ret; if (!indio_dev->info) return -ENODEV; if (count < sizeof(struct iio_event_data)) return -EINVAL; do { if (kfifo_is_empty(&ev_int->det_events)) { if (filep->f_flags & O_NONBLOCK) return -EAGAIN; ret = wait_event_interruptible(ev_int->wait, !kfifo_is_empty(&ev_int->det_events) || indio_dev->info == NULL); if (ret) return ret; if (indio_dev->info == NULL) return -ENODEV; } if (mutex_lock_interruptible(&ev_int->read_lock)) return -ERESTARTSYS; ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied); mutex_unlock(&ev_int->read_lock); if (ret) return ret; /* * If we couldn't read anything from the fifo (a different * thread might have been faster) we either return -EAGAIN if * the file descriptor is non-blocking, otherwise we go back to * sleep and wait for more data to arrive. 
*/ if (copied == 0 && (filep->f_flags & O_NONBLOCK)) return -EAGAIN; } while (copied == 0); return copied; } static int iio_event_chrdev_release(struct inode *inode, struct file *filep) { struct iio_dev *indio_dev = filep->private_data; struct iio_event_interface *ev_int = indio_dev->event_interface; clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); iio_device_put(indio_dev); return 0; } static const struct file_operations iio_event_chrdev_fileops = { .read = iio_event_chrdev_read, .poll = iio_event_poll, .release = iio_event_chrdev_release, .owner = THIS_MODULE, .llseek = noop_llseek, }; int iio_event_getfd(struct iio_dev *indio_dev) { struct iio_event_interface *ev_int = indio_dev->event_interface; int fd; if (ev_int == NULL) return -ENODEV; if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) return -EBUSY; iio_device_get(indio_dev); fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops, indio_dev, O_RDONLY | O_CLOEXEC); if (fd < 0) { clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); iio_device_put(indio_dev); } else { kfifo_reset_out(&ev_int->det_events); } return fd; } static const char * const iio_ev_type_text[] = { [IIO_EV_TYPE_THRESH] = "thresh", [IIO_EV_TYPE_MAG] = "mag", [IIO_EV_TYPE_ROC] = "roc", [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive", [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive", [IIO_EV_TYPE_CHANGE] = "change", }; static const char * const iio_ev_dir_text[] = { [IIO_EV_DIR_EITHER] = "either", [IIO_EV_DIR_RISING] = "rising", [IIO_EV_DIR_FALLING] = "falling" }; static const char * const iio_ev_info_text[] = { [IIO_EV_INFO_ENABLE] = "en", [IIO_EV_INFO_VALUE] = "value", [IIO_EV_INFO_HYSTERESIS] = "hysteresis", [IIO_EV_INFO_PERIOD] = "period", }; static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr) { return attr->c->event_spec[attr->address & 0xffff].dir; } static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr) { return attr->c->event_spec[attr->address & 0xffff].type; } static enum iio_event_info 
iio_ev_attr_info(struct iio_dev_attr *attr) { return (attr->address >> 16) & 0xffff; } static ssize_t iio_ev_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int ret; bool val; ret = strtobool(buf, &val); if (ret < 0) return ret; ret = indio_dev->info->write_event_config(indio_dev, this_attr->c, iio_ev_attr_type(this_attr), iio_ev_attr_dir(this_attr), val); return (ret < 0) ? ret : len; } static ssize_t iio_ev_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int val; val = indio_dev->info->read_event_config(indio_dev, this_attr->c, iio_ev_attr_type(this_attr), iio_ev_attr_dir(this_attr)); if (val < 0) return val; else return sprintf(buf, "%d\n", val); } static ssize_t iio_ev_value_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int val, val2, val_arr[2]; int ret; ret = indio_dev->info->read_event_value(indio_dev, this_attr->c, iio_ev_attr_type(this_attr), iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr), &val, &val2); if (ret < 0) return ret; val_arr[0] = val; val_arr[1] = val2; return iio_format_value(buf, ret, 2, val_arr); } static ssize_t iio_ev_value_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int val, val2; int ret; if (!indio_dev->info->write_event_value) return -EINVAL; ret = iio_str_to_fixpoint(buf, 100000, &val, &val2); if (ret) return ret; ret = indio_dev->info->write_event_value(indio_dev, this_attr->c, iio_ev_attr_type(this_attr), iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr), val, val2); if 
(ret < 0) return ret; return len; } static int iio_device_add_event(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int spec_index, enum iio_event_type type, enum iio_event_direction dir, enum iio_shared_by shared_by, const unsigned long *mask) { ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t); unsigned int attrcount = 0; unsigned int i; char *postfix; int ret; for_each_set_bit(i, mask, sizeof(*mask)*8) { if (i >= ARRAY_SIZE(iio_ev_info_text)) return -EINVAL; if (dir != IIO_EV_DIR_NONE) postfix = kasprintf(GFP_KERNEL, "%s_%s_%s", iio_ev_type_text[type], iio_ev_dir_text[dir], iio_ev_info_text[i]); else postfix = kasprintf(GFP_KERNEL, "%s_%s", iio_ev_type_text[type], iio_ev_info_text[i]); if (postfix == NULL) return -ENOMEM; if (i == IIO_EV_INFO_ENABLE) { show = iio_ev_state_show; store = iio_ev_state_store; } else { show = iio_ev_value_show; store = iio_ev_value_store; } ret = __iio_add_chan_devattr(postfix, chan, show, store, (i << 16) | spec_index, shared_by, &indio_dev->dev, &indio_dev->event_interface->dev_attr_list); kfree(postfix); if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) continue; if (ret) return ret; attrcount++; } return attrcount; } static int iio_device_add_event_sysfs(struct iio_dev *indio_dev, struct iio_chan_spec const *chan) { int ret = 0, i, attrcount = 0; enum iio_event_direction dir; enum iio_event_type type; for (i = 0; i < chan->num_event_specs; i++) { type = chan->event_spec[i].type; dir = chan->event_spec[i].dir; ret = iio_device_add_event(indio_dev, chan, i, type, dir, IIO_SEPARATE, &chan->event_spec[i].mask_separate); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_event(indio_dev, chan, i, type, dir, IIO_SHARED_BY_TYPE, &chan->event_spec[i].mask_shared_by_type); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_event(indio_dev, chan, i, type, dir, IIO_SHARED_BY_DIR, 
&chan->event_spec[i].mask_shared_by_dir); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_event(indio_dev, chan, i, type, dir, IIO_SHARED_BY_ALL, &chan->event_spec[i].mask_shared_by_all); if (ret < 0) return ret; attrcount += ret; } ret = attrcount; return ret; } static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev) { int j, ret, attrcount = 0; /* Dynamically created from the channels array */ for (j = 0; j < indio_dev->num_channels; j++) { ret = iio_device_add_event_sysfs(indio_dev, &indio_dev->channels[j]); if (ret < 0) return ret; attrcount += ret; } return attrcount; } static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev) { int j; for (j = 0; j < indio_dev->num_channels; j++) { if (indio_dev->channels[j].num_event_specs != 0) return true; } return false; } static void iio_setup_ev_int(struct iio_event_interface *ev_int) { INIT_KFIFO(ev_int->det_events); init_waitqueue_head(&ev_int->wait); mutex_init(&ev_int->read_lock); } static const char *iio_event_group_name = "events"; int iio_device_register_eventset(struct iio_dev *indio_dev) { struct iio_dev_attr *p; int ret = 0, attrcount_orig = 0, attrcount, attrn; struct attribute **attr; if (!(indio_dev->info->event_attrs || iio_check_for_dynamic_events(indio_dev))) return 0; indio_dev->event_interface = kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL); if (indio_dev->event_interface == NULL) return -ENOMEM; INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list); iio_setup_ev_int(indio_dev->event_interface); if (indio_dev->info->event_attrs != NULL) { attr = indio_dev->info->event_attrs->attrs; while (*attr++ != NULL) attrcount_orig++; } attrcount = attrcount_orig; if (indio_dev->channels) { ret = __iio_add_event_config_attrs(indio_dev); if (ret < 0) goto error_free_setup_event_lines; attrcount += ret; } indio_dev->event_interface->group.name = iio_event_group_name; indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1, 
sizeof(indio_dev->event_interface->group.attrs[0]), GFP_KERNEL); if (indio_dev->event_interface->group.attrs == NULL) { ret = -ENOMEM; goto error_free_setup_event_lines; } if (indio_dev->info->event_attrs) memcpy(indio_dev->event_interface->group.attrs, indio_dev->info->event_attrs->attrs, sizeof(indio_dev->event_interface->group.attrs[0]) *attrcount_orig); attrn = attrcount_orig; /* Add all elements from the list. */ list_for_each_entry(p, &indio_dev->event_interface->dev_attr_list, l) indio_dev->event_interface->group.attrs[attrn++] = &p->dev_attr.attr; indio_dev->groups[indio_dev->groupcounter++] = &indio_dev->event_interface->group; return 0; error_free_setup_event_lines: iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list); kfree(indio_dev->event_interface); return ret; } /** * iio_device_wakeup_eventset - Wakes up the event waitqueue * @indio_dev: The IIO device * * Wakes up the event waitqueue used for poll() and blocking read(). * Should usually be called when the device is unregistered. */ void iio_device_wakeup_eventset(struct iio_dev *indio_dev) { if (indio_dev->event_interface == NULL) return; wake_up(&indio_dev->event_interface->wait); } void iio_device_unregister_eventset(struct iio_dev *indio_dev) { if (indio_dev->event_interface == NULL) return; iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list); kfree(indio_dev->event_interface->group.attrs); kfree(indio_dev->event_interface); }
gpl-2.0
EPDCenter/android_kernel_rockchip_mk908
drivers/i2c/i2c-core.c
62
71201
/* i2c-core.c - a device driver for the iic-bus interface */ /* ------------------------------------------------------------------------- */ /* Copyright (C) 1995-99 Simon G. Vogl This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* ------------------------------------------------------------------------- */ /* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi>. All SMBus-related things are written by Frodo Looijaard <frodol@dds.nl> SMBus 2.0 support by Mark Studebaker <mdsxyz123@yahoo.com> and Jean Delvare <khali@linux-fr.org> Mux support by Rodolfo Giometti <giometti@enneenne.com> and Michael Lawnick <michael.lawnick.ext@nsn.com> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/idr.h> #include <linux/mutex.h> #include <linux/of_device.h> #include <linux/completion.h> #include <linux/hardirq.h> #include <linux/irqflags.h> #include <linux/rwsem.h> #include <linux/pm_runtime.h> #include <asm/uaccess.h> #include "i2c-core.h" /* core_lock protects i2c_adapter_idr, and guarantees that device detection, deletion of detected devices, and attach_adapter and detach_adapter calls are serialized */ static DEFINE_MUTEX(core_lock); static DEFINE_IDR(i2c_adapter_idr); static struct device_type i2c_client_type; static int i2c_check_addr(struct i2c_adapter 
*adapter, int addr); static int i2c_check_addr_ex(struct i2c_adapter *adapter, int addr); static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver); /* ------------------------------------------------------------------------- */ #ifdef CONFIG_I2C_DEV_RK29 extern struct completion i2c_dev_complete; extern void i2c_dev_dump_start(struct i2c_adapter *adap, struct i2c_msg *msgs, int num); extern void i2c_dev_dump_stop(struct i2c_adapter *adap, struct i2c_msg *msgs, int num, int ret); #endif static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, const struct i2c_client *client) { while (id->name[0]) { if (strcmp(client->name, id->name) == 0) return id; id++; } return NULL; } static int i2c_device_match(struct device *dev, struct device_driver *drv) { struct i2c_client *client = i2c_verify_client(dev); struct i2c_driver *driver; if (!client) return 0; /* Attempt an OF style match */ if (of_driver_match_device(dev, drv)) return 1; driver = to_i2c_driver(drv); /* match on an id table if there is one */ if (driver->id_table) return i2c_match_id(driver->id_table, client) != NULL; return 0; } #ifdef CONFIG_HOTPLUG /* uevent helps with hotplug: modprobe -q $(MODALIAS) */ static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env) { struct i2c_client *client = to_i2c_client(dev); if (add_uevent_var(env, "MODALIAS=%s%s", I2C_MODULE_PREFIX, client->name)) return -ENOMEM; dev_dbg(dev, "uevent\n"); return 0; } #else #define i2c_device_uevent NULL #endif /* CONFIG_HOTPLUG */ static int i2c_device_probe(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); struct i2c_driver *driver; int status; if (!client) return 0; driver = to_i2c_driver(dev->driver); if (!driver->probe || !driver->id_table) return -ENODEV; client->driver = driver; if (!device_can_wakeup(&client->dev)) device_init_wakeup(&client->dev, client->flags & I2C_CLIENT_WAKE); dev_dbg(dev, "probe\n"); status = driver->probe(client, 
i2c_match_id(driver->id_table, client)); if (status) { client->driver = NULL; i2c_set_clientdata(client, NULL); } return status; } static int i2c_device_remove(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); struct i2c_driver *driver; int status; if (!client || !dev->driver) return 0; driver = to_i2c_driver(dev->driver); if (driver->remove) { dev_dbg(dev, "remove\n"); status = driver->remove(client); } else { dev->driver = NULL; status = 0; } if (status == 0) { client->driver = NULL; i2c_set_clientdata(client, NULL); } return status; } static void i2c_device_shutdown(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); struct i2c_driver *driver; if (!client || !dev->driver) return; driver = to_i2c_driver(dev->driver); if (driver->shutdown) driver->shutdown(client); } #ifdef CONFIG_PM_SLEEP static int i2c_legacy_suspend(struct device *dev, pm_message_t mesg) { struct i2c_client *client = i2c_verify_client(dev); struct i2c_driver *driver; if (!client || !dev->driver) return 0; driver = to_i2c_driver(dev->driver); if (!driver->suspend) return 0; return driver->suspend(client, mesg); } static int i2c_legacy_resume(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); struct i2c_driver *driver; if (!client || !dev->driver) return 0; driver = to_i2c_driver(dev->driver); if (!driver->resume) return 0; return driver->resume(client); } static int i2c_device_pm_suspend(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (pm) return pm_generic_suspend(dev); else return i2c_legacy_suspend(dev, PMSG_SUSPEND); } static int i2c_device_pm_resume(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (pm) return pm_generic_resume(dev); else return i2c_legacy_resume(dev); } static int i2c_device_pm_freeze(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; if (pm) return pm_generic_freeze(dev); else return i2c_legacy_suspend(dev, PMSG_FREEZE); } static int i2c_device_pm_thaw(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (pm) return pm_generic_thaw(dev); else return i2c_legacy_resume(dev); } static int i2c_device_pm_poweroff(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (pm) return pm_generic_poweroff(dev); else return i2c_legacy_suspend(dev, PMSG_HIBERNATE); } static int i2c_device_pm_restore(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (pm) return pm_generic_restore(dev); else return i2c_legacy_resume(dev); } #else /* !CONFIG_PM_SLEEP */ #define i2c_device_pm_suspend NULL #define i2c_device_pm_resume NULL #define i2c_device_pm_freeze NULL #define i2c_device_pm_thaw NULL #define i2c_device_pm_poweroff NULL #define i2c_device_pm_restore NULL #endif /* !CONFIG_PM_SLEEP */ static void i2c_client_dev_release(struct device *dev) { kfree(to_i2c_client(dev)); } static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", dev->type == &i2c_client_type ? 
to_i2c_client(dev)->name : to_i2c_adapter(dev)->name); } static ssize_t show_modalias(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name); } static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL); static struct attribute *i2c_dev_attrs[] = { &dev_attr_name.attr, /* modalias helps coldplug: modprobe $(cat .../modalias) */ &dev_attr_modalias.attr, NULL }; static struct attribute_group i2c_dev_attr_group = { .attrs = i2c_dev_attrs, }; static const struct attribute_group *i2c_dev_attr_groups[] = { &i2c_dev_attr_group, NULL }; static const struct dev_pm_ops i2c_device_pm_ops = { .suspend = i2c_device_pm_suspend, .resume = i2c_device_pm_resume, .freeze = i2c_device_pm_freeze, .thaw = i2c_device_pm_thaw, .poweroff = i2c_device_pm_poweroff, .restore = i2c_device_pm_restore, SET_RUNTIME_PM_OPS( pm_generic_runtime_suspend, pm_generic_runtime_resume, pm_generic_runtime_idle ) }; struct bus_type i2c_bus_type = { .name = "i2c", .match = i2c_device_match, .probe = i2c_device_probe, .remove = i2c_device_remove, .shutdown = i2c_device_shutdown, .pm = &i2c_device_pm_ops, }; EXPORT_SYMBOL_GPL(i2c_bus_type); static struct device_type i2c_client_type = { .groups = i2c_dev_attr_groups, .uevent = i2c_device_uevent, .release = i2c_client_dev_release, }; /** * i2c_verify_client - return parameter as i2c_client, or NULL * @dev: device, probably from some driver model iterator * * When traversing the driver model tree, perhaps using driver model * iterators like @device_for_each_child(), you can't assume very much * about the nodes you find. Use this function to avoid oopses caused * by wrongly treating some non-I2C device as an i2c_client. */ struct i2c_client *i2c_verify_client(struct device *dev) { return (dev->type == &i2c_client_type) ? 
to_i2c_client(dev) : NULL; } EXPORT_SYMBOL(i2c_verify_client); /* This is a permissive address validity check, I2C address map constraints * are purposely not enforced, except for the general call address. */ static int i2c_check_client_addr_validity(const struct i2c_client *client) { if (client->flags & I2C_CLIENT_TEN) { /* 10-bit address, all values are valid */ if (client->addr > 0x3ff) return -EINVAL; } else { /* 7-bit address, reject the general call address */ if (client->addr == 0x00 || client->addr > 0x7f) return -EINVAL; } return 0; } /* And this is a strict address validity check, used when probing. If a * device uses a reserved address, then it shouldn't be probed. 7-bit * addressing is assumed, 10-bit address devices are rare and should be * explicitly enumerated. */ static int i2c_check_addr_validity(unsigned short addr) { /* * Reserved addresses per I2C specification: * 0x00 General call address / START byte * 0x01 CBUS address * 0x02 Reserved for different bus format * 0x03 Reserved for future purposes * 0x04-0x07 Hs-mode master code * 0x78-0x7b 10-bit slave addressing * 0x7c-0x7f Reserved for future purposes */ if (addr < 0x08 || addr > 0x77) return -EINVAL; return 0; } static int __i2c_check_addr_busy(struct device *dev, void *addrp) { struct i2c_client *client = i2c_verify_client(dev); int addr = *(int *)addrp; if (client && client->addr == addr) return -EBUSY; return 0; } /* walk up mux tree */ static int i2c_check_mux_parents(struct i2c_adapter *adapter, int addr) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); int result; result = device_for_each_child(&adapter->dev, &addr, __i2c_check_addr_busy); if (!result && parent) result = i2c_check_mux_parents(parent, addr); return result; } /* recurse down mux tree */ static int i2c_check_mux_children(struct device *dev, void *addrp) { int result; if (dev->type == &i2c_adapter_type) result = device_for_each_child(dev, addrp, i2c_check_mux_children); else result = 
__i2c_check_addr_busy(dev, addrp); return result; } static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); int result = 0; if (parent) result = i2c_check_mux_parents(parent, addr); if (!result) result = device_for_each_child(&adapter->dev, &addr, i2c_check_mux_children); return result; } /** * i2c_lock_adapter - Get exclusive access to an I2C bus segment * @adapter: Target I2C bus segment */ void i2c_lock_adapter(struct i2c_adapter *adapter) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); if (parent) i2c_lock_adapter(parent); else rt_mutex_lock(&adapter->bus_lock); } EXPORT_SYMBOL_GPL(i2c_lock_adapter); /** * i2c_trylock_adapter - Try to get exclusive access to an I2C bus segment * @adapter: Target I2C bus segment */ static int i2c_trylock_adapter(struct i2c_adapter *adapter) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); if (parent) return i2c_trylock_adapter(parent); else return rt_mutex_trylock(&adapter->bus_lock); } /** * i2c_unlock_adapter - Release exclusive access to an I2C bus segment * @adapter: Target I2C bus segment */ void i2c_unlock_adapter(struct i2c_adapter *adapter) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); if (parent) i2c_unlock_adapter(parent); else rt_mutex_unlock(&adapter->bus_lock); } EXPORT_SYMBOL_GPL(i2c_unlock_adapter); /** * i2c_new_device - instantiate an i2c device * @adap: the adapter managing the device * @info: describes one I2C device; bus_num is ignored * Context: can sleep * * Create an i2c device. Binding is handled through driver model * probe()/remove() methods. A driver may be bound to this device when we * return from this function, or any later moment (e.g. maybe hotplugging will * load the driver module). This call is not appropriate for use by mainboard * initialization logic, which usually runs during an arch_initcall() long * before any i2c_adapter could exist. 
* * This returns the new i2c client, which may be saved for later use with * i2c_unregister_device(); or NULL to indicate an error. */ struct i2c_client * i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) { struct i2c_client *client; int status; client = kzalloc(sizeof *client, GFP_KERNEL); if (!client) return NULL; client->adapter = adap; client->dev.platform_data = info->platform_data; if (info->archdata) client->dev.archdata = *info->archdata; client->flags = info->flags; client->addr = info->addr; client->irq = info->irq; client->udelay = info->udelay; // add by kfx strlcpy(client->name, info->type, sizeof(client->name)); /* Check for address validity */ status = i2c_check_client_addr_validity(client); if (status) { dev_err(&adap->dev, "Invalid %d-bit I2C address 0x%02hx\n", client->flags & I2C_CLIENT_TEN ? 10 : 7, client->addr); goto out_err_silent; } /* Check for address business */ #if 0 status = i2c_check_addr_busy(adap, client->addr); if (status) goto out_err; #else /* ddl@rock-chips.com : Devices which have some i2c addr can work in same i2c bus, if devices havn't work at the same time.*/ status = i2c_check_addr_ex(adap, client->addr); if (status != 0) dev_err(&adap->dev, "%d i2c clients have been registered at 0x%02x", status, client->addr); #endif client->dev.parent = &client->adapter->dev; client->dev.bus = &i2c_bus_type; client->dev.type = &i2c_client_type; client->dev.of_node = info->of_node; /* ddl@rock-chips.com : Devices which have some i2c addr can work in same i2c bus, if devices havn't work at the same time.*/ #if 0 dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), client->addr); #else if (status == 0) dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), client->addr); else dev_set_name(&client->dev, "%d-%04x-%01x", i2c_adapter_id(adap), client->addr,status); #endif status = device_register(&client->dev); if (status) goto out_err; dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n", 
		client->name, dev_name(&client->dev));

	return client;

out_err:
	dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x "
		"(%d)\n", client->name, client->addr, status);
out_err_silent:
	kfree(client);
	return NULL;
}
EXPORT_SYMBOL_GPL(i2c_new_device);

#ifdef CONFIG_PLAT_RK
#define RK610_KEY "rk610"

/* Log one child i2c client of an adapter (helper for the warning below). */
static int __i2c_client_print(struct device *dev, void *param)
{
	struct i2c_client *client = i2c_verify_client(dev);

	if (client)
		printk(KERN_WARNING "client: %s, addr: 0x%x\n",
		       client->name, client->addr);
	return 0;
}

/*
 * Classify a child client: rk610 clients accumulate in bits 8+ of *ex,
 * everything else in the low byte.
 * NOTE(review): >255 non-rk610 clients would overflow into the rk610
 * bits -- presumably impossible on one bus, but worth confirming.
 */
static int __i2c_check_rk610_ex(struct device *dev, void *ex)
{
	struct i2c_client *client = i2c_verify_client(dev);

	if (!client)
		return 0;
	if (strstr(client->name, RK610_KEY) != NULL)
		*(int *)ex += 1 << 8;
	else
		*(int *)ex += 1;
	return 0;
}

/*
 * Return 1 (and log all clients) when bus @nr hosts an rk610 device plus
 * at least one other client; 0 otherwise; -EINVAL if the bus is missing.
 */
int i2c_check_rk610_ex(int nr)
{
	int ex = 0, rk610_ex = 0, oth_ex = 0;
	struct i2c_adapter *adap = i2c_get_adapter(nr);

	if (!adap) {
		printk(KERN_ERR "%s: adap(%d) is not exist\n", __func__, nr);
		return -EINVAL;
	}
	device_for_each_child(&adap->dev, &ex, __i2c_check_rk610_ex);
	if (ex & (1 << 8))
		rk610_ex = 1;
	oth_ex = ex & 0xff;

	if (rk610_ex && oth_ex) {
		ex = 1;
		printk(KERN_WARNING "******************* WARNING ********************\n");
		dev_warn(&adap->dev, "%s is exist, clients:\n", RK610_KEY);
		device_for_each_child(&adap->dev, NULL, __i2c_client_print);
		printk(KERN_WARNING "************************************************\n");
	} else
		ex = 0;

	return ex;
}

#ifdef CONFIG_I2C_RK30
/*
 * Register a new i2c client described by @info on bus @nr.
 * Unlike i2c_new_device() this enforces address exclusivity (-EEXIST).
 * Returns 0 on success or a negative errno.
 */
int i2c_add_device(int nr, struct i2c_board_info const *info)
{
	int			status;
	struct i2c_client	*client;
	struct i2c_adapter	*adap = i2c_get_adapter(nr);

	if (!adap) {
		printk(KERN_ERR "%s: adap(%d) is not exist\n", __func__, nr);
		return -EINVAL;
	}

	client = kzalloc(sizeof *client, GFP_KERNEL);
	if (!client) {
		dev_err(&adap->dev, "no memory for client\n");
		return -ENOMEM;
	}

	client->adapter = adap;
	client->dev.platform_data = info->platform_data;
	if (info->archdata)
		client->dev.archdata = *info->archdata;
	client->flags = info->flags;
	client->addr = info->addr;
	client->irq = info->irq;
	client->udelay = info->udelay; // add by kfx
	strlcpy(client->name, info->type, sizeof(client->name));

	/* Check for address validity */
	status = i2c_check_client_addr_validity(client);
	if (status) {
		dev_err(&adap->dev, "Invalid %d-bit I2C address 0x%02hx\n",
			client->flags & I2C_CLIENT_TEN ? 10 : 7, client->addr);
		goto out_err_silent;
	}

	/* Check for address business */
	status = i2c_check_addr_busy(adap, client->addr);
	if (status) {
		status = -EEXIST;
		dev_warn(&adap->dev,
			 "i2c clients have been registered at 0x%02x\n",
			 client->addr);
		goto out_err_silent;
	}

	client->dev.parent = &client->adapter->dev;
	client->dev.bus = &i2c_bus_type;
	client->dev.type = &i2c_client_type;
	client->dev.of_node = info->of_node;

	dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
		     client->addr);
	status = device_register(&client->dev);
	if (status)
		goto out_err;

	dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n",
		client->name, dev_name(&client->dev));

	return 0;

out_err:
	dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x "
		"(%d)\n", client->name, client->addr, status);
out_err_silent:
	kfree(client);
	return status;
}
#else
/* Stubs when CONFIG_I2C_RK30 is not set. */
int i2c_check_rk610_ex(int nr)
{
	return 0;
}
int i2c_add_device(int nr, struct i2c_board_info const *info)
{
	return 0;
}
#endif
EXPORT_SYMBOL_GPL(i2c_check_rk610_ex);
EXPORT_SYMBOL_GPL(i2c_add_device);
#endif

/**
 * i2c_unregister_device - reverse effect of i2c_new_device()
 * @client: value returned from i2c_new_device()
 * Context: can sleep
 */
void i2c_unregister_device(struct i2c_client *client)
{
	device_unregister(&client->dev);
}
EXPORT_SYMBOL_GPL(i2c_unregister_device);

/* The "dummy" driver only reserves an address; it never touches hardware. */
static const struct i2c_device_id dummy_id[] = {
	{ "dummy", 0 },
	{ },
};

static int dummy_probe(struct i2c_client *client,
		       const struct i2c_device_id *id)
{
	return 0;
}

static int dummy_remove(struct i2c_client *client)
{
	return 0;
}

static struct i2c_driver dummy_driver = {
	.driver.name = "dummy",
	.probe = dummy_probe,
	.remove = dummy_remove,
	.id_table =
	dummy_id,
};

/**
 * i2c_new_dummy - return a new i2c device bound to a dummy driver
 * @adapter: the adapter managing the device
 * @address: seven bit address to be used
 * Context: can sleep
 *
 * This returns an I2C client bound to the "dummy" driver, intended for use
 * with devices that consume multiple addresses. Examples of such chips
 * include various EEPROMS (like 24c04 and 24c08 models).
 *
 * These dummy devices have two main uses. First, most I2C and SMBus calls
 * except i2c_transfer() need a client handle; the dummy will be that handle.
 * And second, this prevents the specified address from being bound to a
 * different driver.
 *
 * This returns the new i2c client, which should be saved for later use with
 * i2c_unregister_device(); or NULL to indicate an error.
 */
struct i2c_client *i2c_new_dummy(struct i2c_adapter *adapter, u16 address)
{
	struct i2c_board_info info = {
		I2C_BOARD_INFO("dummy", address),
	};

	return i2c_new_device(adapter, &info);
}
EXPORT_SYMBOL_GPL(i2c_new_dummy);

/* ------------------------------------------------------------------------- */

/* I2C bus adapters -- one roots each I2C or SMBUS segment */

static void i2c_adapter_dev_release(struct device *dev)
{
	struct i2c_adapter *adap = to_i2c_adapter(dev);
	/* Wakes up i2c_del_adapter(), which waits on dev_released. */
	complete(&adap->dev_released);
}

/*
 * Let users instantiate I2C devices through sysfs. This can be used when
 * platform initialization code doesn't contain the proper data for
 * whatever reason. Also useful for drivers that do device detection and
 * detection fails, either because the device uses an unexpected address,
 * or this is a compatible device with different ID register values.
 *
 * Parameter checking may look overzealous, but we really don't want
 * the user to provide incorrect parameters.
 */
static ssize_t
i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
		     const char *buf, size_t count)
{
	struct i2c_adapter *adap = to_i2c_adapter(dev);
	struct i2c_board_info info;
	struct i2c_client *client;
	char *blank, end;
	int res;

	memset(&info, 0, sizeof(struct i2c_board_info));

	/* Expected input: "<name> <addr>\n"; the name ends at the blank. */
	blank = strchr(buf, ' ');
	if (!blank) {
		dev_err(dev, "%s: Missing parameters\n", "new_device");
		return -EINVAL;
	}
	if (blank - buf > I2C_NAME_SIZE - 1) {
		dev_err(dev, "%s: Invalid device name\n", "new_device");
		return -EINVAL;
	}
	memcpy(info.type, buf, blank - buf);

	/* Parse remaining parameters, reject extra parameters */
	res = sscanf(++blank, "%hi%c", &info.addr, &end);
	if (res < 1) {
		dev_err(dev, "%s: Can't parse I2C address\n", "new_device");
		return -EINVAL;
	}
	if (res > 1  && end != '\n') {
		dev_err(dev, "%s: Extra parameters\n", "new_device");
		return -EINVAL;
	}

	client = i2c_new_device(adap, &info);
	if (!client)
		return -EINVAL;

	/* Keep track of the added device */
	mutex_lock(&adap->userspace_clients_lock);
	list_add_tail(&client->detected, &adap->userspace_clients);
	mutex_unlock(&adap->userspace_clients_lock);
	dev_info(dev, "%s: Instantiated device %s at 0x%02hx\n", "new_device",
		 info.type, info.addr);

	return count;
}

/*
 * And of course let the users delete the devices they instantiated, if
 * they got it wrong. This interface can only be used to delete devices
 * instantiated by i2c_sysfs_new_device above. This guarantees that we
 * don't delete devices to which some kernel code still has references.
 *
 * Parameter checking may look overzealous, but we really don't want
 * the user to delete the wrong device.
 */
static ssize_t
i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct i2c_adapter *adap = to_i2c_adapter(dev);
	struct i2c_client *client, *next;
	unsigned short addr;
	char end;
	int res;

	/* Parse parameters, reject extra parameters */
	res = sscanf(buf, "%hi%c", &addr, &end);
	if (res < 1) {
		dev_err(dev, "%s: Can't parse I2C address\n", "delete_device");
		return -EINVAL;
	}
	if (res > 1  && end != '\n') {
		dev_err(dev, "%s: Extra parameters\n", "delete_device");
		return -EINVAL;
	}

	/* Make sure the device was added through sysfs */
	res = -ENOENT;
	mutex_lock(&adap->userspace_clients_lock);
	list_for_each_entry_safe(client, next, &adap->userspace_clients,
				 detected) {
		if (client->addr == addr) {
			dev_info(dev, "%s: Deleting device %s at 0x%02hx\n",
				 "delete_device", client->name, client->addr);

			list_del(&client->detected);
			i2c_unregister_device(client);
			res = count;
			break;
		}
	}
	mutex_unlock(&adap->userspace_clients_lock);

	if (res < 0)
		dev_err(dev, "%s: Can't find device in list\n",
			"delete_device");
	return res;
}

static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
static DEVICE_ATTR(delete_device, S_IWUSR, NULL, i2c_sysfs_delete_device);

static struct attribute *i2c_adapter_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_new_device.attr,
	&dev_attr_delete_device.attr,
	NULL
};

static struct attribute_group i2c_adapter_attr_group = {
	.attrs = i2c_adapter_attrs,
};

static const struct attribute_group *i2c_adapter_attr_groups[] = {
	&i2c_adapter_attr_group,
	NULL
};

struct device_type i2c_adapter_type = {
	.groups		= i2c_adapter_attr_groups,
	.release	= i2c_adapter_dev_release,
};
EXPORT_SYMBOL_GPL(i2c_adapter_type);

#ifdef CONFIG_I2C_COMPAT
static struct class_compat *i2c_adapter_compat_class;
#endif

/* Instantiate the i2c devices pre-declared (board info) for this bus. */
static void i2c_scan_static_board_info(struct i2c_adapter *adapter)
{
	struct i2c_devinfo	*devinfo;

	down_read(&__i2c_board_lock);
	list_for_each_entry(devinfo, &__i2c_board_list, list) {
		if (devinfo->busnum == adapter->nr
		    && !i2c_new_device(adapter,
				       &devinfo->board_info))
			dev_err(&adapter->dev,
				"Can't create device at 0x%02x\n",
				devinfo->board_info.addr);
	}
	up_read(&__i2c_board_lock);
}

/* Run detection and the deprecated attach_adapter hook for one driver. */
static int i2c_do_add_adapter(struct i2c_driver *driver,
			      struct i2c_adapter *adap)
{
	/* Detect supported devices on that bus, and instantiate them */
	i2c_detect(adap, driver);

	/* Let legacy drivers scan this bus for matching devices */
	if (driver->attach_adapter) {
		dev_warn(&adap->dev, "%s: attach_adapter method is deprecated\n",
			 driver->driver.name);
		dev_warn(&adap->dev, "Please use another way to instantiate "
			 "your i2c_client\n");
		/* We ignore the return code; if it fails, too bad */
		driver->attach_adapter(adap);
	}
	return 0;
}

static int __process_new_adapter(struct device_driver *d, void *data)
{
	return i2c_do_add_adapter(to_i2c_driver(d), data);
}

/* Common registration path; caller has already allocated adap->nr. */
static int i2c_register_adapter(struct i2c_adapter *adap)
{
	int res = 0;

	/* Can't register until after driver model init */
	if (unlikely(WARN_ON(!i2c_bus_type.p))) {
		res = -EAGAIN;
		goto out_list;
	}

	/* Sanity checks */
	if (unlikely(adap->name[0] == '\0')) {
		pr_err("i2c-core: Attempt to register an adapter with "
		       "no name!\n");
		return -EINVAL;
	}
	if (unlikely(!adap->algo)) {
		pr_err("i2c-core: Attempt to register adapter '%s' with "
		       "no algo!\n", adap->name);
		return -EINVAL;
	}

	rt_mutex_init(&adap->bus_lock);
	mutex_init(&adap->userspace_clients_lock);
	INIT_LIST_HEAD(&adap->userspace_clients);

	/* Set default timeout to 1 second if not already set */
	if (adap->timeout == 0)
		adap->timeout = HZ;

	dev_set_name(&adap->dev, "i2c-%d", adap->nr);
	adap->dev.bus = &i2c_bus_type;
	adap->dev.type = &i2c_adapter_type;
	res = device_register(&adap->dev);
	if (res)
		goto out_list;

	dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);

#ifdef CONFIG_I2C_COMPAT
	res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev,
				       adap->dev.parent);
	if (res)
		dev_warn(&adap->dev,
			 "Failed to create compatibility class link\n");
#endif

	/* create pre-declared device nodes */
	if
	   (adap->nr < __i2c_first_dynamic_bus_num)
		i2c_scan_static_board_info(adap);

	/* Notify drivers */
	mutex_lock(&core_lock);
	bus_for_each_drv(&i2c_bus_type, NULL, adap, __process_new_adapter);
	mutex_unlock(&core_lock);

	return 0;

out_list:
	mutex_lock(&core_lock);
	idr_remove(&i2c_adapter_idr, adap->nr);
	mutex_unlock(&core_lock);
	return res;
}

/**
 * i2c_add_adapter - declare i2c adapter, use dynamic bus number
 * @adapter: the adapter to add
 * Context: can sleep
 *
 * This routine is used to declare an I2C adapter when its bus number
 * doesn't matter. Examples: for I2C adapters dynamically added by
 * USB links or PCI plugin cards.
 *
 * When this returns zero, a new bus number was allocated and stored
 * in adap->nr, and the specified adapter became available for clients.
 * Otherwise, a negative errno value is returned.
 */
int i2c_add_adapter(struct i2c_adapter *adapter)
{
	int	id, res = 0;

retry:
	if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	mutex_lock(&core_lock);
	/* "above" here means "above or equal to", sigh */
	res = idr_get_new_above(&i2c_adapter_idr, adapter,
				__i2c_first_dynamic_bus_num, &id);
	mutex_unlock(&core_lock);

	if (res < 0) {
		if (res == -EAGAIN)
			goto retry;
		return res;
	}

	adapter->nr = id;
	return i2c_register_adapter(adapter);
}
EXPORT_SYMBOL(i2c_add_adapter);

/**
 * i2c_add_numbered_adapter - declare i2c adapter, use static bus number
 * @adap: the adapter to register (with adap->nr initialized)
 * Context: can sleep
 *
 * This routine is used to declare an I2C adapter when its bus number
 * matters. For example, use it for I2C adapters from system-on-chip CPUs,
 * or otherwise built in to the system's mainboard, and where i2c_board_info
 * is used to properly configure I2C devices.
 *
 * If no devices have been pre-declared for this bus, then be sure to
 * register the adapter before any dynamically allocated ones. Otherwise
 * the required bus ID may not be available.
 *
 * When this returns zero, the specified adapter became available for
 * clients using the bus number provided in adap->nr. Also, the table
 * of I2C devices pre-declared using i2c_register_board_info() is scanned,
 * and the appropriate driver model device nodes are created. Otherwise, a
 * negative errno value is returned.
 */
int i2c_add_numbered_adapter(struct i2c_adapter *adap)
{
	int	id;
	int	status;

	if (adap->nr & ~MAX_ID_MASK)
		return -EINVAL;

retry:
	if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	mutex_lock(&core_lock);
	/* "above" here means "above or equal to", sigh;
	 * we need the "equal to" result to force the result */
	status = idr_get_new_above(&i2c_adapter_idr, adap, adap->nr, &id);
	if (status == 0 && id != adap->nr) {
		/* The requested static bus number was already taken. */
		status = -EBUSY;
		idr_remove(&i2c_adapter_idr, id);
	}
	mutex_unlock(&core_lock);
	if (status == -EAGAIN)
		goto retry;

	if (status == 0)
		status = i2c_register_adapter(adap);
	return status;
}
EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);

/* Tear down one driver's presence on an adapter being removed. */
static int i2c_do_del_adapter(struct i2c_driver *driver,
			      struct i2c_adapter *adapter)
{
	struct i2c_client *client, *_n;
	int res;

	/* Remove the devices we created ourselves as the result of hardware
	 * probing (using a driver's detect method) */
	list_for_each_entry_safe(client, _n, &driver->clients, detected) {
		if (client->adapter == adapter) {
			dev_dbg(&adapter->dev, "Removing %s at 0x%x\n",
				client->name, client->addr);
			list_del(&client->detected);
			i2c_unregister_device(client);
		}
	}

	if (!driver->detach_adapter)
		return 0;
	dev_warn(&adapter->dev, "%s: detach_adapter method is deprecated\n",
		 driver->driver.name);
	res = driver->detach_adapter(adapter);
	if (res)
		dev_err(&adapter->dev, "detach_adapter failed (%d) "
			"for driver [%s]\n", res, driver->driver.name);
	return res;
}

/* Pass 1 of adapter teardown: unregister every non-"dummy" child client. */
static int __unregister_client(struct device *dev, void *dummy)
{
	struct i2c_client *client = i2c_verify_client(dev);
	if (client && strcmp(client->name, "dummy"))
		i2c_unregister_device(client);
	return 0;
}

static int
__unregister_dummy(struct device *dev, void *dummy)
{
	/* Pass 2 of adapter teardown: remove the remaining "dummy" clients. */
	struct i2c_client *client = i2c_verify_client(dev);
	if (client)
		i2c_unregister_device(client);
	return 0;
}

static int __process_removed_adapter(struct device_driver *d, void *data)
{
	return i2c_do_del_adapter(to_i2c_driver(d), data);
}

/**
 * i2c_del_adapter - unregister I2C adapter
 * @adap: the adapter being unregistered
 * Context: can sleep
 *
 * This unregisters an I2C adapter which was previously registered
 * by @i2c_add_adapter or @i2c_add_numbered_adapter.
 */
int i2c_del_adapter(struct i2c_adapter *adap)
{
	int res = 0;
	struct i2c_adapter *found;
	struct i2c_client *client, *next;

	/* First make sure that this adapter was ever added */
	mutex_lock(&core_lock);
	found = idr_find(&i2c_adapter_idr, adap->nr);
	mutex_unlock(&core_lock);
	if (found != adap) {
		pr_debug("i2c-core: attempting to delete unregistered "
			 "adapter [%s]\n", adap->name);
		return -EINVAL;
	}

	/* Tell drivers about this removal */
	mutex_lock(&core_lock);
	res = bus_for_each_drv(&i2c_bus_type, NULL, adap,
			       __process_removed_adapter);
	mutex_unlock(&core_lock);
	if (res)
		return res;

	/* Remove devices instantiated from sysfs */
	mutex_lock(&adap->userspace_clients_lock);
	list_for_each_entry_safe(client, next, &adap->userspace_clients,
				 detected) {
		dev_dbg(&adap->dev, "Removing %s at 0x%x\n", client->name,
			client->addr);
		list_del(&client->detected);
		i2c_unregister_device(client);
	}
	mutex_unlock(&adap->userspace_clients_lock);

	/* Detach any active clients. This can't fail, thus we do not
	 * check the returned value. This is a two-pass process, because
	 * we can't remove the dummy devices during the first pass: they
	 * could have been instantiated by real devices wishing to clean
	 * them up properly, so we give them a chance to do that first.
	 */
	res = device_for_each_child(&adap->dev, NULL, __unregister_client);
	res = device_for_each_child(&adap->dev, NULL, __unregister_dummy);

#ifdef CONFIG_I2C_COMPAT
	class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
				 adap->dev.parent);
#endif

	/* device name is gone after device_unregister */
	dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);

	/* clean up the sysfs representation */
	init_completion(&adap->dev_released);
	device_unregister(&adap->dev);

	/* wait for sysfs to drop all references */
	wait_for_completion(&adap->dev_released);

	/* free bus id */
	mutex_lock(&core_lock);
	idr_remove(&i2c_adapter_idr, adap->nr);
	mutex_unlock(&core_lock);

	/* Clear the device structure in case this adapter is
	 * ever going to be added again */
	memset(&adap->dev, 0, sizeof(adap->dev));

	return 0;
}
EXPORT_SYMBOL(i2c_del_adapter);

/* ------------------------------------------------------------------------- */

/* Iterate over every device on the i2c bus, holding core_lock throughout. */
int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *))
{
	int res;

	mutex_lock(&core_lock);
	res = bus_for_each_dev(&i2c_bus_type, NULL, data, fn);
	mutex_unlock(&core_lock);

	return res;
}
EXPORT_SYMBOL_GPL(i2c_for_each_dev);

static int __process_new_driver(struct device *dev, void *data)
{
	/* Skip client devices; only adapters are of interest here. */
	if (dev->type != &i2c_adapter_type)
		return 0;
	return i2c_do_add_adapter(data, to_i2c_adapter(dev));
}

/*
 * An i2c_driver is used with one or more i2c_client (device) nodes to access
 * i2c slave chips, on a bus instance associated with some i2c_adapter.
 */

int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
{
	int res;

	/* Can't register until after driver model init */
	if (unlikely(WARN_ON(!i2c_bus_type.p)))
		return -EAGAIN;

	/* add the driver to the list of i2c drivers in the driver core */
	driver->driver.owner = owner;
	driver->driver.bus = &i2c_bus_type;

	/* When registration returns, the driver core
	 * will have called probe() for all matching-but-unbound devices.
	 */
	res = driver_register(&driver->driver);
	if (res)
		return res;

	/* Drivers should switch to dev_pm_ops instead. */
	if (driver->suspend)
		pr_warn("i2c-core: driver [%s] using legacy suspend method\n",
			driver->driver.name);
	if (driver->resume)
		pr_warn("i2c-core: driver [%s] using legacy resume method\n",
			driver->driver.name);

	pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);

	INIT_LIST_HEAD(&driver->clients);
	/* Walk the adapters that are already present */
	i2c_for_each_dev(driver, __process_new_driver);

	return 0;
}
EXPORT_SYMBOL(i2c_register_driver);

static int __process_removed_driver(struct device *dev, void *data)
{
	if (dev->type != &i2c_adapter_type)
		return 0;
	return i2c_do_del_adapter(data, to_i2c_adapter(dev));
}

/**
 * i2c_del_driver - unregister I2C driver
 * @driver: the driver being unregistered
 * Context: can sleep
 */
void i2c_del_driver(struct i2c_driver *driver)
{
	i2c_for_each_dev(driver, __process_removed_driver);

	driver_unregister(&driver->driver);
	pr_debug("i2c-core: driver [%s] unregistered\n", driver->driver.name);
}
EXPORT_SYMBOL(i2c_del_driver);

/* ------------------------------------------------------------------------- */

/* ddl@rock-chips.com : Devices which have some i2c addr can work in same
 * i2c bus, if devices havn't work at the same time. */

/* Accumulator for counting clients at one address. */
struct i2c_addr_cnt {
	int addr;	/* address to match */
	int cnt;	/* number of matching children found so far */
};

static int __i2c_check_addr_ex(struct device *dev, void *addrp)
{
	struct i2c_client	*client = i2c_verify_client(dev);
	struct i2c_addr_cnt	*addrinfo = (struct i2c_addr_cnt *)addrp;
	int			addr = addrinfo->addr;

	if (client && client->addr == addr) {
		addrinfo->cnt++;
	}
	return 0;
}

/* Count how many clients with address @addr already exist on @adapter. */
static int i2c_check_addr_ex(struct i2c_adapter *adapter, int addr)
{
	struct i2c_addr_cnt addrinfo;

	addrinfo.addr = addr;
	addrinfo.cnt = 0;
	device_for_each_child(&adapter->dev, &addrinfo, __i2c_check_addr_ex);
	return addrinfo.cnt;
}

/**
 * i2c_use_client - increments the reference count of the i2c client structure
 * @client: the client being referenced
 *
 * Each live reference
 to a client should be refcounted. The driver model does
 * that automatically as part of driver binding, so that most drivers don't
 * need to do this explicitly: they hold a reference until they're unbound
 * from the device.
 *
 * A pointer to the client with the incremented reference counter is returned.
 */
struct i2c_client *i2c_use_client(struct i2c_client *client)
{
	if (client && get_device(&client->dev))
		return client;
	return NULL;
}
EXPORT_SYMBOL(i2c_use_client);

/**
 * i2c_release_client - release a use of the i2c client structure
 * @client: the client being no longer referenced
 *
 * Must be called when a user of a client is finished with it.
 */
void i2c_release_client(struct i2c_client *client)
{
	if (client)
		put_device(&client->dev);
}
EXPORT_SYMBOL(i2c_release_client);

struct i2c_cmd_arg {
	unsigned	cmd;
	void		*arg;
};

/* Forward one command to a client's driver, if it implements ->command. */
static int i2c_cmd(struct device *dev, void *_arg)
{
	struct i2c_client	*client = i2c_verify_client(dev);
	struct i2c_cmd_arg	*arg = _arg;

	if (client && client->driver && client->driver->command)
		client->driver->command(client, arg->cmd, arg->arg);
	return 0;
}

/* Broadcast @cmd/@arg to the drivers of all clients on @adap. */
void i2c_clients_command(struct i2c_adapter *adap, unsigned int cmd, void *arg)
{
	struct i2c_cmd_arg	cmd_arg;

	cmd_arg.cmd = cmd;
	cmd_arg.arg = arg;
	device_for_each_child(&adap->dev, &cmd_arg, i2c_cmd);
}
EXPORT_SYMBOL(i2c_clients_command);

static int __init i2c_init(void)
{
	int retval;

	retval = bus_register(&i2c_bus_type);
	if (retval)
		return retval;
#ifdef CONFIG_I2C_COMPAT
	i2c_adapter_compat_class = class_compat_register("i2c-adapter");
	if (!i2c_adapter_compat_class) {
		retval = -ENOMEM;
		goto bus_err;
	}
#endif
	retval = i2c_add_driver(&dummy_driver);
	if (retval)
		goto class_err;
#ifdef CONFIG_I2C_DEV_RK29
	init_completion(&i2c_dev_complete);
#endif
	return 0;

class_err:
#ifdef CONFIG_I2C_COMPAT
	class_compat_unregister(i2c_adapter_compat_class);
bus_err:
#endif
	bus_unregister(&i2c_bus_type);
	return retval;
}

static void __exit i2c_exit(void)
{
	i2c_del_driver(&dummy_driver);
#ifdef CONFIG_I2C_COMPAT
	class_compat_unregister(i2c_adapter_compat_class);
#endif
	bus_unregister(&i2c_bus_type);
}

/* We must initialize early, because some subsystems register i2c drivers
 * in subsys_initcall() code, but are linked (and initialized) before i2c.
 */
postcore_initcall(i2c_init);
module_exit(i2c_exit);

/* ----------------------------------------------------
 * the functional interface to the i2c busses.
 * ----------------------------------------------------
 */

/**
 * i2c_transfer - execute a single or combined I2C message
 * @adap: Handle to I2C bus
 * @msgs: One or more messages to execute before STOP is issued to
 *	terminate the operation; each message begins with a START.
 * @num: Number of messages to be executed.
 *
 * Returns negative errno, else the number of messages executed.
 *
 * Note that there is no requirement that each message be sent to
 * the same slave address, although that is the most common model.
 */
int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	unsigned long orig_jiffies;
	int ret, try;

	/* REVISIT the fault reporting model here is weak:
	 *
	 *  - When we get an error after receiving N bytes from a slave,
	 *    there is no way to report "N".
	 *
	 *  - When we get a NAK after transmitting N bytes to a slave,
	 *    there is no way to report "N" ... or to let the master
	 *    continue executing the rest of this combined message, if
	 *    that's the appropriate response.
	 *
	 *  - When for example "num" is two and we successfully complete
	 *    the first message but get an error part way through the
	 *    second, it's unclear whether that should be reported as
	 *    one (discarding status on the second message) or errno
	 *    (discarding status on the first one).
	 */

	if (adap->algo->master_xfer) {
#ifdef DEBUG
		for (ret = 0; ret < num; ret++) {
			dev_dbg(&adap->dev, "master_xfer[%d] %c, addr=0x%02x, "
				"len=%d%s\n", ret,
				(msgs[ret].flags & I2C_M_RD) ? 'R' : 'W',
				msgs[ret].addr, msgs[ret].len,
				(msgs[ret].flags & I2C_M_RECV_LEN) ?
"+" : ""); } #endif #if defined (CONFIG_I2C_RK2818) || defined(CONFIG_I2C_RK29) if (!(i2c_suspended(adap)) && (in_atomic() || irqs_disabled())) { #else if (in_atomic() || irqs_disabled()) { #endif ret = i2c_trylock_adapter(adap); if (!ret) /* I2C activity is ongoing. */ return -EAGAIN; } else { i2c_lock_adapter(adap); } /* Retry automatically on arbitration loss */ orig_jiffies = jiffies; #ifdef CONFIG_I2C_DEV_RK29 i2c_dev_dump_start(adap, msgs, num); #endif for (ret = 0, try = 0; try <= adap->retries; try++) { ret = adap->algo->master_xfer(adap, msgs, num); if (ret != -EAGAIN) break; dev_err(&adap->dev, "No ack, Maybe slave(addr: 0x%x) not exist or abnormal power-on, retry %d...\n", msgs[0].addr, adap->retries - try); if (time_after(jiffies, orig_jiffies + adap->timeout)) break; } #ifdef CONFIG_I2C_DEV_RK29 i2c_dev_dump_stop(adap, msgs, num ,ret); #endif i2c_unlock_adapter(adap); return ret; } else { dev_dbg(&adap->dev, "I2C level transfers not supported\n"); return -EOPNOTSUPP; } } EXPORT_SYMBOL(i2c_transfer); #ifdef CONFIG_PLAT_RK int i2c_master_send(const struct i2c_client *client, const char *buf, int count) { int ret; struct i2c_adapter *adap=client->adapter; struct i2c_msg msg; msg.addr = client->addr; msg.flags = client->flags; msg.len = count; msg.buf = (char *)buf; msg.scl_rate = 100 * 1000; msg.udelay = client->udelay; ret = i2c_transfer(adap, &msg, 1); return (ret == 1) ? count : ret; } EXPORT_SYMBOL(i2c_master_send); int i2c_master_recv(const struct i2c_client *client, char *buf, int count) { struct i2c_adapter *adap=client->adapter; struct i2c_msg msg; int ret; msg.addr = client->addr; msg.flags = client->flags | I2C_M_RD; msg.len = count; msg.buf = (char *)buf; msg.scl_rate = 100 * 1000; msg.udelay = client->udelay; ret = i2c_transfer(adap, &msg, 1); return (ret == 1) ? 
	   count : ret;
}
EXPORT_SYMBOL(i2c_master_recv);

/* Like i2c_master_send() but with a caller-chosen SCL clock rate. */
int i2c_master_normal_send(const struct i2c_client *client, const char *buf,
			   int count, int scl_rate)
{
	int ret;
	struct i2c_adapter *adap = client->adapter;
	struct i2c_msg msg;

	msg.addr = client->addr;
	msg.flags = client->flags;
	msg.len = count;
	msg.buf = (char *)buf;
	msg.scl_rate = scl_rate;
	msg.udelay = client->udelay;

	ret = i2c_transfer(adap, &msg, 1);

	return (ret == 1) ? count : ret;
}
EXPORT_SYMBOL(i2c_master_normal_send);

/* Like i2c_master_recv() but with a caller-chosen SCL clock rate. */
int i2c_master_normal_recv(const struct i2c_client *client, char *buf,
			   int count, int scl_rate)
{
	struct i2c_adapter *adap = client->adapter;
	struct i2c_msg msg;
	int ret;

	msg.addr = client->addr;
	msg.flags = client->flags | I2C_M_RD;
	msg.len = count;
	msg.buf = (char *)buf;
	msg.scl_rate = scl_rate;
	msg.udelay = client->udelay;

	ret = i2c_transfer(adap, &msg, 1);

	return (ret == 1) ? count : ret;
}
EXPORT_SYMBOL(i2c_master_normal_recv);

/* Write @count bytes to 8-bit register @reg (register byte is prepended). */
int i2c_master_reg8_send(const struct i2c_client *client, const char reg,
			 const char *buf, int count, int scl_rate)
{
	struct i2c_adapter *adap = client->adapter;
	struct i2c_msg msg;
	int ret;
	char *tx_buf = (char *)kmalloc(count + 1, GFP_KERNEL);

	if (!tx_buf)
		return -ENOMEM;
	tx_buf[0] = reg;
	memcpy(tx_buf+1, buf, count);

	msg.addr = client->addr;
	msg.flags = client->flags;
	msg.len = count + 1;
	msg.buf = (char *)tx_buf;
	msg.scl_rate = scl_rate;
	msg.udelay = client->udelay;

	ret = i2c_transfer(adap, &msg, 1);
	kfree(tx_buf);
	return (ret == 1) ?
	   count : ret;
}
EXPORT_SYMBOL(i2c_master_reg8_send);

/* Read @count bytes from 8-bit register @reg: write reg, repeated-start read. */
int i2c_master_reg8_recv(const struct i2c_client *client, const char reg,
			 char *buf, int count, int scl_rate)
{
	struct i2c_adapter *adap = client->adapter;
	struct i2c_msg msgs[2];
	int ret;
	char reg_buf = reg;

	msgs[0].addr = client->addr;
	msgs[0].flags = client->flags;
	msgs[0].len = 1;
	msgs[0].buf = &reg_buf;
	msgs[0].scl_rate = scl_rate;
	msgs[0].udelay = client->udelay;

	msgs[1].addr = client->addr;
	msgs[1].flags = client->flags | I2C_M_RD;
	msgs[1].len = count;
	msgs[1].buf = (char *)buf;
	msgs[1].scl_rate = scl_rate;
	msgs[1].udelay = client->udelay;

	ret = i2c_transfer(adap, msgs, 2);

	return (ret == 2) ? count : ret;
}
EXPORT_SYMBOL(i2c_master_reg8_recv);

int i2c_master_reg8_direct_send(const struct i2c_client *client, const char reg,
				const char *buf, int count, int scl_rate)
{
	/* "Direct" send is identical to the normal reg8 send. */
	return i2c_master_reg8_send(client, reg, buf, count, scl_rate);
}
EXPORT_SYMBOL(i2c_master_reg8_direct_send);

int i2c_master_reg8_direct_recv(const struct i2c_client *client,
				const char reg, char *buf, int count, int scl_rate)
{
#ifdef CONFIG_ARCH_RK29
	struct i2c_adapter *adap = client->adapter;
	struct i2c_msg msg;
	int ret;
	/* NOTE(review): VLA sized by caller -- stack risk for large count. */
	char tx_buf[count + 1];

	tx_buf[0] = reg;
	msg.addr = client->addr;
	msg.flags = client->flags | I2C_M_REG8_DIRECT | I2C_M_RD;
	msg.len = count + 1;
	msg.buf = tx_buf;
	msg.scl_rate = scl_rate;
	msg.udelay = client->udelay;

	ret = i2c_transfer(adap, &msg, 1);
	memcpy(buf, tx_buf + 1, count);
	return (ret == 1) ? count : ret;
#else
	struct i2c_adapter *adap = client->adapter;
	struct i2c_msg msgs[2];
	int ret;
	char reg_buf = reg;

	msgs[0].addr = client->addr;
	/* NOTE(review): unlike i2c_master_reg8_recv(), the register message
	 * here also carries I2C_M_RD -- confirm this is intentional for the
	 * "direct" protocol and not a copy/paste slip. */
	msgs[0].flags = client->flags | I2C_M_RD;
	msgs[0].len = 1;
	msgs[0].buf = &reg_buf;
	msgs[0].scl_rate = scl_rate;
	msgs[0].udelay = client->udelay;

	msgs[1].addr = client->addr;
	msgs[1].flags = client->flags | I2C_M_RD;
	msgs[1].len = count;
	msgs[1].buf = (char *)buf;
	msgs[1].scl_rate = scl_rate;
	msgs[1].udelay = client->udelay;

	ret = i2c_transfer(adap, msgs, 2);

	return (ret == 2) ?
	   count : ret;
#endif
}
EXPORT_SYMBOL(i2c_master_reg8_direct_recv);

/* Write @count 16-bit words to 16-bit register @regs (native byte order). */
int i2c_master_reg16_send(const struct i2c_client *client, const short regs,
			  const short *buf, int count, int scl_rate)
{
	struct i2c_adapter *adap = client->adapter;
	struct i2c_msg msg;
	int ret;
	char *tx_buf = (char *)kmalloc(2 * (count + 1), GFP_KERNEL);

	if (!tx_buf)
		return -ENOMEM;
	memcpy(tx_buf, &regs, 2);
	memcpy(tx_buf+2, (char *)buf, count * 2);

	msg.addr = client->addr;
	msg.flags = client->flags;
	msg.len = 2 * (count + 1);
	msg.buf = (char *)tx_buf;
	msg.scl_rate = scl_rate;
	msg.udelay = client->udelay;

	ret = i2c_transfer(adap, &msg, 1);
	kfree(tx_buf);
	return (ret == 1) ? count : ret;
}
EXPORT_SYMBOL(i2c_master_reg16_send);

/* Read @count 16-bit words from 16-bit register @regs (native byte order). */
int i2c_master_reg16_recv(const struct i2c_client *client, const short regs,
			  short *buf, int count, int scl_rate)
{
	struct i2c_adapter *adap = client->adapter;
	struct i2c_msg msgs[2];
	int ret;
	char reg_buf[2];

	memcpy(reg_buf, &regs, 2);

	msgs[0].addr = client->addr;
	msgs[0].flags = client->flags;
	msgs[0].len = 2;
	msgs[0].buf = reg_buf;
	msgs[0].scl_rate = scl_rate;
	msgs[0].udelay = client->udelay;

	msgs[1].addr = client->addr;
	msgs[1].flags = client->flags | I2C_M_RD;
	msgs[1].len = count * 2;
	msgs[1].buf = (char *)buf;
	msgs[1].scl_rate = scl_rate;
	msgs[1].udelay = client->udelay;

	ret = i2c_transfer(adap, msgs, 2);

	return (ret == 2) ? count : ret;
}
EXPORT_SYMBOL(i2c_master_reg16_recv);
#else
/**
 * i2c_master_send - issue a single I2C message in master transmit mode
 * @client: Handle to slave device
 * @buf: Data that will be written to the slave
 * @count: How many bytes to write, must be less than 64k since msg.len is u16
 *
 * Returns negative errno, or else the number of bytes written.
 */
int i2c_master_send(const struct i2c_client *client, const char *buf, int count)
{
	int ret;
	struct i2c_adapter *adap = client->adapter;
	struct i2c_msg msg;

	msg.addr = client->addr;
	msg.flags = client->flags & I2C_M_TEN;
	msg.len = count;
	msg.buf = (char *)buf;

	ret = i2c_transfer(adap, &msg, 1);

	/* If everything went ok (i.e. 1 msg transmitted), return #bytes
	   transmitted, else error code. */
	return (ret == 1) ? count : ret;
}
EXPORT_SYMBOL(i2c_master_send);

/**
 * i2c_master_recv - issue a single I2C message in master receive mode
 * @client: Handle to slave device
 * @buf: Where to store data read from slave
 * @count: How many bytes to read, must be less than 64k since msg.len is u16
 *
 * Returns negative errno, or else the number of bytes read.
 */
int i2c_master_recv(const struct i2c_client *client, char *buf, int count)
{
	struct i2c_adapter *adap = client->adapter;
	struct i2c_msg msg;
	int ret;

	msg.addr = client->addr;
	msg.flags = client->flags & I2C_M_TEN;
	msg.flags |= I2C_M_RD;
	msg.len = count;
	msg.buf = buf;

	ret = i2c_transfer(adap, &msg, 1);

	/* If everything went ok (i.e. 1 msg transmitted), return #bytes
	   transmitted, else error code. */
	return (ret == 1) ? count : ret;
}
EXPORT_SYMBOL(i2c_master_recv);
#endif

/* ----------------------------------------------------
 * the i2c address scanning function
 * Will not work for 10-bit addresses!
 * ----------------------------------------------------
 */

/*
 * Legacy default probe function, mostly relevant for SMBus. The default
 * probe method is a quick write, but it is known to corrupt the 24RF08
 * EEPROMs due to a state machine bug, and could also irreversibly
 * write-protect some EEPROMs, so for address ranges 0x30-0x37 and 0x50-0x5f,
 * we use a short byte read instead. Also, some bus drivers don't implement
 * quick write, so we fallback to a byte read in that case too.
 * On x86, there is another special case for FSC hardware monitoring chips,
 * which want regular byte reads (address 0x73.)
Fortunately, these are the * only known chips using this I2C address on PC hardware. * Returns 1 if probe succeeded, 0 if not. */ static int i2c_default_probe(struct i2c_adapter *adap, unsigned short addr) { int err; union i2c_smbus_data dummy; #ifdef CONFIG_X86 if (addr == 0x73 && (adap->class & I2C_CLASS_HWMON) && i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_BYTE_DATA)) err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE_DATA, &dummy); else #endif if (!((addr & ~0x07) == 0x30 || (addr & ~0x0f) == 0x50) && i2c_check_functionality(adap, I2C_FUNC_SMBUS_QUICK)) err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_WRITE, 0, I2C_SMBUS_QUICK, NULL); else if (i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_BYTE)) err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &dummy); else { dev_warn(&adap->dev, "No suitable probing method supported\n"); err = -EOPNOTSUPP; } return err >= 0; } static int i2c_detect_address(struct i2c_client *temp_client, struct i2c_driver *driver) { struct i2c_board_info info; struct i2c_adapter *adapter = temp_client->adapter; int addr = temp_client->addr; int err; /* Make sure the address is valid */ err = i2c_check_addr_validity(addr); if (err) { dev_warn(&adapter->dev, "Invalid probe address 0x%02x\n", addr); return err; } /* Skip if already in use */ if (i2c_check_addr_busy(adapter, addr)) return 0; /* Make sure there is something at this address */ if (!i2c_default_probe(adapter, addr)) return 0; /* Finally call the custom detection function */ memset(&info, 0, sizeof(struct i2c_board_info)); info.addr = addr; err = driver->detect(temp_client, &info); if (err) { /* -ENODEV is returned if the detection fails. We catch it here as this isn't an error. */ return err == -ENODEV ? 
0 : err; } /* Consistency check */ if (info.type[0] == '\0') { dev_err(&adapter->dev, "%s detection function provided " "no name for 0x%x\n", driver->driver.name, addr); } else { struct i2c_client *client; /* Detection succeeded, instantiate the device */ dev_dbg(&adapter->dev, "Creating %s at 0x%02x\n", info.type, info.addr); client = i2c_new_device(adapter, &info); if (client) list_add_tail(&client->detected, &driver->clients); else dev_err(&adapter->dev, "Failed creating %s at 0x%02x\n", info.type, info.addr); } return 0; } static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) { const unsigned short *address_list; struct i2c_client *temp_client; int i, err = 0; int adap_id = i2c_adapter_id(adapter); address_list = driver->address_list; if (!driver->detect || !address_list) return 0; /* Stop here if the classes do not match */ if (!(adapter->class & driver->class)) return 0; /* Set up a temporary client to help detect callback */ temp_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL); if (!temp_client) return -ENOMEM; temp_client->adapter = adapter; for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) { dev_dbg(&adapter->dev, "found normal entry for adapter %d, " "addr 0x%02x\n", adap_id, address_list[i]); temp_client->addr = address_list[i]; err = i2c_detect_address(temp_client, driver); if (unlikely(err)) break; } kfree(temp_client); return err; } int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr) { return i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_QUICK, NULL) >= 0; } EXPORT_SYMBOL_GPL(i2c_probe_func_quick_read); struct i2c_client * i2c_new_probed_device(struct i2c_adapter *adap, struct i2c_board_info *info, unsigned short const *addr_list, int (*probe)(struct i2c_adapter *, unsigned short addr)) { int i; if (!probe) probe = i2c_default_probe; for (i = 0; addr_list[i] != I2C_CLIENT_END; i++) { /* Check address validity */ if (i2c_check_addr_validity(addr_list[i]) < 0) { 
dev_warn(&adap->dev, "Invalid 7-bit address " "0x%02x\n", addr_list[i]); continue; } /* Check address availability */ if (i2c_check_addr_busy(adap, addr_list[i])) { dev_dbg(&adap->dev, "Address 0x%02x already in " "use, not probing\n", addr_list[i]); continue; } /* Test address responsiveness */ if (probe(adap, addr_list[i])) break; } if (addr_list[i] == I2C_CLIENT_END) { dev_dbg(&adap->dev, "Probing failed, no device found\n"); return NULL; } info->addr = addr_list[i]; return i2c_new_device(adap, info); } EXPORT_SYMBOL_GPL(i2c_new_probed_device); struct i2c_adapter *i2c_get_adapter(int nr) { struct i2c_adapter *adapter; mutex_lock(&core_lock); adapter = idr_find(&i2c_adapter_idr, nr); if (adapter && !try_module_get(adapter->owner)) adapter = NULL; mutex_unlock(&core_lock); return adapter; } EXPORT_SYMBOL(i2c_get_adapter); void i2c_put_adapter(struct i2c_adapter *adap) { module_put(adap->owner); } EXPORT_SYMBOL(i2c_put_adapter); /* The SMBus parts */ #define POLY (0x1070U << 3) static u8 crc8(u16 data) { int i; for (i = 0; i < 8; i++) { if (data & 0x8000) data = data ^ POLY; data = data << 1; } return (u8)(data >> 8); } /* Incremental CRC8 over count bytes in the array pointed to by p */ static u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count) { int i; for (i = 0; i < count; i++) crc = crc8((crc ^ p[i]) << 8); return crc; } /* Assume a 7-bit address, which is reasonable for SMBus */ static u8 i2c_smbus_msg_pec(u8 pec, struct i2c_msg *msg) { /* The address will be sent first */ u8 addr = (msg->addr << 1) | !!(msg->flags & I2C_M_RD); pec = i2c_smbus_pec(pec, &addr, 1); /* The data buffer follows */ return i2c_smbus_pec(pec, msg->buf, msg->len); } /* Used for write only transactions */ static inline void i2c_smbus_add_pec(struct i2c_msg *msg) { msg->buf[msg->len] = i2c_smbus_msg_pec(0, msg); msg->len++; } /* Return <0 on CRC error If there was a write before this read (most cases) we need to take the partial CRC from the write part into account. 
Note that this function does modify the message (we need to decrease the message length to hide the CRC byte from the caller). */ static int i2c_smbus_check_pec(u8 cpec, struct i2c_msg *msg) { u8 rpec = msg->buf[--msg->len]; cpec = i2c_smbus_msg_pec(cpec, msg); if (rpec != cpec) { pr_debug("i2c-core: Bad PEC 0x%02x vs. 0x%02x\n", rpec, cpec); return -EBADMSG; } return 0; } /** * i2c_smbus_read_byte - SMBus "receive byte" protocol * @client: Handle to slave device * * This executes the SMBus "receive byte" protocol, returning negative errno * else the byte received from the device. */ s32 i2c_smbus_read_byte(const struct i2c_client *client) { union i2c_smbus_data data; int status; status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &data); return (status < 0) ? status : data.byte; } EXPORT_SYMBOL(i2c_smbus_read_byte); /** * i2c_smbus_write_byte - SMBus "send byte" protocol * @client: Handle to slave device * @value: Byte to be sent * * This executes the SMBus "send byte" protocol, returning negative errno * else zero on success. */ s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value) { return i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL); } EXPORT_SYMBOL(i2c_smbus_write_byte); /** * i2c_smbus_read_byte_data - SMBus "read byte" protocol * @client: Handle to slave device * @command: Byte interpreted by slave * * This executes the SMBus "read byte" protocol, returning negative errno * else a data byte received from the device. */ s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command) { union i2c_smbus_data data; int status; status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_READ, command, I2C_SMBUS_BYTE_DATA, &data); return (status < 0) ? 
status : data.byte; } EXPORT_SYMBOL(i2c_smbus_read_byte_data); /** * i2c_smbus_write_byte_data - SMBus "write byte" protocol * @client: Handle to slave device * @command: Byte interpreted by slave * @value: Byte being written * * This executes the SMBus "write byte" protocol, returning negative errno * else zero on success. */ s32 i2c_smbus_write_byte_data(const struct i2c_client *client, u8 command, u8 value) { union i2c_smbus_data data; data.byte = value; return i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_WRITE, command, I2C_SMBUS_BYTE_DATA, &data); } EXPORT_SYMBOL(i2c_smbus_write_byte_data); /** * i2c_smbus_read_word_data - SMBus "read word" protocol * @client: Handle to slave device * @command: Byte interpreted by slave * * This executes the SMBus "read word" protocol, returning negative errno * else a 16-bit unsigned "word" received from the device. */ s32 i2c_smbus_read_word_data(const struct i2c_client *client, u8 command) { union i2c_smbus_data data; int status; status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_READ, command, I2C_SMBUS_WORD_DATA, &data); return (status < 0) ? status : data.word; } EXPORT_SYMBOL(i2c_smbus_read_word_data); /** * i2c_smbus_write_word_data - SMBus "write word" protocol * @client: Handle to slave device * @command: Byte interpreted by slave * @value: 16-bit "word" being written * * This executes the SMBus "write word" protocol, returning negative errno * else zero on success. 
*/ s32 i2c_smbus_write_word_data(const struct i2c_client *client, u8 command, u16 value) { union i2c_smbus_data data; data.word = value; return i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_WRITE, command, I2C_SMBUS_WORD_DATA, &data); } EXPORT_SYMBOL(i2c_smbus_write_word_data); /** * i2c_smbus_process_call - SMBus "process call" protocol * @client: Handle to slave device * @command: Byte interpreted by slave * @value: 16-bit "word" being written * * This executes the SMBus "process call" protocol, returning negative errno * else a 16-bit unsigned "word" received from the device. */ s32 i2c_smbus_process_call(const struct i2c_client *client, u8 command, u16 value) { union i2c_smbus_data data; int status; data.word = value; status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_WRITE, command, I2C_SMBUS_PROC_CALL, &data); return (status < 0) ? status : data.word; } EXPORT_SYMBOL(i2c_smbus_process_call); /** * i2c_smbus_read_block_data - SMBus "block read" protocol * @client: Handle to slave device * @command: Byte interpreted by slave * @values: Byte array into which data will be read; big enough to hold * the data returned by the slave. SMBus allows at most 32 bytes. * * This executes the SMBus "block read" protocol, returning negative errno * else the number of data bytes in the slave's response. * * Note that using this function requires that the client's adapter support * the I2C_FUNC_SMBUS_READ_BLOCK_DATA functionality. Not all adapter drivers * support this; its emulation through I2C messaging relies on a specific * mechanism (I2C_M_RECV_LEN) which may not be implemented. 
*/ s32 i2c_smbus_read_block_data(const struct i2c_client *client, u8 command, u8 *values) { union i2c_smbus_data data; int status; status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_READ, command, I2C_SMBUS_BLOCK_DATA, &data); if (status) return status; memcpy(values, &data.block[1], data.block[0]); return data.block[0]; } EXPORT_SYMBOL(i2c_smbus_read_block_data); /** * i2c_smbus_write_block_data - SMBus "block write" protocol * @client: Handle to slave device * @command: Byte interpreted by slave * @length: Size of data block; SMBus allows at most 32 bytes * @values: Byte array which will be written. * * This executes the SMBus "block write" protocol, returning negative errno * else zero on success. */ s32 i2c_smbus_write_block_data(const struct i2c_client *client, u8 command, u8 length, const u8 *values) { union i2c_smbus_data data; if (length > I2C_SMBUS_BLOCK_MAX) length = I2C_SMBUS_BLOCK_MAX; data.block[0] = length; memcpy(&data.block[1], values, length); return i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_WRITE, command, I2C_SMBUS_BLOCK_DATA, &data); } EXPORT_SYMBOL(i2c_smbus_write_block_data); /* Returns the number of read bytes */ s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client, u8 command, u8 length, u8 *values) { union i2c_smbus_data data; int status; if (length > I2C_SMBUS_BLOCK_MAX) length = I2C_SMBUS_BLOCK_MAX; data.block[0] = length; status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_READ, command, I2C_SMBUS_I2C_BLOCK_DATA, &data); if (status < 0) return status; memcpy(values, &data.block[1], data.block[0]); return data.block[0]; } EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data); s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, u8 command, u8 length, const u8 *values) { union i2c_smbus_data data; if (length > I2C_SMBUS_BLOCK_MAX) length = I2C_SMBUS_BLOCK_MAX; data.block[0] = length; memcpy(data.block + 1, values, length); return 
i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_WRITE, command, I2C_SMBUS_I2C_BLOCK_DATA, &data); } EXPORT_SYMBOL(i2c_smbus_write_i2c_block_data); /* Simulate a SMBus command using the i2c protocol No checking of parameters is done! */ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { /* So we need to generate a series of msgs. In the case of writing, we need to use only one message; when reading, we need two. We initialize most things with sane defaults, to keep the code below somewhat simpler. */ unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX+3]; unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX+2]; int num = read_write == I2C_SMBUS_READ ? 2 : 1; struct i2c_msg msg[2] = { { addr, flags, 1, msgbuf0, 100000, 0, 0 }, { addr, flags | I2C_M_RD, 0, msgbuf1, 100000, 0, 0 } }; int i; u8 partial_pec = 0; int status; msgbuf0[0] = command; switch (size) { case I2C_SMBUS_QUICK: msg[0].len = 0; /* Special case: The read/write field is used as data */ msg[0].flags = flags | (read_write == I2C_SMBUS_READ ? I2C_M_RD : 0); num = 1; break; case I2C_SMBUS_BYTE: if (read_write == I2C_SMBUS_READ) { /* Special case: only a read! 
*/ msg[0].flags = I2C_M_RD | flags; num = 1; } break; case I2C_SMBUS_BYTE_DATA: if (read_write == I2C_SMBUS_READ) msg[1].len = 1; else { msg[0].len = 2; msgbuf0[1] = data->byte; } break; case I2C_SMBUS_WORD_DATA: if (read_write == I2C_SMBUS_READ) msg[1].len = 2; else { msg[0].len = 3; msgbuf0[1] = data->word & 0xff; msgbuf0[2] = data->word >> 8; } break; case I2C_SMBUS_PROC_CALL: num = 2; /* Special case */ read_write = I2C_SMBUS_READ; msg[0].len = 3; msg[1].len = 2; msgbuf0[1] = data->word & 0xff; msgbuf0[2] = data->word >> 8; break; case I2C_SMBUS_BLOCK_DATA: if (read_write == I2C_SMBUS_READ) { msg[1].flags |= I2C_M_RECV_LEN; msg[1].len = 1; /* block length will be added by the underlying bus driver */ } else { msg[0].len = data->block[0] + 2; if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 2) { dev_err(&adapter->dev, "Invalid block write size %d\n", data->block[0]); return -EINVAL; } for (i = 1; i < msg[0].len; i++) msgbuf0[i] = data->block[i-1]; } break; case I2C_SMBUS_BLOCK_PROC_CALL: num = 2; /* Another special case */ read_write = I2C_SMBUS_READ; if (data->block[0] > I2C_SMBUS_BLOCK_MAX) { dev_err(&adapter->dev, "Invalid block write size %d\n", data->block[0]); return -EINVAL; } msg[0].len = data->block[0] + 2; for (i = 1; i < msg[0].len; i++) msgbuf0[i] = data->block[i-1]; msg[1].flags |= I2C_M_RECV_LEN; msg[1].len = 1; /* block length will be added by the underlying bus driver */ break; case I2C_SMBUS_I2C_BLOCK_DATA: if (read_write == I2C_SMBUS_READ) { msg[1].len = data->block[0]; } else { msg[0].len = data->block[0] + 1; if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) { dev_err(&adapter->dev, "Invalid block write size %d\n", data->block[0]); return -EINVAL; } for (i = 1; i <= data->block[0]; i++) msgbuf0[i] = data->block[i]; } break; default: dev_err(&adapter->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } i = ((flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK && size != I2C_SMBUS_I2C_BLOCK_DATA); if (i) { /* Compute PEC if first message is a write 
*/ if (!(msg[0].flags & I2C_M_RD)) { if (num == 1) /* Write only */ i2c_smbus_add_pec(&msg[0]); else /* Write followed by read */ partial_pec = i2c_smbus_msg_pec(0, &msg[0]); } /* Ask for PEC if last message is a read */ if (msg[num-1].flags & I2C_M_RD) msg[num-1].len++; } status = i2c_transfer(adapter, msg, num); if (status < 0) return status; /* Check PEC if last message is a read */ if (i && (msg[num-1].flags & I2C_M_RD)) { status = i2c_smbus_check_pec(partial_pec, &msg[num-1]); if (status < 0) return status; } if (read_write == I2C_SMBUS_READ) switch (size) { case I2C_SMBUS_BYTE: data->byte = msgbuf0[0]; break; case I2C_SMBUS_BYTE_DATA: data->byte = msgbuf1[0]; break; case I2C_SMBUS_WORD_DATA: case I2C_SMBUS_PROC_CALL: data->word = msgbuf1[0] | (msgbuf1[1] << 8); break; case I2C_SMBUS_I2C_BLOCK_DATA: for (i = 0; i < data->block[0]; i++) data->block[i+1] = msgbuf1[i]; break; case I2C_SMBUS_BLOCK_DATA: case I2C_SMBUS_BLOCK_PROC_CALL: for (i = 0; i < msgbuf1[0] + 1; i++) data->block[i] = msgbuf1[i]; break; } return 0; } /** * i2c_smbus_xfer - execute SMBus protocol operations * @adapter: Handle to I2C bus * @addr: Address of SMBus slave on that bus * @flags: I2C_CLIENT_* flags (usually zero or I2C_CLIENT_PEC) * @read_write: I2C_SMBUS_READ or I2C_SMBUS_WRITE * @command: Byte interpreted by slave, for protocols which use such bytes * @protocol: SMBus protocol operation to execute, such as I2C_SMBUS_PROC_CALL * @data: Data to be read or written * * This executes an SMBus protocol operation, and returns a negative * errno code else zero on success. 
*/ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags, char read_write, u8 command, int protocol, union i2c_smbus_data *data) { unsigned long orig_jiffies; int try; s32 res; flags &= I2C_M_TEN | I2C_CLIENT_PEC; if (adapter->algo->smbus_xfer) { i2c_lock_adapter(adapter); /* Retry automatically on arbitration loss */ orig_jiffies = jiffies; for (res = 0, try = 0; try <= adapter->retries; try++) { res = adapter->algo->smbus_xfer(adapter, addr, flags, read_write, command, protocol, data); if (res != -EAGAIN) break; if (time_after(jiffies, orig_jiffies + adapter->timeout)) break; } i2c_unlock_adapter(adapter); } else res = i2c_smbus_xfer_emulated(adapter, addr, flags, read_write, command, protocol, data); return res; } EXPORT_SYMBOL(i2c_smbus_xfer); MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); MODULE_DESCRIPTION("I2C-Bus main module"); MODULE_LICENSE("GPL");
gpl-2.0
noguxun/linux_hx
drivers/thermal/int340x_thermal/processor_thermal_device.c
62
7959
/* * processor_thermal_device.c * Copyright (c) 2014, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/acpi.h> /* Broadwell-U/HSB thermal reporting device */ #define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603 #define PCI_DEVICE_ID_PROC_HSB_THERMAL 0x0A03 /* Braswell thermal reporting device */ #define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC struct power_config { u32 index; u32 min_uw; u32 max_uw; u32 tmin_us; u32 tmax_us; u32 step_uw; }; struct proc_thermal_device { struct device *dev; struct acpi_device *adev; struct power_config power_limits[2]; }; enum proc_thermal_emum_mode_type { PROC_THERMAL_NONE, PROC_THERMAL_PCI, PROC_THERMAL_PLATFORM_DEV }; /* * We can have only one type of enumeration, PCI or Platform, * not both. So we don't need instance specific data. 
*/ static enum proc_thermal_emum_mode_type proc_thermal_emum_mode = PROC_THERMAL_NONE; #define POWER_LIMIT_SHOW(index, suffix) \ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct pci_dev *pci_dev; \ struct platform_device *pdev; \ struct proc_thermal_device *proc_dev; \ \ if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \ pdev = to_platform_device(dev); \ proc_dev = platform_get_drvdata(pdev); \ } else { \ pci_dev = to_pci_dev(dev); \ proc_dev = pci_get_drvdata(pci_dev); \ } \ return sprintf(buf, "%lu\n",\ (unsigned long)proc_dev->power_limits[index].suffix * 1000); \ } POWER_LIMIT_SHOW(0, min_uw) POWER_LIMIT_SHOW(0, max_uw) POWER_LIMIT_SHOW(0, step_uw) POWER_LIMIT_SHOW(0, tmin_us) POWER_LIMIT_SHOW(0, tmax_us) POWER_LIMIT_SHOW(1, min_uw) POWER_LIMIT_SHOW(1, max_uw) POWER_LIMIT_SHOW(1, step_uw) POWER_LIMIT_SHOW(1, tmin_us) POWER_LIMIT_SHOW(1, tmax_us) static DEVICE_ATTR_RO(power_limit_0_min_uw); static DEVICE_ATTR_RO(power_limit_0_max_uw); static DEVICE_ATTR_RO(power_limit_0_step_uw); static DEVICE_ATTR_RO(power_limit_0_tmin_us); static DEVICE_ATTR_RO(power_limit_0_tmax_us); static DEVICE_ATTR_RO(power_limit_1_min_uw); static DEVICE_ATTR_RO(power_limit_1_max_uw); static DEVICE_ATTR_RO(power_limit_1_step_uw); static DEVICE_ATTR_RO(power_limit_1_tmin_us); static DEVICE_ATTR_RO(power_limit_1_tmax_us); static struct attribute *power_limit_attrs[] = { &dev_attr_power_limit_0_min_uw.attr, &dev_attr_power_limit_1_min_uw.attr, &dev_attr_power_limit_0_max_uw.attr, &dev_attr_power_limit_1_max_uw.attr, &dev_attr_power_limit_0_step_uw.attr, &dev_attr_power_limit_1_step_uw.attr, &dev_attr_power_limit_0_tmin_us.attr, &dev_attr_power_limit_1_tmin_us.attr, &dev_attr_power_limit_0_tmax_us.attr, &dev_attr_power_limit_1_tmax_us.attr, NULL }; static struct attribute_group power_limit_attribute_group = { .attrs = power_limit_attrs, .name = "power_limits" }; static int proc_thermal_add(struct 
device *dev, struct proc_thermal_device **priv) { struct proc_thermal_device *proc_priv; struct acpi_device *adev; acpi_status status; struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *elements, *ppcc; union acpi_object *p; int i; int ret; adev = ACPI_COMPANION(dev); if (!adev) return -ENODEV; status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf); if (ACPI_FAILURE(status)) return -ENODEV; p = buf.pointer; if (!p || (p->type != ACPI_TYPE_PACKAGE)) { dev_err(dev, "Invalid PPCC data\n"); ret = -EFAULT; goto free_buffer; } if (!p->package.count) { dev_err(dev, "Invalid PPCC package size\n"); ret = -EFAULT; goto free_buffer; } proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL); if (!proc_priv) { ret = -ENOMEM; goto free_buffer; } proc_priv->dev = dev; proc_priv->adev = adev; for (i = 0; i < min((int)p->package.count - 1, 2); ++i) { elements = &(p->package.elements[i+1]); if (elements->type != ACPI_TYPE_PACKAGE || elements->package.count != 6) { ret = -EFAULT; goto free_buffer; } ppcc = elements->package.elements; proc_priv->power_limits[i].index = ppcc[0].integer.value; proc_priv->power_limits[i].min_uw = ppcc[1].integer.value; proc_priv->power_limits[i].max_uw = ppcc[2].integer.value; proc_priv->power_limits[i].tmin_us = ppcc[3].integer.value; proc_priv->power_limits[i].tmax_us = ppcc[4].integer.value; proc_priv->power_limits[i].step_uw = ppcc[5].integer.value; } *priv = proc_priv; ret = sysfs_create_group(&dev->kobj, &power_limit_attribute_group); free_buffer: kfree(buf.pointer); return ret; } void proc_thermal_remove(struct proc_thermal_device *proc_priv) { sysfs_remove_group(&proc_priv->dev->kobj, &power_limit_attribute_group); } static int int3401_add(struct platform_device *pdev) { struct proc_thermal_device *proc_priv; int ret; if (proc_thermal_emum_mode == PROC_THERMAL_PCI) { dev_err(&pdev->dev, "error: enumerated as PCI dev\n"); return -ENODEV; } ret = proc_thermal_add(&pdev->dev, &proc_priv); if (ret) return ret; 
platform_set_drvdata(pdev, proc_priv); proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV; return 0; } static int int3401_remove(struct platform_device *pdev) { proc_thermal_remove(platform_get_drvdata(pdev)); return 0; } static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_id *unused) { struct proc_thermal_device *proc_priv; int ret; if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { dev_err(&pdev->dev, "error: enumerated as platform dev\n"); return -ENODEV; } ret = pci_enable_device(pdev); if (ret < 0) { dev_err(&pdev->dev, "error: could not enable device\n"); return ret; } ret = proc_thermal_add(&pdev->dev, &proc_priv); if (ret) { pci_disable_device(pdev); return ret; } pci_set_drvdata(pdev, proc_priv); proc_thermal_emum_mode = PROC_THERMAL_PCI; return 0; } static void proc_thermal_pci_remove(struct pci_dev *pdev) { proc_thermal_remove(pci_get_drvdata(pdev)); pci_disable_device(pdev); } static const struct pci_device_id proc_thermal_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)}, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)}, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)}, { 0, }, }; MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids); static struct pci_driver proc_thermal_pci_driver = { .name = "proc_thermal", .probe = proc_thermal_pci_probe, .remove = proc_thermal_pci_remove, .id_table = proc_thermal_pci_ids, }; static const struct acpi_device_id int3401_device_ids[] = { {"INT3401", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, int3401_device_ids); static struct platform_driver int3401_driver = { .probe = int3401_add, .remove = int3401_remove, .driver = { .name = "int3401 thermal", .acpi_match_table = int3401_device_ids, }, }; static int __init proc_thermal_init(void) { int ret; ret = platform_driver_register(&int3401_driver); if (ret) return ret; ret = pci_register_driver(&proc_thermal_pci_driver); return ret; } static void __exit proc_thermal_exit(void) { 
platform_driver_unregister(&int3401_driver); pci_unregister_driver(&proc_thermal_pci_driver); } module_init(proc_thermal_init); module_exit(proc_thermal_exit); MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>"); MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
jdkernel/mecha_aosp_2.6.35
kernel/trace/trace_events.c
574
34316
/* * event tracer * * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> * * - Added format output of fields of the trace point. * This was based off of work by Tom Zanussi <tzanussi@gmail.com>. * */ #include <linux/workqueue.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/delay.h> #include <asm/setup.h> #include "trace_output.h" #undef TRACE_SYSTEM #define TRACE_SYSTEM "TRACE_SYSTEM" DEFINE_MUTEX(event_mutex); LIST_HEAD(ftrace_events); struct list_head * trace_get_fields(struct ftrace_event_call *event_call) { if (!event_call->class->get_fields) return &event_call->class->fields; return event_call->class->get_fields(event_call); } int trace_define_field(struct ftrace_event_call *call, const char *type, const char *name, int offset, int size, int is_signed, int filter_type) { struct ftrace_event_field *field; struct list_head *head; if (WARN_ON(!call->class)) return 0; field = kzalloc(sizeof(*field), GFP_KERNEL); if (!field) goto err; field->name = kstrdup(name, GFP_KERNEL); if (!field->name) goto err; field->type = kstrdup(type, GFP_KERNEL); if (!field->type) goto err; if (filter_type == FILTER_OTHER) field->filter_type = filter_assign_type(type); else field->filter_type = filter_type; field->offset = offset; field->size = size; field->is_signed = is_signed; head = trace_get_fields(call); list_add(&field->link, head); return 0; err: if (field) kfree(field->name); kfree(field); return -ENOMEM; } EXPORT_SYMBOL_GPL(trace_define_field); #define __common_field(type, item) \ ret = trace_define_field(call, #type, "common_" #item, \ offsetof(typeof(ent), item), \ sizeof(ent.item), \ is_signed_type(type), FILTER_OTHER); \ if (ret) \ return ret; static int trace_define_common_fields(struct ftrace_event_call *call) { int ret; struct trace_entry ent; __common_field(unsigned short, type); 
__common_field(unsigned char, flags); __common_field(unsigned char, preempt_count); __common_field(int, pid); __common_field(int, lock_depth); return ret; } void trace_destroy_fields(struct ftrace_event_call *call) { struct ftrace_event_field *field, *next; struct list_head *head; head = trace_get_fields(call); list_for_each_entry_safe(field, next, head, link) { list_del(&field->link); kfree(field->type); kfree(field->name); kfree(field); } } int trace_event_raw_init(struct ftrace_event_call *call) { int id; id = register_ftrace_event(&call->event); if (!id) return -ENODEV; return 0; } EXPORT_SYMBOL_GPL(trace_event_raw_init); static int ftrace_event_enable_disable(struct ftrace_event_call *call, int enable) { int ret = 0; switch (enable) { case 0: if (call->flags & TRACE_EVENT_FL_ENABLED) { call->flags &= ~TRACE_EVENT_FL_ENABLED; tracing_stop_cmdline_record(); if (call->class->reg) call->class->reg(call, TRACE_REG_UNREGISTER); else tracepoint_probe_unregister(call->name, call->class->probe, call); } break; case 1: if (!(call->flags & TRACE_EVENT_FL_ENABLED)) { tracing_start_cmdline_record(); if (call->class->reg) ret = call->class->reg(call, TRACE_REG_REGISTER); else ret = tracepoint_probe_register(call->name, call->class->probe, call); if (ret) { tracing_stop_cmdline_record(); pr_info("event trace: Could not enable event " "%s\n", call->name); break; } call->flags |= TRACE_EVENT_FL_ENABLED; } break; } return ret; } static void ftrace_clear_events(void) { struct ftrace_event_call *call; mutex_lock(&event_mutex); list_for_each_entry(call, &ftrace_events, list) { ftrace_event_enable_disable(call, 0); } mutex_unlock(&event_mutex); } /* * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events. 
*/ static int __ftrace_set_clr_event(const char *match, const char *sub, const char *event, int set) { struct ftrace_event_call *call; int ret = -EINVAL; mutex_lock(&event_mutex); list_for_each_entry(call, &ftrace_events, list) { if (!call->name || !call->class || (!call->class->probe && !call->class->reg)) continue; if (match && strcmp(match, call->name) != 0 && strcmp(match, call->class->system) != 0) continue; if (sub && strcmp(sub, call->class->system) != 0) continue; if (event && strcmp(event, call->name) != 0) continue; ftrace_event_enable_disable(call, set); ret = 0; } mutex_unlock(&event_mutex); return ret; } static int ftrace_set_clr_event(char *buf, int set) { char *event = NULL, *sub = NULL, *match; /* * The buf format can be <subsystem>:<event-name> * *:<event-name> means any event by that name. * :<event-name> is the same. * * <subsystem>:* means all events in that subsystem * <subsystem>: means the same. * * <name> (no ':') means all events in a subsystem with * the name <name> or any event that matches <name> */ match = strsep(&buf, ":"); if (buf) { sub = match; event = buf; match = NULL; if (!strlen(sub) || strcmp(sub, "*") == 0) sub = NULL; if (!strlen(event) || strcmp(event, "*") == 0) event = NULL; } return __ftrace_set_clr_event(match, sub, event, set); } /** * trace_set_clr_event - enable or disable an event * @system: system name to match (NULL for any system) * @event: event name to match (NULL for all events, within system) * @set: 1 to enable, 0 to disable * * This is a way for other parts of the kernel to enable or disable * event recording. * * Returns 0 on success, -EINVAL if the parameters do not match any * registered events. 
*/ int trace_set_clr_event(const char *system, const char *event, int set) { return __ftrace_set_clr_event(NULL, system, event, set); } /* 128 should be much more than enough */ #define EVENT_BUF_SIZE 127 static ssize_t ftrace_event_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_parser parser; ssize_t read, ret; if (!cnt) return 0; ret = tracing_update_buffers(); if (ret < 0) return ret; if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1)) return -ENOMEM; read = trace_get_user(&parser, ubuf, cnt, ppos); if (read >= 0 && trace_parser_loaded((&parser))) { int set = 1; if (*parser.buffer == '!') set = 0; parser.buffer[parser.idx] = 0; ret = ftrace_set_clr_event(parser.buffer + !set, set); if (ret) goto out_put; } ret = read; out_put: trace_parser_put(&parser); return ret; } static void * t_next(struct seq_file *m, void *v, loff_t *pos) { struct ftrace_event_call *call = v; (*pos)++; list_for_each_entry_continue(call, &ftrace_events, list) { /* * The ftrace subsystem is for showing formats only. * They can not be enabled or disabled via the event files. 
*/ if (call->class && (call->class->probe || call->class->reg)) return call; } return NULL; } static void *t_start(struct seq_file *m, loff_t *pos) { struct ftrace_event_call *call; loff_t l; mutex_lock(&event_mutex); call = list_entry(&ftrace_events, struct ftrace_event_call, list); for (l = 0; l <= *pos; ) { call = t_next(m, call, &l); if (!call) break; } return call; } static void * s_next(struct seq_file *m, void *v, loff_t *pos) { struct ftrace_event_call *call = v; (*pos)++; list_for_each_entry_continue(call, &ftrace_events, list) { if (call->flags & TRACE_EVENT_FL_ENABLED) return call; } return NULL; } static void *s_start(struct seq_file *m, loff_t *pos) { struct ftrace_event_call *call; loff_t l; mutex_lock(&event_mutex); call = list_entry(&ftrace_events, struct ftrace_event_call, list); for (l = 0; l <= *pos; ) { call = s_next(m, call, &l); if (!call) break; } return call; } static int t_show(struct seq_file *m, void *v) { struct ftrace_event_call *call = v; if (strcmp(call->class->system, TRACE_SYSTEM) != 0) seq_printf(m, "%s:", call->class->system); seq_printf(m, "%s\n", call->name); return 0; } static void t_stop(struct seq_file *m, void *p) { mutex_unlock(&event_mutex); } static int ftrace_event_seq_open(struct inode *inode, struct file *file) { const struct seq_operations *seq_ops; if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) ftrace_clear_events(); seq_ops = inode->i_private; return seq_open(file, seq_ops); } static ssize_t event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct ftrace_event_call *call = filp->private_data; char *buf; if (call->flags & TRACE_EVENT_FL_ENABLED) buf = "1\n"; else buf = "0\n"; return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); } static ssize_t event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct ftrace_event_call *call = filp->private_data; char buf[64]; unsigned long val; int ret; if (cnt >= sizeof(buf)) return 
-EINVAL; if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; ret = strict_strtoul(buf, 10, &val); if (ret < 0) return ret; ret = tracing_update_buffers(); if (ret < 0) return ret; switch (val) { case 0: case 1: mutex_lock(&event_mutex); ret = ftrace_event_enable_disable(call, val); mutex_unlock(&event_mutex); break; default: return -EINVAL; } *ppos += cnt; return ret ? ret : cnt; } static ssize_t system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { const char set_to_char[4] = { '?', '0', '1', 'X' }; const char *system = filp->private_data; struct ftrace_event_call *call; char buf[2]; int set = 0; int ret; mutex_lock(&event_mutex); list_for_each_entry(call, &ftrace_events, list) { if (!call->name || !call->class || (!call->class->probe && !call->class->reg)) continue; if (system && strcmp(call->class->system, system) != 0) continue; /* * We need to find out if all the events are set * or if all events or cleared, or if we have * a mixture. */ set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED)); /* * If we have a mixture, no need to look further. 
*/ if (set == 3) break; } mutex_unlock(&event_mutex); buf[0] = set_to_char[set]; buf[1] = '\n'; ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); return ret; } static ssize_t system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { const char *system = filp->private_data; unsigned long val; char buf[64]; ssize_t ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; ret = strict_strtoul(buf, 10, &val); if (ret < 0) return ret; ret = tracing_update_buffers(); if (ret < 0) return ret; if (val != 0 && val != 1) return -EINVAL; ret = __ftrace_set_clr_event(NULL, system, NULL, val); if (ret) goto out; ret = cnt; out: *ppos += cnt; return ret; } static ssize_t event_format_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct ftrace_event_call *call = filp->private_data; struct ftrace_event_field *field; struct list_head *head; struct trace_seq *s; int common_field_count = 5; char *buf; int r = 0; if (*ppos) return 0; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; trace_seq_init(s); trace_seq_printf(s, "name: %s\n", call->name); trace_seq_printf(s, "ID: %d\n", call->event.type); trace_seq_printf(s, "format:\n"); head = trace_get_fields(call); list_for_each_entry_reverse(field, head, link) { /* * Smartly shows the array type(except dynamic array). 
* Normal: * field:TYPE VAR * If TYPE := TYPE[LEN], it is shown: * field:TYPE VAR[LEN] */ const char *array_descriptor = strchr(field->type, '['); if (!strncmp(field->type, "__data_loc", 10)) array_descriptor = NULL; if (!array_descriptor) { r = trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;" "\tsize:%u;\tsigned:%d;\n", field->type, field->name, field->offset, field->size, !!field->is_signed); } else { r = trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;" "\tsize:%u;\tsigned:%d;\n", (int)(array_descriptor - field->type), field->type, field->name, array_descriptor, field->offset, field->size, !!field->is_signed); } if (--common_field_count == 0) r = trace_seq_printf(s, "\n"); if (!r) break; } if (r) r = trace_seq_printf(s, "\nprint fmt: %s\n", call->print_fmt); if (!r) { /* * ug! The format output is bigger than a PAGE!! */ buf = "FORMAT TOO BIG\n"; r = simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf)); goto out; } r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); out: kfree(s); return r; } static ssize_t event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct ftrace_event_call *call = filp->private_data; struct trace_seq *s; int r; if (*ppos) return 0; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; trace_seq_init(s); trace_seq_printf(s, "%d\n", call->event.type); r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); kfree(s); return r; } static ssize_t event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct ftrace_event_call *call = filp->private_data; struct trace_seq *s; int r; if (*ppos) return 0; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; trace_seq_init(s); print_event_filter(call, s); r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); kfree(s); return r; } static ssize_t event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct ftrace_event_call *call = filp->private_data; 
char *buf; int err; if (cnt >= PAGE_SIZE) return -EINVAL; buf = (char *)__get_free_page(GFP_TEMPORARY); if (!buf) return -ENOMEM; if (copy_from_user(buf, ubuf, cnt)) { free_page((unsigned long) buf); return -EFAULT; } buf[cnt] = '\0'; err = apply_event_filter(call, buf); free_page((unsigned long) buf); if (err < 0) return err; *ppos += cnt; return cnt; } static ssize_t subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct event_subsystem *system = filp->private_data; struct trace_seq *s; int r; if (*ppos) return 0; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; trace_seq_init(s); print_subsystem_event_filter(system, s); r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); kfree(s); return r; } static ssize_t subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct event_subsystem *system = filp->private_data; char *buf; int err; if (cnt >= PAGE_SIZE) return -EINVAL; buf = (char *)__get_free_page(GFP_TEMPORARY); if (!buf) return -ENOMEM; if (copy_from_user(buf, ubuf, cnt)) { free_page((unsigned long) buf); return -EFAULT; } buf[cnt] = '\0'; err = apply_subsystem_event_filter(system, buf); free_page((unsigned long) buf); if (err < 0) return err; *ppos += cnt; return cnt; } static ssize_t show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { int (*func)(struct trace_seq *s) = filp->private_data; struct trace_seq *s; int r; if (*ppos) return 0; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; trace_seq_init(s); func(s); r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); kfree(s); return r; } static const struct seq_operations show_event_seq_ops = { .start = t_start, .next = t_next, .show = t_show, .stop = t_stop, }; static const struct seq_operations show_set_event_seq_ops = { .start = s_start, .next = s_next, .show = t_show, .stop = t_stop, }; static const struct file_operations ftrace_avail_fops = { .open = 
ftrace_event_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct file_operations ftrace_set_event_fops = { .open = ftrace_event_seq_open, .read = seq_read, .write = ftrace_event_write, .llseek = seq_lseek, .release = seq_release, }; static const struct file_operations ftrace_enable_fops = { .open = tracing_open_generic, .read = event_enable_read, .write = event_enable_write, }; static const struct file_operations ftrace_event_format_fops = { .open = tracing_open_generic, .read = event_format_read, }; static const struct file_operations ftrace_event_id_fops = { .open = tracing_open_generic, .read = event_id_read, }; static const struct file_operations ftrace_event_filter_fops = { .open = tracing_open_generic, .read = event_filter_read, .write = event_filter_write, }; static const struct file_operations ftrace_subsystem_filter_fops = { .open = tracing_open_generic, .read = subsystem_filter_read, .write = subsystem_filter_write, }; static const struct file_operations ftrace_system_enable_fops = { .open = tracing_open_generic, .read = system_enable_read, .write = system_enable_write, }; static const struct file_operations ftrace_show_header_fops = { .open = tracing_open_generic, .read = show_header, }; static struct dentry *event_trace_events_dir(void) { static struct dentry *d_tracer; static struct dentry *d_events; if (d_events) return d_events; d_tracer = tracing_init_dentry(); if (!d_tracer) return NULL; d_events = debugfs_create_dir("events", d_tracer); if (!d_events) pr_warning("Could not create debugfs " "'events' directory\n"); return d_events; } static LIST_HEAD(event_subsystems); static struct dentry * event_subsystem_dir(const char *name, struct dentry *d_events) { struct event_subsystem *system; struct dentry *entry; /* First see if we did not already create this dir */ list_for_each_entry(system, &event_subsystems, list) { if (strcmp(system->name, name) == 0) { system->nr_events++; return system->entry; } } /* 
need to create new entry */ system = kmalloc(sizeof(*system), GFP_KERNEL); if (!system) { pr_warning("No memory to create event subsystem %s\n", name); return d_events; } system->entry = debugfs_create_dir(name, d_events); if (!system->entry) { pr_warning("Could not create event subsystem %s\n", name); kfree(system); return d_events; } system->nr_events = 1; system->name = kstrdup(name, GFP_KERNEL); if (!system->name) { debugfs_remove(system->entry); kfree(system); return d_events; } list_add(&system->list, &event_subsystems); system->filter = NULL; system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL); if (!system->filter) { pr_warning("Could not allocate filter for subsystem " "'%s'\n", name); return system->entry; } entry = debugfs_create_file("filter", 0644, system->entry, system, &ftrace_subsystem_filter_fops); if (!entry) { kfree(system->filter); system->filter = NULL; pr_warning("Could not create debugfs " "'%s/filter' entry\n", name); } trace_create_file("enable", 0644, system->entry, (void *)system->name, &ftrace_system_enable_fops); return system->entry; } static int event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, const struct file_operations *id, const struct file_operations *enable, const struct file_operations *filter, const struct file_operations *format) { struct list_head *head; int ret; /* * If the trace point header did not define TRACE_SYSTEM * then the system would be called "TRACE_SYSTEM". 
*/ if (strcmp(call->class->system, TRACE_SYSTEM) != 0) d_events = event_subsystem_dir(call->class->system, d_events); call->dir = debugfs_create_dir(call->name, d_events); if (!call->dir) { pr_warning("Could not create debugfs " "'%s' directory\n", call->name); return -1; } if (call->class->probe || call->class->reg) trace_create_file("enable", 0644, call->dir, call, enable); #ifdef CONFIG_PERF_EVENTS if (call->event.type && (call->class->perf_probe || call->class->reg)) trace_create_file("id", 0444, call->dir, call, id); #endif if (call->class->define_fields) { /* * Other events may have the same class. Only update * the fields if they are not already defined. */ head = trace_get_fields(call); if (list_empty(head)) { ret = trace_define_common_fields(call); if (!ret) ret = call->class->define_fields(call); if (ret < 0) { pr_warning("Could not initialize trace point" " events/%s\n", call->name); return ret; } } trace_create_file("filter", 0644, call->dir, call, filter); } trace_create_file("format", 0444, call->dir, call, format); return 0; } static int __trace_add_event_call(struct ftrace_event_call *call) { struct dentry *d_events; int ret; if (!call->name) return -EINVAL; if (call->class->raw_init) { ret = call->class->raw_init(call); if (ret < 0) { if (ret != -ENOSYS) pr_warning("Could not initialize trace " "events/%s\n", call->name); return ret; } } d_events = event_trace_events_dir(); if (!d_events) return -ENOENT; ret = event_create_dir(call, d_events, &ftrace_event_id_fops, &ftrace_enable_fops, &ftrace_event_filter_fops, &ftrace_event_format_fops); if (!ret) list_add(&call->list, &ftrace_events); return ret; } /* Add an additional event_call dynamically */ int trace_add_event_call(struct ftrace_event_call *call) { int ret; mutex_lock(&event_mutex); ret = __trace_add_event_call(call); mutex_unlock(&event_mutex); return ret; } static void remove_subsystem_dir(const char *name) { struct event_subsystem *system; if (strcmp(name, TRACE_SYSTEM) == 0) return; 
list_for_each_entry(system, &event_subsystems, list) { if (strcmp(system->name, name) == 0) { if (!--system->nr_events) { struct event_filter *filter = system->filter; debugfs_remove_recursive(system->entry); list_del(&system->list); if (filter) { kfree(filter->filter_string); kfree(filter); } kfree(system->name); kfree(system); } break; } } } /* * Must be called under locking both of event_mutex and trace_event_mutex. */ static void __trace_remove_event_call(struct ftrace_event_call *call) { ftrace_event_enable_disable(call, 0); if (call->event.funcs) __unregister_ftrace_event(&call->event); debugfs_remove_recursive(call->dir); list_del(&call->list); trace_destroy_fields(call); destroy_preds(call); remove_subsystem_dir(call->class->system); } /* Remove an event_call */ void trace_remove_event_call(struct ftrace_event_call *call) { mutex_lock(&event_mutex); down_write(&trace_event_mutex); __trace_remove_event_call(call); up_write(&trace_event_mutex); mutex_unlock(&event_mutex); } #define for_each_event(event, start, end) \ for (event = start; \ (unsigned long)event < (unsigned long)end; \ event++) #ifdef CONFIG_MODULES static LIST_HEAD(ftrace_module_file_list); /* * Modules must own their file_operations to keep up with * reference counting. */ struct ftrace_module_file_ops { struct list_head list; struct module *mod; struct file_operations id; struct file_operations enable; struct file_operations format; struct file_operations filter; }; static struct ftrace_module_file_ops * trace_create_file_ops(struct module *mod) { struct ftrace_module_file_ops *file_ops; /* * This is a bit of a PITA. To allow for correct reference * counting, modules must "own" their file_operations. * To do this, we allocate the file operations that will be * used in the event directory. 
*/ file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL); if (!file_ops) return NULL; file_ops->mod = mod; file_ops->id = ftrace_event_id_fops; file_ops->id.owner = mod; file_ops->enable = ftrace_enable_fops; file_ops->enable.owner = mod; file_ops->filter = ftrace_event_filter_fops; file_ops->filter.owner = mod; file_ops->format = ftrace_event_format_fops; file_ops->format.owner = mod; list_add(&file_ops->list, &ftrace_module_file_list); return file_ops; } static void trace_module_add_events(struct module *mod) { struct ftrace_module_file_ops *file_ops = NULL; struct ftrace_event_call *call, *start, *end; struct dentry *d_events; int ret; start = mod->trace_events; end = mod->trace_events + mod->num_trace_events; if (start == end) return; d_events = event_trace_events_dir(); if (!d_events) return; for_each_event(call, start, end) { /* The linker may leave blanks */ if (!call->name) continue; if (call->class->raw_init) { ret = call->class->raw_init(call); if (ret < 0) { if (ret != -ENOSYS) pr_warning("Could not initialize trace " "point events/%s\n", call->name); continue; } } /* * This module has events, create file ops for this module * if not already done. 
*/ if (!file_ops) { file_ops = trace_create_file_ops(mod); if (!file_ops) return; } call->mod = mod; ret = event_create_dir(call, d_events, &file_ops->id, &file_ops->enable, &file_ops->filter, &file_ops->format); if (!ret) list_add(&call->list, &ftrace_events); } } static void trace_module_remove_events(struct module *mod) { struct ftrace_module_file_ops *file_ops; struct ftrace_event_call *call, *p; bool found = false; down_write(&trace_event_mutex); list_for_each_entry_safe(call, p, &ftrace_events, list) { if (call->mod == mod) { found = true; __trace_remove_event_call(call); } } /* Now free the file_operations */ list_for_each_entry(file_ops, &ftrace_module_file_list, list) { if (file_ops->mod == mod) break; } if (&file_ops->list != &ftrace_module_file_list) { list_del(&file_ops->list); kfree(file_ops); } /* * It is safest to reset the ring buffer if the module being unloaded * registered any events. */ if (found) tracing_reset_current_online_cpus(); up_write(&trace_event_mutex); } static int trace_module_notify(struct notifier_block *self, unsigned long val, void *data) { struct module *mod = data; mutex_lock(&event_mutex); switch (val) { case MODULE_STATE_COMING: trace_module_add_events(mod); break; case MODULE_STATE_GOING: trace_module_remove_events(mod); break; } mutex_unlock(&event_mutex); return 0; } #else static int trace_module_notify(struct notifier_block *self, unsigned long val, void *data) { return 0; } #endif /* CONFIG_MODULES */ static struct notifier_block trace_module_nb = { .notifier_call = trace_module_notify, .priority = 0, }; extern struct ftrace_event_call __start_ftrace_events[]; extern struct ftrace_event_call __stop_ftrace_events[]; static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata; static __init int setup_trace_event(char *str) { strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE); ring_buffer_expanded = 1; tracing_selftest_disabled = 1; return 1; } __setup("trace_event=", setup_trace_event); static __init int 
event_trace_init(void) { struct ftrace_event_call *call; struct dentry *d_tracer; struct dentry *entry; struct dentry *d_events; int ret; char *buf = bootup_event_buf; char *token; d_tracer = tracing_init_dentry(); if (!d_tracer) return 0; entry = debugfs_create_file("available_events", 0444, d_tracer, (void *)&show_event_seq_ops, &ftrace_avail_fops); if (!entry) pr_warning("Could not create debugfs " "'available_events' entry\n"); entry = debugfs_create_file("set_event", 0644, d_tracer, (void *)&show_set_event_seq_ops, &ftrace_set_event_fops); if (!entry) pr_warning("Could not create debugfs " "'set_event' entry\n"); d_events = event_trace_events_dir(); if (!d_events) return 0; /* ring buffer internal formats */ trace_create_file("header_page", 0444, d_events, ring_buffer_print_page_header, &ftrace_show_header_fops); trace_create_file("header_event", 0444, d_events, ring_buffer_print_entry_header, &ftrace_show_header_fops); trace_create_file("enable", 0644, d_events, NULL, &ftrace_system_enable_fops); for_each_event(call, __start_ftrace_events, __stop_ftrace_events) { /* The linker may leave blanks */ if (!call->name) continue; if (call->class->raw_init) { ret = call->class->raw_init(call); if (ret < 0) { if (ret != -ENOSYS) pr_warning("Could not initialize trace " "point events/%s\n", call->name); continue; } } ret = event_create_dir(call, d_events, &ftrace_event_id_fops, &ftrace_enable_fops, &ftrace_event_filter_fops, &ftrace_event_format_fops); if (!ret) list_add(&call->list, &ftrace_events); } while (true) { token = strsep(&buf, ","); if (!token) break; if (!*token) continue; ret = ftrace_set_clr_event(token, 1); if (ret) pr_warning("Failed to enable trace event: %s\n", token); } ret = register_module_notifier(&trace_module_nb); if (ret) pr_warning("Failed to register trace events module notifier\n"); return 0; } fs_initcall(event_trace_init); #ifdef CONFIG_FTRACE_STARTUP_TEST static DEFINE_SPINLOCK(test_spinlock); static DEFINE_SPINLOCK(test_spinlock_irq); 
static DEFINE_MUTEX(test_mutex); static __init void test_work(struct work_struct *dummy) { spin_lock(&test_spinlock); spin_lock_irq(&test_spinlock_irq); udelay(1); spin_unlock_irq(&test_spinlock_irq); spin_unlock(&test_spinlock); mutex_lock(&test_mutex); msleep(1); mutex_unlock(&test_mutex); } static __init int event_test_thread(void *unused) { void *test_malloc; test_malloc = kmalloc(1234, GFP_KERNEL); if (!test_malloc) pr_info("failed to kmalloc\n"); schedule_on_each_cpu(test_work); kfree(test_malloc); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) schedule(); return 0; } /* * Do various things that may trigger events. */ static __init void event_test_stuff(void) { struct task_struct *test_thread; test_thread = kthread_run(event_test_thread, NULL, "test-events"); msleep(1); kthread_stop(test_thread); } /* * For every trace event defined, we will test each trace point separately, * and then by groups, and finally all trace points. */ static __init void event_trace_self_tests(void) { struct ftrace_event_call *call; struct event_subsystem *system; int ret; pr_info("Running tests on trace events:\n"); list_for_each_entry(call, &ftrace_events, list) { /* Only test those that have a probe */ if (!call->class || !call->class->probe) continue; /* * Testing syscall events here is pretty useless, but * we still do it if configured. But this is time consuming. * What we really need is a user thread to perform the * syscalls as we test. */ #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS if (call->class->system && strcmp(call->class->system, "syscalls") == 0) continue; #endif pr_info("Testing event %s: ", call->name); /* * If an event is already enabled, someone is using * it and the self test should not be on. 
*/ if (call->flags & TRACE_EVENT_FL_ENABLED) { pr_warning("Enabled event during self test!\n"); WARN_ON_ONCE(1); continue; } ftrace_event_enable_disable(call, 1); event_test_stuff(); ftrace_event_enable_disable(call, 0); pr_cont("OK\n"); } /* Now test at the sub system level */ pr_info("Running tests on trace event systems:\n"); list_for_each_entry(system, &event_subsystems, list) { /* the ftrace system is special, skip it */ if (strcmp(system->name, "ftrace") == 0) continue; pr_info("Testing event system %s: ", system->name); ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1); if (WARN_ON_ONCE(ret)) { pr_warning("error enabling system %s\n", system->name); continue; } event_test_stuff(); ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0); if (WARN_ON_ONCE(ret)) pr_warning("error disabling system %s\n", system->name); pr_cont("OK\n"); } /* Test with all events enabled */ pr_info("Running tests on all trace events:\n"); pr_info("Testing all events: "); ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1); if (WARN_ON_ONCE(ret)) { pr_warning("error enabling all events\n"); return; } event_test_stuff(); /* reset sysname */ ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0); if (WARN_ON_ONCE(ret)) { pr_warning("error disabling all events\n"); return; } pr_cont("OK\n"); } #ifdef CONFIG_FUNCTION_TRACER static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); static void function_test_events_call(unsigned long ip, unsigned long parent_ip) { struct ring_buffer_event *event; struct ring_buffer *buffer; struct ftrace_entry *entry; unsigned long flags; long disabled; int resched; int cpu; int pc; pc = preempt_count(); resched = ftrace_preempt_disable(); cpu = raw_smp_processor_id(); disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); if (disabled != 1) goto out; local_save_flags(flags); event = trace_current_buffer_lock_reserve(&buffer, TRACE_FN, sizeof(*entry), flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); entry->ip 
= ip; entry->parent_ip = parent_ip; trace_nowake_buffer_unlock_commit(buffer, event, flags, pc); out: atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); ftrace_preempt_enable(resched); } static struct ftrace_ops trace_ops __initdata = { .func = function_test_events_call, }; static __init void event_trace_self_test_with_function(void) { register_ftrace_function(&trace_ops); pr_info("Running tests again, along with the function tracer\n"); event_trace_self_tests(); unregister_ftrace_function(&trace_ops); } #else static __init void event_trace_self_test_with_function(void) { } #endif static __init int event_trace_self_tests_init(void) { if (!tracing_selftest_disabled) { event_trace_self_tests(); event_trace_self_test_with_function(); } return 0; } late_initcall(event_trace_self_tests_init); #endif
gpl-2.0
ghdk/os
drivers/media/usb/go7007/snd-go7007.c
1598
7841
/* * Copyright (C) 2005-2006 Micronas USA Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/vmalloc.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/i2c.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/initval.h> #include "go7007-priv.h" static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(index, int, NULL, 0444); module_param_array(id, charp, NULL, 0444); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the go7007 audio driver"); MODULE_PARM_DESC(id, "ID string for the go7007 audio driver"); MODULE_PARM_DESC(enable, "Enable for the go7007 audio driver"); struct go7007_snd { struct snd_card *card; struct snd_pcm *pcm; struct snd_pcm_substream *substream; spinlock_t lock; int w_idx; int hw_ptr; int avail; int capturing; }; static struct snd_pcm_hardware go7007_snd_capture_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 4096, .period_bytes_max = (128*1024), .periods_min = 1, 
.periods_max = 32, }; static void parse_audio_stream_data(struct go7007 *go, u8 *buf, int length) { struct go7007_snd *gosnd = go->snd_context; struct snd_pcm_runtime *runtime = gosnd->substream->runtime; int frames = bytes_to_frames(runtime, length); spin_lock(&gosnd->lock); gosnd->hw_ptr += frames; if (gosnd->hw_ptr >= runtime->buffer_size) gosnd->hw_ptr -= runtime->buffer_size; gosnd->avail += frames; spin_unlock(&gosnd->lock); if (gosnd->w_idx + length > runtime->dma_bytes) { int cpy = runtime->dma_bytes - gosnd->w_idx; memcpy(runtime->dma_area + gosnd->w_idx, buf, cpy); length -= cpy; buf += cpy; gosnd->w_idx = 0; } memcpy(runtime->dma_area + gosnd->w_idx, buf, length); gosnd->w_idx += length; spin_lock(&gosnd->lock); if (gosnd->avail < runtime->period_size) { spin_unlock(&gosnd->lock); return; } gosnd->avail -= runtime->period_size; spin_unlock(&gosnd->lock); if (gosnd->capturing) snd_pcm_period_elapsed(gosnd->substream); } static int go7007_snd_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct go7007 *go = snd_pcm_substream_chip(substream); unsigned int bytes; bytes = params_buffer_bytes(hw_params); if (substream->runtime->dma_bytes > 0) vfree(substream->runtime->dma_area); substream->runtime->dma_bytes = 0; substream->runtime->dma_area = vmalloc(bytes); if (substream->runtime->dma_area == NULL) return -ENOMEM; substream->runtime->dma_bytes = bytes; go->audio_deliver = parse_audio_stream_data; return 0; } static int go7007_snd_hw_free(struct snd_pcm_substream *substream) { struct go7007 *go = snd_pcm_substream_chip(substream); go->audio_deliver = NULL; if (substream->runtime->dma_bytes > 0) vfree(substream->runtime->dma_area); substream->runtime->dma_bytes = 0; return 0; } static int go7007_snd_capture_open(struct snd_pcm_substream *substream) { struct go7007 *go = snd_pcm_substream_chip(substream); struct go7007_snd *gosnd = go->snd_context; unsigned long flags; int r; spin_lock_irqsave(&gosnd->lock, flags); if 
(gosnd->substream == NULL) { gosnd->substream = substream; substream->runtime->hw = go7007_snd_capture_hw; r = 0; } else r = -EBUSY; spin_unlock_irqrestore(&gosnd->lock, flags); return r; } static int go7007_snd_capture_close(struct snd_pcm_substream *substream) { struct go7007 *go = snd_pcm_substream_chip(substream); struct go7007_snd *gosnd = go->snd_context; gosnd->substream = NULL; return 0; } static int go7007_snd_pcm_prepare(struct snd_pcm_substream *substream) { return 0; } static int go7007_snd_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct go7007 *go = snd_pcm_substream_chip(substream); struct go7007_snd *gosnd = go->snd_context; switch (cmd) { case SNDRV_PCM_TRIGGER_START: /* Just set a flag to indicate we should signal ALSA when * sound comes in */ gosnd->capturing = 1; return 0; case SNDRV_PCM_TRIGGER_STOP: gosnd->hw_ptr = gosnd->w_idx = gosnd->avail = 0; gosnd->capturing = 0; return 0; default: return -EINVAL; } } static snd_pcm_uframes_t go7007_snd_pcm_pointer(struct snd_pcm_substream *substream) { struct go7007 *go = snd_pcm_substream_chip(substream); struct go7007_snd *gosnd = go->snd_context; return gosnd->hw_ptr; } static struct page *go7007_snd_pcm_page(struct snd_pcm_substream *substream, unsigned long offset) { return vmalloc_to_page(substream->runtime->dma_area + offset); } static struct snd_pcm_ops go7007_snd_capture_ops = { .open = go7007_snd_capture_open, .close = go7007_snd_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = go7007_snd_hw_params, .hw_free = go7007_snd_hw_free, .prepare = go7007_snd_pcm_prepare, .trigger = go7007_snd_pcm_trigger, .pointer = go7007_snd_pcm_pointer, .page = go7007_snd_pcm_page, }; static int go7007_snd_free(struct snd_device *device) { struct go7007 *go = device->device_data; kfree(go->snd_context); go->snd_context = NULL; return 0; } static struct snd_device_ops go7007_snd_device_ops = { .dev_free = go7007_snd_free, }; int go7007_snd_init(struct go7007 *go) { static int dev; struct 
go7007_snd *gosnd; int ret = 0; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } gosnd = kmalloc(sizeof(struct go7007_snd), GFP_KERNEL); if (gosnd == NULL) return -ENOMEM; spin_lock_init(&gosnd->lock); gosnd->hw_ptr = gosnd->w_idx = gosnd->avail = 0; gosnd->capturing = 0; ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0, &gosnd->card); if (ret < 0) { kfree(gosnd); return ret; } ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, go, &go7007_snd_device_ops); if (ret < 0) { kfree(gosnd); return ret; } ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm); if (ret < 0) { snd_card_free(gosnd->card); kfree(gosnd); return ret; } strlcpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver)); strlcpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->driver)); strlcpy(gosnd->card->longname, gosnd->card->shortname, sizeof(gosnd->card->longname)); gosnd->pcm->private_data = go; snd_pcm_set_ops(gosnd->pcm, SNDRV_PCM_STREAM_CAPTURE, &go7007_snd_capture_ops); ret = snd_card_register(gosnd->card); if (ret < 0) { snd_card_free(gosnd->card); kfree(gosnd); return ret; } gosnd->substream = NULL; go->snd_context = gosnd; v4l2_device_get(&go->v4l2_dev); ++dev; return 0; } EXPORT_SYMBOL(go7007_snd_init); int go7007_snd_remove(struct go7007 *go) { struct go7007_snd *gosnd = go->snd_context; snd_card_disconnect(gosnd->card); snd_card_free_when_closed(gosnd->card); v4l2_device_put(&go->v4l2_dev); return 0; } EXPORT_SYMBOL(go7007_snd_remove); MODULE_LICENSE("GPL v2");
gpl-2.0
faux123/flounder
drivers/scsi/qla2xxx/qla_nx.c
2110
117151
/* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" #include <linux/delay.h> #include <linux/pci.h> #include <linux/ratelimit.h> #include <linux/vmalloc.h> #include <scsi/scsi_tcq.h> #define MASK(n) ((1ULL<<(n))-1) #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \ ((addr >> 25) & 0x3ff)) #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \ ((addr >> 25) & 0x3ff)) #define MS_WIN(addr) (addr & 0x0ffc0000) #define QLA82XX_PCI_MN_2M (0) #define QLA82XX_PCI_MS_2M (0x80000) #define QLA82XX_PCI_OCM0_2M (0xc0000) #define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800) #define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) #define BLOCK_PROTECT_BITS 0x0F /* CRB window related */ #define CRB_BLK(off) ((off >> 20) & 0x3f) #define CRB_SUBBLK(off) ((off >> 16) & 0xf) #define CRB_WINDOW_2M (0x130060) #define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL) #define CRB_HI(off) ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \ ((off) & 0xf0000)) #define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL) #define CRB_INDIRECT_2M (0x1e0000UL) #define MAX_CRB_XFORM 60 static unsigned long crb_addr_xform[MAX_CRB_XFORM]; static int qla82xx_crb_table_initialized; #define qla82xx_crb_addr_transform(name) \ (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20) static void qla82xx_crb_addr_transform_setup(void) { qla82xx_crb_addr_transform(XDMA); qla82xx_crb_addr_transform(TIMR); qla82xx_crb_addr_transform(SRE); qla82xx_crb_addr_transform(SQN3); qla82xx_crb_addr_transform(SQN2); qla82xx_crb_addr_transform(SQN1); qla82xx_crb_addr_transform(SQN0); qla82xx_crb_addr_transform(SQS3); qla82xx_crb_addr_transform(SQS2); qla82xx_crb_addr_transform(SQS1); qla82xx_crb_addr_transform(SQS0); qla82xx_crb_addr_transform(RPMX7); qla82xx_crb_addr_transform(RPMX6); qla82xx_crb_addr_transform(RPMX5); qla82xx_crb_addr_transform(RPMX4); qla82xx_crb_addr_transform(RPMX3); 
qla82xx_crb_addr_transform(RPMX2); qla82xx_crb_addr_transform(RPMX1); qla82xx_crb_addr_transform(RPMX0); qla82xx_crb_addr_transform(ROMUSB); qla82xx_crb_addr_transform(SN); qla82xx_crb_addr_transform(QMN); qla82xx_crb_addr_transform(QMS); qla82xx_crb_addr_transform(PGNI); qla82xx_crb_addr_transform(PGND); qla82xx_crb_addr_transform(PGN3); qla82xx_crb_addr_transform(PGN2); qla82xx_crb_addr_transform(PGN1); qla82xx_crb_addr_transform(PGN0); qla82xx_crb_addr_transform(PGSI); qla82xx_crb_addr_transform(PGSD); qla82xx_crb_addr_transform(PGS3); qla82xx_crb_addr_transform(PGS2); qla82xx_crb_addr_transform(PGS1); qla82xx_crb_addr_transform(PGS0); qla82xx_crb_addr_transform(PS); qla82xx_crb_addr_transform(PH); qla82xx_crb_addr_transform(NIU); qla82xx_crb_addr_transform(I2Q); qla82xx_crb_addr_transform(EG); qla82xx_crb_addr_transform(MN); qla82xx_crb_addr_transform(MS); qla82xx_crb_addr_transform(CAS2); qla82xx_crb_addr_transform(CAS1); qla82xx_crb_addr_transform(CAS0); qla82xx_crb_addr_transform(CAM); qla82xx_crb_addr_transform(C2C1); qla82xx_crb_addr_transform(C2C0); qla82xx_crb_addr_transform(SMB); qla82xx_crb_addr_transform(OCM0); /* * Used only in P3 just define it for P2 also. 
*/ qla82xx_crb_addr_transform(I2C0); qla82xx_crb_table_initialized = 1; } static struct crb_128M_2M_block_map crb_128M_2M_map[64] = { {{{0, 0, 0, 0} } }, {{{1, 0x0100000, 0x0102000, 0x120000}, {1, 0x0110000, 0x0120000, 0x130000}, {1, 0x0120000, 0x0122000, 0x124000}, {1, 0x0130000, 0x0132000, 0x126000}, {1, 0x0140000, 0x0142000, 0x128000}, {1, 0x0150000, 0x0152000, 0x12a000}, {1, 0x0160000, 0x0170000, 0x110000}, {1, 0x0170000, 0x0172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x01e0000, 0x01e0800, 0x122000}, {0, 0x0000000, 0x0000000, 0x000000} } } , {{{1, 0x0200000, 0x0210000, 0x180000} } }, {{{0, 0, 0, 0} } }, {{{1, 0x0400000, 0x0401000, 0x169000} } }, {{{1, 0x0500000, 0x0510000, 0x140000} } }, {{{1, 0x0600000, 0x0610000, 0x1c0000} } }, {{{1, 0x0700000, 0x0704000, 0x1b8000} } }, {{{1, 0x0800000, 0x0802000, 0x170000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x08f0000, 0x08f2000, 0x172000} } }, {{{1, 0x0900000, 0x0902000, 0x174000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 
0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x09f0000, 0x09f2000, 0x176000} } }, {{{0, 0x0a00000, 0x0a02000, 0x178000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0af0000, 0x0af2000, 0x17a000} } }, {{{0, 0x0b00000, 0x0b02000, 0x17c000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } }, {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } }, {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } }, {{{1, 0x0f00000, 0x0f01000, 0x164000} } }, {{{0, 0x1000000, 0x1004000, 0x1a8000} } }, {{{1, 0x1100000, 0x1101000, 0x160000} } }, {{{1, 0x1200000, 0x1201000, 0x161000} } }, {{{1, 0x1300000, 0x1301000, 0x162000} } }, {{{1, 0x1400000, 0x1401000, 0x163000} } }, {{{1, 0x1500000, 0x1501000, 0x165000} } }, {{{1, 0x1600000, 0x1601000, 0x166000} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{1, 0x1d00000, 
0x1d10000, 0x190000} } }, {{{1, 0x1e00000, 0x1e01000, 0x16a000} } }, {{{1, 0x1f00000, 0x1f10000, 0x150000} } }, {{{0} } }, {{{1, 0x2100000, 0x2102000, 0x120000}, {1, 0x2110000, 0x2120000, 0x130000}, {1, 0x2120000, 0x2122000, 0x124000}, {1, 0x2130000, 0x2132000, 0x126000}, {1, 0x2140000, 0x2142000, 0x128000}, {1, 0x2150000, 0x2152000, 0x12a000}, {1, 0x2160000, 0x2170000, 0x110000}, {1, 0x2170000, 0x2172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000} } }, {{{1, 0x2200000, 0x2204000, 0x1b0000} } }, {{{0} } }, {{{0} } }, {{{0} } }, {{{0} } }, {{{0} } }, {{{1, 0x2800000, 0x2804000, 0x1a4000} } }, {{{1, 0x2900000, 0x2901000, 0x16b000} } }, {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } }, {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } }, {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } }, {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } }, {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } }, {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } }, {{{1, 0x3000000, 0x3000400, 0x1adc00} } }, {{{0, 0x3100000, 0x3104000, 0x1a8000} } }, {{{1, 0x3200000, 0x3204000, 0x1d4000} } }, {{{1, 0x3300000, 0x3304000, 0x1a0000} } }, {{{0} } }, {{{1, 0x3500000, 0x3500400, 0x1ac000} } }, {{{1, 0x3600000, 0x3600400, 0x1ae000} } }, {{{1, 0x3700000, 0x3700400, 0x1ae400} } }, {{{1, 0x3800000, 0x3804000, 0x1d0000} } }, {{{1, 0x3900000, 0x3904000, 0x1b4000} } }, {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } }, {{{0} } }, {{{0} } }, {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } }, {{{1, 0x3e00000, 0x3e01000, 0x167000} } }, {{{1, 0x3f00000, 0x3f01000, 0x168000} } } }; /* * top 12 bits of crb internal address (hub, agent) */ static unsigned qla82xx_crb_hub_agt[64] = { 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PS, QLA82XX_HW_CRB_HUB_AGT_ADR_MN, QLA82XX_HW_CRB_HUB_AGT_ADR_MS, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_SRE, 
QLA82XX_HW_CRB_HUB_AGT_ADR_NIU, QLA82XX_HW_CRB_HUB_AGT_ADR_QMN, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3, QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q, QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR, QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4, QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3, QLA82XX_HW_CRB_HUB_AGT_ADR_PGND, QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI, QLA82XX_HW_CRB_HUB_AGT_ADR_SN, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_EG, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PS, QLA82XX_HW_CRB_HUB_AGT_ADR_CAM, 0, 0, 0, 0, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7, QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA, QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q, QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9, QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_SMB, QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0, QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC, 0, }; /* Device states */ static char *q_dev_state[] = { "Unknown", "Cold", "Initializing", "Ready", "Need Reset", "Need Quiescent", "Failed", "Quiescent", }; char *qdev_state(uint32_t dev_state) { return q_dev_state[dev_state]; } /* * In: 'off' is offset from CRB space in 128M pci map * Out: 'off' is 2M pci map addr * side effect: lock crb window */ static void qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off) { u32 win_read; scsi_qla_host_t *vha = 
pci_get_drvdata(ha->pdev); ha->crb_win = CRB_HI(*off); writel(ha->crb_win, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); /* Read back value to make sure write has gone through before trying * to use it. */ win_read = RD_REG_DWORD((void __iomem *) (CRB_WINDOW_2M + ha->nx_pcibase)); if (win_read != ha->crb_win) { ql_dbg(ql_dbg_p3p, vha, 0xb000, "%s: Written crbwin (0x%x) " "!= Read crbwin (0x%x), off=0x%lx.\n", __func__, ha->crb_win, win_read, *off); } *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; } static inline unsigned long qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); /* See if we are currently pointing to the region we want to use next */ if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) { /* No need to change window. PCIX and PCIEregs are in both * regs are in both windows. */ return off; } if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) { /* We are in first CRB window */ if (ha->curr_window != 0) WARN_ON(1); return off; } if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) { /* We are in second CRB window */ off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST; if (ha->curr_window != 1) return off; /* We are in the QM or direct access * register region - do nothing */ if ((off >= QLA82XX_PCI_DIRECT_CRB) && (off < QLA82XX_PCI_CAMQM_MAX)) return off; } /* strange address given */ ql_dbg(ql_dbg_p3p, vha, 0xb001, "%s: Warning: unm_nic_pci_set_crbwindow " "called with an unknown address(%llx).\n", QLA2XXX_DRIVER_NAME, off); return off; } static int qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off) { struct crb_128M_2M_sub_block_map *m; if (*off >= QLA82XX_CRB_MAX) return -1; if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) { *off = (*off - QLA82XX_PCI_CAMQM) + QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase; return 0; } if (*off < QLA82XX_PCI_CRBSPACE) return -1; *off -= QLA82XX_PCI_CRBSPACE; /* Try 
direct map */ m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)]; if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) { *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase; return 0; } /* Not in direct map, use crb window */ return 1; } #define CRB_WIN_LOCK_TIMEOUT 100000000 static int qla82xx_crb_win_lock(struct qla_hw_data *ha) { int done = 0, timeout = 0; while (!done) { /* acquire semaphore3 from PCI HW block */ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK)); if (done == 1) break; if (timeout >= CRB_WIN_LOCK_TIMEOUT) return -1; timeout++; } qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum); return 0; } int qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data) { unsigned long flags = 0; int rv; rv = qla82xx_pci_get_crb_addr_2M(ha, &off); BUG_ON(rv == -1); if (rv == 1) { write_lock_irqsave(&ha->hw_lock, flags); qla82xx_crb_win_lock(ha); qla82xx_pci_set_crbwindow_2M(ha, &off); } writel(data, (void __iomem *)off); if (rv == 1) { qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK)); write_unlock_irqrestore(&ha->hw_lock, flags); } return 0; } int qla82xx_rd_32(struct qla_hw_data *ha, ulong off) { unsigned long flags = 0; int rv; u32 data; rv = qla82xx_pci_get_crb_addr_2M(ha, &off); BUG_ON(rv == -1); if (rv == 1) { write_lock_irqsave(&ha->hw_lock, flags); qla82xx_crb_win_lock(ha); qla82xx_pci_set_crbwindow_2M(ha, &off); } data = RD_REG_DWORD((void __iomem *)off); if (rv == 1) { qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK)); write_unlock_irqrestore(&ha->hw_lock, flags); } return data; } #define IDC_LOCK_TIMEOUT 100000000 int qla82xx_idc_lock(struct qla_hw_data *ha) { int i; int done = 0, timeout = 0; while (!done) { /* acquire semaphore5 from PCI HW block */ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK)); if (done == 1) break; if (timeout >= IDC_LOCK_TIMEOUT) return -1; timeout++; /* Yield CPU */ if (!in_interrupt()) schedule(); else { for (i = 0; i < 20; i++) cpu_relax(); } } return 0; } void 
qla82xx_idc_unlock(struct qla_hw_data *ha) { qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK)); } /* PCI Windowing for DDR regions. */ #define QLA82XX_ADDR_IN_RANGE(addr, low, high) \ (((addr) <= (high)) && ((addr) >= (low))) /* * check memory access boundary. * used by test agent. support ddr access only for now */ static unsigned long qla82xx_pci_mem_bound_check(struct qla_hw_data *ha, unsigned long long addr, int size) { if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX) || !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX) || ((size != 1) && (size != 2) && (size != 4) && (size != 8))) return 0; else return 1; } static int qla82xx_pci_set_window_warning_count; static unsigned long qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) { int window; u32 win_read; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX)) { /* DDR network side */ window = MN_WIN(addr); ha->ddr_mn_window = window; qla82xx_wr_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window); win_read = qla82xx_rd_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); if ((win_read << 17) != window) { ql_dbg(ql_dbg_p3p, vha, 0xb003, "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n", __func__, window, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET; } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0, QLA82XX_ADDR_OCM0_MAX)) { unsigned int temp1; if ((addr & 0x00ff800) == 0xff800) { ql_log(ql_log_warn, vha, 0xb004, "%s: QM access not handled.\n", __func__); addr = -1UL; } window = OCM_WIN(addr); ha->ddr_mn_window = window; qla82xx_wr_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window); win_read = qla82xx_rd_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); temp1 = ((window & 0x1FF) << 7) | ((window & 0x0FFFE0000) >> 17); if (win_read != temp1) { ql_log(ql_log_warn, vha, 0xb005, "%s: Written OCMwin (0x%x) != Read OCMwin 
(0x%x).\n", __func__, temp1, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M; } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, QLA82XX_P3_ADDR_QDR_NET_MAX)) { /* QDR network side */ window = MS_WIN(addr); ha->qdr_sn_window = window; qla82xx_wr_32(ha, ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window); win_read = qla82xx_rd_32(ha, ha->ms_win_crb | QLA82XX_PCI_CRBSPACE); if (win_read != window) { ql_log(ql_log_warn, vha, 0xb006, "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n", __func__, window, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET; } else { /* * peg gdb frequently accesses memory that doesn't exist, * this limits the chit chat so debugging isn't slowed down. */ if ((qla82xx_pci_set_window_warning_count++ < 8) || (qla82xx_pci_set_window_warning_count%64 == 0)) { ql_log(ql_log_warn, vha, 0xb007, "%s: Warning:%s Unknown address range!.\n", __func__, QLA2XXX_DRIVER_NAME); } addr = -1UL; } return addr; } /* check if address is in the same windows as the previous access */ static int qla82xx_pci_is_same_window(struct qla_hw_data *ha, unsigned long long addr) { int window; unsigned long long qdr_max; qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX; /* DDR network side */ if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX)) BUG(); else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0, QLA82XX_ADDR_OCM0_MAX)) return 1; else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1, QLA82XX_ADDR_OCM1_MAX)) return 1; else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) { /* QDR network side */ window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f; if (ha->qdr_sn_window == window) return 1; } return 0; } static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha, u64 off, void *data, int size) { unsigned long flags; void __iomem *addr = NULL; int ret = 0; u64 start; uint8_t __iomem *mem_ptr = NULL; unsigned long mem_base; unsigned long mem_page; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 
write_lock_irqsave(&ha->hw_lock, flags); /* * If attempting to access unknown address or straddle hw windows, * do not access. */ start = qla82xx_pci_set_window(ha, off); if ((start == -1UL) || (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { write_unlock_irqrestore(&ha->hw_lock, flags); ql_log(ql_log_fatal, vha, 0xb008, "%s out of bound pci memory " "access, offset is 0x%llx.\n", QLA2XXX_DRIVER_NAME, off); return -1; } write_unlock_irqrestore(&ha->hw_lock, flags); mem_base = pci_resource_start(ha->pdev, 0); mem_page = start & PAGE_MASK; /* Map two pages whenever user tries to access addresses in two * consecutive pages. */ if (mem_page != ((start + size - 1) & PAGE_MASK)) mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2); else mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); if (mem_ptr == NULL) { *(u8 *)data = 0; return -1; } addr = mem_ptr; addr += start & (PAGE_SIZE - 1); write_lock_irqsave(&ha->hw_lock, flags); switch (size) { case 1: *(u8 *)data = readb(addr); break; case 2: *(u16 *)data = readw(addr); break; case 4: *(u32 *)data = readl(addr); break; case 8: *(u64 *)data = readq(addr); break; default: ret = -1; break; } write_unlock_irqrestore(&ha->hw_lock, flags); if (mem_ptr) iounmap(mem_ptr); return ret; } static int qla82xx_pci_mem_write_direct(struct qla_hw_data *ha, u64 off, void *data, int size) { unsigned long flags; void __iomem *addr = NULL; int ret = 0; u64 start; uint8_t __iomem *mem_ptr = NULL; unsigned long mem_base; unsigned long mem_page; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); write_lock_irqsave(&ha->hw_lock, flags); /* * If attempting to access unknown address or straddle hw windows, * do not access. 
*/ start = qla82xx_pci_set_window(ha, off); if ((start == -1UL) || (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { write_unlock_irqrestore(&ha->hw_lock, flags); ql_log(ql_log_fatal, vha, 0xb009, "%s out of bount memory " "access, offset is 0x%llx.\n", QLA2XXX_DRIVER_NAME, off); return -1; } write_unlock_irqrestore(&ha->hw_lock, flags); mem_base = pci_resource_start(ha->pdev, 0); mem_page = start & PAGE_MASK; /* Map two pages whenever user tries to access addresses in two * consecutive pages. */ if (mem_page != ((start + size - 1) & PAGE_MASK)) mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2); else mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); if (mem_ptr == NULL) return -1; addr = mem_ptr; addr += start & (PAGE_SIZE - 1); write_lock_irqsave(&ha->hw_lock, flags); switch (size) { case 1: writeb(*(u8 *)data, addr); break; case 2: writew(*(u16 *)data, addr); break; case 4: writel(*(u32 *)data, addr); break; case 8: writeq(*(u64 *)data, addr); break; default: ret = -1; break; } write_unlock_irqrestore(&ha->hw_lock, flags); if (mem_ptr) iounmap(mem_ptr); return ret; } #define MTU_FUDGE_FACTOR 100 static unsigned long qla82xx_decode_crb_addr(unsigned long addr) { int i; unsigned long base_addr, offset, pci_base; if (!qla82xx_crb_table_initialized) qla82xx_crb_addr_transform_setup(); pci_base = ADDR_ERROR; base_addr = addr & 0xfff00000; offset = addr & 0x000fffff; for (i = 0; i < MAX_CRB_XFORM; i++) { if (crb_addr_xform[i] == base_addr) { pci_base = i << 20; break; } } if (pci_base == ADDR_ERROR) return pci_base; return pci_base + offset; } static long rom_max_timeout = 100; static long qla82xx_rom_lock_timeout = 100; static int qla82xx_rom_lock(struct qla_hw_data *ha) { int done = 0, timeout = 0; uint32_t lock_owner = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while (!done) { /* acquire semaphore2 from PCI HW block */ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK)); if (done == 1) break; if (timeout >= qla82xx_rom_lock_timeout) { 
lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); ql_dbg(ql_dbg_p3p, vha, 0xb085, "Failed to acquire rom lock, acquired by %d.\n", lock_owner); return -1; } timeout++; } qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER); return 0; } static void qla82xx_rom_unlock(struct qla_hw_data *ha) { qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); } static int qla82xx_wait_rom_busy(struct qla_hw_data *ha) { long timeout = 0; long done = 0 ; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while (done == 0) { done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); done &= 4; timeout++; if (timeout >= rom_max_timeout) { ql_dbg(ql_dbg_p3p, vha, 0xb00a, "%s: Timeout reached waiting for rom busy.\n", QLA2XXX_DRIVER_NAME); return -1; } } return 0; } static int qla82xx_wait_rom_done(struct qla_hw_data *ha) { long timeout = 0; long done = 0 ; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while (done == 0) { done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); done &= 2; timeout++; if (timeout >= rom_max_timeout) { ql_dbg(ql_dbg_p3p, vha, 0xb00b, "%s: Timeout reached waiting for rom done.\n", QLA2XXX_DRIVER_NAME); return -1; } } return 0; } static int qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag) { uint32_t off_value, rval = 0; WRT_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase), (off & 0xFFFF0000)); /* Read back value to make sure write has gone through */ RD_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); off_value = (off & 0x0000FFFF); if (flag) WRT_REG_DWORD((void __iomem *) (off_value + CRB_INDIRECT_2M + ha->nx_pcibase), data); else rval = RD_REG_DWORD((void __iomem *) (off_value + CRB_INDIRECT_2M + ha->nx_pcibase)); return rval; } static int qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) { /* Dword reads to flash. 
*/ qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (addr & 0xFFFF0000), 1); *valp = qla82xx_md_rw_32(ha, MD_DIRECT_ROM_READ_BASE + (addr & 0x0000FFFF), 0, 0); return 0; } static int qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) { int ret, loops = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { udelay(100); schedule(); loops++; } if (loops >= 50000) { ql_log(ql_log_fatal, vha, 0x00b9, "Failed to acquire SEM2 lock.\n"); return -1; } ret = qla82xx_do_rom_fast_read(ha, addr, valp); qla82xx_rom_unlock(ha); return ret; } static int qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR); qla82xx_wait_rom_busy(ha); if (qla82xx_wait_rom_done(ha)) { ql_log(ql_log_warn, vha, 0xb00c, "Error waiting for rom done.\n"); return -1; } *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); return 0; } static int qla82xx_flash_wait_write_finish(struct qla_hw_data *ha) { long timeout = 0; uint32_t done = 1 ; uint32_t val; int ret = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); while ((done != 0) && (ret == 0)) { ret = qla82xx_read_status_reg(ha, &val); done = val & 1; timeout++; udelay(10); cond_resched(); if (timeout >= 50000) { ql_log(ql_log_warn, vha, 0xb00d, "Timeout reached waiting for write finish.\n"); return -1; } } return ret; } static int qla82xx_flash_set_write_enable(struct qla_hw_data *ha) { uint32_t val; qla82xx_wait_rom_busy(ha); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN); qla82xx_wait_rom_busy(ha); if (qla82xx_wait_rom_done(ha)) return -1; if (qla82xx_read_status_reg(ha, &val) != 0) return -1; if ((val & 2) != 2) return -1; return 0; } static int qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val) { scsi_qla_host_t *vha = 
pci_get_drvdata(ha->pdev); if (qla82xx_flash_set_write_enable(ha)) return -1; qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1); if (qla82xx_wait_rom_done(ha)) { ql_log(ql_log_warn, vha, 0xb00e, "Error waiting for rom done.\n"); return -1; } return qla82xx_flash_wait_write_finish(ha); } static int qla82xx_write_disable_flash(struct qla_hw_data *ha) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI); if (qla82xx_wait_rom_done(ha)) { ql_log(ql_log_warn, vha, 0xb00f, "Error waiting for rom done.\n"); return -1; } return 0; } static int ql82xx_rom_lock_d(struct qla_hw_data *ha) { int loops = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { udelay(100); cond_resched(); loops++; } if (loops >= 50000) { ql_log(ql_log_warn, vha, 0xb010, "ROM lock failed.\n"); return -1; } return 0; } static int qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr, uint32_t data) { int ret = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ret = ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(ql_log_warn, vha, 0xb011, "ROM lock failed.\n"); return ret; } if (qla82xx_flash_set_write_enable(ha)) goto done_write; qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP); qla82xx_wait_rom_busy(ha); if (qla82xx_wait_rom_done(ha)) { ql_log(ql_log_warn, vha, 0xb012, "Error waiting for rom done.\n"); ret = -1; goto done_write; } ret = qla82xx_flash_wait_write_finish(ha); done_write: qla82xx_rom_unlock(ha); return ret; } /* This routine does CRB initialize sequence * to put the ISP into operational state */ static int qla82xx_pinit_from_rom(scsi_qla_host_t *vha) { int addr, val; int i ; struct crb_addr_pair *buf; unsigned long off; 
unsigned offset, n; struct qla_hw_data *ha = vha->hw; struct crb_addr_pair { long addr; long data; }; /* Halt all the individual PEGs and other blocks of the ISP */ qla82xx_rom_lock(ha); /* disable all I2Q */ qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0); /* disable all niu interrupts */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); /* disable xge rx/tx */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); /* disable xg1 rx/tx */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); /* disable sideband mac */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00); /* disable ap0 mac */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00); /* disable ap1 mac */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00); /* halt sre */ val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1))); /* halt epg */ qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1); /* halt timers */ qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0); /* halt pegs */ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); msleep(20); /* big hammer */ if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) /* don't reset CAM block on reset */ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); else qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); 
qla82xx_rom_unlock(ha); /* Read the signature value from the flash. * Offset 0: Contain signature (0xcafecafe) * Offset 4: Offset and number of addr/value pairs * that present in CRB initialize sequence */ if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || qla82xx_rom_fast_read(ha, 4, &n) != 0) { ql_log(ql_log_fatal, vha, 0x006e, "Error Reading crb_init area: n: %08x.\n", n); return -1; } /* Offset in flash = lower 16 bits * Number of entries = upper 16 bits */ offset = n & 0xffffU; n = (n >> 16) & 0xffffU; /* number of addr/value pair should not exceed 1024 entries */ if (n >= 1024) { ql_log(ql_log_fatal, vha, 0x0071, "Card flash not initialized:n=0x%x.\n", n); return -1; } ql_log(ql_log_info, vha, 0x0072, "%d CRB init values found in ROM.\n", n); buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL); if (buf == NULL) { ql_log(ql_log_fatal, vha, 0x010c, "Unable to allocate memory.\n"); return -1; } for (i = 0; i < n; i++) { if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 || qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) { kfree(buf); return -1; } buf[i].addr = addr; buf[i].data = val; } for (i = 0; i < n; i++) { /* Translate internal CRB initialization * address to PCI bus address */ off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) + QLA82XX_PCI_CRBSPACE; /* Not all CRB addr/value pair to be written, * some of them are skipped */ /* skipping cold reboot MAGIC */ if (off == QLA82XX_CAM_RAM(0x1fc)) continue; /* do not reset PCI */ if (off == (ROMUSB_GLB + 0xbc)) continue; /* skip core clock, so that firmware can increase the clock */ if (off == (ROMUSB_GLB + 0xc8)) continue; /* skip the function enable register */ if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION)) continue; if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2)) continue; if ((off & 0x0ff00000) == QLA82XX_CRB_SMB) continue; if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET) continue; if (off == ADDR_ERROR) { ql_log(ql_log_fatal, vha, 0x0116, "Unknow addr: 
0x%08lx.\n", buf[i].addr); continue; } qla82xx_wr_32(ha, off, buf[i].data); /* ISP requires much bigger delay to settle down, * else crb_window returns 0xffffffff */ if (off == QLA82XX_ROMUSB_GLB_SW_RESET) msleep(1000); /* ISP requires millisec delay between * successive CRB register updation */ msleep(1); } kfree(buf); /* Resetting the data and instruction cache */ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8); /* Clear all protocol processing engines */ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0); return 0; } static int qla82xx_pci_mem_write_2M(struct qla_hw_data *ha, u64 off, void *data, int size) { int i, j, ret = 0, loop, sz[2], off0; int scale, shift_amount, startword; uint32_t temp; uint64_t off8, mem_crb, tmpw, word[2] = {0, 0}; /* * If not MN, go check for MS or invalid. */ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) mem_crb = QLA82XX_CRB_QDR_NET; else { mem_crb = QLA82XX_CRB_DDR_NET; if (qla82xx_pci_mem_bound_check(ha, off, size) == 0) return qla82xx_pci_mem_write_direct(ha, off, data, size); } off0 = off & 0x7; sz[0] = (size < (8 - off0)) ? 
	    size : (8 - off0);
	sz[1] = size - sz[0];

	off8 = off & 0xfffffff0;
	loop = (((off & 0xf) + size - 1) >> 4) + 1;
	shift_amount = 4;
	scale = 2;
	startword = (off & 0xf)/8;

	/* Read the 16-byte chunk(s) covering the write so the bytes we do
	 * not touch keep their old values (read-modify-write). */
	for (i = 0; i < loop; i++) {
		if (qla82xx_pci_mem_read_2M(ha, off8 +
		    (i << shift_amount), &word[i * scale], 8))
			return -1;
	}

	switch (size) {
	case 1:
		tmpw = *((uint8_t *)data);
		break;
	case 2:
		tmpw = *((uint16_t *)data);
		break;
	case 4:
		tmpw = *((uint32_t *)data);
		break;
	case 8:
	default:
		tmpw = *((uint64_t *)data);
		break;
	}

	/* Merge the new bytes into the fetched 64-bit words. */
	if (sz[0] == 8) {
		word[startword] = tmpw;
	} else {
		word[startword] &=
		    ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
		word[startword] |= tmpw << (off0 * 8);
	}
	if (sz[1] != 0) {
		word[startword+1] &= ~(~0ULL << (sz[1] * 8));
		word[startword+1] |= tmpw >> (sz[0] * 8);
	}

	/* Push each 16-byte chunk back out through the MIU test agent. */
	for (i = 0; i < loop; i++) {
		temp = off8 + (i << shift_amount);
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
		temp = 0;
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
		temp = word[i * scale] & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
		temp = (word[i * scale] >> 32) & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
		temp = word[i*scale + 1] & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb +
		    MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
		temp = (word[i*scale + 1] >> 32) & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb +
		    MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
		temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
		temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
			if ((temp & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			if (printk_ratelimit())
				dev_err(&ha->pdev->dev,
				    "failed to write through agent.\n");
			ret = -1;
			break;
		}
	}

	return ret;
}

/*
 * Copy the bootloader image from flash into ISP memory at BOOTLD_START,
 * 8 bytes at a time via the ROM fast-read interface, then kick the peg
 * out of reset to start it.
 */
static int
qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
{
	int  i;
	long size = 0;
	long flashaddr = ha->flt_region_bootload << 2;
	long memaddr =
	    BOOTLD_START;
	u64 data;
	u32 high, low;
	size = (IMAGE_START - BOOTLD_START) / 8;

	for (i = 0; i < size; i++) {
		if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
		    (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
			return -1;
		}
		data = ((u64)high << 32) | low ;
		qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
		flashaddr += 8;
		memaddr += 8;
		/* yield periodically during the long copy */
		if (i % 0x1000 == 0)
			msleep(1);
	}
	udelay(100);
	/* release the peg so the loaded bootloader starts running */
	read_lock(&ha->hw_lock);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
	read_unlock(&ha->hw_lock);
	return 0;
}

/*
 * Read up to 8 bytes at @off from ISP memory (DDR or QDR) through the
 * MIU test agent.  Falls back to a direct window read when @off lies in
 * the directly mappable DDR range.  Returns 0 on success, -1 on agent
 * timeout.
 */
int
qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
	u64 off, void *data, int size)
{
	int i, j = 0, k, start, end, loop, sz[2], off0[2];
	int shift_amount;
	uint32_t temp;
	uint64_t off8, val, mem_crb, word[2] = {0, 0};

	/*
	 * If not MN, go check for MS or invalid.
	 */
	if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
		mem_crb = QLA82XX_CRB_QDR_NET;
	else {
		mem_crb = QLA82XX_CRB_DDR_NET;
		if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
			return qla82xx_pci_mem_read_direct(ha,
			    off, data, size);
	}

	off8 = off & 0xfffffff0;
	off0[0] = off & 0xf;
	sz[0] = (size < (16 - off0[0])) ?
	    size : (16 - off0[0]);
	shift_amount = 4;
	loop = ((off0[0] + size - 1) >> shift_amount) + 1;
	off0[1] = 0;
	sz[1] = size - sz[0];

	for (i = 0; i < loop; i++) {
		temp = off8 + (i << shift_amount);
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
		temp = 0;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
		temp = MIU_TA_CTL_ENABLE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
		temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
			if ((temp & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			if (printk_ratelimit())
				dev_err(&ha->pdev->dev,
				    "failed to read through agent.\n");
			break;
		}

		start = off0[i] >> 2;
		end   = (off0[i] + sz[i] - 1) >> 2;
		for (k = start; k <= end; k++) {
			temp = qla82xx_rd_32(ha,
					mem_crb + MIU_TEST_AGT_RDDATA(k));
			word[i] |= ((uint64_t)temp << (32 * (k & 1)));
		}
	}

	if (j >= MAX_CTL_CHECK)
		return -1;

	/* Extract the requested bytes from the fetched words. */
	if ((off0[0] & 7) == 0) {
		val = word[0];
	} else {
		val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
		    ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
	}

	switch (size) {
	case 1:
		*(uint8_t  *)data = val;
		break;
	case 2:
		*(uint16_t *)data = val;
		break;
	case 4:
		*(uint32_t *)data = val;
		break;
	case 8:
		*(uint64_t *)data = val;
		break;
	}
	return 0;
}

/*
 * Walk the unified ROM image directory and return the table descriptor
 * whose type matches @section, or NULL when not found.
 */
static struct qla82xx_uri_table_desc *
qla82xx_get_table_desc(const u8 *unirom, int section)
{
	uint32_t i;
	struct qla82xx_uri_table_desc *directory =
	    (struct qla82xx_uri_table_desc *)&unirom[0];
	__le32 offset;
	__le32 tab_type;
	__le32 entries = cpu_to_le32(directory->num_entries);

	for (i = 0; i < entries; i++) {
		offset = cpu_to_le32(directory->findex) +
		    (i * cpu_to_le32(directory->entry_size));
		tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));

		if (tab_type == section)
			return (struct qla82xx_uri_table_desc *)&unirom[offset];
	}

	return NULL;
}

/*
 * Look up the data descriptor at index @idx_offset within the table of
 * type @section in the unified ROM image.
 */
static struct qla82xx_uri_data_desc *
qla82xx_get_data_desc(struct qla_hw_data *ha,
	u32 section, u32
idx_offset) { const u8 *unirom = ha->hablob->fw->data; int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset)); struct qla82xx_uri_table_desc *tab_desc = NULL; __le32 offset; tab_desc = qla82xx_get_table_desc(unirom, section); if (!tab_desc) return NULL; offset = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * idx); return (struct qla82xx_uri_data_desc *)&unirom[offset]; } static u8 * qla82xx_get_bootld_offset(struct qla_hw_data *ha) { u32 offset = BOOTLD_START; struct qla82xx_uri_data_desc *uri_desc = NULL; if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF); if (uri_desc) offset = cpu_to_le32(uri_desc->findex); } return (u8 *)&ha->hablob->fw->data[offset]; } static __le32 qla82xx_get_fw_size(struct qla_hw_data *ha) { struct qla82xx_uri_data_desc *uri_desc = NULL; if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW, QLA82XX_URI_FIRMWARE_IDX_OFF); if (uri_desc) return cpu_to_le32(uri_desc->size); } return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]); } static u8 * qla82xx_get_fw_offs(struct qla_hw_data *ha) { u32 offset = IMAGE_START; struct qla82xx_uri_data_desc *uri_desc = NULL; if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW, QLA82XX_URI_FIRMWARE_IDX_OFF); if (uri_desc) offset = cpu_to_le32(uri_desc->findex); } return (u8 *)&ha->hablob->fw->data[offset]; } /* PCI related functions */ int qla82xx_pci_region_offset(struct pci_dev *pdev, int region) { unsigned long val = 0; u32 control; switch (region) { case 0: val = 0; break; case 1: pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control); val = control + QLA82XX_MSIX_TBL_SPACE; break; } return val; } int qla82xx_iospace_config(struct qla_hw_data *ha) { uint32_t len = 0; if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) { ql_log_pci(ql_log_fatal, 
ha->pdev, 0x000c, "Failed to reserver selected regions.\n"); goto iospace_error_exit; } /* Use MMIO operations for all accesses. */ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { ql_log_pci(ql_log_fatal, ha->pdev, 0x000d, "Region #0 not an MMIO resource, aborting.\n"); goto iospace_error_exit; } len = pci_resource_len(ha->pdev, 0); ha->nx_pcibase = (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len); if (!ha->nx_pcibase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x000e, "Cannot remap pcibase MMIO, aborting.\n"); goto iospace_error_exit; } /* Mapping of IO base pointer */ ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase + 0xbc000 + (ha->pdev->devfn << 11)); if (!ql2xdbwr) { ha->nxdb_wr_ptr = (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) + (ha->pdev->devfn << 12)), 4); if (!ha->nxdb_wr_ptr) { ql_log_pci(ql_log_fatal, ha->pdev, 0x000f, "Cannot remap MMIO, aborting.\n"); goto iospace_error_exit; } /* Mapping of IO base pointer, * door bell read and write pointer */ ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) + (ha->pdev->devfn * 8); } else { ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ? QLA82XX_CAMRAM_DB1 : QLA82XX_CAMRAM_DB2); } ha->max_req_queues = ha->max_rsp_queues = 1; ha->msix_count = ha->max_rsp_queues + 1; ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006, "nx_pci_base=%p iobase=%p " "max_req_queues=%d msix_count=%d.\n", (void *)ha->nx_pcibase, ha->iobase, ha->max_req_queues, ha->msix_count); ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010, "nx_pci_base=%p iobase=%p " "max_req_queues=%d msix_count=%d.\n", (void *)ha->nx_pcibase, ha->iobase, ha->max_req_queues, ha->msix_count); return 0; iospace_error_exit: return -ENOMEM; } /* GS related functions */ /* Initialization related functions */ /** * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers. * @ha: HA context * * Returns 0 on success. 
 */
int qla82xx_pci_config(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int ret;

	pci_set_master(ha->pdev);
	ret = pci_set_mwi(ha->pdev);
	ha->chip_revision = ha->pdev->revision;
	ql_dbg(ql_dbg_init, vha, 0x0043,
	    "Chip revision:%d.\n",
	    ha->chip_revision);
	return 0;
}

/**
 * qla82xx_reset_chip() - Setup ISP82xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
void qla82xx_reset_chip(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	ha->isp_ops->disable_intrs(ha);
}

/* Program request/response ring parameters into the init control block
 * and zero the ring in/out pointers. */
void qla82xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
	struct init_cb_81xx *icb;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_81xx *)ha->init_cb;
	icb->request_q_outpointer = __constant_cpu_to_le16(0);
	icb->response_q_inpointer = __constant_cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	WRT_REG_DWORD((unsigned long  __iomem *)&reg->req_q_out[0], 0);
	WRT_REG_DWORD((unsigned long  __iomem *)&reg->rsp_q_in[0], 0);
	WRT_REG_DWORD((unsigned long  __iomem *)&reg->rsp_q_out[0], 0);
}

/*
 * Copy the bootloader and firmware images from the request_firmware()
 * blob into ISP memory, write the BDINFO magic, and release the peg to
 * start booting.  Returns 0 on success, -EIO on a memory-write failure.
 */
static int
qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
{
	u64 *ptr64;
	u32 i, flashaddr, size;
	__le64 data;

	size = (IMAGE_START - BOOTLD_START) / 8;

	ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
	flashaddr = BOOTLD_START;

	for (i = 0; i < size; i++) {
		data = cpu_to_le64(ptr64[i]);
		if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
			return -EIO;
		flashaddr += 8;
	}

	flashaddr = FLASH_ADDR_START;
	size = (__force u32)qla82xx_get_fw_size(ha) / 8;
	ptr64 = (u64 *)qla82xx_get_fw_offs(ha);

	for (i = 0; i <
	    size; i++) {
		data = cpu_to_le64(ptr64[i]);

		if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
			return -EIO;
		flashaddr += 8;
	}
	udelay(100);

	/* Write a magic value to CAMRAM register
	 * at a specified offset to indicate
	 * that all data is written and
	 * ready for firmware to initialize.
	 */
	qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC);

	read_lock(&ha->hw_lock);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
	read_unlock(&ha->hw_lock);
	return 0;
}

/*
 * Find the product-table entry in a unified ROM image that matches this
 * chip revision and record its offset in ha->file_prd_off.
 * Returns 0 on success, -1 when no matching entry exists.
 */
static int
qla82xx_set_product_offset(struct qla_hw_data *ha)
{
	struct qla82xx_uri_table_desc *ptab_desc = NULL;
	const uint8_t *unirom = ha->hablob->fw->data;
	uint32_t i;
	__le32 entries;
	__le32 flags, file_chiprev, offset;
	uint8_t chiprev = ha->chip_revision;
	/* Hardcoding mn_present flag for P3P */
	int mn_present = 0;
	uint32_t flagbit;

	ptab_desc = qla82xx_get_table_desc(unirom,
		 QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
	if (!ptab_desc)
		return -1;

	entries = cpu_to_le32(ptab_desc->num_entries);

	for (i = 0; i < entries; i++) {
		offset = cpu_to_le32(ptab_desc->findex) +
		    (i * cpu_to_le32(ptab_desc->entry_size));
		flags = cpu_to_le32(*((int *)&unirom[offset] +
		    QLA82XX_URI_FLAGS_OFF));
		file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
		    QLA82XX_URI_CHIP_REV_OFF));

		flagbit = mn_present ?
1 : 2; if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) { ha->file_prd_off = offset; return 0; } } return -1; } static int qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type) { __le32 val; uint32_t min_size; struct qla_hw_data *ha = vha->hw; const struct firmware *fw = ha->hablob->fw; ha->fw_type = fw_type; if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) { if (qla82xx_set_product_offset(ha)) return -EINVAL; min_size = QLA82XX_URI_FW_MIN_SIZE; } else { val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]); if ((__force u32)val != QLA82XX_BDINFO_MAGIC) return -EINVAL; min_size = QLA82XX_FW_MIN_SIZE; } if (fw->size < min_size) return -EINVAL; return 0; } static int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha) { u32 val = 0; int retries = 60; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); do { read_lock(&ha->hw_lock); val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE); read_unlock(&ha->hw_lock); switch (val) { case PHAN_INITIALIZE_COMPLETE: case PHAN_INITIALIZE_ACK: return QLA_SUCCESS; case PHAN_INITIALIZE_FAILED: break; default: break; } ql_log(ql_log_info, vha, 0x00a8, "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n", val, retries); msleep(500); } while (--retries); ql_log(ql_log_fatal, vha, 0x00a9, "Cmd Peg initialization failed: 0x%x.\n", val); val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); read_lock(&ha->hw_lock); qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); read_unlock(&ha->hw_lock); return QLA_FUNCTION_FAILED; } static int qla82xx_check_rcvpeg_state(struct qla_hw_data *ha) { u32 val = 0; int retries = 60; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); do { read_lock(&ha->hw_lock); val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE); read_unlock(&ha->hw_lock); switch (val) { case PHAN_INITIALIZE_COMPLETE: case PHAN_INITIALIZE_ACK: return QLA_SUCCESS; case PHAN_INITIALIZE_FAILED: break; default: break; } ql_log(ql_log_info, vha, 0x00ab, "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n", val, retries); msleep(500); } 
while (--retries); ql_log(ql_log_fatal, vha, 0x00ac, "Rcv Peg initializatin failed: 0x%x.\n", val); read_lock(&ha->hw_lock); qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED); read_unlock(&ha->hw_lock); return QLA_FUNCTION_FAILED; } /* ISR related functions */ static struct qla82xx_legacy_intr_set legacy_intr[] = \ QLA82XX_LEGACY_INTR_CONFIG; /* * qla82xx_mbx_completion() - Process mailbox command completions. * @ha: SCSI driver HA context * @mb0: Mailbox0 register */ static void qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) { uint16_t cnt; uint16_t __iomem *wptr; struct qla_hw_data *ha = vha->hw; struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; wptr = (uint16_t __iomem *)&reg->mailbox_out[1]; /* Load return mailbox registers. */ ha->flags.mbox_int = 1; ha->mailbox_out[0] = mb0; for (cnt = 1; cnt < ha->mbx_count; cnt++) { ha->mailbox_out[cnt] = RD_REG_WORD(wptr); wptr++; } if (!ha->mcp) ql_dbg(ql_dbg_async, vha, 0x5053, "MBX pointer ERROR.\n"); } /* * qla82xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. * @irq: * @dev_id: SCSI driver HA context * @regs: * * Called by system whenever the host adapter generates an interrupt. * * Returns handled flag. 
 */
irqreturn_t
qla82xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_82xx __iomem *reg;
	int status = 0, status1 = 0;
	unsigned long	flags;
	unsigned long	iter;
	uint32_t	stat = 0;
	uint16_t	mb[4];

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0xb053,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;

	/* In legacy (INTx) mode, verify the interrupt is really ours. */
	if (!ha->flags.msi_enabled) {
		status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
		if (!(status & ha->nx_legacy_intr.int_vec_bit))
			return IRQ_NONE;

		status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
		if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
			return IRQ_NONE;
	}

	/* clear the interrupt */
	qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla82xx_rd_32(ha, ISR_INT_VECTOR);
	qla82xx_rd_32(ha, ISR_INT_VECTOR);

	reg = &ha->iobase->isp82;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 1; iter--; ) {

		if (RD_REG_DWORD(&reg->host_int)) {
			stat = RD_REG_DWORD(&reg->host_status);

			switch (stat & 0xff) {
			case 0x1:
			case 0x2:
			case 0x10:
			case 0x11:
				qla82xx_mbx_completion(vha, MSW(stat));
				status |= MBX_INTERRUPT;
				break;
			case 0x12:
				mb[0] = MSW(stat);
				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case 0x13:
				qla24xx_process_response_queue(vha, rsp);
				break;
			default:
				ql_dbg(ql_dbg_async, vha, 0x5054,
				    "Unrecognized interrupt type (%d).\n",
				    stat & 0xff);
				break;
			}
		}
		WRT_REG_DWORD(&reg->host_int, 0);
	}

#ifdef QL_DEBUG_LEVEL_17
	if (!irq && ha->flags.eeh_busy)
		ql_log(ql_log_warn, vha, 0x503d,
		    "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
		    status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
#endif

	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* re-enable the legacy interrupt mask */
	if (!ha->flags.msi_enabled)
		qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);

	return IRQ_HANDLED;
}

/* MSI-X default vector: mailbox, async-event and response-queue work. */
irqreturn_t
qla82xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_82xx __iomem *reg;
	int status = 0;
	unsigned long flags;
	uint32_t stat = 0;
	uint16_t mb[4];

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
			"%s(): NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;

	reg = &ha->iobase->isp82;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		if (RD_REG_DWORD(&reg->host_int)) {
			stat = RD_REG_DWORD(&reg->host_status);

			switch (stat & 0xff) {
			case 0x1:
			case 0x2:
			case 0x10:
			case 0x11:
				qla82xx_mbx_completion(vha, MSW(stat));
				status |= MBX_INTERRUPT;
				break;
			case 0x12:
				mb[0] = MSW(stat);
				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case 0x13:
				qla24xx_process_response_queue(vha, rsp);
				break;
			default:
				ql_dbg(ql_dbg_async, vha, 0x5041,
				    "Unrecognized interrupt type (%d).\n",
				    stat & 0xff);
				break;
			}
		}
		WRT_REG_DWORD(&reg->host_int, 0);
	} while (0);

#ifdef QL_DEBUG_LEVEL_17
	if (!irq && ha->flags.eeh_busy)
		ql_log(ql_log_warn, vha, 0x5044,
		    "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
		    status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
#endif

	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}

/* MSI-X response-queue vector: drain the response queue only. */
irqreturn_t
qla82xx_msix_rsp_q(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_82xx __iomem *reg;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
			"%s(): NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp82;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
WRT_REG_DWORD(&reg->host_int, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } void qla82xx_poll(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_82xx __iomem *reg; int status = 0; uint32_t stat; uint16_t mb[4]; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO "%s(): NULL response queue pointer.\n", __func__); return; } ha = rsp->hw; reg = &ha->iobase->isp82; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); if (RD_REG_DWORD(&reg->host_int)) { stat = RD_REG_DWORD(&reg->host_status); switch (stat & 0xff) { case 0x1: case 0x2: case 0x10: case 0x11: qla82xx_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; break; case 0x12: mb[0] = MSW(stat); mb[1] = RD_REG_WORD(&reg->mailbox_out[1]); mb[2] = RD_REG_WORD(&reg->mailbox_out[2]); mb[3] = RD_REG_WORD(&reg->mailbox_out[3]); qla2x00_async_event(vha, rsp, mb); break; case 0x13: qla24xx_process_response_queue(vha, rsp); break; default: ql_dbg(ql_dbg_p3p, vha, 0xb013, "Unrecognized interrupt type (%d).\n", stat * 0xff); break; } } WRT_REG_DWORD(&reg->host_int, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); } void qla82xx_enable_intrs(struct qla_hw_data *ha) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_mbx_intr_enable(vha); spin_lock_irq(&ha->hardware_lock); qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); spin_unlock_irq(&ha->hardware_lock); ha->interrupts_on = 1; } void qla82xx_disable_intrs(struct qla_hw_data *ha) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_mbx_intr_disable(vha); spin_lock_irq(&ha->hardware_lock); qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); spin_unlock_irq(&ha->hardware_lock); ha->interrupts_on = 0; } void qla82xx_init_flags(struct qla_hw_data *ha) { struct qla82xx_legacy_intr_set *nx_legacy_intr; /* ISP 8021 initializations */ rwlock_init(&ha->hw_lock); ha->qdr_sn_window = -1; 
ha->ddr_mn_window = -1; ha->curr_window = 255; ha->portnum = PCI_FUNC(ha->pdev->devfn); nx_legacy_intr = &legacy_intr[ha->portnum]; ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg; ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; } inline void qla82xx_set_idc_version(scsi_qla_host_t *vha) { int idc_ver; uint32_t drv_active; struct qla_hw_data *ha = vha->hw; drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); if (drv_active == (QLA82XX_DRV_ACTIVE << (ha->portnum * 4))) { qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION); ql_log(ql_log_info, vha, 0xb082, "IDC version updated to %d\n", QLA82XX_IDC_VERSION); } else { idc_ver = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_IDC_VERSION); if (idc_ver != QLA82XX_IDC_VERSION) ql_log(ql_log_info, vha, 0xb083, "qla2xxx driver IDC version %d is not compatible " "with IDC version %d of the other drivers\n", QLA82XX_IDC_VERSION, idc_ver); } } inline void qla82xx_set_drv_active(scsi_qla_host_t *vha) { uint32_t drv_active; struct qla_hw_data *ha = vha->hw; drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); /* If reset value is all FF's, initialize DRV_ACTIVE */ if (drv_active == 0xffffffff) { qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, QLA82XX_DRV_NOT_ACTIVE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); } drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); } inline void qla82xx_clear_drv_active(struct qla_hw_data *ha) { uint32_t drv_active; drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); } static inline int qla82xx_need_reset(struct qla_hw_data *ha) { uint32_t drv_state; int rval; if (ha->flags.nic_core_reset_owner) return 1; else { drv_state = qla82xx_rd_32(ha, 
QLA82XX_CRB_DRV_STATE); rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); return rval; } } static inline void qla82xx_set_rst_ready(struct qla_hw_data *ha) { uint32_t drv_state; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); /* If reset value is all FF's, initialize DRV_STATE */ if (drv_state == 0xffffffff) { qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); } drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); ql_dbg(ql_dbg_init, vha, 0x00bb, "drv_state = 0x%08x.\n", drv_state); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); } static inline void qla82xx_clear_rst_ready(struct qla_hw_data *ha) { uint32_t drv_state; drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); } static inline void qla82xx_set_qsnt_ready(struct qla_hw_data *ha) { uint32_t qsnt_state; qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); } void qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint32_t qsnt_state; qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); } static int qla82xx_load_fw(scsi_qla_host_t *vha) { int rst; struct fw_blob *blob; struct qla_hw_data *ha = vha->hw; if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0x009f, "Error during CRB initialization.\n"); return QLA_FUNCTION_FAILED; } udelay(500); /* Bring QM and CAMRAM out of reset */ rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET); rst &= ~((1 << 28) | (1 << 24)); qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst); /* * FW Load priority: * 1) Operational firmware 
residing in flash. * 2) Firmware via request-firmware interface (.bin file). */ if (ql2xfwloadbin == 2) goto try_blob_fw; ql_log(ql_log_info, vha, 0x00a0, "Attempting to load firmware from flash.\n"); if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { ql_log(ql_log_info, vha, 0x00a1, "Firmware loaded successfully from flash.\n"); return QLA_SUCCESS; } else { ql_log(ql_log_warn, vha, 0x0108, "Firmware load from flash failed.\n"); } try_blob_fw: ql_log(ql_log_info, vha, 0x00a2, "Attempting to load firmware from blob.\n"); /* Load firmware blob. */ blob = ha->hablob = qla2x00_request_firmware(vha); if (!blob) { ql_log(ql_log_fatal, vha, 0x00a3, "Firmware image not present.\n"); goto fw_load_failed; } /* Validating firmware blob */ if (qla82xx_validate_firmware_blob(vha, QLA82XX_FLASH_ROMIMAGE)) { /* Fallback to URI format */ if (qla82xx_validate_firmware_blob(vha, QLA82XX_UNIFIED_ROMIMAGE)) { ql_log(ql_log_fatal, vha, 0x00a4, "No valid firmware image found.\n"); return QLA_FUNCTION_FAILED; } } if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) { ql_log(ql_log_info, vha, 0x00a5, "Firmware loaded successfully from binary blob.\n"); return QLA_SUCCESS; } else { ql_log(ql_log_fatal, vha, 0x00a6, "Firmware load failed for binary blob.\n"); blob->fw = NULL; blob = NULL; goto fw_load_failed; } return QLA_SUCCESS; fw_load_failed: return QLA_FUNCTION_FAILED; } int qla82xx_start_firmware(scsi_qla_host_t *vha) { uint16_t lnk; struct qla_hw_data *ha = vha->hw; /* scrub dma mask expansion register */ qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE); /* Put both the PEG CMD and RCV PEG to default state * of 0 before resetting the hardware */ qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0); qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0); /* Overwrite stale initialization register values */ qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0); qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0); if (qla82xx_load_fw(vha) != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0x00a7, "Error trying to start 
fw.\n");
		return QLA_FUNCTION_FAILED;
	}

	/* Handshake with the card before we register the devices. */
	if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
		ql_log(ql_log_fatal, vha, 0x00aa,
		    "Error during card handshake.\n");
		return QLA_FUNCTION_FAILED;
	}

	/* Negotiated Link width */
	pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk);
	ha->link_width = (lnk >> 4) & 0x3f;

	/* Synchronize with Receive peg */
	return qla82xx_check_rcvpeg_state(ha);
}

/*
 * Read 'length' bytes of flash starting at byte address 'faddr' into
 * 'dwptr' as little-endian dwords via ROM fast reads.  On a fast-read
 * failure the loop stops early, leaving the buffer partially filled.
 * Always returns 'dwptr'.
 */
static uint32_t *
qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
	uint32_t length)
{
	uint32_t i;
	uint32_t val;
	struct qla_hw_data *ha = vha->hw;

	/* Dword reads to flash. */
	for (i = 0; i < length/4; i++, faddr += 4) {
		if (qla82xx_rom_fast_read(ha, faddr, &val)) {
			ql_log(ql_log_warn, vha, 0x0106,
			    "Do ROM fast read failed.\n");
			goto done_read;
		}
		dwptr[i] = __constant_cpu_to_le32(val);
	}
done_read:
	return dwptr;
}

/*
 * Clear the flash block-protect bits (under the ROM hardware lock) so
 * sectors can be erased/programmed.  If the status-register write fails
 * the protect bits are put back.  Returns 0 on success, negative on
 * failure; the ROM lock is always released on exit.
 */
static int
qla82xx_unprotect_flash(struct qla_hw_data *ha)
{
	int ret;
	uint32_t val;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	ret = ql82xx_rom_lock_d(ha);
	if (ret < 0) {
		ql_log(ql_log_warn, vha, 0xb014, "ROM Lock failed.\n");
		return ret;
	}

	ret = qla82xx_read_status_reg(ha, &val);
	if (ret < 0)
		goto done_unprotect;

	val &= ~(BLOCK_PROTECT_BITS << 2);
	ret = qla82xx_write_status_reg(ha, val);
	if (ret < 0) {
		/* Restore the protect bits we just tried to clear. */
		val |= (BLOCK_PROTECT_BITS << 2);
		qla82xx_write_status_reg(ha, val);
	}

	if (qla82xx_write_disable_flash(ha) != 0)
		ql_log(ql_log_warn, vha, 0xb015, "Write disable failed.\n");

done_unprotect:
	qla82xx_rom_unlock(ha);
	return ret;
}

/*
 * Re-assert the flash block-protect bits (LOCK all sectors) under the
 * ROM hardware lock.  Counterpart of qla82xx_unprotect_flash().
 */
static int
qla82xx_protect_flash(struct qla_hw_data *ha)
{
	int ret;
	uint32_t val;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	ret = ql82xx_rom_lock_d(ha);
	if (ret < 0) {
		ql_log(ql_log_warn, vha, 0xb016, "ROM Lock failed.\n");
		return ret;
	}

	ret = qla82xx_read_status_reg(ha, &val);
	if (ret < 0)
		goto done_protect;

	val |= (BLOCK_PROTECT_BITS << 2);
	/* LOCK all sectors */
	ret = qla82xx_write_status_reg(ha, val);
	if (ret < 0)
		ql_log(ql_log_warn, vha,
0xb017,
		    "Write status register failed.\n");

	if (qla82xx_write_disable_flash(ha) != 0)
		ql_log(ql_log_warn, vha, 0xb018, "Write disable failed.\n");

done_protect:
	qla82xx_rom_unlock(ha);
	return ret;
}

/*
 * Erase one flash sector: take the ROM hardware lock, set write-enable,
 * issue the M25P sector-erase (SE) opcode at 'addr' and wait for both
 * the ROM transaction and the flash write cycle to finish.  Returns 0
 * on success, negative on failure; the ROM lock is always released.
 */
static int
qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
{
	int ret = 0;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	ret = ql82xx_rom_lock_d(ha);
	if (ret < 0) {
		ql_log(ql_log_warn, vha, 0xb019, "ROM Lock failed.\n");
		return ret;
	}

	qla82xx_flash_set_write_enable(ha);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
	/* 3-byte address phase for the serial flash command. */
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
	if (qla82xx_wait_rom_done(ha)) {
		ql_log(ql_log_warn, vha, 0xb01a,
		    "Error waiting for rom done.\n");
		ret = -1;
		goto done;
	}
	ret = qla82xx_flash_wait_write_finish(ha);
done:
	qla82xx_rom_unlock(ha);
	return ret;
}

/*
 * Address and length are byte address
 */
/* Reads flash into 'buf' with SCSI request traffic blocked around it. */
uint8_t *
qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
	uint32_t offset, uint32_t length)
{
	scsi_block_requests(vha->host);
	qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
	scsi_unblock_requests(vha->host);
	return buf;
}

/*
 * Program 'dwords' dwords to flash starting at byte address 'faddr':
 * unprotect, erase each sector on its boundary, write (burst DMA when
 * available, per-dword otherwise), then re-protect.
 *
 * NOTE(review): page_mode is hard-coded to 0 below, which makes the
 * burst-write buffer allocation path unreachable as written — confirm
 * whether this is intentional.
 */
static int
qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
	uint32_t faddr, uint32_t dwords)
{
	int ret;
	uint32_t liter;
	uint32_t sec_mask, rest_addr;
	dma_addr_t optrom_dma;
	void *optrom = NULL;
	int page_mode = 0;
	struct qla_hw_data *ha = vha->hw;

	ret = -1;

	/* Prepare burst-capable write on supported ISPs.
*/ if (page_mode && !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) { optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, &optrom_dma, GFP_KERNEL); if (!optrom) { ql_log(ql_log_warn, vha, 0xb01b, "Unable to allocate memory " "for optrom burst write (%x KB).\n", OPTROM_BURST_SIZE / 1024); } } rest_addr = ha->fdt_block_size - 1; sec_mask = ~rest_addr; ret = qla82xx_unprotect_flash(ha); if (ret) { ql_log(ql_log_warn, vha, 0xb01c, "Unable to unprotect flash for update.\n"); goto write_done; } for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) { /* Are we at the beginning of a sector? */ if ((faddr & rest_addr) == 0) { ret = qla82xx_erase_sector(ha, faddr); if (ret) { ql_log(ql_log_warn, vha, 0xb01d, "Unable to erase sector: address=%x.\n", faddr); break; } } /* Go with burst-write. */ if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) { /* Copy data to DMA'ble buffer. */ memcpy(optrom, dwptr, OPTROM_BURST_SIZE); ret = qla2x00_load_ram(vha, optrom_dma, (ha->flash_data_off | faddr), OPTROM_BURST_DWORDS); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xb01e, "Unable to burst-write optrom segment " "(%x/%x/%llx).\n", ret, (ha->flash_data_off | faddr), (unsigned long long)optrom_dma); ql_log(ql_log_warn, vha, 0xb01f, "Reverting to slow-write.\n"); dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, optrom, optrom_dma); optrom = NULL; } else { liter += OPTROM_BURST_DWORDS - 1; faddr += OPTROM_BURST_DWORDS - 1; dwptr += OPTROM_BURST_DWORDS - 1; continue; } } ret = qla82xx_write_flash_dword(ha, faddr, cpu_to_le32(*dwptr)); if (ret) { ql_dbg(ql_dbg_p3p, vha, 0xb020, "Unable to program flash address=%x data=%x.\n", faddr, *dwptr); break; } } ret = qla82xx_protect_flash(ha); if (ret) ql_log(ql_log_warn, vha, 0xb021, "Unable to protect flash after update.\n"); write_done: if (optrom) dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, optrom, optrom_dma); return ret; } int qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, 
uint32_t offset, uint32_t length)
{
	int rval;

	/* Suspend HBA. */
	scsi_block_requests(vha->host);
	rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
		length >> 2);
	scsi_unblock_requests(vha->host);

	/* Convert return ISP82xx to generic */
	if (rval)
		rval = QLA_FUNCTION_FAILED;
	else
		rval = QLA_SUCCESS;
	return rval;
}

/*
 * Advance the request queue ring index (wrapping at req->length) and
 * ring the ISP82xx doorbell.  When ql2xdbwr is set the doorbell is
 * written through the CRB window; otherwise it is written through the
 * mapped doorbell region and re-written until the hardware echoes the
 * value back at nxdb_rd_ptr.
 */
void
qla82xx_start_iocbs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct device_reg_82xx __iomem *reg;
	uint32_t dbval;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	reg = &ha->iobase->isp82;
	dbval = 0x04 | (ha->portnum << 5);

	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
		wmb();
		/* Spin until the device acknowledges the doorbell value. */
		while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr,
				dbval);
			wmb();
		}
	}
}

/*
 * Force-release the ROM lock, e.g. when its previous owner died while
 * holding it.
 */
static void
qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	if (qla82xx_rom_lock(ha))
		/* Someone else is holding the lock. */
		ql_log(ql_log_info, vha, 0xb022, "Resetting rom_lock.\n");

	/*
	 * Either we got the lock, or someone
	 * else died while holding it.
	 * In either case, unlock.
*/
	qla82xx_rom_unlock(ha);
}

/*
 * qla82xx_device_bootstrap
 *    Initialize device, set DEV_READY, start fw
 *
 * Note:
 *      IDC lock must be held upon entry
 *
 * Return:
 *    Success : 0
 *    Failed  : 1
 */
static int
qla82xx_device_bootstrap(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	int i, timeout;
	uint32_t old_count, count;
	struct qla_hw_data *ha = vha->hw;
	int need_reset = 0, peg_stuck = 1;

	need_reset = qla82xx_need_reset(ha);
	old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);

	/*
	 * Sample the PEG alive counter over ~2s (10 x 200ms); if it never
	 * changes, the firmware pegs are considered stuck.  A signal
	 * during the sleep marks the device FAILED and aborts.
	 */
	for (i = 0; i < 10; i++) {
		timeout = msleep_interruptible(200);
		if (timeout) {
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
				QLA8XXX_DEV_FAILED);
			return QLA_FUNCTION_FAILED;
		}

		count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
		if (count != old_count)
			peg_stuck = 0;
	}

	if (need_reset) {
		/* We are trying to perform a recovery here. */
		if (peg_stuck)
			qla82xx_rom_lock_recovery(ha);
		goto dev_initialize;
	} else {
		/* Start of day for this ha context. */
		if (peg_stuck) {
			/* Either we are the first or recovery in progress. */
			qla82xx_rom_lock_recovery(ha);
			goto dev_initialize;
		} else
			/* Firmware already running.
*/ goto dev_ready; } return rval; dev_initialize: /* set to DEV_INITIALIZING */ ql_log(ql_log_info, vha, 0x009e, "HW State: INITIALIZING.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_INITIALIZING); qla82xx_idc_unlock(ha); rval = qla82xx_start_firmware(vha); qla82xx_idc_lock(ha); if (rval != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0x00ad, "HW State: FAILED.\n"); qla82xx_clear_drv_active(ha); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED); return rval; } dev_ready: ql_log(ql_log_info, vha, 0x00ae, "HW State: READY.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_READY); return QLA_SUCCESS; } /* * qla82xx_need_qsnt_handler * Code to start quiescence sequence * * Note: * IDC lock must be held upon entry * * Return: void */ static void qla82xx_need_qsnt_handler(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint32_t dev_state, drv_state, drv_active; unsigned long reset_timeout; if (vha->flags.online) { /*Block any further I/O and wait for pending cmnds to complete*/ qla2x00_quiesce_io(vha); } /* Set the quiescence ready bit */ qla82xx_set_qsnt_ready(ha); /*wait for 30 secs for other functions to ack */ reset_timeout = jiffies + (30 * HZ); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); /* Its 2 that is written when qsnt is acked, moving one bit */ drv_active = drv_active << 0x01; while (drv_state != drv_active) { if (time_after_eq(jiffies, reset_timeout)) { /* quiescence timeout, other functions didn't ack * changing the state to DEV_READY */ ql_log(ql_log_info, vha, 0xb023, "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d " "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME, drv_active, drv_state); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_READY); ql_log(ql_log_info, vha, 0xb025, "HW State: DEV_READY.\n"); qla82xx_idc_unlock(ha); qla2x00_perform_loop_resync(vha); qla82xx_idc_lock(ha); qla82xx_clear_qsnt_ready(vha); return; } qla82xx_idc_unlock(ha); msleep(1000); 
qla82xx_idc_lock(ha); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); drv_active = drv_active << 0x01; } dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); /* everyone acked so set the state to DEV_QUIESCENCE */ if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { ql_log(ql_log_info, vha, 0xb026, "HW State: DEV_QUIESCENT.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_QUIESCENT); } } /* * qla82xx_wait_for_state_change * Wait for device state to change from given current state * * Note: * IDC lock must not be held upon entry * * Return: * Changed device state. */ uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state) { struct qla_hw_data *ha = vha->hw; uint32_t dev_state; do { msleep(1000); qla82xx_idc_lock(ha); dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); qla82xx_idc_unlock(ha); } while (dev_state == curr_state); return dev_state; } void qla8xxx_dev_failed_handler(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; /* Disable the board */ ql_log(ql_log_fatal, vha, 0x00b8, "Disabling the board.\n"); if (IS_QLA82XX(ha)) { qla82xx_clear_drv_active(ha); qla82xx_idc_unlock(ha); } /* Set DEV_FAILED flag to disable timer */ vha->device_flags |= DFLG_DEV_FAILED; qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); qla2x00_mark_all_devices_lost(vha, 0); vha->flags.online = 0; vha->flags.init_done = 0; } /* * qla82xx_need_reset_handler * Code to start reset sequence * * Note: * IDC lock must be held upon entry * * Return: * Success : 0 * Failed : 1 */ static void qla82xx_need_reset_handler(scsi_qla_host_t *vha) { uint32_t dev_state, drv_state, drv_active; uint32_t active_mask = 0; unsigned long reset_timeout; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; if (vha->flags.online) { qla82xx_idc_unlock(ha); qla2x00_abort_isp_cleanup(vha); ha->isp_ops->get_flash_version(vha, req->ring); ha->isp_ops->nvram_config(vha); qla82xx_idc_lock(ha); } 
drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); if (!ha->flags.nic_core_reset_owner) { ql_dbg(ql_dbg_p3p, vha, 0xb028, "reset_acknowledged by 0x%x\n", ha->portnum); qla82xx_set_rst_ready(ha); } else { active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); drv_active &= active_mask; ql_dbg(ql_dbg_p3p, vha, 0xb029, "active_mask: 0x%08x\n", active_mask); } /* wait for 10 seconds for reset ack from all functions */ reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); ql_dbg(ql_dbg_p3p, vha, 0xb02a, "drv_state: 0x%08x, drv_active: 0x%08x, " "dev_state: 0x%08x, active_mask: 0x%08x\n", drv_state, drv_active, dev_state, active_mask); while (drv_state != drv_active && dev_state != QLA8XXX_DEV_INITIALIZING) { if (time_after_eq(jiffies, reset_timeout)) { ql_log(ql_log_warn, vha, 0x00b5, "Reset timeout.\n"); break; } qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); if (ha->flags.nic_core_reset_owner) drv_active &= active_mask; dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); } ql_dbg(ql_dbg_p3p, vha, 0xb02b, "drv_state: 0x%08x, drv_active: 0x%08x, " "dev_state: 0x%08x, active_mask: 0x%08x\n", drv_state, drv_active, dev_state, active_mask); ql_log(ql_log_info, vha, 0x00b6, "Device state is 0x%x = %s.\n", dev_state, dev_state < MAX_STATES ? 
qdev_state(dev_state) : "Unknown"); /* Force to DEV_COLD unless someone else is starting a reset */ if (dev_state != QLA8XXX_DEV_INITIALIZING && dev_state != QLA8XXX_DEV_COLD) { ql_log(ql_log_info, vha, 0x00b7, "HW State: COLD/RE-INIT.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD); qla82xx_set_rst_ready(ha); if (ql2xmdenable) { if (qla82xx_md_collect(vha)) ql_log(ql_log_warn, vha, 0xb02c, "Minidump not collected.\n"); } else ql_log(ql_log_warn, vha, 0xb04f, "Minidump disabled.\n"); } } int qla82xx_check_md_needed(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint16_t fw_major_version, fw_minor_version, fw_subminor_version; int rval = QLA_SUCCESS; fw_major_version = ha->fw_major_version; fw_minor_version = ha->fw_minor_version; fw_subminor_version = ha->fw_subminor_version; rval = qla2x00_get_fw_version(vha); if (rval != QLA_SUCCESS) return rval; if (ql2xmdenable) { if (!ha->fw_dumped) { if (fw_major_version != ha->fw_major_version || fw_minor_version != ha->fw_minor_version || fw_subminor_version != ha->fw_subminor_version) { ql_log(ql_log_info, vha, 0xb02d, "Firmware version differs " "Previous version: %d:%d:%d - " "New version: %d:%d:%d\n", fw_major_version, fw_minor_version, fw_subminor_version, ha->fw_major_version, ha->fw_minor_version, ha->fw_subminor_version); /* Release MiniDump resources */ qla82xx_md_free(vha); /* ALlocate MiniDump resources */ qla82xx_md_prep(vha); } } else ql_log(ql_log_info, vha, 0xb02e, "Firmware dump available to retrieve\n"); } return rval; } static int qla82xx_check_fw_alive(scsi_qla_host_t *vha) { uint32_t fw_heartbeat_counter; int status = 0; fw_heartbeat_counter = qla82xx_rd_32(vha->hw, QLA82XX_PEG_ALIVE_COUNTER); /* all 0xff, assume AER/EEH in progress, ignore */ if (fw_heartbeat_counter == 0xffffffff) { ql_dbg(ql_dbg_timer, vha, 0x6003, "FW heartbeat counter is 0xffffffff, " "returning status=%d.\n", status); return status; } if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { 
vha->seconds_since_last_heartbeat++; /* FW not alive after 2 seconds */ if (vha->seconds_since_last_heartbeat == 2) { vha->seconds_since_last_heartbeat = 0; status = 1; } } else vha->seconds_since_last_heartbeat = 0; vha->fw_heartbeat_counter = fw_heartbeat_counter; if (status) ql_dbg(ql_dbg_timer, vha, 0x6004, "Returning status=%d.\n", status); return status; } /* * qla82xx_device_state_handler * Main state handler * * Note: * IDC lock must be held upon entry * * Return: * Success : 0 * Failed : 1 */ int qla82xx_device_state_handler(scsi_qla_host_t *vha) { uint32_t dev_state; uint32_t old_dev_state; int rval = QLA_SUCCESS; unsigned long dev_init_timeout; struct qla_hw_data *ha = vha->hw; int loopcount = 0; qla82xx_idc_lock(ha); if (!vha->flags.init_done) { qla82xx_set_drv_active(vha); qla82xx_set_idc_version(vha); } dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); old_dev_state = dev_state; ql_log(ql_log_info, vha, 0x009b, "Device state is 0x%x = %s.\n", dev_state, dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown"); /* wait for 30 seconds for device to go ready */ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); while (1) { if (time_after_eq(jiffies, dev_init_timeout)) { ql_log(ql_log_fatal, vha, 0x009c, "Device init failed.\n"); rval = QLA_FUNCTION_FAILED; break; } dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); if (old_dev_state != dev_state) { loopcount = 0; old_dev_state = dev_state; } if (loopcount < 5) { ql_log(ql_log_info, vha, 0x009d, "Device state is 0x%x = %s.\n", dev_state, dev_state < MAX_STATES ? 
qdev_state(dev_state) : "Unknown"); } switch (dev_state) { case QLA8XXX_DEV_READY: ha->flags.nic_core_reset_owner = 0; goto rel_lock; case QLA8XXX_DEV_COLD: rval = qla82xx_device_bootstrap(vha); break; case QLA8XXX_DEV_INITIALIZING: qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); break; case QLA8XXX_DEV_NEED_RESET: if (!ql2xdontresethba) qla82xx_need_reset_handler(vha); else { qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); } dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); break; case QLA8XXX_DEV_NEED_QUIESCENT: qla82xx_need_qsnt_handler(vha); /* Reset timeout value after quiescence handler */ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\ * HZ); break; case QLA8XXX_DEV_QUIESCENT: /* Owner will exit and other will wait for the state * to get changed */ if (ha->flags.quiesce_owner) goto rel_lock; qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); /* Reset timeout value after quiescence handler */ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\ * HZ); break; case QLA8XXX_DEV_FAILED: qla8xxx_dev_failed_handler(vha); rval = QLA_FUNCTION_FAILED; goto exit; default: qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); } loopcount++; } rel_lock: qla82xx_idc_unlock(ha); exit: return rval; } static int qla82xx_check_temp(scsi_qla_host_t *vha) { uint32_t temp, temp_state, temp_val; struct qla_hw_data *ha = vha->hw; temp = qla82xx_rd_32(ha, CRB_TEMP_STATE); temp_state = qla82xx_get_temp_state(temp); temp_val = qla82xx_get_temp_val(temp); if (temp_state == QLA82XX_TEMP_PANIC) { ql_log(ql_log_warn, vha, 0x600e, "Device temperature %d degrees C exceeds " " maximum allowed. Hardware has been shut down.\n", temp_val); return 1; } else if (temp_state == QLA82XX_TEMP_WARN) { ql_log(ql_log_warn, vha, 0x600f, "Device temperature %d degrees C exceeds " "operating range. 
Immediate action needed.\n", temp_val); } return 0; } void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; if (ha->flags.mbox_busy) { ha->flags.mbox_int = 1; ha->flags.mbox_busy = 0; ql_log(ql_log_warn, vha, 0x6010, "Doing premature completion of mbx command.\n"); if (test_and_clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) complete(&ha->mbx_intr_comp); } } void qla82xx_watchdog(scsi_qla_host_t *vha) { uint32_t dev_state, halt_status; struct qla_hw_data *ha = vha->hw; /* don't poll if reset is going on */ if (!ha->flags.nic_core_reset_hdlr_active) { dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); if (qla82xx_check_temp(vha)) { set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); ha->flags.isp82xx_fw_hung = 1; qla82xx_clear_pending_mbx(vha); } else if (dev_state == QLA8XXX_DEV_NEED_RESET && !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { ql_log(ql_log_warn, vha, 0x6001, "Adapter reset needed.\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { ql_log(ql_log_warn, vha, 0x6002, "Quiescent needed.\n"); set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); } else if (dev_state == QLA8XXX_DEV_FAILED && !test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) && vha->flags.online == 1) { ql_log(ql_log_warn, vha, 0xb055, "Adapter state is failed. 
Offlining.\n"); set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); ha->flags.isp82xx_fw_hung = 1; qla82xx_clear_pending_mbx(vha); } else { if (qla82xx_check_fw_alive(vha)) { ql_dbg(ql_dbg_timer, vha, 0x6011, "disabling pause transmit on port 0 & 1.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1); halt_status = qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS1); ql_log(ql_log_info, vha, 0x6005, "dumping hw/fw registers:.\n " " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n " " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n " " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n " " PEG_NET_4_PC: 0x%x.\n", halt_status, qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2), qla82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c), qla82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c), qla82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c), qla82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c), qla82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c)); if (((halt_status & 0x1fffff00) >> 8) == 0x67) ql_log(ql_log_warn, vha, 0xb052, "Firmware aborted with " "error code 0x00006700. 
Device is " "being reset.\n"); if (halt_status & HALT_STATUS_UNRECOVERABLE) { set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); } else { ql_log(ql_log_info, vha, 0x6006, "Detect abort needed.\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } ha->flags.isp82xx_fw_hung = 1; ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n"); qla82xx_clear_pending_mbx(vha); } } } } int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval; rval = qla82xx_device_state_handler(vha); return rval; } void qla82xx_set_reset_owner(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint32_t dev_state; dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); if (dev_state == QLA8XXX_DEV_READY) { ql_log(ql_log_info, vha, 0xb02f, "HW State: NEED RESET\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_NEED_RESET); ha->flags.nic_core_reset_owner = 1; ql_dbg(ql_dbg_p3p, vha, 0xb030, "reset_owner is 0x%x\n", ha->portnum); } else ql_log(ql_log_info, vha, 0xb031, "Device state is 0x%x = %s.\n", dev_state, dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown"); } /* * qla82xx_abort_isp * Resets ISP and aborts all outstanding commands. * * Input: * ha = adapter block pointer. 
* * Returns: * 0 = success */ int qla82xx_abort_isp(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; if (vha->device_flags & DFLG_DEV_FAILED) { ql_log(ql_log_warn, vha, 0x8024, "Device in failed state, exiting.\n"); return QLA_SUCCESS; } ha->flags.nic_core_reset_hdlr_active = 1; qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); rval = qla82xx_device_state_handler(vha); qla82xx_idc_lock(ha); qla82xx_clear_rst_ready(ha); qla82xx_idc_unlock(ha); if (rval == QLA_SUCCESS) { ha->flags.isp82xx_fw_hung = 0; ha->flags.nic_core_reset_hdlr_active = 0; qla82xx_restart_isp(vha); } if (rval) { vha->flags.online = 1; if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { if (ha->isp_abort_cnt == 0) { ql_log(ql_log_warn, vha, 0x8027, "ISP error recover failed - board " "disabled.\n"); /* * The next call disables the board * completely. */ ha->isp_ops->reset_adapter(vha); vha->flags.online = 0; clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); rval = QLA_SUCCESS; } else { /* schedule another ISP abort */ ha->isp_abort_cnt--; ql_log(ql_log_warn, vha, 0x8036, "ISP abort - retry remaining %d.\n", ha->isp_abort_cnt); rval = QLA_FUNCTION_FAILED; } } else { ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; ql_dbg(ql_dbg_taskm, vha, 0x8029, "ISP error recovery - retrying (%d) more times.\n", ha->isp_abort_cnt); set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); rval = QLA_FUNCTION_FAILED; } } return rval; } /* * qla82xx_fcoe_ctx_reset * Perform a quick reset and aborts all outstanding commands. * This will only perform an FCoE context reset and avoids a full blown * chip reset. * * Input: * ha = adapter block pointer. * is_reset_path = flag for identifying the reset path. * * Returns: * 0 = success */ int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha) { int rval = QLA_FUNCTION_FAILED; if (vha->flags.online) { /* Abort all outstanding commands, so as to be requeued later */ qla2x00_abort_isp_cleanup(vha); } /* Stop currently executing firmware. 
* This will destroy existing FCoE context at the F/W end. */ qla2x00_try_to_stop_firmware(vha); /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */ rval = qla82xx_restart_isp(vha); return rval; } /* * qla2x00_wait_for_fcoe_ctx_reset * Wait till the FCoE context is reset. * * Note: * Does context switching here. * Release SPIN_LOCK (if any) before calling this routine. * * Return: * Success (fcoe_ctx reset is done) : 0 * Failed (fcoe_ctx reset not completed within max loop timout ) : 1 */ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha) { int status = QLA_FUNCTION_FAILED; unsigned long wait_reset; wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ); while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && time_before(jiffies, wait_reset)) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(HZ); if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { status = QLA_SUCCESS; break; } } ql_dbg(ql_dbg_p3p, vha, 0xb027, "%s: status=%d.\n", __func__, status); return status; } void qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) { int i; unsigned long flags; struct qla_hw_data *ha = vha->hw; /* Check if 82XX firmware is alive or not * We may have arrived here from NEED_RESET * detection only */ if (!ha->flags.isp82xx_fw_hung) { for (i = 0; i < 2; i++) { msleep(1000); if (qla82xx_check_fw_alive(vha)) { ha->flags.isp82xx_fw_hung = 1; qla82xx_clear_pending_mbx(vha); break; } } } ql_dbg(ql_dbg_init, vha, 0x00b0, "Entered %s fw_hung=%d.\n", __func__, ha->flags.isp82xx_fw_hung); /* Abort all commands gracefully if fw NOT hung */ if (!ha->flags.isp82xx_fw_hung) { int cnt, que; srb_t *sp; struct req_que *req; spin_lock_irqsave(&ha->hardware_lock, flags); for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; if (!req) continue; for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { if (!sp->u.scmd.ctx 
|| (sp->flags & SRB_FCP_CMND_DMA_VALID)) { spin_unlock_irqrestore( &ha->hardware_lock, flags); if (ha->isp_ops->abort_command(sp)) { ql_log(ql_log_info, vha, 0x00b1, "mbx abort failed.\n"); } else { ql_log(ql_log_info, vha, 0x00b2, "mbx abort success.\n"); } spin_lock_irqsave(&ha->hardware_lock, flags); } } } } spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Wait for pending cmds (physical and virtual) to complete */ if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) == QLA_SUCCESS) { ql_dbg(ql_dbg_init, vha, 0x00b3, "Done wait for " "pending commands.\n"); } } } /* Minidump related functions */ static int qla82xx_minidump_process_control(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; struct qla82xx_md_entry_crb *crb_entry; uint32_t read_value, opcode, poll_time; uint32_t addr, index, crb_addr; unsigned long wtime; struct qla82xx_md_template_hdr *tmplt_hdr; uint32_t rval = QLA_SUCCESS; int i; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr; crb_addr = crb_entry->addr; for (i = 0; i < crb_entry->op_count; i++) { opcode = crb_entry->crb_ctrl.opcode; if (opcode & QLA82XX_DBG_OPCODE_WR) { qla82xx_md_rw_32(ha, crb_addr, crb_entry->value_1, 1); opcode &= ~QLA82XX_DBG_OPCODE_WR; } if (opcode & QLA82XX_DBG_OPCODE_RW) { read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); qla82xx_md_rw_32(ha, crb_addr, read_value, 1); opcode &= ~QLA82XX_DBG_OPCODE_RW; } if (opcode & QLA82XX_DBG_OPCODE_AND) { read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); read_value &= crb_entry->value_2; opcode &= ~QLA82XX_DBG_OPCODE_AND; if (opcode & QLA82XX_DBG_OPCODE_OR) { read_value |= crb_entry->value_3; opcode &= ~QLA82XX_DBG_OPCODE_OR; } qla82xx_md_rw_32(ha, crb_addr, read_value, 1); } if (opcode & QLA82XX_DBG_OPCODE_OR) { read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); read_value |= crb_entry->value_3; qla82xx_md_rw_32(ha, crb_addr, read_value, 
1); opcode &= ~QLA82XX_DBG_OPCODE_OR; } if (opcode & QLA82XX_DBG_OPCODE_POLL) { poll_time = crb_entry->crb_strd.poll_timeout; wtime = jiffies + poll_time; read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); do { if ((read_value & crb_entry->value_2) == crb_entry->value_1) break; else if (time_after_eq(jiffies, wtime)) { /* capturing dump failed */ rval = QLA_FUNCTION_FAILED; break; } else read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); } while (1); opcode &= ~QLA82XX_DBG_OPCODE_POLL; } if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) { if (crb_entry->crb_strd.state_index_a) { index = crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else addr = crb_addr; read_value = qla82xx_md_rw_32(ha, addr, 0, 0); index = crb_entry->crb_ctrl.state_index_v; tmplt_hdr->saved_state_array[index] = read_value; opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE; } if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) { if (crb_entry->crb_strd.state_index_a) { index = crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else addr = crb_addr; if (crb_entry->crb_ctrl.state_index_v) { index = crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; } else read_value = crb_entry->value_1; qla82xx_md_rw_32(ha, addr, read_value, 1); opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE; } if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) { index = crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; read_value <<= crb_entry->crb_ctrl.shl; read_value >>= crb_entry->crb_ctrl.shr; if (crb_entry->value_2) read_value &= crb_entry->value_2; read_value |= crb_entry->value_3; read_value += crb_entry->value_1; tmplt_hdr->saved_state_array[index] = read_value; opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE; } crb_addr += crb_entry->crb_strd.addr_stride; } return rval; } static void qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_stride, 
loop_cnt, i, r_value; struct qla82xx_md_entry_rdocm *ocm_hdr; uint32_t *data_ptr = *d_ptr; ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr; r_addr = ocm_hdr->read_addr; r_stride = ocm_hdr->read_addr_stride; loop_cnt = ocm_hdr->op_count; for (i = 0; i < loop_cnt; i++) { r_value = RD_REG_DWORD((void __iomem *) (r_addr + ha->nx_pcibase)); *data_ptr++ = cpu_to_le32(r_value); r_addr += r_stride; } *d_ptr = data_ptr; } static void qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; struct qla82xx_md_entry_mux *mux_hdr; uint32_t *data_ptr = *d_ptr; mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr; r_addr = mux_hdr->read_addr; s_addr = mux_hdr->select_addr; s_stride = mux_hdr->select_value_stride; s_value = mux_hdr->select_value; loop_cnt = mux_hdr->op_count; for (i = 0; i < loop_cnt; i++) { qla82xx_md_rw_32(ha, s_addr, s_value, 1); r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); *data_ptr++ = cpu_to_le32(s_value); *data_ptr++ = cpu_to_le32(r_value); s_value += s_stride; } *d_ptr = data_ptr; } static void qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_stride, loop_cnt, i, r_value; struct qla82xx_md_entry_crb *crb_hdr; uint32_t *data_ptr = *d_ptr; crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr; r_addr = crb_hdr->addr; r_stride = crb_hdr->crb_strd.addr_stride; loop_cnt = crb_hdr->op_count; for (i = 0; i < loop_cnt; i++) { r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); *data_ptr++ = cpu_to_le32(r_addr); *data_ptr++ = cpu_to_le32(r_value); r_addr += r_stride; } *d_ptr = data_ptr; } static int qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t addr, r_addr, c_addr, t_r_addr; uint32_t i, k, loop_count, 
t_value, r_cnt, r_value; unsigned long p_wait, w_time, p_mask; uint32_t c_value_w, c_value_r; struct qla82xx_md_entry_cache *cache_hdr; int rval = QLA_FUNCTION_FAILED; uint32_t *data_ptr = *d_ptr; cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = cache_hdr->addr_ctrl.init_tag_value; r_cnt = cache_hdr->read_ctrl.read_addr_cnt; p_wait = cache_hdr->cache_ctrl.poll_wait; p_mask = cache_hdr->cache_ctrl.poll_mask; for (i = 0; i < loop_count; i++) { qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); if (c_value_w) qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); if (p_mask) { w_time = jiffies + p_wait; do { c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0); if ((c_value_r & p_mask) == 0) break; else if (time_after_eq(jiffies, w_time)) { /* capturing dump failed */ ql_dbg(ql_dbg_p3p, vha, 0xb032, "c_value_r: 0x%x, poll_mask: 0x%lx, " "w_time: 0x%lx\n", c_value_r, p_mask, w_time); return rval; } } while (1); } addr = r_addr; for (k = 0; k < r_cnt; k++) { r_value = qla82xx_md_rw_32(ha, addr, 0, 0); *data_ptr++ = cpu_to_le32(r_value); addr += cache_hdr->read_ctrl.read_addr_stride; } t_value += cache_hdr->addr_ctrl.tag_value_stride; } *d_ptr = data_ptr; return QLA_SUCCESS; } static void qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t addr, r_addr, c_addr, t_r_addr; uint32_t i, k, loop_count, t_value, r_cnt, r_value; uint32_t c_value_w; struct qla82xx_md_entry_cache *cache_hdr; uint32_t *data_ptr = *d_ptr; cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = cache_hdr->addr_ctrl.init_tag_value; r_cnt = 
cache_hdr->read_ctrl.read_addr_cnt; for (i = 0; i < loop_count; i++) { qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); addr = r_addr; for (k = 0; k < r_cnt; k++) { r_value = qla82xx_md_rw_32(ha, addr, 0, 0); *data_ptr++ = cpu_to_le32(r_value); addr += cache_hdr->read_ctrl.read_addr_stride; } t_value += cache_hdr->addr_ctrl.tag_value_stride; } *d_ptr = data_ptr; } static void qla82xx_minidump_process_queue(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t s_addr, r_addr; uint32_t r_stride, r_value, r_cnt, qid = 0; uint32_t i, k, loop_cnt; struct qla82xx_md_entry_queue *q_hdr; uint32_t *data_ptr = *d_ptr; q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr; s_addr = q_hdr->select_addr; r_cnt = q_hdr->rd_strd.read_addr_cnt; r_stride = q_hdr->rd_strd.read_addr_stride; loop_cnt = q_hdr->op_count; for (i = 0; i < loop_cnt; i++) { qla82xx_md_rw_32(ha, s_addr, qid, 1); r_addr = q_hdr->read_addr; for (k = 0; k < r_cnt; k++) { r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); *data_ptr++ = cpu_to_le32(r_value); r_addr += r_stride; } qid += q_hdr->q_strd.queue_id_stride; } *d_ptr = data_ptr; } static void qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_value; uint32_t i, loop_cnt; struct qla82xx_md_entry_rdrom *rom_hdr; uint32_t *data_ptr = *d_ptr; rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr; r_addr = rom_hdr->read_addr; loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t); for (i = 0; i < loop_cnt; i++) { qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (r_addr & 0xFFFF0000), 1); r_value = qla82xx_md_rw_32(ha, MD_DIRECT_ROM_READ_BASE + (r_addr & 0x0000FFFF), 0, 0); *data_ptr++ = cpu_to_le32(r_value); r_addr += sizeof(uint32_t); } *d_ptr = data_ptr; } static int qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t 
**d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_value, r_data; uint32_t i, j, loop_cnt; struct qla82xx_md_entry_rdmem *m_hdr; unsigned long flags; int rval = QLA_FUNCTION_FAILED; uint32_t *data_ptr = *d_ptr; m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr; r_addr = m_hdr->read_addr; loop_cnt = m_hdr->read_data_size/16; if (r_addr & 0xf) { ql_log(ql_log_warn, vha, 0xb033, "Read addr 0x%x not 16 bytes aligned\n", r_addr); return rval; } if (m_hdr->read_data_size % 16) { ql_log(ql_log_warn, vha, 0xb034, "Read data[0x%x] not multiple of 16 bytes\n", m_hdr->read_data_size); return rval; } ql_dbg(ql_dbg_p3p, vha, 0xb035, "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", __func__, r_addr, m_hdr->read_data_size, loop_cnt); write_lock_irqsave(&ha->hw_lock, flags); for (i = 0; i < loop_cnt; i++) { qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1); r_value = 0; qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1); r_value = MIU_TA_CTL_ENABLE; qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); for (j = 0; j < MAX_CTL_CHECK; j++) { r_value = qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, 0, 0); if ((r_value & MIU_TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { printk_ratelimited(KERN_ERR "failed to read through agent\n"); write_unlock_irqrestore(&ha->hw_lock, flags); return rval; } for (j = 0; j < 4; j++) { r_data = qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_RDDATA[j], 0, 0); *data_ptr++ = cpu_to_le32(r_data); } r_addr += 16; } write_unlock_irqrestore(&ha->hw_lock, flags); *d_ptr = data_ptr; return QLA_SUCCESS; } static int qla82xx_validate_template_chksum(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint64_t chksum = 0; uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr; int count = ha->md_template_size/sizeof(uint32_t); while (count-- > 0) chksum += *d_ptr++; while (chksum >> 32) chksum = (chksum & 0xFFFFFFFF) + 
(chksum >> 32); return ~chksum; } static void qla82xx_mark_entry_skipped(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, int index) { entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; ql_dbg(ql_dbg_p3p, vha, 0xb036, "Skipping entry[%d]: " "ETYPE[0x%x]-ELEVEL[0x%x]\n", index, entry_hdr->entry_type, entry_hdr->d_ctrl.entry_capture_mask); } int qla82xx_md_collect(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int no_entry_hdr = 0; qla82xx_md_entry_hdr_t *entry_hdr; struct qla82xx_md_template_hdr *tmplt_hdr; uint32_t *data_ptr; uint32_t total_data_size = 0, f_capture_mask, data_collected = 0; int i = 0, rval = QLA_FUNCTION_FAILED; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; data_ptr = (uint32_t *)ha->md_dump; if (ha->fw_dumped) { ql_log(ql_log_warn, vha, 0xb037, "Firmware has been previously dumped (%p) " "-- ignoring request.\n", ha->fw_dump); goto md_failed; } ha->fw_dumped = 0; if (!ha->md_tmplt_hdr || !ha->md_dump) { ql_log(ql_log_warn, vha, 0xb038, "Memory not allocated for minidump capture\n"); goto md_failed; } if (ha->flags.isp82xx_no_md_cap) { ql_log(ql_log_warn, vha, 0xb054, "Forced reset from application, " "ignore minidump capture\n"); ha->flags.isp82xx_no_md_cap = 0; goto md_failed; } if (qla82xx_validate_template_chksum(vha)) { ql_log(ql_log_info, vha, 0xb039, "Template checksum validation error\n"); goto md_failed; } no_entry_hdr = tmplt_hdr->num_of_entries; ql_dbg(ql_dbg_p3p, vha, 0xb03a, "No of entry headers in Template: 0x%x\n", no_entry_hdr); ql_dbg(ql_dbg_p3p, vha, 0xb03b, "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF; /* Validate whether required debug level is set */ if ((f_capture_mask & 0x3) != 0x3) { ql_log(ql_log_warn, vha, 0xb03c, "Minimum required capture mask[0x%x] level not set\n", f_capture_mask); goto md_failed; } tmplt_hdr->driver_capture_mask = ql2xmdcapmask; tmplt_hdr->driver_info[0] = vha->host_no; 
tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) | (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) | QLA_DRIVER_BETA_VER; total_data_size = ha->md_dump_size; ql_dbg(ql_dbg_p3p, vha, 0xb03d, "Total minidump data_size 0x%x to be captured\n", total_data_size); /* Check whether template obtained is valid */ if (tmplt_hdr->entry_type != QLA82XX_TLHDR) { ql_log(ql_log_warn, vha, 0xb04e, "Bad template header entry type: 0x%x obtained\n", tmplt_hdr->entry_type); goto md_failed; } entry_hdr = (qla82xx_md_entry_hdr_t *) \ (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset); /* Walk through the entry headers */ for (i = 0; i < no_entry_hdr; i++) { if (data_collected > total_data_size) { ql_log(ql_log_warn, vha, 0xb03e, "More MiniDump data collected: [0x%x]\n", data_collected); goto md_failed; } if (!(entry_hdr->d_ctrl.entry_capture_mask & ql2xmdcapmask)) { entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; ql_dbg(ql_dbg_p3p, vha, 0xb03f, "Skipping entry[%d]: " "ETYPE[0x%x]-ELEVEL[0x%x]\n", i, entry_hdr->entry_type, entry_hdr->d_ctrl.entry_capture_mask); goto skip_nxt_entry; } ql_dbg(ql_dbg_p3p, vha, 0xb040, "[%s]: data ptr[%d]: %p, entry_hdr: %p\n" "entry_type: 0x%x, captrue_mask: 0x%x\n", __func__, i, data_ptr, entry_hdr, entry_hdr->entry_type, entry_hdr->d_ctrl.entry_capture_mask); ql_dbg(ql_dbg_p3p, vha, 0xb041, "Data collected: [0x%x], Dump size left:[0x%x]\n", data_collected, (ha->md_dump_size - data_collected)); /* Decode the entry type and take * required action to capture debug data */ switch (entry_hdr->entry_type) { case QLA82XX_RDEND: qla82xx_mark_entry_skipped(vha, entry_hdr, i); break; case QLA82XX_CNTRL: rval = qla82xx_minidump_process_control(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } break; case QLA82XX_RDCRB: qla82xx_minidump_process_rdcrb(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDMEM: rval = qla82xx_minidump_process_rdmem(vha, 
entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } break; case QLA82XX_BOARD: case QLA82XX_RDROM: qla82xx_minidump_process_rdrom(vha, entry_hdr, &data_ptr); break; case QLA82XX_L2DTG: case QLA82XX_L2ITG: case QLA82XX_L2DAT: case QLA82XX_L2INS: rval = qla82xx_minidump_process_l2tag(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } break; case QLA82XX_L1DAT: case QLA82XX_L1INS: qla82xx_minidump_process_l1cache(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDOCM: qla82xx_minidump_process_rdocm(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDMUX: qla82xx_minidump_process_rdmux(vha, entry_hdr, &data_ptr); break; case QLA82XX_QUEUE: qla82xx_minidump_process_queue(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDNOP: default: qla82xx_mark_entry_skipped(vha, entry_hdr, i); break; } ql_dbg(ql_dbg_p3p, vha, 0xb042, "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr); data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->md_dump; skip_nxt_entry: entry_hdr = (qla82xx_md_entry_hdr_t *) \ (((uint8_t *)entry_hdr) + entry_hdr->entry_size); } if (data_collected != total_data_size) { ql_dbg(ql_dbg_p3p, vha, 0xb043, "MiniDump data mismatch: Data collected: [0x%x]," "total_data_size:[0x%x]\n", data_collected, total_data_size); goto md_failed; } ql_log(ql_log_info, vha, 0xb044, "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); ha->fw_dumped = 1; qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); md_failed: return rval; } int qla82xx_md_alloc(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int i, k; struct qla82xx_md_template_hdr *tmplt_hdr; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) { ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF; ql_log(ql_log_info, vha, 0xb045, "Forcing driver capture mask 
to firmware default capture mask: 0x%x.\n", ql2xmdcapmask); } for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) { if (i & ql2xmdcapmask) ha->md_dump_size += tmplt_hdr->capture_size_array[k]; } if (ha->md_dump) { ql_log(ql_log_warn, vha, 0xb046, "Firmware dump previously allocated.\n"); return 1; } ha->md_dump = vmalloc(ha->md_dump_size); if (ha->md_dump == NULL) { ql_log(ql_log_warn, vha, 0xb047, "Unable to allocate memory for Minidump size " "(0x%x).\n", ha->md_dump_size); return 1; } return 0; } void qla82xx_md_free(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; /* Release the template header allocated */ if (ha->md_tmplt_hdr) { ql_log(ql_log_info, vha, 0xb048, "Free MiniDump template: %p, size (%d KB)\n", ha->md_tmplt_hdr, ha->md_template_size / 1024); dma_free_coherent(&ha->pdev->dev, ha->md_template_size, ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma); ha->md_tmplt_hdr = NULL; } /* Release the template data buffer allocated */ if (ha->md_dump) { ql_log(ql_log_info, vha, 0xb049, "Free MiniDump memory: %p, size (%d KB)\n", ha->md_dump, ha->md_dump_size / 1024); vfree(ha->md_dump); ha->md_dump_size = 0; ha->md_dump = NULL; } } void qla82xx_md_prep(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int rval; /* Get Minidump template size */ rval = qla82xx_md_get_template_size(vha); if (rval == QLA_SUCCESS) { ql_log(ql_log_info, vha, 0xb04a, "MiniDump Template size obtained (%d KB)\n", ha->md_template_size / 1024); /* Get Minidump template */ rval = qla82xx_md_get_template(vha); if (rval == QLA_SUCCESS) { ql_dbg(ql_dbg_p3p, vha, 0xb04b, "MiniDump Template obtained\n"); /* Allocate memory for minidump */ rval = qla82xx_md_alloc(vha); if (rval == QLA_SUCCESS) ql_log(ql_log_info, vha, 0xb04c, "MiniDump memory allocated (%d KB)\n", ha->md_dump_size / 1024); else { ql_log(ql_log_info, vha, 0xb04d, "Free MiniDump template: %p, size: (%d KB)\n", ha->md_tmplt_hdr, ha->md_template_size / 1024); dma_free_coherent(&ha->pdev->dev, 
ha->md_template_size, ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma); ha->md_tmplt_hdr = NULL; } } } } int qla82xx_beacon_on(struct scsi_qla_host *vha) { int rval; struct qla_hw_data *ha = vha->hw; qla82xx_idc_lock(ha); rval = qla82xx_mbx_beacon_ctl(vha, 1); if (rval) { ql_log(ql_log_warn, vha, 0xb050, "mbx set led config failed in %s\n", __func__); goto exit; } ha->beacon_blink_led = 1; exit: qla82xx_idc_unlock(ha); return rval; } int qla82xx_beacon_off(struct scsi_qla_host *vha) { int rval; struct qla_hw_data *ha = vha->hw; qla82xx_idc_lock(ha); rval = qla82xx_mbx_beacon_ctl(vha, 0); if (rval) { ql_log(ql_log_warn, vha, 0xb051, "mbx set led config failed in %s\n", __func__); goto exit; } ha->beacon_blink_led = 0; exit: qla82xx_idc_unlock(ha); return rval; }
gpl-2.0
myfluxi/xxICSKernel
fs/seq_file.c
2110
18258
/* * linux/fs/seq_file.c * * helper functions for making synthetic files from sequences of records. * initial implementation -- AV, Oct 2001. */ #include <linux/fs.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/page.h> /** * seq_open - initialize sequential file * @file: file we initialize * @op: method table describing the sequence * * seq_open() sets @file, associating it with a sequence described * by @op. @op->start() sets the iterator up and returns the first * element of sequence. @op->stop() shuts it down. @op->next() * returns the next element of sequence. @op->show() prints element * into the buffer. In case of error ->start() and ->next() return * ERR_PTR(error). In the end of sequence they return %NULL. ->show() * returns 0 in case of success and negative number in case of error. * Returning SEQ_SKIP means "discard this element and move on". */ int seq_open(struct file *file, const struct seq_operations *op) { struct seq_file *p = file->private_data; if (!p) { p = kmalloc(sizeof(*p), GFP_KERNEL); if (!p) return -ENOMEM; file->private_data = p; } memset(p, 0, sizeof(*p)); mutex_init(&p->lock); p->op = op; /* * Wrappers around seq_open(e.g. swaps_open) need to be * aware of this. If they set f_version themselves, they * should call seq_open first and then set f_version. */ file->f_version = 0; /* * seq_files support lseek() and pread(). They do not implement * write() at all, but we clear FMODE_PWRITE here for historical * reasons. * * If a client of seq_files a) implements file.write() and b) wishes to * support pwrite() then that client will need to implement its own * file.open() which calls seq_open() and then sets FMODE_PWRITE. 
*/ file->f_mode &= ~FMODE_PWRITE; return 0; } EXPORT_SYMBOL(seq_open); static int traverse(struct seq_file *m, loff_t offset) { loff_t pos = 0, index; int error = 0; void *p; m->version = 0; index = 0; m->count = m->from = 0; if (!offset) { m->index = index; return 0; } if (!m->buf) { m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); if (!m->buf) return -ENOMEM; } p = m->op->start(m, &index); while (p) { error = PTR_ERR(p); if (IS_ERR(p)) break; error = m->op->show(m, p); if (error < 0) break; if (unlikely(error)) { error = 0; m->count = 0; } if (m->count == m->size) goto Eoverflow; if (pos + m->count > offset) { m->from = offset - pos; m->count -= m->from; m->index = index; break; } pos += m->count; m->count = 0; if (pos == offset) { index++; m->index = index; break; } p = m->op->next(m, p, &index); } m->op->stop(m, p); m->index = index; return error; Eoverflow: m->op->stop(m, p); kfree(m->buf); m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); return !m->buf ? -ENOMEM : -EAGAIN; } /** * seq_read - ->read() method for sequential files. * @file: the file to read from * @buf: the buffer to read to * @size: the maximum number of bytes to read * @ppos: the current position in the file * * Ready-made ->f_op->read() */ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct seq_file *m = file->private_data; size_t copied = 0; loff_t pos; size_t n; void *p; int err = 0; mutex_lock(&m->lock); /* Don't assume *ppos is where we left it */ if (unlikely(*ppos != m->read_pos)) { m->read_pos = *ppos; while ((err = traverse(m, *ppos)) == -EAGAIN) ; if (err) { /* With prejudice... */ m->read_pos = 0; m->version = 0; m->index = 0; m->count = 0; goto Done; } } /* * seq_file->op->..m_start/m_stop/m_next may do special actions * or optimisations based on the file->f_version, so we want to * pass the file->f_version to those methods. * * seq_file->version is just copy of f_version, and seq_file * methods can treat it simply as file version. 
* It is copied in first and copied out after all operations. * It is convenient to have it as part of structure to avoid the * need of passing another argument to all the seq_file methods. */ m->version = file->f_version; /* grab buffer if we didn't have one */ if (!m->buf) { m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); if (!m->buf) goto Enomem; } /* if not empty - flush it first */ if (m->count) { n = min(m->count, size); err = copy_to_user(buf, m->buf + m->from, n); if (err) goto Efault; m->count -= n; m->from += n; size -= n; buf += n; copied += n; if (!m->count) m->index++; if (!size) goto Done; } /* we need at least one record in buffer */ pos = m->index; p = m->op->start(m, &pos); while (1) { err = PTR_ERR(p); if (!p || IS_ERR(p)) break; err = m->op->show(m, p); if (err < 0) break; if (unlikely(err)) m->count = 0; if (unlikely(!m->count)) { p = m->op->next(m, p, &pos); m->index = pos; continue; } if (m->count < m->size) goto Fill; m->op->stop(m, p); kfree(m->buf); m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); if (!m->buf) goto Enomem; m->count = 0; m->version = 0; pos = m->index; p = m->op->start(m, &pos); } m->op->stop(m, p); m->count = 0; goto Done; Fill: /* they want more? let's try to get some more */ while (m->count < size) { size_t offs = m->count; loff_t next = pos; p = m->op->next(m, p, &next); if (!p || IS_ERR(p)) { err = PTR_ERR(p); break; } err = m->op->show(m, p); if (m->count == m->size || err) { m->count = offs; if (likely(err <= 0)) break; } pos = next; } m->op->stop(m, p); n = min(m->count, size); err = copy_to_user(buf, m->buf, n); if (err) goto Efault; copied += n; m->count -= n; if (m->count) m->from = n; else pos++; m->index = pos; Done: if (!copied) copied = err; else { *ppos += copied; m->read_pos += copied; } file->f_version = m->version; mutex_unlock(&m->lock); return copied; Enomem: err = -ENOMEM; goto Done; Efault: err = -EFAULT; goto Done; } EXPORT_SYMBOL(seq_read); /** * seq_lseek - ->llseek() method for sequential files. 
* @file: the file in question * @offset: new position * @origin: 0 for absolute, 1 for relative position * * Ready-made ->f_op->llseek() */ loff_t seq_lseek(struct file *file, loff_t offset, int origin) { struct seq_file *m = file->private_data; loff_t retval = -EINVAL; mutex_lock(&m->lock); m->version = file->f_version; switch (origin) { case 1: offset += file->f_pos; case 0: if (offset < 0) break; retval = offset; if (offset != m->read_pos) { while ((retval=traverse(m, offset)) == -EAGAIN) ; if (retval) { /* with extreme prejudice... */ file->f_pos = 0; m->read_pos = 0; m->version = 0; m->index = 0; m->count = 0; } else { m->read_pos = offset; retval = file->f_pos = offset; } } } file->f_version = m->version; mutex_unlock(&m->lock); return retval; } EXPORT_SYMBOL(seq_lseek); /** * seq_release - free the structures associated with sequential file. * @file: file in question * @inode: file->f_path.dentry->d_inode * * Frees the structures associated with sequential file; can be used * as ->f_op->release() if you don't have private data to destroy. */ int seq_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; kfree(m->buf); kfree(m); return 0; } EXPORT_SYMBOL(seq_release); /** * seq_escape - print string into buffer, escaping some characters * @m: target buffer * @s: string * @esc: set of characters that need escaping * * Puts string into buffer, replacing each occurrence of character from * @esc with usual octal escape. Returns 0 in case of success, -1 - in * case of overflow. 
*/ int seq_escape(struct seq_file *m, const char *s, const char *esc) { char *end = m->buf + m->size; char *p; char c; for (p = m->buf + m->count; (c = *s) != '\0' && p < end; s++) { if (!strchr(esc, c)) { *p++ = c; continue; } if (p + 3 < end) { *p++ = '\\'; *p++ = '0' + ((c & 0300) >> 6); *p++ = '0' + ((c & 070) >> 3); *p++ = '0' + (c & 07); continue; } m->count = m->size; return -1; } m->count = p - m->buf; return 0; } EXPORT_SYMBOL(seq_escape); int seq_printf(struct seq_file *m, const char *f, ...) { va_list args; int len; if (m->count < m->size) { va_start(args, f); len = vsnprintf(m->buf + m->count, m->size - m->count, f, args); va_end(args); if (m->count + len < m->size) { m->count += len; return 0; } } m->count = m->size; return -1; } EXPORT_SYMBOL(seq_printf); /** * mangle_path - mangle and copy path to buffer beginning * @s: buffer start * @p: beginning of path in above buffer * @esc: set of characters that need escaping * * Copy the path from @p to @s, replacing each occurrence of character from * @esc with usual octal escape. * Returns pointer past last written character in @s, or NULL in case of * failure. */ char *mangle_path(char *s, char *p, char *esc) { while (s <= p) { char c = *p++; if (!c) { return s; } else if (!strchr(esc, c)) { *s++ = c; } else if (s + 4 > p) { break; } else { *s++ = '\\'; *s++ = '0' + ((c & 0300) >> 6); *s++ = '0' + ((c & 070) >> 3); *s++ = '0' + (c & 07); } } return NULL; } EXPORT_SYMBOL(mangle_path); /** * seq_path - seq_file interface to print a pathname * @m: the seq_file handle * @path: the struct path to print * @esc: set of characters to escape in the output * * return the absolute path of 'path', as represented by the * dentry / mnt pair in the path parameter. 
*/ int seq_path(struct seq_file *m, struct path *path, char *esc) { char *buf; size_t size = seq_get_buf(m, &buf); int res = -1; if (size) { char *p = d_path(path, buf, size); if (!IS_ERR(p)) { char *end = mangle_path(buf, p, esc); if (end) res = end - buf; } } seq_commit(m, res); return res; } EXPORT_SYMBOL(seq_path); /* * Same as seq_path, but relative to supplied root. */ int seq_path_root(struct seq_file *m, struct path *path, struct path *root, char *esc) { char *buf; size_t size = seq_get_buf(m, &buf); int res = -ENAMETOOLONG; if (size) { char *p; p = __d_path(path, root, buf, size); if (!p) return SEQ_SKIP; res = PTR_ERR(p); if (!IS_ERR(p)) { char *end = mangle_path(buf, p, esc); if (end) res = end - buf; else res = -ENAMETOOLONG; } } seq_commit(m, res); return res < 0 && res != -ENAMETOOLONG ? res : 0; } /* * returns the path of the 'dentry' from the root of its filesystem. */ int seq_dentry(struct seq_file *m, struct dentry *dentry, char *esc) { char *buf; size_t size = seq_get_buf(m, &buf); int res = -1; if (size) { char *p = dentry_path(dentry, buf, size); if (!IS_ERR(p)) { char *end = mangle_path(buf, p, esc); if (end) res = end - buf; } } seq_commit(m, res); return res; } int seq_bitmap(struct seq_file *m, const unsigned long *bits, unsigned int nr_bits) { if (m->count < m->size) { int len = bitmap_scnprintf(m->buf + m->count, m->size - m->count, bits, nr_bits); if (m->count + len < m->size) { m->count += len; return 0; } } m->count = m->size; return -1; } EXPORT_SYMBOL(seq_bitmap); int seq_bitmap_list(struct seq_file *m, const unsigned long *bits, unsigned int nr_bits) { if (m->count < m->size) { int len = bitmap_scnlistprintf(m->buf + m->count, m->size - m->count, bits, nr_bits); if (m->count + len < m->size) { m->count += len; return 0; } } m->count = m->size; return -1; } EXPORT_SYMBOL(seq_bitmap_list); static void *single_start(struct seq_file *p, loff_t *pos) { return NULL + (*pos == 0); } static void *single_next(struct seq_file *p, void *v, 
loff_t *pos) { ++*pos; return NULL; } static void single_stop(struct seq_file *p, void *v) { } int single_open(struct file *file, int (*show)(struct seq_file *, void *), void *data) { struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL); int res = -ENOMEM; if (op) { op->start = single_start; op->next = single_next; op->stop = single_stop; op->show = show; res = seq_open(file, op); if (!res) ((struct seq_file *)file->private_data)->private = data; else kfree(op); } return res; } EXPORT_SYMBOL(single_open); int single_release(struct inode *inode, struct file *file) { const struct seq_operations *op = ((struct seq_file *)file->private_data)->op; int res = seq_release(inode, file); kfree(op); return res; } EXPORT_SYMBOL(single_release); int seq_release_private(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; kfree(seq->private); seq->private = NULL; return seq_release(inode, file); } EXPORT_SYMBOL(seq_release_private); void *__seq_open_private(struct file *f, const struct seq_operations *ops, int psize) { int rc; void *private; struct seq_file *seq; private = kzalloc(psize, GFP_KERNEL); if (private == NULL) goto out; rc = seq_open(f, ops); if (rc < 0) goto out_free; seq = f->private_data; seq->private = private; return private; out_free: kfree(private); out: return NULL; } EXPORT_SYMBOL(__seq_open_private); int seq_open_private(struct file *filp, const struct seq_operations *ops, int psize) { return __seq_open_private(filp, ops, psize) ? 
0 : -ENOMEM; } EXPORT_SYMBOL(seq_open_private); int seq_putc(struct seq_file *m, char c) { if (m->count < m->size) { m->buf[m->count++] = c; return 0; } return -1; } EXPORT_SYMBOL(seq_putc); int seq_puts(struct seq_file *m, const char *s) { int len = strlen(s); if (m->count + len < m->size) { memcpy(m->buf + m->count, s, len); m->count += len; return 0; } m->count = m->size; return -1; } EXPORT_SYMBOL(seq_puts); /** * seq_write - write arbitrary data to buffer * @seq: seq_file identifying the buffer to which data should be written * @data: data address * @len: number of bytes * * Return 0 on success, non-zero otherwise. */ int seq_write(struct seq_file *seq, const void *data, size_t len) { if (seq->count + len < seq->size) { memcpy(seq->buf + seq->count, data, len); seq->count += len; return 0; } seq->count = seq->size; return -1; } EXPORT_SYMBOL(seq_write); struct list_head *seq_list_start(struct list_head *head, loff_t pos) { struct list_head *lh; list_for_each(lh, head) if (pos-- == 0) return lh; return NULL; } EXPORT_SYMBOL(seq_list_start); struct list_head *seq_list_start_head(struct list_head *head, loff_t pos) { if (!pos) return head; return seq_list_start(head, pos - 1); } EXPORT_SYMBOL(seq_list_start_head); struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos) { struct list_head *lh; lh = ((struct list_head *)v)->next; ++*ppos; return lh == head ? NULL : lh; } EXPORT_SYMBOL(seq_list_next); /** * seq_hlist_start - start an iteration of a hlist * @head: the head of the hlist * @pos: the start position of the sequence * * Called at seq_file->op->start(). */ struct hlist_node *seq_hlist_start(struct hlist_head *head, loff_t pos) { struct hlist_node *node; hlist_for_each(node, head) if (pos-- == 0) return node; return NULL; } EXPORT_SYMBOL(seq_hlist_start); /** * seq_hlist_start_head - start an iteration of a hlist * @head: the head of the hlist * @pos: the start position of the sequence * * Called at seq_file->op->start(). 
Call this function if you want to * print a header at the top of the output. */ struct hlist_node *seq_hlist_start_head(struct hlist_head *head, loff_t pos) { if (!pos) return SEQ_START_TOKEN; return seq_hlist_start(head, pos - 1); } EXPORT_SYMBOL(seq_hlist_start_head); /** * seq_hlist_next - move to the next position of the hlist * @v: the current iterator * @head: the head of the hlist * @ppos: the current position * * Called at seq_file->op->next(). */ struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head, loff_t *ppos) { struct hlist_node *node = v; ++*ppos; if (v == SEQ_START_TOKEN) return head->first; else return node->next; } EXPORT_SYMBOL(seq_hlist_next); /** * seq_hlist_start_rcu - start an iteration of a hlist protected by RCU * @head: the head of the hlist * @pos: the start position of the sequence * * Called at seq_file->op->start(). * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by rcu_read_lock(). */ struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head, loff_t pos) { struct hlist_node *node; __hlist_for_each_rcu(node, head) if (pos-- == 0) return node; return NULL; } EXPORT_SYMBOL(seq_hlist_start_rcu); /** * seq_hlist_start_head_rcu - start an iteration of a hlist protected by RCU * @head: the head of the hlist * @pos: the start position of the sequence * * Called at seq_file->op->start(). Call this function if you want to * print a header at the top of the output. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by rcu_read_lock(). 
*/ struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head, loff_t pos) { if (!pos) return SEQ_START_TOKEN; return seq_hlist_start_rcu(head, pos - 1); } EXPORT_SYMBOL(seq_hlist_start_head_rcu); /** * seq_hlist_next_rcu - move to the next position of the hlist protected by RCU * @v: the current iterator * @head: the head of the hlist * @ppos: the current position * * Called at seq_file->op->next(). * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by rcu_read_lock(). */ struct hlist_node *seq_hlist_next_rcu(void *v, struct hlist_head *head, loff_t *ppos) { struct hlist_node *node = v; ++*ppos; if (v == SEQ_START_TOKEN) return rcu_dereference(head->first); else return rcu_dereference(node->next); } EXPORT_SYMBOL(seq_hlist_next_rcu);
gpl-2.0
mikeNG/android_kernel_oneplus_msm8974
drivers/video/msm/mipi_novatek_cmd_qhd_pt.c
3390
3018
/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include "msm_fb.h" #include "mipi_dsi.h" #include "mipi_novatek.h" static struct msm_panel_info pinfo; static struct mipi_dsi_phy_ctrl dsi_cmd_mode_phy_db = { /* DSI_BIT_CLK at 500MHz, 2 lane, RGB888 */ {0x03, 0x01, 0x01, 0x00}, /* regulator */ /* timing */ {0xB4, 0x8D, 0x1D, 0x00, 0x20, 0x94, 0x20, 0x8F, 0x20, 0x03, 0x04}, {0x7f, 0x00, 0x00, 0x00}, /* phy ctrl */ {0xee, 0x02, 0x86, 0x00}, /* strength */ /* pll control */ {0x40, 0xf9, 0xb0, 0xda, 0x00, 0x50, 0x48, 0x63, #if defined(NOVATEK_TWO_LANE) 0x30, 0x07, 0x03, #else /* default set to 1 lane */ 0x30, 0x07, 0x07, #endif 0x05, 0x14, 0x03, 0x0, 0x0, 0x54, 0x06, 0x10, 0x04, 0x0}, }; static int __init mipi_cmd_novatek_blue_qhd_pt_init(void) { int ret; if (msm_fb_detect_client("mipi_cmd_novatek_qhd")) return 0; pinfo.xres = 540; pinfo.yres = 960; pinfo.type = MIPI_CMD_PANEL; pinfo.pdest = DISPLAY_1; pinfo.wait_cycle = 0; pinfo.bpp = 24; pinfo.lcdc.h_back_porch = 50; pinfo.lcdc.h_front_porch = 50; pinfo.lcdc.h_pulse_width = 20; pinfo.lcdc.v_back_porch = 11; pinfo.lcdc.v_front_porch = 10; pinfo.lcdc.v_pulse_width = 5; pinfo.lcdc.border_clr = 0; /* blk */ pinfo.lcdc.underflow_clr = 0xff; /* blue */ pinfo.lcdc.hsync_skew = 0; pinfo.bl_max = 255; pinfo.bl_min = 1; pinfo.fb_num = 2; pinfo.clk_rate = 454000000; pinfo.is_3d_panel = FB_TYPE_3D_PANEL; pinfo.lcd.vsync_enable = TRUE; pinfo.lcd.hw_vsync_mode = TRUE; pinfo.lcd.refx100 = 6200; /* adjust refx100 to prevent tearing */ pinfo.lcd.v_back_porch = 11; 
pinfo.lcd.v_front_porch = 10; pinfo.lcd.v_pulse_width = 5; pinfo.mipi.mode = DSI_CMD_MODE; pinfo.mipi.dst_format = DSI_CMD_DST_FORMAT_RGB888; pinfo.mipi.vc = 0; pinfo.mipi.data_lane0 = TRUE; pinfo.mipi.esc_byte_ratio = 4; #if defined(NOVATEK_TWO_LANE) pinfo.mipi.data_lane1 = TRUE; #endif pinfo.mipi.t_clk_post = 0x22; pinfo.mipi.t_clk_pre = 0x3f; pinfo.mipi.stream = 0; /* dma_p */ pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_NONE; pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW; pinfo.mipi.te_sel = 1; /* TE from vsycn gpio */ pinfo.mipi.interleave_max = 1; pinfo.mipi.insert_dcs_cmd = TRUE; pinfo.mipi.wr_mem_continue = 0x3c; pinfo.mipi.wr_mem_start = 0x2c; pinfo.mipi.dsi_phy_db = &dsi_cmd_mode_phy_db; ret = mipi_novatek_device_register(&pinfo, MIPI_DSI_PRIM, MIPI_DSI_PANEL_QHD_PT); if (ret) pr_err("%s: failed to register device!\n", __func__); return ret; } module_init(mipi_cmd_novatek_blue_qhd_pt_init);
gpl-2.0
FrancescoCG/CrazySuperKernel-CM13-KLTE
drivers/gpu/drm/radeon/radeon_agp.c
5950
9988
/* * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: * Dave Airlie * Jerome Glisse <glisse@freedesktop.org> */ #include "drmP.h" #include "drm.h" #include "radeon.h" #include "radeon_drm.h" #if __OS_HAS_AGP struct radeon_agpmode_quirk { u32 hostbridge_vendor; u32 hostbridge_device; u32 chip_vendor; u32 chip_device; u32 subsys_vendor; u32 subsys_device; u32 default_mode; }; static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = { /* Intel E7505 Memory Controller Hub / RV350 AR [Radeon 9600XT] Needs AGPMode 4 (deb #515326) */ { PCI_VENDOR_ID_INTEL, 0x2550, PCI_VENDOR_ID_ATI, 0x4152, 0x1458, 0x4038, 4}, /* Intel 82865G/PE/P DRAM Controller/Host-Hub / Mobility 9800 Needs AGPMode 4 (deb #462590) */ { PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x4a4e, PCI_VENDOR_ID_DELL, 0x5106, 4}, /* Intel 82865G/PE/P DRAM Controller/Host-Hub / RV280 [Radeon 9200 SE] Needs AGPMode 4 (lp #300304) */ { PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x5964, 0x148c, 0x2073, 4}, /* Intel 82855PM Processor to I/O Controller / Mobility M6 LY Needs AGPMode 1 (deb #467235) */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c59, PCI_VENDOR_ID_IBM, 0x052f, 1}, /* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50, PCI_VENDOR_ID_IBM, 0x0550, 1}, /* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57, PCI_VENDOR_ID_IBM, 0x0530, 1}, /* Intel 82855PM host bridge / FireGL Mobility T2 RV350 Needs AGPMode 2 (fdo #20647) */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e54, PCI_VENDOR_ID_IBM, 0x054f, 2}, /* Intel 82855PM host bridge / Mobility M9+ / VaioPCG-V505DX Needs AGPMode 2 (fdo #17928) */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61, PCI_VENDOR_ID_SONY, 0x816b, 2}, /* Intel 82855PM Processor to I/O Controller / Mobility M9+ Needs AGPMode 8 (phoronix forum) */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61, PCI_VENDOR_ID_SONY, 0x8195, 
8}, /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/ { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59, PCI_VENDOR_ID_DELL, 0x00e3, 2}, /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66, PCI_VENDOR_ID_DELL, 0x0149, 1}, /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, 0x1025, 0x0061, 1}, /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #203007) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, 0x1025, 0x0064, 1}, /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #141551) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, PCI_VENDOR_ID_ASUSTEK, 0x1942, 1}, /* Intel 82852/82855 host bridge / Mobility 9600/9700 Needs AGPMode 1 (deb #510208) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, 0x10cf, 0x127f, 1}, /* ASRock K7VT4A+ AGP 8x / ATI Radeon 9250 AGP Needs AGPMode 4 (lp #133192) */ { 0x1849, 0x3189, PCI_VENDOR_ID_ATI, 0x5960, 0x1787, 0x5960, 4}, /* VIA K8M800 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (fdo #12544) */ { PCI_VENDOR_ID_VIA, 0x0204, PCI_VENDOR_ID_ATI, 0x5960, 0x17af, 0x2020, 4}, /* VIA KT880 Host Bridge / RV350 [Radeon 9550] Needs AGPMode 4 (fdo #19981) */ { PCI_VENDOR_ID_VIA, 0x0269, PCI_VENDOR_ID_ATI, 0x4153, PCI_VENDOR_ID_ASUSTEK, 0x003c, 4}, /* VIA VT8363 Host Bridge / R200 QL [Radeon 8500] Needs AGPMode 2 (lp #141551) */ { PCI_VENDOR_ID_VIA, 0x0305, PCI_VENDOR_ID_ATI, 0x514c, PCI_VENDOR_ID_ATI, 0x013a, 2}, /* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 (deb #515512) */ { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960, PCI_VENDOR_ID_ASUSTEK, 0x004c, 2}, /* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 */ { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 
0x5960, PCI_VENDOR_ID_ASUSTEK, 0x0054, 2}, /* VIA VT8377 Host Bridge / R200 QM [Radeon 9100] Needs AGPMode 4 (deb #461144) */ { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x514d, 0x174b, 0x7149, 4}, /* VIA VT8377 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (lp #312693) */ { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5960, 0x1462, 0x0380, 4}, /* VIA VT8377 Host Bridge / RV280 Needs AGPMode 4 (ati ML) */ { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5964, 0x148c, 0x2073, 4}, /* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */ { PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61, PCI_VENDOR_ID_SONY, 0x8175, 1}, /* HP Host Bridge / R300 [FireGL X1] Needs AGPMode 2 (fdo #7770) */ { PCI_VENDOR_ID_HP, 0x122e, PCI_VENDOR_ID_ATI, 0x4e47, PCI_VENDOR_ID_ATI, 0x0152, 2}, { 0, 0, 0, 0, 0, 0, 0 }, }; #endif int radeon_agp_init(struct radeon_device *rdev) { #if __OS_HAS_AGP struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list; struct drm_agp_mode mode; struct drm_agp_info info; uint32_t agp_status; int default_mode; bool is_v3; int ret; /* Acquire AGP. */ ret = drm_agp_acquire(rdev->ddev); if (ret) { DRM_ERROR("Unable to acquire AGP: %d\n", ret); return ret; } ret = drm_agp_info(rdev->ddev, &info); if (ret) { drm_agp_release(rdev->ddev); DRM_ERROR("Unable to get AGP info: %d\n", ret); return ret; } if (rdev->ddev->agp->agp_info.aper_size < 32) { drm_agp_release(rdev->ddev); dev_warn(rdev->dev, "AGP aperture too small (%zuM) " "need at least 32M, disabling AGP\n", rdev->ddev->agp->agp_info.aper_size); return -EINVAL; } mode.mode = info.mode; /* chips with the agp to pcie bridge don't have the AGP_STATUS register * Just use the whatever mode the host sets up. */ if (rdev->family <= CHIP_RV350) agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; else agp_status = mode.mode; is_v3 = !!(agp_status & RADEON_AGPv3_MODE); if (is_v3) { default_mode = (agp_status & RADEON_AGPv3_8X_MODE) ? 
8 : 4; } else { if (agp_status & RADEON_AGP_4X_MODE) { default_mode = 4; } else if (agp_status & RADEON_AGP_2X_MODE) { default_mode = 2; } else { default_mode = 1; } } /* Apply AGPMode Quirks */ while (p && p->chip_device != 0) { if (info.id_vendor == p->hostbridge_vendor && info.id_device == p->hostbridge_device && rdev->pdev->vendor == p->chip_vendor && rdev->pdev->device == p->chip_device && rdev->pdev->subsystem_vendor == p->subsys_vendor && rdev->pdev->subsystem_device == p->subsys_device) { default_mode = p->default_mode; } ++p; } if (radeon_agpmode > 0) { if ((radeon_agpmode < (is_v3 ? 4 : 1)) || (radeon_agpmode > (is_v3 ? 8 : 4)) || (radeon_agpmode & (radeon_agpmode - 1))) { DRM_ERROR("Illegal AGP Mode: %d (valid %s), leaving at %d\n", radeon_agpmode, is_v3 ? "4, 8" : "1, 2, 4", default_mode); radeon_agpmode = default_mode; } else { DRM_INFO("AGP mode requested: %d\n", radeon_agpmode); } } else { radeon_agpmode = default_mode; } mode.mode &= ~RADEON_AGP_MODE_MASK; if (is_v3) { switch (radeon_agpmode) { case 8: mode.mode |= RADEON_AGPv3_8X_MODE; break; case 4: default: mode.mode |= RADEON_AGPv3_4X_MODE; break; } } else { switch (radeon_agpmode) { case 4: mode.mode |= RADEON_AGP_4X_MODE; break; case 2: mode.mode |= RADEON_AGP_2X_MODE; break; case 1: default: mode.mode |= RADEON_AGP_1X_MODE; break; } } mode.mode &= ~RADEON_AGP_FW_MODE; /* disable fw */ ret = drm_agp_enable(rdev->ddev, mode); if (ret) { DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); drm_agp_release(rdev->ddev); return ret; } rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base; rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20; rdev->mc.gtt_start = rdev->mc.agp_base; rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1; dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n", rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end); /* workaround some hw issues */ if (rdev->family < CHIP_R200) { WREG32(RADEON_AGP_CNTL, RREG32(RADEON_AGP_CNTL) | 
0x000e0000); } return 0; #else return 0; #endif } void radeon_agp_resume(struct radeon_device *rdev) { #if __OS_HAS_AGP int r; if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); if (r) dev_warn(rdev->dev, "radeon AGP reinit failed\n"); } #endif } void radeon_agp_fini(struct radeon_device *rdev) { #if __OS_HAS_AGP if (rdev->ddev->agp && rdev->ddev->agp->acquired) { drm_agp_release(rdev->ddev); } #endif } void radeon_agp_suspend(struct radeon_device *rdev) { radeon_agp_fini(rdev); }
gpl-2.0
fkfk/linux_gt-i9000-gb
arch/mips/pci/fixup-pmcmsp.c
9534
10453
/* * PMC-Sierra MSP board specific pci fixups. * * Copyright 2001 MontaVista Software Inc. * Copyright 2005-2007 PMC-Sierra, Inc * * Author: MontaVista Software, Inc. * ppopov@mvista.com or source@mvista.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifdef CONFIG_PCI #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/byteorder.h> #include <msp_pci.h> #include <msp_cic_int.h> /* PCI interrupt pins */ #define IRQ4 MSP_INT_EXT4 #define IRQ5 MSP_INT_EXT5 #define IRQ6 MSP_INT_EXT6 #if defined(CONFIG_PMC_MSP7120_GW) /* Garibaldi Board IRQ wiring to PCI slots */ static char irq_tab[][5] __initdata = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */ {0, 0, 0, 0, 0 }, /* 6 (AD[16]): Unused */ {0, 0, 0, 0, 0 }, /* 7 (AD[17]): Unused */ {0, 0, 0, 0, 0 }, /* 8 (AD[18]): Unused */ {0, 0, 0, 0, 0 }, /* 9 (AD[19]): Unused */ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */ {0, IRQ4, IRQ4, 0, 0 }, /* 18 (AD[28]): slot 0 */ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */ {0, IRQ5, IRQ5, 0, 0 }, /* 20 (AD[30]): slot 1 */ {0, IRQ6, IRQ6, 0, 0 } /* 21 (AD[31]): slot 2 */ }; #elif defined(CONFIG_PMC_MSP7120_EVAL) /* MSP7120 Eval Board IRQ wiring to PCI slots */ static char irq_tab[][5] __initdata = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* 
(AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */ {0, IRQ6, IRQ6, 0, 0 }, /* 6 (AD[16]): slot 3 (mini) */ {0, IRQ5, IRQ5, 0, 0 }, /* 7 (AD[17]): slot 2 (mini) */ {0, IRQ4, IRQ4, IRQ4, IRQ4}, /* 8 (AD[18]): slot 0 (PCI) */ {0, IRQ5, IRQ5, IRQ5, IRQ5}, /* 9 (AD[19]): slot 1 (PCI) */ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */ {0, 0, 0, 0, 0 }, /* 18 (AD[28]): Unused */ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */ {0, 0, 0, 0, 0 }, /* 20 (AD[30]): Unused */ {0, 0, 0, 0, 0 } /* 21 (AD[31]): Unused */ }; #else /* Unknown board -- don't assign any IRQs */ static char irq_tab[][5] __initdata = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused 
*/ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */ {0, 0, 0, 0, 0 }, /* 6 (AD[16]): Unused */ {0, 0, 0, 0, 0 }, /* 7 (AD[17]): Unused */ {0, 0, 0, 0, 0 }, /* 8 (AD[18]): Unused */ {0, 0, 0, 0, 0 }, /* 9 (AD[19]): Unused */ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */ {0, 0, 0, 0, 0 }, /* 18 (AD[28]): Unused */ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */ {0, 0, 0, 0, 0 }, /* 20 (AD[30]): Unused */ {0, 0, 0, 0, 0 } /* 21 (AD[31]): Unused */ }; #endif /***************************************************************************** * * FUNCTION: pcibios_plat_dev_init * _________________________________________________________________________ * * DESCRIPTION: Perform platform specific device initialization at * pci_enable_device() time. * None are needed for the MSP7120 PCI Controller. * * INPUTS: dev - structure describing the PCI device * * OUTPUTS: none * * RETURNS: PCIBIOS_SUCCESSFUL * ****************************************************************************/ int pcibios_plat_dev_init(struct pci_dev *dev) { return PCIBIOS_SUCCESSFUL; } /***************************************************************************** * * FUNCTION: pcibios_map_irq * _________________________________________________________________________ * * DESCRIPTION: Perform board supplied PCI IRQ mapping routine. * * INPUTS: dev - unused * slot - PCI slot. Identified by which bit of the AD[] bus * drives the IDSEL line. AD[10] is 0, AD[31] is * slot 21. * pin - numbered using the scheme of the PCI_INTERRUPT_PIN * field of the config header. 
* * OUTPUTS: none * * RETURNS: IRQ number * ****************************************************************************/ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { #if !defined(CONFIG_PMC_MSP7120_GW) && !defined(CONFIG_PMC_MSP7120_EVAL) printk(KERN_WARNING "PCI: unknown board, no PCI IRQs assigned.\n"); #endif printk(KERN_WARNING "PCI: irq_tab returned %d for slot=%d pin=%d\n", irq_tab[slot][pin], slot, pin); return irq_tab[slot][pin]; } #endif /* CONFIG_PCI */
gpl-2.0
eoghan2t9/Oppo-Find5-4.2-Kernel
arch/mips/pci/fixup-pmcmsp.c
9534
10453
/* * PMC-Sierra MSP board specific pci fixups. * * Copyright 2001 MontaVista Software Inc. * Copyright 2005-2007 PMC-Sierra, Inc * * Author: MontaVista Software, Inc. * ppopov@mvista.com or source@mvista.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifdef CONFIG_PCI #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/byteorder.h> #include <msp_pci.h> #include <msp_cic_int.h> /* PCI interrupt pins */ #define IRQ4 MSP_INT_EXT4 #define IRQ5 MSP_INT_EXT5 #define IRQ6 MSP_INT_EXT6 #if defined(CONFIG_PMC_MSP7120_GW) /* Garibaldi Board IRQ wiring to PCI slots */ static char irq_tab[][5] __initdata = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */ {0, 0, 0, 0, 0 }, /* 6 (AD[16]): Unused */ {0, 0, 0, 0, 0 }, /* 7 (AD[17]): Unused */ {0, 0, 0, 0, 0 }, /* 8 (AD[18]): Unused */ {0, 0, 0, 0, 0 }, /* 9 (AD[19]): Unused */ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */ {0, IRQ4, IRQ4, 0, 0 }, /* 18 (AD[28]): slot 0 */ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */ {0, IRQ5, IRQ5, 0, 0 }, /* 20 (AD[30]): slot 1 */ {0, IRQ6, IRQ6, 0, 0 } /* 21 (AD[31]): slot 2 */ }; #elif defined(CONFIG_PMC_MSP7120_EVAL) /* MSP7120 Eval Board IRQ wiring to PCI slots */ static char irq_tab[][5] __initdata = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* 
(AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */ {0, IRQ6, IRQ6, 0, 0 }, /* 6 (AD[16]): slot 3 (mini) */ {0, IRQ5, IRQ5, 0, 0 }, /* 7 (AD[17]): slot 2 (mini) */ {0, IRQ4, IRQ4, IRQ4, IRQ4}, /* 8 (AD[18]): slot 0 (PCI) */ {0, IRQ5, IRQ5, IRQ5, IRQ5}, /* 9 (AD[19]): slot 1 (PCI) */ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */ {0, 0, 0, 0, 0 }, /* 18 (AD[28]): Unused */ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */ {0, 0, 0, 0, 0 }, /* 20 (AD[30]): Unused */ {0, 0, 0, 0, 0 } /* 21 (AD[31]): Unused */ }; #else /* Unknown board -- don't assign any IRQs */ static char irq_tab[][5] __initdata = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused 
*/ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */ {0, 0, 0, 0, 0 }, /* 6 (AD[16]): Unused */ {0, 0, 0, 0, 0 }, /* 7 (AD[17]): Unused */ {0, 0, 0, 0, 0 }, /* 8 (AD[18]): Unused */ {0, 0, 0, 0, 0 }, /* 9 (AD[19]): Unused */ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */ {0, 0, 0, 0, 0 }, /* 18 (AD[28]): Unused */ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */ {0, 0, 0, 0, 0 }, /* 20 (AD[30]): Unused */ {0, 0, 0, 0, 0 } /* 21 (AD[31]): Unused */ }; #endif /***************************************************************************** * * FUNCTION: pcibios_plat_dev_init * _________________________________________________________________________ * * DESCRIPTION: Perform platform specific device initialization at * pci_enable_device() time. * None are needed for the MSP7120 PCI Controller. * * INPUTS: dev - structure describing the PCI device * * OUTPUTS: none * * RETURNS: PCIBIOS_SUCCESSFUL * ****************************************************************************/ int pcibios_plat_dev_init(struct pci_dev *dev) { return PCIBIOS_SUCCESSFUL; } /***************************************************************************** * * FUNCTION: pcibios_map_irq * _________________________________________________________________________ * * DESCRIPTION: Perform board supplied PCI IRQ mapping routine. * * INPUTS: dev - unused * slot - PCI slot. Identified by which bit of the AD[] bus * drives the IDSEL line. AD[10] is 0, AD[31] is * slot 21. * pin - numbered using the scheme of the PCI_INTERRUPT_PIN * field of the config header. 
* * OUTPUTS: none * * RETURNS: IRQ number * ****************************************************************************/ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { #if !defined(CONFIG_PMC_MSP7120_GW) && !defined(CONFIG_PMC_MSP7120_EVAL) printk(KERN_WARNING "PCI: unknown board, no PCI IRQs assigned.\n"); #endif printk(KERN_WARNING "PCI: irq_tab returned %d for slot=%d pin=%d\n", irq_tab[slot][pin], slot, pin); return irq_tab[slot][pin]; } #endif /* CONFIG_PCI */
gpl-2.0
SlimRoms/kernel_samsung_espresso10
sound/pci/ice1712/revo.c
10046
15797
/* * ALSA driver for ICEnsemble ICE1712 (Envy24) * * Lowlevel functions for M-Audio Audiophile 192, Revolution 7.1 and 5.1 * * Copyright (c) 2003 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/slab.h> #include <sound/core.h> #include "ice1712.h" #include "envy24ht.h" #include "revo.h" /* a non-standard I2C device for revo51 */ struct revo51_spec { struct snd_i2c_device *dev; struct snd_pt2258 *pt2258; }; static void revo_i2s_mclk_changed(struct snd_ice1712 *ice) { /* assert PRST# to converters; MT05 bit 7 */ outb(inb(ICEMT1724(ice, AC97_CMD)) | 0x80, ICEMT1724(ice, AC97_CMD)); mdelay(5); /* deassert PRST# */ outb(inb(ICEMT1724(ice, AC97_CMD)) & ~0x80, ICEMT1724(ice, AC97_CMD)); } /* * change the rate of Envy24HT, AK4355 and AK4381 */ static void revo_set_rate_val(struct snd_akm4xxx *ak, unsigned int rate) { unsigned char old, tmp, dfs; int reg, shift; if (rate == 0) /* no hint - S/PDIF input is master, simply return */ return; /* adjust DFS on codecs */ if (rate > 96000) dfs = 2; else if (rate > 48000) dfs = 1; else dfs = 0; if (ak->type == SND_AK4355 || ak->type == SND_AK4358) { reg = 2; shift = 4; } else { reg = 1; shift = 3; } tmp = snd_akm4xxx_get(ak, 0, reg); old = (tmp >> 
shift) & 0x03; if (old == dfs) return; /* reset DFS */ snd_akm4xxx_reset(ak, 1); tmp = snd_akm4xxx_get(ak, 0, reg); tmp &= ~(0x03 << shift); tmp |= dfs << shift; /* snd_akm4xxx_write(ak, 0, reg, tmp); */ snd_akm4xxx_set(ak, 0, reg, tmp); /* value is written in reset(0) */ snd_akm4xxx_reset(ak, 0); } /* * I2C access to the PT2258 volume controller on GPIO 6/7 (Revolution 5.1) */ static void revo_i2c_start(struct snd_i2c_bus *bus) { struct snd_ice1712 *ice = bus->private_data; snd_ice1712_save_gpio_status(ice); } static void revo_i2c_stop(struct snd_i2c_bus *bus) { struct snd_ice1712 *ice = bus->private_data; snd_ice1712_restore_gpio_status(ice); } static void revo_i2c_direction(struct snd_i2c_bus *bus, int clock, int data) { struct snd_ice1712 *ice = bus->private_data; unsigned int mask, val; val = 0; if (clock) val |= VT1724_REVO_I2C_CLOCK; /* write SCL */ if (data) val |= VT1724_REVO_I2C_DATA; /* write SDA */ mask = VT1724_REVO_I2C_CLOCK | VT1724_REVO_I2C_DATA; ice->gpio.direction &= ~mask; ice->gpio.direction |= val; snd_ice1712_gpio_set_dir(ice, ice->gpio.direction); snd_ice1712_gpio_set_mask(ice, ~mask); } static void revo_i2c_setlines(struct snd_i2c_bus *bus, int clk, int data) { struct snd_ice1712 *ice = bus->private_data; unsigned int val = 0; if (clk) val |= VT1724_REVO_I2C_CLOCK; if (data) val |= VT1724_REVO_I2C_DATA; snd_ice1712_gpio_write_bits(ice, VT1724_REVO_I2C_DATA | VT1724_REVO_I2C_CLOCK, val); udelay(5); } static int revo_i2c_getdata(struct snd_i2c_bus *bus, int ack) { struct snd_ice1712 *ice = bus->private_data; int bit; if (ack) udelay(5); bit = snd_ice1712_gpio_read_bits(ice, VT1724_REVO_I2C_DATA) ? 
1 : 0; return bit; } static struct snd_i2c_bit_ops revo51_bit_ops = { .start = revo_i2c_start, .stop = revo_i2c_stop, .direction = revo_i2c_direction, .setlines = revo_i2c_setlines, .getdata = revo_i2c_getdata, }; static int revo51_i2c_init(struct snd_ice1712 *ice, struct snd_pt2258 *pt) { struct revo51_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; ice->spec = spec; /* create the I2C bus */ err = snd_i2c_bus_create(ice->card, "ICE1724 GPIO6", NULL, &ice->i2c); if (err < 0) return err; ice->i2c->private_data = ice; ice->i2c->hw_ops.bit = &revo51_bit_ops; /* create the I2C device */ err = snd_i2c_device_create(ice->i2c, "PT2258", 0x40, &spec->dev); if (err < 0) return err; pt->card = ice->card; pt->i2c_bus = ice->i2c; pt->i2c_dev = spec->dev; spec->pt2258 = pt; snd_pt2258_reset(pt); return 0; } /* * initialize the chips on M-Audio Revolution cards */ #define AK_DAC(xname,xch) { .name = xname, .num_channels = xch } static const struct snd_akm4xxx_dac_channel revo71_front[] = { { .name = "PCM Playback Volume", .num_channels = 2, /* front channels DAC supports muting */ .switch_name = "PCM Playback Switch", }, }; static const struct snd_akm4xxx_dac_channel revo71_surround[] = { AK_DAC("PCM Center Playback Volume", 1), AK_DAC("PCM LFE Playback Volume", 1), AK_DAC("PCM Side Playback Volume", 2), AK_DAC("PCM Rear Playback Volume", 2), }; static const struct snd_akm4xxx_dac_channel revo51_dac[] = { AK_DAC("PCM Playback Volume", 2), AK_DAC("PCM Center Playback Volume", 1), AK_DAC("PCM LFE Playback Volume", 1), AK_DAC("PCM Rear Playback Volume", 2), AK_DAC("PCM Headphone Volume", 2), }; static const char *revo51_adc_input_names[] = { "Mic", "Line", "CD", NULL }; static const struct snd_akm4xxx_adc_channel revo51_adc[] = { { .name = "PCM Capture Volume", .switch_name = "PCM Capture Switch", .num_channels = 2, .input_names = revo51_adc_input_names }, }; static struct snd_akm4xxx akm_revo_front __devinitdata = { .type = SND_AK4381, 
.num_dacs = 2, .ops = { .set_rate_val = revo_set_rate_val }, .dac_info = revo71_front, }; static struct snd_ak4xxx_private akm_revo_front_priv __devinitdata = { .caddr = 1, .cif = 0, .data_mask = VT1724_REVO_CDOUT, .clk_mask = VT1724_REVO_CCLK, .cs_mask = VT1724_REVO_CS0 | VT1724_REVO_CS1 | VT1724_REVO_CS2, .cs_addr = VT1724_REVO_CS0 | VT1724_REVO_CS2, .cs_none = VT1724_REVO_CS0 | VT1724_REVO_CS1 | VT1724_REVO_CS2, .add_flags = VT1724_REVO_CCLK, /* high at init */ .mask_flags = 0, }; static struct snd_akm4xxx akm_revo_surround __devinitdata = { .type = SND_AK4355, .idx_offset = 1, .num_dacs = 6, .ops = { .set_rate_val = revo_set_rate_val }, .dac_info = revo71_surround, }; static struct snd_ak4xxx_private akm_revo_surround_priv __devinitdata = { .caddr = 3, .cif = 0, .data_mask = VT1724_REVO_CDOUT, .clk_mask = VT1724_REVO_CCLK, .cs_mask = VT1724_REVO_CS0 | VT1724_REVO_CS1 | VT1724_REVO_CS2, .cs_addr = VT1724_REVO_CS0 | VT1724_REVO_CS1, .cs_none = VT1724_REVO_CS0 | VT1724_REVO_CS1 | VT1724_REVO_CS2, .add_flags = VT1724_REVO_CCLK, /* high at init */ .mask_flags = 0, }; static struct snd_akm4xxx akm_revo51 __devinitdata = { .type = SND_AK4358, .num_dacs = 8, .ops = { .set_rate_val = revo_set_rate_val }, .dac_info = revo51_dac, }; static struct snd_ak4xxx_private akm_revo51_priv __devinitdata = { .caddr = 2, .cif = 0, .data_mask = VT1724_REVO_CDOUT, .clk_mask = VT1724_REVO_CCLK, .cs_mask = VT1724_REVO_CS0 | VT1724_REVO_CS1, .cs_addr = VT1724_REVO_CS1, .cs_none = VT1724_REVO_CS0 | VT1724_REVO_CS1, .add_flags = VT1724_REVO_CCLK, /* high at init */ .mask_flags = 0, }; static struct snd_akm4xxx akm_revo51_adc __devinitdata = { .type = SND_AK5365, .num_adcs = 2, .adc_info = revo51_adc, }; static struct snd_ak4xxx_private akm_revo51_adc_priv __devinitdata = { .caddr = 2, .cif = 0, .data_mask = VT1724_REVO_CDOUT, .clk_mask = VT1724_REVO_CCLK, .cs_mask = VT1724_REVO_CS0 | VT1724_REVO_CS1, .cs_addr = VT1724_REVO_CS0, .cs_none = VT1724_REVO_CS0 | VT1724_REVO_CS1, .add_flags = 
VT1724_REVO_CCLK, /* high at init */ .mask_flags = 0, }; static struct snd_pt2258 ptc_revo51_volume; /* AK4358 for AP192 DAC, AK5385A for ADC */ static void ap192_set_rate_val(struct snd_akm4xxx *ak, unsigned int rate) { struct snd_ice1712 *ice = ak->private_data[0]; int dfs; revo_set_rate_val(ak, rate); /* reset CKS */ snd_ice1712_gpio_write_bits(ice, 1 << 8, rate > 96000 ? 1 << 8 : 0); /* reset DFS pins of AK5385A for ADC, too */ if (rate > 96000) dfs = 2; else if (rate > 48000) dfs = 1; else dfs = 0; snd_ice1712_gpio_write_bits(ice, 3 << 9, dfs << 9); /* reset ADC */ snd_ice1712_gpio_write_bits(ice, 1 << 11, 0); snd_ice1712_gpio_write_bits(ice, 1 << 11, 1 << 11); } static const struct snd_akm4xxx_dac_channel ap192_dac[] = { AK_DAC("PCM Playback Volume", 2) }; static struct snd_akm4xxx akm_ap192 __devinitdata = { .type = SND_AK4358, .num_dacs = 2, .ops = { .set_rate_val = ap192_set_rate_val }, .dac_info = ap192_dac, }; static struct snd_ak4xxx_private akm_ap192_priv __devinitdata = { .caddr = 2, .cif = 0, .data_mask = VT1724_REVO_CDOUT, .clk_mask = VT1724_REVO_CCLK, .cs_mask = VT1724_REVO_CS0 | VT1724_REVO_CS1, .cs_addr = VT1724_REVO_CS1, .cs_none = VT1724_REVO_CS0 | VT1724_REVO_CS1, .add_flags = VT1724_REVO_CCLK, /* high at init */ .mask_flags = 0, }; /* AK4114 support on Audiophile 192 */ /* CDTO (pin 32) -- GPIO2 pin 52 * CDTI (pin 33) -- GPIO3 pin 53 (shared with AK4358) * CCLK (pin 34) -- GPIO1 pin 51 (shared with AK4358) * CSN (pin 35) -- GPIO7 pin 59 */ #define AK4114_ADDR 0x02 static void write_data(struct snd_ice1712 *ice, unsigned int gpio, unsigned int data, int idx) { for (; idx >= 0; idx--) { /* drop clock */ gpio &= ~VT1724_REVO_CCLK; snd_ice1712_gpio_write(ice, gpio); udelay(1); /* set data */ if (data & (1 << idx)) gpio |= VT1724_REVO_CDOUT; else gpio &= ~VT1724_REVO_CDOUT; snd_ice1712_gpio_write(ice, gpio); udelay(1); /* raise clock */ gpio |= VT1724_REVO_CCLK; snd_ice1712_gpio_write(ice, gpio); udelay(1); } } static unsigned char 
read_data(struct snd_ice1712 *ice, unsigned int gpio, int idx) { unsigned char data = 0; for (; idx >= 0; idx--) { /* drop clock */ gpio &= ~VT1724_REVO_CCLK; snd_ice1712_gpio_write(ice, gpio); udelay(1); /* read data */ if (snd_ice1712_gpio_read(ice) & VT1724_REVO_CDIN) data |= (1 << idx); udelay(1); /* raise clock */ gpio |= VT1724_REVO_CCLK; snd_ice1712_gpio_write(ice, gpio); udelay(1); } return data; } static unsigned int ap192_4wire_start(struct snd_ice1712 *ice) { unsigned int tmp; snd_ice1712_save_gpio_status(ice); tmp = snd_ice1712_gpio_read(ice); tmp |= VT1724_REVO_CCLK; /* high at init */ tmp |= VT1724_REVO_CS0; tmp &= ~VT1724_REVO_CS1; snd_ice1712_gpio_write(ice, tmp); udelay(1); return tmp; } static void ap192_4wire_finish(struct snd_ice1712 *ice, unsigned int tmp) { tmp |= VT1724_REVO_CS1; tmp |= VT1724_REVO_CS0; snd_ice1712_gpio_write(ice, tmp); udelay(1); snd_ice1712_restore_gpio_status(ice); } static void ap192_ak4114_write(void *private_data, unsigned char addr, unsigned char data) { struct snd_ice1712 *ice = private_data; unsigned int tmp, addrdata; tmp = ap192_4wire_start(ice); addrdata = (AK4114_ADDR << 6) | 0x20 | (addr & 0x1f); addrdata = (addrdata << 8) | data; write_data(ice, tmp, addrdata, 15); ap192_4wire_finish(ice, tmp); } static unsigned char ap192_ak4114_read(void *private_data, unsigned char addr) { struct snd_ice1712 *ice = private_data; unsigned int tmp; unsigned char data; tmp = ap192_4wire_start(ice); write_data(ice, tmp, (AK4114_ADDR << 6) | (addr & 0x1f), 7); data = read_data(ice, tmp, 7); ap192_4wire_finish(ice, tmp); return data; } static int __devinit ap192_ak4114_init(struct snd_ice1712 *ice) { static const unsigned char ak4114_init_vals[] = { AK4114_RST | AK4114_PWN | AK4114_OCKS0 | AK4114_OCKS1, AK4114_DIF_I24I2S, AK4114_TX1E, AK4114_EFH_1024 | AK4114_DIT | AK4114_IPS(1), 0, 0 }; static const unsigned char ak4114_init_txcsb[] = { 0x41, 0x02, 0x2c, 0x00, 0x00 }; struct ak4114 *ak; int err; err = snd_ak4114_create(ice->card, 
ap192_ak4114_read, ap192_ak4114_write, ak4114_init_vals, ak4114_init_txcsb, ice, &ak); /* AK4114 in Revo cannot detect external rate correctly. * No reason to stop capture stream due to incorrect checks */ ak->check_flags = AK4114_CHECK_NO_RATE; return 0; /* error ignored; it's no fatal error */ } static int __devinit revo_init(struct snd_ice1712 *ice) { struct snd_akm4xxx *ak; int err; /* determine I2C, DACs and ADCs */ switch (ice->eeprom.subvendor) { case VT1724_SUBDEVICE_REVOLUTION71: ice->num_total_dacs = 8; ice->num_total_adcs = 2; ice->gpio.i2s_mclk_changed = revo_i2s_mclk_changed; break; case VT1724_SUBDEVICE_REVOLUTION51: ice->num_total_dacs = 8; ice->num_total_adcs = 2; break; case VT1724_SUBDEVICE_AUDIOPHILE192: ice->num_total_dacs = 2; ice->num_total_adcs = 2; break; default: snd_BUG(); return -EINVAL; } /* second stage of initialization, analog parts and others */ ak = ice->akm = kcalloc(2, sizeof(struct snd_akm4xxx), GFP_KERNEL); if (! ak) return -ENOMEM; switch (ice->eeprom.subvendor) { case VT1724_SUBDEVICE_REVOLUTION71: ice->akm_codecs = 2; err = snd_ice1712_akm4xxx_init(ak, &akm_revo_front, &akm_revo_front_priv, ice); if (err < 0) return err; err = snd_ice1712_akm4xxx_init(ak+1, &akm_revo_surround, &akm_revo_surround_priv, ice); if (err < 0) return err; /* unmute all codecs */ snd_ice1712_gpio_write_bits(ice, VT1724_REVO_MUTE, VT1724_REVO_MUTE); break; case VT1724_SUBDEVICE_REVOLUTION51: ice->akm_codecs = 2; err = snd_ice1712_akm4xxx_init(ak, &akm_revo51, &akm_revo51_priv, ice); if (err < 0) return err; err = snd_ice1712_akm4xxx_init(ak+1, &akm_revo51_adc, &akm_revo51_adc_priv, ice); if (err < 0) return err; err = revo51_i2c_init(ice, &ptc_revo51_volume); if (err < 0) return err; /* unmute all codecs */ snd_ice1712_gpio_write_bits(ice, VT1724_REVO_MUTE, VT1724_REVO_MUTE); break; case VT1724_SUBDEVICE_AUDIOPHILE192: ice->akm_codecs = 1; err = snd_ice1712_akm4xxx_init(ak, &akm_ap192, &akm_ap192_priv, ice); if (err < 0) return err; /* unmute all 
codecs */ snd_ice1712_gpio_write_bits(ice, VT1724_REVO_MUTE, VT1724_REVO_MUTE); break; } return 0; } static int __devinit revo_add_controls(struct snd_ice1712 *ice) { struct revo51_spec *spec; int err; switch (ice->eeprom.subvendor) { case VT1724_SUBDEVICE_REVOLUTION71: err = snd_ice1712_akm4xxx_build_controls(ice); if (err < 0) return err; break; case VT1724_SUBDEVICE_REVOLUTION51: err = snd_ice1712_akm4xxx_build_controls(ice); if (err < 0) return err; spec = ice->spec; err = snd_pt2258_build_controls(spec->pt2258); if (err < 0) return err; break; case VT1724_SUBDEVICE_AUDIOPHILE192: err = snd_ice1712_akm4xxx_build_controls(ice); if (err < 0) return err; err = ap192_ak4114_init(ice); if (err < 0) return err; break; } return 0; } /* entry point */ struct snd_ice1712_card_info snd_vt1724_revo_cards[] __devinitdata = { { .subvendor = VT1724_SUBDEVICE_REVOLUTION71, .name = "M Audio Revolution-7.1", .model = "revo71", .chip_init = revo_init, .build_controls = revo_add_controls, }, { .subvendor = VT1724_SUBDEVICE_REVOLUTION51, .name = "M Audio Revolution-5.1", .model = "revo51", .chip_init = revo_init, .build_controls = revo_add_controls, }, { .subvendor = VT1724_SUBDEVICE_AUDIOPHILE192, .name = "M Audio Audiophile192", .model = "ap192", .chip_init = revo_init, .build_controls = revo_add_controls, }, { } /* terminator */ };
gpl-2.0
uwehermann/easybox-904-xdsl-firmware
linux/linux-2.6.32.32/sound/pci/ice1712/revo.c
10046
15797
/* * ALSA driver for ICEnsemble ICE1712 (Envy24) * * Lowlevel functions for M-Audio Audiophile 192, Revolution 7.1 and 5.1 * * Copyright (c) 2003 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/slab.h> #include <sound/core.h> #include "ice1712.h" #include "envy24ht.h" #include "revo.h" /* a non-standard I2C device for revo51 */ struct revo51_spec { struct snd_i2c_device *dev; struct snd_pt2258 *pt2258; }; static void revo_i2s_mclk_changed(struct snd_ice1712 *ice) { /* assert PRST# to converters; MT05 bit 7 */ outb(inb(ICEMT1724(ice, AC97_CMD)) | 0x80, ICEMT1724(ice, AC97_CMD)); mdelay(5); /* deassert PRST# */ outb(inb(ICEMT1724(ice, AC97_CMD)) & ~0x80, ICEMT1724(ice, AC97_CMD)); } /* * change the rate of Envy24HT, AK4355 and AK4381 */ static void revo_set_rate_val(struct snd_akm4xxx *ak, unsigned int rate) { unsigned char old, tmp, dfs; int reg, shift; if (rate == 0) /* no hint - S/PDIF input is master, simply return */ return; /* adjust DFS on codecs */ if (rate > 96000) dfs = 2; else if (rate > 48000) dfs = 1; else dfs = 0; if (ak->type == SND_AK4355 || ak->type == SND_AK4358) { reg = 2; shift = 4; } else { reg = 1; shift = 3; } tmp = snd_akm4xxx_get(ak, 0, reg); old = (tmp >> 
shift) & 0x03; if (old == dfs) return; /* reset DFS */ snd_akm4xxx_reset(ak, 1); tmp = snd_akm4xxx_get(ak, 0, reg); tmp &= ~(0x03 << shift); tmp |= dfs << shift; /* snd_akm4xxx_write(ak, 0, reg, tmp); */ snd_akm4xxx_set(ak, 0, reg, tmp); /* value is written in reset(0) */ snd_akm4xxx_reset(ak, 0); } /* * I2C access to the PT2258 volume controller on GPIO 6/7 (Revolution 5.1) */ static void revo_i2c_start(struct snd_i2c_bus *bus) { struct snd_ice1712 *ice = bus->private_data; snd_ice1712_save_gpio_status(ice); } static void revo_i2c_stop(struct snd_i2c_bus *bus) { struct snd_ice1712 *ice = bus->private_data; snd_ice1712_restore_gpio_status(ice); } static void revo_i2c_direction(struct snd_i2c_bus *bus, int clock, int data) { struct snd_ice1712 *ice = bus->private_data; unsigned int mask, val; val = 0; if (clock) val |= VT1724_REVO_I2C_CLOCK; /* write SCL */ if (data) val |= VT1724_REVO_I2C_DATA; /* write SDA */ mask = VT1724_REVO_I2C_CLOCK | VT1724_REVO_I2C_DATA; ice->gpio.direction &= ~mask; ice->gpio.direction |= val; snd_ice1712_gpio_set_dir(ice, ice->gpio.direction); snd_ice1712_gpio_set_mask(ice, ~mask); } static void revo_i2c_setlines(struct snd_i2c_bus *bus, int clk, int data) { struct snd_ice1712 *ice = bus->private_data; unsigned int val = 0; if (clk) val |= VT1724_REVO_I2C_CLOCK; if (data) val |= VT1724_REVO_I2C_DATA; snd_ice1712_gpio_write_bits(ice, VT1724_REVO_I2C_DATA | VT1724_REVO_I2C_CLOCK, val); udelay(5); } static int revo_i2c_getdata(struct snd_i2c_bus *bus, int ack) { struct snd_ice1712 *ice = bus->private_data; int bit; if (ack) udelay(5); bit = snd_ice1712_gpio_read_bits(ice, VT1724_REVO_I2C_DATA) ? 
1 : 0; return bit; } static struct snd_i2c_bit_ops revo51_bit_ops = { .start = revo_i2c_start, .stop = revo_i2c_stop, .direction = revo_i2c_direction, .setlines = revo_i2c_setlines, .getdata = revo_i2c_getdata, }; static int revo51_i2c_init(struct snd_ice1712 *ice, struct snd_pt2258 *pt) { struct revo51_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; ice->spec = spec; /* create the I2C bus */ err = snd_i2c_bus_create(ice->card, "ICE1724 GPIO6", NULL, &ice->i2c); if (err < 0) return err; ice->i2c->private_data = ice; ice->i2c->hw_ops.bit = &revo51_bit_ops; /* create the I2C device */ err = snd_i2c_device_create(ice->i2c, "PT2258", 0x40, &spec->dev); if (err < 0) return err; pt->card = ice->card; pt->i2c_bus = ice->i2c; pt->i2c_dev = spec->dev; spec->pt2258 = pt; snd_pt2258_reset(pt); return 0; } /* * initialize the chips on M-Audio Revolution cards */ #define AK_DAC(xname,xch) { .name = xname, .num_channels = xch } static const struct snd_akm4xxx_dac_channel revo71_front[] = { { .name = "PCM Playback Volume", .num_channels = 2, /* front channels DAC supports muting */ .switch_name = "PCM Playback Switch", }, }; static const struct snd_akm4xxx_dac_channel revo71_surround[] = { AK_DAC("PCM Center Playback Volume", 1), AK_DAC("PCM LFE Playback Volume", 1), AK_DAC("PCM Side Playback Volume", 2), AK_DAC("PCM Rear Playback Volume", 2), }; static const struct snd_akm4xxx_dac_channel revo51_dac[] = { AK_DAC("PCM Playback Volume", 2), AK_DAC("PCM Center Playback Volume", 1), AK_DAC("PCM LFE Playback Volume", 1), AK_DAC("PCM Rear Playback Volume", 2), AK_DAC("PCM Headphone Volume", 2), }; static const char *revo51_adc_input_names[] = { "Mic", "Line", "CD", NULL }; static const struct snd_akm4xxx_adc_channel revo51_adc[] = { { .name = "PCM Capture Volume", .switch_name = "PCM Capture Switch", .num_channels = 2, .input_names = revo51_adc_input_names }, }; static struct snd_akm4xxx akm_revo_front __devinitdata = { .type = SND_AK4381, 
.num_dacs = 2, .ops = { .set_rate_val = revo_set_rate_val }, .dac_info = revo71_front, }; static struct snd_ak4xxx_private akm_revo_front_priv __devinitdata = { .caddr = 1, .cif = 0, .data_mask = VT1724_REVO_CDOUT, .clk_mask = VT1724_REVO_CCLK, .cs_mask = VT1724_REVO_CS0 | VT1724_REVO_CS1 | VT1724_REVO_CS2, .cs_addr = VT1724_REVO_CS0 | VT1724_REVO_CS2, .cs_none = VT1724_REVO_CS0 | VT1724_REVO_CS1 | VT1724_REVO_CS2, .add_flags = VT1724_REVO_CCLK, /* high at init */ .mask_flags = 0, }; static struct snd_akm4xxx akm_revo_surround __devinitdata = { .type = SND_AK4355, .idx_offset = 1, .num_dacs = 6, .ops = { .set_rate_val = revo_set_rate_val }, .dac_info = revo71_surround, }; static struct snd_ak4xxx_private akm_revo_surround_priv __devinitdata = { .caddr = 3, .cif = 0, .data_mask = VT1724_REVO_CDOUT, .clk_mask = VT1724_REVO_CCLK, .cs_mask = VT1724_REVO_CS0 | VT1724_REVO_CS1 | VT1724_REVO_CS2, .cs_addr = VT1724_REVO_CS0 | VT1724_REVO_CS1, .cs_none = VT1724_REVO_CS0 | VT1724_REVO_CS1 | VT1724_REVO_CS2, .add_flags = VT1724_REVO_CCLK, /* high at init */ .mask_flags = 0, }; static struct snd_akm4xxx akm_revo51 __devinitdata = { .type = SND_AK4358, .num_dacs = 8, .ops = { .set_rate_val = revo_set_rate_val }, .dac_info = revo51_dac, }; static struct snd_ak4xxx_private akm_revo51_priv __devinitdata = { .caddr = 2, .cif = 0, .data_mask = VT1724_REVO_CDOUT, .clk_mask = VT1724_REVO_CCLK, .cs_mask = VT1724_REVO_CS0 | VT1724_REVO_CS1, .cs_addr = VT1724_REVO_CS1, .cs_none = VT1724_REVO_CS0 | VT1724_REVO_CS1, .add_flags = VT1724_REVO_CCLK, /* high at init */ .mask_flags = 0, }; static struct snd_akm4xxx akm_revo51_adc __devinitdata = { .type = SND_AK5365, .num_adcs = 2, .adc_info = revo51_adc, }; static struct snd_ak4xxx_private akm_revo51_adc_priv __devinitdata = { .caddr = 2, .cif = 0, .data_mask = VT1724_REVO_CDOUT, .clk_mask = VT1724_REVO_CCLK, .cs_mask = VT1724_REVO_CS0 | VT1724_REVO_CS1, .cs_addr = VT1724_REVO_CS0, .cs_none = VT1724_REVO_CS0 | VT1724_REVO_CS1, .add_flags = 
VT1724_REVO_CCLK, /* high at init */ .mask_flags = 0, }; static struct snd_pt2258 ptc_revo51_volume; /* AK4358 for AP192 DAC, AK5385A for ADC */ static void ap192_set_rate_val(struct snd_akm4xxx *ak, unsigned int rate) { struct snd_ice1712 *ice = ak->private_data[0]; int dfs; revo_set_rate_val(ak, rate); /* reset CKS */ snd_ice1712_gpio_write_bits(ice, 1 << 8, rate > 96000 ? 1 << 8 : 0); /* reset DFS pins of AK5385A for ADC, too */ if (rate > 96000) dfs = 2; else if (rate > 48000) dfs = 1; else dfs = 0; snd_ice1712_gpio_write_bits(ice, 3 << 9, dfs << 9); /* reset ADC */ snd_ice1712_gpio_write_bits(ice, 1 << 11, 0); snd_ice1712_gpio_write_bits(ice, 1 << 11, 1 << 11); } static const struct snd_akm4xxx_dac_channel ap192_dac[] = { AK_DAC("PCM Playback Volume", 2) }; static struct snd_akm4xxx akm_ap192 __devinitdata = { .type = SND_AK4358, .num_dacs = 2, .ops = { .set_rate_val = ap192_set_rate_val }, .dac_info = ap192_dac, }; static struct snd_ak4xxx_private akm_ap192_priv __devinitdata = { .caddr = 2, .cif = 0, .data_mask = VT1724_REVO_CDOUT, .clk_mask = VT1724_REVO_CCLK, .cs_mask = VT1724_REVO_CS0 | VT1724_REVO_CS1, .cs_addr = VT1724_REVO_CS1, .cs_none = VT1724_REVO_CS0 | VT1724_REVO_CS1, .add_flags = VT1724_REVO_CCLK, /* high at init */ .mask_flags = 0, }; /* AK4114 support on Audiophile 192 */ /* CDTO (pin 32) -- GPIO2 pin 52 * CDTI (pin 33) -- GPIO3 pin 53 (shared with AK4358) * CCLK (pin 34) -- GPIO1 pin 51 (shared with AK4358) * CSN (pin 35) -- GPIO7 pin 59 */ #define AK4114_ADDR 0x02 static void write_data(struct snd_ice1712 *ice, unsigned int gpio, unsigned int data, int idx) { for (; idx >= 0; idx--) { /* drop clock */ gpio &= ~VT1724_REVO_CCLK; snd_ice1712_gpio_write(ice, gpio); udelay(1); /* set data */ if (data & (1 << idx)) gpio |= VT1724_REVO_CDOUT; else gpio &= ~VT1724_REVO_CDOUT; snd_ice1712_gpio_write(ice, gpio); udelay(1); /* raise clock */ gpio |= VT1724_REVO_CCLK; snd_ice1712_gpio_write(ice, gpio); udelay(1); } } static unsigned char 
read_data(struct snd_ice1712 *ice, unsigned int gpio, int idx) { unsigned char data = 0; for (; idx >= 0; idx--) { /* drop clock */ gpio &= ~VT1724_REVO_CCLK; snd_ice1712_gpio_write(ice, gpio); udelay(1); /* read data */ if (snd_ice1712_gpio_read(ice) & VT1724_REVO_CDIN) data |= (1 << idx); udelay(1); /* raise clock */ gpio |= VT1724_REVO_CCLK; snd_ice1712_gpio_write(ice, gpio); udelay(1); } return data; } static unsigned int ap192_4wire_start(struct snd_ice1712 *ice) { unsigned int tmp; snd_ice1712_save_gpio_status(ice); tmp = snd_ice1712_gpio_read(ice); tmp |= VT1724_REVO_CCLK; /* high at init */ tmp |= VT1724_REVO_CS0; tmp &= ~VT1724_REVO_CS1; snd_ice1712_gpio_write(ice, tmp); udelay(1); return tmp; } static void ap192_4wire_finish(struct snd_ice1712 *ice, unsigned int tmp) { tmp |= VT1724_REVO_CS1; tmp |= VT1724_REVO_CS0; snd_ice1712_gpio_write(ice, tmp); udelay(1); snd_ice1712_restore_gpio_status(ice); } static void ap192_ak4114_write(void *private_data, unsigned char addr, unsigned char data) { struct snd_ice1712 *ice = private_data; unsigned int tmp, addrdata; tmp = ap192_4wire_start(ice); addrdata = (AK4114_ADDR << 6) | 0x20 | (addr & 0x1f); addrdata = (addrdata << 8) | data; write_data(ice, tmp, addrdata, 15); ap192_4wire_finish(ice, tmp); } static unsigned char ap192_ak4114_read(void *private_data, unsigned char addr) { struct snd_ice1712 *ice = private_data; unsigned int tmp; unsigned char data; tmp = ap192_4wire_start(ice); write_data(ice, tmp, (AK4114_ADDR << 6) | (addr & 0x1f), 7); data = read_data(ice, tmp, 7); ap192_4wire_finish(ice, tmp); return data; } static int __devinit ap192_ak4114_init(struct snd_ice1712 *ice) { static const unsigned char ak4114_init_vals[] = { AK4114_RST | AK4114_PWN | AK4114_OCKS0 | AK4114_OCKS1, AK4114_DIF_I24I2S, AK4114_TX1E, AK4114_EFH_1024 | AK4114_DIT | AK4114_IPS(1), 0, 0 }; static const unsigned char ak4114_init_txcsb[] = { 0x41, 0x02, 0x2c, 0x00, 0x00 }; struct ak4114 *ak; int err; err = snd_ak4114_create(ice->card, 
ap192_ak4114_read, ap192_ak4114_write, ak4114_init_vals, ak4114_init_txcsb, ice, &ak); /* AK4114 in Revo cannot detect external rate correctly. * No reason to stop capture stream due to incorrect checks */ ak->check_flags = AK4114_CHECK_NO_RATE; return 0; /* error ignored; it's no fatal error */ } static int __devinit revo_init(struct snd_ice1712 *ice) { struct snd_akm4xxx *ak; int err; /* determine I2C, DACs and ADCs */ switch (ice->eeprom.subvendor) { case VT1724_SUBDEVICE_REVOLUTION71: ice->num_total_dacs = 8; ice->num_total_adcs = 2; ice->gpio.i2s_mclk_changed = revo_i2s_mclk_changed; break; case VT1724_SUBDEVICE_REVOLUTION51: ice->num_total_dacs = 8; ice->num_total_adcs = 2; break; case VT1724_SUBDEVICE_AUDIOPHILE192: ice->num_total_dacs = 2; ice->num_total_adcs = 2; break; default: snd_BUG(); return -EINVAL; } /* second stage of initialization, analog parts and others */ ak = ice->akm = kcalloc(2, sizeof(struct snd_akm4xxx), GFP_KERNEL); if (! ak) return -ENOMEM; switch (ice->eeprom.subvendor) { case VT1724_SUBDEVICE_REVOLUTION71: ice->akm_codecs = 2; err = snd_ice1712_akm4xxx_init(ak, &akm_revo_front, &akm_revo_front_priv, ice); if (err < 0) return err; err = snd_ice1712_akm4xxx_init(ak+1, &akm_revo_surround, &akm_revo_surround_priv, ice); if (err < 0) return err; /* unmute all codecs */ snd_ice1712_gpio_write_bits(ice, VT1724_REVO_MUTE, VT1724_REVO_MUTE); break; case VT1724_SUBDEVICE_REVOLUTION51: ice->akm_codecs = 2; err = snd_ice1712_akm4xxx_init(ak, &akm_revo51, &akm_revo51_priv, ice); if (err < 0) return err; err = snd_ice1712_akm4xxx_init(ak+1, &akm_revo51_adc, &akm_revo51_adc_priv, ice); if (err < 0) return err; err = revo51_i2c_init(ice, &ptc_revo51_volume); if (err < 0) return err; /* unmute all codecs */ snd_ice1712_gpio_write_bits(ice, VT1724_REVO_MUTE, VT1724_REVO_MUTE); break; case VT1724_SUBDEVICE_AUDIOPHILE192: ice->akm_codecs = 1; err = snd_ice1712_akm4xxx_init(ak, &akm_ap192, &akm_ap192_priv, ice); if (err < 0) return err; /* unmute all 
codecs */ snd_ice1712_gpio_write_bits(ice, VT1724_REVO_MUTE, VT1724_REVO_MUTE); break; } return 0; } static int __devinit revo_add_controls(struct snd_ice1712 *ice) { struct revo51_spec *spec; int err; switch (ice->eeprom.subvendor) { case VT1724_SUBDEVICE_REVOLUTION71: err = snd_ice1712_akm4xxx_build_controls(ice); if (err < 0) return err; break; case VT1724_SUBDEVICE_REVOLUTION51: err = snd_ice1712_akm4xxx_build_controls(ice); if (err < 0) return err; spec = ice->spec; err = snd_pt2258_build_controls(spec->pt2258); if (err < 0) return err; break; case VT1724_SUBDEVICE_AUDIOPHILE192: err = snd_ice1712_akm4xxx_build_controls(ice); if (err < 0) return err; err = ap192_ak4114_init(ice); if (err < 0) return err; break; } return 0; } /* entry point */ struct snd_ice1712_card_info snd_vt1724_revo_cards[] __devinitdata = { { .subvendor = VT1724_SUBDEVICE_REVOLUTION71, .name = "M Audio Revolution-7.1", .model = "revo71", .chip_init = revo_init, .build_controls = revo_add_controls, }, { .subvendor = VT1724_SUBDEVICE_REVOLUTION51, .name = "M Audio Revolution-5.1", .model = "revo51", .chip_init = revo_init, .build_controls = revo_add_controls, }, { .subvendor = VT1724_SUBDEVICE_AUDIOPHILE192, .name = "M Audio Audiophile192", .model = "ap192", .chip_init = revo_init, .build_controls = revo_add_controls, }, { } /* terminator */ };
gpl-2.0
buaaqbh/linux-imx6-solo
arch/sh/boot/compressed/misc.c
11326
2699
/*
 * arch/sh/boot/compressed/misc.c
 *
 * This is a collection of several routines from gzip-1.0.3
 * adapted for Linux.
 *
 * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
 *
 * Adapted for SH by Stuart Menefy, Aug 1999
 *
 * Modified to use standard LinuxSH BIOS by Greg Banks 7Jul2000
 */

#include <asm/uaccess.h>
#include <asm/addrspace.h>
#include <asm/page.h>

/*
 * gzip declarations
 */

#define STATIC static

#undef memset
#undef memcpy
#define memzero(s, n)     memset ((s), 0, (n))

/* cache.c */
#define CACHE_ENABLE      0
#define CACHE_DISABLE     1
int cache_control(unsigned int command);

extern char input_data[];
extern int input_len;
static unsigned char *output;

static void error(char *m);

int puts(const char *);

extern int _text;		/* Defined in vmlinux.lds.S */
extern int _end;
static unsigned long free_mem_ptr;
static unsigned long free_mem_end_ptr;

/* bzip2 needs a much larger decompression heap than the other formats */
#ifdef CONFIG_HAVE_KERNEL_BZIP2
#define HEAP_SIZE	0x400000
#else
#define HEAP_SIZE	0x10000
#endif

#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif

#ifdef CONFIG_KERNEL_BZIP2
#include "../../../../lib/decompress_bunzip2.c"
#endif

#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif

#ifdef CONFIG_KERNEL_XZ
#include "../../../../lib/decompress_unxz.c"
#endif

#ifdef CONFIG_KERNEL_LZO
#include "../../../../lib/decompress_unlzo.c"
#endif

/* Console output stub for the decompressor (no SCI driver here yet) */
int puts(const char *s)
{
	/* This should be updated to use the sh-sci routines */
	return 0;
}

/*
 * Minimal freestanding memset/memcpy for the decompressor.
 * FIX: loop counters were 'int', giving a signed/unsigned comparison
 * against the size_t length (and truncation for very large n); use
 * size_t throughout.
 */
void* memset(void* s, int c, size_t n)
{
	size_t i;
	char *ss = (char*)s;

	for (i = 0; i < n; i++)
		ss[i] = c;
	return s;
}

void* memcpy(void* __dest, __const void* __src, size_t __n)
{
	size_t i;
	char *d = (char *)__dest, *s = (char *)__src;

	for (i = 0; i < __n; i++)
		d[i] = s[i];
	return __dest;
}

/* Print a message and halt; called on any decompression failure */
static void error(char *x)
{
	puts("\n\n");
	puts(x);
	puts("\n\n -- System halted");

	while(1);	/* Halt */
}

#ifdef CONFIG_SUPERH64
#define stackalign	8
#else
#define stackalign	4
#endif

#define STACK_SIZE (4096)
long __attribute__ ((aligned(stackalign))) user_stack[STACK_SIZE];
/* Initial stack pointer for the decompressor (top of user_stack) */
long *stack_start = &user_stack[STACK_SIZE];

/*
 * Entry point called from the boot stub: compute the load address of
 * the uncompressed kernel, set up the decompression heap after _end,
 * and run the format-specific decompress() included above.
 */
void decompress_kernel(void)
{
	unsigned long output_addr;

#ifdef CONFIG_SUPERH64
	/* sh64: fixed offset past the start of memory */
	output_addr = (CONFIG_MEMORY_START + 0x2000);
#else
	/* sh32: one page past the decompressor's own text */
	output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
#if defined(CONFIG_29BIT)
	/* 29-bit mode: use the uncached P2 segment alias */
	output_addr |= P2SEG;
#endif
#endif

	output = (unsigned char *)output_addr;
	/* heap for the decompressor lives just past the loaded image */
	free_mem_ptr = (unsigned long)&_end;
	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;

	puts("Uncompressing Linux... ");
	/* cache enabled during decompression for speed, off before jump */
	cache_control(CACHE_ENABLE);
	decompress(input_data, input_len, NULL, NULL, output, NULL, error);
	cache_control(CACHE_DISABLE);
	puts("Ok, booting the kernel.\n");
}
gpl-2.0
HenryGiraldo/source
target/linux/ar71xx/files/arch/mips/ath79/mach-tl-wr942n-v1.c
63
8153
/* * TP-Link TL-WR942N(RU) v1 board support * * Copyright (C) 2017 Sergey Studzinski <serguzhg@gmail.com> * Thanks to Henryk Heisig <hyniu@o2.pl> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/platform_device.h> #include <linux/ath9k_platform.h> #include <asm/mach-ath79/ar71xx_regs.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/spi/spi_gpio.h> #include <linux/spi/74x164.h> #include "common.h" #include "dev-m25p80.h" #include "machtypes.h" #include "dev-eth.h" #include "dev-gpio-buttons.h" #include "dev-leds-gpio.h" #include "dev-spi.h" #include "dev-usb.h" #include "dev-wmac.h" #include "nvram.h" #define TL_WR942N_V1_KEYS_POLL_INTERVAL 20 #define TL_WR942N_V1_KEYS_DEBOUNCE_INTERVAL \ (3 * TL_WR942N_V1_KEYS_POLL_INTERVAL) #define TL_WR942N_V1_GPIO_BTN_RESET 1 #define TL_WR942N_V1_GPIO_BTN_RFKILL 2 #define TL_WR942N_V1_GPIO_UART_TX 4 #define TL_WR942N_V1_GPIO_UART_RX 5 #define TL_WR942N_V1_GPIO_LED_USB2 14 #define TL_WR942N_V1_GPIO_LED_USB1 15 #define TL_WR942N_V1_GPIO_SHIFT_OE 16 #define TL_WR942N_V1_GPIO_SHIFT_SER 17 #define TL_WR942N_V1_GPIO_SHIFT_SRCLK 18 #define TL_WR942N_V1_GPIO_SHIFT_SRCLR 19 #define TL_WR942N_V1_GPIO_SHIFT_RCLK 20 #define TL_WR942N_V1_GPIO_LED_WPS 21 #define TL_WR942N_V1_GPIO_LED_STATUS 22 #define TL_WR942N_V1_74HC_GPIO_BASE 32 #define TL_WR942N_V1_74HC_GPIO_LED_LAN4 (TL_WR942N_V1_74HC_GPIO_BASE + 0) #define TL_WR942N_V1_74HC_GPIO_LED_LAN3 (TL_WR942N_V1_74HC_GPIO_BASE + 1) #define TL_WR942N_V1_74HC_GPIO_LED_LAN2 (TL_WR942N_V1_74HC_GPIO_BASE + 2) #define TL_WR942N_V1_74HC_GPIO_LED_LAN1 (TL_WR942N_V1_74HC_GPIO_BASE + 3) #define TL_WR942N_V1_74HC_GPIO_LED_WAN_GREEN (TL_WR942N_V1_74HC_GPIO_BASE + 4) #define TL_WR942N_V1_74HC_GPIO_LED_WAN_AMBER (TL_WR942N_V1_74HC_GPIO_BASE + 5) #define TL_WR942N_V1_74HC_GPIO_LED_WLAN (TL_WR942N_V1_74HC_GPIO_BASE + 6) #define 
TL_WR942N_V1_74HC_GPIO_HUB_RESET (TL_WR942N_V1_74HC_GPIO_BASE + 7) /* from u-boot sources */ #define TL_WR942N_V1_SSR_BIT_0 0 #define TL_WR942N_V1_SSR_BIT_1 1 #define TL_WR942N_V1_SSR_BIT_2 2 #define TL_WR942N_V1_SSR_BIT_3 3 #define TL_WR942N_V1_SSR_BIT_4 4 #define TL_WR942N_V1_SSR_BIT_5 5 #define TL_WR942N_V1_SSR_BIT_6 6 #define TL_WR942N_V1_SSR_BIT_7 7 #define TL_WR942N_V1_WMAC_CALDATA_OFFSET 0x1000 #define TL_WR942N_V1_DEFAULT_MAC_ADDR 0x1fe40008 #define TL_WR942N_V1_DEFAULT_MAC_SIZE 0x200 #define GPIO_IN_ENABLE0_UART_SIN_LSB 8 #define GPIO_IN_ENABLE0_UART_SIN_MASK 0x0000ff00 static struct gpio_led tl_wr942n_v1_leds_gpio[] __initdata = { { .name = "tl-wr942n-v1:green:status", .gpio = TL_WR942N_V1_GPIO_LED_STATUS, .active_low = 1, }, { .name = "tl-wr942n-v1:green:wlan", .gpio = TL_WR942N_V1_74HC_GPIO_LED_WLAN, .active_low = 1, }, { .name = "tl-wr942n-v1:green:lan1", .gpio = TL_WR942N_V1_74HC_GPIO_LED_LAN1, .active_low = 1, }, { .name = "tl-wr942n-v1:green:lan2", .gpio = TL_WR942N_V1_74HC_GPIO_LED_LAN2, .active_low = 1, }, { .name = "tl-wr942n-v1:green:lan3", .gpio = TL_WR942N_V1_74HC_GPIO_LED_LAN3, .active_low = 1, }, { .name = "tl-wr942n-v1:green:lan4", .gpio = TL_WR942N_V1_74HC_GPIO_LED_LAN4, .active_low = 1, }, { .name = "tl-wr942n-v1:green:wan", .gpio = TL_WR942N_V1_74HC_GPIO_LED_WAN_GREEN, .active_low = 1, }, { .name = "tl-wr942n-v1:amber:wan", .gpio = TL_WR942N_V1_74HC_GPIO_LED_WAN_AMBER, .active_low = 1, }, { .name = "tl-wr942n-v1:green:wps", .gpio = TL_WR942N_V1_GPIO_LED_WPS, .active_low = 1, }, { .name = "tl-wr942n-v1:green:usb1", .gpio = TL_WR942N_V1_GPIO_LED_USB1, .active_low = 1, }, { .name = "tl-wr942n-v1:green:usb2", .gpio = TL_WR942N_V1_GPIO_LED_USB2, .active_low = 1, }, }; static struct gpio_keys_button tl_wr942n_v1_gpio_keys[] __initdata = { { .desc = "Reset button", .type = EV_KEY, .code = KEY_RESTART, .debounce_interval = TL_WR942N_V1_KEYS_DEBOUNCE_INTERVAL, .gpio = TL_WR942N_V1_GPIO_BTN_RESET, .active_low = 1, }, { .desc = "RFKILL button", 
		.type = EV_KEY,
		.code = KEY_RFKILL,
		.debounce_interval = TL_WR942N_V1_KEYS_DEBOUNCE_INTERVAL,
		.gpio = TL_WR942N_V1_GPIO_BTN_RFKILL,
		.active_low = 1,
	},
};

/* GPIO bit-banged SPI bus driving the 74x164 LED shift register
 * (no MISO line - the shift register is write-only). */
static struct spi_gpio_platform_data tl_wr942n_v1_spi_data = {
	.sck = TL_WR942N_V1_GPIO_SHIFT_SRCLK,
	.miso = SPI_GPIO_NO_MISO,
	.mosi = TL_WR942N_V1_GPIO_SHIFT_SER,
	.num_chipselect = 1,
};

/* Initial shift-register contents: all outputs high, i.e. all
 * active-low LEDs off and the USB hub held out of reset. */
static u8 tl_wr942n_v1_ssr_initdata[] = {
	BIT(TL_WR942N_V1_SSR_BIT_7) | BIT(TL_WR942N_V1_SSR_BIT_6) |
	BIT(TL_WR942N_V1_SSR_BIT_5) | BIT(TL_WR942N_V1_SSR_BIT_4) |
	BIT(TL_WR942N_V1_SSR_BIT_3) | BIT(TL_WR942N_V1_SSR_BIT_2) |
	BIT(TL_WR942N_V1_SSR_BIT_1) | BIT(TL_WR942N_V1_SSR_BIT_0)
};

/* Map the 74x164 outputs into the GPIO number space at 74HC_GPIO_BASE */
static struct gen_74x164_chip_platform_data tl_wr942n_v1_ssr_data = {
	.base = TL_WR942N_V1_74HC_GPIO_BASE,
	.num_registers = ARRAY_SIZE(tl_wr942n_v1_ssr_initdata),
	.init_data = tl_wr942n_v1_ssr_initdata,
};

static struct platform_device tl_wr942n_v1_spi_device = {
	.name = "spi_gpio",
	.id = 1,
	.dev = {
		.platform_data = &tl_wr942n_v1_spi_data,
	},
};

/* The shift register's latch (RCLK) is used as the SPI chip-select */
static struct spi_board_info tl_wr942n_v1_spi_info[] = {
	{
		.bus_num = 1,
		.chip_select = 0,
		.max_speed_hz = 10000000,
		.modalias = "74x164",
		.platform_data = &tl_wr942n_v1_ssr_data,
		.controller_data = (void *) TL_WR942N_V1_GPIO_SHIFT_RCLK,
	},
};

/*
 * Look up the MAC address labelled 'name' in the vendor nvram area
 * at TL_WR942N_V1_DEFAULT_MAC_ADDR; logs an error if not found
 * (mac is left unmodified in that case).
 */
static void tl_wr942n_v1_get_mac(const char *name, char *mac)
{
	u8 *nvram = (u8 *) KSEG1ADDR(TL_WR942N_V1_DEFAULT_MAC_ADDR);
	int err;

	err = ath79_nvram_parse_mac_addr(nvram,
					 TL_WR942N_V1_DEFAULT_MAC_SIZE,
					 name, mac);
	if (err)
		pr_err("no MAC address found for %s\n", name);
}

/*
 * Board setup: registers flash, the LED shift register, LEDs, buttons,
 * ethernet (with swapped PHYs), wireless and USB.
 */
static void __init tl_wr942n_v1_setup(void)
{
	u8 *art = (u8 *) KSEG1ADDR(0x1fff0000);
	u8 tmpmac[ETH_ALEN];
	void __iomem *base;
	u32 t;

	ath79_register_m25p80(NULL);

	spi_register_board_info(tl_wr942n_v1_spi_info,
				ARRAY_SIZE(tl_wr942n_v1_spi_info));

	platform_device_register(&tl_wr942n_v1_spi_device);

	/* Check inherited UART RX GPIO definition */
	base = ioremap(AR71XX_GPIO_BASE, AR71XX_GPIO_SIZE);
	t = __raw_readl(base + QCA956X_GPIO_REG_IN_ENABLE0);
	if (((t & GPIO_IN_ENABLE0_UART_SIN_MASK) >>
	     GPIO_IN_ENABLE0_UART_SIN_LSB) == TL_WR942N_V1_GPIO_LED_USB1) {
		/* Bootloader routed the UART onto the USB LED pins;
		 * move the USB LEDs (array entries 9/10) to the UART
		 * GPIOs instead.  NOTE(review): assumes entries 9 and
		 * 10 are usb1/usb2 - keep in sync with the LED table. */
		pr_warn("Active UART detected on USBLED's GPIOs!\n");
		tl_wr942n_v1_leds_gpio[9].gpio = TL_WR942N_V1_GPIO_UART_TX;
		tl_wr942n_v1_leds_gpio[10].gpio = TL_WR942N_V1_GPIO_UART_RX;
	}

	ath79_register_leds_gpio(-1, ARRAY_SIZE(tl_wr942n_v1_leds_gpio),
				 tl_wr942n_v1_leds_gpio);

	ath79_register_gpio_keys_polled(-1, TL_WR942N_V1_KEYS_POLL_INTERVAL,
					ARRAY_SIZE(tl_wr942n_v1_gpio_keys),
					tl_wr942n_v1_gpio_keys);

	tl_wr942n_v1_get_mac("MAC:", tmpmac);

	/* swap PHYs */
	ath79_setup_qca956x_eth_cfg(QCA956X_ETH_CFG_SW_PHY_SWAP |
				    QCA956X_ETH_CFG_SW_PHY_ADDR_SWAP);

	ath79_register_mdio(0, 0x0);
	ath79_register_mdio(1, 0x0);

	/* WAN port */
	ath79_init_mac(ath79_eth0_data.mac_addr, tmpmac, 1);
	ath79_eth0_data.phy_if_mode = PHY_INTERFACE_MODE_MII;
	ath79_eth0_data.speed = SPEED_100;
	ath79_eth0_data.duplex = DUPLEX_FULL;
	/* swaped PHYs */
	ath79_eth0_data.phy_mask = BIT(0);
	ath79_register_eth(0);

	/* LAN ports */
	ath79_eth1_data.phy_if_mode = PHY_INTERFACE_MODE_GMII;
	ath79_init_mac(ath79_eth1_data.mac_addr, tmpmac, 0);
	ath79_eth1_data.speed = SPEED_1000;
	ath79_eth1_data.duplex = DUPLEX_FULL;
	/* swaped PHYs */
	ath79_switch_data.phy_poll_mask |= BIT(0);
	ath79_switch_data.phy4_mii_en = 1;
	ath79_register_eth(1);

	/* wireless calibration data lives in the art partition */
	ath79_register_wmac(art + TL_WR942N_V1_WMAC_CALDATA_OFFSET, tmpmac);

	ath79_register_usb();

	/* Shift-register outputs: power the hub, enable and release
	 * the LED shift register */
	gpio_request_one(TL_WR942N_V1_74HC_GPIO_HUB_RESET,
			 GPIOF_OUT_INIT_HIGH | GPIOF_EXPORT_DIR_FIXED,
			 "USB power");

	gpio_request_one(TL_WR942N_V1_GPIO_SHIFT_OE,
			 GPIOF_OUT_INIT_LOW | GPIOF_EXPORT_DIR_FIXED,
			 "LED control");

	gpio_request_one(TL_WR942N_V1_GPIO_SHIFT_SRCLR,
			 GPIOF_OUT_INIT_HIGH | GPIOF_EXPORT_DIR_FIXED,
			 "LED reset");
}

MIPS_MACHINE(ATH79_MACH_TL_WR942N_V1, "TL-WR942N-V1", "TP-LINK TL-WR942N v1",
	     tl_wr942n_v1_setup);
gpl-2.0